file_name (string) | name (string) | original_source_type (string) | source_type (string) | source_definition (string) | source (dict) | source_range (dict) | file_context (string) | dependencies (dict) | opens_and_abbrevs (list) | vconfig (dict) | interleaved (bool) | verbose_type (string) | effect (string) | effect_flags (sequence) | mutual_with (sequence) | ideal_premises (sequence) | proof_features (sequence) | is_simple_lemma (bool) | is_div (bool) | is_proof (bool) | is_simply_typed (bool) | is_type (bool) | partial_definition (string) | completed_definiton (string) | isa_cross_project_example (bool)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
Spec.Frodo.Pack.fst | Spec.Frodo.Pack.frodo_pack_inner | val frodo_pack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_pack_state #n1 #n2 d i
-> frodo_pack_state #n1 #n2 d (i + 1) | val frodo_pack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_pack_state #n1 #n2 d i
-> frodo_pack_state #n1 #n2 d (i + 1) | let frodo_pack_inner #n1 #n2 d a i s =
s @| frodo_pack8 d (Seq.sub a (8 * i) 8) | {
"file_name": "specs/frodo/Spec.Frodo.Pack.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 42,
"end_line": 61,
"start_col": 0,
"start_line": 60
} | module Spec.Frodo.Pack
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Spec.Matrix
module Seq = Lib.Sequence
module Loops = Lib.LoopCombinators
#reset-options "--z3rlimit 100 --max_fuel 0 --max_ifuel 0 --using_facts_from '* -FStar +FStar.Pervasives +FStar.UInt -Spec +Spec.Frodo +Spec.Frodo.Params +Spec.Matrix'"
/// Pack
val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8
-> lbytes d
let frodo_pack8 d a =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let a0 = Seq.index a 0 &. maskd in
let a1 = Seq.index a 1 &. maskd in
let a2 = Seq.index a 2 &. maskd in
let a3 = Seq.index a 3 &. maskd in
let a4 = Seq.index a 4 &. maskd in
let a5 = Seq.index a 5 &. maskd in
let a6 = Seq.index a 6 &. maskd in
let a7 = Seq.index a 7 &. maskd in
let templong =
to_u128 a0 <<. size (7 * d)
|. to_u128 a1 <<. size (6 * d)
|. to_u128 a2 <<. size (5 * d)
|. to_u128 a3 <<. size (4 * d)
|. to_u128 a4 <<. size (3 * d)
|. to_u128 a5 <<. size (2 * d)
|. to_u128 a6 <<. size (1 * d)
|. to_u128 a7 <<. size (0 * d)
in
let v16 = uint_to_bytes_be templong in
Seq.sub v16 (16 - d) d
val frodo_pack_state:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> i:size_nat{i <= (n1 * n2) / 8}
-> Type0
let frodo_pack_state #n1 #n2 d i = lseq uint8 (d * i)
val frodo_pack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_pack_state #n1 #n2 d i | {
"checked_file": "/",
"dependencies": [
"Spec.Matrix.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Frodo.Pack.fst"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "Spec.Matrix",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
d: Lib.IntTypes.size_nat{d * (n1 * n2 / 8) <= Lib.IntTypes.max_size_t /\ d <= 16} ->
a: Spec.Matrix.matrix n1 n2 ->
i: Lib.IntTypes.size_nat{i < n1 * n2 / 8} ->
s: Spec.Frodo.Pack.frodo_pack_state d i
-> Spec.Frodo.Pack.frodo_pack_state d (i + 1) | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_nat",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.max_size_t",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Modulus",
"Prims.op_Division",
"Spec.Matrix.matrix",
"Prims.op_LessThan",
"Spec.Frodo.Pack.frodo_pack_state",
"Lib.Sequence.op_At_Bar",
"Lib.IntTypes.uint8",
"Spec.Frodo.Pack.frodo_pack8",
"Lib.Sequence.sub",
"Spec.Matrix.elem",
"Prims.op_Addition"
] | [] | false | false | false | false | false | let frodo_pack_inner #n1 #n2 d a i s =
| s @| frodo_pack8 d (Seq.sub a (8 * i) 8) | false |
Spec.Frodo.Pack.fst | Spec.Frodo.Pack.frodo_pack8 | val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8
-> lbytes d | val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8
-> lbytes d | let frodo_pack8 d a =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let a0 = Seq.index a 0 &. maskd in
let a1 = Seq.index a 1 &. maskd in
let a2 = Seq.index a 2 &. maskd in
let a3 = Seq.index a 3 &. maskd in
let a4 = Seq.index a 4 &. maskd in
let a5 = Seq.index a 5 &. maskd in
let a6 = Seq.index a 6 &. maskd in
let a7 = Seq.index a 7 &. maskd in
let templong =
to_u128 a0 <<. size (7 * d)
|. to_u128 a1 <<. size (6 * d)
|. to_u128 a2 <<. size (5 * d)
|. to_u128 a3 <<. size (4 * d)
|. to_u128 a4 <<. size (3 * d)
|. to_u128 a5 <<. size (2 * d)
|. to_u128 a6 <<. size (1 * d)
|. to_u128 a7 <<. size (0 * d)
in
let v16 = uint_to_bytes_be templong in
Seq.sub v16 (16 - d) d | {
"file_name": "specs/frodo/Spec.Frodo.Pack.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 24,
"end_line": 42,
"start_col": 0,
"start_line": 21
} | module Spec.Frodo.Pack
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Spec.Matrix
module Seq = Lib.Sequence
module Loops = Lib.LoopCombinators
#reset-options "--z3rlimit 100 --max_fuel 0 --max_ifuel 0 --using_facts_from '* -FStar +FStar.Pervasives +FStar.UInt -Spec +Spec.Frodo +Spec.Frodo.Params +Spec.Matrix'"
/// Pack
val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8 | {
"checked_file": "/",
"dependencies": [
"Spec.Matrix.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Frodo.Pack.fst"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "Spec.Matrix",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | d: Lib.IntTypes.size_nat{d <= 16} -> a: Lib.Sequence.lseq Lib.IntTypes.uint16 8
-> Lib.ByteSequence.lbytes d | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Lib.Sequence.lseq",
"Lib.IntTypes.uint16",
"Lib.Sequence.sub",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Lib.IntTypes.numbytes",
"Lib.IntTypes.U128",
"Prims.op_Subtraction",
"Lib.IntTypes.int_t",
"Lib.ByteSequence.uint_to_bytes_be",
"Lib.IntTypes.op_Bar_Dot",
"Lib.IntTypes.op_Less_Less_Dot",
"Lib.IntTypes.to_u128",
"Lib.IntTypes.U16",
"Lib.IntTypes.size",
"FStar.Mul.op_Star",
"Lib.IntTypes.op_Amp_Dot",
"Lib.Sequence.index",
"Lib.IntTypes.op_Subtraction_Dot",
"Lib.IntTypes.to_u16",
"Lib.IntTypes.U32",
"Lib.IntTypes.u32",
"Lib.IntTypes.u16",
"Lib.ByteSequence.lbytes"
] | [] | false | false | false | false | false | let frodo_pack8 d a =
| let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let a0 = Seq.index a 0 &. maskd in
let a1 = Seq.index a 1 &. maskd in
let a2 = Seq.index a 2 &. maskd in
let a3 = Seq.index a 3 &. maskd in
let a4 = Seq.index a 4 &. maskd in
let a5 = Seq.index a 5 &. maskd in
let a6 = Seq.index a 6 &. maskd in
let a7 = Seq.index a 7 &. maskd in
let templong =
to_u128 a0 <<. size (7 * d) |. to_u128 a1 <<. size (6 * d) |. to_u128 a2 <<. size (5 * d) |.
to_u128 a3 <<. size (4 * d) |.
to_u128 a4 <<. size (3 * d) |.
to_u128 a5 <<. size (2 * d) |.
to_u128 a6 <<. size (1 * d) |.
to_u128 a7 <<. size (0 * d)
in
let v16 = uint_to_bytes_be templong in
Seq.sub v16 (16 - d) d | false |
Spec.Chacha20.fst | Spec.Chacha20.column_round | val column_round:shuffle | val column_round:shuffle | let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15 | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 25,
"end_line": 54,
"start_col": 0,
"start_line": 50
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Spec.Chacha20.shuffle | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.op_At",
"Spec.Chacha20.state",
"Spec.Chacha20.quarter_round"
] | [] | false | false | false | true | false | let column_round:shuffle =
| quarter_round 0 4 8 12 @ quarter_round 1 5 9 13 @ quarter_round 2 6 10 14 @ quarter_round 3 7 11 15 | false |
Spec.Chacha20.fst | Spec.Chacha20.line | val line (a b d: idx) (s: rotval U32) (m: state) : Tot state | val line (a b d: idx) (s: rotval U32) (m: state) : Tot state | let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 49,
"end_line": 42,
"start_col": 0,
"start_line": 40
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
a: Spec.Chacha20.idx ->
b: Spec.Chacha20.idx ->
d: Spec.Chacha20.idx ->
s: Lib.IntTypes.rotval Lib.IntTypes.U32 ->
m: Spec.Chacha20.state
-> Spec.Chacha20.state | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.idx",
"Lib.IntTypes.rotval",
"Lib.IntTypes.U32",
"Spec.Chacha20.state",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Prims.l_and",
"Prims.eq2",
"FStar.Seq.Base.seq",
"Lib.Sequence.to_seq",
"FStar.Seq.Base.upd",
"Lib.IntTypes.rotate_left",
"Lib.IntTypes.logxor",
"Lib.Sequence.index",
"Prims.l_Forall",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Subtraction",
"Prims.pow2",
"Prims.l_imp",
"Prims.op_LessThan",
"Prims.op_disEquality",
"Prims.l_or",
"FStar.Seq.Base.index",
"Lib.Sequence.op_String_Assignment",
"Lib.IntTypes.uint32",
"Lib.IntTypes.op_Less_Less_Less_Dot",
"Lib.IntTypes.op_Hat_Dot",
"Lib.Sequence.op_String_Access",
"Lib.IntTypes.add_mod",
"Lib.IntTypes.op_Plus_Dot"
] | [] | false | false | false | true | false | let line (a b d: idx) (s: rotval U32) (m: state) : Tot state =
| let m = m.[ a ] <- (m.[ a ] +. m.[ b ]) in
let m = m.[ d ] <- ((m.[ d ] ^. m.[ a ]) <<<. s) in
m | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_add_counter | val chacha20_add_counter (s0: state) (ctr: counter) : Tot state | val chacha20_add_counter (s0: state) (ctr: counter) : Tot state | let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 31,
"end_line": 72,
"start_col": 0,
"start_line": 71
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1 | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | s0: Spec.Chacha20.state -> ctr: Spec.Chacha20.counter -> Spec.Chacha20.state | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.state",
"Spec.Chacha20.counter",
"Lib.Sequence.op_String_Assignment",
"Lib.IntTypes.uint32",
"Lib.IntTypes.op_Plus_Dot",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC",
"Lib.Sequence.op_String_Access",
"Lib.IntTypes.u32"
] | [] | false | false | false | true | false | let chacha20_add_counter (s0: state) (ctr: counter) : Tot state =
| s0.[ 12 ] <- s0.[ 12 ] +. u32 ctr | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_init | val chacha20_init (k: key) (n: nonce) (ctr0: counter) : Tot state | val chacha20_init (k: key) (n: nonce) (ctr0: counter) : Tot state | let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 4,
"end_line": 125,
"start_col": 0,
"start_line": 122
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | k: Spec.Chacha20.key -> n: Spec.Chacha20.nonce -> ctr0: Spec.Chacha20.counter -> Spec.Chacha20.state | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.key",
"Spec.Chacha20.nonce",
"Spec.Chacha20.counter",
"Spec.Chacha20.state",
"Spec.Chacha20.setup",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC",
"Prims.l_and",
"Prims.eq2",
"FStar.Seq.Base.seq",
"Lib.Sequence.to_seq",
"FStar.Seq.Base.create",
"Lib.IntTypes.mk_int",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.Sequence.index",
"Lib.Sequence.create",
"Lib.IntTypes.uint32",
"Lib.IntTypes.u32"
] | [] | false | false | false | true | false | let chacha20_init (k: key) (n: nonce) (ctr0: counter) : Tot state =
| let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st | false |
Spec.Chacha20.fst | Spec.Chacha20.sum_state | val sum_state (s0 s1: state) : Tot state | val sum_state (s0 s1: state) : Tot state | let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1 | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 17,
"end_line": 69,
"start_col": 0,
"start_line": 68
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | s0: Spec.Chacha20.state -> s1: Spec.Chacha20.state -> Spec.Chacha20.state | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.state",
"Lib.Sequence.map2",
"Lib.IntTypes.uint32",
"Lib.IntTypes.op_Plus_Dot",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC"
] | [] | false | false | false | true | false | let sum_state (s0 s1: state) : Tot state =
| map2 ( +. ) s0 s1 | false |
Spec.Frodo.Pack.fst | Spec.Frodo.Pack.frodo_unpack | val frodo_unpack:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> lbytes (d * ((n1 * n2) / 8))
-> matrix n1 n2 | val frodo_unpack:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> lbytes (d * ((n1 * n2) / 8))
-> matrix n1 n2 | let frodo_unpack #n1 #n2 d b =
Loops.repeat_gen ((n1 * n2) / 8)
(frodo_unpack_state #n1 #n2)
(frodo_unpack_inner #n1 #n2 d b)
FStar.Seq.empty | {
"file_name": "specs/frodo/Spec.Frodo.Pack.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 19,
"end_line": 125,
"start_col": 0,
"start_line": 121
} | module Spec.Frodo.Pack
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Spec.Matrix
module Seq = Lib.Sequence
module Loops = Lib.LoopCombinators
#reset-options "--z3rlimit 100 --max_fuel 0 --max_ifuel 0 --using_facts_from '* -FStar +FStar.Pervasives +FStar.UInt -Spec +Spec.Frodo +Spec.Frodo.Params +Spec.Matrix'"
/// Pack
val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8
-> lbytes d
let frodo_pack8 d a =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let a0 = Seq.index a 0 &. maskd in
let a1 = Seq.index a 1 &. maskd in
let a2 = Seq.index a 2 &. maskd in
let a3 = Seq.index a 3 &. maskd in
let a4 = Seq.index a 4 &. maskd in
let a5 = Seq.index a 5 &. maskd in
let a6 = Seq.index a 6 &. maskd in
let a7 = Seq.index a 7 &. maskd in
let templong =
to_u128 a0 <<. size (7 * d)
|. to_u128 a1 <<. size (6 * d)
|. to_u128 a2 <<. size (5 * d)
|. to_u128 a3 <<. size (4 * d)
|. to_u128 a4 <<. size (3 * d)
|. to_u128 a5 <<. size (2 * d)
|. to_u128 a6 <<. size (1 * d)
|. to_u128 a7 <<. size (0 * d)
in
let v16 = uint_to_bytes_be templong in
Seq.sub v16 (16 - d) d
val frodo_pack_state:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> i:size_nat{i <= (n1 * n2) / 8}
-> Type0
let frodo_pack_state #n1 #n2 d i = lseq uint8 (d * i)
val frodo_pack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_pack_state #n1 #n2 d i
-> frodo_pack_state #n1 #n2 d (i + 1)
let frodo_pack_inner #n1 #n2 d a i s =
s @| frodo_pack8 d (Seq.sub a (8 * i) 8)
val frodo_pack:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> lbytes (d * ((n1 * n2) / 8))
let frodo_pack #n1 #n2 d a =
Loops.repeat_gen ((n1 * n2) / 8)
(frodo_pack_state #n1 #n2 d)
(frodo_pack_inner #n1 #n2 d a)
(Seq.create 0 (u8 0))
/// Unpack
val frodo_unpack8:
d:size_nat{d <= 16}
-> b:lbytes d
-> lseq uint16 8
let frodo_unpack8 d b =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let v16 = Seq.create 16 (u8 0) in
let src = update_sub v16 (16 - d) d b in
let templong: uint_t U128 SEC = uint_from_bytes_be src in
let res = Seq.create 8 (u16 0) in
let res = res.[0] <- to_u16 (templong >>. size (7 * d)) &. maskd in
let res = res.[1] <- to_u16 (templong >>. size (6 * d)) &. maskd in
let res = res.[2] <- to_u16 (templong >>. size (5 * d)) &. maskd in
let res = res.[3] <- to_u16 (templong >>. size (4 * d)) &. maskd in
let res = res.[4] <- to_u16 (templong >>. size (3 * d)) &. maskd in
let res = res.[5] <- to_u16 (templong >>. size (2 * d)) &. maskd in
let res = res.[6] <- to_u16 (templong >>. size (1 * d)) &. maskd in
let res = res.[7] <- to_u16 (templong >>. size (0 * d)) &. maskd in
res
val frodo_unpack_state:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> i:size_nat{i <= (n1 * n2) / 8}
-> Type0
let frodo_unpack_state #n1 #n2 i = lseq uint16 (8 * i)
val frodo_unpack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> b:lbytes (d * ((n1 * n2) / 8))
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_unpack_state #n1 #n2 i
-> frodo_unpack_state #n1 #n2 (i + 1)
let frodo_unpack_inner #n1 #n2 d b i s =
s @| frodo_unpack8 d (Seq.sub b (d * i) d)
val frodo_unpack:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> lbytes (d * ((n1 * n2) / 8)) | {
"checked_file": "/",
"dependencies": [
"Spec.Matrix.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Frodo.Pack.fst"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "Spec.Matrix",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
d: Lib.IntTypes.size_nat{d * (n1 * n2 / 8) <= Lib.IntTypes.max_size_t /\ d <= 16} ->
b: Lib.ByteSequence.lbytes (d * (n1 * n2 / 8))
-> Spec.Matrix.matrix n1 n2 | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_nat",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.max_size_t",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Modulus",
"Prims.op_Division",
"Lib.ByteSequence.lbytes",
"Lib.LoopCombinators.repeat_gen",
"Spec.Frodo.Pack.frodo_unpack_state",
"Spec.Frodo.Pack.frodo_unpack_inner",
"FStar.Seq.Base.empty",
"Lib.IntTypes.uint16",
"Spec.Matrix.matrix"
] | [] | false | false | false | false | false | let frodo_unpack #n1 #n2 d b =
| Loops.repeat_gen ((n1 * n2) / 8)
(frodo_unpack_state #n1 #n2)
(frodo_unpack_inner #n1 #n2 d b)
FStar.Seq.empty | false |
Spec.Frodo.Pack.fst | Spec.Frodo.Pack.frodo_unpack_inner | val frodo_unpack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> b:lbytes (d * ((n1 * n2) / 8))
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_unpack_state #n1 #n2 i
-> frodo_unpack_state #n1 #n2 (i + 1) | val frodo_unpack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> b:lbytes (d * ((n1 * n2) / 8))
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_unpack_state #n1 #n2 i
-> frodo_unpack_state #n1 #n2 (i + 1) | let frodo_unpack_inner #n1 #n2 d b i s =
s @| frodo_unpack8 d (Seq.sub b (d * i) d) | {
"file_name": "specs/frodo/Spec.Frodo.Pack.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 44,
"end_line": 113,
"start_col": 0,
"start_line": 112
} | module Spec.Frodo.Pack
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Spec.Matrix
module Seq = Lib.Sequence
module Loops = Lib.LoopCombinators
#reset-options "--z3rlimit 100 --max_fuel 0 --max_ifuel 0 --using_facts_from '* -FStar +FStar.Pervasives +FStar.UInt -Spec +Spec.Frodo +Spec.Frodo.Params +Spec.Matrix'"
/// Pack
val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8
-> lbytes d
let frodo_pack8 d a =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let a0 = Seq.index a 0 &. maskd in
let a1 = Seq.index a 1 &. maskd in
let a2 = Seq.index a 2 &. maskd in
let a3 = Seq.index a 3 &. maskd in
let a4 = Seq.index a 4 &. maskd in
let a5 = Seq.index a 5 &. maskd in
let a6 = Seq.index a 6 &. maskd in
let a7 = Seq.index a 7 &. maskd in
let templong =
to_u128 a0 <<. size (7 * d)
|. to_u128 a1 <<. size (6 * d)
|. to_u128 a2 <<. size (5 * d)
|. to_u128 a3 <<. size (4 * d)
|. to_u128 a4 <<. size (3 * d)
|. to_u128 a5 <<. size (2 * d)
|. to_u128 a6 <<. size (1 * d)
|. to_u128 a7 <<. size (0 * d)
in
let v16 = uint_to_bytes_be templong in
Seq.sub v16 (16 - d) d
val frodo_pack_state:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> i:size_nat{i <= (n1 * n2) / 8}
-> Type0
let frodo_pack_state #n1 #n2 d i = lseq uint8 (d * i)
val frodo_pack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_pack_state #n1 #n2 d i
-> frodo_pack_state #n1 #n2 d (i + 1)
let frodo_pack_inner #n1 #n2 d a i s =
s @| frodo_pack8 d (Seq.sub a (8 * i) 8)
val frodo_pack:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> lbytes (d * ((n1 * n2) / 8))
let frodo_pack #n1 #n2 d a =
Loops.repeat_gen ((n1 * n2) / 8)
(frodo_pack_state #n1 #n2 d)
(frodo_pack_inner #n1 #n2 d a)
(Seq.create 0 (u8 0))
/// Unpack
val frodo_unpack8:
d:size_nat{d <= 16}
-> b:lbytes d
-> lseq uint16 8
let frodo_unpack8 d b =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let v16 = Seq.create 16 (u8 0) in
let src = update_sub v16 (16 - d) d b in
let templong: uint_t U128 SEC = uint_from_bytes_be src in
let res = Seq.create 8 (u16 0) in
let res = res.[0] <- to_u16 (templong >>. size (7 * d)) &. maskd in
let res = res.[1] <- to_u16 (templong >>. size (6 * d)) &. maskd in
let res = res.[2] <- to_u16 (templong >>. size (5 * d)) &. maskd in
let res = res.[3] <- to_u16 (templong >>. size (4 * d)) &. maskd in
let res = res.[4] <- to_u16 (templong >>. size (3 * d)) &. maskd in
let res = res.[5] <- to_u16 (templong >>. size (2 * d)) &. maskd in
let res = res.[6] <- to_u16 (templong >>. size (1 * d)) &. maskd in
let res = res.[7] <- to_u16 (templong >>. size (0 * d)) &. maskd in
res
val frodo_unpack_state:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> i:size_nat{i <= (n1 * n2) / 8}
-> Type0
let frodo_unpack_state #n1 #n2 i = lseq uint16 (8 * i)
val frodo_unpack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> b:lbytes (d * ((n1 * n2) / 8))
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_unpack_state #n1 #n2 i | {
"checked_file": "/",
"dependencies": [
"Spec.Matrix.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Frodo.Pack.fst"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "Spec.Matrix",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
d: Lib.IntTypes.size_nat{d * (n1 * n2 / 8) <= Lib.IntTypes.max_size_t /\ d <= 16} ->
b: Lib.ByteSequence.lbytes (d * (n1 * n2 / 8)) ->
i: Lib.IntTypes.size_nat{i < n1 * n2 / 8} ->
s: Spec.Frodo.Pack.frodo_unpack_state i
-> Spec.Frodo.Pack.frodo_unpack_state (i + 1) | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_nat",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Mul.op_Star",
"Lib.IntTypes.max_size_t",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Modulus",
"Prims.op_Division",
"Lib.ByteSequence.lbytes",
"Prims.op_LessThan",
"Spec.Frodo.Pack.frodo_unpack_state",
"Lib.Sequence.op_At_Bar",
"Lib.IntTypes.uint16",
"Spec.Frodo.Pack.frodo_unpack8",
"Lib.Sequence.sub",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Prims.op_Addition"
] | [] | false | false | false | false | false | let frodo_unpack_inner #n1 #n2 d b i s =
| s @| frodo_unpack8 d (Seq.sub b (d * i) d) | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_key_block | val chacha20_key_block (st: state) : Tot block | val chacha20_key_block (st: state) : Tot block | let chacha20_key_block (st:state) : Tot block =
let st = chacha20_core 0 st in
uints_to_bytes_le st | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 22,
"end_line": 134,
"start_col": 0,
"start_line": 132
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st
let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | st: Spec.Chacha20.state -> Spec.Chacha20.block | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.state",
"Lib.ByteSequence.uints_to_bytes_le",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC",
"Spec.Chacha20.chacha20_core",
"Spec.Chacha20.block"
] | [] | false | false | false | true | false | let chacha20_key_block (st: state) : Tot block =
| let st = chacha20_core 0 st in
uints_to_bytes_le st | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_decrypt_bytes | val chacha20_decrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> cipher: bytes{length cipher / size_block <= max_size_t}
-> msg: bytes{length cipher == length msg} | val chacha20_decrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> cipher: bytes{length cipher / size_block <= max_size_t}
-> msg: bytes{length cipher == length msg} | let chacha20_decrypt_bytes key nonce ctr0 cipher =
let st0 = chacha20_init key nonce ctr0 in
chacha20_update st0 cipher | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 28,
"end_line": 191,
"start_col": 0,
"start_line": 189
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st
let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st
let chacha20_key_block (st:state) : Tot block =
let st = chacha20_core 0 st in
uints_to_bytes_le st
let xor_block (k:state) (b:block) : block =
let ib = uints_from_bytes_le b in
let ob = map2 (^.) ib k in
uints_to_bytes_le ob
let chacha20_encrypt_block (st0:state) (incr:counter) (b:block) : Tot block =
let k = chacha20_core incr st0 in
xor_block k b
let chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len) :
Tot (lbytes len) =
let plain = create size_block (u8 0) in
let plain = update_sub plain 0 len b in
let cipher = chacha20_encrypt_block st0 incr plain in
sub cipher 0 (length b)
val chacha20_update:
ctx: state
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg}
let chacha20_update ctx msg =
let cipher = msg in
map_blocks size_block cipher
(chacha20_encrypt_block ctx)
(chacha20_encrypt_last ctx)
val chacha20_encrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg}
let chacha20_encrypt_bytes key nonce ctr0 msg =
let st0 = chacha20_init key nonce ctr0 in
chacha20_update st0 msg
val chacha20_decrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> cipher: bytes{length cipher / size_block <= max_size_t}
-> msg: bytes{length cipher == length msg} | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
k: Spec.Chacha20.key ->
n: Spec.Chacha20.nonce ->
c: Spec.Chacha20.counter ->
cipher:
Lib.ByteSequence.bytes
{Lib.Sequence.length cipher / Spec.Chacha20.size_block <= Lib.IntTypes.max_size_t}
-> msg: Lib.ByteSequence.bytes{Lib.Sequence.length cipher == Lib.Sequence.length msg} | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.key",
"Spec.Chacha20.nonce",
"Spec.Chacha20.counter",
"Lib.ByteSequence.bytes",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Division",
"Lib.Sequence.length",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Spec.Chacha20.size_block",
"Lib.IntTypes.max_size_t",
"Spec.Chacha20.chacha20_update",
"Spec.Chacha20.state",
"Spec.Chacha20.chacha20_init",
"Prims.eq2",
"Prims.nat"
] | [] | false | false | false | false | false | let chacha20_decrypt_bytes key nonce ctr0 cipher =
| let st0 = chacha20_init key nonce ctr0 in
chacha20_update st0 cipher | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_constants | val chacha20_constants:lseq size_t 4 | val chacha20_constants:lseq size_t 4 | let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 11,
"end_line": 113,
"start_col": 0,
"start_line": 109
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Lib.Sequence.lseq (Lib.IntTypes.int_t Lib.IntTypes.U32 Lib.IntTypes.PUB) 4 | Prims.Tot | [
"total"
] | [] | [
"Lib.Sequence.createL",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Prims.int",
"FStar.List.Tot.Base.length",
"Prims.list",
"Prims.Cons",
"Spec.Chacha20.c0",
"Spec.Chacha20.c1",
"Spec.Chacha20.c2",
"Spec.Chacha20.c3",
"Prims.Nil"
] | [] | false | false | false | false | false | let chacha20_constants:lseq size_t 4 =
| [@@ inline_let ]let l = [c0; c1; c2; c3] in
assert_norm (List.Tot.length l == 4);
createL l | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_key_block0 | val chacha20_key_block0 (k: key) (n: nonce) : Tot block | val chacha20_key_block0 (k: key) (n: nonce) : Tot block | let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 22,
"end_line": 130,
"start_col": 0,
"start_line": 127
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | k: Spec.Chacha20.key -> n: Spec.Chacha20.nonce -> Spec.Chacha20.block | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.key",
"Spec.Chacha20.nonce",
"Lib.ByteSequence.uints_to_bytes_le",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC",
"Spec.Chacha20.state",
"Spec.Chacha20.chacha20_core",
"Spec.Chacha20.chacha20_init",
"Spec.Chacha20.block"
] | [] | false | false | false | true | false | let chacha20_key_block0 (k: key) (n: nonce) : Tot block =
| let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st | false |
Spec.Chacha20.fst | Spec.Chacha20.xor_block | val xor_block (k: state) (b: block) : block | val xor_block (k: state) (b: block) : block | let xor_block (k:state) (b:block) : block =
let ib = uints_from_bytes_le b in
let ob = map2 (^.) ib k in
uints_to_bytes_le ob | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 22,
"end_line": 139,
"start_col": 0,
"start_line": 136
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st
let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st
let chacha20_key_block (st:state) : Tot block =
let st = chacha20_core 0 st in
uints_to_bytes_le st | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | k: Spec.Chacha20.state -> b: Spec.Chacha20.block -> Spec.Chacha20.block | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.state",
"Spec.Chacha20.block",
"Lib.ByteSequence.uints_to_bytes_le",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.eq2",
"Lib.Sequence.index",
"Lib.IntTypes.logxor",
"Lib.Sequence.map2",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.uint32",
"Lib.IntTypes.op_Hat_Dot",
"Lib.ByteSequence.uints_from_bytes_le"
] | [] | false | false | false | true | false | let xor_block (k: state) (b: block) : block =
| let ib = uints_from_bytes_le b in
let ob = map2 ( ^. ) ib k in
uints_to_bytes_le ob | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_encrypt_last | val chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len)
: Tot (lbytes len) | val chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len)
: Tot (lbytes len) | let chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len) :
Tot (lbytes len) =
let plain = create size_block (u8 0) in
let plain = update_sub plain 0 len b in
let cipher = chacha20_encrypt_block st0 incr plain in
sub cipher 0 (length b) | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 25,
"end_line": 155,
"start_col": 0,
"start_line": 145
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st
let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st
let chacha20_key_block (st:state) : Tot block =
let st = chacha20_core 0 st in
uints_to_bytes_le st
let xor_block (k:state) (b:block) : block =
let ib = uints_from_bytes_le b in
let ob = map2 (^.) ib k in
uints_to_bytes_le ob
let chacha20_encrypt_block (st0:state) (incr:counter) (b:block) : Tot block =
let k = chacha20_core incr st0 in
xor_block k b | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
st0: Spec.Chacha20.state ->
incr: Spec.Chacha20.counter ->
len: Lib.IntTypes.size_nat{len < Spec.Chacha20.size_block} ->
b: Lib.ByteSequence.lbytes len
-> Lib.ByteSequence.lbytes len | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.state",
"Spec.Chacha20.counter",
"Lib.IntTypes.size_nat",
"Prims.b2t",
"Prims.op_LessThan",
"Spec.Chacha20.size_block",
"Lib.ByteSequence.lbytes",
"Lib.Sequence.sub",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Lib.Sequence.length",
"Spec.Chacha20.block",
"Spec.Chacha20.chacha20_encrypt_block",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Prims.l_and",
"Prims.eq2",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_or",
"Prims.op_LessThanOrEqual",
"Prims.op_Addition",
"FStar.Seq.Base.index",
"Lib.Sequence.to_seq",
"Lib.Sequence.index",
"Lib.Sequence.update_sub",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.create",
"Lib.IntTypes.mk_int",
"Prims.l_imp",
"Lib.Sequence.create",
"Lib.IntTypes.u8"
] | [] | false | false | false | false | false | let chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len)
: Tot (lbytes len) =
| let plain = create size_block (u8 0) in
let plain = update_sub plain 0 len b in
let cipher = chacha20_encrypt_block st0 incr plain in
sub cipher 0 (length b) | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_encrypt_bytes | val chacha20_encrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg} | val chacha20_encrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg} | let chacha20_encrypt_bytes key nonce ctr0 msg =
let st0 = chacha20_init key nonce ctr0 in
chacha20_update st0 msg | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 25,
"end_line": 179,
"start_col": 0,
"start_line": 177
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st
let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st
let chacha20_key_block (st:state) : Tot block =
let st = chacha20_core 0 st in
uints_to_bytes_le st
let xor_block (k:state) (b:block) : block =
let ib = uints_from_bytes_le b in
let ob = map2 (^.) ib k in
uints_to_bytes_le ob
let chacha20_encrypt_block (st0:state) (incr:counter) (b:block) : Tot block =
let k = chacha20_core incr st0 in
xor_block k b
let chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len) :
Tot (lbytes len) =
let plain = create size_block (u8 0) in
let plain = update_sub plain 0 len b in
let cipher = chacha20_encrypt_block st0 incr plain in
sub cipher 0 (length b)
val chacha20_update:
ctx: state
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg}
let chacha20_update ctx msg =
let cipher = msg in
map_blocks size_block cipher
(chacha20_encrypt_block ctx)
(chacha20_encrypt_last ctx)
val chacha20_encrypt_bytes:
k: key
-> n: nonce
-> c: counter
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg} | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
k: Spec.Chacha20.key ->
n: Spec.Chacha20.nonce ->
c: Spec.Chacha20.counter ->
msg:
Lib.ByteSequence.bytes
{Lib.Sequence.length msg / Spec.Chacha20.size_block <= Lib.IntTypes.max_size_t}
-> cipher: Lib.ByteSequence.bytes{Lib.Sequence.length cipher == Lib.Sequence.length msg} | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.key",
"Spec.Chacha20.nonce",
"Spec.Chacha20.counter",
"Lib.ByteSequence.bytes",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Division",
"Lib.Sequence.length",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Spec.Chacha20.size_block",
"Lib.IntTypes.max_size_t",
"Spec.Chacha20.chacha20_update",
"Spec.Chacha20.state",
"Spec.Chacha20.chacha20_init",
"Prims.eq2",
"Prims.nat"
] | [] | false | false | false | false | false | let chacha20_encrypt_bytes key nonce ctr0 msg =
| let st0 = chacha20_init key nonce ctr0 in
chacha20_update st0 msg | false |
Spec.Chacha20.fst | Spec.Chacha20.setup | val setup (k: key) (n: nonce) (ctr0: counter) (st: state) : Tot state | val setup (k: key) (n: nonce) (ctr0: counter) (st: state) : Tot state | let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 4,
"end_line": 120,
"start_col": 0,
"start_line": 115
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
k: Spec.Chacha20.key ->
n: Spec.Chacha20.nonce ->
ctr0: Spec.Chacha20.counter ->
st: Spec.Chacha20.state
-> Spec.Chacha20.state | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.key",
"Spec.Chacha20.nonce",
"Spec.Chacha20.counter",
"Spec.Chacha20.state",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U32",
"Lib.IntTypes.SEC",
"Prims.l_and",
"Prims.eq2",
"Lib.Sequence.sub",
"Lib.ByteSequence.uints_from_bytes_le",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_or",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Prims.op_Addition",
"FStar.Seq.Base.index",
"Lib.Sequence.to_seq",
"Lib.Sequence.index",
"Lib.Sequence.update_sub",
"Lib.IntTypes.uint32",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.upd",
"Lib.IntTypes.mk_int",
"Prims.op_Subtraction",
"Prims.pow2",
"Prims.l_imp",
"Prims.op_disEquality",
"Lib.Sequence.op_String_Assignment",
"Lib.IntTypes.u32",
"Lib.Sequence.map",
"Lib.IntTypes.PUB",
"Lib.IntTypes.secret",
"Spec.Chacha20.chacha20_constants"
] | [] | false | false | false | true | false | let setup (k: key) (n: nonce) (ctr0: counter) (st: state) : Tot state =
| let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[ 12 ] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st | false |
Spec.Chacha20.fst | Spec.Chacha20.chacha20_update | val chacha20_update:
ctx: state
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg} | val chacha20_update:
ctx: state
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg} | let chacha20_update ctx msg =
let cipher = msg in
map_blocks size_block cipher
(chacha20_encrypt_block ctx)
(chacha20_encrypt_last ctx) | {
"file_name": "specs/Spec.Chacha20.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 31,
"end_line": 167,
"start_col": 0,
"start_line": 163
} | module Spec.Chacha20
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Lib.LoopCombinators
#set-options "--max_fuel 0 --z3rlimit 100"
/// Constants and Types
let size_key = 32
let size_block = 64
let size_nonce = 12
(* TODO: Remove, left here to avoid breaking implementation *)
let keylen = 32 (* in bytes *)
let blocklen = 64 (* in bytes *)
let noncelen = 12 (* in bytes *)
type key = lbytes size_key
type block = lbytes size_block
type nonce = lbytes size_nonce
type counter = size_nat
type subblock = b:bytes{length b <= size_block}
// Internally, blocks are represented as 16 x 4-byte integers
type state = lseq uint32 16
type idx = n:size_nat{n < 16}
type shuffle = state -> Tot state
// Using @ as a functional substitute for ;
let op_At f g = fun x -> g (f x)
/// Specification
let line (a:idx) (b:idx) (d:idx) (s:rotval U32) (m:state) : Tot state =
let m = m.[a] <- (m.[a] +. m.[b]) in
let m = m.[d] <- ((m.[d] ^. m.[a]) <<<. s) in m
let quarter_round a b c d : Tot shuffle =
line a b d (size 16) @
line c d b (size 12) @
line a b d (size 8) @
line c d b (size 7)
let column_round : shuffle =
quarter_round 0 4 8 12 @
quarter_round 1 5 9 13 @
quarter_round 2 6 10 14 @
quarter_round 3 7 11 15
let diagonal_round : shuffle =
quarter_round 0 5 10 15 @
quarter_round 1 6 11 12 @
quarter_round 2 7 8 13 @
quarter_round 3 4 9 14
let double_round : shuffle =
column_round @ diagonal_round (* 2 rounds *)
let rounds : shuffle =
repeat 10 double_round (* 20 rounds *)
let sum_state (s0:state) (s1:state) : Tot state =
map2 (+.) s0 s1
let chacha20_add_counter (s0:state) (ctr:counter) : Tot state =
s0.[12] <- s0.[12] +. u32 ctr
// protz 10:37 AM
// question about chacha20 spec: why the double counter increment in chacha20_core?
// https://github.com/project-everest/hacl-star/blob/_dev/specs/Spec.Chacha20.fst#L75
// is this in the spec?
// karthik 11:28 AM
// This is doing the same as:
//
// let chacha20_core (ctr:counter) (s0:state) : Tot state =
// let s0 = chacha20_add_counter s0 ctr in
// let k = rounds s0 in
// sum_state k s0
//
// but we rewrite in this way so that s0 remains constant
// (in the code)
// protz 11:32 AM
// do sum_state and add_counter commute?
// I feel like I'm missing some equational property of these sub-combinators
// to understand why this is true
// karthik 11:33 AM
// yes, they do.
let chacha20_core (ctr:counter) (s0:state) : Tot state =
let k = chacha20_add_counter s0 ctr in
let k = rounds k in
let k = sum_state k s0 in
chacha20_add_counter k ctr
inline_for_extraction
let c0 = 0x61707865ul
inline_for_extraction
let c1 = 0x3320646eul
inline_for_extraction
let c2 = 0x79622d32ul
inline_for_extraction
let c3 = 0x6b206574ul
let chacha20_constants : lseq size_t 4 =
[@ inline_let]
let l = [c0;c1;c2;c3] in
assert_norm(List.Tot.length l == 4);
createL l
let setup (k:key) (n:nonce) (ctr0:counter) (st:state) : Tot state =
let st = update_sub st 0 4 (map secret chacha20_constants) in
let st = update_sub st 4 8 (uints_from_bytes_le #U32 #SEC #8 k) in
let st = st.[12] <- u32 ctr0 in
let st = update_sub st 13 3 (uints_from_bytes_le #U32 #SEC #3 n) in
st
let chacha20_init (k:key) (n:nonce) (ctr0:counter) : Tot state =
let st = create 16 (u32 0) in
let st = setup k n ctr0 st in
st
let chacha20_key_block0 (k:key) (n:nonce) : Tot block =
let st = chacha20_init k n 0 in
let st = chacha20_core 0 st in
uints_to_bytes_le st
let chacha20_key_block (st:state) : Tot block =
let st = chacha20_core 0 st in
uints_to_bytes_le st
let xor_block (k:state) (b:block) : block =
let ib = uints_from_bytes_le b in
let ob = map2 (^.) ib k in
uints_to_bytes_le ob
let chacha20_encrypt_block (st0:state) (incr:counter) (b:block) : Tot block =
let k = chacha20_core incr st0 in
xor_block k b
let chacha20_encrypt_last
(st0: state)
(incr: counter)
(len: size_nat{len < size_block})
(b: lbytes len) :
Tot (lbytes len) =
let plain = create size_block (u8 0) in
let plain = update_sub plain 0 len b in
let cipher = chacha20_encrypt_block st0 incr plain in
sub cipher 0 (length b)
val chacha20_update:
ctx: state
-> msg: bytes{length msg / size_block <= max_size_t}
-> cipher: bytes{length cipher == length msg} | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Chacha20.fst"
} | [
{
"abbrev": false,
"full_module": "Lib.LoopCombinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
ctx: Spec.Chacha20.state ->
msg:
Lib.ByteSequence.bytes
{Lib.Sequence.length msg / Spec.Chacha20.size_block <= Lib.IntTypes.max_size_t}
-> cipher: Lib.ByteSequence.bytes{Lib.Sequence.length cipher == Lib.Sequence.length msg} | Prims.Tot | [
"total"
] | [] | [
"Spec.Chacha20.state",
"Lib.ByteSequence.bytes",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Division",
"Lib.Sequence.length",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.SEC",
"Spec.Chacha20.size_block",
"Lib.IntTypes.max_size_t",
"Lib.Sequence.map_blocks",
"Spec.Chacha20.chacha20_encrypt_block",
"Spec.Chacha20.chacha20_encrypt_last",
"Lib.Sequence.seq",
"Lib.IntTypes.int_t",
"Prims.op_Subtraction",
"Prims.pow2",
"Prims.eq2",
"Prims.nat"
] | [] | false | false | false | false | false | let chacha20_update ctx msg =
| let cipher = msg in
map_blocks size_block cipher (chacha20_encrypt_block ctx) (chacha20_encrypt_last ctx) | false |
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.repeat_gen_blocks_map_l | val repeat_gen_blocks_map_l
(#a: Type0)
(blocksize: size_pos)
(hi: nat)
(l: (i: nat{i <= hi} -> rem: nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i: nat{i <= hi})
(rem: nat{rem < blocksize})
(block_l: lseq a rem)
(acc: map_blocks_a a blocksize hi i)
: seq a | val repeat_gen_blocks_map_l
(#a: Type0)
(blocksize: size_pos)
(hi: nat)
(l: (i: nat{i <= hi} -> rem: nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i: nat{i <= hi})
(rem: nat{rem < blocksize})
(block_l: lseq a rem)
(acc: map_blocks_a a blocksize hi i)
: seq a | let repeat_gen_blocks_map_l
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i:nat{i <= hi})
(rem:nat{rem < blocksize})
(block_l:lseq a rem)
(acc:map_blocks_a a blocksize hi i) : seq a
=
if rem > 0 then Seq.append acc (l i rem block_l) else acc | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 60,
"end_line": 523,
"start_col": 0,
"start_line": 513
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'"
let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b
let get_last_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len}) :
lseq a (len % blocksize)
=
let rem = len % blocksize in
let b: lseq a rem = Seq.slice inp (len - rem) len in
b
val repeati_extensionality:
#a:Type0
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g i acc))
(ensures Loops.repeati n f acc0 == Loops.repeati n g acc0)
val repeat_right_extensionality:
n:nat
-> lo:nat
-> a_f:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> a_g:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> f:(i:nat{lo <= i /\ i < lo + n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo <= i /\ i < lo + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f lo ->
Lemma
(requires
(forall (i:nat{lo <= i /\ i <= lo + n}). a_f i == a_g i) /\
(forall (i:nat{lo <= i /\ i < lo + n}) (acc:a_f i). f i acc == g i acc))
(ensures
Loops.repeat_right lo (lo + n) a_f f acc0 ==
Loops.repeat_right lo (lo + n) a_g g acc0)
// Loops.repeat_gen n a_f f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
val repeat_gen_right_extensionality:
n:nat
-> lo_g:nat
-> a_f:(i:nat{i <= n} -> Type)
-> a_g:(i:nat{lo_g <= i /\ i <= lo_g + n} -> Type)
-> f:(i:nat{i < n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f 0 ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f i == a_g (lo_g + i)) /\
(forall (i:nat{i < n}) (acc:a_f i). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n a_f f acc0 ==
Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
// Loops.repeati n a f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0
val repeati_right_extensionality:
#a:Type
-> n:nat
-> lo_g:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n (Loops.fixed_a a) f acc0 ==
Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0)
/// A specialized version of the lemma above, for only shifting one computation,
/// but specified using repeati instead
val repeati_right_shift:
#a:Type
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < 1 + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (i + 1) acc))
(ensures Loops.repeati n f (g 0 acc0) == Loops.repeati (n + 1) g acc0)
///
/// `repeat_gen_blocks` is defined here to prove all the properties
/// needed for `map_blocks` and `repeat_blocks` once
///
let repeat_gen_blocks_f
(#inp_t:Type0)
(blocksize:size_pos)
(mi:nat)
(hi:nat)
(n:nat{mi + n <= hi})
(inp:seq inp_t{length inp == n * blocksize})
(a:(i:nat{i <= hi} -> Type))
(f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i:nat{mi <= i /\ i < mi + n})
(acc:a i) : a (i + 1)
=
let i_b = i - mi in
Math.Lemmas.lemma_mult_le_right blocksize (i_b + 1) n;
let block = Seq.slice inp (i_b * blocksize) (i_b * blocksize + blocksize) in
f i block acc
//lo = 0
val repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
a (mi + n)
val lemma_repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(repeat_gen_blocks_multi #inp_t blocksize mi hi n inp a f acc0 ==
Loops.repeat_right mi (mi + n) a (repeat_gen_blocks_f blocksize mi hi n inp a f) acc0)
val repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acci:a mi ->
c
val lemma_repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_div nb blocksize;
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_gen_blocks_multi #inp_t blocksize mi hi nb blocks a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 == l (mi + nb) rem last acc)
val repeat_gen_blocks_multi_extensionality_zero:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{length inp == n * blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc))
(ensures
repeat_gen_blocks_multi blocksize mi hi_f n inp a_f f acc0 ==
repeat_gen_blocks_multi blocksize 0 hi_g n inp a_g g acc0)
val repeat_gen_blocks_extensionality_zero:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{n == length inp / blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> l_f:(i:nat{i <= hi_f} -> len:nat{len < blocksize} -> lseq inp_t len -> a_f i -> c)
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> l_g:(i:nat{i <= hi_g} -> len:nat{len < blocksize} -> lseq inp_t len -> a_g i -> c)
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc) /\
(forall (i:nat{i <= n}) (len:nat{len < blocksize}) (block:lseq inp_t len) (acc:a_f (mi + i)).
l_f (mi + i) len block acc == l_g i len block acc))
(ensures
repeat_gen_blocks blocksize mi hi_f inp a_f f l_f acc0 ==
repeat_gen_blocks blocksize 0 hi_g inp a_g g l_g acc0)
val len0_div_bs: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize == 0)
(ensures len0 / blocksize + (len - len0) / blocksize == len / blocksize)
val split_len_lemma0: blocksize:pos -> n:nat -> len0:nat ->
Lemma
(requires len0 <= n * blocksize /\ len0 % blocksize = 0)
(ensures
(let len = n * blocksize in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
len % blocksize = 0 /\ len1 % blocksize = 0 /\ n0 + n1 = n /\
n0 * blocksize = len0 /\ n1 * blocksize = len1))
val split_len_lemma: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize = 0)
(ensures
(let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
let n = len / blocksize in
len % blocksize = len1 % blocksize /\
n0 * blocksize = len0 /\ n0 + n1 = n))
val repeat_gen_blocks_multi_split:
#inp_t:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{len0 <= length inp /\ length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(let len = length inp in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
split_len_lemma0 blocksize n len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks_multi blocksize mi hi n inp a f acc0 ==
repeat_gen_blocks_multi blocksize (mi + n0) hi n1 t1 a f acc)
val repeat_gen_blocks_split:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> hi:nat
-> mi:nat{mi <= hi}
-> inp:seq inp_t{len0 <= length inp /\ mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let n = len / blocksize in
let n0 = len0 / blocksize in
split_len_lemma blocksize len len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 ==
repeat_gen_blocks blocksize (mi + n0) hi t1 a f l acc)
///
/// Properties related to the repeat_blocks combinator
///
val repeat_blocks_extensionality:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f1:(lseq a blocksize -> b -> b)
-> f2:(lseq a blocksize -> b -> b)
-> l1:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> l2:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f1 block acc == f2 block acc) /\
(forall (rem:nat{rem < blocksize}) (last:lseq a rem) (acc:b). l1 rem last acc == l2 rem last acc))
(ensures
repeat_blocks blocksize inp f1 l1 acc0 == repeat_blocks blocksize inp f2 l2 acc0)
val lemma_repeat_blocks_via_multi:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_blocks_multi blocksize blocks f acc0 in
repeat_blocks #a #b blocksize inp f l acc0 == l rem last acc)
val repeat_blocks_multi_is_repeat_gen_blocks_multi:
#a:Type0
-> #b:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0 /\ length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let n = length inp / blocksize in
Math.Lemmas.div_exact_r (length inp) blocksize;
repeat_blocks_multi #a #b blocksize inp f acc0 ==
repeat_gen_blocks_multi #a blocksize 0 hi n inp (Loops.fixed_a b) (Loops.fixed_i f) acc0)
val repeat_blocks_is_repeat_gen_blocks:
#a:Type0
-> #b:Type0
-> #c:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(repeat_blocks #a #b #c blocksize inp f l acc0 ==
repeat_gen_blocks #a #c blocksize 0 hi inp
(Loops.fixed_a b) (Loops.fixed_i f) (Loops.fixed_i l) acc0)
val repeat_blocks_multi_split:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp /\ length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let len = length inp in
Math.Lemmas.lemma_div_exact len blocksize;
split_len_lemma0 blocksize (len / blocksize) len0;
Math.Lemmas.swap_mul blocksize (len / blocksize);
repeat_blocks_multi blocksize inp f acc0 ==
repeat_blocks_multi blocksize (Seq.slice inp len0 len) f
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
val repeat_blocks_split:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
split_len_lemma blocksize len len0;
repeat_blocks blocksize inp f l acc0 ==
repeat_blocks blocksize (Seq.slice inp len0 len) f l
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
///
val repeat_blocks_multi_extensionality:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> g:(lseq a blocksize -> b -> b)
-> init:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f block acc == g block acc))
(ensures
repeat_blocks_multi blocksize inp f init ==
repeat_blocks_multi blocksize inp g init)
/// Properties related to the map_blocks combinator
///
val map_blocks_multi_extensionality:
#a:Type0
-> blocksize:size_pos
-> max:nat
-> n:nat{n <= max}
-> inp:seq a{length inp == max * blocksize}
-> f:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize)
-> g:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize) ->
Lemma
(requires
(forall (i:nat{i < max}) (b_v:lseq a blocksize). f i b_v == g i b_v))
(ensures
map_blocks_multi blocksize max n inp f ==
map_blocks_multi blocksize max n inp g)
val map_blocks_extensionality:
#a:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_f:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem)
-> g:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_g:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem) ->
Lemma
(requires
(let n = length inp / blocksize in
(forall (i:nat{i < n}) (b_v:lseq a blocksize). f i b_v == g i b_v) /\
(forall (rem:nat{rem < blocksize}) (b_v:lseq a rem). l_f n rem b_v == l_g n rem b_v)))
(ensures
map_blocks blocksize inp f l_f == map_blocks blocksize inp g l_g)
///
/// New definition of `map_blocks` that takes extra parameter `acc`.
/// When `acc` = Seq.empty, map_blocks == map_blocks_acc
///
let repeat_gen_blocks_map_f
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i:nat{i < hi})
(block:lseq a blocksize)
(acc:map_blocks_a a blocksize hi i) : map_blocks_a a blocksize hi (i + 1)
=
Seq.append acc (f i block) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
blocksize: Lib.IntTypes.size_pos ->
hi: Prims.nat ->
l:
(i: Prims.nat{i <= hi} -> rem: Prims.nat{rem < blocksize} -> _: Lib.Sequence.lseq a rem
-> Lib.Sequence.lseq a rem) ->
i: Prims.nat{i <= hi} ->
rem: Prims.nat{rem < blocksize} ->
block_l: Lib.Sequence.lseq a rem ->
acc: Lib.Sequence.map_blocks_a a blocksize hi i
-> Lib.Sequence.seq a | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_pos",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Lib.Sequence.lseq",
"Lib.Sequence.map_blocks_a",
"Prims.op_GreaterThan",
"FStar.Seq.Base.append",
"Prims.bool",
"Lib.Sequence.seq"
] | [] | false | false | false | false | false | let repeat_gen_blocks_map_l
(#a: Type0)
(blocksize: size_pos)
(hi: nat)
(l: (i: nat{i <= hi} -> rem: nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i: nat{i <= hi})
(rem: nat{rem < blocksize})
(block_l: lseq a rem)
(acc: map_blocks_a a blocksize hi i)
: seq a =
| if rem > 0 then Seq.append acc (l i rem block_l) else acc | false |
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.get_block_s | val get_block_s
(#a: Type)
(#len: nat)
(blocksize: size_pos)
(inp: seq a {length inp == len})
(i: nat{i < (len / blocksize) * blocksize})
: lseq a blocksize | val get_block_s
(#a: Type)
(#len: nat)
(blocksize: size_pos)
(inp: seq a {length inp == len})
(i: nat{i < (len / blocksize) * blocksize})
: lseq a blocksize | let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 3,
"end_line": 24,
"start_col": 0,
"start_line": 13
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'" | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
blocksize: Lib.IntTypes.size_pos ->
inp: Lib.Sequence.seq a {Lib.Sequence.length inp == len} ->
i: Prims.nat{i < (len / blocksize) * blocksize}
-> Lib.Sequence.lseq a blocksize | Prims.Tot | [
"total"
] | [] | [
"Prims.nat",
"Lib.IntTypes.size_pos",
"Lib.Sequence.seq",
"Prims.eq2",
"Lib.Sequence.length",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Mul.op_Star",
"Prims.op_Division",
"Lib.Sequence.lseq",
"FStar.Seq.Base.slice",
"Prims.op_Addition",
"Prims.int",
"Prims.unit",
"Lib.Sequence.div_mul_lt"
] | [] | false | false | false | false | false | let get_block_s
(#a: Type)
(#len: nat)
(blocksize: size_pos)
(inp: seq a {length inp == len})
(i: nat{i < (len / blocksize) * blocksize})
: lseq a blocksize =
| div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b:lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b | false |
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.get_last_s | val get_last_s (#a: Type) (#len: nat) (blocksize: size_pos) (inp: seq a {length inp == len})
: lseq a (len % blocksize) | val get_last_s (#a: Type) (#len: nat) (blocksize: size_pos) (inp: seq a {length inp == len})
: lseq a (len % blocksize) | let get_last_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len}) :
lseq a (len % blocksize)
=
let rem = len % blocksize in
let b: lseq a rem = Seq.slice inp (len - rem) len in
b | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 3,
"end_line": 36,
"start_col": 0,
"start_line": 27
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'"
let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | blocksize: Lib.IntTypes.size_pos -> inp: Lib.Sequence.seq a {Lib.Sequence.length inp == len}
-> Lib.Sequence.lseq a (len % blocksize) | Prims.Tot | [
"total"
] | [] | [
"Prims.nat",
"Lib.IntTypes.size_pos",
"Lib.Sequence.seq",
"Prims.eq2",
"Lib.Sequence.length",
"Lib.Sequence.lseq",
"FStar.Seq.Base.slice",
"Prims.op_Subtraction",
"Prims.int",
"Prims.op_Modulus"
] | [] | false | false | false | false | false | let get_last_s (#a: Type) (#len: nat) (blocksize: size_pos) (inp: seq a {length inp == len})
: lseq a (len % blocksize) =
| let rem = len % blocksize in
let b:lseq a rem = Seq.slice inp (len - rem) len in
b | false |
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.f_shift | val f_shift : blocksize: Lib.IntTypes.size_pos ->
mi: Prims.nat ->
hi: Prims.nat ->
n: Prims.nat{mi + n <= hi} ->
f: (i: Prims.nat{i < hi} -> _: Lib.Sequence.lseq a blocksize -> Lib.Sequence.lseq a blocksize) ->
i: Prims.nat{i < n} ->
_: Lib.Sequence.lseq a blocksize
-> Lib.Sequence.lseq a blocksize | let f_shift (#a:Type0) (blocksize:size_pos) (mi:nat) (hi:nat) (n:nat{mi + n <= hi})
(f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)) (i:nat{i < n}) = f (mi + i) | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 89,
"end_line": 609,
"start_col": 0,
"start_line": 608
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'"
let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b
let get_last_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len}) :
lseq a (len % blocksize)
=
let rem = len % blocksize in
let b: lseq a rem = Seq.slice inp (len - rem) len in
b
val repeati_extensionality:
#a:Type0
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g i acc))
(ensures Loops.repeati n f acc0 == Loops.repeati n g acc0)
val repeat_right_extensionality:
n:nat
-> lo:nat
-> a_f:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> a_g:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> f:(i:nat{lo <= i /\ i < lo + n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo <= i /\ i < lo + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f lo ->
Lemma
(requires
(forall (i:nat{lo <= i /\ i <= lo + n}). a_f i == a_g i) /\
(forall (i:nat{lo <= i /\ i < lo + n}) (acc:a_f i). f i acc == g i acc))
(ensures
Loops.repeat_right lo (lo + n) a_f f acc0 ==
Loops.repeat_right lo (lo + n) a_g g acc0)
// Loops.repeat_gen n a_f f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
val repeat_gen_right_extensionality:
n:nat
-> lo_g:nat
-> a_f:(i:nat{i <= n} -> Type)
-> a_g:(i:nat{lo_g <= i /\ i <= lo_g + n} -> Type)
-> f:(i:nat{i < n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f 0 ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f i == a_g (lo_g + i)) /\
(forall (i:nat{i < n}) (acc:a_f i). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n a_f f acc0 ==
Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
// Loops.repeati n a f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0
val repeati_right_extensionality:
#a:Type
-> n:nat
-> lo_g:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n (Loops.fixed_a a) f acc0 ==
Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0)
/// A specialized version of the lemma above, for only shifting one computation,
/// but specified using repeati instead
val repeati_right_shift:
#a:Type
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < 1 + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (i + 1) acc))
(ensures Loops.repeati n f (g 0 acc0) == Loops.repeati (n + 1) g acc0)
///
/// `repeat_gen_blocks` is defined here to prove all the properties
/// needed for `map_blocks` and `repeat_blocks` once
///
let repeat_gen_blocks_f
(#inp_t:Type0)
(blocksize:size_pos)
(mi:nat)
(hi:nat)
(n:nat{mi + n <= hi})
(inp:seq inp_t{length inp == n * blocksize})
(a:(i:nat{i <= hi} -> Type))
(f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i:nat{mi <= i /\ i < mi + n})
(acc:a i) : a (i + 1)
=
let i_b = i - mi in
Math.Lemmas.lemma_mult_le_right blocksize (i_b + 1) n;
let block = Seq.slice inp (i_b * blocksize) (i_b * blocksize + blocksize) in
f i block acc
//lo = 0
val repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
a (mi + n)
val lemma_repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(repeat_gen_blocks_multi #inp_t blocksize mi hi n inp a f acc0 ==
Loops.repeat_right mi (mi + n) a (repeat_gen_blocks_f blocksize mi hi n inp a f) acc0)
val repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acci:a mi ->
c
val lemma_repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_div nb blocksize;
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_gen_blocks_multi #inp_t blocksize mi hi nb blocks a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 == l (mi + nb) rem last acc)
val repeat_gen_blocks_multi_extensionality_zero:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{length inp == n * blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc))
(ensures
repeat_gen_blocks_multi blocksize mi hi_f n inp a_f f acc0 ==
repeat_gen_blocks_multi blocksize 0 hi_g n inp a_g g acc0)
val repeat_gen_blocks_extensionality_zero:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{n == length inp / blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> l_f:(i:nat{i <= hi_f} -> len:nat{len < blocksize} -> lseq inp_t len -> a_f i -> c)
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> l_g:(i:nat{i <= hi_g} -> len:nat{len < blocksize} -> lseq inp_t len -> a_g i -> c)
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc) /\
(forall (i:nat{i <= n}) (len:nat{len < blocksize}) (block:lseq inp_t len) (acc:a_f (mi + i)).
l_f (mi + i) len block acc == l_g i len block acc))
(ensures
repeat_gen_blocks blocksize mi hi_f inp a_f f l_f acc0 ==
repeat_gen_blocks blocksize 0 hi_g inp a_g g l_g acc0)
val len0_div_bs: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize == 0)
(ensures len0 / blocksize + (len - len0) / blocksize == len / blocksize)
val split_len_lemma0: blocksize:pos -> n:nat -> len0:nat ->
Lemma
(requires len0 <= n * blocksize /\ len0 % blocksize = 0)
(ensures
(let len = n * blocksize in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
len % blocksize = 0 /\ len1 % blocksize = 0 /\ n0 + n1 = n /\
n0 * blocksize = len0 /\ n1 * blocksize = len1))
val split_len_lemma: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize = 0)
(ensures
(let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
let n = len / blocksize in
len % blocksize = len1 % blocksize /\
n0 * blocksize = len0 /\ n0 + n1 = n))
val repeat_gen_blocks_multi_split:
#inp_t:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{len0 <= length inp /\ length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(let len = length inp in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
split_len_lemma0 blocksize n len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks_multi blocksize mi hi n inp a f acc0 ==
repeat_gen_blocks_multi blocksize (mi + n0) hi n1 t1 a f acc)
val repeat_gen_blocks_split:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> hi:nat
-> mi:nat{mi <= hi}
-> inp:seq inp_t{len0 <= length inp /\ mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let n = len / blocksize in
let n0 = len0 / blocksize in
split_len_lemma blocksize len len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 ==
repeat_gen_blocks blocksize (mi + n0) hi t1 a f l acc)
///
/// Properties related to the repeat_blocks combinator
///
val repeat_blocks_extensionality:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f1:(lseq a blocksize -> b -> b)
-> f2:(lseq a blocksize -> b -> b)
-> l1:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> l2:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f1 block acc == f2 block acc) /\
(forall (rem:nat{rem < blocksize}) (last:lseq a rem) (acc:b). l1 rem last acc == l2 rem last acc))
(ensures
repeat_blocks blocksize inp f1 l1 acc0 == repeat_blocks blocksize inp f2 l2 acc0)
val lemma_repeat_blocks_via_multi:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_blocks_multi blocksize blocks f acc0 in
repeat_blocks #a #b blocksize inp f l acc0 == l rem last acc)
val repeat_blocks_multi_is_repeat_gen_blocks_multi:
#a:Type0
-> #b:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0 /\ length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let n = length inp / blocksize in
Math.Lemmas.div_exact_r (length inp) blocksize;
repeat_blocks_multi #a #b blocksize inp f acc0 ==
repeat_gen_blocks_multi #a blocksize 0 hi n inp (Loops.fixed_a b) (Loops.fixed_i f) acc0)
val repeat_blocks_is_repeat_gen_blocks:
#a:Type0
-> #b:Type0
-> #c:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(repeat_blocks #a #b #c blocksize inp f l acc0 ==
repeat_gen_blocks #a #c blocksize 0 hi inp
(Loops.fixed_a b) (Loops.fixed_i f) (Loops.fixed_i l) acc0)
val repeat_blocks_multi_split:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp /\ length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let len = length inp in
Math.Lemmas.lemma_div_exact len blocksize;
split_len_lemma0 blocksize (len / blocksize) len0;
Math.Lemmas.swap_mul blocksize (len / blocksize);
repeat_blocks_multi blocksize inp f acc0 ==
repeat_blocks_multi blocksize (Seq.slice inp len0 len) f
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
val repeat_blocks_split:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
split_len_lemma blocksize len len0;
repeat_blocks blocksize inp f l acc0 ==
repeat_blocks blocksize (Seq.slice inp len0 len) f l
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
///
val repeat_blocks_multi_extensionality:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> g:(lseq a blocksize -> b -> b)
-> init:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f block acc == g block acc))
(ensures
repeat_blocks_multi blocksize inp f init ==
repeat_blocks_multi blocksize inp g init)
/// Properties related to the map_blocks combinator
///
val map_blocks_multi_extensionality:
#a:Type0
-> blocksize:size_pos
-> max:nat
-> n:nat{n <= max}
-> inp:seq a{length inp == max * blocksize}
-> f:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize)
-> g:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize) ->
Lemma
(requires
(forall (i:nat{i < max}) (b_v:lseq a blocksize). f i b_v == g i b_v))
(ensures
map_blocks_multi blocksize max n inp f ==
map_blocks_multi blocksize max n inp g)
val map_blocks_extensionality:
#a:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_f:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem)
-> g:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_g:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem) ->
Lemma
(requires
(let n = length inp / blocksize in
(forall (i:nat{i < n}) (b_v:lseq a blocksize). f i b_v == g i b_v) /\
(forall (rem:nat{rem < blocksize}) (b_v:lseq a rem). l_f n rem b_v == l_g n rem b_v)))
(ensures
map_blocks blocksize inp f l_f == map_blocks blocksize inp g l_g)
///
/// New definition of `map_blocks` that takes extra parameter `acc`.
/// When `acc` = Seq.empty, map_blocks == map_blocks_acc
///
let repeat_gen_blocks_map_f
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i:nat{i < hi})
(block:lseq a blocksize)
(acc:map_blocks_a a blocksize hi i) : map_blocks_a a blocksize hi (i + 1)
=
Seq.append acc (f i block)
let repeat_gen_blocks_map_l
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i:nat{i <= hi})
(rem:nat{rem < blocksize})
(block_l:lseq a rem)
(acc:map_blocks_a a blocksize hi i) : seq a
=
if rem > 0 then Seq.append acc (l i rem block_l) else acc
val repeat_gen_blocks_map_l_length:
#a:Type0
-> blocksize:size_pos
-> hi:nat
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> i:nat{i <= hi}
-> rem:nat{rem < blocksize}
-> block_l:lseq a rem
-> acc:map_blocks_a a blocksize hi i ->
Lemma (length (repeat_gen_blocks_map_l blocksize hi l i rem block_l acc) == i * blocksize + rem)
val map_blocks_multi_acc:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq a{length inp == n * blocksize}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> acc0:map_blocks_a a blocksize hi mi ->
out:seq a {length out == length acc0 + length inp}
val map_blocks_acc:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq a{mi + length inp / blocksize <= hi}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> acc0:map_blocks_a a blocksize hi mi ->
seq a
val map_blocks_acc_length:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq a{mi + length inp / blocksize <= hi}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> acc0:map_blocks_a a blocksize hi mi ->
Lemma (length (map_blocks_acc blocksize mi hi inp f l acc0) == length acc0 + length inp)
[SMTPat (map_blocks_acc blocksize mi hi inp f l acc0)]
val map_blocks_multi_acc_is_repeat_gen_blocks_multi:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq a{length inp == n * blocksize}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> acc0:map_blocks_a a blocksize hi mi ->
Lemma
(map_blocks_multi_acc #a blocksize mi hi n inp f acc0 ==
repeat_gen_blocks_multi #a blocksize mi hi n inp
(map_blocks_a a blocksize hi)
(repeat_gen_blocks_map_f blocksize hi f) acc0)
val map_blocks_acc_is_repeat_gen_blocks:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq a{mi + length inp / blocksize <= hi}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> acc0:map_blocks_a a blocksize hi mi ->
Lemma
(map_blocks_acc #a blocksize mi hi inp f l acc0 ==
repeat_gen_blocks #a blocksize mi hi inp
(map_blocks_a a blocksize hi)
(repeat_gen_blocks_map_f blocksize hi f)
(repeat_gen_blocks_map_l blocksize hi l) acc0) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
blocksize: Lib.IntTypes.size_pos ->
mi: Prims.nat ->
hi: Prims.nat ->
n: Prims.nat{mi + n <= hi} ->
f: (i: Prims.nat{i < hi} -> _: Lib.Sequence.lseq a blocksize -> Lib.Sequence.lseq a blocksize) ->
i: Prims.nat{i < n} ->
_: Lib.Sequence.lseq a blocksize
-> Lib.Sequence.lseq a blocksize | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_pos",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Addition",
"Prims.op_LessThan",
"Lib.Sequence.lseq"
] | [] | false | false | false | false | false | let f_shift
(#a: Type0)
(blocksize: size_pos)
(mi hi: nat)
(n: nat{mi + n <= hi})
(f: (i: nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i: nat{i < n})
=
| f (mi + i) | false |
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.repeat_gen_blocks_map_f | val repeat_gen_blocks_map_f
(#a: Type0)
(blocksize: size_pos)
(hi: nat)
(f: (i: nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i: nat{i < hi})
(block: lseq a blocksize)
(acc: map_blocks_a a blocksize hi i)
: map_blocks_a a blocksize hi (i + 1) | val repeat_gen_blocks_map_f
(#a: Type0)
(blocksize: size_pos)
(hi: nat)
(f: (i: nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i: nat{i < hi})
(block: lseq a blocksize)
(acc: map_blocks_a a blocksize hi i)
: map_blocks_a a blocksize hi (i + 1) | let repeat_gen_blocks_map_f
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i:nat{i < hi})
(block:lseq a blocksize)
(acc:map_blocks_a a blocksize hi i) : map_blocks_a a blocksize hi (i + 1)
=
Seq.append acc (f i block) | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 29,
"end_line": 510,
"start_col": 0,
"start_line": 501
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'"
let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b
let get_last_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len}) :
lseq a (len % blocksize)
=
let rem = len % blocksize in
let b: lseq a rem = Seq.slice inp (len - rem) len in
b
val repeati_extensionality:
#a:Type0
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g i acc))
(ensures Loops.repeati n f acc0 == Loops.repeati n g acc0)
val repeat_right_extensionality:
n:nat
-> lo:nat
-> a_f:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> a_g:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> f:(i:nat{lo <= i /\ i < lo + n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo <= i /\ i < lo + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f lo ->
Lemma
(requires
(forall (i:nat{lo <= i /\ i <= lo + n}). a_f i == a_g i) /\
(forall (i:nat{lo <= i /\ i < lo + n}) (acc:a_f i). f i acc == g i acc))
(ensures
Loops.repeat_right lo (lo + n) a_f f acc0 ==
Loops.repeat_right lo (lo + n) a_g g acc0)
// Loops.repeat_gen n a_f f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
val repeat_gen_right_extensionality:
n:nat
-> lo_g:nat
-> a_f:(i:nat{i <= n} -> Type)
-> a_g:(i:nat{lo_g <= i /\ i <= lo_g + n} -> Type)
-> f:(i:nat{i < n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f 0 ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f i == a_g (lo_g + i)) /\
(forall (i:nat{i < n}) (acc:a_f i). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n a_f f acc0 ==
Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
// Loops.repeati n a f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0
val repeati_right_extensionality:
#a:Type
-> n:nat
-> lo_g:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n (Loops.fixed_a a) f acc0 ==
Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0)
/// A specialized version of the lemma above, for only shifting one computation,
/// but specified using repeati instead
val repeati_right_shift:
#a:Type
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < 1 + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (i + 1) acc))
(ensures Loops.repeati n f (g 0 acc0) == Loops.repeati (n + 1) g acc0)
///
/// `repeat_gen_blocks` is defined here to prove all the properties
/// needed for `map_blocks` and `repeat_blocks` once
///
let repeat_gen_blocks_f
(#inp_t:Type0)
(blocksize:size_pos)
(mi:nat)
(hi:nat)
(n:nat{mi + n <= hi})
(inp:seq inp_t{length inp == n * blocksize})
(a:(i:nat{i <= hi} -> Type))
(f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i:nat{mi <= i /\ i < mi + n})
(acc:a i) : a (i + 1)
=
let i_b = i - mi in
Math.Lemmas.lemma_mult_le_right blocksize (i_b + 1) n;
let block = Seq.slice inp (i_b * blocksize) (i_b * blocksize + blocksize) in
f i block acc
//lo = 0
val repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
a (mi + n)
val lemma_repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(repeat_gen_blocks_multi #inp_t blocksize mi hi n inp a f acc0 ==
Loops.repeat_right mi (mi + n) a (repeat_gen_blocks_f blocksize mi hi n inp a f) acc0)
val repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acci:a mi ->
c
val lemma_repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_div nb blocksize;
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_gen_blocks_multi #inp_t blocksize mi hi nb blocks a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 == l (mi + nb) rem last acc)
val repeat_gen_blocks_multi_extensionality_zero:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{length inp == n * blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc))
(ensures
repeat_gen_blocks_multi blocksize mi hi_f n inp a_f f acc0 ==
repeat_gen_blocks_multi blocksize 0 hi_g n inp a_g g acc0)
val repeat_gen_blocks_extensionality_zero:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{n == length inp / blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> l_f:(i:nat{i <= hi_f} -> len:nat{len < blocksize} -> lseq inp_t len -> a_f i -> c)
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> l_g:(i:nat{i <= hi_g} -> len:nat{len < blocksize} -> lseq inp_t len -> a_g i -> c)
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc) /\
(forall (i:nat{i <= n}) (len:nat{len < blocksize}) (block:lseq inp_t len) (acc:a_f (mi + i)).
l_f (mi + i) len block acc == l_g i len block acc))
(ensures
repeat_gen_blocks blocksize mi hi_f inp a_f f l_f acc0 ==
repeat_gen_blocks blocksize 0 hi_g inp a_g g l_g acc0)
val len0_div_bs: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize == 0)
(ensures len0 / blocksize + (len - len0) / blocksize == len / blocksize)
val split_len_lemma0: blocksize:pos -> n:nat -> len0:nat ->
Lemma
(requires len0 <= n * blocksize /\ len0 % blocksize = 0)
(ensures
(let len = n * blocksize in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
len % blocksize = 0 /\ len1 % blocksize = 0 /\ n0 + n1 = n /\
n0 * blocksize = len0 /\ n1 * blocksize = len1))
val split_len_lemma: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize = 0)
(ensures
(let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
let n = len / blocksize in
len % blocksize = len1 % blocksize /\
n0 * blocksize = len0 /\ n0 + n1 = n))
val repeat_gen_blocks_multi_split:
#inp_t:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{len0 <= length inp /\ length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(let len = length inp in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
split_len_lemma0 blocksize n len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks_multi blocksize mi hi n inp a f acc0 ==
repeat_gen_blocks_multi blocksize (mi + n0) hi n1 t1 a f acc)
val repeat_gen_blocks_split:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> hi:nat
-> mi:nat{mi <= hi}
-> inp:seq inp_t{len0 <= length inp /\ mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let n = len / blocksize in
let n0 = len0 / blocksize in
split_len_lemma blocksize len len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 ==
repeat_gen_blocks blocksize (mi + n0) hi t1 a f l acc)
///
/// Properties related to the repeat_blocks combinator
///
val repeat_blocks_extensionality:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f1:(lseq a blocksize -> b -> b)
-> f2:(lseq a blocksize -> b -> b)
-> l1:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> l2:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f1 block acc == f2 block acc) /\
(forall (rem:nat{rem < blocksize}) (last:lseq a rem) (acc:b). l1 rem last acc == l2 rem last acc))
(ensures
repeat_blocks blocksize inp f1 l1 acc0 == repeat_blocks blocksize inp f2 l2 acc0)
val lemma_repeat_blocks_via_multi:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_blocks_multi blocksize blocks f acc0 in
repeat_blocks #a #b blocksize inp f l acc0 == l rem last acc)
val repeat_blocks_multi_is_repeat_gen_blocks_multi:
#a:Type0
-> #b:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0 /\ length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let n = length inp / blocksize in
Math.Lemmas.div_exact_r (length inp) blocksize;
repeat_blocks_multi #a #b blocksize inp f acc0 ==
repeat_gen_blocks_multi #a blocksize 0 hi n inp (Loops.fixed_a b) (Loops.fixed_i f) acc0)
val repeat_blocks_is_repeat_gen_blocks:
#a:Type0
-> #b:Type0
-> #c:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(repeat_blocks #a #b #c blocksize inp f l acc0 ==
repeat_gen_blocks #a #c blocksize 0 hi inp
(Loops.fixed_a b) (Loops.fixed_i f) (Loops.fixed_i l) acc0)
val repeat_blocks_multi_split:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp /\ length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let len = length inp in
Math.Lemmas.lemma_div_exact len blocksize;
split_len_lemma0 blocksize (len / blocksize) len0;
Math.Lemmas.swap_mul blocksize (len / blocksize);
repeat_blocks_multi blocksize inp f acc0 ==
repeat_blocks_multi blocksize (Seq.slice inp len0 len) f
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
val repeat_blocks_split:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
split_len_lemma blocksize len len0;
repeat_blocks blocksize inp f l acc0 ==
repeat_blocks blocksize (Seq.slice inp len0 len) f l
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
///
val repeat_blocks_multi_extensionality:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> g:(lseq a blocksize -> b -> b)
-> init:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f block acc == g block acc))
(ensures
repeat_blocks_multi blocksize inp f init ==
repeat_blocks_multi blocksize inp g init)
/// Properties related to the map_blocks combinator
///
val map_blocks_multi_extensionality:
#a:Type0
-> blocksize:size_pos
-> max:nat
-> n:nat{n <= max}
-> inp:seq a{length inp == max * blocksize}
-> f:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize)
-> g:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize) ->
Lemma
(requires
(forall (i:nat{i < max}) (b_v:lseq a blocksize). f i b_v == g i b_v))
(ensures
map_blocks_multi blocksize max n inp f ==
map_blocks_multi blocksize max n inp g)
val map_blocks_extensionality:
#a:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_f:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem)
-> g:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_g:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem) ->
Lemma
(requires
(let n = length inp / blocksize in
(forall (i:nat{i < n}) (b_v:lseq a blocksize). f i b_v == g i b_v) /\
(forall (rem:nat{rem < blocksize}) (b_v:lseq a rem). l_f n rem b_v == l_g n rem b_v)))
(ensures
map_blocks blocksize inp f l_f == map_blocks blocksize inp g l_g)
///
/// New definition of `map_blocks` that takes extra parameter `acc`.
/// When `acc` = Seq.empty, map_blocks == map_blocks_acc
/// | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
blocksize: Lib.IntTypes.size_pos ->
hi: Prims.nat ->
f: (i: Prims.nat{i < hi} -> _: Lib.Sequence.lseq a blocksize -> Lib.Sequence.lseq a blocksize) ->
i: Prims.nat{i < hi} ->
block: Lib.Sequence.lseq a blocksize ->
acc: Lib.Sequence.map_blocks_a a blocksize hi i
-> Lib.Sequence.map_blocks_a a blocksize hi (i + 1) | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_pos",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.Sequence.lseq",
"Lib.Sequence.map_blocks_a",
"FStar.Seq.Base.append",
"Prims.op_Addition"
] | [] | false | false | false | false | false | let repeat_gen_blocks_map_f
(#a: Type0)
(blocksize: size_pos)
(hi: nat)
(f: (i: nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i: nat{i < hi})
(block: lseq a blocksize)
(acc: map_blocks_a a blocksize hi i)
: map_blocks_a a blocksize hi (i + 1) =
| Seq.append acc (f i block) | false |
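A minimal usage sketch (not part of the dataset row above; the helper name is hypothetical and the snippet assumes the opens of Lib.Sequence.Lemmas.fsti): repeat_gen_blocks_map_f is the step function that realises the accumulator-passing map_blocks as an instance of repeat_gen_blocks_multi, mirroring the statement of map_blocks_multi_acc_is_repeat_gen_blocks_multi in the interface.

(* Hypothetical helper, mirroring map_blocks_multi_acc_is_repeat_gen_blocks_multi:
   fold repeat_gen_blocks_map_f over the blocks of inp, appending f i block to
   the accumulator at each step. *)
let map_blocks_multi_acc_alt (#a:Type0) (blocksize:size_pos)
  (mi:nat) (hi:nat) (n:nat{mi + n <= hi})
  (inp:seq a{length inp == n * blocksize})
  (f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
  (acc0:map_blocks_a a blocksize hi mi)
  : map_blocks_a a blocksize hi (mi + n)
  = repeat_gen_blocks_multi blocksize mi hi n inp
      (map_blocks_a a blocksize hi)
      (repeat_gen_blocks_map_f blocksize hi f)
      acc0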
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.l_shift | val l_shift : blocksize: Lib.IntTypes.size_pos ->
mi: Prims.nat ->
hi: Prims.nat ->
n: Prims.nat{mi + n <= hi} ->
l:
(i: Prims.nat{i <= hi} -> rem: Prims.nat{rem < blocksize} -> _: Lib.Sequence.lseq a rem
-> Lib.Sequence.lseq a rem) ->
i: Prims.nat{i <= n} ->
rem: Prims.nat{rem < blocksize} ->
_: Lib.Sequence.lseq a rem
-> Lib.Sequence.lseq a rem | let l_shift (#a:Type0) (blocksize:size_pos) (mi:nat) (hi:nat) (n:nat{mi + n <= hi})
(l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)) (i:nat{i <= n}) = l (mi + i) | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 107,
"end_line": 613,
"start_col": 0,
"start_line": 612
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'"
let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b
let get_last_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len}) :
lseq a (len % blocksize)
=
let rem = len % blocksize in
let b: lseq a rem = Seq.slice inp (len - rem) len in
b
val repeati_extensionality:
#a:Type0
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g i acc))
(ensures Loops.repeati n f acc0 == Loops.repeati n g acc0)
val repeat_right_extensionality:
n:nat
-> lo:nat
-> a_f:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> a_g:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> f:(i:nat{lo <= i /\ i < lo + n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo <= i /\ i < lo + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f lo ->
Lemma
(requires
(forall (i:nat{lo <= i /\ i <= lo + n}). a_f i == a_g i) /\
(forall (i:nat{lo <= i /\ i < lo + n}) (acc:a_f i). f i acc == g i acc))
(ensures
Loops.repeat_right lo (lo + n) a_f f acc0 ==
Loops.repeat_right lo (lo + n) a_g g acc0)
// Loops.repeat_gen n a_f f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
val repeat_gen_right_extensionality:
n:nat
-> lo_g:nat
-> a_f:(i:nat{i <= n} -> Type)
-> a_g:(i:nat{lo_g <= i /\ i <= lo_g + n} -> Type)
-> f:(i:nat{i < n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f 0 ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f i == a_g (lo_g + i)) /\
(forall (i:nat{i < n}) (acc:a_f i). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n a_f f acc0 ==
Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
// Loops.repeati n a f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0
val repeati_right_extensionality:
#a:Type
-> n:nat
-> lo_g:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n (Loops.fixed_a a) f acc0 ==
Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0)
/// A specialized version of the lemma above, for only shifting one computation,
/// but specified using repeati instead
val repeati_right_shift:
#a:Type
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < 1 + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (i + 1) acc))
(ensures Loops.repeati n f (g 0 acc0) == Loops.repeati (n + 1) g acc0)
///
/// `repeat_gen_blocks` is defined here to prove all the properties
/// needed for `map_blocks` and `repeat_blocks` once
///
let repeat_gen_blocks_f
(#inp_t:Type0)
(blocksize:size_pos)
(mi:nat)
(hi:nat)
(n:nat{mi + n <= hi})
(inp:seq inp_t{length inp == n * blocksize})
(a:(i:nat{i <= hi} -> Type))
(f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i:nat{mi <= i /\ i < mi + n})
(acc:a i) : a (i + 1)
=
let i_b = i - mi in
Math.Lemmas.lemma_mult_le_right blocksize (i_b + 1) n;
let block = Seq.slice inp (i_b * blocksize) (i_b * blocksize + blocksize) in
f i block acc
//lo = 0
val repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
a (mi + n)
val lemma_repeat_gen_blocks_multi:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(repeat_gen_blocks_multi #inp_t blocksize mi hi n inp a f acc0 ==
Loops.repeat_right mi (mi + n) a (repeat_gen_blocks_f blocksize mi hi n inp a f) acc0)
val repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acci:a mi ->
c
val lemma_repeat_gen_blocks:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq inp_t{mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_div nb blocksize;
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_gen_blocks_multi #inp_t blocksize mi hi nb blocks a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 == l (mi + nb) rem last acc)
val repeat_gen_blocks_multi_extensionality_zero:
#inp_t:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{length inp == n * blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc))
(ensures
repeat_gen_blocks_multi blocksize mi hi_f n inp a_f f acc0 ==
repeat_gen_blocks_multi blocksize 0 hi_g n inp a_g g acc0)
val repeat_gen_blocks_extensionality_zero:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> mi:nat
-> hi_f:nat
-> hi_g:nat
-> n:nat{mi + n <= hi_f /\ n <= hi_g}
-> inp:seq inp_t{n == length inp / blocksize}
-> a_f:(i:nat{i <= hi_f} -> Type)
-> a_g:(i:nat{i <= hi_g} -> Type)
-> f:(i:nat{i < hi_f} -> lseq inp_t blocksize -> a_f i -> a_f (i + 1))
-> l_f:(i:nat{i <= hi_f} -> len:nat{len < blocksize} -> lseq inp_t len -> a_f i -> c)
-> g:(i:nat{i < hi_g} -> lseq inp_t blocksize -> a_g i -> a_g (i + 1))
-> l_g:(i:nat{i <= hi_g} -> len:nat{len < blocksize} -> lseq inp_t len -> a_g i -> c)
-> acc0:a_f mi ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f (mi + i) == a_g i) /\
(forall (i:nat{i < n}) (block:lseq inp_t blocksize) (acc:a_f (mi + i)).
f (mi + i) block acc == g i block acc) /\
(forall (i:nat{i <= n}) (len:nat{len < blocksize}) (block:lseq inp_t len) (acc:a_f (mi + i)).
l_f (mi + i) len block acc == l_g i len block acc))
(ensures
repeat_gen_blocks blocksize mi hi_f inp a_f f l_f acc0 ==
repeat_gen_blocks blocksize 0 hi_g inp a_g g l_g acc0)
val len0_div_bs: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize == 0)
(ensures len0 / blocksize + (len - len0) / blocksize == len / blocksize)
val split_len_lemma0: blocksize:pos -> n:nat -> len0:nat ->
Lemma
(requires len0 <= n * blocksize /\ len0 % blocksize = 0)
(ensures
(let len = n * blocksize in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
len % blocksize = 0 /\ len1 % blocksize = 0 /\ n0 + n1 = n /\
n0 * blocksize = len0 /\ n1 * blocksize = len1))
val split_len_lemma: blocksize:pos -> len:nat -> len0:nat ->
Lemma
(requires len0 <= len /\ len0 % blocksize = 0)
(ensures
(let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
let n = len / blocksize in
len % blocksize = len1 % blocksize /\
n0 * blocksize = len0 /\ n0 + n1 = n))
val repeat_gen_blocks_multi_split:
#inp_t:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq inp_t{len0 <= length inp /\ length inp == n * blocksize}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> acc0:a mi ->
Lemma
(let len = length inp in
let len1 = len - len0 in
let n0 = len0 / blocksize in
let n1 = len1 / blocksize in
split_len_lemma0 blocksize n len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks_multi blocksize mi hi n inp a f acc0 ==
repeat_gen_blocks_multi blocksize (mi + n0) hi n1 t1 a f acc)
val repeat_gen_blocks_split:
#inp_t:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize == 0}
-> hi:nat
-> mi:nat{mi <= hi}
-> inp:seq inp_t{len0 <= length inp /\ mi + length inp / blocksize <= hi}
-> a:(i:nat{i <= hi} -> Type)
-> f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1))
-> l:(i:nat{i <= hi} -> len:nat{len < blocksize} -> lseq inp_t len -> a i -> c)
-> acc0:a mi ->
Lemma
(let len = length inp in
let n = len / blocksize in
let n0 = len0 / blocksize in
split_len_lemma blocksize len len0;
let t0 = Seq.slice inp 0 len0 in
let t1 = Seq.slice inp len0 len in
let acc : a (mi + n0) = repeat_gen_blocks_multi blocksize mi hi n0 t0 a f acc0 in
repeat_gen_blocks blocksize mi hi inp a f l acc0 ==
repeat_gen_blocks blocksize (mi + n0) hi t1 a f l acc)
///
/// Properties related to the repeat_blocks combinator
///
val repeat_blocks_extensionality:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f1:(lseq a blocksize -> b -> b)
-> f2:(lseq a blocksize -> b -> b)
-> l1:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> l2:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f1 block acc == f2 block acc) /\
(forall (rem:nat{rem < blocksize}) (last:lseq a rem) (acc:b). l1 rem last acc == l2 rem last acc))
(ensures
repeat_blocks blocksize inp f1 l1 acc0 == repeat_blocks blocksize inp f2 l2 acc0)
val lemma_repeat_blocks_via_multi:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
let nb = len / blocksize in
let rem = len % blocksize in
let blocks = Seq.slice inp 0 (nb * blocksize) in
let last = Seq.slice inp (nb * blocksize) len in
Math.Lemmas.cancel_mul_mod nb blocksize;
let acc = repeat_blocks_multi blocksize blocks f acc0 in
repeat_blocks #a #b blocksize inp f l acc0 == l rem last acc)
val repeat_blocks_multi_is_repeat_gen_blocks_multi:
#a:Type0
-> #b:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0 /\ length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let n = length inp / blocksize in
Math.Lemmas.div_exact_r (length inp) blocksize;
repeat_blocks_multi #a #b blocksize inp f acc0 ==
repeat_gen_blocks_multi #a blocksize 0 hi n inp (Loops.fixed_a b) (Loops.fixed_i f) acc0)
val repeat_blocks_is_repeat_gen_blocks:
#a:Type0
-> #b:Type0
-> #c:Type0
-> hi:nat
-> blocksize:size_pos
-> inp:seq a{length inp / blocksize <= hi}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(repeat_blocks #a #b #c blocksize inp f l acc0 ==
repeat_gen_blocks #a #c blocksize 0 hi inp
(Loops.fixed_a b) (Loops.fixed_i f) (Loops.fixed_i l) acc0)
val repeat_blocks_multi_split:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp /\ length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> acc0:b ->
Lemma
(let len = length inp in
Math.Lemmas.lemma_div_exact len blocksize;
split_len_lemma0 blocksize (len / blocksize) len0;
Math.Lemmas.swap_mul blocksize (len / blocksize);
repeat_blocks_multi blocksize inp f acc0 ==
repeat_blocks_multi blocksize (Seq.slice inp len0 len) f
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
val repeat_blocks_split:
#a:Type0
-> #b:Type0
-> #c:Type0
-> blocksize:size_pos
-> len0:nat{len0 % blocksize = 0}
-> inp:seq a{len0 <= length inp}
-> f:(lseq a blocksize -> b -> b)
-> l:(len:nat{len < blocksize} -> s:lseq a len -> b -> c)
-> acc0:b ->
Lemma
(let len = length inp in
split_len_lemma blocksize len len0;
repeat_blocks blocksize inp f l acc0 ==
repeat_blocks blocksize (Seq.slice inp len0 len) f l
(repeat_blocks_multi blocksize (Seq.slice inp 0 len0) f acc0))
///
val repeat_blocks_multi_extensionality:
#a:Type0
-> #b:Type0
-> blocksize:size_pos
-> inp:seq a{length inp % blocksize = 0}
-> f:(lseq a blocksize -> b -> b)
-> g:(lseq a blocksize -> b -> b)
-> init:b ->
Lemma
(requires
(forall (block:lseq a blocksize) (acc:b). f block acc == g block acc))
(ensures
repeat_blocks_multi blocksize inp f init ==
repeat_blocks_multi blocksize inp g init)
/// Properties related to the map_blocks combinator
///
val map_blocks_multi_extensionality:
#a:Type0
-> blocksize:size_pos
-> max:nat
-> n:nat{n <= max}
-> inp:seq a{length inp == max * blocksize}
-> f:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize)
-> g:(i:nat{i < max} -> lseq a blocksize -> lseq a blocksize) ->
Lemma
(requires
(forall (i:nat{i < max}) (b_v:lseq a blocksize). f i b_v == g i b_v))
(ensures
map_blocks_multi blocksize max n inp f ==
map_blocks_multi blocksize max n inp g)
val map_blocks_extensionality:
#a:Type0
-> blocksize:size_pos
-> inp:seq a
-> f:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_f:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem)
-> g:(block (length inp) blocksize -> lseq a blocksize -> lseq a blocksize)
-> l_g:(last (length inp) blocksize -> rem:size_nat{rem < blocksize} -> s:lseq a rem -> lseq a rem) ->
Lemma
(requires
(let n = length inp / blocksize in
(forall (i:nat{i < n}) (b_v:lseq a blocksize). f i b_v == g i b_v) /\
(forall (rem:nat{rem < blocksize}) (b_v:lseq a rem). l_f n rem b_v == l_g n rem b_v)))
(ensures
map_blocks blocksize inp f l_f == map_blocks blocksize inp g l_g)
///
/// New definition of `map_blocks` that takes extra parameter `acc`.
/// When `acc` = Seq.empty, map_blocks == map_blocks_acc
///
let repeat_gen_blocks_map_f
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize))
(i:nat{i < hi})
(block:lseq a blocksize)
(acc:map_blocks_a a blocksize hi i) : map_blocks_a a blocksize hi (i + 1)
=
Seq.append acc (f i block)
let repeat_gen_blocks_map_l
(#a:Type0)
(blocksize:size_pos)
(hi:nat)
(l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i:nat{i <= hi})
(rem:nat{rem < blocksize})
(block_l:lseq a rem)
(acc:map_blocks_a a blocksize hi i) : seq a
=
if rem > 0 then Seq.append acc (l i rem block_l) else acc
val repeat_gen_blocks_map_l_length:
#a:Type0
-> blocksize:size_pos
-> hi:nat
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> i:nat{i <= hi}
-> rem:nat{rem < blocksize}
-> block_l:lseq a rem
-> acc:map_blocks_a a blocksize hi i ->
Lemma (length (repeat_gen_blocks_map_l blocksize hi l i rem block_l acc) == i * blocksize + rem)
val map_blocks_multi_acc:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq a{length inp == n * blocksize}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> acc0:map_blocks_a a blocksize hi mi ->
out:seq a {length out == length acc0 + length inp}
val map_blocks_acc:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq a{mi + length inp / blocksize <= hi}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> acc0:map_blocks_a a blocksize hi mi ->
seq a
val map_blocks_acc_length:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq a{mi + length inp / blocksize <= hi}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> acc0:map_blocks_a a blocksize hi mi ->
Lemma (length (map_blocks_acc blocksize mi hi inp f l acc0) == length acc0 + length inp)
[SMTPat (map_blocks_acc blocksize mi hi inp f l acc0)]
val map_blocks_multi_acc_is_repeat_gen_blocks_multi:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> n:nat{mi + n <= hi}
-> inp:seq a{length inp == n * blocksize}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> acc0:map_blocks_a a blocksize hi mi ->
Lemma
(map_blocks_multi_acc #a blocksize mi hi n inp f acc0 ==
repeat_gen_blocks_multi #a blocksize mi hi n inp
(map_blocks_a a blocksize hi)
(repeat_gen_blocks_map_f blocksize hi f) acc0)
val map_blocks_acc_is_repeat_gen_blocks:
#a:Type0
-> blocksize:size_pos
-> mi:nat
-> hi:nat
-> inp:seq a{mi + length inp / blocksize <= hi}
-> f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)
-> l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
-> acc0:map_blocks_a a blocksize hi mi ->
Lemma
(map_blocks_acc #a blocksize mi hi inp f l acc0 ==
repeat_gen_blocks #a blocksize mi hi inp
(map_blocks_a a blocksize hi)
(repeat_gen_blocks_map_f blocksize hi f)
(repeat_gen_blocks_map_l blocksize hi l) acc0)
let f_shift (#a:Type0) (blocksize:size_pos) (mi:nat) (hi:nat) (n:nat{mi + n <= hi})
(f:(i:nat{i < hi} -> lseq a blocksize -> lseq a blocksize)) (i:nat{i < n}) = f (mi + i) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
blocksize: Lib.IntTypes.size_pos ->
mi: Prims.nat ->
hi: Prims.nat ->
n: Prims.nat{mi + n <= hi} ->
l:
(i: Prims.nat{i <= hi} -> rem: Prims.nat{rem < blocksize} -> _: Lib.Sequence.lseq a rem
-> Lib.Sequence.lseq a rem) ->
i: Prims.nat{i <= n} ->
rem: Prims.nat{rem < blocksize} ->
_: Lib.Sequence.lseq a rem
-> Lib.Sequence.lseq a rem | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_pos",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Addition",
"Prims.op_LessThan",
"Lib.Sequence.lseq"
] | [] | false | false | false | false | false | let l_shift
(#a: Type0)
(blocksize: size_pos)
(mi hi: nat)
(n: nat{mi + n <= hi})
(l: (i: nat{i <= hi} -> rem: nat{rem < blocksize} -> lseq a rem -> lseq a rem))
(i: nat{i <= n})
=
| l (mi + i) | false |
|
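An illustrative sketch (hypothetical name, assuming the opens of Lib.Sequence.Lemmas.fsti): l_shift re-indexes a remainder handler defined on [0, hi] to one defined on [0, n], offset by mi, so that a computation over the window [mi, mi + n) can restart its indexing at 0; the ascription below just spells out the shifted type.

(* Hypothetical example: the shifted handler has the re-indexed type below. *)
let l_shift_example (#a:Type0) (blocksize:size_pos) (mi:nat) (hi:nat) (n:nat{mi + n <= hi})
  (l:(i:nat{i <= hi} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem))
  : (i:nat{i <= n} -> rem:nat{rem < blocksize} -> lseq a rem -> lseq a rem)
  = l_shift blocksize mi hi n l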
Lib.Sequence.Lemmas.fsti | Lib.Sequence.Lemmas.repeat_gen_blocks_f | val repeat_gen_blocks_f
(#inp_t: Type0)
(blocksize: size_pos)
(mi hi: nat)
(n: nat{mi + n <= hi})
(inp: seq inp_t {length inp == n * blocksize})
(a: (i: nat{i <= hi} -> Type))
(f: (i: nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i: nat{mi <= i /\ i < mi + n})
(acc: a i)
: a (i + 1) | val repeat_gen_blocks_f
(#inp_t: Type0)
(blocksize: size_pos)
(mi hi: nat)
(n: nat{mi + n <= hi})
(inp: seq inp_t {length inp == n * blocksize})
(a: (i: nat{i <= hi} -> Type))
(f: (i: nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i: nat{mi <= i /\ i < mi + n})
(acc: a i)
: a (i + 1) | let repeat_gen_blocks_f
(#inp_t:Type0)
(blocksize:size_pos)
(mi:nat)
(hi:nat)
(n:nat{mi + n <= hi})
(inp:seq inp_t{length inp == n * blocksize})
(a:(i:nat{i <= hi} -> Type))
(f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i:nat{mi <= i /\ i < mi + n})
(acc:a i) : a (i + 1)
=
let i_b = i - mi in
Math.Lemmas.lemma_mult_le_right blocksize (i_b + 1) n;
let block = Seq.slice inp (i_b * blocksize) (i_b * blocksize + blocksize) in
f i block acc | {
"file_name": "lib/Lib.Sequence.Lemmas.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 15,
"end_line": 133,
"start_col": 0,
"start_line": 118
} | module Lib.Sequence.Lemmas
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
module Loops = Lib.LoopCombinators
#set-options "--z3rlimit 50 --max_fuel 0 --max_ifuel 0 \
--using_facts_from '-* +Prims +FStar.Math.Lemmas +FStar.Seq +Lib.IntTypes +Lib.Sequence +Lib.Sequence.Lemmas'"
let get_block_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len})
(i:nat{i < len / blocksize * blocksize}) :
lseq a blocksize
=
div_mul_lt blocksize i (len / blocksize);
let j = i / blocksize in
let b: lseq a blocksize = Seq.slice inp (j * blocksize) ((j + 1) * blocksize) in
b
let get_last_s
(#a:Type)
(#len:nat)
(blocksize:size_pos)
(inp:seq a{length inp == len}) :
lseq a (len % blocksize)
=
let rem = len % blocksize in
let b: lseq a rem = Seq.slice inp (len - rem) len in
b
val repeati_extensionality:
#a:Type0
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g i acc))
(ensures Loops.repeati n f acc0 == Loops.repeati n g acc0)
val repeat_right_extensionality:
n:nat
-> lo:nat
-> a_f:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> a_g:(i:nat{lo <= i /\ i <= lo + n} -> Type)
-> f:(i:nat{lo <= i /\ i < lo + n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo <= i /\ i < lo + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f lo ->
Lemma
(requires
(forall (i:nat{lo <= i /\ i <= lo + n}). a_f i == a_g i) /\
(forall (i:nat{lo <= i /\ i < lo + n}) (acc:a_f i). f i acc == g i acc))
(ensures
Loops.repeat_right lo (lo + n) a_f f acc0 ==
Loops.repeat_right lo (lo + n) a_g g acc0)
// Loops.repeat_gen n a_f f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
val repeat_gen_right_extensionality:
n:nat
-> lo_g:nat
-> a_f:(i:nat{i <= n} -> Type)
-> a_g:(i:nat{lo_g <= i /\ i <= lo_g + n} -> Type)
-> f:(i:nat{i < n} -> a_f i -> a_f (i + 1))
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a_g i -> a_g (i + 1))
-> acc0:a_f 0 ->
Lemma
(requires
(forall (i:nat{i <= n}). a_f i == a_g (lo_g + i)) /\
(forall (i:nat{i < n}) (acc:a_f i). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n a_f f acc0 ==
Loops.repeat_right lo_g (lo_g + n) a_g g acc0)
// Loops.repeati n a f acc0 ==
// Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0
val repeati_right_extensionality:
#a:Type
-> n:nat
-> lo_g:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{lo_g <= i /\ i < lo_g + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (lo_g + i) acc))
(ensures
Loops.repeat_right 0 n (Loops.fixed_a a) f acc0 ==
Loops.repeat_right lo_g (lo_g + n) (Loops.fixed_a a) g acc0)
/// A specialized version of the lemma above, for only shifting one computation,
/// but specified using repeati instead
val repeati_right_shift:
#a:Type
-> n:nat
-> f:(i:nat{i < n} -> a -> a)
-> g:(i:nat{i < 1 + n} -> a -> a)
-> acc0:a ->
Lemma
(requires (forall (i:nat{i < n}) (acc:a). f i acc == g (i + 1) acc))
(ensures Loops.repeati n f (g 0 acc0) == Loops.repeati (n + 1) g acc0)
///
/// `repeat_gen_blocks` is defined here to prove all the properties
/// needed for `map_blocks` and `repeat_blocks` once
/// | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Lib.Sequence.Lemmas.fsti"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
blocksize: Lib.IntTypes.size_pos ->
mi: Prims.nat ->
hi: Prims.nat ->
n: Prims.nat{mi + n <= hi} ->
inp: Lib.Sequence.seq inp_t {Lib.Sequence.length inp == n * blocksize} ->
a: (i: Prims.nat{i <= hi} -> Type) ->
f: (i: Prims.nat{i < hi} -> _: Lib.Sequence.lseq inp_t blocksize -> _: a i -> a (i + 1)) ->
i: Prims.nat{mi <= i /\ i < mi + n} ->
acc: a i
-> a (i + 1) | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_pos",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Addition",
"Lib.Sequence.seq",
"Prims.eq2",
"Prims.int",
"Lib.Sequence.length",
"FStar.Mul.op_Star",
"Prims.op_LessThan",
"Lib.Sequence.lseq",
"Prims.l_and",
"FStar.Seq.Base.seq",
"FStar.Seq.Base.slice",
"Prims.unit",
"FStar.Math.Lemmas.lemma_mult_le_right",
"Prims.op_Subtraction"
] | [] | false | false | false | false | false | let repeat_gen_blocks_f
(#inp_t: Type0)
(blocksize: size_pos)
(mi hi: nat)
(n: nat{mi + n <= hi})
(inp: seq inp_t {length inp == n * blocksize})
(a: (i: nat{i <= hi} -> Type))
(f: (i: nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
(i: nat{mi <= i /\ i < mi + n})
(acc: a i)
: a (i + 1) =
| let i_b = i - mi in
Math.Lemmas.lemma_mult_le_right blocksize (i_b + 1) n;
let block = Seq.slice inp (i_b * blocksize) (i_b * blocksize + blocksize) in
f i block acc | false |
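A minimal sketch (hypothetical name, assuming the opens of Lib.Sequence.Lemmas.fsti): repeat_gen_blocks_f is the per-block step that repeat_gen_blocks_multi folds with Loops.repeat_right, exactly as lemma_repeat_gen_blocks_multi states.

(* Hypothetical restatement of repeat_gen_blocks_multi via Loops.repeat_right,
   mirroring lemma_repeat_gen_blocks_multi: each step slices block i - mi out of
   inp and feeds it to f. *)
let repeat_gen_blocks_multi_alt (#inp_t:Type0) (blocksize:size_pos)
  (mi:nat) (hi:nat) (n:nat{mi + n <= hi})
  (inp:seq inp_t{length inp == n * blocksize})
  (a:(i:nat{i <= hi} -> Type))
  (f:(i:nat{i < hi} -> lseq inp_t blocksize -> a i -> a (i + 1)))
  (acc0:a mi)
  : a (mi + n)
  = Loops.repeat_right mi (mi + n) a
      (repeat_gen_blocks_f blocksize mi hi n inp a f)
      acc0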
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.alloc | val alloc (#a:Type)
(#pcm:pcm a)
(x:a)
: ST (ref a pcm)
emp
(fun r -> pts_to r x)
(requires pcm.refine x)
(ensures fun _ -> True) | val alloc (#a:Type)
(#pcm:pcm a)
(x:a)
: ST (ref a pcm)
emp
(fun r -> pts_to r x)
(requires pcm.refine x)
(ensures fun _ -> True) | let alloc x = C.coerce_steel (fun _ -> P.alloc x) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 49,
"end_line": 10,
"start_col": 0,
"start_line": 10
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: a -> Steel.ST.Effect.ST (Steel.Memory.ref a pcm) | Steel.ST.Effect.ST | [] | [] | [
"FStar.PCM.pcm",
"Steel.ST.Coercions.coerce_steel",
"Steel.Memory.ref",
"Steel.Effect.Common.emp",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"Steel.Effect.Common.vprop",
"FStar.PCM.__proj__Mkpcm__item__refine",
"Prims.l_True",
"Prims.unit",
"Steel.PCMReference.alloc"
] | [] | false | true | false | false | false | let alloc x =
| C.coerce_steel (fun _ -> P.alloc x) | false |
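A minimal usage sketch (hypothetical wrapper, assuming the opens of Steel.ST.PCMReference.fsti, e.g. FStar.PCM, FStar.Ghost and Steel.ST.Util): alloc demands pcm.refine x as a logical precondition, which a caller can equivalently carry as a refinement on the initial value.

(* Hypothetical wrapper, not part of the library: package alloc's precondition
   as a refinement on the initial value and expose a trivially-specified STT. *)
let alloc_refined (#a:Type) (#pcm:pcm a) (x:a{pcm.refine x})
  : STT (ref a pcm) emp (fun r -> pts_to r x)
  = alloc x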
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.read | val read (#a:Type)
(#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
: ST a
(pts_to r v0)
(fun _ -> pts_to r v0)
(requires True)
(ensures fun v -> compatible pcm v0 v /\ True) | val read (#a:Type)
(#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
: ST a
(pts_to r v0)
(fun _ -> pts_to r v0)
(requires True)
(ensures fun v -> compatible pcm v0 v /\ True) | let read r v0 = C.coerce_steel (fun _ -> P.read r v0) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 53,
"end_line": 6,
"start_col": 0,
"start_line": 6
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Steel.Memory.ref a pcm -> v0: FStar.Ghost.erased a -> Steel.ST.Effect.ST a | Steel.ST.Effect.ST | [] | [] | [
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_steel",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Prims.l_and",
"FStar.PCM.compatible",
"Prims.unit",
"Steel.PCMReference.read"
] | [] | false | true | false | false | false | let read r v0 =
| C.coerce_steel (fun _ -> P.read r v0) | false |
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.write | val write (#a:Type)
(#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
(v1:a)
: ST unit
(pts_to r v0)
(fun _ -> pts_to r v1)
(requires frame_preserving pcm v0 v1 /\ pcm.refine v1)
(ensures fun _ -> True) | val write (#a:Type)
(#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
(v1:a)
: ST unit
(pts_to r v0)
(fun _ -> pts_to r v1)
(requires frame_preserving pcm v0 v1 /\ pcm.refine v1)
(ensures fun _ -> True) | let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 61,
"end_line": 8,
"start_col": 0,
"start_line": 8
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Steel.Memory.ref a pcm -> v0: FStar.Ghost.erased a -> v1: a -> Steel.ST.Effect.ST Prims.unit | Steel.ST.Effect.ST | [] | [] | [
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_steel",
"Prims.unit",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"Steel.Effect.Common.vprop",
"Prims.l_and",
"FStar.PCM.frame_preserving",
"FStar.PCM.__proj__Mkpcm__item__refine",
"Prims.l_True",
"Steel.PCMReference.write"
] | [] | false | true | false | false | false | let write r v0 v1 =
| C.coerce_steel (fun _ -> P.write r v0 v1) | false |
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.free | val free (#a:Type)
(#p:pcm a)
(r:ref a p)
(x:erased a)
: ST unit (pts_to r x) (fun _ -> pts_to r p.p.one)
(requires exclusive p x /\ p.refine p.p.one)
(ensures fun _ -> True) | val free (#a:Type)
(#p:pcm a)
(r:ref a p)
(x:erased a)
: ST unit (pts_to r x) (fun _ -> pts_to r p.p.one)
(requires exclusive p x /\ p.refine p.p.one)
(ensures fun _ -> True) | let free r x = C.coerce_steel (fun _ -> P.free r x) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 51,
"end_line": 12,
"start_col": 0,
"start_line": 12
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Steel.Memory.ref a p -> x: FStar.Ghost.erased a -> Steel.ST.Effect.ST Prims.unit | Steel.ST.Effect.ST | [] | [] | [
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_steel",
"Prims.unit",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"FStar.PCM.__proj__Mkpcm'__item__one",
"FStar.PCM.__proj__Mkpcm__item__p",
"Steel.Effect.Common.vprop",
"Prims.l_and",
"FStar.PCM.exclusive",
"FStar.PCM.__proj__Mkpcm__item__refine",
"Prims.l_True",
"Steel.PCMReference.free"
] | [] | false | true | false | false | false | let free r x =
| C.coerce_steel (fun _ -> P.free r x) | false |
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.witness | val witness (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
: STAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v) | val witness (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
: STAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v) | let witness r fact v vc = C.coerce_atomic (witness' r fact v vc) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 64,
"end_line": 28,
"start_col": 0,
"start_line": 28
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1)
let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1)
let witness' (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
()
: Steel.Effect.Atomic.SteelAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v)
= P.witness r fact v () | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Steel.Memory.ref a pcm ->
fact: Steel.Memory.stable_property pcm ->
v: FStar.Ghost.erased a ->
vc: Steel.ST.PCMReference.fact_valid_compat fact v
-> Steel.ST.Effect.Atomic.STAtomicUT (Steel.Memory.witnessed r fact) | Steel.ST.Effect.Atomic.STAtomicUT | [] | [] | [
"Steel.Memory.inames",
"FStar.PCM.pcm",
"Steel.Memory.ref",
"Steel.Memory.stable_property",
"FStar.Ghost.erased",
"Steel.ST.PCMReference.fact_valid_compat",
"Steel.ST.Coercions.coerce_atomic",
"Steel.Memory.witnessed",
"Steel.Effect.Common.Unobservable",
"Steel.ST.PCMReference.pts_to",
"FStar.Ghost.reveal",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Steel.ST.PCMReference.witness'"
] | [] | false | true | false | false | false | let witness r fact v vc =
| C.coerce_atomic (witness' r fact v vc) | false |
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.atomic_write | val atomic_write (#opened:_) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
(v1:a)
: STAtomic unit opened
(pts_to r v0)
(fun _ -> pts_to r v1)
(requires frame_preserving pcm v0 v1 /\ pcm.refine v1)
(ensures fun _ -> True) | val atomic_write (#opened:_) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
(v1:a)
: STAtomic unit opened
(pts_to r v0)
(fun _ -> pts_to r v1)
(requires frame_preserving pcm v0 v1 /\ pcm.refine v1)
(ensures fun _ -> True) | let atomic_write r v0 v1 = C.coerce_atomic (fun _ -> P.atomic_write r v0 v1) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 76,
"end_line": 38,
"start_col": 0,
"start_line": 38
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1)
let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1)
let witness' (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
()
: Steel.Effect.Atomic.SteelAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v)
= P.witness r fact v ()
let witness r fact v vc = C.coerce_atomic (witness' r fact v vc)
let recall fact r v w = C.coerce_atomic (fun _ -> P.recall fact r v w)
let select_refine r x f = C.coerce_steel (fun _ -> P.select_refine r x f)
let upd_gen r x y f = C.coerce_steel (fun _ -> P.upd_gen r x y f)
let atomic_read r v0 = C.coerce_atomic (fun _ -> P.atomic_read r v0) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Steel.Memory.ref a pcm -> v0: FStar.Ghost.erased a -> v1: a
-> Steel.ST.Effect.Atomic.STAtomic Prims.unit | Steel.ST.Effect.Atomic.STAtomic | [] | [] | [
"Steel.Memory.inames",
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_atomic",
"Prims.unit",
"Steel.Effect.Common.Observable",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"Steel.Effect.Common.vprop",
"Prims.l_and",
"FStar.PCM.frame_preserving",
"FStar.PCM.__proj__Mkpcm__item__refine",
"Prims.l_True",
"Steel.PCMReference.atomic_write"
] | [] | false | true | false | false | false | let atomic_write r v0 v1 =
| C.coerce_atomic (fun _ -> P.atomic_write r v0 v1) | false |
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.upd_gen | val upd_gen (#a:Type) (#p:pcm a) (r:ref a p) (x y:erased a)
(f:frame_preserving_upd p x y)
: STT unit
(pts_to r x)
(fun _ -> pts_to r y) | val upd_gen (#a:Type) (#p:pcm a) (r:ref a p) (x y:erased a)
(f:frame_preserving_upd p x y)
: STT unit
(pts_to r x)
(fun _ -> pts_to r y) | let upd_gen r x y f = C.coerce_steel (fun _ -> P.upd_gen r x y f) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 65,
"end_line": 34,
"start_col": 0,
"start_line": 34
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1)
let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1)
let witness' (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
()
: Steel.Effect.Atomic.SteelAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v)
= P.witness r fact v ()
let witness r fact v vc = C.coerce_atomic (witness' r fact v vc)
let recall fact r v w = C.coerce_atomic (fun _ -> P.recall fact r v w)
let select_refine r x f = C.coerce_steel (fun _ -> P.select_refine r x f) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Steel.Memory.ref a p ->
x: FStar.Ghost.erased a ->
y: FStar.Ghost.erased a ->
f: FStar.PCM.frame_preserving_upd p (FStar.Ghost.reveal x) (FStar.Ghost.reveal y)
-> Steel.ST.Effect.STT Prims.unit | Steel.ST.Effect.STT | [] | [] | [
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"FStar.PCM.frame_preserving_upd",
"FStar.Ghost.reveal",
"Steel.ST.Coercions.coerce_steel",
"Prims.unit",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Steel.PCMReference.upd_gen"
] | [] | false | true | false | false | false | let upd_gen r x y f =
| C.coerce_steel (fun _ -> P.upd_gen r x y f) | false |
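(* Informal gloss (illustrative, hedged): upd_gen is the most general mutation
   primitive in this interface. Rather than a concrete new value, it takes
   explicit evidence f : frame_preserving_upd p x y that moving the contents
   from x to y preserves every frame composable with x; the plain write shown
   earlier in this file takes the new value directly and carries its own
   frame-preservation side conditions instead. *)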
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.atomic_read | val atomic_read (#opened:_) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
: STAtomic a opened
(pts_to r v0)
(fun _ -> pts_to r v0)
(requires True)
(ensures fun v -> compatible pcm v0 v /\ True) | val atomic_read (#opened:_) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(v0:erased a)
: STAtomic a opened
(pts_to r v0)
(fun _ -> pts_to r v0)
(requires True)
(ensures fun v -> compatible pcm v0 v /\ True) | let atomic_read r v0 = C.coerce_atomic (fun _ -> P.atomic_read r v0) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 68,
"end_line": 36,
"start_col": 0,
"start_line": 36
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1)
let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1)
let witness' (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
()
: Steel.Effect.Atomic.SteelAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v)
= P.witness r fact v ()
let witness r fact v vc = C.coerce_atomic (witness' r fact v vc)
let recall fact r v w = C.coerce_atomic (fun _ -> P.recall fact r v w)
let select_refine r x f = C.coerce_steel (fun _ -> P.select_refine r x f)
let upd_gen r x y f = C.coerce_steel (fun _ -> P.upd_gen r x y f) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Steel.Memory.ref a pcm -> v0: FStar.Ghost.erased a -> Steel.ST.Effect.Atomic.STAtomic a | Steel.ST.Effect.Atomic.STAtomic | [] | [] | [
"Steel.Memory.inames",
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_atomic",
"Steel.Effect.Common.Observable",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Prims.l_and",
"FStar.PCM.compatible",
"Prims.unit",
"Steel.PCMReference.atomic_read"
] | [] | false | true | false | false | false | let atomic_read r v0 =
| C.coerce_atomic (fun _ -> P.atomic_read r v0) | false |
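(* Informal gloss (illustrative, hedged): atomic_read, like atomic_write just
   above, is the STAtomic counterpart of the plain read/write operations: the
   points-to pre/postconditions are the same, but the computation is indexed
   by an `opened` invariant set, so it can be used inside atomic,
   with_invariant-style blocks. *)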
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.recall | val recall (#inames: _) (#a:Type u#1) (#pcm:pcm a)
(fact:property a)
(r:ref a pcm)
(v:erased a)
(w:witnessed r fact)
: STAtomicU (erased a) inames
(pts_to r v)
(fun v1 -> pts_to r v)
(requires True)
(ensures fun v1 -> fact v1 /\ compatible pcm v v1) | val recall (#inames: _) (#a:Type u#1) (#pcm:pcm a)
(fact:property a)
(r:ref a pcm)
(v:erased a)
(w:witnessed r fact)
: STAtomicU (erased a) inames
(pts_to r v)
(fun v1 -> pts_to r v)
(requires True)
(ensures fun v1 -> fact v1 /\ compatible pcm v v1) | let recall fact r v w = C.coerce_atomic (fun _ -> P.recall fact r v w) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 70,
"end_line": 30,
"start_col": 0,
"start_line": 30
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1)
let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1)
let witness' (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
()
: Steel.Effect.Atomic.SteelAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v)
= P.witness r fact v ()
let witness r fact v vc = C.coerce_atomic (witness' r fact v vc) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
fact: Steel.Memory.property a ->
r: Steel.Memory.ref a pcm ->
v: FStar.Ghost.erased a ->
w: Steel.Memory.witnessed r fact
-> Steel.ST.Effect.Atomic.STAtomicU (FStar.Ghost.erased a) | Steel.ST.Effect.Atomic.STAtomicU | [] | [] | [
"Steel.Memory.inames",
"FStar.PCM.pcm",
"Steel.Memory.property",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.Memory.witnessed",
"Steel.ST.Coercions.coerce_atomic",
"Steel.Effect.Common.Unobservable",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"FStar.Ghost.hide",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Prims.l_and",
"FStar.PCM.compatible",
"Prims.unit",
"Steel.PCMReference.recall"
] | [] | false | true | false | false | false | let recall fact r v w =
| C.coerce_atomic (fun _ -> P.recall fact r v w) | false |
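(* Informal gloss (illustrative, hedged): recall is the elimination half of the
   witness/recall protocol. witness' (shown in the file context above) turns a
   stable_property of the current contents into a `witnessed r fact` token;
   recall later trades that token for the knowledge that `fact` holds of some
   value v1 compatible with the caller's fragment v. Stability of the property
   is what makes this round trip sound. *)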
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.select_refine | val select_refine (#a:Type u#1) (#p:pcm a)
(r:ref a p)
(x:erased a)
(f:(v:a{compatible p x v}
-> GTot (y:a{compatible p y v /\
frame_compatible p x v y})))
: STT (v:a{compatible p x v /\ p.refine v})
(pts_to r x)
(fun v -> pts_to r (f v)) | val select_refine (#a:Type u#1) (#p:pcm a)
(r:ref a p)
(x:erased a)
(f:(v:a{compatible p x v}
-> GTot (y:a{compatible p y v /\
frame_compatible p x v y})))
: STT (v:a{compatible p x v /\ p.refine v})
(pts_to r x)
(fun v -> pts_to r (f v)) | let select_refine r x f = C.coerce_steel (fun _ -> P.select_refine r x f) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 73,
"end_line": 32,
"start_col": 0,
"start_line": 32
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1)
let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1)
let witness' (#inames: _) (#a:Type) (#pcm:pcm a)
(r:ref a pcm)
(fact:stable_property pcm)
(v:erased a)
(_:fact_valid_compat fact v)
()
: Steel.Effect.Atomic.SteelAtomicUT (witnessed r fact) inames (pts_to r v)
(fun _ -> pts_to r v)
= P.witness r fact v ()
let witness r fact v vc = C.coerce_atomic (witness' r fact v vc)
let recall fact r v w = C.coerce_atomic (fun _ -> P.recall fact r v w) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Steel.Memory.ref a p ->
x: FStar.Ghost.erased a ->
f:
(v: a{FStar.PCM.compatible p (FStar.Ghost.reveal x) v}
-> Prims.GTot (y: a{FStar.PCM.compatible p y v /\ FStar.PCM.frame_compatible p x v y}))
-> Steel.ST.Effect.STT
(v: a{FStar.PCM.compatible p (FStar.Ghost.reveal x) v /\ Mkpcm?.refine p v}) | Steel.ST.Effect.STT | [] | [] | [
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"FStar.PCM.compatible",
"FStar.Ghost.reveal",
"Prims.l_and",
"FStar.PCM.frame_compatible",
"Steel.ST.Coercions.coerce_steel",
"FStar.PCM.__proj__Mkpcm__item__refine",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Prims.unit",
"Steel.PCMReference.select_refine"
] | [] | false | true | false | false | false | let select_refine r x f =
| C.coerce_steel (fun _ -> P.select_refine r x f) | false |
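(* Informal gloss (illustrative, hedged): select_refine reads the full, refined
   current value v (p.refine v holds, and v is compatible with the caller's
   knowledge x) and simultaneously upgrades the ghost knowledge to f v. The
   frame_compatible condition on f says, roughly, that any frame explaining x
   inside v also explains f v inside v, so other owners' fragments of the
   reference remain valid. *)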
Spec.Frodo.Pack.fst | Spec.Frodo.Pack.frodo_unpack8 | val frodo_unpack8:
d:size_nat{d <= 16}
-> b:lbytes d
-> lseq uint16 8 | val frodo_unpack8:
d:size_nat{d <= 16}
-> b:lbytes d
-> lseq uint16 8 | let frodo_unpack8 d b =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let v16 = Seq.create 16 (u8 0) in
let src = update_sub v16 (16 - d) d b in
let templong: uint_t U128 SEC = uint_from_bytes_be src in
let res = Seq.create 8 (u16 0) in
let res = res.[0] <- to_u16 (templong >>. size (7 * d)) &. maskd in
let res = res.[1] <- to_u16 (templong >>. size (6 * d)) &. maskd in
let res = res.[2] <- to_u16 (templong >>. size (5 * d)) &. maskd in
let res = res.[3] <- to_u16 (templong >>. size (4 * d)) &. maskd in
let res = res.[4] <- to_u16 (templong >>. size (3 * d)) &. maskd in
let res = res.[5] <- to_u16 (templong >>. size (2 * d)) &. maskd in
let res = res.[6] <- to_u16 (templong >>. size (1 * d)) &. maskd in
let res = res.[7] <- to_u16 (templong >>. size (0 * d)) &. maskd in
res | {
"file_name": "specs/frodo/Spec.Frodo.Pack.fst",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 5,
"end_line": 95,
"start_col": 0,
"start_line": 81
} | module Spec.Frodo.Pack
open FStar.Mul
open Lib.IntTypes
open Lib.Sequence
open Lib.ByteSequence
open Spec.Matrix
module Seq = Lib.Sequence
module Loops = Lib.LoopCombinators
#reset-options "--z3rlimit 100 --max_fuel 0 --max_ifuel 0 --using_facts_from '* -FStar +FStar.Pervasives +FStar.UInt -Spec +Spec.Frodo +Spec.Frodo.Params +Spec.Matrix'"
/// Pack
val frodo_pack8:
d:size_nat{d <= 16}
-> a:lseq uint16 8
-> lbytes d
let frodo_pack8 d a =
let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let a0 = Seq.index a 0 &. maskd in
let a1 = Seq.index a 1 &. maskd in
let a2 = Seq.index a 2 &. maskd in
let a3 = Seq.index a 3 &. maskd in
let a4 = Seq.index a 4 &. maskd in
let a5 = Seq.index a 5 &. maskd in
let a6 = Seq.index a 6 &. maskd in
let a7 = Seq.index a 7 &. maskd in
let templong =
to_u128 a0 <<. size (7 * d)
|. to_u128 a1 <<. size (6 * d)
|. to_u128 a2 <<. size (5 * d)
|. to_u128 a3 <<. size (4 * d)
|. to_u128 a4 <<. size (3 * d)
|. to_u128 a5 <<. size (2 * d)
|. to_u128 a6 <<. size (1 * d)
|. to_u128 a7 <<. size (0 * d)
in
let v16 = uint_to_bytes_be templong in
Seq.sub v16 (16 - d) d
val frodo_pack_state:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> i:size_nat{i <= (n1 * n2) / 8}
-> Type0
let frodo_pack_state #n1 #n2 d i = lseq uint8 (d * i)
val frodo_pack_inner:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> i:size_nat{i < (n1 * n2) / 8}
-> frodo_pack_state #n1 #n2 d i
-> frodo_pack_state #n1 #n2 d (i + 1)
let frodo_pack_inner #n1 #n2 d a i s =
s @| frodo_pack8 d (Seq.sub a (8 * i) 8)
val frodo_pack:
#n1:size_nat
-> #n2:size_nat{n1 * n2 <= max_size_t /\ (n1 * n2) % 8 = 0}
-> d:size_nat{d * ((n1 * n2) / 8) <= max_size_t /\ d <= 16}
-> a:matrix n1 n2
-> lbytes (d * ((n1 * n2) / 8))
let frodo_pack #n1 #n2 d a =
Loops.repeat_gen ((n1 * n2) / 8)
(frodo_pack_state #n1 #n2 d)
(frodo_pack_inner #n1 #n2 d a)
(Seq.create 0 (u8 0))
/// Unpack
val frodo_unpack8:
d:size_nat{d <= 16}
-> b:lbytes d | {
"checked_file": "/",
"dependencies": [
"Spec.Matrix.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.LoopCombinators.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteSequence.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Spec.Frodo.Pack.fst"
} | [
{
"abbrev": true,
"full_module": "Lib.LoopCombinators",
"short_module": "Loops"
},
{
"abbrev": true,
"full_module": "Lib.Sequence",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "Spec.Matrix",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.ByteSequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Frodo",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | d: Lib.IntTypes.size_nat{d <= 16} -> b: Lib.ByteSequence.lbytes d
-> Lib.Sequence.lseq Lib.IntTypes.uint16 8 | Prims.Tot | [
"total"
] | [] | [
"Lib.IntTypes.size_nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Lib.ByteSequence.lbytes",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U16",
"Lib.IntTypes.SEC",
"Prims.l_and",
"Prims.eq2",
"FStar.Seq.Base.seq",
"Lib.Sequence.to_seq",
"FStar.Seq.Base.upd",
"Lib.IntTypes.logand",
"Lib.IntTypes.cast",
"Lib.IntTypes.U128",
"Lib.IntTypes.shift_right",
"Lib.IntTypes.mk_int",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Prims.op_Multiply",
"Lib.Sequence.index",
"Prims.l_Forall",
"Prims.nat",
"Prims.op_Subtraction",
"Prims.pow2",
"Prims.l_imp",
"Prims.op_LessThan",
"Prims.op_disEquality",
"Prims.l_or",
"FStar.Seq.Base.index",
"Lib.Sequence.op_String_Assignment",
"Lib.IntTypes.uint16",
"Lib.IntTypes.op_Amp_Dot",
"Lib.IntTypes.to_u16",
"Lib.IntTypes.op_Greater_Greater_Dot",
"Lib.IntTypes.size",
"FStar.Mul.op_Star",
"FStar.Seq.Base.create",
"Lib.Sequence.create",
"Lib.IntTypes.u16",
"Lib.ByteSequence.uint_from_bytes_be",
"Lib.IntTypes.U8",
"Lib.Sequence.sub",
"Prims.op_Addition",
"Lib.Sequence.update_sub",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.u8",
"Lib.IntTypes.op_Subtraction_Dot",
"Lib.IntTypes.op_Less_Less_Dot",
"Lib.IntTypes.u32"
] | [] | false | false | false | false | false | let frodo_unpack8 d b =
| let maskd = to_u16 (u32 1 <<. size d) -. u16 1 in
let v16 = Seq.create 16 (u8 0) in
let src = update_sub v16 (16 - d) d b in
let templong:uint_t U128 SEC = uint_from_bytes_be src in
let res = Seq.create 8 (u16 0) in
let res = res.[ 0 ] <- to_u16 (templong >>. size (7 * d)) &. maskd in
let res = res.[ 1 ] <- to_u16 (templong >>. size (6 * d)) &. maskd in
let res = res.[ 2 ] <- to_u16 (templong >>. size (5 * d)) &. maskd in
let res = res.[ 3 ] <- to_u16 (templong >>. size (4 * d)) &. maskd in
let res = res.[ 4 ] <- to_u16 (templong >>. size (3 * d)) &. maskd in
let res = res.[ 5 ] <- to_u16 (templong >>. size (2 * d)) &. maskd in
let res = res.[ 6 ] <- to_u16 (templong >>. size (1 * d)) &. maskd in
let res = res.[ 7 ] <- to_u16 (templong >>. size (0 * d)) &. maskd in
res | false |
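(* Worked instance (illustrative): for d = 15, maskd = (1 <<. 15) - 1 = 0x7FFF,
   so each of the 8 lanes keeps its low 15 bits. The packed data then occupies
   8 * 15 = 120 bits, i.e. exactly d = 15 bytes, so frodo_pack8 drops the
   16 - d = 1 leading byte of the big-endian 128-bit encoding, and
   frodo_unpack8 re-inserts a zero byte in that position before extracting the
   lanes with the matching shifts (7*d down to 0*d). *)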
Steel.TLArray.fst | Steel.TLArray.t | val t (a:Type0) : Type0 | val t (a:Type0) : Type0 | let t a = list a | {
"file_name": "lib/steel/Steel.TLArray.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 16,
"end_line": 3,
"start_col": 0,
"start_line": 3
} | module Steel.TLArray | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.TLArray.fst"
} | [
{
"abbrev": true,
"full_module": "FStar.SizeT",
"short_module": "US"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": true,
"full_module": "FStar.Ghost",
"short_module": "G"
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type0 -> Type0 | Prims.Tot | [
"total"
] | [] | [
"Prims.list"
] | [] | false | false | false | true | true | let t a =
| list a | false |
Steel.TLArray.fst | Steel.TLArray.length | val length (#a:Type0) (x:t a) : GTot nat | val length (#a:Type0) (x:t a) : GTot nat | let length x = L.length x | {
"file_name": "lib/steel/Steel.TLArray.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 25,
"end_line": 6,
"start_col": 0,
"start_line": 6
} | module Steel.TLArray
let t a = list a | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.TLArray.fst"
} | [
{
"abbrev": true,
"full_module": "FStar.SizeT",
"short_module": "US"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": true,
"full_module": "FStar.Ghost",
"short_module": "G"
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Steel.TLArray.t a -> Prims.GTot Prims.nat | Prims.GTot | [
"sometrivial"
] | [] | [
"Steel.TLArray.t",
"FStar.List.Tot.Base.length",
"Prims.nat"
] | [] | false | false | false | false | false | let length x =
| L.length x | false |
Steel.TLArray.fst | Steel.TLArray.v | val v (#a:Type0) (x : t a) : G.erased (list a) | val v (#a:Type0) (x : t a) : G.erased (list a) | let v x = G.hide x | {
"file_name": "lib/steel/Steel.TLArray.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 18,
"end_line": 5,
"start_col": 0,
"start_line": 5
} | module Steel.TLArray
let t a = list a | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.TLArray.fst"
} | [
{
"abbrev": true,
"full_module": "FStar.SizeT",
"short_module": "US"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": true,
"full_module": "FStar.Ghost",
"short_module": "G"
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Steel.TLArray.t a -> FStar.Ghost.erased (Prims.list a) | Prims.Tot | [
"total"
] | [] | [
"Steel.TLArray.t",
"FStar.Ghost.hide",
"Prims.list",
"FStar.Ghost.erased"
] | [] | false | false | false | true | false | let v x =
| G.hide x | false |
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.split | val split (#inames: _)
(#a:Type)
(#p:pcm a)
(r:ref a p)
(v:erased a)
(v0:erased a)
(v1:erased a)
: STGhost unit inames (pts_to r v)
(fun _ -> pts_to r v0 `star` pts_to r v1)
(requires
composable p v0 v1 /\
v == hide (op p v0 v1))
(ensures fun _ -> True) | val split (#inames: _)
(#a:Type)
(#p:pcm a)
(r:ref a p)
(v:erased a)
(v0:erased a)
(v1:erased a)
: STGhost unit inames (pts_to r v)
(fun _ -> pts_to r v0 `star` pts_to r v1)
(requires
composable p v0 v1 /\
v == hide (op p v0 v1))
(ensures fun _ -> True) | let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 65,
"end_line": 14,
"start_col": 0,
"start_line": 14
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: Steel.Memory.ref a p ->
v: FStar.Ghost.erased a ->
v0: FStar.Ghost.erased a ->
v1: FStar.Ghost.erased a
-> Steel.ST.Effect.Ghost.STGhost Prims.unit | Steel.ST.Effect.Ghost.STGhost | [] | [] | [
"Steel.Memory.inames",
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_ghost",
"Prims.unit",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.Ghost.reveal",
"Steel.Effect.Common.star",
"Steel.Effect.Common.vprop",
"Prims.l_and",
"FStar.PCM.composable",
"Prims.eq2",
"FStar.Ghost.hide",
"FStar.PCM.op",
"Prims.l_True",
"Steel.PCMReference.split"
] | [] | false | true | false | false | false | let split r v v0 v1 =
| C.coerce_ghost (fun _ -> P.split r v v0 v1) | false |
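(* Informal gloss (illustrative, hedged): split decomposes ownership along the
   PCM -- given composable v0 and v1 with v == op p v0 v1, a single pts_to r v
   splits into pts_to r v0 `star` pts_to r v1. The gather coercion defined in
   the same file provides the reverse composition, recombining two such
   fragments. *)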
FStar.Sequence.Base.fsti | FStar.Sequence.Base.length_of_empty_is_zero_fact | val length_of_empty_is_zero_fact : Prims.logical | let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0 | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 67,
"end_line": 157,
"start_col": 8,
"start_line": 156
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.empty"
] | [] | false | false | false | true | true | let length_of_empty_is_zero_fact =
| forall (ty: Type u#a). {:pattern empty #ty} length (empty #ty) = 0 | false |
|
Steel.TLArray.fst | Steel.TLArray.get | val get (#a:Type0) (x: t a) (i:US.t{US.v i < length x}) :
Pure a
(requires True)
(ensures fun y ->
US.v i < L.length (v x) /\
y == L.index (v x) (US.v i)) | val get (#a:Type0) (x: t a) (i:US.t{US.v i < length x}) :
Pure a
(requires True)
(ensures fun y ->
US.v i < L.length (v x) /\
y == L.index (v x) (US.v i)) | let get x i = L.index x (US.v i) | {
"file_name": "lib/steel/Steel.TLArray.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 32,
"end_line": 9,
"start_col": 0,
"start_line": 9
} | module Steel.TLArray
let t a = list a
let v x = G.hide x
let length x = L.length x | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.TLArray.fst"
} | [
{
"abbrev": true,
"full_module": "FStar.SizeT",
"short_module": "US"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": true,
"full_module": "FStar.Ghost",
"short_module": "G"
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | x: Steel.TLArray.t a -> i: FStar.SizeT.t{FStar.SizeT.v i < Steel.TLArray.length x} -> Prims.Pure a | Prims.Pure | [] | [] | [
"Steel.TLArray.t",
"FStar.SizeT.t",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.SizeT.v",
"Steel.TLArray.length",
"FStar.List.Tot.Base.index"
] | [] | false | false | false | false | false | let get x i =
| L.index x (US.v i) | false |
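(* Hypothetical example (the concrete list below is illustrative, not taken
   from this file): since a TLArray is represented by a plain list, get x i
   simply computes FStar.List.Tot.index x (US.v i). For an array holding
   [10; 20; 30], reading index 1sz yields L.index [10; 20; 30] 1 = 20, once
   the index refinement US.v 1sz < length x (here 1 < 3) is discharged. *)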
FStar.Sequence.Base.fsti | FStar.Sequence.Base.build_increments_length_fact | val build_increments_length_fact : Prims.logical | let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 37,
"end_line": 182,
"start_col": 8,
"start_line": 180
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.build",
"Prims.op_Addition"
] | [] | false | false | false | true | true | let build_increments_length_fact =
| forall (ty: Type u#a) (s: seq ty) (v: ty). {:pattern build s v} length (build s v) = 1 + length s | false |
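(* Worked instance (illustrative): chaining this fact with
   length_of_empty_is_zero_fact gives
   length (build empty v) = 1 + length empty = 1 + 0 = 1,
   which matches singleton_length_one_fact's count for a one-element
   sequence. *)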
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.length_zero_implies_empty_fact | val length_zero_implies_empty_fact : Prims.logical | let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 83,
"end_line": 165,
"start_col": 8,
"start_line": 164
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty()) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.empty"
] | [] | false | false | false | true | true | let length_zero_implies_empty_fact =
| forall (ty: Type u#a) (s: seq ty). {:pattern length s} length s = 0 ==> s == empty | false |
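(* Informal gloss: this fact is the converse direction of
   length_of_empty_is_zero_fact; together they make `length s = 0` and
   `s == empty` interchangeable, so any sequence other than empty has
   length at least 1. *)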
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.singleton_length_one_fact | val singleton_length_one_fact : Prims.logical | let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1 | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 88,
"end_line": 172,
"start_col": 8,
"start_line": 171
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.singleton"
] | [] | false | false | false | true | true | let singleton_length_one_fact =
| forall (ty: Type u#a) (v: ty). {:pattern length (singleton v)} length (singleton v) = 1 | false |
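
A small corollary sketch of the fact just defined (hypothetical name, written as if inside FStar.Sequence.Base since the fact is private):

let singleton_has_length_one (#ty: Type u#a) (v: ty)
  (_: squash (singleton_length_one_fact u#a))
  : Lemma (length (singleton v) = 1)
  = ()  (* the fact's pattern `length (singleton v)` matches the goal directly *)
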
|
Steel.TLArray.fst | Steel.TLArray.create | val create (#a:Type0) (l: list a) :
Pure (t a)
(requires True)
(ensures fun x ->
Ghost.reveal (v x) == l /\
length x == normalize_term (List.Tot.length l)) | val create (#a:Type0) (l: list a) :
Pure (t a)
(requires True)
(ensures fun x ->
Ghost.reveal (v x) == l /\
length x == normalize_term (List.Tot.length l)) | let create l = l | {
"file_name": "lib/steel/Steel.TLArray.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 16,
"end_line": 8,
"start_col": 0,
"start_line": 8
} | module Steel.TLArray
let t a = list a
let v x = G.hide x
let length x = L.length x | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.TLArray.fst"
} | [
{
"abbrev": true,
"full_module": "FStar.SizeT",
"short_module": "US"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": true,
"full_module": "FStar.Ghost",
"short_module": "G"
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | l: Prims.list a -> Prims.Pure (Steel.TLArray.t a) | Prims.Pure | [] | [] | [
"Prims.list",
"Steel.TLArray.t"
] | [] | false | false | false | false | false | let create l =
| l | false |
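
An illustrative call of `create`, assuming `open Steel.TLArray`; the binding name is hypothetical and not part of the library:

let three_ints : t int = create [1; 2; 3]
(* By the postcondition of `create`, `v three_ints` reveals [1; 2; 3]
   and `length three_ints` normalizes to 3. *)
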
FStar.Sequence.Base.fsti | FStar.Sequence.Base.append_sums_lengths_fact | val append_sums_lengths_fact : Prims.logical | let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1 | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 49,
"end_line": 203,
"start_col": 8,
"start_line": 201
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.append",
"Prims.op_Addition"
] | [] | false | false | false | true | true | let append_sums_lengths_fact =
| forall (ty: Type u#a) (s0: seq ty) (s1: seq ty). {:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1 | false |
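
A sketch of a corollary one might derive from the fact above (hypothetical name; the fact is private to FStar.Sequence.Base):

let append_no_shorter_than_left (#ty: Type u#a) (s0: seq ty) (s1: seq ty)
  (_: squash (append_sums_lengths_fact u#a))
  : Lemma (length (append s0 s1) >= length s0)
  = ()  (* length (append s0 s1) = length s0 + length s1, and length s1 is a nat, hence >= 0 *)
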
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.build_contains_equiv_fact | val build_contains_equiv_fact : Prims.logical | let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 56,
"end_line": 275,
"start_col": 8,
"start_line": 273
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x))); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.l_iff",
"FStar.Sequence.Base.contains",
"FStar.Sequence.Base.build",
"Prims.l_or",
"Prims.eq2"
] | [] | false | false | false | true | true | let build_contains_equiv_fact =
| forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty). {:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x) | false |
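
A hedged corollary sketch of the fact just defined (hypothetical name, assuming the private fact is visible):

let build_contains_new_value (#ty: Type u#a) (s: seq ty) (v: ty)
  (_: squash (build_contains_equiv_fact u#a))
  : Lemma (contains (build s v) v)
  = ()  (* instantiate the fact at x = v; the left disjunct v == v closes the goal *)
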
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.empty_doesnt_contain_anything_fact | val empty_doesnt_contain_anything_fact : Prims.logical | let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 79,
"end_line": 265,
"start_col": 8,
"start_line": 264
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"Prims.l_not",
"FStar.Sequence.Base.contains",
"FStar.Sequence.Base.empty"
] | [] | false | false | false | true | true | let empty_doesnt_contain_anything_fact =
| forall (ty: Type u#a) (x: ty). {:pattern contains empty x} ~(contains empty x) | false |
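
A direct instance of the fact above, stated as a lemma sketch (hypothetical name, assuming the private fact is visible):

let nothing_in_empty (#ty: Type u#a) (x: ty)
  (_: squash (empty_doesnt_contain_anything_fact u#a))
  : Lemma (~(contains (empty #ty) x))
  = ()  (* the fact's pattern `contains empty x` matches the goal term *)
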
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.contains_iff_exists_index_fact | val contains_iff_exists_index_fact : Prims.logical | let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 91,
"end_line": 256,
"start_col": 8,
"start_line": 254
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.l_iff",
"FStar.Sequence.Base.contains",
"Prims.l_Exists",
"Prims.nat",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.index"
] | [] | false | false | false | true | true | let contains_iff_exists_index_fact =
| forall (ty: Type u#a) (s: seq ty) (x: ty). {:pattern contains s x}
contains s x <==> (exists (i: nat). {:pattern index s i} i < length s /\ index s i == x) | false |
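
A sketch of the forward use of this equivalence (hypothetical name, assuming the private fact is visible): any indexed element is a member of the sequence.

let index_implies_contains (#ty: Type u#a) (s: seq ty) (i: nat{i < length s})
  (_: squash (contains_iff_exists_index_fact u#a))
  : Lemma (contains s (index s i))
  = ()  (* the existential on the right is witnessed by i itself *)
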
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.extensionality_fact | val extensionality_fact : Prims.logical | let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 24,
"end_line": 325,
"start_col": 8,
"start_line": 323
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.l_imp",
"FStar.Sequence.Base.equal",
"Prims.eq2"
] | [] | false | false | false | true | true | let extensionality_fact =
| forall (ty: Type u#a) (a: seq ty) (b: seq ty). {:pattern equal a b} equal a b ==> a == b | false |
|
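A minimal usage sketch for the record above (not part of the dataset): with the `*_fact` predicates in the SMT context, `extensionality_fact` upgrades the `equal` relation to propositional equality. The entry point `all_seq_facts_lemma` is an assumed name for whatever lemma the module exposes to make these facts available.

module Sketch.SequenceExtensionality
open FStar.Sequence.Base

(* `all_seq_facts_lemma` is assumed here; it stands for the module's
   facts-exposing lemma and is not shown in the record above. *)
let equal_implies_propositional_equality (#ty: Type) (s0 s1: seq ty)
  : Lemma (requires equal s0 s1) (ensures s0 == s1)
  = all_seq_facts_lemma ()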
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_length_fact | val drop_length_fact : Prims.logical | let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 54,
"end_line": 374,
"start_col": 8,
"start_line": 371
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.op_Equality",
"Prims.int",
"FStar.Sequence.Base.drop",
"Prims.op_Subtraction"
] | [] | false | false | false | true | true | let drop_length_fact =
| forall (ty: Type u#a) (s: seq ty) (n: nat). {:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n | false |
|
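A hedged sketch of a client of `drop_length_fact` from the record above: dropping `n` elements shortens the sequence by exactly `n`. As before, `all_seq_facts_lemma` is an assumed name for the lemma that brings the `*_fact` predicates into scope.

module Sketch.SequenceDrop
open FStar.Sequence.Base

(* Assumes all_seq_facts_lemma exposes drop_length_fact to the solver;
   its {:pattern length (drop s n)} trigger then closes the goal. *)
let drop_shortens_by_n (#ty: Type) (s: seq ty) (n: nat{n <= length s})
  : Lemma (length (drop s n) = length s - n)
  = all_seq_facts_lemma ()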
FStar.Sequence.Base.fsti | FStar.Sequence.Base.update_maintains_length_fact | val update_maintains_length_fact : Prims.logical | let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 36,
"end_line": 232,
"start_col": 8,
"start_line": 230
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.op_Equality",
"FStar.Sequence.Base.update"
] | [] | false | false | false | true | true | let update_maintains_length_fact =
| forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty). {:pattern length (update s i v)}
length (update s i v) = length s | false |
|
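Sketch for the record above, under the same assumption about `all_seq_facts_lemma`: `update` rewrites one position and leaves the length unchanged.

module Sketch.SequenceUpdate
open FStar.Sequence.Base

(* all_seq_facts_lemma is an assumed entry point; update_maintains_length_fact
   then discharges the goal via its {:pattern length (update s i v)} trigger. *)
let update_preserves_length (#ty: Type) (s: seq ty) (i: nat{i < length s}) (v: ty)
  : Lemma (length (update s i v) = length s)
  = all_seq_facts_lemma ()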
FStar.Sequence.Base.fsti | FStar.Sequence.Base.index_into_build_fact | val index_into_build_fact : _: Prims.squash FStar.Sequence.Base.build_increments_length_fact -> Prims.logical | let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 59,
"end_line": 194,
"start_col": 8,
"start_line": 190
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i))); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.build_increments_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.build_increments_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.build",
"Prims.l_and",
"Prims.l_imp",
"Prims.op_Equality",
"Prims.eq2",
"FStar.Sequence.Base.index",
"Prims.op_disEquality",
"Prims.logical"
] | [] | false | false | false | true | true | let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)}).
{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v) /\
(i <> length s ==> index (build s v) i == index s i) | false |
|
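Note that `index_into_build_fact` in the record above is stated under a `squash` of `build_increments_length_fact`, so a client usually needs the length fact first, e.g. to know that position `length s` is in bounds. The sketch below states just that length fact; `all_seq_facts_lemma` is again an assumed name.

module Sketch.SequenceBuild
open FStar.Sequence.Base

(* Assumed facts lemma; build_increments_length_fact gives the length equation. *)
let build_adds_one_element (#ty: Type) (s: seq ty) (v: ty)
  : Lemma (length (build s v) = 1 + length s)
  = all_seq_facts_lemma ()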
FStar.Sequence.Base.fsti | FStar.Sequence.Base.index_after_append_fact | val index_after_append_fact : _: Prims.squash FStar.Sequence.Base.append_sums_lengths_fact -> Prims.logical | let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0)) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 78,
"end_line": 223,
"start_col": 8,
"start_line": 219
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0)))); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.append_sums_lengths_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.append_sums_lengths_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.append",
"Prims.l_and",
"Prims.l_imp",
"Prims.eq2",
"FStar.Sequence.Base.index",
"Prims.op_LessThanOrEqual",
"Prims.op_Subtraction",
"Prims.logical"
] | [] | false | false | false | true | true | let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
| forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)}).
{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n) /\
(length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0)) | false |
|
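Sketch for the record above: reading below `length s0` in an append comes from the left operand. The bound `n < length (append s0 s1)` is placed in the binder so both `index` applications are well typed before the (assumed) `all_seq_facts_lemma` is invoked.

module Sketch.SequenceAppend
open FStar.Sequence.Base

(* all_seq_facts_lemma is assumed; index_after_append_fact's
   {:pattern index (append s0 s1) n} trigger finishes the proof. *)
let index_append_left (#ty: Type) (s0 s1: seq ty)
      (n: nat{n < length s0 && n < length (append s0 s1)})
  : Lemma (index (append s0 s1) n == index s0 n)
  = all_seq_facts_lemma ()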
FStar.Sequence.Base.fsti | FStar.Sequence.Base.rank_def_fact | val rank_def_fact : Prims.logical | let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 61,
"end_line": 481,
"start_col": 8,
"start_line": 480
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
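/// Illustrative sketch (the name `example_build_length` is ours, not part of
/// the original interface): a client that has established the fact above can
/// discharge the corresponding length equation for concrete arguments.
private let example_build_length
      (_: squash (build_increments_length_fact u#a)) (#ty: Type u#a) (s: seq ty) (v: ty)
  : Lemma (length (build s v) = 1 + length s)
  = (* instantiates the quantified fact above at `s` and `v`; illustrative only *)
    ()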
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
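/// Illustrative sketch (the name `example_append_length` is ours, not part of
/// the original interface): with the fact above as a hypothesis, the length of
/// a concrete append is the sum of the two lengths.
private let example_append_length
      (_: squash (append_sums_lengths_fact u#a)) (#ty: Type u#a) (s t: seq ty)
  : Lemma (length (append s t) = length s + length t)
  = (* instantiates the quantified fact above at `s` and `t`; illustrative only *)
    ()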
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
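/// Illustrative sketch (the name `example_update_then_index_same` is ours, not
/// part of the original interface): combining the two update facts lets a
/// client read back the freshly written element, in the same style as the
/// derived facts below that take other facts as `squash` hypotheses.
private let example_update_then_index_same
      (_: squash (update_maintains_length_fact u#a /\ update_then_index_fact u#a))
      (#ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty)
  : Lemma (index (update s i v) i == v)
  = (* the length fact justifies indexing at `i`; the index fact gives the value *)
    ()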
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
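/// Illustrative sketch (the name `example_empty_contains_nothing` is ours, not
/// part of the original interface): the fact above specializes to any element.
private let example_empty_contains_nothing
      (_: squash (empty_doesnt_contain_anything_fact u#a)) (#ty: Type u#a) (x: ty)
  : Lemma (~(contains (empty #ty) x))
  = (* instantiates the quantified fact above at `x`; illustrative only *)
    ()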
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
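/// Illustrative sketch (the name `example_extensionality` is ours, not part of
/// the original interface): under the fact above, pointwise-equal sequences can
/// be rewritten to propositional equality.
private let example_extensionality
      (_: squash (extensionality_fact u#a)) (#ty: Type u#a) (a b: seq ty)
  : Lemma (requires (equal a b)) (ensures (a == b))
  = (* instantiates the quantified fact above at `a` and `b`; illustrative only *)
    ()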
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
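/// Illustrative sketch (the name `example_take_length` is ours, not part of the
/// original interface): for an in-range prefix length, the fact above yields
/// the expected length of `take`.
private let example_take_length
      (_: squash (take_length_fact u#a)) (#ty: Type u#a) (s: seq ty) (n: nat{n <= length s})
  : Lemma (length (take s n) = n)
  = (* instantiates the quantified fact above at `s` and `n`; illustrative only *)
    ()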
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts. | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"Prims.eq2",
"FStar.Sequence.Base.rank"
] | [] | false | false | false | true | true | let rank_def_fact =
| forall (ty: Type u#a) (v: ty). {:pattern rank v} rank v == v | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.element_ranks_less_fact | val element_ranks_less_fact : Prims.logical | let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 47,
"end_line": 491,
"start_col": 8,
"start_line": 489
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.precedes",
"FStar.Sequence.Base.rank",
"FStar.Sequence.Base.index"
] | [] | false | false | false | true | true | let element_ranks_less_fact =
| forall (ty: Type u#a) (s: seq ty) (i: nat). {:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.take_contains_equiv_exists_fact | val take_contains_equiv_exists_fact : Prims.logical | let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 82,
"end_line": 288,
"start_col": 8,
"start_line": 285
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.l_iff",
"FStar.Sequence.Base.contains",
"FStar.Sequence.Base.take",
"Prims.l_Exists",
"Prims.l_and",
"Prims.op_LessThan",
"Prims.eq2",
"FStar.Sequence.Base.index"
] | [] | false | false | false | true | true | let take_contains_equiv_exists_fact =
| forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty). {:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat). {:pattern index s i} i < n /\ i < length s /\ index s i == x) | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.update_then_index_fact | val update_then_index_fact : Prims.logical | let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 59,
"end_line": 246,
"start_col": 8,
"start_line": 241
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n))); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"FStar.Sequence.Base.update",
"Prims.l_imp",
"Prims.l_and",
"Prims.op_Equality",
"Prims.l_or",
"Prims.eq2",
"FStar.Sequence.Base.index",
"Prims.op_disEquality"
] | [] | false | false | false | true | true | let update_then_index_fact =
| forall (ty: Type u#a)
(s: seq ty)
(i: nat{i < length s})
(v: ty)
(n: nat{n < length (update s i v)}).
{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v) /\ (i <> n ==> index (update s i v) n == index s n) | false |
|
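The row above records `update_then_index_fact`, the F* counterpart of Dafny's update/index axiom. As an illustrative aside (not part of the dataset row or of the library interface), the sketch below shows how a client lemma might consume it. The lemma name is invented here, the facts are instantiated at universe 0, and because the `_fact` predicates are declared `private`, a snippet like this is assumed to be checked inside (or as a friend of) FStar.Sequence.Base.

(* Hypothetical usage sketch -- not part of FStar.Sequence.Base itself. *)
let update_then_index_example (#ty: Type0) (s: seq ty) (i: nat{i < length s}) (v: ty)
  : Lemma (requires update_maintains_length_fact u#0 /\ update_then_index_fact u#0)
          (ensures  index (update s i v) i == v)
  = (* With both facts as hypotheses, the SMT patterns on
       `length (update s i v)` and `index (update s i v) n` fire and
       discharge the goal without further hints. *)
    ()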
FStar.Sequence.Base.fsti | FStar.Sequence.Base.index_into_drop_fact | val index_into_drop_fact : _: Prims.squash FStar.Sequence.Base.drop_length_fact -> Prims.logical | let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 62,
"end_line": 387,
"start_col": 8,
"start_line": 384
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.drop_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.drop_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.op_Subtraction",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.index",
"FStar.Sequence.Base.drop",
"Prims.op_Addition",
"Prims.logical"
] | [] | false | false | false | true | true | let index_into_drop_fact (_: squash (drop_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat). {:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n) | false |
|
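The definition in the row above, `index_into_drop_fact`, is parameterized by a `squash` of `drop_length_fact`, mirroring the Dafny axiom's dependence on the length of `Seq#Drop`. The following sketch (again hypothetical, with an invented name and a universe-0 instantiation, assumed to be checked where the private facts are visible) shows that proof being passed explicitly.

(* Hypothetical usage sketch -- assumes it is checked inside FStar.Sequence.Base,
   where the private `_fact` predicates are in scope. *)
let index_into_drop_example
      (#ty: Type0)
      (sq: squash (drop_length_fact u#0))
      (s: seq ty) (n: nat{n <= length s}) (j: nat{j < length s - n})
  : Lemma (requires index_into_drop_fact sq)
          (ensures  index (drop s n) j == index s (j + n))
  = (* `sq` supplies drop_length_fact, which both justifies indexing into
       `drop s n` at position j and is exactly the squashed argument that
       `index_into_drop_fact` expects. *)
    ()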
FStar.Sequence.Base.fsti | FStar.Sequence.Base.is_prefix_def_fact | val is_prefix_def_fact : Prims.logical | let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 52,
"end_line": 341,
"start_col": 8,
"start_line": 336
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j))); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.l_iff",
"FStar.Sequence.Base.is_prefix",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.nat",
"Prims.l_imp",
"Prims.op_LessThan",
"Prims.eq2",
"FStar.Sequence.Base.index"
] | [] | false | false | false | true | true | let is_prefix_def_fact =
| forall (ty: Type u#a) (s0: seq ty) (s1: seq ty). {:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1 /\
(forall (j: nat). {:pattern index s0 j\/index s1 j} j < length s0 ==> index s0 j == index s1 j) | false |
|
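To make the prefix fact above concrete, here is a small hypothetical client lemma (the name is made up for illustration, and the same visibility caveat about the private `_fact` predicates applies): once `is_prefix s0 s1` is known, `is_prefix_def_fact` yields the length bound directly.

(* Hypothetical usage sketch -- the lemma name is invented for illustration. *)
let is_prefix_length_example (#ty: Type0) (s0 s1: seq ty)
  : Lemma (requires is_prefix_def_fact u#0 /\ is_prefix s0 s1)
          (ensures  length s0 <= length s1)
  = (* The pattern on `is_prefix s0 s1` fires on the hypothesis and unfolds
       the prefix relation into the length bound (and pointwise equality). *)
    ()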
FStar.Sequence.Base.fsti | FStar.Sequence.Base.take_length_fact | val take_length_fact : Prims.logical | let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 43,
"end_line": 350,
"start_col": 8,
"start_line": 348
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.op_Equality",
"FStar.Sequence.Base.take"
] | [] | false | false | false | true | true | let take_length_fact =
| forall (ty: Type u#a) (s: seq ty) (n: nat). {:pattern length (take s n)}
n <= length s ==> length (take s n) = n | false |
|
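As with the previous rows, a minimal hypothetical sketch of using `take_length_fact` follows; the name and the universe-0 instantiation are choices made here for illustration, not part of the library.

(* Hypothetical usage sketch -- not part of the library interface. *)
let take_length_example (#ty: Type0) (s: seq ty) (n: nat{n <= length s})
  : Lemma (requires take_length_fact u#0)
          (ensures  length (take s n) = n)
  = (* The trigger `length (take s n)` occurs in the goal, so the quantified
       fact instantiates directly. *)
    ()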
FStar.Sequence.Base.fsti | FStar.Sequence.Base.index_into_take_fact | val index_into_take_fact : _: Prims.squash FStar.Sequence.Base.take_length_fact -> Prims.logical | let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 62,
"end_line": 364,
"start_col": 8,
"start_line": 361
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
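/// For illustration, taking `s = singleton 1` and `v = 2` (so `length s = 1`
/// by `singleton_length_one_fact`), this yields `index (build s v) 1 == 2`
/// and `index (build s v) 0 == index s 0`.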
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
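/// For illustration, with `s0 = singleton 1` and `s1 = singleton 2` this
/// yields `index (append s0 s1) 0 == index s0 0` and
/// `index (append s0 s1) 1 == index s1 0`.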
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
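/// In particular, reading back the updated position gives
/// `index (update s i v) i == v`, while every other valid position is
/// unchanged: `index (update s i v) n == index s n` for `n <> i`.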
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
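/// This is the fact that justifies membership in sequence literals; for
/// instance, unfolding it twice shows `contains (build (build empty 2) 3) 2`,
/// the analogue of Dafny's `2 in [2, 3]`.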
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
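/// In particular, any witness `i` here also witnesses
/// `contains_iff_exists_index_fact`, so `contains (drop s n) x` implies
/// `contains s x`: dropping a prefix never adds elements.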
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
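/// Together with `equal_def_fact`, this reduces proving `s == t` to showing
/// that `s` and `t` have the same length and agree at every valid index.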
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
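/// For illustration, this makes `s $<= s` hold for every sequence `s`, and,
/// using the take facts below, `take s n $<= s` for any `n <= length s`.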
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
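/// For illustration, `length (take s 0) = 0`, so
/// `length_zero_implies_empty_fact` gives `take s 0 == empty`.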
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.take_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.take_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThan",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.index",
"FStar.Sequence.Base.take",
"Prims.logical"
] | [] | false | false | false | true | true | let index_into_take_fact (_: squash (take_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j\/index s j; take s n}
j < n && n <= length s ==> index (take s n) j == index s j | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.index_into_singleton_fact | val index_into_singleton_fact : _: Prims.squash FStar.Sequence.Base.singleton_length_one_fact -> Prims.logical | let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 31,
"end_line": 211,
"start_col": 8,
"start_line": 209
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.singleton_length_one_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.singleton_length_one_fact",
"Prims.l_Forall",
"Prims.eq2",
"FStar.Sequence.Base.index",
"FStar.Sequence.Base.singleton",
"Prims.logical"
] | [] | false | false | false | true | true | let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
| forall (ty: Type u#a) (v: ty). {:pattern index (singleton v) 0} index (singleton v) 0 == v | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.equal_def_fact | val equal_def_fact : Prims.logical | let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 60,
"end_line": 316,
"start_col": 8,
"start_line": 311
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j))); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.l_iff",
"FStar.Sequence.Base.equal",
"Prims.l_and",
"Prims.eq2",
"Prims.nat",
"FStar.Sequence.Base.length",
"Prims.int",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"Prims.op_LessThan",
"Prims.l_imp",
"Prims.op_AmpAmp",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.index"
] | [] | false | false | false | true | true | let equal_def_fact =
| forall (ty: Type u#a) (s0: seq ty) (s1: seq ty). {:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j. {:pattern index s0 j\/index s1 j} 0 <= j && j < length s0 ==> index s0 j == index s1 j) | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_index_offset_fact | val drop_index_offset_fact : _: Prims.squash FStar.Sequence.Base.drop_length_fact -> Prims.logical | let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 68,
"end_line": 400,
"start_col": 8,
"start_line": 397
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
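/// For illustration, whenever `1 < length s` this gives
/// `index (drop s 1) 0 == index s 1`: dropping one element shifts every
/// remaining index down by one.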
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.drop_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.drop_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.index",
"FStar.Sequence.Base.drop",
"Prims.op_Subtraction",
"Prims.logical"
] | [] | false | false | false | true | true | let drop_index_offset_fact (_: squash (drop_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat). {:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.append_then_take_or_drop_fact | val append_then_take_or_drop_fact : _: Prims.squash FStar.Sequence.Base.append_sums_lengths_fact -> Prims.logical | let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 73,
"end_line": 415,
"start_col": 8,
"start_line": 412
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
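/// As an informal illustration (not one of the prelude axioms): instantiating this
/// fact with `s = singleton 1` and `v = 2` gives
///   index (build (singleton 1) 2) 1 == 2
///   index (build (singleton 1) 2) 0 == index (singleton 1) 0
/// i.e. `build` places `v` at position `length s` and leaves earlier positions unchanged.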
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
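/// As an informal illustration (not one of the prelude axioms): if `s0` has length 2,
/// this fact gives
///   index (append s0 s1) 0 == index s0 0
///   index (append s0 s1) 1 == index s0 1
/// and, when `s1` is non-empty, index (append s0 s1) 2 == index s1 0,
/// so indices past `length s0` fall into `s1`.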
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
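/// As an informal illustration (not one of the prelude axioms): for `s` of length 3,
/// `update s 1 v` keeps its length (by `update_maintains_length_fact`) and satisfies
///   index (update s 1 v) 1 == v
///   index (update s 1 v) 0 == index s 0
///   index (update s 1 v) 2 == index s 2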
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
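/// As an informal illustration (not one of the prelude axioms), this is the fact behind
/// the '4 in [2,3,4]' example mentioned above: writing the sequence as nested builds,
///   contains (build (build (build empty 2) 3) 4) 4
/// unfolds to `4 == 4 \/ ...`, which holds, while membership of 5 unfolds all the way
/// down to `contains empty 5`, which `empty_doesnt_contain_anything_fact` refutes.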
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
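/// As an informal illustration (not one of the prelude axioms): for `s` of length 4,
/// `contains (take s 2) x` holds exactly when `x` occurs at index 0 or 1 of `s`, and
/// `contains (drop s 2) x` holds exactly when `x` occurs at index 2 or 3 of `s`.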
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
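/// As an informal illustration (not one of the prelude axioms): the sequences
/// `append (singleton 1) (singleton 2)` and `build (singleton 1) 2` are built
/// differently but have the same length and agree at every index, so `equal_def_fact`
/// makes them `equal` and `extensionality_fact` upgrades that to `==`.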
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
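/// As an informal illustration (not one of the prelude axioms): together with the
/// `take` facts below, this gives `take s n $<= s` for any `n <= length s`, since the
/// take has length `n` and agrees with `s` at every index below `n`.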
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
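/// As an informal illustration (not one of the prelude axioms): for `s` of length 5,
/// `drop s 2` has length 3 by `drop_length_fact`, and by this fact
///   index (drop s 2) 0 == index s 2
///   index (drop s 2) 2 == index s 4
/// so `take` and `drop` behave like the usual prefix/suffix slices.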
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.append_sums_lengths_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.append_sums_lengths_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Sequence.Base.length",
"Prims.l_and",
"Prims.eq2",
"FStar.Sequence.Base.take",
"FStar.Sequence.Base.append",
"FStar.Sequence.Base.drop",
"Prims.logical"
] | [] | false | false | false | true | true | let append_then_take_or_drop_fact (_: squash (append_sums_lengths_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
    {:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_contains_equiv_exists_fact | val drop_contains_equiv_exists_fact : Prims.logical | let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 83,
"end_line": 301,
"start_col": 8,
"start_line": 298
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.l_iff",
"FStar.Sequence.Base.contains",
"FStar.Sequence.Base.drop",
"Prims.l_Exists",
"Prims.l_and",
"Prims.op_AmpAmp",
"Prims.op_LessThan",
"Prims.eq2",
"FStar.Sequence.Base.index"
] | [] | false | false | false | true | true | let drop_contains_equiv_exists_fact =
| forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty). {:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat). {:pattern index s i} n <= i && i < length s /\ index s i == x) | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.take_ranks_less_fact | val take_ranks_less_fact : Prims.logical | let take_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern length (take s i)}
i < length s ==> length (take s i) << length s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 50,
"end_line": 514,
"start_col": 8,
"start_line": 512
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
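/// As an informal illustration (not one of the prelude axioms): splitting a
/// concatenation at the seam recovers the two halves, e.g. with `s = singleton 1`
/// and `t = singleton 2`,
///   take (append s t) 1 == s  and  drop (append s t) 1 == t.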
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
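/// As an informal illustration (not one of the prelude axioms): updating a position
/// that survives the truncation can be done before or after truncating, e.g. for `s`
/// of length 4,
///   take (update s 1 v) 3 == update (take s 3) 1 v.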
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
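/// As an informal illustration (not one of the prelude axioms): appending an element
/// and then dropping a prefix is the same as dropping first and then appending, e.g.
/// for `s` of length 3,
///   drop (build s v) 2 == build (drop s 2) v.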
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) );
private let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s
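/// As an informal illustration (not one of the prelude axioms) of why the rank facts
/// matter: a recursive function that peels one element at a time, recursing from `s`
/// to `drop s 1` whenever `length s > 0`, can justify its decreases clause with
/// `rank s`, since `rank (drop s 1) << rank s` by this fact.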
/// We represent the following Dafny axiom with
/// `take_ranks_less_fact`. However, since it isn't true in F* (which
/// has strong requirements for <<), we instead substitute length,
/// requiring decreases clauses to use length in this case.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Take(s, i)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Rank(Seq#Take(s, i)) < Seq#Rank(s) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.precedes",
"FStar.Sequence.Base.take"
] | [] | false | false | false | true | true | let take_ranks_less_fact =
| forall (ty: Type u#a) (s: seq ty) (i: nat). {:pattern length (take s i)}
i < length s ==> length (take s i) << length s | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_ranks_less_fact | val drop_ranks_less_fact : Prims.logical | let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 56,
"end_line": 501,
"start_col": 8,
"start_line": 499
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
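/// Small concrete illustration (hypothetical name, not in the original
/// interface): combined with `empty_doesnt_contain_anything_fact`, the fact above
/// is what lets a membership query against a literal such as [2, 3, 4] unfold
/// into a chain of equalities — the Dafny motivation quoted above ('4 in [2,3,4]').
/// We only state the proposition here; it is not proved at this point.
private let example_membership_in_a_literal =
  forall (x: int).
    (build_contains_equiv_fact u#0 /\ empty_doesnt_contain_anything_fact u#0) ==>
    (contains (build (build (build (empty #int) 2) 3) 4) x <==>
      (x == 4 \/ x == 3 \/ x == 2))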
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
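/// Client-side reading (illustrative sketch; the name is ours): together with
/// `equal_def_fact`, the extensionality fact above justifies the usual proof
/// pattern "same length and pointwise equal, hence propositionally equal". Stated,
/// but not proved, for one element type:
private let example_equal_implies_eq_for_int =
  extensionality_fact u#0 ==>
  (forall (s0: seq int) (s1: seq int). equal s0 s1 ==> s0 == s1)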
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
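/// Usage sketch (hypothetical lemma, not part of the original interface): once a
/// client has `take_length_fact` in its proof context — however it was
/// established — the SMT solver can instantiate it directly, since the trigger
/// `length (take s n)` occurs in the goal.
private let example_take_length_use (#ty: Type0) (s: seq ty) (n: nat)
  : Lemma (requires take_length_fact u#0 /\ n <= length s)
          (ensures length (take s n) = n)
  = ()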
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
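/// Monomorphic illustration (hypothetical name): this is the "split a
/// concatenation at the length of its first half and recover both halves" round
/// trip; as in the polymorphic version above, the `squash` hypothesis about
/// lengths is what makes the `take`/`drop` applications well-formed.
private let append_then_take_or_drop_for_bool_example (_ : squash (append_sums_lengths_fact u#0)) =
  forall (s: seq bool) (t: seq bool) (n: nat).
    {:pattern take (append s t) n \/ drop (append s t) n}
    n = length s ==> take (append s t) n == s /\ drop (append s t) n == t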
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThan",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.precedes",
"FStar.Sequence.Base.rank",
"FStar.Sequence.Base.drop"
] | [] | false | false | false | true | true | let drop_ranks_less_fact =
| forall (ty: Type u#a) (s: seq ty) (i: nat). {:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.take_commutes_with_in_range_update_fact | val take_commutes_with_in_range_update_fact : _:
Prims.squash (FStar.Sequence.Base.update_maintains_length_fact /\
FStar.Sequence.Base.take_length_fact)
-> Prims.logical | let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 50,
"end_line": 428,
"start_col": 8,
"start_line": 424
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
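/// For illustration (the name below is ours, not part of the original interface):
/// the Dafny sequence literal [1, 2, 3] would be built by repeatedly snoc-ing
/// onto the empty sequence with `build`.
private let example_literal_one_two_three : seq int =
  build (build (build (empty #int) 1) 2) 3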
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following Dafny axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
_:
Prims.squash (FStar.Sequence.Base.update_maintains_length_fact /\
FStar.Sequence.Base.take_length_fact)
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"Prims.l_and",
"FStar.Sequence.Base.update_maintains_length_fact",
"FStar.Sequence.Base.take_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThan",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.take",
"FStar.Sequence.Base.update",
"Prims.logical"
] | [] | false | false | false | true | true | let take_commutes_with_in_range_update_fact
(_: squash (update_maintains_length_fact u#a /\ take_length_fact u#a))
=
| forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat). {:pattern take (update s i v) n}
i < n && n <= length s ==> take (update s i v) n == update (take s n) i v | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.take_zero_fact | val take_zero_fact : Prims.logical | let take_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern take s n}
n = 0 ==> take s n == empty | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 31,
"end_line": 547,
"start_col": 8,
"start_line": 545
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following Dafny axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) );
private let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s
/// We represent the following Dafny axiom with
/// `take_ranks_less_fact`. However, since it isn't true in F* (which
/// has strong requirements for <<), we instead substitute length,
/// requiring decreases clauses to use length in this case.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Take(s, i)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Rank(Seq#Take(s, i)) < Seq#Rank(s) );
private let take_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern length (take s i)}
i < length s ==> length (take s i) << length s
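/// Sketch of the intended use (hypothetical helper, not in this interface):
/// because the fact is stated with `length` rather than `<<`, a function that
/// recurses on ever-shorter prefixes would phrase its `decreases` clause with a
/// measure like the one below.
private let example_prefix_measure (#ty: Type0) (s: seq ty) (i: nat{i <= length s}) : nat =
  length (take s i)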
/// We represent the following Dafny axiom with
/// `append_take_drop_ranks_less_fact`. However, since it isn't true
/// in F* (which has strong requirements for <<), we instead
/// substitute length, requiring decreases clauses to use
/// length in this case.
///
/// axiom (forall<T> s: Seq T, i: int, j: int ::
/// { Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) }
/// 0 <= i && i < j && j <= Seq#Length(s) ==>
/// Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) < Seq#Rank(s) );
private let append_take_drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat) (j: nat).{:pattern length (append (take s i) (drop s j))}
i < j && j <= length s ==> length (append (take s i) (drop s j)) << length s
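/// Similarly (hypothetical helper, not in this interface): recursion that excises
/// a middle chunk would measure the reassembled sequence by its length, matching
/// the substitution made in the fact above.
private let example_excision_measure (#ty: Type0) (s: seq ty) (i: nat) (j: nat{i <= j /\ j <= length s}) : nat =
  length (append (take s i) (drop s j))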
/// We represent the following Dafny axiom with `drop_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Drop(s, n) }
/// n == 0 ==> Seq#Drop(s, n) == s);
private let drop_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern drop s n}
n = 0 ==> drop s n == s
/// We represent the following Dafny axiom with `take_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Take(s, n) }
/// n == 0 ==> Seq#Take(s, n) == Seq#Empty()); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.eq2",
"FStar.Sequence.Base.take",
"FStar.Sequence.Base.empty"
] | [] | false | false | false | true | true | let take_zero_fact =
| forall (ty: Type u#a) (s: seq ty) (n: nat). {:pattern take s n} n = 0 ==> take s n == empty | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_commutes_with_build_fact | val drop_commutes_with_build_fact : _: Prims.squash FStar.Sequence.Base.build_increments_length_fact -> Prims.logical | let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 62,
"end_line": 476,
"start_col": 8,
"start_line": 474
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty()));
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
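/// Illustrative note (added commentary, not part of the upstream file): this
/// fact reduces membership to an index search. For instance, for a sequence s
/// with length s = 3, it reads
/// contains s x <==> (index s 0 == x \/ index s 1 == x \/ index s 2 == x).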
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
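/// Illustrative note (added commentary): combined with `equal_def_fact`, this
/// is the usual route to proving that two sequences are the same object:
/// establish that the lengths agree and that `index` agrees pointwise, obtain
/// `equal a b`, and then conclude `a == b` from this extensionality fact.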
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
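/// Worked instance (added commentary): if length s = 2 then, for any t,
/// take (append s t) 2 == s and drop (append s t) 2 == t, i.e. splitting an
/// append at the length of its first argument recovers both arguments.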
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
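/// Worked instance (added commentary): with length s = 3, i = 2 and n = 1,
/// this fact gives drop (update s 2 v) 1 == update (drop s 1) 1 v; dropping
/// one element shifts the updated position from index 2 to index 2 - 1 = 1.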
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.build_increments_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.build_increments_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.drop",
"FStar.Sequence.Base.build",
"Prims.logical"
] | [] | false | false | false | true | true | let drop_commutes_with_build_fact (_: squash (build_increments_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat). {:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_ignores_out_of_range_update_fact | val drop_ignores_out_of_range_update_fact : _: Prims.squash FStar.Sequence.Base.update_maintains_length_fact -> Prims.logical | let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 37,
"end_line": 465,
"start_col": 8,
"start_line": 462
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty()));
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
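/// Worked instance (added commentary): with length s = 4, n = 1 and k = 3,
/// this fact gives index (drop s 1) 2 == index s 3; reading position k of the
/// original sequence is reading position k - n after dropping n elements.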
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.update_maintains_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.update_maintains_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThan",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.drop",
"FStar.Sequence.Base.update",
"Prims.logical"
] | [] | false | false | false | true | true | let drop_ignores_out_of_range_update_fact (_: squash (update_maintains_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat). {:pattern drop (update s i v) n}
i < n && n <= length s ==> drop (update s i v) n == drop s n | false |
|
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_commutes_with_in_range_update_fact | val drop_commutes_with_in_range_update_fact : _:
Prims.squash (FStar.Sequence.Base.update_maintains_length_fact /\
FStar.Sequence.Base.drop_length_fact)
-> Prims.logical | let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 56,
"end_line": 452,
"start_col": 8,
"start_line": 448
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty()));
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
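/// Worked instance (added commentary): with length s = 4, n = 3 and j = 1,
/// this fact gives index (take s 3) 1 == index s 1; taking a prefix never
/// changes the elements at positions below the cut point.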
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
_:
Prims.squash (FStar.Sequence.Base.update_maintains_length_fact /\
FStar.Sequence.Base.drop_length_fact)
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"Prims.l_and",
"FStar.Sequence.Base.update_maintains_length_fact",
"FStar.Sequence.Base.drop_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.drop",
"FStar.Sequence.Base.update",
"Prims.op_Subtraction",
"Prims.logical"
] | [] | false | false | false | true | true | let drop_commutes_with_in_range_update_fact
(_: squash (update_maintains_length_fact u#a /\ drop_length_fact u#a))
=
| forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat). {:pattern drop (update s i v) n}
n <= i && i < length s ==> drop (update s i v) n == update (drop s n) (i - n) v | false |
|
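The record above packages drop_commutes_with_in_range_update_fact behind a squash of update_maintains_length_fact and drop_length_fact. As a purely illustrative sketch (hypothetical name and placement, not part of any dataset record, not machine-checked here), a lemma written alongside those definitions could restate the length-preservation premise pointwise, letting the {:pattern length (update s i v)} trigger fire:

(* Illustrative sketch only: assumes it sits next to the definitions above. *)
let update_length_example (#ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty)
  : Lemma (requires update_maintains_length_fact u#a)
          (ensures  length (update s i v) = length s)
  = ()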
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_zero_fact | val drop_zero_fact : Prims.logical | let drop_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern drop s n}
n = 0 ==> drop s n == s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 27,
"end_line": 538,
"start_col": 8,
"start_line": 536
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) );
private let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s
/// We represent the following Dafny axiom with
/// `take_ranks_less_fact`. However, since it isn't true in F* (which
/// has strong requirements for <<), we instead substitute length,
/// requiring decreases clauses to use length in this case.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Take(s, i)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Rank(Seq#Take(s, i)) < Seq#Rank(s) );
private let take_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern length (take s i)}
i < length s ==> length (take s i) << length s
/// We represent the following Dafny axiom with
/// `append_take_drop_ranks_less_fact`. However, since it isn't true
/// in F* (which has strong requirements for <<), we instead
/// substitute length, requiring decreases clauses to use
/// length in this case.
///
/// axiom (forall<T> s: Seq T, i: int, j: int ::
/// { Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) }
/// 0 <= i && i < j && j <= Seq#Length(s) ==>
/// Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) < Seq#Rank(s) );
private let append_take_drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat) (j: nat).{:pattern length (append (take s i) (drop s j))}
i < j && j <= length s ==> length (append (take s i) (drop s j)) << length s
/// We represent the following Dafny axiom with `drop_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Drop(s, n) }
/// n == 0 ==> Seq#Drop(s, n) == s); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.eq2",
"FStar.Sequence.Base.drop"
] | [] | false | false | false | true | true | let drop_zero_fact =
| forall (ty: Type u#a) (s: seq ty) (n: nat). {:pattern drop s n} n = 0 ==> drop s n == s | false |
|
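The drop_zero_fact record above admits the same kind of pointwise corollary: instantiating its {:pattern drop s n} trigger at n = 0 is enough. Illustrative sketch only (hypothetical name, not part of the dataset, not machine-checked):

(* Illustrative sketch only: assumes it sits next to the definitions above. *)
let drop_zero_example (#ty: Type u#a) (s: seq ty)
  : Lemma (requires drop_zero_fact u#a)
          (ensures  drop s 0 == s)
  = ()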
FStar.Sequence.Base.fsti | FStar.Sequence.Base.take_ignores_out_of_range_update_fact | val take_ignores_out_of_range_update_fact : _: Prims.squash FStar.Sequence.Base.update_maintains_length_fact -> Prims.logical | let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 37,
"end_line": 439,
"start_col": 8,
"start_line": 436
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.update_maintains_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.update_maintains_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.take",
"FStar.Sequence.Base.update",
"Prims.logical"
] | [] | false | false | false | true | true | let take_ignores_out_of_range_update_fact (_: squash (update_maintains_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat). {:pattern take (update s i v) n}
n <= i && i < length s ==> take (update s i v) n == take s n | false |
|
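The take_ignores_out_of_range_update_fact record above again threads update_maintains_length_fact through a squash argument; its argument-free relative take_length_fact can be exercised pointwise in the same style. Illustrative sketch only (hypothetical name, not part of the dataset, not machine-checked):

(* Illustrative sketch only: assumes it sits next to the definitions above. *)
let take_length_example (#ty: Type u#a) (s: seq ty) (n: nat{n <= length s})
  : Lemma (requires take_length_fact u#a)
          (ensures  length (take s n) = n)
  = ()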
Vale.X64.Memory_Sems.fsti | Vale.X64.Memory_Sems.is_full_read | val is_full_read : h1: Vale.X64.Memory.vale_heap ->
h2: Vale.X64.Memory.vale_heap ->
b: Vale.X64.Memory.buffer t ->
i: Prims.int
-> Prims.logical | let is_full_read (#t:base_typ) (h1 h2:vale_heap) (b:buffer t) (i:int) =
buffer_addr b h1 == buffer_addr b h2 /\
buffer_read b i h1 == buffer_read b i h2 /\
valid_buffer_read h1 b i | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fsti",
"git_rev": "eb1badfa34c70b0bbe0fe24fe0f49fb1295c7872",
"git_url": "https://github.com/project-everest/hacl-star.git",
"project_name": "hacl-star"
} | {
"end_col": 26,
"end_line": 48,
"start_col": 0,
"start_line": 45
} | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
open Vale.Arch.MachineHeap_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Lib.Seqs
module S = Vale.X64.Machine_Semantics_s
module Map16 = Vale.Lib.Map16
val same_domain (h:vale_heap) (m:S.machine_heap) : prop0
val lemma_same_domains (h:vale_heap) (m1:S.machine_heap) (m2:S.machine_heap) : Lemma
(requires same_domain h m1 /\ Set.equal (Map.domain m1) (Map.domain m2))
(ensures same_domain h m2)
val get_heap (h:vale_heap) : GTot (m:S.machine_heap{same_domain h m})
val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap
//val lemma_upd_get_heap (h:vale_heap) : Lemma (upd_heap h (get_heap h) == h)
// [SMTPat (upd_heap h (get_heap h))]
val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m)
unfold let coerce (#b #a:Type) (x:a{a == b}) : b = x
val lemma_heap_impl : squash (heap_impl == vale_full_heap)
val lemma_heap_get_heap (h:vale_full_heap) : Lemma
(heap_get (coerce h) == get_heap (get_vale_heap h))
[SMTPat (heap_get (coerce h))]
val lemma_heap_taint (h:vale_full_heap) : Lemma
(heap_taint (coerce h) == full_heap_taint h)
[SMTPat (heap_taint (coerce h))] | {
"checked_file": "/",
"dependencies": [
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.MachineHeap_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.X64.Memory_Sems.fsti"
} | [
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
h1: Vale.X64.Memory.vale_heap ->
h2: Vale.X64.Memory.vale_heap ->
b: Vale.X64.Memory.buffer t ->
i: Prims.int
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.X64.Memory.vale_heap",
"Vale.X64.Memory.buffer",
"Prims.int",
"Prims.l_and",
"Prims.eq2",
"Vale.X64.Memory.buffer_addr",
"Vale.X64.Memory.base_typ_as_vale_type",
"Vale.X64.Memory.buffer_read",
"Vale.X64.Memory.valid_buffer_read",
"Prims.logical"
] | [] | false | false | false | false | true | let is_full_read (#t: base_typ) (h1 h2: vale_heap) (b: buffer t) (i: int) =
| buffer_addr b h1 == buffer_addr b h2 /\ buffer_read b i h1 == buffer_read b i h2 /\
valid_buffer_read h1 b i | false |
|
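Since is_full_read in the record above is a plain conjunction, comparing a heap with itself collapses the first two conjuncts by reflexivity of ==, leaving only the validity conjunct. Illustrative sketch only (hypothetical name, not part of the dataset, not machine-checked):

(* Illustrative sketch only: uses exactly the operations appearing in the definition above. *)
let is_full_read_refl (#t: base_typ) (h: vale_heap) (b: buffer t) (i: int)
  : Lemma (requires valid_buffer_read h b i)
          (ensures  is_full_read h h b i)
  = ()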
FStar.Sequence.Base.fsti | FStar.Sequence.Base.append_take_drop_ranks_less_fact | val append_take_drop_ranks_less_fact : Prims.logical | let append_take_drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat) (j: nat).{:pattern length (append (take s i) (drop s j))}
i < j && j <= length s ==> length (append (take s i) (drop s j)) << length s | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 80,
"end_line": 529,
"start_col": 8,
"start_line": 527
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) );
private let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s
/// We represent the following Dafny axiom with
/// `take_ranks_less_fact`. However, since it isn't true in F* (which
/// has strong requirements for <<), we instead substitute length for rank,
/// requiring decreases clauses to use length in this case.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Take(s, i)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Rank(Seq#Take(s, i)) < Seq#Rank(s) );
private let take_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern length (take s i)}
i < length s ==> length (take s i) << length s
/// We represent the following Dafny axiom with
/// `append_take_drop_ranks_less_fact`. However, since it isn't true
/// in F* (which has strong requirements for <<), we instead
/// substitute length for rank, requiring decreases clauses to use
/// length in this case.
///
/// axiom (forall<T> s: Seq T, i: int, j: int ::
/// { Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) }
/// 0 <= i && i < j && j <= Seq#Length(s) ==>
/// Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) < Seq#Rank(s) ); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_AmpAmp",
"Prims.op_LessThan",
"Prims.op_LessThanOrEqual",
"FStar.Sequence.Base.length",
"Prims.precedes",
"FStar.Sequence.Base.append",
"FStar.Sequence.Base.take",
"FStar.Sequence.Base.drop"
] | [] | false | false | false | true | true | let append_take_drop_ranks_less_fact =
| forall (ty: Type u#a) (s: seq ty) (i: nat) (j: nat).
{:pattern length (append (take s i) (drop s j))}
i < j && j <= length s ==> length (append (take s i) (drop s j)) << length s | false |
|
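The two rank-substitution facts above are what let `length` play the role Dafny gives to `Seq#Rank` in termination arguments. As an illustration only (a sketch with the hypothetical name `trunc_count_sketch`, assuming the declarations above are in scope and ignoring that the `_fact` definitions are private), a recursive function can take `take_ranks_less_fact` as a squashed hypothesis, in the same style the facts themselves use, and let it discharge its `decreases (length s)` clause:

/// Sketch only, not part of the interface: termination is justified by
/// take_ranks_less_fact, assumed here via a squash argument.
let rec trunc_count_sketch (#ty: Type u#a)
  (s: seq ty) (_: squash (take_ranks_less_fact u#a))
  : Tot nat (decreases (length s))
  = if length s = 0 then 0
    else 1 + trunc_count_sketch (take s (length s - 1)) ()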
FStar.Sequence.Base.fsti | FStar.Sequence.Base.drop_then_drop_fact | val drop_then_drop_fact : _: Prims.squash FStar.Sequence.Base.drop_length_fact -> Prims.logical | let drop_then_drop_fact (_: squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (m: nat) (n: nat).{:pattern drop (drop s m) n}
m + n <= length s ==> drop (drop s m) n == drop s (m + n) | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 61,
"end_line": 557,
"start_col": 8,
"start_line": 555
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following Dafny axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_anything_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) );
private let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s
/// We represent the following Dafny axiom with
/// `take_ranks_less_fact`. However, since it isn't true in F* (which
/// has strong requirements for <<), we instead substitute length for rank,
/// requiring decreases clauses to use length in this case.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Take(s, i)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Rank(Seq#Take(s, i)) < Seq#Rank(s) );
private let take_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern length (take s i)}
i < length s ==> length (take s i) << length s
/// We represent the following Dafny axiom with
/// `append_take_drop_ranks_less_fact`. However, since it isn't true
/// in F* (which has strong requirements for <<), we instead
/// substitute length for rank, requiring decreases clauses to use
/// length in this case.
///
/// axiom (forall<T> s: Seq T, i: int, j: int ::
/// { Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) }
/// 0 <= i && i < j && j <= Seq#Length(s) ==>
/// Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) < Seq#Rank(s) );
private let append_take_drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat) (j: nat).{:pattern length (append (take s i) (drop s j))}
i < j && j <= length s ==> length (append (take s i) (drop s j)) << length s
/// We represent the following Dafny axiom with `drop_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Drop(s, n) }
/// n == 0 ==> Seq#Drop(s, n) == s);
private let drop_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern drop s n}
n = 0 ==> drop s n == s
/// We represent the following Dafny axiom with `take_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Take(s, n) }
/// n == 0 ==> Seq#Take(s, n) == Seq#Empty());
private let take_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern take s n}
n = 0 ==> take s n == empty
/// We represent the following Dafny axiom with `drop_then_drop_fact`.
///
/// axiom (forall<T> s: Seq T, m, n: int :: { Seq#Drop(Seq#Drop(s, m), n) }
/// 0 <= m && 0 <= n && m+n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Drop(s, m), n) == Seq#Drop(s, m+n)); | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | _: Prims.squash FStar.Sequence.Base.drop_length_fact -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.squash",
"FStar.Sequence.Base.drop_length_fact",
"Prims.l_Forall",
"FStar.Sequence.Base.seq",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_Addition",
"FStar.Sequence.Base.length",
"Prims.eq2",
"FStar.Sequence.Base.drop",
"Prims.logical"
] | [] | false | false | false | true | true | let drop_then_drop_fact (_: squash (drop_length_fact u#a)) =
| forall (ty: Type u#a) (s: seq ty) (m: nat) (n: nat). {:pattern drop (drop s m) n}
m + n <= length s ==> drop (drop s m) n == drop s (m + n) | false |
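For illustration (a hypothetical client lemma, not part of the library), the length side-conditions involved in a double `drop`, which is the situation `drop_then_drop_fact` describes, follow from `drop_length_fact` alone once it is supplied as a squashed hypothesis in the same style as above:

/// Sketch only: the length of a double drop, proved from drop_length_fact.
let drop_twice_length_sketch (#ty: Type u#a)
  (s: seq ty) (m: nat) (n: nat{m + n <= length s})
  (_: squash (drop_length_fact u#a))
  : Lemma (length (drop (drop s m) n) = length s - m - n)
  = ()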
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.gst_pre | val gst_pre : Type | let gst_pre = st_pre_h mem | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 36,
"end_line": 45,
"start_col": 0,
"start_line": 45
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type | Prims.Tot | [
"total"
] | [] | [
"FStar.Pervasives.st_pre_h",
"FStar.Monotonic.HyperStack.mem"
] | [] | false | false | false | true | true | let gst_pre =
| st_pre_h mem | false |
|
Steel.ST.PCMReference.fst | Steel.ST.PCMReference.gather | val gather (#inames: _)
(#a:Type)
(#p:FStar.PCM.pcm a)
(r:ref a p)
(v0:erased a)
(v1:erased a)
: STGhostT (_:unit{composable p v0 v1}) inames
(pts_to r v0 `star` pts_to r v1)
(fun _ -> pts_to r (op p v0 v1)) | val gather (#inames: _)
(#a:Type)
(#p:FStar.PCM.pcm a)
(r:ref a p)
(v0:erased a)
(v1:erased a)
: STGhostT (_:unit{composable p v0 v1}) inames
(pts_to r v0 `star` pts_to r v1)
(fun _ -> pts_to r (op p v0 v1)) | let gather r v0 v1 = C.coerce_ghost (fun _ -> P.gather r v0 v1) | {
"file_name": "lib/steel/Steel.ST.PCMReference.fst",
"git_rev": "f984200f79bdc452374ae994a5ca837496476c41",
"git_url": "https://github.com/FStarLang/steel.git",
"project_name": "steel"
} | {
"end_col": 63,
"end_line": 16,
"start_col": 0,
"start_line": 16
} | module Steel.ST.PCMReference
module C = Steel.ST.Coercions
module P = Steel.PCMReference
let read r v0 = C.coerce_steel (fun _ -> P.read r v0)
let write r v0 v1 = C.coerce_steel (fun _ -> P.write r v0 v1)
let alloc x = C.coerce_steel (fun _ -> P.alloc x)
let free r x = C.coerce_steel (fun _ -> P.free r x)
let split r v v0 v1 = C.coerce_ghost (fun _ -> P.split r v v0 v1) | {
"checked_file": "/",
"dependencies": [
"Steel.ST.Coercions.fsti.checked",
"Steel.PCMReference.fsti.checked",
"Steel.Effect.Atomic.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Steel.ST.PCMReference.fst"
} | [
{
"abbrev": true,
"full_module": "Steel.PCMReference",
"short_module": "P"
},
{
"abbrev": true,
"full_module": "Steel.ST.Coercions",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Steel.ST.Util",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Ghost",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.PCM",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Steel.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | r: Steel.Memory.ref a p -> v0: FStar.Ghost.erased a -> v1: FStar.Ghost.erased a
-> Steel.ST.Effect.Ghost.STGhostT
(_: Prims.unit{FStar.PCM.composable p (FStar.Ghost.reveal v0) (FStar.Ghost.reveal v1)}) | Steel.ST.Effect.Ghost.STGhostT | [] | [] | [
"Steel.Memory.inames",
"FStar.PCM.pcm",
"Steel.Memory.ref",
"FStar.Ghost.erased",
"Steel.ST.Coercions.coerce_ghost",
"Prims.unit",
"FStar.PCM.composable",
"FStar.Ghost.reveal",
"Steel.Effect.Common.star",
"Steel.Effect.Common.VUnit",
"Steel.Effect.Common.to_vprop'",
"Steel.Memory.pts_to",
"FStar.PCM.op",
"Steel.Effect.Common.vprop",
"Prims.l_True",
"Steel.PCMReference.gather"
] | [] | false | true | false | false | false | let gather r v0 v1 =
| C.coerce_ghost (fun _ -> P.gather r v0 v1) | false |
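As a client-side sketch (the name `merge_halves_sketch` is hypothetical, the interface's opens are assumed, and the usual Steel frame inference is relied on): a ghost step that merges the two fractional assertions on `r` and keeps only the composed points-to is a single call to `gather`:

(* Sketch only: a thin client wrapper around gather. *)
let merge_halves_sketch (#inames: _) (#a: Type) (#p: FStar.PCM.pcm a)
  (r: ref a p) (v0 v1: erased a)
  : STGhostT unit inames
      (pts_to r v0 `star` pts_to r v1)
      (fun _ -> pts_to r (op p v0 v1))
  = let _ = gather r v0 v1 in
    ()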
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.gst_post' | val gst_post' : a: Type -> pre: Type -> Type | let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 56,
"end_line": 46,
"start_col": 0,
"start_line": 46
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> pre: Type -> Type | Prims.Tot | [
"total"
] | [] | [
"FStar.Pervasives.st_post_h'",
"FStar.Monotonic.HyperStack.mem"
] | [] | false | false | false | true | true | let gst_post' (a pre: Type) =
| st_post_h' mem a pre | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.contains_region | val contains_region : m: FStar.Monotonic.HyperStack.mem -> r: FStar.Monotonic.HyperHeap.rid -> Prims.bool | let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 80,
"end_line": 29,
"start_col": 15,
"start_line": 29
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | m: FStar.Monotonic.HyperStack.mem -> r: FStar.Monotonic.HyperHeap.rid -> Prims.bool | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Map.contains",
"FStar.Monotonic.Heap.heap",
"FStar.Monotonic.HyperStack.get_hmap",
"Prims.bool"
] | [] | false | false | false | true | false | let contains_region (m: mem) (r: rid) =
| (get_hmap m) `Map.contains` r | false |
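Restated as a sketch (the name `contains_region_alt` is hypothetical and the module's opens are assumed): the check is a plain boolean lookup in the map of heaps; since the original is marked `opaque_to_smt`, proofs that depend on its body generally have to reveal it explicitly.

// Sketch only: the same lookup written in prefix form.
let contains_region_alt (m: mem) (r: rid) : bool = Map.contains (get_hmap m) r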
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.st_post' | val st_post' : a: Type -> pre: Type -> Type | let st_post' = gst_post' | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 24,
"end_line": 80,
"start_col": 0,
"start_line": 80
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> pre: Type -> Type | Prims.Tot | [
"total"
] | [] | [
"FStar.HyperStack.ST.gst_post'"
] | [] | false | false | false | true | true | let st_post' =
| gst_post' | false |
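This record shows that st_post' is an alias for gst_post', i.e. st_post_h' specialized to the HyperStack mem type: a postcondition relates the returned value and the final memory, with the memory argument refined by the precondition. A minimal illustrative instantiation (hypothetical, not taken from the source file):

(* illustrative sketch only; not part of any dataset record *)
(* let nonneg_post : st_post' int True = fun (x:int) (_:mem) -> x >= 0 *)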
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.st_post | val st_post : a: Type -> Type | let st_post = gst_post | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 23,
"end_line": 81,
"start_col": 0,
"start_line": 81
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> Type | Prims.Tot | [
"total"
] | [] | [
"FStar.HyperStack.ST.gst_post"
] | [] | false | false | false | true | true | let st_post =
| gst_post | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.gst_post | val gst_post : a: Type -> Type | let gst_post (a:Type) = st_post_h mem a | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 39,
"end_line": 47,
"start_col": 0,
"start_line": 47
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> Type | Prims.Tot | [
"total"
] | [] | [
"FStar.Pervasives.st_post_h",
"FStar.Monotonic.HyperStack.mem"
] | [] | false | false | false | true | true | let gst_post (a: Type) =
| st_post_h mem a | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.gst_wp | val gst_wp : a: Type -> Type | let gst_wp (a:Type) = st_wp_h mem a | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 37,
"end_line": 48,
"start_col": 0,
"start_line": 48
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> Type | Prims.Tot | [
"total"
] | [] | [
"FStar.Pervasives.st_wp_h",
"FStar.Monotonic.HyperStack.mem"
] | [] | false | false | false | true | true | let gst_wp (a: Type) =
| st_wp_h mem a | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.st_pre | val st_pre : Type | let st_pre = gst_pre | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 22,
"end_line": 79,
"start_col": 0,
"start_line": 79
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Type | Prims.Tot | [
"total"
] | [] | [
"FStar.HyperStack.ST.gst_pre"
] | [] | false | false | false | true | true | let st_pre =
| gst_pre | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.lift_div_gst | val lift_div_gst : a: Type ->
wp: Prims.pure_wp a ->
p: FStar.HyperStack.ST.gst_post a ->
h: FStar.Monotonic.HyperStack.mem
-> Prims.pure_pre | let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h) | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 92,
"end_line": 50,
"start_col": 7,
"start_line": 50
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
a: Type ->
wp: Prims.pure_wp a ->
p: FStar.HyperStack.ST.gst_post a ->
h: FStar.Monotonic.HyperStack.mem
-> Prims.pure_pre | Prims.Tot | [
"total"
] | [] | [
"Prims.pure_wp",
"FStar.HyperStack.ST.gst_post",
"FStar.Monotonic.HyperStack.mem",
"Prims.l_True",
"Prims.pure_pre"
] | [] | false | false | false | true | false | let lift_div_gst (a: Type) (wp: pure_wp a) (p: gst_post a) (h: mem) =
| wp (fun a -> p a h) | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.st_wp | val st_wp : a: Type -> Type | let st_wp = gst_wp | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 21,
"end_line": 82,
"start_col": 0,
"start_line": 82
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post' | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> Type | Prims.Tot | [
"total"
] | [] | [
"FStar.HyperStack.ST.gst_wp"
] | [] | false | false | false | true | true | let st_wp =
| gst_wp | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.lift_gst_state | val lift_gst_state : a: Type -> wp: FStar.HyperStack.ST.gst_wp a -> FStar.HyperStack.ST.gst_wp a | let lift_gst_state (a:Type) (wp:gst_wp a) = wp | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 53,
"end_line": 86,
"start_col": 7,
"start_line": 86
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | a: Type -> wp: FStar.HyperStack.ST.gst_wp a -> FStar.HyperStack.ST.gst_wp a | Prims.Tot | [
"total"
] | [] | [
"FStar.HyperStack.ST.gst_wp"
] | [] | false | false | false | true | false | let lift_gst_state (a: Type) (wp: gst_wp a) =
| wp | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.contained_stack_region | val contained_stack_region: mem -> mem -> rid -> Type0 | val contained_stack_region: mem -> mem -> rid -> Type0 | let contained_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_region m0 m1 r | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 64,
"end_line": 128,
"start_col": 15,
"start_line": 127
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *)
[@@"opaque_to_smt"]
unfold private let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r)
[@@"opaque_to_smt"]
unfold private let contained_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
m0: FStar.Monotonic.HyperStack.mem ->
m1: FStar.Monotonic.HyperStack.mem ->
r: FStar.Monotonic.HyperHeap.rid
-> Type0 | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"FStar.Monotonic.HyperHeap.rid",
"Prims.l_and",
"Prims.b2t",
"FStar.Monotonic.HyperStack.is_stack_region",
"FStar.HyperStack.ST.contained_region"
] | [] | false | false | false | true | true | let contained_stack_region: mem -> mem -> rid -> Type0 =
| fun m0 m1 r -> is_stack_region r /\ contained_region m0 m1 r | false |
FStar.Sequence.Base.fsti | FStar.Sequence.Base.all_seq_facts | val all_seq_facts : Prims.logical | let all_seq_facts =
length_of_empty_is_zero_fact u#a
/\ length_zero_implies_empty_fact u#a
/\ singleton_length_one_fact u#a
/\ build_increments_length_fact u#a
/\ index_into_build_fact u#a ()
/\ append_sums_lengths_fact u#a
/\ index_into_singleton_fact u#a ()
/\ index_after_append_fact u#a ()
/\ update_maintains_length_fact u#a
/\ update_then_index_fact u#a
/\ contains_iff_exists_index_fact u#a
/\ empty_doesnt_contain_anything_fact u#a
/\ build_contains_equiv_fact u#a
/\ take_contains_equiv_exists_fact u#a
/\ drop_contains_equiv_exists_fact u#a
/\ equal_def_fact u#a
/\ extensionality_fact u#a
/\ is_prefix_def_fact u#a
/\ take_length_fact u#a
/\ index_into_take_fact u#a ()
/\ drop_length_fact u#a
/\ index_into_drop_fact u#a ()
/\ drop_index_offset_fact u#a ()
/\ append_then_take_or_drop_fact u#a ()
/\ take_commutes_with_in_range_update_fact u#a ()
/\ take_ignores_out_of_range_update_fact u#a ()
/\ drop_commutes_with_in_range_update_fact u#a ()
/\ drop_ignores_out_of_range_update_fact u#a ()
/\ drop_commutes_with_build_fact u#a ()
/\ rank_def_fact u#a
/\ element_ranks_less_fact u#a
/\ drop_ranks_less_fact u#a
/\ take_ranks_less_fact u#a
/\ append_take_drop_ranks_less_fact u#a
/\ drop_zero_fact u#a
/\ take_zero_fact u#a
/\ drop_then_drop_fact u#a () | {
"file_name": "ulib/experimental/FStar.Sequence.Base.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 31,
"end_line": 601,
"start_col": 0,
"start_line": 564
} | (*
Copyright 2008-2021 Jay Lorch, Rustan Leino, Alex Summers, Dan
Rosen, Nikhil Swamy, Microsoft Research, and contributors to
the Dafny Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Includes material from the Dafny project
(https://github.com/dafny-lang/dafny) which carries this license
information:
Created 9 February 2008 by Rustan Leino.
Converted to Boogie 2 on 28 June 2008.
Edited sequence axioms 20 October 2009 by Alex Summers.
Modified 2014 by Dan Rosen.
Copyright (c) 2008-2014, Microsoft.
Copyright by the contributors to the Dafny Project
SPDX-License-Identifier: MIT
*)
(**
This module declares a type and functions used for modeling
sequences as they're modeled in Dafny.
@summary Type and functions for modeling sequences
*)
module FStar.Sequence.Base
new val seq ([@@@ strictly_positive] a: Type u#a) : Type u#a
(**
We translate each Dafny sequence function prefixed with `Seq#`
into an F* function.
**)
/// We represent the Dafny function `Seq#Length` with `length`:
///
/// function Seq#Length<T>(Seq T): int;
val length : #ty: Type -> seq ty -> nat
/// We represent the Dafny function `Seq#Empty` with `empty`:
///
/// function Seq#Empty<T>(): Seq T;
///
/// We also provide an alias `nil` for it.
val empty : #ty: Type -> seq ty
/// We represent the Dafny function `Seq#Singleton` with `singleton`:
///
/// function Seq#Singleton<T>(T): Seq T;
val singleton : #ty: Type -> ty -> seq ty
/// We represent the Dafny function `Seq#Index` with `index`:
///
/// function Seq#Index<T>(Seq T, int): T;
///
/// We also provide the infix symbol `$@` for it.
val index: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty
let ($@) = index
/// We represent the Dafny function `Seq#Build` with `build`:
///
/// function Seq#Build<T>(s: Seq T, val: T): Seq T;
///
/// We also provide the infix symbol `$::` for it.
val build: #ty: Type -> seq ty -> ty -> seq ty
let ($::) = build
/// We represent the Dafny function `Seq#Append` with `append`:
///
/// function Seq#Append<T>(Seq T, Seq T): Seq T;
///
/// We also provide the infix notation `$+` for it.
val append: #ty: Type -> seq ty -> seq ty -> seq ty
let ($+) = append
/// We represent the Dafny function `Seq#Update` with `update`:
///
/// function Seq#Update<T>(Seq T, int, T): Seq T;
val update: #ty: Type -> s: seq ty -> i: nat{i < length s} -> ty -> seq ty
/// We represent the Dafny function `Seq#Contains` with `contains`:
///
/// function Seq#Contains<T>(Seq T, T): bool;
val contains: #ty: Type -> seq ty -> ty -> Type0
/// We represent the Dafny function `Seq#Take` with `take`:
///
/// function Seq#Take<T>(s: Seq T, howMany: int): Seq T;
val take: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Drop` with `drop`:
///
/// function Seq#Drop<T>(s: Seq T, howMany: int): Seq T;
val drop: #ty: Type -> s: seq ty -> howMany: nat{howMany <= length s} -> seq ty
/// We represent the Dafny function `Seq#Equal` with `equal`.
///
/// function Seq#Equal<T>(Seq T, Seq T): bool;
///
/// We also provide the infix symbol `$==` for it.
val equal: #ty: Type -> seq ty -> seq ty -> Type0
let ($==) = equal
/// Instead of representing the Dafny function `Seq#SameUntil`, which
/// is only ever used in Dafny to represent prefix relations, we
/// instead use `is_prefix`.
///
/// function Seq#SameUntil<T>(Seq T, Seq T, int): bool;
///
/// We also provide the infix notation `$<=` for it.
val is_prefix: #ty: Type -> seq ty -> seq ty -> Type0
let ($<=) = is_prefix
/// We represent the Dafny function `Seq#Rank` with `rank`.
///
/// function Seq#Rank<T>(Seq T): int;
val rank: #ty: Type -> ty -> ty
(**
We translate each sequence axiom from the Dafny prelude into an F*
predicate ending in `_fact`.
**)
/// We don't need the following axiom since we return a nat from length:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) } 0 <= Seq#Length(s));
/// We represent the following Dafny axiom with `length_of_empty_is_zero_fact`:
///
/// axiom (forall<T> :: { Seq#Empty(): Seq T } Seq#Length(Seq#Empty(): Seq T) == 0);
private let length_of_empty_is_zero_fact =
forall (ty: Type u#a).{:pattern empty #ty} length (empty #ty) = 0
/// We represent the following Dafny axiom with `length_zero_implies_empty_fact`:
///
/// axiom (forall<T> s: Seq T :: { Seq#Length(s) }
/// (Seq#Length(s) == 0 ==> s == Seq#Empty())
private let length_zero_implies_empty_fact =
forall (ty: Type u#a) (s: seq ty).{:pattern length s} length s = 0 ==> s == empty
/// We represent the following Dafny axiom with `singleton_length_one_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Length(Seq#Singleton(t)) } Seq#Length(Seq#Singleton(t)) == 1);
private let singleton_length_one_fact =
forall (ty: Type u#a) (v: ty).{:pattern length (singleton v)} length (singleton v) = 1
/// We represent the following Dafny axiom with `build_increments_length_fact`:
///
/// axiom (forall<T> s: Seq T, v: T ::
/// { Seq#Build(s,v) }
/// Seq#Length(Seq#Build(s,v)) == 1 + Seq#Length(s));
private let build_increments_length_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty).{:pattern build s v}
length (build s v) = 1 + length s
/// We represent the following Dafny axiom with `index_into_build_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Index(Seq#Build(s,v), i) }
/// (i == Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == v) &&
/// (i != Seq#Length(s) ==> Seq#Index(Seq#Build(s,v), i) == Seq#Index(s, i)));
private let index_into_build_fact (_: squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (i: nat{i < length (build s v)})
.{:pattern index (build s v) i}
(i = length s ==> index (build s v) i == v)
/\ (i <> length s ==> index (build s v) i == index s i)
/// We represent the following Dafny axiom with `append_sums_lengths_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Length(Seq#Append(s0,s1)) }
/// Seq#Length(Seq#Append(s0,s1)) == Seq#Length(s0) + Seq#Length(s1));
private let append_sums_lengths_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern length (append s0 s1)}
length (append s0 s1) = length s0 + length s1
/// We represent the following Dafny axiom with `index_into_singleton_fact`:
///
/// axiom (forall<T> t: T :: { Seq#Index(Seq#Singleton(t), 0) } Seq#Index(Seq#Singleton(t), 0) == t);
private let index_into_singleton_fact (_: squash (singleton_length_one_fact u#a)) =
forall (ty: Type u#a) (v: ty).{:pattern index (singleton v) 0}
index (singleton v) 0 == v
/// We represent the following axiom with `index_after_append_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#Index(Seq#Append(s0,s1), n) }
/// (n < Seq#Length(s0) ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s0, n)) &&
/// (Seq#Length(s0) <= n ==> Seq#Index(Seq#Append(s0,s1), n) == Seq#Index(s1, n - Seq#Length(s0))));
private let index_after_append_fact (_: squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty) (n: nat{n < length (append s0 s1)})
.{:pattern index (append s0 s1) n}
(n < length s0 ==> index (append s0 s1) n == index s0 n)
/\ (length s0 <= n ==> index (append s0 s1) n == index s1 (n - length s0))
/// We represent the following Dafny axiom with `update_maintains_length`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T :: { Seq#Length(Seq#Update(s,i,v)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Length(Seq#Update(s,i,v)) == Seq#Length(s));
private let update_maintains_length_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty).{:pattern length (update s i v)}
length (update s i v) = length s
/// We represent the following Dafny axiom with `update_then_index_fact`:
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int :: { Seq#Index(Seq#Update(s,i,v),n) }
/// 0 <= n && n < Seq#Length(s) ==>
/// (i == n ==> Seq#Index(Seq#Update(s,i,v),n) == v) &&
/// (i != n ==> Seq#Index(Seq#Update(s,i,v),n) == Seq#Index(s,n)));
private let update_then_index_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat{i < length s}) (v: ty) (n: nat{n < length (update s i v)})
.{:pattern index (update s i v) n}
n < length s ==>
(i = n ==> index (update s i v) n == v)
/\ (i <> n ==> index (update s i v) n == index s n)
/// We represent the following Dafny axiom with `contains_iff_exists_index_fact`:
///
/// axiom (forall<T> s: Seq T, x: T :: { Seq#Contains(s,x) }
/// Seq#Contains(s,x) <==>
/// (exists i: int :: { Seq#Index(s,i) } 0 <= i && i < Seq#Length(s) && Seq#Index(s,i) == x));
private let contains_iff_exists_index_fact =
forall (ty: Type u#a) (s: seq ty) (x: ty).{:pattern contains s x}
contains s x <==> (exists (i: nat).{:pattern index s i} i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `empty_doesnt_contain_fact`:
///
/// axiom (forall<T> x: T ::
/// { Seq#Contains(Seq#Empty(), x) }
/// !Seq#Contains(Seq#Empty(), x));
private let empty_doesnt_contain_anything_fact =
forall (ty: Type u#a) (x: ty).{:pattern contains empty x} ~(contains empty x)
/// We represent the following Dafny axiom with `build_contains_equiv_fact`:
///
/// axiom (forall<T> s: Seq T, v: T, x: T :: // needed to prove things like '4 in [2,3,4]', see method TestSequences0 in SmallTests.dfy
/// { Seq#Contains(Seq#Build(s, v), x) }
/// Seq#Contains(Seq#Build(s, v), x) <==> (v == x || Seq#Contains(s, x)));
private let build_contains_equiv_fact =
forall (ty: Type u#a) (s: seq ty) (v: ty) (x: ty).{:pattern contains (build s v) x}
contains (build s v) x <==> (v == x \/ contains s x)
/// We represent the following Dafny axiom with `take_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Take(s, n), x) }
/// Seq#Contains(Seq#Take(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= i && i < n && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let take_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (take s n) x}
contains (take s n) x <==>
(exists (i: nat).{:pattern index s i} i < n /\ i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `drop_contains_equiv_exists_fact`:
///
/// axiom (forall<T> s: Seq T, n: int, x: T ::
/// { Seq#Contains(Seq#Drop(s, n), x) }
/// Seq#Contains(Seq#Drop(s, n), x) <==>
/// (exists i: int :: { Seq#Index(s, i) }
/// 0 <= n && n <= i && i < Seq#Length(s) && Seq#Index(s, i) == x));
private let drop_contains_equiv_exists_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat{n <= length s}) (x: ty).{:pattern contains (drop s n) x}
contains (drop s n) x <==>
(exists (i: nat).{:pattern index s i} n <= i && i < length s /\ index s i == x)
/// We represent the following Dafny axiom with `equal_def_fact`:
///
/// axiom (forall<T> s0: Seq T, s1: Seq T :: { Seq#Equal(s0,s1) }
/// Seq#Equal(s0,s1) <==>
/// Seq#Length(s0) == Seq#Length(s1) &&
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < Seq#Length(s0) ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let equal_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern equal s0 s1}
equal s0 s1 <==>
length s0 == length s1 /\
(forall j.{:pattern index s0 j \/ index s1 j}
0 <= j && j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `extensionality_fact`:
///
/// axiom (forall<T> a: Seq T, b: Seq T :: { Seq#Equal(a,b) } // extensionality axiom for sequences
/// Seq#Equal(a,b) ==> a == b);
private let extensionality_fact =
forall (ty: Type u#a) (a: seq ty) (b: seq ty).{:pattern equal a b}
equal a b ==> a == b
/// We represent an analog of the following Dafny axiom with
/// `is_prefix_def_fact`. Our analog uses `is_prefix` instead
/// of `Seq#SameUntil`.
///
/// axiom (forall<T> s0: Seq T, s1: Seq T, n: int :: { Seq#SameUntil(s0,s1,n) }
/// Seq#SameUntil(s0,s1,n) <==>
/// (forall j: int :: { Seq#Index(s0,j) } { Seq#Index(s1,j) }
/// 0 <= j && j < n ==> Seq#Index(s0,j) == Seq#Index(s1,j)));
private let is_prefix_def_fact =
forall (ty: Type u#a) (s0: seq ty) (s1: seq ty).{:pattern is_prefix s0 s1}
is_prefix s0 s1 <==>
length s0 <= length s1
/\ (forall (j: nat).{:pattern index s0 j \/ index s1 j}
j < length s0 ==> index s0 j == index s1 j)
/// We represent the following Dafny axiom with `take_length_fact`:
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Take(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Take(s,n)) == n);
private let take_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern length (take s n)}
n <= length s ==> length (take s n) = n
/// We represent the following Dafny axiom with `index_into_take_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Take(s,n), j) }
/// { Seq#Index(s, j), Seq#Take(s,n) }
/// 0 <= j && j < n && j < Seq#Length(s) ==>
/// Seq#Index(Seq#Take(s,n), j) == Seq#Index(s, j));
private let index_into_take_fact (_ : squash (take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (take s n) j \/ index s j ; take s n}
j < n && n <= length s ==> index (take s n) j == index s j
/// We represent the following Dafny axiom with `drop_length_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Length(Seq#Drop(s,n)) }
/// 0 <= n && n <= Seq#Length(s) ==> Seq#Length(Seq#Drop(s,n)) == Seq#Length(s) - n);
private let drop_length_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).
{:pattern length (drop s n)}
n <= length s ==> length (drop s n) = length s - n
/// We represent the following Dafny axiom with `index_into_drop_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, j: int ::
/// {:weight 25}
/// { Seq#Index(Seq#Drop(s,n), j) }
/// 0 <= n && 0 <= j && j < Seq#Length(s)-n ==>
/// Seq#Index(Seq#Drop(s,n), j) == Seq#Index(s, j+n));
private let index_into_drop_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (j: nat).
{:pattern index (drop s n) j}
j < length s - n ==> index (drop s n) j == index s (j + n)
/// We represent the following Dafny axiom with `drop_index_offset_fact`.
///
/// axiom (forall<T> s: Seq T, n: int, k: int ::
/// {:weight 25}
/// { Seq#Index(s, k), Seq#Drop(s,n) }
/// 0 <= n && n <= k && k < Seq#Length(s) ==>
/// Seq#Index(Seq#Drop(s,n), k-n) == Seq#Index(s, k));
private let drop_index_offset_fact (_ : squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (n: nat) (k: nat).
{:pattern index s k; drop s n}
n <= k && k < length s ==> index (drop s n) (k - n) == index s k
/// We represent the following Dafny axiom with `append_then_take_or_drop_fact`.
///
/// axiom (forall<T> s, t: Seq T, n: int ::
/// { Seq#Take(Seq#Append(s, t), n) }
/// { Seq#Drop(Seq#Append(s, t), n) }
/// n == Seq#Length(s)
/// ==>
/// Seq#Take(Seq#Append(s, t), n) == s &&
/// Seq#Drop(Seq#Append(s, t), n) == t);
private let append_then_take_or_drop_fact (_ : squash (append_sums_lengths_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (t: seq ty) (n: nat).
{:pattern take (append s t) n \/ drop (append s t) n}
n = length s ==> take (append s t) n == s /\ drop (append s t) n == t
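/// Illustrative sketch (ours): splitting an append at the length of its first
/// component recovers both halves. As in `all_seq_facts` below, the
/// parameterized fact is applied to `()` under the assumption of
/// `append_sums_lengths_fact`. The helper name is hypothetical and extra SMT
/// guidance may be needed.
private let append_split_sketch
  (_ : squash (append_sums_lengths_fact u#a /\ append_then_take_or_drop_fact u#a ()))
  : squash (forall (ty: Type u#a) (s: seq ty) (t: seq ty).
              take (append s t) (length s) == s /\ drop (append s t) (length s) == t)
  = ()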
/// We represent the following Dafny axiom with `take_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n <= Seq#Length(s) ==>
/// Seq#Take(Seq#Update(s, i, v), n) == Seq#Update(Seq#Take(s, n), i, v) );
private let take_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ take_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
i < n && n <= length s ==>
take (update s i v) n == update (take s n) i v
/// We represent the following Dafny axiom with `take_ignores_out_of_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Take(Seq#Update(s, i, v), n) }
/// n <= i && i < Seq#Length(s) ==> Seq#Take(Seq#Update(s, i, v), n) == Seq#Take(s, n));
private let take_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern take (update s i v) n}
n <= i && i < length s ==>
take (update s i v) n == take s n
/// We represent the following Dafny axiom with `drop_commutes_with_in_range_update_fact`.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= n && n <= i && i < Seq#Length(s) ==>
/// Seq#Drop(Seq#Update(s, i, v), n) == Seq#Update(Seq#Drop(s, n), i-n, v) );
private let drop_commutes_with_in_range_update_fact
(_ : squash (update_maintains_length_fact u#a /\ drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
n <= i && i < length s ==>
drop (update s i v) n == update (drop s n) (i - n) v
/// We represent the following Dafny axiom with `drop_ignores_out_of_range_update_fact`.
/// Jay noticed that it was unnecessarily weak, possibly due to a typo, so he reported this as
/// Dafny issue #1423 (https://github.com/dafny-lang/dafny/issues/1423) and updated it here.
///
/// axiom (forall<T> s: Seq T, i: int, v: T, n: int ::
/// { Seq#Drop(Seq#Update(s, i, v), n) }
/// 0 <= i && i < n && n < Seq#Length(s) ==> Seq#Drop(Seq#Update(s, i, v), n) == Seq#Drop(s, n));
private let drop_ignores_out_of_range_update_fact (_ : squash (update_maintains_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (i: nat) (v: ty) (n: nat).{:pattern drop (update s i v) n}
i < n && n <= length s ==>
drop (update s i v) n == drop s n
/// We represent the following Dafny axiom with `drop_commutes_with_build_fact`.
///
/// axiom (forall<T> s: Seq T, v: T, n: int ::
/// { Seq#Drop(Seq#Build(s, v), n) }
/// 0 <= n && n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Build(s, v), n) == Seq#Build(Seq#Drop(s, n), v) );
private let drop_commutes_with_build_fact (_ : squash (build_increments_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (v: ty) (n: nat).{:pattern drop (build s v) n}
n <= length s ==> drop (build s v) n == build (drop s n) v
/// We include the definition of `rank` among our facts.
private let rank_def_fact =
forall (ty: Type u#a) (v: ty).{:pattern rank v} rank v == v
/// We represent the following Dafny axiom with `element_ranks_less_fact`.
///
/// axiom (forall s: Seq Box, i: int ::
/// { DtRank($Unbox(Seq#Index(s, i)): DatatypeType) }
/// 0 <= i && i < Seq#Length(s) ==> DtRank($Unbox(Seq#Index(s, i)): DatatypeType) < Seq#Rank(s) );
private let element_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (index s i)}
i < length s ==> rank (index s i) << rank s
/// We represent the following Dafny axiom with `drop_ranks_less_fact`.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Drop(s, i)) }
/// 0 < i && i <= Seq#Length(s) ==> Seq#Rank(Seq#Drop(s, i)) < Seq#Rank(s) );
private let drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern rank (drop s i)}
0 < i && i <= length s ==> rank (drop s i) << rank s
/// We represent the following Dafny axiom with
/// `take_ranks_less_fact`. However, since it isn't true in F* (which
/// has strong requirements for <<), we instead substitute length,
/// requiring decreases clauses to use length in this case.
///
/// axiom (forall<T> s: Seq T, i: int ::
/// { Seq#Rank(Seq#Take(s, i)) }
/// 0 <= i && i < Seq#Length(s) ==> Seq#Rank(Seq#Take(s, i)) < Seq#Rank(s) );
private let take_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat).{:pattern length (take s i)}
i < length s ==> length (take s i) << length s
/// We represent the following Dafny axiom with
/// `append_take_drop_ranks_less_fact`. However, since it isn't true
/// in F* (which has strong requirements for <<), we instead
/// substitute length, requiring decreases clauses to use
/// length in this case.
///
/// axiom (forall<T> s: Seq T, i: int, j: int ::
/// { Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) }
/// 0 <= i && i < j && j <= Seq#Length(s) ==>
/// Seq#Rank(Seq#Append(Seq#Take(s, i), Seq#Drop(s, j))) < Seq#Rank(s) );
private let append_take_drop_ranks_less_fact =
forall (ty: Type u#a) (s: seq ty) (i: nat) (j: nat).{:pattern length (append (take s i) (drop s j))}
i < j && j <= length s ==> length (append (take s i) (drop s j)) << length s
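/// Illustrative sketch (ours) of the length-based decreases discipline that the
/// two comments above call for: a recursive traversal over a sequence can
/// justify termination with `decreases (length s)` once `drop_length_fact` is
/// available. The helper name is hypothetical; extra SMT hints may be needed.
private let rec sum_via_drop_sketch (_ : squash (drop_length_fact u#0)) (s: seq nat)
  : Tot nat (decreases (length s))
  = if length s = 0 then 0
    else index s 0 + sum_via_drop_sketch () (drop s 1)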
/// We represent the following Dafny axiom with `drop_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Drop(s, n) }
/// n == 0 ==> Seq#Drop(s, n) == s);
private let drop_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern drop s n}
n = 0 ==> drop s n == s
/// We represent the following Dafny axiom with `take_zero_fact`.
///
/// axiom (forall<T> s: Seq T, n: int :: { Seq#Take(s, n) }
/// n == 0 ==> Seq#Take(s, n) == Seq#Empty());
private let take_zero_fact =
forall (ty: Type u#a) (s: seq ty) (n: nat).{:pattern take s n}
n = 0 ==> take s n == empty
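/// Illustrative sketch (ours): the two zero-offset facts above fire directly on
/// any goal mentioning `take s 0` or `drop s 0`. The helper name is
/// hypothetical and may need extra SMT guidance.
private let zero_take_drop_sketch
  (_ : squash (drop_zero_fact u#a /\ take_zero_fact u#a))
  : squash (forall (ty: Type u#a) (s: seq ty). take s 0 == empty /\ drop s 0 == s)
  = ()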
/// We represent the following Dafny axiom with `drop_then_drop_fact`.
///
/// axiom (forall<T> s: Seq T, m, n: int :: { Seq#Drop(Seq#Drop(s, m), n) }
/// 0 <= m && 0 <= n && m+n <= Seq#Length(s) ==>
/// Seq#Drop(Seq#Drop(s, m), n) == Seq#Drop(s, m+n));
private let drop_then_drop_fact (_: squash (drop_length_fact u#a)) =
forall (ty: Type u#a) (s: seq ty) (m: nat) (n: nat).{:pattern drop (drop s m) n}
m + n <= length s ==> drop (drop s m) n == drop s (m + n)
(**
  The predicate `all_seq_facts` collects all the Dafny sequence axioms.
  One can bring all these facts into scope with `all_seq_facts_lemma ()`.
**) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "FStar.Sequence.Base.fsti"
} | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "FLT"
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | Prims.logical | Prims.Tot | [
"total"
] | [] | [
"Prims.l_and",
"FStar.Sequence.Base.length_of_empty_is_zero_fact",
"FStar.Sequence.Base.length_zero_implies_empty_fact",
"FStar.Sequence.Base.singleton_length_one_fact",
"FStar.Sequence.Base.build_increments_length_fact",
"FStar.Sequence.Base.index_into_build_fact",
"FStar.Sequence.Base.append_sums_lengths_fact",
"FStar.Sequence.Base.index_into_singleton_fact",
"FStar.Sequence.Base.index_after_append_fact",
"FStar.Sequence.Base.update_maintains_length_fact",
"FStar.Sequence.Base.update_then_index_fact",
"FStar.Sequence.Base.contains_iff_exists_index_fact",
"FStar.Sequence.Base.empty_doesnt_contain_anything_fact",
"FStar.Sequence.Base.build_contains_equiv_fact",
"FStar.Sequence.Base.take_contains_equiv_exists_fact",
"FStar.Sequence.Base.drop_contains_equiv_exists_fact",
"FStar.Sequence.Base.equal_def_fact",
"FStar.Sequence.Base.extensionality_fact",
"FStar.Sequence.Base.is_prefix_def_fact",
"FStar.Sequence.Base.take_length_fact",
"FStar.Sequence.Base.index_into_take_fact",
"FStar.Sequence.Base.drop_length_fact",
"FStar.Sequence.Base.index_into_drop_fact",
"FStar.Sequence.Base.drop_index_offset_fact",
"FStar.Sequence.Base.append_then_take_or_drop_fact",
"FStar.Sequence.Base.take_commutes_with_in_range_update_fact",
"FStar.Sequence.Base.take_ignores_out_of_range_update_fact",
"FStar.Sequence.Base.drop_commutes_with_in_range_update_fact",
"FStar.Sequence.Base.drop_ignores_out_of_range_update_fact",
"FStar.Sequence.Base.drop_commutes_with_build_fact",
"FStar.Sequence.Base.rank_def_fact",
"FStar.Sequence.Base.element_ranks_less_fact",
"FStar.Sequence.Base.drop_ranks_less_fact",
"FStar.Sequence.Base.take_ranks_less_fact",
"FStar.Sequence.Base.append_take_drop_ranks_less_fact",
"FStar.Sequence.Base.drop_zero_fact",
"FStar.Sequence.Base.take_zero_fact",
"FStar.Sequence.Base.drop_then_drop_fact"
] | [] | false | false | false | true | true | let all_seq_facts =
| length_of_empty_is_zero_fact u#a /\ length_zero_implies_empty_fact u#a /\
singleton_length_one_fact u#a /\ build_increments_length_fact u#a /\ index_into_build_fact u#a () /\
append_sums_lengths_fact u#a /\ index_into_singleton_fact u#a () /\ index_after_append_fact u#a () /\
update_maintains_length_fact u#a /\ update_then_index_fact u#a /\ contains_iff_exists_index_fact u#a /\
empty_doesnt_contain_anything_fact u#a /\ build_contains_equiv_fact u#a /\
take_contains_equiv_exists_fact u#a /\ drop_contains_equiv_exists_fact u#a /\ equal_def_fact u#a /\
extensionality_fact u#a /\ is_prefix_def_fact u#a /\ take_length_fact u#a /\
index_into_take_fact u#a () /\ drop_length_fact u#a /\ index_into_drop_fact u#a () /\
drop_index_offset_fact u#a () /\ append_then_take_or_drop_fact u#a () /\
take_commutes_with_in_range_update_fact u#a () /\ take_ignores_out_of_range_update_fact u#a () /\
drop_commutes_with_in_range_update_fact u#a () /\ drop_ignores_out_of_range_update_fact u#a () /\
drop_commutes_with_build_fact u#a () /\ rank_def_fact u#a /\ element_ranks_less_fact u#a /\
drop_ranks_less_fact u#a /\ take_ranks_less_fact u#a /\ append_take_drop_ranks_less_fact u#a /\
drop_zero_fact u#a /\ take_zero_fact u#a /\ drop_then_drop_fact u#a () | false |
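/// A client-side sketch (ours, not part of the entry above): the docstring says
/// the facts are brought into scope with `all_seq_facts_lemma ()`, which we
/// assume is declared alongside `all_seq_facts` in the full interface. A
/// typical use might look like the following; extra hints may be needed.
///
///   let take_zero_client_sketch (s: FStar.Sequence.Base.seq int)
///     : Lemma (FStar.Sequence.Base.take s 0 == FStar.Sequence.Base.empty)
///     = FStar.Sequence.Base.all_seq_facts_lemma ()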
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.contained_region | val contained_region: mem -> mem -> rid -> Type0 | val contained_region: mem -> mem -> rid -> Type0 | let contained_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 67,
"end_line": 124,
"start_col": 15,
"start_line": 123
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *)
[@@"opaque_to_smt"]
unfold private let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
m0: FStar.Monotonic.HyperStack.mem ->
m1: FStar.Monotonic.HyperStack.mem ->
r: FStar.Monotonic.HyperHeap.rid
-> Type0 | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"FStar.Monotonic.HyperHeap.rid",
"Prims.l_and",
"Prims.b2t",
"FStar.HyperStack.ST.contains_region"
] | [] | false | false | false | true | true | let contained_region: mem -> mem -> rid -> Type0 =
| fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r | false |
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.equal_heap_dom | val equal_heap_dom (r: rid) (m0 m1: mem) : Type0 | val equal_heap_dom (r: rid) (m0 m1: mem) : Type0 | let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r) | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 70,
"end_line": 120,
"start_col": 15,
"start_line": 119
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
r: FStar.Monotonic.HyperHeap.rid ->
m0: FStar.Monotonic.HyperStack.mem ->
m1: FStar.Monotonic.HyperStack.mem
-> Type0 | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperHeap.rid",
"FStar.Monotonic.HyperStack.mem",
"FStar.Monotonic.Heap.equal_dom",
"FStar.Map.sel",
"FStar.Monotonic.Heap.heap",
"FStar.Monotonic.HyperStack.get_hmap"
] | [] | false | false | false | true | true | let equal_heap_dom (r: rid) (m0 m1: mem) : Type0 =
| Heap.equal_dom ((get_hmap m0) `Map.sel` r) ((get_hmap m1) `Map.sel` r) | false |
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.same_refs_common | val same_refs_common : p:
(
_: FStar.Monotonic.HyperStack.mem ->
_: FStar.Monotonic.HyperStack.mem ->
_: FStar.Monotonic.HyperHeap.rid
-> Type0) ->
m0: FStar.Monotonic.HyperStack.mem ->
m1: FStar.Monotonic.HyperStack.mem
-> Prims.logical | let same_refs_common (p:mem -> mem -> rid -> Type0) (m0 m1:mem) =
forall (r:rid). p m0 m1 r ==> equal_heap_dom r m0 m1 | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 54,
"end_line": 140,
"start_col": 15,
"start_line": 139
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *)
[@@"opaque_to_smt"]
unfold private let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r)
[@@"opaque_to_smt"]
unfold private let contained_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r
[@@"opaque_to_smt"]
unfold private let contained_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> r =!= get_tip m0 /\ r =!= get_tip m1 /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_non_tip_region m0 m1 r | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
p:
(
_: FStar.Monotonic.HyperStack.mem ->
_: FStar.Monotonic.HyperStack.mem ->
_: FStar.Monotonic.HyperHeap.rid
-> Type0) ->
m0: FStar.Monotonic.HyperStack.mem ->
m1: FStar.Monotonic.HyperStack.mem
-> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"FStar.Monotonic.HyperHeap.rid",
"Prims.l_Forall",
"Prims.l_imp",
"FStar.HyperStack.ST.equal_heap_dom",
"Prims.logical"
] | [] | false | false | false | true | true | let same_refs_common (p: (mem -> mem -> rid -> Type0)) (m0 m1: mem) =
| forall (r: rid). p m0 m1 r ==> equal_heap_dom r m0 m1 | false |
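(* Illustrative sketch (ours, not part of the interface): `same_refs_common`
   instantiated with `contained_region` is exactly the hypothesis of
   `lemma_same_refs_in_all_regions_intro` declared in the file context above,
   so the abstract predicate can be established explicitly as follows. The
   helper name is hypothetical. *)
private let same_refs_common_intro_sketch (m0 m1: mem)
  : Lemma (requires same_refs_common contained_region m0 m1)
          (ensures  same_refs_in_all_regions m0 m1)
  = lemma_same_refs_in_all_regions_intro m0 m1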
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.equal_stack_domains | val equal_stack_domains : m0: FStar.Monotonic.HyperStack.mem -> m1: FStar.Monotonic.HyperStack.mem -> Prims.logical | let equal_stack_domains (m0 m1:mem) =
get_tip m0 == get_tip m1 /\
same_refs_in_stack_regions m0 m1 | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 34,
"end_line": 218,
"start_col": 0,
"start_line": 216
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *)
[@@"opaque_to_smt"]
unfold private let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r)
[@@"opaque_to_smt"]
unfold private let contained_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r
[@@"opaque_to_smt"]
unfold private let contained_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> r =!= get_tip m0 /\ r =!= get_tip m1 /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_non_tip_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let same_refs_common (p:mem -> mem -> rid -> Type0) (m0 m1:mem) =
forall (r:rid). p m0 m1 r ==> equal_heap_dom r m0 m1
(* predicates *)
val same_refs_in_all_regions (m0 m1:mem) :Type0
val same_refs_in_stack_regions (m0 m1:mem) :Type0
val same_refs_in_non_tip_regions (m0 m1:mem) :Type0
val same_refs_in_non_tip_stack_regions (m0 m1:mem) :Type0
(* intro and elim forms *)
val lemma_same_refs_in_all_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_region m0 m1)) (ensures (same_refs_in_all_regions m0 m1))
[SMTPat (same_refs_in_all_regions m0 m1)]
val lemma_same_refs_in_all_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_all_regions m0 m1 /\ contained_region m0 m1 r)) (ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_all_regions m0 m1); SMTPat (m0 `contains_region` r)];
[SMTPat (same_refs_in_all_regions m0 m1); SMTPat (m1 `contains_region` r)]]]
val lemma_same_refs_in_stack_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_stack_region m0 m1)) (ensures (same_refs_in_stack_regions m0 m1))
[SMTPat (same_refs_in_stack_regions m0 m1)]
val lemma_same_refs_in_stack_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_stack_regions m0 m1 /\ contained_stack_region m0 m1 r)) (ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m0 `contains_region` r)];
[SMTPat (same_refs_in_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m1 `contains_region` r)]]]
val lemma_same_refs_in_non_tip_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_non_tip_region m0 m1)) (ensures (same_refs_in_non_tip_regions m0 m1))
[SMTPat (same_refs_in_non_tip_regions m0 m1)]
val lemma_same_refs_in_non_tip_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_non_tip_regions m0 m1 /\ contained_non_tip_region m0 m1 r)) (ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_non_tip_regions m0 m1); SMTPat (m0 `contains_region` r)];
[SMTPat (same_refs_in_non_tip_regions m0 m1); SMTPat (m1 `contains_region` r)]]]
val lemma_same_refs_in_non_tip_stack_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_non_tip_stack_region m0 m1)) (ensures (same_refs_in_non_tip_stack_regions m0 m1))
[SMTPat (same_refs_in_non_tip_stack_regions m0 m1)]
val lemma_same_refs_in_non_tip_stack_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_non_tip_stack_regions m0 m1 /\ contained_non_tip_stack_region m0 m1 r))
(ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_non_tip_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m0 `contains_region` r);];
[SMTPat (same_refs_in_non_tip_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m1 `contains_region` r)]]]
(******)
let equal_domains (m0 m1:mem) =
get_tip m0 == get_tip m1 /\
Set.equal (Map.domain (get_hmap m0)) (Map.domain (get_hmap m1)) /\
same_refs_in_all_regions m0 m1
val lemma_equal_domains_trans (m0 m1 m2:mem)
:Lemma (requires (equal_domains m0 m1 /\ equal_domains m1 m2))
(ensures (equal_domains m0 m2))
[SMTPat (equal_domains m0 m1); SMTPat (equal_domains m1 m2)]
(**
* Effect of stacked based code: the 'equal_domains' clause enforces that
* - both mem have the same tip
* - both mem reference the same heaps (their map: rid -> heap have the same domain)
* - in each region id, the corresponding heaps contain the same references on both sides
*)
effect Stack (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. (pre h /\ post h a h1 /\ equal_domains h h1) ==> p a h1)) (* WP *)
(**
* Effect of heap-based code.
* - assumes that the stack is empty (tip = root)
* - corresponds to the HyperHeap ST effect
* - can call to Stack and ST code freely
* - respects the stack invariant: the stack has to be empty when returning
*)
effect Heap (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. (pre h /\ post h a h1 /\ get_tip h = HS.root /\ get_tip h1 = HS.root ) ==> p a h1)) (* WP *) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | m0: FStar.Monotonic.HyperStack.mem -> m1: FStar.Monotonic.HyperStack.mem -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"Prims.eq2",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Monotonic.HyperStack.get_tip",
"FStar.HyperStack.ST.same_refs_in_stack_regions",
"Prims.logical"
] | [] | false | false | false | true | true | let equal_stack_domains (m0 m1: mem) =
| get_tip m0 == get_tip m1 /\ same_refs_in_stack_regions m0 m1 | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.equal_domains | val equal_domains : m0: FStar.Monotonic.HyperStack.mem -> m1: FStar.Monotonic.HyperStack.mem -> Prims.logical | let equal_domains (m0 m1:mem) =
get_tip m0 == get_tip m1 /\
Set.equal (Map.domain (get_hmap m0)) (Map.domain (get_hmap m1)) /\
same_refs_in_all_regions m0 m1 | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 32,
"end_line": 188,
"start_col": 0,
"start_line": 185
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *)
[@@"opaque_to_smt"]
unfold private let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r)
[@@"opaque_to_smt"]
unfold private let contained_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r
[@@"opaque_to_smt"]
unfold private let contained_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> r =!= get_tip m0 /\ r =!= get_tip m1 /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_non_tip_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let same_refs_common (p:mem -> mem -> rid -> Type0) (m0 m1:mem) =
forall (r:rid). p m0 m1 r ==> equal_heap_dom r m0 m1
(* predicates *)
val same_refs_in_all_regions (m0 m1:mem) :Type0
val same_refs_in_stack_regions (m0 m1:mem) :Type0
val same_refs_in_non_tip_regions (m0 m1:mem) :Type0
val same_refs_in_non_tip_stack_regions (m0 m1:mem) :Type0
(* intro and elim forms *)
val lemma_same_refs_in_all_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_region m0 m1)) (ensures (same_refs_in_all_regions m0 m1))
[SMTPat (same_refs_in_all_regions m0 m1)]
val lemma_same_refs_in_all_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_all_regions m0 m1 /\ contained_region m0 m1 r)) (ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_all_regions m0 m1); SMTPat (m0 `contains_region` r)];
[SMTPat (same_refs_in_all_regions m0 m1); SMTPat (m1 `contains_region` r)]]]
val lemma_same_refs_in_stack_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_stack_region m0 m1)) (ensures (same_refs_in_stack_regions m0 m1))
[SMTPat (same_refs_in_stack_regions m0 m1)]
val lemma_same_refs_in_stack_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_stack_regions m0 m1 /\ contained_stack_region m0 m1 r)) (ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m0 `contains_region` r)];
[SMTPat (same_refs_in_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m1 `contains_region` r)]]]
val lemma_same_refs_in_non_tip_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_non_tip_region m0 m1)) (ensures (same_refs_in_non_tip_regions m0 m1))
[SMTPat (same_refs_in_non_tip_regions m0 m1)]
val lemma_same_refs_in_non_tip_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_non_tip_regions m0 m1 /\ contained_non_tip_region m0 m1 r)) (ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_non_tip_regions m0 m1); SMTPat (m0 `contains_region` r)];
[SMTPat (same_refs_in_non_tip_regions m0 m1); SMTPat (m1 `contains_region` r)]]]
val lemma_same_refs_in_non_tip_stack_regions_intro (m0 m1:mem)
:Lemma (requires (same_refs_common contained_non_tip_stack_region m0 m1)) (ensures (same_refs_in_non_tip_stack_regions m0 m1))
[SMTPat (same_refs_in_non_tip_stack_regions m0 m1)]
val lemma_same_refs_in_non_tip_stack_regions_elim (m0 m1:mem) (r:rid)
:Lemma (requires (same_refs_in_non_tip_stack_regions m0 m1 /\ contained_non_tip_stack_region m0 m1 r))
(ensures (equal_heap_dom r m0 m1))
[SMTPatOr [[SMTPat (same_refs_in_non_tip_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m0 `contains_region` r);];
[SMTPat (same_refs_in_non_tip_stack_regions m0 m1); SMTPat (is_stack_region r); SMTPat (m1 `contains_region` r)]]]
(******) | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false | m0: FStar.Monotonic.HyperStack.mem -> m1: FStar.Monotonic.HyperStack.mem -> Prims.logical | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"Prims.eq2",
"FStar.Monotonic.HyperHeap.rid",
"FStar.Monotonic.HyperStack.get_tip",
"FStar.Set.equal",
"FStar.Map.domain",
"FStar.Monotonic.Heap.heap",
"FStar.Monotonic.HyperStack.get_hmap",
"FStar.HyperStack.ST.same_refs_in_all_regions",
"Prims.logical"
] | [] | false | false | false | true | true | let equal_domains (m0 m1: mem) =
| get_tip m0 == get_tip m1 /\ Set.equal (Map.domain (get_hmap m0)) (Map.domain (get_hmap m1)) /\
same_refs_in_all_regions m0 m1 | false |
|
FStar.HyperStack.ST.fsti | FStar.HyperStack.ST.contained_non_tip_stack_region | val contained_non_tip_stack_region: mem -> mem -> rid -> Type0 | val contained_non_tip_stack_region: mem -> mem -> rid -> Type0 | let contained_non_tip_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_non_tip_region m0 m1 r | {
"file_name": "ulib/FStar.HyperStack.ST.fsti",
"git_rev": "10183ea187da8e8c426b799df6c825e24c0767d3",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | {
"end_col": 72,
"end_line": 136,
"start_col": 15,
"start_line": 135
} | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.HyperStack.ST
open FStar.HyperStack
module HS = FStar.HyperStack
open FStar.Preorder
(* Setting up the preorder for mem *)
(* Starting the predicates that constitute the preorder *)
[@@"opaque_to_smt"]
private unfold let contains_region (m:mem) (r:rid) = get_hmap m `Map.contains` r
(* The preorder is the conjunction of above predicates *)
val mem_rel :preorder mem
type mem_predicate = mem -> Type0
(* Predicates that we will witness with regions and refs *)
val region_contains_pred (r:HS.rid) :mem_predicate
val ref_contains_pred (#a:Type) (#rel:preorder a) (r:HS.mreference a rel) :mem_predicate
(***** Global ST (GST) effect with put, get, witness, and recall *****)
new_effect GST = STATE_h mem
let gst_pre = st_pre_h mem
let gst_post' (a:Type) (pre:Type) = st_post_h' mem a pre
let gst_post (a:Type) = st_post_h mem a
let gst_wp (a:Type) = st_wp_h mem a
unfold let lift_div_gst (a:Type) (wp:pure_wp a) (p:gst_post a) (h:mem) = wp (fun a -> p a h)
sub_effect DIV ~> GST = lift_div_gst
(*
* AR: A few notes about the interface:
* - The interface closely mimics the interface we formalized in our POPL'18 paper
* - Specifically, `witnessed` is defined for any mem_predicate (not necessarily stable ones)
* - `stable p` is a precondition for `gst_witness`
* - `gst_recall` does not have a precondition for `stable p`, since `gst_witness` is the only way
* clients would have obtained `witnessed p`, and so, `p` should already be stable
* - `lemma_functoriality` does not require stability for either `p` or `q`
* Our metatheory ensures that this is sound (without requiring stability of `q`)
* This form is useful in defining the MRRef interface (see mr_witness)
*)
val stable (p:mem_predicate) :Type0
val witnessed (p:mem_predicate) :Type0
(* TODO: we should derive these using DM4F *)
private val gst_get: unit -> GST mem (fun p h0 -> p h0 h0)
private val gst_put: h1:mem -> GST unit (fun p h0 -> mem_rel h0 h1 /\ p () h1)
private val gst_witness: p:mem_predicate -> GST unit (fun post h0 -> p h0 /\ stable p /\ (witnessed p ==> post () h0))
private val gst_recall: p:mem_predicate -> GST unit (fun post h0 -> witnessed p /\ (p h0 ==> post () h0))
val lemma_functoriality (p:mem_predicate{witnessed p}) (q:mem_predicate{(forall (h:mem). p h ==> q h)})
: Lemma (witnessed q)
let st_pre = gst_pre
let st_post' = gst_post'
let st_post = gst_post
let st_wp = gst_wp
new_effect STATE = GST
unfold let lift_gst_state (a:Type) (wp:gst_wp a) = wp
sub_effect GST ~> STATE = lift_gst_state
(* effect State (a:Type) (wp:st_wp a) = *)
(* STATE a wp *)
(**
WARNING: this effect is unsafe, for C/C++ extraction it shall only be used by
code that would later extract to OCaml or by library functions
*)
effect Unsafe (a:Type) (pre:st_pre) (post: (m0:mem -> Tot (st_post' a (pre m0)))) =
STATE a
(fun (p:st_post a) (h:mem) -> pre h /\ (forall a h1. pre h /\ post h a h1 ==> p a h1)) (* WP *)
(****** defining predicates for equal refs in some regions ******)
(*
// * AR: (may be this is an overkill)
// * various effects below talk about refs being equal in some regions (all regions, stack regions, etc.)
// * this was done by defining, for example, an equal_dom predicate with a (forall (r:rid)) quantifier
// * this quantifier was only guarded with Map.contains (HS.get_hmap m) r
// * which meant it could fire for all the contained regions
// *
// * instead now we define abstract predicates, e.g. same_refs_in_all_regions, and provide intro and elim forms
// * the advantage is that, the (lemma) quantifiers are now guarded additionally by same_refs_in_all_regions kind
// * of predicates, and hence should fire more contextually
// * should profile the queries to see if it actually helps
// *)
(*
// * marking these opaque, since expect them to be unfolded away beforehand
// *)
[@@"opaque_to_smt"]
unfold private let equal_heap_dom (r:rid) (m0 m1:mem) :Type0
= Heap.equal_dom (get_hmap m0 `Map.sel` r) (get_hmap m1 `Map.sel` r)
[@@"opaque_to_smt"]
unfold private let contained_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> m0 `contains_region` r /\ m1 `contains_region` r
[@@"opaque_to_smt"]
unfold private let contained_stack_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> is_stack_region r /\ contained_region m0 m1 r
[@@"opaque_to_smt"]
unfold private let contained_non_tip_region :mem -> mem -> rid -> Type0
= fun m0 m1 r -> r =!= get_tip m0 /\ r =!= get_tip m1 /\ contained_region m0 m1 r | {
"checked_file": "/",
"dependencies": [
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Preorder.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Map.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Heap.fst.checked"
],
"interface_file": false,
"source_file": "FStar.HyperStack.ST.fsti"
} | [
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.Witnessed",
"short_module": "W"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Preorder",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.HyperStack",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | false |
m0: FStar.Monotonic.HyperStack.mem ->
m1: FStar.Monotonic.HyperStack.mem ->
r: FStar.Monotonic.HyperHeap.rid
-> Type0 | Prims.Tot | [
"total"
] | [] | [
"FStar.Monotonic.HyperStack.mem",
"FStar.Monotonic.HyperHeap.rid",
"Prims.l_and",
"Prims.b2t",
"FStar.Monotonic.HyperStack.is_stack_region",
"FStar.HyperStack.ST.contained_non_tip_region"
] | [] | false | false | false | true | true | let contained_non_tip_stack_region: mem -> mem -> rid -> Type0 =
| fun m0 m1 r -> is_stack_region r /\ contained_non_tip_region m0 m1 r | false |