diff --git a/thys/CHERI-C_Memory_Model/CHERI_C_Concrete_Memory_Model.thy b/thys/CHERI-C_Memory_Model/CHERI_C_Concrete_Memory_Model.thy
new file mode 100644
--- /dev/null
+++ b/thys/CHERI-C_Memory_Model/CHERI_C_Concrete_Memory_Model.thy
@@ -0,0 +1,3768 @@
+theory CHERI_C_Concrete_Memory_Model
+  imports "Preliminary_Library"
+          "Separation_Algebra.Separation_Algebra"
+          "Containers.Containers"
+          "HOL-Library.Mapping"
+          "HOL-Library.Code_Target_Numeral"
+begin
+
+section \CHERI-C Error System\
+text \In this section, we formalise the error system used by the memory model.\
+text \Below are the coprocessor 2 exceptions thrown by the hardware.
+      BadAddressViolation is not a coprocessor 2 exception, but it is still an exception raised by the hardware.
+      This corresponds to CapErr in the paper~\cite{park_2022}.\
+datatype c2errtype =
+  TagViolation
+  | PermitLoadViolation
+  | PermitStoreViolation
+  | PermitStoreCapViolation
+  | PermitStoreLocalCapViolation
+  | LengthViolation
+  | BadAddressViolation
+
+text \These are logical errors produced by the language. In practice, some of these errors would never
+      be caught due to the inherent spatial safety guarantees given by capabilities.
+      This corresponds to LogicErr in the paper~\cite{park_2022}. \\
+      NOTE: Unhandled corresponds to a custom error not covered by the other cases of \emph{logicerrtype}. One can provide
+      the custom error as a string, but here, for custom errors, we leave it empty to simplify the
+      proof. Ultimately, the important point is that the memory model can still catch custom errors.\
+datatype logicerrtype =
+  UseAfterFree
+  | BufferOverrun
+  | MissingResource
+  | WrongMemVal
+  | MemoryNotFreed
+  | Unhandled "String.literal"
+
+text \We make the distinction between the error types. This corresponds to Err in the paper~\cite{park_2022}.\
+datatype errtype =
+  C2Err c2errtype
+  | LogicErr logicerrtype
+
+text \Finally, we have the `return' type $\mathcal{R}\ \rho$ in the paper~\cite{park_2022}.\
+datatype 'a result =
+  Success (res: 'a)
+  | Error (err: errtype)
+
+text \In this theory, we concretise the notion of blocks.\
+\ \While we can use int as blocks, integer makes code execution more efficient\
+type_synonym block = integer
+type_synonym memcap = "block mem_capability"
+type_synonym cap = "block capability"
+
+text \Because \texttt{sizeof} depends on the architecture, it is provided by the memory model. We also
+      use uncompressed capabilities.\
+definition sizeof :: "cctype \ nat" ("|_|\<^sub>\")
+  where
+  "sizeof \ \ case \ of
+     Uint8 \ 1
+   | Sint8 \ 1
+   | Uint16 \ 2
+   | Sint16 \ 2
+   | Uint32 \ 4
+   | Sint32 \ 4
+   | Uint64 \ 8
+   | Sint64 \ 8
+   | Cap \ 32"
+
+text \We provide some helper lemmas.\
+lemma size_type_align:
+  assumes "|t|\<^sub>\ = x"
+  shows "\ n. 
2 ^ n = x" +proof (cases t) + case Uint8 + then show ?thesis + using assms + unfolding sizeof_def + by fastforce +next + case Sint8 + then show ?thesis + using assms + unfolding sizeof_def + by fastforce +next + case Uint16 + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=1 in exI) fastforce +next + case Sint16 + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=1 in exI) fastforce +next + case Uint32 + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=2 in exI) fastforce +next + case Sint32 + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=2 in exI) fastforce +next + case Uint64 + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=3 in exI) fastforce +next + case Sint64 + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=3 in exI) fastforce +next + case Cap + then show ?thesis + using assms + unfolding sizeof_def + by (rule_tac x=5 in exI) fastforce +qed + +lemma memval_size_u8: + "|memval_type (Uint8_v v)|\<^sub>\ = 1" + unfolding sizeof_def + by fastforce + +lemma memval_size_s8: + "|memval_type (Sint8_v v)|\<^sub>\ = 1" + unfolding sizeof_def + by fastforce + +lemma memval_size_u16: + "|memval_type (Uint16_v v)|\<^sub>\ = 2" + unfolding sizeof_def + by fastforce + +lemma memval_size_s16: + "|memval_type (Sint16_v v)|\<^sub>\ = 2" + unfolding sizeof_def + by fastforce + +lemma memval_size_u32: + "|memval_type (Uint32_v v)|\<^sub>\ = 4" + unfolding sizeof_def + by fastforce + +lemma memval_size_s32: + "|memval_type (Sint32_v v)|\<^sub>\ = 4" + unfolding sizeof_def + by fastforce + +lemma memval_size_u64: + "|memval_type (Uint64_v v)|\<^sub>\ = 8" + unfolding sizeof_def + by fastforce + +lemma memval_size_s64: + "|memval_type (Sint64_v v)|\<^sub>\ = 8" + unfolding sizeof_def + by fastforce + +lemma memval_size_cap: + "|memval_type (Cap_v v)|\<^sub>\ = 32" + unfolding sizeof_def + by fastforce + +lemmas memval_size_types = memval_size_u8 memval_size_s8 memval_size_u16 memval_size_s16 memval_size_u32 memval_size_s32 memval_size_u64 memval_size_s64 memval_size_cap + +corollary memval_size_u16_eq_word_split_len: + assumes "val = Uint16_v v" + shows "|memval_type val|\<^sub>\ = length (u16_split v)" + using assms memval_size_u16 flatten_u16_length + by force + +corollary memval_size_s16_eq_word_split_len: + assumes "val = Sint16_v v" + shows "|memval_type val|\<^sub>\ = length (s16_split v)" + using assms memval_size_s16 flatten_s16_length + by force + +corollary memval_size_u32_eq_word_split_len: + assumes "val = Uint32_v v" + shows "|memval_type val|\<^sub>\ = length (flatten_u32 v)" + using assms memval_size_u32 flatten_u32_length + by force + +corollary memval_size_s32_eq_word_split_len: + assumes "val = Sint32_v v" + shows "|memval_type val|\<^sub>\ = length (flatten_s32 v)" + using assms memval_size_s32 flatten_s32_length + by force + +corollary memval_size_u64_eq_word_split_len: + assumes "val = Uint64_v v" + shows "|memval_type val|\<^sub>\ = length (flatten_u64 v)" + using assms memval_size_u64 flatten_u64_length + by force + +corollary memval_size_s64_eq_word_split_len: + assumes "val = Sint64_v v" + shows "|memval_type val|\<^sub>\ = length (flatten_s64 v)" + using assms memval_size_s64 flatten_s64_length + by force + +lemma sizeof_nonzero: + "|t|\<^sub>\ > 0" + by (simp add: sizeof_def split: cctype.split) + +text \We prove that integer is a countable type.\ +instance int :: comp_countable .. 
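+
+text \As a small illustrative sanity check (these two lemmas are not needed in the rest of the
+      development), the concrete sizes of a capability and of a 64-bit integer follow directly
+      from the definition of \texttt{sizeof}, in the same way as the size lemmas above.\
+(* Illustrative examples only; they mirror the memval_size lemmas and are not used later. *)
+lemma sizeof_cap_example:
+  "|Cap|\<^sub>\ = 32"
+  unfolding sizeof_def
+  by fastforce
+
+lemma sizeof_uint64_example:
+  "|Uint64|\<^sub>\ = 8"
+  unfolding sizeof_def
+  by fastforce
+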
+lemma integer_encode_eq: "(int_encode \ int_of_integer) x = (int_encode \ int_of_integer) y \ x = y"
+  using int_encode_eq integer_eq_iff
+  by auto
+
+instance integer :: countable
+  by (rule countable_classI[of "int_encode \ int_of_integer"]) (simp only: integer_encode_eq)
+
+instance integer :: comp_countable ..
+
+section \Memory\
+text \In this section, we formalise the heap and prove some initial properties.\
+subsection \Definitions\
+
+text \First, we provide $\mathcal{V}_\mathcal{M}$---refer to~\cite{park_2022} for the definition.
+      We note that this representation allows us to make the distinction between what is a
+      capability and what is a primitive value stored in memory. We can define a
+      tag-preserving \texttt{memcpy} by checking ahead whether there are valid capabilities stored
+      in memory or whether there are simply bytes. The downside to this approach is that writing
+      primitive values over locations where capabilities were stored---and vice versa---will lead
+      to an undefined load operation. However, this tends not to be a big problem, as (1) overwritten capabilities
+      are tag-invalidated anyway, so the capabilities cannot be dereferenced even if the user
+      obtained the capability somehow, and (2) for legacy C programs that do not have access to
+      CHERI library functions, there is no way to access the metadata of the invalidated
+      capabilities. For compatibility purposes, this poses hardly any problems.\
+datatype memval =
+  Byte (of_byte: "8 word")
+  | ACap (of_cap: "memcap") (of_nth: "nat")
+
+text \In general, the bounds are irrelevant, as capability bounds ensure spatial safety. We add bounds
+      in the heap so that we can incorporate \textit{hybrid} CHERI C programs in the future, where
+      pointers and capabilities co-exist, but strictly speaking, this is not required in
+      \textit{purecap} CHERI C programs, which is what this memory model is based on. Ultimately,
+      this is the pair of mappings defined in the paper~\cite{park_2022}.\
+record object =
+  bounds :: "nat \ nat"
+  content :: "(nat, memval) mapping"
+  tags :: "(nat, bool) mapping"
+
+text \t is the datatype that allows us to distinguish between blocks that have been freed and
+      blocks that are still valid.\
+datatype t =
+  Freed
+  | Map (the_map: "object")
+
+text \heap\_map in heap is essentially $\mathcal{H}$ defined in the paper~\cite{park_2022}. We
+      extend the structure and keep track of the next block for the allocator for efficiency---much
+      like CompCert's C memory model does~\cite{leroy_2012}.\
+record heap =
+  next_block :: "block"
+  heap_map :: "(block, t) mapping"
+
+definition memval_is_byte :: "memval \ bool"
+  where
+  "memval_is_byte m \ case m of Byte _ \ True | ACap _ _ \ False"
+
+abbreviation memval_is_cap :: "memval \ bool"
+  where
+  "memval_is_cap m \ \ memval_is_byte m"
+
+lemma memval_byte:
+  "memval_is_byte m \ \ b. m = Byte b"
+  by (simp add: memval_is_byte_def split: memval.split_asm)
+
+lemma memval_byte_not_memcap:
+  "memval_is_byte m \ m \ ACap c n"
+  by (simp add: memval_is_byte_def split: memval.split_asm)
+
+lemma memval_memcap:
+  "memval_is_cap m \ \ c n. 
m = ACap c n" + by (simp add: memval_is_byte_def split: memval.split_asm) + +lemma memval_memcap_not_byte: + "memval_is_cap m \ m \ Byte b" + by (simp add: memval_is_byte_def split: memval.split_asm) + +subsection \Properties\ +text \We prove that the heap is an instance of separation algebra.\ +instantiation unit :: cancellative_sep_algebra +begin +definition "0 \ ()" +definition "u1 + u2 = ()" +definition "(u1::unit) ## u2 \ True" +instance + by (standard; (blast | simp add: sep_disj_unit_def)) +end + +instantiation nat :: cancellative_sep_algebra +begin +definition "(n1::nat) ## n2 \ True" +instance + by (standard; (blast | simp add: sep_disj_nat_def)) +end + +text \This proof ultimately shows that heap\_map forms a separation algebra.\ +instantiation mapping :: (type, type) cancellative_sep_algebra +begin + +definition zero_map_def: "0 \ Mapping.empty" +definition plus_map_def: "m1 + m2 \ Mapping ((Mapping.lookup m1) ++ (Mapping.lookup m2))" +definition sep_disj_map_def: "m1 ## m2 \ Mapping.keys m1 \ Mapping.keys m2 = {}" + +instance +proof + show "\x :: ('a, 'b) mapping. x ## 0" + by (simp add: sep_disj_map_def zero_map_def) +next + show "\x :: ('a, 'b) mapping. x + 0 = x" + by (simp add: sep_disj_map_def Mapping.keys_def zero_map_def plus_map_def Mapping.empty.abs_eq Mapping.lookup.abs_eq mapping_eqI) +next + show "\x y :: ('a, 'b) mapping. x ## y \ y ## x" + using sep_disj_map_def + by auto +next + show "\x y :: ('a, 'b) mapping. x ## y \ x + y = y + x" + using map_add_comm + by (fastforce simp add: sep_disj_map_def Mapping.keys_def zero_map_def plus_map_def Mapping.lookup_def) +next + show "\x y z :: ('a, 'b) mapping. \x ## y; y ## z; x ## z\ \ x + y + z = x + (y + z)" + by (simp add: sep_disj_map_def Mapping.keys_def zero_map_def plus_map_def Mapping.lookup_def Mapping_inverse) +next + show "\x y z :: ('a, 'b) mapping. \x ## y + z; y ## z\ \ x ## y" + by (simp add: sep_disj_map_def Mapping.keys_def zero_map_def plus_map_def Mapping.lookup_def map_add_comm) + (metis (no_types, opaque_lifting) Mapping.keys.abs_eq Mapping.keys.rep_eq disjoint_iff domIff map_add_dom_app_simps(3)) +next + show "\x y z :: ('a, 'b) mapping. \x ## y + z; y ## z\ \ x + y ## z" + by (simp add: sep_disj_map_def Mapping.keys_def zero_map_def plus_map_def Mapping.lookup_def map_add_comm Mapping_inverse inf_commute inf_sup_distrib1) +next + show "\x z y :: ('a, 'b) mapping. \x + z = y + z; x ## z; y ## z\ \ x = y" + by (simp add: plus_map_def sep_disj_map_def) + (metis (mono_tags, opaque_lifting) Mapping.keys.abs_eq Mapping.lookup.abs_eq disjoint_iff domIff map_add_dom_app_simps(3) mapping_eqI) +qed +end + +instantiation heap_ext :: (cancellative_sep_algebra) cancellative_sep_algebra +begin +definition "0 :: 'a heap_scheme \ \ next_block = 0, heap_map = Mapping.empty, \ = 0 \" +definition "(m1 :: 'a heap_scheme) + (m2 :: 'a heap_scheme) \ + \ next_block = next_block m1 + next_block m2, + heap_map = Mapping ((Mapping.lookup (heap_map m1)) ++ (Mapping.lookup (heap_map m2))), + \ = heap.more m1 + heap.more m2 \" +definition "(m1 :: 'a heap_scheme) ## (m2 :: 'a heap_scheme) \ + Mapping.keys (heap_map m1) \ Mapping.keys (heap_map m2) = {} + \ heap.more m1 ## heap.more m2" +instance +proof + show "\x :: 'a heap_scheme. x ## 0" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def) +next + show "\x y :: 'a heap_scheme. x ## y \ y ## x" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def inf_commute sep_disj_commute) +next + show "\x :: 'a heap_scheme. 
x + 0 = x" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def inf_commute sep_disj_commute Mapping.empty_def Mapping.lookup.abs_eq) + (simp add: Mapping.lookup.rep_eq rep_inverse) +next + show "\x y :: 'a heap_scheme. x ## y \ x + y = y + x" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def) + (metis keys_dom_lookup map_add_comm sep_add_commute) +next + show "\x y z :: 'a heap_scheme. \x ## y; y ## z; x ## z\ \ x + y + z = x + (y + z)" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def Mapping.lookup.abs_eq sep_add_assoc) +next + show "\x y z :: 'a heap_scheme. \x ## y + z; y ## z\ \ x ## y" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def) + (metis plus_map_def sep_disj_addD sep_disj_map_def) +next + show "\x y z :: 'a heap_scheme. \x ## y + z; y ## z\ \ x + y ## z" + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def Mapping.lookup.abs_eq disjoint_iff keys_dom_lookup sep_disj_addI1) +next + show "\x z y :: 'a heap_scheme. \x + z = y + z; x ## z; y ## z\ \ x = y " + by (simp add: plus_heap_ext_def sep_disj_heap_ext_def zero_heap_ext_def) + (metis heap.equality plus_map_def sep_add_cancel sep_disj_map_def) +qed +end + +instantiation mem_capability_ext :: (comp_countable, zero) zero +begin +definition "0 :: ('a, 'b) mem_capability_scheme \ + \ block_id = 0, + offset = 0, + base = 0, + len = 0, + perm_load = False, + perm_cap_load = False, + perm_store = False, + perm_cap_store = False, + perm_cap_store_local = False, + perm_global = False, + \ = 0\" +instance .. +end + +subclass (in comp_countable) zero . + +instantiation capability_ext :: (zero) zero +begin +definition "0 \ \ tag = False, \ = 0\" +instance .. +end + +text \Section 4.5 of CHERI C/C++ Programming Guide defines what a \texttt{NULL} capability is~\cite{watson_cc_2019}.\ +definition null_capability :: "cap" ("NULL") + where + "NULL \ 0" + +context + notes null_capability_def[simp] +begin + +lemma null_capability_block_id[simp]: + "block_id NULL = 0" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_offset[simp]: + "offset NULL = 0" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_base[simp]: + "base NULL = 0" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_len[simp]: + "len NULL = 0" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_perm_load[simp]: + "perm_load NULL = False" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_perm_cap_load[simp]: + "perm_cap_load NULL = False" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_perm_store[simp]: + "perm_store NULL = False" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_perm_cap_store[simp]: + "perm_cap_store NULL = False" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_perm_cap_store_local[simp]: + "perm_cap_store_local NULL = False" + by (simp add: zero_mem_capability_ext_def) + +lemma null_capability_tag[simp]: + "tag NULL = False" + by (simp add: zero_capability_ext_def zero_mem_capability_ext_def) + +end + +text \Here, we define the initial heap.\ +definition init_heap :: "heap" + where + "init_heap \ 0 \ next_block := 1 \" + +abbreviation cap_offset :: "nat \ nat" + where + "cap_offset p \ if p mod |Cap|\<^sub>\ = 0 then p else p - p mod |Cap|\<^sub>\" + +text \We state the well-formedness property $\mathcal{W}^\mathcal{C}_f$ stated in the paper~\cite{park_2022}.\ +definition wellformed :: 
"(block, t) mapping \ bool" ("\\<^sub>\/(_/)") + where + "\\<^sub>\(h) \ + \ b obj. Mapping.lookup h b = Some (Map obj) + \ Set.filter (\x. x mod |Cap|\<^sub>\ \ 0) (Mapping.keys (tags obj)) = {}" + +lemma init_heap_empty: + "Mapping.keys (heap_map init_heap) = {}" + unfolding init_heap_def zero_heap_ext_def + by simp + +text \Below shows $\mathcal{W}^\mathcal{C}_f(\mu_0)$\ +lemma init_wellformed: + "\\<^sub>\(heap_map init_heap)" + unfolding init_heap_def wellformed_def zero_heap_ext_def + by simp + +lemma mapping_lookup_disj1: + "m1 ## m2 \ Mapping.lookup m1 n = Some x \ Mapping.lookup (m1 + m2) n = Some x" + by (metis Mapping.keys.rep_eq Mapping.lookup.abs_eq Mapping.lookup.rep_eq disjoint_iff is_none_simps(2) keys_is_none_rep map_add_dom_app_simps(3) plus_map_def sep_disj_map_def) + +lemma mapping_lookup_disj2: + "m1 ## m2 \ Mapping.lookup m2 n = Some x \ Mapping.lookup (m1 + m2) n = Some x" + by (metis Mapping.keys.rep_eq Mapping.lookup.abs_eq Mapping.lookup.rep_eq disjoint_iff is_none_simps(2) keys_is_none_rep map_add_dom_app_simps(2) plus_map_def sep_disj_map_def) + +text \Below shows that well-formedness is composition-compatible\ +lemma "heap_map h1 ## heap_map h2 \ \\<^sub>\(heap_map h1 + heap_map h2) + \ \\<^sub>\(heap_map h1) \ \\<^sub>\(heap_map h2)" + unfolding wellformed_def + by (safe, erule_tac x=b in allE, erule_tac x=obj in allE) + (fastforce intro: mapping_lookup_disj1 mapping_lookup_disj2)+ + +section \Helper functions and lemmas\ +primrec is_memval_defined :: "(nat, memval) mapping \ nat \ nat \ bool" + where + "is_memval_defined _ _ 0 = True" +| "is_memval_defined m off (Suc siz) = ((off \ Mapping.keys m) \ is_memval_defined m (Suc off) siz)" + +primrec is_contiguous_bytes :: "(nat, memval) mapping \ nat \ nat \ bool" + where + "is_contiguous_bytes _ _ 0 = True" +| "is_contiguous_bytes m off (Suc siz) = ((off \ Mapping.keys m) + \ memval_is_byte (the (Mapping.lookup m off)) + \ is_contiguous_bytes m (Suc off) siz)" + +definition get_cap :: "(nat, memval) mapping \ nat \ memcap" + where + "get_cap m off = of_cap (the (Mapping.lookup m off))" + +fun is_cap :: "(nat, memval) mapping \ nat \ bool" + where + "is_cap m off = (off \ Mapping.keys m \ memval_is_cap (the (Mapping.lookup m off)))" + +primrec is_contiguous_cap :: "(nat, memval) mapping \ memcap \ nat \ nat \ bool" + where + "is_contiguous_cap _ _ _ 0 = True" +| "is_contiguous_cap m c off (Suc siz) = ((off \ Mapping.keys m) + \ memval_is_cap (the (Mapping.lookup m off)) + \ of_cap (the (Mapping.lookup m off)) = c + \ of_nth (the (Mapping.lookup m off)) = siz + \ is_contiguous_cap m c (Suc off) siz)" + +primrec is_contiguous_zeros_prim :: "(nat, memval) mapping \ nat \ nat \ bool" + where + "is_contiguous_zeros_prim _ _ 0 = True" +| "is_contiguous_zeros_prim m off (Suc siz) = (Mapping.lookup m off = Some (Byte 0) + \ is_contiguous_zeros_prim m (Suc off) siz)" + +definition is_contiguous_zeros :: "(nat, memval) mapping \ nat \ nat \ bool" + where + "is_contiguous_zeros m off siz \ \ ofs \ off. 
ofs < off + siz \ Mapping.lookup m ofs = Some (Byte 0)" + +lemma is_contiguous_zeros_code[code]: + "is_contiguous_zeros m off siz = is_contiguous_zeros_prim m off siz" +proof safe + show "is_contiguous_zeros m off siz \ is_contiguous_zeros_prim m off siz" + unfolding is_contiguous_zeros_def + proof (induct siz arbitrary: off) + case 0 + thus ?case by simp + next + case (Suc siz) + thus ?case + by fastforce + qed +next + show "is_contiguous_zeros_prim m off siz \ is_contiguous_zeros m off siz" + unfolding is_contiguous_zeros_def + proof (induct siz arbitrary: off) + case 0 + thus ?case + by auto + next + case (Suc siz) + have alt: "is_contiguous_zeros_prim m (Suc off) siz" + using Suc(2) is_contiguous_zeros_prim.simps(2)[where ?m=m and ?off=off and ?siz=siz] + by blast + have add_simp: "Suc off + siz = off + Suc siz" + by simp + show ?case + using Suc(1)[where ?off="Suc off", OF alt, simplified add_simp le_eq_less_or_eq Suc_le_eq] + Suc(2) Suc_le_eq le_eq_less_or_eq + by auto + qed +qed + + + +primrec retrieve_bytes :: "(nat, memval) mapping \ nat \ nat \ 8 word list" + where + "retrieve_bytes m _ 0 = []" +| "retrieve_bytes m off (Suc siz) = of_byte (the (Mapping.lookup m off)) # retrieve_bytes m (Suc off) siz" + +primrec is_same_cap :: "(nat, memval) mapping \ memcap \ nat \ nat \ bool" + where + "is_same_cap _ _ _ 0 = True" +| "is_same_cap m c off (Suc siz) = (of_cap (the (Mapping.lookup m off)) = c \ is_same_cap m c (Suc off) siz)" + +(* tag retrieval must be based on offset now *) +definition retrieve_tval :: "object \ nat \ cctype \ bool \ block ccval" + where + "retrieve_tval obj off typ pcl \ + if is_contiguous_bytes (content obj) off |typ|\<^sub>\ then + (case typ of + Uint8 \ Uint8_v (decode_u8_list (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Sint8 \ Sint8_v (decode_s8_list (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Uint16 \ Uint16_v (cat_u16 (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Sint16 \ Sint16_v (cat_s16 (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Uint32 \ Uint32_v (cat_u32 (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Sint32 \ Sint32_v (cat_s32 (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Uint64 \ Uint64_v (cat_u64 (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Sint64 \ Sint64_v (cat_s64 (retrieve_bytes (content obj) off |typ|\<^sub>\)) + | Cap \ if is_contiguous_zeros (content obj) off |typ|\<^sub>\ then Cap_v NULL else Undef) + else if is_cap (content obj) off then + let cap = get_cap (content obj) off in + let tv = the (Mapping.lookup (tags obj) (cap_offset off)) in + let t = (case pcl of False \ False | True \ tv) in + let cv = mem_capability.extend cap \ tag = t \ in + let nth_frag = of_nth (the (Mapping.lookup (content obj) off)) in + (case typ of + Uint8 \ Cap_v_frag (mem_capability.extend cap \ tag = False \) nth_frag + | Sint8 \ Cap_v_frag (mem_capability.extend cap \ tag = False \) nth_frag + | Cap \ if is_contiguous_cap (content obj) cap off |typ|\<^sub>\ then Cap_v cv else Undef + | _ \ Undef) + else Undef" + +primrec store_bytes :: "(nat, memval) mapping \ nat \ 8 word list \ (nat, memval) mapping" + where + "store_bytes obj _ [] = obj" +| "store_bytes obj off (v # vs) = store_bytes (Mapping.update off (Byte v) obj) (Suc off) vs" + +primrec store_cap :: "(nat, memval) mapping \ nat \ cap \ nat \ (nat, memval) mapping" + where + "store_cap obj _ _ 0 = obj" +| "store_cap obj off cap (Suc n) = store_cap (Mapping.update off (ACap (mem_capability.truncate cap) n) obj) (Suc off) cap n" + +abbreviation 
store_tag :: "(nat, bool) mapping \ nat \ bool \ (nat, bool) mapping" + where + "store_tag obj off tg \ Mapping.update off tg obj" + +definition store_tval :: "object \ nat \ block ccval \ object" + where + "store_tval obj off val \ + case val of Uint8_v v \ obj \ content := store_bytes (content obj) off (encode_u8_list v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Sint8_v v \ obj \ content := store_bytes (content obj) off (encode_s8_list v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Uint16_v v \ obj \ content := store_bytes (content obj) off (u16_split v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Sint16_v v \ obj \ content := store_bytes (content obj) off (s16_split v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Uint32_v v \ obj \ content := store_bytes (content obj) off (flatten_u32 v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Sint32_v v \ obj \ content := store_bytes (content obj) off (flatten_s32 v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Uint64_v v \ obj \ content := store_bytes (content obj) off (flatten_u64 v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Sint64_v v \ obj \ content := store_bytes (content obj) off (flatten_s64 v), + tags := store_tag (tags obj) (cap_offset off) False \ + | Cap_v c \ obj \ content := store_cap (content obj) off c |Cap|\<^sub>\, + tags := store_tag (tags obj) (cap_offset off) (tag c) \ + | Cap_v_frag c n \ obj \ content := Mapping.update off (ACap (mem_capability.truncate c) n) (content obj), + tags := store_tag (tags obj) (cap_offset off) False\" + +lemma stored_bytes_prev: + assumes "x < off" + shows "Mapping.lookup (store_bytes obj off vs) x = Mapping.lookup obj x" + using assms + by (induct vs arbitrary: obj off) fastforce+ + +lemma stored_tags_prev: + assumes "x < off" + shows "Mapping.lookup (store_tag obj off vs) x = Mapping.lookup obj x" + using assms + by force + +lemma stored_cap_prev: + assumes "x < off" + shows "Mapping.lookup (store_cap obj off cap siz) x = Mapping.lookup obj x" + using assms + by (induct siz arbitrary: obj off) fastforce+ + +lemma stored_bytes_instant_correctness: + "Mapping.lookup (store_bytes obj off (v # vs)) off = Some (Byte v)" +proof (induct vs arbitrary: obj off) + case Nil + thus ?case by force +next + case (Cons a vs) + thus ?case using stored_bytes_prev Suc_eq_plus1 lessI store_bytes.simps(2) + by metis +qed + +lemma stored_cap_instant_correctness: + "Mapping.lookup (store_cap obj off cap (Suc siz)) off = Some (ACap (mem_capability.truncate cap) siz)" +proof (induct siz arbitrary: obj off) + case 0 + thus ?case by force +next + case (Suc siz) + thus ?case using stored_cap_prev Suc_eq_plus1 lessI store_cap.simps(2) lookup_update + by metis +qed + +lemma numeral_4_eq_4: "4 = Suc (Suc (Suc (Suc 0)))" + by (simp add: eval_nat_numeral) + +lemma numeral_5_eq_5: "5 = Suc (Suc (Suc (Suc (Suc 0))))" + by (simp add: eval_nat_numeral) + +lemma numeral_6_eq_6: "6 = Suc (Suc (Suc (Suc (Suc (Suc 0)))))" + by (simp add: eval_nat_numeral) + +lemma numeral_7_eq_7: "7 = Suc (Suc (Suc (Suc (Suc (Suc (Suc 0))))))" + by (simp add: eval_nat_numeral) + +lemma numeral_8_eq_8: "8 = Suc (Suc (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))" + by (simp add: eval_nat_numeral) + +lemma list_length_2_realise: + "length ls = 2 \ \ n0 n1. 
ls = [n0, n1]" + by (metis One_nat_def Suc_length_conv add_diff_cancel_right' len_gt_0 len_of_finite_2_def + list.size(4) list_exhaust_size_eq0 list_exhaust_size_gt0 one_add_one) + +lemma list_length_4_realise: + "length ls = 4 \ \ n0 n1 n2 n3. ls = [n0, n1, n2, n3]" + by (metis list_exhaust_size_eq0 list_exhaust_size_gt0 numeral_4_eq_4 size_Cons_lem_eq zero_less_Suc) + +lemma list_length_8_realise: + "length ls = 8 \ \ n0 n1 n2 n3 n4 n5 n6 n7. ls = [n0, n1, n2, n3, n4, n5, n6, n7]" + using list_exhaust_size_eq0 list_exhaust_size_gt0 numeral_8_eq_8 size_Cons_lem_eq zero_less_Suc + by smt + +lemma u16_split_realise: + "\ b0 b1. u16_split v = [b0, b1]" + using list_length_2_realise[where ?ls="u16_split v", OF flatten_u16_length[where ?vs=v]] + by assumption + +lemma s16_split_realise: + "\ b0 b1. s16_split v = [b0, b1]" + using list_length_2_realise[where ?ls="s16_split v", OF flatten_s16_length[where ?vs=v]] + by assumption + +lemma u32_split_realise: + "\ b0 b1 b2 b3. flatten_u32 v = [b0, b1, b2, b3]" + using list_length_4_realise[where ?ls="flatten_u32 v", OF flatten_u32_length[where ?vs=v]] + by assumption + +lemma s32_split_realise: + "\ b0 b1 b2 b3. flatten_s32 v = [b0, b1, b2, b3]" + using list_length_4_realise[where ?ls="flatten_s32 v", OF flatten_s32_length[where ?vs=v]] + by assumption + +lemma u64_split_realise: + "\ b0 b1 b2 b3 b4 b5 b6 b7. flatten_u64 v = [b0, b1, b2, b3, b4, b5, b6, b7]" + using list_length_8_realise[where ?ls="flatten_u64 v", OF flatten_u64_length[where ?vs=v]] + by assumption + +lemma s64_split_realise: + "\ b0 b1 b2 b3 b4 b5 b6 b7. flatten_s64 v = [b0, b1, b2, b3, b4, b5, b6, b7]" + using list_length_8_realise[where ?ls="flatten_s64 v", OF flatten_s64_length[where ?vs=v]] + by assumption + +lemma store_bytes_u16: + shows "off \ Mapping.keys (store_bytes m off (u16_split v))" + and "Suc off \ Mapping.keys (store_bytes m off (u16_split v))" + and "\ b0. Mapping.lookup (store_bytes m off (u16_split v)) off = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (u16_split v)) (Suc off) = Some (Byte b1)" +proof - + show "off \ Mapping.keys (store_bytes m off (u16_split v))" + by (metis (no_types, opaque_lifting) domIff u16_split_realise handy_if_lemma keys_dom_lookup + stored_bytes_instant_correctness) +next + show "Suc off \ Mapping.keys (store_bytes m off (u16_split v))" + by (metis (mono_tags, opaque_lifting) domIff u16_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (u16_split v)) off = Some (Byte b0)" + by (metis u16_split_realise stored_bytes_instant_correctness) +next + show "\ b1. Mapping.lookup (store_bytes m off (u16_split v)) (Suc off) = Some (Byte b1)" + by (metis u16_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +qed + +lemma store_bytes_s16: + shows "off \ Mapping.keys (store_bytes m off (s16_split v))" + and "Suc off \ Mapping.keys (store_bytes m off (s16_split v))" + and "\ b0. Mapping.lookup (store_bytes m off (s16_split v)) off = Some (Byte b0)" + and "\ b1. 
Mapping.lookup (store_bytes m off (s16_split v)) (Suc off) = Some (Byte b1)" +proof - + show "off \ Mapping.keys (store_bytes m off (s16_split v))" + by (metis (no_types, opaque_lifting) domIff s16_split_realise handy_if_lemma keys_dom_lookup + stored_bytes_instant_correctness) +next + show "Suc off \ Mapping.keys (store_bytes m off (s16_split v))" + by (metis (mono_tags, opaque_lifting) domIff s16_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (s16_split v)) off = Some (Byte b0)" + by (metis s16_split_realise stored_bytes_instant_correctness) +next + show "\ b1. Mapping.lookup (store_bytes m off (s16_split v)) (Suc off) = Some (Byte b1)" + by (metis s16_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +qed + +lemma store_bytes_u32: + shows "off \ Mapping.keys (store_bytes m off (flatten_u32 v))" + and "Suc off \ Mapping.keys (store_bytes m off (flatten_u32 v))" + and "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_u32 v))" + and "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_u32 v))" + and "\ b0. Mapping.lookup (store_bytes m off (flatten_u32 v)) off = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (flatten_u32 v)) (Suc off) = Some (Byte b1)" + and "\ b2. Mapping.lookup (store_bytes m off (flatten_u32 v)) (Suc (Suc off)) = Some (Byte b2)" + and "\ b3. Mapping.lookup (store_bytes m off (flatten_u32 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" +proof - + show "off \ Mapping.keys (store_bytes m off (flatten_u32 v))" + by (metis (no_types, opaque_lifting) domIff handy_if_lemma keys_dom_lookup + stored_bytes_instant_correctness u32_split_realise) +next + show "Suc off \ Mapping.keys (store_bytes m off (flatten_u32 v))" + by (metis (mono_tags, opaque_lifting) domIff u32_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_u32 v))" + by (metis (mono_tags, opaque_lifting) domIff u32_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_u32 v))" + by (metis (mono_tags, opaque_lifting) domIff u32_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (flatten_u32 v)) off = Some (Byte b0)" + by (metis u32_split_realise stored_bytes_instant_correctness) +next + show "\ b1. Mapping.lookup (store_bytes m off (flatten_u32 v)) (Suc off) = Some (Byte b1)" + by (metis u32_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b2. Mapping.lookup (store_bytes m off (flatten_u32 v)) (Suc (Suc off)) = Some (Byte b2)" + by (metis u32_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b3. Mapping.lookup (store_bytes m off (flatten_u32 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" + by (metis u32_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +qed + +lemma store_bytes_s32: + shows "off \ Mapping.keys (store_bytes m off (flatten_s32 v))" + and "Suc off \ Mapping.keys (store_bytes m off (flatten_s32 v))" + and "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_s32 v))" + and "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_s32 v))" + and "\ b0. 
Mapping.lookup (store_bytes m off (flatten_s32 v)) off = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (flatten_s32 v)) (Suc off) = Some (Byte b1)" + and "\ b2. Mapping.lookup (store_bytes m off (flatten_s32 v)) (Suc (Suc off)) = Some (Byte b2)" + and "\ b3. Mapping.lookup (store_bytes m off (flatten_s32 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" +proof - + show "off \ Mapping.keys (store_bytes m off (flatten_s32 v))" + by (metis (no_types, opaque_lifting) domIff handy_if_lemma keys_dom_lookup + stored_bytes_instant_correctness s32_split_realise) +next + show "Suc off \ Mapping.keys (store_bytes m off (flatten_s32 v))" + by (metis (mono_tags, opaque_lifting) domIff s32_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_s32 v))" + by (metis (mono_tags, opaque_lifting) domIff s32_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_s32 v))" + by (metis (mono_tags, opaque_lifting) domIff s32_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (flatten_s32 v)) off = Some (Byte b0)" + by (metis s32_split_realise stored_bytes_instant_correctness) +next + show "\ b1. Mapping.lookup (store_bytes m off (flatten_s32 v)) (Suc off) = Some (Byte b1)" + by (metis s32_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b2. Mapping.lookup (store_bytes m off (flatten_s32 v)) (Suc (Suc off)) = Some (Byte b2)" + by (metis s32_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b3. Mapping.lookup (store_bytes m off (flatten_s32 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" + by (metis s32_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +qed + +lemma store_bytes_u64: + shows "off \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc off \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc (Suc (Suc (Suc off))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc (Suc (Suc (Suc (Suc off)))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc (Suc (Suc (Suc (Suc (Suc off))))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "Suc (Suc (Suc (Suc (Suc (Suc (Suc off)))))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + and "\ b0. Mapping.lookup (store_bytes m off (flatten_u64 v)) off = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc off) = Some (Byte b1)" + and "\ b2. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc off)) = Some (Byte b2)" + and "\ b3. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" + and "\ b0. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc off)))) = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc (Suc off))))) = Some (Byte b1)" + and "\ b2. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc (Suc (Suc off)))))) = Some (Byte b2)" + and "\ b3. 
Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc (Suc (Suc (Suc off))))))) = Some (Byte b3)" +proof - + show "off \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (no_types, opaque_lifting) domIff handy_if_lemma keys_dom_lookup + stored_bytes_instant_correctness u64_split_realise) +next + show "Suc off \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc off))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc (Suc off)))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc (Suc (Suc off))))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc (Suc (Suc (Suc off)))))) \ Mapping.keys (store_bytes m off (flatten_u64 v))" + by (metis (mono_tags, opaque_lifting) domIff u64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (flatten_u64 v)) off = Some (Byte b0)" + by (metis u64_split_realise stored_bytes_instant_correctness) +next + show "\ b1. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc off) = Some (Byte b1)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b2. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc off)) = Some (Byte b2)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b3. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc off)))) = Some (Byte b0)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show"\ b1. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc (Suc off))))) = Some (Byte b1)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b2. Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc (Suc (Suc off)))))) = Some (Byte b2)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b3. 
Mapping.lookup (store_bytes m off (flatten_u64 v)) (Suc (Suc (Suc (Suc (Suc (Suc (Suc off))))))) = Some (Byte b3)" + by (metis u64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +qed + +lemma store_bytes_s64: + shows "off \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc off \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc (Suc (Suc (Suc off))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc (Suc (Suc (Suc (Suc off)))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc (Suc (Suc (Suc (Suc (Suc off))))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "Suc (Suc (Suc (Suc (Suc (Suc (Suc off)))))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + and "\ b0. Mapping.lookup (store_bytes m off (flatten_s64 v)) off = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc off) = Some (Byte b1)" + and "\ b2. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc off)) = Some (Byte b2)" + and "\ b3. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" + and "\ b0. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc off)))) = Some (Byte b0)" + and "\ b1. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc (Suc off))))) = Some (Byte b1)" + and "\ b2. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc (Suc (Suc off)))))) = Some (Byte b2)" + and "\ b3. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc (Suc (Suc (Suc off))))))) = Some (Byte b3)" +proof - + show "off \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (no_types, opaque_lifting) domIff handy_if_lemma keys_dom_lookup + stored_bytes_instant_correctness s64_split_realise) +next + show "Suc off \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc off) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc off)) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc off))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc (Suc off)))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc (Suc (Suc off))))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "Suc (Suc (Suc (Suc (Suc (Suc (Suc off)))))) \ Mapping.keys (store_bytes m off (flatten_s64 v))" + by (metis (mono_tags, opaque_lifting) 
domIff s64_split_realise handy_if_lemma keys_dom_lookup + store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (flatten_s64 v)) off = Some (Byte b0)" + by (metis s64_split_realise stored_bytes_instant_correctness) +next + show "\ b1. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc off) = Some (Byte b1)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b2. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc off)) = Some (Byte b2)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b3. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc off))) = Some (Byte b3)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b0. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc off)))) = Some (Byte b0)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show"\ b1. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc (Suc off))))) = Some (Byte b1)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b2. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc (Suc (Suc off)))))) = Some (Byte b2)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +next + show "\ b3. Mapping.lookup (store_bytes m off (flatten_s64 v)) (Suc (Suc (Suc (Suc (Suc (Suc (Suc off))))))) = Some (Byte b3)" + by (metis s64_split_realise store_bytes.simps(2) stored_bytes_instant_correctness) +qed + +corollary u16_store_bytes_imp_is_contiguous_bytes: + "is_contiguous_bytes (store_bytes m off (u16_split v)) off 2" + by (metis One_nat_def Suc_1 is_contiguous_bytes.simps(1) is_contiguous_bytes.simps(2) memval_memcap_not_byte option.sel store_bytes_u16) + +corollary s16_store_bytes_imp_is_contiguous_bytes: + "is_contiguous_bytes (store_bytes m off (s16_split v)) off 2" + by (metis One_nat_def Suc_1 is_contiguous_bytes.simps(1) is_contiguous_bytes.simps(2) memval_memcap_not_byte option.sel store_bytes_s16) + +corollary u32_store_bytes_imp_is_contiguous_bytes: + "is_contiguous_bytes (store_bytes m off (flatten_u32 v)) off 4" + by(simp add: numeral_4_eq_4, safe) + (simp add: store_bytes_u32, metis memval_memcap_not_byte option.sel store_bytes_u32)+ + +corollary s32_store_bytes_imp_is_contiguous_bytes: + "is_contiguous_bytes (store_bytes m off (flatten_s32 v)) off 4" + by(simp add: numeral_4_eq_4, safe) + (simp add: store_bytes_s32, metis memval_memcap_not_byte option.sel store_bytes_s32)+ + +corollary u64_store_bytes_imp_is_contiguous_bytes: + "is_contiguous_bytes (store_bytes m off (flatten_u64 v)) off 8" + by(simp add: numeral_8_eq_8, safe) + (simp add: store_bytes_u64, metis memval_memcap_not_byte option.sel store_bytes_u64)+ + +corollary s64_store_bytes_imp_is_contiguous_bytes: + "is_contiguous_bytes (store_bytes m off (flatten_s64 v)) off 8" + by(simp add: numeral_8_eq_8, safe) + (simp add: store_bytes_s64, metis memval_memcap_not_byte option.sel store_bytes_s64)+ + +lemma stored_tval_contiguous_bytes: + assumes "val \ Undef" + and "\ v. val \ Cap_v v" + and "\ v n. 
val \ Cap_v_frag v n" + shows "is_contiguous_bytes (content (store_tval obj off val)) off |memval_type val|\<^sub>\" + unfolding sizeof_def + by (simp add: assms store_tval_def memval_is_byte_def split: ccval.split) (presburger add: s16_store_bytes_imp_is_contiguous_bytes s32_store_bytes_imp_is_contiguous_bytes s64_store_bytes_imp_is_contiguous_bytes u16_store_bytes_imp_is_contiguous_bytes u32_store_bytes_imp_is_contiguous_bytes u64_store_bytes_imp_is_contiguous_bytes) + +lemma suc_of_32: + "32 = Suc 31" + by simp + +lemma store_cap_correct_dom: + shows "off \ Mapping.keys (store_cap m off cap 32)" + and "off + 1 \ Mapping.keys (store_cap m off cap 32)" + and "off + 2 \ Mapping.keys (store_cap m off cap 32)" + and "off + 3 \ Mapping.keys (store_cap m off cap 32)" + and "off + 4 \ Mapping.keys (store_cap m off cap 32)" + and "off + 5 \ Mapping.keys (store_cap m off cap 32)" + and "off + 6 \ Mapping.keys (store_cap m off cap 32)" + and "off + 7 \ Mapping.keys (store_cap m off cap 32)" + and "off + 8 \ Mapping.keys (store_cap m off cap 32)" + and "off + 9 \ Mapping.keys (store_cap m off cap 32)" + and "off + 10 \ Mapping.keys (store_cap m off cap 32)" + and "off + 11 \ Mapping.keys (store_cap m off cap 32)" + and "off + 12 \ Mapping.keys (store_cap m off cap 32)" + and "off + 13 \ Mapping.keys (store_cap m off cap 32)" + and "off + 14 \ Mapping.keys (store_cap m off cap 32)" + and "off + 15 \ Mapping.keys (store_cap m off cap 32)" + and "off + 16 \ Mapping.keys (store_cap m off cap 32)" + and "off + 17 \ Mapping.keys (store_cap m off cap 32)" + and "off + 18 \ Mapping.keys (store_cap m off cap 32)" + and "off + 19 \ Mapping.keys (store_cap m off cap 32)" + and "off + 20 \ Mapping.keys (store_cap m off cap 32)" + and "off + 21 \ Mapping.keys (store_cap m off cap 32)" + and "off + 22 \ Mapping.keys (store_cap m off cap 32)" + and "off + 23 \ Mapping.keys (store_cap m off cap 32)" + and "off + 24 \ Mapping.keys (store_cap m off cap 32)" + and "off + 25 \ Mapping.keys (store_cap m off cap 32)" + and "off + 26 \ Mapping.keys (store_cap m off cap 32)" + and "off + 27 \ Mapping.keys (store_cap m off cap 32)" + and "off + 28 \ Mapping.keys (store_cap m off cap 32)" + and "off + 29 \ Mapping.keys (store_cap m off cap 32)" + and "off + 30 \ Mapping.keys (store_cap m off cap 32)" + and "off + 31 \ Mapping.keys (store_cap m off cap 32)" +proof - qed (simp add: suc_of_32 domIff eval_nat_numeral(3) numeral_Bit0)+ + +lemma store_cap_correct_val: + shows "Mapping.lookup (store_cap m off cap 32) off = + Some (ACap (mem_capability.truncate cap) 31)" + and "Mapping.lookup (store_cap m off cap 32) (off + 1) = + Some (ACap (mem_capability.truncate cap) 30)" + and "Mapping.lookup (store_cap m off cap 32) (off + 2) = + Some (ACap (mem_capability.truncate cap) 29)" + and "Mapping.lookup (store_cap m off cap 32) (off + 3) = + Some (ACap (mem_capability.truncate cap) 28)" + and "Mapping.lookup (store_cap m off cap 32) (off + 4) = + Some (ACap (mem_capability.truncate cap) 27)" + and "Mapping.lookup (store_cap m off cap 32) (off + 5) = + Some (ACap (mem_capability.truncate cap) 26)" + and "Mapping.lookup (store_cap m off cap 32) (off + 6) = + Some (ACap (mem_capability.truncate cap) 25)" + and "Mapping.lookup (store_cap m off cap 32) (off + 7) = + Some (ACap (mem_capability.truncate cap) 24)" + and "Mapping.lookup (store_cap m off cap 32) (off + 8) = + Some (ACap (mem_capability.truncate cap) 23)" + and "Mapping.lookup (store_cap m off cap 32) (off + 9) = + Some (ACap (mem_capability.truncate cap) 22)" + and 
"Mapping.lookup (store_cap m off cap 32) (off + 10) = + Some (ACap (mem_capability.truncate cap) 21)" + and "Mapping.lookup (store_cap m off cap 32) (off + 11) = + Some (ACap (mem_capability.truncate cap) 20)" + and "Mapping.lookup (store_cap m off cap 32) (off + 12) = + Some (ACap (mem_capability.truncate cap) 19)" + and "Mapping.lookup (store_cap m off cap 32) (off + 13) = + Some (ACap (mem_capability.truncate cap) 18)" + and "Mapping.lookup (store_cap m off cap 32) (off + 14) = + Some (ACap (mem_capability.truncate cap) 17)" + and "Mapping.lookup (store_cap m off cap 32) (off + 15) = + Some (ACap (mem_capability.truncate cap) 16)" + and "Mapping.lookup (store_cap m off cap 32) (off + 16) = + Some (ACap (mem_capability.truncate cap) 15)" + and "Mapping.lookup (store_cap m off cap 32) (off + 17) = + Some (ACap (mem_capability.truncate cap) 14)" + and "Mapping.lookup (store_cap m off cap 32) (off + 18) = + Some (ACap (mem_capability.truncate cap) 13)" + and "Mapping.lookup (store_cap m off cap 32) (off + 19) = + Some (ACap (mem_capability.truncate cap) 12)" + and "Mapping.lookup (store_cap m off cap 32) (off + 20) = + Some (ACap (mem_capability.truncate cap) 11)" + and "Mapping.lookup (store_cap m off cap 32) (off + 21) = + Some (ACap (mem_capability.truncate cap) 10)" + and "Mapping.lookup (store_cap m off cap 32) (off + 22) = + Some (ACap (mem_capability.truncate cap) 9)" + and "Mapping.lookup (store_cap m off cap 32) (off + 23) = + Some (ACap (mem_capability.truncate cap) 8)" + and "Mapping.lookup (store_cap m off cap 32) (off + 24) = + Some (ACap (mem_capability.truncate cap) 7)" + and "Mapping.lookup (store_cap m off cap 32) (off + 25) = + Some (ACap (mem_capability.truncate cap) 6)" + and "Mapping.lookup (store_cap m off cap 32) (off + 26) = + Some (ACap (mem_capability.truncate cap) 5)" + and "Mapping.lookup (store_cap m off cap 32) (off + 27) = + Some (ACap (mem_capability.truncate cap) 4)" + and "Mapping.lookup (store_cap m off cap 32) (off + 28) = + Some (ACap (mem_capability.truncate cap) 3)" + and "Mapping.lookup (store_cap m off cap 32) (off + 29) = + Some (ACap (mem_capability.truncate cap) 2)" + and "Mapping.lookup (store_cap m off cap 32) (off + 30) = + Some (ACap (mem_capability.truncate cap) 1)" + and "Mapping.lookup (store_cap m off cap 32) (off + 31) = + Some (ACap (mem_capability.truncate cap) 0)" +proof - qed (simp add: stored_cap_instant_correctness suc_of_32 eval_nat_numeral(3) numeral_Bit0)+ + +corollary store_cap_imp_is_contiguous_cap: + "is_contiguous_cap (store_cap m off cap 32) (mem_capability.truncate cap) off 32" + using memval_byte_not_memcap + by (simp add: eval_nat_numeral(3) numeral_Bit0, blast) + +lemma stored_tval_is_cap: + assumes "val = Cap_v v" + shows "is_cap (content (store_tval obj off val)) off" + using memval_byte_not_memcap sizeof_def store_cap_correct_val(1) + by (auto simp add: assms store_tval_def conjI sizeof_def store_cap_correct_dom(1) split: ccval.split) + +lemma stored_tval_contiguous_cap: + assumes "val = Cap_v cap" + shows "is_contiguous_cap (content (store_tval obj off val)) (mem_capability.truncate cap) off |memval_type val|\<^sub>\" + using assms store_tval_def + by (simp add: sizeof_def store_cap_imp_is_contiguous_cap) + +lemma decode_encoded_u16_in_mem: + "cat_u16 (retrieve_bytes (content (store_tval obj off (Uint16_v x))) off |Uint16|\<^sub>\) = x" +proof - + have "of_byte (the (Mapping.lookup (store_bytes (content obj) off (u16_split x)) off)) = u16_split x ! 
0" + by (smt (verit, best) Some_to_the length_nth_simps(3) memval.sel(1) stored_bytes_instant_correctness u16_split_realise) + also have "of_byte (the (Mapping.lookup (store_bytes (content obj) off (u16_split x)) (Suc off))) = (u16_split x) ! 1" + by (metis One_nat_def length_nth_simps(3) memval.sel(1) nth_Cons_Suc option.sel store_bytes.simps(2) stored_bytes_instant_correctness u16_split_realise) + ultimately show ?thesis + by (clarsimp simp add: sizeof_def store_tval_def eval_nat_numeral(3), simp add: numeral_Bit0) + (metis cat_flatten_u16_eq length_nth_simps(3) nth_Cons_Suc u16_split_realise) +qed + +lemma decode_encoded_s16_in_mem: + "cat_s16 (retrieve_bytes (content (store_tval obj off (Sint16_v x))) off |Sint16|\<^sub>\) = x" +proof - + have "of_byte (the (Mapping.lookup (store_bytes (content obj) off (s16_split x)) off)) = s16_split x ! 0" + by (smt (verit, best) Some_to_the length_nth_simps(3) memval.sel(1) stored_bytes_instant_correctness s16_split_realise) + also have "of_byte (the (Mapping.lookup (store_bytes (content obj) off (s16_split x)) (Suc off))) = (s16_split x) ! 1" + by (metis One_nat_def length_nth_simps(3) memval.sel(1) nth_Cons_Suc option.sel store_bytes.simps(2) stored_bytes_instant_correctness s16_split_realise) + ultimately show ?thesis + by (clarsimp simp add: sizeof_def store_tval_def eval_nat_numeral(3), simp add: numeral_Bit0) + (metis cat_flatten_s16_eq length_nth_simps(3) nth_Cons_Suc s16_split_realise) +qed + +lemma decode_encoded_u32_in_mem: + "cat_u32 (retrieve_bytes (content (store_tval obj off (Uint32_v x))) off |Uint32|\<^sub>\) = x" +proof - + have fst: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u32 x)) off)) = flatten_u32 x ! 0" + by (metis length_nth_simps(3) memval.sel(1) option.sel stored_bytes_instant_correctness u32_split_realise) + have snd: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u32 x)) (Suc off))) = flatten_u32 x ! 1" + by (metis One_nat_def length_nth_simps(3) length_nth_simps(4) memval.sel(1) option.sel store_bytes.simps(2) stored_bytes_instant_correctness u32_split_realise) + have trd: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u32 x)) (Suc (Suc off)))) = flatten_u32 x ! 2" + by (metis One_nat_def Suc_1 length_nth_simps(3) length_nth_simps(4) memval.sel(1) option.sel store_bytes.simps(2) stored_bytes_instant_correctness u32_split_realise) + have fth: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u32 x)) (Suc (Suc (Suc off))))) = flatten_u32 x ! 3" + by (metis Some_to_the length_nth_simps(3) length_nth_simps(4) memval.sel(1) numeral_3_eq_3 store_bytes.simps(2) stored_bytes_instant_correctness u32_split_realise) + show ?thesis + by (clarsimp simp add: sizeof_def store_tval_def eval_nat_numeral(3), simp add: numeral_Bit0) + (smt (verit, del_insts) fst snd trd fth One_nat_def Suc_1 eval_nat_numeral(3) length_nth_simps(3) length_nth_simps(4) u32_split_realise word_rcat_rsplit) +qed + +lemma decode_encoded_s32_in_mem: + "cat_s32 (retrieve_bytes (content (store_tval obj off (Sint32_v x))) off |Sint32|\<^sub>\) = x" +proof - + have fst: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s32 x)) off)) = flatten_s32 x ! 0" + by (metis length_nth_simps(3) memval.sel(1) option.sel stored_bytes_instant_correctness s32_split_realise) + have snd: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s32 x)) (Suc off))) = flatten_s32 x ! 
1" + by (metis One_nat_def length_nth_simps(3) length_nth_simps(4) memval.sel(1) option.sel store_bytes.simps(2) stored_bytes_instant_correctness s32_split_realise) + have trd: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s32 x)) (Suc (Suc off)))) = flatten_s32 x ! 2" + by (metis One_nat_def Suc_1 length_nth_simps(3) length_nth_simps(4) memval.sel(1) option.sel store_bytes.simps(2) stored_bytes_instant_correctness s32_split_realise) + have fth: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s32 x)) (Suc (Suc (Suc off))))) = flatten_s32 x ! 3" + by (metis Some_to_the length_nth_simps(3) length_nth_simps(4) memval.sel(1) numeral_3_eq_3 store_bytes.simps(2) stored_bytes_instant_correctness s32_split_realise) + show ?thesis + by (clarsimp simp add: sizeof_def store_tval_def eval_nat_numeral(3), simp add: numeral_Bit0) + (smt (verit, del_insts) fst snd trd fth One_nat_def Suc_1 eval_nat_numeral(3) length_nth_simps(3) length_nth_simps(4) s32_split_realise word_rcat_rsplit) +qed + +lemma cat_flatten_u64_contents_eq: + "cat_u64 [flatten_u64 vs ! 0, flatten_u64 vs ! 1, flatten_u64 vs ! 2, flatten_u64 vs ! 3, flatten_u64 vs ! 4, flatten_u64 vs ! 5, flatten_u64 vs ! 6, flatten_u64 vs ! 7] = vs" + using u64_split_realise[where ?v=vs] + by clarsimp (metis cat_flatten_u64_eq) + +lemma cat_flatten_s64_contents_eq: + "cat_s64 [flatten_s64 vs ! 0, flatten_s64 vs ! 1, flatten_s64 vs ! 2, flatten_s64 vs ! 3, flatten_s64 vs ! 4, flatten_s64 vs ! 5, flatten_s64 vs ! 6, flatten_s64 vs ! 7] = vs" + using s64_split_realise[where ?v=vs] + by clarsimp (metis word_rcat_rsplit) + +lemma decode_encoded_u64_in_mem: + "cat_u64 (retrieve_bytes (content (store_tval obj off (Uint64_v x))) off |Uint64|\<^sub>\) = x" +proof - + have all_bytes: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) off)) = flatten_u64 x ! 0 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc off))) = flatten_u64 x ! 1 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc (Suc off)))) = flatten_u64 x ! 2 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc (Suc (Suc off))))) = flatten_u64 x ! 3 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc (Suc (Suc (Suc off)))))) = flatten_u64 x ! 4 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc (Suc (Suc (Suc (Suc off))))))) = flatten_u64 x ! 5 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc (Suc (Suc (Suc (Suc (Suc off)))))))) = flatten_u64 x ! 6 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_u64 x)) (Suc (Suc (Suc (Suc (Suc (Suc (Suc off))))))))) = flatten_u64 x ! 7" + by (smt (verit, best) length_nth_simps(3) length_nth_simps(4) memval.sel(1) option.sel One_nat_def numeral_2_eq_2 numeral_3_eq_3 numeral_4_eq_4 numeral_5_eq_5 numeral_6_eq_6 numeral_7_eq_7 store_bytes.simps(2) stored_bytes_instant_correctness u64_split_realise) + show ?thesis + by (clarsimp simp add: sizeof_def store_tval_def eval_nat_numeral(3), simp add: numeral_Bit0) + (presburger add: all_bytes cat_flatten_u64_contents_eq) +qed + +lemma decode_encoded_s64_in_mem: + "cat_s64 (retrieve_bytes (content (store_tval obj off (Sint64_v x))) off |Sint64|\<^sub>\) = x" +proof - + have all_bytes: "of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) off)) = flatten_s64 x ! 
0 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc off))) = flatten_s64 x ! 1 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc (Suc off)))) = flatten_s64 x ! 2 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc (Suc (Suc off))))) = flatten_s64 x ! 3 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc (Suc (Suc (Suc off)))))) = flatten_s64 x ! 4 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc (Suc (Suc (Suc (Suc off))))))) = flatten_s64 x ! 5 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc (Suc (Suc (Suc (Suc (Suc off)))))))) = flatten_s64 x ! 6 \ + of_byte (the (Mapping.lookup (store_bytes (content obj) off (flatten_s64 x)) (Suc (Suc (Suc (Suc (Suc (Suc (Suc off))))))))) = flatten_s64 x ! 7" + by (smt (verit, best) length_nth_simps(3) length_nth_simps(4) memval.sel(1) option.sel One_nat_def numeral_2_eq_2 numeral_3_eq_3 numeral_4_eq_4 numeral_5_eq_5 numeral_6_eq_6 numeral_7_eq_7 store_bytes.simps(2) stored_bytes_instant_correctness s64_split_realise) + show ?thesis + by (clarsimp simp add: sizeof_def store_tval_def eval_nat_numeral(3), simp add: numeral_Bit0) + (presburger add: all_bytes cat_flatten_s64_contents_eq) +qed + +lemma retrieve_stored_tval_cap: + assumes "val = Cap_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) True = val" + apply (clarsimp simp add: assms) + unfolding retrieve_tval_def + apply (clarsimp simp add: stored_tval_contiguous_cap; safe) + apply (metis is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero) + subgoal + apply (subgoal_tac "is_contiguous_cap (content (store_tval obj off (Cap_v v))) (get_cap (content (store_tval obj off (Cap_v v))) off) off |Cap|\<^sub>\") + apply clarsimp + apply (simp only: store_tval_def get_cap_def sizeof_def) + apply clarsimp + apply (subst suc_of_32) + apply (simp only: stored_cap_instant_correctness) + apply simp + apply (simp add: mem_capability.extend_def mem_capability.truncate_def) + apply (metis cctype.simps(81) get_cap_def is_contiguous_cap.simps(2) memval_size_cap sizeof_def stored_tval_contiguous_cap suc_of_32) + done + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + apply (metis is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero) + done + subgoal + apply (subgoal_tac "is_contiguous_cap (content (store_tval obj off (Cap_v v))) (get_cap (content (store_tval obj off (Cap_v v))) off) off |Cap|\<^sub>\") + apply clarsimp + apply (simp only: store_tval_def get_cap_def sizeof_def) + apply clarsimp + apply (subst suc_of_32) + apply (simp only: stored_cap_instant_correctness) + apply simp + apply (simp add: mem_capability.extend_def mem_capability.truncate_def) + apply (metis cctype.simps(81) get_cap_def is_contiguous_cap.simps(2) memval_size_cap sizeof_def stored_tval_contiguous_cap suc_of_32) + done + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + apply (metis cctype.simps(81) is_contiguous_bytes.simps(2) sizeof_def suc_of_32) + done + subgoal + apply (subgoal_tac "is_contiguous_cap (content (store_tval obj off (Cap_v v))) (get_cap (content (store_tval obj off (Cap_v v))) off) off |Cap|\<^sub>\") + apply clarsimp + apply (simp only: 
store_tval_def get_cap_def sizeof_def) + apply clarsimp + apply (metis is_contiguous_zeros_def le_eq_less_or_eq less_add_same_cancel1 memval_memcap_not_byte option.sel suc_of_32 zero_less_Suc) + apply (metis cctype.simps(81) get_cap_def is_contiguous_cap.simps(2) memval_size_cap sizeof_def stored_tval_contiguous_cap suc_of_32) + done + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + apply (metis gr_implies_not_zero is_contiguous_bytes.simps(2) old.nat.exhaust sizeof_nonzero) + done + subgoal + apply (subgoal_tac "is_contiguous_cap (content (store_tval obj off (Cap_v v))) (get_cap (content (store_tval obj off (Cap_v v))) off) off |Cap|\<^sub>\") + apply clarsimp + apply (simp only: store_tval_def get_cap_def sizeof_def) + apply clarsimp + apply (subst suc_of_32) + apply (simp only: stored_cap_instant_correctness) + apply simp + apply (simp add: mem_capability.extend_def mem_capability.truncate_def) + apply (metis cctype.simps(81) get_cap_def is_contiguous_cap.simps(2) memval_size_cap sizeof_def stored_tval_contiguous_cap suc_of_32) + done + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + done + +lemma retrieve_stored_tval_cap_no_perm_cap_load: + assumes "val = Cap_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) False = (Cap_v (v \ tag := False \))" + apply (clarsimp simp add: assms) + unfolding retrieve_tval_def + apply (clarsimp simp add: stored_tval_contiguous_cap; safe) + subgoal + apply (metis is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero) + done + subgoal + apply (subgoal_tac "is_contiguous_cap (content (store_tval obj off (Cap_v v))) (get_cap (content (store_tval obj off (Cap_v v))) off) off |Cap|\<^sub>\") + apply clarsimp + apply (simp only: store_tval_def get_cap_def sizeof_def) + apply clarsimp + apply (metis is_contiguous_zeros_def le_eq_less_or_eq less_add_same_cancel1 memval_memcap_not_byte option.sel suc_of_32 zero_less_Suc) + apply (metis cctype.simps(81) get_cap_def is_contiguous_cap.simps(2) memval_size_cap sizeof_def stored_tval_contiguous_cap suc_of_32) + done + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + subgoal + apply (metis is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero) + done + subgoal + apply (subgoal_tac "is_contiguous_cap (content (store_tval obj off (Cap_v v))) (get_cap (content (store_tval obj off (Cap_v v))) off) off |Cap|\<^sub>\") + apply clarsimp + apply (simp only: store_tval_def get_cap_def sizeof_def) + apply clarsimp + apply (subst suc_of_32) + apply (simp only: stored_cap_instant_correctness) + apply simp + apply (simp add: mem_capability.extend_def mem_capability.truncate_def) + apply (metis cctype.simps(81) get_cap_def is_contiguous_cap.simps(2) memval_size_cap sizeof_def stored_tval_contiguous_cap suc_of_32) + done + subgoal + using stored_tval_is_cap by auto + subgoal + using stored_tval_is_cap by auto + done + +lemma retrieve_stored_tval_u8: + assumes "val = Uint8_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Uint8_v v))); memval_is_cap (the 
(Mapping.lookup (content (store_tval obj off (Uint8_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\\ + \ decode_u8_list (retrieve_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\) = v" + by (simp add: sizeof_def) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint8_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Uint8_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\" + by (metis One_nat_def ccval.distinct(15) ccval.distinct(17) ccval.distinct(19) is_contiguous_bytes.simps(2) memval_size_u8 stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint8_v v))); is_contiguous_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\\ + \ decode_u8_list (retrieve_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\) = v" + by (simp add: sizeof_def) +next + show "off \ Mapping.keys (content (store_tval obj off (Uint8_v v))) \ is_contiguous_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\" + by (metis cctype.simps(73) ccval.distinct(15) ccval.distinct(17) ccval.distinct(19) memval_size_u8 sizeof_def stored_tval_contiguous_bytes) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint8_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\\ + \ decode_u8_list (retrieve_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\) = v" + by (clarsimp simp add: sizeof_def store_tval_def) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint8_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Uint8_v v))) off |Uint8|\<^sub>\ " + by (metis cctype.simps(73) ccval.distinct(15) ccval.distinct(17) ccval.distinct(19) memval_size_u8 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_s8: + assumes "val = Sint8_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Sint8_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint8_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\\ + \ decode_u8_list (retrieve_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\) = SCAST(8 signed \ 8) v" + by (simp add: sizeof_def) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint8_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint8_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\" + by (metis One_nat_def ccval.distinct(33) ccval.distinct(35) ccval.distinct(37) is_contiguous_bytes.simps(2) memval_size_s8 stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint8_v v))); is_contiguous_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\\ + \ decode_u8_list (retrieve_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\) = SCAST(8 signed \ 8) v" + by (simp add: sizeof_def) +next + show "off \ Mapping.keys (content (store_tval obj off (Sint8_v v))) \ is_contiguous_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\" + by (metis cctype.simps(74) ccval.distinct(33) ccval.distinct(35) 
ccval.distinct(37) memval_size_s8 sizeof_def stored_tval_contiguous_bytes) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint8_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\\ + \ decode_u8_list (retrieve_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\) = SCAST(8 signed \ 8) v" + by (clarsimp simp add: sizeof_def store_tval_def) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint8_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Sint8_v v))) off |Sint8|\<^sub>\" + by (metis cctype.simps(74) ccval.distinct(33) ccval.distinct(35) ccval.distinct(37) memval_size_s8 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_u16: + assumes "val = Uint16_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Uint16_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Uint16_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\\ + \ cat_u16 (retrieve_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\) = v" + by (presburger add: decode_encoded_u16_in_mem) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint16_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Uint16_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\" + by (metis ccval.distinct(49) ccval.distinct(51) ccval.distinct(53) is_contiguous_bytes.simps(2) memval_size_u16 numeral_2_eq_2 stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint16_v v))); is_contiguous_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\\ + \ cat_u16 (retrieve_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\) = v" + by (presburger add: decode_encoded_u16_in_mem) +next + show "off \ Mapping.keys (content (store_tval obj off (Uint16_v v))) \ is_contiguous_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\" + by (metis Suc_1 ccval.distinct(49) ccval.distinct(51) ccval.distinct(53) is_contiguous_bytes.simps(2) memval_size_u16 stored_tval_contiguous_bytes) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint16_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\\ + \ cat_u16 (retrieve_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\) = v" + by (presburger add: decode_encoded_u16_in_mem) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint16_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Uint16_v v))) off |Uint16|\<^sub>\" + by (metis cctype.simps(75) ccval.distinct(49) ccval.distinct(51) ccval.distinct(53) memval_size_u16 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_s16: + assumes "val = Sint16_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Sint16_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint16_v v))) off)); + is_contiguous_bytes (content 
(store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\\ + \ cat_s16 (retrieve_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\) = v" + by (presburger add: decode_encoded_s16_in_mem) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint16_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint16_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\" + by (metis ccval.distinct(63) ccval.distinct(65) ccval.distinct(67) is_contiguous_bytes.simps(2) memval_size_s16 numeral_2_eq_2 stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint16_v v))); is_contiguous_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\\ + \ cat_s16 (retrieve_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\) = v" + by (presburger add: decode_encoded_s16_in_mem) +next + show "off \ Mapping.keys (content (store_tval obj off (Sint16_v v))) \ is_contiguous_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\" + by (metis Suc_1 ccval.distinct(63) ccval.distinct(65) ccval.distinct(67) is_contiguous_bytes.simps(2) memval_size_s16 stored_tval_contiguous_bytes) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint16_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\\ + \ cat_s16 (retrieve_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\) = v" + by (presburger add: decode_encoded_s16_in_mem) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint16_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Sint16_v v))) off |Sint16|\<^sub>\" + by (metis cctype.simps(76) ccval.distinct(63) ccval.distinct(65) ccval.distinct(67) memval_size_s16 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_u32: + assumes "val = Uint32_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Uint32_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Uint32_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\\ + \ cat_u32 (retrieve_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\) = v" + by (presburger add: decode_encoded_u32_in_mem) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint32_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Uint32_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\" + by (metis ccval.distinct(75) ccval.distinct(77) ccval.distinct(79) is_contiguous_bytes.simps(2) memval_size_u32 numeral_4_eq_4 stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint32_v v))); is_contiguous_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\\ + \ cat_u32 (retrieve_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\) = v" + by (presburger add: decode_encoded_u32_in_mem) +next + show "off \ Mapping.keys (content (store_tval obj off (Uint32_v v))) \ is_contiguous_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\" + by (metis ccval.distinct(77) ccval.distinct(79) is_cap.elims(2) is_contiguous_bytes.simps(2) 
memval_size_u32 numeral_4_eq_4 stored_tval_contiguous_bytes stored_tval_is_cap) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint32_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\\ + \ cat_u32 (retrieve_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\) = v" + by (presburger add: decode_encoded_u32_in_mem) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint32_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Uint32_v v))) off |Uint32|\<^sub>\" + by (metis cctype.simps(77) ccval.distinct(75) ccval.distinct(77) ccval.distinct(79) memval_size_u32 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_s32: + assumes "val = Sint32_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Sint32_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint32_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\\ + \ cat_s32 (retrieve_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\) = v" + by (presburger add: decode_encoded_s32_in_mem) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint32_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint32_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\" + by (metis cctype.simps(78) ccval.distinct(85) ccval.distinct(87) ccval.distinct(89) flatten_s32_length memval_size_s32_eq_word_split_len sizeof_def stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint32_v v))); is_contiguous_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\\ + \ cat_s32 (retrieve_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\) = v" + by (presburger add: decode_encoded_s32_in_mem) +next + show "off \ Mapping.keys (content (store_tval obj off (Sint32_v v))) \ is_contiguous_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\" + by (metis ccval.distinct(85) ccval.distinct(87) ccval.distinct(89) is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero stored_tval_contiguous_bytes) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint32_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\\ + \ cat_s32 (retrieve_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\) = v" + by (presburger add: decode_encoded_s32_in_mem) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint32_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Sint32_v v))) off |Sint32|\<^sub>\" + by (metis cctype.simps(78) ccval.distinct(85) ccval.distinct(87) ccval.simps(100) flatten_s32_length memval_size_s32_eq_word_split_len sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_u64: + assumes "val = Uint64_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Uint64_v v))); memval_is_cap (the (Mapping.lookup (content 
(store_tval obj off (Uint64_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\\ + \ cat_u64 (retrieve_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\) = v" + by (presburger add: decode_encoded_u64_in_mem) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint64_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Uint64_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\" + by (metis cctype.simps(79) ccval.distinct(93) ccval.distinct(95) ccval.distinct(97) memval_size_u64 sizeof_def stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Uint64_v v))); is_contiguous_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\\ + \ cat_u64 (retrieve_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\) = v" + by (presburger add: decode_encoded_u64_in_mem) +next + show "off \ Mapping.keys (content (store_tval obj off (Uint64_v v))) \ is_contiguous_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\" + by (metis cctype.simps(79) ccval.distinct(95) ccval.distinct(97) is_cap.elims(1) memval_size_u64 sizeof_def stored_tval_contiguous_bytes stored_tval_is_cap) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint64_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\\ + \ cat_u64 (retrieve_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\) = v" + by (presburger add: decode_encoded_u64_in_mem) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Uint64_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Uint64_v v))) off |Uint64|\<^sub>\" + by (metis cctype.simps(79) ccval.distinct(93) ccval.distinct(95) ccval.distinct(97) memval_size_u64 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma retrieve_stored_tval_s64: + assumes "val = Sint64_v v" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + unfolding retrieve_tval_def +proof (clarsimp simp add: assms stored_tval_contiguous_bytes; safe) + show "\off \ Mapping.keys (content (store_tval obj off (Sint64_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint64_v v))) off)); + is_contiguous_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\\ + \ cat_s64 (retrieve_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\) = v" + by (presburger add: decode_encoded_s64_in_mem) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint64_v v))); memval_is_cap (the (Mapping.lookup (content (store_tval obj off (Sint64_v v))) off))\ + \ is_contiguous_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\" + by (metis cctype.simps(80) ccval.distinct(101) ccval.distinct(103) ccval.distinct(99) memval_size_s64 sizeof_def stored_tval_contiguous_bytes) +next + show "\off \ Mapping.keys (content (store_tval obj off (Sint64_v v))); is_contiguous_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\\ + \ cat_s64 (retrieve_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\) = v" + by (presburger add: decode_encoded_s64_in_mem) +next + show "off \ Mapping.keys (content (store_tval obj off (Sint64_v v))) \ is_contiguous_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\" + by (metis bot_nat_0.not_eq_extremum 
ccval.distinct(101) ccval.distinct(103) ccval.distinct(99) is_contiguous_bytes.simps(2) list_decode.cases sizeof_nonzero stored_tval_contiguous_bytes) +next + show "\memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint64_v v))) off)); is_contiguous_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\\ + \ cat_s64 (retrieve_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\) = v" + by (presburger add: decode_encoded_s64_in_mem) +next + show "memval_is_byte (the (Mapping.lookup (content (store_tval obj off (Sint64_v v))) off)) \ is_contiguous_bytes (content (store_tval obj off (Sint64_v v))) off |Sint64|\<^sub>\" + by (metis cctype.simps(80) ccval.distinct(101) ccval.distinct(103) ccval.distinct(99) memval_size_s64 sizeof_def stored_tval_contiguous_bytes) +qed + +lemma memcap_truncate_extend_equiv: + "mem_capability.extend (mem_capability.truncate c) \ tag = tag c \ = c" + by (simp add: mem_capability.extend_def mem_capability.truncate_def) + +corollary Acap_truncate_extend_equiv: + "mem_capability.extend (of_cap (ACap (mem_capability.truncate c) n)) \ tag = tag c \ = c" + by clarsimp (blast intro: memcap_truncate_extend_equiv) + +lemma memcap_truncate_extend_gen: + "mem_capability.extend (mem_capability.truncate c) \ tag = b \ = c \ tag := b \" + by (simp add: mem_capability.extend_def mem_capability.truncate_def) + +corollary Acap_truncate_extend_gen: + "mem_capability.extend (of_cap (ACap (mem_capability.truncate c) n)) \ tag = b \ = c \ tag := b \" + by clarsimp (blast intro: memcap_truncate_extend_gen) + +lemma retrieve_stored_tval_cap_frag: + assumes "val = Cap_v_frag c n" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = + Cap_v_frag (c \ tag := False \) n" + by (clarsimp simp add: assms retrieve_tval_def store_tval_def sizeof_def get_cap_def memcap_truncate_extend_gen memval_is_byte_def split: bool.split) + +lemmas retrieve_stored_tval_prim = retrieve_stored_tval_u8 retrieve_stored_tval_s8 +retrieve_stored_tval_u16 retrieve_stored_tval_s16 +retrieve_stored_tval_u32 retrieve_stored_tval_s32 +retrieve_stored_tval_u64 retrieve_stored_tval_s64 + +lemma retrieve_stored_tval_any_perm: + assumes "val \ Undef" + and "\ v. val \ Cap_v v" + and "\ v n. val \ Cap_v_frag v n" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) b = val" + using retrieve_stored_tval_prim[where ?obj=obj and ?off=off and ?val=val and ?b=b] + by (clarsimp simp add: assms split: ccval.split) + +lemma retrieve_stored_tval_with_perm_cap_load: + assumes "val \ Undef" + and "\ v n. val \ Cap_v_frag v n" + shows "retrieve_tval (store_tval obj off val) off (memval_type val) True = val" + using retrieve_stored_tval_prim[where ?obj=obj and ?off=off and ?val=val and ?b=True] + retrieve_stored_tval_cap[where ?obj=obj and ?off=off and ?val=val] + by (clarsimp simp add: assms split: ccval.split) + +lemma store_bytes_domain_1: + assumes "x + length vs \ n" + shows "Mapping.lookup (store_bytes m n vs) x = Mapping.lookup m x" + using assms + by (induct vs arbitrary: x m n) simp_all + +lemma store_bytes_domain_2: + assumes "n + length vs \ x" + shows "Mapping.lookup (store_bytes m n vs) x = Mapping.lookup m x" + using assms + by (induct vs arbitrary: x m n) simp_all + +lemma store_bytes_keys_1: + "Set.filter (\ x. x + length vs \ n) (Mapping.keys m) = + Set.filter (\ x. 
x + length vs \ n) (Mapping.keys (store_bytes m n vs))" + by (induct vs arbitrary: m n) + (simp, smt (verit, best) Collect_cong Set.filter_def keys_is_none_rep store_bytes_domain_1) + +lemma store_bytes_keys_2: + "Set.filter (\ x. n + length vs \ x) (Mapping.keys m) = + Set.filter (\ x. n + length vs \ x) (Mapping.keys (store_bytes m n vs))" + by (induct vs arbitrary: m n) + (simp, smt (verit, best) Collect_cong Set.filter_def keys_is_none_rep store_bytes_domain_2) + +lemma store_cap_domain_1: + assumes "x + n \ p" + shows "Mapping.lookup (store_cap m p c n) x = Mapping.lookup m x" + using assms + by (induct n arbitrary: x m p) simp_all + +lemma store_cap_domain_2: + assumes "p + n \ x" + shows "Mapping.lookup (store_cap m p c n) x = Mapping.lookup m x" + using assms + by (induct n arbitrary: x m p) simp_all + +lemma store_cap_keys_1: + "Set.filter (\ x. x + n \ p) (Mapping.keys m) = + Set.filter (\ x. x + n \ p) (Mapping.keys (store_cap m p c n))" + by (induct n arbitrary: m p) + (force, smt (verit, best) Collect_cong Set.filter_def keys_is_none_rep store_cap_domain_1) + +lemma store_cap_keys_2: + "Set.filter (\ x. p + n \ x) (Mapping.keys m) = + Set.filter (\ x. p + n \ x) (Mapping.keys (store_cap m p c n))" + by (induct n arbitrary: m p) + (force, smt (verit, best) Collect_cong Set.filter_def keys_is_none_rep store_cap_domain_2) + +lemma store_tags_domain_1: + assumes "x < n" + shows "Mapping.lookup (store_tag m n b) x = Mapping.lookup m x" + using assms by auto + +lemma store_tags_domain_2: + assumes "n < x" + shows "Mapping.lookup (store_tag m n b) x = Mapping.lookup m x" + using assms by auto + +lemma store_tags_keys_1: + "Set.filter (\ x. x < n) (Mapping.keys m) = + Set.filter (\ x. x < n) (Mapping.keys (store_tag m n b))" + by fastforce + +lemma store_tags_keys_2: + "Set.filter (\ x. n < x) (Mapping.keys m) = + Set.filter (\ x. n < x) (Mapping.keys (store_tag m n b))" + by fastforce + +lemma cap_offset_aligned: + "(cap_offset n) mod |Cap|\<^sub>\ = 0" + unfolding sizeof_def + by force + +lemma store_tags_offset: + assumes "Set.filter (\ x. x mod |Cap|\<^sub>\ \ 0) (Mapping.keys m) = {}" + shows "Set.filter (\ x. 
x mod |Cap|\<^sub>\ \ 0) (Mapping.keys (store_tag m (cap_offset n) b)) = {}" + using assms + unfolding sizeof_def + by force + +lemma store_tval_disjoint_bounds: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + shows "bounds obj = bounds obj'" + using assms + unfolding store_tval_def + by (clarsimp split: ccval.split_asm) + +lemma store_tval_disjoint_1_content: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off' < off" + shows "Mapping.lookup (content obj) off' = Mapping.lookup (content obj') off'" + using assms + by (clarsimp simp add: store_tval_def split: ccval.split_asm) + (presburger add: stored_bytes_prev stored_cap_prev)+ + +lemma store_tval_disjoint_1_content_bytes: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off' + n \ off" + shows "retrieve_bytes (content obj) off' n = retrieve_bytes (content obj') off' n" + using assms + +proof (induct n arbitrary: obj obj' off val off') + case 0 + then show ?case + by force +next + case (Suc n) + then show ?case + by (metis (no_types, lifting) Suc_eq_plus1 add.assoc le_eq_less_or_eq less_add_same_cancel1 order_le_less_trans plus_1_eq_Suc retrieve_bytes.simps(2) store_tval_disjoint_1_content zero_less_Suc) +qed + +lemma store_tval_disjoint_1_content_contiguous_bytes: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off' + n \ off" + shows "is_contiguous_bytes (content obj) off' n = is_contiguous_bytes (content obj') off' n" + using assms +proof (induct n arbitrary: obj obj' off val off') + case 0 + then show ?case + by force +next + case (Suc n) + then show ?case + by (smt (verit, best) add.assoc add.commute domIff is_contiguous_bytes.simps(2) keys_dom_lookup le_eq_less_or_eq less_add_same_cancel1 order_le_less_trans plus_1_eq_Suc store_tval_disjoint_1_content zero_less_Suc) +qed + +lemma store_tval_disjoint_1_content_contiguous_caps: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off' + n \ off" + shows "is_contiguous_cap (content obj) cap off' n = is_contiguous_cap (content obj') cap off' n" + using assms +proof (induct n arbitrary: obj obj' off val off') + case 0 + then show ?case + by force +next + case (Suc n) + then show ?case + by (smt (verit, best) add.assoc add.commute domIff is_contiguous_cap.simps(2) keys_dom_lookup le_eq_less_or_eq less_add_same_cancel1 order_le_less_trans plus_1_eq_Suc store_tval_disjoint_1_content zero_less_Suc) +qed + +lemma store_tval_disjoint_1_tags: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off' + |Cap|\<^sub>\ \ off" + shows "Mapping.lookup (tags obj) off' = Mapping.lookup (tags obj') off'" + using assms + by (clarsimp simp add: store_tval_def split: ccval.split_asm) + (metis Mapping.lookup_update_neq add.commute add_cancel_right_right bot_nat_0.extremum_strict diff_diff_cancel le_add1 le_add_diff_inverse less_diff_conv2 mod_less_divisor mod_less_eq_dividend sizeof_nonzero)+ + + + +lemma store_tval_disjoint_2_content: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off + |memval_type val|\<^sub>\ \ off'" + shows "Mapping.lookup (content obj) off' = Mapping.lookup (content obj') off'" + using assms +proof (clarsimp simp add: store_tval_def split: ccval.split_asm) + show "\x. 
\off + |Cap|\<^sub>\ \ off'; val = Cap_v x; obj' = obj\content := store_cap (content obj) off x |Cap|\<^sub>\, tags := store_tag (tags obj) (cap_offset off) (tag x)\\ + \ Mapping.lookup (content obj) off' = Mapping.lookup (store_cap (content obj) off x |Cap|\<^sub>\) off'" + by (presburger add: store_cap_domain_2) +qed (force simp add: sizeof_def | metis assms(3) store_bytes_domain_2 flatten_types_length memval_size_types)+ + +lemma store_tval_disjoint_2_content_bytes: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off + |memval_type val|\<^sub>\ \ off'" + shows "retrieve_bytes (content obj) off' n = retrieve_bytes (content obj') off' n" + using assms +proof (induct n arbitrary: obj obj' off off' val) + case 0 + then show ?case + by force +next + case (Suc n) + then show ?case + by (metis less_Suc_eq_le less_or_eq_imp_le retrieve_bytes.simps(2) store_tval_disjoint_2_content) +qed + +lemma store_tval_disjoint_2_content_contiguous_bytes: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off + |memval_type val|\<^sub>\ \ off'" + shows "is_contiguous_bytes (content obj) off' n = is_contiguous_bytes (content obj') off' n" + using assms +proof (induct n arbitrary: obj obj' off val off') + case 0 + then show ?case + by force +next + case (Suc n) + then show ?case + by (smt (verit, best) domIff is_contiguous_bytes.simps(2) keys_dom_lookup le_eq_less_or_eq lessI order_le_less_trans store_tval_disjoint_2_content) +qed + +lemma store_tval_disjoint_2_content_contiguous_caps: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off + |memval_type val|\<^sub>\ \ off'" + shows "is_contiguous_cap (content obj) cap off' n = is_contiguous_cap (content obj') cap off' n" + using assms +proof (induct n arbitrary: obj obj' off val off') + case 0 + then show ?case + by force +next + case (Suc n) + then show ?case + by (smt (verit, best) domIff is_contiguous_cap.simps(2) keys_dom_lookup le_eq_less_or_eq lessI order_le_less_trans store_tval_disjoint_2_content) +qed + +lemma store_tval_disjoint_2_tags: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off + |memval_type val|\<^sub>\ \ off'" + shows "Mapping.lookup (tags obj) off' = Mapping.lookup (tags obj') off'" + using assms + by (clarsimp simp add: store_tval_def split: ccval.split_asm) + (force simp add: assms(3) sizeof_def)+ + +lemma zero_imp_bytes: + "is_contiguous_zeros obj off n \ \ is_contiguous_bytes obj off n \ False" +proof (simp add: is_contiguous_zeros_code, induct n arbitrary: obj off) + case 0 + then show ?case + by simp +next + case (Suc n) + then show ?case + using keys_dom_lookup memval_memcap_not_byte + by fastforce +qed + +lemma retrieve_stored_tval_disjoint_1: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off' + |t|\<^sub>\ \ off" + shows "retrieve_tval obj off' t b = retrieve_tval obj' off' t b" + using assms + apply (clarsimp simp add: retrieve_tval_def) + apply (rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI) + subgoal + by (simp split: cctype.split, safe; (force simp add: numeral_2_eq_2 numeral_4_eq_4 numeral_8_eq_8 sizeof_def)+) + subgoal + apply (simp split: cctype.split, safe, (force simp add: sizeof_def numeral_2_eq_2 numeral_4_eq_4 numeral_8_eq_8)+) + apply (metis is_contiguous_bytes.simps(2) less_imp_Suc_add sizeof_nonzero) + done + subgoal + apply (intro strip conjI) + apply (simp split: cctype.split, safe; (force simp add: 
numeral_2_eq_2 numeral_4_eq_4 numeral_8_eq_8 sizeof_def)+) + apply (simp split: cctype.split, metis is_contiguous_bytes.simps(1) is_contiguous_bytes.simps(2) old.nat.exhaust) + done + subgoal + using zero_imp_bytes by presburger + subgoal + by (smt (z3) store_tval_disjoint_1_content_bytes zero_imp_bytes) + subgoal + by (simp add: is_contiguous_zeros_def, metis (no_types, lifting) order_less_le_trans store_tval_disjoint_1_content) + subgoal + unfolding is_contiguous_zeros_def + by (smt (verit) cctype.exhaust cctype.simps(73) cctype.simps(74) cctype.simps(75) cctype.simps(76) cctype.simps(77) cctype.simps(78) cctype.simps(79) cctype.simps(80) get_cap_def keys_is_none_rep less_add_same_cancel1 order_less_le_trans sizeof_nonzero store_tval_disjoint_1_content store_tval_disjoint_1_content_bytes store_tval_disjoint_1_content_contiguous_bytes store_tval_disjoint_1_content_contiguous_caps store_tval_disjoint_1_tags) + apply (rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI) + apply (smt (z3) store_tval_disjoint_1_content_bytes zero_imp_bytes) + apply (smt (verit, best) store_tval_disjoint_1_content_bytes zero_imp_bytes) + apply (simp add: is_contiguous_zeros_def, smt (z3) le_add_diff_inverse2 store_tval_disjoint_1_content trans_less_add2) + apply (rule impI, rule conjI, rule impI) + apply (simp add: is_contiguous_zeros_def) + apply (smt (z3) le_add_diff_inverse2 store_tval_disjoint_1_content trans_less_add2) + subgoal + apply (rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI) + apply (metis is_contiguous_bytes.simps(2) less_nat_zero_code old.nat.exhaust sizeof_nonzero) + using store_tval_disjoint_1_content_contiguous_bytes + apply blast + apply (metis is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero) + apply (rule impI, rule conjI, rule impI, rule conjI, rule impI) + using store_tval_disjoint_1_content_contiguous_bytes + apply blast + apply (simp add: Let_def split: cctype.split; clarsimp, smt (verit, best) add.commute add_diff_cancel_right' add_le_cancel_left bot_nat_0.not_eq_extremum get_cap_def le_add_diff_inverse2 le_eq_less_or_eq order_less_le_trans sizeof_nonzero store_tval_disjoint_1_content store_tval_disjoint_1_content_contiguous_caps store_tval_disjoint_1_tags zero_less_diff) + apply (metis add.right_neutral add_less_cancel_left domIff keys_dom_lookup le_add_diff_inverse2 sizeof_nonzero store_tval_disjoint_1_content trans_less_add2) + apply (smt (z3) keys_is_none_rep less_add_same_cancel1 order_less_le_trans sizeof_nonzero store_tval_disjoint_1_content store_tval_disjoint_1_content_bytes store_tval_disjoint_1_content_contiguous_bytes) + done + done + +lemma retrieve_stored_tval_disjoint_2: + assumes "store_tval obj off val = obj'" + and "val \ Undef" + and "off + |memval_type val|\<^sub>\ \ off'" + and "t = Cap \ off' mod |Cap|\<^sub>\ = 0" + shows "retrieve_tval obj off' t b = retrieve_tval obj' off' t b" + using assms(1) assms(2) assms(3) + apply (clarsimp simp add: retrieve_tval_def) + apply (rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI) + subgoal + by (simp split: cctype.split, safe; (force simp add: numeral_2_eq_2 numeral_4_eq_4 numeral_8_eq_8 sizeof_def)+) + subgoal + apply (simp split: cctype.split, safe, (force simp add: sizeof_def numeral_2_eq_2 numeral_4_eq_4 numeral_8_eq_8)+) + apply (metis is_contiguous_bytes.simps(2) less_imp_Suc_add 
sizeof_nonzero) + done + subgoal + apply (rule impI, rule conjI, rule impI) + apply (simp split: cctype.split, safe; (force simp add: numeral_2_eq_2 numeral_4_eq_4 numeral_8_eq_8 sizeof_def)+) + apply (simp split: cctype.split, metis is_contiguous_bytes.simps(1) is_contiguous_bytes.simps(2) old.nat.exhaust) + done + subgoal + using zero_imp_bytes by presburger + subgoal + by (smt (z3) memval_type.elims store_tval_disjoint_2_content_bytes zero_imp_bytes) + subgoal + by (smt (z3) is_contiguous_zeros_def le_trans memval_type.elims store_tval_disjoint_2_content) + subgoal + unfolding is_contiguous_zeros_def + by (smt (verit) get_cap_def keys_is_none_rep le_trans memval_type.elims store_tval_disjoint_2_content store_tval_disjoint_2_content_bytes store_tval_disjoint_2_content_contiguous_bytes store_tval_disjoint_2_content_contiguous_caps store_tval_disjoint_2_tags) + subgoal + apply (rule impI, rule conjI, rule impI, rule conjI, rule impI, rule conjI, rule impI) + apply (metis (no_types, opaque_lifting) is_contiguous_bytes.simps(2) less_numeral_extra(3) not0_implies_Suc sizeof_nonzero zero_imp_bytes) + apply (smt (z3) assms(3) store_tval_disjoint_2_content_bytes zero_imp_bytes) + apply (simp add: is_contiguous_zeros_def, metis assms(3) order_trans store_tval_disjoint_2_content) + unfolding is_contiguous_zeros_def + by (smt (verit) assms(3) assms(4) cctype.exhaust cctype.simps(73) cctype.simps(74) cctype.simps(75) cctype.simps(76) cctype.simps(77) cctype.simps(78) cctype.simps(79) cctype.simps(80) get_cap_def keys_is_none_rep mod_0_imp_dvd mod_greater_zero_iff_not_dvd store_tval_disjoint_2_content store_tval_disjoint_2_content_bytes store_tval_disjoint_2_content_contiguous_bytes) + done + + +lemma type_uniq: + assumes "\ x n. ret = Cap_v_frag x n" + shows "ret \ Uint8_v v1" "ret \ Sint8_v v2" "ret \ Uint16_v v3" "ret \ Sint16_v v4" + "ret \ Uint32_v v5" "ret \ Sint32_v v6" "ret \ Uint64_v v7" "ret \ Sint64_v v8" + "ret \ Cap_v v9" + using assms + by blast+ + +section \Memory Actions / Operations\ + +definition alloc :: "heap \ bool \ nat \ (heap \ cap) result" + where + "alloc h c s \ + let cap = \ block_id = (next_block h), + offset = 0, + base = 0, + len = s, + perm_load = True, + perm_cap_load = c, + perm_store = True, + perm_cap_store = c, + perm_cap_store_local = c, + perm_global = False, + tag = True + \ in + let h' = h \ next_block := (next_block h) + 1, + heap_map := Mapping.update + (next_block h) + (Map \ bounds = (0, s), + content = Mapping.empty, + tags = Mapping.empty + \ + ) (heap_map h) + \ in + Success (h', cap)" + +definition free :: "heap \ cap \ (heap \ cap) result" + where + "free h c \ + if c = NULL then Success (h, c) else + if tag c = False then Error (C2Err (TagViolation)) else + if perm_global c = True then Error (LogicErr (Unhandled 0)) else + let obj = Mapping.lookup (heap_map h) (block_id c) in + (case obj of None \ Error (LogicErr (MissingResource)) + | Some cobj \ + (case cobj of Freed \ Error (LogicErr (UseAfterFree)) + | Map m \ + if offset c \ 0 then Error (LogicErr (Unhandled 0)) + else if offset c > base c + len c then Error (LogicErr (Unhandled 0)) else + let cap_bound = (base c, base c + len c) in + if cap_bound \ bounds m then Error (LogicErr (Unhandled 0)) else + let h' = h \ heap_map := Mapping.update (block_id c) Freed (heap_map h) \ in + let cap = c \ tag := False \ in + Success (h', cap)))" + +text \How load works: + The hardware would perform a CL[C] operation on the given capability first. 
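+ In the definition below these hardware checks are, in order, the tag check, the load-permission check, the two bounds checks and the alignment check, which produce the TagViolation, PermitLoadViolation, LengthViolation and BadAddressViolation errors respectively.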
+ An invalid capability for load would be caught by the hardware. + Once all the hardware checks are performed, we then proceed to the logical checks.\ +definition load :: "heap \ cap \ cctype \ block ccval result" + where + "load h c t \ + if tag c = False then + Error (C2Err TagViolation) + else if perm_load c = False then + Error (C2Err PermitLoadViolation) + else if offset c + |t|\<^sub>\ > base c + len c then + Error (C2Err LengthViolation) + else if offset c < base c then + Error (C2Err LengthViolation) + else if offset c mod |t|\<^sub>\ \ 0 then + Error (C2Err BadAddressViolation) + else + let obj = Mapping.lookup (heap_map h) (block_id c) in + (case obj of None \ Error (LogicErr (MissingResource)) + | Some cobj \ + (case cobj of Freed \ Error (LogicErr (UseAfterFree)) + | Map m \ if offset c < fst (bounds m) \ offset c + |t|\<^sub>\ > snd (bounds m) then + Error (LogicErr BufferOverrun) else + Success (retrieve_tval m (nat (offset c)) t (perm_cap_load c))))" + +definition store :: "heap \ cap \ block ccval \ heap result" + where + "store h c v \ + if tag c = False then + Error (C2Err TagViolation) + else if perm_store c = False then + Error (C2Err PermitStoreViolation) + else if (case v of Cap_v cv \ \ perm_cap_store c \ tag cv | _ \ False) then + Error (C2Err PermitStoreCapViolation) + else if (case v of Cap_v cv \ \ perm_cap_store_local c \ tag cv \ \ perm_global cv | _ \ False) then + Error (C2Err PermitStoreLocalCapViolation) + else if offset c + |memval_type v|\<^sub>\ > base c + len c then + Error (C2Err LengthViolation) + else if offset c < base c then + Error (C2Err LengthViolation) + else if offset c mod |memval_type v|\<^sub>\ \ 0 then + Error (C2Err BadAddressViolation) + else if v = Undef then + Error (LogicErr (Unhandled 0)) + else + let obj = Mapping.lookup (heap_map h) (block_id c) in + (case obj of None \ Error (LogicErr (MissingResource)) + | Some cobj \ + (case cobj of Freed \ Error (LogicErr (UseAfterFree)) + | Map m \ if offset c < fst (bounds m) \ offset c + |memval_type v|\<^sub>\ > snd (bounds m) then + Error (LogicErr BufferOverrun) else + Success (h \ heap_map := Mapping.update + (block_id c) + (Map (store_tval m (nat (offset c)) v)) + (heap_map h) \)))" + +subsection \Properties of the operations\ +text \Here we provide all the properties the operations satisfy. In general, you may find the following + forms of proofs: + \begin{itemize} + \item If we have valid input, the operation will succeed + \item If we have invalid inputs, the operations will return the appropriate error + \item If the operation succeeds, we have a valid input + \end{itemize} + good variable laws are also proven at the next subsubsection.\ + +subsubsection \Correctness Properties\ + +lemma alloc_always_success: + "\! res. alloc h c s = Success res" + by (simp add: alloc_def) + +schematic_goal alloc_updated_heap_and_cap: + "alloc h c s = Success (?h', ?cap)" + by (fastforce simp add: alloc_def) + +lemma alloc_never_fails: + "alloc h c s = Error e \ False" + by (simp add: alloc_def) + +text \In practice, malloc may actually return NULL when allocation fails. 
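+ The alloc operation modelled here, in contrast, always succeeds and returns a tagged capability to a fresh block.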
However, this still complies + with The C Standard.\ +lemma alloc_no_null_ret: + assumes "alloc h c s = Success (h', cap)" + shows "cap \ NULL" +proof - + have "perm_load cap" + using assms alloc_def + by force + moreover have "\ perm_load NULL" + unfolding null_capability_def zero_capability_ext_def zero_mem_capability_ext_def + by force + ultimately show ?thesis + by blast +qed + +lemma alloc_correct: + assumes "alloc h c s = Success (h', cap)" + shows "next_block h' = next_block h + 1" + and "Mapping.lookup (heap_map h') (next_block h) + = Some (Map \ bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\)" + using assms alloc_def + by auto + +text \Section 7.20.3.2 of The C Standard states free(NULL) results in no action occurring~\cite{cstandard_2003}.\ +lemma free_null: + "free h NULL = Success (h, NULL)" + by (simp add: free_def) + +lemma free_false_tag: + assumes "c \ NULL" + and "tag c = False" + shows "free h c = Error (C2Err (TagViolation))" + by (presburger add: assms free_def) + +lemma free_global_cap: + assumes "c \ NULL" + and "tag c = True" + and "perm_global c = True" + shows "free h c = Error (LogicErr (Unhandled 0))" + by (presburger add: assms free_def) + +lemma free_nonexistant_obj: + assumes "c \ NULL" + and "tag c = True" + and "perm_global c = False" + and "Mapping.lookup (heap_map h) (block_id c) = None" + shows "free h c = Error (LogicErr (MissingResource))" + using assms free_def + by auto + +text \This case may arise if there are copies of the same capability, where only one was freed. + It is worth noting that due to this, temporal safety is not guaranteed by the CHERI hardware.\ +lemma free_double_free: + assumes "c \ NULL" + and "tag c = True" + and "perm_global c = False" + and "Mapping.lookup (heap_map h) (block_id c) = Some Freed" + shows "free h c = Error (LogicErr (UseAfterFree))" + using free_def assms + by force + +text \An incorrect offset implies the actual ptr value is not that returned by alloc. + Section 7.20.3.2 of The C Standard states this leads to undefined behaviour~\cite{cstandard_2003}. + Clang, in practice, however, terminates the C program with an invalid pointer error. \ +lemma free_incorrect_cap_offset: + assumes "c \ NULL" + and "tag c = True" + and "perm_global c = False" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ 0" + shows "free h c = Error (LogicErr (Unhandled 0))" + using free_def assms + by force + +lemma free_incorrect_bounds: + assumes "c \ NULL" + and "tag c = True" + and "perm_global c = False" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c = 0" + and "bounds m \ (base c, base c + len c)" + shows "free h c = Error (LogicErr (Unhandled 0))" + unfolding free_def + using assms + by force + +lemma free_non_null_correct: + assumes "c \ NULL" + and valid_tag: "tag c = True" + and "perm_global c = False" + and map_has_contents: "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and offset_correct: "offset c = 0" + and bounds_correct: "bounds m = (base c, base c + len c)" + shows "free h c = Success (h \ heap_map := Mapping.update (block_id c) Freed (heap_map h) \, + c \ tag := False \)" + unfolding free_def + using assms + by simp + +lemma free_cond: + assumes "free h c = Success (h', cap)" + shows "c \ NULL \ tag c = True" + and "c \ NULL \ perm_global c = False" + and "c \ NULL \ offset c = 0" + and "c \ NULL \ \ m. 
Mapping.lookup (heap_map h) (block_id c) = Some (Map m) \ + bounds m = (base c, base c + len c)" + and "c \ NULL \ Mapping.lookup (heap_map h') (block_id c) = Some Freed" + and "c \ NULL \ cap = c \ tag := False \" + and "c = NULL \ (h, c) = (h', cap)" +proof - + assume "c \ NULL" + thus "tag c = True" + using assms unfolding free_def + by (meson result.simps(4)) +next + assume "c \ NULL" + thus "perm_global c = False" + using assms unfolding free_def + by (meson result.simps(4)) +next + assume "c \ NULL" + thus "offset c = 0" + using assms unfolding free_def + by (smt (verit, ccfv_SIG) not_None_eq option.simps(4) option.simps(5) result.distinct(1) t.exhaust t.simps(4) t.simps(5)) +next + assume "c \ NULL" + thus "\ m. Mapping.lookup (heap_map h) (block_id c) = Some (Map m) \ + bounds m = (base c, base c + len c)" + using assms unfolding free_def + by (metis assms free_double_free free_incorrect_bounds free_incorrect_cap_offset free_nonexistant_obj not_Some_eq result.distinct(1) t.exhaust) +next + assume "c \ NULL" + hence "h' = h \ heap_map := Mapping.update (block_id c) Freed (heap_map h) \" + using assms unfolding free_def + by (smt (verit, ccfv_SIG) free_nonexistant_obj not_Some_eq option.simps(4) option.simps(5) prod.inject result.distinct(1) result.exhaust result.inject(1) t.exhaust t.simps(4) t.simps(5)) + thus "Mapping.lookup (heap_map h') (block_id c) = Some Freed" + by fastforce +next + assume "c \ NULL" + thus "cap = c \ tag := False \" + using assms unfolding free_def + by (smt (verit, ccfv_SIG) not_Some_eq option.simps(4) option.simps(5) prod.inject result.distinct(1) result.inject(1) t.exhaust t.simps(4) t.simps(5)) +next + assume "c = NULL" + thus "(h, c) = (h', cap)" + using free_null assms + by force +qed + +lemmas free_cond_non_null = free_cond(1) free_cond(2) free_cond(3) free_cond(4) free_cond(5) free_cond(6) + +lemma double_free: + assumes "free h c = Success (h', cap)" + and "cap \ NULL" + shows "free h' cap = Error (C2Err TagViolation)" +proof - + have "cap = c \ tag := False \ \ tag cap = False" + by fastforce + thus ?thesis + using assms free_cond(6)[where ?h=h and ?c=c and ?h'=h' and ?cap=cap] free_false_tag[where ?c=cap and ?h=h'] free_cond(7)[where ?h=h and ?c=c and ?h'=h' and ?cap=cap] + by blast +qed + +lemma free_next_block: + assumes "free h cap = Success (h', cap')" + shows "next_block h = next_block h'" + proof - + consider (null) "cap = NULL" | (non_null) "cap \ NULL" by blast + then show ?thesis + proof (cases) + case null + then show ?thesis + using free_null assms null + by simp + next + case non_null + then show ?thesis + using assms free_cond_non_null[OF assms non_null] + unfolding free_def + by (auto split: option.split_asm t.split_asm) + qed +qed + +lemma load_null_error: + "load h NULL t = Error (C2Err TagViolation)" + unfolding load_def + by simp + +lemma load_false_tag: + assumes "tag c = False" + shows "load h c t = Error (C2Err TagViolation)" + unfolding load_def + using assms + by presburger + +lemma load_false_perm_load: + assumes "tag c = True" + and "perm_load c = False" + shows "load h c t = Error (C2Err PermitLoadViolation)" + unfolding load_def + using assms + by presburger + +lemma load_bound_over: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ > base c + len c" + shows "load h c t = Error (C2Err LengthViolation)" + unfolding load_def + using assms + by presburger + +lemma load_bound_under: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and 
"offset c < base c" + shows "load h c t = Error (C2Err LengthViolation)" + unfolding load_def + using assms + by presburger + +lemma load_misaligned: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ \ 0" + shows "load h c t = Error (C2Err BadAddressViolation)" + unfolding load_def + using assms + by force + +lemma load_nonexistant_obj: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = None" + shows "load h c t = Error (LogicErr MissingResource)" + unfolding load_def + using assms + by auto + +lemma load_load_after_free: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some Freed" + shows "load h c t = Error (LogicErr UseAfterFree)" + unfolding load_def + using assms + by fastforce + +lemma load_cap_on_heap_bounds_fail_1: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Cap" + and "\ is_contiguous_zeros (content m) (nat (offset c)) |t|\<^sub>\" + and "offset c < fst (bounds m)" + shows "load h c t = Error (LogicErr BufferOverrun)" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_cap_on_heap_bounds_fail_2: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Cap" + and "\ is_contiguous_zeros (content m) (nat (offset c)) |t|\<^sub>\" + and "offset c + |t|\<^sub>\ > snd (bounds m)" + shows "load h c t = Error (LogicErr BufferOverrun)" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_cap_on_membytes_fail: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Cap" + and "\ is_contiguous_zeros (content m) (nat (offset c)) |t|\<^sub>\" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + shows "load h c t = Success Undef" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_null_cap_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Cap" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_zeros (content m) (nat (offset c)) |t|\<^sub>\" + shows "load h c t = Success (Cap_v NULL)" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_u8_on_membytes: + 
assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Uint8" + shows "load h c t = Success (Uint8_v (decode_u8_list (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_s8_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Sint8" + shows "load h c t = Success (Sint8_v (decode_s8_list (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_u16_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Uint16" + shows "load h c t = Success (Uint16_v (cat_u16 (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_s16_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Sint16" + shows "load h c t = Success (Sint16_v (cat_s16 (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_u32_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Uint32" + shows "load h c t = Success (Uint32_v (cat_u32 (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_s32_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Sint32" + shows "load h c t = Success (Sint32_v (cat_s32 (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + 
unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_u64_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Uint64" + shows "load h c t = Success (Uint64_v (cat_u64 (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_s64_on_membytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "t = Sint64" + shows "load h c t = Success (Sint64_v (cat_s64 (retrieve_bytes (content m) (nat (offset c)) |t|\<^sub>\)))" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_not_cap_in_mem: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "\ is_cap (content m) (nat (offset c))" + shows "load h c t = Success Undef" + unfolding load_def retrieve_tval_def + using assms + by fastforce + +lemma load_not_contiguous_cap_in_mem: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "is_cap (content m) (nat (offset c))" + and "mc = get_cap (content m) (nat (offset c))" + and "\ is_contiguous_cap (content m) mc (nat (offset c)) |t|\<^sub>\" + and "t \ Uint8" + and "t \ Sint8" + shows "load h c t = Success Undef" + unfolding load_def retrieve_tval_def Let_def + using assms + by (clarsimp split: cctype.split) + +lemma load_cap_frag_u8: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "is_cap (content m) (nat (offset c))" + and "mc = get_cap (content m) (nat (offset c))" + and "t = Uint8" + and "tagval = the (Mapping.lookup (tags m) (cap_offset (nat (offset c))))" + and "tg = (case perm_cap_load c of False \ False | True \ tagval)" + and "nth_frag = of_nth (the (Mapping.lookup (content m) (nat (offset c))))" + shows "load h c t = Success (Cap_v_frag (mem_capability.extend mc \ tag = False \) nth_frag)" + unfolding load_def retrieve_tval_def Let_def + using assms + by (clarsimp simp add: sizeof_def split: cctype.split) + 
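+(* Commentary: the lemma above and the one below capture the capability-fragment case of load.
+   Reading a single byte from a location that currently holds a stored capability does not yield a
+   raw byte; instead, load returns a Cap_v_frag value that pairs the stored capability with the
+   index of the byte that was read, and the tag of the returned fragment is cleared.
+   load_cap_frag_s8 below states the analogous result for signed 8-bit loads. *)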
+ +lemma load_cap_frag_s8: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "is_cap (content m) (nat (offset c))" + and "mc = get_cap (content m) (nat (offset c))" + and "\ is_contiguous_cap (content m) mc (nat (offset c)) |t|\<^sub>\" + and "t = Sint8" + and "tagval = the (Mapping.lookup (tags m) (cap_offset (nat (offset c))))" + and "tg = (case perm_cap_load c of False \ False | True \ tagval)" + and "nth_frag = of_nth (the (Mapping.lookup (content m) (nat (offset c))))" + shows "load h c t = Success (Cap_v_frag (mem_capability.extend mc \ tag = False \) nth_frag)" + unfolding load_def retrieve_tval_def Let_def + using assms + by (clarsimp simp add: sizeof_def split: cctype.split) + +lemma load_bytes_on_capbytes_fail: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "is_cap (content m) (nat (offset c))" + and "mc = get_cap (content m) (nat (offset c))" + and "is_contiguous_cap (content m) mc (nat (offset c)) |t|\<^sub>\" + and "t \ Cap" + and "t \ Uint8" + and "t \ Sint8" + shows "load h c t = Success Undef" + unfolding load_def retrieve_tval_def Let_def + using assms + by (clarsimp split: cctype.split) + +lemma load_cap_on_capbytes: + assumes "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |t|\<^sub>\ \ snd (bounds m)" + and "\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" + and "is_cap (content m) (nat (offset c))" + and "mc = get_cap (content m) (nat (offset c))" + and "is_contiguous_cap (content m) mc (nat (offset c)) |t|\<^sub>\" + and "t = Cap" + and "tagval = the (Mapping.lookup (tags m) (nat (offset c)))" + and "tg = (case perm_cap_load c of False \ False | True \ tagval)" + shows "load h c t = Success (Cap_v (mem_capability.extend mc \ tag = tg \))" + unfolding load_def retrieve_tval_def + using assms + by (clarsimp split: cctype.split) + (smt (verit) assms(5) nat_int nat_less_le nat_mod_distrib of_nat_0_le_iff semiring_1_class.of_nat_0) + +lemma load_cond_hard_cap: + assumes "load h c t = Success ret" + shows "tag c = True" + and "perm_load c = True" + and "offset c + |t|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |t|\<^sub>\ = 0" +proof - + show "tag c = True" + using assms result.distinct(1) + unfolding load_def + by metis +next + show "perm_load c = True" + using assms result.distinct(1) + unfolding load_def + by metis +next + show "offset c + |t|\<^sub>\ \ base c + len c" + using assms result.distinct(1) linorder_not_le + unfolding load_def + by metis +next + show "offset c \ base c" + using assms result.distinct(1) linorder_not_le + unfolding load_def + by metis +next + show "offset c mod |t|\<^sub>\ = 0" + using assms result.distinct(1) + unfolding 
load_def + by metis +qed + +lemma load_cond_bytes: + assumes "load h c t = Success ret" + and "ret \ Undef" + and "\ x. ret \ Cap_v x" + and "\ x n . ret \ Cap_v_frag x n" + shows "\ m. Mapping.lookup (heap_map h) (block_id c) = Some (Map m) + \ offset c \ fst (bounds m) + \ offset c + |t|\<^sub>\ \ snd (bounds m) + \ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\" +proof (cases ret) + case (Cap_v x9) + thus ?thesis + using assms(3) + by blast +next + case (Cap_v_frag x101 x102) + thus ?thesis + using assms(4) + by blast +next + case Undef + thus ?thesis + using assms(2) + by simp +qed (insert assms(1) load_cond_hard_cap[where ?h=h and ?c=c and ?t=t and ?ret=ret], clarsimp, unfold load_def retrieve_tval_def, clarsimp split: option.split_asm t.split_asm, smt (z3) assms(2) assms(3) assms(4) cctype.exhaust cctype.simps(73) cctype.simps(74) cctype.simps(75) cctype.simps(76) cctype.simps(77) cctype.simps(78) cctype.simps(79) cctype.simps(80) cctype.simps(81) result.distinct(1) result.inject(1))+ +(* WARNING: the above takes quite some time to prove the remaining cases *) + +lemma load_cond_cap: + assumes "load h c t = Success ret" + and "\ x. ret = Cap_v x" + shows "\ m mc tagval tg. + Mapping.lookup (heap_map h) (block_id c) = Some (Map m) \ + offset c \ fst (bounds m) \ + offset c + |t|\<^sub>\ \ snd (bounds m) \ + (is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\ \ + is_contiguous_zeros (content m) (nat (offset c)) |t|\<^sub>\ \ + ret = Cap_v NULL) \ + (\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\ \ + is_cap (content m) (nat (offset c)) \ + mc = get_cap (content m) (nat (offset c)) \ + is_contiguous_cap (content m) mc (nat (offset c)) |t|\<^sub>\ \ + t = Cap \ + tagval = the (Mapping.lookup (tags m) (nat (offset c))) \ + tg = (case perm_cap_load c of False \ False | True \ tagval))" + using assms(2) +proof (cases ret) + case (Cap_v ca) + show ?thesis + using assms load_cond_hard_cap[where ?h=h and ?c=c and ?t=t and ?ret=ret] + by (clarsimp, simp only: load_def retrieve_tval_def Let_def, clarsimp split: option.split_asm, clarsimp split: t.split_asm, rename_tac x nth map, subgoal_tac "int (fst (bounds map)) \ int |t|\<^sub>\ * nth \ int |t|\<^sub>\ * nth + int |t|\<^sub>\ \ int (snd (bounds map))", clarsimp split: cctype.split_asm, safe; force?) + (metis ccval.distinct(105) ccval.distinct(107) ccval.inject(9) is_cap.elims(2) linorder_not_le result.distinct(1))+ +qed blast+ + +lemma load_cond_cap_frag: + assumes "load h c t = Success ret" + and "\ x n. ret = Cap_v_frag x n" + shows "\ m mc tagval tg nth_frag. 
+ Mapping.lookup (heap_map h) (block_id c) = Some (Map m) \ + offset c \ fst (bounds m) \ + offset c + |t|\<^sub>\ \ snd (bounds m) \ + (is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\ \ + is_contiguous_zeros (content m) (nat (offset c)) |t|\<^sub>\ \ + ret = Cap_v NULL) \ + (\ is_contiguous_bytes (content m) (nat (offset c)) |t|\<^sub>\ \ + is_cap (content m) (nat (offset c)) \ + mc = get_cap (content m) (nat (offset c)) \ + (t = Uint8 \ t = Sint8) \ + tagval = the (Mapping.lookup (tags m) (nat (offset c))) \ + tg = (case perm_cap_load c of False \ False | True \ tagval) \ + nth_frag = of_nth (the (Mapping.lookup (content m) (nat (offset c)))))" + using assms(2) +proof (cases ret) + case (Cap_v_frag x101 x102) + show ?thesis + using assms load_cond_hard_cap[where ?h=h and ?c=c and ?t=t and ?ret=ret] + by (clarsimp, simp only: load_def retrieve_tval_def Let_def, clarsimp split: option.split_asm, clarsimp split: t.split_asm if_split_asm cctype.split_asm) +qed (simp add: type_uniq assms(2))+ + + +lemma store_null_error: + "store h NULL v = Error (C2Err TagViolation)" + unfolding store_def + by simp + +lemma store_false_tag: + assumes "tag c = False" + shows "store h c v = Error (C2Err TagViolation)" + unfolding store_def + using assms + by presburger + +lemma store_false_perm_store: + assumes "tag c = True" + and "perm_store c = False" + shows "store h c v = Error (C2Err PermitStoreViolation)" + unfolding store_def + using assms + by presburger + +lemma store_cap_false_perm_cap_store: + assumes "tag c = True" + and "perm_store c = True" + and "perm_cap_store c = False" + and "\ cv. v = Cap_v cv \ tag cv = True" + shows "store h c v = Error (C2Err PermitStoreCapViolation)" + unfolding store_def + using assms + by force + +lemma store_cap_false_perm_cap_store_local: + assumes "tag c = True" + and "perm_store c = True" + and "perm_cap_store c = True" + and "perm_cap_store_local c = False" + and "\ cv. v = Cap_v cv \ tag cv = True \ perm_global cv = False" + shows "store h c v = Error (C2Err PermitStoreLocalCapViolation)" + unfolding store_def + using assms + by force + +lemma store_bound_over: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ > base c + len c" + shows "store h c v = Error (C2Err LengthViolation)" + unfolding store_def + using assms + by (clarsimp split: ccval.split) + +lemma store_bound_under: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c < base c" + shows "store h c v = Error (C2Err LengthViolation)" + unfolding store_def + using assms + by (clarsimp split: ccval.split) + +lemma store_misaligned: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ \ 0" + shows "store h c v = Error (C2Err BadAddressViolation)" + unfolding store_def + using assms + by (clarsimp split: ccval.split) + +lemma store_undef_val: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. 
\ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" + and "v = Undef" + shows "store h c v = Error (LogicErr (Unhandled 0))" + unfolding store_def + using assms + by auto + +lemma store_nonexistant_obj: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" + and "v \ Undef" + and "Mapping.lookup (heap_map h) (block_id c) = None" + shows "store h c v = Error (LogicErr MissingResource)" + unfolding store_def + using assms + by (clarsimp split: ccval.split) + +lemma store_store_after_free: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" + and "v \ Undef" + and "Mapping.lookup (heap_map h) (block_id c) = Some Freed" + shows "store h c v = Error (LogicErr UseAfterFree)" + unfolding store_def + using assms + by (clarsimp split: ccval.split) + +lemma store_bound_violated_1: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" + and "v \ Undef" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c < fst (bounds m)" + shows "store h c v = Error (LogicErr BufferOverrun)" + unfolding store_def using assms + by (clarsimp split: ccval.split) + +lemma store_bound_violated_2: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" + and "v \ Undef" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c + |memval_type v|\<^sub>\ > snd (bounds m)" + shows "store h c v = Error (LogicErr BufferOverrun)" + unfolding store_def using assms + by (clarsimp split: ccval.split) + +lemma store_success: + assumes "tag c = True" + and "perm_store c = True" + and "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" + and "v \ Undef" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "offset c \ fst (bounds m)" + and "offset c + |memval_type v|\<^sub>\ \ snd (bounds m)" + shows "\ ret. store h c v = Success ret \ + next_block ret = next_block h \ + heap_map ret = Mapping.update (block_id c) (Map (store_tval m (nat (offset c)) v)) (heap_map h)" + unfolding store_def + using assms + by (clarsimp split: ccval.split) + +lemma store_cond_hard_cap: + assumes "store h c v = Success ret" + shows "tag c = True" + and "perm_store c = True" + and "\ cv. 
\ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + and "offset c + |memval_type v|\<^sub>\ \ base c + len c" + and "offset c \ base c" + and "offset c mod |memval_type v|\<^sub>\ = 0" +proof - + show "tag c = True" + using assms unfolding store_def + by (meson result.simps(4)) +next + show "perm_store c = True" + using assms unfolding store_def + by (meson result.simps(4)) +next + show "\ cv. \ v = Cap_v cv; tag cv \ \ perm_cap_store c \ (perm_cap_store_local c \ perm_global cv)" + using assms unfolding store_def + by (metis (no_types, lifting) assms result.simps(4) store_cap_false_perm_cap_store store_cap_false_perm_cap_store_local) +next + show "offset c + |memval_type v|\<^sub>\ \ base c + len c" + using assms unfolding store_def + by (meson linorder_not_le result.simps(4)) +next + show "offset c \ base c" + using assms unfolding store_def + by (meson linorder_not_le result.simps(4)) +next + show "offset c mod |memval_type v|\<^sub>\ = 0" + using assms unfolding store_def + by (meson linorder_not_le result.simps(4)) +qed + +lemma store_cond_bytes_bounds: + assumes "store h c val = Success h'" + and "\ x. val \ Cap_v x" +shows "\ m. Mapping.lookup (heap_map h) (block_id c) = Some (Map m) + \ offset c \ fst (bounds m) + \ offset c + |memval_type val|\<^sub>\ \ snd (bounds m)" +using store_cond_hard_cap[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] assms + unfolding store_def + by (simp split: ccval.split_asm; simp split: option.split_asm t.split_asm) + (metis nle_le order.strict_iff_not result.distinct(1))+ + +lemma store_cond_bytes: + assumes "store h c val = Success h'" + and "\ x. val \ Cap_v x" + shows "\ m. Mapping.lookup (heap_map h') (block_id c) = Some (Map m) + \ offset c \ fst (bounds m) + \ offset c + |memval_type val|\<^sub>\ \ snd (bounds m)" + using store_cond_hard_cap[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] assms + unfolding store_def + by (simp split: ccval.split_asm; simp split: option.split_asm t.split_asm) + (auto split: if_split_asm simp add: store_tval_def) + +lemma store_cond_cap_bounds: + assumes "store h c val = Success h'" + and "val = Cap_v x" +shows "\ m. 
Mapping.lookup (heap_map h) (block_id c) = Some (Map m) + \ offset c \ fst (bounds m) + \ offset c + |memval_type val|\<^sub>\ \ snd (bounds m)" +proof - + have cap_perms: "\(\ perm_cap_store c \ tag x) \ \(\ perm_cap_store_local c \ tag x \ \ perm_global x)" + using store_cond_hard_cap(1)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(2)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(3)[where ?h=h and ?c=c and ?v=val and ?ret=h' and ?cv=x, OF assms(1)] + store_cond_hard_cap(4)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(5)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(6)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + assms + unfolding store_def + by blast + thus ?thesis + using store_cond_hard_cap(1)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(2)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(3)[where ?h=h and ?c=c and ?v=val and ?ret=h' and ?cv=x, OF assms(1)] + store_cond_hard_cap(4)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(5)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(6)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + assms + by (simp split: ccval.split option.split_asm t.split_asm add: store_def cap_perms) + (metis linorder_not_le result.distinct(1)) +qed + +lemma store_cond_cap: + assumes "store h c val = Success h'" + and "val = Cap_v v" + shows "\ m. Mapping.lookup (heap_map h') (block_id c) = Some (Map m) + \ offset c \ fst (bounds m) + \ offset c + |memval_type val|\<^sub>\ \ snd (bounds m)" +proof - + have cap_perms: "\(\ perm_cap_store c \ tag v) \ \(\ perm_cap_store_local c \ tag v \ \ perm_global v)" + using store_cond_hard_cap(1)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(2)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(3)[where ?h=h and ?c=c and ?v=val and ?ret=h' and ?cv=v, OF assms(1)] + store_cond_hard_cap(4)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(5)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(6)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + assms + by blast + show ?thesis + using store_cond_hard_cap(1)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(2)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(3)[where ?h=h and ?c=c and ?v=val and ?ret=h' and ?cv=v, OF assms(1)] + store_cond_hard_cap(4)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(5)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_hard_cap(6)[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + assms + by (clarsimp split: ccval.split option.split_asm t.split_asm simp add: store_def cap_perms) + (smt (verit) Mapping.lookup_update assms(1) result.distinct(1) result.sel(1) store_bound_violated_2 store_cond_hard_cap(3) store_def store_success store_tval_disjoint_bounds) +qed + +lemma store_cond: + assumes "store h c val = Success h'" + shows "\ m. 
Mapping.lookup (heap_map h') (block_id c) = Some (Map m) + \ offset c \ fst (bounds m) + \ offset c + |memval_type val|\<^sub>\ \ snd (bounds m)" + using store_cond_bytes[OF assms(1)] store_cond_cap[OF assms(1)] + by blast + +lemma store_bounds_preserved: + assumes "store h c v = Success h'" + and "Mapping.lookup (heap_map h) (block_id c) = Some (Map m)" + and "Mapping.lookup (heap_map h') (block_id c) = Some (Map m')" + shows "bounds m = bounds m'" + using assms store_cond_hard_cap[OF assms(1)] +proof (cases v) + case (Uint8_v x1) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Sint8_v x2) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Uint16_v x3) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Sint16_v x4) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Uint32_v x5) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Sint32_v x6) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Uint64_v x7) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Sint64_v x8) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case (Cap_v x9) + moreover hence "\(\ perm_cap_store c \ tag x9) \ \(\ perm_cap_store_local c \ tag x9 \ \ perm_global x9)" + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by blast + ultimately show ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (simp split: if_split_asm add: store_tval_def; auto) +next + case (Cap_v_frag x101 x102) + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by (clarsimp split: if_split_asm) (blast intro: store_tval_disjoint_bounds) +next + case Undef + thus ?thesis + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def + by force +qed + +lemma store_cond_cap_frag: + assumes "store h c val = Success h'" + and "val = Cap_v_frag v n" + shows "\ m. 
Mapping.lookup (heap_map h') (block_id c) = Some (Map m)"
+ using store_cond_hard_cap[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] assms
+ unfolding store_def
+ by (simp split: ccval.split_asm; simp split: option.split_asm t.split_asm)
+ (metis Mapping.lookup_update heap.select_convs(2) heap.surjective heap.update_convs(2) result.distinct(1) result.sel(1))
+
+lemma store_undef_false:
+ assumes "store h c Undef = Success ret"
+ shows "False"
+ using store_cond_hard_cap[where ?h=h and ?c=c and ?v="Undef" and ?ret=ret, OF assms] assms
+ unfolding store_def
+ by simp
+
+lemma load_after_alloc_size_fail:
+ assumes "alloc h c s = Success (h', cap)"
+ and "|t|\<^sub>\ > s"
+ shows "load h' cap t = Error (C2Err LengthViolation)"
+proof -
+ have "tag cap = True"
+ using assms alloc_def
+ by auto
+ moreover have "perm_load cap = True"
+ using assms alloc_def
+ by force
+ moreover have "base cap = 0"
+ using assms alloc_def
+ by fastforce
+ moreover have "len cap = s"
+ using assms alloc_def
+ by auto
+ ultimately show ?thesis
+ using assms load_def by auto
+qed
+
+
+subsubsection \Good Variable Laws\
+text \The properties defined above are intermediate results. The properties that govern
+ correctness during execution are the \textit{good variable} laws. The most important ones are:
+ \begin{itemize}
+ \item load after alloc
+ \item load after free
+ \item load after store
+ \end{itemize}
+ The \textit{load after store} case requires particular attention. For disjoint cases within
+ the same block (refer to load\_after\_store\_disjoint\_2 and \\ load\_after\_store\_disjoint\_3),
+ extra attention must be paid to the tagged memory, where the updated tag may lie at a location
+ \textit{before} the one given by the capability offset. This is why
+ the lemma retrieve\_stored\_tval\_disjoint\_2 requires an additional constraint where
+ capability values are aligned. Of course, this is not a problem for
+ load\_after\_store\_disjoint\_3 since the capability conditions state that offsets must be aligned.
+
+ For the compatible case, as stated in the paper~\cite{park_2022}, extra care must be taken in the cases where
+ we load capabilities and capability fragments. For this, we have three cases:
+ \begin{itemize}
+ \item load\_after\_store\_prim
+ \item load\_after\_store\_cap
+ \item load\_after\_store\_cap\_frag
+ \end{itemize}
+ The load\_after\_store\_prim case returns the exact value that was stored.
+ The load\_after\_store\_cap case returns the stored capability with the tag bit dependent on
+ the permissions of the capability provided to load. Finally, the load\_after\_store\_cap\_frag
+ case returns the capability fragment with the tag bit falsified.
+
+ Finally, we note that there are slight differences from the CompCert version of the Good Variable
+ Law due to the differences in the type and value system. 
Thus, there are cases in the + CompCert version that are trivial in our case.\ + +theorem load_after_alloc_1: + assumes "alloc h c s = Success (h', cap)" + and "|t|\<^sub>\ \ s" + shows "load h' cap t = Success Undef" +proof - + let ?m = "\bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\" + have "tag cap = True" + using assms(1) alloc_def + by fastforce + moreover have "perm_load cap = True" + using assms(1) alloc_def + by fastforce + moreover have "offset cap + |t|\<^sub>\ \ base cap + len cap" + using assms alloc_def + by fastforce + moreover have "offset cap \ base cap" + using assms alloc_def + by fastforce + moreover have "offset cap mod |t|\<^sub>\ = 0" + using assms alloc_def + by fastforce + moreover have "Mapping.lookup (heap_map h') (block_id cap) = Some (Map ?m)" + using assms alloc_def + by fastforce + moreover have "offset cap \ fst (bounds ?m)" + using assms alloc_def + by fastforce + moreover have "offset cap + |t|\<^sub>\ \ snd (bounds ?m)" + using assms alloc_def + by fastforce + moreover have "\ is_contiguous_bytes (content ?m) (nat (offset cap)) |t|\<^sub>\" + proof - + have "\ n. |t|\<^sub>\ = Suc n" + using not0_implies_Suc sizeof_nonzero + by force + thus ?thesis + using assms alloc_def + by fastforce + qed + moreover have "\ is_cap (content ?m) (nat (offset cap))" + by simp + ultimately show ?thesis + using load_not_cap_in_mem + by presburger +qed + +theorem load_after_alloc_2: + assumes "alloc h c s = Success (h', cap)" + and "|t|\<^sub>\ \ s" + and "block_id cap \ block_id cap'" + shows "load h' cap' t = load h cap' t" + using assms unfolding alloc_def load_def + by force + +theorem load_after_free_1: + assumes "free h c = Success (h', cap)" + shows "load h cap t = Error (C2Err TagViolation)" +proof - + consider (null) "c = NULL" | (non_null) "c \ NULL" by blast + then show ?thesis + proof (cases) + case null + moreover hence "c = cap" + using assms free_null + by force + ultimately show ?thesis + using load_null_error assms + by blast + next + case non_null + hence "cap = c \ tag := False \" + using assms free_cond(6)[where ?h=h and ?c=c and ?h'=h' and ?cap=cap] + by presburger + moreover hence "tag cap = False" + using assms + by force + ultimately show ?thesis using load_false_tag + by blast + qed +qed + +theorem load_after_free_2: + assumes "free h c = Success (h', cap)" + and "block_id cap \ block_id cap'" + shows "load h cap' t = load h' cap' t" + using assms free_cond[OF assms(1)] + unfolding free_def load_def + by fastforce + +theorem load_after_store_disjoint_1: + assumes "store h c val = Success h'" + and "block_id c \ block_id c'" + shows "load h c' t = load h' c' t" + using assms store_cond_hard_cap[OF assms(1)] + unfolding store_def load_def + by (clarsimp split: ccval.split_asm option.split_asm t.split_asm if_split_asm) + +theorem load_after_store_disjoint_2: + assumes "store h c v = Success h'" + and "offset c' + |t|\<^sub>\ \ offset c" + shows "load h' c' t = load h c' t" + using assms store_cond[OF assms(1)] store_cond_hard_cap[OF assms(1)] + apply (clarsimp simp add: store_def load_def split: if_split_asm option.split t.split option.split_asm t.split_asm) + apply (intro conjI impI) + apply (smt (z3) lookup_update' option.discI) + apply (rule allI, rule conjI, rule impI) + apply (simp add: lookup_update') + apply (rule allI, rule conjI, rule impI) + apply (smt (z3) Mapping.lookup_update Mapping.lookup_update_neq option.distinct(1) option.inject store_tval_disjoint_bounds t.distinct(1) t.inject) + apply (rule conjI, rule impI) + apply 
(smt (z3) Mapping.lookup_update Mapping.lookup_update_neq option.distinct(1) option.sel store_tval_disjoint_bounds t.distinct(1) t.sel) + apply (intro impI conjI allI) + apply (metis (no_types, lifting) Mapping.lookup_update_neq option.distinct(1)) + apply (metis (no_types, lifting) lookup_update' option.inject t.discI) + apply (smt (z3) Mapping.lookup_update Mapping.lookup_update_neq option.inject store_tval_disjoint_bounds t.inject) + apply (metis (no_types, lifting) lookup_update' option.sel store_tval_disjoint_bounds t.sel) + using retrieve_stored_tval_disjoint_1 + apply (smt (z3) int_nat_eq lookup_update' of_nat_0_le_iff of_nat_add of_nat_le_iff option.sel t.sel) + done + +theorem load_after_store_disjoint_3: + assumes "store h c v = Success h'" + and "offset c + |memval_type v|\<^sub>\ \ offset c'" + shows "load h' c' t = load h c' t" + using assms store_cond[OF assms(1)] store_cond_hard_cap[OF assms(1)] + apply (clarsimp simp add: store_def load_def split: if_split_asm option.split t.split option.split_asm t.split_asm) + apply (rule conjI) + apply (smt (z3) lookup_update' option.discI) + apply (rule allI, rule conjI) + apply (simp add: lookup_update') + apply (rule allI, rule conjI) + apply (smt (z3) Mapping.lookup_update Mapping.lookup_update_neq option.distinct(1) option.inject store_tval_disjoint_bounds t.distinct(1) t.inject) + apply (rule conjI) + apply (smt (z3) Mapping.lookup_update Mapping.lookup_update_neq option.distinct(1) option.sel store_tval_disjoint_bounds t.distinct(1) t.sel) + apply (intro impI conjI allI) + apply (metis (no_types, lifting) Mapping.lookup_update_neq option.distinct(1)) + apply (metis (no_types, lifting) lookup_update' option.inject t.discI) + apply (smt (z3) Mapping.lookup_update Mapping.lookup_update_neq option.inject store_tval_disjoint_bounds t.inject) + apply (metis (no_types, lifting) lookup_update' option.sel store_tval_disjoint_bounds t.sel) + using retrieve_stored_tval_disjoint_2 + apply (smt (z3) int_nat_eq lookup_update' memval_type.simps nat_int nat_mod_distrib of_nat_add of_nat_le_0_iff of_nat_le_iff option.sel t.sel) + done + +theorem load_after_store_prim: + assumes "store h c val = Success h'" + and "\ v. val \ Cap_v v" + and "\ v n. 
val \ Cap_v_frag v n" + and "perm_load c = True" + shows "load h' c (memval_type val) = Success val" + using assms(1) store_cond_hard_cap[where ?h=h and ?c=c and ?v=val and ?ret=h', OF assms(1)] + store_cond_bytes[OF assms(1) assms(2)] retrieve_stored_tval_any_perm[OF _ _ assms(3)] + by (clarsimp simp add: store_def load_def split: if_split_asm option.split_asm t.split_asm ccval.split) + (safe; clarsimp simp add: assms) + +theorem load_after_store_cap: + assumes "store h c (Cap_v v) = Success h'" + and "perm_load c = True" + shows "load h' c (memval_type (Cap_v v)) = Success (Cap_v (v \ tag := case perm_cap_load c of False => False | True => tag v \))" + using store_cond_hard_cap(1)[where ?h=h and ?c=c and ?v="Cap_v v" and ?ret=h', OF assms(1)] + store_cond_hard_cap(2)[where ?h=h and ?c=c and ?v="Cap_v v" and ?ret=h', OF assms(1)] + store_cond_hard_cap(3)[where ?h=h and ?c=c and ?v="Cap_v v" and ?ret=h' and ?cv=v, OF assms(1) refl] + store_cond_hard_cap(4)[where ?h=h and ?c=c and ?v="Cap_v v" and ?ret=h', OF assms(1)] + store_cond_hard_cap(5)[where ?h=h and ?c=c and ?v="Cap_v v" and ?ret=h', OF assms(1)] + store_cond_hard_cap(6)[where ?h=h and ?c=c and ?v="Cap_v v" and ?ret=h', OF assms(1)] + assms + retrieve_stored_tval_cap[where ?val="Cap_v v" and ?v=v, OF refl] + retrieve_stored_tval_cap_no_perm_cap_load[where ?val="Cap_v v" and ?v=v, OF refl] + apply (clarsimp, simp split: ccval.split; safe; clarsimp) + apply (simp only: load_def; clarsimp split: option.split) + apply (simp add: sizeof_def split: t.split, safe) + subgoal + by (blast dest: store_cond_cap) + subgoal + by (metis option.sel store_cond_cap t.distinct(1)) + subgoal + apply (simp add: sizeof_def store_def) + apply (subgoal_tac "\(\ perm_cap_store c \ tag v) \ \(\ perm_cap_store_local c \ tag v \ \ perm_global v)"; presburger?) + apply (clarsimp split: if_split_asm) + apply (simp split: option.split_asm t.split_asm if_split_asm) + apply clarsimp + apply (smt (verit, best) \offset c + int |memval_type (Cap_v v)|\<^sub>\ \ int (base c + len c)\ \offset c mod int |memval_type (Cap_v v)|\<^sub>\ = 0\ assms(1) ccval.distinct(107) lookup_update' option.sel result.inject(1) result.simps(4) store_bound_violated_2 store_cond_cap store_cond_hard_cap(3) store_success t.sel) + done + subgoal + apply (simp add: sizeof_def store_def) + apply (subgoal_tac "\(\ perm_cap_store c \ tag v) \ \(\ perm_cap_store_local c \ tag v \ \ perm_global v)"; presburger?) + apply (clarsimp split: if_split_asm) + apply (simp split: option.split_asm t.split_asm if_split_asm) + apply (clarsimp simp add: sizeof_def) + apply (smt (verit, ccfv_SIG) Mapping.lookup_update assms(1) heap.select_convs(2) heap.surjective heap.update_convs(2) store_bounds_preserved) + done + subgoal + apply (simp add: store_def) + apply (subgoal_tac "\(\ perm_cap_store c \ tag v) \ \(\ perm_cap_store_local c \ tag v \ \ perm_global v)"; presburger?) 
+ apply (clarsimp split: if_split_asm option.split_asm t.split_asm) + apply (cases "perm_cap_load c"; clarsimp) + done + done + +theorem load_after_store_cap_frag: + assumes "store h c (Cap_v_frag c' n) = Success h'" + and "perm_load c" + shows "load h' c (memval_type (Cap_v_frag c' n)) = Success (Cap_v_frag (c' \ tag := False \) n)" + using assms(1) store_cond_hard_cap[where ?h=h and ?c=c and ?v="Cap_v_frag c' n" and ?ret=h', OF assms(1)] + retrieve_stored_tval_cap_frag[where ?val="Cap_v_frag c' n" and ?c=c' and ?n=n and ?off="nat (offset c)" and ?b="(perm_cap_load c)", OF refl, simplified] + unfolding store_def + apply (simp split: option.split_asm t.split_asm option.split t.split add: load_def assms(2), safe; simp split: if_split_asm) + using assms(1) store_cond_cap_frag apply blast + apply (metis assms(1) option.sel store_cond_cap_frag t.distinct(1)) + apply (metis assms(1) store_bounds_preserved) + apply (metis assms(1) store_bounds_preserved) + apply (metis Mapping.lookup_update heap.select_convs(2) heap.surjective heap.update_convs(2) option.sel t.sel) + done + +subsubsection \Miscellaneous Laws\ + +lemma free_after_alloc: + assumes "alloc h c s = Success (h', cap)" + shows "\! ret. free h' cap = Success ret" + using alloc_def assms free_non_null_correct alloc_no_null_ret + by force + +lemma store_after_alloc: + assumes "alloc h True s = Success (h', cap)" + and "|memval_type v|\<^sub>\ \ s" + and "v \ Undef" + shows "\ h''. store h' cap v = Success h''" + proof - + let ?m = "\bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\" + have "tag cap = True" + using assms(1) alloc_def + by fastforce + moreover have "perm_store cap = True" + using assms(1) alloc_def + by fastforce + moreover have "\cv. \v = Cap_v cv; tag cv\ \ perm_cap_store cap \ (perm_cap_store_local cap \ perm_global cv)" + proof - + have "\ (case v of Cap_v cv \ \ perm_cap_store cap \ tag cv | _ \ False)" + using assms unfolding alloc_def + by (simp split: ccval.split, force) + moreover have "\ (case v of Cap_v cv \ \ perm_cap_store_local cap \ tag cv \ \ perm_global cv | _ \ False)" + using assms unfolding alloc_def + by (simp split: ccval.split, force) + ultimately show "\cv. \v = Cap_v cv; tag cv\ \ perm_cap_store cap \ (perm_cap_store_local cap \ perm_global cv)" + by force + qed + moreover have "offset cap + |memval_type v|\<^sub>\ \ base cap + len cap" + using assms alloc_def + by fastforce + moreover have "offset cap \ base cap" + using assms alloc_def + by fastforce + moreover have "offset cap mod |memval_type v|\<^sub>\ = 0" + using assms alloc_def + by fastforce + moreover have "Mapping.lookup (heap_map h') (block_id cap) = Some (Map ?m)" + using assms alloc_def + by fastforce + moreover have "offset cap \ fst (bounds ?m)" + using assms alloc_def + by fastforce + moreover have "offset cap + |memval_type v|\<^sub>\ \ snd (bounds ?m)" + using assms alloc_def + by fastforce + ultimately show ?thesis + using store_success[where ?c=cap and ?v=v and ?m="\bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\" and ?h=h'] assms(3) + by simp (blast) +qed + +lemma store_after_alloc_gen: + assumes "alloc h True s = Success (h', cap)" + and "|memval_type v|\<^sub>\ \ s" + and "v \ Undef" + and "n mod |memval_type v|\<^sub>\ = 0" + and "offset cap + n + |memval_type v|\<^sub>\ \ base cap + len cap" + shows "\ h''. 
store h' (cap \ offset := offset cap + n \) v = Success h''" + proof - + let ?m = "\bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\" + have "tag cap = True" + using assms(1) alloc_def + by fastforce + moreover have "perm_store (cap \ offset := offset cap + n \) = True" + using assms(1) alloc_def + by fastforce + moreover have "\cv. \v = Cap_v cv; tag cv\ \ perm_cap_store (cap \ offset := offset cap + n \) \ (perm_cap_store_local (cap \ offset := offset cap + n \) \ perm_global cv)" + proof - + have "\ (case v of Cap_v cv \ \ perm_cap_store (cap \ offset := offset cap + n \) \ tag cv | _ \ False)" + using assms unfolding alloc_def + by (simp split: ccval.split, force) + moreover have "\ (case v of Cap_v cv \ \ perm_cap_store_local (cap \ offset := offset cap + n \) \ tag cv \ \ perm_global cv | _ \ False)" + using assms unfolding alloc_def + by (simp split: ccval.split, force) + ultimately show "\cv. \v = Cap_v cv; tag cv\ \ perm_cap_store (cap \ offset := offset cap + n \) \ (perm_cap_store_local (cap \ offset := offset cap + n \) \ perm_global cv)" + by force + qed + moreover have "offset (cap \ offset := offset cap + n \) + |memval_type v|\<^sub>\ \ base (cap \ offset := offset cap + n \) + len (cap \ offset := offset cap + n \)" + using assms alloc_def + by fastforce + moreover have "offset (cap \ offset := offset cap + n \) \ base (cap \ offset := offset cap + n \)" + using assms alloc_def + by fastforce + moreover have "offset (cap \ offset := offset cap + n \) mod |memval_type v|\<^sub>\ = 0" + using assms alloc_def + by fastforce + moreover have "Mapping.lookup (heap_map h') (block_id (cap \ offset := offset cap + n \)) = Some (Map ?m)" + using assms alloc_def + by fastforce + moreover have "offset (cap \ offset := offset cap + n \) \ fst (bounds ?m)" + using assms alloc_def + by fastforce + moreover have "offset (cap \ offset := offset cap + n \) + |memval_type v|\<^sub>\ \ snd (bounds ?m)" + using assms alloc_def + by fastforce + ultimately show ?thesis + using store_success[where ?c="(cap \ offset := offset cap + n \)" and ?v=v and ?m="\bounds = (0, s), content = Mapping.empty, tags = Mapping.empty\" and ?h=h'] assms(3) + by simp (blast) +qed + +subsection \Well-formedness of actions\ +lemma alloc_wellformed: + assumes "\\<^sub>\(heap_map h)" + and "alloc h True s = Success (h', cap)" + shows "\\<^sub>\(heap_map h')" + using assms + by (simp add: alloc_def wellformed_def, safe, erule_tac x=b in allE, erule_tac x=obj in allE, simp) + (smt (verit, best) Mapping.keys_empty Mapping.lookup_update Mapping.lookup_update_neq Set.filter_def bot_nat_0.not_eq_extremum empty_iff mem_Collect_eq object.select_convs(3) option.sel t.sel zero_less_diff) + +lemma free_wellformed: + assumes "\\<^sub>\(heap_map h)" + and "free h cap = Success (h', cap')" + shows "\\<^sub>\(heap_map h')" +proof - + consider (null) "cap = NULL" | (non_null) "cap \ NULL" by blast + then show ?thesis + proof (cases) + case null + show ?thesis + using free_null assms null + by simp + next + case non_null + show ?thesis + by (smt (z3) Mapping.lookup_update_neq assms(1) assms(2) free_cond_non_null(3) free_cond_non_null(4) free_cond_non_null(5) free_def free_non_null_correct fst_conv heap.select_convs(2) heap.surjective heap.update_convs(2) option.sel result.sel(1) t.discI wellformed_def) + qed +qed + +lemma load_wellformed: + assumes "\\<^sub>\(heap_map h)" + and "load h c t = Success v" + shows "\\<^sub>\(heap_map h)" + using assms(1) + by assumption + +lemma store_wellformed: + assumes "\\<^sub>\(heap_map 
h)" + and "store h c v = Success h'" + shows "\\<^sub>\(heap_map h')" + using store_cond_hard_cap(1)[where ?h=h and ?c=c and ?v=v and ?ret=h', OF assms(2)] + store_cond_hard_cap(2)[where ?h=h and ?c=c and ?v=v and ?ret=h', OF assms(2)] + store_cond_hard_cap(4)[where ?h=h and ?c=c and ?v=v and ?ret=h', OF assms(2)] + store_cond_hard_cap(5)[where ?h=h and ?c=c and ?v=v and ?ret=h', OF assms(2)] + store_cond_hard_cap(6)[where ?h=h and ?c=c and ?v=v and ?ret=h', OF assms(2)] + assms + apply (simp add: store_def wellformed_def split: option.split_asm t.split_asm if_split_asm del: memval_type.simps) + apply safe + apply (clarsimp simp del: memval_type.simps) + apply (erule_tac x="block_id c" in allE) + apply (rename_tac map nth block obj x) + apply (erule_tac x=map in allE) + apply (clarsimp simp del: memval_type.simps) + apply (subgoal_tac "Set.filter (\x. 0 < x mod |Cap|\<^sub>\) (Mapping.keys (tags (store_tval map (nat (int |memval_type v|\<^sub>\ * nth)) v))) = {}") + apply (smt (verit, best) Mapping.lookup_update Mapping.lookup_update_neq Set.member_filter assms(1) emptyE option.sel rel_simps(70) t.sel wellformed_def) + apply (simp add: store_tval_def del: memval_type.simps split: ccval.split_asm; fastforce) + done (* WARNING: The last line takes a VERY LONG TIME to process, at least 30s *) + +subsection \memcpy formalisation\ +text \We also formalise memcpy in Isabelle/HOL. While other higher level operations are defined + in the GIL level, we formalise memcpy here and prove basic properties. + memcpy works as follows: we define a mutually recursive function memcpy\_prim and memcpy\_cap. + memcpy\_prim attempts byte copies, where tags are invalidated, and memcpy\_cap attempts + capability copies. memcpy initially calls memcpy\_cap. If either load or store fails, perhaps + due to misalignment or other issues, memcpy\_prim will be called instead. If memcpy\_prim + also fails from load or store, the operation will fail.\ +function memcpy_prim :: "heap \ cap \ cap \ nat \ heap result" + and memcpy_cap :: "heap \ cap \ cap \ nat \ heap result" + where + "memcpy_prim h _ _ 0 = Success h" +| "memcpy_cap h _ _ 0 = Success h" +| "memcpy_prim h dst src (Suc n) = + (let x = load h src Uint8 in + if \ is_Success x then Error (err x) else + let xs = res x in + if xs = Undef then Error (LogicErr (Unhandled 0)) else + let y = store h dst xs in + if \ is_Success y then Error (err y) else + let ys = res y in + memcpy_cap ys (dst \ offset := (offset dst + 1) \) (src \ offset := (offset src) + 1\) n)" +| "memcpy_cap h dst src (Suc n) = + (if (Suc n) < |Cap|\<^sub>\ then memcpy_prim h dst src (Suc n) + else + let x = load h src Cap in + if \ is_Success x then memcpy_prim h dst src (Suc n) else + let xs = res x in + if xs = Undef then memcpy_prim h dst src (Suc n) else + let y = store h dst xs in + if \ is_Success y then memcpy_prim h dst src (Suc n) else + let ys = res y in + memcpy_cap ys (dst \ offset := (offset dst + |Cap|\<^sub>\)\) (src \ offset := (offset src + |Cap|\<^sub>\)\) (Suc n - |Cap|\<^sub>\))" + by (blast | metis old.nat.exhaust prod_cases3 sumE)+ + +text \We prove that the mutually recursive function terminates.\ +context + notes sizeof_def[simp] +begin +termination by size_change +end + +text \This is the definition of memcpy. We also check that src and dst do not overlap. 
\ +definition memcpy :: "heap \ cap \ cap \ nat \ heap result" + where + "memcpy h dst src n \ + if n = 0 then + Success h + else if block_id dst = block_id src \ + ((offset src \ offset dst \ offset src < offset dst + n) \ + (offset dst \ offset src \ offset dst < offset src + n)) then + Error (LogicErr (Unhandled 0)) + else memcpy_cap h dst src n" + +lemma memcpy_rec_wellformed: + assumes "\\<^sub>\(heap_map h)" + shows "memcpy_prim h dst src n = Success h' \ \\<^sub>\(heap_map h')" + and "memcpy_cap h dst src n = Success h' \ \\<^sub>\(heap_map h')" + using assms +proof(induct h dst src n and h dst src n rule: memcpy_prim_memcpy_cap.induct) + case (1 h uu uv) + then show ?case + by force +next + case (2 h uw ux) + then show ?case + by force +next + case (3 h dst src n) + then show ?case + by clarsimp (smt (verit) result.disc(1) result.distinct(1) result.expand result.sel(1) store_wellformed) +next + case (4 h dst src n) + then show ?case + by clarsimp (smt (verit, best) "4.hyps"(1) "4.hyps"(2) "4.hyps"(3) "4.hyps"(4) result.collapse(1) store_wellformed) +qed + +text \We also prove that memcpy preserves well-formedness.\ +lemma memcpy_wellformed: + assumes "\\<^sub>\(heap_map h)" + and "memcpy h dst src n = Success h'" + shows "\\<^sub>\(heap_map h')" + using assms unfolding memcpy_def + by (metis memcpy_rec_wellformed(2) result.distinct(1) result.sel(1)) + +lemma memcpy_cond: + assumes "memcpy h dst src n = Success h'" + shows "n > 0 \ \ (block_id dst = block_id src \ + ((offset src \ offset dst \ offset src < offset dst + n) \ + (offset dst \ offset src \ offset dst < offset src + n)))" + using assms unfolding memcpy_def + by force + +section \Miscellaneous Definitions\ +text \The following are used for catching memory leaks in Gillian.\ +definition get_block_size :: "heap \ block \ nat option" + where + "get_block_size h b \ + let ex = Mapping.lookup (heap_map h) b in + (case ex of None \ None | Some m \ + (case m of Freed \ None | _ \ Some (snd (bounds (the_map m)))))" + +primrec get_memory_leak_size :: "heap \ nat \ nat" + where + "get_memory_leak_size _ 0 = 0" +| "get_memory_leak_size h (Suc n) = get_memory_leak_size h n + + (case get_block_size h (integer_of_nat (Suc n)) of + None \ 0 + | Some n \ n)" + +primrec get_unfreed_blocks :: "heap \ nat \ block list" + where + "get_unfreed_blocks _ 0 = []" +| "get_unfreed_blocks h (Suc n) = + (let ex = Mapping.lookup (heap_map h) (integer_of_nat (Suc n)) in + (case ex of None \ get_unfreed_blocks h n | Some m \ + (case m of Freed \ get_unfreed_blocks h n | _ \ integer_of_nat (Suc n) # get_unfreed_blocks h n)))" + +end diff --git a/thys/CHERI-C_Memory_Model/CHERI_C_Global_Environment.thy b/thys/CHERI-C_Memory_Model/CHERI_C_Global_Environment.thy new file mode 100644 --- /dev/null +++ b/thys/CHERI-C_Memory_Model/CHERI_C_Global_Environment.thy @@ -0,0 +1,80 @@ +theory CHERI_C_Global_Environment + imports CHERI_C_Concrete_Memory_Model +begin + +text \Here, we define the global environment. The Global Environment does the following: + \begin{enumerate} + \item Creates a mapping from variables to locations (or rather, the capabilities) + \item Sets global variables by invoking alloc. 
These variables cannot be freed by design + \end{enumerate}\ + +type_synonym genv = "(String.literal, cap) mapping" + +definition alloc_glob_var :: "heap \ bool \ nat \ (heap \ cap) result" + where + "alloc_glob_var h c s \ + let h' = alloc h c s in + Success (fst (res h'), snd (res h') \ perm_global := True \)" + +definition set_glob_var :: "heap \ bool \ nat \ String.literal \ genv \ (heap \ cap \ genv) result" + where + "set_glob_var h c s v g \ + let (h', cap) = res (alloc_glob_var h c s) in + let g' = Mapping.update v cap g in + Success (h', cap, g')" + +lemma set_glob_var_glob_bit: + assumes "alloc_glob_var h c s = Success (h', cap)" + shows "perm_global cap" + using assms + unfolding alloc_glob_var_def alloc_def + by fastforce + +lemma set_glob_var_glob_bit_lift: + assumes "set_glob_var h c s v g = Success (h', cap, g')" + shows "perm_global cap" + using assms + unfolding alloc_glob_var_def set_glob_var_def alloc_def + by fastforce + +(* It is worth noting that other operations such as load and store still work. *) +(* However, free should only work on values returned by m?alloc. *) +lemma free_fails_on_glob_var: + assumes "alloc_glob_var h c s = Success (h', cap)" + shows "free h' cap = Error (LogicErr (Unhandled 0))" + by (metis alloc_updated_heap_and_cap assms capability.select_convs(1) free_global_cap + mem_capability.select_convs(10) mem_capability.simps(21) null_capability_def result.sel(1) + alloc_glob_var_def snd_conv zero_mem_capability_ext_def) + +lemma free_fails_on_glob_lift: + assumes "set_glob_var h c s v g = Success (h', cap, g')" + shows "free h' cap = Error (LogicErr (Unhandled 0))" +proof - + have res: "alloc_glob_var h c s = Success (h', cap)" + using assms + unfolding set_glob_var_def alloc_glob_var_def alloc_def + by fastforce + show ?thesis using free_fails_on_glob_var[OF res] + by blast +qed + +section \Code Generation\ +text \Here we generate an OCaml instance of the memory model that will be used for Gillian.\ +export_code + null_capability init_heap next_block get_memory_leak_size get_unfreed_blocks (* Utility Functions *) + alloc free load store (* Memory Actions *) + memcpy + set_glob_var (* Global Environment Actions *) + word8_of_integer word16_of_integer word32_of_integer word64_of_integer (* Value Conversions *) + integer_of_word8 integer_of_word16 integer_of_word32 integer_of_word64 + sword8_of_integer sword16_of_integer sword32_of_integer sword64_of_integer + integer_of_sword8 integer_of_sword16 integer_of_sword32 integer_of_sword64 + integer_of_nat cast_val + C2Err LogicErr (* Error Types *) + TagViolation PermitLoadViolation PermitStoreViolation PermitStoreCapViolation + PermitStoreLocalCapViolation LengthViolation BadAddressViolation + UseAfterFree BufferOverrun MissingResource WrongMemVal MemoryNotFreed Unhandled + in OCaml + file_prefix CHERI_C_Memory_Model + +end \ No newline at end of file diff --git a/thys/CHERI-C_Memory_Model/Preliminary_Library.thy b/thys/CHERI-C_Memory_Model/Preliminary_Library.thy new file mode 100644 --- /dev/null +++ b/thys/CHERI-C_Memory_Model/Preliminary_Library.thy @@ -0,0 +1,652 @@ +theory Preliminary_Library + imports Main "HOL-Library.Word" "Word_Lib.Word_Lib_Sumo" "HOL-Library.Countable" +begin + +section \Preliminary Theories\ +text \In this subsection, we provide the type and value system used by the CHERI-C Memory Model. + We also provide proofs for the conversion between large words (i.e. bits) and a list of bytes. 
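+ For example, a 16-bit value occupies two bytes and a 64-bit value occupies eight bytes, so storing such a
+ value first splits it into the corresponding list of bytes, and loading concatenates those bytes back.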
+ For primitive bytes that are not $U8_\tau$ or $S8_\tau$, we need to be able to convert between + their normal representation and list of bytes so that storing values work as intended. + The high-level detail is given in the paper~\cite{park_2022}.\ +subsection \Types and Values\ +text \We first formalise the capability type. We first define \textit{memory capabilities} as a record, + then we define \textit{tagged capabilities} by extending the record. We state the class + comp\_countable for future work, but countable is sufficient for the block\_id type. + For the permissions, we present only those used by the memory model.\ +class comp_countable = countable + zero + ord + +record ('a :: comp_countable) mem_capability = + block_id :: "'a" (* block_id *) + offset :: int (* offset/address *) + base :: nat (* metadata *) + len :: nat + perm_load :: bool + perm_cap_load :: bool + perm_store :: bool + perm_cap_store :: bool + perm_cap_store_local :: bool + perm_global :: bool + +record ('a :: comp_countable) capability = "'a mem_capability" + + tag :: bool (* tag *) + +text \cctype corresponds to $\tau$ in the paper~\cite{park_2022}, where $\tau$ is the type system.\ +datatype cctype = + Uint8 + | Sint8 + | Uint16 + | Sint16 + | Uint32 + | Sint32 + | Uint64 + | Sint64 + | Cap + +text \$'a\ ccval$ corresponds to $\mathcal{V}_\mathcal{C}$ in the paper~\cite{park_2022}. $'a$ in + this instance must be countable. \ +datatype 'a ccval = + Uint8_v "8 word" + | Sint8_v "8 sword" + | Uint16_v "16 word" + | Sint16_v "16 sword" + | Uint32_v "32 word" + | Sint32_v "32 sword" + | Uint64_v "64 word" + | Sint64_v "64 sword" + | Cap_v "'a capability" + | Cap_v_frag "'a capability" "nat" + | Undef + +text \memval\_type infers the type of a value. \ +fun memval_type :: "'a ccval \ cctype" + where + "memval_type v = (case v of + Uint8_v _ \ Uint8 + | Sint8_v _ \ Sint8 + | Uint16_v _ \ Uint16 + | Sint16_v _ \ Sint16 + | Uint32_v _ \ Uint32 + | Sint32_v _ \ Sint32 + | Uint64_v _ \ Uint64 + | Sint64_v _ \ Sint64 + | Cap_v _ \ Cap + | Cap_v_frag _ _ \ Uint8)" + + +subsection \Primitive Value Conversion and Cast Proof\ +text \In this subsection, we provide proofs for the conversion between words and list of words. + We also provide proofs that casting primitive values is correct. 
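+ For example, concatenating the byte list $[1, 2]$ with cat\_u16 defined below gives the 16-bit word
+ $1 \cdot 256 + 2 = 258$, and, by the corresponding round-trip lemma, u16\_split maps $258$ back to $[1, 2]$.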
These will be used by the + \texttt{load} and \texttt{store} operations in the memory model.\ +abbreviation encode_u8 :: "nat \ 8 word" + where + "encode_u8 x \ word_of_nat x" + +abbreviation decode_u8 :: "8 word \ nat" + where + "decode_u8 b \ unat b" + +abbreviation encode_u8_list :: "8 word \ 8 word list" + where + "encode_u8_list w \ [w]" + +abbreviation decode_u8_list :: "8 word list \ 8 word" + where + "decode_u8_list ls \ hd ls" + +lemma encode_decode_u8_list: + "ls = [b] \ ls = encode_u8_list (decode_u8_list ls)" + by simp + +lemma decode_encode_u8_list: + "w = decode_u8_list (encode_u8_list w)" + by simp + +lemma encode_decode_u8: + "w = encode_u8 (decode_u8 w)" + by simp + +lemma decode_encode_u8: + assumes "i \ 2 ^ LENGTH(8) - 1" + shows "i = decode_u8 (encode_u8 i)" + by (metis assms le_unat_uoi unat_minus_one_word) + +abbreviation u64_split :: "64 word \ 32 word list" + where + "u64_split x \ (word_rsplit :: 64 word \ 32 word list) x" + +abbreviation u32_split :: "32 word \ 16 word list" + where + "u32_split x \ (word_rsplit :: 32 word \ 16 word list) x" + +abbreviation u16_split :: "16 word \ 8 word list" + where + "u16_split x \ (word_rsplit :: 16 word \ 8 word list) x" + +abbreviation cat_u16 :: "8 word list \ 16 word" + where + "cat_u16 x \ (word_rcat :: 8 word list \ 16 word) x" + +abbreviation encode_u16 :: "nat \ 8 word list" + where + "encode_u16 x \ u16_split (word_of_nat x)" + +abbreviation decode_u16 :: "8 word list \ nat" + where + "decode_u16 x \ unat (cat_u16 x)" + +lemma flatten_u16_length: + "length (u16_split vs) = 2" + by (simp add: length_word_rsplit_even_size wsst_TYs(3)) + +lemma rsplit_rcat_eq: + assumes "LENGTH(('b::len)) mod LENGTH(('a::len)) = 0" + and "length w = LENGTH('b) div LENGTH('a)" + shows "(word_rsplit :: 'b word \ 'a word list) ((word_rcat :: 'a word list \ 'b word) w) = w" + by (simp add: assms mod_0_imp_dvd size_word.rep_eq word_rsplit_rcat_size) + +lemma rsplit_rcat_u16_eq: + assumes "w = [a1, a2]" + shows "(word_rsplit :: 16 word \ 8 word list) ((word_rcat :: 8 word list \ 16 word) w) = w" +proof - + have l1: "length w * 8 = 16" + using assms by clarsimp + moreover have l2: "size ((word_rcat :: 8 word list \ 16 word) w) = 16" + using assms + by (simp add: size_word.rep_eq) + from l1 l2 have "length w * 8 = size ((word_rcat :: 8 word list \ 16 word) w)" + by argo + thus ?thesis + by (metis l1 l2 len8 word_rsplit_rcat_size) +qed + + +lemma encode_decode_u16: + assumes "w = [a, b]" + shows "w = encode_u16 (decode_u16 w)" + by (simp add: assms rsplit_rcat_eq) + +lemma cat_flatten_u16_eq: + "cat_u16 (u16_split w) = w" + by (simp add: word_rcat_rsplit) + +lemma decode_encode_u16: + assumes "i \ 2 ^ LENGTH(16) - 1" + shows "i = decode_u16 (encode_u16 i)" + by (metis assms cat_flatten_u16_eq le_unat_uoi unat_minus_one_word) + +abbreviation flatten_u32 :: "32 word \ 8 word list" + where + "flatten_u32 x \ (word_rsplit :: 32 word \ 8 word list) x" + +abbreviation cat_u32 :: "8 word list \ 32 word" + where + "cat_u32 x \ (word_rcat :: 8 word list \ 32 word) x" + +abbreviation encode_u32 :: "nat \ 8 word list" + where + "encode_u32 x \ flatten_u32 (word_of_nat x)" + +abbreviation decode_u32 :: "8 word list \ nat" + where + "decode_u32 i \ unat (cat_u32 i)" + +lemma flatten_u32_length: + "length (flatten_u32 vs) = 4" + by (simp add: length_word_rsplit_even_size wsst_TYs(3)) + +lemma rsplit_rcat_u32_eq: + assumes "w = [a1, a2, b1, b2]" + shows "(word_rsplit :: 32 word \ 8 word list) ((word_rcat :: 8 word list \ 32 word) w) = w" + using rsplit_rcat_eq 
assms + by force + +lemma encode_decode_u32: + assumes "w = [a1, a2, b1, b2]" + shows "w = encode_u32 (decode_u32 w)" + using assms + by (simp add: rsplit_rcat_u32_eq) + +lemma cat_flatten_u32_eq: + "cat_u32 (flatten_u32 w) = w" + by (simp add: word_rcat_rsplit) + +lemma decode_encode_u32: + assumes "i \ 2 ^ LENGTH(32) - 1" + shows "i = decode_u32 (encode_u32 i)" + by (metis assms le_unat_uoi unat_minus_one_word word_rcat_rsplit) + +abbreviation flatten_u64 :: "64 word \ 8 word list" + where + "flatten_u64 x \ (word_rsplit :: 64 word \ 8 word list) x" + +abbreviation cat_u64 :: "8 word list \ 64 word" + where + "cat_u64 x \ word_rcat x" + +abbreviation encode_u64 :: "nat \ 8 word list" + where + "encode_u64 x \ flatten_u64 (word_of_nat x)" + +abbreviation decode_u64 :: "8 word list \ nat" + where + "decode_u64 x \ unat (cat_u64 x)" + +lemma flatten_u64_length: + "length (flatten_u64 vs) = 8" + by (simp add: length_word_rsplit_even_size wsst_TYs(3)) + +lemma encode_decode_u64: + assumes "w = [a1, a2, b1, b2, c1, c2, d1, d2]" + shows "w = encode_u64 (decode_u64 w)" + using assms + by (simp add: rsplit_rcat_eq) + +lemma cat_flatten_u64_eq: + "cat_u64 (flatten_u64 w) = w" + by (simp add: word_rcat_rsplit) + +lemma decode_encode_u64: + assumes "i \ 2 ^ LENGTH(64) - 1" + shows "i = decode_u64 (encode_u64 i)" + by (metis assms le_unat_uoi unat_minus_one_word word_rcat_rsplit) + +abbreviation encode_s8 :: "int \ 8 sword" + where + "encode_s8 x \ word_of_int x" + +abbreviation decode_s8 :: "8 sword \ int" + where + "decode_s8 b \ sint b" + +abbreviation encode_s8_list :: "8 sword \ 8 word list" + where + "encode_s8_list w \ [SCAST(8 signed \ 8) w]" + +abbreviation decode_s8_list :: "8 word list \ 8 sword" + where + "decode_s8_list ls \ UCAST(8 \ 8 signed) (hd ls)" + +lemma encode_decode_s8_list: + "ls = [b] \ ls = encode_s8_list (decode_s8_list ls)" + by simp + +lemma decode_encode_s8_list: + "w = decode_s8_list (encode_s8_list w)" + by simp + +lemma encode_decode_s8: + "w = encode_s8 (decode_s8 w)" + by simp + +lemma decode_encode_s8: + assumes "- (2 ^ (LENGTH(8) - 1)) \ i" + and "i < 2 ^ (LENGTH(8) - 1)" + shows "i = decode_s8 (encode_s8 i)" + by (metis assms More_Word.sint_of_int_eq len_signed) + +abbreviation s64_split :: "64 sword \ 32 word list" + where + "s64_split x \ (word_rsplit :: 64 sword \ 32 word list) x" + +abbreviation s32_split :: "32 sword \ 16 word list" + where + "s32_split x \ (word_rsplit :: 32 sword \ 16 word list) x" + +abbreviation s16_split :: "16 sword \ 8 word list" + where + "s16_split x \ (word_rsplit :: 16 sword \ 8 word list) x" + +abbreviation cat_s16 :: "8 word list \ 16 sword" + where + "cat_s16 x \ (word_rcat :: 8 word list \ 16 sword) x" + +abbreviation encode_s16 :: "int \ 8 word list" + where + "encode_s16 x \ s16_split (word_of_int x)" + +abbreviation decode_s16 :: "8 word list \ int" + where + "decode_s16 x \ sint (cat_s16 x)" + +lemma flatten_s16_length: + "length (s16_split vs) = 2" + by (simp add: length_word_rsplit_even_size wsst_TYs(3)) + +lemma rsplit_rcat_s16_eq: + assumes "w = [a1, a2]" + shows "(word_rsplit :: 16 sword \ 8 word list) ((word_rcat :: 8 word list \ 16 sword) w) = w" +proof - + have l1: "length w * 8 = 16" + using assms by clarsimp + moreover have l2: "size ((word_rcat :: 8 word list \ 16 sword) w) = 16" + using assms + by (simp add: size_word.rep_eq) + from l1 l2 have "length w * 8 = size ((word_rcat :: 8 word list \ 16 sword) w)" + by argo + thus ?thesis + by (simp add: word_rsplit_rcat_size) +qed + + +lemma encode_decode_s16: + 
assumes "w = [a, b]" + shows "w = encode_s16 (decode_s16 w)" + by (simp add: assms rsplit_rcat_eq) + +lemma cat_flatten_s16_eq: + "cat_s16 (s16_split w) = w" + by (simp add: word_rcat_rsplit) + +lemma decode_encode_s16: + assumes "- (2 ^ (LENGTH(16) - 1)) \ i" + and "i < 2 ^ (LENGTH(16) - 1)" + shows "i = decode_s16 (encode_s16 i)" + by (metis assms cat_flatten_s16_eq len_signed sint_of_int_eq) + +abbreviation flatten_s32 :: "32 sword \ 8 word list" + where + "flatten_s32 x \ (word_rsplit :: 32 sword \ 8 word list) x" + +abbreviation cat_s32 :: "8 word list \ 32 sword" + where + "cat_s32 x \ (word_rcat :: 8 word list \ 32 sword) x" + +abbreviation encode_s32 :: "int \ 8 word list" + where + "encode_s32 x \ flatten_s32 (word_of_int x)" + +abbreviation decode_s32 :: "8 word list \ int" + where + "decode_s32 i \ sint (cat_s32 i)" + +lemma flatten_s32_length: + "length (flatten_s32 vs) = 4" + by (simp add: length_word_rsplit_even_size wsst_TYs(3)) + +lemma rsplit_rcat_s32_eq: + assumes "w = [a1, a2, b1, b2]" + shows "(word_rsplit :: 32 sword \ 8 word list) ((word_rcat :: 8 word list \ 32 sword) w) = w" + using rsplit_rcat_eq assms + by force + +lemma encode_decode_s32: + assumes "w = [a1, a2, b1, b2]" + shows "w = encode_s32 (decode_s32 w)" + using assms + by (simp add: rsplit_rcat_s32_eq) + +lemma decode_encode_s32: + assumes "- (2 ^ (LENGTH(32) - 1)) \ i" + and "i < 2 ^ (LENGTH(32) - 1)" + shows "i = decode_s32 (encode_s32 i)" + by (metis assms len_signed sint_of_int_eq word_rcat_rsplit) + +abbreviation flatten_s64 :: "64 sword \ 8 word list" + where + "flatten_s64 x \ (word_rsplit :: 64 sword \ 8 word list) x" + +lemma flatten_s64_length: + "length (flatten_s64 vs) = 8" + by (simp add: length_word_rsplit_even_size wsst_TYs(3)) + +abbreviation cat_s64 :: "8 word list \ 64 sword" + where + "cat_s64 x \ word_rcat x" + +abbreviation encode_s64 :: "int \ 8 word list" + where + "encode_s64 x \ flatten_s64 (word_of_int x)" + +abbreviation decode_s64 :: "8 word list \ int" + where + "decode_s64 x \ sint (cat_s64 x)" + +lemma encode_decode_s64: + assumes "w = [a1, a2, b1, b2, c1, c2, d1, d2]" + shows "w = encode_s64 (decode_s64 w)" + using assms + by (simp add: rsplit_rcat_eq) + +lemma decode_encode_s64: + assumes "- (2 ^ (LENGTH(64) - 1)) \ i" + and "i < 2 ^ (LENGTH(64) - 1)" + shows "i = decode_s64 (encode_s64 i)" + by (metis assms len_signed sint_of_int_eq word_rcat_rsplit) + +definition word_of_integer :: "integer \ 'a::len word" + where + "word_of_integer x \ word_of_int (int_of_integer x)" + +definition sword_of_integer :: "integer \ 'a::len sword" + where + "sword_of_integer x \ word_of_int (int_of_integer x)" + +definition integer_of_word :: "'a::len word \ integer" + where + "integer_of_word x \ integer_of_int (uint x)" + +definition integer_of_sword :: "'a::len sword \ integer" + where + "integer_of_sword x \ integer_of_int (sint x)" + +lemma word_integer_eq: + "word_of_integer (integer_of_word w) = w" + unfolding word_of_integer_def integer_of_word_def + by (metis int_of_integer_of_int integer_of_int_eq_of_int word_uint.Rep_inverse') + +lemma sword_integer_eq: + "sword_of_integer (integer_of_sword w) = w" + unfolding sword_of_integer_def integer_of_sword_def + by (metis int_of_integer_of_int integer_of_int_eq_of_int word_sint.Rep_inverse') + +lemma integer_word_bounded_eq: + assumes "0 \ i" + assumes "i \ 2 ^ LENGTH('a::len) - 1" + shows "integer_of_word ((word_of_integer :: integer \ 'a word) i) = i" + unfolding integer_of_word_def word_of_integer_def + using assms + by (metis 
integer_less_eq_iff integer_of_int_eq_of_int minus_integer.rep_eq of_int_0_le_iff of_int_eq_1_iff of_int_eq_numeral_power_cancel_iff of_int_integer_of word_of_int_inverse zle_diff1_eq) + +lemma integer_sword_bounded_eq: + assumes "- (2 ^ (LENGTH('a::len) - 1)) \ i" + and "i < 2 ^ (LENGTH('a) - 1)" + shows "integer_of_sword ((sword_of_integer :: integer \ 'a sword) i) = i" + unfolding integer_of_sword_def sword_of_integer_def + using signed_take_bit_int_eq_self assms + by (smt (verit) diff_numeral_special(11) int_of_integer_numeral integer_less_eq_iff integer_of_int_eq_of_int len_signed minus_integer.rep_eq numeral_power_eq_of_int_cancel_iff of_int_integer_of of_int_power_less_of_int_cancel_iff one_integer.rep_eq sint_of_int_eq uminus_integer.rep_eq) + +definition word8_of_integer :: "integer \ 8 word" + where + "word8_of_integer \ word_of_integer" + +definition word16_of_integer :: "integer \ 16 word" + where + "word16_of_integer \ word_of_integer" + +definition word32_of_integer :: "integer \ 32 word" + where + "word32_of_integer \ word_of_integer" + +definition word64_of_integer :: "integer \ 64 word" + where + "word64_of_integer \ word_of_integer" + +definition integer_of_word8 :: "8 word \ integer" + where + "integer_of_word8 \ integer_of_word" + +definition integer_of_word16 :: "16 word \ integer" + where + "integer_of_word16 \ integer_of_word" + +definition integer_of_word32 :: "32 word \ integer" + where + "integer_of_word32 \ integer_of_word" + +definition integer_of_word64 :: "64 word \ integer" + where + "integer_of_word64 \ integer_of_word" + +lemma word8_integer_eq: + "word8_of_integer (integer_of_word8 w) = w" + unfolding word8_of_integer_def integer_of_word8_def + using word_integer_eq + by blast + +lemma word16_integer_eq: + "word16_of_integer (integer_of_word16 w) = w" + unfolding word16_of_integer_def integer_of_word16_def + using word_integer_eq + by blast + +lemma word32_integer_eq: + "word32_of_integer (integer_of_word32 w) = w" + unfolding word32_of_integer_def integer_of_word32_def + using word_integer_eq + by blast + +lemma word64_integer_eq: + "word64_of_integer (integer_of_word64 w) = w" + unfolding word64_of_integer_def integer_of_word64_def + using word_integer_eq + by blast + +lemma integer_word8_bounded_eq: + assumes "0 \ i" + and "i \ 2 ^ LENGTH(8) - 1" + shows "integer_of_word8 (word8_of_integer i) = i" + unfolding word8_of_integer_def integer_of_word8_def + using integer_word_bounded_eq assms + by blast + +lemma integer_word16_bounded_eq: + assumes "0 \ i" + and "i \ 2 ^ LENGTH(16) - 1" + shows "integer_of_word16 (word16_of_integer i) = i" + unfolding word16_of_integer_def integer_of_word16_def + using integer_word_bounded_eq assms + by blast + +lemma integer_word32_bounded_eq: + assumes "0 \ i" + and "i \ 2 ^ LENGTH(32) - 1" + shows "integer_of_word32 (word32_of_integer i) = i" + unfolding word32_of_integer_def integer_of_word32_def + using integer_word_bounded_eq assms + by blast + +lemma integer_word64_bounded_eq: + assumes "0 \ i" + and "i \ 2 ^ LENGTH(64) - 1" + shows "integer_of_word64 (word64_of_integer i) = i" + unfolding word64_of_integer_def integer_of_word64_def + using integer_word_bounded_eq assms + by blast + +definition sword8_of_integer :: "integer \ 8 sword" + where + "sword8_of_integer \ sword_of_integer" + +definition sword16_of_integer :: "integer \ 16 sword" + where + "sword16_of_integer \ sword_of_integer" + +definition sword32_of_integer :: "integer \ 32 sword" + where + "sword32_of_integer \ sword_of_integer" + +definition 
sword64_of_integer :: "integer \ 64 sword" + where + "sword64_of_integer \ sword_of_integer" + +definition integer_of_sword8 :: "8 sword \ integer" + where + "integer_of_sword8 \ integer_of_sword" + +definition integer_of_sword16 :: "16 sword \ integer" + where + "integer_of_sword16 \ integer_of_sword" + +definition integer_of_sword32 :: "32 sword \ integer" + where + "integer_of_sword32 \ integer_of_sword" + +definition integer_of_sword64 :: "64 sword \ integer" + where + "integer_of_sword64 \ integer_of_sword" + +lemma sword8_integer_eq: + "sword8_of_integer (integer_of_sword8 w) = w" + unfolding sword8_of_integer_def integer_of_sword8_def + using sword_integer_eq + by blast + +lemma sword16_integer_eq: + "sword16_of_integer (integer_of_sword16 w) = w" + unfolding sword16_of_integer_def integer_of_sword16_def + using sword_integer_eq + by blast + +lemma sword32_integer_eq: + "sword32_of_integer (integer_of_sword32 w) = w" + unfolding sword32_of_integer_def integer_of_sword32_def + using sword_integer_eq + by blast + +lemma sword64_integer_eq: + "sword64_of_integer (integer_of_sword64 w) = w" + unfolding sword64_of_integer_def integer_of_sword64_def + using sword_integer_eq + by blast + +lemma integer_sword8_bounded_eq: + assumes "- (2 ^ (LENGTH(8) - 1)) \ i" + and "i < 2 ^ (LENGTH(8) - 1)" + shows "integer_of_sword8 (sword8_of_integer i) = i" + unfolding sword8_of_integer_def integer_of_sword8_def + using integer_sword_bounded_eq assms + by metis + +lemma integer_sword16_bounded_eq: + assumes "- (2 ^ (LENGTH(16) - 1)) \ i" + and "i < 2 ^ (LENGTH(16) - 1)" + shows "integer_of_sword16 (sword16_of_integer i) = i" + unfolding sword16_of_integer_def integer_of_sword16_def + using integer_sword_bounded_eq assms + by metis + +lemma integer_sword32_bounded_eq: + assumes "- (2 ^ (LENGTH(32) - 1)) \ i" + and "i < 2 ^ (LENGTH(32) - 1)" + shows "integer_of_sword32 (sword32_of_integer i) = i" + unfolding sword32_of_integer_def integer_of_sword32_def + using integer_sword_bounded_eq assms + by metis + +lemma integer_sword64_bounded_eq: + assumes "- (2 ^ (LENGTH(64) - 1)) \ i" + and "i < 2 ^ (LENGTH(64) - 1)" + shows "integer_of_sword64 (sword64_of_integer i) = i" + unfolding sword64_of_integer_def integer_of_sword64_def + using integer_sword_bounded_eq assms + by metis + +lemmas flatten_types_length = flatten_u16_length flatten_s16_length flatten_u32_length flatten_s32_length flatten_u64_length flatten_s64_length + +text \cast\_val is an executable code that ensures easy casting of values. 
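+ For example, out-of-range inputs wrap around: casting $300$ to \texttt{uint8} yields $44$ and casting $200$ to
+ \texttt{int8} yields $-56$, whereas in-range inputs are returned unchanged.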
This value cast function + is used within the Gillian + framework~\cite{old_gillian_2020, gillian_2021, maksimovic_2021, fragoso_2020}.\ +definition cast_val :: "String.literal \ integer \ integer" + where + "cast_val s i \ + if s = STR ''uint8'' then integer_of_word8 (word8_of_integer i) + else if s = STR ''int8'' then integer_of_sword8 (sword8_of_integer i) + else if s = STR ''uint16'' then integer_of_word16 (word16_of_integer i) + else if s = STR ''int16'' then integer_of_sword16 (sword16_of_integer i) + else if s = STR ''uint32'' then integer_of_word32 (word32_of_integer i) + else if s = STR ''int32'' then integer_of_sword32 (sword32_of_integer i) + else if s = STR ''uint64'' then integer_of_word64 (word64_of_integer i) + else if s = STR ''int64'' then integer_of_sword64 (sword64_of_integer i) + else i" + +end diff --git a/thys/CHERI-C_Memory_Model/ROOT b/thys/CHERI-C_Memory_Model/ROOT new file mode 100644 --- /dev/null +++ b/thys/CHERI-C_Memory_Model/ROOT @@ -0,0 +1,15 @@ +chapter AFP + +session "CHERI-C_Memory_Model" (AFP) = Containers + + options [timeout=1500] + sessions + "HOL-Library" + "Word_Lib" + "Separation_Algebra" + theories + Preliminary_Library + CHERI_C_Concrete_Memory_Model + CHERI_C_Global_Environment + document_files + "root.bib" + "root.tex" diff --git a/thys/CHERI-C_Memory_Model/document/root.bib b/thys/CHERI-C_Memory_Model/document/root.bib new file mode 100644 --- /dev/null +++ b/thys/CHERI-C_Memory_Model/document/root.bib @@ -0,0 +1,471 @@ +@online{DepartmentComputerScience, + title = {{{CHERI}}: {{The Arm Morello Board}}}, + url = {https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/cheri-morello.html} +} + +@online{DigitalSecurityDesign, + title = {{Digital Security by Design Challenge \textendash{} {{UKRI}}}}, + url = {https://www.ukri.org/our-work/our-main-funds/industrial-strategy-challenge-fund/artificial-intelligence-and-data-economy/digital-security-by-design-challenge/} +} + +@online{DepartmentComputerScienceb, + title = {{{CHERI}}: {{The Digital Security}} by {{Design}} ({{DSbD}}) {{Initiative}}}, + url = {https://www.cl.cam.ac.uk/research/security/ctsrd/cheri/dsbd.html} +} + +@inproceedings{cheri2014, + title = {{The {CHERI} Capability Model: Revisiting {RISC} in an Age of Risk}}, + booktitle = {2014 {ACM}/{IEEE} 41st International Symposium on Computer Architecture ({ISCA})}, + author = {Jonathan Woodruff and Robert N. M. Watson and David Chisnall and Simon W. 
Moore and Jonathan Anderson and Brooks Davis and Ben Laurie and Peter G.~Neumann and Robert Norton and Michael Roe}, + year = {2014}, + month = {June}, + pages = {457--468}, + publisher = {IEEE} +} + +@techreport{watson_laurie_richardson_2021, + url = {https://www.capabilitieslimited.co.uk/pdfs/20210917-capltd-cheri-desktop-report-version1-FINAL.pdf}, + title = {{Assessing the Viability of an Open- Source CHERI Desktop Software Ecosystem}}, + institution = {Capabilities Limited}, + author = {Watson, Robert and Laurie, Ben and Richardson, Alex}, + year = {2021}, + month = {Sep} +} + +@misc{miller_2019, + author = {Miller, Matt}, + title = {{Trends, challenges, and strategic shifts in the software vulnerability mitigation landscape}}, + note = {Presented at BlueHat IL}, + url = {https://msrnd-cdn-stor.azureedge.net/bluehat/bluehatil/2019/assets/doc/Trends%2C%20Challenges%2C%20and%20Strategic%20Shifts%20in%20the%20Software%20Vulnerability%20Mitigation%20Landscape.pdf}, + month = {Feb}, + year = {2019} +} + +@misc{richardson_2022, + author = {Richardson, Alex}, + title = {{Porting C/C++ software to Morello}}, + note = {Presented at CHERI Technical Workshop 2022}, + url = {https://soft-dev.org/events/cheritech22/slides/Richardson.pdf}, + month = {Sep}, + year = {2022} +} + +@misc{chisnall_2022, + author = {Chisnall, David}, + title = {{Towards a Safe, High-Performance Heap Allocator}}, + note = {Presented at CHERI Technical Workshop 2022}, + url = {https://soft-dev.org/events/cheritech22/slides/Chisnall.pdf}, + month = {Sep}, + year = {2022} +} + +@inproceedings{watson_2015, + author = {Watson, Robert N.M. and Woodruff, Jonathan and Neumann, Peter G. and Moore, Simon W. and Anderson, Jonathan and Chisnall, David and Dave, Nirav and Davis, Brooks and Gudka, Khilan and Laurie, Ben and Murdoch, Steven J. and Norton, Robert and Roe, Michael and Son, Stacey and Vadera, Munraj}, + booktitle = {{2015 IEEE Symposium on Security and Privacy}}, + title = {{CHERI: A Hybrid Capability-System Architecture for Scalable Software Compartmentalization}}, + year = {2015}, + pages = {20-37}, + issn = {1081-6011}, + doi = {10.1109/SP.2015.9} +} + +@article{peta_1970, + title = {{C Programming Language Still Ruling the World}}, + volume = {22}, + issn = {0975-4172}, + number = {1}, + journal = {Global Journal of Computer Science and Technology}, + author = {Saphalya Peta}, + year = {1970}, + month = {Jan}, + pages = {1–5} +} + +@article{chisnall_2015, + author = {Chisnall, David and Rothwell, Colin and Watson, Robert N.M. and Woodruff, Jonathan and Vadera, Munraj and Moore, Simon W. and Roe, Michael and Davis, Brooks and Neumann, Peter G.}, + title = {{Beyond the PDP-11: Architectural Support for a Memory-Safe C Abstract Machine}}, + year = {2015}, + issue_date = {April 2015}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {50}, + number = {4}, + issn = {0362-1340}, + journal = {SIGPLAN Not.}, + month = {Mar}, + pages = {117–130}, + numpages = {14}, + keywords = {processor design, security, C language, bounds checking, capabilities, compilers, memory protection, memory safety} +} + +@techreport{watson_et_al_2020, + address = {Cambridge, England}, + title = {{Capability Hardware Enhanced RISC Instructions: CHERI Instruction-Set Architecture (Version 8)}}, + institution = {University of Cambridge}, + author = {Watson, Robert N. M. and Neumann, Peter G. 
and Woodruff, Jonathan and Roe, Michael and Almatary, Hesham and Anderson, Jonathan and Baldwin, John and Barnes, Graeme and Chisnall, David and Clarke, Jessica and et al.}, + issn = {1476-2986}, + url = {https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-951.pdf}, + month = {Oct}, + year = {2020} +} + +@article{woodruff_2019, + author = {Woodruff, Jonathan and Joannou, Alexandre and Xia, Hongyan and Fox, Anthony and Norton, Robert M. and Chisnall, David and Davis, Brooks and Gudka, Khilan and Filardo, Nathaniel W. and Markettos, A. Theodore and Roe, Michael and Neumann, Peter G. and Watson, Robert N. M. and Moore, Simon W.}, + journal = {IEEE Transactions on Computers}, + title = {{CHERI Concentrate: Practical Compressed Capabilities}}, + year = {2019}, + volume = {68}, + number = {10}, + pages = {1455-1469}, + doi = {10.1109/TC.2019.2914037} +} + +@techreport{watson_cc_2019, + author = {Watson, Robert N. M. and Richardson, Alexander and Davis, Brooks and Baldwin, John and Chisnall, David and Clarke, Jessica and Filardo, Nathaniel and Moore, Simon M. and Napierala, Edward and Sewell, Peter and Neumann, Peter G.}, + title = {{CHERI C/C++ Programming Guide}}, + institution = {University of Cambridge}, + address = {Cambridge, England}, + issn = {1476-2986}, + url = {https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-947.pdf}, + month = {Jun}, + year = {2020} +} + +@inproceedings{filardo_2020, + author = {Wesley Filardo, Nathaniel and Gutstein, Brett F. and Woodruff, Jonathan and Ainsworth, Sam and Paul-Trifu, Lucian and Davis, Brooks and Xia, Hongyan and Tomasz Napierala, Edward and Richardson, Alexander and Baldwin, John and Chisnall, David and Clarke, Jessica and Gudka, Khilan and Joannou, Alexandre and Theodore Markettos, A. and Mazzinghi, Alfredo and Norton, Robert M. and Roe, Michael and Sewell, Peter and Son, Stacey and Jones, Timothy M. and Moore, Simon W. and Neumann, Peter G. and Watson, Robert N. M.}, + booktitle = {2020 IEEE Symposium on Security and Privacy (SP)}, + title = {{Cornucopia: Temporal Safety for CHERI Heaps}}, + year = {2020}, + pages = {608-625}, + doi = {10.1109/SP40000.2020.00098} +} + +@techreport{mte_2021, + title = {{Armv8.5-A Memory Tagging Extension}}, + url = {https://documentation-service.arm.com/static/624ea580caabfd7b3c13e23f?token=}, + month = {Jun}, + year = {2021} +} + +@inproceedings{brausse_2022, + author = {Brau\ss{}e, Franz and Shmarov, Fedor and Menezes, Rafael and Gadelha, Mikhail R. 
and Korovin, Konstantin and Reger, Giles and Cordeiro, Lucas C.}, + title = {{ESBMC-CHERI: Towards Verification of C Programs for CHERI Platforms with ESBMC}}, + year = {2022}, + isbn = {9781450393799}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + booktitle = {Proceedings of the 31st ACM SIGSOFT International Symposium on Software Testing and Analysis}, + pages = {773–776}, + numpages = {4}, + keywords = {bounded model checking, CHERI, capability hardware, formal methods, ARM Morello}, + location = {Virtual, South Korea}, + series = {ISSTA 2022} +} + +@inproceedings{fragoso_2020, + author = {Fragoso Santos, Jos\'{e} and Maksimovi\'{c}, Petar and Ayoun, Sacha-\'{E}lie and Gardner, Philippa}, + title = {{Gillian, Part i: A Multi-Language Platform for Symbolic Execution}}, + year = {2020}, + isbn = {9781450376136}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + booktitle = {Proceedings of the 41st ACM SIGPLAN Conference on Programming Language Design and Implementation}, + pages = {927–942}, + numpages = {16}, + keywords = {parametric semantics, bug-finding, bounded verification, Symbolic execution, C, JavaScript}, + location = {London, UK}, + series = {PLDI 2020} +} + +@book{nipkow_2002, + author = {Nipkow, Tobias and Paulson, Lawrence C. and Wenzel, Markus}, + doi = {10.1007/3-540-45949-9}, + issn = {0302-9743}, + isbn = {3-540-43376-7}, + publisher = {Springer}, + series = {[ecture Notes in Computer Science}, + title = {{Isabelle/HOL - A Proof Assistant for Higher-Order Logic}}, + year = {2002} +} + +@manual{morello_2022, + title = {{Arm Architecture Reference Manual Supplement Morello for A-Profile Architecture}}, + organization= {ARM Ltd.}, + year = {2022}, + url = {https://documentation-service.arm.com/static/61e577e1b691546d37bd38a0?token=}, + version = {A.k}, + numpages = {1294}, + +} + +@inproceedings{maksimovic_2021, + author = {Maksimovic, Petar and Ayoun, Sacha-Elie and Santos, Jose Fragoso and Gardner, Philippa}, + editor = {Silva, Alexandra and Leino, K. 
Rustan M.}, + title = {Gillian, Part {II:} Real-World Verification for JavaScript and {C}}, + booktitle = {{Proceedings of the 33\textsuperscript{rd} Computer Aided Verification International Conference, {CAV} 2021, Virtual Event, July 20-23, 2021, Part {II}}}, + series = {Lecture Notes in Computer Science}, + volume = {12760}, + pages = {827--850}, + publisher = {Springer}, + year = {2021}, + doi = {10.1007/978-3-030-81688-9\_38} +} + +@misc{gillian_2021, + doi = {10.48550/ARXIV.2105.14769}, + url = {https://arxiv.org/abs/2105.14769}, + author = {Maksimovic, Petar and Santos, Jose Fragoso and Ayoun, Sacha-Elie and Gardner, Philippa}, + keywords = {Programming Languages (cs.PL), Logic in Computer Science (cs.LO), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {{Gillian: A Multi-Language Platform for Unified Symbolic Analysis}}, + publisher = {arXiv}, + year = {2021}, + copyright = {Creative Commons Attribution 4.0 International} +} + +@article{old_gillian_2020, + author = {Jose Fragoso Santos and Petar Maksimovic and Sacha-Elie Ayoun and Philippa Gardner}, + title = {{Gillian: Compositional Symbolic Execution for All}}, + journal = {CoRR}, + volume = {abs/2001.05059}, + year = {2020}, + url = {https://arxiv.org/abs/2001.05059}, + eprinttype = {arXiv}, + eprint = {2001.05059}, + timestamp = {Sat, 23 Jan 2021 01:19:30 +0100}, + biburl = {https://dblp.org/rec/journals/corr/abs-2001-05059.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{calcagno_2007, + author = {Calcagno, Cristiano and O'Hearn, Peter W. and Yang, Hongseok}, + booktitle = {22nd Annual IEEE Symposium on Logic in Computer Science (LICS 2007)}, + title = {{Local Action and Abstract Separation Logic}}, + year = {2007}, + pages = {366-378} +} + +@techreport{leroy_2012, + title = {{The CompCert Memory Model, Version 2}}, + author = {Leroy, Xavier and Appel, Andrew W. and Blazy, Sandrine and Stewart, Gordon}, + type = {Research Report}, + number = {RR-7987}, + pages = {26}, + institution = {{INRIA}}, + year = {2012}, + month = {Jun}, + keywords = {Memory models ; formal semantics ; verified compilation ; CompCert}, + pdf = {https://hal.inria.fr/hal-00703441/file/RR-7987.pdf}, +} + +@article{armstrong_2019, + author = {Armstrong, Alasdair and Bauereiss, Thomas and Campbell, Brian and Reid, Alastair and Gray, Kathryn E. and Norton, Robert M. and Mundkur, Prashanth and Wassell, Mark and French, Jon and Pulte, Christopher and Flur, Shaked and Stark, Ian and Krishnaswami, Neel and Sewell, Peter}, + title = {{ISA Semantics for ARMv8-a, RISC-v, and CHERI-MIPS}}, + year = {2019}, + issue_date = {January 2019}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {3}, + number = {POPL}, + journal = {Proc. ACM Program. 
Lang.}, + month = {jan}, + articleno = {71}, + numpages = {31}, + keywords = {Instruction Set Architectures, Semantics, Theorem Proving} +} + +@online{cheri_mips_github, + title = {{Sail model of CHERI-MIPS ISA}}, + publisher = {Github}, + journal = {Github Repository}, + howpublished= {\url{https://github.com/CTSRD-CHERI/sail-cheri-mips}} +} + +@online{cheri_morello_github, + title = {{Morello Sail specification}}, + publisher = {Github}, + journal = {Github Repository}, + howpublished= {\url{https://github.com/CTSRD-CHERI/sail-morello}} +} + +@online{cheri_riscv_github, + title = {{CHERI RISC-V Sail model}}, + publisher = {Github}, + journal = {Github Repository}, + howpublished= {\url{https://github.com/CTSRD-CHERI/sail-cheri-riscv}} +} + +@online{cheri_compressed_cap, + title = {{cheri-compressed-cap}}, + publisher = {Github}, + journal = {Github Repository}, + howpublished= {\url{https://github.com/CTSRD-CHERI/cheri-compressed-cap}} +} + +@online{cheri_c_tests, + title = {{CHERI C Tests}}, + publisher = {Github}, + journal = {Github Repository}, + howpublished= {\url{https://github.com/CTSRD-CHERI/cheri-c-tests}} +} + +@inproceedings{klein_2012, + author = {Klein, Gerwin and Kolanski, Rafal and Boyton, Andrew}, + editor = {Beringer, Lennart and Felty, Amy}, + title = {{Mechanised Separation Algebra}}, + booktitle = {Interactive Theorem Proving}, + year = {2012}, + publisher = {Springer Berlin Heidelberg}, + address = {Berlin, Heidelberg}, + pages = {332--337}, + isbn = {978-3-642-32347-8}, + doi = {10.1007/978-3-642-32347-8_22} +} + +@inproceedings{haftmann_2013, + author = {Haftmann, Florian and Krauss, Alexander and Kun{\v{c}}ar, Ond{\v{r}}ej and Nipkow, Tobias}, + editor = {Blazy, Sandrine and Paulin-Mohring, Christine and Pichardie, David}, + title = {{Data Refinement in Isabelle/HOL}}, + booktitle = {Interactive Theorem Proving}, + year = {2013}, + publisher = {Springer Berlin Heidelberg}, + address = {Berlin, Heidelberg}, + pages = {100--115}, + isbn = {978-3-642-39634-2}, + doi = {10.1007/978-3-642-39634-2_10} +} + +@inproceedings{lochbihler_2013, + author = {Lochbihler, Andreas}, + editor = {Blazy, Sandrine and Paulin-Mohring, Christine and Pichardie, David}, + title = {{Light-Weight Containers for Isabelle: Efficient, Extensible, Nestable}}, + booktitle = {Interactive Theorem Proving}, + year = {2013}, + publisher = {Springer Berlin Heidelberg}, + address = {Berlin, Heidelberg}, + pages = {116--132}, + isbn = {978-3-642-39634-2}, + doi = {10.1007/978-3-642-39634-2_11} +} + +@article{haftmann_2021, + author = {Haftmann, Florian}, + month = {Dec}, + year = {2021}, + url = {https://isabelle.in.tum.de/doc/codegen.pdf}, + title = {{Code generation from Isabelle/HOL theories}} +} + +@article{memarian_2019, + author = {Memarian, Kayvan and Gomes, Victor B. F. and Davis, Brooks and Kell, Stephen and Richardson, Alexander and Watson, Robert N. M. and Sewell, Peter}, + title = {{Exploring C Semantics and Pointer Provenance}}, + year = {2019}, + issue_date = {January 2019}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {3}, + number = {POPL}, + journal = {Proc. ACM Program. 
Lang.}, + month = {jan}, + articleno = {67}, + numpages = {32}, + keywords = {C} +} + +@article{ohearn_2019, + author = {O'Hearn, Peter W.}, + title = {Incorrectness Logic}, + year = {2019}, + issue_date = {January 2020}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + volume = {4}, + number = {POPL}, + url = {https://doi.org/10.1145/3371078}, + doi = {10.1145/3371078}, + journal = {Proc. ACM Program. Lang.}, + month = {dec}, + articleno = {10}, + numpages = {32}, + keywords = {none} +} + +@article{beeren_2016, + author = {Joel Beeren and Matthew Fernandez and Xin Gao and Gerwin Klein and Rafal Kolanski and Japheth Lim and Corey Lewis and Daniel Matichuk and Thomas Sewell}, + title = {{Finite Machine Word Library}}, + journal = {Archive of Formal Proofs}, + month = {June}, + year = {2016}, + note = {\url{https://isa-afp.org/entries/Word_Lib.html}, Formal proof development}, + ISSN = {2150-914x}, +} + +@article{krebbers_2016, + author = {Krebbers, Robbert}, + title = {{A Formal C Memory Model for Separation Logic}}, + journal = {Journal of Automated Reasoning}, + year = {2016}, + month = {Dec}, + day = {01}, + volume = {57}, + number = {4}, + pages = {319-387}, + issn = {1573-0670}, + doi = {10.1007/s10817-016-9369-1}, +} + +@article{tuch_2009, + author = {Tuch, Harvey}, + title = {{Formal Verification of C Systems Code}}, + journal = {Journal of Automated Reasoning}, + year = {2009}, + month = {Apr}, + day = {01}, + volume = {42}, + number = {2}, + pages = {125-187}, + issn = {1573-0670}, + doi = {10.1007/s10817-009-9120-2}, +} + +@article{cohen_2009, + title = {{A Precise Yet Efficient Memory Model For C}}, + journal = {Electronic Notes in Theoretical Computer Science}, + volume = {254}, + pages = {85-103}, + year = {2009}, + note = {Proceedings of the 4th International Workshop on Systems Software Verification (SSV 2009)}, + issn = {1571-0661}, + doi = {https://doi.org/10.1016/j.entcs.2009.09.061}, + author = {Ernie Cohen and Michał Moskal and Stephan Tobies and Wolfram Schulte}, + keywords = {deductive program verification, C programming language, memory models}, +} + +@InProceedings{krebbers_2014, + author = {Krebbers, Robbert and Leroy, Xavier and Wiedijk, Freek}, + editor = {Klein, Gerwin and Gamboa, Ruben}, + title = {{Formal C Semantics: CompCert and the C Standard}}, + booktitle = {Interactive Theorem Proving}, + year = {2014}, + publisher = {Springer International Publishing}, + address = {Cham}, + pages = {543--548}, + isbn = {978-3-319-08970-6} +} + +@misc{park_2022, + doi = {10.48550/arxiv.2211.07511}, + url = {https://arxiv.org/abs/2211.07511}, + author = {Park, Seung Hoon and Pai, Rekha and Melham, Tom}, + title = {{A Formal CHERI-C Semantics for Verification}}, + publisher = {arXiv}, + year = {2022}, + note = {Submitted to TACAS 2023}, +} + +@book{cstandard_2003, + title = {{The C Standard: Incorporating Technical Corrigendum 1}}, + author = {British Standards Institution and BSI (The British Standards Institution)}, + isbn = {9780470846742}, + year = {2003}, + publisher = {Wiley} +} diff --git a/thys/CHERI-C_Memory_Model/document/root.tex b/thys/CHERI-C_Memory_Model/document/root.tex new file mode 100644 --- /dev/null +++ b/thys/CHERI-C_Memory_Model/document/root.tex @@ -0,0 +1,88 @@ +\documentclass[11pt,a4paper]{article} +\usepackage[T1]{fontenc} +\usepackage{isabelle,isabellesym} + +% further packages required for unusual symbols (see also +% isabellesym.sty), use only when needed + +\usepackage{amssymb} + %for \, \, \, \, \, \, + 
%\, \, \, \, \, + %\, \, \ + +%\usepackage{eurosym} + %for \ + +%\usepackage[only,bigsqcap,bigparallel,fatsemi,interleave,sslash]{stmaryrd} + %for \, \, \, \, \ + +%\usepackage{eufrak} + %for \ ... \, \ ... \ (also included in amssymb) + +%\usepackage{textcomp} + %for \, \, \, \, \, + %\ + +% this should be the last package used +\usepackage{pdfsetup} + +% urls in roman style, theory text in math-similar italics +\urlstyle{rm} +\isabellestyle{it} + +% for uniform font size +%\renewcommand{\isastyle}{\isastyleminor} + + +\begin{document} + +\title{A Formal CHERI-C Memory Model} +\author{Seung Hoon Park} +\maketitle + +\begin{abstract} +%CHERI-C extends the C programming language by adding \textit{hardware +%capabilities}, ensuring a certain degree of memory safety while remaining efficient. +%Capabilities can also be employed for higher-level security measures, such as +%software compartmentalization, that have to be used correctly to achieve the +%desired security guarantees. As the extension changes the semantics of C, +%new theories and tooling are required to reason about CHERI-C code +%and verify correctness. +% +In this work, we present a formal memory model that provides a memory +semantics for CHERI-C programs with uncompressed capabilities in a `purecap' +environment. We present a CHERI-C memory model theory with +properties suitable for verification and potentially other types of +analyses. Our theory generates an OCaml executable instance of the memory model, +which is then used to instantiate the parametric \textit{Gillian} +program analysis framework, enabling concrete execution of +CHERI-C programs. The tool can run a CHERI-C test suite, demonstrating the +correctness of our tool, and catch a good class of safety violations that the +CHERI hardware might miss. + +The proof is accompanied by a paper~\cite{park_2022} that explains the Gillian +framework instantiation that uses the OCaml code generated by this work. +\end{abstract} + +\tableofcontents +\subsubsection*{Acknowledgements} +The author was funded by the UKRI programme on Digital Security by Design +(Ref. EP/V000225/1, \href{https://scorch-project.github.io/}{SCorCH}). 
+\newpage + +% sane default for proof documents +\parindent 0pt\parskip 0.5ex + +% generated text of all theories +\input{session} + +% optional bibliography +\bibliographystyle{abbrv} +\bibliography{root} + +\end{document} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: t +%%% End: diff --git a/thys/ROOTS b/thys/ROOTS --- a/thys/ROOTS +++ b/thys/ROOTS @@ -1,713 +1,714 @@ ADS_Functor AI_Planning_Languages_Semantics AODV AVL-Trees AWN Abortable_Linearizable_Modules Abs_Int_ITP2012 Abstract-Hoare-Logics Abstract-Rewriting Abstract_Completeness Abstract_Soundness Ackermanns_not_PR Actuarial_Mathematics Adaptive_State_Counting Affine_Arithmetic Aggregation_Algebras Akra_Bazzi Algebraic_Numbers Algebraic_VCs Allen_Calculus Amicable_Numbers Amortized_Complexity AnselmGod Applicative_Lifting Approximation_Algorithms Architectural_Design_Patterns Aristotles_Assertoric_Syllogistic Arith_Prog_Rel_Primes ArrowImpossibilityGS Attack_Trees Auto2_HOL Auto2_Imperative_HOL AutoFocus-Stream Automated_Stateful_Protocol_Verification Automatic_Refinement AxiomaticCategoryTheory Balog_Szemeredi_Gowers BDD BD_Security_Compositional BNF_CC BNF_Operations BTree Banach_Steinhaus Belief_Revision Bell_Numbers_Spivey BenOr_Kozen_Reif Berlekamp_Zassenhaus Bernoulli Bertrands_Postulate Bicategory BinarySearchTree Binding_Syntax_Theory Binomial-Heaps Binomial-Queues BirdKMP Blue_Eyes Bondy Boolean_Expression_Checkers Boolos_Curious_Inference Bounded_Deducibility_Security Buchi_Complementation Budan_Fourier Buffons_Needle Buildings BytecodeLogicJmlTypes C2KA_DistributedSystems CAVA_Automata CAVA_LTL_Modelchecker CCS CISC-Kernel CRYSTALS-Kyber CRDT CSP_RefTK CYK CZH_Elementary_Categories CZH_Foundations CZH_Universal_Constructions CakeML CakeML_Codegen Call_Arity Card_Equiv_Relations Card_Multisets Card_Number_Partitions Card_Partitions Cartan_FP Case_Labeling Catalan_Numbers Category Category2 Category3 Cauchy Cayley_Hamilton Certification_Monads Chandy_Lamport +CHERI-C_Memory_Model Chord_Segments Circus Clean Clique_and_Monotone_Circuits ClockSynchInst Closest_Pair_Points CoCon CoSMeDis CoSMed CofGroups Coinductive Coinductive_Languages Collections Combinable_Wands Combinatorial_Enumeration_Algorithms Combinatorics_Words Combinatorics_Words_Graph_Lemma Combinatorics_Words_Lyndon Commuting_Hermitian Comparison_Sort_Lower_Bound Compiling-Exceptions-Correctly Complete_Non_Orders Completeness Complex_Bounded_Operators Complex_Geometry Complx ComponentDependencies ConcurrentGC ConcurrentIMP Concurrent_Ref_Alg Concurrent_Revisions Conditional_Simplification Conditional_Transfer_Rule Consensus_Refined Constructive_Cryptography Constructive_Cryptography_CM Constructor_Funs Containers CoreC++ Core_DOM Core_SC_DOM Correctness_Algebras Cotangent_PFD_Formula Count_Complex_Roots CryptHOL CryptoBasedCompositionalProperties Cubic_Quartic_Equations DFS_Framework DOM_Components DPT-SAT-Solver DataRefinementIBP Datatype_Order_Generator Decl_Sem_Fun_PL Decreasing-Diagrams Decreasing-Diagrams-II Dedekind_Real Deep_Learning Delta_System_Lemma Density_Compiler Dependent_SIFUM_Refinement Dependent_SIFUM_Type_Systems Depth-First-Search Derangements Deriving Descartes_Sign_Rule Design_Theory Dict_Construction Differential_Dynamic_Logic Differential_Game_Logic Digit_Expansions Dijkstra_Shortest_Path Diophantine_Eqns_Lin_Hom Dirichlet_L Dirichlet_Series DiscretePricing Discrete_Summation DiskPaxos Dominance_CHK DPRM_Theorem DynamicArchitectures Dynamic_Tables E_Transcendental Echelon_Form EdmondsKarp_Maxflow Efficient-Mergesort 
Elliptic_Curves_Group_Law Encodability_Process_Calculi Epistemic_Logic Equivalence_Relation_Enumeration Ergodic_Theory Error_Function Euler_MacLaurin Euler_Partition Eval_FO Example-Submission Extended_Finite_State_Machine_Inference Extended_Finite_State_Machines FFT FLP FOL-Fitting FOL_Axiomatic FOL_Harrison FOL_Seq_Calc1 FOL_Seq_Calc2 FOL_Seq_Calc3 FSM_Tests Factor_Algebraic_Polynomial Factored_Transition_System_Bounding Falling_Factorial_Sum Farkas FeatherweightJava Featherweight_OCL Fermat3_4 FileRefinement FinFun Finger-Trees Finite-Map-Extras Finite_Automata_HF Finite_Fields Finitely_Generated_Abelian_Groups First_Order_Terms First_Welfare_Theorem Fishburn_Impossibility Fisher_Yates Fishers_Inequality Flow_Networks Floyd_Warshall Flyspeck-Tame FocusStreamsCaseStudies Forcing Formal_Puiseux_Series Formal_SSA Formula_Derivatives Foundation_of_geometry Fourier FO_Theory_Rewriting Free-Boolean-Algebra Free-Groups Frequency_Moments Fresh_Identifiers FunWithFunctions FunWithTilings Functional-Automata Functional_Ordered_Resolution_Prover Furstenberg_Topology GPU_Kernel_PL Gabow_SCC GaleStewart_Games Gale_Shapley Game_Based_Crypto Gauss-Jordan-Elim-Fun Gauss_Jordan Gauss_Sums Gaussian_Integers GenClock General-Triangle Generalized_Counting_Sort Generic_Deriving Generic_Join GewirthPGCProof Girth_Chromatic GoedelGod Goedel_HFSet_Semantic Goedel_HFSet_Semanticless Goedel_Incompleteness Goodstein_Lambda GraphMarkingIBP Graph_Saturation Graph_Theory Green Groebner_Bases Groebner_Macaulay Gromov_Hyperbolicity Grothendieck_Schemes Group-Ring-Module HOL-CSP HOLCF-Prelude HRB-Slicing Hahn_Jordan_Decomposition Hales_Jewett Heard_Of Hello_World HereditarilyFinite Hermite Hermite_Lindemann Hidden_Markov_Models Higher_Order_Terms Hoare_Time Hood_Melville_Queue HotelKeyCards Huffman Hybrid_Logic Hybrid_Multi_Lane_Spatial_Logic Hybrid_Systems_VCs HyperCTL Hyperdual IEEE_Floating_Point IFC_Tracking IMAP-CRDT IMO2019 IMP2 IMP2_Binary_Heap IMP_Compiler IMP_Compiler_Reuse IP_Addresses Imperative_Insertion_Sort Implicational_Logic Impossible_Geometry Incompleteness Incredible_Proof_Machine Independence_CH Inductive_Confidentiality Inductive_Inference InfPathElimination InformationFlowSlicing InformationFlowSlicing_Inter Integration Interpolation_Polynomials_HOL_Algebra Interpreter_Optimizations Interval_Arithmetic_Word32 Intro_Dest_Elim Involutions2Squares Iptables_Semantics Irrational_Series_Erdos_Straus Irrationality_J_Hancl Irrationals_From_THEBOOK IsaGeoCoq Isabelle_C Isabelle_Marries_Dirac Isabelle_Meta_Model IsaNet Jacobson_Basic_Algebra Jinja JinjaDCI JinjaThreads JiveDataStoreModel Jordan_Hoelder Jordan_Normal_Form KAD KAT_and_DRA KBPs KD_Tree Key_Agreement_Strong_Adversaries Khovanskii_Theorem Kleene_Algebra Knights_Tour Knot_Theory Knuth_Bendix_Order Knuth_Morris_Pratt Koenigsberg_Friendship Kruskal Kuratowski_Closure_Complement LLL_Basis_Reduction LLL_Factorization LOFT LTL LTL_Master_Theorem LTL_Normal_Form LTL_to_DRA LTL_to_GBA Lam-ml-Normalization LambdaAuth LambdaMu Lambda_Free_EPO Lambda_Free_KBOs Lambda_Free_RPOs Lambert_W Landau_Symbols Laplace_Transform Latin_Square LatticeProperties Launchbury Laws_of_Large_Numbers Lazy-Lists-II Lazy_Case Lehmer Lifting_Definition_Option Lifting_the_Exponent LightweightJava LinearQuantifierElim Linear_Inequalities Linear_Programming Linear_Recurrences Liouville_Numbers List-Index List-Infinite List_Interleaving List_Inversions List_Update LocalLexing Localization_Ring Locally-Nameless-Sigma Logging_Independent_Anonymity Lowe_Ontological_Argument 
Lower_Semicontinuous Lp LP_Duality Lucas_Theorem MDP-Algorithms MDP-Rewards MFMC_Countable MFODL_Monitor_Optimized MFOTL_Monitor MSO_Regex_Equivalence Markov_Models Marriage Mason_Stothers Matrices_for_ODEs Matrix Matrix_Tensor Matroids Maximum_Segment_Sum Max-Card-Matching Median_Method Median_Of_Medians_Selection Menger Mereology Mersenne_Primes Metalogic_ProofChecker MiniML MiniSail Minimal_SSA Minkowskis_Theorem Minsky_Machines Modal_Logics_for_NTS Modular_Assembly_Kit_Security Modular_arithmetic_LLL_and_HNF_algorithms Monad_Memo_DP Monad_Normalisation MonoBoolTranAlgebra MonoidalCategory Monomorphic_Monad MuchAdoAboutTwo Multiset_Ordering_NPC Multi_Party_Computation Multirelations Myhill-Nerode Name_Carrying_Type_Inference Nano_JSON Nash_Williams Nat-Interval-Logic Native_Word Nested_Multisets_Ordinals Network_Security_Policy_Verification Neumann_Morgenstern_Utility No_FTL_observers Nominal2 Noninterference_CSP Noninterference_Concurrent_Composition Noninterference_Generic_Unwinding Noninterference_Inductive_Unwinding Noninterference_Ipurge_Unwinding Noninterference_Sequential_Composition NormByEval Nullstellensatz Number_Theoretic_Transform Octonions OpSets Open_Induction Optics Optimal_BST Orbit_Stabiliser Order_Lattice_Props Ordered_Resolution_Prover Ordinal Ordinal_Partitions Ordinals_and_Cardinals Ordinary_Differential_Equations PAC_Checker Package_logic PAL PAPP_Impossibility PCF PLM POPLmark-deBruijn PSemigroupsConvolution Padic_Ints Padic_Field Pairing_Heap Paraconsistency Parity_Game Partial_Function_MR Partial_Order_Reduction Password_Authentication_Protocol Pell Perfect-Number-Thm Perron_Frobenius Physical_Quantities Pi_Calculus Pi_Transcendental Planarity_Certificates Pluennecke_Ruzsa_Inequality Poincare_Bendixson Poincare_Disc Polynomial_Factorization Polynomial_Interpolation Polynomials Pop_Refinement Posix-Lexing Possibilistic_Noninterference Power_Sum_Polynomials Pratt_Certificate Prefix_Free_Code_Combinators Presburger-Automata Prim_Dijkstra_Simple Prime_Distribution_Elementary Prime_Harmonic_Series Prime_Number_Theorem Priority_Queue_Braun Priority_Search_Trees Probabilistic_Noninterference Probabilistic_Prime_Tests Probabilistic_System_Zoo Probabilistic_Timed_Automata Probabilistic_While Program-Conflict-Analysis Progress_Tracking Projective_Geometry Projective_Measurements Promela Proof_Strategy_Language PropResPI Propositional_Proof_Systems Prpu_Maxflow PseudoHoops Psi_Calculi Ptolemys_Theorem Public_Announcement_Logic QHLProver QR_Decomposition Quantales Quasi_Borel_Spaces Quaternions Query_Optimization Quick_Sort_Cost RIPEMD-160-SPARK ROBDD RSAPSS Ramsey-Infinite Random_BSTs Random_Graph_Subgraph_Threshold Randomised_BSTs Randomised_Social_Choice Rank_Nullity_Theorem Real_Impl Real_Power Real_Time_Deque Recursion-Addition Recursion-Theory-I Refine_Imperative_HOL Refine_Monadic RefinementReactive Regex_Equivalence Registers Regression_Test_Selection Regular-Sets Regular_Algebras Regular_Tree_Relations Relation_Algebra Relational-Incorrectness-Logic Relational_Disjoint_Set_Forests Relational_Forests Relational_Method Relational_Minimum_Spanning_Trees Relational_Paths Rep_Fin_Groups ResiduatedTransitionSystem Residuated_Lattices Resolution_FOL Rewrite_Properties_Reduction Rewriting_Z Ribbon_Proofs Risk_Free_Lending Robbins-Conjecture Robinson_Arithmetic Root_Balanced_Tree Roth_Arithmetic_Progressions Routing Roy_Floyd_Warshall SATSolverVerification SC_DOM_Components SDS_Impossibility SIFPL SIFUM_Type_Systems SPARCv8 Safe_Distance Safe_OCL Safe_Range_RC 
Saturation_Framework Saturation_Framework_Extensions SCC_Bloemen_Sequential Schutz_Spacetime Secondary_Sylow Security_Protocol_Refinement Selection_Heap_Sort SenSocialChoice Separata Separation_Algebra Separation_Logic_Imperative_HOL Separation_Logic_Unbounded SequentInvertibility Shadow_DOM Shadow_SC_DOM Shivers-CFA ShortestPath Show Sigma_Commit_Crypto Signature_Groebner Simpl Simple_Firewall Simplex Simplicial_complexes_and_boolean_functions SimplifiedOntologicalArgument Skew_Heap Skip_Lists Slicing Sliding_Window_Algorithm Smith_Normal_Form Smooth_Manifolds Sophomores_Dream Solidity Sort_Encodings Source_Coding_Theorem SpecCheck Special_Function_Bounds Splay_Tree Sqrt_Babylonian Stable_Matching Stalnaker_Logic Statecharts Stateful_Protocol_Composition_and_Typing Stellar_Quorums Stern_Brocot Stewart_Apollonius Stirling_Formula Stochastic_Matrices Stone_Algebras Stone_Kleene_Relation_Algebras Stone_Relation_Algebras Store_Buffer_Reduction Stream-Fusion Stream_Fusion_Code Strong_Security Sturm_Sequences Sturm_Tarski Stuttering_Equivalence Subresultants Subset_Boolean_Algebras SumSquares Sunflowers SuperCalc Surprise_Paradox Symmetric_Polynomials Syntax_Independent_Logic Szemeredi_Regularity Szpilrajn TESL_Language TLA Tail_Recursive_Functions Tarskis_Geometry Taylor_Models Three_Circles Timed_Automata Topological_Semantics Topology TortoiseHare Transcendence_Series_Hancl_Rucki Transformer_Semantics Transition_Systems_and_Automata Transitive-Closure Transitive-Closure-II Transitive_Models Treaps Tree-Automata Tree_Decomposition Triangle Trie Twelvefold_Way Tycon Types_Tableaus_and_Goedels_God Types_To_Sets_Extension UPF UPF_Firewall UTP Undirected_Graph_Theory Universal_Hash_Families Universal_Turing_Machine UpDown_Scheme Valuation Van_Emde_Boas_Trees Van_der_Waerden VectorSpace VeriComp Verified-Prover Verified_SAT_Based_AI_Planning VerifyThis2018 VerifyThis2019 Vickrey_Clarke_Groves Virtual_Substitution VolpanoSmith VYDRA_MDL WHATandWHERE_Security WOOT_Strong_Eventual_Consistency WebAssembly Weight_Balanced_Trees Weighted_Arithmetic_Geometric_Mean Weighted_Path_Order Well_Quasi_Orders Wetzels_Problem Winding_Number_Eval Word_Lib WorkerWrapper X86_Semantics XML Youngs_Inequality ZFC_in_HOL Zeta_3_Irrational Zeta_Function pGCL