diff --git a/thys/Buildings/Simplicial.thy b/thys/Buildings/Simplicial.thy --- a/thys/Buildings/Simplicial.thy +++ b/thys/Buildings/Simplicial.thy @@ -1,749 +1,749 @@ section \Simplicial complexes\ text \ In this section we develop the basic theory of abstract simplicial complexes as a collection of finite sets, where the power set of each member set is contained in the collection. Note that in this development we allow the empty simplex, since allowing it or not seemed of no logical consequence, but of some small practical consequence. \ theory Simplicial imports Prelim begin subsection \Geometric notions\ text \ The geometric notions attached to a simplicial complex of main interest to us are those of facets (subsets of codimension one), adjacency (sharing a facet in common), and chains of adjacent simplices. \ subsubsection \Facets\ definition facetrel :: "'a set \ 'a set \ bool" (infix "\" 60) where "y \ x \ \v. v \ y \ x = insert v y" lemma facetrelI: "v \ y \ x = insert v y \ y \ x" using facetrel_def by fast lemma facetrelI_card: "y \ x \ card (x-y) = 1 \ y \ x" using card1[of "x-y"] by (blast intro: facetrelI) lemma facetrel_complement_vertex: "y\x \ x = insert v y \ v\y" using facetrel_def[of y x] by fastforce lemma facetrel_diff_vertex: "v\x \ x-{v} \ x" by (auto intro: facetrelI) lemma facetrel_conv_insert: "y \ x \ v \ x - y \ x = insert v y" unfolding facetrel_def by fast lemma facetrel_psubset: "y \ x \ y \ x" unfolding facetrel_def by fast lemma facetrel_subset: "y \ x \ y \ x" using facetrel_psubset by fast lemma facetrel_card: "y \ x \ card (x-y) = 1" using insert_Diff_if[of _ y y] unfolding facetrel_def by fastforce lemma finite_facetrel_card: "finite x \ y\x \ card x = Suc (card y)" using facetrel_def[of y x] card_insert_disjoint[of x] by auto lemma facetrelI_cardSuc: "z\x \ card x = Suc (card z) \ z\x" using card_ge_0_finite finite_subset[of z] card_Diff_subset[of z x] by (force intro: facetrelI_card) lemma facet2_subset: "\ z\x; z\y; x\y - z \ {} \ \ x \ y" unfolding facetrel_def by force lemma inj_on_pullback_facet: assumes "inj_on f x" "z \ f`x" obtains y where "y \ x" "f`y = z" proof from assms(2) obtain v where v: "v\z" "f`x = insert v z" using facetrel_def[of z] by auto define u and y where "u \ the_inv_into x f v" and y: "y \ {v\x. f v \ z}" moreover with assms(2) v have "x = insert u y" using the_inv_into_f_eq[OF assms(1)] the_inv_into_into[OF assms(1)] - by auto + by fastforce ultimately show "y \ x" using v f_the_inv_into_f[OF assms(1)] by (force intro: facetrelI) from y assms(2) show "f`y = z" using facetrel_subset by fast qed subsubsection \Adjacency\ definition adjacent :: "'a set \ 'a set \ bool" (infix "\" 70) where "x \ y \ \z. 
z\x \ z\y" lemma adjacentI: "z\x \ z\y \ x \ y" using adjacent_def by fast lemma empty_not_adjacent: "\ {} \ x" unfolding facetrel_def adjacent_def by fast lemma adjacent_sym: "x \ y \ y \ x" unfolding adjacent_def by fast lemma adjacent_refl: assumes "x \ {}" shows "x \ x" proof- from assms obtain v where v: "v\x" by fast thus "x \ x" using facetrelI[of v "x-{v}"] unfolding adjacent_def by fast qed lemma common_facet: "\ z\x; z\y; x \ y \ \ z = x \ y" using facetrel_subset facet2_subset by fast lemma adjacent_int_facet1: "x \ y \ x \ y \ (x \ y) \ x" using common_facet unfolding adjacent_def by fast lemma adjacent_int_facet2: "x \ y \ x \ y \ (x \ y) \ y" using adjacent_sym adjacent_int_facet1 by (fastforce simp add: Int_commute) lemma adjacent_conv_insert: "x \ y \ v \ x - y \ x = insert v (x\y)" using adjacent_int_facet1 facetrel_conv_insert by fast lemma adjacent_int_decomp: "x \ y \ x \ y \ \v. v \ y \ x = insert v (x\y)" using adjacent_int_facet1 unfolding facetrel_def by fast lemma adj_antivertex: assumes "x\y" "x\y" shows "\!v. v\x-y" proof (rule ex_ex1I) from assms obtain w where w: "w\y" "x = insert w (x\y)" using adjacent_int_decomp by fast thus "\v. v\x-y" by auto from w have "\v. v\x-y \ v=w" by fast thus "\v v'. v\x-y \ v'\x-y \ v=v'" by auto qed lemma adjacent_card: "x \ y \ card x = card y" unfolding adjacent_def facetrel_def by (cases "finite x" "x=y" rule: two_cases) auto lemma adjacent_to_adjacent_int_subset: assumes "C \ D" "f`C \ f`D" "f`C \ f`D" shows "f`C \ f`D \ f`(C\D)" proof from assms(1,3) obtain v where v: "v \ D" "C = insert v (C\D)" using adjacent_int_decomp by fast from assms(2,3) obtain w where w: "w \ f`D" "f`C = insert w (f`C\f`D)" using adjacent_int_decomp[of "f`C" "f`D"] by fast from w have w': "w \ f`C - f`D" by fast with v assms(1,2) have fv_w: "f v = w" using adjacent_conv_insert by fast fix b assume "b \ f`C \ f`D" from this obtain a1 a2 where a1: "a1 \ C" "b = f a1" and a2: "a2 \ D" "b = f a2" by fast from v a1 a2(2) have "a1 \ D \ f a2 = w" using fv_w by auto with a2(1) w' have "a1 \ D" by fast with a1 show "b \ f`(C\D)" by fast qed lemma adjacent_to_adjacent_int: "\ C \ D; f`C \ f`D; f`C \ f`D \ \ f`(C\D) = f`C \ f`D" using adjacent_to_adjacent_int_subset by fast subsubsection \Chains of adjacent sets\ abbreviation "adjacentchain \ binrelchain adjacent" abbreviation "padjacentchain \ proper_binrelchain adjacent" lemmas adjacentchain_Cons_reduce = binrelchain_Cons_reduce [of adjacent] lemmas adjacentchain_obtain_proper = binrelchain_obtain_proper [of _ _ adjacent] lemma adjacentchain_card: "adjacentchain (x#xs@[y]) \ card x = card y" using adjacent_card by (induct xs arbitrary: x) auto subsection \Locale and basic facts\ locale SimplicialComplex = fixes X :: "'a set set" assumes finite_simplices: "\x\X. finite x" and faces : "x\X \ y\x \ y\X" context SimplicialComplex begin abbreviation "Subcomplex Y \ Y \ X \ SimplicialComplex Y" definition "maxsimp x \ x\X \ (\z\X. x\z \ z=x)" definition adjacentset :: "'a set \ 'a set set" where "adjacentset x = {y\X. x\y}" lemma finite_simplex: "x\X \ finite x" using finite_simplices by simp lemma singleton_simplex: "v\\X \ {v} \ X" using faces by auto lemma maxsimpI: "x \ X \ (\z. 
z\X \ x\z \ z=x) \ maxsimp x" using maxsimp_def by auto lemma maxsimpD_simplex: "maxsimp x \ x\X" using maxsimp_def by fast lemma maxsimpD_maximal: "maxsimp x \ z\X \ x\z \ z=x" using maxsimp_def by auto lemmas finite_maxsimp = finite_simplex[OF maxsimpD_simplex] lemma maxsimp_nempty: "X \ {{}} \ maxsimp x \ x \ {}" unfolding maxsimp_def by fast lemma maxsimp_vertices: "maxsimp x \ x\\X" using maxsimpD_simplex by fast lemma adjacentsetD_adj: "y \ adjacentset x \ x\y" using adjacentset_def by fast lemma max_in_subcomplex: "\ Subcomplex Y; y \ Y; maxsimp y \ \ SimplicialComplex.maxsimp Y y" using maxsimpD_maximal by (fast intro: SimplicialComplex.maxsimpI) lemma face_im: assumes "w \ X" "y \ f`w" defines "u \ {a\w. f a \ y}" shows "y \ f\X" using assms faces[of w u] image_eqI[of y "(`) f" u X] by fast lemma im_faces: "x \ f \ X \ y \ x \ y \ f \ X" using faces face_im[of _ y] by (cases "y={}") auto lemma map_is_simplicial_morph: "SimplicialComplex (f\X)" proof show "\x\f\X. finite x" using finite_simplices by fast show "\x y. x \f\X \ y\x \ y\f\X" using im_faces by fast qed lemma vertex_set_int: assumes "SimplicialComplex Y" shows "\(X\Y) = \X \ \Y" proof have "\v. v \ \X \ \Y \ v\ \(X\Y)" using faces SimplicialComplex.faces[OF assms] by auto thus "\(X\Y) \ \X \ \Y" by fast qed auto end (* context SimplicialComplex *) subsection \Chains of maximal simplices\ text \ Chains of maximal simplices (with respect to adjacency) will allow us to walk through chamber complexes. But there is much we can say about them in simplicial complexes. We will call a chain of maximal simplices proper (using the prefix \p\ as a naming convention to denote proper) if no maximal simplex appears more than once in the chain. (Some sources elect to call improper chains prechains, and reserve the name chain to describe a proper chain. And usually a slightly weaker notion of proper is used, requiring only that no maximal simplex appear twice in succession. But it essentially makes no difference, and we found it easier to use @{const distinct} rather than @{term "binrelchain not_equal"}.) \ context SimplicialComplex begin definition "maxsimpchain xs \ (\x\set xs. maxsimp x) \ adjacentchain xs" definition "pmaxsimpchain xs \ (\x\set xs. maxsimp x) \ padjacentchain xs" function min_maxsimpchain :: "'a set list \ bool" where "min_maxsimpchain [] = True" | "min_maxsimpchain [x] = maxsimp x" | "min_maxsimpchain (x#xs@[y]) = (x\y \ is_arg_min length (\zs. 
maxsimpchain (x#zs@[y])) xs)" by (auto, rule list_cases_Cons_snoc) termination by (relation "measure length") auto lemma maxsimpchain_snocI: "\ maxsimpchain (xs@[x]); maxsimp y; x\y \ \ maxsimpchain (xs@[x,y])" using maxsimpchain_def binrelchain_snoc maxsimpchain_def by auto lemma maxsimpchainD_maxsimp: "maxsimpchain xs \ x \ set xs \ maxsimp x" using maxsimpchain_def by fast lemma maxsimpchainD_adj: "maxsimpchain xs \ adjacentchain xs" using maxsimpchain_def by fast lemma maxsimpchain_CConsI: "\ maxsimp w; maxsimpchain (x#xs); w\x \ \ maxsimpchain (w#x#xs)" using maxsimpchain_def by auto lemma maxsimpchain_Cons_reduce: "maxsimpchain (x#xs) \ maxsimpchain xs" using adjacentchain_Cons_reduce maxsimpchain_def by fastforce lemma maxsimpchain_append_reduce1: "maxsimpchain (xs@ys) \ maxsimpchain xs" using binrelchain_append_reduce1 maxsimpchain_def by auto lemma maxsimpchain_append_reduce2: "maxsimpchain (xs@ys) \ maxsimpchain ys" using binrelchain_append_reduce2 maxsimpchain_def by auto lemma maxsimpchain_remdup_adj: "maxsimpchain (xs@[x,x]@ys) \ maxsimpchain (xs@[x]@ys)" using maxsimpchain_def binrelchain_remdup_adj by auto lemma maxsimpchain_rev: "maxsimpchain xs \ maxsimpchain (rev xs)" using maxsimpchainD_maxsimp adjacent_sym binrelchain_sym_rev[of adjacent] unfolding maxsimpchain_def by fastforce lemma maxsimpchain_overlap_join: "maxsimpchain (xs@[w]) \ maxsimpchain (w#ys) \ maxsimpchain (xs@w#ys)" using binrelchain_overlap_join maxsimpchain_def by auto lemma pmaxsimpchain: "pmaxsimpchain xs \ maxsimpchain xs" using maxsimpchain_def pmaxsimpchain_def by fast lemma pmaxsimpchainI_maxsimpchain: "maxsimpchain xs \ distinct xs \ pmaxsimpchain xs" using maxsimpchain_def pmaxsimpchain_def by fast lemma pmaxsimpchain_CConsI: "\ maxsimp w; pmaxsimpchain (x#xs); w\x; w \ set (x#xs) \ \ pmaxsimpchain (w#x#xs)" using pmaxsimpchain_def by auto lemmas pmaxsimpchainD_maxsimp = maxsimpchainD_maxsimp[OF pmaxsimpchain] lemmas pmaxsimpchainD_adj = maxsimpchainD_adj [OF pmaxsimpchain] lemma pmaxsimpchainD_distinct: "pmaxsimpchain xs \ distinct xs" using pmaxsimpchain_def by fast lemma pmaxsimpchain_Cons_reduce: "pmaxsimpchain (x#xs) \ pmaxsimpchain xs" using maxsimpchain_Cons_reduce pmaxsimpchain pmaxsimpchainD_distinct by (fastforce intro: pmaxsimpchainI_maxsimpchain) lemma pmaxsimpchain_append_reduce1: "pmaxsimpchain (xs@ys) \ pmaxsimpchain xs" using maxsimpchain_append_reduce1 pmaxsimpchain pmaxsimpchainD_distinct by (fastforce intro: pmaxsimpchainI_maxsimpchain) lemma maxsimpchain_obtain_pmaxsimpchain: assumes "x\y" "maxsimpchain (x#xs@[y])" shows "\ys. set ys \ set xs \ length ys \ length xs \ pmaxsimpchain (x#ys@[y])" proof- obtain ys where ys: "set ys \ set xs" "length ys \ length xs" "padjacentchain (x#ys@[y])" using maxsimpchainD_adj[OF assms(2)] adjacentchain_obtain_proper[OF assms(1)] by auto from ys(1) assms(2) have "\a\set (x#ys@[y]). maxsimp a" using maxsimpchainD_maxsimp by auto with ys show ?thesis unfolding pmaxsimpchain_def by auto qed lemma min_maxsimpchainD_maxsimpchain: assumes "min_maxsimpchain xs" shows "maxsimpchain xs" proof (cases xs rule: list_cases_Cons_snoc) case Nil thus ?thesis using maxsimpchain_def by simp next case Single with assms show ?thesis using maxsimpchain_def by simp next case Cons_snoc with assms show ?thesis using is_arg_minD1 by fastforce qed lemma min_maxsimpchainD_min_betw: "min_maxsimpchain (x#xs@[y]) \ maxsimpchain (x#ys@[y]) \ length ys \ length xs" using is_arg_minD2 by fastforce lemma min_maxsimpchainI_betw: assumes "x\y" "maxsimpchain (x#xs@[y])" "\ys. 
maxsimpchain (x#ys@[y]) \ length xs \ length ys" shows "min_maxsimpchain (x#xs@[y])" using assms by (simp add: is_arg_min_linorderI) lemma min_maxsimpchainI_betw_compare: assumes "x\y" "maxsimpchain (x#xs@[y])" "min_maxsimpchain (x#ys@[y])" "length xs = length ys" shows "min_maxsimpchain (x#xs@[y])" using assms min_maxsimpchainD_min_betw min_maxsimpchainI_betw by auto lemma min_maxsimpchain_pmaxsimpchain: assumes "min_maxsimpchain xs" shows "pmaxsimpchain xs" proof ( rule pmaxsimpchainI_maxsimpchain, rule min_maxsimpchainD_maxsimpchain, rule assms, cases xs rule: list_cases_Cons_snoc ) case (Cons_snoc x ys y) have "\ distinct (x#ys@[y]) \ False" proof (cases "x\set ys" "y\set ys" rule: two_cases) case both from both(1) obtain as bs where "ys = as@x#bs" using in_set_conv_decomp[of x ys] by fast with assms Cons_snoc show False using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_append_reduce2[of "x#as"] min_maxsimpchainD_min_betw[of x ys y] by fastforce next case one from one(1) obtain as bs where "ys = as@x#bs" using in_set_conv_decomp[of x ys] by fast with assms Cons_snoc show False using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_append_reduce2[of "x#as"] min_maxsimpchainD_min_betw[of x ys y] by fastforce next case other from other(2) obtain as bs where "ys = as@y#bs" using in_set_conv_decomp[of y ys] by fast with assms Cons_snoc show False using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_append_reduce1[of "x#as@[y]"] min_maxsimpchainD_min_betw[of x ys y] by fastforce next case neither moreover assume "\ distinct (x # ys @ [y])" ultimately obtain as a bs cs where "ys = as@[a]@bs@[a]@cs" using assms Cons_snoc not_distinct_decomp[of ys] by auto with assms Cons_snoc show False using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_append_reduce1[of "x#as@[a]"] maxsimpchain_append_reduce2[of "x#as@[a]@bs" "a#cs@[y]"] maxsimpchain_overlap_join[of "x#as" a "cs@[y]"] min_maxsimpchainD_min_betw[of x ys y "as@a#cs"] by auto qed with Cons_snoc show "distinct xs" by fast qed auto lemma min_maxsimpchain_rev: assumes "min_maxsimpchain xs" shows "min_maxsimpchain (rev xs)" proof (cases xs rule: list_cases_Cons_snoc) case Single with assms show ?thesis using min_maxsimpchainD_maxsimpchain maxsimpchainD_maxsimp by simp next case (Cons_snoc x ys y) moreover have "min_maxsimpchain (y # rev ys @ [x])" proof (rule min_maxsimpchainI_betw) from Cons_snoc assms show "y\x" using min_maxsimpchain_pmaxsimpchain pmaxsimpchainD_distinct by auto from Cons_snoc show "maxsimpchain (y # rev ys @ [x])" using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_rev by fastforce from Cons_snoc assms show "\zs. 
maxsimpchain (y#zs@[x]) \ length (rev ys) \ length zs" using maxsimpchain_rev min_maxsimpchainD_min_betw[of x ys y] by fastforce qed ultimately show ?thesis by simp qed simp lemma min_maxsimpchain_adj: "\ maxsimp x; maxsimp y; x\y; x\y \ \ min_maxsimpchain [x,y]" using maxsimpchain_def min_maxsimpchainI_betw[of x y "[]"] by simp lemma min_maxsimpchain_betw_CCons_reduce: assumes "min_maxsimpchain (w#x#ys@[z])" shows "min_maxsimpchain (x#ys@[z])" proof (rule min_maxsimpchainI_betw) from assms show "x\z" using min_maxsimpchain_pmaxsimpchain pmaxsimpchainD_distinct by fastforce show "maxsimpchain (x#ys@[z])" using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_Cons_reduce by fast next fix zs assume "maxsimpchain (x#zs@[z])" hence "maxsimpchain (w#x#zs@[z])" using min_maxsimpchainD_maxsimpchain[OF assms] maxsimpchain_def by fastforce with assms show "length ys \ length zs" using min_maxsimpchainD_min_betw[of w "x#ys" z "x#zs"] by simp qed lemma min_maxsimpchain_betw_uniform_length: assumes "min_maxsimpchain (x#xs@[y])" "min_maxsimpchain (x#ys@[y])" shows "length xs = length ys" using min_maxsimpchainD_min_betw[OF assms(1)] min_maxsimpchainD_min_betw[OF assms(2)] min_maxsimpchainD_maxsimpchain[OF assms(1)] min_maxsimpchainD_maxsimpchain[OF assms(2)] by fastforce lemma not_min_maxsimpchainI_betw: "\ maxsimpchain (x#ys@[y]); length ys < length xs \ \ \ min_maxsimpchain (x#xs@[y])" using min_maxsimpchainD_min_betw not_less by blast lemma maxsimpchain_in_subcomplex: "\ Subcomplex Y; set ys \ Y; maxsimpchain ys \ \ SimplicialComplex.maxsimpchain Y ys" using maxsimpchain_def max_in_subcomplex SimplicialComplex.maxsimpchain_def by force end (* context SimplicialComplex *) subsection \Isomorphisms of simplicial complexes\ text \ Here we develop the concept of isomorphism of simplicial complexes. Note that we have not bothered to first develop the concept of morphism of simplicial complexes, since every function on the vertex set of a simplicial complex can be considered a morphism of complexes (see lemma \map_is_simplicial_morph\ above). 
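Concretely, for an arbitrary vertex map $f$ the collection of images of the simplices of $X$ under $f$ is again a simplicial complex, so the only further ingredient needed for an isomorphism (onto the image complex) is injectivity of $f$ on the vertex set $\bigcup X$; this is the single extra assumption of the locale below.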
\ locale SimplicialComplexIsomorphism = SimplicialComplex X for X :: "'a set set" + fixes f :: "'a \ 'b" assumes inj: "inj_on f (\X)" begin lemmas morph = map_is_simplicial_morph[of f] lemma iso_codim_map: "x \ X \ y \ X \ card (f`x - f`y) = card (x-y)" using inj inj_on_image_set_diff[of f _ x y] finite_simplex subset_inj_on[of f _ "x-y"] inj_on_iff_eq_card[of "x-y"] by fastforce lemma maxsimp_im_max: "maxsimp x \ w \ X \ f`x \ f`w \ f`w = f`x" using maxsimpD_simplex inj_onD[OF inj] maxsimpD_maximal[of x w] by blast lemma maxsimp_map: "maxsimp x \ SimplicialComplex.maxsimp (f\X) (f`x)" using maxsimpD_simplex maxsimp_im_max morph SimplicialComplex.maxsimpI[of "f\X" "f`x"] by fastforce lemma iso_adj_int_im: assumes "maxsimp x" "maxsimp y" "x\y" "x\y" shows "(f`x \ f`y) \ f`x" proof (rule facetrelI_card) from assms(1,2) have 1: "f ` x \ f ` y \ f ` y = f ` x" using maxsimp_map SimplicialComplex.maxsimpD_simplex[OF morph] SimplicialComplex.maxsimpD_maximal[OF morph] by simp thus "f`x \ f`y \ f`x" by fast from assms(1) have "card (f`x - f`x \ f`y) \ card (f`x - f`(x\y))" using finite_maxsimp card_mono[of "f`x - f`(x\y)" "f`x - f`x \ f`y"] by fast moreover from assms(1,3,4) have "card (f`x - f`(x\y)) = 1" using maxsimpD_simplex faces[of x] maxsimpD_simplex iso_codim_map adjacent_int_facet1[of x y] facetrel_card by fastforce ultimately have "card (f`x - f`x \ f`y) \ 1" by simp moreover from assms(1,2,4) have "card (f`x - f`x \ f`y) \ 0" using 1 maxsimpD_simplex finite_maxsimp inj_onD[OF induced_pow_fun_inj_on, OF inj, of x y] by auto ultimately show "card (f`x - f`x \ f`y) = 1" by simp qed lemma iso_adj_map: assumes "maxsimp x" "maxsimp y" "x\y" "x\y" shows "f`x \ f`y" using assms(3,4) iso_adj_int_im[OF assms] adjacent_sym iso_adj_int_im[OF assms(2) assms(1)] by (auto simp add: Int_commute intro: adjacentI) lemma pmaxsimpchain_map: "pmaxsimpchain xs \ SimplicialComplex.pmaxsimpchain (f\X) (f\xs)" proof (induct xs rule: list_induct_CCons) case Nil show ?case using map_is_simplicial_morph SimplicialComplex.pmaxsimpchain_def by fastforce next case (Single x) thus ?case using map_is_simplicial_morph pmaxsimpchainD_maxsimp maxsimp_map SimplicialComplex.pmaxsimpchain_def by fastforce next case (CCons x y xs) have "SimplicialComplex.pmaxsimpchain (f \ X) ( f`x # f`y # f\xs)" proof ( rule SimplicialComplex.pmaxsimpchain_CConsI, rule map_is_simplicial_morph ) from CCons(2) show "SimplicialComplex.maxsimp (f\X) (f`x)" using pmaxsimpchainD_maxsimp maxsimp_map by simp from CCons show "SimplicialComplex.pmaxsimpchain (f\X) (f`y # f\xs)" using pmaxsimpchain_Cons_reduce by simp from CCons(2) show "f`x \ f`y" using pmaxsimpchain_def iso_adj_map by simp from inj CCons(2) have "distinct (f\(x#y#xs))" using maxsimpD_simplex inj_on_distinct_setlistmapim unfolding pmaxsimpchain_def by blast thus "f`x \ set (f`y # f\xs)" by simp qed thus ?case by simp qed end (* context SimplicialComplexIsomorphism *) subsection \The complex associated to a poset\ text \ A simplicial complex is naturally a poset under the subset relation. The following develops the reverse direction: constructing a simplicial complex from a suitable poset. \ context ordering begin definition PosetComplex :: "'a set \ 'a set set" where "PosetComplex P \ (\x\P. { {y. pseudominimal_in (P.\<^bold>\x) y} })" lemma poset_is_SimplicialComplex: assumes "\x\P. simplex_like (P.\<^bold>\x)" shows "SimplicialComplex (PosetComplex P)" proof (rule SimplicialComplex.intro, rule ballI) fix a assume "a \ PosetComplex P" from this obtain x where "x\P" "a = {y. 
pseudominimal_in (P.\<^bold>\x) y}" unfolding PosetComplex_def by fast with assms show "finite a" using pseudominimal_inD1 simplex_likeD_finite finite_subset[of a "P.\<^bold>\x"] by fast next fix a b assume ab: "a \ PosetComplex P" "b\a" from ab(1) obtain x where x: "x\P" "a = {y. pseudominimal_in (P.\<^bold>\x) y}" unfolding PosetComplex_def by fast from assms x(1) obtain f and A::"nat set" where fA: "OrderingSetIso less_eq less (\) (\) (P.\<^bold>\x) f" "f`(P.\<^bold>\x) = Pow A" using simplex_likeD_iso[of "P.\<^bold>\x"] by auto define x' where x': "x' \ the_inv_into (P.\<^bold>\x) f (\(f`b))" from fA x(2) ab(2) x' have x'_P: "x'\P" using collect_pseudominimals_below_in_poset[of P x f] by simp moreover from x fA ab(2) x' have "b = {y. pseudominimal_in (P.\<^bold>\x') y}" using collect_pseudominimals_below_in_eq[of x P f] by simp ultimately show "b \ PosetComplex P" unfolding PosetComplex_def by fast qed definition poset_simplex_map :: "'a set \ 'a \ 'a set" where "poset_simplex_map P x = {y. pseudominimal_in (P.\<^bold>\x) y}" lemma poset_to_PosetComplex_OrderingSetMap: assumes "\x. x\P \ simplex_like (P.\<^bold>\x)" shows "OrderingSetMap (\<^bold>\) (\<^bold><) (\) (\) P (poset_simplex_map P)" proof from assms show "\a b. \ a\P; b\P; a\<^bold>\b \ \ poset_simplex_map P a \ poset_simplex_map P b" using simplex_like_has_bottom pseudominimal_in_below_in unfolding poset_simplex_map_def by fast qed end (* context ordering *) text \ When a poset affords a simplicial complex, there is a natural morphism of posets from the source poset into the poset of sets in the complex, as above. However, some further assumptions are necessary to ensure that this morphism is an isomorphism. These conditions are collected in the following locale. \ locale ComplexLikePoset = ordering less_eq less for less_eq :: "'a\'a\bool" (infix "\<^bold>\" 50) and less :: "'a\'a\bool" (infix "\<^bold><" 50) + fixes P :: "'a set" assumes below_in_P_simplex_like: "x\P \ simplex_like (P.\<^bold>\x)" and P_has_bottom : "has_bottom P" and P_has_glbs : "x\P \ y\P \ \b. 
glbound_in_of P x y b" begin abbreviation "smap \ poset_simplex_map P" lemma smap_onto_PosetComplex: "smap ` P = PosetComplex P" using poset_simplex_map_def PosetComplex_def by auto lemma ordsetmap_smap: "\ a\P; b\P; a\<^bold>\b \ \ smap a \ smap b" using OrderingSetMap.ordsetmap[ OF poset_to_PosetComplex_OrderingSetMap, OF below_in_P_simplex_like ] poset_simplex_map_def by simp lemma inj_on_smap: "inj_on smap P" proof (rule inj_onI) fix x y assume xy: "x\P" "y\P" "smap x = smap y" show "x = y" proof (cases "smap x = {}") case True with xy show ?thesis using poset_simplex_map_def below_in_P_simplex_like P_has_bottom simplex_like_no_pseudominimal_in_below_in_imp_singleton[of x P] simplex_like_no_pseudominimal_in_below_in_imp_singleton[of y P] below_in_singleton_is_bottom[of P x] below_in_singleton_is_bottom[of P y] by auto next case False from this obtain z where "z \ smap x" by fast with xy(3) have z1: "z \ P.\<^bold>\x" "z \ P.\<^bold>\y" using pseudominimal_inD1 poset_simplex_map_def by auto hence "lbound_of x y z" by (auto intro: lbound_ofI) with z1(1) obtain b where b: "glbound_in_of P x y b" using xy(1,2) P_has_glbs by fast moreover have "b \ P.\<^bold>\x" "b \ P.\<^bold>\y" using glbound_in_ofD_in[OF b] glbound_in_of_less_eq1[OF b] glbound_in_of_less_eq2[OF b] by auto ultimately show ?thesis using xy below_in_P_simplex_like pseudominimal_in_below_in_less_eq_glbound[of P x _ y b] simplex_like_below_in_above_pseudominimal_is_top[of x P] simplex_like_below_in_above_pseudominimal_is_top[of y P] unfolding poset_simplex_map_def by force qed qed lemma OrderingSetIso_smap: "OrderingSetIso (\<^bold>\) (\<^bold><) (\) (\) P smap" proof (rule OrderingSetMap.isoI) show "OrderingSetMap (\<^bold>\) (\<^bold><) (\) (\) P smap" using poset_simplex_map_def below_in_P_simplex_like poset_to_PosetComplex_OrderingSetMap by simp next fix x y assume xy: "x\P" "y\P" "smap x \ smap y" from xy(2) have "simplex_like (P.\<^bold>\y)" using below_in_P_simplex_like by fast from this obtain g and A::"nat set" where "OrderingSetIso (\<^bold>\) (\<^bold><) (\) (\) (P.\<^bold>\y) g" "g`(P.\<^bold>\y) = Pow A" using simplex_likeD_iso[of "P.\<^bold>\y"] by auto with xy show "x\<^bold>\y" using poset_simplex_map_def collect_pseudominimals_below_in_eq[of y P g] collect_pseudominimals_below_in_poset[of P y g] inj_onD[OF inj_on_smap, of "the_inv_into (P.\<^bold>\y) g (\(g ` smap x))" x] collect_pseudominimals_below_in_less_eq_top[of P y g A "smap x"] by simp qed (rule inj_on_smap) lemmas rev_ordsetmap_smap = OrderingSetIso.rev_ordsetmap[OF OrderingSetIso_smap] end (* context ComplexLikePoset *) end (* theory *) diff --git a/thys/Complex_Geometry/Unit_Circle_Preserving_Moebius.thy b/thys/Complex_Geometry/Unit_Circle_Preserving_Moebius.thy --- a/thys/Complex_Geometry/Unit_Circle_Preserving_Moebius.thy +++ b/thys/Complex_Geometry/Unit_Circle_Preserving_Moebius.thy @@ -1,1201 +1,1202 @@ (* ---------------------------------------------------------------------------- *) section \Unit circle preserving Möbius transformations\ (* ---------------------------------------------------------------------------- *) text \In this section we shall examine Möbius transformations that map the unit circle onto itself. 
We shall say that they fix or preserve the unit circle (although they need not fix each of its points).\ theory Unit_Circle_Preserving_Moebius imports Unitary11_Matrices Moebius Oriented_Circlines begin (* ---------------------------------------------------------------------------- *) subsection \Möbius transformations that fix the unit circle\ (* ---------------------------------------------------------------------------- *) text \We define Möbius transformations that preserve the unit circle as transformations represented by generalized unitary matrices with the $1-1$ signature (elements of the group $GU_{1,1}(2, \mathbb{C})$, defined earlier in the theory Unitary11Matrices).\ lift_definition unit_circle_fix_mmat :: "moebius_mat \ bool" is unitary11_gen done lift_definition unit_circle_fix :: "moebius \ bool" is unit_circle_fix_mmat apply transfer apply (auto simp del: mult_sm.simps) apply (simp del: mult_sm.simps add: unitary11_gen_mult_sm) apply (simp del: mult_sm.simps add: unitary11_gen_div_sm) done text \Our algebraic characterisation (by matrices) is geometrically correct.\ lemma unit_circle_fix_iff: shows "unit_circle_fix M \ moebius_circline M unit_circle = unit_circle" (is "?rhs = ?lhs") proof assume ?lhs thus ?rhs proof (transfer, transfer) fix M :: complex_mat assume "mat_det M \ 0" assume "circline_eq_cmat (moebius_circline_cmat_cmat M unit_circle_cmat) unit_circle_cmat" then obtain k where "k \ 0" "(1, 0, 0, -1) = cor k *\<^sub>s\<^sub>m congruence (mat_inv M) (1, 0, 0, -1)" by auto hence "(1/cor k, 0, 0, -1/cor k) = congruence (mat_inv M) (1, 0, 0, -1)" using mult_sm_inv_l[of "cor k" "congruence (mat_inv M) (1, 0, 0, -1)" ] by simp hence "congruence M (1/cor k, 0, 0, -1/cor k) = (1, 0, 0, -1)" using \mat_det M \ 0\ mat_det_inv[of M] using congruence_inv[of "mat_inv M" "(1, 0, 0, -1)" "(1/cor k, 0, 0, -1/cor k)"] by simp hence "congruence M (1, 0, 0, -1) = cor k *\<^sub>s\<^sub>m (1, 0, 0, -1)" using congruence_scale_m[of "M" "1/cor k" "(1, 0, 0, -1)"] using mult_sm_inv_l[of "1/ cor k" "congruence M (1, 0, 0, -1)" "(1, 0, 0, -1)"] \k \ 0\ by simp thus "unitary11_gen M" using \k \ 0\ unfolding unitary11_gen_def by simp qed next assume ?rhs thus ?lhs proof (transfer, transfer) fix M :: complex_mat assume "mat_det M \ 0" assume "unitary11_gen M" hence "unitary11_gen (mat_inv M)" using \mat_det M \ 0\ using unitary11_gen_mat_inv by simp thus " circline_eq_cmat (moebius_circline_cmat_cmat M unit_circle_cmat) unit_circle_cmat" unfolding unitary11_gen_real by auto (rule_tac x="1/k" in exI, simp) qed qed lemma circline_set_fix_iff_circline_fix: assumes "circline_set H' \ {}" shows "circline_set (moebius_circline M H) = circline_set H' \ moebius_circline M H = H'" using assms by auto (rule inj_circline_set, auto) lemma unit_circle_fix_iff_unit_circle_set: shows "unit_circle_fix M \ moebius_pt M ` unit_circle_set = unit_circle_set" proof- have "circline_set unit_circle \ {}" using one_in_unit_circle_set by auto thus ?thesis using unit_circle_fix_iff[of M] circline_set_fix_iff_circline_fix[of unit_circle M unit_circle] by (simp add: unit_circle_set_def) qed text \Unit circle preserving Möbius transformations form a group.
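At the level of representing matrices this can be checked directly. Writing $J = \left(\begin{array}{cc}1 & 0\\ 0 & -1\end{array}\right)$, the characterisation used in the proof of the previous lemma says, up to the choice of representative, that $M$ represents a unit circle preserving transformation exactly when $M^{*} J M = k J$ for some real $k \neq 0$. If also $N^{*} J N = l J$, then $(M N)^{*} J (M N) = N^{*} (M^{*} J M) N = k l\, J$ and $(M^{-1})^{*} J M^{-1} = \frac{1}{k} J$, so the condition is closed under matrix multiplication and inversion; this is what the identity, composition and inverse lemmas below establish for the corresponding transformations.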
\ lemma unit_circle_fix_id_moebius [simp]: shows "unit_circle_fix id_moebius" by (transfer, transfer, simp add: unitary11_gen_def mat_adj_def mat_cnj_def) lemma unit_circle_fix_moebius_add [simp]: assumes "unit_circle_fix M1" and "unit_circle_fix M2" shows "unit_circle_fix (M1 + M2)" using assms unfolding unit_circle_fix_iff by auto lemma unit_circle_fix_moebius_comp [simp]: assumes "unit_circle_fix M1" and "unit_circle_fix M2" shows "unit_circle_fix (moebius_comp M1 M2)" using unit_circle_fix_moebius_add[OF assms] by simp lemma unit_circle_fix_moebius_uminus [simp]: assumes "unit_circle_fix M" shows "unit_circle_fix (-M)" using assms unfolding unit_circle_fix_iff by (metis moebius_circline_comp_inv_left uminus_moebius_def) lemma unit_circle_fix_moebius_inv [simp]: assumes "unit_circle_fix M" shows "unit_circle_fix (moebius_inv M)" using unit_circle_fix_moebius_uminus[OF assms] by simp text \Unit circle fixing transforms preserve inverse points.\ lemma unit_circle_fix_moebius_pt_inversion [simp]: assumes "unit_circle_fix M" shows "moebius_pt M (inversion z) = inversion (moebius_pt M z)" using assms using symmetry_principle[of z "inversion z" unit_circle M] using unit_circle_fix_iff[of M, symmetric] using circline_symmetric_inv_homo_disc[of z] using circline_symmetric_inv_homo_disc'[of "moebius_pt M z" "moebius_pt M (inversion z)"] by metis (* -------------------------------------------------------------------------- *) subsection \Möbius transformations that fix the imaginary unit circle\ (* -------------------------------------------------------------------------- *) text \Only for completeness we show that Möbius transformations that preserve the imaginary unit circle are exactly those characterised by generalized unitary matrices (with the (2, 0) signature).\ lemma imag_unit_circle_fixed_iff_unitary_gen: assumes "mat_det (A, B, C, D) \ 0" shows "moebius_circline (mk_moebius A B C D) imag_unit_circle = imag_unit_circle \ unitary_gen (A, B, C, D)" (is "?lhs = ?rhs") proof assume ?lhs thus ?rhs using assms proof (transfer, transfer) fix A B C D :: complex let ?M = "(A, B, C, D)" and ?E = "(1, 0, 0, 1)" assume "circline_eq_cmat (moebius_circline_cmat_cmat (mk_moebius_cmat A B C D) imag_unit_circle_cmat) imag_unit_circle_cmat" "mat_det ?M \ 0" then obtain k where "k \ 0" "?E = cor k *\<^sub>s\<^sub>m congruence (mat_inv ?M) ?E" by auto hence "unitary_gen (mat_inv ?M)" using mult_sm_inv_l[of "cor k" "congruence (mat_inv ?M) ?E" "?E"] unfolding unitary_gen_def by (metis congruence_def divide_eq_0_iff eye_def mat_eye_r of_real_eq_0_iff one_neq_zero) thus "unitary_gen ?M" using unitary_gen_inv[of "mat_inv ?M"] \mat_det ?M \ 0\ by (simp del: mat_inv.simps) qed next assume ?rhs thus ?lhs using assms proof (transfer, transfer) fix A B C D :: complex let ?M = "(A, B, C, D)" and ?E = "(1, 0, 0, 1)" assume "unitary_gen ?M" "mat_det ?M \ 0" hence "unitary_gen (mat_inv ?M)" using unitary_gen_inv[of ?M] by simp then obtain k where "k \ 0" "mat_adj (mat_inv ?M) *\<^sub>m\<^sub>m (mat_inv ?M) = cor k *\<^sub>s\<^sub>m eye" using unitary_gen_real[of "mat_inv ?M"] mat_det_inv[of ?M] by auto hence *: "?E = (1 / cor k) *\<^sub>s\<^sub>m (mat_adj (mat_inv ?M) *\<^sub>m\<^sub>m (mat_inv ?M))" using mult_sm_inv_l[of "cor k" eye "mat_adj (mat_inv ?M) *\<^sub>m\<^sub>m (mat_inv ?M)"] by simp have "\k. 
k \ 0 \ (1, 0, 0, 1) = cor k *\<^sub>s\<^sub>m (mat_adj (mat_inv (A, B, C, D)) *\<^sub>m\<^sub>m (1, 0, 0, 1) *\<^sub>m\<^sub>m mat_inv (A, B, C, D))" using \mat_det ?M \ 0\ \k \ 0\ by (metis "*" Im_complex_of_real Re_complex_of_real \mat_adj (mat_inv ?M) *\<^sub>m\<^sub>m mat_inv ?M = cor k *\<^sub>s\<^sub>m eye\ complex_of_real_Re eye_def mat_eye_l mult_mm_assoc mult_mm_sm mult_sm_eye_mm of_real_1 of_real_divide of_real_eq_1_iff zero_eq_1_divide_iff) thus "circline_eq_cmat (moebius_circline_cmat_cmat (mk_moebius_cmat A B C D) imag_unit_circle_cmat) imag_unit_circle_cmat" using \mat_det ?M \ 0\ \k \ 0\ by (simp del: mat_inv.simps) qed qed (* -------------------------------------------------------------------------- *) subsection \Möbius transformations that fix the oriented unit circle and the unit disc\ (* -------------------------------------------------------------------------- *) text \Möbius transformations that fix the unit circle either map the unit disc onto itself or exchange it with its exterior. The transformations that fix the unit disc can be recognized from their matrices -- they have the form as before, but additionally it must hold that $|a|^2 > |b|^2$.\ definition unit_disc_fix_cmat :: "complex_mat \ bool" where [simp]: "unit_disc_fix_cmat M \ (let (A, B, C, D) = M in unitary11_gen (A, B, C, D) \ (B = 0 \ Re ((A*D)/(B*C)) > 1))" lift_definition unit_disc_fix_mmat :: "moebius_mat \ bool" is unit_disc_fix_cmat done lift_definition unit_disc_fix :: "moebius \ bool" is unit_disc_fix_mmat proof transfer fix M M' :: complex_mat assume det: "mat_det M \ 0" "mat_det M' \ 0" assume "moebius_cmat_eq M M'" then obtain k where *: "k \ 0" "M' = k *\<^sub>s\<^sub>m M" by auto hence **: "unitary11_gen M \ unitary11_gen M'" using unitary11_gen_mult_sm[of k M] unitary11_gen_div_sm[of k M] by auto obtain A B C D where MM: "(A, B, C, D) = M" by (cases M) auto obtain A' B' C' D' where MM': "(A', B', C', D') = M'" by (cases M') auto show "unit_disc_fix_cmat M = unit_disc_fix_cmat M'" using * ** MM MM' by auto qed text \Transformations that fix the unit disc also fix the unit circle.\ lemma unit_disc_fix_unit_circle_fix [simp]: assumes "unit_disc_fix M" shows "unit_circle_fix M" using assms by (transfer, transfer, auto) text \Transformations that preserve the unit disc preserve the orientation of the unit circle.\ lemma unit_disc_fix_iff_ounit_circle: shows "unit_disc_fix M \ moebius_ocircline M ounit_circle = ounit_circle" (is "?rhs \ ?lhs") proof assume *: ?lhs have "moebius_circline M unit_circle = unit_circle" apply (subst moebius_circline_ocircline[of M unit_circle]) apply (subst of_circline_unit_circle) apply (subst *) by simp hence "unit_circle_fix M" by (simp add: unit_circle_fix_iff) thus ?rhs using * proof (transfer, transfer) fix M :: complex_mat assume "mat_det M \ 0" let ?H = "(1, 0, 0, -1)" obtain A B C D where MM: "(A, B, C, D) = M" by (cases M) auto assume "unitary11_gen M" "ocircline_eq_cmat (moebius_circline_cmat_cmat M unit_circle_cmat) unit_circle_cmat" then obtain k where "0 < k" "?H = cor k *\<^sub>s\<^sub>m congruence (mat_inv M) ?H" by auto hence "congruence M ?H = cor k *\<^sub>s\<^sub>m ?H" using congruence_inv[of "mat_inv M" "?H" "(1/cor k) *\<^sub>s\<^sub>m ?H"] \mat_det M \ 0\ using mult_sm_inv_l[of "cor k" "congruence (mat_inv M) ?H" "?H"] using mult_sm_inv_l[of "1/cor k" "congruence M ?H"] using congruence_scale_m[of M "1/cor k" "?H"] + using \\B. 
\1 / cor k \ 0; (1 / cor k) *\<^sub>s\<^sub>m congruence M (1, 0, 0, - 1) = B\ \ congruence M (1, 0, 0, - 1) = (1 / (1 / cor k)) *\<^sub>s\<^sub>m B\ by (auto simp add: mat_det_inv) then obtain a b k' where "k' \ 0" "M = k' *\<^sub>s\<^sub>m (a, b, cnj b, cnj a)" "sgn (Re (mat_det (a, b, cnj b, cnj a))) = 1" using unitary11_sgn_det_orientation'[of M k] \k > 0\ by auto moreover have "mat_det (a, b, cnj b, cnj a) \ 0" using \sgn (Re (mat_det (a, b, cnj b, cnj a))) = 1\ by (smt sgn_0 zero_complex.simps(1)) ultimately show "unit_disc_fix_cmat M" using unitary11_sgn_det[of k' a b M A B C D] using MM[symmetric] \k > 0\ \unitary11_gen M\ by (simp add: sgn_1_pos split: if_split_asm) qed next assume ?rhs thus ?lhs proof (transfer, transfer) fix M :: complex_mat assume "mat_det M \ 0" obtain A B C D where MM: "(A, B, C, D) = M" by (cases M) auto assume "unit_disc_fix_cmat M" hence "unitary11_gen M" "B = 0 \ 1 < Re (A * D / (B * C))" using MM[symmetric] by auto have "sgn (if B = 0 then 1 else sgn (Re (A * D / (B * C)) - 1)) = 1" using \B = 0 \ 1 < Re (A * D / (B * C))\ by auto then obtain k' where "k' > 0" "congruence M (1, 0, 0, -1) = cor k' *\<^sub>s\<^sub>m (1, 0, 0, -1)" using unitary11_orientation[OF \unitary11_gen M\ MM[symmetric]] by (auto simp add: sgn_1_pos) thus "ocircline_eq_cmat (moebius_circline_cmat_cmat M unit_circle_cmat) unit_circle_cmat" using congruence_inv[of M "(1, 0, 0, -1)" "cor k' *\<^sub>s\<^sub>m (1, 0, 0, -1)"] \mat_det M \ 0\ using congruence_scale_m[of "mat_inv M" "cor k'" "(1, 0, 0, -1)"] by auto qed qed text \Our algebraic characterisation (by matrices) is geometrically correct.\ lemma unit_disc_fix_iff [simp]: assumes "unit_disc_fix M" shows "moebius_pt M ` unit_disc = unit_disc" using assms using unit_disc_fix_iff_ounit_circle[of M] unfolding unit_disc_def by (subst disc_moebius_ocircline[symmetric], simp) lemma unit_disc_fix_discI [simp]: assumes "unit_disc_fix M" and "u \ unit_disc" shows "moebius_pt M u \ unit_disc" using unit_disc_fix_iff assms by blast text \Unit disc preserving transformations form a group.\ lemma unit_disc_fix_id_moebius [simp]: shows "unit_disc_fix id_moebius" by (transfer, transfer, simp add: unitary11_gen_def mat_adj_def mat_cnj_def) lemma unit_disc_fix_moebius_add [simp]: assumes "unit_disc_fix M1" and "unit_disc_fix M2" shows "unit_disc_fix (M1 + M2)" using assms unfolding unit_disc_fix_iff_ounit_circle by auto lemma unit_disc_fix_moebius_comp [simp]: assumes "unit_disc_fix M1" and "unit_disc_fix M2" shows "unit_disc_fix (moebius_comp M1 M2)" using unit_disc_fix_moebius_add[OF assms] by simp lemma unit_disc_fix_moebius_uminus [simp]: assumes "unit_disc_fix M" shows "unit_disc_fix (-M)" using assms unfolding unit_disc_fix_iff_ounit_circle by (metis moebius_ocircline_comp_inv_left uminus_moebius_def) lemma unit_disc_fix_moebius_inv [simp]: assumes "unit_disc_fix M" shows "unit_disc_fix (moebius_inv M)" using unit_disc_fix_moebius_uminus[OF assms] by simp (* -------------------------------------------------------------------------- *) subsection \Rotations are unit disc preserving transformations\ (* -------------------------------------------------------------------------- *) lemma unit_disc_fix_rotation [simp]: shows "unit_disc_fix (moebius_rotation \)" unfolding moebius_rotation_def moebius_similarity_def by (transfer, transfer, simp add: unitary11_gen_def mat_adj_def mat_cnj_def cis_mult) lemma moebius_rotation_unit_circle_fix [simp]: shows "moebius_pt (moebius_rotation \) u \ unit_circle_set \ u \ unit_circle_set" using 
moebius_pt_moebius_inv_in_set unit_circle_fix_iff_unit_circle_set by fastforce lemma ex_rotation_mapping_u_to_positive_x_axis: assumes "u \ 0\<^sub>h" and "u \ \\<^sub>h" shows "\ \. moebius_pt (moebius_rotation \) u \ positive_x_axis" proof- from assms obtain c where *: "u = of_complex c" using inf_or_of_complex by blast have "is_real (cis (- arg c) * c)" "Re (cis (-arg c) * c) > 0" using "*" assms is_real_rot_to_x_axis positive_rot_to_x_axis of_complex_zero_iff by blast+ thus ?thesis using * by (rule_tac x="-arg c" in exI) (simp add: positive_x_axis_def circline_set_x_axis) qed lemma ex_rotation_mapping_u_to_positive_y_axis: assumes "u \ 0\<^sub>h" and "u \ \\<^sub>h" shows "\ \. moebius_pt (moebius_rotation \) u \ positive_y_axis" proof- from assms obtain c where *: "u = of_complex c" using inf_or_of_complex by blast have "is_imag (cis (pi/2 - arg c) * c)" "Im (cis (pi/2 - arg c) * c) > 0" using "*" assms is_real_rot_to_x_axis positive_rot_to_x_axis of_complex_zero_iff by - (simp, simp, simp add: field_simps) thus ?thesis using * by (rule_tac x="pi/2-arg c" in exI) (simp add: positive_y_axis_def circline_set_y_axis) qed lemma wlog_rotation_to_positive_x_axis: assumes in_disc: "u \ unit_disc" and not_zero: "u \ 0\<^sub>h" assumes preserving: "\\ u. \u \ unit_disc; u \ 0\<^sub>h; P (moebius_pt (moebius_rotation \) u)\ \ P u" assumes x_axis: "\x. \is_real x; 0 < Re x; Re x < 1\ \ P (of_complex x)" shows "P u" proof- from in_disc obtain \ where *: "moebius_pt (moebius_rotation \) u \ positive_x_axis" using ex_rotation_mapping_u_to_positive_x_axis[of u] using inf_notin_unit_disc not_zero by blast let ?Mu = "moebius_pt (moebius_rotation \) u" have "P ?Mu" proof- let ?x = "to_complex ?Mu" have "?Mu \ unit_disc" "?Mu \ 0\<^sub>h" "?Mu \ \\<^sub>h" using \u \ unit_disc\ \u \ 0\<^sub>h\ by auto hence "is_real (to_complex ?Mu)" "0 < Re ?x" "Re ?x < 1" using * unfolding positive_x_axis_def circline_set_x_axis by (auto simp add: cmod_eq_Re) thus ?thesis using x_axis[of ?x] \?Mu \ \\<^sub>h\ by simp qed thus ?thesis using preserving[OF in_disc] not_zero by simp qed lemma wlog_rotation_to_positive_x_axis': assumes not_zero: "u \ 0\<^sub>h" and not_inf: "u \ \\<^sub>h" assumes preserving: "\\ u. \u \ 0\<^sub>h; u \ \\<^sub>h; P (moebius_pt (moebius_rotation \) u)\ \ P u" assumes x_axis: "\x. \is_real x; 0 < Re x\ \ P (of_complex x)" shows "P u" proof- from not_zero and not_inf obtain \ where *: "moebius_pt (moebius_rotation \) u \ positive_x_axis" using ex_rotation_mapping_u_to_positive_x_axis[of u] using inf_notin_unit_disc by blast let ?Mu = "moebius_pt (moebius_rotation \) u" have "P ?Mu" proof- let ?x = "to_complex ?Mu" have "?Mu \ 0\<^sub>h" "?Mu \ \\<^sub>h" using \u \ \\<^sub>h\ \u \ 0\<^sub>h\ by auto hence "is_real (to_complex ?Mu)" "0 < Re ?x" using * unfolding positive_x_axis_def circline_set_x_axis by (auto simp add: cmod_eq_Re) thus ?thesis using x_axis[of ?x] \?Mu \ \\<^sub>h\ by simp qed thus ?thesis using preserving[OF not_zero not_inf] by simp qed lemma wlog_rotation_to_positive_y_axis: assumes in_disc: "u \ unit_disc" and not_zero: "u \ 0\<^sub>h" assumes preserving: "\\ u. \u \ unit_disc; u \ 0\<^sub>h; P (moebius_pt (moebius_rotation \) u)\ \ P u" assumes y_axis: "\x. 
\is_imag x; 0 < Im x; Im x < 1\ \ P (of_complex x)" shows "P u" proof- from in_disc and not_zero obtain \ where *: "moebius_pt (moebius_rotation \) u \ positive_y_axis" using ex_rotation_mapping_u_to_positive_y_axis[of u] using inf_notin_unit_disc by blast let ?Mu = "moebius_pt (moebius_rotation \) u" have "P ?Mu" proof- let ?y = "to_complex ?Mu" have "?Mu \ unit_disc" "?Mu \ 0\<^sub>h" "?Mu \ \\<^sub>h" using \u \ unit_disc\ \u \ 0\<^sub>h\ by auto hence "is_imag (to_complex ?Mu)" "0 < Im ?y" "Im ?y < 1" using * unfolding positive_y_axis_def circline_set_y_axis by (auto simp add: cmod_eq_Im) thus ?thesis using y_axis[of ?y] \?Mu \ \\<^sub>h\ by simp qed thus ?thesis using preserving[OF in_disc not_zero] by simp qed (* ---------------------------------------------------------------------------- *) subsection \Blaschke factors are unit disc preserving transformations\ (* ---------------------------------------------------------------------------- *) text \For a given point $a$, Blaschke factor transformations are of the form $k \cdot \left(\begin{array}{cc}1 & -a\\ -\overline{a} & 1\end{array}\right)$. It is a disc preserving Möbius transformation that maps the point $a$ to zero (by the symmetry principle, it must map the inverse point of $a$ to infinity).\ definition blaschke_cmat :: "complex \ complex_mat" where [simp]: "blaschke_cmat a = (if cmod a \ 1 then (1, -a, -cnj a, 1) else eye)" lift_definition blaschke_mmat :: "complex \ moebius_mat" is blaschke_cmat by simp lift_definition blaschke :: "complex \ moebius" is blaschke_mmat done lemma blaschke_0_id [simp]: "blaschke 0 = id_moebius" by (transfer, transfer, simp) lemma blaschke_a_to_zero [simp]: assumes "cmod a \ 1" shows "moebius_pt (blaschke a) (of_complex a) = 0\<^sub>h" using assms by (transfer, transfer, simp) lemma blaschke_inv_a_inf [simp]: assumes "cmod a \ 1" shows "moebius_pt (blaschke a) (inversion (of_complex a)) = \\<^sub>h" using assms unfolding inversion_def by (transfer, transfer) (simp add: vec_cnj_def, rule_tac x="1/(1 - a*cnj a)" in exI, simp) lemma blaschke_inf [simp]: assumes "cmod a < 1" and "a \ 0" shows "moebius_pt (blaschke a) \\<^sub>h = of_complex (- 1 / cnj a)" using assms by (transfer, transfer, simp add: complex_mod_sqrt_Re_mult_cnj) lemma blaschke_0_minus_a [simp]: assumes "cmod a \ 1" shows "moebius_pt (blaschke a) 0\<^sub>h = ~\<^sub>h (of_complex a)" using assms by (transfer, transfer, simp) lemma blaschke_unit_circle_fix [simp]: assumes "cmod a \ 1" shows "unit_circle_fix (blaschke a)" using assms by (transfer, transfer) (simp add: unitary11_gen_def mat_adj_def mat_cnj_def) lemma blaschke_unit_disc_fix [simp]: assumes "cmod a < 1" shows "unit_disc_fix (blaschke a)" using assms proof (transfer, transfer) fix a assume *: "cmod a < 1" show "unit_disc_fix_cmat (blaschke_cmat a)" proof (cases "a = 0") case True thus ?thesis by (simp add: unitary11_gen_def mat_adj_def mat_cnj_def) next case False hence "Re (a * cnj a) < 1" using * by (metis complex_mod_sqrt_Re_mult_cnj real_sqrt_lt_1_iff) hence "1 / Re (a * cnj a) > 1" using False by (smt complex_div_gt_0 less_divide_eq_1_pos one_complex.simps(1) right_inverse_eq) hence "Re (1 / (a * cnj a)) > 1" by (simp add: complex_is_Real_iff) thus ?thesis by (simp add: unitary11_gen_def mat_adj_def mat_cnj_def) qed qed lemma blaschke_unit_circle_fix': assumes "cmod a \ 1" shows "moebius_circline (blaschke a) unit_circle = unit_circle" using assms using blaschke_unit_circle_fix unit_circle_fix_iff by simp lemma blaschke_ounit_circle_fix': assumes "cmod a < 1" shows 
"moebius_ocircline (blaschke a) ounit_circle = ounit_circle" proof- have "Re (a * cnj a) < 1" using assms by (metis complex_mod_sqrt_Re_mult_cnj real_sqrt_lt_1_iff) thus ?thesis using assms using blaschke_unit_disc_fix unit_disc_fix_iff_ounit_circle by simp qed lemma moebius_pt_blaschke [simp]: assumes "cmod a \ 1" and "z \ 1 / cnj a" shows "moebius_pt (blaschke a) (of_complex z) = of_complex ((z - a) / (1 - cnj a * z))" using assms proof (cases "a = 0") case True thus ?thesis by auto next case False thus ?thesis using assms apply (transfer, transfer) apply (simp add: complex_mod_sqrt_Re_mult_cnj) apply (rule_tac x="1 / (1 - cnj a * z)" in exI) apply (simp add: field_simps) done qed (* -------------------------------------------------------------------------- *) subsubsection \Blaschke factors for a real point $a$\ (* -------------------------------------------------------------------------- *) text \If the point $a$ is real, the Blaschke factor preserve x-axis and the upper and the lower halfplane.\ lemma blaschke_real_preserve_x_axis [simp]: assumes "is_real a" and "cmod a < 1" shows "moebius_pt (blaschke a) z \ circline_set x_axis \ z \ circline_set x_axis" proof (cases "a = 0") case True thus ?thesis by simp next case False have "cmod a \ 1" using assms by linarith let ?a = "of_complex a" let ?i = "inversion ?a" let ?M = "moebius_pt (blaschke a)" have *: "?M ?a = 0\<^sub>h" "?M ?i = \\<^sub>h" "?M 0\<^sub>h = of_complex (-a)" using \cmod a \ 1\ blaschke_a_to_zero[of a] blaschke_inv_a_inf[of a] blaschke_0_minus_a[of a] by auto let ?Mx = "moebius_circline (blaschke a) x_axis" have "?a \ circline_set x_axis" "?i \ circline_set x_axis" "0\<^sub>h \ circline_set x_axis" using \is_real a\ \a \ 0\ eq_cnj_iff_real[of a] by auto hence "0\<^sub>h \ circline_set ?Mx" "\\<^sub>h \ circline_set ?Mx" "of_complex (-a) \ circline_set ?Mx" using * apply - apply (force simp add: image_iff)+ apply (simp add: image_iff, rule_tac x="0\<^sub>h" in bexI, simp_all) done moreover have "0\<^sub>h \ circline_set x_axis" "\\<^sub>h \ circline_set x_axis" "of_complex (-a) \ circline_set x_axis" using \is_real a\ by auto moreover have "of_complex (-a) \ 0\<^sub>h" using \a \ 0\ by simp hence "0\<^sub>h \ of_complex (-a)" by metis hence "\!H. 
0\<^sub>h \ circline_set H \ \\<^sub>h \ circline_set H \ of_complex (- a) \ circline_set H" using unique_circline_set[of "0\<^sub>h" "\\<^sub>h" "of_complex (-a)"] \a \ 0\ by simp ultimately have "moebius_circline (blaschke a) x_axis = x_axis" by auto thus ?thesis by (metis circline_set_moebius_circline_iff) qed lemma blaschke_real_preserve_sgn_Im [simp]: assumes "is_real a" and "cmod a < 1" and "z \ \\<^sub>h" and "z \ inversion (of_complex a)" shows "sgn (Im (to_complex (moebius_pt (blaschke a) z))) = sgn (Im (to_complex z))" proof (cases "a = 0") case True thus ?thesis by simp next case False obtain z' where z': "z = of_complex z'" using inf_or_of_complex[of z] \z \ \\<^sub>h\ by auto have "z' \ 1 / cnj a" using assms z' \a \ 0\ by (auto simp add: of_complex_inj) moreover have "a * cnj a \ 1" using \cmod a < 1\ by auto (simp add: complex_mod_sqrt_Re_mult_cnj) moreover have "sgn (Im ((z' - a) / (1 - a * z'))) = sgn (Im z')" proof- have "a * z' \ 1" using \is_real a\ \z' \ 1 / cnj a\ \a \ 0\ eq_cnj_iff_real[of a] by (simp add: field_simps) moreover have "Re (1 - a\<^sup>2) > 0" using \is_real a\ \cmod a < 1\ by (smt Re_power2 minus_complex.simps(1) norm_complex_def one_complex.simps(1) power2_less_0 real_sqrt_lt_1_iff) moreover have "Im ((z' - a) / (1 - a * z')) = Re (((1 - a\<^sup>2) * Im z') / (cmod (1 - a*z'))\<^sup>2)" proof- have "1 - a * cnj z' \ 0" using \z' \ 1 / cnj a\ by (metis Im_complex_div_eq_0 complex_cnj_zero_iff diff_eq_diff_eq diff_numeral_special(9) eq_divide_imp is_real_div mult_not_zero one_complex.simps(2) zero_neq_one) hence "Im ((z' - a) / (1 - a * z')) = Im (((z' - a) * (1 - a * cnj z')) / ((1 - a * z') * cnj (1 - a * z')))" using \is_real a\ eq_cnj_iff_real[of a] by simp also have "... = Im ((z' - a - a * z' * cnj z' + a\<^sup>2 * cnj z') / (cmod (1 - a*z'))\<^sup>2)" unfolding complex_mult_cnj_cmod by (simp add: power2_eq_square field_simps) finally show ?thesis using \is_real a\ by (simp add: field_simps) qed moreover have "0 < (1 - (Re a)\<^sup>2) * Im z' / (cmod (1 - a * z'))\<^sup>2 \ Im z' > 0" using `is_real a` `0 < Re (1 - a\<^sup>2)` by (smt Re_power_real divide_le_0_iff minus_complex.simps(1) not_sum_power2_lt_zero one_complex.simps(1) zero_less_mult_pos) ultimately show ?thesis unfolding sgn_real_def using \cmod a < 1\ \a * z' \ 1\ \is_real a\ by (auto simp add: cmod_eq_Re) qed ultimately show ?thesis using assms z' moebius_pt_blaschke[of a z'] \is_real a\ eq_cnj_iff_real[of a] by simp qed lemma blaschke_real_preserve_sgn_arg [simp]: assumes "is_real a" and "cmod a < 1" and "z \ circline_set x_axis" shows "sgn (arg (to_complex (moebius_pt (blaschke a) z))) = sgn (arg (to_complex z))" proof- have "z \ \\<^sub>h" using assms using special_points_on_x_axis''(3) by blast moreover have "z \ inversion (of_complex a)" using assms by (metis calculation circline_equation_x_axis circline_set_x_axis_I conjugate_of_complex inversion_of_complex inversion_sym is_real_inversion o_apply of_complex_zero reciprocal_zero to_complex_of_complex) ultimately show ?thesis using blaschke_real_preserve_sgn_Im[OF assms(1) assms(2), of z] by (smt arg_Im_sgn assms(3) circline_set_x_axis_I norm_sgn of_complex_to_complex) qed (* -------------------------------------------------------------------------- *) subsubsection \Inverse Blaschke transform\ (* -------------------------------------------------------------------------- *) definition inv_blaschke_cmat :: "complex \ complex_mat" where [simp]: "inv_blaschke_cmat a = (if cmod a \ 1 then (1, a, cnj a, 1) else eye)" lift_definition 
inv_blaschke_mmat :: "complex \ moebius_mat" is inv_blaschke_cmat by simp lift_definition inv_blaschke :: "complex \ moebius" is inv_blaschke_mmat done lemma inv_blaschke_neg [simp]: "inv_blaschke a = blaschke (-a)" by (transfer, transfer) simp lemma inv_blaschke: assumes "cmod a \ 1" shows "blaschke a + inv_blaschke a = 0" apply simp apply (transfer, transfer) by auto (rule_tac x="1/(1 - a*cnj a)" in exI, simp) lemma ex_unit_disc_fix_mapping_u_to_zero: assumes "u \ unit_disc" shows "\ M. unit_disc_fix M \ moebius_pt M u = 0\<^sub>h" proof- from assms obtain c where *: "u = of_complex c" by (metis inf_notin_unit_disc inf_or_of_complex) hence "cmod c < 1" using assms unit_disc_iff_cmod_lt_1 by simp thus ?thesis using * by (rule_tac x="blaschke c" in exI) (smt blaschke_a_to_zero blaschke_ounit_circle_fix' unit_disc_fix_iff_ounit_circle) qed lemma wlog_zero: assumes in_disc: "u \ unit_disc" assumes preserving: "\ a u. \u \ unit_disc; cmod a < 1; P (moebius_pt (blaschke a) u)\ \ P u" assumes zero: "P 0\<^sub>h" shows "P u" proof- have *: "moebius_pt (blaschke (to_complex u)) u = 0\<^sub>h" by (smt blaschke_a_to_zero in_disc inf_notin_unit_disc of_complex_to_complex unit_disc_iff_cmod_lt_1) thus ?thesis using preserving[of u "to_complex u"] in_disc zero using inf_or_of_complex[of u] by auto qed lemma wlog_real_zero: assumes in_disc: "u \ unit_disc" and real: "is_real (to_complex u)" assumes preserving: "\ a u. \u \ unit_disc; is_real a; cmod a < 1; P (moebius_pt (blaschke a) u)\ \ P u" assumes zero: "P 0\<^sub>h" shows "P u" proof- have *: "moebius_pt (blaschke (to_complex u)) u = 0\<^sub>h" by (smt blaschke_a_to_zero in_disc inf_notin_unit_disc of_complex_to_complex unit_disc_iff_cmod_lt_1) thus ?thesis using preserving[of u "to_complex u"] in_disc zero real using inf_or_of_complex[of u] by auto qed lemma unit_disc_fix_transitive: assumes in_disc: "u \ unit_disc" and "u' \ unit_disc" shows "\ M. unit_disc_fix M \ moebius_pt M u = u'" proof- have "\ u \ unit_disc. \ M. unit_disc_fix M \ moebius_pt M u = u'" (is "?P u'") proof (rule wlog_zero) show "u' \ unit_disc" by fact next show "?P 0\<^sub>h" by (simp add: ex_unit_disc_fix_mapping_u_to_zero) next fix a u assume "cmod a < 1" and *: "?P (moebius_pt (blaschke a) u)" show "?P u" proof fix u' assume "u' \ unit_disc" then obtain M' where "unit_disc_fix M'" "moebius_pt M' u' = moebius_pt (blaschke a) u" using * by auto thus "\M. unit_disc_fix M \ moebius_pt M u' = u" using \cmod a < 1\ blaschke_unit_disc_fix[of a] using unit_disc_fix_moebius_comp[of "- blaschke a" "M'"] using unit_disc_fix_moebius_inv[of "blaschke a"] by (rule_tac x="(- (blaschke a)) + M'" in exI, simp) qed qed thus ?thesis using assms by auto qed (* -------------------------------------------------------------------------- *) subsection \Decomposition of unit disc preserving Möbius transforms\ (* -------------------------------------------------------------------------- *) text \Each transformation preserving unit disc can be decomposed to a rotation around the origin and a Blaschke factors that maps a point within the unit disc to zero.\ lemma unit_disc_fix_decompose_blaschke_rotation: assumes "unit_disc_fix M" shows "\ k \. 
cmod k < 1 \ M = moebius_rotation \ + blaschke k" using assms unfolding moebius_rotation_def moebius_similarity_def proof (simp, transfer, transfer) fix M assume *: "mat_det M \ 0" "unit_disc_fix_cmat M" then obtain k a b :: complex where **: "k \ 0" "mat_det (a, b, cnj b, cnj a) \ 0" "M = k *\<^sub>s\<^sub>m (a, b, cnj b, cnj a)" using unitary11_gen_iff[of M] by auto have "a \ 0" using * ** by auto then obtain a' k' \ where ***: "k' \ 0 \ a' * cnj a' \ 1 \ M = k' *\<^sub>s\<^sub>m (cis \, 0, 0, 1) *\<^sub>m\<^sub>m (1, - a', - cnj a', 1)" using ** unitary11_gen_cis_blaschke[of k M a b] by auto have "a' = 0 \ 1 < 1 / (cmod a')\<^sup>2" using * *** complex_mult_cnj_cmod[of a'] by simp hence "cmod a' < 1" by (smt less_divide_eq_1_pos norm_zero one_less_power one_power2 pos2) thus "\k. cmod k < 1 \ (\\. moebius_cmat_eq M (moebius_comp_cmat (mk_moebius_cmat (cis \) 0 0 1) (blaschke_cmat k)))" using *** apply (rule_tac x=a' in exI) apply simp apply (rule_tac x=\ in exI) apply simp apply (rule_tac x="1/k'" in exI) by auto qed lemma wlog_unit_disc_fix: assumes "unit_disc_fix M" assumes b: "\ k. cmod k < 1 \ P (blaschke k)" assumes r: "\ \. P (moebius_rotation \)" assumes comp: "\M1 M2. \unit_disc_fix M1; P M1; unit_disc_fix M2; P M2\ \ P (M1 + M2)" shows "P M" using assms using unit_disc_fix_decompose_blaschke_rotation[OF assms(1)] using blaschke_unit_disc_fix by auto lemma ex_unit_disc_fix_to_zero_positive_x_axis: assumes "u \ unit_disc" and "v \ unit_disc" and "u \ v" shows "\ M. unit_disc_fix M \ moebius_pt M u = 0\<^sub>h \ moebius_pt M v \ positive_x_axis" proof- from assms obtain B where *: "unit_disc_fix B" "moebius_pt B u = 0\<^sub>h" using ex_unit_disc_fix_mapping_u_to_zero by blast let ?v = "moebius_pt B v" have "?v \ unit_disc" using \v \ unit_disc\ * by auto hence "?v \ \\<^sub>h" using inf_notin_unit_disc by auto have "?v \ 0\<^sub>h" using \u \ v\ * by (metis moebius_pt_invert) obtain R where "unit_disc_fix R" "moebius_pt R 0\<^sub>h = 0\<^sub>h" "moebius_pt R ?v \ positive_x_axis" using ex_rotation_mapping_u_to_positive_x_axis[of ?v] \?v \ 0\<^sub>h\ \?v \ \\<^sub>h\ using moebius_pt_rotation_inf_iff moebius_pt_moebius_rotation_zero unit_disc_fix_rotation by blast thus ?thesis using * moebius_comp[of R B, symmetric] using unit_disc_fix_moebius_comp by (rule_tac x="R + B" in exI) (simp add: comp_def) qed lemma wlog_x_axis: assumes in_disc: "u \ unit_disc" "v \ unit_disc" assumes preserved: "\ M u v. \unit_disc_fix M; u \ unit_disc; v \ unit_disc; P (moebius_pt M u) (moebius_pt M v)\ \ P u v" assumes axis: "\ x. 
\is_real x; 0 \ Re x; Re x < 1\ \ P 0\<^sub>h (of_complex x)" shows "P u v" proof (cases "u = v") case True have "P u u" (is "?Q u") proof (rule wlog_zero[where P="?Q"]) show "u \ unit_disc" by fact next show "?Q 0\<^sub>h" using axis[of 0] by simp next fix a u assume "u \ unit_disc" "cmod a < 1" "?Q (moebius_pt (blaschke a) u)" thus "?Q u" using preserved[of "blaschke a" u u] using blaschke_unit_disc_fix[of a] by simp qed thus ?thesis using True by simp next case False from in_disc obtain M where *: "unit_disc_fix M" "moebius_pt M u = 0\<^sub>h" "moebius_pt M v \ positive_x_axis" using ex_unit_disc_fix_to_zero_positive_x_axis False by auto then obtain x where **: "moebius_pt M v = of_complex x" "is_real x" unfolding positive_x_axis_def circline_set_x_axis by auto moreover have "of_complex x \ unit_disc" using \unit_disc_fix M\ \v \ unit_disc\ ** using unit_disc_fix_discI by fastforce hence "0 < Re x" "Re x < 1" using \moebius_pt M v \ positive_x_axis\ ** by (auto simp add: positive_x_axis_def cmod_eq_Re) ultimately have "P 0\<^sub>h (of_complex x)" using \is_real x\ axis by auto thus ?thesis using preserved[OF *(1) assms(1-2)] *(2) **(1) by simp qed lemma wlog_positive_x_axis: assumes in_disc: "u \ unit_disc" "v \ unit_disc" "u \ v" assumes preserved: "\ M u v. \unit_disc_fix M; u \ unit_disc; v \ unit_disc; u \ v; P (moebius_pt M u) (moebius_pt M v)\ \ P u v" assumes axis: "\ x. \is_real x; 0 < Re x; Re x < 1\ \ P 0\<^sub>h (of_complex x)" shows "P u v" proof- have "u \ v \ P u v" (is "?Q u v") proof (rule wlog_x_axis) show "u \ unit_disc" "v \ unit_disc" by fact+ next fix M u v assume "unit_disc_fix M" "u \ unit_disc" "v \ unit_disc" "?Q (moebius_pt M u) (moebius_pt M v)" thus "?Q u v" using preserved[of M u v] using moebius_pt_invert by blast next fix x assume "is_real x" "0 \ Re x" "Re x < 1" thus "?Q 0\<^sub>h (of_complex x)" using axis[of x] of_complex_zero_iff[of x] complex.expand[of x 0] by fastforce qed thus ?thesis using \u \ v\ by simp qed (* -------------------------------------------------------------------------- *) subsection \All functions that fix the unit disc\ (* -------------------------------------------------------------------------- *) text \It can be proved that continuous functions that fix the unit disc are either actions of Möbius transformations that fix the unit disc (homographies), or are compositions of actions of Möbius transformations that fix the unit disc and the conjugation (antihomographies). We postulate this as a definition, but this characterisation could also be formally shown (we do not need this for our further applications).\ definition unit_disc_fix_f where "unit_disc_fix_f f \ (\ M. unit_disc_fix M \ (f = moebius_pt M \ f = moebius_pt M \ conjugate))" text \Unit disc fixing functions really fix the unit disc.\ lemma unit_disc_fix_f_unit_disc: assumes "unit_disc_fix_f M" shows "M ` unit_disc = unit_disc" using assms unfolding unit_disc_fix_f_def using image_comp by force text \Actions of unit disc fixing Möbius transformations (unit disc fixing homographies) are unit disc fixing functions.\ lemma unit_disc_fix_f_moebius_pt [simp]: assumes "unit_disc_fix M" shows "unit_disc_fix_f (moebius_pt M)" using assms unfolding unit_disc_fix_f_def by auto text \Compositions of unit disc fixing Möbius transformations and conjugation (unit disc fixing antihomographies) are unit disc fixing functions.\ lemma unit_disc_fix_conjugate_moebius [simp]: assumes "unit_disc_fix M" shows "unit_disc_fix (conjugate_moebius M)" proof- have "\a aa ab b.
\1 < Re (a * b / (aa * ab)); \ 1 < Re (cnj a * cnj b / (cnj aa * cnj ab))\ \ aa = 0" by (metis cnj.simps(1) complex_cnj_divide complex_cnj_mult) thus ?thesis using assms by (transfer, transfer) (auto simp add: mat_cnj_def unitary11_gen_def mat_adj_def field_simps) qed lemma unit_disc_fix_conjugate_comp_moebius [simp]: assumes "unit_disc_fix M" shows "unit_disc_fix_f (conjugate \ moebius_pt M)" using assms apply (subst conjugate_moebius) apply (simp add: unit_disc_fix_f_def) apply (rule_tac x="conjugate_moebius M" in exI, simp) done text \Unit disc fixing functions form a group under function composition.\ lemma unit_disc_fix_f_comp [simp]: assumes "unit_disc_fix_f f1" and "unit_disc_fix_f f2" shows "unit_disc_fix_f (f1 \ f2)" using assms apply (subst (asm) unit_disc_fix_f_def) apply (subst (asm) unit_disc_fix_f_def) proof safe fix M M' assume "unit_disc_fix M" "unit_disc_fix M'" thus "unit_disc_fix_f (moebius_pt M \ moebius_pt M')" unfolding unit_disc_fix_f_def by (rule_tac x="M + M'" in exI) auto next fix M M' assume "unit_disc_fix M" "unit_disc_fix M'" thus "unit_disc_fix_f (moebius_pt M \ (moebius_pt M' \ conjugate))" unfolding unit_disc_fix_f_def by (subst comp_assoc[symmetric])+ (rule_tac x="M + M'" in exI, auto) next fix M M' assume "unit_disc_fix M" "unit_disc_fix M'" thus "unit_disc_fix_f ((moebius_pt M \ conjugate) \ moebius_pt M')" unfolding unit_disc_fix_f_def by (subst comp_assoc, subst conjugate_moebius, subst comp_assoc[symmetric])+ (rule_tac x="M + conjugate_moebius M'" in exI, auto) next fix M M' assume "unit_disc_fix M" "unit_disc_fix M'" thus "unit_disc_fix_f ((moebius_pt M \ conjugate) \ (moebius_pt M' \ conjugate))" apply (subst comp_assoc[symmetric], subst comp_assoc) apply (subst conjugate_moebius, subst comp_assoc, subst comp_assoc) apply (simp add: unit_disc_fix_f_def) apply (rule_tac x="M + conjugate_moebius M'" in exI, auto) done qed lemma unit_disc_fix_f_inv: assumes "unit_disc_fix_f M" shows "unit_disc_fix_f (inv M)" using assms apply (subst (asm) unit_disc_fix_f_def) proof safe fix M assume "unit_disc_fix M" have "inv (moebius_pt M) = moebius_pt (-M)" by (rule ext) (simp add: moebius_inv) thus "unit_disc_fix_f (inv (moebius_pt M))" using \unit_disc_fix M\ unfolding unit_disc_fix_f_def by (rule_tac x="-M" in exI, simp) next fix M assume "unit_disc_fix M" have "inv (moebius_pt M \ conjugate) = conjugate \ inv (moebius_pt M)" by (subst o_inv_distrib, simp_all) also have "... = conjugate \ (moebius_pt (-M))" using moebius_inv by auto also have "... = moebius_pt (conjugate_moebius (-M)) \ conjugate" by (simp add: conjugate_moebius) finally show "unit_disc_fix_f (inv (moebius_pt M \ conjugate))" using \unit_disc_fix M\ unfolding unit_disc_fix_f_def by (rule_tac x="conjugate_moebius (-M)" in exI, simp) qed (* -------------------------------------------------------------------------- *) subsubsection \Action of unit disc fixing functions on circlines\ (* -------------------------------------------------------------------------- *) definition unit_disc_fix_f_circline where "unit_disc_fix_f_circline f H = (if \ M. unit_disc_fix M \ f = moebius_pt M then moebius_circline (THE M. unit_disc_fix M \ f = moebius_pt M) H else if \ M. unit_disc_fix M \ f = moebius_pt M \ conjugate then (moebius_circline (THE M.
unit_disc_fix M \ f = moebius_pt M \ conjugate) \ conjugate_circline) H else H)" lemma unique_moebius_pt_conjugate: assumes "moebius_pt M1 \ conjugate = moebius_pt M2 \ conjugate" shows "M1 = M2" proof- from assms have "moebius_pt M1 = moebius_pt M2" using conjugate_conjugate_comp rewriteL_comp_comp2 by fastforce thus ?thesis using unique_moebius_pt by auto qed lemma unit_disc_fix_f_circline_direct: assumes "unit_disc_fix M" and "f = moebius_pt M" shows "unit_disc_fix_f_circline f H = moebius_circline M H" proof- have "M = (THE M. unit_disc_fix M \ f = moebius_pt M)" using assms using theI_unique[of "\ M. unit_disc_fix M \ f = moebius_pt M" M] using unique_moebius_pt[of M] by auto thus ?thesis using assms unfolding unit_disc_fix_f_circline_def by auto qed lemma unit_disc_fix_f_circline_indirect: assumes "unit_disc_fix M" and "f = moebius_pt M \ conjugate" shows "unit_disc_fix_f_circline f H = ((moebius_circline M) \ conjugate_circline) H" proof- have "\ (\ M. unit_disc_fix M \ f = moebius_pt M)" using assms homography_antihomography_exclusive[of f] unfolding is_homography_def is_antihomography_def is_moebius_def by auto moreover have "M = (THE M. unit_disc_fix M \ f = moebius_pt M \ conjugate)" using assms using theI_unique[of "\ M. unit_disc_fix M \ f = moebius_pt M \ conjugate" M] using unique_moebius_pt_conjugate[of M] by auto ultimately show ?thesis using assms unfolding unit_disc_fix_f_circline_def by metis qed text \Disc automorphisms - it would be nice to show that there are no disc automorphisms other than unit disc fixing homographies and antihomographies, but this part of the theory is not yet developed.\ definition is_disc_aut where "is_disc_aut f \ bij_betw f unit_disc unit_disc" end \ No newline at end of file diff --git a/thys/Core_DOM/common/monads/BaseMonad.thy b/thys/Core_DOM/common/monads/BaseMonad.thy --- a/thys/Core_DOM/common/monads/BaseMonad.thy +++ b/thys/Core_DOM/common/monads/BaseMonad.thy @@ -1,376 +1,375 @@ (*********************************************************************************** * Copyright (c) 2016-2018 The University of Sheffield, UK * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * SPDX-License-Identifier: BSD-2-Clause ***********************************************************************************) section\The Monad Infrastructure\ text\In this theory, we introduce the basic infrastructure for our monadic class encoding.\ theory BaseMonad imports "../classes/BaseClass" "../preliminaries/Heap_Error_Monad" begin subsection\Datatypes\ datatype exception = NotFoundError | SegmentationFault | HierarchyRequestError | AssertException | NonTerminationException | InvokeError | TypeError | DebugException nat lemma finite_set_in [simp]: "x \ fset FS \ x |\| FS" by (meson notin_fset) consts put_M :: 'a consts get_M :: 'a consts delete_M :: 'a lemma sorted_list_of_set_eq [dest]: "sorted_list_of_set (fset x) = sorted_list_of_set (fset y) \ x = y" by (metis finite_fset fset_inject sorted_list_of_set(1)) locale l_ptr_kinds_M = fixes ptr_kinds :: "'heap \ 'ptr::linorder fset" begin definition a_ptr_kinds_M :: "('heap, exception, 'ptr list) prog" where "a_ptr_kinds_M = do { h \ get_heap; return (sorted_list_of_set (fset (ptr_kinds h))) }" lemma ptr_kinds_M_ok [simp]: "h \ ok a_ptr_kinds_M" by(simp add: a_ptr_kinds_M_def) lemma ptr_kinds_M_pure [simp]: "pure a_ptr_kinds_M h" by (auto simp add: a_ptr_kinds_M_def intro: bind_pure_I) lemma ptr_kinds_ptr_kinds_M [simp]: "ptr \ set |h \ a_ptr_kinds_M|\<^sub>r \ ptr |\| ptr_kinds h" by(simp add: a_ptr_kinds_M_def) lemma ptr_kinds_M_ptr_kinds [simp]: "h \ a_ptr_kinds_M \\<^sub>r xa \ xa = sorted_list_of_set (fset (ptr_kinds h))" by(auto simp add: a_ptr_kinds_M_def) lemma ptr_kinds_M_ptr_kinds_returns_result [simp]: "h \ a_ptr_kinds_M \ f \\<^sub>r x \ h \ f (sorted_list_of_set (fset (ptr_kinds h))) \\<^sub>r x" by(auto simp add: a_ptr_kinds_M_def) lemma ptr_kinds_M_ptr_kinds_returns_heap [simp]: "h \ a_ptr_kinds_M \ f \\<^sub>h h' \ h \ f (sorted_list_of_set (fset (ptr_kinds h))) \\<^sub>h h'" by(auto simp add: a_ptr_kinds_M_def) end locale l_get_M = fixes get :: "'ptr \ 'heap \ 'obj option" fixes type_wf :: "'heap \ bool" fixes ptr_kinds :: "'heap \ 'ptr fset" assumes "type_wf h \ ptr |\| ptr_kinds h \ get ptr h \ None" assumes "get ptr h \ None \ ptr |\| ptr_kinds h" begin definition a_get_M :: "'ptr \ ('obj \ 'result) \ ('heap, exception, 'result) prog" where "a_get_M ptr getter = (do { h \ get_heap; (case get ptr h of Some res \ return (getter res) | None \ error SegmentationFault) })" lemma get_M_pure [simp]: "pure (a_get_M ptr getter) h" by(auto simp add: a_get_M_def bind_pure_I split: option.splits) lemma get_M_ok: "type_wf h \ ptr |\| ptr_kinds h \ h \ ok (a_get_M ptr getter)" apply(simp add: a_get_M_def) by (metis l_get_M_axioms l_get_M_def option.case_eq_if return_ok) lemma get_M_ptr_in_heap: "h \ ok (a_get_M ptr getter) \ ptr |\| ptr_kinds h" apply(simp add: a_get_M_def) by (metis error_returns_result is_OK_returns_result_E l_get_M_axioms l_get_M_def option.simps(4)) end locale l_put_M = l_get_M get for get :: "'ptr \ 'heap \ 'obj option" + fixes put :: "'ptr \ 'obj \ 'heap \ 'heap" begin definition a_put_M :: "'ptr \ (('v \ 'v) \ 'obj \ 'obj) \ 'v \ ('heap, exception, unit) prog" where "a_put_M ptr setter v = (do { obj \ a_get_M ptr id; h \ get_heap; return_heap (put ptr (setter (\_. 
v) obj) h) })" lemma put_M_ok: "type_wf h \ ptr |\| ptr_kinds h \ h \ ok (a_put_M ptr setter v)" by(auto simp add: a_put_M_def intro!: bind_is_OK_I2 dest: get_M_ok elim!: bind_is_OK_E) lemma put_M_ptr_in_heap: "h \ ok (a_put_M ptr setter v) \ ptr |\| ptr_kinds h" by(auto simp add: a_put_M_def intro!: bind_is_OK_I2 elim: get_M_ptr_in_heap dest: is_OK_returns_result_I elim!: bind_is_OK_E) end subsection \Setup for Defining Partial Functions\ lemma execute_admissible: "ccpo.admissible (fun_lub (flat_lub (Inl (e::'e)))) (fun_ord (flat_ord (Inl e))) ((\a. \(h::'heap) h2 (r::'result). h \ a = Inr (r, h2) \ P h h2 r) \ Prog)" proof (unfold comp_def, rule ccpo.admissibleI, clarify) fix A :: "('heap \ 'e + 'result \ 'heap) set" let ?lub = "Prog (fun_lub (flat_lub (Inl e)) A)" fix h h2 r assume 1: "Complete_Partial_Order.chain (fun_ord (flat_ord (Inl e))) A" and 2: "\xa\A. \h h2 r. h \ Prog xa = Inr (r, h2) \ P h h2 r" and 4: "h \ Prog (fun_lub (flat_lub (Inl e)) A) = Inr (r, h2)" have h1:"\a. Complete_Partial_Order.chain (flat_ord (Inl e)) {y. \f\A. y = f a}" by (rule chain_fun[OF 1]) show "P h h2 r" using chain_fun[OF 1] flat_lub_in_chain[OF chain_fun[OF 1]] 2 4 unfolding execute_def fun_lub_def - by force + by auto (metis (lifting) Inl_Inr_False) qed lemma execute_admissible2: "ccpo.admissible (fun_lub (flat_lub (Inl (e::'e)))) (fun_ord (flat_ord (Inl e))) ((\a. \(h::'heap) h' h2 h2' (r::'result) r'. h \ a = Inr (r, h2) \ h' \ a = Inr (r', h2') \ P h h' h2 h2' r r') \ Prog)" proof (unfold comp_def, rule ccpo.admissibleI, clarify) fix A :: "('heap \ 'e + 'result \ 'heap) set" let ?lub = "Prog (fun_lub (flat_lub (Inl e)) A)" fix h h' h2 h2' r r' assume 1: "Complete_Partial_Order.chain (fun_ord (flat_ord (Inl e))) A" and 2 [rule_format]: "\xa\A. \h h' h2 h2' r r'. h \ Prog xa = Inr (r, h2) \ h' \ Prog xa = Inr (r', h2') \ P h h' h2 h2' r r'" and 4: "h \ Prog (fun_lub (flat_lub (Inl e)) A) = Inr (r, h2)" and 5: "h' \ Prog (fun_lub (flat_lub (Inl e)) A) = Inr (r', h2')" have h1:"\a. Complete_Partial_Order.chain (flat_ord (Inl e)) {y. \f\A. y = f a}" by (rule chain_fun[OF 1]) have "h \ ?lub \ {y. \f\A. y = f h}" using flat_lub_in_chain[OF h1] 4 unfolding execute_def fun_lub_def - by auto + by auto (metis (lifting) Inl_Inr_False) moreover have "h' \ ?lub \ {y. \f\A. y = f h'}" using flat_lub_in_chain[OF h1] 5 unfolding execute_def fun_lub_def - by auto + by auto (metis (lifting) Inl_Inr_False) ultimately obtain f where "f \ A" and "h \ Prog f = Inr (r, h2)" and "h' \ Prog f = Inr (r', h2')" using 1 4 5 - apply(auto simp add: chain_def fun_ord_def flat_ord_def execute_def)[1] - by (metis Inl_Inr_False) + by (auto simp add: chain_def fun_ord_def flat_ord_def execute_def)[1] (metis (lifting) Inl_Inr_False) then show "P h h' h2 h2' r r'" by(fact 2) qed definition dom_prog_ord :: "('heap, exception, 'result) prog \ ('heap, exception, 'result) prog \ bool" where "dom_prog_ord = img_ord (\a b. execute b a) (fun_ord (flat_ord (Inl NonTerminationException)))" definition dom_prog_lub :: "('heap, exception, 'result) prog set \ ('heap, exception, 'result) prog" where "dom_prog_lub = img_lub (\a b. 
execute b a) Prog (fun_lub (flat_lub (Inl NonTerminationException)))" lemma dom_prog_lub_empty: "dom_prog_lub {} = error NonTerminationException" by(simp add: dom_prog_lub_def img_lub_def fun_lub_def flat_lub_def error_def) lemma dom_prog_interpretation: "partial_function_definitions dom_prog_ord dom_prog_lub" proof - have "partial_function_definitions (fun_ord (flat_ord (Inl NonTerminationException))) (fun_lub (flat_lub (Inl NonTerminationException)))" by (rule partial_function_lift) (rule flat_interpretation) then show ?thesis apply (simp add: dom_prog_lub_def dom_prog_ord_def flat_interpretation execute_def) using partial_function_image prog.expand prog.sel by blast qed interpretation dom_prog: partial_function_definitions dom_prog_ord dom_prog_lub rewrites "dom_prog_lub {} \ error NonTerminationException" by (fact dom_prog_interpretation)(simp add: dom_prog_lub_empty) lemma admissible_dom_prog: "dom_prog.admissible (\f. \x h h' r. h \ f x \\<^sub>r r \ h \ f x \\<^sub>h h' \ P x h h' r)" proof (rule admissible_fun[OF dom_prog_interpretation]) fix x show "ccpo.admissible dom_prog_lub dom_prog_ord (\a. \h h' r. h \ a \\<^sub>r r \ h \ a \\<^sub>h h' \ P x h h' r)" unfolding dom_prog_ord_def dom_prog_lub_def proof (intro admissible_image partial_function_lift flat_interpretation) show "ccpo.admissible (fun_lub (flat_lub (Inl NonTerminationException))) (fun_ord (flat_ord (Inl NonTerminationException))) ((\a. \h h' r. h \ a \\<^sub>r r \ h \ a \\<^sub>h h' \ P x h h' r) \ Prog)" by(auto simp add: execute_admissible returns_result_def returns_heap_def split: sum.splits) next show "\x y. (\b. b \ x) = (\b. b \ y) \ x = y" by(simp add: execute_def prog.expand) next show "\x. (\b. b \ Prog x) = x" by(simp add: execute_def) qed qed lemma admissible_dom_prog2: "dom_prog.admissible (\f. \x h h2 h' h2' r r2. h \ f x \\<^sub>r r \ h \ f x \\<^sub>h h' \ h2 \ f x \\<^sub>r r2 \ h2 \ f x \\<^sub>h h2' \ P x h h2 h' h2' r r2)" proof (rule admissible_fun[OF dom_prog_interpretation]) fix x show "ccpo.admissible dom_prog_lub dom_prog_ord (\a. \h h2 h' h2' r r2. h \ a \\<^sub>r r \ h \ a \\<^sub>h h' \ h2 \ a \\<^sub>r r2 \ h2 \ a \\<^sub>h h2' \ P x h h2 h' h2' r r2)" unfolding dom_prog_ord_def dom_prog_lub_def proof (intro admissible_image partial_function_lift flat_interpretation) show "ccpo.admissible (fun_lub (flat_lub (Inl NonTerminationException))) (fun_ord (flat_ord (Inl NonTerminationException))) ((\a. \h h2 h' h2' r r2. h \ a \\<^sub>r r \ h \ a \\<^sub>h h' \ h2 \ a \\<^sub>r r2 \ h2 \ a \\<^sub>h h2' \ P x h h2 h' h2' r r2) \ Prog)" by(auto simp add: returns_result_def returns_heap_def intro!: ccpo.admissibleI dest!: ccpo.admissibleD[OF execute_admissible2[where P="P x"]] split: sum.splits) next show "\x y. (\b. b \ x) = (\b. b \ y) \ x = y" by(simp add: execute_def prog.expand) next show "\x. (\b. b \ Prog x) = x" by(simp add: execute_def) qed qed lemma fixp_induct_dom_prog: fixes F :: "'c \ 'c" and U :: "'c \ 'b \ ('heap, exception, 'result) prog" and C :: "('b \ ('heap, exception, 'result) prog) \ 'c" and P :: "'b \ 'heap \ 'heap \ 'result \ bool" assumes mono: "\x. monotone (fun_ord dom_prog_ord) dom_prog_ord (\f. U (F (C f)) x)" assumes eq: "f \ C (ccpo.fixp (fun_lub dom_prog_lub) (fun_ord dom_prog_ord) (\f. U (F (C f))))" assumes inverse2: "\f. U (C f) = f" assumes step: "\f x h h' r. (\x h h' r. 
h \ (U f x) \\<^sub>r r \ h \ (U f x) \\<^sub>h h' \ P x h h' r) \ h \ (U (F f) x) \\<^sub>r r \ h \ (U (F f) x) \\<^sub>h h' \ P x h h' r" assumes defined: "h \ (U f x) \\<^sub>r r" and "h \ (U f x) \\<^sub>h h'" shows "P x h h' r" using step defined dom_prog.fixp_induct_uc[of U F C, OF mono eq inverse2 admissible_dom_prog, of P] by (metis assms(6) error_returns_heap) declaration \Partial_Function.init "dom_prog" @{term dom_prog.fixp_fun} @{term dom_prog.mono_body} @{thm dom_prog.fixp_rule_uc} @{thm dom_prog.fixp_induct_uc} (SOME @{thm fixp_induct_dom_prog})\ abbreviation "mono_dom_prog \ monotone (fun_ord dom_prog_ord) dom_prog_ord" lemma dom_prog_ordI: assumes "\h. h \ f \\<^sub>e NonTerminationException \ h \ f = h \ g" shows "dom_prog_ord f g" proof(auto simp add: dom_prog_ord_def img_ord_def fun_ord_def flat_ord_def)[1] fix x assume "x \ f \ x \ g" then show "x \ f = Inl NonTerminationException" using assms[where h=x] by(auto simp add: returns_error_def split: sum.splits) qed lemma dom_prog_ordE: assumes "dom_prog_ord x y" obtains "h \ x \\<^sub>e NonTerminationException" | " h \ x = h \ y" using assms unfolding dom_prog_ord_def img_ord_def fun_ord_def flat_ord_def using returns_error_def by force lemma bind_mono [partial_function_mono]: fixes B :: "('a \ ('heap, exception, 'result) prog) \ ('heap, exception, 'result2) prog" assumes mf: "mono_dom_prog B" and mg: "\y. mono_dom_prog (\f. C y f)" shows "mono_dom_prog (\f. B f \ (\y. C y f))" proof (rule monotoneI) fix f g :: "'a \ ('heap, exception, 'result) prog" assume fg: "dom_prog.le_fun f g" from mf have 1: "dom_prog_ord (B f) (B g)" by (rule monotoneD) (rule fg) from mg have 2: "\y'. dom_prog_ord (C y' f) (C y' g)" by (rule monotoneD) (rule fg) have "dom_prog_ord (B f \ (\y. C y f)) (B g \ (\y. C y f))" (is "dom_prog_ord ?L ?R") proof (rule dom_prog_ordI) fix h from 1 show "h \ ?L \\<^sub>e NonTerminationException \ h \ ?L = h \ ?R" apply(rule dom_prog_ordE) apply(auto)[1] using bind_cong by fastforce qed also have h1: "dom_prog_ord (B g \ (\y'. C y' f)) (B g \ (\y'. C y' g))" (is "dom_prog_ord ?L ?R") proof (rule dom_prog_ordI) (* { *) fix h show "h \ ?L \\<^sub>e NonTerminationException \ h \ ?L = h \ ?R" proof (cases "h \ ok (B g)") case True then obtain x h' where x: "h \ B g \\<^sub>r x" and h': "h \ B g \\<^sub>h h'" by blast then have "dom_prog_ord (C x f) (C x g)" using 2 by simp then show ?thesis using x h' apply(auto intro!: bind_returns_error_I3 dest: returns_result_eq dest!: dom_prog_ordE)[1] apply(auto simp add: execute_bind_simp)[1] using "2" dom_prog_ordE by metis next case False then obtain e where e: "h \ B g \\<^sub>e e" by(simp add: is_OK_def returns_error_def split: sum.splits) have "h \ B g \ (\y'. C y' f) \\<^sub>e e" using e by(auto) moreover have "h \ B g \ (\y'. C y' g) \\<^sub>e e" using e by auto ultimately show ?thesis using bind_returns_error_eq by metis qed qed finally (dom_prog.leq_trans) show "dom_prog_ord (B f \ (\y. C y f)) (B g \ (\y'. C y' g))" . qed lemma mono_dom_prog1 [partial_function_mono]: fixes g :: "('a \ ('heap, exception, 'result) prog) \ 'b \ ('heap, exception, 'result) prog" assumes "\x. (mono_dom_prog (\f. g f x))" shows "mono_dom_prog (\f. map_M (g f) xs)" using assms apply (induct xs) by(auto simp add: call_mono dom_prog.const_mono intro!: bind_mono) lemma mono_dom_prog2 [partial_function_mono]: fixes g :: "('a \ ('heap, exception, 'result) prog) \ 'b \ ('heap, exception, 'result) prog" assumes "\x. (mono_dom_prog (\f. g f x))" shows "mono_dom_prog (\f. 
forall_M (g f) xs)" using assms apply (induct xs) by(auto simp add: call_mono dom_prog.const_mono intro!: bind_mono) lemma sorted_list_set_cong [simp]: "sorted_list_of_set (fset FS) = sorted_list_of_set (fset FS') \ FS = FS'" by auto end diff --git a/thys/Dependent_SIFUM_Refinement/Examples/Eg1Eg2.thy b/thys/Dependent_SIFUM_Refinement/Examples/Eg1Eg2.thy --- a/thys/Dependent_SIFUM_Refinement/Examples/Eg1Eg2.thy +++ b/thys/Dependent_SIFUM_Refinement/Examples/Eg1Eg2.thy @@ -1,197 +1,197 @@ theory Eg1Eg2 imports Eg1 Eg2 "../CompositionalRefinement" begin locale sifum_refinement_eg1_eg2 = A: sifum_example + C: sifum_example2 primrec Eg2_var\<^sub>C_of_Eg1 :: "Eg1.var \ Eg2.var\<^sub>C" where "Eg2_var\<^sub>C_of_Eg1 control_var = control_var\<^sub>C" | "Eg2_var\<^sub>C_of_Eg1 buffer = buffer\<^sub>C" | "Eg2_var\<^sub>C_of_Eg1 high_var = high_var\<^sub>C" | "Eg2_var\<^sub>C_of_Eg1 low_var = low_var\<^sub>C" | "Eg2_var\<^sub>C_of_Eg1 temp = temp\<^sub>C" sublocale sifum_refinement_eg1_eg2 \ sifum_refinement dma dma\<^sub>C \_vars \_vars\<^sub>C \ \\<^sub>C A.eval\<^sub>w C.eval\<^sub>w undefined Eg2_var\<^sub>C_of_Eg1 - supply image_cong_simp [cong del] INF_cong_simp [cong] SUP_cong_simp [cong] + supply image_cong_simp [cong del] INF_cong_simp [cong] SUP_cong_simp [cong] subst_all [simp del] subst_all' [simp del] apply(unfold_locales) apply(erule C.eval_det, simp) apply(rule C.Var_finite) apply(simp add:\_vars\<^sub>C_def split:if_splits) apply(simp add:\_vars\<^sub>C_def dma\<^sub>C_def) apply(simp add:\_vars\<^sub>C_def dma\<^sub>C_def) apply(rule inj_onI, simp) apply(case_tac x) apply(case_tac y, simp+) apply(case_tac y, simp+) apply(case_tac y, simp+) apply(case_tac y, simp+) apply(case_tac y, simp+) apply(case_tac x\<^sub>A) apply(clarsimp simp:dma\<^sub>C_def dma_def dma_control_var_def dma_control_var\<^sub>C_def)+ apply(case_tac x\<^sub>A) apply(clarsimp simp:\_vars\<^sub>C_def \_vars_def)+ apply(rule set_eqI, clarsimp) apply(case_tac x, clarsimp) apply(rule_tac x=control_var in rev_image_eqI, clarsimp+) apply(case_tac xa, clarsimp+) apply(case_tac xb, clarsimp+) apply(case_tac xa, clarsimp+) apply(case_tac xb, clarsimp+) apply(case_tac xa, clarsimp+) apply(case_tac xb, clarsimp+) apply(case_tac xa, clarsimp+) apply(case_tac xb, clarsimp+) apply(case_tac xa, clarsimp+) apply(simp add:\\<^sub>C_def) done context sifum_refinement_eg1_eg2 begin lemma bisim_simple_\: "bisim_simple (A.\ \ \ P)" unfolding bisim_simple_def apply clarsimp apply(drule_tac lc="\c\<^sub>1\<^sub>A, mds, mem\<^sub>1\<^sub>A\\<^sub>A" and lc'="\c\<^sub>2\<^sub>A, mds, mem\<^sub>2\<^sub>A\\<^sub>A" in A.bisim_simple_\\<^sub>u) by simp (* Don't know to what extent these helpers can be made generic enough to go into any parent theories. Again, mightn't be able to (or bother to) make some generic considering the aexp evaluator is going to be specific to each language. *) lemma conc_only_vars_not_visible_abs: "(\v\<^sub>C. v\<^sub>C \ range Eg2_var\<^sub>C_of_Eg1 \ mem\<^sub>C v\<^sub>C = mem\<^sub>C' v\<^sub>C) \ mem\<^sub>A_of mem\<^sub>C = mem\<^sub>A_of mem\<^sub>C'" by (simp add: mem\<^sub>A_of_def) lemma conc_only_var_assign_not_visible_abs: "\v\<^sub>C e. 
v\<^sub>C \ range Eg2_var\<^sub>C_of_Eg1 \ mem\<^sub>A_of mem\<^sub>C = mem\<^sub>A_of (mem\<^sub>C(v\<^sub>C := e))" using conc_only_vars_not_visible_abs by simp lemma reg\<^sub>C_is_not_the_var\<^sub>C_of_anything: "reg\<^sub>C = Eg2_var\<^sub>C_of_Eg1 x \ False" by (induct x, clarsimp+) lemma reg\<^sub>C_not_visible_abs: "reg\<^sub>C \ range Eg2_var\<^sub>C_of_Eg1" using reg\<^sub>C_is_not_the_var\<^sub>C_of_anything by blast (* This one's pretty specific to this refinement... *) lemma reg\<^sub>C_the_only_concrete_only_var: "v\<^sub>C \ range Eg2_var\<^sub>C_of_Eg1 \ v\<^sub>C = reg\<^sub>C" apply(case_tac v\<^sub>C) apply(erule rev_notE, clarsimp, rule_tac x=control_var in range_eqI, clarsimp) apply(erule rev_notE, clarsimp, rule_tac x=buffer in range_eqI, clarsimp) apply(erule rev_notE, clarsimp, rule_tac x=high_var in range_eqI, clarsimp) apply(erule rev_notE, clarsimp, rule_tac x=low_var in range_eqI, clarsimp) apply(erule rev_notE, clarsimp, rule_tac x=temp in range_eqI, clarsimp) apply clarsimp done lemma NoRW\<^sub>A_implies_NoRW\<^sub>C: "x \ mds\<^sub>A_of mds\<^sub>C AsmNoReadOrWrite \ Eg2_var\<^sub>C_of_Eg1 x \ mds\<^sub>C AsmNoReadOrWrite" unfolding mds\<^sub>A_of_def apply clarsimp apply (simp only: Eg2_var\<^sub>C_of_Eg1_def) apply clarsimp apply (simp add: f_inv_into_f) done lemma NoWrite\<^sub>A_implies_NoWrite\<^sub>C: "x \ mds\<^sub>A_of mds\<^sub>C AsmNoWrite \ Eg2_var\<^sub>C_of_Eg1 x \ mds\<^sub>C AsmNoWrite" unfolding mds\<^sub>A_of_def apply clarsimp apply (simp only: Eg2_var\<^sub>C_of_Eg1_def) apply clarsimp apply (simp add: f_inv_into_f) done lemma assign_eval\<^sub>w_load\<^sub>A: shows "(\x \ Eg1.Load y, mds, mem\\<^sub>A, \Stop, mds, mem (x := mem y)\\<^sub>A) \ A.eval\<^sub>w" by (metis A.assign_eval\<^sub>w ev\<^sub>A.simps(2)) lemma assign_eval\<^sub>w_load\<^sub>C: shows "(\x \ Load y, mds, mem\\<^sub>C, \Stop, mds, mem (x := mem y)\\<^sub>C) \ C.eval\<^sub>w" using C.unannotated[OF C.assign, where E="[]", simplified] apply(drule_tac x=x in meta_spec) apply(drule_tac x="Load y" in meta_spec) apply(drule_tac x=mds in meta_spec) apply(drule_tac x=mem in meta_spec) apply clarsimp done lemma assign_eval\<^sub>w_const\<^sub>A: shows "(\x \ Eg1.Const c, mds, mem\, \Stop, mds, mem (x := c)\) \ A.eval\<^sub>w" by (metis A.assign_eval\<^sub>w ev\<^sub>A.simps(1)) lemma assign_eval\<^sub>w_const\<^sub>C: shows "(\x \ Const c, mds, mem\, \Stop, mds, mem (x := c)\) \ C.eval\<^sub>w" using C.unannotated[OF C.assign, where E="[]", simplified] apply(drule_tac x=x in meta_spec) apply(drule_tac x="Const c" in meta_spec) apply(drule_tac x=mds in meta_spec) apply(drule_tac x=mem in meta_spec) apply clarsimp done lemma if_seq_eval\<^sub>w_helper\<^sub>A: "(\If B T E, mds, mem\, \if ev\<^sub>B mem B then T else E, mds, mem\\<^sub>A) \ A.eval\<^sub>w \ (\If B T E ;; TAIL, mds, mem\, \if ev\<^sub>B mem B then T ;; TAIL else E ;; TAIL, mds, mem\\<^sub>A) \ A.eval\<^sub>w" using A.eval\<^sub>w.seq by auto lemma if_seq_eval\<^sub>w_helper\<^sub>C: "(\If B T E, mds, mem\, \if ev\<^sub>B\<^sub>C mem B then T else E, mds, mem\\<^sub>C) \ C.eval\<^sub>w \ (\If B T E ;; TAIL, mds, mem\, \if ev\<^sub>B\<^sub>C mem B then T ;; TAIL else E ;; TAIL, mds, mem\\<^sub>C) \ C.eval\<^sub>w" using C.eval\<^sub>w.seq by auto lemma mem_assign_refinement_helper_var: "mem\<^sub>A_of (mem\<^sub>C (Eg2_var\<^sub>C_of_Eg1 x := mem\<^sub>C (Eg2_var\<^sub>C_of_Eg1 y))) = (mem\<^sub>A_of mem\<^sub>C) (x := (mem\<^sub>A_of mem\<^sub>C) y)" apply(clarsimp simp: mem\<^sub>A_of_def) apply(rule ext, 
clarsimp) apply(cases x) apply(case_tac x\<^sub>A, clarsimp+)+ done lemma mem_assign_refinement_helper_const: "mem\<^sub>A_of (mem\<^sub>C (Eg2_var\<^sub>C_of_Eg1 x := c)) = (mem\<^sub>A_of mem\<^sub>C) (x := c)" apply(clarsimp simp: mem\<^sub>A_of_def) apply(rule ext, clarsimp) apply(cases x) apply(case_tac x\<^sub>A, clarsimp+)+ done lemma if_true_eval\<^sub>w\<^sub>C: shows "mem\<^sub>C x = 0 \ (\(If (Eq x 0) c\<^sub>C_then c\<^sub>C_else) ;; c\<^sub>C_tail, mds\<^sub>C, mem\<^sub>C\\<^sub>C, \(c\<^sub>C_then ;; c\<^sub>C_tail), mds\<^sub>C, mem\<^sub>C\\<^sub>C) \ C.eval\<^sub>w" using C.if_eval\<^sub>w C.eval\<^sub>w.seq ev\<^sub>B\<^sub>C.simps by presburger lemma if_false_eval\<^sub>w\<^sub>C: shows "mem\<^sub>C x \ 0 \ (\(If (Eq x 0) c\<^sub>C_then c\<^sub>C_else) ;; c\<^sub>C_tail, mds\<^sub>C, mem\<^sub>C\\<^sub>C, \(c\<^sub>C_else ;; c\<^sub>C_tail), mds\<^sub>C, mem\<^sub>C\\<^sub>C) \ C.eval\<^sub>w" using C.if_eval\<^sub>w C.eval\<^sub>w.seq ev\<^sub>B\<^sub>C.simps by presburger end end diff --git a/thys/Differential_Dynamic_Logic/Proof_Checker.thy b/thys/Differential_Dynamic_Logic/Proof_Checker.thy --- a/thys/Differential_Dynamic_Logic/Proof_Checker.thy +++ b/thys/Differential_Dynamic_Logic/Proof_Checker.thy @@ -1,2091 +1,2091 @@ theory "Proof_Checker" imports Ordinary_Differential_Equations.ODE_Analysis "Ids" "Lib" "Syntax" "Denotational_Semantics" "Axioms" "Differential_Axioms" "Frechet_Correctness" "Static_Semantics" "Coincidence" "Bound_Effect" "Uniform_Renaming" "USubst_Lemma" "Pretty_Printer" begin context ids begin section \Proof Checker\ text \This proof checker defines a datatype for proof terms in dL and a function for checking proof terms, with a soundness proof that any proof accepted by the checker is a proof of a sound rule or valid formula. A simple concrete hybrid system and a differential invariant rule for conjunctions are provided as example proofs. \ lemma sound_weaken_gen:"\A B C. sublist A B \ sound (A, C) \ sound (B,C)" proof (rule soundI_mem) fix A B::"('sf,'sc,'sz) sequent list" and C::"('sf,'sc,'sz) sequent" and I::"('sf,'sc,'sz) interp" assume sub:"sublist A B" assume good:"is_interp I" assume "sound (A, C)" then have soundC:"(\\. List.member A \ \ seq_sem I \ = UNIV) \ seq_sem I C = UNIV" apply simp apply(drule soundD_mem) by (auto simp add: good) assume SG:"(\\. List.member B \ \ seq_sem I \ = UNIV)" show "seq_sem I C = UNIV" using soundC SG sub unfolding sublist_def by auto qed lemma sound_weaken:"\SG SGS C. sound (SGS, C) \ sound (SG # SGS, C)" subgoal for SG SGS C apply(induction SGS) subgoal unfolding sound_def by auto subgoal for SG2 SGS unfolding sound_def by (metis fst_conv le0 length_Cons not_less_eq nth_Cons_Suc snd_conv) done done lemma member_filter:"\P. List.member (filter P L) x \ List.member L x" apply(induction L, auto) by(metis (full_types) member_rec(1)) lemma nth_member:"n < List.length L \ List.member L (List.nth L n)" apply(induction L, auto simp add: member_rec) by (metis in_set_member length_Cons nth_mem set_ConsD) lemma mem_appL:"List.member A x \ List.member (A @ B) x" by(induction A, auto simp add: member_rec) lemma sound_weaken_appR:"\SG SGS C. 
sound (SG, C) \ sound (SG @ SGS, C)" subgoal for SG SGS C apply(rule sound_weaken_gen) apply(auto) unfolding sublist_def apply(rule allI) subgoal for x using mem_appL[of SG x SGS] by auto done done fun start_proof::"('sf,'sc,'sz) sequent \ ('sf,'sc,'sz) rule" where "start_proof S = ([S], S)" lemma start_proof_sound:"sound (start_proof S)" unfolding sound_def by auto section \Proof Checker Implementation\ datatype axiom = AloopIter | AI | Atest | Abox | Achoice | AK | AV | Aassign | Adassign | AdConst | AdPlus | AdMult | ADW | ADE | ADC | ADS | ADIGeq | ADIGr | ADG fun get_axiom:: "axiom \ ('sf,'sc,'sz) formula" where "get_axiom AloopIter = loop_iterate_axiom" | "get_axiom AI = Iaxiom" | "get_axiom Atest = test_axiom" | "get_axiom Abox = box_axiom" | "get_axiom Achoice = choice_axiom" | "get_axiom AK = Kaxiom" | "get_axiom AV = Vaxiom" | "get_axiom Aassign = assign_axiom" | "get_axiom Adassign = diff_assign_axiom" | "get_axiom AdConst = diff_const_axiom" | "get_axiom AdPlus = diff_plus_axiom" | "get_axiom AdMult = diff_times_axiom" | "get_axiom ADW = DWaxiom" | "get_axiom ADE = DEaxiom" | "get_axiom ADC = DCaxiom" | "get_axiom ADS = DSaxiom" | "get_axiom ADIGeq = DIGeqaxiom" | "get_axiom ADIGr = DIGraxiom" | "get_axiom ADG = DGaxiom" lemma axiom_safe:"fsafe (get_axiom a)" by(cases a, auto simp add: axiom_defs Box_def Or_def Equiv_def Implies_def empty_def Equals_def f1_def p1_def P_def f0_def expand_singleton Forall_def Greater_def id_simps) (*apply(cases a) prefer 9 subgoal apply(simp only: get_axiom.simps diff_assign_axiom_def Equiv_def Or_def Box_def) apply(simp only: fsafe_Not_simps fsafe_Diamond_simps fsafe_And_simps) apply(rule conjI)+ subgoal apply(simp only: hpsafe_DiffAssign_simps dsafe_Fun_simps empty_def dsafe_Const) by auto *) (*auto simp add: loop_iterate_axiom_def Iaxiom_def diff_assign_axiom_def test_axiom_def choice_axiom_def box_axiom_def empty_def Kaxiom_def Vaxiom_def assign_axiom_def diff_const_axiom_def diff_plus_axiom_def diff_times_axiom_def DWaxiom_def Equals_def state_fun_def DEaxiom_def DCaxiom_def DSaxiom_def DIGeqaxiom_def DIGraxiom_def f1_def p1_def P_def expand_singleton f0_def Forall_def DGaxiom_def Equiv_def Implies_def Or_def Box_def Greater_def vne12*) lemma axiom_valid:"valid (get_axiom a)" proof (cases a) case AloopIter then show ?thesis by (simp add: loop_valid) next case AI then show ?thesis by (simp add: I_valid) next case Atest then show ?thesis by (simp add: test_valid) next case Abox then show ?thesis by (simp add: box_valid) next case Achoice then show ?thesis by (simp add: choice_valid) next case AK then show ?thesis by (simp add: K_valid) next case AV then show ?thesis by (simp add: V_valid) next case Aassign then show ?thesis by (simp add: assign_valid) next case Adassign then show ?thesis by (simp add: diff_assign_valid) next case AdConst then show ?thesis by (simp add: diff_const_axiom_valid) next case AdPlus then show ?thesis by (simp add: diff_plus_axiom_valid) next case AdMult then show ?thesis by (simp add: diff_times_axiom_valid) next case ADW then show ?thesis by (simp add: DW_valid) next case ADE then show ?thesis by (simp add: DE_valid) next case ADC then show ?thesis by (simp add: DC_valid) next case ADS then show ?thesis by (simp add: DS_valid) next case ADIGeq then show ?thesis by (simp add: DIGeq_valid) next case ADIGr then show ?thesis by (simp add: DIGr_valid) next case ADG then show ?thesis by (simp add: DG_valid) qed datatype rrule = ImplyR | AndR | CohideR | CohideRR | TrueR | EquivR datatype lrule = ImplyL | AndL | 
EquivForwardL | EquivBackwardL datatype ('a, 'b, 'c) step = Axiom axiom | MP | G | CT | CQ "('a, 'c) trm" "('a, 'c) trm" "('a, 'b, 'c) subst" | CE "('a, 'b, 'c) formula" "('a, 'b, 'c) formula" "('a, 'b, 'c) subst" | Skolem \ \Apply \Usubst\ to some other (valid) formula\ | VSubst "('a, 'b, 'c) formula" "('a, 'b, 'c) subst" | AxSubst axiom "('a, 'b, 'c) subst" | URename | BRename | Rrule rrule nat | Lrule lrule nat | CloseId nat nat | Cut "('a, 'b, 'c) formula" | DEAxiomSchema "('a,'c) ODE" "('a, 'b, 'c) subst" type_synonym ('a, 'b, 'c) derivation = "(nat * ('a, 'b, 'c) step) list" type_synonym ('a, 'b, 'c) pf = "('a,'b,'c) sequent * ('a, 'b, 'c) derivation" fun seq_to_string :: "('sf, 'sc, 'sz) sequent \ char list" where "seq_to_string (A,S) = join '', '' (map fml_to_string A) @ '' |- '' @ join '', '' (map fml_to_string S)" fun rule_to_string :: "('sf, 'sc, 'sz) rule \ char list" where "rule_to_string (SG, C) = (join '';; '' (map seq_to_string SG)) @ '' '' @ \<^cancel>\[char_of_nat 10] @\ seq_to_string C" fun close :: "'a list \ 'a \'a list" where "close L x = filter (\y. y \ x) L" fun closeI ::"'a list \ nat \'a list" where "closeI L i = close L (nth L i)" lemma close_sub:"sublist (close \ \) \" apply (auto simp add: sublist_def) using member_filter by fastforce lemma close_app_comm:"close (A @ B) x = close A x @ close B x" by auto lemma close_provable_sound:"sound (SG, C) \ sound (close SG \, \) \ sound (close SG \, C)" proof (rule soundI_mem) fix I::"('sf,'sc,'sz) interp" assume S1:"sound (SG, C)" assume S2:"sound (close SG \, \)" assume good:"is_interp I" assume SGCs:"(\\'. List.member (close SG \) \' \ seq_sem I \' = UNIV)" have S\:"seq_sem I \ = UNIV" using S2 apply simp apply(drule soundD_mem) using good apply auto using SGCs UNIV_I by fastforce have mem_close:"\P. List.member SG P \ P \ \ \ List.member (close SG \) P" by(induction SG, auto simp add: member_rec) have SGs:"\P. List.member SG P \ seq_sem I P = UNIV" subgoal for P apply(cases "P = \") subgoal using S\ by auto subgoal using mem_close[of P] SGCs by auto done done show "seq_sem I C = UNIV" using S1 apply simp apply(drule soundD_mem) using good apply auto using SGs apply auto using impl_sem by blast qed fun Lrule_result :: "lrule \ nat \ ('sf, 'sc, 'sz) sequent \ ('sf, 'sc, 'sz) sequent list" where "Lrule_result AndL j (A,S) = (case (nth A j) of And p q \ [(close ([p, q] @ A) (nth A j), S)])" | "Lrule_result ImplyL j (A,S) = (case (nth A j) of Not (And (Not q) (Not (Not p))) \ [(close (q # A) (nth A j), S), (close A (nth A j), p # S)])" | "Lrule_result EquivForwardL j (A,S) = (case (nth A j) of Not(And (Not (And p q)) (Not (And (Not p') (Not q')))) \ [(close (q # A) (nth A j), S), (close A (nth A j), p # S)])" | "Lrule_result EquivBackwardL j (A,S) = (case (nth A j) of Not(And (Not (And p q)) (Not (And (Not p') (Not q')))) \ [(close (p # A) (nth A j), S), (close A (nth A j), q # S)])" \ \Note: Some of the pattern-matching here is... interesting. 
The reason for this is that we can only\ \ \match on things in the base grammar, when we would quite like to check things in the derived grammar.\ \ \So all the pattern-matches have the definitions expanded, sometimes in a silly way.\ fun Rrule_result :: "rrule \ nat \ ('sf, 'sc, 'sz) sequent \ ('sf, 'sc, 'sz) sequent list" where Rstep_Imply:"Rrule_result ImplyR j (A,S) = (case (nth S j) of Not (And (Not q) (Not (Not p))) \ [(p # A, q # (closeI S j))] | _ \ undefined)" | Rstep_And:"Rrule_result AndR j (A,S) = (case (nth S j) of (And p q) \ [(A, p # (closeI S j )), (A, q # (closeI S j))])" | Rstep_EquivR:"Rrule_result EquivR j (A,S) = (case (nth S j) of Not(And (Not (And p q)) (Not (And (Not p') (Not q')))) \ (if (p = p' \ q = q') then [(p # A, q # (closeI S j)), (q # A, p # (closeI S j))] else undefined))" | Rstep_CohideR:"Rrule_result CohideR j (A,S) = [(A, [nth S j])]" | Rstep_CohideRR:"Rrule_result CohideRR j (A,S) = [([], [nth S j])]" | Rstep_TrueR:"Rrule_result TrueR j (A,S) = []" fun step_result :: "('sf, 'sc, 'sz) rule \ (nat * ('sf, 'sc, 'sz) step) \ ('sf, 'sc, 'sz) rule" where Step_axiom:"step_result (SG,C) (i,Axiom a) = (closeI SG i, C)" | Step_AxSubst:"step_result (SG,C) (i,AxSubst a \) = (closeI SG i, C)" | Step_Lrule:"step_result (SG,C) (i,Lrule L j) = (close (append SG (Lrule_result L j (nth SG i))) (nth SG i), C)" | Step_Rrule:"step_result (SG,C) (i,Rrule L j) = (close (append SG (Rrule_result L j (nth SG i))) (nth SG i), C)" | Step_Cut:"step_result (SG,C) (i,Cut \) = (let (A,S) = nth SG i in ((\ # A, S) # ((A, \ # S) # (closeI SG i)), C))" | Step_Vsubst:"step_result (SG,C) (i,VSubst \ \) = (closeI SG i, C)" | Step_CloseId:"step_result (SG,C) (i,CloseId j k) = (closeI SG i, C)" | Step_G:"step_result (SG,C) (i,G) = (case nth SG i of (_, (Not (Diamond q (Not p))) # Nil) \ (([], [p]) # closeI SG i, C))" | Step_DEAxiomSchema:"step_result (SG,C) (i,DEAxiomSchema ODE \) = (closeI SG i, C)" | Step_CE:"step_result (SG,C) (i, CE \ \ \) = (closeI SG i, C)" | Step_CQ:"step_result (SG,C) (i, CQ \\<^sub>1 \\<^sub>2 \) = (closeI SG i, C)" | Step_default:"step_result R (i,S) = R" fun deriv_result :: "('sf, 'sc, 'sz) rule \ ('sf, 'sc, 'sz) derivation \ ('sf, 'sc, 'sz) rule" where "deriv_result R [] = R" | "deriv_result R (s # ss) = deriv_result (step_result R s) (ss)" fun proof_result :: "('sf, 'sc, 'sz) pf \ ('sf, 'sc, 'sz) rule" where "proof_result (D,S) = deriv_result (start_proof D) S" inductive lrule_ok ::"('sf,'sc,'sz) sequent list \ ('sf,'sc,'sz) sequent \ nat \ nat \ lrule \ bool" where Lrule_And:"\p q. nth (fst (nth SG i)) j = (p && q) \ lrule_ok SG C i j AndL" | Lrule_Imply:"\p q. nth (fst (nth SG i)) j = (p \ q) \ lrule_ok SG C i j ImplyL" | Lrule_EquivForward:"\p q. nth (fst (nth SG i)) j = (p \ q) \ lrule_ok SG C i j EquivForwardL" | Lrule_EquivBackward:"\p q. nth (fst (nth SG i)) j = (p \ q) \ lrule_ok SG C i j EquivBackwardL" named_theorems prover "Simplification rules for checking validity of proof certificates" lemmas [prover] = axiom_defs Box_def Or_def Implies_def filter_append ssafe_def SDom_def FUadmit_def PFUadmit_def id_simps inductive_simps Lrule_And[prover]: "lrule_ok SG C i j AndL" and Lrule_Imply[prover]: "lrule_ok SG C i j ImplyL" and Lrule_Forward[prover]: "lrule_ok SG C i j EquivForwardL" and Lrule_EquivBackward[prover]: "lrule_ok SG C i j EquivBackwardL" inductive rrule_ok ::"('sf,'sc,'sz) sequent list \ ('sf,'sc,'sz) sequent \ nat \ nat \ rrule \ bool" where Rrule_And:"\p q. nth (snd (nth SG i)) j = (p && q) \ rrule_ok SG C i j AndR" | Rrule_Imply:"\p q. 
nth (snd (nth SG i)) j = (p \ q) \ rrule_ok SG C i j ImplyR" | Rrule_Equiv:"\p q. nth (snd (nth SG i)) j = (p \ q) \ rrule_ok SG C i j EquivR" | Rrule_Cohide:"length (snd (nth SG i)) > j \ (\\ q. (nth SG i) \ (\, [q])) \ rrule_ok SG C i j CohideR" | Rrule_CohideRR:"length (snd (nth SG i)) > j \ (\q. (nth SG i) \ ([], [q])) \ rrule_ok SG C i j CohideRR" | Rrule_True:"nth (snd (nth SG i)) j = TT \ rrule_ok SG C i j TrueR" inductive_simps Rrule_And_simps[prover]: "rrule_ok SG C i j AndR" and Rrule_Imply_simps[prover]: "rrule_ok SG C i j ImplyR" and Rrule_Equiv_simps[prover]: "rrule_ok SG C i j EquivR" and Rrule_CohideR_simps[prover]: "rrule_ok SG C i j CohideR" and Rrule_CohideRR_simps[prover]: "rrule_ok SG C i j CohideRR" and Rrule_TrueR_simps[prover]: "rrule_ok SG C i j TrueR" inductive step_ok :: "('sf, 'sc, 'sz) rule \ nat \ ('sf, 'sc, 'sz) step \ bool" where Step_Axiom:"(nth SG i) = ([], [get_axiom a]) \ step_ok (SG,C) i (Axiom a)" | Step_AxSubst:"(nth SG i) = ([], [Fsubst (get_axiom a) \]) \ Fadmit \ (get_axiom a) \ ssafe \ \ step_ok (SG,C) i (AxSubst a \)" | Step_Lrule:"lrule_ok SG C i j L \ j < length (fst (nth SG i)) \ step_ok (SG,C) i (Lrule L j)" | Step_Rrule:"rrule_ok SG C i j L \ j < length (snd (nth SG i)) \ step_ok (SG,C) i (Rrule L j)" | Step_Cut:"fsafe \ \ i < length SG \ step_ok (SG,C) i (Cut \)" | Step_CloseId:"nth (fst (nth SG i)) j = nth (snd (nth SG i)) k \ j < length (fst (nth SG i)) \ k < length (snd (nth SG i)) \ step_ok (SG,C) i (CloseId j k) " | Step_G:"\a p. nth SG i = ([], [([[a]]p)]) \ step_ok (SG,C) i G" | Step_DEAxiom_schema: " nth SG i = ([], [Fsubst ((([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1)) ODE) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE ((OProd (OSing vid1 (f1 fid1 vid1))) ODE) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))) \]) \ ssafe \ \ osafe ODE \ {Inl vid1, Inr vid1} \ BVO ODE = {} \ Fadmit \ ((([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE ((OProd (OSing vid1 (f1 fid1 vid1))ODE)) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))) \ step_ok (SG,C) i (DEAxiomSchema ODE \)" | Step_CE:"nth SG i = ([], [Fsubst (Equiv (InContext pid1 \) (InContext pid1 \)) \]) \ valid (Equiv \ \) \ fsafe \ \ fsafe \ \ ssafe \ \ Fadmit \ (Equiv (InContext pid1 \) (InContext pid1 \)) \ step_ok (SG,C) i (CE \ \ \)" | Step_CQ:"nth SG i = ([], [Fsubst (Equiv (Prop p (singleton \)) (Prop p (singleton \'))) \]) \ valid (Equals \ \') \ dsafe \ \ dsafe \' \ ssafe \ \ Fadmit \ (Equiv (Prop p (singleton \)) (Prop p (singleton \'))) \ step_ok (SG,C) i (CQ \ \' \)" inductive_simps Step_G_simps[prover]: "step_ok (SG,C) i G" and Step_CloseId_simps[prover]: "step_ok (SG,C) i (CloseId j k)" and Step_Cut_simps[prover]: "step_ok (SG,C) i (Cut \)" and Step_Rrule_simps[prover]: "step_ok (SG,C) i (Rrule j L)" and Step_Lrule_simps[prover]: "step_ok (SG,C) i (Lrule j L)" and Step_Axiom_simps[prover]: "step_ok (SG,C) i (Axiom a)" and Step_AxSubst_simps[prover]: "step_ok (SG,C) i (AxSubst a \)" and Step_DEAxiom_schema_simps[prover]: "step_ok (SG,C) i (DEAxiomSchema ODE \)" and Step_CE_simps[prover]: "step_ok (SG,C) i (CE \ \ \)" and Step_CQ_simps[prover]: "step_ok (SG,C) i (CQ \ \' \)" inductive deriv_ok :: "('sf, 'sc, 'sz) rule \ ('sf, 'sc, 'sz) derivation \ bool" where Deriv_Nil:"deriv_ok R Nil" | Deriv_Cons:"step_ok R i S \ i \ 0 \ i < length (fst R) \ deriv_ok (step_result R (i,S)) SS \ deriv_ok R ((i,S) # SS)" inductive_simps Deriv_nil_simps[prover]: "deriv_ok R Nil" and Deriv_cons_simps[prover]: "deriv_ok R 
((i,S)#SS)" inductive proof_ok :: "('sf, 'sc, 'sz) pf \ bool" where Proof_ok:"deriv_ok (start_proof D) S \ proof_ok (D,S)" inductive_simps Proof_ok_simps[prover]: "proof_ok (D,S)" subsection \Soundness\ named_theorems member_intros "Prove that stuff is in lists" lemma mem_sing[member_intros]:"\x. List.member [x] x" by(auto simp add: member_rec) lemma mem_appR[member_intros]:"\A B x. List.member B x \ List.member (A @ B) x" subgoal for A by(induction A, auto simp add: member_rec) done lemma mem_filter[member_intros]:"\A P x. P x \ List.member A x \ List.member (filter P A) x" subgoal for A by(induction A, auto simp add: member_rec) done lemma sound_weaken_appL:"\SG SGS C. sound (SGS, C) \ sound (SG @ SGS, C)" subgoal for SG SGS C apply(rule sound_weaken_gen) apply(auto) unfolding sublist_def apply(rule allI) subgoal for x using mem_appR[of SGS x SG] by auto done done lemma fml_seq_valid:"valid \ \ seq_valid ([], [\])" unfolding seq_valid_def valid_def by auto lemma closeI_provable_sound:"\i. sound (SG, C) \ sound (closeI SG i, (nth SG i)) \ sound (closeI SG i, C)" using close_provable_sound by auto lemma valid_to_sound:"seq_valid A \ sound (B, A)" unfolding seq_valid_def sound_def by auto lemma closeI_valid_sound:"\i. sound (SG, C) \ seq_valid (nth SG i) \ sound (closeI SG i, C)" using valid_to_sound closeI_provable_sound by auto lemma close_nonmember_eq:"\(List.member A a) \ close A a = A" by (induction A, auto simp add: member_rec) lemma close_noneq_nonempty:"List.member A x \ x \ a \ close A a \ []" by(induction A, auto simp add: member_rec) lemma close_app_neq:"List.member A x \ x \ a \ close (A @ B) a \ B" using append_self_conv2[of "close A a" "close B a"] append_self_conv2[of "close A a" "B"] close_app_comm[of A B a] close_noneq_nonempty[of A x a] apply(cases "close B a = B") apply (auto) by (metis (no_types, lifting) filter_True filter_append mem_Collect_eq set_filter) lemma member_singD:"\x P. P x \ (\y. List.member [x] y \ P y)" by (metis member_rec(1) member_rec(2)) lemma fst_neq:"A \ B \ (A,C) \ (B,D)" by auto lemma lrule_sound: "lrule_ok SG C i j L \ i < length SG \ j < length (fst (SG ! i)) \ sound (SG,C) \ sound (close (append SG (Lrule_result L j (nth SG i))) (nth SG i), C)" proof(induction rule: lrule_ok.induct) case (Lrule_And SG i j C p q) assume eq:"fst (SG ! i) ! j = (p && q)" assume sound:"sound (SG, C)" obtain AI and SI where SG_dec:"(AI,SI) = (SG ! i)" by (metis seq2fml.cases) have AIjeq:"AI ! j = (p && q)" using SG_dec eq by (metis fst_conv) have sub:"sublist [(close ([p, q] @ AI) (p && q),SI)] ([y\SG . y \ (AI, SI)] @ [y\ [(close (p # q # AI) (p && q), SI)] . y \ (AI, SI)])" apply (rule sublistI) using member_singD [of "\y. List.member ([y\SG . y \ (AI, SI)] @ [y\ [(close ([p, q] @ AI) (p && q), SI)] . y \ (AI, SI)]) y" "(close ([p, q] @ AI) (p && q),SI)"] using close_app_neq[of "[p, q]" p "p && q" AI] by(auto intro: member_intros fst_neq simp add: member_rec expr_diseq) have cool:"sound ([y\SG . y \ (AI, SI)] @ [y\ [(close (p # q # AI) (p && q), SI)] . y \ (AI, SI)], AI, SI)" apply(rule sound_weaken_gen[OF sub] ) apply(auto simp add: member_rec expr_diseq) unfolding seq_valid_def proof (rule soundI_mem) fix I::"('sf,'sc,'sz) interp" assume good:"is_interp I" assume sgs:"(\\. List.member [(p # q # [y\AI . y \ (p && q)], SI)] \ \ seq_sem I \ = UNIV)" have theSg:"seq_sem I (p # q # [y\AI . y \ (p && q)], SI) = UNIV" apply(rule sgs) by(auto intro: member_intros) then have sgIn:"\\. \ \ seq_sem I ((p && q) # [y\AI . 
y \ (p && q)], SI)" by auto { fix \ assume sem:"\ \ seq_sem I ((p && q) # [y\AI . y \ (p && q)], SI)" have mem_eq:"\x. List.member ((p && q) # [y\AI . y \ (p && q)]) x = List.member AI x" by (metis (mono_tags, lifting) Lrule_And.prems(2) SG_dec eq fst_conv local.member_filter mem_filter member_rec(1) nth_member) have myeq:"\ \ seq_sem I ((p && q) # [y\AI . y \ (p && q)], SI) \ \ \ seq_sem I (AI, SI)" using and_foldl_sem and_foldl_sem_conv seq_semI Lrule_And.prems(2) SG_dec eq seq_MP seq_semI' mem_eq by (metis (no_types, lifting)) have "\ \ seq_sem I ((p && q) # [y\AI . y \ (p && q)], SI)" using sem by auto then have "\ \ seq_sem I ((p && q) # [y\AI . y \ (p && q)], SI)" by blast then have "\ \ seq_sem I (AI, SI)" using myeq by auto} then show "seq_sem I (AI, SI) = UNIV" using sgIn by blast qed have res_sound:"sound ([y\SG . y \ (AI,SI)] @ [y\Lrule_result AndL j (AI,SI) . y \ (AI,SI)],(AI,SI))" apply (simp) using cool AIjeq by auto show "?case" apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) using res_sound SG_dec by auto next case (Lrule_Imply SG i j C p q) have implyL_simp:"\AI SI SS p q. (nth AI j) = (Not (And (Not q) (Not (Not p)))) \ (AI,SI) = SS \ Lrule_result ImplyL j SS = [(close (q # AI) (nth AI j), SI), (close AI (nth AI j), p # SI)]" subgoal for AI SI SS p q apply(cases SS) by auto done assume eq:"fst (SG ! i) ! j = (p \ q)" assume iL:"i < length SG" assume jL:"j < length (fst (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have res_eq:"Lrule_result ImplyL j (SG ! i) = [(close (q # \) (nth \ j), \), (close \ (nth \ j), p # \)]" apply(rule implyL_simp) using SG_dec eq Implies_def Or_def by (metis fstI)+ have AIjeq:"\ ! j = (p \ q)" using SG_dec eq unfolding Implies_def Or_def by (metis fst_conv) have big_sound:"sound ([(close (q # \) (p \ q), \), (close \ (p \ q), p # \)], (\,\))" apply(rule soundI') apply(rule seq_semI') proof - fix I::"('sf,'sc,'sz) interp" and \::"'sz state" assume good:"is_interp I" assume sgs:"(\i. 0 \ i \ i < length [(close (q # \) (p \ q), \), (close \ (p \ q), p # \)] \ \ \ seq_sem I ([(close (q # \) (p \ q), \), (close \ (p \ q), p # \)] ! i))" have sg1:"\ \ seq_sem I (close (q # \) (p \ q), \)" using sgs[of 0] by auto have sg2:"\ \ seq_sem I (close \ (p \ q), p # \)" using sgs[of "Suc 0"] by auto assume \:"\ \ fml_sem I (foldr And \ TT)" have \_proj:"\\ \. 
List.member \ \ \ \ \ fml_sem I (foldr And \ TT) \ \ \ fml_sem I \" apply(induction \, auto simp add: member_rec) using and_foldl_sem by blast have imp:"\ \ fml_sem I (p \ q)" apply(rule \_proj[of \]) using AIjeq jL SG_dec nth_member apply (metis fst_conv) by (rule \) have sub:"sublist (close \ (p \ q)) \" by (rule close_sub) have \C:"\ \ fml_sem I (foldr And (close \ (p \ q)) TT)" by (rule \_sub_sem[OF sub \]) have "\ \ fml_sem I (foldr (||) (p # \) FF)" by(rule seq_MP[OF sg2 \C]) then have disj:"\ \ fml_sem I p \ \ \ fml_sem I (foldr (||) \ FF)" by auto { assume p:"\ \ fml_sem I p" have q:"\ \ fml_sem I q" using p imp by simp have res: "\ \ fml_sem I (foldr (||) \ FF)" using disj \ seq_semI proof - have "\ \ fml_sem I (foldr (&&) (q # \) TT)" using \ q by auto then show ?thesis by (meson \_sub_sem close_sub seq_MP sg1) qed have conj:"\ \ fml_sem I (foldr (&&) (q # \) TT)" using q \ by auto have conj:"\ \ fml_sem I (foldr (&&) (close (q # \) (p \ q)) TT)" apply(rule \_sub_sem) defer apply(rule conj) by(rule close_sub) have \1:"\ \ fml_sem I (foldr (||) \ FF)" by(rule seq_MP[OF sg1 conj]) } then show "\ \ fml_sem I (foldr (||) \ FF)" using disj by auto qed have neq1:"close ([q] @ \) (p \ q) \ \" apply(rule close_app_neq) apply(rule mem_sing) by (auto simp add: expr_diseq) have neq2:"p # \ \ \" by(induction p, auto) have close_eq:"close [(close (q # \) (p \ q), \), (close \ (p \ q), p # \)] (\,\) = [(close (q # \) (p \ q), \), (close \ (p \ q), p # \)]" apply(rule close_nonmember_eq) apply auto using neq1 neq2 apply (simp add: member_rec) proof - assume a1: "q = (p \ q)" assume "List.member [([y\\ . y \ q], \), ([y\\ . y \ q], p # \)] (\, \)" then have "[f\\ . f \ q] = \" by (simp add: member_rec) then show False using a1 neq1 by fastforce qed show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) apply(unfold res_eq) apply(unfold AIjeq) unfolding close_app_comm apply (rule sound_weaken_appL) using close_eq big_sound SG_dec by simp next case (Lrule_EquivBackward SG i j C p q) have equivLBackward_simp:"\AI SI SS p q. (nth AI j) = Not (And (Not (And p q)) (Not (And (Not p) (Not q)))) \ (AI,SI) = SS \ Lrule_result EquivBackwardL j SS = [(close (p # AI) (nth AI j), SI), (close AI (nth AI j), q # SI)]" subgoal for AI SI SS p q apply(cases SS) by auto done assume eq:"fst (SG ! i) ! j = (p \ q)" assume iL:"i < length SG" assume jL:"j < length (fst (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have res_eq:"Lrule_result EquivBackwardL j (SG ! i) = [(close (p # \) (nth \ j), \), (close \ (nth \ j), q # \)]" apply(rule equivLBackward_simp) using SG_dec eq Equiv_def Or_def by (metis fstI)+ have AIjeq:"\ ! j = (p \ q)" using SG_dec eq unfolding Implies_def Or_def by (metis fst_conv) have big_sound:"sound ([(close (p # \) (p \ q), \), (close \ (p \ q), q # \)], (\,\))" apply(rule soundI') apply(rule seq_semI') proof - fix I::"('sf,'sc,'sz) interp" and \::"'sz state" assume good:"is_interp I" assume sgs:"(\i. 0 \ i \ i < length [(close (p # \) (p \ q), \), (close \ (p \ q), q # \)] \ \ \ seq_sem I ([(close (p # \) (p \ q), \), (close \ (p \ q), q # \)] ! i))" have sg1:"\ \ seq_sem I (close (p # \) (p \ q), \)" using sgs[of 0] by auto have sg2:"\ \ seq_sem I (close \ (p \ q), q # \)" using sgs[of "Suc 0"] by auto assume \:"\ \ fml_sem I (foldr And \ TT)" have \_proj:"\\ \. 
List.member \ \ \ \ \ fml_sem I (foldr And \ TT) \ \ \ fml_sem I \" apply(induction \, auto simp add: member_rec) using and_foldl_sem by blast have imp:"\ \ fml_sem I (p \ q)" apply(rule \_proj[of \]) using AIjeq jL SG_dec nth_member apply (metis fst_conv) by (rule \) have sub:"sublist (close \ (p \ q)) \" by (rule close_sub) have \C:"\ \ fml_sem I (foldr And (close \ (p \ q)) TT)" by (rule \_sub_sem[OF sub \]) have "\ \ fml_sem I (foldr (||) (p # \) FF)" by (metis \ \_sub_sem close_sub iff_sem imp member_rec(1) or_foldl_sem or_foldl_sem_conv seq_MP sg2) then have disj:"\ \ fml_sem I p \ \ \ fml_sem I (foldr (||) \ FF)" by auto { assume p:"\ \ fml_sem I p" have q:"\ \ fml_sem I q" using p imp by simp have res: "\ \ fml_sem I (foldr (||) \ FF)" using disj \ seq_semI proof - have "\ \ fml_sem I (foldr (&&) (q # \) TT)" using \ q by auto then show ?thesis proof - have "\fs p i. (\f. List.member fs (f::('sf, 'sc, 'sz) formula) \ p \ fml_sem i f) \ p \ fml_sem i (foldr (&&) fs TT)" using and_foldl_sem_conv by blast then obtain ff :: "('sf, 'sc, 'sz) formula list \ (real, 'sz) vec \ (real, 'sz) vec \ ('sf, 'sc, 'sz) interp \ ('sf, 'sc, 'sz) formula" where f1: "\fs p i. List.member fs (ff fs p i) \ p \ fml_sem i (ff fs p i) \ p \ fml_sem i (foldr (&&) fs TT)" by metis have "\f. \ \ fml_sem I f \ \ List.member \ f" by (meson \\ \ fml_sem I (foldr (&&) (q # \) TT)\ and_foldl_sem member_rec(1)) then have "\ \ fml_sem I (foldr (&&) (close (p # \) (p \ q)) TT)" using f1 by (metis (no_types) close_sub local.sublist_def member_rec(1) p) then show ?thesis using seq_MP sg1 by blast qed qed have conj:"\ \ fml_sem I (foldr (&&) (q # \) TT)" using q \ by auto have conj:"\ \ fml_sem I (foldr (&&) (close (q # \) (p \ q)) TT)" apply(rule \_sub_sem) defer apply(rule conj) by(rule close_sub) have \1:"\ \ fml_sem I (foldr (||) \ FF)" using res by blast } then show "\ \ fml_sem I (foldr (||) \ FF)" using disj by auto qed have neq1:"close ([q] @ \) (p \ q) \ \" apply(rule close_app_neq) apply(rule mem_sing) by (auto simp add: expr_diseq) have neq2:"p # \ \ \" by(induction p, auto) have close_eq:"close [(close (p # \) (p \ q), \), (close \ (p \ q), q # \)] (\,\) = [(close (p # \) (p \ q), \), (close \ (p \ q), q # \)]" apply(rule close_nonmember_eq) apply auto using neq1 neq2 apply (simp add: member_rec) apply (metis append_Cons append_Nil close.simps close_app_neq member_rec(1)) proof - assume a1:"p = (p \ q)" then show False by (simp add: expr_diseq) qed show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) apply(unfold res_eq) apply(unfold AIjeq) unfolding close_app_comm apply (rule sound_weaken_appL) using close_eq big_sound SG_dec by simp next case (Lrule_EquivForward SG i j C p q) have equivLForward_simp:"\AI SI SS p q. (nth AI j) = Not (And (Not (And p q)) (Not (And (Not p) (Not q)))) \ (AI,SI) = SS \ Lrule_result EquivForwardL j SS = [(close (q # AI) (nth AI j), SI), (close AI (nth AI j), p # SI)]" subgoal for AI SI SS p q apply(cases SS) by auto done assume eq:"fst (SG ! i) ! j = (p \ q)" assume iL:"i < length SG" assume jL:"j < length (fst (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have res_eq:"Lrule_result EquivForwardL j (SG ! i) = [(close (q # \) (nth \ j), \), (close \ (nth \ j), p # \)]" apply(rule equivLForward_simp) using SG_dec eq Equiv_def Or_def by (metis fstI)+ have AIjeq:"\ ! 
j = (p \ q)" using SG_dec eq unfolding Implies_def Or_def by (metis fst_conv) have big_sound:"sound ([(close (q # \) (p \ q), \), (close \ (p \ q), p # \)], (\,\))" apply(rule soundI') apply(rule seq_semI') proof - fix I::"('sf,'sc,'sz) interp" and \::"'sz state" assume good:"is_interp I" assume sgs:"(\i. 0 \ i \ i < length [(close (q # \) (p \ q), \), (close \ (p \ q), p # \)] \ \ \ seq_sem I ([(close (q # \) (p \ q), \), (close \ (p \ q), p # \)] ! i))" have sg1:"\ \ seq_sem I (close (q # \) (p \ q), \)" using sgs[of 0] by auto have sg2:"\ \ seq_sem I (close \ (p \ q), p # \)" using sgs[of "Suc 0"] by auto assume \:"\ \ fml_sem I (foldr And \ TT)" have \_proj:"\\ \. List.member \ \ \ \ \ fml_sem I (foldr And \ TT) \ \ \ fml_sem I \" apply(induction \, auto simp add: member_rec) using and_foldl_sem by blast have imp:"\ \ fml_sem I (p \ q)" apply(rule \_proj[of \]) using AIjeq jL SG_dec nth_member apply (metis fst_conv) by (rule \) have sub:"sublist (close \ (p \ q)) \" by (rule close_sub) have \C:"\ \ fml_sem I (foldr And (close \ (p \ q)) TT)" by (rule \_sub_sem[OF sub \]) have "\ \ fml_sem I (foldr (||) (p # \) FF)" by (metis \ \_sub_sem close_sub iff_sem imp member_rec(1) or_foldl_sem or_foldl_sem_conv seq_MP sg2) then have disj:"\ \ fml_sem I p \ \ \ fml_sem I (foldr (||) \ FF)" by auto { assume p:"\ \ fml_sem I p" have q:"\ \ fml_sem I q" using p imp by simp have res: "\ \ fml_sem I (foldr (||) \ FF)" using disj \ seq_semI proof - have "\ \ fml_sem I (foldr (&&) (q # \) TT)" using \ q by auto then show ?thesis by (meson \\ \ fml_sem I (foldr (&&) (q # \) TT)\ and_foldl_sem and_foldl_sem_conv close_sub local.sublist_def seq_MP sg1) qed have conj:"\ \ fml_sem I (foldr (&&) (q # \) TT)" using q \ by auto have conj:"\ \ fml_sem I (foldr (&&) (close (q # \) (p \ q)) TT)" apply(rule \_sub_sem) defer apply(rule conj) by(rule close_sub) have \1:"\ \ fml_sem I (foldr (||) \ FF)" using res by blast } then show "\ \ fml_sem I (foldr (||) \ FF)" using disj by auto qed have neq1:"close ([q] @ \) (p \ q) \ \" apply(rule close_app_neq) apply(rule mem_sing) by (auto simp add: expr_diseq) have neq2:"p # \ \ \" by(induction p, auto) have close_eq:"close [(close (q # \) (p \ q), \), (close \ (p \ q), p # \)] (\,\) = [(close (q # \) (p \ q), \), (close \ (p \ q), p # \)]" apply(rule close_nonmember_eq) apply auto using neq1 neq2 apply (simp add: member_rec) proof - assume a1:"q = (p \ q)" then show False by (simp add: expr_diseq) qed show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) apply(unfold res_eq) apply(unfold AIjeq) unfolding close_app_comm apply (rule sound_weaken_appL) using close_eq big_sound SG_dec by simp qed lemma rrule_sound: "rrule_ok SG C i j L \ i < length SG \ j < length (snd (SG ! i)) \ sound (SG,C) \ sound (close (append SG (Rrule_result L j (nth SG i))) (nth SG i), C)" proof(induction rule: rrule_ok.induct) case (Rrule_And SG i j C p q) assume eq:"snd (SG ! i) ! j = (p && q)" assume "i < length SG" assume "j < length (snd (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have andR_simp:"\\ \ SS p q. (nth \ j) = And p q \ (\,\) = SS \ Rrule_result AndR j SS = [(\, p # (close \ (nth \ j))), (\, q # (close \ (nth \ j)))]" subgoal for AI SI SS p q apply(cases SS) by auto done have res_eq:"Rrule_result AndR j (SG ! 
i) = [(\, p # (close \ (nth \ j))), (\, q # (close \ (nth \ j)))]" using SG_dec andR_simp apply auto using SG_dec eq Implies_def Or_def using fstI by (metis andR_simp close.simps snd_conv) have AIjeq:"\ ! j = (p && q)" using SG_dec eq snd_conv by metis have big_sound:"sound ([(\, p # (close \ (nth \ j))), (\, q # (close \ (nth \ j)))], (\,\))" apply(rule soundI') apply(rule seq_semI') proof - fix I::"('sf,'sc,'sz) interp" and \ assume good:"is_interp I" assume sgs:"(\i. 0 \ i \ i < length [(\, p # close \ (nth \ j)), (\, q # close \ (nth \ j))] \ \ \ seq_sem I (nth [(\, p # close \ (nth \ j)), (\, q # close \ (nth \ j))] i))" assume \_sem:"\ \ fml_sem I (foldr (&&) \ TT)" have sg1:"\ \ seq_sem I (\, p # close \ (nth \ j))" using sgs[of 0] by auto have sg2:"\ \ seq_sem I (\, q # close \ (nth \ j))" using sgs[of 1] by auto have \1:"\ \ fml_sem I (foldr (||) (p # close \ (nth \ j)) FF)" by(rule seq_MP[OF sg1 \_sem]) have \2:"\ \ fml_sem I (foldr (||) (q # close \ (nth \ j)) FF)" by(rule seq_MP[OF sg2 \_sem]) have \':"\ \ fml_sem I (foldr (||) ((p && q) # close \ (nth \ j)) FF)" using \1 \2 by auto have mem_eq:"\x. List.member ((p && q) # close \ (nth \ j)) x \ List.member \ x" using Rrule_And.prems SG_dec eq member_rec(1) nth_member by (metis close_sub local.sublist_def snd_conv) have myeq:"\ \ fml_sem I (foldr (||) ((p && q) # close \ (nth \ j)) FF) \ \ \ fml_sem I (foldr (||) \ FF)" using seq_semI Rrule_And.prems SG_dec eq seq_MP seq_semI' mem_eq or_foldl_sem or_foldl_sem_conv by metis then show "\ \ fml_sem I (foldr (||) \ FF)" using \' by auto qed have list_neqI1:"\L1 L2 x. List.member L1 x \ \(List.member L2 x) \ L1 \ L2" by(auto) have list_neqI2:"\L1 L2 x. \(List.member L1 x) \ (List.member L2 x) \ L1 \ L2" by(auto) have notin_cons:"\x y ys. x \ y \ \(List.member ys x) \ \(List.member (y # ys) x)" subgoal for x y ys by(induction ys, auto simp add: member_rec) done have notin_close:"\L x. \(List.member (close L x) x)" subgoal for L x by(induction L, auto simp add: member_rec) done have neq_lemma:"\L x y. List.member L x \ y \ x \ (y # (close L x)) \ L" subgoal for L x y apply(cases "List.member L y") subgoal apply(rule list_neqI2[of "y # close L x" x]) apply(rule notin_cons) defer apply(rule notin_close) by(auto) subgoal apply(rule list_neqI2[of "y # close L x" x]) apply(rule notin_cons) defer apply(rule notin_close) by(auto) done done have neq1:"p # close \ (p && q) \ \" apply(rule neq_lemma) apply (metis Rrule_And.prems(2) SG_dec eq nth_member sndI) by(auto simp add: expr_diseq) have neq2:"q # close \ (p && q) \ \" apply(rule neq_lemma) apply (metis Rrule_And.prems(2) SG_dec eq nth_member sndI) by(auto simp add: expr_diseq) have close_eq:"close [(\, p # close \ (p && q)), (\, q # close \ (p && q))] (\,\) = [(\, p # close \ (p && q)), (\, q # close \ (p && q))]" apply(rule close_nonmember_eq) apply auto using neq1 neq2 by (simp add: member_rec) show " sound (close (SG @ Rrule_result AndR j (SG ! i)) (SG ! i), C)" apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) apply(unfold res_eq) apply(unfold AIjeq) unfolding close_app_comm apply (rule sound_weaken_appL) using close_eq big_sound SG_dec by (simp add: AIjeq) next case (Rrule_Imply SG i j C p q) assume eq:"snd (SG ! i) ! j = (p \ q)" assume "i < length SG" assume "j < length (snd (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have impR_simp:"\\ \ SS p q. 
(nth \ j) = Implies p q \ (\,\) = SS \ Rrule_result ImplyR j SS = [(p # \, q # (close \ (nth \ j)))]" subgoal for AI SI SS p q apply(cases SS) by (auto simp add: Implies_def Or_def) done have res_eq:"Rrule_result ImplyR j (SG ! i) = [(p # \, q # (close \ (nth \ j)))]" using SG_dec impR_simp apply auto using SG_dec eq Implies_def Or_def using fstI by (metis impR_simp close.simps snd_conv) have AIjeq:"\ ! j = (p \ q)" using SG_dec eq snd_conv by metis have close_eq:"close [(p # \, q # (close \ (nth \ j)))] (\,\) = [(p # \, q # (close \ (nth \ j)))]" apply(rule close_nonmember_eq) by (simp add: member_rec) have big_sound:"sound ([(p # \, q # close \ (\ ! j))], (\,\))" apply(rule soundI') apply(rule seq_semI') proof - fix I ::"('sf,'sc,'sz) interp" and \::"'sz state" assume "is_interp I" assume sgs:"(\i. 0 \ i \ i < length [(p # \, q # close \ (\ ! j))] \ \ \ seq_sem I ([(p # \, q # close \ (\ ! j))] ! i))" have sg:"\ \ seq_sem I (p # \, q # close \ (\ ! j))" using sgs[of 0] by auto assume \_sem:"\ \ fml_sem I (foldr (&&) \ TT)" show "\ \ fml_sem I (foldr (||) \ FF)" using \_sem sg AIjeq Rrule_Imply.prems(2) SG_dec and_foldl_sem_conv close_sub impl_sem local.sublist_def member_rec(1) nth_member or_foldl_sem_conv seq_MP seq_semI snd_conv \_sub_sem and_foldl_sem or_foldl_sem seq_sem.simps sublistI proof - have f1: "\fs p i. \f. (p \ fml_sem i (foldr (&&) fs (TT::('sf, 'sc, 'sz) formula)) \ List.member fs f) \ (p \ fml_sem i f \ p \ fml_sem i (foldr (&&) fs TT))" using and_foldl_sem_conv by blast have "\p i fs. \f. \pa ia fa fb pb ib fc fd. p \ fml_sem i (f::('sf, 'sc, 'sz) formula) \ (pa \ fml_sem ia (fa::('sf, 'sc, 'sz) formula) \ pa \ fml_sem ia (fa \ fb)) \ (pb \ fml_sem ib (fc::('sf, 'sc, 'sz) formula) \ pb \ fml_sem ib (fd \ fc)) \ (p \ fml_sem i (foldr (||) fs FF) \ List.member fs f)" by (metis impl_sem or_foldl_sem_conv) then obtain ff :: "(real, 'sz) vec \ (real, 'sz) vec \ ('sf, 'sc, 'sz) interp \ ('sf, 'sc, 'sz) formula list \ ('sf, 'sc, 'sz) formula" where f2: "\p i fs pa ia f fa pb ib fb fc. p \ fml_sem i (ff p i fs) \ (pa \ fml_sem ia (f::('sf, 'sc, 'sz) formula) \ pa \ fml_sem ia (f \ fa)) \ (pb \ fml_sem ib (fb::('sf, 'sc, 'sz) formula) \ pb \ fml_sem ib (fc \ fb)) \ (p \ fml_sem i (foldr (||) fs FF) \ List.member fs (ff p i fs))" by metis then have "\fs. \ \ fml_sem I (foldr (&&) (p # \) TT) \ \ local.sublist (close \ (p \ q)) fs \ ff \ I (q # close \ (p \ q)) = q \ List.member fs (ff \ I (q # close \ (p \ q)))" by (metis (no_types) AIjeq local.sublist_def member_rec(1) seq_MP sg) then have "\f. List.member \ f \ \ \ fml_sem I f" using f2 f1 by (metis (no_types) AIjeq Rrule_Imply.prems(2) SG_dec \_sem and_foldl_sem close_sub member_rec(1) nth_member snd_conv) then show ?thesis using or_foldl_sem by blast qed qed show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) using res_eq apply(unfold res_eq) apply(unfold AIjeq) unfolding close_app_comm apply (rule sound_weaken_appL) using close_eq big_sound SG_dec AIjeq by (simp add: AIjeq) next case (Rrule_Cohide SG i j C) assume "i < length SG" assume "j < length (snd (SG ! i))" assume chg:"(\\ q. (nth SG i) \ (\, [q]))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) - have cohideR_simp:"\\ \ SS p q. 
+ have cohideR_simp:" (\,\) = SS \ - Rrule_result CohideR j SS = [(\, [nth \ j])]" - subgoal for AI SI SS p q by(cases SS, auto) done + Rrule_result CohideR j SS = [(\, [nth \ j])]" for \ \ SS p q + by (cases SS, auto) have res_eq:"Rrule_result CohideR j (SG ! i) = [(\, [nth \ j])]" - using SG_dec cohideR_simp by auto + using SG_dec by (rule cohideR_simp) have close_eq:"close [(\, [nth \ j])] (\,\) = [(\, [nth \ j])]" using chg by (metis SG_dec close_nonmember_eq member_rec(1) member_rec(2)) have big_sound:"sound ([(\, [nth \ j])], (\,\))" apply(rule soundI') apply(rule seq_semI') by (metis (no_types, lifting) Rrule_Cohide.prems(2) SG_dec length_greater_0_conv less_or_eq_imp_le list.distinct(1) member_singD nth_Cons_0 nth_member or_foldl_sem or_foldl_sem_conv seq_MP snd_conv) show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) using res_eq apply(unfold res_eq) unfolding close_app_comm apply (rule sound_weaken_appL) using big_sound SG_dec apply(cases "[nth \ j] = \") apply(auto) using chg by (metis)+ next case (Rrule_CohideRR SG i j C) assume "i < length SG" assume "j < length (snd (SG ! i))" assume chg:"(\q. (nth SG i) \ ([], [q]))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) - have cohideRR_simp:"\\ \ SS p q. + have cohideRR_simp:" (\,\) = SS \ - Rrule_result CohideRR j SS = [([], [nth \ j])]" - subgoal for AI SI SS p q by(cases SS, auto) done + Rrule_result CohideRR j SS = [([], [nth \ j])]" for \ \ SS p q + by (cases SS, auto) have res_eq:"Rrule_result CohideRR j (SG ! i) = [([], [nth \ j])]" - using SG_dec cohideRR_simp by auto + using SG_dec by (rule cohideRR_simp) have close_eq:"close [([], [nth \ j])] (\,\) = [([], [nth \ j])]" using chg by (metis SG_dec close_nonmember_eq member_rec(1) member_rec(2)) have big_sound:"sound ([([], [nth \ j])], (\,\))" apply(rule soundI') apply(rule seq_semI') by (metis (no_types, lifting) Rrule_CohideRR.prems(2) SG_dec and_foldl_sem_conv length_greater_0_conv less_or_eq_imp_le list.distinct(1) member_rec(2) member_singD nth_Cons_0 nth_member or_foldl_sem or_foldl_sem_conv seq_MP snd_conv) show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) using res_eq apply(unfold res_eq) unfolding close_app_comm apply (rule sound_weaken_appL) using big_sound SG_dec apply(cases "[nth \ j] = \") apply(auto) using chg by (metis)+ next case (Rrule_True SG i j C) assume tt:"snd (SG ! i) ! j = TT" assume iL:"i < length SG" assume iJ:"j < length (snd (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have "\I \. is_interp I \ \ \ fml_sem I (foldr (||) \ FF)" proof - fix I::"('sf,'sc,'sz)interp" and \::"'sz state" assume good:"is_interp I" have mem2:"List.member \ (\ ! j)" using iJ nth_member by (metis SG_dec snd_conv) then show "\ \ fml_sem I (foldr (||) \ FF)" using mem2 using or_foldl_sem by (metis SG_dec UNIV_I snd_conv tt tt_sem) qed then have seq_valid:"seq_valid (SG ! i)" unfolding seq_valid_def using SG_dec by (metis UNIV_eq_I seq_semI') show ?case using closeI_valid_sound[OF sound seq_valid] by (simp add: sound_weaken_appR) next case (Rrule_Equiv SG i j C p q) assume eq:"snd (SG ! i) ! j = (p \ q)" assume iL:"i < length SG" assume jL:"j < length (snd (SG ! i))" assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have equivR_simp:"\\ \ SS p q. 
(nth \ j) = Equiv p q \ (\,\) = SS \ Rrule_result EquivR j SS = [(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))]" subgoal for AI SI SS p q apply(cases SS) by (auto simp add: Equiv_def Implies_def Or_def) done have res_eq:"Rrule_result EquivR j (SG ! i) = [(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))]" apply(rule equivR_simp) subgoal using eq SG_dec by (metis snd_conv) by (rule SG_dec) have AIjeq:"\ ! j = (p \ q)" using SG_dec eq snd_conv by metis have close_eq:"close [(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))] (\,\) = [(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))]" apply(rule close_nonmember_eq) by (simp add: member_rec) have big_sound:"sound ([(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))], (\,\))" apply(rule soundI') apply(rule seq_semI') proof - fix I ::"('sf,'sc,'sz) interp" and \::"'sz state" assume good:"is_interp I" assume sgs:"(\i. 0 \ i \ i < length [(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))] \ \ \ seq_sem I ([(p # \, q # (closeI \ j)), (q # \, p # (closeI \ j))] ! i))" have sg1:"\ \ seq_sem I (p # \, q # close \ (\ ! j))" using sgs[of 0] by auto have sg2:"\ \ seq_sem I (q # \, p # (closeI \ j))" using sgs[of 1] by auto assume \_sem:"\ \ fml_sem I (foldr (&&) \ TT)" have case1:"\ \ fml_sem I p \ \ \ fml_sem I (foldr (||) \ FF)" proof - assume sem:"\ \ fml_sem I p" have "\ \ fml_sem I (foldr (||) (q # (close \ (nth \ j))) FF)" using sem \_sem sg1 by auto then show "\ \ fml_sem I (foldr (||) \ FF)" using AIjeq SG_dec close_sub[of \ "nth \ j"] iff_sem[of "\" I p q] jL local.sublist_def member_rec(1)[of q "close \ (nth \ j)"] sem snd_conv or_foldl_sem_conv[of \ I "q # close \ (nth \ j)"] or_foldl_sem[of "\", where I=I and \=\] nth_member[of j "snd (SG ! i)"] by metis qed have case2:"\ \ fml_sem I p \ \ \ fml_sem I (foldr (||) \ FF)" proof - assume sem:"\ \ fml_sem I p" have "\ \ fml_sem I q \ \ \ fml_sem I (foldr (||) \ FF) \ False" using and_foldl_sem[OF \_sem] and_foldl_sem_conv closeI.simps close_sub local.sublist_def member_rec(1)[of "p" "closeI \ j"] member_rec(1)[of "q" "\"] or_foldl_sem[of "\"] or_foldl_sem_conv[of \ I "p # closeI \ j"] sem sg2 seq_MP[of \ I "q # \" "p # closeI \ j", OF sg2] proof - assume a1: "\ \ fml_sem I q" assume a2: "\ \ fml_sem I (foldr (||) \ FF)" obtain ff :: "('sf, 'sc, 'sz) formula" where "\ \ fml_sem I ff \ List.member (p # close \ (\ ! j)) ff" using a1 by (metis (no_types) \\\. List.member \ \ \ \ \ fml_sem I \\ \\y. List.member (q # \) y = (q = y \ List.member \ y)\ \\ \ fml_sem I (foldr (&&) (q # \) TT) \ \ \ fml_sem I (foldr (||) (p # closeI \ j) FF)\ \\ \ fml_sem I (foldr (||) (p # closeI \ j) FF) \ \\. \ \ fml_sem I \ \ List.member (p # closeI \ j) \\ and_foldl_sem_conv closeI.simps) then show ?thesis using a2 by (metis (no_types) \\\ \ I. \List.member \ \; \ \ fml_sem I \\ \ \ \ fml_sem I (foldr (||) \ FF)\ \\y. 
List.member (p # closeI \ j) y = (p = y \ List.member (closeI \ j) y)\ closeI.simps close_sub local.sublist_def sem) qed show "\ \ fml_sem I (foldr (||) \ FF)" by (metis AIjeq SG_dec \\\ \ fml_sem I q; \ \ fml_sem I (foldr (||) \ FF)\ \ False\ iff_sem jL nth_member or_foldl_sem sem snd_eqD) qed show "\ \ fml_sem I (foldr (||) \ FF)" by(cases "\ \ fml_sem I p", (simp add: case1 case2)+) qed show ?case apply(rule close_provable_sound) apply(rule sound_weaken_appR) apply(rule sound) using res_eq apply(unfold res_eq) unfolding close_app_comm apply (rule sound_weaken_appL) using close_eq big_sound SG_dec AIjeq by (simp add: AIjeq) qed lemma step_sound:"step_ok R i S \ i \ 0 \ i < length (fst R) \ sound R \ sound (step_result R (i,S))" proof(induction rule: step_ok.induct) case (Step_Axiom SG i a C) assume is_axiom:"SG ! i = ([], [get_axiom a])" assume sound:"sound (SG, C)" assume i0:"0 \ i" assume "i < length (fst (SG, C))" then have iL:"i < length (SG)" by auto have "seq_valid ([], [get_axiom a])" apply(rule fml_seq_valid) by(rule axiom_valid) then have seq_valid:"seq_valid (SG ! i)" using is_axiom by auto \ \\i0 iL\\ then show ?case using closeI_valid_sound[OF sound seq_valid] by simp next case (Step_AxSubst SG i a \ C) assume is_axiom:"SG ! i = ([], [Fsubst (get_axiom a) \])" assume sound:"sound (SG, C)" assume ssafe:"ssafe \" assume i0:"0 \ i" assume Fadmit:"Fadmit \ (get_axiom a)" assume "i < length (fst (SG, C))" then have iL:"i < length (SG)" by auto have valid_axiom:"valid (get_axiom a)" by(rule axiom_valid) have subst_valid:"valid (Fsubst (get_axiom a) \)" apply(rule subst_fml_valid) apply(rule Fadmit) apply(rule axiom_safe) apply(rule ssafe) by(rule valid_axiom) have "seq_valid ([], [(Fsubst (get_axiom a) \)])" apply(rule fml_seq_valid) by(rule subst_valid) then have seq_valid:"seq_valid (SG ! i)" using is_axiom by auto \ \\i0 iL\\ then show ?case using closeI_valid_sound[OF sound seq_valid] by simp next case (Step_Lrule R i j L) then show ?case using lrule_sound using step_result.simps(2) surj_pair by simp next case (Step_Rrule R i SG j L) then show ?case using rrule_sound using step_result.simps(2) surj_pair by simp next case (Step_Cut \ i SG C) assume safe:"fsafe \" assume "i < length (fst (SG, C))" then have iL:"i < length SG" by auto assume sound:"sound (SG, C)" obtain \ and \ where SG_dec:"(\,\) = (SG ! i)" by (metis seq2fml.cases) have "sound ((\ # \, \) # (\, \ # \) # [y\SG . y \ SG ! i], C)" apply(rule soundI_memv) proof - fix I::"('sf,'sc,'sz) interp" and \::"'sz state" assume good:"is_interp I" assume sgs:"(\\' \. List.member ((\ # \, \) # (\, \ # \) # [y\SG . y \ SG ! i]) \' \ \ \ seq_sem I \')" have sg1:"\\. \ \ seq_sem I (\ # \, \)" using sgs by (meson member_rec(1)) have sg2:"\\. \ \ seq_sem I (\, \ # \)" using sgs by (meson member_rec(1)) have sgs:"\\ \. (List.member (close SG (nth SG i)) \) \ \ \ seq_sem I \" using sgs by (simp add: member_rec(1)) then have sgs:"\\ \. (List.member (close SG (\,\)) \) \ \ \ seq_sem I \" using SG_dec by auto have sgNew:"\\. \ \ seq_sem I (\, \)" using sg1 sg2 by auto have same_mem:"\x. List.member SG x \ List.member ((\,\) # close SG (\,\)) x" subgoal for s by(induction SG, auto simp add: member_rec) done have SGS:"(\\' \. List.member SG \' \ \ \ seq_sem I \')" using sgNew sgs same_mem member_rec(1) seq_MP by metis show "\ \ seq_sem I C" using sound apply simp apply(drule soundD_memv) apply(rule good) using SGS apply blast by auto qed then show ?case using SG_dec case_prod_conv proof - have "(\f. 
((case nth SG i of (x, xa) \ ((f x xa)::('sf, 'sc, 'sz) rule)) = (f \ \)))" by (metis (no_types) SG_dec case_prod_conv) then show ?thesis by (simp add: \sound ((\ # \, \) # (\, \ # \) # [y\SG . y \ SG ! i], C)\) qed next case (Step_G SG i C a p) assume eq:"SG ! i = ([], [([[a]]p)])" assume iL:"i < length (fst (SG, C))" assume sound:"sound (SG, C)" have "sound (([], [p]) # (close SG ([], [([[ a ]] p)])), C)" apply(rule soundI_memv) proof - fix I::"('sf,'sc,'sz) interp" and \::"'sz state" assume "is_interp I" assume sgs:"(\\ \. List.member (([], [p]) # close SG ([], [([[a]]p)])) \ \ \ \ seq_sem I \)" have sg0:"(\\. \ \ seq_sem I ([], [p]))" using sgs by (meson member_rec(1)) then have sg0':"(\\. \ \ seq_sem I ([], [([[a]]p)]))" by auto have sgTail:"(\\ \. List.member (close SG ([], [([[a]]p)])) \ \ \ \ seq_sem I \)" using sgs by (simp add: member_rec(1)) have same_mem:"\x. List.member SG x \ List.member (([], [([[a]]p)]) # close SG ([], [([[a]]p)])) x" subgoal for s by(induction SG, auto simp add: member_rec) done have sgsC:"(\\ \. List.member SG \ \ \ \ seq_sem I \)" apply auto using sgTail sg0' same_mem member_rec by (metis seq_MP) then show "\ \ seq_sem I C" using sound by (metis UNIV_eq_I \is_interp I\ iso_tuple_UNIV_I soundD_mem) qed then show ?case by(auto simp add: eq Box_def) next case (Step_CloseId SG i j k C) assume match:"fst (SG ! i) ! j = snd (SG ! i) ! k" assume jL:"j < length (fst (SG ! i))" assume kL:"k < length (snd (SG ! i))" assume iL:"i < length (fst (SG, C))" then have iL:"i < length (SG)" by auto assume sound:"sound (SG, C)" obtain \ \ where SG_dec:"(\, \) = SG ! i" using prod.collapse by blast have j\:"j < length \" using SG_dec jL by (metis fst_conv) have k\:"k < length \" using SG_dec kL by (metis snd_conv) have "\I \. is_interp I \ \ \ fml_sem I (foldr (&&) \ TT) \ \ \ fml_sem I (foldr (||) \ FF)" proof - fix I::"('sf,'sc,'sz)interp" and \::"'sz state" assume good:"is_interp I" assume \_sem:"\ \ fml_sem I (foldr (&&) \ TT)" have mem:"List.member \ (\ ! j)" using j\ nth_member by blast have mem2:"List.member \ (\ ! k)" using k\ nth_member by blast have "\ \ fml_sem I (\ ! j)" using \_sem mem using and_foldl_sem by blast then have "\ \ fml_sem I (\ ! k)" using match SG_dec by (metis fst_conv snd_conv) then show "\ \ fml_sem I (foldr (||) \ FF)" using mem2 using or_foldl_sem by blast qed then have seq_valid:"seq_valid (SG ! 
i)" unfolding seq_valid_def using SG_dec by (metis UNIV_eq_I seq_semI') then show "sound (step_result (SG, C) (i, CloseId j k))" using closeI_valid_sound[OF sound seq_valid] by simp next case (Step_DEAxiom_schema SG i ODE \ C ) assume isNth:"nth SG i = ([], [Fsubst (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]]P pid1) \ ([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]][[DiffAssign vid1 (f1 fid1 vid1)]]P pid1)) \])" assume FA:"Fadmit \ (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]]P pid1) \ ([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]][[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))" assume disj:"{Inl vid1, Inr vid1} \ BVO ODE = {}" have schem_valid:"valid (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE ((OProd (OSing vid1 (f1 fid1 vid1))ODE)) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))" using DE_sys_valid[OF disj] by auto assume ssafe:"ssafe \" assume osafe:"osafe ODE" have subst_valid:"valid (Fsubst (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]]P pid1) \ ([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]][[DiffAssign vid1 (f1 fid1 vid1)]]P pid1)) \)" apply(rule subst_fml_valid) apply(rule FA) subgoal using disj by(auto simp add: f1_def Box_def p1_def P_def Equiv_def Or_def expand_singleton osafe, induction ODE, auto) subgoal by (rule ssafe) by (rule schem_valid) assume "0 \ i" assume "i < length (fst (SG, C))" assume sound:"sound (SG, C)" have "seq_valid ([], [(Fsubst (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]]P pid1) \ ([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1))ODE) (p1 vid2 vid1)]][[DiffAssign vid1 (f1 fid1 vid1)]]P pid1)) \)])" apply(rule fml_seq_valid) by(rule subst_valid) then have seq_valid:"seq_valid (SG ! i)" using isNth by auto \ \\i0 iL\\ then show ?case using closeI_valid_sound[OF sound seq_valid] by simp next case (Step_CE SG i \ \ \ C) assume isNth:"SG ! i = ([], [Fsubst (InContext pid1 \ \ InContext pid1 \) \])" assume valid:"valid (\ \ \)" assume FA:"Fadmit \ (InContext pid1 \ \ InContext pid1 \)" assume "0 \ i" assume "i < length (fst (SG, C))" assume sound:"sound (SG, C)" assume fsafe1:"fsafe \" assume fsafe2:"fsafe \" assume ssafe:"ssafe \" have schem_valid:"valid (InContext pid1 \ \ InContext pid1 \)" using valid unfolding valid_def by (metis CE_holds_def CE_sound fml_sem.simps(7) iff_sem surj_pair valid_def)+ have subst_valid:"valid (Fsubst (InContext pid1 \ \ InContext pid1 \) \)" apply(rule subst_fml_valid) apply(rule FA) subgoal by(auto simp add: f1_def Box_def p1_def P_def Equiv_def Or_def expand_singleton fsafe1 fsafe2) subgoal by (rule ssafe) by (rule schem_valid) have "seq_valid ([], [Fsubst (InContext pid1 \ \ InContext pid1 \) \])" apply(rule fml_seq_valid) by(rule subst_valid) then have seq_valid:"seq_valid (SG ! 
i)" using isNth by auto show "sound (step_result (SG, C) (i, CE \ \ \))" using closeI_valid_sound[OF sound seq_valid] by simp next case (Step_CQ SG i p \ \' \ C) assume isNth:"nth SG i = ([], [Fsubst (Equiv (Prop p (singleton \)) (Prop p (singleton \'))) \])" assume valid:"valid (Equals \ \')" assume FA:"Fadmit \ ($\ p (singleton \) \ $\ p (singleton \'))" assume "0 \ i" assume "i < length (fst (SG, C))" assume sound:"sound (SG, C)" assume dsafe1:"dsafe \" assume dsafe2:"dsafe \'" assume ssafe:"ssafe \" have schem_valid:"valid ($\ p (singleton \) \ $\ p (singleton \'))" using valid unfolding valid_def by (metis CQ_holds_def CQ_sound fml_sem.simps(7) iff_sem surj_pair valid_def)+ have subst_valid:"valid (Fsubst ($\ p (singleton \) \ $\ p (singleton \')) \)" apply(rule subst_fml_valid) apply(rule FA) using schem_valid ssafe by (auto simp add: f1_def Box_def p1_def P_def Equiv_def Or_def expand_singleton dsafe1 dsafe2 expand_singleton) have "seq_valid ([], [Fsubst ($\ p (singleton \) \ $\ p (singleton \')) \])" apply(rule fml_seq_valid) by(rule subst_valid) then have seq_valid:"seq_valid (SG ! i)" using isNth by auto show "sound (step_result (SG, C) (i, CQ \ \' \))" using closeI_valid_sound[OF sound seq_valid] by simp qed lemma deriv_sound:"deriv_ok R D \ sound R \ sound (deriv_result R D)" apply(induction rule: deriv_ok.induct) using step_sound by auto lemma proof_sound:"proof_ok Pf \ sound (proof_result Pf)" apply(induct rule: proof_ok.induct) unfolding proof_result.simps apply(rule deriv_sound) apply assumption by(rule start_proof_sound) section \Example 1: Differential Invariants\ definition DIAndConcl::"('sf,'sc,'sz) sequent" where "DIAndConcl = ([], [Implies (And (Predicational pid1) (Predicational pid2)) (Implies ([[Pvar vid1]](And (Predicational pid3) (Predicational pid4))) ([[Pvar vid1]](And (Predicational pid1) (Predicational pid2))))])" definition DIAndSG1::"('sf,'sc,'sz) formula" where "DIAndSG1 = (Implies (Predicational pid1) (Implies ([[Pvar vid1]](Predicational pid3)) ([[Pvar vid1]](Predicational pid1))))" definition DIAndSG2::"('sf,'sc,'sz) formula" where "DIAndSG2 = (Implies (Predicational pid2) (Implies ([[Pvar vid1]](Predicational pid4)) ([[Pvar vid1]](Predicational pid2))))" definition DIAndCut::"('sf,'sc,'sz) formula" where "DIAndCut = (([[$\ vid1]]((And (Predicational ( pid3)) (Predicational ( pid4)))) \ (And (Predicational ( pid1)) (Predicational ( pid2)))) \ ([[$\ vid1]](And (Predicational ( pid3)) (Predicational ( pid4)))) \ ([[$\ vid1]](And (Predicational (pid1)) (Predicational ( pid2)))))" definition DIAndSubst::"('sf,'sc,'sz) subst" where "DIAndSubst = \ SFunctions = (\_. None), SPredicates = (\_. None), SContexts = (\C. (if C = pid1 then Some(And (Predicational (Inl pid3)) (Predicational (Inl pid4))) else (if C = pid2 then Some(And (Predicational (Inl pid1)) (Predicational (Inl pid2))) else None))), SPrograms = (\_. None), SODEs = (\_. None) \" \ \\[a]R&H->R->[a]R&H->[a]R DIAndSubst34\\ definition DIAndSubst341::"('sf,'sc,'sz) subst" where "DIAndSubst341 = \ SFunctions = (\_. None), SPredicates = (\_. None), SContexts = (\C. (if C = pid1 then Some(And (Predicational (Inl pid3)) (Predicational (Inl pid4))) else (if C = pid2 then Some(Predicational (Inl pid3)) else None))), SPrograms = (\_. None), SODEs = (\_. None) \" definition DIAndSubst342::"('sf,'sc,'sz) subst" where "DIAndSubst342 = \ SFunctions = (\_. None), SPredicates = (\_. None), SContexts = (\C. 
(if C = pid1 then Some(And (Predicational (Inl pid3)) (Predicational (Inl pid4))) else (if C = pid2 then Some(Predicational (Inl pid4)) else None))), SPrograms = (\_. None), SODEs = (\_. None) \" \ \\[a]P, [a]R&H, P, Q |- [a]Q->P&Q->[a]Q->[a]P&Q, [a]P&Q;;\\ definition DIAndSubst12::"('sf,'sc,'sz) subst" where "DIAndSubst12 = \ SFunctions = (\_. None), SPredicates = (\_. None), SContexts = (\C. (if C = pid1 then Some(Predicational (Inl pid2)) else (if C = pid2 then Some(Predicational (Inl pid1) && Predicational (Inl pid2)) else None))), SPrograms = (\_. None), SODEs = (\_. None) \" \ \\P -> Q->P&Q\\ definition DIAndCurry12::"('sf,'sc,'sz) subst" where "DIAndCurry12 = \ SFunctions = (\_. None), SPredicates = (\_. None), SContexts = (\C. (if C = pid1 then Some(Predicational (Inl pid1)) else (if C = pid2 then Some(Predicational (Inl pid2) \ (Predicational (Inl pid1) && Predicational (Inl pid2))) else None))), SPrograms = (\_. None), SODEs = (\_. None) \" definition DIAnd :: "('sf,'sc,'sz) rule" where "DIAnd = ([([],[DIAndSG1]),([],[DIAndSG2])], DIAndConcl)" definition DIAndCutP1 :: "('sf,'sc,'sz) formula" where "DIAndCutP1 = ([[Pvar vid1]](Predicational pid1))" definition DIAndCutP2 :: "('sf,'sc,'sz) formula" where "DIAndCutP2 = ([[Pvar vid1]](Predicational pid2))" definition DIAndCutP12 :: "('sf,'sc,'sz) formula" where "DIAndCutP12 = (([[Pvar vid1]](Pc pid1) \ (Pc pid2 \ (And (Pc pid1) (Pc pid2)))) \ (([[Pvar vid1]]Pc pid1) \ ([[Pvar vid1]](Pc pid2 \ (And (Pc pid1) (Pc pid2))))))" definition DIAndCut34Elim1 :: "('sf,'sc,'sz) formula" where "DIAndCut34Elim1 = (([[Pvar vid1]](Pc pid3 && Pc pid4) \ (Pc pid3)) \ (([[Pvar vid1]](Pc pid3 && Pc pid4)) \ ([[Pvar vid1]](Pc pid3))))" definition DIAndCut34Elim2 :: "('sf,'sc,'sz) formula" where "DIAndCut34Elim2 = (([[Pvar vid1]](Pc pid3 && Pc pid4) \ (Pc pid4)) \ (([[Pvar vid1]](Pc pid3 && Pc pid4)) \ ([[Pvar vid1]](Pc pid4))))" definition DIAndCut12Intro :: "('sf,'sc,'sz) formula" where "DIAndCut12Intro = (([[Pvar vid1]](Pc pid2 \ (Pc pid1 && Pc pid2))) \ (([[Pvar vid1]](Pc pid2)) \ ([[Pvar vid1]](Pc pid1 && Pc pid2))))" definition DIAndProof :: "('sf, 'sc, 'sz) pf" where "DIAndProof = (DIAndConcl, [ (0, Rrule ImplyR 0) \ \1\ ,(0, Lrule AndL 0) ,(0, Rrule ImplyR 0) ,(0, Cut DIAndCutP1) ,(1, Cut DIAndSG1) ,(0, Rrule CohideR 0) ,(Suc (Suc 0), Lrule ImplyL 0) ,(Suc (Suc (Suc 0)), CloseId 1 0) ,(Suc (Suc 0), Lrule ImplyL 0) ,(Suc (Suc 0), CloseId 0 0) ,(Suc (Suc 0), Cut DIAndCut34Elim1) \ \11\ ,(0, Lrule ImplyL 0) ,(Suc (Suc (Suc 0)), Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(0, Rrule CohideRR 0) ,(Suc 0, Rrule CohideRR 0) ,(Suc (Suc (Suc (Suc (Suc 0)))), G) ,(0, Rrule ImplyR 0) ,(Suc (Suc (Suc (Suc (Suc 0)))), Lrule AndL 0) ,(Suc (Suc (Suc (Suc (Suc 0)))), CloseId 0 0) ,(Suc (Suc (Suc 0)), AxSubst AK DIAndSubst341) \ \21\ ,(Suc (Suc 0), CloseId 0 0) ,(Suc 0, CloseId 0 0) ,(0, Cut DIAndCut12Intro) ,(Suc 0, Rrule CohideRR 0) ,(Suc (Suc 0), AxSubst AK DIAndSubst12) ,(0, Lrule ImplyL 0) ,(1, Lrule ImplyL 0) ,(Suc (Suc 0), CloseId 0 0) ,(Suc 0, Cut DIAndCutP12) ,(0, Lrule ImplyL 0) \ \31\ ,(0, Rrule CohideRR 0) ,(Suc (Suc (Suc (Suc 0))), AxSubst AK DIAndCurry12) ,(Suc (Suc (Suc 0)), Rrule CohideRR 0) ,(Suc (Suc 0), Lrule ImplyL 0) ,(Suc (Suc 0), G) ,(0, Rrule ImplyR 0) ,(Suc (Suc (Suc (Suc 0))), Rrule ImplyR 0) ,(Suc (Suc (Suc (Suc 0))), Rrule AndR 0) ,(Suc (Suc (Suc (Suc (Suc 0)))), CloseId 0 0) ,(Suc (Suc (Suc (Suc 0))), CloseId 1 0) \ \41\ ,(Suc (Suc 0), CloseId 0 0) ,(Suc 0, Cut DIAndCut34Elim2) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(Suc (Suc (Suc 
(Suc 0))), AxSubst AK DIAndSubst342) \ \46\ ,(Suc (Suc (Suc 0)), Rrule CohideRR 0) ,(Suc (Suc (Suc 0)), G) \ \48\ ,(0, Rrule ImplyR 0) ,(Suc (Suc (Suc 0)), Lrule AndL 0) \ \50\ ,(Suc (Suc (Suc 0)), CloseId 1 0) ,(Suc (Suc 0), Lrule ImplyL 0) ,(Suc 0, CloseId 0 0) ,(1, Cut DIAndSG2) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(Suc (Suc (Suc 0)), CloseId 4 0) ,(Suc (Suc 0), Lrule ImplyL 0) ,(Suc (Suc (Suc 0)), CloseId 0 0) ,(Suc (Suc (Suc 0)), CloseId 0 0) ,(1, CloseId 1 0) ]) " fun proof_take :: "nat \ ('sf,'sc,'sz) pf \ ('sf,'sc,'sz) pf" where "proof_take n (C,D) = (C,List.take n D)" fun last_step::"('sf,'sc,'sz) pf \ nat \ nat * ('sf,'sc,'sz ) step" where "last_step (C,D) n = List.last (take n D)" lemma DIAndSound_lemma:"sound (proof_result (proof_take 61 DIAndProof))" apply(rule proof_sound) unfolding DIAndProof_def DIAndConcl_def DIAndCutP1_def DIAndSG1_def DIAndCut34Elim1_def DIAndSubst341_def DIAndCut12Intro_def DIAndSubst12_def DIAndCutP12_def DIAndCurry12_def DIAndSubst342_def DIAndCut34Elim2_def \ \43\ DIAndSG2_def \ \54\ (* slow *) apply (auto simp add: prover) done section \Example 2: Concrete Hybrid System\ \ \\v \ 0 \ A() \ 0 \ [v' = A, x' = v]v' \ 0\\ definition SystemConcl::"('sf,'sc,'sz) sequent" where "SystemConcl = ([],[ Implies (And (Geq (Var vid1) (Const 0)) (Geq (f0 fid1) (Const 0))) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (TT)]]Geq (Var vid1) (Const 0)) ])" definition SystemDICut :: "('sf,'sc,'sz) formula" where "SystemDICut = Implies (Implies TT ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]] (Geq (Differential (Var vid1)) (Differential (Const 0))))) (Implies (Implies TT (Geq (Var vid1) (Const 0))) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]](Geq (Var vid1) (Const 0))))" (* (Implies (Geq (Var vid1) (Const 0)) (Implies (And TT ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]] (Geq (Differential (Var vid1)) (Differential (Const 0))) )) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]](Geq (Var vid1) (Const 0)))))" *) definition SystemDCCut::"('sf,'sc,'sz) formula" where "SystemDCCut = (([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]](Geq (f0 fid1) (Const 0))) \ (([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]]((Geq (Differential (Var vid1)) (Differential (Const 0))))) \ ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]](Geq (Differential (Var vid1)) (Differential (Const 0))))))" definition SystemVCut::"('sf,'sc,'sz) formula" where "SystemVCut = Implies (Geq (f0 fid1) (Const 0)) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]](Geq (f0 fid1) (Const 0)))" definition SystemVCut2::"('sf,'sc,'sz) formula" where "SystemVCut2 = Implies (Geq (f0 fid1) (Const 0)) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]](Geq (f0 fid1) (Const 0)))" definition SystemDECut::"('sf,'sc,'sz) formula" where "SystemDECut = (([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] ((Geq (Differential (Var vid1)) (Differential (Const 0))))) \ ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] [[DiffAssign vid1 (f0 fid1)]](Geq (Differential (Var vid1)) (Differential (Const 0)))))" definition SystemKCut::"('sf,'sc,'sz) formula" where "SystemKCut = (Implies ([[EvolveODE (OProd (OSing vid1 (f0 
fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] (Implies ((And TT (Geq (f0 fid1) (Const 0)))) ([[DiffAssign vid1 (f0 fid1)]](Geq (Differential (Var vid1)) (Differential (Const 0)))))) (Implies ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] ((And TT (Geq (f0 fid1) (Const 0))))) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] ([[DiffAssign vid1 (f0 fid1)]](Geq (Differential (Var vid1)) (Differential (Const 0)))))))" definition SystemEquivCut::"('sf,'sc,'sz) formula" where "SystemEquivCut = (Equiv (Implies ((And TT (Geq (f0 fid1) (Const 0)))) ([[DiffAssign vid1 (f0 fid1)]](Geq (Differential (Var vid1)) (Differential (Const 0))))) (Implies ((And TT (Geq (f0 fid1) (Const 0)))) ([[DiffAssign vid1 (f0 fid1)]](Geq (DiffVar vid1) (Const 0)))))" definition SystemDiffAssignCut::"('sf,'sc,'sz) formula" where "SystemDiffAssignCut = (([[DiffAssign vid1 ($f fid1 empty)]] (Geq (DiffVar vid1) (Const 0))) \ (Geq ($f fid1 empty) (Const 0)))" definition SystemCEFml1::"('sf,'sc,'sz) formula" where "SystemCEFml1 = Geq (Differential (Var vid1)) (Differential (Const 0))" definition SystemCEFml2::"('sf,'sc,'sz) formula" where "SystemCEFml2 = Geq (DiffVar vid1) (Const 0)" (* definition diff_const_axiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_const_axiom \ Equals (Differential ($f fid1 empty)) (Const 0)" definition diff_var_axiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_var_axiom \ Equals (Differential (Var vid1)) (DiffVar vid1)"*) definition CQ1Concl::"('sf,'sc,'sz) formula" where "CQ1Concl = (Geq (Differential (Var vid1)) (Differential (Const 0)) \ Geq (DiffVar vid1) (Differential (Const 0)))" definition CQ2Concl::"('sf,'sc,'sz) formula" where "CQ2Concl = (Geq (DiffVar vid1) (Differential (Const 0)) \ Geq ($' vid1) (Const 0))" definition CEReq::"('sf,'sc,'sz) formula" where "CEReq = (Geq (Differential (trm.Var vid1)) (Differential (Const 0)) \ Geq ($' vid1) (Const 0))" definition CQRightSubst::"('sf,'sc,'sz) subst" where "CQRightSubst = \ SFunctions = (\_. None), SPredicates = (\p. (if p = vid1 then (Some (Geq (DiffVar vid1) (Function (Inr vid1) empty))) else None)), SContexts = (\_. None), SPrograms = (\_. None), SODEs = (\_. None) \" definition CQLeftSubst::"('sf,'sc,'sz) subst" where "CQLeftSubst = \ SFunctions = (\_. None), SPredicates = (\p. (if p = vid1 then (Some (Geq (Function (Inr vid1) empty) (Differential (Const 0)))) else None)), SContexts = (\_. None), SPrograms = (\_. None), SODEs = (\_. None) \" definition CEProof::"('sf,'sc,'sz) pf" where "CEProof = (([],[CEReq]), [ (0, Cut CQ1Concl) ,(0, Cut CQ2Concl) ,(1, Rrule CohideRR 0) ,(Suc (Suc 0), CQ (Differential (Const 0)) (Const 0) CQRightSubst) ,(1, Rrule CohideRR 0) ,(1, CQ (Differential (Var vid1)) (DiffVar vid1) CQLeftSubst) ,(0, Rrule EquivR 0) ,(0, Lrule EquivForwardL 1) ,(Suc (Suc 0), Lrule EquivForwardL 1) ,(Suc (Suc (Suc 0)), CloseId 0 0) ,(Suc (Suc 0), CloseId 0 0) ,(Suc 0, CloseId 0 0) ,(0, Lrule EquivBackwardL (Suc (Suc 0))) ,(0, CloseId 0 0) ,(0, Lrule EquivBackwardL (Suc 0)) ,(0, CloseId 0 0) ,(0, CloseId 0 0) ])" lemma CE_result_correct:"proof_result CEProof = ([],([],[CEReq]))" unfolding CEProof_def CEReq_def CQ1Concl_def CQ2Concl_def Implies_def Or_def f0_def TT_def Equiv_def Box_def CQRightSubst_def by (auto simp add: id_simps) definition DiffConstSubst::"('sf,'sc,'sz) subst" where "DiffConstSubst = \ SFunctions = (\f. 
(if f = fid1 then (Some (Const 0)) else None)), SPredicates = (\_. None), SContexts = (\_. None), SPrograms = (\_. None), SODEs = (\_. None) \" definition DiffConstProof::"('sf,'sc,'sz) pf" where "DiffConstProof = (([],[(Equals (Differential (Const 0)) (Const 0))]), [ (0, AxSubst AdConst DiffConstSubst)])" lemma diffconst_result_correct:"proof_result DiffConstProof = ([], ([],[Equals (Differential (Const 0)) (Const 0)]))" by(auto simp add: prover DiffConstProof_def) lemma diffconst_sound_lemma:"sound (proof_result DiffConstProof)" apply(rule proof_sound) unfolding DiffConstProof_def by (auto simp add: prover DiffConstProof_def DiffConstSubst_def Equals_def empty_def TUadmit_def) lemma valid_of_sound:"sound ([], ([],[\])) \ valid \" unfolding valid_def sound_def TT_def FF_def apply (auto simp add: TT_def FF_def Or_def) subgoal for I a b apply(erule allE[where x=I]) by(auto) done lemma almost_diff_const_sound:"sound ([], ([], [Equals (Differential (Const 0)) (Const 0)]))" using diffconst_result_correct diffconst_sound_lemma by simp lemma almost_diff_const:"valid (Equals (Differential (Const 0)) (Const 0))" using almost_diff_const_sound valid_of_sound by auto \ \Note: this is just unpacking the definition: the axiom is defined as literally this formula\ lemma almost_diff_var:"valid (Equals (Differential (trm.Var vid1)) ($' vid1))" using diff_var_axiom_valid unfolding diff_var_axiom_def by auto lemma CESound_lemma:"sound (proof_result CEProof)" apply(rule proof_sound) unfolding CEProof_def CEReq_def CQ1Concl_def CQ2Concl_def Equiv_def CQRightSubst_def diff_const_axiom_valid diff_var_axiom_valid empty_def Or_def expand_singleton diff_var_axiom_def by (auto simp add: prover CEProof_def CEReq_def CQ1Concl_def CQ2Concl_def Equiv_def CQRightSubst_def diff_const_axiom_valid diff_var_axiom_valid empty_def Or_def expand_singleton TUadmit_def NTUadmit_def almost_diff_const CQLeftSubst_def almost_diff_var) lemma sound_to_valid:"sound ([], ([], [\])) \ valid \" unfolding valid_def apply auto apply(drule soundD_mem) by (auto simp add: member_rec(2)) lemma CE1pre:"sound ([], ([], [CEReq]))" using CE_result_correct CESound_lemma by simp lemma CE1pre_valid:"valid CEReq" by (rule sound_to_valid[OF CE1pre]) lemma CE1pre_valid2:"valid (! (! (Geq (Differential (trm.Var vid1)) (Differential (Const 0)) && Geq ($' vid1) (Const 0)) && ! (! (Geq (Differential (trm.Var vid1)) (Differential (Const 0))) && ! (Geq ($' vid1) (Const 0))))) " using CE1pre_valid unfolding CEReq_def Equiv_def Or_def by auto definition SystemDISubst::"('sf,'sc,'sz) subst" where "SystemDISubst = \ SFunctions = (\f. ( if f = fid1 then Some(Function (Inr vid1) empty) else if f = fid2 then Some(Const 0) else None)), SPredicates = (\p. if p = vid1 then Some TT else None), SContexts = (\_. None), SPrograms = (\_. None), SODEs = (\c. 
if c = vid1 then Some (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (trm.Var vid1))) else None) \" (* Implies (Implies (Prop vid1 empty) ([[EvolveODE (OVar vid1) (Prop vid1 empty)]](Geq (Differential (f1 fid1 vid1)) (Differential (f1 fid2 vid1))))) (Implies (Implies(Prop vid1 empty) (Geq (f1 fid1 vid1) (f1 fid2 vid1))) ([[EvolveODE (OVar vid1) (Prop vid1 empty)]](Geq (f1 fid1 vid1) (f1 fid2 vid1))))" *) (* Implies (Implies TT ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]] (Geq (Differential (Var vid1)) (Differential (Const 0))))) (Implies (Implies TT (Geq (Var vid1) (Const 0))) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) TT]](Geq (Var vid1) (Const 0)))) *) definition SystemDCSubst::"('sf,'sc,'sz) subst" where "SystemDCSubst = \ SFunctions = (\ f. None), SPredicates = (\p. None), SContexts = (\C. if C = pid1 then Some TT else if C = pid2 then Some (Geq (Differential (Var vid1)) (Differential (Const 0))) else if C = pid3 then Some (Geq (Function fid1 empty) (Const 0)) else None), SPrograms = (\_. None), SODEs = (\c. if c = vid1 then Some (OProd (OSing vid1 (Function fid1 empty)) (OSing vid2 (trm.Var vid1))) else None) \" definition SystemVSubst::"('sf,'sc,'sz) subst" where "SystemVSubst = \ SFunctions = (\f. None), SPredicates = (\p. if p = vid1 then Some (Geq (Function (Inl fid1) empty) (Const 0)) else None), SContexts = (\_. None), SPrograms = (\a. if a = vid1 then Some (EvolveODE (OProd (OSing vid1 (Function fid1 empty)) (OSing vid2 (Var vid1))) (And TT (Geq (Function fid1 empty) (Const 0)))) else None), SODEs = (\_. None) \" definition SystemVSubst2::"('sf,'sc,'sz) subst" where "SystemVSubst2 = \ SFunctions = (\f. None), SPredicates = (\p. if p = vid1 then Some (Geq (Function (Inl fid1) empty) (Const 0)) else None), SContexts = (\_. None), SPrograms = (\a. if a = vid1 then Some (EvolveODE (OProd (OSing vid1 (Function fid1 empty)) (OSing vid2 (Var vid1))) TT) else None), SODEs = (\_. None) \" definition SystemDESubst::"('sf,'sc,'sz) subst" where "SystemDESubst = \ SFunctions = (\f. if f = fid1 then Some(Function (Inl fid1) empty) else None), SPredicates = (\p. if p = vid2 then Some(And TT (Geq (Function (Inl fid1) empty) (Const 0))) else None), SContexts = (\C. if C = pid1 then Some(Geq (Differential (Var vid1)) (Differential (Const 0))) else None), SPrograms = (\_. None), SODEs = (\_. None) \" lemma systemdesubst_correct:"\ ODE.(([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] ((Geq (Differential (Var vid1)) (Differential (Const 0))))) \ ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (And TT (Geq (f0 fid1) (Const 0)))]] [[DiffAssign vid1 (f0 fid1)]](Geq (Differential (Var vid1)) (Differential (Const 0))))) = Fsubst ((([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1)) ODE) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE ((OProd (OSing vid1 (f1 fid1 vid1))) ODE) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))) SystemDESubst" apply(rule exI[where x="OSing vid2 (trm.Var vid1)"]) by(auto simp add: f0_def f1_def Box_def Or_def Equiv_def empty_def TT_def P_def p1_def SystemDESubst_def empty_def) \ \\[{dx=, dy=x&r>=r&>=r}]r>=r&>=r->[D{x}:=]D{x}>=D{r}->\\ \ \\[{dx=, dy=x&r>=r&>=r}]r>=r&>=r->\\ \ \\[{dx=, dy=x&r>=r&>=r}][D{x}:=]D{x}>=D{r}\\ \ \\([[$\ vid1]]((Predicational pid1) \ (Predicational pid2)))\\ \ \\\ ([[$\ vid1]]Predicational pid1) \ ([[$\ vid1]]Predicational pid2)\\ definition SystemKSubst::"('sf,'sc,'sz) subst" where "SystemKSubst = \ SFunctions = (\f. 
None), SPredicates = (\_. None), SContexts = (\C. if C = pid1 then (Some (And (Geq (Const 0) (Const 0)) (Geq (Function fid1 empty) (Const 0)))) else if C = pid2 then (Some ([[DiffAssign vid1 (Function fid1 empty)]](Geq (Differential (Var vid1)) (Differential (Const 0))))) else None), SPrograms = (\c. if c = vid1 then Some (EvolveODE (OProd (OSing vid1 (Function fid1 empty)) (OSing vid2 (Var vid1))) (And (Geq (Const 0) (Const 0)) (Geq (Function fid1 empty) (Const 0)))) else None), SODEs = (\_. None) \" lemma subst_imp_simp:"Fsubst (Implies p q) \ = (Implies (Fsubst p \) (Fsubst q \))" unfolding Implies_def Or_def by auto lemma subst_equiv_simp:"Fsubst (Equiv p q) \ = (Equiv (Fsubst p \) (Fsubst q \))" unfolding Implies_def Or_def Equiv_def by auto lemma subst_box_simp:"Fsubst (Box p q) \ = (Box (Psubst p \) (Fsubst q \))" unfolding Box_def Or_def by auto lemma pfsubst_box_simp:"PFsubst (Box p q) \ = (Box (PPsubst p \) (PFsubst q \))" unfolding Box_def Or_def by auto lemma pfsubst_imp_simp:"PFsubst (Implies p q) \ = (Implies (PFsubst p \) (PFsubst q \))" unfolding Box_def Implies_def Or_def by auto definition SystemDWSubst::"('sf,'sc,'sz) subst" where "SystemDWSubst = \ SFunctions = (\f. None), SPredicates = (\_. None), SContexts = (\C. if C = pid1 then Some (And (Geq (Const 0) (Const 0)) (Geq (Function fid1 empty) (Const 0))) else None), SPrograms = (\_. None), SODEs = (\c. if c = vid1 then Some (OProd (OSing vid1 (Function fid1 empty)) (OSing vid2 (Var vid1))) else None) \" definition SystemCESubst::"('sf,'sc,'sz) subst" where "SystemCESubst = \ SFunctions = (\f. None), SPredicates = (\_. None), SContexts = (\C. if C = pid1 then Some(Implies(And (Geq (Const 0) (Const 0)) (Geq (Function fid1 empty) (Const 0))) ([[DiffAssign vid1 (Function fid1 empty)]](Predicational (Inr ())))) else None), SPrograms = (\_. None), SODEs = (\_. None) \" lemma SystemCESubstOK: "step_ok ([([],[Equiv (Implies(And (Geq (Const 0) (Const 0)) (Geq (Function fid1 empty) (Const 0))) ([[DiffAssign vid1 (Function fid1 empty)]]( SystemCEFml1))) (Implies(And (Geq (Const 0) (Const 0)) (Geq (Function fid1 empty) (Const 0))) ([[DiffAssign vid1 (Function fid1 empty)]]( (SystemCEFml2)))) ])], ([],[])) 0 (CE SystemCEFml1 SystemCEFml2 SystemCESubst)" apply(rule Step_CE) subgoal by(auto simp add: subst_equiv_simp subst_imp_simp subst_box_simp SystemCESubst_def SystemCEFml1_def SystemCEFml2_def pfsubst_imp_simp pfsubst_box_simp) subgoal using CE1pre_valid by (auto simp add: CEReq_def SystemCEFml1_def SystemCEFml2_def CE1pre_valid) subgoal unfolding SystemCEFml1_def by auto subgoal unfolding SystemCEFml2_def by auto subgoal unfolding SystemCESubst_def ssafe_def Implies_def Box_def Or_def empty_def by auto unfolding SystemCESubst_def Equiv_def Or_def SystemCEFml1_def SystemCEFml2_def TUadmit_def apply (auto simp add: TUadmit_def FUadmit_def Box_def Implies_def Or_def) unfolding PFUadmit_def by auto \ \\[D{x}:=f]Dv{x}>=r<->f>=r\\ \ \\[[DiffAssign vid1 ($f fid1 empty)]] (Prop vid1 (singleton (DiffVar vid1))))\\ \ \\\ Prop vid1 (singleton ($f fid1 empty))\\ definition SystemDiffAssignSubst::"('sf,'sc,'sz) subst" where "SystemDiffAssignSubst = \ SFunctions = (\f. None), SPredicates = (\p. if p = vid1 then Some (Geq (Function (Inr vid1) empty) (Const 0)) else None), SContexts = (\_. None), SPrograms = (\_. None), SODEs = (\_. 
None) \" lemma SystemDICutCorrect:"SystemDICut = Fsubst DIGeqaxiom SystemDISubst" unfolding SystemDICut_def DIGeqaxiom_def SystemDISubst_def by (auto simp add: f1_def p1_def f0_def Implies_def Or_def id_simps TT_def Box_def empty_def) \ \\v\0 \ A()\0 \ [{x'=v, v'=A()}]v\0\\ definition SystemProof :: "('sf, 'sc, 'sz) pf" where "SystemProof = (SystemConcl, [ (0, Rrule ImplyR 0) ,(0, Lrule AndL 0) ,(0, Cut SystemDICut) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(0, Lrule ImplyL 0) ,(Suc (Suc 0), CloseId 0 0) ,(Suc 0, AxSubst ADIGeq SystemDISubst) \ \8\ ,(Suc 0, Rrule ImplyR 0) \<^cancel>\,(0, CloseId 0 0)\ ,(Suc 0, CloseId 1 0) \<^cancel>\,(0, Rrule AndR 0)\ ,(0, Rrule ImplyR 0) ,(0, Cut SystemDCCut) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(0, Lrule EquivBackwardL 0) ,(0, Rrule CohideR 0) ,(0, AxSubst ADC SystemDCSubst) \ \17\ ,(0, CloseId 0 0) ,(0, Rrule CohideRR 0) ,(0, Cut SystemVCut) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(0, Cut SystemDECut) ,(0, Lrule EquivBackwardL 0) ,(0, Rrule CohideRR 0) ,(1, CloseId (Suc 1) 0) \ \Last step\ ,(Suc 1, CloseId 0 0) ,(1, AxSubst AV SystemVSubst) \ \28\ ,(0, Cut SystemVCut2) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(Suc 1, CloseId 0 0) ,(Suc 1, CloseId (Suc 2) 0) ,(Suc 1, AxSubst AV SystemVSubst2) \ \34\ ,(0, Rrule CohideRR 0) ,(0, DEAxiomSchema (OSing vid2 (trm.Var vid1)) SystemDESubst) \ \36\ ,(0, Cut SystemKCut) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(0, Lrule ImplyL 0) ,(0, Rrule CohideRR 0) ,(0, AxSubst AK SystemKSubst) \ \42\ ,(0, CloseId 0 0) ,(0, Rrule CohideR 0) ,(1, AxSubst ADW SystemDWSubst) \ \45\ ,(0, G) ,(0, Cut SystemEquivCut) ,(0, Lrule EquivBackwardL 0) ,(0, Rrule CohideR 0) ,(0, CloseId 0 0) ,(0, Rrule CohideR 0) ,(0, CE SystemCEFml1 SystemCEFml2 SystemCESubst) \ \52\ ,(0, Rrule ImplyR 0) ,(0, Lrule AndL 0) ,(0, Cut SystemDiffAssignCut) ,(0, Lrule EquivBackwardL 0) ,(0, Rrule CohideRR 0) ,(0, CloseId 0 0) ,(0, CloseId 1 0) ,(0, AxSubst Adassign SystemDiffAssignSubst) \ \60\ ])" lemma system_result_correct:"proof_result SystemProof = ([], ([],[Implies (And (Geq (Var vid1) (Const 0)) (Geq (f0 fid1) (Const 0))) ([[EvolveODE (OProd (OSing vid1 (f0 fid1)) (OSing vid2 (Var vid1))) (TT)]]Geq (Var vid1) (Const 0))]))" unfolding SystemProof_def SystemConcl_def Implies_def Or_def f0_def TT_def Equiv_def SystemDICut_def SystemDCCut_def proof_result.simps deriv_result.simps start_proof.simps Box_def SystemDCSubst_def SystemVCut_def SystemDECut_def SystemKCut_def SystemEquivCut_def SystemDiffAssignCut_def SystemVCut2_def (* slow *) apply( simp add: prover) done lemma SystemSound_lemma:"sound (proof_result SystemProof)" apply(rule proof_sound) unfolding SystemProof_def SystemConcl_def CQ1Concl_def CQ2Concl_def Equiv_def CQRightSubst_def diff_const_axiom_valid diff_var_axiom_valid empty_def Or_def expand_singleton diff_var_axiom_def SystemDICut_def (* slow *) apply (auto simp add: prover CEProof_def CEReq_def CQ1Concl_def CQ2Concl_def Equiv_def CQRightSubst_def diff_const_axiom_valid diff_var_axiom_valid empty_def Or_def expand_singleton TUadmit_def NTUadmit_def almost_diff_const CQLeftSubst_def almost_diff_var f0_def TT_def SystemDISubst_def f1_def p1_def SystemDCCut_def SystemDCSubst_def SystemVCut_def SystemDECut_def SystemVSubst_def SystemVCut2_def SystemVSubst2_def SystemDESubst_def P_def SystemKCut_def SystemKSubst_def SystemDWSubst_def SystemEquivCut_def SystemCESubst_def SystemCEFml1_def SystemCEFml2_def CE1pre_valid2 SystemDiffAssignCut_def SystemDiffAssignSubst_def) done lemma system_sound:"sound ([], SystemConcl)" 
using SystemSound_lemma system_result_correct unfolding SystemConcl_def by auto lemma DIAnd_result_correct:"proof_result (proof_take 61 DIAndProof) = DIAnd" unfolding DIAndProof_def DIAndConcl_def Implies_def Or_def proof_result.simps deriv_result.simps start_proof.simps DIAndCutP12_def DIAndSG1_def DIAndSG2_def DIAndCutP1_def Box_def DIAndCut34Elim1_def DIAndCut12Intro_def DIAndCut34Elim2_def DIAnd_def using pne12 pne13 pne14 pne23 pne24 pne34 by (auto) theorem DIAnd_sound: "sound DIAnd" using DIAndSound_lemma DIAnd_result_correct by auto end end diff --git a/thys/Diophantine_Eqns_Lin_Hom/Linear_Diophantine_Equations.thy b/thys/Diophantine_Eqns_Lin_Hom/Linear_Diophantine_Equations.thy --- a/thys/Diophantine_Eqns_Lin_Hom/Linear_Diophantine_Equations.thy +++ b/thys/Diophantine_Eqns_Lin_Hom/Linear_Diophantine_Equations.thy @@ -1,1162 +1,1162 @@ (* Author: Florian Messner Author: Julian Parsert Author: Jonas Schöpf Author: Christian Sternagel License: LGPL *) section \Homogeneous Linear Diophantine Equations\ theory Linear_Diophantine_Equations imports List_Vector begin (*TODO: move*) lemma lcm_div_le: fixes a :: nat shows "lcm a b div b \ a" by (metis div_by_0 div_le_dividend div_le_mono div_mult_self_is_m lcm_nat_def neq0_conv) (*TODO: move*) lemma lcm_div_le': fixes a :: nat shows "lcm a b div a \ b" by (metis lcm.commute lcm_div_le) (*TODO: move*) lemma lcm_div_gt_0: fixes a :: nat assumes "a > 0" and "b > 0" shows "lcm a b div a > 0" proof - have "lcm a b = (a * b) div (gcd a b)" using lcm_nat_def by blast moreover have "\ > 0" using assms by (metis assms calculation lcm_pos_nat) ultimately show ?thesis using assms by (metis div_by_0 div_mult2_eq div_positive gcd_le2_nat nat_mult_div_cancel_disj neq0_conv semiring_normalization_rules(7)) qed (*TODO: move*) lemma sum_list_list_update_Suc: assumes "i < length u" shows "sum_list (u[i := Suc (u ! i)]) = Suc (sum_list u)" using assms proof (induct u arbitrary: i) case (Cons x xs) then show ?case by (simp_all split: nat.splits) qed (simp) (*TODO: move*) lemma lessThan_conv: assumes "card A = n" and "\x\A. x < n" shows "A = {.. Given a non-empty list \xs\ of \n\ natural numbers, either there is a value in \xs\ that is \0\ modulo \n\, or there are two values whose moduli coincide. \ lemma list_mod_cases: assumes "length xs = n" and "n > 0" shows "(\x\set xs. x mod n = 0) \ (\ij j \ (xs ! i) mod n = (xs ! j) mod n)" proof - let ?f = "\x. x mod n" and ?X = "set xs" have *: "\x \ ?f ` ?X. x < n" using \n > 0\ by auto consider (eq) "card (?f ` ?X) = card ?X" | (less) "card (?f ` ?X) < card ?X" using antisym_conv2 and card_image_le by blast then show ?thesis proof (cases) case eq show ?thesis proof (cases "distinct xs") assume "distinct xs" with eq have "card (?f ` ?X) = n" using \distinct xs\ by (simp add: assms card_distinct distinct_card) from lessThan_conv [OF this *] and \n > 0\ have "\x\set xs. x mod n = 0" by (metis imageE lessThan_iff) then show ?thesis .. next assume "\ distinct xs" then show ?thesis by (auto) (metis distinct_conv_nth) qed next case less from pigeonhole [OF this] show ?thesis by (auto simp: inj_on_def iff: in_set_conv_nth) qed qed text \ Homogeneous linear Diophantine equations: \a\<^sub>1x\<^sub>1 + \ + a\<^sub>mx\<^sub>m = b\<^sub>1y\<^sub>1 + \ + b\<^sub>ny\<^sub>n\ \ locale hlde_ops = fixes a b :: "nat list" begin abbreviation "m \ length a" abbreviation "n \ length b" \ \The set of all solutions.\ definition Solutions :: "(nat list \ nat list) set" where "Solutions = {(x, y). 
a \ x = b \ y \ length x = m \ length y = n}" lemma in_Solutions_iff: "(x, y) \ Solutions \ length x = m \ length y = n \ a \ x = b \ y" by (auto simp: Solutions_def) \ \The set of pointwise minimal solutions.\ definition Minimal_Solutions :: "(nat list \ nat list) set" where "Minimal_Solutions = {(x, y) \ Solutions. nonzero x \ \ (\(u, v) \ Solutions. nonzero u \ u @ v <\<^sub>v x @ y)}" definition dij :: "nat \ nat \ nat" where "dij i j = lcm (a ! i) (b ! j) div (a ! i)" definition eij :: "nat \ nat \ nat" where "eij i j = lcm (a ! i) (b ! j) div (b ! j)" definition sij :: "nat \ nat \ (nat list \ nat list)" where "sij i j = ((zeroes m)[i := dij i j], (zeroes n)[j := eij i j])" subsection \Further Constraints on Minimal Solutions\ definition Ej :: "nat \ nat list \ nat set" where "Ej j x = { eij i j - 1 | i. i < length x \ x ! i \ dij i j }" definition Di :: "nat \ nat list \ nat set" where "Di i y = { dij i j - 1 | j. j < length y \ y ! j \ eij i j }" definition Di' :: "nat \ nat list \ nat set" where "Di' i y = { dij i (j + length b - length y) - 1 | j. j < length y \ y ! j \ eij i (j + length b - length y) }" lemma Ej_take_subset: "Ej j (take k x) \ Ej j x" by (auto simp: Ej_def) lemma Di_take_subset: "Di i (take l y) \ Di i y" by (auto simp: Di_def) lemma Di'_drop_subset: "Di' i (drop l y) \ Di' i y" by (auto simp: Di'_def) (metis add.assoc add.commute less_diff_conv) lemma finite_Ej: "finite (Ej j x)" by (rule finite_subset [of _ "(\i. eij i j - 1) ` {0 ..< length x}"]) (auto simp: Ej_def) lemma finite_Di: "finite (Di i y)" by (rule finite_subset [of _ "(\j. dij i j - 1) ` {0 ..< length y}"]) (auto simp: Di_def) lemma finite_Di': "finite (Di' i y)" by (rule finite_subset [of _ "(\j. dij i (j + length b - length y) - 1) ` {0 ..< length y}"]) (auto simp: Di'_def) definition max_y :: "nat list \ nat \ nat" where "max_y x j = (if j < n \ Ej j x \ {} then Min (Ej j x) else Max (set a))" definition max_x :: "nat list \ nat \ nat" where "max_x y i = (if i < m \ Di i y \ {} then Min (Di i y) else Max (set b))" definition max_x' :: "nat list \ nat \ nat" where "max_x' y i = (if i < m \ Di' i y \ {} then Min (Di' i y) else Max (set b))" lemma Min_Ej_le: assumes "j < n" and "e \ Ej j x" and "length x \ m" shows "Min (Ej j x) \ Max (set a)" (is "?m \ _") proof - have "?m \ Ej j x" using assms and finite_Ej and Min_in by blast then obtain i where i: "?m = eij i j - 1" "i < length x" "x ! i \ dij i j" by (auto simp: Ej_def) have "lcm (a ! i) (b ! j) div b ! j \ a ! i" by (rule lcm_div_le) then show ?thesis using i and assms by (auto simp: eij_def) (meson List.finite_set Max_ge diff_le_self le_trans less_le_trans nth_mem) qed lemma Min_Di_le: assumes "i < m" and "e \ Di i y" and "length y \ n" shows "Min (Di i y) \ Max (set b)" (is "?m \ _") proof - have "?m \ Di i y" using assms and finite_Di and Min_in by blast then obtain j where j: "?m = dij i j - 1" "j < length y" "y ! j \ eij i j" by (auto simp: Di_def) have "lcm (a ! i) (b ! j) div a ! i \ b ! j" by (rule lcm_div_le') then show ?thesis using j and assms by (auto simp: dij_def) (meson List.finite_set Max_ge diff_le_self le_trans less_le_trans nth_mem) qed lemma Min_Di'_le: assumes "i < m" and "e \ Di' i y" and "length y \ n" shows "Min (Di' i y) \ Max (set b)" (is "?m \ _") proof - have "?m \ Di' i y" using assms and finite_Di' and Min_in by blast then obtain j where j: "?m = dij i (j + length b - length y) - 1" "j < length y" "y ! 
j \ eij i (j + length b - length y)" by (auto simp: Di'_def) then have "j + length b - length y < length b" using assms by auto moreover have "lcm (a ! i) (b ! (j + length b - length y)) div a ! i \ b ! (j + length b - length y)" by (rule lcm_div_le') ultimately show ?thesis using j and assms by (auto simp: dij_def) (meson List.finite_set Max_ge diff_le_self le_trans less_le_trans nth_mem) qed lemma max_y_le_take: assumes "length x \ m" shows "max_y x j \ max_y (take k x) j" using assms and Min_Ej_le and Ej_take_subset and Min.subset_imp [OF _ _ finite_Ej] by (auto simp: max_y_def) blast lemma max_x_le_take: assumes "length y \ n" shows "max_x y i \ max_x (take l y) i" using assms and Min_Di_le and Di_take_subset and Min.subset_imp [OF _ _ finite_Di] by (auto simp: max_x_def) blast lemma max_x'_le_drop: assumes "length y \ n" shows "max_x' y i \ max_x' (drop l y) i" using assms and Min_Di'_le and Di'_drop_subset and Min.subset_imp [OF _ _ finite_Di'] by (auto simp: max_x'_def) blast end abbreviation "Solutions \ hlde_ops.Solutions" abbreviation "Minimal_Solutions \ hlde_ops.Minimal_Solutions" abbreviation "dij \ hlde_ops.dij" abbreviation "eij \ hlde_ops.eij" abbreviation "sij \ hlde_ops.sij" declare hlde_ops.dij_def [code] declare hlde_ops.eij_def [code] declare hlde_ops.sij_def [code] lemma Solutions_sym: "(x, y) \ Solutions a b \ (y, x) \ Solutions b a" by (auto simp: hlde_ops.in_Solutions_iff) lemma Minimal_Solutions_imp_Solutions: "(x, y) \ Minimal_Solutions a b \ (x, y) \ Solutions a b" by (auto simp: hlde_ops.Minimal_Solutions_def) lemma Minimal_SolutionsI: assumes "(x, y) \ Solutions a b" and "nonzero x" and "\ (\(u, v) \ Solutions a b. nonzero u \ u @ v <\<^sub>v x @ y)" shows "(x, y) \ Minimal_Solutions a b" using assms by (auto simp: hlde_ops.Minimal_Solutions_def) lemma minimize_nonzero_solution: assumes "(x, y) \ Solutions a b" and "nonzero x" obtains u and v where "u @ v \\<^sub>v x @ y" and "(u, v) \ Minimal_Solutions a b" using assms proof (induct "x @ y" arbitrary: x y thesis rule: wf_induct [OF wf_less]) case 1 then show ?case proof (cases "(x, y) \ Minimal_Solutions a b") case False then obtain u and v where "nonzero u" and "(u, v) \ Solutions a b" and uv: "u @ v <\<^sub>v x @ y" using 1(3,4) by (auto simp: hlde_ops.Minimal_Solutions_def) with 1(1) [rule_format, of "u @ v" u v] obtain u' and v' where uv': "u' @ v' \\<^sub>v u @ v" and "(u', v') \ Minimal_Solutions a b" by blast moreover have "u' @ v' \\<^sub>v x @ y" using uv and uv' by auto ultimately show ?thesis by (intro 1(2)) qed blast qed lemma Minimal_SolutionsI': assumes "(x, y) \ Solutions a b" and "nonzero x" and "\ (\(u, v) \ Minimal_Solutions a b. u @ v <\<^sub>v x @ y)" shows "(x, y) \ Minimal_Solutions a b" proof (rule Minimal_SolutionsI [OF assms(1,2)]) show "\ (\(u, v) \ Solutions a b. nonzero u \ u @ v <\<^sub>v x @ y)" proof assume "\(u, v) \ Solutions a b. 
nonzero u \ u @ v <\<^sub>v x @ y" then obtain u and v where "(u, v) \ Solutions a b" and "nonzero u" and uv: "u @ v <\<^sub>v x @ y" by blast then obtain u' and v' where "(u', v') \ Minimal_Solutions a b" and uv': "u' @ v' \\<^sub>v u @ v" by (blast elim: minimize_nonzero_solution) moreover have "u' @ v' <\<^sub>v x @ y" using uv and uv' by auto ultimately show False using assms by blast qed qed lemma Minimal_Solutions_length: "(x, y) \ Minimal_Solutions a b \ length x = length a \ length y = length b" by (auto simp: hlde_ops.Minimal_Solutions_def hlde_ops.in_Solutions_iff) lemma Minimal_Solutions_gt0: "(x, y) \ Minimal_Solutions a b \ zeroes (length x) <\<^sub>v x" using zero_less by (auto simp: hlde_ops.Minimal_Solutions_def) lemma Minimal_Solutions_sym: assumes "0 \ set a" and "0 \ set b" shows "(xs, ys) \ Minimal_Solutions a b \ (ys, xs) \ Minimal_Solutions b a" using assms by (auto simp: hlde_ops.Minimal_Solutions_def hlde_ops.Solutions_def dest: dotprod_eq_nonzero_iff dest!: less_append_swap [of _ _ ys xs]) locale hlde = hlde_ops + assumes no0: "0 \ set a" "0 \ set b" begin lemma nonzero_Solutions_iff: assumes "(x, y) \ Solutions" shows "nonzero x \ nonzero y" using assms and no0 by (auto simp: in_Solutions_iff dest: dotprod_eq_nonzero_iff) lemma Minimal_Solutions_min: assumes "(x, y) \ Minimal_Solutions" and "u @ v <\<^sub>v x @ y" and "a \ u = b \ v" and [simp]: "length u = m" and non0: "nonzero (u @ v)" shows False proof - have [simp]: "length v = n" using assms by (force dest: less_appendD Minimal_Solutions_length) have "(u, v) \ Solutions" using \a \ u = b \ v\ by (simp add: in_Solutions_iff) moreover from nonzero_Solutions_iff [OF this] have "nonzero u" using non0 by auto ultimately show False using assms by (auto simp: hlde_ops.Minimal_Solutions_def) qed lemma Solutions_snd_not_0: assumes "(x, y) \ Solutions" and "nonzero x" shows "nonzero y" using assms by (metis nonzero_Solutions_iff) end subsection \Pointwise Restricting Solutions\ text \ Constructing the list of \u\ vectors from Huet's proof @{cite "Huet1978"}, satisfying \<^item> \\i y ! i\ and \<^item> \0 < sum_list u \ a\<^sub>k\. \ text \ Given \y\, increment a "previous" \u\ vector at first position starting from \i\ where \u\ is strictly smaller than \y\. If this is not possible, return \u\ unchanged. \ function inc :: "nat list \ nat \ nat list \ nat list" where "inc y i u = (if i < length y then if u ! i < y ! i then u[i := u ! i + 1] else inc y (Suc i) u else u)" by (pat_completeness) auto termination inc by (relation "measure (\(y, i, u). max (length y) (length u) - i)") auto (*inc.simps may cause simplification to loop*) declare inc.simps [simp del] text \ Starting from the 0-vector produce \u\s by iteratively incrementing with respect to \y\. 
\ definition huets_us :: "nat list \ nat \ nat list" ("\<^bold>u" 1000) where "\<^bold>u y i = ((inc y 0) ^^ Suc i) (zeroes (length y))" lemma huets_us_simps [simp]: "\<^bold>u y 0 = inc y 0 (zeroes (length y))" "\<^bold>u y (Suc i) = inc y 0 (\<^bold>u y i)" by (auto simp: huets_us_def) lemma length_inc [simp]: "length (inc y i u) = length u" by (induct y i u rule: inc.induct) (simp add: inc.simps) lemma length_us [simp]: "length (\<^bold>u y i) = length y" by (induct i) (simp_all) text \ \inc\ produces vectors that are pointwise smaller than \y\ \ lemma inc_le: assumes "length u = length y" and "i < length y" and "u \\<^sub>v y" shows "inc y i u \\<^sub>v y" using assms by (induct y i u rule: inc.induct) (auto simp: inc.simps nth_list_update less_eq_def) lemma us_le: assumes "length y > 0" shows "\<^bold>u y i \\<^sub>v y" using assms by (induct i) (auto simp: inc_le le_length) lemma sum_list_inc_le: "u \\<^sub>v y \ sum_list (inc y i u) \ sum_list y" by (induct y i u rule: inc.induct) (auto simp: inc.simps intro: le_sum_list_mono) lemma sum_list_inc_gt0: assumes "sum_list u > 0" and "length y = length u" shows "sum_list (inc y i u) > 0" using assms proof (induct y i u rule: inc.induct) case (1 y i u) then show ?case by (auto simp add: inc.simps) (meson Suc_neq_Zero gr_zeroI set_update_memI sum_list_eq_0_iff) qed lemma sum_list_inc_gt0': assumes "length u = length y" and "i < length y" and "y ! i > 0" and "j \ i" shows "sum_list (inc y j u) > 0" using assms proof (induct y j u rule: inc.induct) case (1 y i u) then show ?case by (auto simp: inc.simps [of y i] sum_list_update) (metis elem_le_sum_list le_antisym le_zero_eq neq0_conv not_less_eq_eq sum_list_inc_gt0) qed lemma sum_list_us_gt0: assumes "sum_list y \ 0" shows "0 < sum_list (\<^bold>u y i)" using assms by (induct i) (auto simp: in_set_conv_nth sum_list_inc_gt0' sum_list_inc_gt0) lemma sum_list_inc_le': assumes "length u = length y" shows "sum_list (inc y i u) \ sum_list u + 1" using assms by (induct y i u rule: inc.induct) (auto simp: inc.simps sum_list_update) lemma sum_list_us_le: "sum_list (\<^bold>u y i) \ i + 1" proof (induct i) case 0 then show ?case by (auto simp: sum_list_update) (metis Suc_eq_plus1 in_set_replicate length_replicate sum_list_eq_0_iff sum_list_inc_le') next case (Suc i) then show ?case by auto (metis Suc_le_mono add.commute le_trans length_us plus_1_eq_Suc sum_list_inc_le') qed lemma sum_list_us_bounded: assumes "i < k" shows "sum_list (\<^bold>u y i) \ k" using assms and sum_list_us_le [of y i] by force lemma sum_list_inc_eq_sum_list_Suc: assumes "length u = length y" and "i < length y" and "\j\i. j < length y \ u ! j < y ! 
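As an informal illustration of inc and huets_us (written \<^bold>u above), the following Python sketch rebuilds both functions iteratively and checks them on a small example. It is not part of the development; inc and huets_u are ad-hoc names.

```python
def inc(y, i, u):
    # increment u at the first position >= i where u is still strictly below y;
    # if there is no such position, return u unchanged
    u = list(u)
    for k in range(i, len(y)):
        if u[k] < y[k]:
            u[k] += 1
            break
    return u

def huets_u(y, i):
    # \<^bold>u y i: apply (inc y 0) exactly i + 1 times to the zero vector
    u = [0] * len(y)
    for _ in range(i + 1):
        u = inc(y, 0, u)
    return u

y = [2, 0, 1]
assert [huets_u(y, i) for i in range(3)] == [[1, 0, 0], [2, 0, 0], [2, 0, 1]]
# each \<^bold>u y i stays pointwise below y (us_le) and, while i < sum_list y,
# has sum i + 1 (sum_list_us_eq)
```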
j" shows "sum_list (inc y i u) = Suc (sum_list u)" using assms by (induct y i u rule: inc.induct) (metis inc.simps Suc_eq_plus1 Suc_leI antisym_conv2 leD sum_list_list_update_Suc) lemma sum_list_us_eq: assumes "i < sum_list y" shows "sum_list (\<^bold>u y i) = i + 1" using assms proof (induct i) case (Suc i) then show ?case by (auto) (metis (no_types, lifting) Suc_eq_plus1 gr_implies_not0 length_pos_if_in_set length_us less_Suc_eq_le less_imp_le_nat antisym_conv2 not_less_eq_eq sum_list_eq_0_iff sum_list_inc_eq_sum_list_Suc sum_list_less_diff_Ex us_le) qed (metis Suc_eq_plus1 Suc_leI antisym_conv gr_implies_not0 sum_list_us_gt0 sum_list_us_le) lemma inc_ge: "length u = length y \ u \\<^sub>v inc y i u" by (induct y i u rule: inc.induct) (auto simp: inc.simps nth_list_update less_eq_def) lemma us_le_mono: assumes "i < j" shows "\<^bold>u y i \\<^sub>v \<^bold>u y j" using assms proof (induct "j - i" arbitrary: j i) case (Suc n) then show ?case by (simp add: Suc.prems inc_ge order.strict_implies_order order_vec.lift_Suc_mono_le) qed simp lemma us_mono: assumes "i < j" and "j < sum_list y" shows "\<^bold>u y i <\<^sub>v \<^bold>u y j" proof - let ?u = "\<^bold>u y i" and ?v = "\<^bold>u y j" have "?u \\<^sub>v ?v" using us_le_mono [OF \i < j\] by simp moreover have "sum_list ?u < sum_list ?v" using assms by (auto simp: sum_list_us_eq) ultimately show ?thesis by (intro le_sum_list_less) (auto simp: less_eq_def) qed context hlde begin lemma max_coeff_bound_right: assumes "(xs, ys) \ Minimal_Solutions" shows "\x \ set xs. x \ maxne0 ys b" (is "\x\set xs. x \ ?m") proof (rule ccontr) assume "\ ?thesis" then obtain k where k_def: "k < length xs \ \ (xs ! k \ ?m)" by (metis in_set_conv_nth) have sol: "(xs, ys) \ Solutions" using assms Minimal_Solutions_def by auto then have len: "m = length xs" by (simp add: in_Solutions_iff) have max_suml: "?m * sum_list ys \ b \ ys" using maxne0_times_sum_list_gt_dotprod sol by (auto simp: in_Solutions_iff) then have is_sol: "b \ ys = a \ xs" using sol by (auto simp: in_Solutions_iff) then have a_ge_ak: "a \ xs \ a ! k * xs ! k" using dotprod_pointwise_le k_def len by auto then have ak_gt_max: "a ! k * xs ! k > a ! k * ?m" using no0 in_set_conv_nth k_def len by fastforce then have sl_ys_g_ak: "sum_list ys > a ! k" by (metis a_ge_ak is_sol less_le_trans max_suml mult.commute mult_le_mono1 not_le) define Seq where Seq_def: "Seq = map (\<^bold>u ys) [0 ..< a ! k]" have ak_n0: "a ! k \ 0" using \a ! k * ?m < a ! k * xs ! k\ by auto have "zeroes (length ys) <\<^sub>v ys" by (intro zero_less) (metis gr_implies_not0 nonzero_iff sl_ys_g_ak sum_list_eq_0_iff) then have "length Seq > 0" using ak_n0 Seq_def by auto have u_in_nton: "\u \ set Seq. length u = length ys" by (simp add: Seq_def) have prop_3: "\u \ set Seq. u \\<^sub>v ys" proof - have "length ys > 0" using sl_ys_g_ak by auto then show ?thesis using us_le [of ys ] less_eq_def Seq_def by (simp) qed have prop_4_1: "\u \ set Seq. sum_list u > 0" by (metis Seq_def sl_ys_g_ak gr_implies_not_zero imageE set_map sum_list_us_gt0) have prop_4_2: "\u \ set Seq. sum_list u \ a ! k" by (simp add: Seq_def sum_list_us_bounded) have prop_5: "\u. length u = length ys \ u \\<^sub>v ys \ sum_list u > 0 \ sum_list u \ a ! k" using \0 < length Seq\ nth_mem prop_3 prop_4_1 prop_4_2 u_in_nton by blast define Us where "Us = {u. length u = length ys \ u \\<^sub>v ys \ sum_list u > 0 \ sum_list u \ a ! k}" have "\u \ Us. b \ u mod a ! 
k = 0" proof (rule ccontr) assume neg_th: "\ ?thesis" define Seq_p where "Seq_p = map (dotprod b) Seq" have "length Seq = a ! k" by (simp add: Seq_def) then consider (eq_0) "(\x\set Seq_p. x mod (a ! k) = 0)" | (not_0) "(\ij j \ (Seq_p ! i) mod (a!k) = (Seq_p ! j) mod (a!k))" - using list_mod_cases[of Seq_p] Seq_p_def ak_n0 by auto + using list_mod_cases[of Seq_p] Seq_p_def ak_n0 by auto force then show False proof (cases) case eq_0 have "\u \ set Seq. b \ u mod a ! k = 0" using Seq_p_def eq_0 by auto then show False by (metis (mono_tags, lifting) Us_def mem_Collect_eq neg_th prop_3 prop_4_1 prop_4_2 u_in_nton) next case not_0 obtain i and j where i_j: "i j" " Seq_p ! i mod a ! k = Seq_p ! j mod a ! k" using not_0 by blast define v where v_def: "v = Seq!i" define w where w_def: "w = Seq!j" have mod_eq: "b \ v mod a!k = b \ w mod a!k" using Seq_p_def i_j w_def v_def i_j by auto have "v <\<^sub>v w \ w <\<^sub>v v" using \i \ j\ and i_j proof (cases "i < j") case True then show ?thesis using Seq_p_def sl_ys_g_ak i_j(2) local.Seq_def us_mono v_def w_def by auto next case False then show ?thesis using Seq_p_def sl_ys_g_ak \i \ j\ i_j(1) local.Seq_def us_mono v_def w_def by auto qed then show False proof assume ass: "v <\<^sub>v w" define u where u_def: "u = w -\<^sub>v v" have "w \\<^sub>v ys" using Seq_p_def w_def i_j(2) prop_3 by force then have prop_3: "less_eq u ys" using vdiff_le ass less_eq_def order_vec.less_imp_le u_def by auto have prop_4_1: "sum_list u > 0" using le_sum_list_mono [of v w] ass u_def sum_list_vdiff_distr [of v w] by (simp add: less_vec_sum_list_less) have prop_4_2: "sum_list u \ a ! k" proof - have "u \\<^sub>v w" using u_def using ass less_eq_def order_vec.less_imp_le vdiff_le by auto then show ?thesis by (metis Seq_p_def i_j(2) length_map le_sum_list_mono less_le_trans not_le nth_mem prop_4_2 w_def) qed have "b \ u mod a ! k = 0" by (metis (mono_tags, lifting) in_Solutions_iff \w \\<^sub>v ys\ u_def ass no0(2) less_eq_def mem_Collect_eq mod_eq mods_with_vec_2 prod.simps(2) sol) then show False using neg_th by (metis (mono_tags, lifting) Us_def less_eq_def mem_Collect_eq prop_3 prop_4_1 prop_4_2) next assume ass: "w <\<^sub>v v" define u where u_def: "u = v -\<^sub>v w" have "v \\<^sub>v ys" using Seq_p_def v_def i_j(1) prop_3 by force then have prop_3: "u \\<^sub>v ys" using vdiff_le ass less_eq_def order_vec.less_imp_le u_def by auto have prop_4_1: "sum_list u > 0" using le_sum_list_mono [of w v] sum_list_vdiff_distr [of w v] \u \ v -\<^sub>v w\ ass less_vec_sum_list_less by auto have prop_4_2: "sum_list u \ a!k" proof - have "u \\<^sub>v v" using u_def using ass less_eq_def order_vec.less_imp_le vdiff_le by auto then show ?thesis by (metis Seq_p_def i_j(1) le_neq_implies_less length_map less_imp_le_nat less_le_trans nth_mem prop_4_2 le_sum_list_mono v_def) qed have "b \ u mod a ! k = 0" by (metis (mono_tags, lifting) in_Solutions_iff \v \\<^sub>v ys\ u_def ass no0(2) less_eq_def mem_Collect_eq mod_eq mods_with_vec_2 prod.simps(2) sol) then show False by (metis (mono_tags, lifting) neg_th Us_def less_eq_def mem_Collect_eq prop_3 prop_4_1 prop_4_2) qed qed qed then obtain u where u3_4: "u \\<^sub>v ys" "sum_list u > 0" "sum_list u \ a ! k" " b \ u mod (a ! k) = 0" "length u = length ys" unfolding Us_def by auto have u_b_len: "length u = n" using less_eq_def u3_4 in_Solutions_iff sol by simp have "b \ u \ maxne0 u b * sum_list u" by (simp add: maxne0_times_sum_list_gt_dotprod u_b_len) also have "... \ ?m * a ! 
k" by (intro mult_le_mono) (simp_all add: u3_4 maxne0_mono) also have "... < a ! k * xs ! k" using ak_gt_max by auto then obtain zk where zk: "b \ u = zk * a ! k" using u3_4(4) by auto have "length xs > k" by (simp add: k_def) have "zk \ 0" proof - have "\e \ set u. e \ 0" using u3_4 by (metis neq0_conv sum_list_eq_0_iff) then have "b \ u > 0" using assms no0 u3_4 unfolding dotprod_gt0_iff[OF u_b_len [symmetric]] by (fastforce simp add: in_set_conv_nth u_b_len) then have "a ! k > 0" using \a ! k \ 0\ by blast then show ?thesis using \0 < b \ u\ zk by auto qed define z where z_def: "z = (zeroes (length xs))[k := zk]" then have zk_zk: "z ! k = zk" by (auto simp add: \k < length xs\) have "length z = length xs" using assms z_def \k < length xs\ by auto then have bu_eq_akzk: "b \ u = a ! k * z ! k" by (simp add: \b \ u = zk * a ! k\ zk_zk) then have "z!k < xs!k" using ak_gt_max calculation by auto then have z_less_xs: "z <\<^sub>v xs" by (auto simp add: z_def) (metis \k < length xs\ le0 le_list_update less_def less_imp_le order_vec.dual_order.antisym nat_neq_iff z_def zk_zk) then have "z @ u <\<^sub>v xs @ ys" by (intro less_append) (auto simp add: u3_4(1) z_less_xs) moreover have "(z, u) \ Solutions" by (auto simp add: bu_eq_akzk in_Solutions_iff z_def u_b_len \k < length xs\ len) moreover have "nonzero z" using \length z = length xs\ and \zk \ 0\ and k_def and zk_zk by (auto simp: nonzero_iff) ultimately show False using assms by (auto simp: Minimal_Solutions_def) qed text \Proof of Lemma 1 of Huet's paper.\ lemma max_coeff_bound: assumes "(xs, ys) \ Minimal_Solutions" shows "(\x \ set xs. x \ maxne0 ys b) \ (\y \ set ys. y \ maxne0 xs a)" proof - interpret ba: hlde b a by (standard) (auto simp: no0) show ?thesis using assms and Minimal_Solutions_sym [OF no0, of xs ys] by (auto simp: max_coeff_bound_right ba.max_coeff_bound_right) qed lemma max_coeff_bound': assumes "(x, y) \ Minimal_Solutions" shows "\i Max (set b)" and "\j Max (set a)" using max_coeff_bound [OF assms] and maxne0_le_Max by auto (metis le_eq_less_or_eq less_le_trans nth_mem)+ lemma Minimal_Solutions_alt_def: "Minimal_Solutions = {(x, y)\Solutions. (x, y) \ (zeroes m, zeroes n) \ x \\<^sub>v replicate m (Max (set b)) \ y \\<^sub>v replicate n (Max (set a)) \ \ (\(u, v)\Solutions. nonzero u \ u @ v <\<^sub>v x @ y)}" by (auto simp: not_nonzero_iff Minimal_Solutions_imp_Solutions less_eq_def Minimal_Solutions_length max_coeff_bound' intro!: Minimal_SolutionsI' dest: Minimal_Solutions_gt0) (auto simp: Minimal_Solutions_def nonzero_Solutions_iff not_nonzero_iff) subsection \Special Solutions\ definition Special_Solutions :: "(nat list \ nat list) set" where "Special_Solutions = {sij i j | i j. i < m \ j < n}" lemma dij_neq_0: assumes "i < m" and "j < n" shows "dij i j \ 0" proof - have "a ! i > 0" and "b ! j > 0" using assms and no0 by (simp_all add: in_set_conv_nth) then have "dij i j > 0" using lcm_div_gt_0 [of "a ! i" "b ! j"] by (simp add: dij_def) then show ?thesis by simp qed lemma eij_neq_0: assumes "i < m" and "j < n" shows "eij i j \ 0" proof - have "a ! i > 0" and "b ! j > 0" using assms and no0 by (simp_all add: in_set_conv_nth) then have "eij i j > 0" using lcm_div_gt_0[of "b ! j" "a ! 
i"] by (simp add: eij_def lcm.commute) then show ?thesis by simp qed lemma Special_Solutions_in_Solutions: "x \ Special_Solutions \ x \ Solutions" by (auto simp: in_Solutions_iff Special_Solutions_def sij_def dij_def eij_def) lemma Special_Solutions_in_Minimal_Solutions: assumes "(x, y) \ Special_Solutions" shows "(x, y) \ Minimal_Solutions" proof (intro Minimal_SolutionsI') show "(x, y) \ Solutions" by (fact Special_Solutions_in_Solutions [OF assms]) then have [simp]: "length x = m" "length y = n" by (auto simp: in_Solutions_iff) show "nonzero x" using assms and dij_neq_0 by (auto simp: Special_Solutions_def sij_def nonzero_iff) (metis length_replicate set_update_memI) show "\ (\(u, v)\Minimal_Solutions. u @ v <\<^sub>v x @ y)" proof assume "\(u, v)\Minimal_Solutions. u @ v <\<^sub>v x @ y" then obtain u and v where uv: "(u, v) \ Minimal_Solutions" and "u @ v <\<^sub>v x @ y" and [simp]: "length u = m" "length v = n" and "nonzero u" by (auto simp: Minimal_Solutions_def in_Solutions_iff) then consider "u <\<^sub>v x" and "v \\<^sub>v y" | "v <\<^sub>v y" and "u \\<^sub>v x" by (auto elim: less_append_cases) then show False proof (cases) case 1 then obtain i and j where ij: "i < m" "j < n" and less_dij: "u ! i < dij i j" and "u \\<^sub>v (zeroes m)[i := dij i j]" and "v \\<^sub>v (zeroes n)[j := eij i j]" using assms by (auto simp: Special_Solutions_def sij_def unit_less) then have u: "u = (zeroes m)[i := u ! i]" and v: "v = (zeroes n)[j := v ! j]" by (auto simp: less_eq_def list_eq_iff_nth_eq) (metis le_zero_eq length_list_update length_replicate rep_upd_unit)+ then have "u ! i > 0" using \nonzero u\ and ij by (metis gr_implies_not0 neq0_conv unit_less zero_less) define c where "c = a ! i * u ! i" then have ac: "a ! i dvd c" by simp have "a \ u = b \ v" using uv by (auto simp: Minimal_Solutions_def in_Solutions_iff) then have "c = b ! j * v ! j" using ij unfolding c_def by (subst (asm) u, subst (asm)v, subst u, subst v) auto then have bc: "b ! j dvd c" by simp have "a ! i * u ! i < a ! i * dij i j" using less_dij and no0 and ij by (auto simp: in_set_conv_nth) then have "c < lcm (a ! i) (b ! j)" by (auto simp: dij_def c_def) moreover have "lcm (a ! i) (b ! j) dvd c" by (simp add: ac bc) moreover have "c > 0" using \u ! i > 0\ and no0 and ij by (auto simp: c_def in_set_conv_nth) ultimately show False using ac and bc by (auto dest: nat_dvd_not_less) next case 2 then obtain i and j where ij: "i < m" "j < n" and less_dij: "v ! j < eij i j" and "u \\<^sub>v (zeroes m)[i := dij i j]" and "v \\<^sub>v (zeroes n)[j := eij i j]" using assms by (auto simp: Special_Solutions_def sij_def unit_less) then have u: "u = (zeroes m)[i := u ! i]" and v: "v = (zeroes n)[j := v ! j]" by (auto simp: less_eq_def list_eq_iff_nth_eq) (metis le_zero_eq length_list_update length_replicate rep_upd_unit)+ moreover have "nonzero v" using \nonzero u\ and \(u, v) \ Minimal_Solutions\ and Minimal_Solutions_imp_Solutions Solutions_snd_not_0 by blast ultimately have "v ! j > 0" using ij by (metis gr_implies_not0 neq0_conv unit_less zero_less) define c where "c = b ! j * v ! j" then have bc: "b ! j dvd c" by simp have "a \ u = b \ v" using uv by (auto simp: Minimal_Solutions_def in_Solutions_iff) then have "c = a ! i * u ! i" using ij unfolding c_def by (subst (asm) u, subst (asm)v, subst u, subst v) auto then have ac: "a ! i dvd c" by simp have "b ! j * v ! j < b ! j * eij i j" using less_dij and no0 and ij by (auto simp: in_set_conv_nth) then have "c < lcm (a ! i) (b ! 
j)" by (auto simp: eij_def c_def) moreover have "lcm (a ! i) (b ! j) dvd c" by (simp add: ac bc) moreover have "c > 0" using \v ! j > 0\ and no0 and ij by (auto simp: c_def in_set_conv_nth) ultimately show False using ac and bc by (auto dest: nat_dvd_not_less) qed qed qed (*Lemma 2 of Huet*) lemma non_special_solution_non_minimal: assumes "(x, y) \ Solutions - Special_Solutions" and ij: "i < m" "j < n" and "x ! i \ dij i j" and "y ! j \ eij i j" shows "(x, y) \ Minimal_Solutions" proof assume min: "(x, y) \ Minimal_Solutions" moreover have "sij i j \ Solutions" using ij by (intro Special_Solutions_in_Solutions) (auto simp: Special_Solutions_def) moreover have "(case sij i j of (u, v) \ u @ v) <\<^sub>v x @ y" using assms and min apply (cases "sij i j") apply (auto simp: sij_def Special_Solutions_def) by (metis List_Vector.le0 Minimal_Solutions_length le_append le_list_update less_append order_vec.dual_order.strict_iff_order same_append_eq) moreover have "(case sij i j of (u, v) \ nonzero u)" apply (auto simp: sij_def) by (metis dij_neq_0 ij length_replicate nonzero_iff set_update_memI) ultimately show False by (auto simp: Minimal_Solutions_def) qed subsection \Huet's conditions\ (*A*) definition "cond_A xs ys \ (\x\set xs. x \ maxne0 ys b)" (*B*) definition "cond_B x \ (\k\m. take k a \ take k x \ b \ map (max_y (take k x)) [0 ..< n])" (*C*) definition "boundr x y \ (\j max_y x j)" (*D*) definition "cond_D x y \ (\l\n. take l b \ take l y \ a \ x)" subsection \New conditions: facilitating generation of candidates from right to left\ (*condition on right sub-dotproduct*) definition "subdprodr y \ (\l\n. take l b \ take l y \ a \ map (max_x (take l y)) [0 ..< m])" (*condition on left sub-dotproduct*) definition "subdprodl x y \ (\k\m. take k a \ take k x \ b \ y)" (*bound on elements of left vector*) definition "boundl x y \ (\i max_x y i)" lemma boundr: assumes min: "(x, y) \ Minimal_Solutions" and "(x, y) \ Special_Solutions" shows "boundr x y" proof (unfold boundr_def, intro allI impI) fix j assume ass: "j < n" have ln: "m = length x \ n = length y" using assms Minimal_Solutions_def in_Solutions_iff min by auto have is_sol: "(x, y) \ Solutions" using assms Minimal_Solutions_def min by auto have j_less_l: "j < n" using assms ass le_less_trans by linarith consider (notemp) "Ej j x \ {}" | (empty) " Ej j x = {}" by blast then show "y ! j \ max_y x j" proof (cases) case notemp have max_y_def: "max_y x j = Min (Ej j x)" using j_less_l max_y_def notemp by auto have fin_e: "finite (Ej j x)" using finite_Ej [of j x] by auto have e_def': "\e \ Ej j x. (\i dij i j \ eij i j - 1 = e)" using Ej_def [of j x] by auto then have "\i dij i j \ eij i j - 1 = Min (Ej j x)" using notemp Min_in e_def' fin_e by blast then obtain i where i: "i < length x" "x ! 
i \ dij i j" "eij i j - 1 = Min (Ej j x)" by blast show ?thesis proof (rule ccontr) assume "\ ?thesis" with non_special_solution_non_minimal [of x y i j] and i and ln and assms and is_sol and j_less_l have "case sij i j of (u, v) \ u @ v \\<^sub>v x @ y" by (force simp: max_y_def) then have cs:"case sij i j of (u, v) \ u @ v <\<^sub>v x @ y" using assms by(auto simp: Special_Solutions_def) (metis append_eq_append_conv i(1) j_less_l length_list_update length_replicate sij_def order_vec.le_neq_trans ln prod.sel(1)) then obtain u v where u_v: "sij i j = (u, v)" "u @ v <\<^sub>v x @ y" by blast have dij_gt0: "dij i j > 0" using assms(1) assms(2) dij_neq_0 i(1) j_less_l ln by auto then have not_0_u: "nonzero u" proof (unfold nonzero_iff) have "i < length (zeroes m)" by (simp add: i(1) ln) then show "\i\set u. i \ 0" by (metis (no_types) Pair_inject dij_gt0 set_update_memI sij_def u_v(1) neq0_conv) qed then have "sij i j \ Solutions" by (metis (mono_tags, lifting) Special_Solutions_def i(1) Special_Solutions_in_Solutions j_less_l ln mem_Collect_eq u_v(1)) then show False using assms cs u_v not_0_u Minimal_Solutions_def min by auto qed next case empty have "\y\set y. y \ Max (set a)" using assms and max_coeff_bound and maxne0_le_Max using le_trans by blast then show ?thesis using empty j_less_l ln max_y_def by auto qed qed lemma boundl: assumes min: "(x, y) \ Minimal_Solutions" and "(x, y) \ Special_Solutions" shows "boundl x y" proof (unfold boundl_def, intro allI impI) fix i assume ass: "i < m" have ln: "n = length y \ m = length x" using assms Minimal_Solutions_def in_Solutions_iff min by auto have is_sol: "(x, y) \ Solutions" using assms Minimal_Solutions_def min by auto have i_less_l: "i < m" using assms ass le_less_trans by linarith consider (notemp) "Di i y \ {}" | (empty) " Di i y = {}" by blast then show "x ! i \ max_x y i" proof (cases) case notemp have max_x_def: "max_x y i = Min (Di i y)" using i_less_l max_x_def notemp by auto have fin_e: "finite (Di i y)" using finite_Di [of i y] by auto have e_def': "\e \ Di i y. (\j eij i j \ dij i j - 1 = e)" using Di_def [of i y] by auto then have "\j eij i j \ dij i j - 1 = Min (Di i y)" using notemp Min_in e_def' fin_e by blast then obtain j where j: "j < length y" "y ! j \ eij i j" "dij i j - 1 = Min (Di i y)" by blast show ?thesis proof (rule ccontr) assume "\ ?thesis" with non_special_solution_non_minimal [of x y i j] and j and ln and assms and is_sol and i_less_l have "case sij i j of (u, v) \ u @ v \\<^sub>v x @ y" by (force simp: max_x_def) then have cs: "case sij i j of (u, v) \ u @ v <\<^sub>v x @ y" using assms by(auto simp: Special_Solutions_def) (metis append_eq_append_conv j(1) i_less_l length_list_update length_replicate sij_def order_vec.le_neq_trans ln prod.sel(1)) then obtain u v where u_v: "sij i j = (u, v)" "u @ v <\<^sub>v x @ y" by blast have dij_gt0: "dij i j > 0" using assms(1) assms(2) dij_neq_0 j(1) i_less_l ln by auto then have not_0_u: "nonzero u" proof (unfold nonzero_iff) have "i < length (zeroes m)" using ass by simp then show "\i\set u. i \ 0" by (metis (no_types) Pair_inject dij_gt0 set_update_memI sij_def u_v(1) neq0_conv) qed then have "sij i j \ Solutions" by (metis (mono_tags, lifting) Special_Solutions_def j(1) Special_Solutions_in_Solutions i_less_l ln mem_Collect_eq u_v(1)) then show False using assms cs u_v not_0_u Minimal_Solutions_def min by auto qed next case empty have "\x\set x. 
x \ Max (set b)" using assms and max_coeff_bound and maxne0_le_Max using le_trans by blast then show ?thesis using empty i_less_l ln max_x_def by auto qed qed lemma Solution_imp_cond_D: assumes "(x, y) \ Solutions" shows "cond_D x y" using assms and dotprod_le_take by (auto simp: cond_D_def in_Solutions_iff) lemma Solution_imp_subdprodl: assumes "(x, y) \ Solutions" shows "subdprodl x y" using assms and dotprod_le_take by (auto simp: subdprodl_def in_Solutions_iff) metis theorem conds: assumes min: "(x, y) \ Minimal_Solutions" shows cond_A: "cond_A x y" and cond_B: "(x, y) \ Special_Solutions \ cond_B x" and "(x, y) \ Special_Solutions \ boundr x y" and cond_D: "cond_D x y" and subdprodr: "(x, y) \ Special_Solutions \ subdprodr y" and subdprodl: "subdprodl x y" proof - have sol: "a \ x = b \ y" and ln: "m = length x \ n = length y" using min by (auto simp: Minimal_Solutions_def in_Solutions_iff) then have "\i maxne0 y b" by (metis min max_coeff_bound_right nth_mem) then show "cond_A x y" using min and le_less_trans by (auto simp: cond_A_def max_coeff_bound) show "(x, y) \ Special_Solutions \ cond_B x" proof (unfold cond_B_def, intro allI impI) fix k assume non_spec: "(x, y) \ Special_Solutions" and k: "k \ m" from k have "take k a \ take k x \ a \ x" using dotprod_le_take ln by blast also have "... = b \ y" by fact also have map_b_dot_p: "... \ b \ map (max_y x) [0.. _ b \ ?nt") using non_spec and less_eq_def and ln and boundr and min by (fastforce intro!: dotprod_le_right simp: boundr_def) also have "... \ b \ map (max_y (take k x)) [0.. _ \ ?t") proof - have "\j ?t!j" using min and ln and max_y_le_take and k by auto then have "?nt \\<^sub>v ?t" using less_eq_def by auto then show ?thesis by (simp add: dotprod_le_right) qed finally show "take k a \ take k x \ b \ map (max_y (take k x)) [0.. Special_Solutions \ subdprodr y" proof (unfold subdprodr_def, intro allI impI) fix l assume non_spec: "(x, y) \ Special_Solutions" and l: "l \ n" from l have "take l b \ take l y \ b \ y" using dotprod_le_take ln by blast also have "... = a \ x" by (simp add: sol) also have map_b_dot_p: "... \ a \ map (max_x y) [0.. _ a \ ?nt") using non_spec and less_eq_def and ln and boundl and min by (fastforce intro!: dotprod_le_right simp: boundl_def) also have "... \ a \ map (max_x (take l y)) [0.. _ \ ?t") proof - have "\i ?t ! i" using min and ln and max_x_le_take and l by auto then have "?nt \\<^sub>v ?t" using less_eq_def by auto then show ?thesis by (simp add: dotprod_le_right) qed finally show "take l b \ take l y \ a \ map (max_x (take l y)) [0.. 
Special_Solutions \ boundr x y" using boundr [of x y] and min by blast show "cond_D x y" using ln and dotprod_le_take and sol by (auto simp: cond_D_def) show "subdprodl x y" using ln and dotprod_le_take and sol by (force simp: subdprodl_def) qed lemma le_imp_Ej_subset: assumes "u \\<^sub>v x" shows "Ej j u \ Ej j x" using assms and le_trans by (force simp: Ej_def less_eq_def dij_def eij_def) lemma le_imp_max_y_ge: assumes "u \\<^sub>v x" and "length x \ m" shows "max_y u j \ max_y x j" using assms and le_imp_Ej_subset and Min_Ej_le [of j, OF _ _ assms(2)] by (metis Min.subset_imp Min_in emptyE finite_Ej max_y_def order_refl subsetCE) lemma le_imp_Di_subset: assumes "v \\<^sub>v y" shows "Di i v \ Di i y" using assms and le_trans by (force simp: Di_def less_eq_def dij_def eij_def) lemma le_imp_max_x_ge: assumes "v \\<^sub>v y" and "length y \ n" shows "max_x v i \ max_x y i" using assms and le_imp_Di_subset and Min_Di_le [of i, OF _ _ assms(2)] by (metis Min.subset_imp Min_in emptyE finite_Di max_x_def order_refl subsetCE) end end diff --git a/thys/Echelon_Form/Echelon_Form.thy b/thys/Echelon_Form/Echelon_Form.thy --- a/thys/Echelon_Form/Echelon_Form.thy +++ b/thys/Echelon_Form/Echelon_Form.thy @@ -1,3005 +1,3005 @@ (* Title: Echelon_Form.thy Author: Jose Divasón Author: Jesús Aransay *) section\Echelon Form\ theory Echelon_Form imports Rings2 Gauss_Jordan.Determinants2 Cayley_Hamilton_Compatible begin subsection\Definition of Echelon Form\ text\Echelon form up to column k (NOT INCLUDED).\ definition echelon_form_upt_k :: "'a::{bezout_ring}^'cols::{mod_type}^'rows::{finite, ord} \ nat \ bool" where "echelon_form_upt_k A k = ( (\i. is_zero_row_upt_k i k A \ \ (\j. j>i \ \ is_zero_row_upt_k j k A)) \ (\i j. i \ (is_zero_row_upt_k i k A) \ \ (is_zero_row_upt_k j k A) \ ((LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0))))" definition "echelon_form A = echelon_form_upt_k A (ncols A)" text\Some properties of matrices in echelon form.\ lemma echelon_form_upt_k_intro: assumes "(\i. is_zero_row_upt_k i k A \ \ (\j. j>i \ \ is_zero_row_upt_k j k A))" and "(\i j. i \ (is_zero_row_upt_k i k A) \ \ (is_zero_row_upt_k j k A) \ ((LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)))" shows "echelon_form_upt_k A k" using assms unfolding echelon_form_upt_k_def by fast lemma echelon_form_upt_k_condition1: assumes "echelon_form_upt_k A k" "is_zero_row_upt_k i k A" shows "\ (\j. j>i \ \ is_zero_row_upt_k j k A)" using assms unfolding echelon_form_upt_k_def by auto lemma echelon_form_upt_k_condition1': assumes "echelon_form_upt_k A k" "is_zero_row_upt_k i k A" and "i (is_zero_row_upt_k i k A)" "\ (is_zero_row_upt_k j k A)" shows "(LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" using assms unfolding echelon_form_upt_k_def by auto lemma echelon_form_upt_k_if_equal: assumes e: "echelon_form_upt_k A k" and eq: "\a. \b is_zero_row_upt_k i k B" and not_zero_jB: "\ is_zero_row_upt_k j k B" obtain a where Bia: "B $ i $ a \ 0" and ak: "a 0" by (metis ak Bia eq) obtain b where Bjb: "B $ j $ b \ 0" and bk: "b 0" by (metis bk Bjb eq) have not_zero_iA: "\ is_zero_row_upt_k i k A" by (metis (full_types) Aia ak is_zero_row_upt_k_def to_nat_le) have not_zero_jA: "\ is_zero_row_upt_k j k A" by (metis (full_types) Ajb bk is_zero_row_upt_k_def to_nat_le) have "(LEAST n. A $ i $ n \ 0) = (LEAST n. B $ i $ n \ 0)" proof (rule Least_equality) have "(LEAST n. B $ i $ n \ 0) \ a" by (rule Least_le, simp add: Bia) hence least_bi_less: "(LEAST n. B $ i $ n \ 0) < from_nat k" using ak by simp thus "A $ i $ (LEAST n. 
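Restated over plain integer lists instead of HOL vectors (illustrative only, with ad-hoc names), the two conditions of echelon_form_upt_k / echelon_form say: all-zero rows come last, and the leading entries of the nonzero rows move strictly to the right as one goes down.

```python
def leading(row):
    # index of the first nonzero entry of a row (None for an all-zero row)
    return next((j for j, v in enumerate(row) if v != 0), None)

def is_echelon_form(A):
    # condition 1: once a zero row occurs, every row below it is zero
    # condition 2: leading entries of the nonzero rows are strictly increasing
    leads = [leading(row) for row in A]
    nz = [l for l in leads if l is not None]
    return all(l is None for l in leads[len(nz):]) and \
           all(x < y for x, y in zip(nz, nz[1:]))

assert is_echelon_form([[2, 1, 3], [0, 0, 5], [0, 0, 0]])
assert not is_echelon_form([[2, 1, 3], [4, 0, 5], [0, 0, 0]])  # two leading entries in column 0
```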
B $ i $ n \ 0) \ 0" by (metis (mono_tags, lifting) LeastI eq is_zero_row_upt_k_def not_zero_iB) fix y assume "A $ i $ y \ 0" thus "(LEAST n. B $ i $ n \ 0) \ y" by (metis (mono_tags, lifting) Least_le dual_order.strict_trans2 eq least_bi_less linear) qed moreover have "(LEAST n. A $ j $ n \ 0) = (LEAST n. B $ j $ n \ 0)" proof (rule Least_equality) have "(LEAST n. B $ j $ n \ 0) \ b" by (rule Least_le, simp add: Bjb) hence least_bi_less: "(LEAST n. B $ j $ n \ 0) < from_nat k" using bk by simp thus "A $ j $ (LEAST n. B $ j $ n \ 0) \ 0" by (metis (mono_tags, lifting) LeastI eq is_zero_row_upt_k_def not_zero_jB) fix y assume "A $ j $ y \ 0" thus "(LEAST n. B $ j $ n \ 0) \ y" by (metis (mono_tags, lifting) Least_le dual_order.strict_trans2 eq least_bi_less linear) qed moreover have "(LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" by (rule echelon_form_upt_k_condition2[OF e ij not_zero_iA not_zero_jA]) ultimately show "(LEAST n. B $ i $ n \ 0) < (LEAST n. B $ j $ n \ 0)" by auto qed lemma echelon_form_upt_k_0: "echelon_form_upt_k A 0" unfolding echelon_form_upt_k_def is_zero_row_upt_k_def by auto lemma echelon_form_condition1: assumes r: "echelon_form A" shows "(\i. is_zero_row i A \ \ (\j. j>i \ \ is_zero_row j A))" using r unfolding echelon_form_def by (metis echelon_form_upt_k_condition1' is_zero_row_def) lemma echelon_form_condition2: assumes r: "echelon_form A" shows "(\i. i \ (is_zero_row i A) \ \ (is_zero_row j A) \ ((LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)))" using r unfolding echelon_form_def by (metis echelon_form_upt_k_condition2 is_zero_row_def) lemma echelon_form_condition2_explicit: assumes rref_A: "echelon_form A" and i_le: "i < j" and "\ is_zero_row i A" and "\ is_zero_row j A" shows "(LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" using echelon_form_condition2 assms by blast lemma echelon_form_intro: assumes 1: "(\i. is_zero_row i A \ \ (\j. j>i \ \ is_zero_row j A))" and 2: "(\i j. i \ (is_zero_row i A) \ \ (is_zero_row j A) \ ((LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)))" shows "echelon_form A" proof (unfold echelon_form_def, rule echelon_form_upt_k_intro, auto) fix i j assume "is_zero_row_upt_k i (ncols A) A" and "i < j" thus "is_zero_row_upt_k j (ncols A) A" using 1 is_zero_row_imp_is_zero_row_upt by (metis is_zero_row_def) next fix i j assume "i < j" and "\ is_zero_row_upt_k i (ncols A) A" and "\ is_zero_row_upt_k j (ncols A) A" thus "(LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" using 2 by (metis is_zero_row_imp_is_zero_row_upt) qed lemma echelon_form_implies_echelon_form_upt: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "echelon_form A" shows "echelon_form_upt_k A k" proof (rule echelon_form_upt_k_intro) show "\i. is_zero_row_upt_k i k A \ \ (\j>i. \ is_zero_row_upt_k j k A)" proof (auto, rule ccontr) fix i j assume zero_i_k: "is_zero_row_upt_k i k A" and i_less_j: "i < j" and not_zero_j_k:"\ is_zero_row_upt_k j k A" have not_zero_j: "\ is_zero_row j A" using is_zero_row_imp_is_zero_row_upt not_zero_j_k by blast hence not_zero_i: "\ is_zero_row i A" using echelon_form_condition1[OF rref] i_less_j by blast have Least_less: "(LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" by (rule echelon_form_condition2_explicit[OF rref i_less_j not_zero_i not_zero_j]) moreover have "(LEAST n. A $ j $ n \ 0) < (LEAST n. A $ i $ n \ 0)" proof (rule LeastI2_ex) show "\a. 
A $ i $ a \ 0" using not_zero_i unfolding is_zero_row_def is_zero_row_upt_k_def by fast fix x assume Aix_not_0: "A $ i $ x \ 0" have k_less_x: "k \ to_nat x" using zero_i_k Aix_not_0 unfolding is_zero_row_upt_k_def by force hence k_less_ncols: "k < ncols A" unfolding ncols_def using to_nat_less_card[of x] by simp obtain s where Ajs_not_zero: "A $ j $ s \ 0" and s_less_k: "to_nat s < k" using not_zero_j_k unfolding is_zero_row_upt_k_def by blast have "(LEAST n. A $ j $ n \ 0) \ s" using Ajs_not_zero Least_le by fast also have "... = from_nat (to_nat s)" unfolding from_nat_to_nat_id .. also have "... < from_nat k" by (rule from_nat_mono[OF s_less_k k_less_ncols[unfolded ncols_def]]) also have "... \ x" using k_less_x leD not_le_imp_less to_nat_le by fast finally show "(LEAST n. A $ j $ n \ 0) < x" . qed ultimately show False by fastforce qed show "\i j. i < j \ \ is_zero_row_upt_k i k A \ \ is_zero_row_upt_k j k A \ (LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" using echelon_form_condition2[OF rref] is_zero_row_imp_is_zero_row_upt by blast qed lemma upper_triangular_upt_k_def': assumes "\i j. to_nat j \ k \ A $ i $ j \ 0 \ j\i" shows "upper_triangular_upt_k A k" using assms unfolding upper_triangular_upt_k_def by (metis leD linear) lemma echelon_form_imp_upper_triagular_upt: fixes A::"'a::{bezout_ring}^'n::{mod_type}^'n::{mod_type}" assumes "echelon_form A" shows "upper_triangular_upt_k A k" proof (induct k) case 0 show ?case unfolding upper_triangular_upt_k_def by simp next case (Suc k) show ?case unfolding upper_triangular_upt_k_def proof (clarify) fix i j::'n assume j_less_i: "j < i" and j_less_suc_k: "to_nat j < Suc k" show "A $ i $ j = 0" proof (cases "to_nat j < k") case True thus ?thesis using Suc.hyps unfolding upper_triangular_upt_k_def using j_less_i True by auto next case False hence j_eq_k: "to_nat j = k" using j_less_suc_k by simp hence j_eq_k2: "from_nat k = j" by (metis from_nat_to_nat_id) have rref_suc: "echelon_form_upt_k A (Suc k)" by (metis assms echelon_form_implies_echelon_form_upt) have zero_j_k: "is_zero_row_upt_k j k A" unfolding is_zero_row_upt_k_def by (metis (hide_lams, mono_tags) Suc.hyps leD le_less_linear j_eq_k to_nat_mono' upper_triangular_upt_k_def) hence zero_i_k: "is_zero_row_upt_k i k A" by (metis (poly_guards_query) assms echelon_form_implies_echelon_form_upt echelon_form_upt_k_condition1' j_less_i) show ?thesis proof (cases "A $ j $ j = 0") case True have "is_zero_row_upt_k j (Suc k) A" by (rule is_zero_row_upt_k_suc[OF zero_j_k], simp add: True j_eq_k2) hence "is_zero_row_upt_k i (Suc k) A" by (metis echelon_form_upt_k_condition1' j_less_i rref_suc) thus ?thesis by (metis is_zero_row_upt_k_def j_eq_k lessI) next case False note Ajj_not_zero=False hence not_zero_j:"\ is_zero_row_upt_k j (Suc k) A" by (metis is_zero_row_upt_k_def j_eq_k lessI) show ?thesis proof (rule ccontr) assume Aij_not_zero: "A $ i $ j \ 0" hence not_zero_i: "\ is_zero_row_upt_k i (Suc k) A" by (metis is_zero_row_upt_k_def j_eq_k lessI) have Least_eq: "(LEAST n. A $ i $ n \ 0) = from_nat k" proof (rule Least_equality) show "A $ i $ from_nat k \ 0" using Aij_not_zero j_eq_k2 by simp show "\y. A $ i $ y \ 0 \ from_nat k \ y" by (metis (full_types) is_zero_row_upt_k_def not_le_imp_less to_nat_le zero_i_k) qed moreover have Least_eq2: "(LEAST n. A $ j $ n \ 0) = from_nat k" proof (rule Least_equality) show "A $ j $ from_nat k \ 0" using Ajj_not_zero j_eq_k2 by simp show "\y. 
A $ j $ y \ 0 \ from_nat k \ y" by (metis (full_types) is_zero_row_upt_k_def not_le_imp_less to_nat_le zero_j_k) qed ultimately show False using echelon_form_upt_k_condition2[OF rref_suc j_less_i not_zero_j not_zero_i] by simp qed qed qed qed qed text\A matrix in echelon form is upper triangular.\ lemma echelon_form_imp_upper_triagular: fixes A::"'a::{bezout_ring}^'n::{mod_type}^'n::{mod_type}" assumes "echelon_form A" shows "upper_triangular A" using echelon_form_imp_upper_triagular_upt[OF assms] by (metis upper_triangular_upt_imp_upper_triangular) lemma echelon_form_upt_k_interchange: fixes A::"'a::{bezout_ring}^'c::{mod_type}^'b::{mod_type}" assumes e: "echelon_form_upt_k A k" and zero_ikA: "is_zero_row_upt_k (from_nat i) k A" and Amk_not_0: "A $ m $ from_nat k \ 0" and i_le_m: "(from_nat i)\m" and k: "k 0 \ (from_nat i) \ n)) k" proof (rule echelon_form_upt_k_if_equal[OF e _ k], auto) fix a and b::'c assume b: "b < from_nat k" let ?least = "(LEAST n. A $ n $ from_nat k \ 0 \ (from_nat i) \ n)" let ?interchange = "(interchange_rows A (from_nat i) ?least)" have "(from_nat i)\?least" by (metis (mono_tags, lifting) Amk_not_0 LeastI_ex i_le_m) hence zero_leastkA: "is_zero_row_upt_k ?least k A" using echelon_form_upt_k_condition1[OF e zero_ikA] by (metis (poly_guards_query) dual_order.strict_iff_order zero_ikA) show "A $ a $ b = ?interchange $ a $ b" proof (cases "a=from_nat i") case True hence "?interchange $ a $ b = A $ ?least $ b" unfolding interchange_rows_def by auto also have "... = 0" using zero_leastkA unfolding is_zero_row_upt_k_def by (metis (mono_tags) b to_nat_le) finally have "?interchange $ a $ b = 0" . moreover have "A $ a $ b = 0" by (metis True b is_zero_row_upt_k_def to_nat_le zero_ikA) ultimately show ?thesis by simp next case False note a_not_i=False show ?thesis proof (cases "a=?least") case True hence "?interchange $ a $ b = A $ (from_nat i) $ b" unfolding interchange_rows_def by auto also have "... = 0" using zero_ikA unfolding is_zero_row_upt_k_def by (metis (poly_guards_query) b to_nat_le) finally have "?interchange $ a $ b = 0" . moreover have "A $ a $ b = 0" by (metis True b is_zero_row_upt_k_def to_nat_le zero_leastkA) ultimately show ?thesis by simp next case False thus ?thesis using a_not_i unfolding interchange_rows_def by auto qed qed qed text\There are similar theorems to the following ones in the Gauss-Jordan developments, but for matrices in reduced row echelon form. It is possible to prove that reduced row echelon form implies echelon form. Then the theorems in the Gauss-Jordan development could be obtained with ease.\ lemma greatest_less_zero_row: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{finite, wellorder}" assumes r: "echelon_form_upt_k A k" and zero_i: "is_zero_row_upt_k i k A" and not_all_zero: "\ (\a. is_zero_row_upt_k a k A)" shows "(GREATEST m. \ is_zero_row_upt_k m k A) < i" proof (rule ccontr) assume not_less_i: "\ (GREATEST m. \ is_zero_row_upt_k m k A) < i" have i_less_greatest: "i < (GREATEST m. \ is_zero_row_upt_k m k A)" by (metis not_less_i neq_iff GreatestI not_all_zero zero_i) have "is_zero_row_upt_k (GREATEST m. \ is_zero_row_upt_k m k A) k A" using r zero_i i_less_greatest unfolding echelon_form_upt_k_def by blast thus False using GreatestI_ex not_all_zero by fast qed lemma greatest_ge_nonzero_row': fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes r: "echelon_form_upt_k A k" and i: "i \ (GREATEST m. \ is_zero_row_upt_k m k A)" and not_all_zero: "\ (\a. 
is_zero_row_upt_k a k A)" shows "\ is_zero_row_upt_k i k A" using greatest_less_zero_row[OF r] i not_all_zero by fastforce lemma rref_imp_ef: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes rref: "reduced_row_echelon_form A" shows "echelon_form A" proof (rule echelon_form_intro) show "\i. is_zero_row i A \ \ (\j>i. \ is_zero_row j A)" by (simp add: rref rref_condition1) show "\i j. i < j \ \ is_zero_row i A \ \ is_zero_row j A \ (LEAST n. A $ i $ n \ 0) < (LEAST n. A $ j $ n \ 0)" by (simp add: rref_condition3_equiv rref) qed subsection\Computing the echelon form of a matrix\ subsubsection\Demonstration over principal ideal rings\ text\Important remark: We want to prove that there exist the echelon form of any matrix whose elements belong to a bezout domain. In addition, we want to compute the echelon form, so we will need computable gcd and bezout operations which is possible over euclidean domains. Our approach consists of demonstrating the correctness over bezout domains and executing over euclidean domains. To do that, we have studied several options: \begin{enumerate} \item We could define a gcd in bezout rings (\bezout_ring_gcd\) as follows: \gcd_bezout_ring a b = (SOME d. d dvd a \ d dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd d))\ And then define an algorithm that computes the Echelon Form using such a definition to the gcd. This would allow us to prove the correctness over bezout rings, but we would not be able to execute over euclidean rings because it is not possible to demonstrate a (code) lemma stating that \(gcd_bezout_ring a b) = gcd_eucl a b\ (the gcd is not unique over bezout rings and GCD rings). \item Create a \bezout_ring_norm\ class and define a gcd normalized over bezout rings: \definition gcd_bezout_ring_norm a b = gcd_bezout_ring a b div normalisation_factor (gcd_bezout_ring a b)\ Then, one could demonstrate a (code) lemma stating that: \(gcd_bezout_ring_norm a b) = gcd_eucl a b\ This allows us to execute the gcd function, but with bezout it is not possible. \item The third option (and the chosen one) consists of defining the algorithm over bezout domains and parametrizing the algorithm by a \bezout\ operation which must satisfy suitable properties (i.e @{term "is_bezout_ext bezout"}). Then we can prove the correctness over bezout domains and we will execute over euclidean domains, since we can prove that the operation @{term "euclid_ext2"} is an executable operation which satisfies @{term "is_bezout_ext euclid_ext2"}. \end{enumerate} \ subsubsection\Definition of the algorithm\ context bezout_ring begin definition bezout_matrix :: "'a^'cols^'rows \ 'rows \ 'rows \ 'cols \ ('a \ 'a \ ('a \ 'a \ 'a \ 'a \ 'a)) \ 'a^'rows^'rows" where "bezout_matrix A a b j bezout = (\ x y. (let (p, q, u, v, d) = bezout (A $ a $ j) (A $ b $ j) in if x = a \ y = a then p else if x = a \ y = b then q else if x = b \ y = a then u else if x = b \ y = b then v else if x = y then 1 else 0))" end primrec bezout_iterate :: "'a::{bezout_ring}^'cols^'rows::{mod_type} \ nat \ 'rows::{mod_type} \ 'cols \ ('a \'a \ ('a \ 'a \ 'a \ 'a \ 'a)) \ 'a^'cols^'rows::{mod_type}" where "bezout_iterate A 0 i j bezout = A" | "bezout_iterate A (Suc n) i j bezout = (if (Suc n) \ to_nat i then A else bezout_iterate (bezout_matrix A i (from_nat (Suc n)) j bezout ** A) n i j bezout)" text\If every element in column @{term "k::nat"} over index @{term "i::nat"} are equal to zero, the same input is returned. 
If every element over @{term "i::nat"} is equal to zero, except the pivot, the algorithm does nothing, but pivot @{term "i::nat"} is increased in a unit. Finally, if there is a position @{term "n::nat"} whose coefficient is different from zero, its row is interchanged with row @{term "i::nat"} and the bezout coefficients are used to produce a zero in its position.\ definition "echelon_form_of_column_k bezout A' k = (let (A, i) = A' in if (\m\from_nat i. A $ m $ from_nat k = 0) \ (i = nrows A) then (A, i) else if (\m>from_nat i. A $ m $ from_nat k = 0) then (A, i + 1) else let n = (LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n); interchange_A = interchange_rows A (from_nat i) n in (bezout_iterate (interchange_A) (nrows A - 1) (from_nat i) (from_nat k) bezout, i + 1))" definition "echelon_form_of_upt_k A k bezout = (fst (foldl (echelon_form_of_column_k bezout) (A,0) [0..The executable definition:\ context euclidean_space begin definition [code_unfold]: "echelon_form_of_euclidean A = echelon_form_of A euclid_ext2" end subsubsection\Properties of the bezout matrix\ lemma bezout_matrix_works1: assumes ib: "is_bezout_ext bezout" and a_not_b: "a \ b" shows "(bezout_matrix A a b j bezout ** A) $ a $ j = snd (snd (snd (snd (bezout (A $ a $ j) (A $ b $ j)))))" proof (unfold matrix_matrix_mult_def bezout_matrix_def Let_def, simp) let ?a = "(A $ a $ j)" let ?b = "(A $ b $ j)" let ?z = "bezout (A $ a $ j) (A $ b $ j)" obtain p q u v d where bz: "(p, q, u, v, d) = ?z" by (cases ?z, auto) from ib have foo: "(\a b. let (p, q, u, v, gcd_a_b) = bezout a b in p * a + q * b = gcd_a_b \ gcd_a_b dvd a \ gcd_a_b dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd gcd_a_b) \ gcd_a_b * u = - b \ gcd_a_b * v = a)" using is_bezout_ext_def [of bezout] by simp have foo: "p * ?a + q * ?b = d \ d dvd ?a \ d dvd ?b \ (\d'. d' dvd ?a \ d' dvd ?b \ d' dvd d) \ d * u = - ?b \ d * v = ?a" using ib using is_bezout_ext_def using bz [symmetric] using foo [of ?a ?b] by fastforce have pa_bq_d: "p * ?a + ?b * q = d" using foo by (auto simp add: mult.commute) define f where "f k = (if k = a then p else if a = a \ k = b then q else if a = b \ k = a then u else if a = b \ k = b then v else if a = k then 1 else 0) * A $ k $ j" for k have UNIV_rw: "UNIV = insert b (insert a (UNIV - {a} - {b}))" by auto have sum_rw: "sum f (insert a (UNIV - {a} - {b})) = f a + sum f (UNIV - {a} - {b})" by (rule sum.insert, auto) have sum0: "sum f (UNIV - {a} - {b}) = 0" by (rule sum.neutral, simp add: f_def) have "(\k\UNIV. (case bezout (A $ a $ j) (A $ b $ j) of (p, q, u, v, d) \ if k = a then p else if a = a \ k = b then q else if a = b \ k = a then u else if a = b \ k = b then v else if a = k then 1 else 0) * A $ k $ j) = (\k\UNIV. (if k = a then p else if a = a \ k = b then q else if a = b \ k = a then u else if a = b \ k = b then v else if a = k then 1 else 0) * A $ k $ j)" unfolding bz [symmetric] by auto also have "... = sum f UNIV" unfolding f_def .. also have "sum f UNIV = sum f (insert b (insert a (UNIV - {a} - {b})))" using UNIV_rw by simp also have "... = f b + sum f (insert a (UNIV - {a} - {b}))" by (rule sum.insert, auto, metis a_not_b) also have "... = f b + f a" unfolding sum_rw sum0 by simp also have "... = d" unfolding f_def using a_not_b bz [symmetric] by (auto, metis add.commute mult.commute pa_bq_d) also have "... = snd (snd (snd (snd (bezout (A $ a $ j) (A $ b $ j)))))" using bz by (metis snd_conv) finally show "(\k\UNIV. 
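A rough Python rendering of one echelon_form_of_column_k step on an integer matrix may help to see the three cases at work. It is only a sketch: the bezout helper is an ad-hoc extended gcd intended for nonnegative entries, and the elimination loop walks downwards from the pivot rather than bottom-up as bezout_iterate does; the resulting column (gcd at the pivot, zeros below it) is the same.

```python
def bezout(a, b):
    # extended gcd for nonnegative integers: returns (p, q, u, v, d) with
    # p*a + q*b = d = gcd a b, d*u = -b and d*v = a  (cf. is_bezout_ext)
    old_r, r, old_s, s, old_t, t = a, b, 1, 0, 0, 1
    while r != 0:
        q0 = old_r // r
        old_r, r = r, old_r - q0 * r
        old_s, s = s, old_s - q0 * s
        old_t, t = t, old_t - q0 * t
    d, p, q = old_r, old_s, old_t
    return p, q, (-b // d if d else 0), (a // d if d else 1), d

def column_step(A, i, k):
    # one echelon_form_of_column_k step: matrix A (list of rows), pivot row i, column k
    m = len(A)
    if i == m or all(A[r][k] == 0 for r in range(i, m)):
        return A, i                      # nothing at or below the pivot: unchanged
    if all(A[r][k] == 0 for r in range(i + 1, m)):
        return A, i + 1                  # pivot already isolated: just advance
    n = min(r for r in range(i, m) if A[r][k] != 0)
    A = [row[:] for row in A]
    A[i], A[n] = A[n], A[i]              # interchange_rows A i n
    for r in range(i + 1, m):            # multiply by one bezout_matrix per row below i
        p, q, u, v, d = bezout(A[i][k], A[r][k])
        A[i], A[r] = ([p * x + q * y for x, y in zip(A[i], A[r])],
                      [u * x + v * y for x, y in zip(A[i], A[r])])
    return A, i + 1

A = [[0, 4, 2],
     [0, 6, 1],
     [0, 2, 7]]
A1, i1 = column_step(A, 0, 0)            # column 0 is all zero: (A, i) unchanged
A2, i2 = column_step(A1, i1, 1)          # column 1: gcd 2 at the pivot, zeros below it
assert (i1, i2) == (0, 1)
assert A2[0][1] != 0 and A2[1][1] == 0 and A2[2][1] == 0
```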
(case bezout (A $ a $ j) (A $ b $ j) of (p, q, u, v, d) \ if k = a then p else if a = a \ k = b then q else if a = b \ k = a then u else if a = b \ k = b then v else if a = k then 1 else 0) * A $ k $ j) = snd (snd (snd (snd (bezout (A $ a $ j) (A $ b $ j)))))" unfolding f_def by simp qed lemma bezout_matrix_not_zero: assumes ib: "is_bezout_ext bezout" and a_not_b: "a \ b" and Aaj: "A $ a $ j \ 0" shows "(bezout_matrix A a b j bezout ** A) $ a $ j \ 0" proof - have "(bezout_matrix A a b j bezout ** A) $ a $ j = snd (snd (snd(snd (bezout (A $ a $ j) (A $ b $ j)))))" using bezout_matrix_works1[OF ib a_not_b] . also have "... = (\a b. (case bezout a b of (_, _,_ ,_,gcd') \ (gcd'))) (A $ a $ j) (A $ b $ j)" by (simp add: split_beta) also have "... \ 0" using gcd'_zero[OF is_gcd_is_bezout_ext[OF ib]] Aaj by simp finally show ?thesis . qed lemma ua_vb_0: fixes a::"'a::bezout_domain" assumes ib: "is_bezout_ext bezout" and nz: "snd (snd (snd (snd (bezout a b)))) \ 0" shows "fst (snd (snd (bezout a b))) * a + fst (snd (snd (snd (bezout a b)))) * b = 0" proof- obtain p q u v d where bz: "(p, q, u, v, d) = bezout a b" by (cases "bezout a b", auto) from ib have foo: "(\a b. let (p, q, u, v, gcd_a_b) = bezout a b in p * a + q * b = gcd_a_b \ gcd_a_b dvd a \ gcd_a_b dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd gcd_a_b) \ gcd_a_b * u = - b \ gcd_a_b * v = a)" using is_bezout_ext_def [of bezout] by simp have "p * a + q * b = d \ d dvd a \ d dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd d) \ d * u = - b \ d * v = a" using foo [of a b] using bz by fastforce hence dub: "d * u = - b" and dva: "d * v = a" by (simp_all) hence "d * u * a + d * v * b = 0" using eq_neg_iff_add_eq_0 mult.commute mult_minus_left by auto hence "u * a + v * b = 0" by (metis (no_types, lifting) dub dva minus_minus mult_minus_left neg_eq_iff_add_eq_0 semiring_normalization_rules(18) semiring_normalization_rules(7)) thus ?thesis using bz [symmetric] by simp qed lemma bezout_matrix_works2: fixes A::"'a::bezout_domain^'cols^'rows" assumes ib: "is_bezout_ext bezout" and a_not_b: "a \ b" and not_0: "A $ a $ j \ 0 \ A $ b $ j \ 0" shows "(bezout_matrix A a b j bezout ** A) $ b $ j = 0" proof (unfold matrix_matrix_mult_def bezout_matrix_def Let_def, auto) let ?a = "(A $ a $ j)" let ?b = "(A $ b $ j)" let ?z = "bezout (A $ a $ j) (A $ b $ j)" from ib have foo: "(\a b. let (p, q, u, v, gcd_a_b) = bezout a b in p * a + q * b = gcd_a_b \ gcd_a_b dvd a \ gcd_a_b dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd gcd_a_b) \ gcd_a_b * u = - b \ gcd_a_b * v = a)" using is_bezout_ext_def [of bezout] by simp obtain p q u v d where bz: "(p, q, u, v, d) = ?z" by (cases ?z, auto) hence pib: "p * ?a + q * ?b = d \ d dvd ?a \ d dvd ?b \ (\d'. d' dvd ?a \ d' dvd ?b \ d' dvd d) \ d * u = - ?b \ d * v = ?a" using foo [of ?a ?b] by fastforce hence pa_bq_d: "p * ?a + ?b * q = d" by (simp add: mult.commute) have d_dvd_a: "d dvd ?a" using pib by auto have d_dvd_b: "d dvd -?b" using pib by auto have pa_bq_d: "p * ?a + ?b * q = d" using pa_bq_d by (simp add: mult.commute) define f where "f k = (if b = a \ k = a then p else if b = a \ k = b then q else if b = b \ k = a then u else if b = b \ k = b then v else if b = k then 1 else 0) * A $ k $ j" for k have UNIV_rw: "UNIV = insert b (insert a (UNIV - {a} - {b}))" by auto have sum_rw: "sum f (insert a (UNIV - {a} - {b})) = f a + sum f (UNIV - {a} - {b})" by (rule sum.insert, auto) have sum0: "sum f (UNIV - {a} - {b}) = 0" by (rule sum.neutral, simp add: f_def) have "(\k\UNIV. 
(case bezout (A $ a $ j) (A $ b $ j) of (p, q, u, v, d) \ if b = a \ k = a then p else if b = a \ k = b then q else if b = b \ k = a then u else if b = b \ k = b then v else if b = k then 1 else 0) * A $ k $ j) = sum f UNIV" unfolding f_def bz [symmetric] by simp also have "sum f UNIV = sum f (insert b (insert a (UNIV - {a} - {b})))" using UNIV_rw by simp also have "... = f b + sum f (insert a (UNIV - {a} - {b}))" by (rule sum.insert, auto, metis a_not_b) also have "... = f b + f a" unfolding sum_rw sum0 by simp also have "... = v * ?b + u * ?a" unfolding f_def using a_not_b by auto also have "... = u * ?a + v * ?b" by auto also have "... = 0" using ua_vb_0 [OF ib] bz by (metis fst_conv minus_minus minus_zero mult_eq_0_iff pib snd_conv) finally show "(\k\UNIV. (case bezout (A $ a $ j) (A $ b $ j) of (p, q, u, v, d) \ if b = a \ k = a then p else if b = a \ k = b then q else if b = b \ k = a then u else if b = b \ k = b then v else if b = k then 1 else 0) * A $ k $ j) = 0" . qed lemma bezout_matrix_preserves_previous_columns: assumes ib: "is_bezout_ext bezout" and i_not_j: "i \ j" and Aik: "A $ i $ k \ 0" and b_k: "ba b. let (p, q, u, v, gcd_a_b) = bezout a b in p * a + q * b = gcd_a_b \ gcd_a_b dvd a \ gcd_a_b dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd gcd_a_b) \ gcd_a_b * u = - b \ gcd_a_b * v = a)" using is_bezout_ext_def [of bezout] by simp obtain p q u v d where bz: "(p, q, u, v, d) = ?z" by (cases ?z, auto) have Aib: "A $ i $ b = 0" by (metis b_k i is_zero_row_upt_k_def to_nat_mono) have Ajb: "A $ j $ b = 0" by (metis b_k j is_zero_row_upt_k_def to_nat_mono) define f where "f ka = (if a = i \ ka = i then p else if a = i \ ka = j then q else if a = j \ ka = i then u else if a = j \ ka = j then v else if a = ka then 1 else 0) * A $ ka $ b" for ka show "(\ka\UNIV. (case bezout (A $ i $ k) (A $ j $ k) of (p, q, u, v, d) \ if a = i \ ka = i then p else if a = i \ ka = j then q else if a = j \ ka = i then u else if a = j \ ka = j then v else if a = ka then 1 else 0) * A $ ka $ b) = A $ a $ b" proof (cases "a=i") case True have "(\ka\UNIV. (case bezout (A $ i $ k) (A $ j $ k) of (p, q, u, v, d) \ if a = i \ ka = i then p else if a = i \ ka = j then q else if a = j \ ka = i then u else if a = j \ ka = j then v else if a = ka then 1 else 0) * A $ ka $ b) = sum f UNIV" unfolding f_def bz [symmetric] by simp also have "sum f UNIV = 0" by (rule sum.neutral, auto simp add: Aib Ajb f_def True i_not_j) also have "... = A $ a $ b" unfolding True using Aib by simp finally show ?thesis . next case False note a_not_i=False show ?thesis proof (cases "a=j") case True have "(\ka\UNIV. (case bezout (A $ i $ k) (A $ j $ k) of (p, q, u, v, d) \ if a = i \ ka = i then p else if a = i \ ka = j then q else if a = j \ ka = i then u else if a = j \ ka = j then v else if a = ka then 1 else 0) * A $ ka $ b) = sum f UNIV" unfolding f_def bz [symmetric] by simp also have "sum f UNIV = 0" by (rule sum.neutral, auto simp add: Aib Ajb f_def True i_not_j) also have "... = A $ a $ b" unfolding True using Ajb by simp finally show ?thesis . next case False have UNIV_rw: "UNIV = insert j (insert i (UNIV - {i} - {j}))" by auto have UNIV_rw2: "UNIV - {i} - {j} = insert a (UNIV - {i} - {j}-{a})" using False a_not_i by auto have sum0: "sum f (UNIV - {i} - {j} - {a}) = 0" by (rule sum.neutral, simp add: f_def) have "(\ka\UNIV. 
(case bezout (A $ i $ k) (A $ j $ k) of (p, q, u, v, d) \ if a = i \ ka = i then p else if a = i \ ka = j then q else if a = j \ ka = i then u else if a = j \ ka = j then v else if a = ka then 1 else 0) * A $ ka $ b) = sum f UNIV" unfolding f_def bz [symmetric] by simp also have "sum f UNIV = sum f (insert j (insert i (UNIV - {i} - {j})))" using UNIV_rw by simp also have "... = f j + sum f (insert i (UNIV - {i} - {j}))" by (rule sum.insert, auto, metis i_not_j) also have "... = sum f (insert i (UNIV - {i} - {j}))" unfolding f_def using False a_not_i by auto also have "... = f i + sum f (UNIV - {i} - {j})" by (rule sum.insert, auto) also have "... = sum f (UNIV - {i} - {j})" unfolding f_def using False a_not_i by auto also have "... = sum f (insert a (UNIV - {i} - {j} - {a}))" using UNIV_rw2 by simp also have "... = f a + sum f (UNIV - {i} - {j} - {a})" by (rule sum.insert, auto) also have "... = f a" unfolding sum0 by simp also have "... = A $ a $ b" unfolding f_def using False a_not_i by auto finally show ?thesis . qed qed qed lemma det_bezout_matrix: fixes A::"'a::{bezout_domain}^'cols^'rows::{finite,wellorder}" assumes ib: "is_bezout_ext bezout" and a_less_b: "a < b" and aj: "A $ a $ j \ 0" shows "det (bezout_matrix A a b j bezout) = 1" proof - let ?B = "bezout_matrix A a b j bezout" let ?a = "(A $ a $ j)" let ?b = "(A $ b $ j)" let ?z = "bezout ?a ?b" from ib have foo: "(\a b. let (p, q, u, v, gcd_a_b) = bezout a b in p * a + q * b = gcd_a_b \ gcd_a_b dvd a \ gcd_a_b dvd b \ (\d'. d' dvd a \ d' dvd b \ d' dvd gcd_a_b) \ gcd_a_b * u = - b \ gcd_a_b * v = a)" using is_bezout_ext_def [of bezout] by simp obtain p q u v d where bz: "(p, q, u, v, d) = ?z" by (cases ?z, auto) hence pib: "p * ?a + q * ?b = d \ d dvd ?a \ d dvd ?b \ (\d'. d' dvd ?a \ d' dvd ?b \ d' dvd d) \ d * u = - ?b \ d * v = ?a" using foo [of ?a ?b] by fastforce hence pa_bq_d: "p * ?a + ?b * q = d" by (simp add: mult.commute) have a_not_b: "a \ b" using a_less_b by auto have d_dvd_a: "d dvd ?a" using pib by auto have UNIV_rw: "UNIV = insert b (insert a (UNIV - {a} - {b}))" by auto show ?thesis proof (cases "p = 0") case True note p0=True have q_not_0: "q \ 0" proof (rule ccontr, simp) assume q: "q = 0" have "d = 0" using pib by (metis True q add.right_neutral mult.commute mult_zero_right) hence "A $ a $ j = 0 \ A $ b $ j = 0" by (metis aj d_dvd_a dvd_0_left_iff) thus False using aj by auto qed have d_not_0: "d \ 0" by (metis aj d_dvd_a dvd_0_left_iff) have qb_not_0: "q *(-?b) \ 0" by (metis d_not_0 mult_cancel_left1 neg_equal_0_iff_equal no_zero_divisors p0 pa_bq_d q_not_0 right_minus) have "det (interchange_rows ?B a b) = (\i\UNIV. (interchange_rows ?B a b) $ i $ i)" proof (rule det_upperdiagonal) fix i ja::'rows assume ja_i: "ja = -1" proof - define f where "f i = interchange_rows (bezout_matrix A a b j bezout) a b $ i $ i" for i have prod_rw: "prod f (insert a (UNIV - {a} - {b})) = f a * prod f (UNIV - {a} - {b})" by (rule prod.insert, simp_all) have prod1: "prod f (UNIV - {a} - {b}) = 1" by (rule prod.neutral) (simp add: f_def interchange_rows_def bezout_matrix_def Let_def) have "prod f UNIV = prod f (insert b (insert a (UNIV - {a} - {b})))" using UNIV_rw by simp also have "... = f b * prod f (insert a (UNIV - {a} - {b}))" proof (rule prod.insert, simp) show "b \ insert a (UNIV - {a} - {b})" using a_not_b by auto qed also have "... = f b * f a" unfolding prod_rw prod1 by auto also have "... = q * u" using a_not_b using bz [symmetric] unfolding f_def interchange_rows_def bezout_matrix_def Let_def by auto also have "... 
= -1" proof - let ?r = "q * u" have du_b: " d * u = -?b" using pib by auto hence "q * (-?b) = d * ?r" by (metis mult.left_commute) also have "... = (p * ?a + ?b * q) * ?r" unfolding pa_bq_d by auto also have "... = ?b * q * ?r" using True by auto also have "... = q * (-?b) * (-?r)" by auto finally show ?thesis using qb_not_0 unfolding mult_cancel_left1 by (metis minus_minus) qed finally show ?thesis unfolding f_def . qed finally have det_inter_1: "det (interchange_rows ?B a b) = - 1" . have "det (bezout_matrix A a b j bezout) = - 1 * det (interchange_rows ?B a b)" unfolding det_interchange_rows using a_not_b by auto thus ?thesis unfolding det_inter_1 by simp next case False define mult_b_dp where "mult_b_dp = mult_row ?B b (d * p)" define sum_ab where "sum_ab = row_add mult_b_dp b a ?b" have "det (sum_ab) = prod (\i. sum_ab $ i $ i) UNIV" proof (rule det_upperdiagonal) fix i j::'rows assume j_less_i: "j < i" have "d * p * u + ?b * p = 0" using pib by (metis eq_neg_iff_add_eq_0 mult_minus_left semiring_normalization_rules(16)) thus "sum_ab $ i $ j = 0" unfolding sum_ab_def mult_b_dp_def unfolding row_add_def unfolding mult_row_def bezout_matrix_def using a_not_b j_less_i a_less_b unfolding bz [symmetric] by auto qed also have "... = d * p" proof - define f where "f i = sum_ab $ i $ i" for i have prod_rw: "prod f (insert a (UNIV - {a} - {b})) = f a * prod f (UNIV - {a} - {b})" by (rule prod.insert, simp_all) have prod1: "prod f (UNIV - {a} - {b}) = 1" by (rule prod.neutral) (simp add: f_def sum_ab_def row_add_def mult_b_dp_def mult_row_def bezout_matrix_def Let_def) have ap_bq_d: "A $ a $ j * p + A $ b $ j * q = d" by (metis mult.commute pa_bq_d) have "prod f UNIV = prod f (insert b (insert a (UNIV - {a} - {b})))" using UNIV_rw by simp also have "... = f b * prod f (insert a (UNIV - {a} - {b}))" proof (rule prod.insert, simp) show "b \ insert a (UNIV - {a} - {b})" using a_not_b by auto qed also have "... = f b * f a" unfolding prod_rw prod1 by auto also have "... = (d * p * v + ?b * q) * p" unfolding f_def sum_ab_def row_add_def mult_b_dp_def mult_row_def bezout_matrix_def unfolding bz [symmetric] using a_not_b by auto also have "... = d * p" using pib ap_bq_d semiring_normalization_rules(16) by auto finally show ?thesis unfolding f_def . qed finally have "det (sum_ab) = d * p" . moreover have "det (sum_ab) = d * p * det ?B" unfolding sum_ab_def unfolding det_row_add'[OF not_sym[OF a_not_b]] unfolding mult_b_dp_def unfolding det_mult_row .. 
ultimately show ?thesis by (metis (erased, hide_lams) False aj d_dvd_a dvd_0_left_iff mult_cancel_left1 mult_eq_0_iff) qed qed lemma invertible_bezout_matrix: fixes A::"'a::{bezout_ring_div}^'cols^'rows::{finite,wellorder}" assumes ib: "is_bezout_ext bezout" and a_less_b: "a < b" and aj: "A $ a $ j \ 0" shows "invertible (bezout_matrix A a b j bezout)" unfolding invertible_iff_is_unit unfolding det_bezout_matrix[OF assms] by simp lemma echelon_form_upt_k_bezout_matrix: fixes A k and i::"'b::mod_type" assumes e: "echelon_form_upt_k A k" and ib: "is_bezout_ext bezout" and Aik_0: "A $ i $ from_nat k \ 0" and zero_i: "is_zero_row_upt_k i k A" and i_less_n: "i n" using i_less_n by simp have zero_n: "is_zero_row_upt_k n k A" by (metis assms(5) e echelon_form_upt_k_condition1 zero_i) have zero_i2: "is_zero_row_upt_k i (to_nat (from_nat k::'c)) A" using zero_i by (metis k ncols_def to_nat_from_nat_id) have zero_n2: "is_zero_row_upt_k n (to_nat (from_nat k::'c)) A" using zero_n by (metis k ncols_def to_nat_from_nat_id) show ?thesis unfolding echelon_form_upt_k_def proof (auto) fix ia j assume ia: "is_zero_row_upt_k ia k ?B" and ia_j: "ia < j" have ia_A: "is_zero_row_upt_k ia k A" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'c assume j_less_k: "to_nat j < k" have "A $ ia $ j = ?B $ ia $ j" proof (rule bezout_matrix_preserves_previous_columns [symmetric, OF ib i_not_n Aik_0 _ zero_i2 zero_n2]) show "j < from_nat k" using j_less_k k by (metis from_nat_mono from_nat_to_nat_id ncols_def) qed also have "... = 0" by (metis ia is_zero_row_upt_k_def j_less_k) finally show "A $ ia $ j = 0" . qed show "is_zero_row_upt_k j k ?B" proof (unfold is_zero_row_upt_k_def, clarify) fix ja::'c assume ja_less_k: "to_nat ja < k" have "?B $ j $ ja = A $ j $ ja" proof (rule bezout_matrix_preserves_previous_columns[OF ib i_not_n Aik_0 _ zero_i2 zero_n2]) show "ja < from_nat k" using ja_less_k k by (metis from_nat_mono from_nat_to_nat_id ncols_def) qed also have "... = 0" by (metis e echelon_form_upt_k_condition1 ia_A ia_j is_zero_row_upt_k_def ja_less_k) finally show "?B $ j $ ja = 0" . qed next fix ia j assume ia_j: "ia < j" and not_zero_ia_B: "\ is_zero_row_upt_k ia k ?B" and not_zero_j_B: "\ is_zero_row_upt_k j k ?B" obtain na where na: "to_nat na < k" and Biana: "?B $ ia $ na \ 0" using not_zero_ia_B unfolding is_zero_row_upt_k_def by auto obtain na2 where na2: "to_nat na2 < k" and Bjna2: "?B $ j $ na2 \ 0" using not_zero_j_B unfolding is_zero_row_upt_k_def by auto have na_less_fn: "na < from_nat k" by (metis from_nat_mono from_nat_to_nat_id k na ncols_def) have "A $ ia $ na = ?B $ ia $ na" by (rule bezout_matrix_preserves_previous_columns [symmetric, OF ib i_not_n Aik_0 na_less_fn zero_i2 zero_n2]) also have "... \ 0" using Biana by simp finally have A: "A $ ia $ na \ 0" . have na_less_fn2: "na2 < from_nat k" by (metis from_nat_mono from_nat_to_nat_id k na2 ncols_def) have "A $ j $ na2 = ?B $ j $ na2" by (rule bezout_matrix_preserves_previous_columns [symmetric, OF ib i_not_n Aik_0 na_less_fn2 zero_i2 zero_n2]) also have "... \ 0" using Bjna2 by simp finally have A2: "A $ j $ na2 \ 0" . have not_zero_ia_A: "\ is_zero_row_upt_k ia k A" unfolding is_zero_row_upt_k_def using na A by auto have not_zero_j_A: "\ is_zero_row_upt_k j k A" unfolding is_zero_row_upt_k_def using na2 A2 by auto obtain na where A: "A $ ia $ na \ 0" and na_less_k: "to_nat na 0" and na2_less_k: "to_nat na2 0) = (LEAST na. A $ ia $ na \ 0)" proof (rule Least_equality) have "?B $ ia $ (LEAST na. A $ ia $ na \ 0) = A $ ia $ (LEAST na. 
A $ ia $ na \ 0)" proof (rule bezout_matrix_preserves_previous_columns[OF ib i_not_n Aik_0 _ zero_i2 zero_n2]) show "(LEAST na. A $ ia $ na \ 0) < from_nat k" using Least_le A na_less_fn by fastforce qed also have "... \ 0" by (metis (mono_tags) A LeastI) finally show "?B $ ia $ (LEAST na. A $ ia $ na \ 0) \ 0" . fix y assume B_ia_y: "?B $ ia $ y \ 0" show "(LEAST na. A $ ia $ na \ 0) \ y" proof (cases "y 0" using B_ia_y . finally show "A $ ia $ y \ 0" . qed next case False show ?thesis using False by (metis (mono_tags) A Least_le dual_order.strict_iff_order le_less_trans na_less_fn not_le) qed qed have least_eq2: "(LEAST na. ?B $ j $ na \ 0) = (LEAST na. A $ j $ na \ 0)" proof (rule Least_equality) have "?B $ j $ (LEAST na. A $ j $ na \ 0) = A $ j $ (LEAST na. A $ j $ na \ 0)" proof (rule bezout_matrix_preserves_previous_columns[OF ib i_not_n Aik_0 _ zero_i2 zero_n2]) show "(LEAST na. A $ j $ na \ 0) < from_nat k" using Least_le A2 na_less_fn2 by fastforce qed also have "... \ 0" by (metis (mono_tags) A2 LeastI) finally show "?B $ j $ (LEAST na. A $ j $ na \ 0) \ 0" . fix y assume B_ia_y: "?B $ j $ y \ 0" show "(LEAST na. A $ j $ na \ 0) \ y" proof (cases "y 0" using B_ia_y . finally show "A $ j $ y \ 0" . qed next case False show ?thesis using False by (metis (mono_tags) A2 Least_le dual_order.strict_iff_order le_less_trans na_less_fn2 not_le) qed qed show "(LEAST na. ?B $ ia $ na \ 0) < (LEAST na. ?B $ j $ na \ 0)" unfolding least_eq least_eq2 by (rule echelon_form_upt_k_condition2[OF e ia_j not_zero_ia_A not_zero_j_A]) qed qed lemma bezout_matrix_preserves_rest: assumes ib: "is_bezout_ext bezout" and a_not_n: "a\n" and i_not_n: "i\n" and a_not_i: "a\i" and Aik_0: "A $ i $ k \ 0" and zero_ikA: "is_zero_row_upt_k i (to_nat k) A" shows "(bezout_matrix A i n k bezout ** A) $ a $ b = A $ a $ b" unfolding matrix_matrix_mult_def unfolding bezout_matrix_def Let_def proof (auto simp add: a_not_n i_not_n a_not_i) have UNIV_rw: "UNIV = insert a (UNIV - {a})" by auto let ?f="(\k. (if a = k then 1 else 0) * A $ k $ b)" have sum0: "sum ?f (UNIV - {a}) = 0" by (rule sum.neutral, auto) have "sum ?f UNIV = sum ?f (insert a (UNIV - {a}))" using UNIV_rw by simp also have "... = ?f a + sum ?f (UNIV - {a})" by (rule sum.insert, simp_all) also have "... = ?f a" using sum0 by auto also have "... = A $ a $ b" by simp finally show "sum ?f UNIV = A $ a $ b" . qed text\Code equations to execute the bezout matrix\ definition "bezout_matrix_row A a b j bezout x = (let (p, q, u, v, d) = bezout (A $ a $ j) (A $ b $ j) in vec_lambda (\y. if x = a \ y = a then p else if x = a \ y = b then q else if x = b \ y = a then u else if x = b \ y = b then v else if x = y then 1 else 0))" lemma bezout_matrix_row_code [code abstract]: "vec_nth (bezout_matrix_row A a b j bezout x) = (let (p, q, u, v, d) = bezout (A $ a $ j) (A $ b $ j) in (\y. 
if x = a \ y = a then p else if x = a \ y = b then q else if x = b \ y = a then u else if x = b \ y = b then v else if x = y then 1 else 0))" unfolding bezout_matrix_row_def by (cases "bezout (A $ a $ j) (A $ b $ j)") auto lemma [code abstract]: "vec_nth (bezout_matrix A a b j bezout) = bezout_matrix_row A a b j bezout" unfolding bezout_matrix_def unfolding bezout_matrix_row_def[abs_def] Let_def by (cases "bezout (A $ a $ j) (A $ b $ j)") auto subsubsection\Properties of the bezout iterate function\ lemma bezout_iterate_not_zero: assumes Aik_0: "A $ i $ from_nat k \ 0" and n: "n n" and ib: "is_bezout_ext bezout" shows "bezout_iterate A n i (from_nat k) bezout $ i $ from_nat k \ 0" using Aik_0 n a proof (induct n arbitrary: A) case 0 have "to_nat i = 0" by (metis "0.prems"(3) le_0_eq) hence i0: "i=0" by (metis to_nat_eq_0) show ?case using "0.prems"(1) unfolding i0 by auto next case (Suc n) show ?case proof (cases "Suc n = to_nat i") case True show ?thesis unfolding bezout_iterate.simps using True Suc.prems(1) by simp next case False let ?B="(bezout_matrix A i (from_nat (Suc n)) (from_nat k) bezout ** A)" have i_le_n: "to_nat i < Suc n" using Suc.prems(3) False by auto have "bezout_iterate A (Suc n) i (from_nat k) bezout $ i $ from_nat k = bezout_iterate ?B n i (from_nat k) bezout $ i $ from_nat k" unfolding bezout_iterate.simps using i_le_n by auto also have "... \ 0" proof (rule Suc.hyps, rule bezout_matrix_not_zero[OF ib]) show "i \ from_nat (Suc n)" by (metis False Suc.prems(2) nrows_def to_nat_from_nat_id) show "A $ i $ from_nat k \ 0" by (rule Suc.prems(1)) show "n < nrows ?B" by (metis Suc.prems(2) Suc_lessD nrows_def) show "to_nat i \ n" using i_le_n by auto qed finally show ?thesis . qed qed lemma bezout_iterate_preserves: fixes A k and i::"'b::mod_type" assumes e: "echelon_form_upt_k A k" and ib: "is_bezout_ext bezout" and Aik_0: "A $ i $ from_nat k \ 0" and n: "n n" and k: "k from_nat (Suc n)" by (metis False Suc.prems(2) nrows_def to_nat_from_nat_id) let ?B="(bezout_matrix A i (from_nat (Suc n)) (from_nat k) bezout ** A)" have i_le_n: "to_nat i < Suc n" by (metis False Suc.prems(3) le_imp_less_or_eq) have "bezout_iterate A (Suc n) i (from_nat k) bezout $ a $ b = bezout_iterate ?B n i (from_nat k) bezout $ a $ b" unfolding bezout_iterate.simps using i_le_n by auto also have "... = ?B $ a $ b" proof (rule Suc.hyps) show "?B $ i $ from_nat k \ 0" by (metis False Suc.prems(1) Suc.prems(2) bezout_matrix_not_zero ib nrows_def to_nat_from_nat_id) show "n < nrows ?B" by (metis Suc.prems(2) Suc_lessD nrows_def) show "k < ncols ?B" by (metis Suc.prems(4) ncols_def) show "to_nat i \ n" using i_le_n by auto show "is_zero_row_upt_k i k ?B" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'c assume j_k: "to_nat j < k" have j_k2: "j < from_nat k" by (metis from_nat_mono from_nat_to_nat_id j_k k ncols_def) have "?B $ i $ j = A $ i $ j" proof (rule bezout_matrix_preserves_previous_columns[OF ib i_not_fn Suc.prems(1) j_k2], unfold to_nat_from_nat_id[OF Suc.prems(4)[unfolded ncols_def]]) show "is_zero_row_upt_k i k A" by (rule Suc.prems(5)) show "is_zero_row_upt_k (from_nat (Suc n)) k A" using echelon_form_upt_k_condition1[OF Suc.prems(6) Suc.prems(5)] by (metis Suc.prems(2) from_nat_mono from_nat_to_nat_id i_le_n nrows_def) qed also have "... = 0" by (metis Suc.prems(5) is_zero_row_upt_k_def j_k) finally show "?B $ i $ j = 0" . 
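(* Informal reading of this induction step, under the recursive unfolding of bezout_iterate
   used above: one pass replaces A by ?B = bezout_matrix A i (from_nat (Suc n)) (from_nat k)
   bezout ** A and then iterates on ?B with n.  To apply the induction hypothesis to ?B every
   premise must be re-established for ?B; the step just finished checks that row i of ?B is
   still zero in all columns before k.  The reason is purely arithmetic: in such a column j
   the new entries of rows i and from_nat (Suc n) are linear combinations of the old entries
   of those two rows, with coefficients p, q and u, v from the Bezout tuple, and both old
   entries are 0 by the zero-row assumptions, so e.g. p * 0 + q * 0 = 0. *)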
qed show "echelon_form_upt_k ?B k" proof (rule echelon_form_upt_k_bezout_matrix) show "echelon_form_upt_k A k" by (metis Suc.prems(6)) show "is_bezout_ext bezout" by (rule ib) show "A $ i $ from_nat k \ 0" by (rule Suc.prems(1)) show "is_zero_row_upt_k i k A" by (rule Suc.prems(5)) have "(from_nat (to_nat i)::'b)\from_nat (Suc n)" by (rule from_nat_mono'[OF Suc.prems(3) Suc.prems(2)[unfolded nrows_def]]) thus "i < from_nat (Suc n)" using i_not_fn by auto show "k < ncols A" by (rule Suc.prems(4)) qed qed also have "... = A $ a $ b" proof (rule bezout_matrix_preserves_previous_columns[OF ib]) show "i \ from_nat (Suc n)" by (metis False Suc.prems(2) nrows_def to_nat_from_nat_id) show "A $ i $ from_nat k \ 0" by (rule Suc.prems(1)) show "b < from_nat k" by (rule assms(5)) show "is_zero_row_upt_k i (to_nat (from_nat k::'c)) A" unfolding to_nat_from_nat_id[OF Suc.prems(4)[unfolded ncols_def]] by (rule Suc.prems(5)) show "is_zero_row_upt_k (from_nat (Suc n)) (to_nat (from_nat k::'c)) A" unfolding to_nat_from_nat_id[OF Suc.prems(4)[unfolded ncols_def]] by (metis Suc.prems(2) Suc.prems(5) Suc.prems(6) add_to_nat_def echelon_form_upt_k_condition1 from_nat_mono i_le_n monoid_add_class.add.right_neutral nrows_def to_nat_0) qed finally show ?thesis . qed qed lemma bezout_iterate_preserves_below_n: assumes e: "echelon_form_upt_k A k" and ib: "is_bezout_ext bezout" and Aik_0: "A $ i $ from_nat k \ 0" and n: "n n" and zero_upt_k_i: "is_zero_row_upt_k i k A" shows "bezout_iterate A n i (from_nat k) bezout $ a $ b = A $ a $ b" using Aik_0 n i_le_n k zero_upt_k_i e n_less_a proof (induct n arbitrary: A) case 0 show ?case unfolding bezout_iterate.simps .. next case (Suc n) show ?case proof (cases "Suc n = to_nat i") case True show ?thesis unfolding bezout_iterate.simps using True by simp next case False have i_not_fn:" i \ from_nat (Suc n)" by (metis False Suc.prems(2) nrows_def to_nat_from_nat_id) let ?B="(bezout_matrix A i (from_nat (Suc n)) (from_nat k) bezout ** A)" have i_le_n: "to_nat i < Suc n" by (metis False Suc.prems(3) le_imp_less_or_eq) have zero_ikB: "is_zero_row_upt_k i k ?B" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'b assume j_k: "to_nat j < k" have j_k2: "j < from_nat k" by (metis from_nat_mono from_nat_to_nat_id j_k k ncols_def) have "?B $ i $ j = A $ i $ j" proof (rule bezout_matrix_preserves_previous_columns[OF ib i_not_fn Suc.prems(1) j_k2], unfold to_nat_from_nat_id[OF Suc.prems(4)[unfolded ncols_def]]) show "is_zero_row_upt_k i k A" by (rule Suc.prems(5)) show "is_zero_row_upt_k (from_nat (Suc n)) k A" using echelon_form_upt_k_condition1[OF Suc.prems(6) Suc.prems(5)] by (metis Suc.prems(2) from_nat_mono from_nat_to_nat_id i_le_n nrows_def) qed also have "... = 0" by (metis Suc.prems(5) is_zero_row_upt_k_def j_k) finally show "?B $ i $ j = 0" . qed have "bezout_iterate A (Suc n) i (from_nat k) bezout $ a $ b = bezout_iterate ?B n i (from_nat k) bezout $ a $ b" unfolding bezout_iterate.simps using i_le_n by auto also have "... 
= ?B $ a $ b" proof (rule Suc.hyps) show "?B $ i $ from_nat k \ 0" by (metis Suc.prems(1) bezout_matrix_not_zero i_not_fn ib) show "n < nrows ?B" by (metis Suc.prems(2) Suc_lessD nrows_def) show "to_nat i \ n" by (metis i_le_n less_Suc_eq_le) show "k < ncols ?B" by (metis Suc.prems(4) ncols_def) show "is_zero_row_upt_k i k ?B" by (rule zero_ikB) show "echelon_form_upt_k ?B k" proof (rule echelon_form_upt_k_bezout_matrix[OF Suc.prems(6) ib Suc.prems(1) Suc.prems(5) _ Suc.prems(4)]) show "i < from_nat (Suc n)" by (metis (no_types) Suc.prems(7) add_to_nat_def dual_order.strict_iff_order from_nat_mono i_le_n le_less_trans monoid_add_class.add.right_neutral to_nat_0 to_nat_less_card) qed show "n < to_nat a" by (metis Suc.prems(7) Suc_lessD) qed also have "... = A $ a $ b" proof (rule bezout_matrix_preserves_rest[OF ib _ _ _ Suc.prems(1)]) show "a \ from_nat (Suc n)" by (metis Suc.prems(7) add_to_nat_def from_nat_mono less_irrefl monoid_add_class.add.right_neutral to_nat_0 to_nat_less_card) show "i \ from_nat (Suc n)" by (rule i_not_fn) show "a \ i" by (metis assms(7) n_less_a not_le) show "is_zero_row_upt_k i (to_nat (from_nat k::'b)) A" by (metis Suc.prems(4) Suc.prems(5) ncols_def to_nat_from_nat_id) qed finally show ?thesis . qed qed lemma bezout_iterate_zero_column_k: fixes A::"'a::bezout_domain^'cols::{mod_type}^'rows::{mod_type}" assumes e: "echelon_form_upt_k A k" and ib: "is_bezout_ext bezout" and Aik_0: "A $ i $ from_nat k \ 0" and n: "nn" and zero_upt_k_i: "is_zero_row_upt_k i k A" shows "bezout_iterate A n i (from_nat k) bezout $ a $ from_nat k = 0" using e Aik_0 n k a_n zero_upt_k_i proof (induct n arbitrary: A) case 0 show ?case unfolding bezout_iterate.simps - using "0.prems"(5) i_le_a to_nat_from_nat to_nat_le by auto + using "0.prems"(5) i_le_a to_nat_from_nat to_nat_le to_nat_mono by fastforce next case (Suc n) show ?case proof (cases "Suc n = to_nat i") case True show ?thesis unfolding bezout_iterate.simps using True by (metis Suc.prems(5) i_le_a leD to_nat_mono) next case False have i_not_fn:" i \ from_nat (Suc n)" by (metis False Suc.prems(3) nrows_def to_nat_from_nat_id) let ?B="(bezout_matrix A i (from_nat (Suc n)) (from_nat k) bezout ** A)" have i_le_n: "to_nat i < Suc n" by (metis Suc.prems(5) i_le_a le_less_trans not_le to_nat_mono) have zero_ikB: "is_zero_row_upt_k i k ?B" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j_k: "to_nat j < k" have j_k2: "j < from_nat k" using from_nat_mono[OF j_k Suc.prems(4)[unfolded ncols_def]] unfolding from_nat_to_nat_id . have "?B $ i $ j = A $ i $ j" proof (rule bezout_matrix_preserves_previous_columns[OF ib i_not_fn Suc.prems(2) j_k2], unfold to_nat_from_nat_id[OF Suc.prems(4)[unfolded ncols_def]]) show "is_zero_row_upt_k i k A" by (rule Suc.prems(6)) show "is_zero_row_upt_k (from_nat (Suc n)) k A" using echelon_form_upt_k_condition1[OF Suc.prems(1)] by (metis (mono_tags) Suc.prems(3) Suc.prems(6) add_to_nat_def from_nat_mono i_le_n monoid_add_class.add.right_neutral nrows_def to_nat_0) qed also have "... = 0" by (metis Suc.prems(6) is_zero_row_upt_k_def j_k) finally show "?B $ i $ j = 0" . qed have "bezout_iterate A (Suc n) i (from_nat k) bezout $ a $ (from_nat k) = bezout_iterate ?B n i (from_nat k) bezout $ a $ (from_nat k)" unfolding bezout_iterate.simps using i_le_n by auto also have "... 
= 0" proof (cases "to_nat a = Suc n") case True have "bezout_iterate ?B n i (from_nat k) bezout $ a $ (from_nat k) = ?B $ a $ from_nat k" proof (rule bezout_iterate_preserves_below_n[OF _ ib]) show "echelon_form_upt_k ?B k" by (metis (erased, hide_lams) Suc.prems(1) Suc.prems(2) Suc.prems(4) Suc.prems(6) True echelon_form_upt_k_bezout_matrix from_nat_to_nat_id i_le_a ib) show "?B $ i $ from_nat k \ 0" by (metis Suc.prems(2) bezout_matrix_not_zero i_not_fn ib) show "n < nrows ?B" by (metis Suc.prems(3) Suc_lessD nrows_def) show "n < to_nat a" by (metis True lessI) show "k < ncols ?B" by (metis Suc.prems(4) ncols_def) show "to_nat i \ n" by (metis i_le_n less_Suc_eq_le) show "is_zero_row_upt_k i k ?B" by (rule zero_ikB) qed also have "... = 0" by (metis Suc.prems(2) True bezout_matrix_works2 i_not_fn ib to_nat_from_nat) finally show ?thesis . next case False show ?thesis proof (rule Suc.hyps) show "echelon_form_upt_k ?B k" proof (rule echelon_form_upt_k_bezout_matrix [OF Suc.prems(1) ib Suc.prems(2) Suc.prems(6) _ Suc.prems(4)]) show "i < from_nat (Suc n)" by (metis (mono_tags) Suc.prems(3) from_nat_mono from_nat_to_nat_id i_le_n nrows_def) qed show "?B $ i $ from_nat k \ 0" by (metis Suc.prems(2) bezout_matrix_not_zero i_not_fn ib) show "n < nrows ?B" by (metis Suc.prems(3) Suc_lessD nrows_def) show "k < ncols ?B" by (metis Suc.prems(4) ncols_def) show "to_nat a \ n" by (metis False Suc.prems(5) le_SucE) show "is_zero_row_upt_k i k ?B" by (rule zero_ikB) qed qed finally show ?thesis . qed qed subsubsection\Proving the correctness\ lemma condition1_index_le_zero_row: fixes A k defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" assumes e: "echelon_form_upt_k A k" and "is_zero_row_upt_k a (Suc k) A" shows "from_nat i\a" proof (rule ccontr) have zero_ik: "is_zero_row_upt_k a k A" by (metis assms(3) is_zero_row_upt_k_le) assume a: "\ from_nat i \ (a::'a)" hence ai: "a < from_nat i" by simp show False proof (cases "(from_nat i::'a)=0") case True thus ?thesis using ai least_mod_type[of a] unfolding True from_nat_0 by auto next case False from a have "a \ from_nat i - 1" by (intro leI) (auto dest: le_Suc) also from False have "i \ 0" by (intro notI) (simp_all add: from_nat_0) hence "i = (i - 1) + 1" by simp also have "from_nat \ = from_nat (i - 1) + 1" by (rule from_nat_suc) finally have ai2: "a \ from_nat (i - 1)" by simp have "i = to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1" using i False by (metis from_nat_0) hence "i - 1 = to_nat (GREATEST n. \ is_zero_row_upt_k n k A)" by simp hence "from_nat (i - 1) = (GREATEST n. \ is_zero_row_upt_k n k A)" using from_nat_to_nat_id by auto hence "\ is_zero_row_upt_k (from_nat (i - 1)) k A" using False GreatestI_ex i by (metis from_nat_to_nat_id to_nat_0) moreover have "is_zero_row_upt_k (from_nat (i - 1)) k A" using echelon_form_upt_k_condition1[OF e zero_ik] using ai2 zero_ik by (cases "a = from_nat (i - 1)", auto) ultimately show False by contradiction qed qed lemma condition1_part1: fixes A k defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" assumes e: "echelon_form_upt_k A k" and a: "is_zero_row_upt_k a (Suc k) A" and ab: "a < b" and all_zero: "\m\from_nat i. 
A $ m $ from_nat k = 0" shows "is_zero_row_upt_k b (Suc k) A" proof (rule is_zero_row_upt_k_suc) have zero_ik: "is_zero_row_upt_k a k A" by (metis assms(3) is_zero_row_upt_k_le) show "is_zero_row_upt_k b k A" using echelon_form_upt_k_condition1[OF e zero_ik] using ab by auto have "from_nat i\a" using condition1_index_le_zero_row[OF e a] all_zero unfolding i by auto thus "A $ b $ from_nat k = 0" using all_zero ab by auto qed lemma condition1_part2: fixes A k defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" assumes e: "echelon_form_upt_k A k" and a: "is_zero_row_upt_k a (Suc k) A" and ab: "a < b" and i_last: "i = nrows A" and all_zero: "\m>from_nat (nrows A). A $ m $ from_nat k = 0" shows "is_zero_row_upt_k b (Suc k) A" proof - have zero_ik: "is_zero_row_upt_k a k A" by (metis assms(3) is_zero_row_upt_k_le) have i_le_a: "from_nat i\a" using condition1_index_le_zero_row[OF e a] unfolding i . have "(from_nat (nrows A)::'a) = 0" unfolding nrows_def using from_nat_CARD . thus ?thesis using ab i_last i_le_a by (metis all_zero e echelon_form_upt_k_condition1 is_zero_row_upt_k_suc le_less_trans zero_ik) qed lemma condition1_part3: fixes A k bezout defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" defines B: "B \ fst ((echelon_form_of_column_k bezout) (A,i) k)" assumes e: "echelon_form_upt_k A k" and ib: "is_bezout_ext bezout" and a: "is_zero_row_upt_k a (Suc k) B" and "a < b" and all_zero: "\m>from_nat i. A $ m $ from_nat k = 0" and i_not_last: "i \ nrows A" and i_le_m: "from_nat i \ m" and Amk_not_0: "A $ m $ from_nat k \ 0" shows "is_zero_row_upt_k b (Suc k) A" proof (rule is_zero_row_upt_k_suc) have AB: "A = B" unfolding B echelon_form_of_column_k_def Let_def using all_zero by auto have i_le_a: "from_nat i\a" using condition1_index_le_zero_row[OF e a[unfolded AB[symmetric]]] unfolding i . show "A $ b $ from_nat k = 0" by (metis i_le_a all_zero assms(6) le_less_trans) show "is_zero_row_upt_k b k A" by (metis (poly_guards_query) AB a assms(6) e echelon_form_upt_k_condition1 is_zero_row_upt_k_le) qed lemma condition1_part4: fixes A k bezout i defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" defines B: "B\ fst ((echelon_form_of_column_k bezout) (A,i) k)" assumes e: "echelon_form_upt_k A k" assumes a: "is_zero_row_upt_k a (Suc k) A" and i_nrows: "i = nrows A" shows "is_zero_row_upt_k b (Suc k) A" proof - have eq_G: "from_nat (i - 1) = (GREATEST n. \ is_zero_row_upt_k n k A)" by (metis One_nat_def Suc_eq_plus1 i_nrows diff_Suc_Suc diff_zero from_nat_to_nat_id i nrows_not_0) hence a_le: "a\from_nat (i - 1)" by (metis One_nat_def Suc_pred i_nrows lessI not_less not_less_eq nrows_def to_nat_from_nat_id to_nat_less_card to_nat_mono zero_less_card_finite) have not_zero_G: "\ is_zero_row_upt_k (from_nat(i - 1)) k A" unfolding eq_G by (metis (mono_tags) GreatestI_ex i_nrows i nrows_not_0) hence "\ is_zero_row_upt_k a k A" by (metis a_le dual_order.strict_iff_order e echelon_form_upt_k_condition1) hence "\ is_zero_row_upt_k a (Suc k) A" by (metis is_zero_row_upt_k_le) thus ?thesis using a by contradiction qed lemma condition1_part5: fixes A::"'a::bezout_domain^'cols::{mod_type}^'rows::{mod_type}" and k bezout defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. 
\ is_zero_row_upt_k n k A)) + 1)" defines B: "B \ fst((echelon_form_of_column_k bezout) (A,i) k)" assumes ib: "is_bezout_ext bezout" and e: "echelon_form_upt_k A k" assumes zero_a_B: "is_zero_row_upt_k a (Suc k) B" and ab: "a < b" and im: "from_nat i < m" and Amk_not_0: "A $ m $ from_nat k \ 0" and not_last_row: "i \ nrows A" and k: "k 0 \ (from_nat i) \ n)) (nrows A - Suc 0) (from_nat i) (from_nat k) bezout)" proof (rule is_zero_row_upt_k_suc) let ?least="(LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)" let ?interchange="(interchange_rows A (from_nat i) ?least)" let ?bezout_iterate="(bezout_iterate ?interchange (nrows A - Suc 0) (from_nat i) (from_nat k) bezout)" have B_eq: "B = ?bezout_iterate" unfolding B echelon_form_of_column_k_def Let_def fst_conv snd_conv using im Amk_not_0 not_last_row by auto have zero_ikA: "is_zero_row_upt_k (from_nat i) k A" proof (cases "\m. is_zero_row_upt_k m k A") case True thus ?thesis by simp next case False hence i_eq: "i=to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1" unfolding i by auto show ?thesis proof (rule row_greater_greatest_is_zero, simp add: i_eq from_nat_to_nat_greatest,rule Suc_le') show " (GREATEST m. \ is_zero_row_upt_k m k A) + 1 \ 0" proof - have "\x\<^sub>1. \ x\<^sub>1 < i \ \ to_nat (GREATEST R. \ is_zero_row_upt_k R k A) < x\<^sub>1" using i_eq by linarith thus "(GREATEST m. \ is_zero_row_upt_k m k A) + 1 \ 0" by (metis One_nat_def add_Suc_right neq_iff from_nat_to_nat_greatest i_eq monoid_add_class.add.right_neutral nat.distinct(1) not_last_row nrows_def to_nat_0 to_nat_from_nat_id to_nat_less_card) qed qed qed have zero_interchange: "is_zero_row_upt_k (from_nat i) k ?interchange" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j_less_k: "to_nat j < k" have i_le_least: "from_nat i\?least" by (metis (mono_tags, lifting) Amk_not_0 LeastI2_wellorder less_imp_le im) hence zero_least_kA: "is_zero_row_upt_k ?least k A" using echelon_form_upt_k_condition1[OF e zero_ikA] by (metis (poly_guards_query) dual_order.strict_iff_order zero_ikA) have "?interchange $ from_nat i $ j = A $ ?least $ j" by simp also have "... = 0" using zero_least_kA j_less_k unfolding is_zero_row_upt_k_def by simp finally show "?interchange $ from_nat i $ j = 0" . qed have zero_a_k: "is_zero_row_upt_k a k A" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j_less_k: "to_nat j < k" have "?interchange $ a $ j = ?bezout_iterate $ a $ j" proof (rule bezout_iterate_preserves[symmetric]) show "echelon_form_upt_k ?interchange k" proof (rule echelon_form_upt_k_interchange[OF e zero_ikA Amk_not_0 _ k]) show "from_nat i \ m" using im by auto qed show "is_bezout_ext bezout" using ib . show "?interchange $ (from_nat i) $ from_nat k \ 0" by (metis (mono_tags, lifting) Amk_not_0 LeastI_ex dual_order.strict_iff_order im interchange_rows_i) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "j < from_nat k" by (metis from_nat_mono from_nat_to_nat_id j_less_k k ncols_def) show "to_nat (from_nat i::'rows) \ nrows A - Suc 0" by (metis Suc_eq_plus1 Suc_le_mono Suc_pred discrete nrows_def to_nat_less_card zero_less_card_finite) show "k < ncols ?interchange" using k unfolding ncols_def by auto show "is_zero_row_upt_k (from_nat i) k ?interchange" using zero_interchange . qed also have "... = 0" using zero_a_B j_less_k unfolding B_eq is_zero_row_upt_k_def by auto finally have *: "?interchange $ a $ j = 0" . 
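(* To transfer this fact back from ?interchange to A, the next step distinguishes three cases
   on the row a, following the behaviour of interchange_rows: the rows from_nat i and ?least
   are swapped and every other row is left untouched.  If a is one of the two swapped rows,
   then row a of A is itself zero up to column k, by zero_ikA respectively zero_least_kA
   inside zero_interchange; otherwise ?interchange $ a $ j = A $ a $ j and the equation just
   derived applies directly. *)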
show "A $ a $ j = 0" proof (cases "a=from_nat i") case True show ?thesis unfolding True using zero_ikA j_less_k unfolding is_zero_row_upt_k_def by auto next case False note a_not_i=False show ?thesis proof (cases "a=?least") case True show ?thesis using zero_interchange unfolding True is_zero_row_upt_k_def using j_less_k by auto next case False note a_not_least=False have "?interchange $ a $ j = A $ a $ j" using a_not_least a_not_i by (metis (erased, lifting) interchange_rows_preserves) thus ?thesis unfolding * .. qed qed qed hence zero_b_k: "is_zero_row_upt_k b k A" by (metis ab e echelon_form_upt_k_condition1) have i_le_a: "from_nat i\a" unfolding i proof (auto simp add: from_nat_to_nat_greatest from_nat_0) show "0 \ a" by (metis least_mod_type) fix m assume m: "\ is_zero_row_upt_k m k A" have "(GREATEST n. \ is_zero_row_upt_k n k A) < a" by (metis (no_types, lifting) GreatestI_ex neq_iff e echelon_form_upt_k_condition1 m zero_a_k) thus "(GREATEST n. \ is_zero_row_upt_k n k A) + 1 \ a" by (metis le_Suc) qed have i_not_b: "from_nat i \ b" using i_le_a ab by simp show "is_zero_row_upt_k b k ?bezout_iterate" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j_less_k: "to_nat j < k" have "?bezout_iterate $ b $ j = ?interchange $ b $ j" proof (rule bezout_iterate_preserves) show "echelon_form_upt_k ?interchange k" proof (rule echelon_form_upt_k_interchange[OF e zero_ikA Amk_not_0 _ k]) show "from_nat i \ m" using im by auto qed show "is_bezout_ext bezout" using ib . show "?interchange $ from_nat i $ from_nat k \ 0" by (metis (mono_tags, lifting) Amk_not_0 LeastI_ex dual_order.strict_iff_order im interchange_rows_i) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "j < from_nat k" by (metis from_nat_mono from_nat_to_nat_id j_less_k k ncols_def) show "to_nat (from_nat i::'rows) \ nrows A - Suc 0" by (metis Suc_eq_plus1 Suc_le_mono Suc_pred discrete nrows_def to_nat_less_card zero_less_card_finite) show "k < ncols ?interchange" using k unfolding ncols_def by auto show "is_zero_row_upt_k (from_nat i) k ?interchange" by (rule zero_interchange) qed also have "... = A $ b $ j" proof (cases "b=?least") case True have "?interchange $ b $ j = A $ (from_nat i) $ j" using True by auto also have "... = A $ b $ j" using zero_b_k zero_ikA j_less_k unfolding is_zero_row_upt_k_def by auto finally show ?thesis . next case False show ?thesis using False using interchange_rows_preserves[OF i_not_b] by (metis (no_types, lifting)) qed also have "... = 0" using zero_b_k j_less_k unfolding is_zero_row_upt_k_def by auto finally show "?bezout_iterate $ b $ j = 0" . 
qed show "?bezout_iterate $ b $ from_nat k = 0" proof (rule bezout_iterate_zero_column_k[OF _ ib]) show "echelon_form_upt_k ?interchange k" proof (rule echelon_form_upt_k_interchange[OF e zero_ikA Amk_not_0 _ k]) show "from_nat i \ m" using im by auto qed show "?interchange $ from_nat i $ from_nat k \ 0" by (metis (mono_tags, lifting) Amk_not_0 LeastI_ex dual_order.strict_iff_order im interchange_rows_i) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "from_nat i < b" by (metis ab i_le_a le_less_trans) show "k < ncols ?interchange" by (metis (full_types, lifting) k ncols_def) show "to_nat b \ nrows A - Suc 0" by (metis Suc_pred leD not_less_eq_eq nrows_def to_nat_less_card zero_less_card_finite) show "is_zero_row_upt_k (from_nat i) k ?interchange" by (rule zero_interchange) qed qed lemma condition2_part1: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" and k bezout i defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" defines B:"B \ fst ((echelon_form_of_column_k bezout) (A,i) k)" assumes e: "echelon_form_upt_k A k" and ab: "a < b" and not_zero_aB: "\ is_zero_row_upt_k a (Suc k) B" and not_zero_bB: "\ is_zero_row_upt_k b (Suc k) B" and all_zero: "\m\from_nat i. A $ m $ from_nat k = 0" shows "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" proof - have B_eq_A: "B=A" unfolding B echelon_form_of_column_k_def Let_def fst_conv snd_conv using all_zero by auto show ?thesis proof (cases "\m. is_zero_row_upt_k m k A") case True have i0: "i = 0" unfolding i using True by simp have "is_zero_row_upt_k a k B" using True unfolding B_eq_A by auto moreover have "B $ a $ from_nat k = 0" using all_zero unfolding i0 from_nat_0 by (metis B_eq_A least_mod_type) ultimately have "is_zero_row_upt_k a (Suc k) B" by (rule is_zero_row_upt_k_suc) thus ?thesis using not_zero_aB by contradiction next case False note not_all_zero=False have i2: "i = to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1" unfolding i using False by auto have not_zero_aA: "\ is_zero_row_upt_k a k A" by (metis (erased, lifting) B_eq_A GreatestI_ex add_to_nat_def all_zero neq_iff e echelon_form_upt_k_condition1 i2 is_zero_row_upt_k_suc le_Suc not_all_zero not_zero_aB to_nat_1) moreover have not_zero_bA: "\ is_zero_row_upt_k b k A" by (metis (erased, lifting) B_eq_A GreatestI_ex add_to_nat_def all_zero neq_iff e echelon_form_upt_k_condition1 i2 is_zero_row_upt_k_suc le_Suc not_all_zero not_zero_bB to_nat_1) ultimately show ?thesis using echelon_form_upt_k_condition2[OF e ab] by simp qed qed lemma condition2_part2: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" and k bezout i defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" assumes e: "echelon_form_upt_k A k" and ab: "a < b" and all_zero: "\m>from_nat (nrows A). A $ m $ from_nat k = 0" and i_nrows: "i = nrows A" shows "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" proof - have not_all_zero: "\ (\m. is_zero_row_upt_k m k A)" by (metis i i_nrows nrows_not_0) have "(GREATEST m. \ is_zero_row_upt_k m k A) + 1 = 0" by (metis (mono_tags, lifting) add_0_right One_nat_def Suc_le' add_Suc_right i i_nrows less_not_refl less_trans_Suc nrows_def to_nat_less_card to_nat_mono zero_less_card_finite) hence g_minus_1: "(GREATEST m. \ is_zero_row_upt_k m k A) = - 1" by (simp add: a_eq_minus_1) have "\ is_zero_row_upt_k a k A" proof (rule greatest_ge_nonzero_row'[OF e _ not_all_zero]) show "a \ (GREATEST m. 
\ is_zero_row_upt_k m k A)" by (simp add: Greatest_is_minus_1 g_minus_1) qed moreover have "\ is_zero_row_upt_k b k A" proof (rule greatest_ge_nonzero_row'[OF e _ not_all_zero]) show "b \ (GREATEST m. \ is_zero_row_upt_k m k A)" by (simp add: Greatest_is_minus_1 g_minus_1) qed ultimately show ?thesis using echelon_form_upt_k_condition2[OF e ab] by simp qed lemma condition2_part3: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" and k bezout i defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" defines B:"B \ fst ((echelon_form_of_column_k bezout) (A,i) k)" assumes e: "echelon_form_upt_k A k" and k: "k is_zero_row_upt_k a (Suc k) B" and not_zero_bB: "\ is_zero_row_upt_k b (Suc k) B" and all_zero: "\m>from_nat i. A $ m $ from_nat k = 0" and i_ma: "from_nat i \ ma" and A_ma_k: "A $ ma $ from_nat k \ 0" shows "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" proof - have B_eq_A: "B=A" unfolding B echelon_form_of_column_k_def Let_def fst_conv snd_conv using all_zero by simp have not_all_zero: "\ (\m. is_zero_row_upt_k m k A)" by (metis B_eq_A ab all_zero from_nat_0 i is_zero_row_upt_k_suc le_less_trans least_mod_type not_zero_bB) have i2: "i = to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1" unfolding i using not_all_zero by auto have not_zero_aA: "\ is_zero_row_upt_k a k A" proof - have "\x\<^sub>1 x\<^sub>2. from_nat (to_nat (x\<^sub>1::'rows) + 1) \ x\<^sub>2 \ \ x\<^sub>1 < x\<^sub>2" by (metis (no_types) add_to_nat_def le_Suc to_nat_1) moreover { assume "\ is_zero_row_upt_k b k A" hence "\ is_zero_row_upt_k a k A" using ab e echelon_form_upt_k_condition1 by blast } ultimately show "\ is_zero_row_upt_k a k A" by (metis B_eq_A greatest_less_zero_row ab all_zero le_imp_less_or_eq e i2 is_zero_row_upt_k_suc not_all_zero not_zero_aB not_zero_bB) qed show ?thesis proof (cases "\ is_zero_row_upt_k b k A") case True thus ?thesis using not_zero_aA echelon_form_upt_k_condition2[OF e ab] by simp next case False note zero_bA=False obtain v where Aav: "A $ a $ v \ 0" and v: "v 0) \ v" by (rule Least_le, simp add: Aav) have b_ge_greatest: "b>(GREATEST n. \ is_zero_row_upt_k n k A)" using False by (simp add: greatest_less_zero_row e not_all_zero) have i_eq_b: "from_nat i = b" proof (rule ccontr, cases "from_nat i < b") case True hence Abk_0: "A $ b $ from_nat k = 0" using all_zero by auto have "is_zero_row_upt_k b (Suc k) B" proof (rule is_zero_row_upt_k_suc) show "is_zero_row_upt_k b k B" using zero_bA unfolding B_eq_A by simp show "B $ b $ from_nat k = 0" using Abk_0 unfolding B_eq_A by simp qed thus False using not_zero_bB by contradiction next case False assume i_not_b: "from_nat i \ b" hence b_less_i: "from_nat i > b" using False by simp thus False using b_ge_greatest unfolding i by (metis (no_types, lifting) False Suc_less add_to_nat_def i2 i_not_b to_nat_1) qed have Abk_not_0: "A $ b $ from_nat k \ 0" using False not_zero_bB unfolding B_eq_A is_zero_row_upt_k_def by (metis B_eq_A False is_zero_row_upt_k_suc not_zero_bB) have "(LEAST n. A $ b $ n \ 0) = from_nat k" proof (rule Least_equality) show "A $ b $ from_nat k \ 0" by (rule Abk_not_0) show "\y. A $ b $ y \ 0 \ from_nat k \ y" by (metis False is_zero_row_upt_k_def k ncols_def not_less to_nat_from_nat_id to_nat_mono) qed thus ?thesis using least_v v by auto qed qed lemma condition2_part4: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" and k bezout i defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. 
\ is_zero_row_upt_k n k A)) + 1)" assumes e: "echelon_form_upt_k A k" and ab: "a < b" and i_nrows: "i = nrows A" shows "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" proof - have not_all_zero: "\ (\m. is_zero_row_upt_k m k A)" by (metis i_nrows i nrows_not_0) then have "i = to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1" by (simp add: i) then have "nrows A = to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1" by (simp add: i_nrows) then have "CARD('rows) = mod_type_class.to_nat (GREATEST n. \ is_zero_row_upt_k n k A) + 1" unfolding nrows_def . then have "(GREATEST n. \ is_zero_row_upt_k n k A) + 1 = 0" using to_nat_plus_one_less_card by auto hence g: "(GREATEST n. \ is_zero_row_upt_k n k A) = -1" by (simp add: a_eq_minus_1) have "\ is_zero_row_upt_k a k A" proof (rule greatest_ge_nonzero_row'[OF e _ not_all_zero]) show "a \ (GREATEST m. \ is_zero_row_upt_k m k A)" by (simp add: Greatest_is_minus_1 g) qed moreover have "\ is_zero_row_upt_k b k A" proof (rule greatest_ge_nonzero_row'[OF e _ not_all_zero]) show "b \ (GREATEST m. \ is_zero_row_upt_k m k A)" by (simp add: Greatest_is_minus_1 g) qed ultimately show ?thesis using echelon_form_upt_k_condition2[OF e ab] by simp qed lemma condition2_part5: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" and k bezout i defines i:"i\(if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" defines B:"B \ fst ((echelon_form_of_column_k bezout) (A,i) k)" assumes ib: "is_bezout_ext bezout" and e: "echelon_form_upt_k A k" and k: "k is_zero_row_upt_k a (Suc k) B" and not_zero_bB: "\ is_zero_row_upt_k b (Suc k) B" and i_m:"from_nat i < m" and A_mk: "A $ m $ from_nat k \ 0" and i_not_nrows: "i \ nrows A" shows "(LEAST n. B $ a $ n \ 0) < (LEAST n. B $ b $ n \ 0)" proof - have B_eq: "B = bezout_iterate (interchange_rows A (from_nat i) (LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)) (nrows A - Suc 0) (from_nat i) (from_nat k) bezout" unfolding B echelon_form_of_column_k_def Let_def fst_conv snd_conv using i_m A_mk i_not_nrows by auto let ?least="(LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)" let ?interchange="interchange_rows A (from_nat i) ?least" let ?greatest="(GREATEST n. \ is_zero_row_upt_k n k A)" have nrows_less: "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by auto have interchange_ik_not_zero: "?interchange $ from_nat i $ from_nat k \ 0" by (metis (mono_tags, lifting) A_mk LeastI_ex dual_order.strict_iff_order i_m interchange_rows_i) have k2: "k < ncols ?interchange" using k unfolding ncols_def by simp have to_nat_b: "to_nat b \ nrows A - Suc 0" by (metis Suc_pred leD not_less_eq_eq nrows_def to_nat_less_card zero_less_card_finite) have to_nat_from_nat_i: "to_nat (from_nat i::'rows) \ nrows A - Suc 0" using i_not_nrows unfolding nrows_def by (metis Suc_pred less_Suc_eq_le to_nat_less_card zero_less_card_finite) have not_all_zero: "\ (\m. is_zero_row_upt_k m k A)" proof (rule ccontr) assume all_zero: "\\(\m. 
is_zero_row_upt_k m k A)" hence zero_aA: "is_zero_row_upt_k a k A" and zero_bA: "is_zero_row_upt_k b k A" by auto have echelon_interchange: "echelon_form_upt_k ?interchange k" proof (rule echelon_form_upt_k_interchange[OF e _ A_mk _ k]) show "is_zero_row_upt_k (from_nat i) k A" using all_zero by auto show "from_nat i \ m" using i_m by auto qed have zero_i_interchange: "is_zero_row_upt_k (from_nat i) k ?interchange" using all_zero unfolding is_zero_row_upt_k_def by auto have zero_bB: "is_zero_row_upt_k b k B" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j: "to_nat j < k" have "B $ b $ j = ?interchange $ b $ j" proof (unfold B_eq, rule bezout_iterate_preserves [OF echelon_interchange ib interchange_ik_not_zero nrows_less _ to_nat_from_nat_i k2 zero_i_interchange]) show "j < from_nat k" using j by (metis from_nat_mono from_nat_to_nat_id k ncols_def) qed also have "... = 0" using all_zero j unfolding is_zero_row_upt_k_def interchange_rows_def by auto finally show "B $ b $ j = 0" . qed have i_not_b: "from_nat i \ b" using i all_zero ab least_mod_type by (metis leD from_nat_0) have "B $ b $ from_nat k \ 0" by (metis is_zero_row_upt_k_suc not_zero_bB zero_bB) moreover have "B $ b $ from_nat k = 0" proof (unfold B_eq, rule bezout_iterate_zero_column_k [OF echelon_interchange ib interchange_ik_not_zero nrows_less _ k2 to_nat_b zero_i_interchange]) show "from_nat i < b" by (metis all_zero antisym_conv1 from_nat_0 i i_not_b least_mod_type) qed ultimately show False by contradiction qed have i2: "i = to_nat (?greatest) + 1" unfolding i using not_all_zero by auto have g_rw: "(from_nat (to_nat ?greatest + 1)) = ?greatest + 1" by (metis add_to_nat_def to_nat_1) have zero_least_kA: "is_zero_row_upt_k ?least k A" proof (rule row_greater_greatest_is_zero) have "?greatest < from_nat i" by (metis Suc_eq_plus1 Suc_le' add_to_nat_def neq_iff from_nat_0 from_nat_mono i2 i_not_nrows not_less_eq nrows_def to_nat_1 to_nat_less_card zero_less_Suc) also have "... \ ?least" by (metis (mono_tags, lifting) A_mk LeastI_ex dual_order.strict_iff_order i_m) finally show "?greatest < ?least" . qed have zero_ik_interchange: "is_zero_row_upt_k (from_nat i) k ?interchange" by (metis (no_types, lifting) interchange_rows_i is_zero_row_upt_k_def zero_least_kA) have echelon_form_interchange: "echelon_form_upt_k ?interchange k" proof (rule echelon_form_upt_k_interchange[OF e _ A_mk _ k]) show "is_zero_row_upt_k (from_nat i) k A" by (metis (mono_tags) greatest_ge_nonzero_row' Greatest_is_minus_1 Suc_le' a_eq_minus_1 e g_rw i2 row_greater_greatest_is_zero zero_least_kA) show "from_nat i \ m" using i_m by simp qed have b_le_i: "b \ from_nat i" proof (rule ccontr) assume "\ b \ from_nat i" hence b_gr_i: "b > from_nat i" by simp have "is_zero_row_upt_k b (Suc k) B" proof (rule is_zero_row_upt_k_suc) show "B $ b $ from_nat k = 0" by (unfold B_eq, rule bezout_iterate_zero_column_k[OF echelon_form_interchange ib interchange_ik_not_zero nrows_less b_gr_i k2 to_nat_b zero_ik_interchange]) show "is_zero_row_upt_k b k B" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j_k: "to_nat j < k" have "B $ b $ j = ?interchange $ b $ j" proof (unfold B_eq, rule bezout_iterate_preserves[OF echelon_form_interchange ib interchange_ik_not_zero nrows_less _ to_nat_from_nat_i k2 zero_ik_interchange]) show "j < from_nat k" by (metis from_nat_mono from_nat_to_nat_id j_k k ncols_def) qed also have "... 
= 0" by (metis (erased, lifting) b_gr_i echelon_form_interchange echelon_form_upt_k_condition1 is_zero_row_upt_k_def j_k zero_ik_interchange) finally show "B $ b $ j = 0" . qed qed thus False using not_zero_bB by contradiction qed hence a_less_i: "a < from_nat i" using ab by simp have not_zero_aA: "\ is_zero_row_upt_k a k A" proof (rule greatest_ge_nonzero_row'[OF e _ not_all_zero]) show " a \ (GREATEST m. \ is_zero_row_upt_k m k A)" using a_less_i unfolding i2 g_rw by (metis le_Suc not_le) qed have least_eq1:"(LEAST n. B $ a $ n \ 0) = (LEAST n. A $ a $ n \ 0)" proof (rule Least_equality) have "B $ a $ (LEAST n. A $ a $ n \ 0) = ?interchange $ a $ (LEAST n. A $ a $ n \ 0)" proof (unfold B_eq, rule bezout_iterate_preserves[OF echelon_form_interchange ib interchange_ik_not_zero nrows_less _ to_nat_from_nat_i k2 zero_ik_interchange]) obtain j::'cols where j: "to_nat j < k" and Aaj: "A $ a $ j \ 0" using not_zero_aA unfolding is_zero_row_upt_k_def by auto have "(LEAST n. A $ a $ n \ 0) \ j" by (rule Least_le, simp add: Aaj) also have "... < from_nat k" by (metis (full_types) from_nat_mono from_nat_to_nat_id j k ncols_def) finally show "(LEAST n. A $ a $ n \ 0) < from_nat k" . qed also have "... = A $ a $ (LEAST n. A $ a $ n \ 0)" by (metis (no_types, lifting) ab b_le_i interchange_rows_preserves leD not_zero_aA zero_least_kA) also have "... \ 0" by (metis (mono_tags) LeastI is_zero_row_def' is_zero_row_imp_is_zero_row_upt not_zero_aA) finally show "B $ a $ (LEAST n. A $ a $ n \ 0) \ 0" . fix y assume Bay:"B $ a $ y \ 0" show "(LEAST n. A $ a $ n \ 0) \ y" proof (cases "y 0" using Bay by simp thus ?thesis using Least_le by fast next case False obtain j::'cols where j: "to_nat j < k" and Aaj: "A $ a $ j \ 0" using not_zero_aA unfolding is_zero_row_upt_k_def by auto have "(LEAST n. A $ a $ n \ 0) \ j" by (rule Least_le, simp add: Aaj) also have "... < from_nat k" by (metis (full_types) from_nat_mono from_nat_to_nat_id j k ncols_def) also have "...\ y" using False by auto finally show ?thesis by simp qed qed show ?thesis proof (cases "b=from_nat i") case True have zero_bB: "is_zero_row_upt_k b k B" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume jk:"to_nat j < k" have jk2: "j < from_nat k" by (metis from_nat_mono from_nat_to_nat_id jk k ncols_def) have "B $ b $ j = ?interchange $ b $ j" by (unfold B_eq, rule bezout_iterate_preserves[OF echelon_form_interchange ib interchange_ik_not_zero nrows_less jk2 to_nat_from_nat_i k2 zero_ik_interchange]) also have "... = A $ ?least $ j" unfolding True by auto also have "... = 0" using zero_least_kA jk unfolding is_zero_row_upt_k_def by simp finally show "B $ b $ j = 0" . qed have least_eq2: "(LEAST n. B $ b $ n \ 0) = from_nat k" proof (rule Least_equality) show "B $ b $ from_nat k \ 0" unfolding B_eq True by (rule bezout_iterate_not_zero[OF interchange_ik_not_zero nrows_less to_nat_from_nat_i ib]) show "\y. B $ b $ y \ 0 \ from_nat k \ y" by (metis is_zero_row_upt_k_def le_less_linear to_nat_le zero_bB) qed obtain j::'cols where j: "to_nat j < k" and Abj: "A $ a $ j \ 0" using not_zero_aA unfolding is_zero_row_upt_k_def by auto have "(LEAST n. A $ a $ n \ 0) \ j" by (rule Least_le, simp add: Abj) also have "... < from_nat k" by (metis (full_types) from_nat_mono from_nat_to_nat_id j k ncols_def) finally show ?thesis unfolding least_eq1 least_eq2 . 
next case False note b_not_i=False hence b_less_i: "b < from_nat i" using b_le_i by simp have not_zero_bA: "\ is_zero_row_upt_k b k A" proof (rule greatest_ge_nonzero_row'[OF e _ not_all_zero]) show " b \ (GREATEST m. \ is_zero_row_upt_k m k A)" using b_less_i unfolding i2 g_rw by (metis le_Suc not_le) qed have least_eq2: "(LEAST n. B $ b $ n \ 0) = (LEAST n. A $ b $ n \ 0)" proof (rule Least_equality) have "B $ b $ (LEAST n. A $ b $ n \ 0) = ?interchange $ b $ (LEAST n. A $ b $ n \ 0)" proof (unfold B_eq, rule bezout_iterate_preserves[OF echelon_form_interchange ib interchange_ik_not_zero nrows_less _ to_nat_from_nat_i k2 zero_ik_interchange]) obtain j::'cols where j: "to_nat j < k" and Abj: "A $ b $ j \ 0" using not_zero_bA unfolding is_zero_row_upt_k_def by auto have "(LEAST n. A $ b $ n \ 0) \ j" by (rule Least_le, simp add: Abj) also have "... < from_nat k" by (metis (full_types) from_nat_mono from_nat_to_nat_id j k ncols_def) finally show "(LEAST n. A $ b $ n \ 0) < from_nat k" . qed also have "... = A $ b $ (LEAST n. A $ b $ n \ 0)" by (metis (mono_tags) b_not_i interchange_rows_preserves not_zero_bA zero_least_kA) also have "... \ 0" by (metis (mono_tags) LeastI is_zero_row_def' is_zero_row_imp_is_zero_row_upt not_zero_bA) finally show "B $ b $ (LEAST n. A $ b $ n \ 0) \ 0" . fix y assume Bby:"B $ b $ y \ 0" show "(LEAST n. A $ b $ n \ 0) \ y" proof (cases "y 0" using Bby by simp thus ?thesis using Least_le by fast next case False obtain j::'cols where j: "to_nat j < k" and Abj: "A $ b $ j \ 0" using not_zero_bA unfolding is_zero_row_upt_k_def by auto have "(LEAST n. A $ b $ n \ 0) \ j" by (rule Least_le, simp add: Abj) also have "... < from_nat k" by (metis (full_types) from_nat_mono from_nat_to_nat_id j k ncols_def) also have "...\ y" using False by auto finally show ?thesis by simp qed qed show ?thesis unfolding least_eq1 least_eq2 by (rule echelon_form_upt_k_condition2[OF e ab not_zero_aA not_zero_bA]) qed qed lemma echelon_echelon_form_column_k: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" and k bezout defines i:"i \ (if \m. is_zero_row_upt_k m k A then 0 else to_nat ((GREATEST n. \ is_zero_row_upt_k n k A)) + 1)" defines B: "B \ fst ((echelon_form_of_column_k bezout) (A,i) k)" assumes ib: "is_bezout_ext bezout" and e: "echelon_form_upt_k A k" and k: "k a < b \ is_zero_row_upt_k b (Suc k) B" proof (unfold B echelon_form_of_column_k_def Let_def fst_conv snd_conv, auto) assume 1: "is_zero_row_upt_k a (Suc k) A" and 2: "a < b" and 3: "\m\from_nat i. A $ m $ from_nat k = 0" show "is_zero_row_upt_k b (Suc k) A" by (rule condition1_part1[OF e 1 2 3[unfolded i]]) next assume 1: "is_zero_row_upt_k a (Suc k) A" and 2: "a < b" and 3: "i = nrows A " and 4: "\m>from_nat (nrows A). A $ m $ from_nat k = 0" show "is_zero_row_upt_k b (Suc k) A" by (rule condition1_part2[OF e 1 2 3[unfolded i] 4]) next fix m assume 1: "is_zero_row_upt_k a (Suc k) ?B2" and 2: "a < b" and 3: "\m>from_nat i. A $ m $ from_nat k = 0" and 4: "i \ nrows A" and 5: "from_nat i \ m" and 6: "A $ m $ from_nat k \ 0" show "is_zero_row_upt_k b (Suc k) A" using condition1_part3[OF e ib _ 2 _ _ _ 6] using 1 3 4 5 unfolding i echelon_form_of_column_k_def Let_def fst_conv snd_conv by auto next fix m::'c assume 1: "is_zero_row_upt_k a (Suc k) A" and 2: "i = nrows A" show "is_zero_row_upt_k b (Suc k) A" by (rule condition1_part4[OF e 1 2[unfolded i]]) next let ?B2="(fst (if \m\from_nat i. A $ m $ from_nat k = 0 then (A, i) else if \m>from_nat i. 
A $ m $ from_nat k = 0 then (A, i + 1) else (bezout_iterate (interchange_rows A (from_nat i) (LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)) (nrows A - 1) (from_nat i) (from_nat k) bezout, i + 1)))" fix m assume 1: "is_zero_row_upt_k a (Suc k) ?B2" and 2: "a < b" and 3: "from_nat i < m" and 4: "A $ m $ from_nat k \ 0" and 5: "i \ nrows A" show "is_zero_row_upt_k b (Suc k) (bezout_iterate (interchange_rows A (from_nat i) (LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)) (nrows A - Suc 0) (from_nat i) (from_nat k) bezout)" using condition1_part5[OF ib e _ 2 _ 4 _ k] using 1 3 5 unfolding i echelon_form_of_column_k_def Let_def fst_conv snd_conv by auto qed next fix a b assume ab: "a < b" and not_zero_aB: "\ is_zero_row_upt_k a (Suc k) B" and not_zero_bB: "\ is_zero_row_upt_k b (Suc k) B" show "(LEAST n. B $ a $ n \ 0) < (LEAST n. B $ b $ n \ 0)" proof (unfold B echelon_form_of_column_k_def Let_def fst_conv snd_conv, auto) assume all_zero: "\m\from_nat i. A $ m $ from_nat k = 0" show "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" using condition2_part1[OF e ab] not_zero_aB not_zero_bB all_zero unfolding B i by simp next assume 1: "\m>from_nat (nrows A). A $ m $ from_nat k = 0" and 2: "i = nrows A" show "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" using condition2_part2[OF e ab 1] 2 unfolding i by simp next fix ma assume 1: "\m>from_nat i. A $ m $ from_nat k = 0" and 2: "from_nat i \ ma" and 3: "A $ ma $ from_nat k \ 0" show "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" using condition2_part3[OF e k ab _ _ _ _ 3] using 1 2 not_zero_aB not_zero_bB unfolding i B by auto next assume "i = nrows A" thus "(LEAST n. A $ a $ n \ 0) < (LEAST n. A $ b $ n \ 0)" using condition2_part4[OF e ab] unfolding i by simp next let ?B2="bezout_iterate (interchange_rows A (from_nat i) (LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)) (nrows A - Suc 0) (from_nat i) (from_nat k) bezout" fix m assume 1: "from_nat i < m" and 2: "A $ m $ from_nat k \ 0" and 3: "i \ nrows A" have B_eq: "B=?B2" unfolding B echelon_form_of_column_k_def Let_def using 1 2 3 by auto show "(LEAST n. ?B2 $ a $ n \ 0) < (LEAST n. ?B2 $ b $ n \ 0)" using condition2_part5[OF ib e k ab _ _ _ 2] 1 3 not_zero_aB not_zero_bB unfolding i[symmetric] B[symmetric] unfolding B_eq by auto qed qed lemma echelon_foldl_condition1: assumes ib: "is_bezout_ext bezout" and "A $ ma $ from_nat (Suc k) \ 0" and k: "km. \ is_zero_row_upt_k m (Suc (Suc k)) (bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ from_nat (Suc k) \ 0)) (nrows A - Suc 0) 0 (from_nat (Suc k)) bezout)" proof (rule exI[of _ 0], unfold is_zero_row_upt_k_def, auto, rule exI[of _ "from_nat (Suc k)"], rule conjI) show "to_nat (from_nat (Suc k)) < Suc (Suc k)" by (metis from_nat_mono from_nat_to_nat_id less_irrefl not_less_eq to_nat_less_card) show "bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ from_nat (Suc k) \ 0)) (nrows A - Suc 0) 0 (from_nat (Suc k)) bezout $ 0 $ from_nat (Suc k) \ 0" proof (rule bezout_iterate_not_zero[OF _ _ _ ib]) show "interchange_rows A 0 (LEAST n. A $ n $ from_nat (Suc k) \ 0) $ 0 $ from_nat (Suc k) \ 0" by (metis (mono_tags, lifting) LeastI_ex assms(2) interchange_rows_i) show "nrows A - Suc 0 < nrows (interchange_rows A 0 (LEAST n. 
A $ n $ from_nat (Suc k) \ 0))" unfolding nrows_def by simp show "to_nat 0 \ nrows A - Suc 0" unfolding to_nat_0 nrows_def by simp qed qed lemma echelon_foldl_condition2: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes n: "\ is_zero_row_upt_k ma k A" and all_zero: "\m\ (GREATEST n. \ is_zero_row_upt_k n k A)+1. A $ m $ from_nat k = 0" shows "(GREATEST n. \ is_zero_row_upt_k n k A) = (GREATEST n. \ is_zero_row_upt_k n (Suc k) A)" proof (rule Greatest_equality[symmetric]) show "\ is_zero_row_upt_k (GREATEST n. \ is_zero_row_upt_k n k A) (Suc k) A" by (metis GreatestI_ex n is_zero_row_upt_k_le) fix y assume y: "\ is_zero_row_upt_k y (Suc k) A" show "y \ (GREATEST n. \ is_zero_row_upt_k n k A)" proof (rule ccontr) assume " \ y \ (GREATEST n. \ is_zero_row_upt_k n k A)" hence y2: "y > (GREATEST n. \ is_zero_row_upt_k n k A)" by simp hence "is_zero_row_upt_k y k A" by (metis row_greater_greatest_is_zero) moreover have "A $ y $ from_nat k = 0" by (metis (no_types, lifting) all_zero le_Suc y2) ultimately have "is_zero_row_upt_k y (Suc k) A" by (rule is_zero_row_upt_k_suc) thus False using y by contradiction qed qed lemma echelon_foldl_condition3: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" and Am0: "A $ m $ from_nat k \ 0" and all_zero: "\m. is_zero_row_upt_k m k A" and e: "echelon_form_upt_k A k" and k: "k < ncols A" shows "to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) (bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0)) (nrows A - (Suc 0)) 0 (from_nat k) bezout)) = 0" proof (unfold to_nat_eq_0, rule Greatest_equality) let ?interchange="(interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0))" let ?b="(bezout_iterate ?interchange (nrows A - (Suc 0)) 0 (from_nat k) bezout)" have b0k: "?b $ 0 $ from_nat k \ 0" proof (rule bezout_iterate_not_zero[OF _ _ _ ib]) show "interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0) $ 0 $ from_nat k \ 0" by (metis (mono_tags, lifting) LeastI Am0 interchange_rows_i) show "nrows A - (Suc 0) < nrows (interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0))" unfolding nrows_def by simp show "to_nat 0 \ nrows A - (Suc 0)" unfolding to_nat_0 nrows_def by simp qed have least_eq: "(LEAST n. A $ n $ from_nat k \ 0) = (LEAST n. A $ n $ from_nat k \ 0 \ 0 \ n)" by (metis least_mod_type) have all_zero_below: "\a>0. ?b $ a $ from_nat k = 0" proof (auto) fix a::'rows assume a: "0 < a" show "bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0)) (nrows A - Suc 0) 0 (from_nat k) bezout $ a $ from_nat k = 0" proof (rule bezout_iterate_zero_column_k[OF _ ib _ _ a]) show "echelon_form_upt_k (interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0)) k" proof(unfold from_nat_0[symmetric] least_eq, rule echelon_form_upt_k_interchange[OF e _ Am0 _ k]) show "is_zero_row_upt_k (from_nat 0) k A" by (metis all_zero) show "from_nat 0 \ m" unfolding from_nat_0 by (metis least_mod_type) qed show "interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0) $ 0 $ from_nat k \ 0" by (metis (mono_tags, lifting) Am0 LeastI interchange_rows_i) show "nrows A - Suc 0 < nrows (interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0))" unfolding nrows_def by simp show "k < ncols (interchange_rows A 0 (LEAST n. 
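(* Informally, the echelon_foldl_condition lemmas in this block track the index
   carried through the fold: it is 0 while every row is zero up to the columns
   treated so far, and otherwise to_nat of the greatest row that is not zero up
   to column k, plus one, i.e. one past the last pivot row.  Each lemma records
   how this greatest nonzero row (and hence the carried index) evolves when
   column k itself is taken into account. *)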
A $ n $ from_nat k \ 0))" using k unfolding ncols_def by simp show "to_nat a \ nrows A - Suc 0" by (metis (erased, hide_lams) One_nat_def Suc_leI Suc_le_D diff_Suc_eq_diff_pred not_le nrows_def to_nat_less_card zero_less_diff) show "is_zero_row_upt_k 0 k (interchange_rows A 0 (LEAST n. A $ n $ from_nat k \ 0))" by (metis all_zero interchange_rows_i is_zero_row_upt_k_def) qed qed show "\ is_zero_row_upt_k 0 (Suc k) ?b" by (metis b0k is_zero_row_upt_k_def k lessI ncols_def to_nat_from_nat_id) fix y assume y: "\ is_zero_row_upt_k y (Suc k) ?b" show "y \ 0" proof (rule ccontr) assume "\ y \ 0" hence y2: "y>0" by simp have "is_zero_row_upt_k y (Suc k) ?b" proof (rule is_zero_row_upt_k_suc) show "is_zero_row_upt_k y k ?b" proof (unfold is_zero_row_upt_k_def, clarify) fix j::'cols assume j: "to_nat j < k" have "?b $ y $ j = ?interchange $ y $ j" proof (rule bezout_iterate_preserves[OF _ ib]) show "echelon_form_upt_k ?interchange k" proof (unfold least_eq from_nat_0[symmetric], rule echelon_form_upt_k_interchange[OF e _ Am0 _ k]) show "is_zero_row_upt_k (from_nat 0) k A" by (metis all_zero) show "from_nat 0 \ m" by (metis from_nat_0 least_mod_type) qed show "?interchange $ 0 $ from_nat k \ 0" by (metis (mono_tags, lifting) Am0 LeastI interchange_rows_i) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "j < from_nat k" by (metis (full_types) j from_nat_mono from_nat_to_nat_id k ncols_def) show "to_nat 0 \ nrows A - Suc 0" by (metis le0 to_nat_0) show "k < ncols ?interchange" using k unfolding ncols_def by simp show "is_zero_row_upt_k 0 k ?interchange" by (metis all_zero interchange_rows_i is_zero_row_upt_k_def) qed also have "... = 0" by (metis all_zero dual_order.strict_iff_order interchange_rows_j interchange_rows_preserves is_zero_row_upt_k_def j y2) finally show "?b $ y $ j = 0" . qed show "?b $ y $ from_nat k = 0" using all_zero_below using y2 by auto qed thus False using y by contradiction qed qed lemma echelon_foldl_condition4: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes all_zero: "\m>(GREATEST n. \ is_zero_row_upt_k n k A)+1. A $ m $ from_nat k = 0" and greatest_nrows: "Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n k A)) \ nrows A" and le_mb: "(GREATEST n. \ is_zero_row_upt_k n k A)+1 \ mb" and A_mb_k: "A $ mb $ from_nat k \ 0" shows "Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n k A)) = to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) A)" proof - let ?greatest = "(GREATEST n. \ is_zero_row_upt_k n k A)" have mb_eq: "mb=(GREATEST n. \ is_zero_row_upt_k n k A) + 1" by (metis all_zero le_mb A_mb_k le_less ) have "(GREATEST n. \ is_zero_row_upt_k n k A) + 1 = (GREATEST n. \ is_zero_row_upt_k n (Suc k) A)" proof (rule Greatest_equality[symmetric]) show "\ is_zero_row_upt_k (?greatest + 1) (Suc k) A" by (metis (no_types, lifting) A_mb_k is_zero_row_upt_k_def less_Suc_eq less_trans mb_eq not_less_eq to_nat_from_nat_id to_nat_less_card) fix y assume y: "\ is_zero_row_upt_k y (Suc k) A" show "y \ ?greatest + 1" proof (rule ccontr) assume "\ y \ (GREATEST n. 
\ is_zero_row_upt_k n k A) + 1" hence y_greatest: "y > ?greatest + 1" by simp have "is_zero_row_upt_k y (Suc k) A" proof (rule is_zero_row_upt_k_suc) show "is_zero_row_upt_k y k A" proof (rule row_greater_greatest_is_zero) show "?greatest < y" using y_greatest greatest_nrows unfolding nrows_def by (metis Suc_eq_plus1 dual_order.strict_implies_order le_Suc' suc_not_zero to_nat_plus_one_less_card') qed show "A $ y $ from_nat k = 0" using all_zero y_greatest unfolding from_nat_to_nat_greatest by auto qed thus False using y by contradiction qed qed thus ?thesis by (metis (mono_tags, lifting) Suc_eq_plus1 Suc_lessI add_to_nat_def greatest_nrows nrows_def to_nat_1 to_nat_from_nat_id to_nat_less_card) qed lemma echelon_foldl_condition5: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes mb: "\ is_zero_row_upt_k mb k A" and nrows: "Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n k A)) = nrows A" shows "nrows A = Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) A))" by (metis (no_types, lifting) GreatestI Suc_lessI Suc_less_eq mb nrows from_nat_mono from_nat_to_nat_id is_zero_row_upt_k_le not_greater_Greatest nrows_def to_nat_less_card) lemma echelon_foldl_condition6: fixes A::"'a::{bezout_ring}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" and g_mc: "(GREATEST n. \ is_zero_row_upt_k n k A) + 1 \ mc" and A_mc_k: "A $ mc $ from_nat k \ 0" shows "\m. \ is_zero_row_upt_k m (Suc k) (bezout_iterate (interchange_rows A ((GREATEST n. \ is_zero_row_upt_k n k A) + 1) (LEAST n. A $ n $ from_nat k \ 0 \ (GREATEST n. \ is_zero_row_upt_k n k A) + 1 \ n)) (nrows A - Suc 0) ((GREATEST n. \ is_zero_row_upt_k n k A) + 1) (from_nat k) bezout)" proof - let ?greatest="(GREATEST n. \ is_zero_row_upt_k n k A)" let ?interchange="interchange_rows A (?greatest + 1) (LEAST n. A $ n $ from_nat k \ 0 \ ?greatest + 1 \ n)" let ?B="(bezout_iterate ?interchange (nrows A - Suc 0) (?greatest + 1) (from_nat k) bezout)" have "?B $ (?greatest + 1) $ from_nat k \ 0" proof (rule bezout_iterate_not_zero[OF _ _ _ ib]) show "?interchange $ (?greatest + 1) $ from_nat k \ 0" by (metis (mono_tags, lifting) LeastI_ex g_mc A_mc_k interchange_rows_i) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "to_nat (?greatest + 1) \ nrows A - Suc 0" by (metis Suc_pred less_Suc_eq_le nrows_def to_nat_less_card zero_less_card_finite) qed thus ?thesis by (metis (no_types, lifting) from_nat_mono from_nat_to_nat_id is_zero_row_upt_k_def less_irrefl not_less_eq to_nat_less_card) qed lemma echelon_foldl_condition7: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" and e: "echelon_form_upt_k A k" and k: "k is_zero_row_upt_k mb k A" and not_nrows: "Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n k A)) \ nrows A" and g_mc: "(GREATEST n. \ is_zero_row_upt_k n k A) + 1 \ mc" and A_mc_k: "A $ mc $ from_nat k \ 0" shows "Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n k A)) = to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) (bezout_iterate (interchange_rows A ((GREATEST n. \ is_zero_row_upt_k n k A) + 1) (LEAST n. A $ n $ from_nat k \ 0 \ (GREATEST n. \ is_zero_row_upt_k n k A) + 1 \ n)) (nrows A - Suc 0) ((GREATEST n. \ is_zero_row_upt_k n k A) + 1) (from_nat k) bezout))" proof - let ?greatest="(GREATEST n. \ is_zero_row_upt_k n k A)" let ?interchange="interchange_rows A (?greatest + 1) (LEAST n. 
A $ n $ from_nat k \ 0 \ ?greatest + 1 \ n)" let ?B="(bezout_iterate ?interchange (nrows A - Suc 0) (?greatest + 1) (from_nat k) bezout)" have g_rw: "(GREATEST n. \ is_zero_row_upt_k n k A) + 1 = from_nat (to_nat ((GREATEST n. \ is_zero_row_upt_k n k A) + 1))" unfolding from_nat_to_nat_id .. have B_gk:"?B $ (?greatest + 1) $ from_nat k \ 0" proof (rule bezout_iterate_not_zero[OF _ _ _ ib]) show "?interchange $ ((GREATEST n. \ is_zero_row_upt_k n k A) + 1) $ from_nat k \ 0" by (metis (mono_tags, lifting) LeastI_ex g_mc A_mc_k interchange_rows_i) show "nrows A - Suc 0 < nrows (?interchange)" unfolding nrows_def by simp show "to_nat (?greatest + 1) \ nrows A - Suc 0" by (metis Suc_pred less_Suc_eq_le nrows_def to_nat_less_card zero_less_card_finite) qed have "(GREATEST n. \ is_zero_row_upt_k n (Suc k) ?B) = ?greatest + 1" proof (rule Greatest_equality) show "\ is_zero_row_upt_k (?greatest + 1) (Suc k) ?B" by (metis (no_types, lifting) B_gk from_nat_mono from_nat_to_nat_id is_zero_row_upt_k_def less_irrefl not_less_eq to_nat_less_card) fix y assume y: "\ is_zero_row_upt_k y (Suc k) ?B" show "y \ ?greatest + 1" proof (rule ccontr) assume " \ y \ (GREATEST n. \ is_zero_row_upt_k n k A) + 1" hence y_gr: " y > (GREATEST n. \ is_zero_row_upt_k n k A) + 1" by simp hence y_gr2: "y > (GREATEST n. \ is_zero_row_upt_k n k A)" by (metis (erased, lifting) Suc_eq_plus1 leI le_Suc' less_irrefl less_trans not_nrows nrows_def suc_not_zero to_nat_plus_one_less_card') have echelon_interchange: "echelon_form_upt_k ?interchange k" proof (subst (1 2) from_nat_to_nat_id [of "(GREATEST n. \ is_zero_row_upt_k n k A) + 1", symmetric], rule echelon_form_upt_k_interchange[OF e _ A_mc_k _ k]) show "is_zero_row_upt_k (from_nat (to_nat ((GREATEST n. \ is_zero_row_upt_k n k A) + 1))) k A" by (metis Suc_eq_plus1 Suc_le' g_rw not_nrows nrows_def row_greater_greatest_is_zero suc_not_zero) show "from_nat (to_nat ((GREATEST n. \ is_zero_row_upt_k n k A) + 1)) \ mc" by (metis g_mc g_rw) qed have i: "?interchange $ (?greatest + 1) $ from_nat k \ 0" by (metis (mono_tags, lifting) A_mc_k LeastI_ex g_mc interchange_rows_i) have zero_greatest: "is_zero_row_upt_k (?greatest + 1) k A" by (metis Suc_eq_plus1 Suc_le' not_nrows nrows_def row_greater_greatest_is_zero suc_not_zero) { fix j::'cols assume "to_nat j < k" have "?greatest < ?greatest + 1" by (metis greatest_less_zero_row e mb zero_greatest) also have "... \(LEAST n. A $ n $ from_nat k \ 0 \ (?greatest + 1) \ n)" by (metis (mono_tags, lifting) A_mc_k LeastI_ex g_mc) finally have least_less: "?greatest < (LEAST n. A $ n $ from_nat k \ 0 \ (?greatest + 1) \ n)" . have "is_zero_row_upt_k (LEAST n. 
A $ n $ from_nat k \ 0 \ (?greatest + 1) \ n) k A" by (rule row_greater_greatest_is_zero[OF least_less]) } hence zero_g1: "is_zero_row_upt_k (?greatest + 1) k ?interchange" unfolding is_zero_row_upt_k_def by auto hence zero_y: "is_zero_row_upt_k y k ?interchange" by (metis (erased, lifting) echelon_form_upt_k_condition1' echelon_interchange y_gr) have "is_zero_row_upt_k y (Suc k) ?B" proof (rule is_zero_row_upt_k_suc) show "?B $ y $ from_nat k = 0" proof (rule bezout_iterate_zero_column_k[OF echelon_interchange ib i _ y_gr _ _ zero_g1]) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "k < ncols ?interchange" using k unfolding ncols_def by simp show "to_nat y \ nrows A - Suc 0" by (metis One_nat_def Suc_eq_plus1 Suc_leI nrows_def le_diff_conv2 to_nat_less_card zero_less_card_finite) qed show "is_zero_row_upt_k y k ?B" proof (subst is_zero_row_upt_k_def, clarify) fix j::'cols assume j: "to_nat j < k" have "?B $ y $ j = ?interchange $ y $ j" proof (rule bezout_iterate_preserves[OF echelon_interchange ib i _ _ _ _ zero_g1]) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "j < from_nat k" using j by (metis (poly_guards_query) from_nat_mono from_nat_to_nat_id k ncols_def) show "to_nat ((GREATEST n. \ is_zero_row_upt_k n k A) + 1) \ nrows A - Suc 0" by (metis Suc_pred less_Suc_eq_le nrows_def to_nat_less_card zero_less_card_finite) show "k < ncols ?interchange" using k unfolding ncols_def . qed also have "... = 0" using zero_y unfolding is_zero_row_upt_k_def using j by simp finally show "?B $ y $ j = 0" . qed qed thus False using y by contradiction qed qed thus ?thesis by (metis (erased, lifting) Suc_eq_plus1 add_to_nat_def not_nrows nrows_def suc_not_zero to_nat_1 to_nat_from_nat_id to_nat_plus_one_less_card') qed lemma fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes k: "km. is_zero_row_upt_k m (Suc k) (fst (foldl (echelon_form_of_column_k bezout) (A, 0) [0.. is_zero_row_upt_k n (Suc k) (fst (foldl (echelon_form_of_column_k bezout) (A, 0) [0..m. is_zero_row_upt_k m 0 A then 0 else to_nat (GREATEST n. \ is_zero_row_upt_k n 0 A) + 1) = 0" unfolding is_zero_row_upt_k_def by auto show "echelon_form_upt_k (echelon_form_of_upt_k A 0 bezout) (Suc 0)" unfolding echelon_form_of_upt_k_def by (auto, subst i_rw[symmetric], rule echelon_echelon_form_column_k[OF ib echelon_form_upt_k_0], simp add: ncols_def) have rw_upt: "[0..m. is_zero_row_upt_k m (Suc 0) (fst (foldl (echelon_form_of_column_k bezout) (A, 0) [0.. is_zero_row_upt_k n (Suc 0) (fst (foldl (echelon_form_of_column_k bezout) (A, 0) [0.. 0" and all_zero: "\m. bezout_iterate ?interchange (nrows A - Suc 0) 0 0 bezout $ m $ 0 = 0" have "bezout_iterate ?interchange (nrows A - Suc 0) 0 0 bezout $ 0 $ 0 = bezout_iterate ?interchange (nrows A - Suc 0) 0 (from_nat 0) bezout $ 0 $ from_nat 0" using from_nat_0 by metis also have "... \ 0" proof (rule bezout_iterate_not_zero[OF _ _ _ ib], simp_all add: nrows_def) show "A $ (LEAST n. A $ n $ 0 \ 0) $ from_nat 0 \ 0" by (metis (mono_tags) LeastI \A $ m $ 0 \ 0\ from_nat_0) show "to_nat 0 \ CARD('rows) - Suc 0" by (metis le0 to_nat_0) qed finally have " bezout_iterate ?interchange (nrows A - Suc 0) 0 0 bezout $ 0 $ 0 \ 0" . thus "A $ mb $ 0 = 0" using all_zero by auto next fix m assume Am0: "A $ m $ 0 \ 0" and all_zero: "\m>0. A $ m $ 0 = 0" thus "(GREATEST n. 
A $ n $ 0 \ 0) = 0" by (metis (mono_tags, lifting) GreatestI neq_iff not_less0 to_nat_0 to_nat_mono) next fix m ma mb assume "A $ m $ 0 \ 0" and "bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ 0 \ 0)) (nrows A - Suc 0) 0 0 bezout $ ma $ 0 \ 0" have "bezout_iterate ?interchange (nrows A - Suc 0) 0 0 bezout $ 0 $ 0 = bezout_iterate ?interchange (nrows A - Suc 0) 0 (from_nat 0) bezout $ 0 $ from_nat 0" using from_nat_0 by metis also have "... \ 0" proof (rule bezout_iterate_not_zero[OF _ _ _ ib], simp_all add: nrows_def) show "A $ (LEAST n. A $ n $ 0 \ 0) $ from_nat 0 \ 0" by (metis (mono_tags) LeastI \A $ m $ 0 \ 0\ from_nat_0) show "to_nat 0 \ CARD('rows) - Suc 0" by (metis le0 to_nat_0) qed finally have 1: "bezout_iterate ?interchange (nrows A - Suc 0) 0 0 bezout $ 0 $ 0 \ 0" . have 2: "\m>0. bezout_iterate ?interchange (nrows A - Suc 0) 0 0 bezout $ m $ 0 = 0" proof (auto) fix b::'rows assume b: "0 0" by (metis (mono_tags, lifting) LeastI_ex \A $ m $ 0 \ 0\ from_nat_0 interchange_rows_i) show "nrows A - Suc 0 < nrows (?interchange)" unfolding nrows_def by simp show "0 < b" using b . show "0 < ncols (?interchange)" unfolding ncols_def by auto show "to_nat b \ nrows A - Suc 0" by (metis Suc_eq_plus1 discrete less_one add.left_neutral not_le nrows_def nrows_not_0 le_diff_conv2 to_nat_less_card) show "is_zero_row_upt_k 0 0 (?interchange)" by (metis is_zero_row_utp_0) qed finally show "bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ 0 \ 0)) (nrows A - Suc 0) 0 0 bezout $ b $ 0 = 0" . qed show "(GREATEST n. bezout_iterate (interchange_rows A 0 (LEAST n. A $ n $ 0 \ 0)) (nrows A - Suc 0) 0 0 bezout $ n $ 0 \ 0) = 0" apply (rule Greatest_equality, simp add: 1) using 2 by force qed next fix k let ?fold="(foldl (echelon_form_of_column_k bezout)(A, 0) [0.. echelon_form_upt_k (echelon_form_of_upt_k A k bezout) (Suc k))" and "(k < ncols A \ foldl (echelon_form_of_column_k bezout) (A, 0) [0..m. is_zero_row_upt_k m (Suc k) (fst ?fold2) then 0 else to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) (fst ?fold2)) + 1))" and Suc_k: "Suc k < ncols A" hence hyp_foldl: "foldl (echelon_form_of_column_k bezout) (A, 0) [0..m. is_zero_row_upt_k m (Suc k) (fst ?fold2) then 0 else to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) (fst ?fold2)) + 1)" and hyp_echelon: "echelon_form_upt_k (echelon_form_of_upt_k A k bezout) (Suc k)" by auto have rw: "[0..m. is_zero_row_upt_k m (Suc k) (echelon_form_of_upt_k A k bezout) then 0 else to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc k) (echelon_form_of_upt_k A k bezout)) + 1)" unfolding echelon_form_of_upt_k_def using hyp_foldl by fast show "echelon_form_upt_k (echelon_form_of_upt_k A (Suc k) bezout) (Suc (Suc k))" unfolding echelon_form_of_upt_k_def unfolding rw unfolding foldl_append unfolding foldl.simps unfolding rw2 proof (rule echelon_echelon_form_column_k[OF ib hyp_echelon]) show "Suc k < ncols (echelon_form_of_upt_k A k bezout)" using Suc_k unfolding ncols_def . qed show "foldl (echelon_form_of_column_k bezout) (A, 0) [0..m. is_zero_row_upt_k m (Suc (Suc k)) (fst ?fold) then 0 else to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) (fst ?fold)) + 1)" proof (rule prod_eqI, metis fst_conv) define A' where "A' = fst ?fold2" let ?greatest="(GREATEST n. \ is_zero_row_upt_k n (Suc k) A')" have k: "k < ncols A'" using Suc_k unfolding ncols_def by auto have k2: "Suc k < ncols A'" using Suc_k unfolding ncols_def by auto have fst_snd_foldl: "snd ?fold2 = snd (fst ?fold2, if \m. is_zero_row_upt_k m (Suc k) (fst ?fold2) then 0 else to_nat (GREATEST n. 
\ is_zero_row_upt_k n (Suc k) (fst ?fold2)) + 1)" using hyp_foldl by simp have ncols_eq: "ncols A = ncols A'" unfolding A'_def ncols_def .. have rref_A': "echelon_form_upt_k A' (Suc k)" using hyp_echelon unfolding A'_def echelon_form_of_upt_k_def . show "snd ?fold = snd (fst ?fold, if \m. is_zero_row_upt_k m (Suc (Suc k)) (fst ?fold) then 0 else to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) (fst ?fold)) + 1)" using [[unfold_abs_def = false]] unfolding fst_conv snd_conv unfolding rw unfolding foldl_append unfolding foldl.simps unfolding echelon_form_of_column_k_def Let_def split_beta fst_snd_foldl unfolding A'_def[symmetric] proof (auto simp add: least_mod_type from_nat_0 from_nat_to_nat_greatest) fix m assume "A' $ m $ from_nat (Suc k) \ 0" thus "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" unfolding is_zero_row_upt_k_def by (metis add_to_nat_def from_nat_mono less_irrefl monoid_add_class.add.right_neutral not_less_eq to_nat_0 to_nat_less_card)+ next fix m assume "\ is_zero_row_upt_k m (Suc k) A'" thus "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) A'" by (metis is_zero_row_upt_k_le)+ next fix m assume "\ma. is_zero_row_upt_k ma (Suc k) A'" and "\mb. A' $ mb $ from_nat (Suc k) = 0" thus "is_zero_row_upt_k m (Suc (Suc k)) A'" by (metis is_zero_row_upt_k_suc) next fix ma assume "\m>0. A' $ m $ from_nat (Suc k) = 0" and "\m. is_zero_row_upt_k m (Suc k) A'" and "\ is_zero_row_upt_k ma (Suc (Suc k)) A'" thus "to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A') = 0" and "to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A') = 0" by (metis (erased, lifting) GreatestI_ex le_less is_zero_row_upt_k_suc least_mod_type to_nat_0)+ next fix m assume "\m>0. A' $ m $ from_nat (Suc k) = 0" and "\ is_zero_row_upt_k m (Suc k) A'" and "\m\?greatest+1. A' $ m $ from_nat (Suc k) = 0" thus "?greatest = (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A')" by (metis (mono_tags, lifting) echelon_form_upt_k_condition1 from_nat_0 is_zero_row_upt_k_le is_zero_row_upt_k_suc less_nat_zero_code neq_iff rref_A' to_nat_le) next fix m ma assume "\m>?greatest+1. A' $ m $ from_nat (Suc k) = 0" and "\m>0. A' $ m $ from_nat (Suc k) = 0" and"Suc (to_nat ?greatest) \ nrows A'" and "?greatest + 1 \ ma" and "A' $ ma $ from_nat (Suc k) \ 0" thus "Suc (to_nat ?greatest) = to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A')" by (metis (mono_tags) Suc_eq_plus1 less_linear leD least_mod_type nrows_def suc_not_zero) next fix m ma assume "\m>?greatest + 1. A' $ m $ from_nat (Suc k) = 0" and "\m>0. A' $ m $ from_nat (Suc k) = 0" and "\ is_zero_row_upt_k m (Suc k) A'" and "Suc (to_nat ?greatest) = nrows A'" and "\ is_zero_row_upt_k ma (Suc (Suc k)) A'" thus "nrows A' = Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A'))" by (metis echelon_foldl_condition5) next fix ma assume 1: "A' $ ma $ from_nat (Suc k) \ 0" show "\m. \ is_zero_row_upt_k m (Suc (Suc k)) (bezout_iterate (interchange_rows A' 0 (LEAST n. A' $ n $ from_nat (Suc k) \ 0)) (nrows A' - Suc 0) 0 (from_nat (Suc k)) bezout)" and "\m. \ is_zero_row_upt_k m (Suc (Suc k)) (bezout_iterate (interchange_rows A' 0 (LEAST n. 
A' $ n $ from_nat (Suc k) \ 0)) (nrows A' - Suc 0) 0 (from_nat (Suc k)) bezout)" by (rule echelon_foldl_condition1[OF ib 1 k])+ next fix m ma mb assume 1: "\ is_zero_row_upt_k ma (Suc k) A'" and 2:"\m\?greatest + 1. A' $ m $ from_nat (Suc k) = 0" show "?greatest = (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A')" by (rule echelon_foldl_condition2[OF 1 2]) next fix m assume 1: "A' $ m $ from_nat (Suc k) \ 0" and 2: "\m. is_zero_row_upt_k m (Suc k) A'" show "to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) (bezout_iterate (interchange_rows A' 0 (LEAST n. A' $ n $ from_nat (Suc k) \ 0)) (nrows A' - Suc 0) 0 (from_nat (Suc k)) bezout)) = 0" and "to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) (bezout_iterate (interchange_rows A' 0 (LEAST n. A' $ n $ from_nat (Suc k) \ 0)) (nrows A' - Suc 0) 0 (from_nat (Suc k)) bezout)) = 0" by (rule echelon_foldl_condition3[OF ib 1 2 rref_A'], metis ncols_def Suc_k)+ next fix m assume "\m>?greatest + 1. A' $ m $ from_nat (Suc k) = 0" and "0 < m" and "A' $ m $ from_nat (Suc k) \ 0" and "Suc (to_nat ?greatest) = nrows A'" thus "nrows A' = Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A'))" by (metis (mono_tags) Suc_eq_plus1 Suc_le' from_nat_suc from_nat_to_nat_id not_less_eq nrows_def to_nat_less_card to_nat_mono) next fix mb assume 1: "\m>?greatest+1. A' $ m $ from_nat (Suc k) = 0" and 2: "Suc (to_nat ?greatest) \ nrows A'" and 3: "?greatest+1 \ mb" and 4: "A' $ mb $ from_nat (Suc k) \ 0" show "Suc (to_nat ?greatest) = to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A')" by (rule echelon_foldl_condition4[OF 1 2 3 4]) next fix m assume "?greatest + 1 < m" and "A' $ m $ from_nat (Suc k) \ 0" and "\m>0. A' $ m $ from_nat (Suc k) = 0 " thus "nrows A' = Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A'))" by (metis le_less_trans least_mod_type) next fix m assume "?greatest + 1 < m" and "A' $ m $ from_nat (Suc k) \ 0" and "\m>0. A' $ m $ from_nat (Suc k) = 0" thus "\m. \ is_zero_row_upt_k m (Suc (Suc k)) (bezout_iterate (interchange_rows A' (?greatest + 1) (LEAST n. A' $ n $ from_nat (Suc k) \ 0 \ ?greatest + 1 \ n)) (nrows A' - Suc 0) (?greatest + 1) (from_nat (Suc k)) bezout)" by (metis le_less_trans least_mod_type) next fix mb assume "\ is_zero_row_upt_k mb (Suc k) A'" and "Suc (to_nat ?greatest) = nrows A'" thus " nrows A' = Suc (to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) A'))" by (rule echelon_foldl_condition5) next fix m assume "(GREATEST n. \ is_zero_row_upt_k n (Suc k) A') + 1 < m" and "A' $ m $ from_nat (Suc k) \ 0" and "\m>0. A' $ m $ from_nat (Suc k) = 0" thus "Suc (to_nat ?greatest) = to_nat (GREATEST n. \ is_zero_row_upt_k n (Suc (Suc k)) (bezout_iterate (interchange_rows A' (?greatest + 1) (LEAST n. A' $ n $ from_nat (Suc k) \ 0 \ ?greatest + 1 \ n)) (nrows A' - Suc 0) (?greatest + 1) (from_nat (Suc k)) bezout))" by (metis le_less_trans least_mod_type) next fix mc assume "?greatest + 1 \ mc" and "A' $ mc $ from_nat (Suc k) \ 0" thus "\m. \ is_zero_row_upt_k m (Suc (Suc k)) (bezout_iterate (interchange_rows A' (?greatest + 1) (LEAST n. A' $ n $ from_nat (Suc k) \ 0 \ ?greatest + 1 \ n)) (nrows A' - Suc 0) (?greatest + 1) (from_nat (Suc k)) bezout)" using echelon_foldl_condition6[OF ib] by blast next fix mb mc assume 1: "\ is_zero_row_upt_k mb (Suc k) A'" and 2: "Suc (to_nat ?greatest) \ nrows A'" and 3: "?greatest + 1 \ mc" and 4:"A' $ mc $ from_nat (Suc k) \ 0" show " Suc (to_nat ?greatest) = to_nat (GREATEST n. 
\ is_zero_row_upt_k n (Suc (Suc k)) (bezout_iterate (interchange_rows A' (?greatest + 1) (LEAST n. A' $ n $ from_nat (Suc k) \ 0 \ ?greatest + 1 \ n)) (nrows A' - Suc 0) (?greatest + 1) (from_nat (Suc k)) bezout))" by (rule echelon_foldl_condition7[OF ib rref_A' k2 1 2 3 4 ]) qed qed qed subsubsection\Proving the existence of invertible matrices which do the transformations\ lemma bezout_iterate_invertible: fixes A::"'a::{bezout_domain}^'cols^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" assumes "nn" and "A $ i $ j \ 0" shows "\P. invertible P \ P**A = bezout_iterate A n i j bezout" using assms proof (induct n arbitrary: A) case 0 show ?case unfolding bezout_iterate.simps by (simp add: exI[of _ "mat 1"] matrix_mul_lid invertible_def) next case (Suc n) show ?case proof (cases "Suc n = to_nat i") case True show ?thesis unfolding bezout_iterate.simps using True Suc.prems(1) by (simp add: exI[of _ "mat 1"] matrix_mul_lid invertible_def) next case False have i_le_n: "to_nat i < Suc n" using Suc.prems(3) False by auto let ?B="(bezout_matrix A i (from_nat (Suc n)) j bezout ** A)" have b: "bezout_iterate A (Suc n) i j bezout = bezout_iterate ?B n i j bezout" unfolding bezout_iterate.simps using i_le_n by auto have "\P. invertible P \ P**?B = bezout_iterate ?B n i j bezout" proof (rule Suc.hyps[OF ib _]) show "n < nrows ?B" using Suc.prems (2) unfolding nrows_def by simp show "to_nat i \ n" using i_le_n by auto show "?B $ i $ j \ 0" by (metis False Suc.prems(2) Suc.prems(4) bezout_matrix_not_zero ib nrows_def to_nat_from_nat_id) qed from this obtain P where inv_P: "invertible P" and P: "P**?B = bezout_iterate ?B n i j bezout" by blast show ?thesis proof (rule exI[of _ "P ** bezout_matrix A i (from_nat (Suc n)) j bezout"], rule conjI, rule invertible_mult) show "P ** bezout_matrix A i (from_nat (Suc n)) j bezout ** A = bezout_iterate A (Suc n) i j bezout" using P unfolding b by (metis matrix_mul_assoc) have "det (bezout_matrix A i (from_nat (Suc n)) j bezout) = 1" proof (rule det_bezout_matrix[OF ib]) show "i < from_nat (Suc n)" using i_le_n from_nat_mono[of "to_nat i" "Suc n"] Suc.prems(2) unfolding nrows_def by (metis from_nat_to_nat_id) show "A $ i $ j \ 0" by (rule Suc.prems(4)) qed thus "invertible (bezout_matrix A i (mod_type_class.from_nat (Suc n)) j bezout)" unfolding invertible_iff_is_unit by simp show "invertible P" using inv_P . qed qed qed lemma echelon_form_of_column_k_invertible: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" shows "\P. invertible P \ P ** A = fst ((echelon_form_of_column_k bezout) (A,i) k)" proof - have "\P. invertible P \ P ** A = A" by (simp add: exI[of _ "mat 1"] matrix_mul_lid invertible_def) thus ?thesis proof (unfold echelon_form_of_column_k_def Let_def, auto) fix P m ma let ?least = "(LEAST n. A $ n $ from_nat k \ 0 \ from_nat i \ n)" let ?interchange ="(interchange_rows A (from_nat i) ?least)" assume i: "i \ nrows A" and i2: "mod_type_class.from_nat i \ ma" and ma: "A $ ma $ mod_type_class.from_nat k \ 0" have "\P. 
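(* Why each elimination step is invertible, informally.  bezout_iterate only
   multiplies on the left by matrices of the form bezout_matrix A i l j bezout,
   and det_bezout_matrix (used in bezout_iterate_invertible above) gives such a
   matrix determinant 1 whenever A $ i $ j is nonzero.  Assuming the matrix
   agrees with the identity except in rows i and l, where the 2x2 block acting
   on those rows is
       ( p      q   )
       ( -b/d   a/d )
   for a = A $ i $ j, b = A $ l $ j and bezout a b = (p, q, u, v, d) with
   p*a + q*b = d, its determinant is p*(a/d) + q*(b/d) = (p*a + q*b)/d = 1.
   A concrete integer instance: a = 4, b = 6, d = 2, p = -1, q = 1 gives the
   block ((-1, 1), (-3, 2)) with determinant (-1)*2 - 1*(-3) = 1; applied to the
   column entries (4, 6) it yields (2, 0), i.e. the gcd in the pivot row and a
   zero below it. *)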
invertible P \ P ** ?interchange = bezout_iterate ?interchange (nrows A - Suc 0) (from_nat i) (from_nat k) bezout" proof (rule bezout_iterate_invertible[OF ib]) show "nrows A - Suc 0 < nrows ?interchange" unfolding nrows_def by simp show "to_nat (from_nat i::'rows) \ nrows A - Suc 0" by (metis Suc_leI Suc_le_mono Suc_pred nrows_def to_nat_less_card zero_less_card_finite) show "?interchange $ from_nat i $ from_nat k \ 0" by (metis (mono_tags, lifting) LeastI_ex i2 ma interchange_rows_i) qed from this obtain P where inv_P: "invertible P" and P: "P ** ?interchange = bezout_iterate ?interchange (nrows A - Suc 0) (from_nat i) (from_nat k) bezout" by blast show "\P. invertible P \ P ** A = bezout_iterate ?interchange (nrows A - Suc 0) (from_nat i) (from_nat k) bezout" proof (rule exI[of _ "P ** interchange_rows (mat 1) (from_nat i) ?least"], rule conjI, rule invertible_mult) show "P ** interchange_rows (mat 1) (from_nat i) ?least ** A = bezout_iterate ?interchange (nrows A - Suc 0) (from_nat i) (from_nat k) bezout" using P by (metis (no_types, lifting) interchange_rows_mat_1 matrix_mul_assoc) show "invertible P" by (rule inv_P) show "invertible (interchange_rows (mat 1) (from_nat i) ?least)" by (simp add: invertible_interchange_rows) qed qed qed lemma echelon_form_of_upt_k_invertible: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" shows "\P. invertible P \ P**A = (echelon_form_of_upt_k A k bezout)" proof (induct k) case 0 show ?case unfolding echelon_form_of_upt_k_def by (simp add: echelon_form_of_column_k_invertible[OF ib]) next case (Suc k) have set_rw: "[0..Final results\ lemma echelon_form_echelon_form_of: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext bezout" shows "echelon_form (echelon_form_of A bezout)" proof - have n: "ncols A - 1 < ncols A" unfolding ncols_def by auto show ?thesis unfolding echelon_form_def echelon_form_of_def using echelon_echelon_form_of_upt_k[OF n ib] unfolding ncols_def by simp qed lemma echelon_form_of_invertible: fixes A::"'a::{bezout_domain}^'cols::{mod_type}^'rows::{mod_type}" assumes ib: "is_bezout_ext (bezout)" shows "\P. invertible P \ P ** A = (echelon_form_of A bezout) \ echelon_form (echelon_form_of A bezout)" using echelon_form_of_upt_k_invertible[OF ib] echelon_form_echelon_form_of[OF ib] unfolding echelon_form_of_def by fast text\Executable version\ corollary echelon_form_echelon_form_of_euclidean: fixes A::"'a::{euclidean_ring_gcd}^'cols::{mod_type}^'rows::{mod_type}" shows "echelon_form (echelon_form_of_euclidean A)" using echelon_form_echelon_form_of is_bezout_ext_euclid_ext2 unfolding echelon_form_of_euclidean_def by auto corollary echelon_form_of_euclidean_invertible: fixes A::"'a::{euclidean_ring_gcd}^'cols::{mod_type}^'rows::{mod_type}" shows "\P. invertible P \ P**A = (echelon_form_of A euclid_ext2) \ echelon_form (echelon_form_of A euclid_ext2)" using echelon_form_of_invertible[OF is_bezout_ext_euclid_ext2] . subsection\More efficient code equations\ definition "echelon_form_of_column_k_efficient bezout A' k = (let (A, i) = A'; from_nat_k = from_nat k; from_nat_i = from_nat i; all_zero_below_i = (\m>from_nat_i. A $ m $ from_nat_k = 0) in if (i = nrows A) \ (A $ from_nat_i $ from_nat_k = 0) \ all_zero_below_i then (A, i) else if all_zero_below_i then (A, i + 1) else let n = (LEAST n. 
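(* echelon_form_of_column_k_efficient computes the same step as
   echelon_form_of_column_k (the [code] equation that follows proves the two
   equal), but it binds from_nat k, from_nat i and the test "every entry
   strictly below row i in column k is zero" once in a let; the first branch is
   then decided by the single lookup A $ from_nat_i $ from_nat_k combined with
   that cached test (or i = nrows A), so the generated code scans the column
   only once. *)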
A $ n $ from_nat_k \ 0 \ from_nat_i \ n); interchange_A = interchange_rows A (from_nat_i) n in (bezout_iterate (interchange_A) (nrows A - 1) (from_nat_i) (from_nat_k) bezout, i + 1))" lemma echelon_form_of_column_k_efficient[code]: "(echelon_form_of_column_k bezout) (A,i) k = (echelon_form_of_column_k_efficient bezout) (A,i) k" unfolding echelon_form_of_column_k_def echelon_form_of_column_k_efficient_def unfolding Let_def by force end (*********** Possible future work: ***********) (* - Other kind of Echelon Forms (minimal Echelon, Howell...) and more applications: ranks, system of equations... - Hermite Normal Form and its uniqueness. HNF is unique over PID (not in PIR) with respect to a given complete set of associates and a given complete set of residues. In general terms, with the integers we use the positive integers, in F[x] we use monic polynomials... - Smith Normal Form and its uniqueness. *) diff --git a/thys/Flyspeck-Tame/FaceDivisionProps.thy b/thys/Flyspeck-Tame/FaceDivisionProps.thy --- a/thys/Flyspeck-Tame/FaceDivisionProps.thy +++ b/thys/Flyspeck-Tame/FaceDivisionProps.thy @@ -1,4954 +1,4955 @@ (* Author: Gertrud Bauer, Tobias Nipkow *) section\Properties of Face Division\ theory FaceDivisionProps imports Plane EnumeratorProps begin subsection\Finality\ (********************* makeFaceFinal ****************************) lemma vertices_makeFaceFinal: "vertices(makeFaceFinal f g) = vertices g" by(induct g)(simp add:vertices_graph_def makeFaceFinal_def) lemma edges_makeFaceFinal: "\ (makeFaceFinal f g) = \ g" proof - { fix fs have "(\\<^bsub>f\set (makeFaceFinalFaceList f fs)\<^esub> edges f) = (\\<^bsub>f\ set fs\<^esub> edges f)" apply(unfold makeFaceFinalFaceList_def) apply(induct f) by(induct fs) simp_all } thus ?thesis by(simp add:edges_graph_def makeFaceFinal_def) qed lemma in_set_repl_setFin: "f \ set fs \ final f \ f \ set (replace f' [setFinal f'] fs)" by (induct fs) auto lemma in_set_repl: "f \ set fs \ f \ f' \ f \ set (replace f' fs' fs)" by (induct fs) auto lemma makeFaceFinals_preserve_finals: "f \ set (finals g) \ f \ set (finals (makeFaceFinal f' g))" by (induct g) (simp add:makeFaceFinal_def finals_def makeFaceFinalFaceList_def in_set_repl_setFin) lemma len_faces_makeFaceFinal[simp]: "|faces (makeFaceFinal f g)| = |faces g|" by(simp add:makeFaceFinal_def makeFaceFinalFaceList_def) lemma len_finals_makeFaceFinal: "f \ \ g \ \ final f \ |finals (makeFaceFinal f g)| = |finals g| + 1" by(simp add:makeFaceFinal_def finals_def makeFaceFinalFaceList_def length_filter_replace1) lemma len_nonFinals_makeFaceFinal: "\ \ final f; f \ \ g\ \ |nonFinals (makeFaceFinal f g)| = |nonFinals g| - 1" by(simp add:makeFaceFinal_def nonFinals_def makeFaceFinalFaceList_def length_filter_replace2) lemma set_finals_makeFaceFinal[simp]: "distinct(faces g) \ f \ \ g \ set(finals (makeFaceFinal f g)) = insert (setFinal f) (set(finals g))" by(auto simp:finals_def makeFaceFinal_def makeFaceFinalFaceList_def distinct_set_replace) lemma splitFace_preserve_final: "f \ set (finals g) \ \ final f' \ f \ set (finals (snd (snd (splitFace g i j f' ns))))" by (induct g) (auto simp add: splitFace_def finals_def split_def intro: in_set_repl) lemma splitFace_nonFinal_face: "\ final (fst (snd (splitFace g i j f' ns)))" by (simp add: splitFace_def split_def split_face_def) lemma subdivFace'_preserve_finals: "\n i f' g. 
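(* Summary of the finality lemmas collected above: makeFaceFinal keeps the
   vertices and edges of the graph and only marks the given face as final, so
   every face that is already final stays among the finals; splitFace leaves
   final faces untouched and the freshly created face is not final.  The
   statement being proved next extends this to subdivFace', i.e. to a whole
   sequence of face divisions. *)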
f \ set (finals g) \ \ final f' \ f \ set (finals (subdivFace' g f' i n is))" proof (induct "is") case Nil then show ?case by(simp add:makeFaceFinals_preserve_finals) next case (Cons j "js") then show ?case proof (cases j) case None with Cons show ?thesis by simp next case (Some sj) with Cons show ?thesis by (auto simp: splitFace_preserve_final splitFace_nonFinal_face split_def) qed qed lemma subdivFace_pres_finals: "f \ set (finals g) \ \ final f' \ f \ set (finals (subdivFace g f' is))" by(simp add:subdivFace_def subdivFace'_preserve_finals) declare Nat.diff_is_0_eq' [simp del] (********************* section is_prefix ****************************) subsection \\is_prefix\\ definition is_prefix :: "'a list \ 'a list \ bool" where "is_prefix ls vs \ (\ bs. vs = ls @ bs)" lemma is_prefix_add: "is_prefix ls vs \ is_prefix (as @ ls) (as @ vs)" by (simp add: is_prefix_def) lemma is_prefix_hd[simp]: "is_prefix [l] vs = (l = hd vs \ vs \ [])" apply (rule iffI) apply (auto simp: is_prefix_def) apply (intro exI) apply (subgoal_tac "vs = hd vs # tl vs") apply assumption by auto lemma is_prefix_f[simp]: "is_prefix (a#as) (a#vs) = is_prefix as vs" by (auto simp: is_prefix_def) lemma splitAt_is_prefix: "ram \ set vs \ is_prefix (fst (splitAt ram vs) @ [ram]) vs" by (auto dest!: splitAt_ram simp: is_prefix_def) (******************** section is_sublist *********************************) subsection \\is_sublist\\ definition is_sublist :: "'a list \ 'a list \ bool" where "is_sublist ls vs \ (\ as bs. vs = as @ ls @ bs)" lemma is_prefix_sublist: "is_prefix ls vs \ is_sublist ls vs" by (auto simp: is_prefix_def is_sublist_def) lemma is_sublist_trans: "is_sublist as bs \ is_sublist bs cs \ is_sublist as cs" apply (simp add: is_sublist_def) apply (elim exE) apply (subgoal_tac "cs = (asaa @ asa) @ as @ (bsa @ bsaa)") apply (intro exI) apply assumption by force lemma is_sublist_add: "is_sublist as bs \ is_sublist as (xs @ bs @ ys)" apply (simp add: is_sublist_def) apply (elim exE) apply (subgoal_tac "xs @ bs @ ys = (xs @ asa) @ as @ (bsa @ ys)") apply (intro exI) apply assumption by auto lemma is_sublist_rec: "is_sublist xs ys = (if length xs > length ys then False else if xs = take (length xs) ys then True else is_sublist xs (tl ys))" proof (simp add:is_sublist_def, goal_cases) case 1 show ?case proof (standard, goal_cases) case 1 show ?case proof (standard, goal_cases) case xs: 1 show ?case proof (standard, goal_cases) case 1 show ?case by auto next case 2 show ?case proof (standard, goal_cases) case 1 have "ys = take |xs| ys @ drop |xs| ys" by simp also have "\ = [] @ xs @ drop |xs| ys" by(simp add:xs[symmetric]) finally show ?case by blast qed qed qed next case 2 show ?case proof (standard, goal_cases) case xs_neq: 1 show ?case proof (standard, goal_cases) case 1 show ?case by auto next case 2 show ?case proof (standard, goal_cases) case not_less: 1 show ?case proof (standard, goal_cases) case 1 then obtain as bs where ys: "ys = as @ xs @ bs" by blast have "as \ []" using xs_neq ys by auto then obtain a as' where "as = a # as'" by (simp add:neq_Nil_conv) blast hence "tl ys = as' @ xs @ bs" by(simp add:ys) thus ?case by blast next case 2 then obtain as bs where ys: "tl ys = as @ xs @ bs" by blast have "ys \ []" using xs_neq not_less by auto then obtain y ys' where "ys = y # ys'" by (simp add:neq_Nil_conv) blast hence "ys = (y#as) @ xs @ bs" using ys by simp thus ?case by blast qed qed qed qed qed qed lemma not_sublist_len[simp]: "|ys| < |xs| \ \ is_sublist xs ys" by(simp add:is_sublist_rec) lemma 
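(* Concrete readings of the two predicates just introduced: is_prefix ls vs
   holds when vs starts with ls, e.g. is_prefix [1, 2] [1, 2, 3] (take bs =
   [3]); is_sublist ls vs holds when ls occurs contiguously somewhere in vs,
   e.g. is_sublist [2, 3] [1, 2, 3, 4] (take as = [1], bs = [4]).
   is_sublist_rec above rephrases the latter as a recursion: either ls is a
   prefix of vs, or it is a sublist of tl vs, which fails once vs is shorter
   than ls. *)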
is_sublist_simp[simp]: "a \ v \ is_sublist (a#as) (v#vs) = is_sublist (a#as) vs" proof assume av: "a \ v" and subl: "is_sublist (a # as) (v # vs)" then obtain rs ts where vvs: "v#vs = rs @ (a # as) @ ts" by (auto simp: is_sublist_def) with av have "rs \ []" by auto with vvs have "tl (v#vs) = tl rs @ a # as @ ts" by auto then have "vs = tl rs @ a # as @ ts" by auto then show "is_sublist (a # as) vs" by (auto simp: is_sublist_def) next assume av: "a \ v" and subl: "is_sublist (a # as) vs" then show "is_sublist (a # as) (v # vs)" apply (auto simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "v # asa @ a # as @ bs = (v # asa) @ a # as @ bs") apply assumption by auto qed lemma is_sublist_id[simp]: "is_sublist vs vs" apply (auto simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "vs = [] @ vs @ []") by (assumption) auto lemma is_sublist_in: "is_sublist (a#as) vs \ a \ set vs" by (auto simp: is_sublist_def) lemma is_sublist_in1: "is_sublist [x,y] vs \ y \ set vs" by (auto simp: is_sublist_def) lemma is_sublist_notlast[simp]: "distinct vs \ x = last vs \ \ is_sublist [x,y] vs" proof assume dvs: "distinct vs" and xl: "x = last vs" and subl:"is_sublist [x, y] vs" then obtain rs ts where vs: "vs = rs @ x # y # ts" by (auto simp: is_sublist_def) define as where "as = rs @ [x]" define bs where "bs = y # ts" then have bsne: "bs \ []" by auto from as_def bs_def have vs2: "vs = as @ bs" using vs by auto with as_def have xas: "x \ set as" by auto from bsne vs2 have "last vs = last bs" by auto with xl have "x = last bs" by auto with bsne have "bs = (butlast bs) @ [x]" by auto then have "x \ set bs" by (induct bs) auto with xas vs2 dvs show False by auto qed lemma is_sublist_nth1: "is_sublist [x,y] ls \ \ i j. i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ Suc i = j" proof - assume subl: "is_sublist [x,y] ls" then obtain as bs where "ls = as @ x # y # bs" by (auto simp: is_sublist_def) then have "(length as) < length ls \ (Suc (length as)) < length ls \ ls!(length as) = x \ ls!(Suc (length as)) = y \ Suc (length as) = (Suc (length as))" apply auto apply hypsubst_thin apply (induct as) by auto then show ?thesis by auto qed lemma is_sublist_nth2: "\ i j. i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ Suc i = j \ is_sublist [x,y] ls " proof - assume "\ i j. i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ Suc i = j" then obtain i j where vors: "i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ Suc i = j" by auto then have "ls = take (Suc (Suc i)) ls @ drop (Suc (Suc i)) ls" by auto with vors have "ls = take (Suc i) ls @ [ls! (Suc i)] @ drop (Suc (Suc i)) ls" by (auto simp: take_Suc_conv_app_nth) with vors have "ls = take i ls @ [ls!i] @ [ls! 
(Suc i)] @ drop (Suc (Suc i)) ls" by (auto simp: take_Suc_conv_app_nth) with vors show ?thesis by (auto simp: is_sublist_def) qed lemma is_sublist_tl: "is_sublist (a # as) vs \ is_sublist as vs" apply (simp add: is_sublist_def) apply (elim exE) apply (intro exI) apply (subgoal_tac "vs = (asa @ [a]) @ as @ bs") apply assumption by auto lemma is_sublist_hd: "is_sublist (a # as) vs \ is_sublist [a] vs" apply (simp add: is_sublist_def) by auto lemma is_sublist_hd_eq[simp]: "(is_sublist [a] vs) = (a \ set vs)" apply (rule_tac iffI) apply (simp add: is_sublist_def) apply force apply (simp add: is_sublist_def) apply (induct vs) apply force apply (case_tac "a = aa") apply force apply (subgoal_tac "a \ set vs") apply simp apply (elim exE) apply (intro exI) apply (subgoal_tac "aa # vs = (aa # as) @ a # bs") apply (assumption) by auto lemma is_sublist_distinct_prefix: "is_sublist (v#as) (v # vs) \ distinct (v # vs) \ is_prefix as vs" proof - assume d: "distinct (v # vs)" and subl: "is_sublist (v # as) (v # vs)" from subl obtain rs ts where v_vs: "v # vs = rs @ (v # as) @ ts" by (simp add: is_sublist_def) auto from d have v: "v \ set vs" by auto then have "\ is_sublist (v # as) vs" by (auto dest: is_sublist_hd) with v_vs have "rs = []" apply (cases rs) by (auto simp: is_sublist_def) with v_vs show "is_prefix as vs" by (auto simp: is_prefix_def) qed lemma is_sublist_distinct[intro]: "is_sublist as vs \ distinct vs \ distinct as" by (auto simp: is_sublist_def) lemma is_sublist_y_hd: "distinct vs \ y = hd vs \ \ is_sublist [x,y] vs" proof assume d: "distinct vs" and yh: "y = hd vs" and subl: "is_sublist [x, y] vs" then obtain rs ts where vs: "vs = rs @ x # y # ts" by (auto simp: is_sublist_def) define as where "as = rs @ [x]" then have asne: "as \ []" by auto define bs where "bs = y # ts" then have bsne: "bs \ []" by auto from as_def bs_def have vs2: "vs = as @ bs" using vs by auto from bs_def have xbs: "y \ set bs" by auto from vs2 asne have "hd vs = hd as" by simp with yh have "y = hd as" by auto with asne have "y \ set as" by (induct as) auto with d xbs vs2 show False by auto qed lemma is_sublist_at1: "distinct (as @ bs) \ is_sublist [x,y] (as @ bs) \ x \ (last as) \ is_sublist [x,y] as \ is_sublist [x,y] bs" proof (cases "x \ set as") assume d: "distinct (as @ bs)" and subl: "is_sublist [x, y] (as @ bs)" and xnl: "x \ last as" define vs where "vs = as @ bs" with d have dvs: "distinct vs" by auto case True with xnl subl have ind: "is_sublist (as@bs) vs \ is_sublist [x, y] as" proof (induct as) case Nil then show ?case by force next case (Cons a as) assume ih: "\is_sublist (as@bs) vs; x \ last as; is_sublist [x,y] (as @ bs); x \ set as\ \ is_sublist [x, y] as" and subl_aas_vs: "is_sublist ((a # as) @ bs) vs" and xnl2: "x \ last (a # as)" and subl2: "is_sublist [x, y] ((a # as) @ bs)" and x: "x \ set (a # as)" then have rule1: "x \ a \ is_sublist [x,y] as" apply (cases "as = []") apply simp apply (rule_tac ih) by (auto dest: is_sublist_tl) from dvs subl_aas_vs have daas: "distinct (a # as @ bs)" apply (rule_tac is_sublist_distinct) by auto from xnl2 have asne: "x = a \ as \ []" by auto with subl2 daas have yhdas: "x = a \ y = hd as" apply simp apply (drule_tac is_sublist_distinct_prefix) by auto with asne have "x = a \ as = y # tl as" by auto with asne yhdas have "x = a \ is_prefix [x,y] (a # as)" by auto then have rule2: "x = a \ is_sublist [x,y] (a # as)" by (simp add: is_prefix_sublist) from rule1 rule2 show ?case by (cases "x = a") auto qed from vs_def d have "is_sublist [x, y] as" by (rule_tac ind) 
auto then show ?thesis by auto next assume d: "distinct (as @ bs)" and subl: "is_sublist [x, y] (as @ bs)" and xnl: "x \ last as" define ars where "ars = as" case False with ars_def have xars: "x \ set ars" by auto from subl have ind: "is_sublist as ars \ is_sublist [x, y] bs" proof (induct as) case Nil then show ?case by auto next case (Cons a as) assume ih: "\is_sublist as ars; is_sublist [x, y] (as @ bs)\ \ is_sublist [x, y] bs" and subl_aasbsvs: "is_sublist (a # as) ars" and subl2: "is_sublist [x, y] ((a # as) @ bs)" from subl_aasbsvs ars_def False have "x \ a" by (auto simp:is_sublist_in) with subl_aasbsvs subl2 show ?thesis apply (rule_tac ih) by (auto dest: is_sublist_tl) qed from ars_def have "is_sublist [x, y] bs" by (rule_tac ind) auto then show ?thesis by auto qed lemma is_sublist_at4: "distinct (as @ bs) \ is_sublist [x,y] (as @ bs) \ as \ [] \ x = last as \ y = hd bs" proof - assume d: "distinct (as @ bs)" and subl: "is_sublist [x,y] (as @ bs)" and asne: "as \ []" and xl: "x = last as" define vs where "vs = as @ bs" with subl have "is_sublist [x,y] vs" by auto then obtain rs ts where vs2: "vs = rs @ x # y # ts" by (auto simp: is_sublist_def) from vs_def d have dvs:"distinct vs" by auto from asne xl have as:"as = butlast as @ [x]" by auto with vs_def have vs3: "vs = butlast as @ x # bs" by auto from dvs vs2 vs3 have "rs = butlast as" apply (rule_tac dist_at1) by auto then have "rs @ [x] = butlast as @ [x]" by auto with as have "rs @ [x] = as" by auto then have "as = rs @ [x]" by auto with vs2 vs_def have "bs = y # ts" by auto then show ?thesis by auto qed lemma is_sublist_at5: "distinct (as @ bs) \ is_sublist [x,y] (as @ bs) \ is_sublist [x,y] as \ is_sublist [x,y] bs \ x = last as \ y = hd bs" apply (case_tac "as = []") apply simp apply (cases "x = last as") apply (subgoal_tac "y = hd bs") apply simp apply (rule is_sublist_at4) apply assumption+ (*apply force+ *) apply (drule_tac is_sublist_at1) by auto lemma is_sublist_rev: "is_sublist [a,b] (rev zs) = is_sublist [b,a] zs" apply (simp add: is_sublist_def) apply (intro iffI) apply (elim exE) apply (intro exI) apply (subgoal_tac "zs = (rev bs) @ b # a # rev as") apply assumption apply (subgoal_tac "rev (rev zs) = rev (as @ a # b # bs)") apply (thin_tac "rev zs = as @ a # b # bs") apply simp apply simp apply (elim exE) apply (intro exI) by force lemma is_sublist_at5'[simp]: "distinct as \ distinct bs \ set as \ set bs = {} \ is_sublist [x,y] (as @ bs) \ is_sublist [x,y] as \ is_sublist [x,y] bs \ x = last as \ y = hd bs" apply (subgoal_tac "distinct (as @ bs)") apply (drule is_sublist_at5) by auto lemma splitAt_is_sublist1R[simp]: "ram \ set vs \ is_sublist (fst (splitAt ram vs) @ [ram]) vs" apply (auto dest!: splitAt_ram simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "vs = [] @ fst (splitAt ram vs) @ ram # snd (splitAt ram vs)") apply assumption by simp lemma splitAt_is_sublist2R[simp]: "ram \ set vs \ is_sublist (ram # snd (splitAt ram vs)) vs" apply (auto dest!: splitAt_ram splitAt_no_ram simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "vs = fst (splitAt ram vs) @ ram # snd (splitAt ram vs) @ []") apply assumption by auto (*********************** section is_nextElem *****************************) subsection \\is_nextElem\\ definition is_nextElem :: "'a list \ 'a \ 'a \ bool" where "is_nextElem xs x y \ is_sublist [x,y] xs \ xs \ [] \ x = last xs \ y = hd xs" lemma is_nextElem_a[intro]: "is_nextElem vs a b \ a \ set vs" by (auto simp: is_nextElem_def is_sublist_def) lemma is_nextElem_b[intro]: 
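(* is_nextElem xs x y says that y is the cyclic successor of x in xs: either
   [x, y] occurs contiguously in xs, or x is the last and y the first element
   (the wrap-around case).  For example both is_nextElem [1, 2, 3] 1 2 (via
   is_sublist [1, 2] [1, 2, 3]) and is_nextElem [1, 2, 3] 3 1 (via the
   wrap-around disjunct) hold. *)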
"is_nextElem vs a b \ b \ set vs" by (auto simp: is_nextElem_def is_sublist_def) lemma is_nextElem_last_hd[intro]: "distinct vs \ is_nextElem vs x y \ x = last vs \ y = hd vs" by (auto simp: is_nextElem_def) lemma is_nextElem_last_ne[intro]: "distinct vs \ is_nextElem vs x y \ x = last vs \ vs \ []" by (auto simp: is_nextElem_def) lemma is_nextElem_sublistI: "is_sublist [x,y] vs \ is_nextElem vs x y" by (auto simp: is_nextElem_def) lemma is_nextElem_nth1: "is_nextElem ls x y \ \ i j. i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ (Suc i) mod (length ls) = j" proof (cases "is_sublist [x,y] ls") assume is_nextElem: "is_nextElem ls x y" case True then show ?thesis apply (drule_tac is_sublist_nth1) by auto next assume is_nextElem: "is_nextElem ls x y" case False with is_nextElem have hl: "ls \ [] \ last ls = x \ hd ls = y" by (auto simp: is_nextElem_def) then have j: "ls!0 = y" by (cases ls) auto from hl have i: "ls!(length ls - 1) = x" by (cases ls rule: rev_exhaust) auto from i j hl have "(length ls - 1) < length ls \ 0 < length ls \ ls!(length ls - 1) = x \ ls!0 = y \ (Suc (length ls - 1)) mod (length ls) = 0" by auto then show ?thesis apply (intro exI) . qed lemma is_nextElem_nth2: " \ i j. i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ (Suc i) mod (length ls) = j \ is_nextElem ls x y" proof - assume "\ i j. i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ (Suc i) mod (length ls) = j" then obtain i j where vors: "i < length ls \ j < length ls \ ls!i = x \ ls!j = y \ (Suc i) mod (length ls) = j" by auto then show ?thesis proof (cases "Suc i = length ls") case True with vors have "j = 0" by auto (*ls ! i = last ls*) with True vors show ?thesis apply (auto simp: is_nextElem_def) apply (cases ls rule: rev_exhaust) apply auto apply (cases ls) by auto next case False with vors have "is_sublist [x,y] ls" apply (rule_tac is_sublist_nth2) by auto then show ?thesis by (simp add: is_nextElem_def) qed qed lemma is_nextElem_rotate1_aux: "is_nextElem (rotate m ls) x y \ is_nextElem ls x y" proof - assume is_nextElem: "is_nextElem (rotate m ls) x y" define n where "n = m mod length ls" then have rot_eq: "rotate m ls = rotate n ls" by (auto intro: rotate_conv_mod) with is_nextElem have "is_nextElem (rotate n ls) x y" by simp then obtain i j where vors:"i < length (rotate n ls) \ j < length (rotate n ls) \ (rotate n ls)!i = x \ (rotate n ls)!j = y \ (Suc i) mod (length (rotate n ls)) = j" by (drule_tac is_nextElem_nth1) auto then have lls: "0 < length ls" by auto define k where "k = (i+n) mod (length ls)" with lls have sk: "k < length ls" by simp from k_def lls vors have "ls!k = (rotate n ls)!(i mod (length ls))" by (simp add: nth_rotate) with vors have lsk: "ls!k = x" by simp define l where "l = (j+n) mod (length ls)" with lls have sl: "l < length ls" by simp from l_def lls vors have "ls!l = (rotate n ls)!(j mod (length ls))" by (simp add: nth_rotate) with vors have lsl: "ls!l = y" by simp from vors k_def l_def have "(Suc i) mod length ls = j" by simp then have "(Suc i) mod length ls = j mod length ls" by auto then have "((Suc i) mod length ls + n mod (length ls)) mod length ls = (j mod length ls + n mod (length ls)) mod length ls" by simp then have "((Suc i) + n) mod length ls = (j + n) mod length ls" by (simp add: mod_simps) with vors k_def l_def have "(Suc k) mod (length ls) = l" by (simp add: mod_simps) with sk lsk sl lsl show ?thesis by (auto intro: is_nextElem_nth2) qed lemma is_nextElem_rotate_eq[simp]: "is_nextElem (rotate m ls) x y = is_nextElem ls x y" apply (auto dest: 
is_nextElem_rotate1_aux) apply (rule is_nextElem_rotate1_aux) apply (subgoal_tac "is_nextElem (rotate (length ls - m mod length ls) (rotate m ls)) x y") apply assumption by simp lemma is_nextElem_congs_eq: "ls \ ms \ is_nextElem ls x y = is_nextElem ms x y" by (auto simp: congs_def) lemma is_nextElem_rev[simp]: "is_nextElem (rev zs) a b = is_nextElem zs b a" apply (simp add: is_nextElem_def is_sublist_rev) apply (case_tac "zs = []") apply simp apply simp apply (case_tac "a = hd zs") apply (case_tac "zs") apply simp apply simp apply simp apply (case_tac "a = last (rev zs) \ b = last zs") apply simp apply (case_tac "zs" rule: rev_exhaust) apply simp apply (case_tac "ys") apply simp apply simp by force lemma is_nextElem_circ: "\ distinct xs; is_nextElem xs a b; is_nextElem xs b a \ \ |xs| \ 2" apply(drule is_nextElem_nth1) apply(drule is_nextElem_nth1) apply (clarsimp) apply(rename_tac i j) apply(frule_tac i=j and j = "Suc i mod |xs|" in nth_eq_iff_index_eq) apply assumption+ apply(frule_tac j=i and i = "Suc j mod |xs|" in nth_eq_iff_index_eq) apply assumption+ apply(rule ccontr) apply(simp add: distinct_conv_nth mod_Suc) done subsection\\nextElem, sublist, is_nextElem\\ lemma is_sublist_eq: "distinct vs \ c \ y \ (nextElem vs c x = y) = is_sublist [x,y] vs" proof - assume d: "distinct vs" and c: "c \ y" have r1: "nextElem vs c x = y \ is_sublist [x,y] vs" proof - assume fn: "nextElem vs c x = y" with c show ?thesis by(drule_tac nextElem_cases)(auto simp: is_sublist_def) qed with d have r2: "is_sublist [x,y] vs \ nextElem vs c x = y" apply (simp add: is_sublist_def) apply (elim exE) by auto show ?thesis apply (intro iffI r1) by (auto intro: r2) qed lemma is_nextElem1: "distinct vs \ x \ set vs \ nextElem vs (hd vs) x = y \ is_nextElem vs x y" proof - assume d: "distinct vs" and x: "x \ set vs" and fn: "nextElem vs (hd vs) x = y" from x have r0: "vs \ []" by auto from d fn have r1: "x = last vs \ y = hd vs" by (auto) from d fn have r3: "hd vs \ y \ (\ a b. 
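(* This block connects the executable successor function nextElem with the
   relational is_nextElem: for a distinct list vs and x in set vs,
   nextElem vs (hd vs) x = y holds exactly when is_nextElem vs x y does
   (nextElem_is_nextElem below).  With hd vs supplied as the fall-back value
   the function wraps around, e.g. on [1, 2, 3] the successor of 1 is 2 and the
   successor of 3 is again 1. *)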
vs = a @ [x,y] @ b)" by (drule_tac nextElem_cases) auto from x obtain n where xn:"x = vs!n" and nl: "n < length vs" by (auto simp: in_set_conv_nth) define as where "as = take n vs" define bs where "bs = drop (Suc n) vs" from as_def bs_def xn nl have vs:"vs = as @ [x] @ bs" by (auto intro: id_take_nth_drop) then have r2: "x \ last vs \ y \ hd vs" proof - assume notx: "x \ last vs" from vs notx have "bs \ []" by auto with vs have r2: "vs = as @ [x, hd bs] @ tl bs" by auto with d have ineq: "hd bs \ hd vs" by (cases as) auto from d fn r2 have "y = hd bs" by auto with ineq show ?thesis by auto qed from r0 r1 r2 r3 show ?thesis apply (simp add:is_nextElem_def is_sublist_def) apply (cases "x = last vs") by auto qed lemma is_nextElem2: "distinct vs \ x \ set vs \ is_nextElem vs x y \ nextElem vs (hd vs) x = y" proof - assume d: "distinct vs" and x: "x \ set vs" and is_nextElem: "is_nextElem vs x y" then show ?thesis apply (simp add: is_nextElem_def) apply (cases "is_sublist [x,y] vs") apply (cases "y = hd vs") apply (simp add: is_sublist_def) apply (force dest: distinct_hd_not_cons) apply (subgoal_tac "hd vs \ y") apply (simp add: is_sublist_eq) by auto qed lemma nextElem_is_nextElem: "distinct xs \ x \ set xs \ is_nextElem xs x y = (nextElem xs (hd xs) x = y)" by (auto intro!: is_nextElem1 is_nextElem2) lemma nextElem_congs_eq: "xs \ ys \ distinct xs \ x \ set xs \ nextElem xs (hd xs) x = nextElem ys (hd ys) x" proof - assume eq: "xs \ ys" and dist: "distinct xs" and x: "x \ set xs" define y where "y = nextElem xs (hd xs) x" then have f1:"nextElem xs (hd xs) x = y" by auto with dist x have "is_nextElem xs x y" by (auto intro: is_nextElem1) with eq have "is_nextElem ys x y" by (simp add:is_nextElem_congs_eq) with eq dist x have f2:"nextElem ys (hd ys) x = y" by (auto simp: congs_distinct intro: is_nextElem2) from f1 f2 show ?thesis by auto qed lemma is_sublist_is_nextElem: "distinct vs \ is_nextElem vs x y \ is_sublist as vs \ x \ set as \ x \ last as \ is_sublist [x,y] as" proof - assume d: "distinct vs" and is_nextElem: "is_nextElem vs x y" and subl: "is_sublist as vs" and xin: "x \ set as" and xnl: "x \ last as" from xin have asne: "as \ []" by auto with subl have vsne: "vs \ []" by (auto simp: is_sublist_def) from subl obtain rs ts where vs: "vs = rs @ as @ ts" apply (simp add: is_sublist_def) apply (elim exE) by auto with d xnl asne have "x \ last vs" proof (cases "ts = []") case True with d xnl asne vs show ?thesis by force next define lastvs where "lastvs = last ts" case False with vs lastvs_def have vs2: "vs = rs @ as @ butlast ts @ [lastvs]" by auto with d have "lastvs \ set as" by auto with xin have "lastvs \ x" by auto with vs2 show ?thesis by auto qed with is_nextElem have subl_vs: "is_sublist [x,y] vs" by (auto simp: is_nextElem_def) from d xin vs have "\ is_sublist [x] rs" by auto then have nrs: "\ is_sublist [x,y] rs" by (auto dest: is_sublist_hd) from d xin vs have "\ is_sublist [x] ts" by auto then have nts: "\ is_sublist [x,y] ts" by (auto dest: is_sublist_hd) from d xin vs have xnrs: "x \ set rs" by auto then have notrs: "\ is_sublist [x,y] rs" by (auto simp:is_sublist_in) from xnrs have xnlrs: "rs \ [] \ x \ last rs" by (induct rs) auto from d xin vs have xnts: "x \ set ts" by auto then have notts: "\ is_sublist [x,y] ts" by (auto simp:is_sublist_in) from d vs subl_vs have "is_sublist [x,y] rs \ is_sublist [x,y] (as@ts)" apply (cases "rs = []") apply simp apply (rule_tac is_sublist_at1) by (auto intro!: xnlrs) with notrs have "is_sublist [x,y] (as@ts)" by auto with d vs xnl 
have "is_sublist [x,y] as \ is_sublist [x,y] ts" apply (rule_tac is_sublist_at1) by auto with notts show "is_sublist [x,y] as" by auto qed subsection \\before\\ definition before :: "'a list \ 'a \ 'a \ bool" where "before vs ram1 ram2 \ \ a b c. vs = a @ ram1 # b @ ram2 # c" lemma before_dist_fst_fst[simp]: "before vs ram1 ram2 \ distinct vs \ fst (splitAt ram2 (fst (splitAt ram1 vs))) = fst (splitAt ram1 (fst (splitAt ram2 vs)))" apply (simp add: before_def) apply (elim exE) apply (drule splitAt_dist_ram_all) by (auto dest!: pairD) lemma before_dist_fst_snd[simp]: "before vs ram1 ram2 \ distinct vs \ fst (splitAt ram2 (snd (splitAt ram1 vs))) = snd (splitAt ram1 (fst (splitAt ram2 vs)))" apply (simp add: before_def) apply (elim exE) apply (drule_tac splitAt_dist_ram_all) by (auto dest!: pairD) lemma before_dist_snd_fst[simp]: "before vs ram1 ram2 \ distinct vs \ snd (splitAt ram2 (fst (splitAt ram1 vs))) = snd (splitAt ram1 (snd (splitAt ram2 vs)))" apply (simp add: before_def) apply (elim exE) apply (drule_tac splitAt_dist_ram_all) by (auto dest!: pairD) lemma before_dist_snd_snd[simp]: "before vs ram1 ram2 \ distinct vs \ snd (splitAt ram2 (snd (splitAt ram1 vs))) = fst (splitAt ram1 (snd (splitAt ram2 vs)))" apply (simp add: before_def) apply (elim exE) apply (drule_tac splitAt_dist_ram_all) by (auto dest!: pairD) lemma before_dist_snd[simp]: "before vs ram1 ram2 \ distinct vs \ fst (splitAt ram1 (snd (splitAt ram2 vs))) = snd (splitAt ram2 vs)" apply (simp add: before_def) apply (elim exE) apply (drule_tac splitAt_dist_ram_all) by (auto dest!: pairD) lemma before_dist_fst[simp]: "before vs ram1 ram2 \ distinct vs \ fst (splitAt ram1 (fst (splitAt ram2 vs))) = fst (splitAt ram1 vs)" apply (simp add: before_def) apply (elim exE) apply (drule_tac splitAt_dist_ram_all) by (auto dest!: pairD) lemma before_or: "ram1 \ set vs \ ram2 \ set vs \ ram1 \ ram2 \ before vs ram1 ram2 \ before vs ram2 ram1" proof - assume r1: "ram1 \ set vs" and r2: "ram2 \ set vs" and r12: "ram1 \ ram2" then show ?thesis proof (cases "ram2 \ set (snd (splitAt ram1 vs))") define a where "a = fst (splitAt ram1 vs)" define b where "b = fst (splitAt ram2 (snd (splitAt ram1 vs)))" define c where "c = snd (splitAt ram2 (snd (splitAt ram1 vs)))" case True with r1 a_def b_def c_def have "vs = a @ [ram1] @ b @ [ram2] @ c" by (auto dest!: splitAt_ram) then show ?thesis apply (simp add: before_def) by auto next define ab where "ab = fst (splitAt ram1 vs)" case False with r1 r2 r12 ab_def have r2': "ram2 \ set ab" by (auto intro: splitAt_ram3) define a where "a = fst (splitAt ram2 ab)" define b where "b = snd (splitAt ram2 ab)" define c where "c = snd (splitAt ram1 vs)" from r1 ab_def c_def have "vs = ab @ [ram1] @ c" by (auto dest!: splitAt_ram) with r2' a_def b_def have "vs = (a @ [ram2] @ b) @ [ram1] @ c" by (drule_tac splitAt_ram) simp then show ?thesis apply (simp add: before_def) apply (rule disjI2) by auto qed qed lemma before_r1: "before vs r1 r2 \ r1 \ set vs" by (auto simp: before_def) lemma before_r2: "before vs r1 r2 \ r2 \ set vs" by (auto simp: before_def) lemma before_dist_r2: "distinct vs \ before vs r1 r2 \ r2 \ set (snd (splitAt r1 vs))" proof - assume d: "distinct vs" and b: "before vs r1 r2" from d b have ex1: "\! s. (vs = (fst s) @ r1 # snd (s))" apply (drule_tac before_r1) apply (rule distinct_unique1) by auto from d b ex1 show ?thesis apply (unfold before_def) proof (elim exE ex1E) fix a b c s assume vs: "vs = a @ r1 # b @ r2 # c" and "\y. vs = fst y @ r1 # snd y \ y = s" then have "\ y. 
vs = fst y @ r1 # snd y \ y = s" by (clarify, hypsubst_thin, auto) then have single: "\ y. vs = fst y @ r1 # snd y \ y = s" by auto define bc where "bc = b @ r2 # c" with vs have vs2: "vs = a @ r1 # bc" by auto from bc_def have r2: "r2 \ set bc" by auto define t where "t = (a,bc)" with vs2 have vs3: "vs = fst (t) @ r1 # snd (t)" by auto with single have ts: "t = s" by (rule_tac single) auto from b have "splitAt r1 vs = s" apply (drule_tac before_r1) apply (drule_tac splitAt_ram) by (rule single) auto with ts have "t = splitAt r1 vs" by simp with t_def have "bc = snd(splitAt r1 vs)" by simp with r2 show ?thesis by simp qed qed lemma before_dist_not_r2[intro]: "distinct vs \ before vs r1 r2 \ r2 \ set (fst (splitAt r1 vs))" apply (frule before_dist_r2) by (auto dest: splitAt_distinct_fst_snd) lemma before_dist_r1: "distinct vs \ before vs r1 r2 \ r1 \ set (fst (splitAt r2 vs))" proof - assume d: "distinct vs" and b: "before vs r1 r2" from d b have ex1: "\! s. (vs = (fst s) @ r2 # snd (s))" apply (drule_tac before_r2) apply (rule distinct_unique1) by auto from d b ex1 show ?thesis apply (unfold before_def) proof (elim exE ex1E) fix a b c s assume vs: "vs = a @ r1 # b @ r2 # c" and "\y. vs = fst y @ r2 # snd y \ y = s" then have "\ y. vs = fst y @ r2 # snd y \ y = s" by (clarify, hypsubst_thin, auto) then have single: "\ y. vs = fst y @ r2 # snd y \ y = s" by auto define ab where "ab = a @ r1 # b" with vs have vs2: "vs = ab @ r2 # c" by auto from ab_def have r1: "r1 \ set ab" by auto define t where "t = (ab,c)" with vs2 have vs3: "vs = fst (t) @ r2 # snd (t)" by auto with single have ts: "t = s" by (rule_tac single) auto from b have "splitAt r2 vs = s" apply (drule_tac before_r2) apply (drule_tac splitAt_ram) by (rule single) auto with ts have "t = splitAt r2 vs" by simp with t_def have "ab = fst(splitAt r2 vs)" by simp with r1 show ?thesis by simp qed qed lemma before_dist_not_r1[intro]: "distinct vs \ before vs r1 r2 \ r1 \ set (snd (splitAt r2 vs))" apply (frule before_dist_r1) by (auto dest: splitAt_distinct_fst_snd) lemma before_snd: "r2 \ set (snd (splitAt r1 vs)) \ before vs r1 r2" proof - assume r2: "r2 \ set (snd (splitAt r1 vs))" from r2 have r1: "r1 \ set vs" apply (rule_tac ccontr) apply (drule splitAt_no_ram) by simp define a where "a = fst (splitAt r1 vs)" define bc where "bc = snd (splitAt r1 vs)" define b where "b = fst (splitAt r2 bc)" define c where "c = snd (splitAt r2 bc)" from r1 a_def bc_def have vs: "vs = a @ [r1] @ bc" by (auto dest: splitAt_ram) from r2 bc_def have r2: "r2 \ set bc" by simp with b_def c_def have "bc = b @ [r2] @ c" by (auto dest: splitAt_ram) with vs show ?thesis by (simp add: before_def) auto qed lemma before_fst: "r2 \ set vs \ r1 \ set (fst (splitAt r2 vs)) \ before vs r1 r2" proof - assume r2: "r2 \ set vs" and r1: "r1 \ set (fst (splitAt r2 vs))" define ab where "ab = fst (splitAt r2 vs)" define c where "c = snd (splitAt r2 vs)" define a where "a = fst (splitAt r1 ab)" define b where "b = snd (splitAt r1 ab)" from r2 ab_def c_def have vs: "vs = ab @ [r2] @ c" by (auto dest: splitAt_ram) from r1 ab_def have r1: "r1 \ set ab" by simp with a_def b_def have "ab = a @ [r1] @ b" by (auto dest: splitAt_ram) with vs show ?thesis by (simp add: before_def) auto qed (* usefule simplifier rules *) lemma before_dist_eq_fst: "distinct vs \ r2 \ set vs \ r1 \ set (fst (splitAt r2 vs)) = before vs r1 r2" by (auto intro: before_fst before_dist_r1) lemma before_dist_eq_snd: "distinct vs \ r2 \ set (snd (splitAt r1 vs)) = before vs r1 r2" by (auto intro: before_snd 
before_dist_r2) lemma before_dist_not1: "distinct vs \ before vs ram1 ram2 \ \ before vs ram2 ram1" proof assume d: "distinct vs" and b1: "before vs ram2 ram1" and b2: "before vs ram1 ram2" from b2 have r1: "ram1 \ set vs" by (drule_tac before_r1) from d b1 have r2: "ram2 \ set (fst (splitAt ram1 vs))" by (rule before_dist_r1) from d b2 have r2':"ram2 \ set (snd (splitAt ram1 vs))" by (rule before_dist_r2) from d r1 r2 r2' show "False" by (drule_tac splitAt_distinct_fst_snd) auto qed lemma before_dist_not2: "distinct vs \ ram1 \ set vs \ ram2 \ set vs \ ram1 \ ram2 \ \ (before vs ram1 ram2) \ before vs ram2 ram1" proof - assume "distinct vs" "ram1 \ set vs " "ram2 \ set vs" "ram1 \ ram2" "\ before vs ram1 ram2" then show "before vs ram2 ram1" apply (frule_tac before_or) by auto qed lemma before_dist_eq: "distinct vs \ ram1 \ set vs \ ram2 \ set vs \ ram1 \ ram2 \ ( \ (before vs ram1 ram2)) = before vs ram2 ram1" by (auto intro: before_dist_not2 dest: before_dist_not1) lemma before_vs: "distinct vs \ before vs ram1 ram2 \ vs = fst (splitAt ram1 vs) @ ram1 # fst (splitAt ram2 (snd (splitAt ram1 vs))) @ ram2 # snd (splitAt ram2 vs)" proof - assume d: "distinct vs" and b: "before vs ram1 ram2" define s where "s = snd (splitAt ram1 vs)" from b have "ram1 \ set vs" by (auto simp: before_def) with s_def have vs: "vs = fst (splitAt ram1 vs) @ [ram1] @ s" by (auto dest: splitAt_ram) from d b s_def have "ram2 \ set s" by (auto intro: before_dist_r2) then have snd: "s = fst (splitAt ram2 s) @ [ram2] @ snd (splitAt ram2 s)" by (auto dest: splitAt_ram) with vs have "vs = fst (splitAt ram1 vs) @ [ram1] @ fst (splitAt ram2 s) @ [ram2] @ snd (splitAt ram2 s)" by auto with d b s_def show ?thesis by auto qed (************************ between lemmas *************************************) subsection \@{const between}\ definition pre_between :: "'a list \ 'a \ 'a \ bool" where "pre_between vs ram1 ram2 \ distinct vs \ ram1 \ set vs \ ram2 \ set vs \ ram1 \ ram2" declare pre_between_def [simp] lemma pre_between_dist[intro]: "pre_between vs ram1 ram2 \ distinct vs" by (auto simp: pre_between_def) lemma pre_between_r1[intro]: "pre_between vs ram1 ram2 \ ram1 \ set vs" by auto lemma pre_between_r2[intro]: "pre_between vs ram1 ram2 \ ram2 \ set vs" by auto lemma pre_between_r12[intro]: "pre_between vs ram1 ram2 \ ram1 \ ram2" by auto lemma pre_between_symI: "pre_between vs ram1 ram2 \ pre_between vs ram2 ram1" by auto lemma pre_between_before[dest]: "pre_between vs ram1 ram2 \ before vs ram1 ram2 \ before vs ram2 ram1" by (rule_tac before_or) auto lemma pre_between_rotate1[intro]: "pre_between vs ram1 ram2 \ pre_between (rotate1 vs) ram1 ram2" by auto lemma pre_between_rotate[intro]: "pre_between vs ram1 ram2 \ pre_between (rotate n vs) ram1 ram2" by auto lemma(*<*) before_xor: (*>*) "pre_between vs ram1 ram2 \ (\ before vs ram1 ram2) = before vs ram2 ram1" by (simp add: before_dist_eq) declare pre_between_def [simp del] lemma between_simp1[simp]: "before vs ram1 ram2 \ pre_between vs ram1 ram2 \ between vs ram1 ram2 = fst (splitAt ram2 (snd (splitAt ram1 vs)))" by (simp add: pre_between_def between_def split_def before_dist_eq_snd) lemma between_simp2[simp]: "before vs ram1 ram2 \ pre_between vs ram1 ram2 \ between vs ram2 ram1 = snd (splitAt ram2 vs) @ fst (splitAt ram1 vs)" proof - assume b: "before vs ram1 ram2" and p: "pre_between vs ram1 ram2" from p b have b2: "\ before vs ram2 ram1" apply (simp add: pre_between_def) by (auto dest: before_dist_not1) with p have "ram2 \ set (fst (splitAt ram1 vs))" by (simp 
add: pre_between_def before_dist_eq_fst) then have "fst (splitAt ram1 vs) = fst (splitAt ram2 (fst (splitAt ram1 vs)))" by (auto dest: splitAt_no_ram) then have "fst (splitAt ram2 (fst (splitAt ram1 vs))) = fst (splitAt ram1 vs)" by auto with b2 b p show ?thesis apply (simp add: pre_between_def between_def split_def) by (auto dest: before_dist_not_r1) qed lemma between_not_r1[intro]: "distinct vs \ ram1 \ set (between vs ram1 ram2)" proof (cases "pre_between vs ram1 ram2") assume d: "distinct vs" case True then have p: "pre_between vs ram1 ram2" by auto then show "ram1 \ set (between vs ram1 ram2)" proof (cases "before vs ram1 ram2") case True with d p show ?thesis by (auto del: notI) next from p have p2: "pre_between vs ram2 ram1" by (auto intro: pre_between_symI) case False with p have "before vs ram2 ram1" by auto with d p2 show ?thesis by (auto del: notI) qed next assume d:"distinct vs" case False then have p: "\ pre_between vs ram1 ram2" by auto then show ?thesis proof (cases "ram1 = ram2") case True with d have h1:"ram2 \ set (snd (splitAt ram2 vs))" by (auto del: notI) from True d have h2: "ram2 \ set (fst (splitAt ram2 (fst (splitAt ram2 vs))))" by (auto del: notI) with True d h1 show ?thesis by (auto simp: between_def split_def) next case False then have neq: "ram1 \ ram2" by auto then show ?thesis proof (cases "ram1 \ set vs") case True with d show ?thesis by (auto dest: splitAt_no_ram splitAt_in_fst simp: between_def split_def) next case False then have r1in: "ram1 \ set vs" by auto then show ?thesis proof (cases "ram2 \ set vs") from d have h1: "ram1 \ set (fst (splitAt ram1 vs))" by (auto del: notI) case True with d h1 show ?thesis by (auto dest: splitAt_not1 splitAt_in_fst splitAt_ram splitAt_no_ram simp: between_def split_def del: notI) next case False then have r2in: "ram2 \ set vs" by auto with d neq r1in have "pre_between vs ram1 ram2" by (auto simp: pre_between_def) with p show ?thesis by auto qed qed qed qed lemma between_not_r2[intro]: "distinct vs \ ram2 \ set (between vs ram1 ram2)" proof (cases "pre_between vs ram1 ram2") assume d: "distinct vs" case True then have p: "pre_between vs ram1 ram2" by auto then show "ram2 \ set (between vs ram1 ram2)" proof (cases "before vs ram1 ram2") from d have "ram2 \ set (fst (splitAt ram2 vs))" by (auto del: notI) then have h1: "ram2 \ set (snd (splitAt ram1 (fst (splitAt ram2 vs))))" by (auto dest: splitAt_in_fst) case True with d p h1 show ?thesis by (auto del: notI) next from p have p2: "pre_between vs ram2 ram1" by (auto intro: pre_between_symI) case False with p have "before vs ram2 ram1" by auto with d p2 show ?thesis by (auto del: notI) qed next assume d:"distinct vs" case False then have p: "\ pre_between vs ram1 ram2" by auto then show ?thesis proof (cases "ram1 = ram2") case True with d have h1:"ram2 \ set (snd (splitAt ram2 vs))" by (auto del: notI) from True d have h2: "ram2 \ set (fst (splitAt ram2 (fst (splitAt ram2 vs))))" by (auto del: notI) with True d h1 show ?thesis by (auto simp: between_def split_def) next case False then have neq: "ram1 \ ram2" by auto then show ?thesis proof (cases "ram2 \ set vs") case True with d show ?thesis by (auto dest: splitAt_no_ram splitAt_in_fst splitAt_in_fst simp: between_def split_def) next case False then have r1in: "ram2 \ set vs" by auto then show ?thesis proof (cases "ram1 \ set vs") from d have h1: "ram1 \ set (fst (splitAt ram1 vs))" by (auto del: notI) case True with d h1 show ?thesis by (auto dest: splitAt_ram splitAt_no_ram simp: between_def split_def del: notI) next 
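(* In the remaining case both ram1 and ram2 occur in vs, so together with d and neq
   the precondition pre_between vs ram1 ram2 holds after all, contradicting p. *)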
case False then have r2in: "ram1 \ set vs" by auto with d neq r1in have "pre_between vs ram1 ram2" by (auto simp: pre_between_def) with p show ?thesis by auto qed qed qed qed lemma between_distinct[intro]: "distinct vs \ distinct (between vs ram1 ram2)" proof - assume vs: "distinct vs" define a where "a = fst (splitAt ram1 vs)" define b where "b = snd (splitAt ram1 vs)" from a_def b_def have ab: "(a,b) = splitAt ram1 vs" by auto with vs have ab_disj:"set a \ set b = {}" by (drule_tac splitAt_distinct_ab) auto define c where "c = fst (splitAt ram2 a)" define d where "d = snd (splitAt ram2 a)" from c_def d_def have c_d: "(c,d) = splitAt ram2 a" by auto with ab_disj have "set c \ set b = {}" by (drule_tac splitAt_subset_ab) auto with vs a_def b_def c_def show ?thesis by (auto simp: between_def split_def splitAt_no_ram dest: splitAt_ram intro: splitAt_distinct_fst splitAt_distinct_snd) qed lemma between_distinct_r12: "distinct vs \ ram1 \ ram2 \ distinct (ram1 # between vs ram1 ram2 @ [ram2])" by (auto del: notI) lemma between_vs: "before vs ram1 ram2 \ pre_between vs ram1 ram2 \ vs = fst (splitAt ram1 vs) @ ram1 # (between vs ram1 ram2) @ ram2 # snd (splitAt ram2 vs)" apply (simp) apply (frule pre_between_dist) apply (drule before_vs) by auto lemma between_in: "before vs ram1 ram2 \ pre_between vs ram1 ram2 \ x \ set vs \ x = ram1 \ x \ set (between vs ram1 ram2) \ x = ram2 \ x \ set (between vs ram2 ram1)" proof - assume b: "before vs ram1 ram2" and p: "pre_between vs ram1 ram2" and xin: "x \ set vs" define a where "a = fst (splitAt ram1 vs)" define b where "b = between vs ram1 ram2" define c where "c = snd (splitAt ram2 vs)" from p have "distinct vs" by auto from p b a_def b_def c_def have "vs = a @ ram1 # b @ ram2 # c" apply (drule_tac between_vs) by auto with xin have "x \ set (a @ ram1 # b @ ram2 # c)" by auto then have "x \ set (a) \ x \ set (ram1 #b) \ x \ set (ram2 # c)" by auto then have "x \ set (a) \ x = ram1 \ x \ set b \ x = ram2 \ x \ set c" by auto then have "x \ set c \ x \ set (a) \ x = ram1 \ x \ set b \ x = ram2" by auto then have "x \ set (c @ a) \ x = ram1 \ x \ set b \ x = ram2" by auto with b p a_def b_def c_def show ?thesis by auto qed lemma "before vs ram1 ram2 \ pre_between vs ram1 ram2 \ hd vs \ ram1 \ (a,b) = splitAt (hd vs) (between vs ram2 ram1) \ vs = [hd vs] @ b @ [ram1] @ (between vs ram1 ram2) @ [ram2] @ a" proof - assume b: "before vs ram1 ram2" and p: "pre_between vs ram1 ram2" and vs: "hd vs \ ram1" and ab: "(a,b) = splitAt (hd vs) (between vs ram2 ram1)" from p have dist_b: "distinct (between vs ram2 ram1)" by (auto intro: between_distinct simp: pre_between_def) with ab have "distinct a \ distinct b" by (auto intro: splitAt_distinct_a splitAt_distinct_b) define r where "r = snd (splitAt ram1 vs)" define btw where "btw = between vs ram2 ram1" from p r_def have vs2: "vs = fst (splitAt ram1 vs) @ [ram1] @ r" by (auto dest: splitAt_ram simp: pre_between_def) then have "fst (splitAt ram1 vs) = [] \ hd vs = ram1" by auto with vs have neq: "fst (splitAt ram1 vs) \ []" by auto with vs2 have vs_fst: "hd vs = hd (fst (splitAt ram1 vs))" by (induct ("fst (splitAt ram1 vs)")) auto with neq have "hd vs \ set (fst (splitAt ram1 vs))" by auto with b p have "hd vs \ set (between vs ram2 ram1)" by auto with btw_def have help1: "btw = fst (splitAt (hd vs) btw) @ [hd vs] @ snd (splitAt (hd vs) btw)" by (auto dest: splitAt_ram) from p b btw_def have "btw = snd (splitAt ram2 vs) @ fst (splitAt ram1 vs)" by auto with neq have "btw = snd (splitAt ram2 vs) @ hd (fst (splitAt 
ram1 vs)) # tl (fst (splitAt ram1 vs))" by auto with vs_fst have "btw = snd (splitAt ram2 vs) @ [hd vs] @ tl (fst (splitAt ram1 vs))" by auto with help1 have eq: "snd (splitAt ram2 vs) @ [hd vs] @ tl (fst (splitAt ram1 vs)) = fst (splitAt (hd vs) btw) @ [hd vs] @ snd (splitAt (hd vs) btw)" by auto from dist_b btw_def help1 have "distinct (fst (splitAt (hd vs) btw) @ [hd vs] @ snd (splitAt (hd vs) btw))" by auto with eq have eq2: "snd (splitAt ram2 vs) = fst (splitAt (hd vs) btw) \ tl (fst (splitAt ram1 vs)) = snd (splitAt (hd vs) btw)" apply (rule_tac dist_at) by auto with btw_def ab have a: "a = snd (splitAt ram2 vs)" by (auto dest: pairD) from eq2 vs_fst have "hd (fst (splitAt ram1 vs)) # tl (fst (splitAt ram1 vs)) = hd vs # snd (splitAt (hd vs) btw)" by auto with ab btw_def neq have hdb: "hd vs # b = fst (splitAt ram1 vs)" by (auto dest: pairD) from b p have "vs = fst (splitAt ram1 vs) @ [ram1] @ fst (splitAt ram2 (snd (splitAt ram1 vs))) @ [ram2] @ snd (splitAt ram2 vs)" apply simp apply (rule_tac before_vs) by (auto simp: pre_between_def) with hdb have "vs = (hd vs # b) @ [ram1] @ fst (splitAt ram2 (snd (splitAt ram1 vs))) @ [ram2] @ snd (splitAt ram2 vs)" by auto with a b p show ?thesis by (simp) qed lemma between_congs: "pre_between vs ram1 ram2 \ vs \ vs' \ between vs ram1 ram2 = between vs' ram1 ram2" proof - have "\ us. pre_between us ram1 ram2 \ before us ram1 ram2 \ between us ram1 ram2 = between (rotate1 us) ram1 ram2" proof - fix us assume vors: "pre_between us ram1 ram2" "before us ram1 ram2" then have pb2: "pre_between (rotate1 us) ram1 ram2" by auto with vors show "between us ram1 ram2 = between (rotate1 us) ram1 ram2" proof (cases "us") case Nil then show ?thesis by auto next case (Cons u' us') with vors pb2 show ?thesis apply (auto simp: before_def) apply (case_tac "a") apply auto by (simp_all add: between_def split_def pre_between_def) qed qed moreover have "\ us. pre_between us ram1 ram2 \ before us ram2 ram1 \ between us ram1 ram2 = between (rotate1 us) ram1 ram2" proof - fix us assume vors: " pre_between us ram1 ram2" "before us ram2 ram1" then have pb2: "pre_between (rotate1 us) ram1 ram2" by auto with vors show "between us ram1 ram2 = between (rotate1 us) ram1 ram2" proof (cases "us") case Nil then show ?thesis by auto next case (Cons u' us') with vors pb2 show ?thesis apply (auto simp: before_def) apply (case_tac "a") apply auto by (simp_all add: between_def split_def pre_between_def) qed qed ultimately have "help": "\ us. 
pre_between us ram1 ram2 \ between us ram1 ram2 = between (rotate1 us) ram1 ram2" apply (subgoal_tac "before us ram1 ram2 \ before us ram2 ram1") by auto assume "vs \ vs'" and pre_b: "pre_between vs ram1 ram2" then obtain n where vs': "vs' = rotate n vs" by (auto simp: congs_def) have "between vs ram1 ram2 = between (rotate n vs) ram1 ram2" proof (induct n) case 0 then show ?case by auto next case (Suc m) then show ?case apply simp apply (subgoal_tac " between (rotate1 (rotate m vs)) ram1 ram2 = between (rotate m vs) ram1 ram2") by (auto intro: "help" [symmetric] pre_b) qed with vs' show ?thesis by auto qed lemma between_inter_empty: "pre_between vs ram1 ram2 \ set (between vs ram1 ram2) \ set (between vs ram2 ram1) = {}" apply (case_tac "before vs ram1 ram2") apply (simp add: pre_between_def) apply (elim conjE) apply (frule (1) before_vs) apply (subgoal_tac "distinct (fst (splitAt ram1 vs) @ ram1 # fst (splitAt ram2 (snd (splitAt ram1 vs))) @ ram2 # snd (splitAt ram2 vs))") apply (thin_tac "vs = fst (splitAt ram1 vs) @ ram1 # fst (splitAt ram2 (snd (splitAt ram1 vs))) @ ram2 # snd (splitAt ram2 vs)") apply (frule (1) before_dist_fst_snd) apply(simp) apply blast apply (simp only:) apply (simp add: before_xor) apply (subgoal_tac "pre_between vs ram2 ram1") apply (simp add: pre_between_def) apply (elim conjE) apply (frule (1) before_vs) apply (subgoal_tac "distinct (fst (splitAt ram2 vs) @ ram2 # fst (splitAt ram1 (snd (splitAt ram2 vs))) @ ram1 # snd (splitAt ram1 vs))") apply (thin_tac "vs = fst (splitAt ram2 vs) @ ram2 # fst (splitAt ram1 (snd (splitAt ram2 vs))) @ ram1 # snd (splitAt ram1 vs)") apply simp apply blast apply (simp only:) by (rule pre_between_symI) (*********************** between - is_nextElem *************************) subsubsection \\between is_nextElem\\ lemma is_nextElem_or1: "pre_between vs ram1 ram2 \ is_nextElem vs x y \ before vs ram1 ram2 \ is_sublist [x,y] (ram1 # between vs ram1 ram2 @ [ram2]) \ is_sublist [x,y] (ram2 # between vs ram2 ram1 @ [ram1])" proof - assume p: "pre_between vs ram1 ram2" and is_nextElem: "is_nextElem vs x y" and b: "before vs ram1 ram2" from p have r1: "ram1 \ set vs" by (auto simp: pre_between_def) define bs where "bs = [ram1] @ (between vs ram1 ram2) @ [ram2]" have rule1: "x \ set (ram1 # (between vs ram1 ram2)) \ is_sublist [x,y] bs" proof - assume xin:"x \ set (ram1 # (between vs ram1 ram2))" with bs_def have xin2: "x \ set bs" by auto define s where "s = snd (splitAt ram1 vs)" from r1 s_def have sub1:"is_sublist (ram1 # s) vs" by (auto intro: splitAt_is_sublist2R) from b p s_def have "ram2 \ set s" by (auto intro!: before_dist_r2 simp: pre_between_def) then have "is_prefix (fst (splitAt ram2 s) @ [ram2]) s" by (auto intro!: splitAt_is_prefix) then have "is_prefix ([ram1] @ ((fst (splitAt ram2 s)) @ [ram2])) ([ram1] @ s)" by (rule_tac is_prefix_add) auto with sub1 have "is_sublist (ram1 # (fst (splitAt ram2 s)) @ [ram2]) vs" apply (rule_tac is_sublist_trans) apply (rule is_prefix_sublist) by simp_all with p b s_def bs_def have subl: "is_sublist bs vs" by (auto) with p have db: "distinct bs" by (auto simp: pre_between_def) with xin bs_def have xnlb:"x \ last bs" by auto with p is_nextElem subl xin2 show "is_sublist [x,y] bs" apply (rule_tac is_sublist_is_nextElem) by (auto simp: pre_between_def) qed define bs2 where "bs2 = [ram2] @ (between vs ram2 ram1) @ [ram1]" have rule2: "x \ set (ram2 # (between vs ram2 ram1)) \ is_sublist [x,y] bs2" proof - assume xin:"x \ set (ram2 # (between vs ram2 ram1))" with bs2_def have xin2: "x \ set 
bs2" by auto define cs1 where "cs1 = ram2 # snd (splitAt ram2 vs)" then have cs1ne: "cs1 \ []" by auto define cs2 where "cs2 = fst (splitAt ram1 vs)" define cs2ram1 where "cs2ram1 = cs2 @ [ram1]" from cs1_def cs2_def bs2_def p b have bs2: "bs2 = cs1 @ cs2 @ [ram1]" by (auto simp: pre_between_def) have "x \ set cs1 \ x \ last cs1 \ is_sublist [x,y] cs1" proof- assume xin2: "x \ set cs1" and xnlcs1: "x \ last cs1" from cs1_def p have "is_sublist cs1 vs" by (simp add: pre_between_def) with p is_nextElem xnlcs1 xin2 show ?thesis apply (rule_tac is_sublist_is_nextElem) by (auto simp: pre_between_def) qed with bs2 have incs1nl: "x \ set cs1 \ x \ last cs1 \ is_sublist [x,y] bs2" apply (auto simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "as @ x # y # bs @ cs2 @ [ram1] = as @ x # y # (bs @ cs2 @ [ram1])") apply assumption by auto have "x = last cs1 \ y = hd (cs2 @ [ram1])" proof - assume xl: "x = last cs1" from p have "distinct vs" by auto with p have "vs = fst (splitAt ram2 vs) @ ram2 # snd (splitAt ram2 vs)" by (auto intro: splitAt_ram) with cs1_def have "last vs = last (fst (splitAt ram2 vs) @ cs1)" by auto with cs1ne have "last vs = last cs1" by auto with xl have "x = last vs" by auto with p is_nextElem have yhd: "y = hd vs" by auto from p have "vs = fst (splitAt ram1 vs) @ ram1 # snd (splitAt ram1 vs)" by (auto intro: splitAt_ram) with yhd cs2ram1_def cs2_def have yhd2: "y = hd (cs2ram1 @ snd (splitAt ram1 vs))" by auto from cs2ram1_def have "cs2ram1 \ []" by auto then have "hd (cs2ram1 @ snd(splitAt ram1 vs)) = hd (cs2ram1)" by auto with yhd2 cs2ram1_def show ?thesis by auto qed with bs2 cs1ne have "x = last cs1 \ is_sublist [x,y] bs2" apply (auto simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "cs1 @ cs2 @ [ram1] = butlast cs1 @ last cs1 # hd (cs2 @ [ram1]) # tl (cs2 @ [ram1])") apply assumption by auto with incs1nl have incs1: "x \ set cs1 \ is_sublist [x,y] bs2" by auto have "x \ set cs2 \ is_sublist [x,y] (cs2 @ [ram1])" proof- assume xin2': "x \ set cs2" then have xin2: "x \ set (cs2 @ [ram1])" by auto define fr2 where "fr2 = snd (splitAt ram1 vs)" from p have "vs = fst (splitAt ram1 vs) @ ram1 # snd (splitAt ram1 vs)" by (auto intro: splitAt_ram) with fr2_def cs2_def have "vs = cs2 @ [ram1] @ fr2" by simp with p xin2' have "x \ ram1" by (auto simp: pre_between_def) then have xnlcs2: "x \ last (cs2 @ [ram1])" by auto from cs2_def p have "is_sublist (cs2 @ [ram1]) vs" by (simp add: pre_between_def) with p is_nextElem xnlcs2 xin2 show ?thesis apply (rule_tac is_sublist_is_nextElem) by (auto simp: pre_between_def) qed with bs2 have "x \ set cs2 \ is_sublist [x,y] bs2" apply (auto simp: is_sublist_def) apply (intro exI) apply (subgoal_tac "cs1 @ as @ x # y # bs = (cs1 @ as) @ x # y # bs") apply assumption by auto with p b cs1_def cs2_def incs1 xin show ?thesis by auto qed from is_nextElem have "x \ set vs" by auto with b p have "x = ram1 \ x \ set (between vs ram1 ram2) \ x = ram2 \ x \ set (between vs ram2 ram1)" by (rule_tac between_in) auto then have "x \ set (ram1 # (between vs ram1 ram2)) \ x \ set (ram2 # (between vs ram2 ram1))" by auto with rule1 rule2 bs_def bs2_def show ?thesis by auto qed lemma is_nextElem_or: "pre_between vs ram1 ram2 \ is_nextElem vs x y \ is_sublist [x,y] (ram1 # between vs ram1 ram2 @ [ram2]) \ is_sublist [x,y] (ram2 # between vs ram2 ram1 @ [ram1])" proof (cases "before vs ram1 ram2") case True assume "pre_between vs ram1 ram2" "is_nextElem vs x y" with True show ?thesis by (rule_tac is_nextElem_or1) next assume p: "pre_between vs 
ram1 ram2" and is_nextElem: "is_nextElem vs x y" from p have p': "pre_between vs ram2 ram1" by (auto intro: pre_between_symI) case False with p have "before vs ram2 ram1" by auto with p' is_nextElem show ?thesis apply (drule_tac is_nextElem_or1) apply assumption+ by auto qed lemma(*<*) between_eq2: (*>*) "pre_between vs ram1 ram2 \ before vs ram2 ram1 \ \as bs cs. between vs ram1 ram2 = cs @ as \ vs = as @[ram2] @ bs @ [ram1] @ cs" apply (subgoal_tac "pre_between vs ram2 ram1") apply auto apply (intro exI conjI) apply simp apply (simp add: pre_between_def) apply auto apply (frule_tac before_vs) apply auto by (auto simp: pre_between_def) lemma is_sublist_same_len[simp]: "length xs = length ys \ is_sublist xs ys = (xs = ys)" apply(cases xs) apply simp apply(rename_tac a as) apply(cases ys) apply simp apply(rename_tac b bs) apply(case_tac "a = b") apply(subst is_sublist_rec) apply simp apply simp done lemma is_nextElem_between_empty[simp]: "distinct vs \ is_nextElem vs a b \ between vs a b = []" apply (simp add: is_nextElem_def between_def split_def) apply (cases "vs") apply simp+ apply (simp split: if_split_asm) apply (case_tac "b = aa") apply (simp add: is_sublist_def) apply (erule disjE) apply (elim exE) apply (case_tac "as") apply simp apply simp apply simp apply (case_tac "list" rule: rev_exhaust) apply simp apply simp apply simp apply (subgoal_tac "aa # list = vs") apply (thin_tac "vs = aa # list") apply simp apply (subgoal_tac "distinct vs") apply (simp add: is_sublist_def) apply (elim exE) by auto lemma is_nextElem_between_empty': "between vs a b = [] \ distinct vs \ a \ set vs \ b \ set vs \ a \ b \ is_nextElem vs a b" apply (simp add: is_nextElem_def between_def split_def split: if_split_asm) apply (case_tac vs) apply simp apply simp apply (rule conjI) apply (rule impI) apply simp apply (case_tac "aa = b") apply simp apply (rule impI) apply (case_tac "list" rule: rev_exhaust) apply simp apply simp apply (case_tac "a = y") apply simp apply simp apply (elim conjE) apply (drule split_list_first) apply (elim exE) apply simp apply (rule impI) apply (subgoal_tac "b \ aa") apply simp apply (case_tac "a = aa") apply simp apply (case_tac "list") apply simp apply simp apply (case_tac "aaa = b") apply (simp add: is_sublist_def) apply force apply simp apply simp apply (drule split_list_first) apply (elim exE) apply simp apply (case_tac "zs") apply simp apply simp apply (case_tac "aaa = b") apply simp apply (simp add: is_sublist_def) apply force apply simp apply force apply (case_tac vs) apply simp apply simp apply (rule conjI) apply (rule impI) apply simp apply (rule impI) apply (case_tac "b = aa") apply simp apply (case_tac "list" rule: rev_exhaust) apply simp apply simp apply (case_tac "a = y") apply simp apply simp apply (drule split_list_first) apply (elim exE) apply simp apply simp apply (case_tac "a = aa") by auto lemma between_nextElem: "pre_between vs u v \ between vs u (nextElem vs (hd vs) v) = between vs u v @ [v]" apply(subgoal_tac "pre_between vs v u") prefer 2 apply (clarsimp simp add:pre_between_def) apply(case_tac "before vs u v") apply(drule (1) between_eq2) apply(clarsimp simp:pre_between_def hd_append split:list.split) apply(simp add:between_def split_def) apply(fastforce simp:neq_Nil_conv) apply (simp only:before_xor) apply(drule (1) between_eq2) apply(clarsimp simp:pre_between_def hd_append split:list.split) apply (simp add: append_eq_Cons_conv) apply(fastforce simp:between_def split_def) done (******************** section split_face ********************************) lemma 
nextVertices_in_face[simp]: "v \ \ f \ f\<^bsup>n\<^esup> \ v \ \ f" proof - assume v: "v \ \ f" then have f: "vertices f \ []" by auto show ?thesis apply (simp add: nextVertices_def) apply (induct n) by (auto simp: f v) qed subsubsection \\is_nextElem edges\ equivalence\ lemma is_nextElem_edges1: "distinct (vertices f) \ (a,b) \ edges (f::face) \ is_nextElem (vertices f) a b" apply (simp add: edges_face_def nextVertex_def) apply (rule is_nextElem1) by auto lemma is_nextElem_edges2: "distinct (vertices f) \ is_nextElem (vertices f) a b \ (a,b) \ edges (f::face)" apply (auto simp add: edges_face_def nextVertex_def) apply (rule sym) apply (rule is_nextElem2) by (auto intro: is_nextElem_a) lemma is_nextElem_edges_eq[simp]: "distinct (vertices (f::face)) \ (a,b) \ edges f = is_nextElem (vertices f) a b" by (auto intro: is_nextElem_edges1 is_nextElem_edges2) (*********************** nextVertex *****************************) subsubsection \@{const nextVertex}\ lemma nextElem_suc2: "distinct (vertices f) \ last (vertices f) = v \ v \ set (vertices f) \ f \ v = hd (vertices f)" proof - assume dist: "distinct (vertices f)" and last: "last (vertices f) = v" and v: "v \ set (vertices f)" define ls where "ls = vertices f" have ind: "\ c. distinct ls \ last ls = v \ v \ set ls \ nextElem ls c v = c" proof (induct ls) case Nil then show ?case by auto next case (Cons m ms) then show ?case proof (cases "m = v") case True with Cons have "ms = []" apply (cases ms rule: rev_exhaust) by auto then show ?thesis by auto next case False with Cons have a1: "v \ set ms" by auto then have ms: "ms \ []" by auto with False Cons ms have "nextElem ms c v = c" apply (rule_tac Cons) by simp_all with False ms show ?thesis by simp qed qed from dist ls_def last v have "nextElem ls (hd ls) v = hd ls" apply (rule_tac ind) by auto with ls_def show ?thesis by (simp add: nextVertex_def) qed (*********************** split_face ****************************) subsection \@{const split_face}\ definition pre_split_face :: "face \ nat \ nat \ nat list \ bool" where "pre_split_face oldF ram1 ram2 newVertexList \ distinct (vertices oldF) \ distinct (newVertexList) \ \ oldF \ set newVertexList = {} \ ram1 \ \ oldF \ ram2 \ \ oldF \ ram1 \ ram2" declare pre_split_face_def [simp] lemma pre_split_face_p_between[intro]: "pre_split_face oldF ram1 ram2 newVertexList \ pre_between (vertices oldF) ram1 ram2" by (simp add: pre_between_def) lemma pre_split_face_symI: "pre_split_face oldF ram1 ram2 newVertexList \ pre_split_face oldF ram2 ram1 newVertexList" by auto lemma pre_split_face_rev[intro]: "pre_split_face oldF ram1 ram2 newVertexList \ pre_split_face oldF ram1 ram2 (rev newVertexList)" by auto lemma split_face_distinct1: "(f12, f21) = split_face oldF ram1 ram2 newVertexList \ pre_split_face oldF ram1 ram2 newVertexList \ distinct (vertices f12)" proof - assume split: "(f12, f21) = split_face oldF ram1 ram2 newVertexList" and p: "pre_split_face oldF ram1 ram2 newVertexList" define old_vs where "old_vs = vertices oldF" with p have d_old_vs: "distinct old_vs" by auto from p have p2: "pre_between (vertices oldF) ram1 ram2" by auto have rule1: "before old_vs ram1 ram2 \ distinct (vertices f12)" proof - assume b: "before old_vs ram1 ram2" with split p have "f12 = Face (rev newVertexList @ ram1 # between (vertices oldF) ram1 ram2 @ [ram2]) Nonfinal" by (simp add: split_face_def) then have h1:"vertices f12 = rev newVertexList @ ram1 # between (vertices oldF) ram1 ram2 @ [ram2]" by auto from p have d1: "distinct(ram1 # between (vertices oldF) ram1 
ram2 @ [ram2])" by (auto del: notI) from b p p2 old_vs_def have d2: "set (ram1 # between (vertices oldF) ram1 ram2 @ [ram2]) \ set newVertexList = {}" by (auto dest!: splitAt_in_fst splitAt_in_snd) with h1 d1 p show ?thesis by auto qed have rule2: "before old_vs ram2 ram1 \ distinct (vertices f12)" proof - assume b: "before old_vs ram2 ram1" from p have p3: "pre_split_face oldF ram1 ram2 newVertexList" by (auto intro: pre_split_face_symI) then have p4: "pre_between (vertices oldF) ram2 ram1" by auto with split p have "f12 = Face (rev newVertexList @ ram1 # between (vertices oldF) ram1 ram2 @ [ram2]) Nonfinal" by (simp add: split_face_def) then have h1:"vertices f12 = rev newVertexList @ ram1 # between (vertices oldF) ram1 ram2 @ [ram2]" by auto from p3 have d1: "distinct(ram1 # between (vertices oldF) ram1 ram2 @ [ram2])" by (auto del: notI) from b p3 p4 old_vs_def have d2: "set (ram1 # between (vertices oldF) ram1 ram2 @ [ram2]) \ set newVertexList = {}" by auto with h1 d1 p show ?thesis by auto qed from p2 rule1 rule2 old_vs_def show ?thesis by auto qed lemma split_face_distinct1'[intro]: "pre_split_face oldF ram1 ram2 newVertexList \ distinct (vertices (fst(split_face oldF ram1 ram2 newVertexList)))" apply (rule_tac split_face_distinct1) by (auto simp del: pre_split_face_def simp: split_face_def) lemma split_face_distinct2: "(f12, f21) = split_face oldF ram1 ram2 newVertexList \ pre_split_face oldF ram1 ram2 newVertexList \ distinct (vertices f21)" proof - assume split: "(f12, f21) = split_face oldF ram1 ram2 newVertexList" and p: "pre_split_face oldF ram1 ram2 newVertexList" define old_vs where "old_vs = vertices oldF" with p have d_old_vs: "distinct old_vs" by auto with p have p1: "pre_split_face oldF ram1 ram2 newVertexList" by auto from p have p2: "pre_between (vertices oldF) ram1 ram2" by auto have rule1: "before old_vs ram1 ram2 \ distinct (vertices f21)" proof - assume b: "before old_vs ram1 ram2" with split p have "f21 = Face (ram2 # between (vertices oldF) ram2 ram1 @ [ram1] @ newVertexList) Nonfinal" by (simp add: split_face_def) then have h1:"vertices f21 = ram2 # between (vertices oldF) ram2 ram1 @ [ram1] @ newVertexList" by auto from p have d1: "distinct(ram1 # between (vertices oldF) ram2 ram1 @ [ram2])" by (auto del: notI) from b p1 p2 old_vs_def have d2: "set (ram2 # between (vertices oldF) ram2 ram1 @ [ram1]) \ set newVertexList = {}" by auto with h1 d1 p1 show ?thesis by auto qed have rule2: "before old_vs ram2 ram1 \ distinct (vertices f21)" proof - assume b: "before old_vs ram2 ram1" from p have p3: "pre_split_face oldF ram1 ram2 newVertexList" by (auto intro: pre_split_face_symI) then have p4: "pre_between (vertices oldF) ram2 ram1" by auto with split p have "f21 = Face (ram2 # between (vertices oldF) ram2 ram1 @ [ram1] @ newVertexList) Nonfinal" by (simp add: split_face_def) then have h1:"vertices f21 = ram2 # between (vertices oldF) ram2 ram1 @ [ram1] @ newVertexList" by auto from p3 have d1: "distinct(ram2 # between (vertices oldF) ram2 ram1 @ [ram1])" by (auto del: notI) from b p3 p4 old_vs_def have d2: "set (ram2 # between (vertices oldF) ram2 ram1 @ [ram1]) \ set newVertexList = {}" by auto with h1 d1 p1 show ?thesis by auto qed from p2 rule1 rule2 old_vs_def show ?thesis by auto qed lemma split_face_distinct2'[intro]: "pre_split_face oldF ram1 ram2 newVertexList \ distinct (vertices (snd(split_face oldF ram1 ram2 newVertexList)))" apply (rule_tac split_face_distinct2) by (auto simp del: pre_split_face_def simp: split_face_def) declare pre_split_face_def [simp 
del] lemma split_face_edges_or: "(f12, f21) = split_face oldF ram1 ram2 newVertexList \ pre_split_face oldF ram1 ram2 newVertexList \ (a, b) \ edges oldF \ (a,b) \ edges f12 \ (a,b) \ edges f21" proof - assume nf: "(f12, f21) = split_face oldF ram1 ram2 newVertexList" and p: "pre_split_face oldF ram1 ram2 newVertexList" and old:"(a, b) \ edges oldF" from p have d1:"distinct (vertices oldF)" by auto from nf p have d2: "distinct (vertices f12)" by (auto dest: pairD) from nf p have d3: "distinct (vertices f21)" by (auto dest: pairD) from p have p': "pre_between (vertices oldF) ram1 ram2" by auto from old d1 have is_nextElem: "is_nextElem (vertices oldF) a b" by simp with p have "is_sublist [a,b] (ram1 # (between (vertices oldF) ram1 ram2) @ [ram2]) \ is_sublist [a,b] (ram2 # (between (vertices oldF) ram2 ram1) @ [ram1])" apply (rule_tac is_nextElem_or) by auto then have "is_nextElem (rev newVertexList @ ram1 # between (vertices oldF) ram1 ram2 @ [ram2]) a b \ is_nextElem (ram2 # between (vertices oldF) ram2 ram1 @ ram1 # newVertexList) a b" proof (cases "is_sublist [a,b] (ram1 # (between (vertices oldF) ram1 ram2) @ [ram2])") case True then show ?thesis by (auto dest: is_sublist_add intro!: is_nextElem_sublistI) next case False assume "is_sublist [a,b] (ram1 # (between (vertices oldF) ram1 ram2) @ [ram2]) \ is_sublist [a,b] (ram2 # (between (vertices oldF) ram2 ram1) @ [ram1])" with False have "is_sublist [a,b] (ram2 # (between (vertices oldF) ram2 ram1) @ [ram1])" by auto then show ?thesis apply (rule_tac disjI2) apply (rule_tac is_nextElem_sublistI) apply (subgoal_tac "is_sublist [a, b] ([] @ (ram2 # between (vertices oldF) ram2 ram1 @ [ram1]) @ newVertexList)") apply force by (frule is_sublist_add) qed with nf d1 d2 d3 show ?thesis by (simp add: split_face_def) qed subsection \\verticesFrom\\ definition verticesFrom :: "face \ vertex \ vertex list" where "verticesFrom f \ rotate_to (vertices f)" lemmas verticesFrom_Def = verticesFrom_def rotate_to_def lemma len_vFrom[simp]: "v \ \ f \ |verticesFrom f v| = |vertices f|" apply(drule split_list_first) apply(clarsimp simp: verticesFrom_Def) done lemma verticesFrom_empty[simp]: "v \ \ f \ (verticesFrom f v = []) = (vertices f = [])" by(clarsimp simp: verticesFrom_Def) lemma verticesFrom_congs: "v \ \ f \ (vertices f) \ (verticesFrom f v)" by(simp add:verticesFrom_def cong_rotate_to) lemma verticesFrom_eq_if_vertices_cong: "\distinct(vertices f); distinct(vertices f'); vertices f \ vertices f'; x \ \ f \ \ verticesFrom f x = verticesFrom f' x" by(clarsimp simp:congs_def verticesFrom_Def splitAt_rotate_pair_conv) lemma verticesFrom_in[intro]: "v \ \ f \ a \ \ f \ a \ set (verticesFrom f v)" by (auto dest: verticesFrom_congs congs_pres_nodes) lemma verticesFrom_in': "a \ set (verticesFrom f v) \ a \ v \ a \ \ f" apply (cases "v \ \ f") apply (auto dest: verticesFrom_congs congs_pres_nodes) by (simp add: verticesFrom_Def) lemma set_verticesFrom: "v \ \ f \ set (verticesFrom f v) = \ f" apply(auto) apply (auto simp add: verticesFrom_Def) done lemma verticesFrom_hd: "hd (verticesFrom f v) = v" by (simp add: verticesFrom_Def) lemma verticesFrom_distinct[simp]: "distinct (vertices f) \ v \ \ f \ distinct (verticesFrom f v)" apply (frule_tac verticesFrom_congs) by (auto simp: congs_distinct) lemma verticesFrom_nextElem_eq: "distinct (vertices f) \ v \ \ f \ u \ \ f \ nextElem (verticesFrom f v) (hd (verticesFrom f v)) u = nextElem (vertices f) (hd (vertices f)) u" apply (subgoal_tac "(verticesFrom f v) \ (vertices f)") apply (rule nextElem_congs_eq) 
apply (auto simp: verticesFrom_congs congs_pres_nodes) apply (rule congs_sym) by (simp add: verticesFrom_congs) lemma nextElem_vFrom_suc1: "distinct (vertices f) \ v \ \ f \ i < length (vertices f) \ last (verticesFrom f v) \ u \ (verticesFrom f v)!i = u \ f \ u = (verticesFrom f v)!(Suc i)" proof - assume dist: "distinct (vertices f)" and ith: "(verticesFrom f v)!i = u" and small_i: "i < length (vertices f)" and notlast: "last (verticesFrom f v) \ u" and v: "v \ \ f" hence eq: "(vertices f) \ (verticesFrom f v)" by (auto simp: verticesFrom_congs) from ith eq small_i have "u \ set (verticesFrom f v)" by (auto simp: congs_length) with eq have u: "u \ \ f" by (auto simp: congs_pres_nodes) define ls where "ls = verticesFrom f v" with dist ith have "ls!i = u" by auto have ind: "\ c i. i < length ls \ distinct ls \ last ls \ u \ ls!i = u \ u \ set ls \ nextElem ls c u = ls ! Suc i" proof (induct ls) case Nil then show ?case by auto next case (Cons m ms) then show ?case proof (cases "m = u") case True with Cons have "u \ set ms" by auto with Cons True have i: "i = 0" apply (induct i) by auto with Cons show ?thesis apply (simp split: if_split_asm) apply (cases ms) by simp_all next case False with Cons have a1: "u \ set ms" by auto then have ms: "ms \ []" by auto from False Cons have i: "0 < i" by auto define i' where "i' = i - 1" with i have i': "i = Suc i'" by auto with False Cons i' ms have "nextElem ms c u = ms ! Suc i'" apply (rule_tac Cons) by simp_all with False Cons i' ms show ?thesis by simp qed qed from eq dist ith ls_def small_i notlast v have "nextElem ls (hd ls) u = ls ! Suc i" apply (rule_tac ind) by (auto simp: congs_length) with dist u v ls_def show ?thesis by (simp add: nextVertex_def verticesFrom_nextElem_eq) qed lemma verticesFrom_nth: "distinct (vertices f) \ d < length (vertices f) \ v \ \ f \ (verticesFrom f v)!d = f\<^bsup>d\<^esup> \ v" proof (induct d) case 0 then show ?case by (simp add: verticesFrom_Def nextVertices_def) next case (Suc n) then have dist2: "distinct (verticesFrom f v)" apply (frule_tac verticesFrom_congs) by (auto simp: congs_distinct) from Suc have in2: "v \ set (verticesFrom f v)" apply (frule_tac verticesFrom_congs) by (auto dest: congs_pres_nodes) then have vFrom: "(verticesFrom f v) = butlast (verticesFrom f v) @ [last (verticesFrom f v)]" apply (cases "(verticesFrom f v)" rule: rev_exhaust) by auto from Suc show ?case proof (cases "last (verticesFrom f v) = f\<^bsup>n\<^esup> \ v") case True with Suc have "verticesFrom f v ! n = f\<^bsup>n\<^esup> \ v" by (rule_tac Suc) auto with True have "last (verticesFrom f v) = verticesFrom f v ! 
n" by auto with Suc dist2 in2 have "Suc n = length (verticesFrom f v)" apply (rule_tac nth_last_Suc_n) by auto with Suc show ?thesis by auto next case False with Suc show ?thesis apply (simp add: nextVertices_def) apply (rule sym) apply (rule_tac nextElem_vFrom_suc1) by simp_all qed qed lemma verticesFrom_length: "distinct (vertices f) \ v \ set (vertices f) \ length (verticesFrom f v) = length (vertices f)" by (auto intro: congs_length verticesFrom_congs) lemma verticesFrom_between: "v' \ \ f \ pre_between (vertices f) u v \ between (vertices f) u v = between (verticesFrom f v') u v" by (auto intro!: between_congs verticesFrom_congs) lemma verticesFrom_is_nextElem: "v \ \ f \ is_nextElem (vertices f) a b = is_nextElem (verticesFrom f v) a b" apply (rule is_nextElem_congs_eq) by (rule verticesFrom_congs) lemma verticesFrom_is_nextElem_last: "v' \ \ f \ distinct (vertices f) \ is_nextElem (verticesFrom f v') (last (verticesFrom f v')) v \ v = v'" apply (subgoal_tac "distinct (verticesFrom f v')") apply (subgoal_tac "last (verticesFrom f v') \ set (verticesFrom f v')") apply (simp add: nextElem_is_nextElem) apply (simp add: verticesFrom_hd) apply (cases "(verticesFrom f v')" rule: rev_exhaust) apply (simp add: verticesFrom_Def) by auto lemma verticesFrom_is_nextElem_hd: "v' \ \ f \ distinct (vertices f) \ is_nextElem (verticesFrom f v') u v' \ u = last (verticesFrom f v')" apply (subgoal_tac "distinct (verticesFrom f v')") apply (thin_tac "distinct (vertices f)") apply (auto simp: is_nextElem_def) apply (drule is_sublist_y_hd) apply (simp add: verticesFrom_hd) by auto lemma verticesFrom_pres_nodes1: "v \ \ f \ \ f = set(verticesFrom f v)" proof - assume "v \ \ f" then have "fst (splitAt v (vertices f)) @ [v] @ snd (splitAt v (vertices f)) = vertices f" apply (drule_tac splitAt_ram) by simp moreover have "set (fst (splitAt v (vertices f)) @ [v] @ snd (splitAt v (vertices f))) = set (verticesFrom f v)" by (auto simp: verticesFrom_Def) ultimately show ?thesis by simp qed lemma verticesFrom_pres_nodes: "v \ \ f \ w \ \ f \ w \ set (verticesFrom f v)" by (auto dest: verticesFrom_pres_nodes1) lemma before_verticesFrom: "distinct (vertices f) \ v \ \ f \ w \ \ f \ v \ w \ before (verticesFrom f v) v w" proof - assume v: "v \ \ f" and w: "w \ \ f" and neq: "v \ w" have "hd (verticesFrom f v) = v" by (rule verticesFrom_hd) with v have vf:"verticesFrom f v = v # tl (verticesFrom f v)" apply (frule_tac verticesFrom_pres_nodes1) apply (cases "verticesFrom f v") by auto moreover with v w have "w \ set (verticesFrom f v)" by (auto simp: verticesFrom_pres_nodes) ultimately have "w \ set (v # tl (verticesFrom f v))" by auto with neq have "w \ set (tl (verticesFrom f v))" by auto with vf have "verticesFrom f v = [] @ v # fst (splitAt w (tl (verticesFrom f v))) @ w # snd (splitAt w (tl (verticesFrom f v)))" by (auto dest: splitAt_ram) then show ?thesis apply (unfold before_def) by (intro exI) qed lemma last_vFrom: "\ distinct(vertices f); x \ \ f \ \ last(verticesFrom f x) = f\<^bsup>-1\<^esup> \ x" apply(frule split_list) apply(clarsimp simp:prevVertex_def verticesFrom_Def split:list.split) done lemma rotate_before_vFrom: "\ distinct(vertices f); r \ \ f; r\u \ \ before (verticesFrom f r) u v \ before (verticesFrom f v) r u" using [[linarith_neq_limit = 1]] apply(frule split_list) apply(clarsimp simp:verticesFrom_Def) apply(rename_tac as bs) apply(clarsimp simp:before_def) apply(rename_tac xs ys zs) apply(subst (asm) Cons_eq_append_conv) apply clarsimp apply(rename_tac bs') apply(subst (asm) 
append_eq_append_conv2) apply clarsimp apply(rename_tac as') apply(erule disjE) defer apply clarsimp apply(rule_tac x = "v#zs" in exI) apply(rule_tac x = "bs@as'" in exI) apply simp apply clarsimp apply(subst (asm) append_eq_Cons_conv) apply(erule disjE) apply clarsimp apply(rule_tac x = "v#zs" in exI) apply simp apply blast apply clarsimp apply(rename_tac ys') apply(subst (asm) append_eq_append_conv2) apply clarsimp apply(rename_tac vs') apply(erule disjE) apply clarsimp apply(subst (asm) append_eq_Cons_conv) apply(erule disjE) apply clarsimp apply(rule_tac x = "v#zs" in exI) apply simp apply blast apply clarsimp apply(rule_tac x = "v#ys'@as" in exI) apply simp apply blast apply clarsimp apply(rule_tac x = "v#zs" in exI) apply simp apply blast done lemma before_between: "\ before(verticesFrom f x) y z; distinct(vertices f); x \ \ f; x \ y \ \ y \ set(between (vertices f) x z)" apply(induct f) apply(clarsimp simp:verticesFrom_Def before_def) apply(frule split_list) apply(clarsimp simp:Cons_eq_append_conv) apply(subst (asm) append_eq_append_conv2) apply clarsimp apply(erule disjE) apply(clarsimp simp:append_eq_Cons_conv) prefer 2 apply(clarsimp simp add:between_def split_def) apply(erule disjE) apply (clarsimp simp:between_def split_def) apply clarsimp apply(subst (asm) append_eq_append_conv2) apply clarsimp apply(erule disjE) prefer 2 apply(clarsimp simp add:between_def split_def) apply(clarsimp simp:append_eq_Cons_conv) apply(fastforce simp:between_def split_def) done lemma before_between2: "\ before (verticesFrom f u) v w; distinct(vertices f); u \ \ f \ \ u = v \ u \ set (between (vertices f) w v)" apply(subgoal_tac "pre_between (vertices f) v w") apply(subst verticesFrom_between) apply assumption apply(erule pre_between_symI) apply(frule pre_between_r1) apply(drule (1) verticesFrom_distinct) using verticesFrom_hd[of f u] apply(clarsimp simp add:before_def between_def split_def hd_append split:if_split_asm) apply(frule (1) verticesFrom_distinct) apply(clarsimp simp:pre_between_def before_def simp del:verticesFrom_distinct) apply(rule conjI) apply(cases "v = u") apply simp apply(rule verticesFrom_in'[of v f u])apply simp apply assumption apply(cases "w = u") apply simp apply(rule verticesFrom_in'[of w f u])apply simp apply assumption done (************************** splitFace ********************************) subsection \@{const splitFace}\ definition pre_splitFace :: "graph \ vertex \ vertex \ face \ vertex list \ bool" where "pre_splitFace g ram1 ram2 oldF nvs \ oldF \ \ g \ \ final oldF \ distinct (vertices oldF) \ distinct nvs \ \ g \ set nvs = {} \ \ oldF \ set nvs = {} \ ram1 \ \ oldF \ ram2 \ \ oldF \ ram1 \ ram2 \ (((ram1,ram2) \ edges oldF \ (ram2,ram1) \ edges oldF \ (ram1, ram2) \ edges g \ (ram2, ram1) \ edges g) \ nvs \ [])" declare pre_splitFace_def [simp] lemma pre_splitFace_pre_split_face[simp]: "pre_splitFace g ram1 ram2 oldF nvs \ pre_split_face oldF ram1 ram2 nvs" by (simp add: pre_splitFace_def pre_split_face_def) lemma pre_splitFace_oldF[simp]: "pre_splitFace g ram1 ram2 oldF nvs \ oldF \ \ g" apply (unfold pre_splitFace_def) by force declare pre_splitFace_def [simp del] lemma splitFace_split_face: "oldF \ \ g \ (f\<^sub>1, f\<^sub>2, newGraph) = splitFace g ram\<^sub>1 ram\<^sub>2 oldF newVs \ (f\<^sub>1, f\<^sub>2) = split_face oldF ram\<^sub>1 ram\<^sub>2 newVs" by (simp add: splitFace_def split_def) (* split_face *) lemma split_face_empty_ram2_ram1_in_f12: "pre_split_face oldF ram1 ram2 [] \ (f12, f21) = split_face oldF ram1 ram2 [] \ (ram2, ram1) \ edges f12" 
proof - assume split: "(f12, f21) = split_face oldF ram1 ram2 []" "pre_split_face oldF ram1 ram2 []" then have "ram2 \ \ f12" by (simp add: split_face_def) moreover with split have "f12 \ ram2 = hd (vertices f12)" apply (rule_tac nextElem_suc2) apply (simp add: pre_split_face_def split_face_distinct1) by (simp add: split_face_def) with split have "ram1 = f12 \ ram2" by (simp add: split_face_def) ultimately show ?thesis by (simp add: edges_face_def) qed lemma split_face_empty_ram2_ram1_in_f12': "pre_split_face oldF ram1 ram2 [] \ (ram2, ram1) \ edges (fst (split_face oldF ram1 ram2 []))" proof - assume split: "pre_split_face oldF ram1 ram2 []" define f12 where "f12 = fst (split_face oldF ram1 ram2 [])" define f21 where "f21 = snd (split_face oldF ram1 ram2 [])" then have "(f12, f21) = split_face oldF ram1 ram2 []" by (simp add: f12_def f21_def) with split have "(ram2, ram1) \ edges f12" by (rule split_face_empty_ram2_ram1_in_f12) with f12_def show ?thesis by simp qed lemma splitFace_empty_ram2_ram1_in_f12: "pre_splitFace g ram1 ram2 oldF [] \ (f12, f21, newGraph) = splitFace g ram1 ram2 oldF [] \ (ram2, ram1) \ edges f12" proof - assume pre: "pre_splitFace g ram1 ram2 oldF []" then have oldF: "oldF \ \ g" by (unfold pre_splitFace_def) simp assume sp: "(f12, f21, newGraph) = splitFace g ram1 ram2 oldF []" with oldF have "(f12, f21) = split_face oldF ram1 ram2 []" by (rule splitFace_split_face) with pre sp show ?thesis apply (unfold splitFace_def pre_splitFace_def) apply (simp add: split_def) apply (rule split_face_empty_ram2_ram1_in_f12') apply (rule pre_splitFace_pre_split_face) apply (rule pre) done qed lemma splitFace_f12_new_vertices: "(f12, f21, newGraph) = splitFace g ram1 ram2 oldF newVs \ v \ set newVs \ v \ \ f12" apply (unfold splitFace_def) apply (simp add: split_def) apply (unfold split_face_def Let_def) by simp lemma splitFace_add_vertices_direct[simp]: "vertices (snd (snd (splitFace g ram1 ram2 oldF [countVertices g ..< countVertices g + n]))) = vertices g @ [countVertices g ..< countVertices g + n]" apply (auto simp: splitFace_def split_def) apply (cases g) apply auto apply (induct n) by auto lemma splitFace_delete_oldF: " (f12, f21, newGraph) = splitFace g ram1 ram2 oldF newVertexList \ oldF \ f12 \ oldF \ f21 \ distinct (faces g) \ oldF \ \ newGraph" by (simp add: splitFace_def split_def distinct_set_replace) lemma splitFace_faces_1: "(f12, f21, newGraph) = splitFace g ram1 ram2 oldF newVertexList \ oldF \ \ g \ set (faces newGraph) \ {oldF} = {f12, f21} \ set (faces g)" (is "?oldF \ ?C \ ?A = ?B") proof (intro equalityI subsetI) fix x assume "x \ ?A" and "?C" and "?oldF" then show "x \ ?B" apply (simp add: splitFace_def split_def) by (auto dest: replace1) next fix x assume "x \ ?B" and "?C" and "?oldF" then show "x \ ?A" apply (simp add: splitFace_def split_def) apply (cases "x = oldF") apply simp_all apply (cases "x = f12") apply simp_all apply (cases "x = f21") by (auto intro: replace3 replace4) qed lemma splitFace_distinct1[intro]:"pre_splitFace g ram1 ram2 oldF newVertexList \ distinct (vertices (fst (snd (splitFace g ram1 ram2 oldF newVertexList))))" apply (unfold splitFace_def split_def) by (auto simp add: split_def) lemma splitFace_distinct2[intro]:"pre_splitFace g ram1 ram2 oldF newVertexList \ distinct (vertices (fst (splitFace g ram1 ram2 oldF newVertexList)))" apply (unfold splitFace_def split_def) by (auto simp add: split_def) lemma splitFace_add_f21': "f' \ \ g' \ fst (snd (splitFace g' v a f' nvl)) \ \ (snd (snd (splitFace g' v a f' nvl)))" apply (simp add: 
splitFace_def split_def) apply (rule disjI2) apply (rule replace3) by simp_all lemma split_face_help[simp]: "Suc 0 < |vertices (fst (split_face f' v a nvl))|" by (simp add: split_face_def) lemma split_face_help'[simp]: "Suc 0 < |vertices (snd (split_face f' v a nvl))|" by (simp add: split_face_def) lemma splitFace_split: "f \ \ (snd (snd (splitFace g v a f' nvl))) \ f \ \ g \ f = fst (splitFace g v a f' nvl) \ f = (fst (snd (splitFace g v a f' nvl)))" apply (simp add: splitFace_def split_def) apply safe by (frule replace5) simp lemma pre_FaceDiv_between1: "pre_splitFace g' ram1 ram2 f [] \ \ between (vertices f) ram1 ram2 = []" proof - assume pre_f: "pre_splitFace g' ram1 ram2 f []" then have pre_bet: "pre_between (vertices f) ram1 ram2" apply (unfold pre_splitFace_def) by (simp add: pre_between_def) then have pre_bet': "pre_between (verticesFrom f ram1) ram1 ram2" by (simp add: pre_between_def set_verticesFrom) from pre_f have dist': "distinct (verticesFrom f ram1)" apply (unfold pre_splitFace_def) by simp from pre_f have ram2: "ram2 \ \ f" apply (unfold pre_splitFace_def) by simp from pre_f have "\ is_nextElem (vertices f) ram1 ram2" apply (unfold pre_splitFace_def) by auto with pre_f have "\ is_nextElem (verticesFrom f ram1) ram1 ram2" apply (unfold pre_splitFace_def) by (simp add: verticesFrom_is_nextElem [symmetric]) moreover from pre_f have "ram2 \ set (verticesFrom f ram1)" apply (unfold pre_splitFace_def) by auto moreover from pre_f have "ram2 \ ram1" apply (unfold pre_splitFace_def) by auto ultimately have ram2_not: "ram2 \ hd (snd (splitAt ram1 (vertices f)) @ fst (splitAt ram1 (vertices f)))" apply (simp add: is_nextElem_def verticesFrom_Def) apply (cases "snd (splitAt ram1 (vertices f)) @ fst (splitAt ram1 (vertices f))") apply simp apply simp apply (simp add: is_sublist_def) by auto from pre_f have before: "before (verticesFrom f ram1) ram1 ram2" apply (unfold pre_splitFace_def) apply safe apply (rule before_verticesFrom) by auto have "fst (splitAt ram2 (snd (splitAt ram1 (verticesFrom f ram1)))) = [] \ False" proof - assume "fst (splitAt ram2 (snd (splitAt ram1 (verticesFrom f ram1)))) = []" moreover from ram2 pre_f have "ram2 \ set (verticesFrom f ram1)"apply (unfold pre_splitFace_def) by auto with pre_f have "ram2 \ set (snd (splitAt ram1 (vertices f)) @ fst (splitAt ram1 (vertices f)))" apply (simp add: verticesFrom_Def) apply (unfold pre_splitFace_def) by auto moreover note dist' ultimately have "ram2 = hd (snd (splitAt ram1 (vertices f)) @ fst (splitAt ram1 (vertices f)))" apply (rule_tac ccontr) apply (cases "(snd (splitAt ram1 (vertices f)) @ fst (splitAt ram1 (vertices f)))") apply simp apply simp by (simp add: verticesFrom_Def del: distinct_append) with ram2_not show ?thesis by auto qed with before pre_bet' have "between (verticesFrom f ram1) ram1 ram2 \ []" by auto with pre_f pre_bet show ?thesis apply (unfold pre_splitFace_def) apply safe by (simp add: verticesFrom_between) qed lemma pre_FaceDiv_between2: "pre_splitFace g' ram1 ram2 f [] \ \ between (vertices f) ram2 ram1 = []" proof - assume pre_f: "pre_splitFace g' ram1 ram2 f []" then have "pre_splitFace g' ram2 ram1 f []" apply (unfold pre_splitFace_def) by auto then show ?thesis by (rule pre_FaceDiv_between1) qed (********************** Edges *******************) definition Edges :: "vertex list \ (vertex \ vertex) set" where "Edges vs \ {(a,b). is_sublist [a,b] vs}" lemma Edges_Nil[simp]: "Edges [] = {}" by(simp add:Edges_def is_sublist_def) lemma Edges_rev: "Edges (rev (zs::vertex list)) = {(b,a). 
(a,b) \ Edges zs}" by (auto simp add: Edges_def is_sublist_rev) lemma in_Edges_rev[simp]: "((a,b) : Edges (rev (zs::vertex list))) = ((b,a) \ Edges zs)" by (simp add: Edges_rev) lemma notinset_notinEdge1: "x \ set xs \ (x,y) \ Edges xs" by(unfold Edges_def)(blast intro:is_sublist_in) lemma notinset_notinEdge2: "y \ set xs \ (x,y) \ Edges xs" by(unfold Edges_def)(blast intro:is_sublist_in1) lemma in_Edges_in_set: "(x,y) : Edges vs \ x \ set vs \ y \ set vs" by(unfold Edges_def)(blast intro:is_sublist_in is_sublist_in1) lemma edges_conv_Edges: "distinct(vertices(f::face)) \ \ f = Edges (vertices f) \ (if vertices f = [] then {} else {(last(vertices f), hd(vertices f))})" by(induct f) (auto simp: Edges_def is_nextElem_def) lemma Edges_Cons: "Edges(x#xs) = (if xs = [] then {} else Edges xs \ {(x,hd xs)})" apply(auto simp:Edges_def) apply(rule ccontr) apply(simp) apply(erule thin_rl) apply(erule contrapos_np) apply(clarsimp simp:is_sublist_def Cons_eq_append_conv) apply(rename_tac as bs) apply(erule disjE) apply simp apply(clarsimp) apply(rename_tac cs) apply(rule_tac x = cs in exI) apply(rule_tac x = bs in exI) apply(rule HOL.refl) apply(clarsimp simp:neq_Nil_conv) apply(subst is_sublist_rec) apply simp apply(simp add:is_sublist_def) apply(erule exE)+ apply(rename_tac as bs) apply simp apply(rule_tac x = "x#as" in exI) apply(rule_tac x = bs in exI) apply simp done lemma Edges_append: "Edges(xs @ ys) = (if xs = [] then Edges ys else if ys = [] then Edges xs else Edges xs \ Edges ys \ {(last xs, hd ys)})" apply(induct xs) apply simp apply (simp add:Edges_Cons) apply blast done lemma Edges_rev_disj: "distinct xs \ Edges(rev xs) \ Edges(xs) = {}" apply(induct xs) apply simp apply(auto simp:Edges_Cons Edges_append last_rev notinset_notinEdge1 notinset_notinEdge2) done lemma disj_sets_disj_Edges: "set xs \ set ys = {} \ Edges xs \ Edges ys = {}" by(unfold Edges_def)(blast intro:is_sublist_in is_sublist_in1) lemma disj_sets_disj_Edges2: "set ys \ set xs = {} \ Edges xs \ Edges ys = {}" by(blast intro!:disj_sets_disj_Edges) lemma finite_Edges[iff]: "finite(Edges xs)" by(induct xs)(simp_all add:Edges_Cons) lemma Edges_compl: "\ distinct vs; x \ set vs; y \ set vs; x \ y \ \ Edges(x # between vs x y @ [y]) \ Edges(y # between vs y x @ [x]) = {}" using [[linarith_neq_limit = 1]] apply(subgoal_tac "\xs (ys::vertex list). xs \ [] \ set xs \ set ys = {} \ hd xs \ set ys") prefer 2 apply(drule hd_in_set) apply(blast) apply(frule split_list[of x]) apply clarsimp apply(erule disjE) apply(frule split_list[of y]) apply(clarsimp simp add:between_def split_def) apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply(fastforce) apply(frule split_list[of y]) apply(clarsimp simp add:between_def split_def) apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply fastforce done lemma Edges_disj: "\ distinct vs; x \ set vs; z \ set vs; x \ y; y \ z; y \ set(between vs x z) \ \ Edges(x # between vs x y @ [y]) \ Edges(y # between vs y z @ [z]) = {}" apply(subgoal_tac "\xs (ys::vertex list). 
xs \ [] \ set xs \ set ys = {} \ hd xs \ set ys") prefer 2 apply(drule hd_in_set) apply(blast) apply(frule split_list[of x]) apply clarsimp apply(erule disjE) apply simp apply(drule inbetween_inset) apply(rule Edges_compl) apply simp apply simp apply simp apply simp apply(erule disjE) apply(frule split_list[of z]) apply(clarsimp simp add:between_def split_def) apply(erule disjE) apply(frule split_list[of y]) apply clarsimp apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply fastforce apply(frule split_list[of y]) apply clarsimp apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply fastforce apply(frule split_list[of z]) apply(clarsimp simp add:between_def split_def) apply(frule split_list[of y]) apply clarsimp apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply fastforce done lemma edges_conv_Un_Edges: "\ distinct(vertices(f::face)); x \ \ f; y \ \ f; x \ y \ \ \ f = Edges(x # between (vertices f) x y @ [y]) \ Edges(y # between (vertices f) y x @ [x])" apply(simp add:edges_conv_Edges) apply(rule conjI) apply clarsimp apply clarsimp apply(frule split_list[of x]) apply clarsimp apply(erule disjE) apply(frule split_list[of y]) apply(clarsimp simp add:between_def split_def) apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply(fastforce) apply(frule split_list[of y]) apply(clarsimp simp add:between_def split_def) apply (clarsimp simp add:Edges_Cons Edges_append notinset_notinEdge1 notinset_notinEdge2 disj_sets_disj_Edges disj_sets_disj_Edges2 Int_Un_distrib Int_Un_distrib2) apply(fastforce) done lemma Edges_between_edges: "\ (a,b) \ Edges (u # between (vertices(f::face)) u v @ [v]); pre_split_face f u v vs \ \ (a,b) \ \ f" apply(simp add:pre_split_face_def) apply(induct f) apply(simp add:edges_conv_Edges Edges_Cons) apply clarify apply(rename_tac list) apply(case_tac "between list u v = []") apply simp apply(drule (4) is_nextElem_between_empty') apply(simp add:Edges_def) apply(subgoal_tac "pre_between list u v") prefer 2 apply (simp add:pre_between_def) apply(subgoal_tac "pre_between list v u") prefer 2 apply (simp add:pre_between_def) apply(case_tac "before list u v") apply(drule (1) between_eq2) apply clarsimp apply(erule disjE) apply (clarsimp simp:neq_Nil_conv) apply(rule is_nextElem_sublistI) apply(simp (no_asm) add:is_sublist_def) apply blast apply(rule is_nextElem_sublistI) apply(clarsimp simp add:Edges_def is_sublist_def) apply(rename_tac as bs cs xs ys) apply(rule_tac x = "as @ u # xs" in exI) apply(rule_tac x = "ys @ cs" in exI) apply simp apply (simp only:before_xor) apply(drule (1) between_eq2) apply clarsimp apply(rename_tac as bs cs) apply(erule disjE) apply (clarsimp simp:neq_Nil_conv) apply(case_tac cs) apply clarsimp apply(simp add:is_nextElem_def) apply simp apply(rule is_nextElem_sublistI) apply(simp (no_asm) add:is_sublist_def) apply(rule_tac x = "as @ v # bs" in exI) apply simp apply(rule_tac m1 = "|as|+1" in is_nextElem_rotate_eq[THEN iffD1]) apply simp apply(simp add:rotate_drop_take) apply(rule is_nextElem_sublistI) apply(clarsimp simp add:Edges_def is_sublist_def) apply(rename_tac xs ys) apply(rule_tac x = "bs 
@ u # xs" in exI) apply simp done (******************** split_face_edges ********************************) (* split_face *) lemma edges_split_face1: "pre_split_face f u v vs \ \(fst(split_face f u v vs)) = Edges(v # rev vs @ [u]) \ Edges(u # between (vertices f) u v @ [v])" apply(simp add: edges_conv_Edges split_face_distinct1') apply(auto simp add:split_face_def Edges_Cons Edges_append) done lemma edges_split_face2: "pre_split_face f u v vs \ \(snd(split_face f u v vs)) = Edges(u # vs @ [v]) \ Edges(v # between (vertices f) v u @ [u])" apply(simp add: edges_conv_Edges split_face_distinct2') apply(auto simp add:split_face_def Edges_Cons Edges_append) done lemma split_face_empty_ram1_ram2_in_f21: "pre_split_face oldF ram1 ram2 [] \ (f12, f21) = split_face oldF ram1 ram2 [] \ (ram1, ram2) \ edges f21" proof - assume split: "(f12, f21) = split_face oldF ram1 ram2 []" "pre_split_face oldF ram1 ram2 []" then have "ram1 \ \ f21" by (simp add: split_face_def) moreover with split have "f21 \ ram1 = hd (vertices f21)" apply (rule_tac nextElem_suc2) apply (simp add: pre_split_face_def split_face_distinct2) by (simp add: split_face_def) with split have "ram2 = f21 \ ram1" by (simp add: split_face_def) ultimately show ?thesis by (simp add: edges_face_def) qed lemma split_face_empty_ram1_ram2_in_f21': "pre_split_face oldF ram1 ram2 [] \ (ram1, ram2) \ edges (snd (split_face oldF ram1 ram2 []))" proof - assume split: "pre_split_face oldF ram1 ram2 []" define f12 where "f12 = fst (split_face oldF ram1 ram2 [])" define f21 where "f21 = snd (split_face oldF ram1 ram2 [])" then have "(f12, f21) = split_face oldF ram1 ram2 []" by (simp add: f12_def f21_def) with split have "(ram1, ram2) \ edges f21" by (rule split_face_empty_ram1_ram2_in_f21) with f21_def show ?thesis by simp qed lemma splitFace_empty_ram1_ram2_in_f21: "pre_splitFace g ram1 ram2 oldF [] \ (f12, f21, newGraph) = splitFace g ram1 ram2 oldF [] \ (ram1, ram2) \ edges f21" proof - assume pre: "pre_splitFace g ram1 ram2 oldF []" then have oldF: "oldF \ \ g" by (unfold pre_splitFace_def) simp assume sp: "(f12, f21, newGraph) = splitFace g ram1 ram2 oldF []" with oldF have "(f12, f21) = split_face oldF ram1 ram2 []" by (rule splitFace_split_face) with pre sp show ?thesis apply (unfold splitFace_def pre_splitFace_def) apply (simp add: split_def) apply (rule split_face_empty_ram1_ram2_in_f21') apply (rule pre_splitFace_pre_split_face) apply (rule pre) done qed lemma splitFace_f21_new_vertices: "(f12, f21, newGraph) = splitFace g ram1 ram2 oldF newVs \ v \ set newVs \ v \ \ f21" apply (unfold splitFace_def) apply (simp add: split_def) apply (unfold split_face_def) by simp lemma split_face_edges_f12: assumes vors: "pre_split_face f ram1 ram2 vs" "(f12, f21) = split_face f ram1 ram2 vs" "vs \ []" "vs1 = between (vertices f) ram1 ram2" "vs1 \ []" shows "edges f12 = {(hd vs, ram1) , (ram1, hd vs1), (last vs1, ram2), (ram2, last vs)} \ Edges(rev vs) \ Edges vs1" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f12) apply (case_tac "c = ram2 \ d = last vs") apply simp apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (subgoal_tac "between (vertices f) ram1 ram2 @ [ram2] = d # bs") apply (case_tac "between (vertices f) 
ram1 ram2") apply simp apply simp apply (rule dist_at2) apply (rule dist_f12) apply (rule sym) apply simp apply simp (* c \ ram1 *) apply (case_tac "c \ set(rev vs)") apply (subgoal_tac "distinct(rev vs)") apply (simp only: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac "zs") apply simp apply (subgoal_tac "ys = as") apply(drule last_rev) apply (simp) apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply (simp add:rev_swap) apply (subgoal_tac "ys = as") apply (clarsimp simp add: Edges_def is_sublist_def) apply (rule conjI) apply (subgoal_tac "\as bs. rev list @ [d, c] = as @ d # c # bs") apply simp apply (intro exI) apply simp apply (subgoal_tac "\asa bs. rev list @ d # c # rev as = asa @ d # c # bs") apply simp apply (intro exI) apply simp apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply (simp add: pre_split_face_def) (* c \ set vs *) apply (case_tac "c \ set (between (vertices f) ram1 ram2)") apply (subgoal_tac "distinct (between (vertices f) ram1 ram2)") apply (simp add: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac zs) apply simp apply (subgoal_tac "rev vs @ ram1 # ys = as") apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "rev vs @ ram1 # ys = as") apply (simp add: Edges_def is_sublist_def) apply (subgoal_tac "(rev vs @ ram1 # ys) @ c # a # list @ [ram2] = as @ c # d # bs") apply simp apply (rule conjI) apply (rule impI) apply (rule disjI2)+ apply (rule exI) apply force apply (rule impI) apply (rule disjI2)+ apply force apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply (thin_tac "rev vs @ ram1 # between (vertices f) ram1 ram2 @ [ram2] = as @ c # d # bs") apply (subgoal_tac "distinct (vertices f12)") apply simp apply (rule dist_f12) (* c \ set (between (vertices f) ram1 ram2) *) apply (subgoal_tac "c = ram2") apply simp apply (subgoal_tac "rev vs @ ram1 # between (vertices f) ram1 ram2 = as") apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp (* c \ ram2 *) apply (subgoal_tac "c \ set (rev vs @ ram1 # between (vertices f) ram1 ram2 @ [ram2])") apply (thin_tac "rev vs @ ram1 # between (vertices f) ram1 ram2 @ [ram2] = as @ c # d # bs") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f12 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) apply (case_tac "c = ram2 \ d = last vs") apply simp apply (rule disjCI) apply simp apply (case_tac "c = hd vs \ d = ram1") apply (case_tac "vs") apply simp apply force apply simp apply (case_tac "c = ram1 \ d = hd (between (vertices f) ram1 ram2)") apply (case_tac "between (vertices f) ram1 ram2") apply simp apply force apply simp apply (case_tac "c = last (between (vertices f) ram1 ram2) \ d = ram2") apply (case_tac "between (vertices f) ram1 ram2" rule: rev_exhaust) apply simp apply simp apply (intro exI) apply (subgoal_tac "rev vs @ ram1 # ys @ [y, ram2] = (rev vs @ ram1 # ys) @ y # ram2 # []") apply assumption apply simp apply simp apply (case_tac "(d,c) \ Edges vs") apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (intro exI) apply simp apply (simp add: Edges_def is_sublist_def) apply 
(elim exE) apply simp apply (intro exI) apply (subgoal_tac "rev vs @ ram1 # as @ c # d # bs @ [ram2] = (rev vs @ ram1 # as) @ c # d # bs @ [ram2]") apply assumption by simp qed lemma split_face_edges_f12_vs: assumes vors: "pre_split_face f ram1 ram2 []" "(f12, f21) = split_face f ram1 ram2 []" "vs1 = between (vertices f) ram1 ram2" "vs1 \ []" shows "edges f12 = {(ram2, ram1) , (ram1, hd vs1), (last vs1, ram2)} \ Edges vs1" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f12) apply (case_tac " c = ram2 \ d = ram1") apply simp apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (subgoal_tac "as = []") apply simp apply (case_tac "between (vertices f) ram1 ram2") apply simp apply simp apply (rule dist_at1) apply (rule dist_f12) apply force apply (rule sym) apply simp (* a \ ram1 *) apply (case_tac "c \ set (between (vertices f) ram1 ram2)") apply (subgoal_tac "distinct (between (vertices f) ram1 ram2)") apply (simp add: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac zs) apply simp apply (subgoal_tac "ram1 # ys = as") apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "ram1 # ys = as") apply (simp add: Edges_def is_sublist_def) apply (subgoal_tac "(ram1 # ys) @ c # a # list @ [ram2] = as @ c # d # bs") apply simp apply (rule conjI) apply (rule impI) apply (rule disjI2)+ apply (rule exI) apply force apply (rule impI) apply (rule disjI2)+ apply force apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply (thin_tac "ram1 # between (vertices f) ram1 ram2 @ [ram2] = as @ c # d # bs") apply (subgoal_tac "distinct (vertices f12)") apply simp apply (rule dist_f12) (* c \ set (between (vertices f) ram1 ram2) *) apply (subgoal_tac "c = ram2") apply simp apply (subgoal_tac "ram1 # between (vertices f) ram1 ram2 = as") apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp (* c \ ram2 *) apply (subgoal_tac "c \ set (ram1 # between (vertices f) ram1 ram2 @ [ram2])") apply (thin_tac "ram1 # between (vertices f) ram1 ram2 @ [ram2] = as @ c # d # bs") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f12 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) apply (case_tac "c = ram2 \ d = ram1") apply simp apply (rule disjCI) apply simp apply (case_tac "c = ram1 \ d = hd (between (vertices f) ram1 ram2)") apply (case_tac "between (vertices f) ram1 ram2") apply simp apply force apply simp apply (case_tac "c = last (between (vertices f) ram1 ram2) \ d = ram2") apply (case_tac "between (vertices f) ram1 ram2" rule: rev_exhaust) apply simp apply simp apply (intro exI) apply (subgoal_tac "ram1 # ys @ [y, ram2] = (ram1 # ys) @ y # ram2 # []") apply assumption apply simp apply simp apply (case_tac "(c, d) \ Edges vs") apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (intro exI) apply (subgoal_tac "ram1 # as @ c # d # bs @ [ram2] 
= (ram1 # as) @ c # d # (bs @ [ram2])") apply assumption apply simp apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (intro exI) apply (subgoal_tac "ram1 # as @ c # d # bs @ [ram2] = (ram1 # as) @ c # d # bs @ [ram2]") apply assumption by simp qed lemma split_face_edges_f12_bet: assumes vors: "pre_split_face f ram1 ram2 vs" "(f12, f21) = split_face f ram1 ram2 vs" "vs \ []" "between (vertices f) ram1 ram2 = []" shows "edges f12 = {(hd vs, ram1) , (ram1, ram2), (ram2, last vs)} \ Edges(rev vs)" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f12) apply (case_tac " c = ram2 \ d = last vs") apply simp apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (subgoal_tac "rev vs = as") apply simp apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp (* a \ ram1 *) apply (case_tac "c \ set(rev vs)") apply (subgoal_tac "distinct(rev vs)") apply (simp only: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac "zs") apply simp apply (subgoal_tac "ys = as") apply (simp add:rev_swap) apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "ys = as") apply (simp add: Edges_def is_sublist_def rev_swap) apply (rule conjI) apply (subgoal_tac "\as bs. rev list @ [d, c] = as @ d # c # bs") apply simp apply (intro exI) apply simp apply (subgoal_tac "\asa bs. rev list @ d # c # rev as = asa @ d # c # bs") apply simp apply (intro exI) apply simp apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp apply (simp add: pre_split_face_def) (* c \ set vs *) apply (subgoal_tac "c = ram2") apply simp apply (subgoal_tac "rev vs @ [ram1] = as") apply force apply (rule dist_at1) apply (rule dist_f12) apply (rule sym) apply simp apply simp (* c \ ram2 *) apply (subgoal_tac "c \ set (rev vs @ ram1 # [ram2])") apply (thin_tac "rev vs @ ram1 # [ram2] = as @ c # d # bs") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f12 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) apply (case_tac "c = hd vs \ d = ram1") apply (case_tac "vs") apply simp apply force apply simp apply (case_tac "c = ram1 \ d = ram2") apply force apply simp apply (case_tac "c = ram2 \ d = last vs") apply (case_tac "vs" rule:rev_exhaust) apply simp apply simp apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (rule conjI) apply (rule impI) apply (rule disjI1) apply (intro exI) apply (subgoal_tac "c # d # rev as @ [ram1, ram2] = [] @ c # d # rev as @ [ram1,ram2]") apply assumption apply simp apply (rule impI) apply (rule disjI1) apply (intro exI) by simp qed lemma split_face_edges_f12_bet_vs: assumes vors: "pre_split_face f ram1 ram2 []" "(f12, f21) = split_face f ram1 ram2 []" "between (vertices f) ram1 ram2 = []" shows "edges f12 = {(ram2, ram1) , (ram1, ram2)}" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd 
x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f12) apply (case_tac " c = ram2 \ d = ram1") apply force apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (case_tac "as") apply simp apply (case_tac "list") apply simp apply simp apply (case_tac "as") apply simp apply (case_tac "list") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f12: "distinct (vertices f12)" apply (rule_tac split_face_distinct1) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f12 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) by auto qed lemma split_face_edges_f12_subset: "pre_split_face f ram1 ram2 vs \ (f12, f21) = split_face f ram1 ram2 vs \ vs \ [] \ {(hd vs, ram1), (ram2, last vs)} \ Edges(rev vs) \ edges f12" apply (case_tac "between (vertices f) ram1 ram2") apply (frule split_face_edges_f12_bet) apply simp apply simp apply simp apply force apply (frule split_face_edges_f12) apply simp+ by force (*declare in_Edges_rev [simp del] rev_eq_Cons_iff [simp del]*) lemma split_face_edges_f21: assumes vors: "pre_split_face f ram1 ram2 vs" "(f12, f21) = split_face f ram1 ram2 vs" "vs \ []" "vs2 = between (vertices f) ram2 ram1" "vs2 \ []" shows "edges f21 = {(last vs2, ram1) , (ram1, hd vs), (last vs, ram2), (ram2, hd vs2)} \ Edges vs \ Edges vs2" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f21) apply (case_tac " c = last vs \ d = ram2") apply simp apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 = as") apply clarsimp apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp (* a \ ram1 *) apply (case_tac "c \ set vs") apply (subgoal_tac "distinct vs") apply (simp add: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac "zs") apply simp apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # ys = as") apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # ys = as") apply (subgoal_tac "(ram2 # between (vertices f) ram2 ram1 @ ram1 # ys) @ c # a # list = as @ c # d # bs") apply (thin_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # ys @ c # a # list = as @ c # d # bs") apply (simp add: Edges_def is_sublist_def) apply(rule conjI) apply (subgoal_tac "\as bs. 
ys @ [c, d] = as @ c # d # bs") apply simp apply force apply force apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply (simp add: pre_split_face_def) (* c \ set (rev vs) *) apply (case_tac "c \ set (between (vertices f) ram2 ram1)") apply (subgoal_tac "distinct (between (vertices f) ram2 ram1)") apply (simp add: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac zs) apply simp apply (subgoal_tac "ram2 # ys = as") apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "ram2 # ys = as") apply (simp add: Edges_def is_sublist_def) apply (subgoal_tac "(ram2 # ys) @ c # a # list @ ram1 # vs = as @ c # d # bs") apply (thin_tac "ram2 # ys @ c # a # list @ ram1 # vs = as @ c # d # bs") apply (rule conjI) apply (rule impI) apply (rule disjI2)+ apply force apply (rule impI) apply (rule disjI2)+ apply force apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply (subgoal_tac "distinct (vertices f21)") apply (thin_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # vs = as @ c # d # bs") apply simp apply (rule dist_f21) (* c \ set (between (vertices f) ram2 ram1) *) apply (subgoal_tac "c = ram2") apply simp apply (subgoal_tac "[] = as") apply simp apply (case_tac "between (vertices f) ram2 ram1") apply simp apply simp apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp (* c \ ram2 *) apply (subgoal_tac "c \ set (ram2 # between (vertices f) ram2 ram1 @ ram1 # vs)") apply (thin_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # vs = as @ c # d # bs") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f21 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) apply (case_tac "c = ram2 \ d = hd (between (vertices f) ram2 ram1)") apply simp apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # vs = [] @ ram2 # hd (between (vertices f) ram2 ram1) # tl (between (vertices f) ram2 ram1) @ ram1 # vs") apply assumption apply simp apply (case_tac "c = ram1 \ d = hd vs") apply (rule disjI1) apply (case_tac "vs") apply simp apply simp apply (intro exI) apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # a # list = (ram2 # between (vertices f) ram2 ram1) @ ram1 # a # list") apply assumption apply simp apply (case_tac "c = last vs \ d = ram2") apply (case_tac vs rule:rev_exhaust) apply simp apply simp apply simp apply (case_tac "c = last (between (vertices f) ram2 ram1) \ d = ram1") apply (rule disjI1) apply (case_tac "between (vertices f) ram2 ram1" rule: rev_exhaust) apply simp apply (intro exI) apply simp apply (subgoal_tac "ram2 # ys @ y # ram1 # vs = (ram2 # ys) @ y # ram1 # vs") apply assumption apply simp apply simp apply (case_tac "(c, d) \ Edges vs") apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (rule conjI) apply (rule impI) apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # as @ [c, d] = (ram2 # between (vertices f) ram2 ram1 @ ram1 # as) @ c # d # []") apply assumption apply simp apply (rule impI) apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ ram1 # as 
@ c # d # bs = (ram2 # between (vertices f) ram2 ram1 @ ram1 # as) @ c # d # bs") apply assumption apply simp apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # as @ c # d # bs @ ram1 # vs = (ram2 # as) @ c # d # (bs @ ram1 # vs)") apply assumption by simp qed lemma split_face_edges_f21_vs: assumes vors: "pre_split_face f ram1 ram2 []" "(f12, f21) = split_face f ram1 ram2 []" "vs2 = between (vertices f) ram2 ram1" "vs2 \ []" shows "edges f21 = {(last vs2, ram1) , (ram1, ram2), (ram2, hd vs2)} \ Edges vs2" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f21) apply (case_tac " c = ram1 \ d = ram2") apply simp apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 = as") apply (subgoal_tac "(ram2 # between (vertices f) ram2 ram1) @ [ram1] = as @ ram1 # d # bs") apply (thin_tac "ram2 # between (vertices f) ram2 ram1 @ [ram1] = as @ ram1 # d # bs") apply simp apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp (* a \ ram1 *) apply (case_tac "c \ set (between (vertices f) ram2 ram1)") apply (subgoal_tac "distinct (between (vertices f) ram2 ram1)") apply (simp add: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac zs) apply simp apply (subgoal_tac "ram2 # ys = as") apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "ram2 # ys = as") apply (simp add: Edges_def is_sublist_def) apply (subgoal_tac "(ram2 # ys) @ c # a # list @ [ram1] = as @ c # d # bs") apply (thin_tac "ram2 # ys @ c # a # list @ [ram1] = as @ c # d # bs") apply (rule conjI) apply (rule impI) apply (rule disjI2)+ apply force apply (rule impI) apply (rule disjI2)+ apply force apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply (subgoal_tac "distinct (vertices f21)") apply (thin_tac "ram2 # between (vertices f) ram2 ram1 @ [ram1] = as @ c # d # bs") apply simp apply (rule dist_f21) (* c \ set (between (vertices f) ram2 ram1) *) apply (subgoal_tac "c = ram2") apply simp apply (subgoal_tac "[] = as") apply simp apply (case_tac "between (vertices f) ram2 ram1") apply simp apply simp apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp (* c \ ram2 *) apply (subgoal_tac "c \ set (ram2 # between (vertices f) ram2 ram1 @ [ram1])") apply (thin_tac "ram2 # between (vertices f) ram2 ram1 @ [ram1] = as @ c # d # bs") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f21 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) apply (case_tac "c = ram2 \ d = hd (between (vertices f) ram2 ram1)") apply simp apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # between (vertices f) ram2 ram1 @ [ram1] = [] @ ram2 # hd (between (vertices f) ram2 ram1) # tl (between (vertices f) ram2 ram1) @ [ram1]") apply assumption 
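(* the remaining cases are closed similarly: (ram1, ram2) is the closing edge of f21, and every other edge is located by an explicit sublist witness inside ram2 # between (vertices f) ram2 ram1 @ [ram1] *)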
apply simp apply (case_tac "c = ram1 \ d = ram2") apply (rule disjI2) apply simp apply simp apply (case_tac "c = last (between (vertices f) ram2 ram1) \ d = ram1") apply (rule disjI1) apply (case_tac "between (vertices f) ram2 ram1" rule: rev_exhaust) apply simp apply (intro exI) apply simp apply (subgoal_tac "ram2 # ys @ y # [ram1] = (ram2 # ys) @ y # [ram1]") apply assumption apply simp apply simp apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # as @ c # d # bs @ [ram1] = (ram2 # as) @ c # d # (bs @ [ram1])") apply assumption by simp qed lemma split_face_edges_f21_bet: assumes vors: "pre_split_face f ram1 ram2 vs" "(f12, f21) = split_face f ram1 ram2 vs" "vs \ []" "between (vertices f) ram2 ram1 = []" shows "edges f21 = {(ram1, hd vs), (last vs, ram2), (ram2, ram1)} \ Edges vs" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f21) apply (case_tac " c = last vs \ d = ram2") apply simp apply simp apply (elim exE) apply (case_tac "c = ram1") apply simp apply (subgoal_tac "[ram2] = as") apply clarsimp apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp (* a \ ram1 *) apply (case_tac "c \ set vs") apply (subgoal_tac "distinct vs") apply (simp add: in_set_conv_decomp) apply (elim exE) apply simp apply (case_tac "zs") apply simp apply (subgoal_tac "ram2 # ram1 # ys = as") apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply simp apply (subgoal_tac "ram2 # ram1 # ys = as") apply (subgoal_tac "(ram2 # ram1 # ys) @ c # a # list = as @ c # d # bs") apply (thin_tac "ram2 # ram1 # ys @ c # a # list = as @ c # d # bs") apply (simp add: Edges_def is_sublist_def) apply force apply force apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp apply (simp add: pre_split_face_def) (* c \ set (rev vs) *) apply (subgoal_tac "c = ram2") apply simp apply (subgoal_tac "[] = as") apply simp apply (rule dist_at1) apply (rule dist_f21) apply (rule sym) apply simp apply simp (* c \ ram2 *) apply (subgoal_tac "c \ set (ram2 # ram1 # vs)") apply (thin_tac "ram2 # ram1 # vs = as @ c # d # bs") apply simp by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f21 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) apply (case_tac "c = ram2 \ d = ram1") apply simp apply (rule disjI1) apply force apply (case_tac "c = ram1 \ d = hd vs") apply (rule disjI1) apply (case_tac "vs") apply simp apply simp apply (intro exI) apply (subgoal_tac "ram2 # ram1 # a # list = [ram2] @ ram1 # a # list") apply assumption apply simp apply (case_tac "c = last vs \ d = ram2") apply (case_tac vs rule: rev_exhaust) apply simp apply simp apply (simp add: Edges_def is_sublist_def) apply (elim exE) apply simp apply (rule conjI) apply (rule impI) apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # ram1 # as @ [c, d] = (ram2 # ram1 # as) @ c # d # []") apply assumption apply simp apply (rule 
impI) apply (rule disjI1) apply (intro exI) apply (subgoal_tac "ram2 # ram1 # as @ c # d # bs = (ram2 # ram1 # as) @ c # d # bs") apply assumption by simp qed lemma split_face_edges_f21_bet_vs: assumes vors: "pre_split_face f ram1 ram2 []" "(f12, f21) = split_face f ram1 ram2 []" "between (vertices f) ram2 ram1 = []" shows "edges f21 = {(ram1, ram2), (ram2, ram1)}" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?rhs" apply (simp add: split_face_def is_nextElem_def is_sublist_def dist_f21) apply (case_tac " c = ram1 \ d = ram2") apply simp apply simp apply (elim exE) apply (case_tac "as") apply simp apply (case_tac "list") by auto next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f21: "distinct (vertices f21)" apply (rule_tac split_face_distinct2) by auto from x vors show "x \ ?lhs" apply (simp add: dist_f21 is_nextElem_def is_sublist_def) apply (simp add: split_face_def) by auto qed lemma split_face_edges_f21_subset: "pre_split_face f ram1 ram2 vs \ (f12, f21) = split_face f ram1 ram2 vs \ vs \ [] \ {(last vs, ram2), (ram1, hd vs)} \ Edges vs \ edges f21" apply (case_tac "between (vertices f) ram2 ram1") apply (frule split_face_edges_f21_bet) apply simp apply simp apply simp apply force apply (frule split_face_edges_f21) apply simp+ by force lemma verticesFrom_ram1: "pre_split_face f ram1 ram2 vs \ verticesFrom f ram1 = ram1 # between (vertices f) ram1 ram2 @ ram2 # between (vertices f) ram2 ram1" apply (subgoal_tac "pre_between (vertices f) ram1 ram2") apply (subgoal_tac "distinct (vertices f)") apply (case_tac "before (vertices f) ram1 ram2") apply (simp add: verticesFrom_Def) apply (subgoal_tac "ram2 \ set (snd (splitAt ram1 (vertices f)))") apply (drule splitAt_ram) apply (subgoal_tac "snd (splitAt ram2 (snd (splitAt ram1 (vertices f)))) = snd (splitAt ram2 (vertices f))") apply simp apply (thin_tac "snd (splitAt ram1 (vertices f)) = fst (splitAt ram2 (snd (splitAt ram1 (vertices f)))) @ ram2 # snd (splitAt ram2 (snd (splitAt ram1 (vertices f))))") apply simp apply (rule before_dist_r2) apply simp apply simp apply (subgoal_tac "before (vertices f) ram2 ram1") apply (subgoal_tac "pre_between (vertices f) ram2 ram1") apply (simp add: verticesFrom_Def) apply (subgoal_tac "ram2 \ set (fst (splitAt ram1 (vertices f)))") apply (drule splitAt_ram) apply (subgoal_tac "fst (splitAt ram2 (fst (splitAt ram1 (vertices f)))) = fst (splitAt ram2 (vertices f))") apply simp apply (thin_tac "fst (splitAt ram1 (vertices f)) = fst (splitAt ram2 (fst (splitAt ram1 (vertices f)))) @ ram2 # snd (splitAt ram2 (fst (splitAt ram1 (vertices f))))") apply simp apply (rule before_dist_r1) apply simp apply simp apply (simp add: pre_between_def) apply force apply (simp add: pre_split_face_def) by (rule pre_split_face_p_between) lemma split_face_edges_f_vs1_vs2: assumes vors: "pre_split_face f ram1 ram2 vs" "between (vertices f) ram1 ram2 = []" "between (vertices f) ram2 ram1 = []" shows "edges f = {(ram2, ram1) , (ram1, ram2)}" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: 
pre_split_face_def) from x vors show "x \ ?rhs" apply (simp add: dist_f) apply (subgoal_tac "pre_between (vertices f) ram1 ram2") apply (drule is_nextElem_or) apply assumption apply (simp add: Edges_def) apply (case_tac "is_sublist [c, d] [ram1, ram2]") apply (simp) apply (simp) apply blast by (rule pre_split_face_p_between) next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from x vors show "x \ ?lhs" apply (simp add: dist_f) apply (subgoal_tac "ram1 \ \ f") apply (simp add: verticesFrom_is_nextElem verticesFrom_ram1) apply (simp add: is_nextElem_def) apply blast by (simp add: pre_split_face_def) qed lemma split_face_edges_f_vs1: assumes vors: "pre_split_face f ram1 ram2 vs" "between (vertices f) ram1 ram2 = []" "vs2 = between (vertices f) ram2 ram1" "vs2 \ []" shows "edges f = {(last vs2, ram1) , (ram1, ram2), (ram2, hd vs2)} \ Edges vs2" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from vors have dist_vs2: "distinct (ram2 # vs2 @ [ram1])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) apply (rule not_sym) by (simp add: pre_split_face_def) from x vors show "x \ ?rhs" apply (simp add: dist_f) apply (subgoal_tac "pre_between (vertices f) ram1 ram2") apply (drule is_nextElem_or) apply assumption apply (simp add: Edges_def) apply (case_tac "is_sublist [c, d] [ram1, ram2]") apply simp apply simp apply(erule disjE) apply blast apply (case_tac "c = ram2") apply (case_tac "between (vertices f) ram2 ram1") apply simp apply simp apply (drule is_sublist_distinct_prefix) apply (subgoal_tac "distinct (ram2 # vs2 @ [ram1])") apply simp apply (rule dist_vs2) apply simp apply (case_tac "c = ram1") apply (subgoal_tac "\ is_sublist [c, d] (ram2 # vs2 @ [ram1])") apply simp apply (rule is_sublist_notlast) apply (rule_tac dist_vs2) apply simp apply simp apply (simp add: is_sublist_def) apply (elim exE) apply (case_tac "between (vertices f) ram2 ram1" rule: rev_exhaust) apply simp apply simp apply (case_tac "bs" rule: rev_exhaust) apply simp apply simp apply (rule disjI2) apply (intro exI) apply simp apply (rule pre_split_face_p_between) by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from vors have dist_vs2: "distinct (ram2 # vs2 @ [ram1])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) apply (rule not_sym) by (simp add: pre_split_face_def) from x vors show "x \ ?lhs" apply (simp add: dist_f) apply (subgoal_tac "ram1 \ set (vertices f)") apply (simp add: verticesFrom_is_nextElem verticesFrom_ram1) apply (simp add: is_nextElem_def) apply (case_tac "c = last (between (vertices f) ram2 ram1) \ d = ram1") apply simp apply simp apply (rule disjI1) apply (case_tac "c = ram1 \ d = ram2") apply (simp add: is_sublist_def) apply force apply simp apply (case_tac "c = ram2 \ d = hd (between (vertices f) ram2 ram1)") apply (case_tac "between (vertices f) ram2 ram1") apply simp apply (simp add: is_sublist_def) apply (intro exI) apply (subgoal_tac "ram1 # ram2 # a # list = [ram1] @ ram2 # a # (list)") apply assumption apply simp apply 
simp apply (subgoal_tac "is_sublist [c, d] ((ram1 # [ram2]) @ between (vertices f) ram2 ram1 @ [])") apply simp apply (rule is_sublist_add) apply (simp add: Edges_def) by (simp add: pre_split_face_def) qed lemma split_face_edges_f_vs2: assumes vors: "pre_split_face f ram1 ram2 vs" "vs1 = between (vertices f) ram1 ram2" "vs1 \ []" "between (vertices f) ram2 ram1 = []" shows "edges f = {(ram2, ram1) , (ram1, hd vs1), (last vs1, ram2)} \ Edges vs1" (is "?lhs = ?rhs") proof (intro equalityI subsetI) fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from vors have dist_vs1: "distinct (ram1 # vs1 @ [ram2])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) by (simp add: pre_split_face_def) from x vors show "x \ ?rhs" apply (simp add: dist_f) apply (subgoal_tac "pre_between (vertices f) ram1 ram2") apply (drule is_nextElem_or) apply assumption apply (simp add: Edges_def) apply (case_tac "is_sublist [c, d] (ram1 # between (vertices f) ram1 ram2 @ [ram2])") apply simp apply (case_tac "c = ram1") apply (case_tac "between (vertices f) ram1 ram2") apply simp apply simp apply (drule is_sublist_distinct_prefix) apply (subgoal_tac "distinct (ram1 # vs1 @ [ram2])") apply simp apply (rule dist_vs1) apply simp apply (case_tac "c = ram2") apply (subgoal_tac "\ is_sublist [c, d] (ram1 # vs1 @ [ram2])") apply simp apply (rule is_sublist_notlast) apply (rule_tac dist_vs1) apply simp apply simp apply (simp add: is_sublist_def) apply (elim exE) apply (case_tac "between (vertices f) ram1 ram2" rule: rev_exhaust) apply simp apply simp apply (case_tac "bs" rule: rev_exhaust) apply simp apply simp apply (rule disjI2) apply (intro exI) apply simp apply simp apply (rule pre_split_face_p_between) by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from vors have dist_vs1: "distinct (ram1 # vs1 @ [ram2])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) by (simp add: pre_split_face_def) from x vors show "x \ ?lhs" apply (simp add: dist_f) apply (subgoal_tac "ram1 \ \ f") apply (simp add: verticesFrom_is_nextElem verticesFrom_ram1) apply (simp add: is_nextElem_def) apply (case_tac "c = ram2 \ d = ram1") apply simp apply simp apply (rule disjI1) apply (case_tac "c = ram1 \ d = hd (between (vertices f) ram1 ram2)") apply (case_tac "between (vertices f) ram1 ram2") apply simp apply (force simp: is_sublist_def) apply simp apply (case_tac "c = last (between (vertices f) ram1 ram2) \ d = ram2") apply (case_tac "between (vertices f) ram1 ram2" rule: rev_exhaust) apply simp apply (simp add: is_sublist_def) apply (intro exI) apply (subgoal_tac "ram1 # ys @ [y, ram2] = (ram1 # ys) @ y # ram2 # []") apply assumption apply simp apply simp apply (simp add: Edges_def) apply (subgoal_tac "is_sublist [c, d] ([ram1] @ between (vertices f) ram1 ram2 @ [ram2])") apply simp apply (rule is_sublist_add) apply simp by (simp add: pre_split_face_def) qed lemma split_face_edges_f: assumes vors: "pre_split_face f ram1 ram2 vs" "vs1 = between (vertices f) ram1 ram2" "vs1 \ []" "vs2 = between (vertices f) ram2 ram1" "vs2 \ []" shows "edges f = {(last vs2, ram1) , (ram1, hd vs1), (last vs1, ram2), (ram2, hd vs2)} \ Edges vs1 \ Edges vs2" (is "?lhs = ?rhs") proof (intro equalityI subsetI) 
fix x assume x: "x \ ?lhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from vors have dist_vs1: "distinct (ram1 # vs1 @ [ram2])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) by (simp add: pre_split_face_def) from vors have dist_vs2: "distinct (ram2 # vs2 @ [ram1])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) apply (rule not_sym) by (simp add: pre_split_face_def) from x vors show "x \ ?rhs" apply (simp add: dist_f) apply (subgoal_tac "pre_between (vertices f) ram1 ram2") apply (drule is_nextElem_or) apply assumption apply (simp add: Edges_def) apply (case_tac "is_sublist [c, d] (ram1 # between (vertices f) ram1 ram2 @ [ram2])") apply simp apply (case_tac "c = ram1") apply (case_tac "between (vertices f) ram1 ram2") apply simp apply simp apply (drule is_sublist_distinct_prefix) apply (subgoal_tac "distinct (ram1 # vs1 @ [ram2])") apply simp apply (rule dist_vs1) apply simp apply (case_tac "c = ram2") apply (subgoal_tac "\ is_sublist [c, d] (ram1 # vs1 @ [ram2])") apply simp apply (rule is_sublist_notlast) apply (rule_tac dist_vs1) apply simp apply simp apply (simp add: is_sublist_def) apply (elim exE) apply (case_tac "between (vertices f) ram1 ram2" rule: rev_exhaust) apply simp apply simp apply (case_tac "bs" rule: rev_exhaust) apply simp apply simp apply (rule disjI2) apply (rule disjI2) apply (rule disjI1) apply (intro exI) apply simp apply simp apply (case_tac "c = ram2") apply (case_tac "between (vertices f) ram2 ram1") apply simp apply simp apply (drule is_sublist_distinct_prefix) apply (subgoal_tac "distinct (ram2 # vs2 @ [ram1])") apply simp apply (rule dist_vs2) apply simp apply (case_tac "c = ram1") apply (subgoal_tac "\ is_sublist [c, d] (ram2 # vs2 @ [ram1])") apply simp apply (rule is_sublist_notlast) apply (rule_tac dist_vs2) apply simp apply simp apply (simp add: is_sublist_def) apply (elim exE) apply (case_tac "between (vertices f) ram2 ram1" rule: rev_exhaust) apply simp apply simp apply (case_tac "bs" rule: rev_exhaust) apply simp apply simp apply (rule disjI2) apply (rule disjI2) apply (rule disjI2) apply (intro exI) apply simp apply (rule pre_split_face_p_between) by simp next fix x assume x: "x \ ?rhs" define c where "c = fst x" define d where "d = snd x" with c_def have [simp]: "x = (c,d)" by simp from vors have dist_f: "distinct (vertices f)" by (simp add: pre_split_face_def) from vors have dist_vs1: "distinct (ram1 # vs1 @ [ram2])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) by (simp add: pre_split_face_def) from vors have dist_vs2: "distinct (ram2 # vs2 @ [ram1])" apply (simp only:) apply (rule between_distinct_r12) apply (rule dist_f) apply (rule not_sym) by (simp add: pre_split_face_def) from x vors show "x \ ?lhs" apply (simp add: dist_f) apply (subgoal_tac "ram1 \ \ f") apply (simp add: verticesFrom_is_nextElem verticesFrom_ram1) apply (simp add: is_nextElem_def) apply (case_tac "c = last (between (vertices f) ram2 ram1) \ d = ram1") apply simp apply simp apply (rule disjI1) apply (case_tac "c = ram1 \ d = hd (between (vertices f) ram1 ram2)") apply (case_tac "between (vertices f) ram1 ram2") apply simp apply (force simp: is_sublist_def) apply simp apply (case_tac "c = last (between (vertices f) ram1 ram2) \ d = ram2") apply (case_tac "between (vertices f) ram1 ram2" rule: rev_exhaust) apply simp apply (simp add: is_sublist_def) apply (intro exI) 
apply (subgoal_tac "ram1 # ys @ y # ram2 # between (vertices f) ram2 ram1 = (ram1 # ys) @ y # ram2 # (between (vertices f) ram2 ram1)") apply assumption apply simp apply simp apply (case_tac "c = ram2 \ d = hd (between (vertices f) ram2 ram1)") apply (case_tac "between (vertices f) ram2 ram1") apply simp apply (simp add: is_sublist_def) apply (intro exI) apply (subgoal_tac "ram1 # between (vertices f) ram1 ram2 @ ram2 # a # list = (ram1 # between (vertices f) ram1 ram2) @ ram2 # a # (list)") apply assumption apply simp apply simp apply (case_tac "(c, d) \ Edges (between (vertices f) ram1 ram2)") apply (simp add: Edges_def) apply (subgoal_tac "is_sublist [c, d] ([ram1] @ between (vertices f) ram1 ram2 @ (ram2 # between (vertices f) ram2 ram1))") apply simp apply (rule is_sublist_add) apply simp apply simp apply (subgoal_tac "is_sublist [c, d] ((ram1 # between (vertices f) ram1 ram2 @ [ram2]) @ between (vertices f) ram2 ram1 @ [])") apply simp apply (rule is_sublist_add) apply (simp add: Edges_def) by (simp add: pre_split_face_def) qed lemma split_face_edges_f12_f21: "pre_split_face f ram1 ram2 vs \ (f12, f21) = split_face f ram1 ram2 vs \ vs \ [] \ edges f12 \ edges f21 = edges f \ {(hd vs, ram1), (ram1, hd vs), (last vs, ram2), (ram2, last vs)} \ Edges vs \ Edges (rev vs)" apply (case_tac "between (vertices f) ram1 ram2 = []") apply (case_tac "between (vertices f) ram2 ram1 = []") apply (simp add: split_face_edges_f12_bet split_face_edges_f21_bet split_face_edges_f_vs1_vs2) apply force apply (simp add: split_face_edges_f12_bet split_face_edges_f21 split_face_edges_f_vs1) apply force apply (case_tac "between (vertices f) ram2 ram1 = []") apply (simp add: split_face_edges_f21_bet split_face_edges_f12 split_face_edges_f_vs2) apply force apply (simp add: split_face_edges_f21 split_face_edges_f12 split_face_edges_f) by force lemma split_face_edges_f12_f21_vs: "pre_split_face f ram1 ram2 [] \ (f12, f21) = split_face f ram1 ram2 [] \ edges f12 \ edges f21 = edges f \ {(ram2, ram1), (ram1, ram2)}" apply (case_tac "between (vertices f) ram1 ram2 = []") apply (case_tac "between (vertices f) ram2 ram1 = []") apply (simp add: split_face_edges_f12_bet_vs split_face_edges_f21_bet_vs split_face_edges_f_vs1_vs2) apply force apply (simp add: split_face_edges_f12_bet_vs split_face_edges_f21_vs split_face_edges_f_vs1) apply force apply (case_tac "between (vertices f) ram2 ram1 = []") apply (simp add: split_face_edges_f21_bet_vs split_face_edges_f12_vs split_face_edges_f_vs2) apply force apply (simp add: split_face_edges_f21_vs split_face_edges_f12_vs split_face_edges_f) by force lemma split_face_edges_f12_f21_sym: "f \ \ g \ pre_split_face f ram1 ram2 vs \ (f12, f21) = split_face f ram1 ram2 vs \ ((a,b) \ edges f12 \ (a,b) \ edges f21) = ((a,b) \ edges f \ (((b,a) \ edges f12 \ (b,a) \ edges f21) \ ((a,b) \ edges f12 \ (a,b) \ edges f21)))" apply (subgoal_tac "((a,b) \ edges f12 \ edges f21) = ((a,b) \ edges f \ ((b,a) \ edges f12 \ edges f21) \ (a,b) \ edges f12 \ edges f21)") apply force apply (case_tac "vs = []") apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (drule split_face_edges_f12_f21_vs) apply simp apply simp apply force apply simp apply (drule split_face_edges_f12_f21) apply simp apply simp apply simp by force lemma splitFace_edges_g'_help: "pre_splitFace g ram1 ram2 f vs \ (f12, f21, g') = splitFace g ram1 ram2 f vs \ vs \ [] \ edges g' = edges g \ edges f \ Edges vs \ Edges(rev vs) \ {(ram2, last vs), (hd vs, ram1), (ram1, hd vs), (last vs, ram2)}" proof - assume pre: "pre_splitFace g 
ram1 ram2 f vs" and fdg: "(f12, f21, g') = splitFace g ram1 ram2 f vs" and vs_neq: "vs \ []" from pre fdg have split: "(f12, f21) = split_face f ram1 ram2 vs" apply (unfold pre_splitFace_def) apply (elim conjE) by (simp add: splitFace_split_face) from fdg pre have "edges g' = (\\<^bsub>a\set (replace f [f21] (faces g))\<^esub> edges a) \ edges (f12)" by(auto simp: splitFace_def split_def edges_graph_def) with pre vs_neq show ?thesis apply (simp add: UNION_eq) apply (rule equalityI) apply simp apply (rule conjI) apply (rule subsetI) apply simp apply (erule bexE) apply (drule replace5) apply (case_tac "xa \ \ g") apply simp apply (subgoal_tac "x \ edges g") apply simp apply (simp add: edges_graph_def) apply force apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 vs") apply (case_tac "between (vertices f) ram2 ram1 = []") apply (frule split_face_edges_f21_bet) apply (rule split) apply simp apply simp apply (case_tac "between (vertices f) ram1 ram2 = []") apply (frule split_face_edges_f_vs1_vs2) apply simp apply simp apply simp apply force apply (frule split_face_edges_f_vs2) apply simp apply simp apply simp apply force apply (frule split_face_edges_f21) apply (rule split) apply simp apply simp apply simp apply (case_tac "between (vertices f) ram1 ram2 = []") apply (frule split_face_edges_f_vs1) apply simp apply simp apply simp apply simp apply force apply (frule split_face_edges_f) apply simp apply simp apply simp apply simp apply force apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 vs") apply (case_tac "between (vertices f) ram1 ram2 = []") apply (frule split_face_edges_f12_bet) apply (rule split) apply simp apply simp apply (case_tac "between (vertices f) ram2 ram1 = []") apply (frule split_face_edges_f_vs1_vs2) apply simp apply simp apply simp apply force apply (frule split_face_edges_f_vs1) apply simp apply simp apply simp apply force apply (frule split_face_edges_f12) apply (rule split) apply simp apply simp apply simp apply (case_tac "between (vertices f) ram2 ram1 = []") apply (frule split_face_edges_f_vs2) apply simp apply simp apply simp apply simp apply force apply (frule split_face_edges_f) apply simp apply simp apply simp apply simp apply force apply simp apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 vs") apply (subgoal_tac "(ram2, last vs) \ edges f12 \ (hd vs, ram1) \ edges f12") apply (rule conjI) apply simp apply (rule conjI) apply simp apply (subgoal_tac "(ram1, hd vs) \ edges f21 \ (last vs, ram2) \ edges f21") apply (rule conjI) apply (rule disjI1) apply (rule bexI) apply (elim conjE) apply simp apply (rule replace3) apply(erule pre_splitFace_oldF) apply simp apply (rule conjI) apply (rule disjI1) apply (rule bexI) apply (elim conjE) apply simp apply (rule replace3) apply(erule pre_splitFace_oldF) apply simp apply (subgoal_tac "edges f \ {y. \x\set (replace f [f21] (faces g)). y \ edges x} \ edges f12") apply (subgoal_tac "edges g \ {y. \x\set (replace f [f21] (faces g)). y \ edges x} \ edges f12") apply (rule conjI) apply simp apply (rule conjI) apply simp apply (subgoal_tac "Edges(rev vs) \ edges f12") apply (rule conjI) prefer 2 apply blast apply (subgoal_tac "Edges vs \ edges f21") apply (subgoal_tac "Edges vs \ {y. \x\set (replace f [f21] (faces g)). 
y \ edges x}") apply blast apply (rule subset_trans) apply assumption apply (rule subsetI) apply simp apply (rule bexI) apply simp apply (rule replace3) apply(erule pre_splitFace_oldF) apply simp (* jetzt alle 7 subgoals aufloesen *) apply (frule split_face_edges_f21_subset) apply (rule split) apply simp apply simp apply (frule split_face_edges_f12_subset) apply (rule split) apply simp apply simp apply (simp add: edges_graph_def) apply (rule subsetI) apply simp apply (elim bexE) apply (case_tac "xa = f") apply simp apply blast apply (rule disjI1) apply (rule bexI) apply simp apply (rule replace4) apply simp apply force apply (rule subsetI) apply (subgoal_tac "\ u v. x = (u,v)") apply (elim exE conjE) apply (frule split_face_edges_or [OF split]) apply simp apply (case_tac "(u, v) \ edges f12") apply simp apply simp apply (rule bexI) apply (thin_tac "(u, v) \ edges f") apply assumption apply (rule replace3) apply(erule pre_splitFace_oldF) apply simp apply simp apply (frule split_face_edges_f21_subset) apply (rule split) apply simp apply simp apply (frule split_face_edges_f12_subset) apply (rule split) apply simp apply simp by simp qed lemma pre_splitFace_edges_f_in_g: "pre_splitFace g ram1 ram2 f vs \ edges f \ edges g" apply (simp add: edges_graph_def) by (force) lemma pre_splitFace_edges_f_in_g2: "pre_splitFace g ram1 ram2 f vs \ x \ edges f \ x \ edges g" apply (simp add: edges_graph_def) by (force) lemma splitFace_edges_g': "pre_splitFace g ram1 ram2 f vs \ (f12, f21, g') = splitFace g ram1 ram2 f vs \ vs \ [] \ edges g' = edges g \ Edges vs \ Edges(rev vs) \ {(ram2, last vs), (hd vs, ram1), (ram1, hd vs), (last vs, ram2)}" apply (subgoal_tac "edges g \ edges f = edges g") apply (frule splitFace_edges_g'_help) apply simp apply simp apply simp apply (frule pre_splitFace_edges_f_in_g) by blast lemma splitFace_edges_g'_vs: "pre_splitFace g ram1 ram2 f [] \ (f12, f21, g') = splitFace g ram1 ram2 f [] \ edges g' = edges g \ {(ram1, ram2), (ram2, ram1)}" proof - assume pre: "pre_splitFace g ram1 ram2 f []" and fdg: "(f12, f21, g') = splitFace g ram1 ram2 f []" from pre fdg have split: "(f12, f21) = split_face f ram1 ram2 []" apply (unfold pre_splitFace_def) apply (elim conjE) by (simp add: splitFace_split_face) from fdg pre have "edges g' = (\\<^bsub>a\set (replace f [f21] (faces g))\<^esub> edges a) \ edges (f12)" by (auto simp: splitFace_def split_def edges_graph_def) with pre show ?thesis apply (simp add: UNION_eq) apply (rule equalityI) apply simp apply (rule conjI) apply (rule subsetI) apply simp apply (erule bexE) apply (drule replace5) apply (case_tac "xa \ \ g") apply simp apply (subgoal_tac "x \ edges g") apply simp apply (simp add: edges_graph_def) apply force apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (case_tac "between (vertices f) ram2 ram1 = []") apply (simp add: pre_FaceDiv_between2) apply (frule split_face_edges_f21_vs) apply (rule split) apply simp apply simp apply simp apply (case_tac "x = (ram1, ram2)") apply simp apply simp apply (rule disjI2) apply (rule pre_splitFace_edges_f_in_g2) apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (frule split_face_edges_f) apply simp apply simp apply (rule pre_FaceDiv_between1) apply simp apply simp apply simp apply force apply simp apply simp apply (rule subsetI) apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (case_tac "between (vertices f) ram1 ram2 = []") apply (simp add: pre_FaceDiv_between1) apply (frule split_face_edges_f12_vs) apply (rule split) apply simp apply 
simp apply simp apply (case_tac "x = (ram2, ram1)") apply simp apply simp apply (rule disjI2) apply (rule pre_splitFace_edges_f_in_g2) apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (frule split_face_edges_f) apply simp apply simp apply simp apply (rule pre_FaceDiv_between2) apply simp apply simp apply force apply simp apply simp apply simp apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (subgoal_tac "(ram1, ram2) \ edges f21") apply (rule conjI) apply (rule disjI1) apply (rule bexI) apply simp apply (force) apply (subgoal_tac "(ram2, ram1) \ edges f12") apply (rule conjI) apply force apply (rule subsetI) apply (simp add: edges_graph_def) apply (elim bexE) apply (case_tac "xa = f") apply simp apply (subgoal_tac "\ u v. x = (u,v)") apply (elim exE conjE) apply (subgoal_tac "pre_split_face f ram1 ram2 []") apply (frule split_face_edges_or [OF split]) apply simp apply (case_tac "(u, v) \ edges f12") apply simp apply simp apply force apply simp apply simp apply (rule disjI1) apply (rule bexI) apply simp apply (rule replace4) apply simp apply force apply (frule split_face_edges_f12_vs) apply simp apply (rule split) apply simp apply (rule pre_FaceDiv_between1) apply simp apply simp apply (frule split_face_edges_f21_vs) apply simp apply (rule split) apply simp apply (rule pre_FaceDiv_between2) apply simp apply simp by simp qed lemma splitFace_edges_incr: "pre_splitFace g ram1 ram2 f vs \ (f\<^sub>1, f\<^sub>2, g') = splitFace g ram1 ram2 f vs \ edges g \ edges g'" apply(cases vs) apply(simp add:splitFace_edges_g'_vs) apply blast apply(simp add:splitFace_edges_g') apply blast done lemma snd_snd_splitFace_edges_incr: "pre_splitFace g v\<^sub>1 v\<^sub>2 f vs \ edges g \ edges(snd(snd(splitFace g v\<^sub>1 v\<^sub>2 f vs)))" apply(erule splitFace_edges_incr [where f\<^sub>1 = "fst(splitFace g v\<^sub>1 v\<^sub>2 f vs)" and f\<^sub>2 = "fst(snd(splitFace g v\<^sub>1 v\<^sub>2 f vs))"]) apply(auto) done (********************* Vorbereitung für subdivFace *****************) (**** computes the list of ram vertices **********) subsection \\removeNones\\ definition removeNones :: "'a option list \ 'a list" where "removeNones vOptionList \ [the x. x \ vOptionList, x \ None]" (* "removeNones vOptionList \ map the [x \ vOptionList. 
x \ None]" *) declare removeNones_def [simp] lemma removeNones_inI[intro]: "Some a \ set ls \ a \ set (removeNones ls)" by (induct ls) auto lemma removeNones_hd[simp]: "removeNones ( Some a # ls) = a # removeNones ls" by auto lemma removeNones_last[simp]: "removeNones (ls @ [Some a]) = removeNones ls @ [a]" by auto lemma removeNones_in[simp]: "removeNones (as @ Some a # bs) = removeNones as @ a # removeNones bs" by auto lemma removeNones_none_hd[simp]: "removeNones ( None # ls) = removeNones ls" by auto lemma removeNones_none_last[simp]: "removeNones (ls @ [None]) = removeNones ls" by auto lemma removeNones_none_in[simp]: "removeNones (as @ None # bs) = removeNones (as @ bs)" by auto lemma removeNones_empty[simp]: "removeNones [] = []" by auto declare removeNones_def [simp del] (************* natToVertexList ************) subsection\\natToVertexList\\ (* Hilfskonstrukt natToVertexList *) primrec natToVertexListRec :: "nat \ vertex \ face \ nat list \ vertex option list" where "natToVertexListRec old v f [] = []" | "natToVertexListRec old v f (i#is) = (if i = old then None#natToVertexListRec i v f is else Some (f\<^bsup>i\<^esup> \ v) # natToVertexListRec i v f is)" primrec natToVertexList :: "vertex \ face \ nat list \ vertex option list" where "natToVertexList v f [] = []" | "natToVertexList v f (i#is) = (if i = 0 then (Some v)#(natToVertexListRec i v f is) else [])" subsection \@{const indexToVertexList}\ lemma nextVertex_inj: "distinct (vertices f) \ v \ \ f \ i < length (vertices (f::face)) \ a < length (vertices f) \ f\<^bsup>a\<^esup>\v = f\<^bsup>i\<^esup>\v \ i = a" proof - assume d: "distinct (vertices f)" and v: "v \ \ f" and i: "i < length (vertices (f::face))" and a: "a < length (vertices f)" and eq:" f\<^bsup>a\<^esup>\v = f\<^bsup>i\<^esup>\v" then have eq: "(verticesFrom f v)!a = (verticesFrom f v)!i " by (simp add: verticesFrom_nth) define xs where "xs = verticesFrom f v" with eq have eq: "xs!a = xs!i" by auto from d v have z: "distinct (verticesFrom f v)" by auto moreover from xs_def a v d have "(verticesFrom f v) = take a xs @ xs ! a # drop (Suc a) xs" by (auto intro: id_take_nth_drop simp: verticesFrom_length) with eq have "(verticesFrom f v) = take a xs @ xs ! i # drop (Suc a) xs" by simp moreover from xs_def i v d have "(verticesFrom f v) = take i xs @ xs ! i # drop (Suc i) xs" by (auto intro: id_take_nth_drop simp: verticesFrom_length) ultimately have "take a xs = take i xs" by (rule dist_at1) moreover from v d have vertFrom[simp]: "length (vertices f) = length (verticesFrom f v)" by (auto simp: verticesFrom_length) from xs_def a i have "a < length xs" "i < length xs" by auto moreover have "\ a i. a < length xs \ i < length xs \ take a xs = take i xs \ a = i" proof (induct xs) case Nil then show ?case by auto next case (Cons x xs) then show ?case apply (cases a) apply auto apply (cases i) apply auto apply (cases i) by auto qed ultimately show ?thesis by simp qed lemma a: "distinct (vertices f) \ v \ \ f \ (\i \ set is. i < length (vertices f)) \ (\a. a < length (vertices f) \ hideDupsRec ((f \ ^^ a) v) [(f \ ^^ k) v. k \ is] = natToVertexListRec a v f is)" proof (induct "is") case Nil then show ?case by simp next case (Cons i "is") then show ?case by (auto simp: nextVertices_def intro: nextVertex_inj) qed lemma indexToVertexList_natToVertexList_eq: "distinct (vertices f) \ v \ \ f \ (\i \ set is. 
i < length (vertices f)) \ is \ [] \ hd is = 0 \ indexToVertexList f v is = natToVertexList v f is" apply (cases "is") by (auto simp: a [where a = 0, simplified] indexToVertexList_def nextVertices_def) (********************* Einschub Ende ***************************************) (******** natToVertexListRec ************) lemma nvlr_length: "\ old. (length (natToVertexListRec old v f ls)) = length ls" apply (induct ls) by auto lemma nvl_length[simp]: "hd e = 0 \ length (natToVertexList v f e) = length e" apply (cases "e") by (auto intro: nvlr_length) lemma natToVertexListRec_length[simp]: "\ e f. length (natToVertexListRec e v f es) = length es" by (induct es) auto lemma natToVertexList_length[simp]: "incrIndexList es (length es) (length (vertices f)) \ length (natToVertexList v f es) = length es" apply (case_tac es) by simp_all lemma natToVertexList_nth_Suc: "incrIndexList es (length es) (length (vertices f)) \ Suc n < length es \ (natToVertexList v f es)!(Suc n) = (if (es!n = es!(Suc n)) then None else Some (f\<^bsup>(es!Suc n)\<^esup> \ v))" proof - assume incr: "incrIndexList es (length es) (length (vertices f))" and n: "Suc n < length es" have rec: "\ old n. Suc n < length es \ (natToVertexListRec old v f es)!(Suc n) = (if (es!n = es!(Suc n)) then None else Some (f\<^bsup>(es!Suc n)\<^esup> \ v))" proof (induct es) case Nil then show ?case by auto next case (Cons e es) note cons1 = this then show ?case proof (cases es) case Nil with cons1 show ?thesis by simp next case (Cons e' es') with cons1 show ?thesis proof (cases n) case 0 with Cons cons1 show ?thesis by simp next case (Suc m) with Cons cons1 have "\ old. natToVertexListRec old v f es ! Suc m = (if es ! m = es ! Suc m then None else Some (f\<^bsup>es ! Suc m\<^esup> \ v))" by (rule_tac cons1) auto then show ?thesis apply (cases "e = old") by (simp_all add: Suc) qed qed qed with n have "natToVertexListRec 0 v f es ! Suc n = (if es ! n = es ! Suc n then None else Some (f\<^bsup>es ! 
Suc n\<^esup> \ v))" by (rule_tac rec) auto with incr show ?thesis by (cases es) auto qed lemma natToVertexList_nth_0: "incrIndexList es (length es) (length (vertices f)) \ 0 < length es \ (natToVertexList v f es)!0 = Some (f\<^bsup>(es!0)\<^esup> \ v)" apply (cases es) apply (simp_all add: nextVertices_def) by (subgoal_tac "a = 0") auto lemma natToVertexList_hd[simp]: "incrIndexList es (length es) (length (vertices f)) \ hd (natToVertexList v f es) = Some v" apply (cases es) by (simp_all add: nextVertices_def) lemma nth_last[intro]: "Suc i = length xs \ xs!i = last xs" by (cases xs rule: rev_exhaust) auto declare incrIndexList_help4 [simp del] lemma natToVertexList_last[simp]: "distinct (vertices f) \ v \ \ f \ incrIndexList es (length es) (length (vertices f)) \ last (natToVertexList v f es) = Some (last (verticesFrom f v))" proof - assume vors: "distinct (vertices f)" "v \ \ f" and incr: "incrIndexList es (length es) (length (vertices f))" define n' where "n' = length es - 2" from incr have "1 < length es" by auto with n'_def have n'l: "Suc (Suc n') = length es" by arith from incr n'l have last_ntvl: "(natToVertexList v f es)!(Suc n') = last (natToVertexList v f es)" by auto from n'l have last_es: "es!(Suc n') = last es" by auto from n'l have "es!n' = last (butlast es)" apply (cases es rule: rev_exhaust) by (auto simp: nth_append) with last_es incr have less: "es!n' < es!(Suc n')" by auto from n'l have "Suc n' < length es" by arith with incr less have "(natToVertexList v f es)!(Suc n') = (Some (f\<^bsup>(es!Suc n')\<^esup> \ v))" by (auto dest: natToVertexList_nth_Suc) with incr last_ntvl last_es have rule1: "last (natToVertexList v f es) = Some (f\<^bsup>((length (vertices f)) - (Suc 0))\<^esup> \ v)" by auto from incr have lvf: "1 < length (vertices f)" by auto with vors have rule2: "verticesFrom f v ! ((length (vertices f)) - (Suc 0)) = f\<^bsup>((length (vertices f)) - (Suc 0))\<^esup> \ v" by (auto intro!: verticesFrom_nth) from vors lvf have "verticesFrom f v ! ((length (vertices f)) - (Suc 0)) = last (verticesFrom f v)" apply (rule_tac nth_last) by (auto simp: verticesFrom_length) with rule1 rule2 show ?thesis by auto qed lemma indexToVertexList_last[simp]: "distinct (vertices f) \ v \ \ f \ incrIndexList es (length es) (length (vertices f)) \ last (indexToVertexList f v es) = Some (last (verticesFrom f v))" apply (subgoal_tac "indexToVertexList f v es = natToVertexList v f es") apply simp apply (rule indexToVertexList_natToVertexList_eq) by auto lemma nths_take: "\ n iset. \ i \ iset. i < n \ nths (take n xs) iset = nths xs iset" proof (induct xs) case Nil then show ?case by simp next case (Cons x xs) then show ?case apply (simp add: nths_Cons) apply (cases n) apply simp apply (simp add: nths_Cons) apply (rule Cons) by auto qed lemma nths_reduceIndices: "\ iset. nths xs iset = nths xs {i. i < length xs \ i \ iset}" proof (induct xs) case Nil then show ?case by simp next case (Cons x xs) then have "nths xs {j. Suc j \ iset} = nths xs {i. i < length xs \ i \ {j. Suc j \ iset}}" by (rule_tac Cons) then show ?case by (simp add: nths_Cons) qed lemma natToVertexList_nths1: "distinct (vertices f) \ v \ \ f \ vs = verticesFrom f v \ incrIndexList es (length es) (length vs) \ n \ length es \ nths (take (Suc (es!(n - 1))) vs) (set (take n es)) = removeNones (take n (natToVertexList v f es))" proof (induct n) case 0 then show ?case by simp next case (Suc n) then have "nths (take (Suc (es ! 
(n - Suc 0))) (verticesFrom f v)) (set (take n es)) = removeNones (take n (natToVertexList v f es))" "distinct (vertices f)" "v \ \ f" "vs = verticesFrom f v" "incrIndexList es (length es) (length (verticesFrom f v))" "Suc n \ length es" by auto (* does this improve the performance? *) note suc1 = this then have lvs: "length vs = length (vertices f)" by (auto intro: verticesFrom_length) with suc1 have vsne: "vs \ []" by auto with suc1 show ?case proof (cases "natToVertexList v f es ! n") case None then show ?thesis proof (cases n) case 0 with None suc1 lvs show ?thesis by (simp add: take_Suc_conv_app_nth natToVertexList_nth_0) next case (Suc n') with None suc1 lvs have esn: "es!n = es!n'" by (simp add: natToVertexList_nth_Suc split: if_split_asm) from Suc have n': "n - Suc 0 = n'" by auto show ?thesis proof (cases "Suc n = length es") case True then have small_n: "n < length es" by auto from True have "take (Suc n) es = es" by auto with small_n have "take n es @ [es!n] = es" by (simp add: take_Suc_conv_app_nth) then have esn_simps: "take n es = butlast es \ es!n = last es" by (cases es rule: rev_exhaust) auto from True Suc have n'l: "Suc n' = length (butlast es)" by auto then have small_n': "n' < length (butlast es)" by auto from Suc small_n have take_n': "take (Suc n') (butlast es @ [last es]) = take (Suc n') (butlast es)" by auto from small_n have es_exh: "es = butlast es @ [last es]" by (cases es rule: rev_exhaust) auto from n'l have "take (Suc n') (butlast es @ [last es]) = butlast es" by auto with es_exh have "take (Suc n') es = butlast es" by auto with small_n Suc have "take n' es @ [es!n'] = (butlast es)" by (simp add: take_Suc_conv_app_nth) with small_n' have esn'_simps: "take n' es = butlast (butlast es) \ es!n' = last (butlast es)" by (cases "butlast es" rule: rev_exhaust) auto from suc1 have "last (butlast es) < last es" by auto with esn esn_simps esn'_simps have False by auto (* have "last (butlast es) = last es" by auto *) then show ?thesis by auto next case False with suc1 have le: "Suc n < length es" by auto from suc1 le have "es = take (Suc n) es @ es!(Suc n) # drop (Suc (Suc n)) es" by (auto intro: id_take_nth_drop) with suc1 have "increasing (take (Suc n) es @ es!(Suc n) # drop (Suc (Suc n)) es)" by auto then have "\ i \ (set (take (Suc n) es)). i \ es ! (Suc n)" by (auto intro: increasing2) with suc1 have "\ i \ (set (take n es)). i \ es ! (Suc n)" by (simp add: take_Suc_conv_app_nth) then have seq: "nths (take (Suc (es ! Suc n)) (verticesFrom f v)) (set (take n es)) = nths (verticesFrom f v) (set (take n es))" apply (rule_tac nths_take) by auto from suc1 have "es = take n es @ es!n # drop (Suc n) es" by (auto intro: id_take_nth_drop) with suc1 have "increasing (take n es @ es!n # drop (Suc n) es)" by auto then have "\ i \ (set (take n es)). i \ es ! n" by (auto intro: increasing2) with suc1 esn have "\ i \ (set (take n es)). i \ es ! n'" by (simp add: take_Suc_conv_app_nth) with Suc have seq2: "nths (take (Suc (es ! n')) (verticesFrom f v)) (set (take n es)) = nths (verticesFrom f v) (set (take n es))" apply (rule_tac nths_take) by auto from Suc suc1 have "(insert (es ! 
n') (set (take n es))) = set (take n es)" apply auto by (simp add: take_Suc_conv_app_nth) with esn None suc1 seq seq2 n' show ?thesis by (simp add: take_Suc_conv_app_nth) qed qed next case (Some v') then show ?thesis proof (cases n) case 0 from suc1 lvs have "verticesFrom f v \ []" by auto then have "verticesFrom f v = hd (verticesFrom f v) # tl (verticesFrom f v)" by auto then have "verticesFrom f v = v # tl (verticesFrom f v)" by (simp add: verticesFrom_hd) then obtain z where "verticesFrom f v = v # z" by auto then have sub: "nths (verticesFrom f v) {0} = [v]" by (auto simp: nths_Cons) from 0 suc1 have "es!0 = 0" by (cases es) auto with 0 Some suc1 lvs sub vsne show ?thesis by (simp add: take_Suc_conv_app_nth natToVertexList_nth_0 nextVertices_def take_Suc nths_Cons verticesFrom_hd del:verticesFrom_empty) next case (Suc n') with Some suc1 lvs have esn: "es!n \ es!n'" by (simp add: natToVertexList_nth_Suc split: if_split_asm) from suc1 Suc have "Suc n' < length es" by auto with suc1 lvs esn have "natToVertexList v f es !(Suc n') = Some (f\<^bsup>(es!(Suc n'))\<^esup> \ v)" apply (simp add: natToVertexList_nth_Suc) by (simp add: Suc) with Suc have "natToVertexList v f es ! n = Some (f\<^bsup>(es!n)\<^esup> \ v)" by auto with Some have v': "v' = f\<^bsup>(es!n)\<^esup> \ v" by simp from Suc have n': "n - Suc 0 = n'" by auto from suc1 Suc have "es = take (Suc n') es @ es!n # drop (Suc n) es" by (auto intro: id_take_nth_drop) with suc1 have "increasing (take (Suc n') es @ es!n # drop (Suc n) es)" by auto with suc1 Suc have "es!n' \ es!n" apply (auto intro!: increasing2) by (auto simp: take_Suc_conv_app_nth) with esn have smaller_n: "es!n' < es!n" by auto from suc1 lvs have smaller: "(es!n) < length vs" by auto from suc1 smaller lvs have "(verticesFrom f v)!(es!n) = f\<^bsup>(es!n)\<^esup> \ v" by (auto intro: verticesFrom_nth) with v' have "(verticesFrom f v)!(es!n) = v'" by auto then have sub1: "nths ([((verticesFrom f v)!(es!n))]) {j. j + (es!n) : (insert (es ! n) (set (take n es)))} = [v']" by auto from suc1 smaller lvs have len: "length (take (es ! n) (verticesFrom f v)) = es!n" by auto have "\x. x \ (set (take n es)) \ x < (es ! n)" proof - fix x assume x: "x \ set (take n es)" from suc1 Suc have "es = take n' es @ es!n' # drop (Suc n') es" by (auto intro: id_take_nth_drop) with suc1 have "increasing (take n' es @ es!n' # drop (Suc n') es)" by auto then have "\ x. x \ set (take n' es) \ x \ es!n'" by (auto intro!: increasing2) with x Suc suc1 have "x \ es!n'" by (auto simp: take_Suc_conv_app_nth) with smaller_n show "x < es!n" by auto qed then have "{i. i < es ! n \ i \ set (take n es)} = (set (take n es))" by auto then have elim_insert: "{i. i < es ! n \ i \ insert (es ! n) (set (take n es))} = (set (take n es))" by auto have "nths (take (es ! n) (verticesFrom f v)) (insert (es ! n) (set (take n es))) = nths (take (es ! n) (verticesFrom f v)) {i. i < length (take (es ! n) (verticesFrom f v)) \ i \ (insert (es ! n) (set (take n es)))}" by (rule nths_reduceIndices) with len have "nths (take (es ! n) (verticesFrom f v)) (insert (es ! n) (set (take n es))) = nths (take (es ! n) (verticesFrom f v)) {i. i < (es ! n) \ i \ (insert (es ! n) (set (take n es)))}" by simp with elim_insert have sub2: "nths (take (es ! n) (verticesFrom f v)) (insert (es ! n) (set (take n es))) = nths (take (es ! n) (verticesFrom f v)) (set (take n es))" by simp define m where "m = es!n - es!n'" with smaller_n have mgz: "0 < m" by auto with m_def have esn: "es!n = (es!n') + m" by auto have helper: "\x. 
x \ (set (take n es)) \ x \ (es ! n')" proof - fix x assume x: "x \ set (take n es)" from suc1 Suc have "es = take n' es @ es!n' # drop (Suc n') es" by (auto intro: id_take_nth_drop) with suc1 have "increasing (take n' es @ es!n' # drop (Suc n') es)" by auto then have "\ x. x \ set (take n' es) \ x \ es!n'" by (auto intro!: increasing2) with x Suc suc1 show "x \ es!n'" by (auto simp: take_Suc_conv_app_nth) qed define m' where "m' = m - 1" define Suc_es_n' where "Suc_es_n' = Suc (es!n')" from smaller smaller_n have "Suc (es!n') < length vs" by auto then have "min (length vs) (Suc (es ! n')) = Suc (es!n')" by arith with Suc_es_n'_def have empty: "{j. j + length (take Suc_es_n' vs) \ set (take n es)} = {}" apply auto apply (frule helper) by arith from Suc_es_n'_def mgz esn m'_def have esn': "es!n = Suc_es_n' + m'" by auto with smaller have "(take (Suc_es_n' + m') vs) = take (Suc_es_n') vs @ take m' (drop (Suc_es_n') vs)" by (auto intro: take_add) with esn' have "nths (take (es ! n) vs) (set (take n es)) = nths (take (Suc_es_n') vs @ take m' (drop (Suc_es_n') vs)) (set (take n es))" by auto then have "nths (take (es ! n) vs) (set (take n es)) = nths (take (Suc_es_n') vs) (set (take n es)) @ nths (take m' (drop (Suc_es_n') vs)) {j. j + length (take (Suc_es_n') vs) : (set (take n es))}" by (simp add: nths_append) with empty Suc_es_n'_def have "nths (take (es ! n) vs) (set (take n es)) = nths (take (Suc (es!n')) vs) (set (take n es))" by simp with suc1 sub2 have sub3: "nths (take (es ! n) (verticesFrom f v)) (insert (es ! n) (set (take n es))) = nths (take (Suc (es!n')) (verticesFrom f v)) (set (take n es))" by simp from smaller suc1 have "take (Suc (es ! n)) (verticesFrom f v) = take (es ! n) (verticesFrom f v) @ [((verticesFrom f v)!(es!n))]" by (auto simp: take_Suc_conv_app_nth) with suc1 smaller have "nths (take (Suc (es ! n)) (verticesFrom f v)) (insert (es ! n) (set (take n es))) = nths (take (es ! n) (verticesFrom f v)) (insert (es ! n) (set (take n es))) @ nths ([((verticesFrom f v)!(es!n))]) {j. j + (es!n) : (insert (es ! n) (set (take n es)))}" by (auto simp: nths_append) with sub1 sub3 have "nths (take (Suc (es ! n)) (verticesFrom f v)) (insert (es ! n) (set (take n es))) = nths (take (Suc (es ! n')) (verticesFrom f v)) (set (take n es)) @ [v']" by auto with Some suc1 lvs n' show ?thesis by (simp add: take_Suc_conv_app_nth) qed qed qed lemma natToVertexList_nths: "distinct (vertices f) \ v \ \ f \ incrIndexList es (length es) (length (vertices f)) \ nths (verticesFrom f v) (set es) = removeNones (natToVertexList v f es)" proof - assume vors1: "distinct (vertices f)" "v \ \ f" "incrIndexList es (length es) (length (vertices f))" define vs where "vs = verticesFrom f v" with vors1 have lvs: "length vs = length (vertices f)" by (auto intro: verticesFrom_length) with vors1 vs_def have vors: "distinct (vertices f)" "v \ \ f" "vs = verticesFrom f v" "incrIndexList es (length es) (length vs)" by auto with lvs have vsne: "vs \ []" by auto define n where "n = length es" then have "es!(n - 1) = last es" proof (cases n) case 0 with n_def vors show ?thesis by (cases es) auto next case (Suc n') with n_def have small_n': "n' < length es" by arith from Suc n_def have "take (Suc n') es = es" by auto with small_n' have "take n' es @ [es!n'] = es" by (simp add: take_Suc_conv_app_nth) then have "es!n' = last es" by (cases es rule: rev_exhaust) auto with Suc show ?thesis by auto qed with vors have "es!(n - 1) = (length vs) - 1" by auto with vsne have "Suc (es! 
(n - 1)) = (length vs)" by auto then have take_vs: "take (Suc (es!(n - 1))) vs = vs" by auto from n_def vors have "n = length (natToVertexList v f es)" by auto then have take_nTVL: "take n (natToVertexList v f es) = natToVertexList v f es" by auto from n_def have take_es: "take n es = es" by auto from n_def have "n \ length es" by auto with vors have "nths (take (Suc (es!(n - 1))) vs) (set (take n es)) = removeNones (take n (natToVertexList v f es))" by (rule natToVertexList_nths1) with take_vs take_nTVL take_es vs_def show ?thesis by simp qed lemma filter_Cons2: "x \ set ys \ [y\ys. y = x \ P y] = [y\ys. P y]" by (induct ys) (auto) lemma natToVertexList_removeNones: "distinct (vertices f) \ v \ \ f \ incrIndexList es (length es) (length (vertices f)) \ [x\verticesFrom f v. x \ set (removeNones (natToVertexList v f es))] = removeNones (natToVertexList v f es)" proof - assume vors: "distinct (vertices f)" "v \ \ f" "incrIndexList es (length es) (length (vertices f))" then have dist: "distinct (verticesFrom f v)" by auto from vors have sub_eq: "nths (verticesFrom f v) (set es) = removeNones (natToVertexList v f es)" by (rule natToVertexList_nths) from dist have "[x \ verticesFrom f v. x \ set (nths (verticesFrom f v) (set es))] = removeNones (natToVertexList v f es)" apply (simp add: filter_in_nths) by (simp add: sub_eq) with sub_eq show ?thesis by simp qed (**************************** invalidVertexList ************) definition is_duplicateEdge :: "graph \ face \ vertex \ vertex \ bool" where "is_duplicateEdge g f a b \ ((a, b) \ edges g \ (a, b) \ edges f \ (b, a) \ edges f) \ ((b, a) \ edges g \ (b, a) \ edges f \ (a, b) \ edges f)" definition invalidVertexList :: "graph \ face \ vertex option list \ bool" where "invalidVertexList g f vs \ \i < |vs|- 1. case vs!i of None \ False | Some a \ case vs!(i+1) of None \ False | Some b \ is_duplicateEdge g f a b" (**************************** subdivFace **********************) subsection \\pre_subdivFace(')\\ definition pre_subdivFace_face :: "face \ vertex \ vertex option list \ bool" where "pre_subdivFace_face f v' vOptionList \ [v \ verticesFrom f v'. v \ set (removeNones vOptionList)] = (removeNones vOptionList) \ \ final f \ distinct (vertices f) \ hd (vOptionList) = Some v' \ v' \ \ f \ last (vOptionList) = Some (last (verticesFrom f v')) \ hd (tl (vOptionList)) \ last (vOptionList) \ 2 < | vOptionList | \ vOptionList \ [] \ tl (vOptionList) \ []" definition pre_subdivFace :: "graph \ face \ vertex \ vertex option list \ bool" where "pre_subdivFace g f v' vOptionList \ pre_subdivFace_face f v' vOptionList \ \ invalidVertexList g f vOptionList" (* face to be divided, the original v, the first ram vertex, the number of Nones passed over so far, the rest of vol *) definition pre_subdivFace' :: "graph \ face \ vertex \ vertex \ nat \ vertex option list \ bool" where "pre_subdivFace' g f v' ram1 n vOptionList \ \ final f \ v' \ \ f \ ram1 \ \ f \ v' \ set (removeNones vOptionList) \ distinct (vertices f) \ ( [v \ verticesFrom f v'. 
v \ set (removeNones vOptionList)] = (removeNones vOptionList) \ before (verticesFrom f v') ram1 (hd (removeNones vOptionList)) \ last (vOptionList) = Some (last (verticesFrom f v')) \ vOptionList \ [] \ ((v' = ram1 \ (0 < n)) \ ((v' = ram1 \ (hd (vOptionList) \ Some (last (verticesFrom f v')))) \ (v' \ ram1))) \ \ invalidVertexList g f vOptionList \ (n = 0 \ hd (vOptionList) \ None \ \ is_duplicateEdge g f ram1 (the (hd (vOptionList)))) \ (vOptionList = [] \ v' \ ram1) )" lemma pre_subdivFace_face_in_f[intro]: "pre_subdivFace_face f v ls \ Some a \ set ls \ a \ set (verticesFrom f v)" apply (subgoal_tac "a \ set (removeNones ls)") apply (auto simp: pre_subdivFace_face_def) apply (subgoal_tac "a \ set [v\verticesFrom f v . v \ set (removeNones ls)]") apply (thin_tac "[v\verticesFrom f v . v \ set (removeNones ls)] = removeNones ls") by auto lemma pre_subdivFace_in_f[intro]: "pre_subdivFace g f v ls \ Some a \ set ls \ a \ set (verticesFrom f v)" by (auto simp: pre_subdivFace_def) lemma pre_subdivFace_face_in_f'[intro]: "pre_subdivFace_face f v ls \ Some a \ set ls \ a \ \ f" apply (cases "a = v") apply (force simp: pre_subdivFace_face_def) apply (rule verticesFrom_in') apply (rule pre_subdivFace_face_in_f) by auto lemma filter_congs_shorten1: "distinct (verticesFrom f v) \ [v\verticesFrom f v . v = a \ v \ set vs] = (a # vs) \ [v\verticesFrom f v . v \ set vs] = vs" proof - assume dist: "distinct (verticesFrom f v)" and eq: "[v\verticesFrom f v . v = a \ v \ set vs] = (a # vs)" have rule1: "\ vs a ys. distinct vs \ [v\vs . v = a \ v \ set ys] = a # ys \ [v\vs. v \ set ys] = ys" proof - fix vs a ys assume dist: "distinct vs" and ays: "[v\vs . v = a \ v \ set ys] = a # ys" then have "distinct ([v\vs . v = a \ v \ set ys])" by (rule_tac distinct_filter) with ays have distys: "distinct (a # ys)" by simp from dist distys ays show "[v\vs. v \ set ys] = ys" apply (induct vs) by (auto split: if_split_asm simp: filter_Cons2) qed from dist eq show ?thesis by (rule_tac rule1) qed lemma ovl_shorten: "distinct (verticesFrom f v) \ [v\verticesFrom f v . v \ set (removeNones (va # vol))] = (removeNones (va # vol)) \ [v\verticesFrom f v . v \ set (removeNones (vol))] = (removeNones (vol))" proof - assume dist: "distinct (verticesFrom f v)" and vors: "[v\verticesFrom f v . v \ set (removeNones (va # vol))] = (removeNones (va # vol))" then show ?thesis proof (cases va) case None with vors Cons show ?thesis by auto next case (Some a) with vors dist show ?thesis by (auto intro!: filter_congs_shorten1) qed qed lemma pre_subdivFace_face_distinct: "pre_subdivFace_face f v vol \ distinct (removeNones vol)" apply (auto dest!: verticesFrom_distinct simp: pre_subdivFace_face_def) apply (subgoal_tac "distinct ([v\verticesFrom f v . v \ set (removeNones vol)])") apply simp apply (thin_tac "[v\verticesFrom f v . 
v \ set (removeNones vol)] = removeNones vol") by auto lemma invalidVertexList_shorten: "invalidVertexList g f vol \ invalidVertexList g f (v # vol)" apply (simp add: invalidVertexList_def) apply auto apply (rule exI) apply safe apply (subgoal_tac "(Suc i) < | vol |") apply assumption apply arith apply auto apply (case_tac "vol!i") by auto lemma pre_subdivFace_pre_subdivFace': "v \ \ f \ pre_subdivFace g f v (vo # vol) \ pre_subdivFace' g f v v 0 (vol)" proof - assume vors: "v \ \ f" "pre_subdivFace g f v (vo # vol)" then have vors': "v \ \ f" "pre_subdivFace_face f v (vo # vol)" "\ invalidVertexList g f (vo # vol)" by (auto simp: pre_subdivFace_def) then have r: "removeNones vol \ []" apply (cases "vol" rule: rev_exhaust) by (auto simp: pre_subdivFace_face_def) then have "Some (hd (removeNones vol)) \ set vol" apply (induct vol) apply auto apply (case_tac a) by auto then have "Some (hd (removeNones vol)) \ set (vo # vol)" by auto with vors' have hd: "hd (removeNones vol) \ \ f" by (rule_tac pre_subdivFace_face_in_f') from vors' have "Some v = vo" by (auto simp: pre_subdivFace_face_def) with vors' have "v \ set (tl (removeNones (vo # vol)))" apply (drule_tac pre_subdivFace_face_distinct) by auto with vors' r have ne: "v \ hd (removeNones vol)" by (cases "removeNones vol") (auto simp: pre_subdivFace_face_def) from vors' have dist: "distinct (removeNones (vo # vol))" apply (rule_tac pre_subdivFace_face_distinct) . from vors' have invalid: "\ invalidVertexList g f vol" by (auto simp: invalidVertexList_shorten) from ne hd vors' invalid dist show ?thesis apply (unfold pre_subdivFace'_def) apply (simp add: pre_subdivFace'_def pre_subdivFace_face_def) apply safe apply (rule ovl_shorten) apply (simp add: pre_subdivFace_face_def) apply assumption apply (rule before_verticesFrom) apply simp+ apply (simp add: invalidVertexList_def) apply (erule allE) apply (erule impE) apply (subgoal_tac "0 < |vol|") apply (thin_tac "Suc 0 < | vol |") apply assumption apply simp apply (simp) apply (case_tac "vol") apply simp by (simp add: is_duplicateEdge_def) qed lemma pre_subdivFace'_distinct: "pre_subdivFace' g f v' v n vol \ distinct (removeNones vol)" apply (unfold pre_subdivFace'_def) apply (cases vol) apply simp+ apply (elim conjE) apply (drule_tac verticesFrom_distinct) apply assumption apply (subgoal_tac "distinct [v\verticesFrom f v' . v \ set (removeNones (a # list))]") apply force apply (thin_tac "[v\verticesFrom f v' . v \ set (removeNones (a # list))] = removeNones (a # list)") by auto lemma natToVertexList_pre_subdivFace_face: "\ final f \ distinct (vertices f) \ v \ \ f \ 2 < |es| \ incrIndexList es (length es) (length (vertices f)) \ pre_subdivFace_face f v (natToVertexList v f es)" proof - assume vors: "\ final f" "distinct (vertices f)" "v \ \ f" "2 < |es|" "incrIndexList es (length es) (length (vertices f))" then have lastOvl: "last (natToVertexList v f es) = Some (last (verticesFrom f v))" by auto from vors have nvl_l: "2 < | natToVertexList v f es |" by auto from vors have "distinct [x\verticesFrom f v . 
x \ set (removeNones (natToVertexList v f es))]" by auto with vors have "distinct (removeNones (natToVertexList v f es))" by (simp add: natToVertexList_removeNones) with nvl_l lastOvl have hd_last: "hd (tl (natToVertexList v f es)) \ last (natToVertexList v f es)" apply auto apply (cases "natToVertexList v f es") apply simp apply (case_tac "list" rule: rev_exhaust) apply simp apply (case_tac "ys") apply simp apply (case_tac "a") apply simp by simp from vors lastOvl hd_last nvl_l show ?thesis apply (auto intro: natToVertexList_removeNones simp: pre_subdivFace_face_def) apply (cases es) apply auto apply (cases es) apply auto apply (subgoal_tac "0 < length list") apply (case_tac list) by (auto split: if_split_asm) qed lemma indexToVertexList_pre_subdivFace_face: "\ final f \ distinct (vertices f) \ v \ \ f \ 2 < |es| \ incrIndexList es (length es) (length (vertices f)) \ pre_subdivFace_face f v (indexToVertexList f v es)" apply (subgoal_tac "indexToVertexList f v es = natToVertexList v f es") apply simp apply (rule natToVertexList_pre_subdivFace_face) apply assumption+ apply (rule indexToVertexList_natToVertexList_eq) by auto lemma subdivFace_subdivFace'_eq: "pre_subdivFace g f v vol \ subdivFace g f vol = subdivFace' g f v 0 (tl vol)" by (simp_all add: subdivFace_def pre_subdivFace_def pre_subdivFace_face_def) lemma pre_subdivFace'_None: "pre_subdivFace' g f v' v n (None # vol) \ pre_subdivFace' g f v' v (Suc n) vol" by(auto simp: pre_subdivFace'_def dest:invalidVertexList_shorten split:if_split_asm) declare verticesFrom_between [simp del] (* to show: 1. vol \ [] \ [v\verticesFrom f21 v' . v \ set (removeNones vol)] = removeNones vol 2. vol \ [] \ before (verticesFrom f21 v') u (hd (removeNones vol)) 3. vol \ [] \ distinct (vertices f21) 4. vol \ [] \ last vol = Some (last (verticesFrom f21 v')) *) lemma verticesFrom_split: "v # tl (verticesFrom f v) = verticesFrom f v" by (auto simp: verticesFrom_Def) lemma verticesFrom_v: "distinct (vertices f) \ vertices f = a @ v # b \ verticesFrom f v = v # b @ a" by (simp add: verticesFrom_Def) lemma splitAt_fst[simp]: "distinct xs \ xs = a @ v # b \ fst (splitAt v xs) = a" by auto lemma splitAt_snd[simp]: "distinct xs \ xs = a @ v # b \ snd (splitAt v xs) = b" by auto lemma verticesFrom_splitAt_v_fst[simp]: "distinct (verticesFrom f v) \ fst (splitAt v (verticesFrom f v)) = []" by (simp add: verticesFrom_Def) lemma verticesFrom_splitAt_v_snd[simp]: "distinct (verticesFrom f v) \ snd (splitAt v (verticesFrom f v)) = tl (verticesFrom f v)" by (simp add: verticesFrom_Def) lemma filter_distinct_at: "distinct xs \ xs = (as @ u # bs) \ [v\xs. v = u \ P v] = u # us \ [v\bs. P v] = us \ [v\as. P v] = []" apply (subgoal_tac "filter P as @ u # filter P bs = [] @ u # us") apply (drule local_help') by (auto simp: filter_Cons2) lemma filter_distinct_at3: "distinct xs \ xs = (as @ u # bs) \ [v\xs. v = u \ P v] = u # us \ \ z \ set zs. z \ set as \ \ ( P z) \ [v\zs@bs. P v] = us" apply (drule filter_distinct_at) apply assumption+ apply simp by (induct zs) auto lemma filter_distinct_at4: "distinct xs \ xs = (as @ u # bs) \ [v\xs. v = u \ v \ set us] = u # us \ set zs \ set us \ {u} \ set as \ [v \ zs@bs. v \ set us] = us" proof - assume vors: "distinct xs" "xs = (as @ u # bs)" "[v\xs. v = u \ v \ set us] = u # us" "set zs \ set us \ {u} \ set as" then have "distinct ([v\xs. 
v = u \ v \ set us])" apply (rule_tac distinct_filter) by simp with vors have dist: "distinct (u # us)" by auto with vors show ?thesis apply (rule_tac filter_distinct_at3) by assumption+ auto qed lemma filter_distinct_at5: "distinct xs \ xs = (as @ u # bs) \ [v\xs. v = u \ v \ set us] = u # us \ set zs \ set xs \ {u} \ set as \ [v \ zs@bs. v \ set us] = us" proof - assume vors: "distinct xs" "xs = (as @ u # bs)" "[v\xs. v = u \ v \ set us] = u # us" "set zs \ set xs \ {u} \ set as" have "set ([v\xs. v = u \ v \ set us]) \ set xs" by auto with vors have "set (u # us) \ set xs" by simp then have "set us \ set xs" by simp with vors have "set zs \ set us \ set zs \ insert u (set as \ set bs)" by auto with vors show ?thesis apply (rule_tac filter_distinct_at4) apply assumption+ by auto qed lemma filter_distinct_at6: "distinct xs \ xs = (as @ u # bs) \ [v\xs. v = u \ v \ set us] = u # us \ set zs \ set xs \ {u} \ set as \ [v \ zs@bs. v \ set us] = us \ [v \ bs. v \ set us] = us" proof - assume vors: "distinct xs" "xs = (as @ u # bs)" "[v \ xs. v = u \ v \ set us] = u # us" "set zs \ set xs \ {u} \ set as" then show ?thesis apply (rule_tac conjI) apply (rule_tac filter_distinct_at5) apply assumption+ apply (drule filter_distinct_at) apply assumption+ by auto qed lemma filter_distinct_at_special: "distinct xs \ xs = (as @ u # bs) \ [v\xs. v = u \ v \ set us] = u # us \ set zs \ set xs \ {u} \ set as \ us = hd_us # tl_us \ [v \ zs@bs. v \ set us] = us \ hd_us \ set bs" proof - assume vors: "distinct xs" "xs = (as @ u # bs)" "[v\xs. v = u \ v \ set us] = u # us" "set zs \ set xs \ {u} \ set as" "us = hd_us # tl_us" then have "[v \ zs@bs. v \ set us] = us \ [v\bs. v \ set us] = us" by (rule_tac filter_distinct_at6) with vors show ?thesis apply (rule_tac conjI) apply safe apply simp apply (subgoal_tac "set (hd_us # tl_us) \ set bs") apply simp apply (subgoal_tac "set [v\bs . v = hd_us \ v \ set tl_us] \ set bs") apply simp by (rule_tac filter_is_subset) qed (* später ggf. 
pre_splitFace eliminieren *) (* nein, Elimination nicht sinnvoll *) lemma pre_subdivFace'_Some1': assumes pre_add: "pre_subdivFace' g f v' v n ((Some u) # vol)" and pre_fdg: "pre_splitFace g v u f ws" and fdg: "f21 = fst (snd (splitFace g v u f ws))" and g': "g' = snd (snd (splitFace g v u f ws))" shows "pre_subdivFace' g' f21 v' u 0 vol" proof (cases "vol = []") case True then show ?thesis using pre_add fdg pre_fdg apply(unfold pre_subdivFace'_def pre_splitFace_def) apply (simp add: splitFace_def split_face_def split_def del:distinct.simps) apply (rule conjI) apply(clarsimp) apply(rule before_between) apply(erule (5) rotate_before_vFrom) apply(erule not_sym) apply (clarsimp simp:between_distinct between_not_r1 between_not_r2) apply(blast dest:inbetween_inset) done next case False with pre_add have "removeNones vol \ []" apply (cases "vol" rule: rev_exhaust) by (auto simp: pre_subdivFace'_def) then have removeNones_split: "removeNones vol = hd (removeNones vol) # tl (removeNones vol)" by auto from pre_add have dist: "distinct (removeNones ((Some u) # vol))" by (rule_tac pre_subdivFace'_distinct) from pre_add have v': "v' \ \ f" by (auto simp: pre_subdivFace'_def) hence "(vertices f) \ (verticesFrom f v')" by (rule verticesFrom_congs) hence set_eq: "set (verticesFrom f v') = \ f" apply (rule_tac sym) by (rule congs_pres_nodes) from pre_fdg fdg have dist_f21: "distinct (vertices f21)" by auto from pre_add have pre_bet': "pre_between (verticesFrom f v') u v" apply (simp add: pre_between_def pre_subdivFace'_def) apply (elim conjE) apply (thin_tac "n = 0 \ \ is_duplicateEdge g f v u") apply (thin_tac "v' = v \ 0 < n \ v' = v \ u \ last (verticesFrom f v') \ v' \ v") apply (auto simp add: before_def) apply (subgoal_tac "distinct (verticesFrom f v')") apply simp apply (rule_tac verticesFrom_distinct) by auto with pre_add have pre_bet: "pre_between (vertices f) u v" apply (subgoal_tac "(vertices f) \ (verticesFrom f v')") apply (simp add: pre_between_def pre_subdivFace'_def) by (auto dest: congs_pres_nodes intro: verticesFrom_congs simp: pre_subdivFace'_def) from pre_bet pre_add have bet_eq[simp]: "between (vertices f) u v = between (verticesFrom f v') u v" by (auto intro: verticesFrom_between simp: pre_subdivFace'_def) from fdg have f21_split_face: "f21 = snd (split_face f v u ws)" by (simp add: splitFace_def split_def) then have f21: "f21 = Face (u # between (vertices f) u v @ v # ws) Nonfinal" by (simp add: split_face_def) with pre_add pre_bet' have vert_f21: "vertices f21 = u # snd (splitAt u (verticesFrom f v')) @ fst (splitAt v (verticesFrom f v')) @ v # ws" apply (drule_tac pre_between_symI) by (auto simp: pre_subdivFace'_def between_simp2 intro: pre_between_symI) moreover from pre_add have "v \ set (verticesFrom f v')" by (auto simp: pre_subdivFace'_def before_def) then have "verticesFrom f v' = fst (splitAt v (verticesFrom f v')) @ v # snd (splitAt v (verticesFrom f v'))" by (auto dest: splitAt_ram) then have m: "v' # tl (verticesFrom f v') = fst (splitAt v (verticesFrom f v')) @ v # snd (splitAt v (verticesFrom f v'))" by (simp add: verticesFrom_split) then have vv': "v \ v' \ fst (splitAt v (verticesFrom f v')) = v' # tl (fst (splitAt v (verticesFrom f v')))" by (cases "fst (splitAt v (verticesFrom f v'))") auto ultimately have "v \ v' \ vertices f21 = u # snd (splitAt u (verticesFrom f v')) @ v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws" by auto moreover with f21 have rule2: "v' \ \ f21" by auto with dist_f21 have dist_f21_v': "distinct (verticesFrom f21 v')" by auto 
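(* m1 and m2 below rotate the vertex list of f21 so that it starts at v' (m1 for the case that v and v' differ, m2 for v = v'); the filter property rule1, the before property rule3 and the last-vertex property rule4 for vol are then transferred from f to f21 via these two decompositions *)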
ultimately have m1: "v \ v' \ verticesFrom f21 v' = v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws @ u # snd (splitAt u (verticesFrom f v'))" apply auto apply (subgoal_tac "snd (splitAt v' (vertices f21)) = tl (fst (splitAt v (verticesFrom f v'))) @ v # ws") apply (subgoal_tac "fst (splitAt v' (vertices f21)) = u # snd (splitAt u (verticesFrom f v'))") apply (subgoal_tac "verticesFrom f21 v' = v' # snd (splitAt v' (vertices f21)) @ fst (splitAt v' (vertices f21))") apply simp apply (intro verticesFrom_v dist_f21) apply force apply (subgoal_tac "distinct (vertices f21)") apply simp apply (rule_tac dist_f21) apply (subgoal_tac "distinct (vertices f21)") apply simp by (rule_tac dist_f21) from pre_add have dist_vf_v': "distinct (verticesFrom f v')" by (simp add: pre_subdivFace'_def) with vert_f21 have m2: "v = v' \ verticesFrom f21 v' = v' # ws @ u # snd (splitAt u (verticesFrom f v'))" apply auto apply (intro verticesFrom_v dist_f21) by simp from pre_add have u: "u \ set (verticesFrom f v')" by (fastforce simp: pre_subdivFace'_def before_def) then have split_u: "verticesFrom f v' = fst (splitAt u (verticesFrom f v')) @ u # snd (splitAt u (verticesFrom f v'))" by (auto dest!: splitAt_ram) then have rule1': "[v \ snd (splitAt u (verticesFrom f v')) . v \ set (removeNones vol)] = removeNones vol" proof - from split_u have "v' # tl (verticesFrom f v') = fst (splitAt u (verticesFrom f v')) @ u # snd (splitAt u (verticesFrom f v'))" by (simp add: verticesFrom_split) have "help": "set [] \ set (verticesFrom f v') \ {u} \ set (fst (splitAt u (verticesFrom f v')))" by auto from split_u dist_vf_v' pre_add have "[v \ [] @ snd (splitAt u (verticesFrom f v')) . v \ set (removeNones vol)] = removeNones vol" apply (rule_tac filter_distinct_at5) apply assumption+ apply (simp add: pre_subdivFace'_def) by (rule "help") then show ?thesis by auto qed then have inSnd_u: "\ x. x \ set (removeNones vol) \ x \ set (snd (splitAt u (verticesFrom f v')))" apply (subgoal_tac "x \ set [v \ snd (splitAt u (verticesFrom f v')) . v \ set (removeNones vol)] \ x \ set (snd (splitAt u (verticesFrom f v')))") apply force apply (thin_tac "[v \ snd (splitAt u (verticesFrom f v')) . v \ set (removeNones vol)] = removeNones vol") by simp from split_u dist_vf_v' have notinFst_u: "\ x. x \ set (removeNones vol) \ x \ set ((fst (splitAt u (verticesFrom f v'))) @ [u])" apply (drule_tac inSnd_u) apply (subgoal_tac "distinct ( fst (splitAt u (verticesFrom f v')) @ u # snd (splitAt u (verticesFrom f v')))") apply (thin_tac "verticesFrom f v' = fst (splitAt u (verticesFrom f v')) @ u # snd (splitAt u (verticesFrom f v'))") apply simp apply safe apply (subgoal_tac "x \ set (fst (splitAt u (verticesFrom f v'))) \ set (snd (splitAt u (verticesFrom f v')))") apply simp apply (thin_tac "set (fst (splitAt u (verticesFrom f v'))) \ set (snd (splitAt u (verticesFrom f v'))) = {}") apply simp by (simp only:) from rule2 v' have "\ a b. 
is_nextElem (vertices f) a b \ a \ set (removeNones vol) \ b \ set (removeNones vol) \ is_nextElem (vertices f21) a b" proof - fix a b assume vors: "is_nextElem (vertices f) a b \ a \ set (removeNones vol) \ b \ set (removeNones vol)" define vor_u where "vor_u = fst (splitAt u (verticesFrom f v'))" define nach_u where "nach_u = snd (splitAt u (verticesFrom f v'))" from vors v' have "is_nextElem (verticesFrom f v') a b" by (simp add: verticesFrom_is_nextElem) moreover from vors inSnd_u nach_u_def have "a \ set (nach_u)" by auto moreover from vors inSnd_u nach_u_def have "b \ set (nach_u)" by auto moreover from split_u vor_u_def nach_u_def have "verticesFrom f v' = vor_u @ u # nach_u" by auto moreover note dist_vf_v' ultimately have "is_sublist [a,b] (nach_u)" apply (simp add: is_nextElem_def split:if_split_asm) apply (subgoal_tac "b \ hd (vor_u @ u # nach_u)") apply simp apply (subgoal_tac "distinct (vor_u @ (u # nach_u))") apply (drule is_sublist_at5) apply simp apply simp apply (erule disjE) apply (drule is_sublist_in1)+ apply (subgoal_tac "b \ set vor_u \ set nach_u") apply simp apply (thin_tac "set vor_u \ set nach_u = {}") apply simp apply (erule disjE) apply (subgoal_tac "distinct ([u] @ nach_u)") apply (drule is_sublist_at5) apply simp apply simp apply (erule disjE) apply simp apply simp apply simp apply (subgoal_tac "distinct (vor_u @ (u # nach_u))") apply (drule is_sublist_at5) apply simp apply (erule disjE) apply (drule is_sublist_in1)+ apply simp apply (erule disjE) apply (drule is_sublist_in1)+ apply simp apply simp apply simp apply simp apply (cases "vor_u") by auto with nach_u_def have "is_sublist [a,b] (snd (splitAt u (verticesFrom f v')))" by auto then have "is_sublist [a,b] (verticesFrom f21 v')" apply (cases "v = v'") apply (simp_all add: m1 m2) apply (subgoal_tac "is_sublist [a, b] ((v' # ws @ [u]) @ snd (splitAt u (verticesFrom f v')) @ [])") apply simp apply (rule is_sublist_add) apply simp apply (subgoal_tac "is_sublist [a, b] ((v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws @ [u]) @ (snd (splitAt u (verticesFrom f v'))) @ [])") apply simp apply (rule is_sublist_add) by simp with rule2 show "is_nextElem (vertices f) a b \ a \ set (removeNones vol) \ b \ set (removeNones vol) \ is_nextElem (vertices f21) a b" apply (simp add: verticesFrom_is_nextElem) by (auto simp: is_nextElem_def) qed with pre_add dist_f21 have rule5': "\ a b. (a,b) \ edges f \ a \ set (removeNones vol) \ b \ set (removeNones vol) \ (a, b) \ edges f21" by (simp add: is_nextElem_edges_eq pre_subdivFace'_def) have rule1: "[v\verticesFrom f21 v' . 
v \ set (removeNones vol)] = removeNones vol \ hd (removeNones vol) \ set (snd (splitAt u (verticesFrom f v')))" proof (cases "v = v'") case True from split_u have "v' # tl (verticesFrom f v') = fst (splitAt u (verticesFrom f v')) @ u # snd (splitAt u (verticesFrom f v'))" by (simp add: verticesFrom_split) then have "u \ v' \ fst (splitAt u (verticesFrom f v')) = v' # tl (fst (splitAt u (verticesFrom f v')))" by (cases "fst (splitAt u (verticesFrom f v'))") auto moreover have "v' \ set (v' # tl (fst (splitAt u (verticesFrom f v'))))" by simp ultimately have "u \ v' \ v' \ set (fst (splitAt u (verticesFrom f v')))" by simp moreover from pre_fdg have "set (v' # ws @ [u]) \ set (verticesFrom f v') \ {v', u}" apply (simp add: set_eq) by (unfold pre_splitFace_def) auto ultimately have "help": "set (v' # ws @ [u]) \ set (verticesFrom f v') \ {u} \ set (fst (splitAt u (verticesFrom f v')))" apply (rule_tac subset_trans) apply assumption apply (cases "u = v'") by simp_all from split_u dist_vf_v' pre_add pre_fdg removeNones_split have "[v \ (v' # ws @ [u]) @ snd (splitAt u (verticesFrom f v')) . v \ set (removeNones vol)] = removeNones vol \ hd (removeNones vol) \ set (snd (splitAt u (verticesFrom f v')))" apply (rule_tac filter_distinct_at_special) apply assumption+ apply (simp add: pre_subdivFace'_def) apply (rule "help") . with True m2 show ?thesis by auto next case False with m1 dist_f21_v' have ne_uv': "u \ v'" by auto define fst_u where "fst_u = fst (splitAt u (verticesFrom f v'))" define fst_v where "fst_v = fst (splitAt v (verticesFrom f v'))" from pre_add u dist_vf_v' have "v \ set (fst (splitAt u (verticesFrom f v')))" apply (rule_tac before_dist_r1) by (auto simp: pre_subdivFace'_def) with fst_u_def have "fst_u = fst (splitAt v (fst (splitAt u (verticesFrom f v')))) @ v # snd (splitAt v (fst (splitAt u (verticesFrom f v'))))" by (auto dest: splitAt_ram) with pre_add fst_v_def pre_bet' have fst_u':"fst_u = fst_v @ v # snd (splitAt v (fst (splitAt u (verticesFrom f v'))))" by (simp add: pre_subdivFace'_def) from pre_fdg have "set (v # ws @ [u]) \ set (verticesFrom f v') \ {v, u}" apply (simp add: set_eq) by (unfold pre_splitFace_def) auto with fst_u' have "set (v # ws @ [u]) \ set (verticesFrom f v') \ {u} \ set fst_u" by auto moreover from fst_u' have "set fst_v \ set fst_u" by auto ultimately have "(set (v # ws @ [u]) \ set fst_v) \ set (verticesFrom f v') \ {u} \ set fst_u" by auto with fst_u_def fst_v_def have "set (fst (splitAt v (verticesFrom f v')) @ v # ws @ [u]) \ set (verticesFrom f v') \ {u} \ set (fst (splitAt u (verticesFrom f v')))" by auto moreover with False vv' have "v' # tl (fst (splitAt v (verticesFrom f v'))) = fst (splitAt v (verticesFrom f v'))" by auto ultimately have "set ((v' # tl (fst (splitAt v (verticesFrom f v')))) @ v # ws @ [u]) \ set (verticesFrom f v') \ {u} \ set (fst (splitAt u (verticesFrom f v')))" by (simp only:) then have "help": "set (v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws @ [u]) \ set (verticesFrom f v') \ {u} \ set (fst (splitAt u (verticesFrom f v')))" by auto from split_u dist_vf_v' pre_add pre_fdg removeNones_split have "[v \ (v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws @ [u]) @ snd (splitAt u (verticesFrom f v')) . v \ set (removeNones vol)] = removeNones vol \ hd (removeNones vol) \ set (snd (splitAt u (verticesFrom f v')))" apply (rule_tac filter_distinct_at_special) apply assumption+ apply (simp add: pre_subdivFace'_def) apply (rule "help") . 
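(* in the case that v' differs from v, the filter property just established for the rotated vertex list is turned into the corresponding statement about verticesFrom f21 v' by rewriting with the decomposition m1 *)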
with False m1 show ?thesis by auto qed from rule1 have "(hd (removeNones vol)) \ set (snd (splitAt u (verticesFrom f v')))" by auto with m1 m2 dist_f21_v' have rule3: "before (verticesFrom f21 v') u (hd (removeNones vol))" proof - assume hd_ram: "(hd (removeNones vol)) \ set (snd (splitAt u (verticesFrom f v')))" from m1 m2 dist_f21_v' have "distinct (snd (splitAt u (verticesFrom f v')))" apply (cases "v = v'") by auto moreover define z1 where "z1 = fst (splitAt (hd (removeNones vol)) (snd (splitAt u (verticesFrom f v'))))" define z2 where "z2 = snd (splitAt (hd (removeNones vol)) (snd (splitAt u (verticesFrom f v'))))" note z1_def z2_def hd_ram ultimately have "snd (splitAt u (verticesFrom f v')) = z1 @ (hd (removeNones vol)) # z2" by (auto intro: splitAt_ram) with m1 m2 show ?thesis apply (cases "v = v'") apply (auto simp: before_def) apply (intro exI ) apply (subgoal_tac "v' # ws @ u # z1 @ hd (removeNones vol) # z2 = (v' # ws) @ u # z1 @ hd (removeNones vol) # z2") apply assumption apply simp apply (intro exI ) apply (subgoal_tac "v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws @ u # z1 @ hd (removeNones vol) # z2 = (v' # tl (fst (splitAt v (verticesFrom f v'))) @ v # ws) @ u # z1 @ hd (removeNones vol) # z2") apply assumption by simp qed from rule1 have ne:"(hd (removeNones vol)) \ set (snd (splitAt u (verticesFrom f v')))" by auto with m1 m2 have "last (verticesFrom f21 v') = last (snd (splitAt u (verticesFrom f v')))" apply (cases "snd (splitAt u (verticesFrom f v'))" rule: rev_exhaust) apply simp_all apply (cases "v = v'") by simp_all moreover from ne have "last (fst (splitAt u (verticesFrom f v')) @ u # snd (splitAt u (verticesFrom f v'))) = last (snd (splitAt u (verticesFrom f v')))" by auto moreover note split_u ultimately have rule4: "last (verticesFrom f v') = last (verticesFrom f21 v')" by simp have l: "\ a b f v. v \ set (vertices f) \ is_nextElem (vertices f) a b = is_nextElem (verticesFrom f v) a b " apply (rule is_nextElem_congs_eq) by (rule verticesFrom_congs) define f12 where "f12 = fst (split_face f v u ws)" then have f12_fdg: "f12 = fst (splitFace g v u f ws)" by (simp add: splitFace_def split_def) from pre_bet pre_add have bet_eq2[simp]: "between (vertices f) v u = between (verticesFrom f v') v u" apply (drule_tac pre_between_symI) by (auto intro: verticesFrom_between simp: pre_subdivFace'_def) from f12_fdg have f12_split_face: "f12 = fst (split_face f v u ws)" by (simp add: splitFace_def split_def) then have f12: "f12 = Face (rev ws @ v # between (verticesFrom f v') v u @ [u]) Nonfinal" by (simp add: split_face_def) then have "vertices f12 = rev ws @ v # between (verticesFrom f v') v u @ [u]" by simp with pre_add pre_bet' have vert_f12: "vertices f12 = rev ws @ v # snd (splitAt v (fst (splitAt u (verticesFrom f v')))) @ [u]" apply (subgoal_tac "between (verticesFrom f v') v u = fst (splitAt u (snd (splitAt v (verticesFrom f v'))))") apply (simp add: pre_subdivFace'_def) apply (rule between_simp1) apply (simp add: pre_subdivFace'_def) apply (rule pre_between_symI) . with dist_f21_v' have removeNones_vol_not_f12: "\ x. 
x \ set (removeNones vol) \ x \ set (vertices f12)" apply (frule_tac notinFst_u) apply (drule inSnd_u) apply simp apply (case_tac "v = v'") apply (simp add: m1 m2) apply (rule conjI) apply force apply (rule conjI) apply (rule ccontr) apply simp apply (subgoal_tac "x \ set ws \ set (snd (splitAt u (verticesFrom f v')))") apply simp apply (elim conjE) apply (thin_tac "set ws \ set (snd (splitAt u (verticesFrom f v'))) = {}") apply simp apply force apply (simp add: m1 m2) apply (rule conjI) apply force apply (rule conjI) apply (rule ccontr) apply simp apply (subgoal_tac "x \ set ws \ set (snd (splitAt u (verticesFrom f v')))") apply simp apply (elim conjE) apply (thin_tac "set ws \ set (snd (splitAt u (verticesFrom f v'))) = {}") apply simp by force from pre_fdg f12_split_face have dist_f12: "distinct (vertices f12)" by (auto intro: split_face_distinct1') then have removeNones_vol_edges_not_f12: "\ x y. x \ set (removeNones vol) \ (x,y) \ edges f12" (* ? *) apply (drule_tac removeNones_vol_not_f12) by auto from dist_f12 have removeNones_vol_edges_not_f12': "\ x y. y \ set (removeNones vol) \ (x,y) \ edges f12" apply (drule_tac removeNones_vol_not_f12) by auto from f12_fdg pre_fdg g' fdg have face_set_eq: "\ g' \ {f} = {f12, f21} \ \ g" apply (rule_tac splitFace_faces_1) by (simp_all) have rule5'': "\ a b. (a,b) \ edges g' \ (a,b) \ edges g \ a \ set (removeNones vol) \ b \ set (removeNones vol) \ (a, b) \ edges f21" (* ? *) apply (simp add: edges_graph_def) apply safe apply (case_tac "x = f") apply simp apply (rule rule5') apply safe apply (subgoal_tac "x \ \ g' \ {f}") apply (thin_tac "x \ f") apply (thin_tac "x \ set (faces g')") apply (simp only: add: face_set_eq) apply safe apply (drule removeNones_vol_edges_not_f12) by auto have rule5''': "\ a b. (a,b) \ edges g' \ (a,b) \ edges g \ a \ set (removeNones vol) \ b \ set (removeNones vol) \ (a, b) \ edges f21" (* ? *) apply (simp add: edges_graph_def) apply safe apply (case_tac "x = f") apply simp apply (rule rule5') apply safe apply (subgoal_tac "x \ \ g' \ {f}") apply (thin_tac "x \ f") apply (thin_tac "x \ \ g'") apply (simp only: add: face_set_eq) apply safe apply (drule removeNones_vol_edges_not_f12) by auto from pre_fdg fdg f12_fdg g' have edges_g'1: "ws \ [] \ edges g' = edges g \ Edges ws \ Edges(rev ws) \ {(u, last ws), (hd ws, v), (v, hd ws), (last ws, u)}" apply (rule_tac splitFace_edges_g') apply simp apply (subgoal_tac "(f12, f21, g') = splitFace g v u f ws") apply assumption by auto from pre_fdg fdg f12_fdg g' have edges_g'2: "ws = [] \ edges g' = edges g \ {(v, u), (u, v)}" apply (rule_tac splitFace_edges_g'_vs) apply simp apply (subgoal_tac "(f12, f21, g') = splitFace g v u f []") apply assumption by auto from f12_split_face f21_split_face have split: "(f12,f21) = split_face f v u ws" by simp from pre_add have "\ invalidVertexList g f vol" by (auto simp: pre_subdivFace'_def dest: invalidVertexList_shorten) then have rule5: "\ invalidVertexList g' f21 vol" apply (simp add: invalidVertexList_def) apply (intro allI impI) apply (case_tac "vol!i") apply simp+ apply (case_tac "vol!Suc i") apply simp+ apply (subgoal_tac "\ is_duplicateEdge g f a aa") apply (thin_tac "\i<|vol| - Suc 0. \ (case vol ! i of None \ False | Some a \ case_option False (is_duplicateEdge g f a) (vol ! 
(i+1)))") apply (simp add: is_duplicateEdge_def) apply (subgoal_tac "a \ set (removeNones vol) \ aa \ set (removeNones vol)") apply (rule conjI) apply (rule impI) apply (case_tac "(a, aa) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (case_tac "(aa, a) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply simp apply (case_tac "ws = []") apply (frule edges_g'2) apply simp apply (subgoal_tac "pre_split_face f v u []") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet_vs) apply simp apply simp apply simp apply (frule split_face_edges_f21_vs) apply simp apply simp apply simp apply (case_tac "a = v \ aa = u") apply simp apply simp apply (rule split) apply (subgoal_tac "pre_split_face f v u ws") apply simp apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (frule edges_g'1) apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet) apply simp apply simp apply simp apply simp apply (case_tac "a = u \ aa = last ws") apply simp apply simp apply (case_tac "a = hd ws \ aa = v") apply simp apply simp apply (case_tac "a = v \ aa = hd ws") apply simp apply simp apply (case_tac "a = last ws \ aa = u") apply simp apply simp apply (case_tac "(a, aa) \ Edges ws") apply simp apply simp apply (frule split_face_edges_f21) apply simp apply simp apply simp apply simp apply (force) apply (rule split) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (rule impI) apply (case_tac "(aa,a) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (case_tac "(a,aa) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply simp apply (case_tac "ws = []") apply (frule edges_g'2) apply simp apply (subgoal_tac "pre_split_face f v u []") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet_vs) apply simp apply simp apply simp apply (frule split_face_edges_f21_vs) apply simp apply simp apply simp apply force apply (rule split) apply (subgoal_tac "pre_split_face f v u ws") apply simp apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (frule edges_g'1) apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet) apply simp apply simp apply simp apply (force) apply (frule split_face_edges_f21) apply simp apply simp apply simp apply simp apply (force) apply (rule split) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (rule conjI) apply (subgoal_tac "Some a \ set 
vol") apply (induct vol) apply simp apply force apply (subgoal_tac "vol ! i \ set vol") apply simp apply (rule nth_mem) apply arith apply (subgoal_tac "Some aa \ set vol") apply (induct vol) apply simp apply force apply (subgoal_tac "vol ! (Suc i) \ set vol") apply simp apply (rule nth_mem) apply arith by auto from pre_fdg dist_f21 v' have dists: "distinct (vertices f)" "distinct (vertices f12)" "distinct (vertices f21)" "v' \ \ f" apply auto defer apply (drule splitFace_distinct2) apply (simp add: f12_fdg) apply (unfold pre_splitFace_def) by simp with pre_fdg have edges_or: "\ a b. (a,b) \ edges f \ (a,b) \ edges f12 \ (a,b) \ edges f21" apply (rule_tac split_face_edges_or) apply (simp add: f12_split_face f21_split_face) by simp+ from pre_fdg have dist_f: "distinct (vertices f)" apply (unfold pre_splitFace_def) by simp (* lemma *) from g' have edges_g': "edges g' = (UN h:set(replace f [snd (split_face f v u ws)] (faces g)). edges h) \ edges (fst (split_face f v u ws))" by (auto simp add: splitFace_def split_def edges_graph_def) (* lemma *) from pre_fdg edges_g' have edges_g'_or: "\ a b. (a,b) \ edges g' \ (a,b) \ edges g \ (a,b) \ edges f12 \ (a,b) \ edges f21" apply simp apply (case_tac "(a, b) \ edges (fst (split_face f v u ws))") apply (simp add:f12_split_face) apply simp apply (elim bexE) apply (simp add: f12_split_face) apply (case_tac "x \ \ g") apply (induct g) apply (simp add: edges_graph_def) apply (rule disjI1) apply (rule bexI) apply simp apply simp apply (drule replace1) apply simp by (simp add: f21_split_face) have rule6: "0 < |vol| \ \ invalidVertexList g f (Some u # vol) \ (\y. hd vol = Some y) \ \ is_duplicateEdge g' f21 u (the (hd vol))" apply (rule impI) apply (erule exE) apply simp apply (case_tac vol) apply simp+ apply (simp add: invalidVertexList_def) apply (erule allE) apply (erule impE) apply force apply (simp) apply (subgoal_tac "y \ \ f12") defer apply (rule removeNones_vol_not_f12) apply simp apply (simp add: is_duplicateEdge_def) apply (subgoal_tac "y \ set (removeNones vol)") apply (rule conjI) apply (rule impI) apply (case_tac "(u, y) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12') apply (rule pre_splitFace_pre_split_face) apply simp apply (rule pre_fdg) apply (case_tac "(y, u) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12) apply (rule pre_splitFace_pre_split_face) apply simp apply (rule pre_fdg) apply simp apply (case_tac "ws = []") apply (frule edges_g'2) apply simp apply (subgoal_tac "pre_split_face f v u []") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet_vs) apply simp apply simp apply simp apply (frule split_face_edges_f21_vs) apply simp apply simp apply simp apply force apply (rule split) apply (subgoal_tac "pre_split_face f v u ws") apply simp apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (frule edges_g'1) apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet) apply simp apply simp apply simp apply (force) apply (frule split_face_edges_f21) apply simp apply simp apply simp apply simp apply (force) apply (rule split) apply (rule 
pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (rule impI) apply (case_tac "(u, y) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12') apply (rule pre_splitFace_pre_split_face) apply simp apply (rule pre_fdg) apply (case_tac "(y, u) \ edges f") apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (frule split_face_edges_or [OF split]) apply simp apply (simp add: removeNones_vol_edges_not_f12) apply (rule pre_splitFace_pre_split_face) apply simp apply (rule pre_fdg) apply simp apply (case_tac "ws = []") apply (frule edges_g'2) apply simp apply (subgoal_tac "pre_split_face f v u []") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet_vs) apply simp apply simp apply simp apply (frule split_face_edges_f21_vs) apply simp apply simp apply simp apply force apply (rule split) apply (subgoal_tac "pre_split_face f v u ws") apply simp apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) apply (frule edges_g'1) apply simp apply (subgoal_tac "pre_split_face f v u ws") apply (subgoal_tac "(f12, f21) = split_face f v u ws") apply (case_tac "between (vertices f) u v = []") apply (frule split_face_edges_f21_bet) apply simp apply simp apply simp apply (force) apply (frule split_face_edges_f21) apply simp apply simp apply simp apply simp apply (force) apply (rule split) apply (rule pre_splitFace_pre_split_face) apply (rule pre_fdg) by simp have u21: "u \ \ f21" by(simp add:f21) from fdg have "\ final f21" by(simp add:splitFace_def split_face_def split_def) with pre_add rule1 rule2 rule3 rule4 rule5 rule6 dist_f21 False dist u21 show ?thesis by (simp_all add: pre_subdivFace'_def l) qed lemma before_filter: "\ ys. filter P xs = ys \ distinct xs \ before ys u v \ before xs u v" + using subst_all subst_all' [simp del] apply (subgoal_tac "P u") apply (subgoal_tac "P v") apply (subgoal_tac "pre_between xs u v") apply (rule ccontr) apply (simp add: before_xor) apply (subgoal_tac "before ys v u") apply (subgoal_tac "\ before ys v u") apply simp apply (rule before_dist_not1) apply force apply simp apply (simp add: before_def) apply (elim exE) apply simp apply (subgoal_tac "a @ u # b @ v # c = filter P aa @ v # filter P ba @ u # filter P ca") apply (intro exI) apply assumption apply simp apply (subgoal_tac "u \ set ys \ v \ set ys \ u \ v") apply (simp add: pre_between_def) apply force apply (subgoal_tac "distinct ys") apply (simp add: before_def) apply (elim exE) apply simp apply force apply (subgoal_tac "v \ set (filter P xs)") apply force apply (simp add: before_def) apply (elim exE) apply simp apply (subgoal_tac "u \ set (filter P xs)") apply force apply (simp add: before_def) apply (elim exE) by simp lemma pre_subdivFace'_Some2: "pre_subdivFace' g f v' v 0 ((Some u) # vol) \ pre_subdivFace' g f v' u 0 vol" apply (cases "vol = []") apply (simp add: pre_subdivFace'_def) apply (cases "u = v'") apply simp apply(rule verticesFrom_in') apply(rule last_in_set) apply(simp add:verticesFrom_Def) apply clarsimp apply (simp add: pre_subdivFace'_def) apply (elim conjE) apply (thin_tac "v' = v \ u \ last (verticesFrom f v') \ v' \ v") apply auto apply(rule verticesFrom_in'[where v = v']) apply(clarsimp simp:before_def) apply simp apply (rule ovl_shorten) apply simp apply (subgoal_tac "[v \ verticesFrom f v' . 
v \ set (removeNones ((Some u) # vol))] = removeNones ((Some u) # vol)") apply assumption apply simp apply (rule before_filter) apply assumption apply simp apply (simp add: before_def) apply (intro exI) apply (subgoal_tac "u # removeNones vol = [] @ u # [] @ hd (removeNones vol) # tl (removeNones vol)") apply assumption apply simp apply (subgoal_tac "removeNones vol \ []") apply simp apply (cases vol rule: rev_exhaust) apply simp_all apply (simp add: invalidVertexList_shorten) apply (simp add: is_duplicateEdge_def) apply (case_tac "vol") apply simp apply simp apply (simp add: invalidVertexList_def) apply (elim allE) apply (rotate_tac -1) apply (erule impE) apply (subgoal_tac "0 < Suc |list|") apply assumption apply simp apply simp by (simp add: is_duplicateEdge_def) lemma pre_subdivFace'_preFaceDiv: "pre_subdivFace' g f v' v n ((Some u) # vol) \ f \ \ g \ (f \ v = u \ n \ 0) \ \ f \ \ g \ pre_splitFace g v u f [countVertices g ..< countVertices g + n]" proof - assume pre_add: "pre_subdivFace' g f v' v n ((Some u) # vol)" and f: "f \ \ g" and nextVert: "(f \ v = u \ n \ 0)" and subset: "\ f \ \ g" have "distinct [countVertices g ..< countVertices g + n]" by (induct n) auto moreover have "\ g \ set [countVertices g ..< countVertices g + n] = {}" apply (cases g) by auto with subset have "\ f \ set [countVertices g ..< countVertices g + n] = {}" by auto moreover from pre_add have "\ f = set (verticesFrom f v')" apply (intro congs_pres_nodes verticesFrom_congs) by (simp add: pre_subdivFace'_def) with pre_add have "help": "v \ \ f \ u \ \ f \ v \ u" apply (simp add: pre_subdivFace'_def before_def) apply (elim conjE exE) apply (subgoal_tac "distinct (verticesFrom f v')") apply force apply (rule verticesFrom_distinct) by simp_all moreover from "help" pre_add nextVert have help1: "is_nextElem (vertices f) v u \ 0 < n" apply auto apply (simp add: nextVertex_def) by (simp add: nextElem_is_nextElem pre_subdivFace'_def) moreover have help2: "before (verticesFrom f v') v u \ distinct (verticesFrom f v') \ v \ v' \ \ is_nextElem (verticesFrom f v') u v" apply (simp add: before_def is_nextElem_def verticesFrom_hd is_sublist_def) apply safe apply (frule dist_at) apply simp apply (thin_tac "verticesFrom f v' = a @ v # b @ u # c") apply (subgoal_tac "verticesFrom f v' = (as @ [u]) @ v # bs") apply assumption apply simp apply (subgoal_tac "distinct (a @ v # b @ u # c)") apply force by simp note pre_add f moreover(* have "\ m. {k. k < m} \ {k. m \ k \ k < (m + n)} = {}" by auto moreover*) from pre_add f help2 help1 "help" have "[countVertices g.. 
(v, u) \ edges f \ (u, v) \ edges f" apply (cases "0 < n") apply (induct g) apply simp+ apply (simp add: pre_subdivFace'_def) apply (rule conjI) apply force apply (simp split: if_split_asm) apply (rule ccontr) apply simp apply (subgoal_tac "v = v'") apply simp apply (elim conjE) apply (simp only:) apply (rule verticesFrom_is_nextElem_last) apply force apply force apply (simp add: verticesFrom_is_nextElem [symmetric]) apply (cases "v = v'") apply simp apply (subgoal_tac "v' \ \ f") apply (thin_tac "u \ \ f") apply (simp add: verticesFrom_is_nextElem) apply (rule ccontr) apply simp apply (subgoal_tac "v' \ \ f") apply (drule verticesFrom_is_nextElem_hd) apply simp+ apply (elim conjE) apply (drule help2) apply simp apply simp apply (subgoal_tac "is_nextElem (vertices f) u v = is_nextElem (verticesFrom f v') u v") apply simp apply (rule verticesFrom_is_nextElem) by simp ultimately show ?thesis apply (simp add: pre_subdivFace'_def) apply (unfold pre_splitFace_def) apply simp apply (cases "0 < n") apply (induct g) apply (simp add: ivl_disj_int) apply (auto simp: invalidVertexList_def is_duplicateEdge_def) done qed lemma pre_subdivFace'_Some1: "pre_subdivFace' g f v' v n ((Some u) # vol) \ f \ \ g \ (f \ v = u \ n \ 0) \ \ f \ \ g \ f21 = fst (snd (splitFace g v u f [countVertices g ..< countVertices g + n])) \ g' = snd (snd (splitFace g v u f [countVertices g ..< countVertices g + n])) \ pre_subdivFace' g' f21 v' u 0 vol" apply (subgoal_tac "pre_splitFace g v u f [countVertices g ..< countVertices g + n]") apply (rule pre_subdivFace'_Some1') apply assumption+ apply (simp) apply (rule pre_subdivFace'_preFaceDiv) by auto end diff --git a/thys/Gaussian_Integers/Gaussian_Integers.thy b/thys/Gaussian_Integers/Gaussian_Integers.thy --- a/thys/Gaussian_Integers/Gaussian_Integers.thy +++ b/thys/Gaussian_Integers/Gaussian_Integers.thy @@ -1,2376 +1,2376 @@ (* File: Gaussian_Integers.thy Author: Manuel Eberl, TU München *) section \Gaussian Integers\ theory Gaussian_Integers imports "HOL-Computational_Algebra.Computational_Algebra" "HOL-Number_Theory.Number_Theory" begin subsection \Auxiliary material\ lemma coprime_iff_prime_factors_disjoint: fixes x y :: "'a :: factorial_semiring" assumes "x \ 0" "y \ 0" shows "coprime x y \ prime_factors x \ prime_factors y = {}" proof assume "coprime x y" have False if "p \ prime_factors x" "p \ prime_factors y" for p proof - from that assms have "p dvd x" "p dvd y" by (auto simp: prime_factors_dvd) with \coprime x y\ have "p dvd 1" using coprime_common_divisor by auto with that assms show False by (auto simp: prime_factors_dvd) qed thus "prime_factors x \ prime_factors y = {}" by auto next assume disjoint: "prime_factors x \ prime_factors y = {}" show "coprime x y" proof (rule coprimeI) fix d assume d: "d dvd x" "d dvd y" show "is_unit d" proof (rule ccontr) assume "\is_unit d" moreover from this and d assms have "d \ 0" by auto ultimately obtain p where p: "prime p" "p dvd d" using prime_divisor_exists by auto with d and assms have "p \ prime_factors x \ prime_factors y" by (auto simp: prime_factors_dvd) with disjoint show False by auto qed qed qed lemma product_dvd_irreducibleD: fixes a b x :: "'a :: algebraic_semidom" assumes "irreducible x" assumes "a * b dvd x" shows "a dvd 1 \ b dvd 1" proof - from assms obtain c where "x = a * b * c" by auto hence "x = a * (b * c)" by (simp add: mult_ac) from irreducibleD[OF assms(1) this] show "a dvd 1 \ b dvd 1" by (auto simp: is_unit_mult_iff) qed lemma prime_elem_mult_dvdI: assumes "prime_elem p" "p dvd c" "b dvd c" "\p 
dvd b" shows "p * b dvd c" proof - from assms(3) obtain a where c: "c = a * b" using mult.commute by blast with assms(2) have "p dvd a * b" by simp with assms have "p dvd a" by (subst (asm) prime_elem_dvd_mult_iff) auto with c show ?thesis by (auto intro: mult_dvd_mono) qed lemma prime_elem_power_mult_dvdI: fixes p :: "'a :: algebraic_semidom" assumes "prime_elem p" "p ^ n dvd c" "b dvd c" "\p dvd b" shows "p ^ n * b dvd c" proof (cases "n = 0") case False from assms(3) obtain a where c: "c = a * b" using mult.commute by blast with assms(2) have "p ^ n dvd b * a" by (simp add: mult_ac) hence "p ^ n dvd a" by (rule prime_power_dvd_multD[OF assms(1)]) (use assms False in auto) with c show ?thesis by (auto intro: mult_dvd_mono) qed (use assms in auto) lemma prime_mod_4_cases: fixes p :: nat assumes "prime p" shows "p = 2 \ [p = 1] (mod 4) \ [p = 3] (mod 4)" proof (cases "p = 2") case False with prime_gt_1_nat[of p] assms have "p > 2" by auto have "\4 dvd p" using assms product_dvd_irreducibleD[of p 2 2] by (auto simp: prime_elem_iff_irreducible simp flip: prime_elem_nat_iff) hence "p mod 4 \ 0" by (auto simp: mod_eq_0_iff_dvd) moreover have "p mod 4 \ 2" proof assume "p mod 4 = 2" hence "p mod 4 mod 2 = 0" by (simp add: cong_def) thus False using \prime p\ \p > 2\ prime_odd_nat[of p] by (auto simp: mod_mod_cancel) qed moreover have "p mod 4 \ {0,1,2,3}" by auto ultimately show ?thesis by (auto simp: cong_def) qed auto lemma of_nat_prod_mset: "of_nat (prod_mset A) = prod_mset (image_mset of_nat A)" by (induction A) auto lemma multiplicity_0_left [simp]: "multiplicity 0 x = 0" by (cases "x = 0") (auto simp: not_dvd_imp_multiplicity_0) lemma is_unit_power [intro]: "is_unit x \ is_unit (x ^ n)" by (subst is_unit_power_iff) auto lemma (in factorial_semiring) pow_divides_pow_iff: assumes "n > 0" shows "a ^ n dvd b ^ n \ a dvd b" proof (cases "b = 0") case False show ?thesis proof assume dvd: "a ^ n dvd b ^ n" with \b \ 0\ have "a \ 0" using \n > 0\ by (auto simp: power_0_left) show "a dvd b" proof (rule multiplicity_le_imp_dvd) fix p :: 'a assume p: "prime p" from dvd \b \ 0\ have "multiplicity p (a ^ n) \ multiplicity p (b ^ n)" by (intro dvd_imp_multiplicity_le) auto thus "multiplicity p a \ multiplicity p b" using p \a \ 0\ \b \ 0\ \n > 0\ by (simp add: prime_elem_multiplicity_power_distrib) qed fact+ qed (auto intro: dvd_power_same) qed (use assms in \auto simp: power_0_left\) lemma multiplicity_power_power: fixes p :: "'a :: {factorial_semiring, algebraic_semidom}" assumes "n > 0" shows "multiplicity (p ^ n) (x ^ n) = multiplicity p x" proof (cases "x = 0 \ p = 0 \ is_unit p") case True thus ?thesis using \n > 0\ by (auto simp: power_0_left is_unit_power_iff multiplicity_unit_left) next case False show ?thesis proof (intro antisym multiplicity_geI) have "(p ^ multiplicity p x) ^ n dvd x ^ n" by (intro dvd_power_same) (simp add: multiplicity_dvd) thus "(p ^ n) ^ multiplicity p x dvd x ^ n" by (simp add: mult_ac flip: power_mult) next have "(p ^ n) ^ multiplicity (p ^ n) (x ^ n) dvd x ^ n" by (simp add: multiplicity_dvd) hence "(p ^ multiplicity (p ^ n) (x ^ n)) ^ n dvd x ^ n" by (simp add: mult_ac flip: power_mult) thus "p ^ multiplicity (p ^ n) (x ^ n) dvd x" by (subst (asm) pow_divides_pow_iff) (use assms in auto) qed (use False \n > 0\ in \auto simp: is_unit_power_iff\) qed lemma even_square_cong_4_int: fixes x :: int assumes "even x" shows "[x ^ 2 = 0] (mod 4)" proof - from assms have "even \x\" by simp hence [simp]: "\x\ mod 2 = 0" by presburger have "(\x\ ^ 2) mod 4 = ((\x\ mod 4) ^ 2) 
mod 4" by (simp add: power_mod) also from assms have "\x\ mod 4 = 0 \ \x\ mod 4 = 2" using mod_double_modulus[of 2 "\x\"] by simp hence "((\x\ mod 4) ^ 2) mod 4 = 0" by auto finally show ?thesis by (simp add: cong_def) qed lemma even_square_cong_4_nat: "even (x::nat) \ [x ^ 2 = 0] (mod 4)" using even_square_cong_4_int[of "int x"] by (auto simp flip: cong_int_iff) lemma odd_square_cong_4_int: fixes x :: int assumes "odd x" shows "[x ^ 2 = 1] (mod 4)" proof - from assms have "odd \x\" by simp hence [simp]: "\x\ mod 2 = 1" by presburger have "(\x\ ^ 2) mod 4 = ((\x\ mod 4) ^ 2) mod 4" by (simp add: power_mod) also from assms have "\x\ mod 4 = 1 \ \x\ mod 4 = 3" using mod_double_modulus[of 2 "\x\"] by simp hence "((\x\ mod 4) ^ 2) mod 4 = 1" by auto finally show ?thesis by (simp add: cong_def) qed lemma odd_square_cong_4_nat: "odd (x::nat) \ [x ^ 2 = 1] (mod 4)" using odd_square_cong_4_int[of "int x"] by (auto simp flip: cong_int_iff) text \ Gaussian integers will require a notion of an element being a power up to a unit, so we introduce this here. This should go in the library eventually. \ definition is_nth_power_upto_unit where "is_nth_power_upto_unit n x \ (\u. is_unit u \ is_nth_power n (u * x))" lemma is_nth_power_upto_unit_base: "is_nth_power n x \ is_nth_power_upto_unit n x" by (auto simp: is_nth_power_upto_unit_def intro: exI[of _ 1]) lemma is_nth_power_upto_unitI: assumes "normalize (x ^ n) = normalize y" shows "is_nth_power_upto_unit n y" proof - from associatedE1[OF assms] obtain u where "is_unit u" "u * y = x ^ n" by metis thus ?thesis by (auto simp: is_nth_power_upto_unit_def intro!: exI[of _ u]) qed lemma is_nth_power_upto_unit_conv_multiplicity: fixes x :: "'a :: factorial_semiring" assumes "n > 0" shows "is_nth_power_upto_unit n x \ (\p. prime p \ n dvd multiplicity p x)" proof (cases "x = 0") case False show ?thesis proof safe fix p :: 'a assume p: "prime p" assume "is_nth_power_upto_unit n x" then obtain u y where uy: "is_unit u" "u * x = y ^ n" by (auto simp: is_nth_power_upto_unit_def elim!: is_nth_powerE) from p uy assms False have [simp]: "y \ 0" by (auto simp: power_0_left) have "multiplicity p (u * x) = multiplicity p (y ^ n)" by (subst uy(2) [symmetric]) simp also have "multiplicity p (u * x) = multiplicity p x" by (simp add: multiplicity_times_unit_right uy(1)) finally show "n dvd multiplicity p x" using False and p and uy and assms by (auto simp: prime_elem_multiplicity_power_distrib) next assume *: "\p. prime p \ n dvd multiplicity p x" have "multiplicity p ((\p\prime_factors x. p ^ (multiplicity p x div n)) ^ n) = multiplicity p x" if "prime p" for p proof - from that and * have "n dvd multiplicity p x" by blast have "multiplicity p x = 0" if "p \ prime_factors x" using that and \prime p\ by (simp add: prime_factors_multiplicity) with that and * and assms show ?thesis unfolding prod_power_distrib power_mult [symmetric] by (subst multiplicity_prod_prime_powers) (auto simp: in_prime_factors_imp_prime elim: dvdE) qed with assms False have "normalize ((\p\prime_factors x. 
p ^ (multiplicity p x div n)) ^ n) = normalize x" by (intro multiplicity_eq_imp_eq) (auto simp: multiplicity_prod_prime_powers) thus "is_nth_power_upto_unit n x" by (auto intro: is_nth_power_upto_unitI) qed qed (use assms in \auto simp: is_nth_power_upto_unit_def\) lemma is_nth_power_upto_unit_0_left [simp, intro]: "is_nth_power_upto_unit 0 x \ is_unit x" proof assume "is_unit x" thus "is_nth_power_upto_unit 0 x" unfolding is_nth_power_upto_unit_def by (intro exI[of _ "1 div x"]) auto next assume "is_nth_power_upto_unit 0 x" then obtain u where "is_unit u" "u * x = 1" by (auto simp: is_nth_power_upto_unit_def) thus "is_unit x" by (metis dvd_triv_right) qed lemma is_nth_power_upto_unit_unit [simp, intro]: assumes "is_unit x" shows "is_nth_power_upto_unit n x" using assms by (auto simp: is_nth_power_upto_unit_def intro!: exI[of _ "1 div x"]) lemma is_nth_power_upto_unit_1_left [simp, intro]: "is_nth_power_upto_unit 1 x" by (auto simp: is_nth_power_upto_unit_def intro: exI[of _ 1]) lemma is_nth_power_upto_unit_mult_coprimeD1: fixes x y :: "'a :: factorial_semiring" assumes "coprime x y" "is_nth_power_upto_unit n (x * y)" shows "is_nth_power_upto_unit n x" proof - consider "n = 0" | "x = 0" "n > 0" | "x \ 0" "y = 0" "n > 0" | "n > 0" "x \ 0" "y \ 0" by force thus ?thesis proof cases assume [simp]: "n = 0" from assms have "is_unit (x * y)" by auto hence "is_unit x" using is_unit_mult_iff by blast thus ?thesis using assms by auto next assume "x = 0" "n > 0" thus ?thesis by (auto simp: is_nth_power_upto_unit_def) next assume *: "x \ 0" "y = 0" "n > 0" with assms show ?thesis by auto next assume *: "n > 0" and [simp]: "x \ 0" "y \ 0" show ?thesis proof (subst is_nth_power_upto_unit_conv_multiplicity[OF \n > 0\]; safe) fix p :: 'a assume p: "prime p" show "n dvd multiplicity p x" proof (cases "p dvd x") case False thus ?thesis by (simp add: not_dvd_imp_multiplicity_0) next case True have "n dvd multiplicity p (x * y)" using assms(2) \n > 0\ p by (auto simp: is_nth_power_upto_unit_conv_multiplicity) also have "\ = multiplicity p x + multiplicity p y" using p by (subst prime_elem_multiplicity_mult_distrib) auto also have "\p dvd y" using \coprime x y\ \p dvd x\ p not_prime_unit coprime_common_divisor by blast hence "multiplicity p y = 0" by (rule not_dvd_imp_multiplicity_0) finally show ?thesis by simp qed qed qed qed lemma is_nth_power_upto_unit_mult_coprimeD2: fixes x y :: "'a :: factorial_semiring" assumes "coprime x y" "is_nth_power_upto_unit n (x * y)" shows "is_nth_power_upto_unit n y" using assms is_nth_power_upto_unit_mult_coprimeD1[of y x] by (simp_all add: mult_ac coprime_commute) subsection \Definition\ text \ Gaussian integers are the ring $\mathbb{Z}[i]$ which is formed either by formally adjoining an element $i$ with $i^2 = -1$ to $\mathbb{Z}$ or by taking all the complex numbers with integer real and imaginary part. 
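In either view, addition is componentwise and multiplication is determined by the relation $i^2 = -1$, i.e.\ $(a + bi) + (c + di) = (a + c) + (b + d)i$ and $(a + bi)(c + di) = (ac - bd) + (ad + bc)i$.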
We define them simply by giving an appropriate ring structure to $\mathbb{Z}^2$, with the first component representing the real part and the second component the imaginary part: \ codatatype gauss_int = Gauss_Int (ReZ: int) (ImZ: int) text \ The following is the imaginary unit $i$ in the Gaussian integers, which we will denote as \\\<^sub>\\: \ primcorec gauss_i where "ReZ gauss_i = 0" | "ImZ gauss_i = 1" lemma gauss_int_eq_iff: "x = y \ ReZ x = ReZ y \ ImZ x = ImZ y" by (cases x; cases y) auto (*<*) bundle gauss_int_notation begin notation gauss_i ("\\<^sub>\") end bundle no_gauss_int_notation begin no_notation (output) gauss_i ("\\<^sub>\") end bundle gauss_int_output_notation begin notation (output) gauss_i ("\") end unbundle gauss_int_notation (*>*) text \ Next, we define the canonical injective homomorphism from the Gaussian integers into the complex numbers: \ primcorec gauss2complex where "Re (gauss2complex z) = of_int (ReZ z)" | "Im (gauss2complex z) = of_int (ImZ z)" declare [[coercion gauss2complex]] lemma gauss2complex_eq_iff [simp]: "gauss2complex z = gauss2complex u \ z = u" by (simp add: complex_eq_iff gauss_int_eq_iff) text \ Gaussian integers also have conjugates, just like complex numbers: \ primcorec gauss_cnj where "ReZ (gauss_cnj z) = ReZ z" | "ImZ (gauss_cnj z) = -ImZ z" text \ In the remainder of this section, we prove that Gaussian integers are a commutative ring of characteristic 0 and several other trivial algebraic properties. \ instantiation gauss_int :: comm_ring_1 begin primcorec zero_gauss_int where "ReZ zero_gauss_int = 0" | "ImZ zero_gauss_int = 0" primcorec one_gauss_int where "ReZ one_gauss_int = 1" | "ImZ one_gauss_int = 0" primcorec uminus_gauss_int where "ReZ (uminus_gauss_int x) = -ReZ x" | "ImZ (uminus_gauss_int x) = -ImZ x" primcorec plus_gauss_int where "ReZ (plus_gauss_int x y) = ReZ x + ReZ y" | "ImZ (plus_gauss_int x y) = ImZ x + ImZ y" primcorec minus_gauss_int where "ReZ (minus_gauss_int x y) = ReZ x - ReZ y" | "ImZ (minus_gauss_int x y) = ImZ x - ImZ y" primcorec times_gauss_int where "ReZ (times_gauss_int x y) = ReZ x * ReZ y - ImZ x * ImZ y" | "ImZ (times_gauss_int x y) = ReZ x * ImZ y + ImZ x * ReZ y" instance by intro_classes (auto simp: gauss_int_eq_iff algebra_simps) end lemma gauss_i_times_i [simp]: "\\<^sub>\ * \\<^sub>\ = (-1 :: gauss_int)" and gauss_cnj_i [simp]: "gauss_cnj \\<^sub>\ = -\\<^sub>\" by (simp_all add: gauss_int_eq_iff) lemma gauss_cnj_eq_0_iff [simp]: "gauss_cnj z = 0 \ z = 0" by (auto simp: gauss_int_eq_iff) lemma gauss_cnj_eq_self: "Im z = 0 \ gauss_cnj z = z" and gauss_cnj_eq_minus_self: "Re z = 0 \ gauss_cnj z = -z" by (auto simp: gauss_int_eq_iff) lemma ReZ_of_nat [simp]: "ReZ (of_nat n) = of_nat n" and ImZ_of_nat [simp]: "ImZ (of_nat n) = 0" by (induction n; simp)+ lemma ReZ_of_int [simp]: "ReZ (of_int n) = n" and ImZ_of_int [simp]: "ImZ (of_int n) = 0" by (induction n; simp)+ lemma ReZ_numeral [simp]: "ReZ (numeral n) = numeral n" and ImZ_numeral [simp]: "ImZ (numeral n) = 0" by (subst of_nat_numeral [symmetric], subst ReZ_of_nat ImZ_of_nat, simp)+ lemma gauss2complex_0 [simp]: "gauss2complex 0 = 0" and gauss2complex_1 [simp]: "gauss2complex 1 = 1" and gauss2complex_i [simp]: "gauss2complex \\<^sub>\ = \" and gauss2complex_add [simp]: "gauss2complex (x + y) = gauss2complex x + gauss2complex y" and gauss2complex_diff [simp]: "gauss2complex (x - y) = gauss2complex x - gauss2complex y" and gauss2complex_mult [simp]: "gauss2complex (x * y) = gauss2complex x * gauss2complex y" and gauss2complex_uminus [simp]: 
"gauss2complex (-x) = -gauss2complex x" and gauss2complex_cnj [simp]: "gauss2complex (gauss_cnj x) = cnj (gauss2complex x)" by (simp_all add: complex_eq_iff) lemma gauss2complex_of_nat [simp]: "gauss2complex (of_nat n) = of_nat n" by (simp add: complex_eq_iff) lemma gauss2complex_eq_0_iff [simp]: "gauss2complex x = 0 \ x = 0" and gauss2complex_eq_1_iff [simp]: "gauss2complex x = 1 \ x = 1" and zero_eq_gauss2complex_iff [simp]: "0 = gauss2complex x \ x = 0" and one_eq_gauss2complex_iff [simp]: "1 = gauss2complex x \ x = 1" by (simp_all add: complex_eq_iff gauss_int_eq_iff) lemma gauss_i_times_gauss_i_times [simp]: "\\<^sub>\ * (\\<^sub>\ * x) = (-x :: gauss_int)" by (subst mult.assoc [symmetric], subst gauss_i_times_i) auto lemma gauss_i_neq_0 [simp]: "\\<^sub>\ \ 0" "0 \ \\<^sub>\" and gauss_i_neq_1 [simp]: "\\<^sub>\ \ 1" "1 \ \\<^sub>\" and gauss_i_neq_of_nat [simp]: "\\<^sub>\ \ of_nat n" "of_nat n \ \\<^sub>\" and gauss_i_neq_of_int [simp]: "\\<^sub>\ \ of_int n" "of_int n \ \\<^sub>\" and gauss_i_neq_numeral [simp]: "\\<^sub>\ \ numeral m" "numeral m \ \\<^sub>\" by (auto simp: gauss_int_eq_iff) lemma gauss_cnj_0 [simp]: "gauss_cnj 0 = 0" and gauss_cnj_1 [simp]: "gauss_cnj 1 = 1" and gauss_cnj_cnj [simp]: "gauss_cnj (gauss_cnj z) = z" and gauss_cnj_uminus [simp]: "gauss_cnj (-a) = -gauss_cnj a" and gauss_cnj_add [simp]: "gauss_cnj (a + b) = gauss_cnj a + gauss_cnj b" and gauss_cnj_diff [simp]: "gauss_cnj (a - b) = gauss_cnj a - gauss_cnj b" and gauss_cnj_mult [simp]: "gauss_cnj (a * b) = gauss_cnj a * gauss_cnj b" and gauss_cnj_of_nat [simp]: "gauss_cnj (of_nat n1) = of_nat n1" and gauss_cnj_of_int [simp]: "gauss_cnj (of_int n2) = of_int n2" and gauss_cnj_numeral [simp]: "gauss_cnj (numeral n3) = numeral n3" by (simp_all add: gauss_int_eq_iff) lemma gauss_cnj_power [simp]: "gauss_cnj (a ^ n) = gauss_cnj a ^ n" by (induction n) auto lemma gauss_cnj_sum [simp]: "gauss_cnj (sum f A) = (\x\A. gauss_cnj (f x))" by (induction A rule: infinite_finite_induct) auto lemma gauss_cnj_prod [simp]: "gauss_cnj (prod f A) = (\x\A. 
gauss_cnj (f x))" by (induction A rule: infinite_finite_induct) auto lemma of_nat_dvd_of_nat: assumes "a dvd b" shows "of_nat a dvd (of_nat b :: 'a :: comm_semiring_1)" using assms by auto lemma of_int_dvd_imp_dvd_gauss_cnj: fixes z :: gauss_int assumes "of_int n dvd z" shows "of_int n dvd gauss_cnj z" proof - from assms obtain u where "z = of_int n * u" by blast hence "gauss_cnj z = of_int n * gauss_cnj u" by simp thus ?thesis by auto qed lemma of_nat_dvd_imp_dvd_gauss_cnj: fixes z :: gauss_int assumes "of_nat n dvd z" shows "of_nat n dvd gauss_cnj z" using of_int_dvd_imp_dvd_gauss_cnj[of "int n"] assms by simp lemma of_int_dvd_of_int_gauss_int_iff: "(of_int m :: gauss_int) dvd of_int n \ m dvd n" proof assume "of_int m dvd (of_int n :: gauss_int)" then obtain a :: gauss_int where "of_int n = of_int m * a" by blast thus "m dvd n" by (auto simp: gauss_int_eq_iff) qed auto lemma of_nat_dvd_of_nat_gauss_int_iff: "(of_nat m :: gauss_int) dvd of_nat n \ m dvd n" using of_int_dvd_of_int_gauss_int_iff[of "int m" "int n"] by simp lemma gauss_cnj_dvd: assumes "a dvd b" shows "gauss_cnj a dvd gauss_cnj b" proof - from assms obtain c where "b = a * c" by blast hence "gauss_cnj b = gauss_cnj a * gauss_cnj c" by simp thus ?thesis by auto qed lemma gauss_cnj_dvd_iff: "gauss_cnj a dvd gauss_cnj b \ a dvd b" using gauss_cnj_dvd[of a b] gauss_cnj_dvd[of "gauss_cnj a" "gauss_cnj b"] by auto lemma gauss_cnj_dvd_left_iff: "gauss_cnj a dvd b \ a dvd gauss_cnj b" by (subst gauss_cnj_dvd_iff [symmetric]) auto lemma gauss_cnj_dvd_right_iff: "a dvd gauss_cnj b \ gauss_cnj a dvd b" by (rule gauss_cnj_dvd_left_iff [symmetric]) instance gauss_int :: idom proof fix z u :: gauss_int assume "z \ 0" "u \ 0" hence "gauss2complex z * gauss2complex u \ 0" by simp also have "gauss2complex z * gauss2complex u = gauss2complex (z * u)" by simp finally show "z * u \ 0" unfolding gauss2complex_eq_0_iff . qed instance gauss_int :: ring_char_0 by intro_classes (auto intro!: injI simp: gauss_int_eq_iff) subsection \Pretty-printing\ text \ The following lemma collection provides better pretty-printing of Gaussian integers so that e.g.\ evaluation with the `value' command produces nicer results. 
\ lemma gauss_int_code_post [code_post]: "Gauss_Int 0 0 = 0" "Gauss_Int 0 1 = \\<^sub>\" "Gauss_Int 0 (-1) = -\\<^sub>\" "Gauss_Int 1 0 = 1" "Gauss_Int 1 1 = 1 + \\<^sub>\" "Gauss_Int 1 (-1) = 1 - \\<^sub>\" "Gauss_Int (-1) 0 = -1" "Gauss_Int (-1) 1 = -1 + \\<^sub>\" "Gauss_Int (-1) (-1) = -1 - \\<^sub>\" "Gauss_Int (numeral b) 0 = numeral b" "Gauss_Int (-numeral b) 0 = -numeral b" "Gauss_Int (numeral b) 1 = numeral b + \\<^sub>\" "Gauss_Int (-numeral b) 1 = -numeral b + \\<^sub>\" "Gauss_Int (numeral b) (-1) = numeral b - \\<^sub>\" "Gauss_Int (-numeral b) (-1) = -numeral b - \\<^sub>\" "Gauss_Int 0 (numeral b) = numeral b * \\<^sub>\" "Gauss_Int 0 (-numeral b) = -numeral b * \\<^sub>\" "Gauss_Int 1 (numeral b) = 1 + numeral b * \\<^sub>\" "Gauss_Int 1 (-numeral b) = 1 - numeral b * \\<^sub>\" "Gauss_Int (-1) (numeral b) = -1 + numeral b * \\<^sub>\" "Gauss_Int (-1) (-numeral b) = -1 - numeral b * \\<^sub>\" "Gauss_Int (numeral a) (numeral b) = numeral a + numeral b * \\<^sub>\" "Gauss_Int (numeral a) (-numeral b) = numeral a - numeral b * \\<^sub>\" "Gauss_Int (-numeral a) (numeral b) = -numeral a + numeral b * \\<^sub>\" "Gauss_Int (-numeral a) (-numeral b) = -numeral a - numeral b * \\<^sub>\" by (simp_all add: gauss_int_eq_iff) value "\\<^sub>\ ^ 3" value "2 * (3 + \\<^sub>\)" value "(2 + \\<^sub>\) * (2 - \\<^sub>\)" subsection \Norm\ text \ The square of the complex norm (or complex modulus) on the Gaussian integers gives us a norm that always returns a natural number. We will later show that this is also a Euclidean norm (in the sense of a Euclidean ring). \ definition gauss_int_norm :: "gauss_int \ nat" where "gauss_int_norm z = nat (ReZ z ^ 2 + ImZ z ^ 2)" lemma gauss_int_norm_0 [simp]: "gauss_int_norm 0 = 0" and gauss_int_norm_1 [simp]: "gauss_int_norm 1 = 1" and gauss_int_norm_i [simp]: "gauss_int_norm \\<^sub>\ = 1" and gauss_int_norm_cnj [simp]: "gauss_int_norm (gauss_cnj z) = gauss_int_norm z" and gauss_int_norm_of_nat [simp]: "gauss_int_norm (of_nat n) = n ^ 2" and gauss_int_norm_of_int [simp]: "gauss_int_norm (of_int m) = nat (m ^ 2)" and gauss_int_norm_of_numeral [simp]: "gauss_int_norm (numeral n') = numeral (Num.sqr n')" by (simp_all add: gauss_int_norm_def nat_power_eq) lemma gauss_int_norm_uminus [simp]: "gauss_int_norm (-z) = gauss_int_norm z" by (simp add: gauss_int_norm_def) lemma gauss_int_norm_eq_0_iff [simp]: "gauss_int_norm z = 0 \ z = 0" proof assume "gauss_int_norm z = 0" hence "ReZ z ^ 2 + ImZ z ^ 2 \ 0" by (simp add: gauss_int_norm_def) moreover have "ReZ z ^ 2 + ImZ z ^ 2 \ 0" by simp ultimately have "ReZ z ^ 2 + ImZ z ^ 2 = 0" by linarith thus "z = 0" by (auto simp: gauss_int_eq_iff) qed auto lemma gauss_int_norm_pos_iff [simp]: "gauss_int_norm z > 0 \ z \ 0" using gauss_int_norm_eq_0_iff[of z] by (auto intro: Nat.gr0I) lemma real_gauss_int_norm: "real (gauss_int_norm z) = norm (gauss2complex z) ^ 2" by (auto simp: cmod_def gauss_int_norm_def) lemma gauss_int_norm_mult: "gauss_int_norm (z * u) = gauss_int_norm z * gauss_int_norm u" proof - have "real (gauss_int_norm (z * u)) = real (gauss_int_norm z * gauss_int_norm u)" unfolding of_nat_mult by (simp add: real_gauss_int_norm norm_power norm_mult power_mult_distrib) thus ?thesis by (subst (asm) of_nat_eq_iff) qed lemma self_mult_gauss_cnj: "z * gauss_cnj z = of_nat (gauss_int_norm z)" by (simp add: gauss_int_norm_def gauss_int_eq_iff algebra_simps power2_eq_square) lemma gauss_cnj_mult_self: "gauss_cnj z * z = of_nat (gauss_int_norm z)" by (subst mult.commute, rule self_mult_gauss_cnj) lemma 
self_plus_gauss_cnj: "z + gauss_cnj z = of_int (2 * ReZ z)" and self_minus_gauss_cnj: "z - gauss_cnj z = of_int (2 * ImZ z) * \\<^sub>\" by (auto simp: gauss_int_eq_iff) lemma gauss_int_norm_dvd_mono: assumes "a dvd b" shows "gauss_int_norm a dvd gauss_int_norm b" proof - from assms obtain c where "b = a * c" by blast hence "gauss_int_norm b = gauss_int_norm (a * c)" by metis thus ?thesis by (simp add: gauss_int_norm_mult) qed text \ A Gaussian integer is a unit iff its norm is 1, and this is the case precisely for the four elements \\1\ and \\\\: \ lemma is_unit_gauss_int_iff: "x dvd 1 \ x \ {1, -1, \\<^sub>\, -\\<^sub>\ :: gauss_int}" and is_unit_gauss_int_iff': "x dvd 1 \ gauss_int_norm x = 1" proof - have "x dvd 1" if "x \ {1, -1, \\<^sub>\, -\\<^sub>\}" proof - from that have *: "x * gauss_cnj x = 1" by (auto simp: gauss_int_norm_def) show "x dvd 1" by (subst * [symmetric]) simp qed moreover have "gauss_int_norm x = 1" if "x dvd 1" using gauss_int_norm_dvd_mono[OF that] by simp moreover have "x \ {1, -1, \\<^sub>\, -\\<^sub>\}" if "gauss_int_norm x = 1" proof - from that have *: "(ReZ x)\<^sup>2 + (ImZ x)\<^sup>2 = 1" by (auto simp: gauss_int_norm_def nat_eq_iff) hence "ReZ x ^ 2 \ 1" and "ImZ x ^ 2 \ 1" using zero_le_power2[of "ImZ x"] zero_le_power2[of "ReZ x"] by linarith+ hence "\ReZ x\ \ 1" and "\ImZ x\ \ 1" by (auto simp: abs_square_le_1) hence "ReZ x \ {-1, 0, 1}" and "ImZ x \ {-1, 0, 1}" by auto thus "x \ {1, -1, \\<^sub>\, -\\<^sub>\ :: gauss_int}" using * by (auto simp: gauss_int_eq_iff) qed ultimately show "x dvd 1 \ x \ {1, -1, \\<^sub>\, -\\<^sub>\ :: gauss_int}" and "x dvd 1 \ gauss_int_norm x = 1" by blast+ qed lemma is_unit_gauss_i [simp, intro]: "(gauss_i :: gauss_int) dvd 1" by (simp add: is_unit_gauss_int_iff) lemma gauss_int_norm_eq_Suc_0_iff: "gauss_int_norm x = Suc 0 \ x dvd 1" by (simp add: is_unit_gauss_int_iff') lemma is_unit_gauss_cnj [intro]: "z dvd 1 \ gauss_cnj z dvd 1" by (simp add: is_unit_gauss_int_iff') lemma is_unit_gauss_cnj_iff [simp]: "gauss_cnj z dvd 1 \ z dvd 1" by (simp add: is_unit_gauss_int_iff') subsection \Division and normalisation\ text \ We define a rounding operation that takes a complex number and returns a Gaussian integer by rounding the real and imaginary parts separately: \ primcorec round_complex :: "complex \ gauss_int" where "ReZ (round_complex z) = round (Re z)" | "ImZ (round_complex z) = round (Im z)" text \ The distance between a rounded complex number and the original one is no more than $\frac{1}{2}\sqrt{2}$: \ lemma norm_round_complex_le: "norm (z - gauss2complex (round_complex z)) ^ 2 \ 1 / 2" proof - have "(Re z - ReZ (round_complex z)) ^ 2 \ (1 / 2) ^ 2" using of_int_round_abs_le[of "Re z"] by (subst abs_le_square_iff [symmetric]) (auto simp: abs_minus_commute) moreover have "(Im z - ImZ (round_complex z)) ^ 2 \ (1 / 2) ^ 2" using of_int_round_abs_le[of "Im z"] by (subst abs_le_square_iff [symmetric]) (auto simp: abs_minus_commute) ultimately have "(Re z - ReZ (round_complex z)) ^ 2 + (Im z - ImZ (round_complex z)) ^ 2 \ (1 / 2) ^ 2 + (1 / 2) ^ 2" by (rule add_mono) thus "norm (z - gauss2complex (round_complex z)) ^ 2 \ 1 / 2" by (simp add: cmod_def power2_eq_square) qed lemma dist_round_complex_le: "dist z (gauss2complex (round_complex z)) \ sqrt 2 / 2" proof - have "dist z (gauss2complex (round_complex z)) ^ 2 = norm (z - gauss2complex (round_complex z)) ^ 2" by (simp add: dist_norm) also have "\ \ 1 / 2" by (rule norm_round_complex_le) also have "\ = (sqrt 2 / 2) ^ 2" by (simp add: power2_eq_square) finally show 
?thesis by (rule power2_le_imp_le) auto qed text \ We can now define division on Gaussian integers simply by performing the division in the complex numbers and rounding the result. This also gives us a remainder operation defined accordingly for which the norm of the remainder is always smaller than the norm of the divisor. We can also define a normalisation operation that returns a canonical representative for each association class. Since the four units of the Gaussian integers are \\1\ and \\\\, each association class (other than \0\) has four representatives, one in each quadrant. We simply define the one in the upper-right quadrant (i.e.\ the one with non-negative imaginary part and positive real part) as the canonical one. Thus, the Gaussian integers form a Euclidean ring. This gives us many things, most importantly the existence of GCDs and LCMs and unique factorisation. \ instantiation gauss_int :: algebraic_semidom begin definition divide_gauss_int :: "gauss_int \ gauss_int \ gauss_int" where "divide_gauss_int a b = round_complex (gauss2complex a / gauss2complex b)" instance proof fix a :: gauss_int show "a div 0 = 0" by (auto simp: gauss_int_eq_iff divide_gauss_int_def) next fix a b :: gauss_int assume "b \ 0" thus "a * b div b = a" by (auto simp: gauss_int_eq_iff divide_gauss_int_def) qed end instantiation gauss_int :: semidom_divide_unit_factor begin definition unit_factor_gauss_int :: "gauss_int \ gauss_int" where "unit_factor_gauss_int z = (if z = 0 then 0 else if ImZ z \ 0 \ ReZ z > 0 then 1 else if ReZ z \ 0 \ ImZ z > 0 then \\<^sub>\ else if ImZ z \ 0 \ ReZ z < 0 then -1 else -\\<^sub>\)" instance proof show "unit_factor (0 :: gauss_int) = 0" by (simp add: unit_factor_gauss_int_def) next fix z :: gauss_int assume "is_unit z" thus "unit_factor z = z" by (subst (asm) is_unit_gauss_int_iff) (auto simp: unit_factor_gauss_int_def) next fix z :: gauss_int assume z: "z \ 0" thus "is_unit (unit_factor z)" by (subst is_unit_gauss_int_iff) (auto simp: unit_factor_gauss_int_def) next fix z u :: gauss_int assume "is_unit z" hence "z \ {1, -1, \\<^sub>\, -\\<^sub>\}" by (subst (asm) is_unit_gauss_int_iff) thus "unit_factor (z * u) = z * unit_factor u" by (safe; auto simp: unit_factor_gauss_int_def gauss_int_eq_iff[of u 0]) qed end instantiation gauss_int :: normalization_semidom begin definition normalize_gauss_int :: "gauss_int \ gauss_int" where "normalize_gauss_int z = (if z = 0 then 0 else if ImZ z \ 0 \ ReZ z > 0 then z else if ReZ z \ 0 \ ImZ z > 0 then -\\<^sub>\ * z else if ImZ z \ 0 \ ReZ z < 0 then -z else \\<^sub>\ * z)" instance proof show "normalize (0 :: gauss_int) = 0" by (simp add: normalize_gauss_int_def) next fix z :: gauss_int show "unit_factor z * normalize z = z" by (auto simp: normalize_gauss_int_def unit_factor_gauss_int_def algebra_simps) qed end lemma normalize_gauss_int_of_nat [simp]: "normalize (of_nat n :: gauss_int) = of_nat n" and normalize_gauss_int_of_int [simp]: "normalize (of_int m :: gauss_int) = of_int \m\" and normalize_gauss_int_of_numeral [simp]: "normalize (numeral n' :: gauss_int) = numeral n'" by (auto simp: normalize_gauss_int_def) lemma normalize_gauss_i [simp]: "normalize \\<^sub>\ = 1" by (simp add: normalize_gauss_int_def) lemma gauss_int_norm_normalize [simp]: "gauss_int_norm (normalize x) = gauss_int_norm x" by (simp add: normalize_gauss_int_def gauss_int_norm_mult) lemma normalized_gauss_int: assumes "normalize z = z" shows "ReZ z \ 0" "ImZ z \ 0" using assms by (cases "ReZ z" "0 :: int" rule: linorder_cases; cases "ImZ z" "0 :: int"
rule: linorder_cases; simp add: normalize_gauss_int_def gauss_int_eq_iff)+ lemma normalized_gauss_int': assumes "normalize z = z" "z \ 0" shows "ReZ z > 0" "ImZ z \ 0" using assms by (cases "ReZ z" "0 :: int" rule: linorder_cases; cases "ImZ z" "0 :: int" rule: linorder_cases; simp add: normalize_gauss_int_def gauss_int_eq_iff)+ lemma normalized_gauss_int_iff: "normalize z = z \ z = 0 \ ReZ z > 0 \ ImZ z \ 0" by (cases "ReZ z" "0 :: int" rule: linorder_cases; cases "ImZ z" "0 :: int" rule: linorder_cases; simp add: normalize_gauss_int_def gauss_int_eq_iff)+ instantiation gauss_int :: idom_modulo begin definition modulo_gauss_int :: "gauss_int \ gauss_int \ gauss_int" where "modulo_gauss_int a b = a - a div b * b" instance proof fix a b :: gauss_int show "a div b * b + a mod b = a" by (simp add: modulo_gauss_int_def) qed end lemma gauss_int_norm_mod_less_aux: assumes [simp]: "b \ 0" shows "2 * gauss_int_norm (a mod b) \ gauss_int_norm b" proof - define a' b' where "a' = gauss2complex a" and "b' = gauss2complex b" have [simp]: "b' \ 0" by (simp add: b'_def) have "gauss_int_norm (a mod b) = norm (gauss2complex (a - round_complex (a' / b') * b)) ^ 2" unfolding modulo_gauss_int_def by (subst real_gauss_int_norm [symmetric]) (auto simp add: divide_gauss_int_def a'_def b'_def) also have "gauss2complex (a - round_complex (a' / b') * b) = a' - gauss2complex (round_complex (a' / b')) * b'" by (simp add: a'_def b'_def) also have "\ = (a' / b' - gauss2complex (round_complex (a' / b'))) * b'" by (simp add: field_simps) also have "norm \ ^ 2 = norm (a' / b' - gauss2complex (round_complex (a' / b'))) ^ 2 * norm b' ^ 2" by (simp add: norm_mult power_mult_distrib) also have "\ \ 1 / 2 * norm b' ^ 2" by (intro mult_right_mono norm_round_complex_le) auto also have "norm b' ^ 2 = gauss_int_norm b" by (simp add: b'_def real_gauss_int_norm) finally show ?thesis by linarith qed lemma gauss_int_norm_mod_less: assumes [simp]: "b \ 0" shows "gauss_int_norm (a mod b) < gauss_int_norm b" proof - have "gauss_int_norm b > 0" by simp thus "gauss_int_norm (a mod b) < gauss_int_norm b" using gauss_int_norm_mod_less_aux[OF assms, of a] by presburger qed lemma gauss_int_norm_dvd_imp_le: assumes "b \ 0" shows "gauss_int_norm a \ gauss_int_norm (a * b)" proof (cases "a = 0") case False thus ?thesis using assms by (intro dvd_imp_le gauss_int_norm_dvd_mono) auto qed auto instantiation gauss_int :: euclidean_ring begin definition euclidean_size_gauss_int :: "gauss_int \ nat" where [simp]: "euclidean_size_gauss_int = gauss_int_norm" instance proof show "euclidean_size (0 :: gauss_int) = 0" by simp next fix a b :: gauss_int assume [simp]: "b \ 0" show "euclidean_size (a mod b) < euclidean_size b" using gauss_int_norm_mod_less[of b a] by simp show "euclidean_size a \ euclidean_size (a * b)" by (simp add: gauss_int_norm_dvd_imp_le) qed end instance gauss_int :: normalization_euclidean_semiring .. 
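text \ To illustrate the Euclidean division and remainder operations defined above with a small worked example: dividing $7$ by $2 + i$ in the complex numbers gives $\frac{7}{2 + i} = \frac{14 - 7i}{5} = \frac{14}{5} - \frac{7}{5}i$, whose real and imaginary parts round to $3$ and $-1$ respectively. Hence $7 = (3 - i)(2 + i) + (-i)$, and the remainder $-i$ has norm $1$, which is indeed smaller than the norm $5$ of the divisor $2 + i$. \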
instantiation gauss_int :: euclidean_ring_gcd begin definition gcd_gauss_int :: "gauss_int \ gauss_int \ gauss_int" where "gcd_gauss_int \ normalization_euclidean_semiring_class.gcd" definition lcm_gauss_int :: "gauss_int \ gauss_int \ gauss_int" where "lcm_gauss_int \ normalization_euclidean_semiring_class.lcm" definition Gcd_gauss_int :: "gauss_int set \ gauss_int" where "Gcd_gauss_int \ normalization_euclidean_semiring_class.Gcd" definition Lcm_gauss_int :: "gauss_int set \ gauss_int" where "Lcm_gauss_int \ normalization_euclidean_semiring_class.Lcm" instance by intro_classes (simp_all add: gcd_gauss_int_def lcm_gauss_int_def Gcd_gauss_int_def Lcm_gauss_int_def) end lemma multiplicity_gauss_cnj: "multiplicity (gauss_cnj a) (gauss_cnj b) = multiplicity a b" unfolding multiplicity_def gauss_cnj_power [symmetric] gauss_cnj_dvd_iff .. lemma multiplicity_gauss_int_of_nat: "multiplicity (of_nat a) (of_nat b :: gauss_int) = multiplicity a b" unfolding multiplicity_def of_nat_power [symmetric] of_nat_dvd_of_nat_gauss_int_iff .. lemma gauss_int_dvd_same_norm_imp_associated: assumes "z1 dvd z2" "gauss_int_norm z1 = gauss_int_norm z2" shows "normalize z1 = normalize z2" proof (cases "z1 = 0") case [simp]: False from assms(1) obtain u where u: "z2 = z1 * u" by blast from assms have "gauss_int_norm u = 1" by (auto simp: gauss_int_norm_mult u) hence "is_unit u" by (simp add: is_unit_gauss_int_iff') with u show ?thesis by simp qed (use assms in auto) lemma gcd_of_int_gauss_int: "gcd (of_int a :: gauss_int) (of_int b) = of_int (gcd a b)" proof (induction "nat \b\" arbitrary: a b rule: less_induct) case (less b a) show ?case proof (cases "b = 0") case False have "of_int (gcd a b) = (of_int (gcd b (a mod b)) :: gauss_int)" by (subst gcd_red_int) auto also have "\ = gcd (of_int b) (of_int (a mod b))" using False by (intro less [symmetric]) (auto intro!: abs_mod_less) also have "a mod b = (a - a div b * b)" by (simp add: minus_div_mult_eq_mod) also have "of_int \ = of_int (-(a div b)) * of_int b + (of_int a :: gauss_int)" by (simp add: algebra_simps) also have "gcd (of_int b) \ = gcd (of_int b) (of_int a)" by (rule gcd_add_mult) finally show ?thesis by (simp add: gcd.commute) qed auto qed lemma coprime_of_int_gauss_int: "coprime (of_int a :: gauss_int) (of_int b) = coprime a b" unfolding coprime_iff_gcd_eq_1 gcd_of_int_gauss_int by auto lemma gcd_of_nat_gauss_int: "gcd (of_nat a :: gauss_int) (of_nat b) = of_nat (gcd a b)" using gcd_of_int_gauss_int[of "int a" "int b"] by simp lemma coprime_of_nat_gauss_int: "coprime (of_nat a :: gauss_int) (of_nat b) = coprime a b" unfolding coprime_iff_gcd_eq_1 gcd_of_nat_gauss_int by auto lemma gauss_cnj_dvd_self_iff: "gauss_cnj z dvd z \ ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" proof assume "gauss_cnj z dvd z" hence "normalize (gauss_cnj z) = normalize z" by (rule gauss_int_dvd_same_norm_imp_associated) auto then obtain u :: gauss_int where "is_unit u" and u: "gauss_cnj z = u * z" using associatedE1 by blast hence "u \ {1, -1, \\<^sub>\, -\\<^sub>\}" by (simp add: is_unit_gauss_int_iff) thus "ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" proof (elim insertE emptyE) assume [simp]: "u = \\<^sub>\" have "ReZ z = ReZ (gauss_cnj z)" by simp also have "gauss_cnj z = \\<^sub>\ * z" using u by simp also have "ReZ \ = -ImZ z" by simp finally show "ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" by auto next assume [simp]: "u = -\\<^sub>\" have "ReZ z = ReZ (gauss_cnj z)" by simp also have "gauss_cnj z = -\\<^sub>\ * z" using u by simp also have "ReZ \ = ImZ z" by simp finally show 
"ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" by auto next assume [simp]: "u = 1" have "ImZ z = -ImZ (gauss_cnj z)" by simp also have "gauss_cnj z = z" using u by simp finally show "ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" by auto next assume [simp]: "u = -1" have "ReZ z = ReZ (gauss_cnj z)" by simp also have "gauss_cnj z = -z" using u by simp also have "ReZ \ = -ReZ z" by simp finally show "ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" by auto qed next assume "ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" thus "gauss_cnj z dvd z" proof safe assume "\ReZ z\ = \ImZ z\" then obtain u :: int where "is_unit u" and u: "ImZ z = u * ReZ z" using associatedE2[of "ReZ z" "ImZ z"] by auto from \is_unit u\ have "u \ {1, -1}" by auto hence "z = gauss_cnj z * (of_int u * \\<^sub>\)" using u by (auto simp: gauss_int_eq_iff) thus ?thesis by (metis dvd_triv_left) qed (auto simp: gauss_cnj_eq_self gauss_cnj_eq_minus_self) qed lemma self_dvd_gauss_cnj_iff: "z dvd gauss_cnj z \ ReZ z = 0 \ ImZ z = 0 \ \ReZ z\ = \ImZ z\" using gauss_cnj_dvd_self_iff[of z] by (subst (asm) gauss_cnj_dvd_left_iff) auto subsection \Prime elements\ text \ Next, we analyse what the prime elements of the Gaussian integers are. First, note that according to the conventions of Isabelle's computational algebra library, a prime element is called a prime iff it is also normalised, i.e.\ in our case it lies in the upper right quadrant. As a first fact, we can show that a Gaussian integer whose norm is \\\-prime must be $\mathbb{Z}[i]$-prime: \ lemma prime_gauss_int_norm_imp_prime_elem: assumes "prime (gauss_int_norm q)" shows "prime_elem q" proof - have "irreducible q" proof (rule irreducibleI) fix a b assume "q = a * b" hence "gauss_int_norm q = gauss_int_norm a * gauss_int_norm b" by (simp_all add: gauss_int_norm_mult) thus "is_unit a \ is_unit b" using assms by (auto dest!: prime_product simp: gauss_int_norm_eq_Suc_0_iff) qed (use assms in \auto simp: is_unit_gauss_int_iff'\) thus "prime_elem q" using irreducible_imp_prime_elem_gcd by blast qed text \ Also, a conjugate is a prime element iff the original element is a prime element: \ lemma prime_elem_gauss_cnj [intro]: "prime_elem z \ prime_elem (gauss_cnj z)" by (auto simp: prime_elem_def gauss_cnj_dvd_left_iff) lemma prime_elem_gauss_cnj_iff [simp]: "prime_elem (gauss_cnj z) \ prime_elem z" using prime_elem_gauss_cnj[of z] prime_elem_gauss_cnj[of "gauss_cnj z"] by auto subsubsection \The factorisation of 2\ text \ 2 factors as $-i (1 + i)^2$ in the Gaussian integers, where $-i$ is a unit and $1 + i$ is prime. \ lemma gauss_int_2_eq: "2 = -\\<^sub>\ * (1 + \\<^sub>\) ^ 2" by (simp add: gauss_int_eq_iff power2_eq_square) lemma prime_elem_one_plus_i_gauss_int: "prime_elem (1 + \\<^sub>\)" by (rule prime_gauss_int_norm_imp_prime_elem) (auto simp: gauss_int_norm_def) lemma prime_one_plus_i_gauss_int: "prime (1 + \\<^sub>\)" by (simp add: prime_def prime_elem_one_plus_i_gauss_int gauss_int_eq_iff normalize_gauss_int_def) lemma prime_factorization_2_gauss_int: "prime_factorization (2 :: gauss_int) = {#1 + \\<^sub>\, 1 + \\<^sub>\#}" proof - have "prime_factorization (2 :: gauss_int) = (prime_factorization (prod_mset {#1 + gauss_i, 1 + gauss_i#}))" by (subst prime_factorization_unique) (auto simp: gauss_int_eq_iff normalize_gauss_int_def) also have "prime_factorization (prod_mset {#1 + gauss_i, 1 + gauss_i#}) = {#1 + gauss_i, 1 + gauss_i#}" using prime_one_plus_i_gauss_int by (subst prime_factorization_prod_mset_primes) auto finally show ?thesis . 
qed subsubsection \Inert primes\ text \ Any \\\-prime congruent 3 modulo 4 is also a Gaussian prime. These primes are called \<^emph>\inert\, because they do not decompose when moving from \\\ to $\mathbb{Z}[i]$. \ lemma gauss_int_norm_not_3_mod_4: "[gauss_int_norm z \ 3] (mod 4)" proof - have A: "ReZ z mod 4 \ {0..3}" "ImZ z mod 4 \ {0..3}" by auto have B: "{0..3} = {0, 1, 2, 3 :: int}" by auto have "[ReZ z ^ 2 + ImZ z ^ 2 = (ReZ z mod 4) ^ 2 + (ImZ z mod 4) ^ 2] (mod 4)" by (intro cong_add cong_pow) (auto simp: cong_def) moreover have "((ReZ z mod 4) ^ 2 + (ImZ z mod 4) ^ 2) mod 4 \ 3 mod 4" using A unfolding B by auto ultimately have "[ReZ z ^ 2 + ImZ z ^ 2 \ 3] (mod 4)" unfolding cong_def by metis hence "[int (nat (ReZ z ^ 2 + ImZ z ^ 2)) \ int 3] (mod (int 4))" by simp thus ?thesis unfolding gauss_int_norm_def by (subst (asm) cong_int_iff) qed lemma prime_elem_gauss_int_of_nat: fixes n :: nat assumes prime: "prime n" and "[n = 3] (mod 4)" shows "prime_elem (of_nat n :: gauss_int)" proof (intro irreducible_imp_prime_elem irreducibleI) from assms show "of_nat n \ (0 :: gauss_int)" by (auto simp: gauss_int_eq_iff) next show "\is_unit (of_nat n :: gauss_int)" using assms by (subst is_unit_gauss_int_iff) (auto simp: gauss_int_eq_iff) next fix a b :: gauss_int assume *: "of_nat n = a * b" hence "gauss_int_norm (a * b) = gauss_int_norm (of_nat n)" by metis hence *: "gauss_int_norm a * gauss_int_norm b = n ^ 2" by (simp add: gauss_int_norm_mult power2_eq_square flip: nat_mult_distrib) from prime_power_mult_nat[OF prime this] obtain i j :: nat where ij: "gauss_int_norm a = n ^ i" "gauss_int_norm b = n ^ j" by blast have "i + j = 2" proof - have "n ^ (i + j) = n ^ 2" using ij * by (simp add: power_add) from prime_power_inj[OF prime this] show ?thesis by simp qed hence "i = 0 \ j = 2 \ i = 1 \ j = 1 \ i = 2 \ j = 0" by auto thus "is_unit a \ is_unit b" proof (elim disjE) assume "i = 1 \ j = 1" with ij have "gauss_int_norm a = n" by auto hence "[gauss_int_norm a = n] (mod 4)" by simp also have "[n = 3] (mod 4)" by fact finally have "[gauss_int_norm a = 3] (mod 4)" . moreover have "[gauss_int_norm a \ 3] (mod 4)" by (rule gauss_int_norm_not_3_mod_4) ultimately show ?thesis by contradiction qed (use ij in \auto simp: is_unit_gauss_int_iff'\) qed theorem prime_gauss_int_of_nat: fixes n :: nat assumes prime: "prime n" and "[n = 3] (mod 4)" shows "prime (of_nat n :: gauss_int)" using prime_elem_gauss_int_of_nat[OF assms] unfolding prime_def by simp subsubsection \Non-inert primes\ text \ Any \\\-prime congruent 1 modulo 4 factors into two conjugate Gaussian primes. 
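For example, $5 = (2 + i)(2 - i)$ and $13 = (3 + 2i)(3 - 2i)$.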
\ lemma minimal_QuadRes_neg1: assumes "QuadRes n (-1)" "n > 1" "odd n" obtains x :: nat where "x \ (n - 1) div 2" and "[x ^ 2 + 1 = 0] (mod n)" proof - from \QuadRes n (-1)\ obtain x where "[x ^ 2 = (-1)] (mod (int n))" by (auto simp: QuadRes_def) hence "[x ^ 2 + 1 = -1 + 1] (mod (int n))" by (intro cong_add) auto also have "x ^ 2 + 1 = int (nat \x\ ^ 2 + 1)" by simp finally have "[int (nat \x\ ^ 2 + 1) = int 0] (mod (int n))" by simp hence "[nat \x\ ^ 2 + 1 = 0] (mod n)" by (subst (asm) cong_int_iff) define x' where "x' = (if nat \x\ mod n \ (n - 1) div 2 then nat \x\ mod n else n - (nat \x\ mod n))" have x'_quadres: "[x' ^ 2 + 1 = 0] (mod n)" proof (cases "nat \x\ mod n \ (n - 1) div 2") case True hence "[x' ^ 2 + 1 = (nat \x\ mod n) ^ 2 + 1] (mod n)" by (simp add: x'_def) also have "[(nat \x\ mod n) ^ 2 + 1 = nat \x\ ^ 2 + 1] (mod n)" by (intro cong_add cong_pow) (auto simp: cong_def) also have "[nat \x\ ^ 2 + 1 = 0] (mod n)" by fact finally show ?thesis . next case False hence "[int (x' ^ 2 + 1) = (int n - int (nat \x\ mod n)) ^ 2 + 1] (mod int n)" using \n > 1\ by (simp add: x'_def of_nat_diff add_ac) also have "[(int n - int (nat \x\ mod n)) ^ 2 + 1 = (0 - int (nat \x\ mod n)) ^ 2 + 1] (mod int n)" by (intro cong_add cong_pow) (auto simp: cong_def) also have "[(0 - int (nat \x\ mod n)) ^ 2 + 1 = int ((nat \x\ mod n) ^ 2 + 1)] (mod (int n))" by (simp add: add_ac) finally have "[x' ^ 2 + 1 = (nat \x\ mod n)\<^sup>2 + 1] (mod n)" by (subst (asm) cong_int_iff) also have "[(nat \x\ mod n)\<^sup>2 + 1 = nat \x\ ^ 2 + 1] (mod n)" by (intro cong_add cong_pow) (auto simp: cong_def) also have "[nat \x\ ^ 2 + 1 = 0] (mod n)" by fact finally show ?thesis . qed moreover have x'_le: "x' \ (n - 1) div 2" using \odd n\ by (auto elim!: oddE simp: x'_def) ultimately show ?thesis by (intro that[of x']) qed text \ Let \p\ be some prime number that is congruent 1 modulo 4. \ locale noninert_gauss_int_prime = fixes p :: nat assumes prime_p: "prime p" and cong_1_p: "[p = 1] (mod 4)" begin lemma p_gt_2: "p > 2" and odd_p: "odd p" proof - from prime_p and cong_1_p have "p > 1" "p \ 2" by (auto simp: prime_gt_Suc_0_nat cong_def) thus "p > 2" by auto with prime_p show "odd p" using primes_dvd_imp_eq two_is_prime_nat by blast qed text \ -1 is a quadratic residue modulo \p\, so there exists some \x\ such that $x^2 + 1$ is divisible by \p\. Moreover, we can choose \x\ such that it is positive and no greater than $\frac{1}{2}(p-1)$: \ lemma minimal_QuadRes_neg1: obtains x where "x > 0" "x \ (p - 1) div 2" "[x ^ 2 + 1 = 0] (mod p)" proof - have "[Legendre (-1) (int p) = (- 1) ^ ((p - 1) div 2)] (mod (int p))" using prime_p p_gt_2 by (intro euler_criterion) auto also have "[p - 1 = 1 - 1] (mod 4)" using p_gt_2 by (intro cong_diff_nat cong_refl) (use cong_1_p in auto) hence "2 * 2 dvd p - 1" by (simp add: cong_0_iff) hence "even ((p - 1) div 2)" using dvd_mult_imp_div by blast hence "(-1) ^ ((p - 1) div 2) = (1 :: int)" by simp finally have "Legendre (-1) (int p) mod p = 1" using p_gt_2 by (auto simp: cong_def) hence "Legendre (-1) (int p) = 1" using p_gt_2 by (auto simp: Legendre_def cong_def zmod_minus1 split: if_splits) hence "QuadRes p (-1)" by (simp add: Legendre_def split: if_splits) from minimal_QuadRes_neg1[OF this] p_gt_2 odd_p obtain x where x: "x \ (p - 1) div 2" "[x ^ 2 + 1 = 0] (mod p)" by auto have "x > 0" using x p_gt_2 by (auto intro!: Nat.gr0I simp: cong_def) from x and this show ?thesis by (intro that[of x]) auto qed text \ We can show from this that \p\ is not prime as a Gaussian integer. 
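For instance, for $p = 5$ one can take $x = 2$: then $p$ divides $x^2 + 1 = 5 = (2 + i)(2 - i)$, but it divides neither of the two factors (their norm is $5$, whereas the norm of any multiple of $5$ is divisible by $25$), so $5$ is not a prime element of $\mathbb{Z}[i]$.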
\ lemma not_prime: "\prime_elem (of_nat p :: gauss_int)" proof assume prime: "prime_elem (of_nat p :: gauss_int)" obtain x where x: "x > 0" "x \ (p - 1) div 2" "[x ^ 2 + 1 = 0] (mod p)" using minimal_QuadRes_neg1 . have "of_nat p dvd (of_nat (x ^ 2 + 1) :: gauss_int)" using x by (intro of_nat_dvd_of_nat) (auto simp: cong_0_iff) also have eq: "of_nat (x ^ 2 + 1) = ((of_nat x + \\<^sub>\) * (of_nat x - \\<^sub>\) :: gauss_int)" using \x > 0\ by (simp add: algebra_simps gauss_int_eq_iff power2_eq_square of_nat_diff) finally have "of_nat p dvd ((of_nat x + \\<^sub>\) * (of_nat x - \\<^sub>\) :: gauss_int)" . from prime and this have "of_nat p dvd (of_nat x + \\<^sub>\ :: gauss_int) \ of_nat p dvd (of_nat x - \\<^sub>\ :: gauss_int)" by (rule prime_elem_dvd_multD) hence dvd: "of_nat p dvd (of_nat x + \\<^sub>\ :: gauss_int)" "of_nat p dvd (of_nat x - \\<^sub>\ :: gauss_int)" by (auto dest: of_nat_dvd_imp_dvd_gauss_cnj) have "of_nat (p ^ 2) = (of_nat p * of_nat p :: gauss_int)" by (simp add: power2_eq_square) also from dvd have "\ dvd ((of_nat x + \\<^sub>\) * (of_nat x - \\<^sub>\))" by (intro mult_dvd_mono) also have "\ = of_nat (x ^ 2 + 1)" by (rule eq [symmetric]) finally have "p ^ 2 dvd (x ^ 2 + 1)" by (subst (asm) of_nat_dvd_of_nat_gauss_int_iff) hence "p ^ 2 \ x ^ 2 + 1" by (intro dvd_imp_le) auto moreover have "p ^ 2 > x ^ 2 + 1" proof - have "x ^ 2 + 1 \ ((p - 1) div 2) ^ 2 + 1" using x by (intro add_mono power_mono) auto also have "\ \ (p - 1) ^ 2 + 1" by auto also have "(p - 1) * (p - 1) < (p - 1) * (p + 1)" using p_gt_2 by (intro mult_strict_left_mono) auto hence "(p - 1) ^ 2 + 1 < p ^ 2" by (simp add: algebra_simps power2_eq_square) finally show ?thesis . qed ultimately show False by linarith qed text \ Any prime factor of \p\ in the Gaussian integers must have norm \p\. \ lemma norm_prime_divisor: fixes q :: gauss_int assumes q: "prime_elem q" "q dvd of_nat p" shows "gauss_int_norm q = p" proof - from assms obtain r where r: "of_nat p = q * r" by auto have "p ^ 2 = gauss_int_norm (of_nat p)" by simp also have "\ = gauss_int_norm q * gauss_int_norm r" by (auto simp: r gauss_int_norm_mult) finally have *: "gauss_int_norm q * gauss_int_norm r = p ^ 2" by simp hence "\i j. gauss_int_norm q = p ^ i \ gauss_int_norm r = p ^ j" using prime_p by (intro prime_power_mult_nat) then obtain i j where ij: "gauss_int_norm q = p ^ i" "gauss_int_norm r = p ^ j" by blast have ij_eq_2: "i + j = 2" proof - from * have "p ^ (i + j) = p ^ 2" by (simp add: power_add ij) thus ?thesis using p_gt_2 by (subst (asm) power_inject_exp) auto qed hence "i = 0 \ j = 2 \ i = 1 \ j = 1 \ i = 2 \ j = 0" by auto hence "i = 1" proof (elim disjE) assume "i = 2 \ j = 0" hence "is_unit r" using ij by (simp add: gauss_int_norm_eq_Suc_0_iff) hence "prime_elem (of_nat p :: gauss_int)" using \prime_elem q\ by (simp add: prime_elem_mult_unit_left r mult.commute[of _ r]) with not_prime show "i = 1" by contradiction qed (use q ij in \auto simp: gauss_int_norm_eq_Suc_0_iff\) thus ?thesis using ij by simp qed text \ We now show two lemmas that characterise the two prime factors of \p\ in the Gaussian integers: they are two conjugates $x\pm iy$ for positive integers \x\ and \y\ such that $x^2 + y^2 = p$. \ lemma prime_divisor_exists: obtains q where "prime q" "prime_elem (gauss_cnj q)" "ReZ q > 0" "ImZ q > 0" "of_nat p = q * gauss_cnj q" "gauss_int_norm q = p" proof - have "\q::gauss_int. 
q dvd of_nat p \ prime q" by (rule prime_divisor_exists) (use prime_p in \auto simp: is_unit_gauss_int_iff'\) then obtain q :: gauss_int where q: "prime q" "q dvd of_nat p" by blast from \prime q\ have [simp]: "q \ 0" by auto have "normalize q = q" using q by simp hence q_signs: "ReZ q > 0" "ImZ q \ 0" by (subst (asm) normalized_gauss_int_iff; simp)+ from q have "gauss_int_norm q = p" using norm_prime_divisor[of q] by simp moreover from this have "gauss_int_norm (gauss_cnj q) = p" by simp hence "prime_elem (gauss_cnj q)" using prime_p by (intro prime_gauss_int_norm_imp_prime_elem) auto moreover have "of_nat p = q * gauss_cnj q" using \gauss_int_norm q = p\ by (simp add: self_mult_gauss_cnj) moreover have "ImZ q \ 0" proof assume [simp]: "ImZ q = 0" define m where "m = nat (ReZ q)" have [simp]: "q = of_nat m" using q_signs by (auto simp: gauss_int_eq_iff m_def) with q have "m dvd p" by (simp add: of_nat_dvd_of_nat_gauss_int_iff) with prime_p have "m = 1 \ m = p" using prime_nat_iff by blast with q show False using not_prime by auto qed with q_signs have "ImZ q > 0" by simp ultimately show ?thesis using q q_signs by (intro that[of q]) qed theorem prime_factorization: obtains q1 q2 where "prime q1" "prime q2" "prime_factorization (of_nat p) = {#q1, q2#}" "gauss_int_norm q1 = p" "gauss_int_norm q2 = p" "q2 = \\<^sub>\ * gauss_cnj q1" "ReZ q1 > 0" "ImZ q1 > 0" "ReZ q1 > 0" "ImZ q2 > 0" proof - obtain q where q: "prime q" "prime_elem (gauss_cnj q)" "ReZ q > 0" "ImZ q > 0" "of_nat p = q * gauss_cnj q" "gauss_int_norm q = p" using prime_divisor_exists by metis from \prime q\ have [simp]: "q \ 0" by auto define q' where "q' = normalize (gauss_cnj q)" have "prime_factorization (of_nat p) = prime_factorization (prod_mset {#q, q'#})" by (subst prime_factorization_unique) (auto simp: q q'_def) also have "\ = {#q, q'#}" using q by (subst prime_factorization_prod_mset_primes) (auto simp: q'_def) finally have "prime_factorization (of_nat p) = {#q, q'#}" . moreover have "q' = \\<^sub>\ * gauss_cnj q" using q by (auto simp: normalize_gauss_int_def q'_def) moreover have "prime q'" using q by (auto simp: q'_def) ultimately show ?thesis using q by (intro that[of q q']) (auto simp: q'_def gauss_int_norm_mult) qed end text \ In particular, a consequence of this is that any prime congruent 1 modulo 4 can be written as a sum of squares of positive integers. \ lemma prime_cong_1_mod_4_gauss_int_norm_exists: fixes p :: nat assumes "prime p" "[p = 1] (mod 4)" shows "\z. gauss_int_norm z = p \ ReZ z > 0 \ ImZ z > 0" proof - from assms interpret noninert_gauss_int_prime p by unfold_locales from prime_divisor_exists obtain q where q: "prime q" "of_nat p = q * gauss_cnj q" "ReZ q > 0" "ImZ q > 0" "gauss_int_norm q = p" by metis have "p = gauss_int_norm q" using q by simp thus ?thesis using q by blast qed subsubsection \Full classification of Gaussian primes\ text \ Any prime in the ring of Gaussian integers is of the form \<^item> \1 + \\<^sub>\\ \<^item> \p\ where \p \ \\ is prime in \\\ and congruent 3 modulo 4 \<^item> $x + iy$ where $x,y$ are positive integers and $x^2 + y^2$ is a prime congruent 1 modulo 4 or an associated element of one of these.
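For instance, $3$, $7$, and $11$ are Gaussian primes of the second kind, while $2 + i$ and $4 + i$, whose norms $5$ and $17$ are primes congruent 1 modulo 4, are of the third kind; on the other hand, $2 = -i \cdot (1 + i)^2$ is not a Gaussian prime.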
\ theorem gauss_int_prime_classification: fixes x :: gauss_int assumes "prime x" obtains (one_plus_i) "x = 1 + \\<^sub>\" | (cong_3_mod_4) p where "x = of_nat p" "prime p" "[p = 3] (mod 4)" | (cong_1_mod_4) "prime (gauss_int_norm x)" "[gauss_int_norm x = 1] (mod 4)" "ReZ x > 0" "ImZ x > 0" "ReZ x \ ImZ x" proof - define N where "N = gauss_int_norm x" have "x dvd x * gauss_cnj x" by simp also have "\ = of_nat (gauss_int_norm x)" by (simp add: self_mult_gauss_cnj) finally have "x \ prime_factors (of_nat N)" using assms by (auto simp: in_prime_factors_iff N_def) also have "N = prod_mset (prime_factorization N)" using assms unfolding N_def by (subst prod_mset_prime_factorization_nat) auto also have "(of_nat \ :: gauss_int) = prod_mset (image_mset of_nat (prime_factorization N))" by (subst of_nat_prod_mset) auto also have "prime_factors \ = (\p\prime_factors N. prime_factors (of_nat p))" by (subst prime_factorization_prod_mset) auto finally obtain p where p: "p \ prime_factors N" "x \ prime_factors (of_nat p)" by auto have "prime p" using p by auto hence "\(2 * 2) dvd p" using product_dvd_irreducibleD[of p 2 2] by (auto simp flip: prime_elem_iff_irreducible) hence "[p \ 0] (mod 4)" using p by (auto simp: cong_0_iff in_prime_factors_iff) hence "p mod 4 \ {1,2,3}" by (auto simp: cong_def) thus ?thesis proof (elim singletonE insertE) assume "p mod 4 = 2" hence "p mod 4 mod 2 = 0" by simp hence "p mod 2 = 0" by (simp add: mod_mod_cancel) with \prime p\ have [simp]: "p = 2" using prime_prime_factor two_is_prime_nat by blast have "prime_factors (of_nat p) = {1 + \\<^sub>\ :: gauss_int}" by (simp add: prime_factorization_2_gauss_int) with p show ?thesis using that(1) by auto next assume *: "p mod 4 = 3" hence "prime_factors (of_nat p) = {of_nat p :: gauss_int}" using prime_gauss_int_of_nat[of p] \prime p\ by (subst prime_factorization_prime) (auto simp: cong_def) with p show ?thesis using that(2)[of p] * by (auto simp: cong_def) next assume *: "p mod 4 = 1" then interpret noninert_gauss_int_prime p by unfold_locales (use \prime p\ in \auto simp: cong_def\) obtain q1 q2 :: gauss_int where q12: "prime q1" "prime q2" "prime_factorization (of_nat p) = {#q1, q2#}" "gauss_int_norm q1 = p" "gauss_int_norm q2 = p" "q2 = \\<^sub>\ * gauss_cnj q1" "ReZ q1 > 0" "ImZ q1 > 0" "ReZ q1 > 0" "ImZ q2 > 0" using prime_factorization by metis from p q12 have "x = q1 \ x = q2" by auto with q12 have **: "gauss_int_norm x = p" "ReZ x > 0" "ImZ x > 0" by auto have "ReZ x \ ImZ x" proof assume "ReZ x = ImZ x" hence "even (gauss_int_norm x)" by (auto simp: gauss_int_norm_def nat_mult_distrib) hence "even p" using \gauss_int_norm x = p\ by simp with \p mod 4 = 1\ show False by presburger qed thus ?thesis using that(3) \prime p\ * ** by (simp add: cong_def) qed qed lemma prime_gauss_int_norm_squareD: fixes z :: gauss_int assumes "prime z" "gauss_int_norm z = p ^ 2" shows "prime p \ z = of_nat p" using assms(1) proof (cases rule: gauss_int_prime_classification) case one_plus_i have "prime (2 :: nat)" by simp also from one_plus_i have "2 = p ^ 2" using assms(2) by (auto simp: gauss_int_norm_def) finally show ?thesis by (simp add: prime_power_iff) next case (cong_3_mod_4 p) thus ?thesis using assms by auto next case cong_1_mod_4 with assms show ?thesis by (auto simp: prime_power_iff) qed lemma gauss_int_norm_eq_prime_squareD: assumes "prime p" and "[p = 3] (mod 4)" and "gauss_int_norm z = p ^ 2" shows "normalize z = of_nat p" and "prime_elem z" proof - have "\q::gauss_int. 
q dvd z \ prime q" by (rule prime_divisor_exists) (use assms in \auto simp: is_unit_gauss_int_iff'\) then obtain q :: gauss_int where q: "q dvd z" "prime q" by blast have "gauss_int_norm q dvd gauss_int_norm z" by (rule gauss_int_norm_dvd_mono) fact also have "\ = p ^ 2" by fact finally obtain i where i: "i \ 2" "gauss_int_norm q = p ^ i" by (subst (asm) divides_primepow_nat) (use assms q in auto) from i assms q have "i \ 0" by (auto intro!: Nat.gr0I simp: gauss_int_norm_eq_Suc_0_iff) moreover from i assms q have "i \ 1" using gauss_int_norm_not_3_mod_4[of q] by auto ultimately have "i = 2" using i by auto with i have "gauss_int_norm q = p ^ 2" by auto hence [simp]: "q = of_nat p" using prime_gauss_int_norm_squareD[of q p] q by auto have "normalize (of_nat p) = normalize z" using q assms by (intro gauss_int_dvd_same_norm_imp_associated) auto thus *: "normalize z = of_nat p" by simp have "prime (normalize z)" using prime_gauss_int_of_nat[of p] assms by (subst *) auto thus "prime_elem z" by simp qed text \ The following can be used as a primality test for Gaussian integers. It effectively reduces checking the primality of a Gaussian integer to checking the primality of an integer. A Gaussian integer is prime if and only if its norm is either a \\\-prime or the square of a \\\-prime that is congruent 3 modulo 4. \ lemma prime_elem_gauss_int_iff: fixes z :: gauss_int defines "n \ gauss_int_norm z" shows "prime_elem z \ prime n \ (\p. n = p ^ 2 \ prime p \ [p = 3] (mod 4))" proof assume "prime n \ (\p. n = p ^ 2 \ prime p \ [p = 3] (mod 4))" thus "prime_elem z" by (auto intro: gauss_int_norm_eq_prime_squareD(2) prime_gauss_int_norm_imp_prime_elem simp: n_def) next assume "prime_elem z" hence "prime (normalize z)" by simp thus "prime n \ (\p. n = p ^ 2 \ prime p \ [p = 3] (mod 4))" proof (cases rule: gauss_int_prime_classification) case one_plus_i have "n = gauss_int_norm (normalize z)" by (simp add: n_def) also have "normalize z = 1 + \\<^sub>\" by fact also have "gauss_int_norm \ = 2" by (simp add: gauss_int_norm_def) finally show ?thesis by simp next case (cong_3_mod_4 p) have "n = gauss_int_norm (normalize z)" by (simp add: n_def) also have "normalize z = of_nat p" by fact also have "gauss_int_norm \ = p ^ 2" by simp finally show ?thesis using cong_3_mod_4 by simp next case cong_1_mod_4 thus ?thesis by (simp add: n_def) qed qed subsubsection \Multiplicities of primes\ text \ In this section, we will show some results connecting the multiplicity of a Gaussian prime \p\ in a Gaussian integer \z\ to the \\\-multiplicity of the norm of \p\ in the norm of \z\. \ text \ The multiplicity of the Gaussian prime \<^term>\1 + \\<^sub>\\ in an integer \c\ is simply twice the \\\-multiplicity of 2 in \c\: \ lemma multiplicity_prime_1_plus_i_aux: "multiplicity (1 + \\<^sub>\) (of_nat c) = 2 * multiplicity 2 c" proof (cases "c = 0") case [simp]: False have "2 * multiplicity 2 c = multiplicity 2 (c ^ 2)" by (simp add: prime_elem_multiplicity_power_distrib) also have "multiplicity 2 (c ^ 2) = multiplicity (of_nat 2) (of_nat c ^ 2 :: gauss_int)" by (simp flip: multiplicity_gauss_int_of_nat) also have "of_nat 2 = (-\\<^sub>\) * (1 + \\<^sub>\) ^ 2" by (simp add: algebra_simps power2_eq_square) also have "multiplicity \ (of_nat c ^ 2) = multiplicity ((1 + \\<^sub>\) ^ 2) (of_nat c ^ 2)" by (subst multiplicity_times_unit_left) auto also have "\ = multiplicity (1 + \\<^sub>\) (of_nat c)" by (subst multiplicity_power_power) auto finally show ?thesis ..
qed auto text \ The multiplicity of an inert Gaussian prime $q\in\mathbb{Z}$ in a Gaussian integer \z\ is precisely half the \\\-multiplicity of \q\ in the norm of \z\. \ lemma multiplicity_prime_cong_3_mod_4: assumes "prime (of_nat q :: gauss_int)" shows "multiplicity q (gauss_int_norm z) = 2 * multiplicity (of_nat q) z" proof (cases "z = 0") case [simp]: False have "multiplicity q (gauss_int_norm z) = multiplicity (of_nat q) (of_nat (gauss_int_norm z) :: gauss_int)" by (simp add: multiplicity_gauss_int_of_nat) also have "\ = multiplicity (of_nat q) (z * gauss_cnj z)" by (simp add: self_mult_gauss_cnj) also have "\ = multiplicity (of_nat q) z + multiplicity (gauss_cnj (of_nat q)) (gauss_cnj z)" using assms by (subst prime_elem_multiplicity_mult_distrib) auto also have "multiplicity (gauss_cnj (of_nat q)) (gauss_cnj z) = multiplicity (of_nat q) z" by (subst multiplicity_gauss_cnj) auto also have "\ + \ = 2 * \" by simp finally show ?thesis . qed auto text \ For Gaussian primes \p\ whose norm is congruent 1 modulo 4, the $\mathbb{Z}[i]$-multiplicity of \p\ in an integer \c\ is just the \\\-multiplicity of their norm in \c\. \ lemma multiplicity_prime_cong_1_mod_4_aux: fixes p :: gauss_int assumes "prime_elem p" "ReZ p > 0" "ImZ p > 0" "ImZ p \ ReZ p" shows "multiplicity p (of_nat c) = multiplicity (gauss_int_norm p) c" proof (cases "c = 0") case [simp]: False show ?thesis proof (intro antisym multiplicity_geI) define k where "k = multiplicity p (of_nat c)" have "p ^ k dvd of_nat c" by (simp add: multiplicity_dvd k_def) moreover have "gauss_cnj p ^ k dvd of_nat c" using multiplicity_dvd[of "gauss_cnj p" "of_nat c"] multiplicity_gauss_cnj[of p "of_nat c"] by (simp add: k_def) moreover have "\p dvd gauss_cnj p" using assms by (subst self_dvd_gauss_cnj_iff) auto hence "\p dvd gauss_cnj p ^ k" using assms prime_elem_dvd_power by blast ultimately have "p ^ k * gauss_cnj p ^ k dvd of_nat c" using assms by (intro prime_elem_power_mult_dvdI) auto also have "p ^ k * gauss_cnj p ^ k = of_nat (gauss_int_norm p ^ k)" by (simp flip: self_mult_gauss_cnj add: power_mult_distrib) finally show "gauss_int_norm p ^ k dvd c" by (subst (asm) of_nat_dvd_of_nat_gauss_int_iff) next define k where "k = multiplicity (gauss_int_norm p) c" have "p ^ k dvd (p * gauss_cnj p) ^ k" by (intro dvd_power_same) auto also have "\ = of_nat (gauss_int_norm p ^ k)" by (simp add: self_mult_gauss_cnj) also have "\ dvd of_nat c" unfolding of_nat_dvd_of_nat_gauss_int_iff by (auto simp: k_def multiplicity_dvd) finally show "p ^ k dvd of_nat c" .
qed (use assms in \auto simp: gauss_int_norm_eq_Suc_0_iff\) qed auto text \ The multiplicity of a Gaussian prime with norm congruent 1 modulo 4 in some Gaussian integer \z\ and the multiplicity of its conjugate in \z\ sum to the \\\-multiplicity of their norm in the norm of \z\: \ lemma multiplicity_prime_cong_1_mod_4: fixes p :: gauss_int assumes "prime_elem p" "ReZ p > 0" "ImZ p > 0" "ImZ p \ ReZ p" shows "multiplicity (gauss_int_norm p) (gauss_int_norm z) = multiplicity p z + multiplicity (gauss_cnj p) z" proof (cases "z = 0") case [simp]: False have "multiplicity (gauss_int_norm p) (gauss_int_norm z) = multiplicity p (of_nat (gauss_int_norm z))" using assms by (subst multiplicity_prime_cong_1_mod_4_aux) auto also have "\ = multiplicity p (z * gauss_cnj z)" by (simp add: self_mult_gauss_cnj) also have "\ = multiplicity p z + multiplicity p (gauss_cnj z)" using assms by (subst prime_elem_multiplicity_mult_distrib) auto also have "multiplicity p (gauss_cnj z) = multiplicity (gauss_cnj p) z" by (subst multiplicity_gauss_cnj [symmetric]) auto finally show ?thesis . qed auto text \ The multiplicity of the Gaussian prime \<^term>\1 + \\<^sub>\\ in a Gaussian integer \z\ is precisely the \\\-multiplicity of 2 in the norm of \z\: \ lemma multiplicity_prime_1_plus_i: "multiplicity (1 + \\<^sub>\) z = multiplicity 2 (gauss_int_norm z)" proof (cases "z = 0") case [simp]: False note [simp] = prime_elem_one_plus_i_gauss_int have "2 * multiplicity 2 (gauss_int_norm z) = multiplicity (1 + \\<^sub>\) (of_nat (gauss_int_norm z))" by (rule multiplicity_prime_1_plus_i_aux [symmetric]) also have "\ = multiplicity (1 + \\<^sub>\) (z * gauss_cnj z)" by (simp add: self_mult_gauss_cnj) also have "\ = multiplicity (1 + \\<^sub>\) z + multiplicity (gauss_cnj (1 - \\<^sub>\)) (gauss_cnj z)" by (subst prime_elem_multiplicity_mult_distrib) auto also have "multiplicity (gauss_cnj (1 - \\<^sub>\)) (gauss_cnj z) = multiplicity (1 - \\<^sub>\) z" by (subst multiplicity_gauss_cnj) auto also have "1 - \\<^sub>\ = (-\\<^sub>\) * (1 + \\<^sub>\)" by (simp add: algebra_simps) also have "multiplicity \ z = multiplicity (1 + \\<^sub>\) z" by (subst multiplicity_times_unit_left) auto also have "\ + \ = 2 * \" by simp finally show ?thesis by simp qed auto subsection \Coprimality of an element and its conjugate\ text \ Using the classification of the primes, we now show that if the real and imaginary parts of a Gaussian integer are coprime and its norm is odd, then it is coprime to its own conjugate. \ lemma coprime_self_gauss_cnj: assumes "coprime (ReZ z) (ImZ z)" and "odd (gauss_int_norm z)" shows "coprime z (gauss_cnj z)" proof (rule coprimeI) fix d assume "d dvd z" "d dvd gauss_cnj z" have *: False if "p \ prime_factors z" "p \ prime_factors (gauss_cnj z)" for p proof - from that have p: "prime p" "p dvd z" "p dvd gauss_cnj z" by auto define p' where "p' = gauss_cnj p" define d where "d = gauss_int_norm p" have of_nat_d_eq: "of_nat d = p * p'" by (simp add: p'_def self_mult_gauss_cnj d_def) have "prime_elem p" "prime_elem p'" "p dvd z" "p' dvd z" "p dvd gauss_cnj z" "p' dvd gauss_cnj z" using that by (auto simp: in_prime_factors_iff p'_def gauss_cnj_dvd_left_iff) have "prime p" using that by auto then obtain q where q: "prime q" "of_nat q dvd z" proof (cases rule: gauss_int_prime_classification) case one_plus_i hence "2 = gauss_int_norm p" by (auto simp: gauss_int_norm_def) also have "gauss_int_norm p dvd gauss_int_norm z" using p by (intro gauss_int_norm_dvd_mono) auto finally have "even (gauss_int_norm z)" .
with \odd (gauss_int_norm z)\ show ?thesis by contradiction next case (cong_3_mod_4 q) thus ?thesis using that[of q] p by simp next case cong_1_mod_4 hence "\p dvd p'" unfolding p'_def by (subst self_dvd_gauss_cnj_iff) auto hence "p * p' dvd z" using p by (intro prime_elem_mult_dvdI) (auto simp: p'_def gauss_cnj_dvd_left_iff) also have "p * p' = of_nat (gauss_int_norm p)" by (simp add: p'_def self_mult_gauss_cnj) finally show ?thesis using that[of "gauss_int_norm p"] cong_1_mod_4 by simp qed have "of_nat q dvd gcd (2 * of_int (ReZ z)) (2 * \\<^sub>\ * of_int (ImZ z))" proof (rule gcd_greatest) have "of_nat q dvd (z + gauss_cnj z)" using q by (auto simp: gauss_cnj_dvd_right_iff) also have "\ = 2 * of_int (ReZ z)" by (simp add: self_plus_gauss_cnj) finally show "of_nat q dvd (2 * of_int (ReZ z) :: gauss_int)" . next have "of_nat q dvd (z - gauss_cnj z)" using q by (auto simp: gauss_cnj_dvd_right_iff) also have "\ = 2 * \\<^sub>\ * of_int (ImZ z)" by (simp add: self_minus_gauss_cnj) finally show "of_nat q dvd (2 * \\<^sub>\ * of_int (ImZ z))" . qed also have "\ = 2" proof - have "odd (ReZ z) \ odd (ImZ z)" using assms by (auto simp: gauss_int_norm_def even_nat_iff) thus ?thesis proof assume "odd (ReZ z)" hence "coprime (of_int (ReZ z)) (of_int 2 :: gauss_int)" unfolding coprime_of_int_gauss_int coprime_right_2_iff_odd . thus ?thesis using assms by (subst gcd_mult_left_right_cancel) (auto simp: coprime_of_int_gauss_int coprime_commute is_unit_left_imp_coprime is_unit_right_imp_coprime gcd_proj1_if_dvd gcd_proj2_if_dvd) next assume "odd (ImZ z)" hence "coprime (of_int (ImZ z)) (of_int 2 :: gauss_int)" unfolding coprime_of_int_gauss_int coprime_right_2_iff_odd . hence "gcd (2 * of_int (ReZ z)) (2 * \\<^sub>\ * of_int (ImZ z)) = gcd (2 * of_int (ReZ z)) (2 * \\<^sub>\)" using assms by (subst gcd_mult_right_right_cancel) (auto simp: coprime_of_int_gauss_int coprime_commute is_unit_left_imp_coprime is_unit_right_imp_coprime) also have "\ = normalize (2 * gcd (of_int (ReZ z)) \\<^sub>\)" by (subst gcd_mult_left) auto also have "gcd (of_int (ReZ z)) \\<^sub>\ = 1" by (subst coprime_iff_gcd_eq_1 [symmetric], rule is_unit_right_imp_coprime) auto finally show ?thesis by simp qed qed finally have "of_nat q dvd (of_nat 2 :: gauss_int)" by simp hence "q dvd 2" by (simp only: of_nat_dvd_of_nat_gauss_int_iff) with \prime q\ have "q = 2" using primes_dvd_imp_eq two_is_prime_nat by blast with q have "2 dvd z" by auto have "2 dvd gauss_int_norm 2" by simp also have "\ dvd gauss_int_norm z" using \2 dvd z\ by (intro gauss_int_norm_dvd_mono) finally show False using \odd (gauss_int_norm z)\ by contradiction qed fix d :: gauss_int assume d: "d dvd z" "d dvd gauss_cnj z" show "is_unit d" proof (rule ccontr) assume "\is_unit d" moreover from d assms have "d \ 0" by auto ultimately obtain p where p: "prime p" "p dvd d" using prime_divisorE by blast with d have "p \ prime_factors z" "p \ prime_factors (gauss_cnj z)" using assms by (auto simp: in_prime_factors_iff) with *[of p] show False by blast qed qed subsection \Square decompositions of prime numbers congruent 1 mod 4\ lemma prime_1_mod_4_sum_of_squares_unique_aux: fixes p x y :: nat assumes "prime p" "[p = 1] (mod 4)" "x ^ 2 + y ^ 2 = p" shows "x > 0 \ y > 0 \ x \ y" proof safe from assms show "x > 0" "y > 0" by (auto intro!: Nat.gr0I simp: prime_power_iff) next assume "x = y" with assms have "p = 2 * x ^ 2" by simp with \prime p\ have "p = 2" by (auto dest: prime_product) with \[p = 1] (mod 4)\ show False by (simp add: cong_def) qed text \ Any prime number 
congruent 1 modulo 4 can be written \<^emph>\uniquely\ as a sum of two squares $x^2 + y^2$ (up to commutativity of the addition). Additionally, we have shown above that \x\ and \y\ are both positive and \x \ y\. \ lemma prime_1_mod_4_sum_of_squares_unique: fixes p :: nat assumes "prime p" "[p = 1] (mod 4)" shows "\!(x,y). x \ y \ x ^ 2 + y ^ 2 = p" proof (rule ex_ex1I) obtain z where z: "gauss_int_norm z = p" using prime_cong_1_mod_4_gauss_int_norm_exists[OF assms] by blast show "\z. case z of (x,y) \ x \ y \ x ^ 2 + y ^ 2 = p" proof (cases "\ReZ z\ \ \ImZ z\") case True with z show ?thesis by (intro exI[of _ "(nat \ReZ z\, nat \ImZ z\)"]) (auto simp: gauss_int_norm_def nat_add_distrib simp flip: nat_power_eq) next case False with z show ?thesis by (intro exI[of _ "(nat \ImZ z\, nat \ReZ z\)"]) (auto simp: gauss_int_norm_def nat_add_distrib simp flip: nat_power_eq) qed next fix z1 z2 assume z1: "case z1 of (x, y) \ x \ y \ x\<^sup>2 + y\<^sup>2 = p" assume z2: "case z2 of (x, y) \ x \ y \ x\<^sup>2 + y\<^sup>2 = p" define z1' :: gauss_int where "z1' = of_nat (fst z1) + \\<^sub>\ * of_nat (snd z1)" define z2' :: gauss_int where "z2' = of_nat (fst z2) + \\<^sub>\ * of_nat (snd z2)" from assms interpret noninert_gauss_int_prime p by unfold_locales auto have norm_z1': "gauss_int_norm z1' = p" using z1 by (simp add: z1'_def gauss_int_norm_def case_prod_unfold nat_add_distrib nat_power_eq) have norm_z2': "gauss_int_norm z2' = p" using z2 by (simp add: z2'_def gauss_int_norm_def case_prod_unfold nat_add_distrib nat_power_eq) have sgns: "fst z1 > 0" "snd z1 > 0" "fst z2 > 0" "snd z2 > 0" "fst z1 \ snd z1" "fst z2 \ snd z2" using prime_1_mod_4_sum_of_squares_unique_aux[OF assms, of "fst z1" "snd z1"] z1 prime_1_mod_4_sum_of_squares_unique_aux[OF assms, of "fst z2" "snd z2"] z2 by auto have [simp]: "normalize z1' = z1'" "normalize z2' = z2'" using sgns by (subst normalized_gauss_int_iff; simp add: z1'_def z2'_def)+ have "prime z1'" "prime z2'" using norm_z1' norm_z2' assms unfolding prime_def by (auto simp: prime_gauss_int_norm_imp_prime_elem) have "of_nat p = z1' * gauss_cnj z1'" by (simp add: self_mult_gauss_cnj norm_z1') hence "z1' dvd of_nat p" by simp also have "of_nat p = z2' * gauss_cnj z2'" by (simp add: self_mult_gauss_cnj norm_z2') finally have "z1' dvd z2' \ z1' dvd gauss_cnj z2'" using assms by (subst (asm) prime_elem_dvd_mult_iff) (simp add: norm_z1' prime_gauss_int_norm_imp_prime_elem) thus "z1 = z2" proof assume "z1' dvd z2'" with \prime z1'\ \prime z2'\ have "z1' = z2'" by (simp add: primes_dvd_imp_eq) thus ?thesis by (simp add: z1'_def z2'_def gauss_int_eq_iff prod_eq_iff) next assume dvd: "z1' dvd gauss_cnj z2'" have "normalize (\\<^sub>\ * gauss_cnj z2') = \\<^sub>\ * gauss_cnj z2'" using sgns by (subst normalized_gauss_int_iff) (auto simp: z2'_def) moreover have "prime_elem (\\<^sub>\ * gauss_cnj z2')" by (rule prime_gauss_int_norm_imp_prime_elem) (simp add: gauss_int_norm_mult norm_z2' \prime p\) ultimately have "prime (\\<^sub>\ * gauss_cnj z2')" by (simp add: prime_def) moreover from dvd have "z1' dvd \\<^sub>\ * gauss_cnj z2'" by simp ultimately have "z1' = \\<^sub>\ * gauss_cnj z2'" using \prime z1'\ by (simp add: primes_dvd_imp_eq) hence False using z1 z2 sgns by (auto simp: gauss_int_eq_iff z1'_def z2'_def) thus ?thesis .. 
qed qed lemma two_sum_of_squares_nat_iff: "(x :: nat) ^ 2 + y ^ 2 = 2 \ x = 1 \ y = 1" proof assume eq: "x ^ 2 + y ^ 2 = 2" have square_neq_2: "n ^ 2 \ 2" for n :: nat proof assume *: "n ^ 2 = 2" have "prime (2 :: nat)" by simp thus False by (subst (asm) * [symmetric]) (auto simp: prime_power_iff) qed from eq have "x ^ 2 < 2 ^ 2" "y ^ 2 < 2 ^ 2" by simp_all hence "x < 2" "y < 2" using power2_less_imp_less[of x 2] power2_less_imp_less[of y 2] by auto moreover have "x > 0" "y > 0" using eq square_neq_2[of x] square_neq_2[of y] by (auto intro!: Nat.gr0I) ultimately show "x = 1 \ y = 1" by auto qed auto lemma prime_sum_of_squares_unique: fixes p :: nat assumes "prime p" "p = 2 \ [p = 1] (mod 4)" shows "\!(x,y). x \ y \ x ^ 2 + y ^ 2 = p" using assms(2) proof assume [simp]: "p = 2" have **: "(\(x,y). x \ y \ x ^ 2 + y ^ 2 = p) = (\z. z = (1,1 :: nat))" using two_sum_of_squares_nat_iff by (auto simp: fun_eq_iff) thus ?thesis by (subst **) auto qed (use prime_1_mod_4_sum_of_squares_unique[of p] assms in auto) text \ We now give a simple and inefficient algorithm to compute the canonical decomposition $x ^ 2 + y ^ 2$ with $x\leq y$. \ definition prime_square_sum_nat_decomp :: "nat \ nat \ nat" where "prime_square_sum_nat_decomp p = (if prime p \ (p = 2 \ [p = 1] (mod 4)) then THE (x,y). x \ y \ x ^ 2 + y ^ 2 = p else (0, 0))" lemma prime_square_sum_nat_decomp_eqI: assumes "prime p" "x ^ 2 + y ^ 2 = p" "x \ y" shows "prime_square_sum_nat_decomp p = (x, y)" proof - have "[gauss_int_norm (of_nat x + \\<^sub>\ * of_nat y) \ 3] (mod 4)" by (rule gauss_int_norm_not_3_mod_4) also have "gauss_int_norm (of_nat x + \\<^sub>\ * of_nat y) = p" using assms by (auto simp: gauss_int_norm_def nat_add_distrib nat_power_eq) finally have "[p \ 3] (mod 4)" . with prime_mod_4_cases[of p] assms have *: "p = 2 \ [p = 1] (mod 4)" by auto have "prime_square_sum_nat_decomp p = (THE (x,y). x \ y \ x ^ 2 + y ^ 2 = p)" using * \prime p\ by (simp add: prime_square_sum_nat_decomp_def) also have "\ = (x, y)" proof (rule the1_equality) show "\!(x,y). x \ y \ x ^ 2 + y ^ 2 = p" using \prime p\ * by (rule prime_sum_of_squares_unique) qed (use assms in auto) finally show ?thesis . qed lemma prime_square_sum_nat_decomp_correct: assumes "prime p" "p = 2 \ [p = 1] (mod 4)" defines "z \ prime_square_sum_nat_decomp p" shows "fst z ^ 2 + snd z ^ 2 = p" "fst z \ snd z" proof - define z' where "z' = (THE (x,y). x \ y \ x ^ 2 + y ^ 2 = p)" have "z = z'" unfolding z_def z'_def using assms by (simp add: prime_square_sum_nat_decomp_def) also have"\!(x,y). x \ y \ x ^ 2 + y ^ 2 = p" using assms by (intro prime_sum_of_squares_unique) hence "case z' of (x, y) \ x \ y \ x ^ 2 + y ^ 2 = p" unfolding z'_def by (rule theI') finally show "fst z ^ 2 + snd z ^ 2 = p" "fst z \ snd z" by auto qed lemma sum_of_squares_nat_bound: fixes x y n :: nat assumes "x ^ 2 + y ^ 2 = n" shows "x \ n" proof (cases "x = 0") case False hence "x * 1 \ x ^ 2" unfolding power2_eq_square by (intro mult_mono) auto also have "\ \ x ^ 2 + y ^ 2" by simp also have "\ = n" by fact finally show ?thesis by simp qed auto lemma sum_of_squares_nat_bound': fixes x y n :: nat assumes "x ^ 2 + y ^ 2 = n" shows "y \ n" using sum_of_squares_nat_bound[of y x] assms by (simp add: add.commute) lemma is_singleton_conv_Ex1: "is_singleton A \ (\!x. x \ A)" proof assume "is_singleton A" thus "\!x. x \ A" by (auto elim!: is_singletonE) next assume "\!x. 
x \ A" thus "is_singleton A" by (metis equals0D is_singletonI') qed lemma the_elemI: assumes "is_singleton A" shows "the_elem A \ A" using assms by (elim is_singletonE) auto lemma prime_square_sum_nat_decomp_code_aux: assumes "prime p" "p = 2 \ [p = 1] (mod 4)" defines "z \ the_elem (Set.filter (\(x,y). x ^ 2 + y ^ 2 = p) (SIGMA x:{0..p}. {x..p}))" shows "prime_square_sum_nat_decomp p = z" proof - let ?A = "Set.filter (\(x,y). x ^ 2 + y ^ 2 = p) (SIGMA x:{0..p}. {x..p})" have eq: "?A = {(x,y). x \ y \ x ^ 2 + y ^ 2 = p}" - using sum_of_squares_nat_bound sum_of_squares_nat_bound' by auto + using sum_of_squares_nat_bound [of _ _ p] sum_of_squares_nat_bound' [of _ _ p] by auto have z: "z \ Set.filter (\(x,y). x ^ 2 + y ^ 2 = p) (SIGMA x:{0..p}. {x..p})" unfolding z_def eq using prime_sum_of_squares_unique[OF assms(1,2)] by (intro the_elemI) (simp add: is_singleton_conv_Ex1) have "prime_square_sum_nat_decomp p = (fst z, snd z)" using z by (intro prime_square_sum_nat_decomp_eqI[OF assms(1)]) auto also have "\ = z" by simp finally show ?thesis . qed lemma prime_square_sum_nat_decomp_code [code]: "prime_square_sum_nat_decomp p = (if prime p \ (p = 2 \ [p = 1] (mod 4)) then the_elem (Set.filter (\(x,y). x ^ 2 + y ^ 2 = p) (SIGMA x:{0..p}. {x..p})) else (0, 0))" using prime_square_sum_nat_decomp_code_aux[of p] by (auto simp: prime_square_sum_nat_decomp_def) subsection \Executable factorisation of Gaussian integers\ text \ Lastly, we use all of the above to give an executable (albeit not very efficient) factorisation algorithm for Gaussian integers based on factorisation of regular integers. Note that we will only compute the set of prime factors without multiplicity, but given that, it would be fairly easy to determine the multiplicity as well. First, we need the following function that computes the Gaussian integer factors of a \\\-prime \p\: \ definition factor_gauss_int_prime_nat :: "nat \ gauss_int list" where "factor_gauss_int_prime_nat p = (if p = 2 then [1 + \\<^sub>\] else if [p = 3] (mod 4) then [of_nat p] else case prime_square_sum_nat_decomp p of (x, y) \ [of_nat x + \\<^sub>\ * of_nat y, of_nat y + \\<^sub>\ * of_nat x])" lemma factor_gauss_int_prime_nat_correct: assumes "prime p" shows "set (factor_gauss_int_prime_nat p) = prime_factors (of_nat p)" using prime_mod_4_cases[OF assms] proof (elim disjE) assume "p = 2" thus ?thesis by (auto simp: prime_factorization_2_gauss_int factor_gauss_int_prime_nat_def) next assume *: "[p = 3] (mod 4)" with assms have "prime (of_nat p :: gauss_int)" by (intro prime_gauss_int_of_nat) thus ?thesis using assms * by (auto simp: prime_factorization_prime factor_gauss_int_prime_nat_def cong_def) next assume *: "[p = 1] (mod 4)" then interpret noninert_gauss_int_prime p using \prime p\ by unfold_locales define z where "z = prime_square_sum_nat_decomp p" define x y where "x = fst z" and "y = snd z" have xy: "x ^ 2 + y ^ 2 = p" "x \ y" using prime_square_sum_nat_decomp_correct[of p] * assms by (auto simp: x_def y_def z_def) from xy have xy_signs: "x > 0" "y > 0" using prime_1_mod_4_sum_of_squares_unique_aux[of p x y] assms * by auto have norms: "gauss_int_norm (of_nat x + \\<^sub>\ * of_nat y) = p" "gauss_int_norm (of_nat y + \\<^sub>\ * of_nat x) = p" using xy by (auto simp: gauss_int_norm_def nat_add_distrib nat_power_eq) have prime: "prime (of_nat x + \\<^sub>\ * of_nat y)" "prime (of_nat y + \\<^sub>\ * of_nat x)" using norms xy_signs \prime p\ unfolding prime_def normalized_gauss_int_iff by (auto intro!: prime_gauss_int_norm_imp_prime_elem) have 
"normalize ((of_nat x + \\<^sub>\ * of_nat y) * (of_nat y + \\<^sub>\ * of_nat x)) = of_nat p" proof - have "(of_nat x + \\<^sub>\ * of_nat y) * (of_nat y + \\<^sub>\ * of_nat x) = (\\<^sub>\ * of_nat p :: gauss_int)" by (subst xy(1) [symmetric]) (auto simp: gauss_int_eq_iff power2_eq_square) also have "normalize \ = of_nat p" by simp finally show ?thesis . qed hence "prime_factorization (of_nat p) = prime_factorization (prod_mset {#of_nat x + \\<^sub>\ * of_nat y, of_nat y + \\<^sub>\ * of_nat x#})" using assms xy by (subst prime_factorization_unique) (auto simp: gauss_int_eq_iff) also have "\ = {#of_nat x + \\<^sub>\ * of_nat y, of_nat y + \\<^sub>\ * of_nat x#}" using prime by (subst prime_factorization_prod_mset_primes) auto finally have "prime_factors (of_nat p) = {of_nat x + \\<^sub>\ * of_nat y, of_nat y + \\<^sub>\ * of_nat x}" by simp also have "\ = set (factor_gauss_int_prime_nat p)" using * unfolding factor_gauss_int_prime_nat_def case_prod_unfold by (auto simp: cong_def x_def y_def z_def) finally show ?thesis .. qed text \ Next, we lift this to compute the prime factorisation of any integer in the Gaussian integers: \ definition prime_factors_gauss_int_of_nat :: "nat \ gauss_int set" where "prime_factors_gauss_int_of_nat n = (if n = 0 then {} else (\p\prime_factors n. set (factor_gauss_int_prime_nat p)))" lemma prime_factors_gauss_int_of_nat_correct: "prime_factors_gauss_int_of_nat n = prime_factors (of_nat n)" proof (cases "n = 0") case False from False have [simp]: "n > 0" by auto have "prime_factors (of_nat n :: gauss_int) = prime_factors (of_nat (prod_mset (prime_factorization n)))" by (subst prod_mset_prime_factorization_nat [symmetric]) auto also have "\ = prime_factors (prod_mset (image_mset of_nat (prime_factorization n)))" by (subst of_nat_prod_mset) auto also have "\ = (\p\prime_factors n. prime_factors (of_nat p))" by (subst prime_factorization_prod_mset) auto also have "\ = (\p\prime_factors n. set (factor_gauss_int_prime_nat p))" by (intro SUP_cong refl factor_gauss_int_prime_nat_correct [symmetric]) auto finally show ?thesis by (simp add: prime_factors_gauss_int_of_nat_def) qed (auto simp: prime_factors_gauss_int_of_nat_def) text \ We can now use this to factor any Gaussian integer by computing a factorisation of its norm and removing all the prime divisors that do not actually divide it. \ definition prime_factors_gauss_int :: "gauss_int \ gauss_int set" where "prime_factors_gauss_int z = (if z = 0 then {} else Set.filter (\p. p dvd z) (prime_factors_gauss_int_of_nat (gauss_int_norm z)))" lemma prime_factors_gauss_int_correct [code_unfold]: "prime_factors z = prime_factors_gauss_int z" proof (cases "z = 0") case [simp]: False define n where "n = gauss_int_norm z" from False have [simp]: "n > 0" by (auto simp: n_def) have "prime_factors_gauss_int z = Set.filter (\p. p dvd z) (prime_factors (of_nat n))" by (simp add: prime_factors_gauss_int_of_nat_correct prime_factors_gauss_int_def n_def) also have "of_nat n = z * gauss_cnj z" by (simp add: n_def self_mult_gauss_cnj) also have "prime_factors \ = prime_factors z \ prime_factors (gauss_cnj z)" by (subst prime_factors_product) auto also have "Set.filter (\p. 
p dvd z) \ = prime_factors z" by (auto simp: in_prime_factors_iff) finally show ?thesis by simp qed (auto simp: prime_factors_gauss_int_def) (*<*) unbundle no_gauss_int_notation (*>*) end \ No newline at end of file diff --git a/thys/Generalized_Counting_Sort/Conservation.thy b/thys/Generalized_Counting_Sort/Conservation.thy --- a/thys/Generalized_Counting_Sort/Conservation.thy +++ b/thys/Generalized_Counting_Sort/Conservation.thy @@ -1,2036 +1,2036 @@ (* Title: An Efficient Generalization of Counting Sort for Large, possibly Infinite Key Ranges Author: Pasquale Noce Software Engineer at HID Global, Italy pasquale dot noce dot lavoro at gmail dot com pasquale dot noce at hidglobal dot com *) section "Proof of objects' conservation" theory Conservation imports Algorithm "HOL-Library.Multiset" begin text \ \null In this section, it is formally proven that GCsort \emph{conserves objects}, viz. that the objects' list returned by function @{const gcsort} contains as many occurrences of any given object as the input objects' list. Here below, steps 5, 6, and 7 of the proof method are accomplished. Particularly, @{text count_inv} is the predicate that will be shown to be invariant over inductive set @{const gcsort_set}. \null \ fun count_inv :: "('a \ nat) \ nat \ nat list \ 'a list \ bool" where "count_inv f (u, ns, xs) = (\x. count (mset xs) x = f x)" lemma gcsort_count_input: "count_inv (count (mset xs)) (0, [length xs], xs)" by simp lemma gcsort_count_intro: "count_inv f t \ count (mset (gcsort_out t)) x = f x" by (cases t, simp add: gcsort_out_def) text \ \null The main task to be accomplished to prove that GCsort conserves objects is to prove that so does function @{const fill} in case its input offsets' list is computed via the composition of functions @{const offs} and @{const enum}, as happens within function @{const round}. To achieve this result, a multi-step strategy will be adopted. The first step, addressed here below, opens with the definition of predicate @{text offs_pred}, satisfied by an offsets' list $ns$ and an objects' list $xs$ just in case each bucket delimited by $ns$ is sufficiently large to accommodate the corresponding objects in $xs$. Then, lemma @{text offs_pred_cons} shows that this predicate, if satisfied initially, keeps being true if each object in $xs$ is consumed as happens within function @{const fill}, viz. increasing the corresponding offset in $ns$ by one. \null \ definition offs_num :: "nat \ 'a list \ ('a, 'b) index_sign \ ('a \ 'b) \ 'b \ 'b \ nat \ nat" where "offs_num n xs index key mi ma i \ length [x\xs. index key x n mi ma = i]" abbreviation offs_set_next :: "nat list \ 'a list \ ('a, 'b) index_sign \ ('a \ 'b) \ 'b \ 'b \ nat \ nat set" where "offs_set_next ns xs index key mi ma i \ {k. k < length ns \ i < k \ 0 < offs_num (length ns) xs index key mi ma k}" abbreviation offs_set_prev :: "nat list \ 'a list \ ('a, 'b) index_sign \ ('a \ 'b) \ 'b \ 'b \ nat \ nat set" where "offs_set_prev ns xs index key mi ma i \ {k. i < length ns \ k < i \ 0 < offs_num (length ns) xs index key mi ma k}" definition offs_next :: "nat list \ nat \ 'a list \ ('a, 'b) index_sign \ ('a \ 'b) \ 'b \ 'b \ nat \ nat" where "offs_next ns ub xs index key mi ma i \ if offs_set_next ns xs index key mi ma i = {} then ub else ns ! Min (offs_set_next ns xs index key mi ma i)" definition offs_none :: "nat list \ nat \ 'a list \ ('a, 'b) index_sign \ ('a \ 'b) \ 'b \ 'b \ nat \ bool" where "offs_none ns ub xs index key mi ma i \ (\j < length ns. 
0 < offs_num (length ns) xs index key mi ma j \ i \ {ns ! j + offs_num (length ns) xs index key mi ma j..< offs_next ns ub xs index key mi ma j}) \ offs_num (length ns) xs index key mi ma 0 = 0 \ i < offs_next ns ub xs index key mi ma 0 \ 0 < offs_num (length ns) xs index key mi ma 0 \ i < ns ! 0" definition offs_pred :: "nat list \ nat \ 'a list \ ('a, 'b) index_sign \ ('a \ 'b) \ 'b \ 'b \ bool" where "offs_pred ns ub xs index key mi ma \ \i < length ns. offs_num (length ns) xs index key mi ma i \ offs_next ns ub xs index key mi ma i - ns ! i" lemma offs_num_cons: "offs_num n (x # xs) index key mi ma i = (if index key x n mi ma = i then Suc else id) (offs_num n xs index key mi ma i)" by (simp add: offs_num_def) lemma offs_next_prev: "(0 < offs_num (length ns) xs index key mi ma i \ offs_set_next ns xs index key mi ma i \ {} \ Min (offs_set_next ns xs index key mi ma i) = j) = (0 < offs_num (length ns) xs index key mi ma j \ offs_set_prev ns xs index key mi ma j \ {} \ Max (offs_set_prev ns xs index key mi ma j) = i)" (is "?P = ?Q") proof (rule iffI, (erule_tac [!] conjE)+) let ?A = "offs_set_next ns xs index key mi ma i" let ?B = "offs_set_prev ns xs index key mi ma j" assume A: "0 < offs_num (length ns) xs index key mi ma i" and B: "?A \ {}" and C: "Min ?A = j" have "Min ?A \ ?A" using B by (rule_tac Min_in, simp) hence D: "j \ ?A" using C by simp hence E: "i \ ?B" using A by simp show ?Q proof (rule conjI, rule_tac [2] conjI) show "0 < offs_num (length ns) xs index key mi ma j" using D by simp next show "?B \ {}" using E by blast next from E show "Max ?B = i" proof (subst Max_eq_iff, simp, blast, simp, rule_tac allI, rule_tac impI, (erule_tac conjE)+, rule_tac ccontr, simp) fix k assume F: "k < j" and "j < length ns" hence "k < length ns" by simp moreover assume "\ k \ i" and "0 < offs_num (length ns) xs index key mi ma k" ultimately have "k \ ?A" by simp hence "Min ?A \ k" by (rule_tac Min_le, simp) thus False using C and F by simp qed qed next let ?A = "offs_set_prev ns xs index key mi ma j" let ?B = "offs_set_next ns xs index key mi ma i" assume A: "0 < offs_num (length ns) xs index key mi ma j" and B: "?A \ {}" and C: "Max ?A = i" have "Max ?A \ ?A" using B by (rule_tac Max_in, simp) hence D: "i \ ?A" using C by simp hence E: "j \ ?B" using A by simp show ?P proof (rule conjI, rule_tac [2] conjI) show "0 < offs_num (length ns) xs index key mi ma i" using D by simp next show "?B \ {}" using E by blast next from E show "Min ?B = j" proof (subst Min_eq_iff, simp, blast, simp, rule_tac allI, rule_tac impI, (erule_tac conjE)+, rule_tac ccontr, simp) fix k assume "j < length ns" and "\ j \ k" and "0 < offs_num (length ns) xs index key mi ma k" hence "k \ ?A" by simp hence "k \ Max ?A" by (rule_tac Max_ge, simp) moreover assume "i < k" ultimately show False using C by simp qed qed qed lemma offs_next_cons_eq: assumes A: "index key x (length ns) mi ma = i" and B: "i < length ns" and C: "0 < offs_num (length ns) (x # xs) index key mi ma j" shows "offs_set_prev ns (x # xs) index key mi ma i = {} \ Max (offs_set_prev ns (x # xs) index key mi ma i) \ j \ offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j = offs_next ns ub (x # xs) index key mi ma j" (is "?P \ ?Q \ _") proof (simp only: disj_imp, cases "j < i") let ?A = "offs_set_prev ns (x # xs) index key mi ma i" let ?B = "offs_set_next (ns[i := Suc (ns ! 
i)]) xs index key mi ma j" let ?C = "offs_set_next ns (x # xs) index key mi ma j" case True hence D: "j \ ?A" using B and C by simp hence "j \ Max ?A" by (rule_tac Max_ge, simp) moreover assume "\ ?P \ ?Q" hence ?Q using D by blast ultimately have E: "j < Max ?A" by simp have F: "Max ?A \ ?A" using D by (rule_tac Max_in, simp, blast) have G: "Max ?A \ ?B" proof (simp, rule conjI, rule_tac [2] conjI) show "Max ?A < length ns" using F by auto next show "j < Max ?A" using E . next have "0 < offs_num (length ns) (x # xs) index key mi ma (Max ?A)" using F by blast moreover have "Max ?A < i" using F by blast ultimately show "0 < offs_num (length ns) xs index key mi ma (Max ?A)" using A by (simp add: offs_num_cons) qed hence H: "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j = ns[i := Suc (ns ! i)] ! Min ?B" by (simp only: offs_next_def split: if_split, blast) have "Min ?B \ Max ?A" using G by (rule_tac Min_le, simp) moreover have "Max ?A < i" using F by blast ultimately have I: "Min ?B < i" by simp hence J: "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j = ns ! Min ?B" using H by simp have "Min ?B \ ?B" using G by (rule_tac Min_in, simp, blast) hence K: "Min ?B \ ?C" using A and I by (simp add: offs_num_cons) hence L: "Min ?C \ Min ?B" by (rule_tac Min_le, simp) have "Min ?C \ ?C" using K by (rule_tac Min_in, simp, blast) moreover have "Min ?C < i" using L and I by simp ultimately have "Min ?C \ ?B" using A by (simp add: offs_num_cons) hence "Min ?B \ Min ?C" using G by (rule_tac Min_le, simp) hence "Min ?B = Min ?C" using L by simp moreover have "offs_next ns ub (x # xs) index key mi ma j = ns ! Min ?C" using K by (simp only: offs_next_def split: if_split, blast) ultimately show ?thesis using J by simp next let ?A = "offs_set_next (ns[i := Suc (ns ! i)]) xs index key mi ma j" let ?B = "offs_set_next ns (x # xs) index key mi ma j" case False hence "?A = ?B" using A by (rule_tac set_eqI, simp add: offs_num_cons) thus ?thesis proof (simp only: offs_next_def split: if_split, (rule_tac conjI, blast, rule_tac impI)+) assume "?B \ {}" hence "Min ?B \ ?B" by (rule_tac Min_in, simp) hence "i < Min ?B" using False by simp thus "ns[i := Suc (ns ! i)] ! Min ?B = ns ! Min ?B" by simp qed qed lemma offs_next_cons_neq: assumes A: "index key x (length ns) mi ma = i" and B: "offs_set_prev ns (x # xs) index key mi ma i \ {}" and C: "Max (offs_set_prev ns (x # xs) index key mi ma i) = j" shows "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j = (if 0 < offs_num (length ns) xs index key mi ma i then Suc (ns ! i) else offs_next ns ub (x # xs) index key mi ma i)" proof (simp, rule conjI, rule_tac [!] impI) let ?A = "offs_set_next ns (x # xs) index key mi ma j" assume "0 < offs_num (length ns) xs index key mi ma i" with A have "offs_set_next (ns[i := Suc (ns ! i)]) xs index key mi ma j = ?A" by (rule_tac set_eqI, rule_tac iffI, simp_all add: offs_num_cons split: if_split_asm) moreover have "0 < offs_num (length ns) (x # xs) index key mi ma i" using A by (simp add: offs_num_def) hence "0 < offs_num (length ns) (x # xs) index key mi ma j \ ?A \ {} \ Min ?A = i" using B and C by (subst offs_next_prev, simp) ultimately show "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j = Suc (ns ! i)" using B by (simp only: offs_next_def, simp, subst nth_list_update_eq, blast, simp) next let ?A = "offs_set_prev ns (x # xs) index key mi ma i" assume "offs_num (length ns) xs index key mi ma i = 0" with A have "offs_set_next (ns[i := Suc (ns ! 
i)]) xs index key mi ma j = offs_set_next ns (x # xs) index key mi ma i" proof (rule_tac set_eqI, rule_tac iffI, simp_all add: offs_num_cons split: if_split_asm, rule_tac conjI, rule_tac notI, simp, rule_tac impI, (erule_tac [!] conjE)+, rule_tac ccontr, simp_all) fix k have "i < length ns" using B by blast moreover assume "i \ k" and "\ i < k" hence "k < i" by simp moreover assume "0 < offs_num (length ns) xs index key mi ma k" ultimately have "k \ ?A" by (simp add: offs_num_cons) hence "k \ Max ?A" by (rule_tac Max_ge, simp) moreover assume "j < k" ultimately show False using C by simp next fix k have "Max ?A \ ?A" using B by (rule_tac Max_in, simp_all) hence "j < i" using C by simp moreover assume "i < k" ultimately show "j < k" by simp qed with B show "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j = offs_next ns ub (x # xs) index key mi ma i" proof (simp only: offs_next_def split: if_split, (rule_tac conjI, rule_tac [!] impI, simp)+, subst nth_list_update, blast, simp (no_asm_simp)) assume "offs_set_next ns (x # xs) index key mi ma i \ {}" hence "Min (offs_set_next ns (x # xs) index key mi ma i) \ offs_set_next ns (x # xs) index key mi ma i" by (rule_tac Min_in, simp) thus "i \ Min (offs_set_next ns (x # xs) index key mi ma i)" by simp qed qed lemma offs_pred_ub_aux [rule_format]: assumes A: "offs_pred ns ub xs index key mi ma" shows "i < length ns \ \j < length ns. i \ j \ 0 < offs_num (length ns) xs index key mi ma j \ ns ! j + offs_num (length ns) xs index key mi ma j \ ub" proof (erule strict_inc_induct, rule_tac [!] allI, (rule_tac [!] impI)+, drule le_less_Suc_eq, simp_all) fix i assume B: "length ns = Suc i" hence "offs_num (Suc i) xs index key mi ma i \ offs_next ns ub xs index key mi ma i - ns ! i" using A by (simp add: offs_pred_def) moreover have "offs_next ns ub xs index key mi ma i = ub" using B by (simp add: offs_next_def) ultimately have "offs_num (Suc i) xs index key mi ma i \ ub - ns ! i" by simp moreover assume "0 < offs_num (Suc i) xs index key mi ma i" ultimately show "ns ! i + offs_num (Suc i) xs index key mi ma i \ ub" by simp next fix i j assume "j < length ns" hence "offs_num (length ns) xs index key mi ma j \ offs_next ns ub xs index key mi ma j - ns ! j" using A by (simp add: offs_pred_def) moreover assume B: "\k < length ns. Suc i \ k \ 0 < offs_num (length ns) xs index key mi ma k \ ns ! k + offs_num (length ns) xs index key mi ma k \ ub" and C: "i \ j" have "offs_next ns ub xs index key mi ma j \ ub" proof (simp only: offs_next_def split: if_split, rule conjI, simp, rule impI) let ?A = "offs_set_next ns xs index key mi ma j" assume "?A \ {}" hence "Min ?A \ ?A" by (rule_tac Min_in, simp) hence "ns ! Min ?A + offs_num (length ns) xs index key mi ma (Min ?A) \ ub" using B and C by simp thus "ns ! Min ?A \ ub" by simp qed ultimately have "offs_num (length ns) xs index key mi ma j \ ub - ns ! j" by simp moreover assume "0 < offs_num (length ns) xs index key mi ma j" ultimately show "ns ! j + offs_num (length ns) xs index key mi ma j \ ub" by simp qed lemma offs_pred_ub: "\offs_pred ns ub xs index key mi ma; i < length ns; 0 < offs_num (length ns) xs index key mi ma i\ \ ns ! i + offs_num (length ns) xs index key mi ma i \ ub" by (drule offs_pred_ub_aux, assumption+, simp) lemma offs_pred_asc_aux [rule_format]: assumes A: "offs_pred ns ub xs index key mi ma" shows "i < length ns \ \j k. k < length ns \ i \ j \ j < k \ 0 < offs_num (length ns) xs index key mi ma j \ 0 < offs_num (length ns) xs index key mi ma k \ ns ! 
j + offs_num (length ns) xs index key mi ma j \ ns ! k" proof (erule strict_inc_induct, simp, (rule allI)+, (rule impI)+, simp) fix i j k assume B: "k < length ns" and C: "j < k" hence "j < length ns" by simp hence "offs_num (length ns) xs index key mi ma j \ offs_next ns ub xs index key mi ma j - ns ! j" using A by (simp add: offs_pred_def) moreover assume D: "\j k. k < length ns \ Suc i \ j \ j < k \ 0 < offs_num (length ns) xs index key mi ma j \ 0 < offs_num (length ns) xs index key mi ma k \ ns ! j + offs_num (length ns) xs index key mi ma j \ ns ! k" and E: "i \ j" and F: "0 < offs_num (length ns) xs index key mi ma k" have "offs_next ns ub xs index key mi ma j \ ns ! k" proof (simp only: offs_next_def split: if_split, rule conjI, rule_tac [!] impI, rule ccontr, simp) assume "\n > j. n < length ns \ offs_num (length ns) xs index key mi ma n = 0" hence "offs_num (length ns) xs index key mi ma k = 0" using B and C by simp thus False using F by simp next let ?A = "offs_set_next ns xs index key mi ma j" have G: "k \ ?A" using B and C and F by simp hence "Min ?A \ k" by (rule_tac Min_le, simp) hence "Min ?A < k \ Min ?A = k" by (simp add: le_less) moreover { have "Suc i \ Min ?A \ Min ?A < k \ 0 < offs_num (length ns) xs index key mi ma (Min ?A) \ ns ! Min ?A + offs_num (length ns) xs index key mi ma (Min ?A) \ ns ! k" using B and D and F by simp moreover assume "Min ?A < k" moreover have "Min ?A \ ?A" using G by (rule_tac Min_in, simp, blast) ultimately have "ns ! Min ?A < ns ! k" using E by simp } moreover { assume "Min ?A = k" hence "ns ! Min ?A = ns ! k" by simp } ultimately show "ns ! Min ?A \ ns ! k" by (simp add: le_less, blast) qed ultimately have "offs_num (length ns) xs index key mi ma j \ ns ! k - ns ! j" by simp moreover assume "0 < offs_num (length ns) xs index key mi ma j" ultimately show "ns ! j + offs_num (length ns) xs index key mi ma j \ ns ! k" by simp qed lemma offs_pred_asc: "\offs_pred ns ub xs index key mi ma; i < j; j < length ns; 0 < offs_num (length ns) xs index key mi ma i; 0 < offs_num (length ns) xs index key mi ma j\ \ ns ! i + offs_num (length ns) xs index key mi ma i \ ns ! j" by (drule offs_pred_asc_aux, erule less_trans, assumption+, rule order_refl) lemma offs_pred_next: assumes A: "offs_pred ns ub xs index key mi ma" and B: "i < length ns" and C: "0 < offs_num (length ns) xs index key mi ma i" shows "ns ! i < offs_next ns ub xs index key mi ma i" proof (simp only: offs_next_def split: if_split, rule conjI, rule_tac [!] impI) have "ns ! i + offs_num (length ns) xs index key mi ma i \ ub" using A and B and C by (rule offs_pred_ub) thus "ns ! i < ub" using C by simp next assume "offs_set_next ns xs index key mi ma i \ {}" hence "Min (offs_set_next ns xs index key mi ma i) \ offs_set_next ns xs index key mi ma i" by (rule_tac Min_in, simp) hence "ns ! i + offs_num (length ns) xs index key mi ma i \ ns ! Min (offs_set_next ns xs index key mi ma i)" using C by (rule_tac offs_pred_asc [OF A], simp_all) thus "ns ! i < ns ! Min (offs_set_next ns xs index key mi ma i)" using C by simp qed lemma offs_pred_next_cons_less: assumes A: "offs_pred ns ub (x # xs) index key mi ma" and B: "index key x (length ns) mi ma = i" and C: "offs_set_prev ns (x # xs) index key mi ma i \ {}" and D: "Max (offs_set_prev ns (x # xs) index key mi ma i) = j" shows "offs_next ns ub (x # xs) index key mi ma j < offs_next (ns[i := Suc (ns ! 
i)]) ub xs index key mi ma j" (is "?M < ?N") proof - have E: "0 < offs_num (length ns) (x # xs) index key mi ma i" using B by (simp add: offs_num_cons) hence "0 < offs_num (length ns) (x # xs) index key mi ma j \ offs_set_next ns (x # xs) index key mi ma j \ {} \ Min (offs_set_next ns (x # xs) index key mi ma j) = i" using C and D by (subst offs_next_prev, simp) hence F: "?M = ns ! i" by (simp only: offs_next_def, simp) have "?N = (if 0 < offs_num (length ns) xs index key mi ma i then Suc (ns ! i) else offs_next ns ub (x # xs) index key mi ma i)" using B and C and D by (rule offs_next_cons_neq) thus ?thesis proof (split if_split_asm) assume "?N = Suc (ns ! i)" thus ?thesis using F by simp next assume "?N = offs_next ns ub (x # xs) index key mi ma i" moreover have "ns ! i < offs_next ns ub (x # xs) index key mi ma i" using C by (rule_tac offs_pred_next [OF A _ E], blast) ultimately show ?thesis using F by simp qed qed lemma offs_pred_next_cons: assumes A: "offs_pred ns ub (x # xs) index key mi ma" and B: "index key x (length ns) mi ma = i" and C: "i < length ns" and D: "0 < offs_num (length ns) (x # xs) index key mi ma j" shows "offs_next ns ub (x # xs) index key mi ma j \ offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma j" (is "?M \ ?N") proof - let ?P = "offs_set_prev ns (x # xs) index key mi ma i \ {} \ Max (offs_set_prev ns (x # xs) index key mi ma i) = j" have "?P \ \ ?P" by blast moreover { assume ?P hence "?M < ?N" using B by (rule_tac offs_pred_next_cons_less [OF A], simp_all) hence ?thesis by simp } moreover { assume "\ ?P" hence "?N = ?M" by (rule_tac offs_next_cons_eq [OF B C D], blast) hence ?thesis by simp } ultimately show ?thesis .. qed lemma offs_pred_cons: assumes A: "offs_pred ns ub (x # xs) index key mi ma" and B: "index key x (length ns) mi ma = i" and C: "i < length ns" shows "offs_pred (ns[i := Suc (ns ! i)]) ub xs index key mi ma" using A proof (simp add: offs_pred_def, rule_tac allI, rule_tac impI, simp) let ?ns' = "ns[i := Suc (ns ! i)]" fix j assume "\j < length ns. offs_num (length ns) (x # xs) index key mi ma j \ offs_next ns ub (x # xs) index key mi ma j - ns ! j" and "j < length ns" hence D: "offs_num (length ns) (x # xs) index key mi ma j \ offs_next ns ub (x # xs) index key mi ma j - ns ! j" by simp from B and C show "offs_num (length ns) xs index key mi ma j \ offs_next ?ns' ub xs index key mi ma j - ?ns' ! j" proof (cases "j = i", case_tac [2] "0 < offs_num (length ns) xs index key mi ma j", simp_all) assume "j = i" hence "offs_num (length ns) xs index key mi ma i \ offs_next ns ub (x # xs) index key mi ma i - Suc (ns ! i)" using B and D by (simp add: offs_num_cons) moreover have "offs_next ns ub (x # xs) index key mi ma i \ offs_next ?ns' ub xs index key mi ma i" using B by (rule_tac offs_pred_next_cons [OF A _ C], simp_all add: offs_num_cons) ultimately show "offs_num (length ns) xs index key mi ma i \ offs_next ?ns' ub xs index key mi ma i - Suc (ns ! i)" by simp next assume "j \ i" hence "offs_num (length ns) xs index key mi ma j \ offs_next ns ub (x # xs) index key mi ma j - ns ! j" using B and D by (simp add: offs_num_cons) moreover assume "0 < offs_num (length ns) xs index key mi ma j" hence "offs_next ns ub (x # xs) index key mi ma j \ offs_next ?ns' ub xs index key mi ma j" using B by (rule_tac offs_pred_next_cons [OF A _ C], simp_all add: offs_num_cons) ultimately show "offs_num (length ns) xs index key mi ma j \ offs_next ?ns' ub xs index key mi ma j - ns ! 
j" by simp qed qed text \ \null The next step consists of proving, as done in lemma @{text fill_count_item} in what follows, that if certain conditions hold, particularly if offsets' list $ns$ and objects' list $xs$ satisfy predicate @{const offs_pred}, then function @{const fill} conserves objects if called using $xs$ and $ns$ as its input arguments. This lemma is proven by induction on $xs$. Hence, lemma @{thm [source] offs_pred_cons}, proven in the previous step, is used to remove the antecedent containing predicate @{const offs_pred} from the induction hypothesis, which has the form of an implication. \null \ lemma offs_next_zero: assumes A: "i < length ns" and B: "offs_num (length ns) xs index key mi ma i = 0" and C: "offs_set_prev ns xs index key mi ma i = {}" shows "offs_next ns ub xs index key mi ma 0 = offs_next ns ub xs index key mi ma i" proof - have "offs_set_next ns xs index key mi ma 0 = offs_set_next ns xs index key mi ma i" proof (rule set_eqI, rule iffI, simp_all, (erule conjE)+, rule ccontr, simp add: not_less) fix j assume D: "0 < offs_num (length ns) xs index key mi ma j" assume "j \ i" hence "j < i \ j = i" by (simp add: le_less) moreover { assume "j < i" hence "j \ offs_set_prev ns xs index key mi ma i" using A and D by simp hence False using C by simp } moreover { assume "j = i" hence False using B and D by simp } ultimately show False .. qed thus ?thesis by (simp only: offs_next_def) qed lemma offs_next_zero_cons_eq: assumes A: "index key x (length ns) mi ma = i" and B: "offs_num (length ns) (x # xs) index key mi ma 0 = 0" and C: "offs_set_prev ns (x # xs) index key mi ma i \ {}" (is "?A \ _") shows "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma 0 = offs_next ns ub (x # xs) index key mi ma 0" proof - have D: "Min ?A \ ?A" using C by (rule_tac Min_in, simp) moreover have E: "0 < Min ?A" proof (rule ccontr, simp) assume "Min ?A = 0" hence "offs_num (length ns) (x # xs) index key mi ma (Min ?A) = 0" using B by simp moreover have "0 < offs_num (length ns) (x # xs) index key mi ma (Min ?A)" using D by auto ultimately show False by simp qed ultimately have "Min ?A \ offs_set_next ns (x # xs) index key mi ma 0" (is "_ \ ?B") by auto moreover from this have "Min ?B = Min ?A" proof (subst Min_eq_iff, simp, blast, simp, rule_tac allI, rule_tac impI, (erule_tac conjE)+, rule_tac ccontr, simp add: not_le) fix j assume F: "j < Min ?A" have "i < length ns" using D by simp moreover have "Min ?A < i" using D by auto hence "j < i" using F by simp moreover assume "0 < offs_num (length ns) (x # xs) index key mi ma j" ultimately have "j \ ?A" by simp hence "Min ?A \ j" by (rule_tac Min_le, simp) thus False using F by simp qed moreover have "Min ?A \ offs_set_next (ns[i := Suc (ns ! 
i)]) xs index key mi ma 0" (is "_ \ ?C") using D and E by (auto, simp add: offs_num_cons A [symmetric]) moreover from this have "Min ?C = Min ?A" proof (subst Min_eq_iff, simp, blast, simp, rule_tac allI, rule_tac impI, (erule_tac conjE)+, rule_tac ccontr, simp add: not_le) fix j assume F: "j < Min ?A" have "i < length ns" using D by simp moreover have "Min ?A < i" using D by auto hence "j < i" using F by simp moreover assume "0 < offs_num (length ns) xs index key mi ma j" hence "0 < offs_num (length ns) (x # xs) index key mi ma j" by (simp add: offs_num_cons) ultimately have "j \ ?A" by simp hence "Min ?A \ j" by (rule_tac Min_le, simp) thus False using F by simp qed moreover have "Min ?A < i" using D by auto ultimately show ?thesis by (simp only: offs_next_def split: if_split, (rule_tac conjI, blast, rule_tac impI)+, simp) qed lemma offs_next_zero_cons_neq: assumes A: "index key x (length ns) mi ma = i" and B: "i < length ns" and C: "0 < i" and D: "offs_set_prev ns (x # xs) index key mi ma i = {}" shows "offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma 0 = (if 0 < offs_num (length ns) xs index key mi ma i then Suc (ns ! i) else offs_next ns ub (x # xs) index key mi ma i)" proof (simp, rule conjI, rule_tac [!] impI) let ?ns' = "ns[i := Suc (ns ! i)]" assume "0 < offs_num (length ns) xs index key mi ma i" with A have "offs_set_next ?ns' xs index key mi ma 0 = offs_set_next ns (x # xs) index key mi ma 0" by (rule_tac set_eqI, rule_tac iffI, simp_all add: offs_num_cons split: if_split_asm) moreover have "i \ offs_set_next ns (x # xs) index key mi ma 0" using A and B and C by (simp add: offs_num_cons) moreover from this have "Min (offs_set_next ns (x # xs) index key mi ma 0) = i" proof (subst Min_eq_iff, simp, blast, simp, rule_tac allI, rule_tac impI, (erule_tac conjE)+, rule_tac ccontr, simp add: not_le) fix j assume "j < i" and "0 < offs_num (length ns) (x # xs) index key mi ma j" hence "j \ offs_set_prev ns (x # xs) index key mi ma i" using B by simp thus False using D by simp qed ultimately show "offs_next ?ns' ub xs index key mi ma 0 = Suc (ns ! i)" by (simp only: offs_next_def split: if_split, rule_tac conjI, rule_tac [!] impI, simp_all) next let ?ns' = "ns[i := Suc (ns ! i)]" assume "offs_num (length ns) xs index key mi ma i = 0" moreover have "offs_set_prev ?ns' xs index key mi ma i = {}" using D by (simp add: offs_num_cons split: if_split_asm, blast) ultimately have "offs_next ?ns' ub xs index key mi ma 0 = offs_next ?ns' ub xs index key mi ma i" using B by (rule_tac offs_next_zero, simp_all) moreover have "offs_next ?ns' ub xs index key mi ma i = offs_next ns ub (x # xs) index key mi ma i" using A and B and D by (rule_tac offs_next_cons_eq, simp_all add: offs_num_cons) ultimately show "offs_next ?ns' ub xs index key mi ma 0 = offs_next ns ub (x # xs) index key mi ma i" by simp qed lemma offs_pred_zero_cons_less: assumes A: "offs_pred ns ub (x # xs) index key mi ma" and B: "index key x (length ns) mi ma = i" and C: "i < length ns" and D: "0 < i" and E: "offs_set_prev ns (x # xs) index key mi ma i = {}" shows "offs_next ns ub (x # xs) index key mi ma 0 < offs_next (ns[i := Suc (ns ! 
i)]) ub xs index key mi ma 0" (is "?M < ?N") proof - have "i \ offs_set_next ns (x # xs) index key mi ma 0" using B and C and D by (simp add: offs_num_cons) moreover from this have "Min (offs_set_next ns (x # xs) index key mi ma 0) = i" proof (subst Min_eq_iff, simp, blast, simp, rule_tac allI, rule_tac impI, (erule_tac conjE)+, rule_tac ccontr, simp add: not_le) fix j assume "j < i" and "0 < offs_num (length ns) (x # xs) index key mi ma j" hence "j \ offs_set_prev ns (x # xs) index key mi ma i" using C by simp thus False using E by simp qed ultimately have F: "?M = ns ! i" by (simp only: offs_next_def split: if_split, blast) have "?N = (if 0 < offs_num (length ns) xs index key mi ma i then Suc (ns ! i) else offs_next ns ub (x # xs) index key mi ma i)" using B and C and D and E by (rule offs_next_zero_cons_neq) thus ?thesis proof (split if_split_asm) assume "?N = Suc (ns ! i)" thus ?thesis using F by simp next assume "?N = offs_next ns ub (x # xs) index key mi ma i" moreover have "ns ! i < offs_next ns ub (x # xs) index key mi ma i" using B by (rule_tac offs_pred_next [OF A C], simp add: offs_num_cons) ultimately show ?thesis using F by simp qed qed lemma offs_pred_zero_cons: assumes A: "offs_pred ns ub (x # xs) index key mi ma" and B: "index key x (length ns) mi ma = i" and C: "i < length ns" and D: "offs_num (length ns) (x # xs) index key mi ma 0 = 0" shows "offs_next ns ub (x # xs) index key mi ma 0 \ offs_next (ns[i := Suc (ns ! i)]) ub xs index key mi ma 0" (is "?M \ ?N") proof (cases "offs_set_prev ns (x # xs) index key mi ma i = {}") case True have "0 < i" using B and D by (rule_tac ccontr, simp add: offs_num_cons) hence "?M < ?N" using True and B by (rule_tac offs_pred_zero_cons_less [OF A _ C]) thus ?thesis by simp next case False hence "?N = ?M" by (rule offs_next_zero_cons_eq [OF B D]) thus ?thesis by simp qed lemma replicate_count: "count (mset (replicate n x)) x = n" by (induction n, simp_all) lemma fill_none [rule_format]: assumes A: "index_less index key" shows "(\x \ set xs. key x \ {mi..ma}) \ ns \ [] \ offs_pred ns ub xs index key mi ma \ offs_none ns ub xs index key mi ma i \ fill xs ns index key ub mi ma ! i = None" proof (induction xs arbitrary: ns, simp add: offs_none_def offs_num_def offs_next_def, (rule impI)+, simp add: Let_def, (erule conjE)+) fix x xs and ns :: "nat list" let ?i' = "index key x (length ns) mi ma" let ?ns' = "ns[?i' := Suc (ns ! ?i')]" assume B: "offs_pred ns ub (x # xs) index key mi ma" and C: "offs_none ns ub (x # xs) index key mi ma i" assume D: "ns \ []" and "mi \ key x" and "key x \ ma" moreover from this have E: "?i' < length ns" using A by (simp add: index_less_def) hence "offs_pred ?ns' ub xs index key mi ma" by (rule_tac offs_pred_cons [OF B], simp) moreover assume "\ns. ns \ [] \ offs_pred ns ub xs index key mi ma \ offs_none ns ub xs index key mi ma i \ fill xs ns index key ub mi ma ! i = None" ultimately have F: "offs_none ?ns' ub xs index key mi ma i \ fill xs ?ns' index key ub mi ma ! i = None" by simp show "(fill xs ?ns' index key ub mi ma)[ns ! ?i' := Some x] ! 
i = None" - proof (insert C, simp add: offs_none_def, erule disjE, erule_tac [2] disjE, simp_all + proof (insert C, simp add: offs_none_def, erule disjE, erule_tac [2] disjE, simp_all del: subst_all subst_all' add: offs_num_cons split: if_split_asm, erule conjE, rule case_split, drule mp, - assumption, simp_all, (erule conjE)+, (erule_tac [2] conjE)+, + assumption, simp_all del: subst_all subst_all', (erule conjE)+, (erule_tac [2] conjE)+, erule_tac [3] conjE, erule_tac [5] conjE) fix j assume G: "?i' = j" and H: "j < length ns" and I: "Suc (ns ! j + offs_num (length ns) xs index key mi ma j) \ i" and J: "i < offs_next ns ub (x # xs) index key mi ma j" show "fill xs (ns[j := Suc (ns ! j)]) index key ub mi ma ! i = None" proof (cases "0 < offs_num (length ns) xs index key mi ma j", case_tac [2] "offs_set_prev ns (x # xs) index key mi ma j \ {}", simp_all only: not_not not_gr0) have "j < length ns \ 0 < offs_num (length ns) xs index key mi ma j \ ?ns' ! j + offs_num (length ns) xs index key mi ma j \ i \ i < offs_next ?ns' ub xs index key mi ma j \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def, blast) moreover assume "0 < offs_num (length ns) xs index key mi ma j" moreover have "?ns' ! j + offs_num (length ns) xs index key mi ma j \ i" using G and H and I by simp moreover have "0 < offs_num (length ns) (x # xs) index key mi ma j" using G by (simp add: offs_num_cons) hence "offs_next ns ub (x # xs) index key mi ma j \ offs_next ?ns' ub xs index key mi ma j" using G and H by (rule_tac offs_pred_next_cons [OF B], simp_all) hence "i < offs_next ?ns' ub xs index key mi ma j" using J by simp ultimately have "fill xs ?ns' index key ub mi ma ! i = None" using H by blast thus ?thesis using G by simp next let ?j' = "Max (offs_set_prev ns (x # xs) index key mi ma j)" have "?j' < length ns \ 0 < offs_num (length ns) xs index key mi ma ?j' \ ?ns' ! ?j' + offs_num (length ns) xs index key mi ma ?j' \ i \ i < offs_next ?ns' ub xs index key mi ma ?j' \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def, blast) moreover assume K: "offs_set_prev ns (x # xs) index key mi ma j \ {}" hence "?j' \ offs_set_prev ns (x # xs) index key mi ma j" by (rule_tac Max_in, simp) hence L: "?j' < length ns \ ?j' < j \ 0 < offs_num (length ns) xs index key mi ma ?j'" using G by (auto, subst (asm) (2) offs_num_cons, simp) moreover have "ns ! ?j' + offs_num (length ns) (x # xs) index key mi ma ?j' \ ns ! j" using G and H and L by (rule_tac offs_pred_asc [OF B], simp_all add: offs_num_cons) hence "?ns' ! ?j' + offs_num (length ns) xs index key mi ma ?j' \ ns ! j" using G and H and L by (subst nth_list_update, simp_all add: offs_num_cons) hence "?ns' ! ?j' + offs_num (length ns) xs index key mi ma ?j' \ i" using I by simp moreover assume M: "offs_num (length ns) xs index key mi ma j = 0" have "offs_next (ns[j := Suc (ns ! j)]) ub xs index key mi ma ?j' = (if 0 < offs_num (length ns) xs index key mi ma j then Suc (ns ! j) else offs_next ns ub (x # xs) index key mi ma j)" using G and K by (rule_tac offs_next_cons_neq, simp_all) hence "offs_next ?ns' ub xs index key mi ma ?j' = offs_next ns ub (x # xs) index key mi ma j" using G and M by simp hence "i < offs_next ?ns' ub xs index key mi ma ?j'" using J by simp ultimately have "fill xs ?ns' index key ub mi ma ! i = None" by blast thus ?thesis using G by simp next have "offs_num (length ns) xs index key mi ma 0 = 0 \ i < offs_next ?ns' ub xs index key mi ma 0 \ fill xs ?ns' index key ub mi ma ! 
i = None" using F by (simp add: offs_none_def) moreover assume K: "offs_set_prev ns (x # xs) index key mi ma j = {}" and L: "offs_num (length ns) xs index key mi ma j = 0" have "offs_set_prev ns (x # xs) index key mi ma j = offs_set_prev ?ns' xs index key mi ma j" using G by (rule_tac set_eqI, rule_tac iffI, simp_all add: offs_num_cons split: if_split_asm) hence M: "offs_set_prev ?ns' xs index key mi ma j = {}" using K by simp hence "offs_num (length ns) xs index key mi ma 0 = 0" using H and L by (cases j, simp_all) moreover have N: "offs_next ?ns' ub xs index key mi ma 0 = offs_next ?ns' ub xs index key mi ma j" using H and L and M by (rule_tac offs_next_zero, simp_all) have "offs_next ?ns' ub xs index key mi ma j = offs_next ns ub (x # xs) index key mi ma j" using G and H and K by (subst offs_next_cons_eq, simp_all add: offs_num_cons) hence "i < offs_next ?ns' ub xs index key mi ma 0" using J and N by simp ultimately have "fill xs ?ns' index key ub mi ma ! i = None" by blast thus ?thesis using G by simp qed next fix j assume G: "?i' \ j" and H: "j < length ns" and I: "ns ! j + offs_num (length ns) xs index key mi ma j \ i" and J: "i < offs_next ns ub (x # xs) index key mi ma j" and K: "0 < offs_num (length ns) xs index key mi ma j" from G have "ns ! ?i' \ i" proof (rule_tac notI, cases "?i' < j", simp_all add: not_less le_less) assume "?i' < j" hence "ns ! ?i' + offs_num (length ns) (x # xs) index key mi ma ?i' \ ns ! j" using H and K by (rule_tac offs_pred_asc [OF B], simp_all add: offs_num_cons) moreover assume "ns ! ?i' = i" ultimately have "i < ns ! j" by (simp add: offs_num_cons) thus False using I by simp next assume "j < ?i'" hence L: "?i' \ offs_set_next ns (x # xs) index key mi ma j" (is "_ \ ?A") using E by (simp add: offs_num_cons) hence "Min ?A \ ?i'" by (rule_tac Min_le, simp) hence "Min ?A < ?i' \ Min ?A = ?i'" by (simp add: le_less) hence "ns ! Min ?A \ ns ! ?i'" proof (rule disjE, simp_all) assume "Min ?A < ?i'" moreover have "Min ?A \ ?A" using L by (rule_tac Min_in, simp, blast) hence "0 < offs_num (length ns) (x # xs) index key mi ma (Min ?A)" by simp ultimately have "ns ! Min ?A + offs_num (length ns) (x # xs) index key mi ma (Min ?A) \ ns ! ?i'" using E by (rule_tac offs_pred_asc [OF B], simp_all add: offs_num_cons) thus ?thesis by simp qed hence "offs_next ns ub (x # xs) index key mi ma j \ ns ! ?i'" using L by (simp only: offs_next_def split: if_split, blast) moreover assume "ns ! ?i' = i" ultimately show False using J by simp qed thus "(fill xs ?ns' index key ub mi ma)[ns ! ?i' := Some x] ! i = None" proof simp have "j < length ns \ 0 < offs_num (length ns) xs index key mi ma j \ ?ns' ! j + offs_num (length ns) xs index key mi ma j \ i \ i < offs_next ?ns' ub xs index key mi ma j \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def, blast) moreover have "offs_next ns ub (x # xs) index key mi ma j \ offs_next ?ns' ub xs index key mi ma j" using E and K by (rule_tac offs_pred_next_cons [OF B], simp_all add: offs_num_cons) hence "i < offs_next ?ns' ub xs index key mi ma j" using J by simp ultimately show "fill xs ?ns' index key ub mi ma ! i = None" using G and H and I and K by simp qed next assume G: "0 < ?i'" and H: "offs_num (length ns) xs index key mi ma 0 = 0" and I: "i < offs_next ns ub (x # xs) index key mi ma 0" have "ns ! 
?i' \ i" proof have "0 < offs_num (length ns) (x # xs) index key mi ma ?i'" by (simp add: offs_num_cons) hence L: "?i' \ offs_set_next ns (x # xs) index key mi ma 0" (is "_ \ ?A") using E and G by simp hence "Min ?A \ ?i'" by (rule_tac Min_le, simp) hence "Min ?A < ?i' \ Min ?A = ?i'" by (simp add: le_less) hence "ns ! Min ?A \ ns ! ?i'" proof (rule disjE, simp_all) assume "Min ?A < ?i'" moreover have "Min ?A \ ?A" using L by (rule_tac Min_in, simp, blast) hence "0 < offs_num (length ns) (x # xs) index key mi ma (Min ?A)" by simp ultimately have "ns ! Min ?A + offs_num (length ns) (x # xs) index key mi ma (Min ?A) \ ns ! ?i'" using E by (rule_tac offs_pred_asc [OF B], simp_all add: offs_num_cons) thus ?thesis by simp qed hence "offs_next ns ub (x # xs) index key mi ma 0 \ ns ! ?i'" using L by (simp only: offs_next_def split: if_split, blast) moreover assume "ns ! ?i' = i" ultimately show False using I by simp qed thus "(fill xs ?ns' index key ub mi ma)[ns ! ?i' := Some x] ! i = None" proof simp have "offs_num (length ns) xs index key mi ma 0 = 0 \ i < offs_next ?ns' ub xs index key mi ma 0 \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def) moreover have "offs_next ns ub (x # xs) index key mi ma 0 \ offs_next ?ns' ub xs index key mi ma 0" using E and G and H by (rule_tac offs_pred_zero_cons [OF B], simp_all add: offs_num_cons) hence "i < offs_next ?ns' ub xs index key mi ma 0" using I by simp ultimately show "fill xs ?ns' index key ub mi ma ! i = None" using H by simp qed next assume G: "?i' = 0" and H: "i < ns ! 0" show "fill xs (ns[0 := Suc (ns ! 0)]) index key ub mi ma ! i = None" proof (cases "0 < offs_num (length ns) xs index key mi ma 0", simp_all) have "0 < offs_num (length ns) xs index key mi ma 0 \ i < ?ns' ! 0 \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def) moreover assume "0 < offs_num (length ns) xs index key mi ma 0" moreover have "i < ?ns' ! 0" using D and G and H by simp ultimately show ?thesis using G by simp next have "offs_num (length ns) xs index key mi ma 0 = 0 \ i < offs_next ?ns' ub xs index key mi ma 0 \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def) moreover assume "offs_num (length ns) xs index key mi ma 0 = 0" moreover have I: "offs_next ?ns' ub xs index key mi ma 0 = offs_next ns ub (x # xs) index key mi ma 0" using D and G by (rule_tac offs_next_cons_eq, simp_all add: offs_num_cons) have "ns ! 0 < offs_next ns ub (x # xs) index key mi ma 0" using D and G by (rule_tac offs_pred_next [OF B], simp_all add: offs_num_cons) hence "i < offs_next ?ns' ub xs index key mi ma 0" using H and I by simp ultimately show ?thesis using G by simp qed next assume G: "0 < ?i'" and H: "0 < offs_num (length ns) xs index key mi ma 0" and I: "i < ns ! 0" have "ns ! ?i' \ i" proof - have "ns ! 0 + offs_num (length ns) (x # xs) index key mi ma 0 \ ns ! ?i'" using H by (rule_tac offs_pred_asc [OF B G E], simp_all add: offs_num_cons) moreover have "0 < offs_num (length ns) (x # xs) index key mi ma 0" using H by (simp add: offs_num_cons) ultimately show ?thesis using I by simp qed thus "(fill xs ?ns' index key ub mi ma)[ns ! ?i' := Some x] ! i = None" proof simp have "0 < offs_num (length ns) xs index key mi ma 0 \ i < ?ns' ! 0 \ fill xs ?ns' index key ub mi ma ! i = None" using F by (simp add: offs_none_def) moreover have "i < ?ns' ! 0" using G and I by simp ultimately show "fill xs ?ns' index key ub mi ma ! 
i = None" using H by simp qed qed qed lemma fill_index_none [rule_format]: assumes A: "index_less index key" and B: "key x \ {mi..ma}" and C: "ns \ []" and D: "offs_pred ns ub (x # xs) index key mi ma" shows "\x \ set xs. key x \ {mi..ma} \ fill xs (ns[(index key x (length ns) mi ma) := Suc (ns ! index key x (length ns) mi ma)]) index key ub mi ma ! (ns ! index key x (length ns) mi ma) = None" (is "_ \ fill _ ?ns' _ _ _ _ _ ! (_ ! ?i) = _") using A and B and C and D proof (rule_tac fill_none, simp_all, rule_tac offs_pred_cons, simp_all, simp add: index_less_def, cases "0 < ?i", cases "offs_set_prev ns (x # xs) index key mi ma ?i = {}", case_tac [3] "0 < offs_num (length ns) xs index key mi ma 0") assume E: "0 < ?i" and F: "offs_set_prev ns (x # xs) index key mi ma ?i = {}" have G: "?i < length ns" using A and B and C by (simp add: index_less_def) hence "offs_num (length ns) (x # xs) index key mi ma 0 = 0" using E and F by (rule_tac ccontr, simp) hence "offs_num (length ns) xs index key mi ma 0 = 0" by (simp add: offs_num_cons split: if_split_asm) moreover have "offs_next ?ns' ub xs index key mi ma 0 = (if 0 < offs_num (length ns) xs index key mi ma ?i then Suc (ns ! ?i) else offs_next ns ub (x # xs) index key mi ma ?i)" using E and F and G by (rule_tac offs_next_zero_cons_neq, simp_all) hence "ns ! ?i < offs_next ?ns' ub xs index key mi ma 0" by (simp split: if_split_asm, rule_tac offs_pred_next [OF D G], simp add: offs_num_cons) ultimately show "offs_none ?ns' ub xs index key mi ma (ns ! ?i)" by (simp add: offs_none_def) next assume E: "0 < ?i" and F: "offs_set_prev ns (x # xs) index key mi ma ?i \ {}" (is "?A \ _") have G: "?i < length ns" using A and B and C by (simp add: index_less_def) have H: "Max ?A \ ?A" using F by (rule_tac Max_in, simp) hence I: "Max ?A < ?i" by blast have "Max ?A < length ns" using H by auto moreover have "0 < offs_num (length ns) (x # xs) index key mi ma (Max ?A)" using H by auto hence "0 < offs_num (length ns) xs index key mi ma (Max ?A)" using I by (subst (asm) offs_num_cons, split if_split_asm, simp_all) moreover have "ns ! Max ?A + offs_num (length ns) (x # xs) index key mi ma (Max ?A) \ ns ! ?i" using G and H by (rule_tac offs_pred_asc [OF D], simp_all add: offs_num_cons) hence "?ns' ! Max ?A + offs_num (length ns) xs index key mi ma (Max ?A) \ ns ! ?i" using I by (simp add: offs_num_cons) moreover have "offs_next ?ns' ub xs index key mi ma (Max ?A) = (if 0 < offs_num (length ns) xs index key mi ma ?i then Suc (ns ! ?i) else offs_next ns ub (x # xs) index key mi ma ?i)" using F and I by (rule_tac offs_next_cons_neq, simp_all) hence "ns ! ?i < offs_next ?ns' ub xs index key mi ma (Max ?A)" by (simp split: if_split_asm, rule_tac offs_pred_next [OF D G], simp add: offs_num_cons) ultimately show "offs_none ?ns' ub xs index key mi ma (ns ! ?i)" by (simp add: offs_none_def, blast) next assume "0 < offs_num (length ns) xs index key mi ma 0" and "\ 0 < ?i" moreover have "?i < length ns" using A and B and C by (simp add: index_less_def) ultimately show "offs_none ?ns' ub xs index key mi ma (ns ! ?i)" by (simp add: offs_none_def) next assume E: "\ 0 < ?i" and F: "\ 0 < offs_num (length ns) xs index key mi ma 0" have G: "?i < length ns" using A and B and C by (simp add: index_less_def) have "offs_num (length ns) xs index key mi ma 0 = 0" using F by simp moreover have "offs_next ?ns' ub xs index key mi ma ?i = offs_next ns ub (x # xs) index key mi ma ?i" using E and G by (rule_tac offs_next_cons_eq, simp_all add: offs_num_cons) hence "ns ! 
?i < offs_next ?ns' ub xs index key mi ma ?i" by (simp, rule_tac offs_pred_next [OF D G], simp add: offs_num_cons) hence "ns ! ?i < offs_next ?ns' ub xs index key mi ma 0" using E by simp ultimately show "offs_none ?ns' ub xs index key mi ma (ns ! ?i)" by (simp add: offs_none_def) qed lemma fill_count_item [rule_format]: assumes A: "index_less index key" shows "(\x \ set xs. key x \ {mi..ma}) \ ns \ [] \ offs_pred ns ub xs index key mi ma \ length xs \ ub \ count (mset (map the (fill xs ns index key ub mi ma))) x = count (mset xs) x + (if the None = x then ub - length xs else 0)" proof (induction xs arbitrary: ns, simp add: replicate_count, (rule impI)+, simp add: Let_def map_update del: count_add_mset mset_map split del: if_split, (erule conjE)+, subst add_mset_add_single, simp only: count_single count_union) fix y xs and ns :: "nat list" let ?i = "index key y (length ns) mi ma" let ?ns' = "ns[?i := Suc (ns ! ?i)]" assume B: "\x \ set xs. mi \ key x \ key x \ ma" and C: "mi \ key y" and D: "key y \ ma" and E: "ns \ []" and F: "offs_pred ns ub (y # xs) index key mi ma" and G: "Suc (length xs) \ ub" have H: "?i < length ns" using A and C and D and E by (simp add: index_less_def) assume "\ns. ns \ [] \ offs_pred ns ub xs index key mi ma \ count (mset (map the (fill xs ns index key ub mi ma))) x = count (mset xs) x + (if the None = x then ub - length xs else 0)" moreover have "offs_pred ?ns' ub xs index key mi ma" using F and H by (rule_tac offs_pred_cons, simp_all) ultimately have "count (mset (map the (fill xs ?ns' index key ub mi ma))) x = count (mset xs) x + (if the None = x then ub - length xs else 0)" using E by simp moreover have "ns ! ?i + offs_num (length ns) (y # xs) index key mi ma ?i \ ub" using F and H by (rule offs_pred_ub, simp add: offs_num_cons) hence "ns ! ?i < ub" by (simp add: offs_num_cons) ultimately show "count (mset ((map the (fill xs ?ns' index key ub mi ma)) [ns ! ?i := y])) x = count (mset xs) x + (if y = x then 1 else 0) + (if the None = x then ub - length (y # xs) else 0)" proof (subst mset_update, simp add: fill_length, subst add_mset_add_single, simp only: count_diff count_single count_union, subst nth_map, simp add: fill_length, subst add.assoc, subst (3) add.commute, subst add.assoc [symmetric], subst add_right_cancel) have "fill xs ?ns' index key ub mi ma ! (ns ! ?i) = None" using B and C and D and E by (rule_tac fill_index_none [OF A _ _ F], simp_all) thus "count (mset xs) x + (if the None = x then ub - length xs else 0) - (if the (fill xs ?ns' index key ub mi ma ! (ns ! ?i)) = x then 1 else 0) = count (mset xs) x + (if the None = x then ub - length (y # xs) else 0)" using G by simp qed qed text \ \null Finally, lemma @{text offs_enum_pred} here below proves that, if $ns$ is the offsets' list obtained by applying the composition of functions @{const offs} and @{const enum} to objects' list $xs$, then predicate @{const offs_pred} is satisfied by $ns$ and $xs$. This result is in turn used, together with lemma @{thm [source] fill_count_item}, to prove lemma @{text fill_offs_enum_count_item}, which states that function @{const fill} conserves objects if its input offsets' list is computed via the composition of functions @{const offs} and @{const enum}. \null \ lemma enum_offs_num: "i < n \ enum xs index key n mi ma ! 
i = offs_num n xs index key mi ma i" by (induction xs, simp add: offs_num_def, simp add: Let_def offs_num_cons, subst nth_list_update_eq, simp_all add: enum_length) lemma offs_length: "length (offs ns i) = length ns" by (induction ns arbitrary: i, simp_all) lemma offs_add [rule_format]: "i < length ns \ offs ns k ! i = foldl (+) k (take i ns)" by (induction ns arbitrary: i k, simp, simp add: nth_Cons split: nat.split) lemma offs_mono_aux: "i \ j \ j < length ns \ offs ns k ! i \ offs ns k ! (i + (j - i))" by (simp only: offs_add take_add, simp add: add_le) lemma offs_mono: "i \ j \ j < length ns \ offs ns k ! i \ offs ns k ! j" by (frule offs_mono_aux, simp_all) lemma offs_update: "j < length ns \ offs (ns[i := Suc (ns ! i)]) k ! j = (if j \ i then id else Suc) (offs ns k ! j)" by (simp add: offs_add not_le take_update_swap, rule impI, subst nth_take [symmetric], assumption, subst add_update, simp_all) lemma offs_equal_suc: assumes A: "Suc i < length ns" and B: "ns ! i = 0" shows "offs ns m ! i = offs ns m ! Suc i" proof - have "offs ns m ! i = foldl (+) m (take i ns)" using A by (subst offs_add, simp_all) also have "\ = foldl (+) m (take i ns @ [ns ! i])" using B by simp also have "\ = foldl (+) m (take (Suc i) ns)" using A by (subst take_Suc_conv_app_nth, simp_all) also have "\ = offs ns m ! Suc i" using A by (subst offs_add, simp_all) finally show ?thesis . qed lemma offs_equal [rule_format]: "i < j \ j < length ns \ (\k \ {i.. offs ns m ! i = offs ns m ! j" proof (erule strict_inc_induct, rule_tac [!] impI, simp_all, erule offs_equal_suc, simp) fix i assume A: "i < j" and "j < length ns" hence "Suc i < length ns" by simp moreover assume "\k \ {i.. = offs ns m ! j" finally show "offs ns m ! i = offs ns m ! j" . qed lemma offs_enum_last [rule_format]: assumes A: "index_less index key" and B: "0 < n" and C: "\x \ set xs. key x \ {mi..ma}" shows "offs (enum xs index key n mi ma) k ! (n - Suc 0) + offs_num n xs index key mi ma (n - Suc 0) = length xs + k" proof - let ?ns = "enum xs index key n mi ma" from B have D: "last ?ns = offs_num n xs index key mi ma (n - Suc 0)" by (subst last_conv_nth, subst length_0_conv [symmetric], simp_all add: enum_length, subst enum_offs_num, simp_all) have "offs ?ns k ! (n - Suc 0) = foldl (+) k (take (n - Suc 0) ?ns)" using B by (rule_tac offs_add, simp add: enum_length) also have "\ = foldl (+) k (butlast ?ns)" by (simp add: butlast_conv_take enum_length) finally have "offs ?ns k ! (n - Suc 0) + offs_num n xs index key mi ma (n - Suc 0) = foldl (+) k (butlast ?ns @ [last ?ns])" using D by simp also have "\ = foldl (+) k ?ns" using B by (subst append_butlast_last_id, subst length_0_conv [symmetric], simp_all add: enum_length) also have "\ = foldl (+) 0 ?ns + k" by (rule add_base_zero) also have "\ = length xs + k" using A and B and C by (subst enum_add, simp_all) finally show ?thesis . qed lemma offs_enum_ub [rule_format]: assumes A: "index_less index key" and B: "i < n" and C: "\x \ set xs. key x \ {mi..ma}" shows "offs (enum xs index key n mi ma) k ! i \ length xs + k" proof - let ?ns = "enum xs index key n mi ma" have "offs ?ns k ! i \ offs ?ns k ! (n - Suc 0)" using B by (rule_tac offs_mono, simp_all add: enum_length) also have "\ \ offs ?ns k ! (n - Suc 0) + offs_num n xs index key mi ma (n - Suc 0)" by simp also have "\ = length xs + k" using A and B and C by (rule_tac offs_enum_last, simp_all) finally show ?thesis . qed lemma offs_enum_next_ge [rule_format]: assumes A: "index_less index key" and B: "i < n" shows "\x \ set xs. 
key x \ {mi..ma} \ offs (enum xs index key n mi ma) k ! i \ offs_next (offs (enum xs index key n mi ma) k) (length xs + k) xs index key mi ma i" (is "_ \ offs ?ns _ ! _ \ _") proof (simp only: offs_next_def split: if_split, rule conjI, rule_tac [!] impI, rule offs_enum_ub [OF A B], simp) assume "offs_set_next (offs ?ns k) xs index key mi ma i \ {}" (is "?A \ _") hence C: "Min ?A \ ?A" by (rule_tac Min_in, simp) hence "i \ Min ?A" by simp moreover have "Min ?A < length ?ns" using C by (simp add: offs_length) ultimately show "offs ?ns k ! i \ offs ?ns k ! Min ?A" by (rule offs_mono) qed lemma offs_enum_zero_aux [rule_format]: "\index_less index key; 0 < n; \x \ set xs. key x \ {mi..ma}; offs_num n xs index key mi ma (n - Suc 0) = 0\ \ offs (enum xs index key n mi ma) k ! (n - Suc 0) = length xs + k" by (subst offs_enum_last [where key = key and mi = mi and ma = ma, symmetric], simp+) lemma offs_enum_zero [rule_format]: assumes A: "index_less index key" and B: "i < n" and C: "\x \ set xs. key x \ {mi..ma}" and D: "offs_num n xs index key mi ma i = 0" shows "offs (enum xs index key n mi ma) k ! i = offs_next (offs (enum xs index key n mi ma) k) (length xs + k) xs index key mi ma i" proof (simp only: offs_next_def split: if_split, rule conjI, rule_tac [!] impI, cases "i = n - Suc 0", simp) assume "i = n - Suc 0" thus "offs (enum xs index key n mi ma) k ! (n - Suc 0) = length xs + k" using A and B and C and D by (rule_tac offs_enum_zero_aux, simp_all) next let ?ns = "enum xs index key n mi ma" assume E: "offs_set_next (offs ?ns k) xs index key mi ma i = {}" (is "?A = _") assume "i \ n - Suc 0" hence F: "i < n - Suc 0" using B by simp hence "offs ?ns k ! i = offs ?ns k ! (n - Suc 0)" proof (rule offs_equal, simp_all add: enum_length le_less, erule_tac conjE, erule_tac disjE, rule_tac ccontr, drule_tac [2] sym, simp_all) fix j assume G: "j < n - Suc 0" hence "j < length (offs ?ns k)" by (simp add: offs_length enum_length) moreover assume "i < j" moreover assume "0 < ?ns ! j" hence "0 < offs_num (length (offs ?ns k)) xs index key mi ma j" using G by (subst (asm) enum_offs_num, simp_all add: offs_length enum_length) ultimately have "j \ ?A" by simp thus False using E by simp next show "?ns ! i = 0" using B and D by (subst enum_offs_num, simp_all) qed also from A and B and C have "\ = length xs + k" proof (rule_tac offs_enum_zero_aux, simp_all, rule_tac ccontr, simp) have "n - Suc 0 < length (offs ?ns k)" using B by (simp add: offs_length enum_length) moreover assume "0 < offs_num n xs index key mi ma (n - Suc 0)" hence "0 < offs_num (length (offs ?ns k)) xs index key mi ma (n - Suc 0)" by (simp add: offs_length enum_length) ultimately have "n - Suc 0 \ ?A" using F by simp thus False using E by simp qed finally show "offs (enum xs index key n mi ma) k ! i = length xs + k" . next let ?ns = "enum xs index key n mi ma" assume "offs_set_next (offs ?ns k) xs index key mi ma i \ {}" (is "?A \ _") hence "Min ?A \ ?A" by (rule_tac Min_in, simp) thus "offs ?ns k ! i = offs ?ns k ! Min ?A" proof (rule_tac offs_equal, simp_all add: le_less, simp add: offs_length, (erule_tac conjE)+, erule_tac disjE, rule_tac ccontr, drule_tac [2] sym, simp_all) fix j assume E: "j < Min ?A" and "Min ?A < length (offs ?ns k)" hence F: "j < length (offs ?ns k)" by simp moreover assume "i < j" moreover assume "0 < ?ns ! 
j" hence "0 < offs_num (length (offs ?ns k)) xs index key mi ma j" using F by (subst (asm) enum_offs_num, simp_all add: offs_length enum_length) ultimately have "j \ ?A" by simp hence "Min ?A \ j" by (rule_tac Min_le, simp) thus False using E by simp next show "?ns ! i = 0" using B and D by (subst enum_offs_num, simp_all) qed qed lemma offs_enum_next_cons [rule_format]: assumes A: "index_less index key" and B: "\x \ set xs. key x \ {mi..ma}" shows "(if i < index key x n mi ma then (\) else (<)) (offs_next (offs (enum xs index key n mi ma) k) (length xs + k) xs index key mi ma i) (offs_next (offs ((enum xs index key n mi ma) [index key x n mi ma := Suc (enum xs index key n mi ma ! index key x n mi ma)]) k) (Suc (length xs + k)) (x # xs) index key mi ma i)" (is "(if i < ?i' then _ else _) (offs_next (offs ?ns _) _ _ _ _ _ _ _) (offs_next (offs ?ns' _) _ _ _ _ _ _ _)") proof (simp_all only: offs_next_def not_less split: if_split, (rule conjI, rule impI)+, simp, simp, (rule_tac [!] impI, (rule_tac [!] conjI)?)+, rule_tac [2-3] FalseE, rule_tac [4] conjI, rule_tac [4-5] impI) assume C: "offs_set_next (offs ?ns k) xs index key mi ma i = {}" (is "?A = _") and D: "offs_set_next (offs ?ns' k) (x # xs) index key mi ma i \ {}" (is "?A' \ _") and E: "i < ?i'" from C have F: "\j \ ?i'. j \ ?A'" by (rule_tac allI, rule_tac impI, rule_tac notI, simp add: enum_length offs_length offs_num_cons split: if_split_asm, (erule_tac conjE)+, simp) from D have "Min ?A' \ ?A'" by (rule_tac Min_in, simp) hence G: "Min ?A' < n" by (simp add: offs_length enum_length) have H: "Min ?A' = ?i'" proof (rule Min_eqI, simp, rule eq_refl, erule contrapos_pp, insert F, simp) have "\j. j \ ?A'" using D by blast then obtain j where "j \ ?A'" .. moreover from this have "j = ?i'" by (erule_tac contrapos_pp, insert F, simp) ultimately show "?i' \ ?A'" by simp qed with G have "offs ?ns' k ! Min ?A' = offs ?ns k ! Min ?A'" by (subst offs_update, simp_all add: enum_length) also from A and B and G and H have "\ = offs_next (offs ?ns k) (length xs + k) xs index key mi ma (Min ?A')" proof (rule_tac offs_enum_zero, simp_all, rule_tac ccontr, simp) assume "?i' < n" and "0 < offs_num n xs index key mi ma ?i'" hence "?i' \ ?A" using E by (simp add: offs_length enum_length) thus False using C by simp qed also have "\ = length xs + k" proof (simp only: offs_next_def split: if_split, rule conjI, simp, rule impI, rule FalseE, simp, erule exE, (erule conjE)+) fix j assume "j < length (offs ?ns k)" moreover assume "Min ?A' < j" hence "i < j" using E and H by simp moreover assume "0 < offs_num (length (offs ?ns k)) xs index key mi ma j" ultimately have "j \ ?A" by simp thus False using C by simp qed finally show "length xs + k \ offs ?ns' k ! Min ?A'" by simp next assume C: "offs_set_next (offs ?ns k) xs index key mi ma i = {}" (is "?A = _") and D: "offs_set_next (offs ?ns' k) (x # xs) index key mi ma i \ {}" (is "?A' \ _") and E: "?i' \ i" have "\j. j \ ?A'" using D by blast then obtain j where F: "j \ ?A'" .. hence "j < length (offs ?ns k)" by (simp add: offs_length) moreover have "i < j" using F by simp moreover from this have "0 < offs_num (length (offs ?ns k)) xs index key mi ma j" using E and F by (simp add: offs_length enum_length offs_num_cons) ultimately have "j \ ?A" by simp thus False using C by simp next assume C: "offs_set_next (offs ?ns k) xs index key mi ma i \ {}" (is "?A \ _") and D: "offs_set_next (offs ?ns' k) (x # xs) index key mi ma i = {}" (is "?A' = _") have "\j. j \ ?A" using C by blast then obtain j where E: "j \ ?A" .. 
hence "j < length (offs ?ns' k)" by (simp add: offs_length) moreover have "i < j" using E by simp moreover have "0 < offs_num (length (offs ?ns' k)) (x # xs) index key mi ma j" using E by (simp add: offs_length enum_length offs_num_cons) ultimately have "j \ ?A'" by simp thus False using D by simp next assume "offs_set_next (offs ?ns k) xs index key mi ma i \ {}" (is "?A \ _") hence "Min ?A \ ?A" by (rule_tac Min_in, simp) hence C: "Min ?A < n" by (simp add: offs_length enum_length) assume "offs_set_next (offs ?ns' k) (x # xs) index key mi ma i \ {}" (is "?A' \ _") hence D: "Min ?A' \ ?A'" by (rule_tac Min_in, simp) hence E: "Min ?A' < n" by (simp add: offs_length enum_length) have "offs ?ns k ! Min ?A \ offs ?ns k ! Min ?A'" proof (cases "offs_num n xs index key mi ma (Min ?A') = 0") case True have "offs ?ns k ! Min ?A' = offs_next (offs ?ns k) (length xs + k) xs index key mi ma (Min ?A')" using A and B and E and True by (rule_tac offs_enum_zero, simp_all) also from A and B and C have "\ \ offs ?ns k ! Min ?A" proof (simp only: offs_next_def split: if_split, rule_tac conjI, rule_tac [!] impI, rule_tac offs_enum_ub, simp, simp, simp) assume "offs_set_next (offs ?ns k) xs index key mi ma (Min ?A') \ {}" (is "?B \ _") hence "Min ?B \ ?B" by (rule_tac Min_in, simp) hence "Min ?B \ ?A" using D by simp moreover from this have "Min ?A \ Min ?B" by (rule_tac Min_le, simp) ultimately show "offs ?ns k ! Min ?A \ offs ?ns k ! Min ?B" by (rule_tac offs_mono, simp_all add: offs_length) qed finally show ?thesis . next case False hence "Min ?A' \ ?A" using D by (simp add: offs_length enum_length) hence "Min ?A \ Min ?A'" by (rule_tac Min_le, simp) thus ?thesis by (rule offs_mono, simp_all add: enum_length E) qed also have "\ \ offs ?ns' k ! Min ?A'" using E by (subst offs_update, simp_all add: enum_length) finally show "offs ?ns k ! Min ?A \ offs ?ns' k ! Min ?A'" . next let ?A = "offs_set_next (offs ?ns k) xs index key mi ma i" assume "offs_set_next (offs ?ns' k) (x # xs) index key mi ma i \ {}" (is "?A' \ _") hence C: "Min ?A' \ ?A'" by (rule_tac Min_in, simp) hence D: "Min ?A' < n" by (simp add: offs_length enum_length) assume "?i' \ i" hence E: "?i' < Min ?A'" using C by simp hence "0 < offs_num n xs index key mi ma (Min ?A')" using C by (simp add: offs_length enum_length offs_num_cons) hence "Min ?A' \ ?A" using C by (simp add: offs_length enum_length) hence "Min ?A \ Min ?A'" by (rule_tac Min_le, simp) hence "offs ?ns k ! Min ?A \ offs ?ns k ! Min ?A'" by (rule offs_mono, simp_all add: enum_length D) also have "\ < offs ?ns' k ! Min ?A'" using E by (subst offs_update, simp_all add: enum_length D) finally show "offs ?ns k ! Min ?A < offs ?ns' k ! Min ?A'" . qed lemma offs_enum_pred [rule_format]: assumes A: "index_less index key" shows "(\x \ set xs. key x \ {mi..ma}) \ offs_pred (offs (enum xs index key n mi ma) k) (length xs + k) xs index key mi ma" proof (induction xs, simp add: offs_pred_def offs_num_def, simp add: Let_def offs_pred_def offs_length enum_length, rule impI, (erule conjE)+, simp, rule allI, rule impI, erule allE, drule mp, assumption) fix x xs i let ?i' = "index key x n mi ma" let ?ns = "enum xs index key n mi ma" let ?ns' = "?ns[?i' := Suc (?ns ! ?i')]" assume B: "\x \ set xs. mi \ key x \ key x \ ma" and C: "i < n" and D: "offs_num n xs index key mi ma i \ offs_next (offs ?ns k) (length xs + k) xs index key mi ma i - offs ?ns k ! 
i" have E: "(if i < ?i' then (\) else (<)) (offs_next (offs ?ns k) (length xs + k) xs index key mi ma i) (offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma i)" using A and B by (subst offs_enum_next_cons, simp_all) show "offs_num n (x # xs) index key mi ma i \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma i - offs ?ns' k ! i" proof (subst offs_update, simp add: enum_length C, rule linorder_cases [of i ?i'], simp_all add: offs_num_cons) assume "i < ?i'" hence "offs_next (offs ?ns k) (length xs + k) xs index key mi ma i \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma i" using E by simp thus "offs_num n xs index key mi ma i \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma i - offs ?ns k ! i" using D by arith next assume F: "i = ?i'" hence "Suc (offs_num n xs index key mi ma ?i') \ Suc (offs_next (offs ?ns k) (length xs + k) xs index key mi ma ?i' - offs ?ns k ! ?i')" using D by simp also from A and B and C and F have "\ = Suc (offs_next (offs ?ns k) (length xs + k) xs index key mi ma ?i') - offs ?ns k ! ?i'" by (rule_tac Suc_diff_le [symmetric], rule_tac offs_enum_next_ge, simp_all) finally have "Suc (offs_num n xs index key mi ma ?i') \ Suc (offs_next (offs ?ns k) (length xs + k) xs index key mi ma ?i') - offs ?ns k ! ?i'" . moreover have "Suc (offs_next (offs ?ns k) (length xs + k) xs index key mi ma ?i') \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma ?i'" using E and F by simp ultimately show "Suc (offs_num n xs index key mi ma ?i') \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma ?i' - offs ?ns k ! ?i'" by arith next assume "?i' < i" hence "Suc (offs_next (offs ?ns k) (length xs + k) xs index key mi ma i) \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma i" using E by simp thus "offs_num n xs index key mi ma i \ offs_next (offs ?ns' k) (Suc (length xs + k)) (x # xs) index key mi ma i - Suc (offs ?ns k ! i)" using D by arith qed qed lemma fill_offs_enum_count_item [rule_format]: "\index_less index key; \x \ set xs. key x \ {mi..ma}; 0 < n\ \ count (mset (map the (fill xs (offs (enum xs index key n mi ma) 0) index key (length xs) mi ma))) x = count (mset xs) x" by (subst fill_count_item, simp_all, simp only: length_greater_0_conv [symmetric] offs_length enum_length, insert offs_enum_pred [of index key xs mi ma n 0], simp) text \ \null Using lemma @{thm [source] fill_offs_enum_count_item}, step 9 of the proof method can now be dealt with. It is accomplished by proving lemma @{text gcsort_count_inv}, which states that the number of the occurrences of whatever object in the objects' list is still the same after any recursive round. \null \ lemma nths_count: "count (mset (nths xs A)) x = count (mset xs) x - card {i. i < length xs \ i \ A \ xs ! i = x}" proof (induction xs arbitrary: A, simp_all add: nths_Cons) fix v xs A let ?B = "{i. i < length xs \ Suc i \ A \ xs ! i = x}" let ?C = "\v. {i. i < Suc (length xs) \ i \ A \ (v # xs) ! i = x}" have A: "\v. ?C v = ?C v \ {0} \ ?C v \ {i. \j. i = Suc j}" by (subst Int_Un_distrib [symmetric], auto, arith) have "\v. card (?C v) = card (?C v \ {0}) + card (?C v \ {i. \j. i = Suc j})" by (subst A, rule card_Un_disjoint, simp_all, blast) moreover have "\v. card ?B = card (?C v \ {i. \j. 
i = Suc j})" by (rule bij_betw_same_card [of Suc], auto) ultimately show "(0 \ A \ (v = x \ Suc (count (mset xs) x - card ?B) = Suc (count (mset xs) x) - card (?C x)) \ (v \ x \ count (mset xs) x - card ?B = count (mset xs) x - card (?C v))) \ (0 \ A \ (v = x \ count (mset xs) x - card ?B = Suc (count (mset xs) x) - card (?C x)) \ (v \ x \ count (mset xs) x - card ?B = count (mset xs) x - card (?C v)))" proof ((rule_tac [!] conjI, rule_tac [!] impI)+, simp_all) have "card ?B \ count (mset xs) x" by (simp add: count_mset length_filter_conv_card, rule card_mono, simp, blast) thus "Suc (count (mset xs) x - card ?B) = Suc (count (mset xs) x) - card ?B" by (rule Suc_diff_le [symmetric]) qed qed lemma round_count_inv [rule_format]: "index_less index key \ bn_inv p q t \ add_inv n t \ count_inv f t \ count_inv f (round index key p q r t)" proof (induction index key p q r t arbitrary: n f rule: round.induct, (rule_tac [!] impI)+, simp, simp, simp_all only: simp_thms) fix index p q r u ns xs n f and key :: "'a \ 'b" let ?t = "round index key p q r (u, ns, tl xs)" assume "\n f. bn_inv p q (u, ns, tl xs) \ add_inv n (u, ns, tl xs) \ count_inv f (u, ns, tl xs) \ count_inv f ?t" and "bn_inv p q (u, Suc 0 # ns, xs)" and "add_inv n (u, Suc 0 # ns, xs)" and "count_inv f (u, Suc 0 # ns, xs)" thus "count_inv f (round index key p q r (u, Suc 0 # ns, xs))" proof (cases ?t, simp add: add_suc, rule_tac allI, cases xs, simp_all add: disj_imp split: if_split_asm) fix y ys x and xs' :: "'a list" assume "\n f. foldl (+) 0 ns = n \ length ys = n \ (\x. count (mset ys) x = f x) \ (\x. count (mset xs') x = f x)" moreover assume "Suc (foldl (+) 0 ns) = n" and "Suc (length ys) = n" ultimately have "\n f. (\x. count (mset ys) x = f x) \ (\x. count (mset xs') x = f x)" by blast moreover assume A: "\x. (y = x \ Suc (count (mset ys) x) = f x) \ (y \ x \ count (mset ys) x = f x)" have "\x. count (mset ys) x = (f(y := f y - Suc 0)) x" (is "\x. _ = ?f' x") by (simp add: A, insert spec [OF A, where x = y], simp) ultimately have "\x. count (mset xs') x = ?f' x" .. thus "(y = x \ Suc (count (mset xs') x) = f x) \ (y \ x \ count (mset xs') x = f x)" by (simp, insert spec [OF A, where x = y], rule_tac impI, simp) qed next fix index p q r u m ns n f and key :: "'a \ 'b" and xs :: "'a list" let ?ws = "take (Suc (Suc m)) xs" let ?ys = "drop (Suc (Suc m)) xs" let ?r = "\m'. round_suc_suc index key ?ws m m' u" let ?t = "\r' v. round index key p q r' (v, ns, ?ys)" assume A: "index_less index key" assume "\ws a b c d e g h i n f. ws = ?ws \ a = bn_comp m p q r \ (b, c) = bn_comp m p q r \ d = ?r b \ (e, g) = ?r b \ (h, i) = g \ bn_inv p q (e, ns, ?ys) \ add_inv n (e, ns, ?ys) \ count_inv f (e, ns, ?ys) \ count_inv f (?t c e)" and "bn_inv p q (u, Suc (Suc m) # ns, xs)" and "add_inv n (u, Suc (Suc m) # ns, xs)" and "count_inv f (u, Suc (Suc m) # ns, xs)" thus "count_inv f (round index key p q r (u, Suc (Suc m) # ns, xs))" proof (simp split: prod.split, ((rule_tac allI)+, ((rule_tac impI)+)?)+, (erule_tac conjE)+, subst (asm) (2) add_base_zero, simp) fix m' r' v ms' ws' xs' x assume B: "?r m' = (v, ms', ws')" and C: "bn_comp m p q r = (m', r')" and D: "bn_valid m p q" and E: "Suc (Suc (foldl (+) 0 ns + m)) = n" and F: "length xs = n" assume "\ws a b c d e g h i n' f. ws = ?ws \ a = (m', r') \ b = m' \ c = r' \ d = (v, ms', ws') \ e = v \ g = (ms', ws') \ h = ms' \ i = ws' \ foldl (+) 0 ns = n' \ n - Suc (Suc m) = n' \ (\x. count (mset ?ys) x = f x) \ (\x. count (mset xs') x = f x)" hence "foldl (+) 0 ns = n - Suc (Suc m) \ (\x. 
count (mset xs') x = count (mset ?ys) x)" by simp hence "count (mset xs') x = count (mset ?ys) x" using E by simp moreover assume "\x. count (mset xs) x = f x" ultimately have "f x = count (mset ?ws) x + count (mset xs') x" by (subst (asm) append_take_drop_id [of "Suc (Suc m)", symmetric], subst (asm) mset_append, simp) with B [symmetric] show "count (mset ws') x + count (mset xs') x = f x" proof (simp add: round_suc_suc_def Let_def del: count_add_mset mset_map split: if_split_asm, subst (1 2) add_mset_add_single, simp only: count_single count_union) let ?nmi = "mini ?ws key" let ?nma = "maxi ?ws key" let ?xmi = "?ws ! ?nmi" let ?xma = "?ws ! ?nma" let ?mi = "key ?xmi" let ?ma = "key ?xma" let ?k = "case m of 0 \ m | Suc 0 \ m | Suc (Suc i) \ u + m'" let ?zs = "nths ?ws (- {?nmi, ?nma})" let ?ms = "enum ?zs index key ?k ?mi ?ma" let ?A = "{i. i < Suc (Suc m) \ (i = ?nmi \ i = ?nma) \ ?ws ! i = x}" have G: "length ?ws = Suc (Suc m)" using E and F by simp hence H: "card ?A \ count (mset ?ws) x" by (simp add: count_mset length_filter_conv_card, rule_tac card_mono, simp, blast) show "count (mset (map the (fill ?zs (offs ?ms 0) index key m ?mi ?ma))) x + (if ?xma = x then 1 else 0) + (if ?xmi = x then 1 else 0) = count (mset ?ws) x" proof (cases "m = 0") case True hence I: "length ?zs = 0" using G by (simp add: mini_maxi_nths) have "count (mset ?zs) x = count (mset ?ws) x - card ?A" using G by (subst nths_count, simp) hence J: "count (mset ?ws) x = card ?A" using H and I by simp from I show ?thesis proof (simp, (rule_tac [!] conjI, rule_tac [!] impI)+, simp_all (no_asm_simp) add: True) assume "?xmi = x" and "?xma = x" with G have "?A = {?nmi, ?nma}" by (rule_tac set_eqI, rule_tac iffI, simp_all, erule_tac disjE, insert mini_less [of ?ws key], insert maxi_less [of ?ws key], simp_all) with G have "card ?A = Suc (Suc 0)" by (simp, subst card_insert_disjoint, simp_all, rule_tac mini_maxi_neq, simp) thus "Suc (Suc 0) = count (mset (take (Suc (Suc 0)) xs)) x" using True and J by simp next assume "?xmi \ x" and "?xma = x" with G have "?A = {?nma}" by (rule_tac set_eqI, rule_tac iffI, simp_all, (erule_tac conjE)+, erule_tac disjE, insert maxi_less [of ?ws key], simp_all) thus "Suc 0 = count (mset (take (Suc (Suc 0)) xs)) x" using True and J by simp next assume "?xmi = x" and "?xma \ x" with G have "?A = {?nmi}" by (rule_tac set_eqI, rule_tac iffI, simp_all, (erule_tac conjE)+, erule_tac disjE, insert mini_less [of ?ws key], simp_all) thus "Suc 0 = count (mset (take (Suc (Suc 0)) xs)) x" using True and J by simp next assume "?xmi \ x" and "?xma \ x" hence "?A = {}" by (rule_tac set_eqI, rule_tac iffI, simp_all, (erule_tac conjE)+, erule_tac disjE, simp_all) hence "count (mset ?ws) x = 0" using J by simp thus "x \ set (take (Suc (Suc 0)) xs)" using True by simp qed next case False hence "0 < ?k" by (simp, drule_tac bn_comp_fst_nonzero [OF D], subst (asm) C, simp split: nat.split) hence "count (mset (map the (fill ?zs (offs ?ms 0) index key (length ?zs) ?mi ?ma))) x = count (mset ?zs) x" by (rule_tac fill_offs_enum_count_item [OF A], simp, rule_tac conjI, ((rule_tac mini_lb | rule_tac maxi_ub), erule_tac in_set_nthsD)+) with G show ?thesis proof (simp, (rule_tac [!] conjI, rule_tac [!] 
impI)+, simp_all add: mini_maxi_nths nths_count) assume "?xmi = x" and "?xma = x" with G have "?A = {?nmi, ?nma}" by (rule_tac set_eqI, rule_tac iffI, simp_all, erule_tac disjE, insert mini_less [of ?ws key], insert maxi_less [of ?ws key], simp_all) with G have "card ?A = Suc (Suc 0)" by (simp, subst card_insert_disjoint, simp_all, rule_tac mini_maxi_neq, simp) thus "Suc (Suc (count (mset ?ws) x - card ?A)) = count (mset ?ws) x" using H by simp next assume "?xmi \ x" and "?xma = x" with G have "?A = {?nma}" by (rule_tac set_eqI, rule_tac iffI, simp_all, (erule_tac conjE)+, erule_tac disjE, insert maxi_less [of ?ws key], simp_all) thus "Suc (count (mset ?ws) x - card ?A) = count (mset ?ws) x" using H by simp next assume "?xmi = x" and "?xma \ x" with G have "?A = {?nmi}" by (rule_tac set_eqI, rule_tac iffI, simp_all, (erule_tac conjE)+, erule_tac disjE, insert mini_less [of ?ws key], simp_all) thus "Suc (count (mset ?ws) x - card ?A) = count (mset ?ws) x" using H by simp next assume "?xmi \ x" and "?xma \ x" hence "?A = {}" by (rule_tac set_eqI, rule_tac iffI, simp_all, (erule_tac conjE)+, erule_tac disjE, simp_all) thus "count (mset ?ws) x - card ?A = count (mset ?ws) x" by (simp (no_asm_simp)) qed qed qed qed qed lemma gcsort_count_inv: assumes A: "index_less index key" and B: "add_inv n t" and C: "n \ p" shows "\t' \ gcsort_set index key p t; count_inv f t\ \ count_inv f t'" by (erule gcsort_set.induct, simp, drule gcsort_add_inv [OF A _ B C], rule round_count_inv [OF A], simp_all del: bn_inv.simps, erule conjE, frule sym, erule subst, rule bn_inv_intro, insert C, simp) text \ \null The only remaining task is to address step 10 of the proof method, which is done by proving theorem @{text gcsort_count}. It holds under the conditions that the objects' number is not larger than the counters' upper bound and function @{text index} satisfies predicate @{const index_less}, and states that for any object, function @{const gcsort} leaves unchanged the number of its occurrences within the input objects' list. \null \ theorem gcsort_count: assumes A: "index_less index key" and B: "length xs \ p" shows "count (mset (gcsort index key p xs)) x = count (mset xs) x" proof - let ?t = "(0, [length xs], xs)" have "count_inv (count (mset xs)) (gcsort_aux index key p ?t)" by (rule gcsort_count_inv [OF A _ B], rule gcsort_add_input, rule gcsort_aux_set, rule gcsort_count_input) hence "count (mset (gcsort_out (gcsort_aux index key p ?t))) x = (count (mset xs)) x" by (rule gcsort_count_intro) moreover have "?t = gcsort_in xs" by (simp add: gcsort_in_def) ultimately show ?thesis by (simp add: gcsort_def) qed end diff --git a/thys/Incredible_Proof_Machine/Incredible_Propositional_Tasks.thy b/thys/Incredible_Proof_Machine/Incredible_Propositional_Tasks.thy --- a/thys/Incredible_Proof_Machine/Incredible_Propositional_Tasks.thy +++ b/thys/Incredible_Proof_Machine/Incredible_Propositional_Tasks.thy @@ -1,328 +1,326 @@ theory Incredible_Propositional_Tasks imports Incredible_Completeness Incredible_Propositional begin context ND_Rules_Inst begin lemma eff_NatRuleI: "nat_rule rule c ants \ entail = (\ \ subst s (freshen a c)) \ hyps = ((\ant. ((\p. subst s (freshen a p)) |`| a_hyps ant |\| \ \ subst s (freshen a (a_conc ant)))) |`| ants) \ (\ ant f. ant |\| ants \ f |\| \ \ freshenLC a ` (a_fresh ant) \ lconsts f = {}) \ (\ ant. 
ant |\| ants \ freshenLC a ` (a_fresh ant) \ subst_lconsts s = {}) \ eff (NatRule rule) entail hyps" by (drule eff.intros(2)) simp_all end context Abstract_Task begin lemma natEff_InstI: "rule = (r,c) \ c \ set (consequent r) \ antec = f_antecedent r \ natEff_Inst rule c antec" by (metis natEff_Inst.intros) end context begin subsubsection \Task 1.1\ text \This is the very first task of the Incredible Proof Machine: @{term "A \ A"}\ abbreviation A :: "(string,prop_funs) pform" where "A \ Fun (Const ''A'') []" text \First the task is defined as an @{term Abstract_Task}.\ interpretation task1_1: Abstract_Task "curry (SOME f. bij f):: nat \ string \ string" "\_. id" "\_. {}" "closed :: (string, prop_funs) pform \ bool" subst "\_. {}" "\_. id" "Var undefined" antecedent consequent prop_rules "[A]" "[A]" by unfold_locales simp text \Then we show, that this task has a proof within our formalization of natural deduction by giving a concrete proof tree.\ lemma "task1_1.solved" unfolding task1_1.solved_def apply clarsimp apply (rule_tac x="{|A|}" in exI) apply clarsimp apply (rule_tac x="Node ({|A|} \ A, Axiom) {||}" in exI) apply clarsimp apply (rule conjI) apply (rule task1_1.wf) apply (solves clarsimp) apply clarsimp apply (rule task1_1.eff.intros(1)) apply (solves simp) apply (solves clarsimp) by (auto intro: tfinite.intros) print_locale Vertex_Graph interpretation task1_1: Vertex_Graph task1_1.nodes task1_1.inPorts task1_1.outPorts "{|0::nat,1|}" "undefined(0 := Assumption A, 1 := Conclusion A)" . print_locale Pre_Port_Graph interpretation task1_1: Pre_Port_Graph task1_1.nodes task1_1.inPorts task1_1.outPorts "{|0::nat,1|}" "undefined(0 := Assumption A, 1 := Conclusion A)" "{((0,Reg A),(1,plain_ant A))}" . print_locale Instantiation interpretation task1_1: Instantiation task1_1.inPorts task1_1.outPorts "undefined(0 := Assumption A, 1 := Conclusion A)" task1_1.hyps task1_1.nodes "{((0,Reg A),(1,plain_ant A))}" "{|0::nat,1|}" task1_1.labelsIn task1_1.labelsOut "curry (SOME f. bij f):: nat \ string \ string" "\_. id" "\_. {}" "closed :: (string, prop_funs) pform \ bool" subst "\_. {}" "\_. id" "Var undefined" "id" "undefined" by unfold_locales simp declare One_nat_def [simp del] lemma path_one_edge[simp]: "task1_1.path v1 v2 pth \ (v1 = 0 \ v2 = 1 \ pth = [((0,Reg A),(1,plain_ant A))] \ pth = [] \ v1 = v2)" apply (cases pth) apply (auto simp add: task1_1.path_cons_simp') apply (rename_tac list, case_tac list, auto simp add: task1_1.path_cons_simp')+ done text \Finally we can also show that there is a proof graph for this task.\ interpretation Tasked_Proof_Graph "curry (SOME f. bij f):: nat \ string \ string" "\_. id" "\_. {}" "closed :: (string, prop_funs) pform \ bool" subst "\_. {}" "\_. 
id" "Var undefined" antecedent consequent prop_rules "[A]" "[A]" "{|0::nat,1|}" "undefined(0 := Assumption A, 1 := Conclusion A)" "{((0,Reg A),(1,plain_ant A))}" "id" "undefined" apply unfold_locales apply (solves simp) apply (solves clarsimp) apply (solves clarsimp) apply (solves clarsimp) apply (solves fastforce) apply (solves fastforce) apply (solves \clarsimp simp add: task1_1.labelAtOut_def task1_1.labelAtIn_def\) apply (solves clarsimp) apply (solves clarsimp) done subsubsection \Task 2.11\ text \This is a slightly more interesting task as it involves both our connectives: @{term "P \ Q \ R \ P \ (Q \ R)"}\ abbreviation B :: "(string,prop_funs) pform" where "B \ Fun (Const ''B'') []" abbreviation C :: "(string,prop_funs) pform" where "C \ Fun (Const ''C'') []" interpretation task2_11: Abstract_Task "curry (SOME f. bij f):: nat \ string \ string" "\_. id" "\_. {}" "closed :: (string, prop_funs) pform \ bool" subst "\_. {}" "\_. id" "Var undefined" antecedent consequent prop_rules "[Fun imp [Fun and [A,B],C]]" "[Fun imp [A,Fun imp [B,C]]]" by unfold_locales simp_all abbreviation "n_andI \ task2_11.n_rules !! 0" abbreviation "n_andE1 \ task2_11.n_rules !! 1" abbreviation "n_andE2 \ task2_11.n_rules !! 2" abbreviation "n_impI \ task2_11.n_rules !! 3" abbreviation "n_impE \ task2_11.n_rules !! 4" lemma n_andI [simp]: "n_andI = (andI, Fun and [X,Y])" unfolding task2_11.n_rules_def by (simp add: prop_rules_def) lemma n_andE1 [simp]: "n_andE1 = (andE, X)" unfolding task2_11.n_rules_def One_nat_def by (simp add: prop_rules_def) lemma n_andE2 [simp]: "n_andE2 = (andE, Y)" unfolding task2_11.n_rules_def numeral_2_eq_2 by (simp add: prop_rules_def) lemma n_impI [simp]: "n_impI = (impI, Fun imp [X,Y])" unfolding task2_11.n_rules_def numeral_3_eq_3 by (simp add: prop_rules_def) lemma n_impE [simp]: "n_impE = (impE, Y)" proof - have "n_impE = task2_11.n_rules !! Suc 3" by simp also have "... = (impE, Y)" unfolding task2_11.n_rules_def numeral_3_eq_3 by (simp add: prop_rules_def) finally show ?thesis . qed lemma subst_Var_eq_id [simp]: "subst Var = id" by (rule ext) (induct_tac x; auto simp: map_idI) lemma xy_update: "f = undefined(''X'' := x, ''Y'' := y) \ x = f ''X'' \ y = f ''Y''" by force lemma y_update: "f = undefined(''Y'':=y) \ y = f ''Y''" by force declare snth.simps(1) [simp del] text \By interpreting @{term Solved_Task} we show that there is a proof tree for the task. We get the existence of the proof graph for free by using the completeness theorem.\ interpretation task2_11: Solved_Task "curry (SOME f. bij f):: nat \ string \ string" "\_. id" "\_. {}" "closed :: (string, prop_funs) pform \ bool" subst "\_. {}" "\_. 
id" "Var undefined" antecedent consequent prop_rules "[Fun imp [Fun and [A,B],C]]" "[Fun imp [A,Fun imp [B,C]]]" apply unfold_locales unfolding task2_11.solved_def apply clarsimp apply (rule_tac x="{|Fun imp [Fun and [A,B],C]|}" in exI) apply clarsimp \ \The actual proof tree for this task.\ apply (rule_tac x="Node ({|Fun imp [Fun and [A, B], C]|} \ Fun imp [A, Fun imp [B, C]],NatRule n_impI) {|Node ({|Fun imp [Fun and [A, B], C], A|} \ Fun imp [B,C],NatRule n_impI) {|Node ({|Fun imp [Fun and [A, B], C], A, B|} \ C,NatRule n_impE) {|Node ({|Fun imp [Fun and [A, B], C], A, B|} \ Fun imp [Fun and [A,B], C],Axiom) {||}, Node ({|Fun imp [Fun and [A, B], C], A, B|} \ Fun and [A,B],NatRule n_andI) {|Node ({|Fun imp [Fun and [A, B], C], A, B|} \ A,Axiom) {||}, Node ({|Fun imp [Fun and [A, B], C], A, B|} \ B,Axiom) {||} |} |} |} |}" in exI) apply clarsimp apply (rule conjI) apply (rule task1_1.wf) apply (solves \clarsimp; metis n_impI snth_smap snth_sset\) apply clarsimp apply (rule task1_1.eff_NatRuleI [unfolded propositional.freshen_def, simplified]) apply simp_all[4] apply (rule task2_11.natEff_InstI) apply (solves simp) apply (solves simp) apply (solves simp) apply (intro conjI; simp; rule xy_update) apply (solves simp) apply (solves \fastforce simp: propositional.f_antecedent_def\) - apply clarsimp apply (rule task1_1.wf) apply (solves \clarsimp; metis n_impI snth_smap snth_sset\) apply clarsimp apply (rule task1_1.eff_NatRuleI [unfolded propositional.freshen_def, simplified]) apply simp_all[4] apply (rule task2_11.natEff_InstI) apply (solves simp) apply (solves simp) apply (solves simp) apply (intro conjI; simp; rule xy_update) apply (solves simp) apply (solves \fastforce simp: propositional.f_antecedent_def\) - apply clarsimp apply (rule task1_1.wf) apply (solves \clarsimp; metis n_impE snth_smap snth_sset\) apply clarsimp apply (rule task1_1.eff_NatRuleI [unfolded propositional.freshen_def, simplified, where s="undefined(''Y'':=C,''X'':=Fun and [A,B])"]) apply simp_all[4] apply (rule task2_11.natEff_InstI) apply (solves simp) apply (solves simp) apply (solves simp) apply (solves \intro conjI; simp\) apply (solves \simp add: propositional.f_antecedent_def\) apply (erule disjE) apply (auto intro: task1_1.wf intro!: task1_1.eff.intros(1))[1] apply (rule task1_1.wf) apply (solves \clarsimp; metis n_andI snth_smap snth_sset\) apply clarsimp apply (rule task1_1.eff_NatRuleI [unfolded propositional.freshen_def, simplified]) apply simp_all[4] apply (rule task2_11.natEff_InstI) apply (solves simp) apply (solves simp) apply (solves simp) apply (intro conjI; simp; rule xy_update) apply (solves simp) apply (solves \simp add: propositional.f_antecedent_def\) apply clarsimp apply (erule disjE) apply (solves \rule task1_1.wf; auto intro: task1_1.eff.intros(1)\) apply (solves \rule task1_1.wf; auto intro: task1_1.eff.intros(1)\) by (rule tfinite.intros; auto)+ interpretation Tasked_Proof_Graph "curry (SOME f. bij f):: nat \ string \ string" "\_. id" "\_. {}" "closed :: (string, prop_funs) pform \ bool" subst "\_. {}" "\_. 
id" "Var undefined" antecedent consequent prop_rules "[Fun imp [Fun and [A,B],C]]" "[Fun imp [A,Fun imp [B,C]]]" task2_11.vertices task2_11.nodeOf task2_11.edges task2_11.vidx task2_11.inst by unfold_locales end end diff --git a/thys/Jinja/Compiler/Correctness1.thy b/thys/Jinja/Compiler/Correctness1.thy --- a/thys/Jinja/Compiler/Correctness1.thy +++ b/thys/Jinja/Compiler/Correctness1.thy @@ -1,756 +1,760 @@ (* Title: Jinja/Compiler/Correctness1.thy Author: Tobias Nipkow Copyright TUM 2003 *) section \Correctness of Stage 1\ theory Correctness1 imports J1WellForm Compiler1 begin subsection\Correctness of program compilation\ primrec unmod :: "expr\<^sub>1 \ nat \ bool" and unmods :: "expr\<^sub>1 list \ nat \ bool" where "unmod (new C) i = True" | "unmod (Cast C e) i = unmod e i" | "unmod (Val v) i = True" | "unmod (e\<^sub>1 \bop\ e\<^sub>2) i = (unmod e\<^sub>1 i \ unmod e\<^sub>2 i)" | "unmod (Var i) j = True" | "unmod (i:=e) j = (i \ j \ unmod e j)" | "unmod (e\F{D}) i = unmod e i" | "unmod (e\<^sub>1\F{D}:=e\<^sub>2) i = (unmod e\<^sub>1 i \ unmod e\<^sub>2 i)" | "unmod (e\M(es)) i = (unmod e i \ unmods es i)" | "unmod {j:T; e} i = unmod e i" | "unmod (e\<^sub>1;;e\<^sub>2) i = (unmod e\<^sub>1 i \ unmod e\<^sub>2 i)" | "unmod (if (e) e\<^sub>1 else e\<^sub>2) i = (unmod e i \ unmod e\<^sub>1 i \ unmod e\<^sub>2 i)" | "unmod (while (e) c) i = (unmod e i \ unmod c i)" | "unmod (throw e) i = unmod e i" | "unmod (try e\<^sub>1 catch(C i) e\<^sub>2) j = (unmod e\<^sub>1 j \ (if i=j then False else unmod e\<^sub>2 j))" | "unmods ([]) i = True" | "unmods (e#es) i = (unmod e i \ unmods es i)" lemma hidden_unmod: "\Vs. hidden Vs i \ unmod (compE\<^sub>1 Vs e) i" and "\Vs. hidden Vs i \ unmods (compEs\<^sub>1 Vs es) i" (*<*) apply(induct e and es rule: compE\<^sub>1.induct compEs\<^sub>1.induct) apply (simp_all add:hidden_inacc) apply(auto simp add:hidden_def) done (*>*) lemma eval\<^sub>1_preserves_unmod: "\ P \\<^sub>1 \e,(h,ls)\ \ \e',(h',ls')\; unmod e i; i < size ls \ \ ls ! i = ls' ! i" and "\ P \\<^sub>1 \es,(h,ls)\ [\] \es',(h',ls')\; unmods es i; i < size ls \ \ ls ! i = ls' ! i" (*<*) apply(induct rule:eval\<^sub>1_evals\<^sub>1_inducts) apply(auto dest!:eval\<^sub>1_preserves_len split:if_split_asm) done (*>*) lemma LAss_lem: "\x \ set xs; size xs \ size ys \ \ m\<^sub>1 \\<^sub>m m\<^sub>2(xs[\]ys) \ m\<^sub>1(x\y) \\<^sub>m m\<^sub>2(xs[\]ys[last_index xs x := y])" (*<*) by(simp add:map_le_def fun_upds_apply eq_sym_conv) (*>*) lemma Block_lem: fixes l :: "'a \ 'b" assumes 0: "l \\<^sub>m [Vs [\] ls]" and 1: "l' \\<^sub>m [Vs [\] ls', V\v]" and hidden: "V \ set Vs \ ls ! last_index Vs V = ls' ! last_index Vs V" and size: "size ls = size ls'" "size Vs < size ls'" shows "l'(V := l V) \\<^sub>m [Vs [\] ls']" (*<*) proof - have "l'(V := l V) \\<^sub>m [Vs [\] ls', V\v](V := l V)" using 1 by(rule map_le_upd) also have "\ = [Vs [\] ls'](V := l V)" by simp also have "\ \\<^sub>m [Vs [\] ls']" proof (cases "l V") case None thus ?thesis by simp next case (Some w) hence "[Vs [\] ls] V = Some w" using 0 by(force simp add: map_le_def split:if_splits) hence VinVs: "V \ set Vs" and w: "w = ls ! last_index Vs V" using size by(auto simp add:fun_upds_apply split:if_splits) hence "w = ls' ! last_index Vs V" using hidden[OF VinVs] by simp hence "[Vs [\] ls'](V := l V) = [Vs [\] ls']" using Some size VinVs by(simp add: map_upds_upd_conv_last_index) thus ?thesis by simp qed finally show ?thesis . 
qed (*>*) (*<*) declare fun_upd_apply[simp del] (*>*) text\\noindent The main theorem:\ theorem assumes wf: "wwf_J_prog P" shows eval\<^sub>1_eval: "P \ \e,(h,l)\ \ \e',(h',l')\ \ (\Vs ls. \ fv e \ set Vs; l \\<^sub>m [Vs[\]ls]; size Vs + max_vars e \ size ls \ \ \ls'. compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs e,(h,ls)\ \ \fin\<^sub>1 e',(h',ls')\ \ l' \\<^sub>m [Vs[\]ls'])" (*<*) (is "_ \ (\Vs ls. PROP ?P e h l e' h' l' Vs ls)" is "_ \ (\Vs ls. \ _; _; _ \ \ \ls'. ?Post e h l e' h' l' Vs ls ls')") (*>*) and evals\<^sub>1_evals: "P \ \es,(h,l)\ [\] \es',(h',l')\ \ (\Vs ls. \ fvs es \ set Vs; l \\<^sub>m [Vs[\]ls]; size Vs + max_varss es \ size ls \ \ \ls'. compP\<^sub>1 P \\<^sub>1 \compEs\<^sub>1 Vs es,(h,ls)\ [\] \compEs\<^sub>1 Vs es',(h',ls')\ \ l' \\<^sub>m [Vs[\]ls'])" (*<*) (is "_ \ (\Vs ls. PROP ?Ps es h l es' h' l' Vs ls)" is "_ \ (\Vs ls. \ _; _; _\ \ \ls'. ?Posts es h l es' h' l' Vs ls ls')") proof (induct rule:eval_evals_inducts) case Nil thus ?case by(fastforce intro!:Nil\<^sub>1) next case (Cons e h l v h' l' es es' h\<^sub>2 l\<^sub>2) have "PROP ?P e h l (Val v) h' l' Vs ls" by fact with Cons.prems obtain ls' where 1: "?Post e h l (Val v) h' l' Vs ls ls'" "size ls = size ls'" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?Ps es h' l' es' h\<^sub>2 l\<^sub>2 Vs ls'" by fact with 1 Cons.prems obtain ls\<^sub>2 where 2: "?Posts es h' l' es' h\<^sub>2 l\<^sub>2 Vs ls' ls\<^sub>2" by(auto) from 1 2 Cons show ?case by(auto intro!:Cons\<^sub>1) next case ConsThrow thus ?case by(fastforce intro!:ConsThrow\<^sub>1 dest: eval_final) next case (Block e h l V e' h' l' T) let ?Vs = "Vs @ [V]" have IH: "\fv e \ set ?Vs; l(V := None) \\<^sub>m [?Vs [\] ls]; size ?Vs + max_vars e \ size ls\ \ \ls'. compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 ?Vs e,(h,ls)\ \ \fin\<^sub>1 e',(h', ls')\ \ l' \\<^sub>m [?Vs [\] ls']" and fv: "fv {V:T; e} \ set Vs" and rel: "l \\<^sub>m [Vs [\] ls]" and len: "length Vs + max_vars {V:T; e} \ length ls" by fact+ have len': "length Vs < length ls" using len by auto have "fv e \ set ?Vs" using fv by auto moreover have "l(V := None) \\<^sub>m [?Vs [\] ls]" using rel len' by simp moreover have "size ?Vs + max_vars e \ size ls" using len by simp ultimately obtain ls' where 1: "compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 ?Vs e,(h,ls)\ \ \fin\<^sub>1 e',(h',ls')\" and rel': "l' \\<^sub>m [?Vs [\] ls']" using IH by blast have [simp]: "length ls = length ls'" by(rule eval\<^sub>1_preserves_len[OF 1]) show "\ls'. compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs {V:T; e},(h,ls)\ \ \fin\<^sub>1 e',(h',ls')\ \ l'(V := l V) \\<^sub>m [Vs [\] ls']" (is "\ls'. ?R ls'") proof show "?R ls'" proof show "compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs {V:T; e},(h,ls)\ \ \fin\<^sub>1 e',(h',ls')\" using 1 by(simp add:Block\<^sub>1) next show "l'(V := l V) \\<^sub>m [Vs [\] ls']" proof - have "l' \\<^sub>m [Vs [\] ls', V \ ls' ! length Vs]" using len' rel' by simp moreover { assume VinVs: "V \ set Vs" hence "hidden (Vs @ [V]) (last_index Vs V)" by(rule hidden_last_index) hence "unmod (compE\<^sub>1 (Vs @ [V]) e) (last_index Vs V)" by(rule hidden_unmod) moreover have "last_index Vs V < length ls" using len' VinVs by simp ultimately have "ls ! last_index Vs V = ls' ! 
last_index Vs V" by(rule eval\<^sub>1_preserves_unmod[OF 1]) } ultimately show ?thesis using Block_lem[OF rel] len' by auto qed qed qed next case (TryThrow e' h l a h' l' D fs C V e\<^sub>2) have "PROP ?P e' h l (Throw a) h' l' Vs ls" by fact with TryThrow.prems obtain ls' where 1: "?Post e' h l (Throw a) h' l' Vs ls ls'" by(auto) show ?case using 1 TryThrow.hyps by(auto intro!:eval\<^sub>1_evals\<^sub>1.TryThrow\<^sub>1) next case (TryCatch e\<^sub>1 h l a h\<^sub>1 l\<^sub>1 D fs C e\<^sub>2 V e' h\<^sub>2 l\<^sub>2) let ?e = "try e\<^sub>1 catch(C V) e\<^sub>2" have IH\<^sub>1: "\fv e\<^sub>1 \ set Vs; l \\<^sub>m [Vs [\] ls]; size Vs + max_vars e\<^sub>1 \ length ls\ \ \ls\<^sub>1. compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs e\<^sub>1,(h,ls)\ \ \fin\<^sub>1 (Throw a),(h\<^sub>1,ls\<^sub>1)\ \ l\<^sub>1 \\<^sub>m [Vs [\] ls\<^sub>1]" and fv: "fv ?e \ set Vs" and rel: "l \\<^sub>m [Vs [\] ls]" and len: "length Vs + max_vars ?e \ length ls" by fact+ have "fv e\<^sub>1 \ set Vs" using fv by auto moreover have "length Vs + max_vars e\<^sub>1 \ length ls" using len by(auto) ultimately obtain ls\<^sub>1 where 1: "compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs e\<^sub>1,(h,ls)\ \ \Throw a,(h\<^sub>1,ls\<^sub>1)\" and rel\<^sub>1: "l\<^sub>1 \\<^sub>m [Vs [\] ls\<^sub>1]" using IH\<^sub>1 rel by fastforce from 1 have [simp]: "size ls = size ls\<^sub>1" by(rule eval\<^sub>1_preserves_len) let ?Vs = "Vs @ [V]" let ?ls = "(ls\<^sub>1[size Vs:=Addr a])" have IH\<^sub>2: "\fv e\<^sub>2 \ set ?Vs; l\<^sub>1(V \ Addr a) \\<^sub>m [?Vs [\] ?ls]; length ?Vs + max_vars e\<^sub>2 \ length ?ls\ \ \ls\<^sub>2. compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 ?Vs e\<^sub>2,(h\<^sub>1,?ls)\ \ \fin\<^sub>1 e',(h\<^sub>2, ls\<^sub>2)\ \ l\<^sub>2 \\<^sub>m [?Vs [\] ls\<^sub>2]" by fact have len\<^sub>1: "size Vs < size ls\<^sub>1" using len by(auto) have "fv e\<^sub>2 \ set ?Vs" using fv by auto moreover have "l\<^sub>1(V \ Addr a) \\<^sub>m [?Vs [\] ?ls]" using rel\<^sub>1 len\<^sub>1 by simp moreover have "length ?Vs + max_vars e\<^sub>2 \ length ?ls" using len by(auto) ultimately obtain ls\<^sub>2 where 2: "compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 ?Vs e\<^sub>2,(h\<^sub>1,?ls)\ \ \fin\<^sub>1 e',(h\<^sub>2, ls\<^sub>2)\" and rel\<^sub>2: "l\<^sub>2 \\<^sub>m [?Vs [\] ls\<^sub>2]" using IH\<^sub>2 by blast from 2 have [simp]: "size ls\<^sub>1 = size ls\<^sub>2" by(fastforce dest: eval\<^sub>1_preserves_len) show "\ls\<^sub>2. compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs ?e,(h,ls)\ \ \fin\<^sub>1 e',(h\<^sub>2,ls\<^sub>2)\ \ l\<^sub>2(V := l\<^sub>1 V) \\<^sub>m [Vs [\] ls\<^sub>2]" (is "\ls\<^sub>2. ?R ls\<^sub>2") proof show "?R ls\<^sub>2" proof have hp: "h\<^sub>1 a = Some (D, fs)" by fact have "P \ D \\<^sup>* C" by fact hence caught: "compP\<^sub>1 P \ D \\<^sup>* C" by simp from TryCatch\<^sub>1[OF 1 _ caught len\<^sub>1 2, OF hp] show "compP\<^sub>1 P \\<^sub>1 \compE\<^sub>1 Vs ?e,(h,ls)\ \ \fin\<^sub>1 e',(h\<^sub>2,ls\<^sub>2)\" by simp next show "l\<^sub>2(V := l\<^sub>1 V) \\<^sub>m [Vs [\] ls\<^sub>2]" proof - have "l\<^sub>2 \\<^sub>m [Vs [\] ls\<^sub>2, V \ ls\<^sub>2 ! length Vs]" using len\<^sub>1 rel\<^sub>2 by simp moreover { assume VinVs: "V \ set Vs" hence "hidden (Vs @ [V]) (last_index Vs V)" by(rule hidden_last_index) hence "unmod (compE\<^sub>1 (Vs @ [V]) e\<^sub>2) (last_index Vs V)" by(rule hidden_unmod) moreover have "last_index Vs V < length ?ls" using len\<^sub>1 VinVs by simp ultimately have "?ls ! last_index Vs V = ls\<^sub>2 ! 
last_index Vs V" by(rule eval\<^sub>1_preserves_unmod[OF 2]) moreover have "last_index Vs V < size Vs" using VinVs by simp ultimately have "ls\<^sub>1 ! last_index Vs V = ls\<^sub>2 ! last_index Vs V" using len\<^sub>1 by(simp del:size_last_index_conv) } ultimately show ?thesis using Block_lem[OF rel\<^sub>1] len\<^sub>1 by simp qed qed qed next case Try thus ?case by(fastforce intro!:Try\<^sub>1) next case Throw thus ?case by(fastforce intro!:Throw\<^sub>1) next case ThrowNull thus ?case by(fastforce intro!:ThrowNull\<^sub>1) next case ThrowThrow thus ?case by(fastforce intro!:ThrowThrow\<^sub>1) next case (CondT e h l h\<^sub>1 l\<^sub>1 e\<^sub>1 e' h\<^sub>2 l\<^sub>2 e\<^sub>2) have "PROP ?P e h l true h\<^sub>1 l\<^sub>1 Vs ls" by fact with CondT.prems obtain ls\<^sub>1 where 1: "?Post e h l true h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>1 h\<^sub>1 l\<^sub>1 e' h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 CondT.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>1 h\<^sub>1 l\<^sub>1 e' h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 show ?case by(auto intro!:CondT\<^sub>1) next case (CondF e h l h\<^sub>1 l\<^sub>1 e\<^sub>2 e' h\<^sub>2 l\<^sub>2 e\<^sub>1 Vs ls) have "PROP ?P e h l false h\<^sub>1 l\<^sub>1 Vs ls" by fact with CondF.prems obtain ls\<^sub>1 where 1: "?Post e h l false h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>2 h\<^sub>1 l\<^sub>1 e' h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 CondF.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>2 h\<^sub>1 l\<^sub>1 e' h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 show ?case by(auto intro!:CondF\<^sub>1) next case CondThrow thus ?case by(fastforce intro!:CondThrow\<^sub>1) next case (Seq e h l v h\<^sub>1 l\<^sub>1 e\<^sub>1 e' h\<^sub>2 l\<^sub>2) have "PROP ?P e h l (Val v) h\<^sub>1 l\<^sub>1 Vs ls" by fact with Seq.prems obtain ls\<^sub>1 where 1: "?Post e h l (Val v) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>1 h\<^sub>1 l\<^sub>1 e' h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 Seq.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>1 h\<^sub>1 l\<^sub>1 e' h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 Seq show ?case by(auto intro!:Seq\<^sub>1) next case SeqThrow thus ?case by(fastforce intro!:SeqThrow\<^sub>1) next case WhileF thus ?case by(fastforce intro!:eval\<^sub>1_evals\<^sub>1.intros) next case (WhileT e h l h\<^sub>1 l\<^sub>1 c v h\<^sub>2 l\<^sub>2 e' h\<^sub>3 l\<^sub>3) have "PROP ?P e h l true h\<^sub>1 l\<^sub>1 Vs ls" by fact with WhileT.prems obtain ls\<^sub>1 where 1: "?Post e h l true h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P c h\<^sub>1 l\<^sub>1 (Val v) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 WhileT.prems obtain ls\<^sub>2 where 2: "?Post c h\<^sub>1 l\<^sub>1 (Val v) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" "size ls\<^sub>1 = size ls\<^sub>2" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P (While (e) c) h\<^sub>2 l\<^sub>2 e' h\<^sub>3 l\<^sub>3 Vs ls\<^sub>2" by fact with 1 2 WhileT.prems obtain ls\<^sub>3 where 3: "?Post (While (e) c) h\<^sub>2 l\<^sub>2 e' h\<^sub>3 l\<^sub>3 Vs ls\<^sub>2 ls\<^sub>3" by(auto) from 1 2 3 show ?case by(auto intro!:WhileT\<^sub>1) next case 
(WhileBodyThrow e h l h\<^sub>1 l\<^sub>1 c e' h\<^sub>2 l\<^sub>2) have "PROP ?P e h l true h\<^sub>1 l\<^sub>1 Vs ls" by fact with WhileBodyThrow.prems obtain ls\<^sub>1 where 1: "?Post e h l true h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P c h\<^sub>1 l\<^sub>1 (throw e') h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 WhileBodyThrow.prems obtain ls\<^sub>2 where 2: "?Post c h\<^sub>1 l\<^sub>1 (throw e') h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by auto from 1 2 show ?case by(auto intro!:WhileBodyThrow\<^sub>1) next case WhileCondThrow thus ?case by(fastforce intro!:WhileCondThrow\<^sub>1) next case New thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case NewFail thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case Cast thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case CastNull thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case CastThrow thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case (CastFail e h l a h\<^sub>1 l\<^sub>1 D fs C) have "PROP ?P e h l (addr a) h\<^sub>1 l\<^sub>1 Vs ls" by fact with CastFail.prems obtain ls\<^sub>1 where 1: "?Post e h l (addr a) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" by auto show ?case using 1 CastFail.hyps by(auto intro!:CastFail\<^sub>1[where D=D]) next case Val thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case (BinOp e h l v\<^sub>1 h\<^sub>1 l\<^sub>1 e\<^sub>1 v\<^sub>2 h\<^sub>2 l\<^sub>2 bop v) have "PROP ?P e h l (Val v\<^sub>1) h\<^sub>1 l\<^sub>1 Vs ls" by fact with BinOp.prems obtain ls\<^sub>1 where 1: "?Post e h l (Val v\<^sub>1) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>1 h\<^sub>1 l\<^sub>1 (Val v\<^sub>2) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 BinOp.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>1 h\<^sub>1 l\<^sub>1 (Val v\<^sub>2) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 BinOp show ?case by(auto intro!:BinOp\<^sub>1) next case (BinOpThrow2 e\<^sub>0 h l v\<^sub>1 h\<^sub>1 l\<^sub>1 e\<^sub>1 e h\<^sub>2 l\<^sub>2 bop) have "PROP ?P e\<^sub>0 h l (Val v\<^sub>1) h\<^sub>1 l\<^sub>1 Vs ls" by fact with BinOpThrow2.prems obtain ls\<^sub>1 where 1: "?Post e\<^sub>0 h l (Val v\<^sub>1) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>1 h\<^sub>1 l\<^sub>1 (throw e) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 BinOpThrow2.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>1 h\<^sub>1 l\<^sub>1 (throw e) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 BinOpThrow2 show ?case by(auto intro!:BinOpThrow\<^sub>2\<^sub>1) next case BinOpThrow1 thus ?case by(fastforce intro!:eval\<^sub>1_evals\<^sub>1.intros) next case Var thus ?case by(force intro!:Var\<^sub>1 simp add: map_le_def fun_upds_apply) next case LAss thus ?case by(fastforce simp add: LAss_lem intro:eval\<^sub>1_evals\<^sub>1.intros dest:eval\<^sub>1_preserves_len) next case LAssThrow thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case FAcc thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case FAccNull thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case FAccThrow thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case (FAss e\<^sub>1 h l a h\<^sub>1 l\<^sub>1 
e\<^sub>2 v h\<^sub>2 l\<^sub>2 C fs fs' F D h\<^sub>2') have "PROP ?P e\<^sub>1 h l (addr a) h\<^sub>1 l\<^sub>1 Vs ls" by fact with FAss.prems obtain ls\<^sub>1 where 1: "?Post e\<^sub>1 h l (addr a) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>2 h\<^sub>1 l\<^sub>1 (Val v) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 FAss.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>2 h\<^sub>1 l\<^sub>1 (Val v) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 FAss show ?case by(auto intro!:FAss\<^sub>1) next case (FAssNull e\<^sub>1 h l h\<^sub>1 l\<^sub>1 e\<^sub>2 v h\<^sub>2 l\<^sub>2 F D) have "PROP ?P e\<^sub>1 h l null h\<^sub>1 l\<^sub>1 Vs ls" by fact with FAssNull.prems obtain ls\<^sub>1 where 1: "?Post e\<^sub>1 h l null h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>2 h\<^sub>1 l\<^sub>1 (Val v) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 FAssNull.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>2 h\<^sub>1 l\<^sub>1 (Val v) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 FAssNull show ?case by(auto intro!:FAssNull\<^sub>1) next case FAssThrow1 thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case (FAssThrow2 e\<^sub>1 h l v h\<^sub>1 l\<^sub>1 e\<^sub>2 e h\<^sub>2 l\<^sub>2 F D) have "PROP ?P e\<^sub>1 h l (Val v) h\<^sub>1 l\<^sub>1 Vs ls" by fact with FAssThrow2.prems obtain ls\<^sub>1 where 1: "?Post e\<^sub>1 h l (Val v) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?P e\<^sub>2 h\<^sub>1 l\<^sub>1 (throw e) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 FAssThrow2.prems obtain ls\<^sub>2 where 2: "?Post e\<^sub>2 h\<^sub>1 l\<^sub>1 (throw e) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 FAssThrow2 show ?case by(auto intro!:FAssThrow\<^sub>2\<^sub>1) next case (CallNull e h l h\<^sub>1 l\<^sub>1 es vs h\<^sub>2 l\<^sub>2 M) have "PROP ?P e h l null h\<^sub>1 l\<^sub>1 Vs ls" by fact with CallNull.prems obtain ls\<^sub>1 where 1: "?Post e h l null h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?Ps es h\<^sub>1 l\<^sub>1 (map Val vs) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 CallNull.prems obtain ls\<^sub>2 where 2: "?Posts es h\<^sub>1 l\<^sub>1 (map Val vs) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 CallNull show ?case by (auto simp add: comp_def elim!: CallNull\<^sub>1) next case CallObjThrow thus ?case by(fastforce intro:eval\<^sub>1_evals\<^sub>1.intros) next case (CallParamsThrow e h l v h\<^sub>1 l\<^sub>1 es vs ex es' h\<^sub>2 l\<^sub>2 M) have "PROP ?P e h l (Val v) h\<^sub>1 l\<^sub>1 Vs ls" by fact with CallParamsThrow.prems obtain ls\<^sub>1 where 1: "?Post e h l (Val v) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?Ps es h\<^sub>1 l\<^sub>1 (map Val vs @ throw ex # es') h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 CallParamsThrow.prems obtain ls\<^sub>2 where 2: "?Posts es h\<^sub>1 l\<^sub>1 (map Val vs @ throw ex # es') h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" by(auto) from 1 2 CallParamsThrow show ?case by (auto simp add: comp_def elim!: CallParamsThrow\<^sub>1 dest!:evals_final) next case (Call e h l a h\<^sub>1 l\<^sub>1 es vs h\<^sub>2 l\<^sub>2 C fs M Ts T 
pns body D l\<^sub>2' b' h\<^sub>3 l\<^sub>3) have "PROP ?P e h l (addr a) h\<^sub>1 l\<^sub>1 Vs ls" by fact with Call.prems obtain ls\<^sub>1 where 1: "?Post e h l (addr a) h\<^sub>1 l\<^sub>1 Vs ls ls\<^sub>1" "size ls = size ls\<^sub>1" by(auto intro!:eval\<^sub>1_preserves_len) have "PROP ?Ps es h\<^sub>1 l\<^sub>1 (map Val vs) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1" by fact with 1 Call.prems obtain ls\<^sub>2 where 2: "?Posts es h\<^sub>1 l\<^sub>1 (map Val vs) h\<^sub>2 l\<^sub>2 Vs ls\<^sub>1 ls\<^sub>2" "size ls\<^sub>1 = size ls\<^sub>2" by(auto intro!:evals\<^sub>1_preserves_len) let ?Vs = "this#pns" let ?ls = "Addr a # vs @ replicate (max_vars body) undefined" have mdecl: "P \ C sees M: Ts\T = (pns, body) in D" by fact have fv_body: "fv body \ set ?Vs" and wf_size: "size Ts = size pns" using wf mdecl by(auto dest!:sees_wf_mdecl simp:wf_mdecl_def) have mdecl\<^sub>1: "compP\<^sub>1 P \ C sees M: Ts\T = (compE\<^sub>1 ?Vs body) in D" using sees_method_compP[OF mdecl, of "\(pns,e). compE\<^sub>1 (this#pns) e"] by(simp) have [simp]: "l\<^sub>2' = [this \ Addr a, pns [\] vs]" by fact have Call_size: "size vs = size pns" by fact have "PROP ?P body h\<^sub>2 l\<^sub>2' b' h\<^sub>3 l\<^sub>3 ?Vs ?ls" by fact with 1 2 fv_body Call_size Call.prems obtain ls\<^sub>3 where 3: "?Post body h\<^sub>2 l\<^sub>2' b' h\<^sub>3 l\<^sub>3 ?Vs ?ls ls\<^sub>3" by(auto) have hp: "h\<^sub>2 a = Some (C, fs)" by fact from 1 2 3 hp mdecl\<^sub>1 wf_size Call_size show ?case by(fastforce simp add: comp_def intro!: Call\<^sub>1 dest!:evals_final) qed (*>*) subsection\Preservation of well-formedness\ text\The compiler preserves well-formedness. Is less trivial than it may appear. We start with two simple properties: preservation of well-typedness\ lemma compE\<^sub>1_pres_wt: "\Vs Ts U. \ P,[Vs[\]Ts] \ e :: U; size Ts = size Vs \ \ compP f P,Ts \\<^sub>1 compE\<^sub>1 Vs e :: U" and "\Vs Ts Us. \ P,[Vs[\]Ts] \ es [::] Us; size Ts = size Vs \ \ compP f P,Ts \\<^sub>1 compEs\<^sub>1 Vs es [::] Us" (*<*) apply(induct e and es rule: compE\<^sub>1.induct compEs\<^sub>1.induct) apply clarsimp apply(fastforce) apply clarsimp apply(fastforce split:bop.splits) apply (fastforce simp:map_upds_apply_eq_Some split:if_split_asm) apply (fastforce simp:map_upds_apply_eq_Some split:if_split_asm) apply (fastforce) apply (fastforce) apply (fastforce dest!: sees_method_compP[where f = f]) apply (fastforce simp:nth_append) apply (fastforce) apply (fastforce) apply (fastforce) apply (fastforce) apply (fastforce simp:nth_append) apply simp apply (fastforce) done (*>*) text\\noindent and the correct block numbering:\ lemma \: "\Vs n. size Vs = n \ \ (compE\<^sub>1 Vs e) n" and \s: "\Vs n. size Vs = n \ \s (compEs\<^sub>1 Vs es) n" -(*<*)by(induct e and es rule: \.induct \s.induct) simp_all(*>*) +(*<*)apply (induction e and es rule: \.induct \s.induct) + apply (auto dest: sym) + apply (metis length_append_singleton) + apply (metis length_append_singleton) + done(*>*) text\The main complication is preservation of definite assignment @{term"\"}.\ lemma image_last_index: "A \ set(xs@[x]) \ last_index (xs @ [x]) ` A = (if x \ A then insert (size xs) (last_index xs ` (A-{x})) else last_index xs ` A)" (*<*) by(auto simp:image_def) (*>*) lemma A_compE\<^sub>1_None[simp]: "\Vs. \ e = None \ \ (compE\<^sub>1 Vs e) = None" and "\Vs. \s es = None \ \s (compEs\<^sub>1 Vs es) = None" (*<*)by(induct e and es rule: compE\<^sub>1.induct compEs\<^sub>1.induct)(auto simp:hyperset_defs)(*>*) lemma A_compE\<^sub>1: "\A Vs. 
\ \ e = \A\; fv e \ set Vs \ \ \ (compE\<^sub>1 Vs e) = \last_index Vs ` A\" and "\A Vs. \ \s es = \A\; fvs es \ set Vs \ \ \s (compEs\<^sub>1 Vs es) = \last_index Vs ` A\" (*<*) proof(induct e and es rule: fv.induct fvs.induct) case (Block V' T e) hence "fv e \ set (Vs@[V'])" by fastforce moreover obtain B where "\ e = \B\" using Block.prems by(simp add: hyperset_defs) moreover from calculation have "B \ set (Vs@[V'])" by(auto dest!:A_fv) ultimately show ?case using Block by(auto simp add: hyperset_defs image_last_index last_index_size_conv) next case (TryCatch e\<^sub>1 C V' e\<^sub>2) hence fve\<^sub>2: "fv e\<^sub>2 \ set (Vs@[V'])" by auto show ?case proof (cases "\ e\<^sub>1") assume A\<^sub>1: "\ e\<^sub>1 = None" then obtain A\<^sub>2 where A\<^sub>2: "\ e\<^sub>2 = \A\<^sub>2\" using TryCatch by(simp add:hyperset_defs) hence "A\<^sub>2 \ set (Vs@[V'])" using TryCatch.prems A_fv[OF A\<^sub>2] by simp blast thus ?thesis using TryCatch fve\<^sub>2 A\<^sub>1 A\<^sub>2 by(auto simp add:hyperset_defs image_last_index last_index_size_conv) next fix A\<^sub>1 assume A\<^sub>1: "\ e\<^sub>1 = \A\<^sub>1\" show ?thesis proof (cases "\ e\<^sub>2") assume A\<^sub>2: "\ e\<^sub>2 = None" then show ?case using TryCatch A\<^sub>1 by(simp add:hyperset_defs) next fix A\<^sub>2 assume A\<^sub>2: "\ e\<^sub>2 = \A\<^sub>2\" have "A\<^sub>1 \ set Vs" using TryCatch.prems A_fv[OF A\<^sub>1] by simp blast moreover have "A\<^sub>2 \ set (Vs@[V'])" using TryCatch.prems A_fv[OF A\<^sub>2] by simp blast ultimately show ?thesis using TryCatch A\<^sub>1 A\<^sub>2 by (auto simp add: Diff_subset_conv last_index_size_conv subsetD hyperset_defs dest!: sym [of _ A]) qed qed next case (Cond e e\<^sub>1 e\<^sub>2) { assume "\ e = None \ \ e\<^sub>1 = None \ \ e\<^sub>2 = None" hence ?case using Cond by (auto simp add: hyperset_defs) } moreover { fix A A\<^sub>1 A\<^sub>2 assume "\ e = \A\" and A\<^sub>1: "\ e\<^sub>1 = \A\<^sub>1\" and A\<^sub>2: "\ e\<^sub>2 = \A\<^sub>2\" moreover have "A\<^sub>1 \ set Vs" using Cond.prems A_fv[OF A\<^sub>1] by simp blast moreover have "A\<^sub>2 \ set Vs" using Cond.prems A_fv[OF A\<^sub>2] by simp blast ultimately have ?case using Cond by(auto simp add:hyperset_defs image_Un inj_on_image_Int[OF inj_on_last_index]) } ultimately show ?case by fastforce qed (auto simp add:hyperset_defs) (*>*) lemma D_None[iff]: "\ (e::'a exp) None" and [iff]: "\s (es::'a exp list) None" (*<*)by(induct e and es rule: \.induct \s.induct)(simp_all)(*>*) lemma D_last_index_compE\<^sub>1: "\A Vs. \ A \ set Vs; fv e \ set Vs \ \ \ e \A\ \ \ (compE\<^sub>1 Vs e) \last_index Vs ` A\" and "\A Vs. 
\ A \ set Vs; fvs es \ set Vs \ \ \s es \A\ \ \s (compEs\<^sub>1 Vs es) \last_index Vs ` A\" (*<*) proof(induct e and es rule: \.induct \s.induct) case (BinOp e\<^sub>1 bop e\<^sub>2) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A\" by simp show ?case proof (cases "\ e\<^sub>1") case None thus ?thesis using BinOp by simp next case (Some A\<^sub>1) have indexA\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) = \last_index Vs ` A\<^sub>1\" using A_compE\<^sub>1[OF Some] BinOp.prems by auto have "A \ A\<^sub>1 \ set Vs" using BinOp.prems A_fv[OF Some] by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` (A \ A\<^sub>1)\" using BinOp Some by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` A \ last_index Vs ` A\<^sub>1\" by(simp add: image_Un) thus ?thesis using IH\<^sub>1 indexA\<^sub>1 by auto qed next case (FAss e\<^sub>1 F D e\<^sub>2) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A\" by simp show ?case proof (cases "\ e\<^sub>1") case None thus ?thesis using FAss by simp next case (Some A\<^sub>1) have indexA\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) = \last_index Vs ` A\<^sub>1\" using A_compE\<^sub>1[OF Some] FAss.prems by auto have "A \ A\<^sub>1 \ set Vs" using FAss.prems A_fv[OF Some] by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` (A \ A\<^sub>1)\" using FAss Some by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` A \ last_index Vs ` A\<^sub>1\" by(simp add: image_Un) thus ?thesis using IH\<^sub>1 indexA\<^sub>1 by auto qed next case (Call e\<^sub>1 M es) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A\" by simp show ?case proof (cases "\ e\<^sub>1") case None thus ?thesis using Call by simp next case (Some A\<^sub>1) have indexA\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) = \last_index Vs ` A\<^sub>1\" using A_compE\<^sub>1[OF Some] Call.prems by auto have "A \ A\<^sub>1 \ set Vs" using Call.prems A_fv[OF Some] by auto hence "\s (compEs\<^sub>1 Vs es) \last_index Vs ` (A \ A\<^sub>1)\" using Call Some by auto hence "\s (compEs\<^sub>1 Vs es) \last_index Vs ` A \ last_index Vs ` A\<^sub>1\" by(simp add: image_Un) thus ?thesis using IH\<^sub>1 indexA\<^sub>1 by auto qed next case (TryCatch e\<^sub>1 C V e\<^sub>2) have "\ A\{V} \ set(Vs@[V]); fv e\<^sub>2 \ set(Vs@[V]); \ e\<^sub>2 \A\{V}\\ \ \ (compE\<^sub>1 (Vs@[V]) e\<^sub>2) \last_index (Vs@[V]) ` (A\{V})\" by fact hence "\ (compE\<^sub>1 (Vs@[V]) e\<^sub>2) \last_index (Vs@[V]) ` (A\{V})\" using TryCatch.prems by(simp add:Diff_subset_conv) moreover have "last_index (Vs@[V]) ` A \ last_index Vs ` A \ {size Vs}" using TryCatch.prems by(auto simp add: image_last_index split:if_split_asm) ultimately show ?case using TryCatch by(auto simp:hyperset_defs elim!:D_mono') next case (Seq e\<^sub>1 e\<^sub>2) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A\" by simp show ?case proof (cases "\ e\<^sub>1") case None thus ?thesis using Seq by simp next case (Some A\<^sub>1) have indexA\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) = \last_index Vs ` A\<^sub>1\" using A_compE\<^sub>1[OF Some] Seq.prems by auto have "A \ A\<^sub>1 \ set Vs" using Seq.prems A_fv[OF Some] by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` (A \ A\<^sub>1)\" using Seq Some by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` A \ last_index Vs ` A\<^sub>1\" by(simp add: image_Un) thus ?thesis using IH\<^sub>1 indexA\<^sub>1 by auto qed next case (Cond e e\<^sub>1 e\<^sub>2) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e) \last_index Vs ` 
A\" by simp show ?case proof (cases "\ e") case None thus ?thesis using Cond by simp next case (Some B) have indexB: "\ (compE\<^sub>1 Vs e) = \last_index Vs ` B\" using A_compE\<^sub>1[OF Some] Cond.prems by auto have "A \ B \ set Vs" using Cond.prems A_fv[OF Some] by auto hence "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` (A \ B)\" and "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` (A \ B)\" using Cond Some by auto hence "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A \ last_index Vs ` B\" and "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` A \ last_index Vs ` B\" by(simp add: image_Un)+ thus ?thesis using IH\<^sub>1 indexB by auto qed next case (While e\<^sub>1 e\<^sub>2) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A\" by simp show ?case proof (cases "\ e\<^sub>1") case None thus ?thesis using While by simp next case (Some A\<^sub>1) have indexA\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) = \last_index Vs ` A\<^sub>1\" using A_compE\<^sub>1[OF Some] While.prems by auto have "A \ A\<^sub>1 \ set Vs" using While.prems A_fv[OF Some] by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` (A \ A\<^sub>1)\" using While Some by auto hence "\ (compE\<^sub>1 Vs e\<^sub>2) \last_index Vs ` A \ last_index Vs ` A\<^sub>1\" by(simp add: image_Un) thus ?thesis using IH\<^sub>1 indexA\<^sub>1 by auto qed next case (Block V T e) have "\ A-{V} \ set(Vs@[V]); fv e \ set(Vs@[V]); \ e \A-{V}\ \ \ \ (compE\<^sub>1 (Vs@[V]) e) \last_index (Vs@[V]) ` (A-{V})\" by fact hence "\ (compE\<^sub>1 (Vs@[V]) e) \last_index (Vs@[V]) ` (A-{V})\" using Block.prems by(simp add:Diff_subset_conv) moreover have "size Vs \ last_index Vs ` A" using Block.prems by(auto simp add:image_def size_last_index_conv) ultimately show ?case using Block by(auto simp add: image_last_index Diff_subset_conv hyperset_defs elim!: D_mono') next case (Cons_exp e\<^sub>1 es) hence IH\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) \last_index Vs ` A\" by simp show ?case proof (cases "\ e\<^sub>1") case None thus ?thesis using Cons_exp by simp next case (Some A\<^sub>1) have indexA\<^sub>1: "\ (compE\<^sub>1 Vs e\<^sub>1) = \last_index Vs ` A\<^sub>1\" using A_compE\<^sub>1[OF Some] Cons_exp.prems by auto have "A \ A\<^sub>1 \ set Vs" using Cons_exp.prems A_fv[OF Some] by auto hence "\s (compEs\<^sub>1 Vs es) \last_index Vs ` (A \ A\<^sub>1)\" using Cons_exp Some by auto hence "\s (compEs\<^sub>1 Vs es) \last_index Vs ` A \ last_index Vs ` A\<^sub>1\" by(simp add: image_Un) thus ?thesis using IH\<^sub>1 indexA\<^sub>1 by auto qed qed (simp_all add:hyperset_defs) (*>*) lemma last_index_image_set: "distinct xs \ last_index xs ` set xs = {..*) lemma D_compE\<^sub>1: "\ \ e \set Vs\; fv e \ set Vs; distinct Vs \ \ \ (compE\<^sub>1 Vs e) \{.." 
(*<*)by(fastforce dest!: D_last_index_compE\<^sub>1[OF subset_refl] simp add:last_index_image_set)(*>*) lemma D_compE\<^sub>1': assumes "\ e \set(V#Vs)\" and "fv e \ set(V#Vs)" and "distinct(V#Vs)" shows "\ (compE\<^sub>1 (V#Vs) e) \{..length Vs}\" (*<*) proof - have "{..size Vs} = {..1) qed (*>*) lemma compP\<^sub>1_pres_wf: "wf_J_prog P \ wf_J\<^sub>1_prog (compP\<^sub>1 P)" (*<*) apply simp apply(rule wf_prog_compPI) prefer 2 apply assumption apply(case_tac m) apply(simp add:wf_mdecl_def wf_J\<^sub>1_mdecl_def wf_J_mdecl) apply(clarify) apply(frule WT_fv) apply(fastforce intro!: compE\<^sub>1_pres_wt D_compE\<^sub>1' \) done (*>*) end diff --git a/thys/JinjaThreads/MM/SC_Completion.thy b/thys/JinjaThreads/MM/SC_Completion.thy --- a/thys/JinjaThreads/MM/SC_Completion.thy +++ b/thys/JinjaThreads/MM/SC_Completion.thy @@ -1,1285 +1,1284 @@ (* Title: JinjaThreads/MM/SC_Completion.thy Author: Andreas Lochbihler *) section \Sequentially consistent completion of executions in the JMM\ theory SC_Completion imports Non_Speculative begin subsection \Most recently written values\ fun mrw_value :: "'m prog \ (('addr \ addr_loc) \ ('addr val \ bool)) \ ('addr, 'thread_id) obs_event action \ (('addr \ addr_loc) \ ('addr val \ bool))" where "mrw_value P vs (NormalAction (WriteMem ad al v)) = vs((ad, al) \ (v, True))" | "mrw_value P vs (NormalAction (NewHeapElem ad hT)) = (\(ad', al). if ad = ad' \ al \ addr_locs P hT \ (case vs (ad, al) of None \ True | Some (v, b) \ \ b) then Some (addr_loc_default P hT al, False) else vs (ad', al))" | "mrw_value P vs _ = vs" lemma mrw_value_cases: obtains ad al v where "x = NormalAction (WriteMem ad al v)" | ad hT where "x = NormalAction (NewHeapElem ad hT)" | ad M vs v where "x = NormalAction (ExternalCall ad M vs v)" | ad al v where "x = NormalAction (ReadMem ad al v)" | t where "x = NormalAction (ThreadStart t)" | t where "x = NormalAction (ThreadJoin t)" | ad where "x = NormalAction (SyncLock ad)" | ad where "x = NormalAction (SyncUnlock ad)" | t where "x = NormalAction (ObsInterrupt t)" | t where "x = NormalAction (ObsInterrupted t)" | "x = InitialThreadAction" | "x = ThreadFinishAction" by pat_completeness abbreviation mrw_values :: "'m prog \ (('addr \ addr_loc) \ ('addr val \ bool)) \ ('addr, 'thread_id) obs_event action list \ (('addr \ addr_loc) \ ('addr val \ bool))" where "mrw_values P \ foldl (mrw_value P)" lemma mrw_values_eq_SomeD: assumes mrw: "mrw_values P vs0 obs (ad, al) = \(v, b)\" and "vs0 (ad, al) = \(v, b)\ \ \wa. wa \ set obs \ is_write_action wa \ (ad, al) \ action_loc_aux P wa \ (b \ \ is_new_action wa)" shows "\obs' wa obs''. obs = obs' @ wa # obs'' \ is_write_action wa \ (ad, al) \ action_loc_aux P wa \ value_written_aux P wa al = v \ (is_new_action wa \ \ b) \ (\ob\set obs''. is_write_action ob \ (ad, al) \ action_loc_aux P ob \ is_new_action ob \ b)" (is "?concl obs") using assms proof(induct obs rule: rev_induct) case Nil thus ?case by simp next case (snoc ob obs) note mrw = \mrw_values P vs0 (obs @ [ob]) (ad, al) = \(v, b)\\ show ?case proof(cases "is_write_action ob \ (ad, al) \ action_loc_aux P ob \ (is_new_action ob \ \ b)") case True thus ?thesis using mrw by(fastforce elim!: is_write_action.cases intro: action_loc_aux_intros split: if_split_asm) next case False with mrw have "mrw_values P vs0 obs (ad, al) = \(v, b)\" by(cases "ob" rule: mrw_value_cases)(auto split: if_split_asm simp add: addr_locs_def split: htype.split_asm) moreover { assume "vs0 (ad, al) = \(v, b)\" hence "\wa. 
wa \ set (obs @ [ob]) \ is_write_action wa \ (ad, al) \ action_loc_aux P wa \ (b \ \ is_new_action wa)" by(rule snoc) with False have "\wa. wa \ set obs \ is_write_action wa \ (ad, al) \ action_loc_aux P wa \ (b \ \ is_new_action wa)" by auto } ultimately have "?concl obs" by(rule snoc) thus ?thesis using False mrw by fastforce qed qed lemma mrw_values_WriteMemD: assumes "NormalAction (WriteMem ad al v') \ set obs" shows "\v. mrw_values P vs0 obs (ad, al) = Some (v, True)" using assms apply(induct obs rule: rev_induct) apply simp apply clarsimp apply(erule disjE) apply clarsimp apply clarsimp apply(case_tac x rule: mrw_value_cases) apply simp_all done lemma mrw_values_new_actionD: assumes "w \ set obs" "is_new_action w" "adal \ action_loc_aux P w" shows "\v b. mrw_values P vs0 obs adal = Some (v, b)" using assms apply(induct obs rule: rev_induct) apply simp apply clarsimp apply(erule disjE) apply(fastforce simp add: split_beta elim!: action_loc_aux_cases is_new_action.cases) apply clarsimp apply(rename_tac w' obs' v b) apply(case_tac w' rule: mrw_value_cases) apply(auto simp add: split_beta) done lemma mrw_value_dom_mono: "dom vs \ dom (mrw_value P vs ob)" by(cases ob rule: mrw_value_cases) auto lemma mrw_values_dom_mono: "dom vs \ dom (mrw_values P vs obs)" by(induct obs arbitrary: vs)(auto intro: subset_trans[OF mrw_value_dom_mono] del: subsetI) lemma mrw_values_eq_NoneD: assumes "mrw_values P vs0 obs adal = None" and "w \ set obs" and "is_write_action w" and "adal \ action_loc_aux P w" shows False using assms apply - apply(erule is_write_action.cases) apply(fastforce dest: mrw_values_WriteMemD[where ?vs0.0=vs0 and P=P] mrw_values_new_actionD[where ?vs0.0=vs0] elim: action_loc_aux_cases)+ done lemma mrw_values_mrw: assumes mrw: "mrw_values P vs0 (map snd obs) (ad, al) = \(v, b)\" and initial: "vs0 (ad, al) = \(v, b)\ \ \wa. wa \ set (map snd obs) \ is_write_action wa \ (ad, al) \ action_loc_aux P wa \ (b \ \ is_new_action wa)" shows "\i. i < length obs \ P,llist_of (obs @ [(t, NormalAction (ReadMem ad al v))]) \ length obs \mrw i \ value_written P (llist_of obs) i (ad, al) = v" proof - from mrw_values_eq_SomeD[OF mrw initial] obtain obs' wa obs'' where obs: "map snd obs = obs' @ wa # obs''" and wa: "is_write_action wa" and adal: "(ad, al) \ action_loc_aux P wa" and written: "value_written_aux P wa al = v" and new: "is_new_action wa \ \ b" and last: "\ob. 
\ ob \ set obs''; is_write_action ob; (ad, al) \ action_loc_aux P ob \ \ is_new_action ob \ b" by blast let ?i = "length obs'" let ?E = "llist_of (obs @ [(t, NormalAction (ReadMem ad al v))])" from obs have len: "length (map snd obs) = Suc (length obs') + length obs''" by simp hence "?i < length obs" by simp moreover hence obs_i: "action_obs ?E ?i = wa" using len obs by(auto simp add: action_obs_def map_eq_append_conv) have "P,?E \ length obs \mrw ?i" proof(rule most_recent_write_for.intros) show "length obs \ read_actions ?E" by(auto intro: read_actions.intros simp add: actions_def action_obs_def) show "(ad, al) \ action_loc P ?E (length obs)" by(simp add: action_obs_def lnth_llist_of) show "?E \ length obs' \a length obs" using len by-(rule action_orderI, auto simp add: actions_def action_obs_def nth_append) show "?i \ write_actions ?E" using len obs wa by-(rule write_actions.intros, auto simp add: actions_def action_obs_def nth_append map_eq_append_conv) show "(ad, al) \ action_loc P ?E ?i" using obs_i adal by simp fix wa' assume wa': "wa' \ write_actions ?E" and adal': "(ad, al) \ action_loc P ?E wa'" from wa' \?i \ write_actions ?E\ have "wa' \ actions ?E" "?i \ actions ?E" by simp_all hence "?E \ wa' \a ?i" proof(rule action_orderI) assume new_wa': "is_new_action (action_obs ?E wa')" and new_i: "is_new_action (action_obs ?E ?i)" from new_i obs_i new have b: "\ b" by simp show "wa' \ ?i" proof(rule ccontr) assume "\ ?thesis" hence "?i < wa'" by simp hence "snd (obs ! wa') \ set obs''" using obs wa' unfolding in_set_conv_nth by -(rule exI[where x="wa' - Suc (length obs')"], auto elim!: write_actions.cases actionsE simp add: action_obs_def lnth_llist_of actions_def nth_append map_eq_append_conv nth_Cons' split: if_split_asm) moreover from wa' have "is_write_action (snd (obs ! wa'))" by cases(auto simp add: action_obs_def nth_append actions_def split: if_split_asm) moreover from adal' wa' have "(ad, al) \ action_loc_aux P (snd (obs ! wa'))" by(auto simp add: action_obs_def nth_append nth_Cons' actions_def split: if_split_asm elim!: write_actions.cases) ultimately show False using last[of "snd (obs ! wa')"] b by simp qed next assume new_wa': "\ is_new_action (action_obs ?E wa')" with wa' adal' obtain v' where "NormalAction (WriteMem ad al v') \ set (map snd obs)" unfolding in_set_conv_nth by (fastforce elim!: write_actions.cases is_write_action.cases simp add: action_obs_def actions_def nth_append split: if_split_asm intro!: exI[where x=wa']) from mrw_values_WriteMemD[OF this, of P vs0] mrw have b by simp with new obs_i have "\ is_new_action (action_obs ?E ?i)" by simp moreover have "wa' \ ?i" proof(rule ccontr) assume "\ ?thesis" hence "?i < wa'" by simp hence "snd (obs ! wa') \ set obs''" using obs wa' unfolding in_set_conv_nth by -(rule exI[where x="wa' - Suc (length obs')"], auto elim!: write_actions.cases actionsE simp add: action_obs_def lnth_llist_of actions_def nth_append map_eq_append_conv nth_Cons' split: if_split_asm) moreover from wa' have "is_write_action (snd (obs ! wa'))" by cases(auto simp add: action_obs_def nth_append actions_def split: if_split_asm) moreover from adal' wa' have "(ad, al) \ action_loc_aux P (snd (obs ! wa'))" by(auto simp add: action_obs_def nth_append nth_Cons' actions_def split: if_split_asm elim!: write_actions.cases) ultimately have "is_new_action (snd (obs ! wa'))" using last[of "snd (obs ! wa')"] by simp moreover from new_wa' wa' have "\ is_new_action (snd (obs ! 
wa'))" by(auto elim!: write_actions.cases simp add: action_obs_def nth_append actions_def split: if_split_asm) ultimately show False by contradiction qed ultimately show "\ is_new_action (action_obs ?E ?i) \ wa' \ ?i" by blast qed thus "?E \ wa' \a ?i \ ?E \ length obs \a wa'" .. qed moreover from written \?i < length obs\ obs_i have "value_written P (llist_of obs) ?i (ad, al) = v" by(simp add: value_written_def action_obs_def nth_append) ultimately show ?thesis by blast qed lemma mrw_values_no_write_unchanged: assumes no_write: "\w. \ w \ set obs; is_write_action w; adal \ action_loc_aux P w \ \ case vs adal of None \ False | Some (v, b) \ b \ is_new_action w" shows "mrw_values P vs obs adal = vs adal" using assms proof(induct obs arbitrary: vs) case Nil show ?case by simp next case (Cons ob obs) from Cons.prems[of ob] have "mrw_value P vs ob adal = vs adal" apply(cases adal) apply(cases ob rule: mrw_value_cases, fastforce+) apply(auto simp add: addr_locs_def split: htype.split_asm) apply blast+ done moreover have "mrw_values P (mrw_value P vs ob) obs adal = mrw_value P vs ob adal" proof(rule Cons.hyps) fix w assume "w \ set obs" "is_write_action w" "adal \ action_loc_aux P w" with Cons.prems[of w] \mrw_value P vs ob adal = vs adal\ show "case mrw_value P vs ob adal of None \ False | \(v, b)\ \ b \ is_new_action w" by simp qed ultimately show ?case by simp qed subsection \Coinductive version of sequentially consistent prefixes\ coinductive ta_seq_consist :: "'m prog \ ('addr \ addr_loc \ 'addr val \ bool) \ ('addr, 'thread_id) obs_event action llist \ bool" for P :: "'m prog" where LNil: "ta_seq_consist P vs LNil" | LCons: "\ case ob of NormalAction (ReadMem ad al v) \ \b. vs (ad, al) = \(v, b)\ | _ \ True; ta_seq_consist P (mrw_value P vs ob) obs \ \ ta_seq_consist P vs (LCons ob obs)" inductive_simps ta_seq_consist_simps [simp]: "ta_seq_consist P vs LNil" "ta_seq_consist P vs (LCons ob obs)" lemma ta_seq_consist_lappend: assumes "lfinite obs" shows "ta_seq_consist P vs (lappend obs obs') \ ta_seq_consist P vs obs \ ta_seq_consist P (mrw_values P vs (list_of obs)) obs'" (is "?concl vs obs") using assms proof(induct arbitrary: vs) case lfinite_LNil thus ?case by simp next case (lfinite_LConsI obs ob) have "?concl (mrw_value P vs ob) obs" by fact thus ?case using \lfinite obs\ by(simp split: action.split add: list_of_LCons) qed lemma assumes "ta_seq_consist P vs obs" shows ta_seq_consist_ltake: "ta_seq_consist P vs (ltake n obs)" (is ?thesis1) and ta_seq_consist_ldrop: "ta_seq_consist P (mrw_values P vs (list_of (ltake n obs))) (ldrop n obs)" (is ?thesis2) proof - note assms also have "obs = lappend (ltake n obs) (ldrop n obs)" by(simp add: lappend_ltake_ldrop) finally have "?thesis1 \ ?thesis2" by(cases n)(simp_all add: ta_seq_consist_lappend del: lappend_ltake_enat_ldropn) thus ?thesis1 ?thesis2 by blast+ qed lemma ta_seq_consist_coinduct_append [consumes 1, case_names ta_seq_consist, case_conclusion ta_seq_consist LNil lappend]: assumes major: "X vs obs" and step: "\vs obs. X vs obs \ obs = LNil \ (\obs' obs''. obs = lappend obs' obs'' \ obs' \ LNil \ ta_seq_consist P vs obs' \ (lfinite obs' \ (X (mrw_values P vs (list_of obs')) obs'' \ ta_seq_consist P (mrw_values P vs (list_of obs')) obs'')))" (is "\vs obs. _ \ _ \ ?step vs obs") shows "ta_seq_consist P vs obs" proof - from major have "\obs' obs''. 
obs = lappend (llist_of obs') obs'' \ ta_seq_consist P vs (llist_of obs') \ X (mrw_values P vs obs') obs''" by(auto intro: exI[where x="[]"]) thus ?thesis proof(coinduct) case (ta_seq_consist vs obs) then obtain obs' obs'' where obs: "obs = lappend (llist_of obs') obs''" and sc_obs': "ta_seq_consist P vs (llist_of obs')" and X: "X (mrw_values P vs obs') obs''" by blast show ?case proof(cases obs') case Nil with X have "X vs obs''" by simp from step[OF this] show ?thesis proof assume "obs'' = LNil" with Nil obs show ?thesis by simp next assume "?step vs obs''" then obtain obs''' obs'''' where obs'': "obs'' = lappend obs''' obs''''" and "obs''' \ LNil" and sc_obs''': "ta_seq_consist P vs obs'''" and fin: "lfinite obs''' \ X (mrw_values P vs (list_of obs''')) obs'''' \ ta_seq_consist P (mrw_values P vs (list_of obs''')) obs''''" by blast from \obs''' \ LNil\ obtain ob obs''''' where obs''': "obs''' = LCons ob obs'''''" unfolding neq_LNil_conv by blast with Nil obs'' obs have concl1: "obs = LCons ob (lappend obs''''' obs'''')" by simp have concl2: "case ob of NormalAction (ReadMem ad al v) \ \b. vs (ad, al) = \(v, b)\ | _ \ True" using sc_obs''' obs''' by simp show ?thesis proof(cases "lfinite obs'''") case False hence "lappend obs''''' obs'''' = obs'''''" using obs''' by(simp add: lappend_inf) hence "ta_seq_consist P (mrw_value P vs ob) (lappend obs''''' obs'''')" using sc_obs''' obs''' by simp with concl1 concl2 have ?LCons by blast thus ?thesis by simp next case True with obs''' obtain obs'''''' where obs''''': "obs''''' = llist_of obs''''''" by simp(auto simp add: lfinite_eq_range_llist_of) from fin[OF True] have "?LCons" proof assume X: "X (mrw_values P vs (list_of obs''')) obs''''" hence "X (mrw_values P (mrw_value P vs ob) obs'''''') obs''''" using obs''''' obs''' by simp moreover from obs''''' have "lappend obs''''' obs'''' = lappend (llist_of obs'''''') obs''''" by simp moreover have "ta_seq_consist P (mrw_value P vs ob) (llist_of obs'''''')" using sc_obs''' obs''' obs''''' by simp ultimately show ?thesis using concl1 concl2 by blast next assume "ta_seq_consist P (mrw_values P vs (list_of obs''')) obs''''" with sc_obs''' obs''''' obs''' have "ta_seq_consist P (mrw_value P vs ob) (lappend obs''''' obs'''')" by(simp add: ta_seq_consist_lappend) with concl1 concl2 show ?thesis by blast qed thus ?thesis by simp qed qed next case (Cons ob obs''') hence "obs = LCons ob (lappend (llist_of obs''') obs'')" using obs by simp moreover from sc_obs' Cons have "case ob of NormalAction (ReadMem ad al v) \ \b. vs (ad, al) = \(v, b)\ | _ \ True" and "ta_seq_consist P (mrw_value P vs ob) (llist_of obs''')" by simp_all moreover from X Cons have "X (mrw_values P (mrw_value P vs ob) obs''') obs''" by simp ultimately show ?thesis by blast qed qed qed lemma ta_seq_consist_coinduct_append_wf [consumes 2, case_names ta_seq_consist, case_conclusion ta_seq_consist LNil lappend]: assumes major: "X vs obs a" and wf: "wf R" and step: "\vs obs a. X vs obs a \ obs = LNil \ (\obs' obs'' a'. obs = lappend obs' obs'' \ ta_seq_consist P vs obs' \ (obs' = LNil \ (a', a) \ R) \ (lfinite obs' \ X (mrw_values P vs (list_of obs')) obs'' a' \ ta_seq_consist P (mrw_values P vs (list_of obs')) obs''))" (is "\vs obs a. _ \ _ \ ?step vs obs a") shows "ta_seq_consist P vs obs" proof - { fix vs obs a assume "X vs obs a" with wf have "obs = LNil \ (\obs' obs''. obs = lappend obs' obs'' \ obs' \ LNil \ ta_seq_consist P vs obs' \ (lfinite obs' \ (\a. 
X (mrw_values P vs (list_of obs')) obs'' a) \ ta_seq_consist P (mrw_values P vs (list_of obs')) obs''))" (is "_ \ ?step_concl vs obs") proof(induct a arbitrary: vs obs rule: wf_induct[consumes 1, case_names wf]) case (wf a) note IH = wf.hyps[rule_format] from step[OF \X vs obs a\] show ?case proof assume "obs = LNil" thus ?thesis .. next assume "?step vs obs a" then obtain obs' obs'' a' where obs: "obs = lappend obs' obs''" and sc_obs': "ta_seq_consist P vs obs'" and decr: "obs' = LNil \ (a', a) \ R" and fin: "lfinite obs' \ X (mrw_values P vs (list_of obs')) obs'' a' \ ta_seq_consist P (mrw_values P vs (list_of obs')) obs''" by blast show ?case proof(cases "obs' = LNil") case True hence "lfinite obs'" by simp from fin[OF this] show ?thesis proof assume X: "X (mrw_values P vs (list_of obs')) obs'' a'" from True have "(a', a) \ R" by(rule decr) from IH[OF this X] show ?thesis proof assume "obs'' = LNil" with True obs have "obs = LNil" by simp thus ?thesis .. next assume "?step_concl (mrw_values P vs (list_of obs')) obs''" hence "?step_concl vs obs" using True obs by simp thus ?thesis .. qed next assume "ta_seq_consist P (mrw_values P vs (list_of obs')) obs''" thus ?thesis using obs True by cases(auto cong: action.case_cong obs_event.case_cong intro: exI[where x="LCons x LNil" for x]) qed next case False with obs sc_obs' fin show ?thesis by auto qed qed qed } note step' = this from major show ?thesis proof(coinduction arbitrary: vs obs a rule: ta_seq_consist_coinduct_append) case (ta_seq_consist vs obs a) thus ?case by simp(rule step') qed qed lemma ta_seq_consist_nthI: "(\i ad al v. \ enat i < llength obs; lnth obs i = NormalAction (ReadMem ad al v); ta_seq_consist P vs (ltake (enat i) obs) \ \ \b. mrw_values P vs (list_of (ltake (enat i) obs)) (ad, al) = \(v, b)\) \ ta_seq_consist P vs obs" proof(coinduction arbitrary: vs obs) case (ta_seq_consist vs obs) hence nth: "\i ad al v. \ enat i < llength obs; lnth obs i = NormalAction (ReadMem ad al v); ta_seq_consist P vs (ltake (enat i) obs) \ \ \b. mrw_values P vs (list_of (ltake (enat i) obs)) (ad, al) = \(v, b)\" by blast show ?case proof(cases obs) case LNil thus ?thesis by simp next case (LCons ob obs') { fix ad al v assume "ob = NormalAction (ReadMem ad al v)" with nth[of 0 ad al v] LCons have "\b. vs (ad, al) = \(v, b)\" by(simp add: zero_enat_def[symmetric]) } note base = this moreover { fix i ad al v assume "enat i < llength obs'" "lnth obs' i = NormalAction (ReadMem ad al v)" and "ta_seq_consist P (mrw_value P vs ob) (ltake (enat i) obs')" with LCons nth[of "Suc i" ad al v] base have "\b. mrw_values P (mrw_value P vs ob) (list_of (ltake (enat i) obs')) (ad, al) = \(v, b)\" by(clarsimp simp add: eSuc_enat[symmetric] split: obs_event.split action.split) } ultimately have ?LCons using LCons by(simp split: action.split obs_event.split) thus ?thesis .. qed qed lemma ta_seq_consist_into_non_speculative: "\ ta_seq_consist P vs obs; \adal. 
set_option (vs adal) \ vs' adal \ UNIV \ \ non_speculative P vs' obs" proof(coinduction arbitrary: vs' obs vs) case (non_speculative vs' obs vs) thus ?case apply cases apply(auto split: action.split_asm obs_event.split_asm) apply(rule exI, erule conjI, auto)+ done qed lemma llist_of_list_of_append: "lfinite xs \ llist_of (list_of xs @ ys) = lappend xs (llist_of ys)" unfolding lfinite_eq_range_llist_of by(clarsimp simp add: lappend_llist_of_llist_of) lemma ta_seq_consist_most_recent_write_for: assumes sc: "ta_seq_consist P Map.empty (lmap snd E)" and read: "r \ read_actions E" and new_actions_for_fun: "\adal a a'. \ a \ new_actions_for P E adal; a' \ new_actions_for P E adal \ \ a = a'" shows "\i. P,E \ r \mrw i \ i < r" proof - from read obtain t v ad al where nth_r: "lnth E r = (t, NormalAction (ReadMem ad al v))" and r: "enat r < llength E" by(cases)(cases "lnth E r", auto simp add: action_obs_def actions_def) from nth_r r have E_unfold: "E = lappend (ltake (enat r) E) (LCons (t, NormalAction (ReadMem ad al v)) (ldropn (Suc r) E))" by (metis lappend_ltake_enat_ldropn ldropn_Suc_conv_ldropn) from sc obtain b where sc': "ta_seq_consist P Map.empty (ltake (enat r) (lmap snd E))" and mrw': "mrw_values P Map.empty (map snd (list_of (ltake (enat r) E))) (ad, al) = \(v, b)\" by(subst (asm) (3) E_unfold)(auto simp add: ta_seq_consist_lappend lmap_lappend_distrib) from mrw_values_mrw[OF mrw', of t] r obtain E' w' where E': "E' = llist_of (list_of (ltake (enat r) E) @ [(t, NormalAction (ReadMem ad al v))])" and v: "v = value_written P (ltake (enat r) E) w' (ad, al)" and mrw'': "P,E' \ r \mrw w'" and w': "w' < r" by(fastforce simp add: length_list_of_conv_the_enat min_def split: if_split_asm) from E' r have sim: "ltake (enat (Suc r)) E' [\] ltake (enat (Suc r)) E" by(subst E_unfold)(simp add: ltake_lappend llist_of_list_of_append min_def, auto simp add: eSuc_enat[symmetric] zero_enat_def[symmetric] eq_into_sim_actions) from nth_r have adal_r: "(ad, al) \ action_loc P E r" by(simp add: action_obs_def) from E' r have nth_r': "lnth E' r = (t, NormalAction (ReadMem ad al v))" by(auto simp add: nth_append length_list_of_conv_the_enat min_def) with mrw'' w' r adal_r obtain "E \ w' \a r" "w' \ write_actions E" "(ad, al) \ action_loc P E w'" by cases(fastforce simp add: action_obs_def action_loc_change_prefix[OF sim[symmetric], simplified action_obs_def] intro: action_order_change_prefix[OF _ sim] write_actions_change_prefix[OF _ sim]) with read adal_r have "P,E \ r \mrw w'" proof(rule most_recent_write_for.intros) fix wa' assume write': "wa' \ write_actions E" and adal_wa': "(ad, al) \ action_loc P E wa'" show "E \ wa' \a w' \ E \ r \a wa'" proof(cases "r \ wa'") assume "r \ wa'" show ?thesis proof(cases "is_new_action (action_obs E wa')") case False with \r \ wa'\ have "E \ r \a wa'" using read write' by(auto simp add: action_order_def elim!: read_actions.cases) thus ?thesis .. next case True with write' adal_wa' have "wa' \ new_actions_for P E (ad, al)" by(simp add: new_actions_for_def) hence "w' \ new_actions_for P E (ad, al)" using r w' \r \ wa'\ by(auto dest: new_actions_for_fun) with \w' \ write_actions E\ \(ad, al) \ action_loc P E w'\ have "\ is_new_action (action_obs E w')" by(simp add: new_actions_for_def) with write' True \w' \ write_actions E\ have "E \ wa' \a w'" by(simp add: action_order_def) thus ?thesis .. 
qed next assume "\ r \ wa'" hence "wa' < r" by simp with write' adal_wa' have "wa' \ write_actions E'" "(ad, al) \ action_loc P E' wa'" by(auto intro: write_actions_change_prefix[OF _ sim[symmetric]] simp add: action_loc_change_prefix[OF sim]) from most_recent_write_recent[OF mrw'' _ this] nth_r' have "E' \ wa' \a w' \ E' \ r \a wa'" by(simp add: action_obs_def) thus ?thesis using \wa' < r\ w' by(auto 4 3 del: disjCI intro: disjI1 disjI2 action_order_change_prefix[OF _ sim]) qed qed with w' show ?thesis by blast qed lemma ta_seq_consist_mrw_before: assumes sc: "ta_seq_consist P Map.empty (lmap snd E)" and new_actions_for_fun: "\adal a a'. \ a \ new_actions_for P E adal; a' \ new_actions_for P E adal \ \ a = a'" and mrw: "P,E \ r \mrw w" shows "w < r" proof - from mrw have "r \ read_actions E" by cases with sc new_actions_for_fun obtain w' where "P,E \ r \mrw w'" "w' < r" by(auto dest: ta_seq_consist_most_recent_write_for) with mrw show ?thesis by(auto dest: most_recent_write_for_fun) qed lemma ta_seq_consist_imp_sequentially_consistent: assumes tsa_ok: "thread_start_actions_ok E" and new_actions_for_fun: "\adal a a'. \ a \ new_actions_for P E adal; a' \ new_actions_for P E adal \ \ a = a'" and seq: "ta_seq_consist P Map.empty (lmap snd E)" shows "\ws. sequentially_consistent P (E, ws) \ P \ (E, ws) \" proof(intro exI conjI) define ws where "ws i = (THE w. P,E \ i \mrw w)" for i from seq have ns: "non_speculative P (\_. {}) (lmap snd E)" by(rule ta_seq_consist_into_non_speculative) simp show "sequentially_consistent P (E, ws)" unfolding ws_def proof(rule sequentially_consistentI) fix r assume "r \ read_actions E" with seq new_actions_for_fun obtain w where "P,E \ r \mrw w" by(auto dest: ta_seq_consist_most_recent_write_for) thus "P,E \ r \mrw THE w. P,E \ r \mrw w" by(simp add: THE_most_recent_writeI) qed show "P \ (E, ws) \" proof(rule wf_execI) show "is_write_seen P E ws" proof(rule is_write_seenI) fix a ad al v assume a: "a \ read_actions E" and adal: "action_obs E a = NormalAction (ReadMem ad al v)" from ns have seq': "non_speculative P (\_. 
{}) (ltake (enat a) (lmap snd E))" by(rule non_speculative_ltake) from seq a seq new_actions_for_fun obtain w where mrw: "P,E \ a \mrw w" and "w < a" by(auto dest: ta_seq_consist_most_recent_write_for) hence w: "ws a = w" by(simp add: ws_def THE_most_recent_writeI) with mrw adal show "ws a \ write_actions E" and "(ad, al) \ action_loc P E (ws a)" and "\ P,E \ a \hb ws a" by(fastforce elim!: most_recent_write_for.cases dest: happens_before_into_action_order antisymPD[OF antisym_action_order] read_actions_not_write_actions)+ let ?between = "ltake (enat (a - Suc w)) (ldropn (Suc w) E)" let ?prefix = "ltake (enat w) E" let ?vs_prefix = "mrw_values P Map.empty (map snd (list_of ?prefix))" { fix v' assume new: "is_new_action (action_obs E w)" and vs': "?vs_prefix (ad, al) = \(v', True)\" from mrw_values_eq_SomeD[OF vs'] obtain obs' wa obs'' where split: "map snd (list_of ?prefix) = obs' @ wa # obs''" and wa: "is_write_action wa" and adal': "(ad, al) \ action_loc_aux P wa" and new_wa: "\ is_new_action wa" by blast from split have "length (map snd (list_of ?prefix)) = Suc (length obs' + length obs'')" by simp hence len_prefix: "llength ?prefix = enat \" by(simp add: length_list_of_conv_the_enat min_enat1_conv_enat) with split have "nth (map snd (list_of ?prefix)) (length obs') = wa" and "enat (length obs') < llength ?prefix" by simp_all hence "snd (lnth ?prefix (length obs')) = wa" by(simp add: list_of_lmap[symmetric] del: list_of_lmap) hence wa': "action_obs E (length obs') = wa" and "enat (length obs') < llength E" using \enat (length obs') < llength ?prefix\ by(auto simp add: action_obs_def lnth_ltake) with wa have "length obs' \ write_actions E" by(auto intro: write_actions.intros simp add: actions_def) from most_recent_write_recent[OF mrw _ this, of "(ad, al)"] adal adal' wa' have "E \ length obs' \a w \ E \ a \a length obs'" by simp hence False using new_wa new wa' adal len_prefix \w < a\ by(auto elim!: action_orderE simp add: min_enat1_conv_enat split: enat.split_asm) } hence mrw_value_w: "mrw_value P ?vs_prefix (snd (lnth E w)) (ad, al) = \(value_written P E w (ad, al), \ is_new_action (action_obs E w))\" using \ws a \ write_actions E\ \(ad, al) \ action_loc P E (ws a)\ w by(cases "snd (lnth E w)" rule: mrw_value_cases)(fastforce elim: write_actions.cases simp add: value_written_def action_obs_def)+ have "mrw_values P (mrw_value P ?vs_prefix (snd (lnth E w))) (list_of (lmap snd ?between)) (ad, al) = \(value_written P E w (ad, al), \ is_new_action (action_obs E w))\" proof(subst mrw_values_no_write_unchanged) fix wa assume "wa \ set (list_of (lmap snd ?between))" and write_wa: "is_write_action wa" and adal_wa: "(ad, al) \ action_loc_aux P wa" hence wa: "wa \ lset (lmap snd ?between)" by simp from wa obtain i_wa where "wa = lnth (lmap snd ?between) i_wa" and i_wa: "enat i_wa < llength (lmap snd ?between)" unfolding lset_conv_lnth by blast moreover hence i_wa_len: "enat (Suc (w + i_wa)) < llength E" by(cases "llength E") auto ultimately have wa': "wa = action_obs E (Suc (w + i_wa))" by(simp_all add: lnth_ltake action_obs_def ac_simps) with write_wa i_wa_len have "Suc (w + i_wa) \ write_actions E" by(auto intro: write_actions.intros simp add: actions_def) from most_recent_write_recent[OF mrw _ this, of "(ad, al)"] adal adal_wa wa' have "E \ Suc (w + i_wa) \a w \ E \ a \a Suc (w + i_wa)" by(simp) hence "is_new_action wa \ \ is_new_action (action_obs E w)" using adal i_wa wa' by(auto elim: action_orderE) thus "case (mrw_value P ?vs_prefix (snd (lnth E w)) (ad, al)) of None \ False | Some (v, b) 
\ b \ is_new_action wa" unfolding mrw_value_w by simp qed(simp add: mrw_value_w) moreover from a have "a \ actions E" by simp hence "enat a < llength E" by(rule actionsE) with \w < a\ have "enat (a - Suc w) < llength E - enat (Suc w)" by(cases "llength E") simp_all hence "E = lappend (lappend ?prefix (LCons (lnth E w) ?between)) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))" using \w < a\ \enat a < llength E\ unfolding lappend_assoc lappend_code apply(subst ldropn_Suc_conv_ldropn, simp) apply(subst lappend_ltake_enat_ldropn) apply(subst ldropn_Suc_conv_ldropn, simp add: less_trans[where y="enat a"]) by simp hence E': "E = lappend (lappend ?prefix (LCons (lnth E w) ?between)) (LCons (lnth E a) (ldropn (Suc a) E))" using \w < a\ \enat a < llength E\ by simp from seq have "ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd ?prefix) (LCons (snd (lnth E w)) (lmap snd ?between))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))" by(subst (asm) E')(simp add: lmap_lappend_distrib ta_seq_consist_lappend) ultimately show "value_written P E (ws a) (ad, al) = v" using adal w by(clarsimp simp add: action_obs_def list_of_lappend list_of_LCons) (* assume "is_volatile P al" *) show "\ P,E \ a \so ws a" using \w < a\ w adal by(auto elim!: action_orderE sync_orderE) fix a' assume a': "a' \ write_actions E" "(ad, al) \ action_loc P E a'" { presume "E \ ws a \a a'" "E \ a' \a a" with mrw adal a' have "a' = ws a" unfolding w by cases(fastforce dest: antisymPD[OF antisym_action_order] read_actions_not_write_actions elim!: meta_allE[where x=a']) thus "a' = ws a" "a' = ws a" by - next assume "P,E \ ws a \hb a'" "P,E \ a' \hb a" thus "E \ ws a \a a'" "E \ a' \a a" using a' by(blast intro: happens_before_into_action_order)+ next assume "is_volatile P al" "P,E \ ws a \so a'" "P,E \ a' \so a" thus "E \ ws a \a a'" "E \ a' \a a" by(auto elim: sync_orderE) } qed qed(rule tsa_ok) qed subsection \Cut-and-update and sequentially consistent completion\ inductive foldl_list_all2 :: "('b \ 'c \ 'a \ 'a) \ ('b \ 'c \ 'a \ bool) \ ('b \ 'c \ 'a \ bool) \ 'b list \ 'c list \ 'a \ bool" for f and P and Q where "foldl_list_all2 f P Q [] [] s" | "\ Q x y s; P x y s \ foldl_list_all2 f P Q xs ys (f x y s) \ \ foldl_list_all2 f P Q (x # xs) (y # ys) s" inductive_simps foldl_list_all2_simps [simp]: "foldl_list_all2 f P Q [] ys s" "foldl_list_all2 f P Q xs [] s" "foldl_list_all2 f P Q (x # xs) (y # ys) s" inductive_simps foldl_list_all2_Cons1: "foldl_list_all2 f P Q (x # xs) ys s" inductive_simps foldl_list_all2_Cons2: "foldl_list_all2 f P Q xs (y # ys) s" definition eq_upto_seq_inconsist :: "'m prog \ ('addr, 'thread_id) obs_event action list \ ('addr, 'thread_id) obs_event action list \ ('addr \ addr_loc \ 'addr val \ bool) \ bool" where "eq_upto_seq_inconsist P = foldl_list_all2 (\ob ob' vs. mrw_value P vs ob) (\ob ob' vs. case ob of NormalAction (ReadMem ad al v) \ \b. vs (ad, al) = Some (v, b) | _ \ True) (\ob ob' vs. if (case ob of NormalAction (ReadMem ad al v) \ \b. vs (ad, al) = Some (v, b) | _ \ True) then ob = ob' else ob \ ob')" lemma eq_upto_seq_inconsist_simps: "eq_upto_seq_inconsist P [] obs' vs \ obs' = []" "eq_upto_seq_inconsist P obs [] vs \ obs = []" "eq_upto_seq_inconsist P (ob # obs) (ob' # obs') vs \ (case ob of NormalAction (ReadMem ad al v) \ if (\b. 
vs (ad, al) = \(v, b)\) then ob = ob' \ eq_upto_seq_inconsist P obs obs' (mrw_value P vs ob) else ob \ ob' | _ \ ob = ob' \ eq_upto_seq_inconsist P obs obs' (mrw_value P vs ob))" by(auto simp add: eq_upto_seq_inconsist_def split: action.split obs_event.split) lemma eq_upto_seq_inconsist_Cons1: "eq_upto_seq_inconsist P (ob # obs) obs' vs \ (\ob' obs''. obs' = ob' # obs'' \ (case ob of NormalAction (ReadMem ad al v) \ if (\b. vs (ad, al) = \(v, b)\) then ob' = ob \ eq_upto_seq_inconsist P obs obs'' (mrw_value P vs ob) else ob \ ob' | _ \ ob' = ob \ eq_upto_seq_inconsist P obs obs'' (mrw_value P vs ob)))" unfolding eq_upto_seq_inconsist_def by(auto split: obs_event.split action.split simp add: foldl_list_all2_Cons1) lemma eq_upto_seq_inconsist_appendD: assumes "eq_upto_seq_inconsist P (obs @ obs') obs'' vs" and "ta_seq_consist P vs (llist_of obs)" shows "length obs \ length obs''" (is ?thesis1) and "take (length obs) obs'' = obs" (is ?thesis2) and "eq_upto_seq_inconsist P obs' (drop (length obs) obs'') (mrw_values P vs obs)" (is ?thesis3) using assms by(induct obs arbitrary: obs'' vs)(auto split: action.split_asm obs_event.split_asm simp add: eq_upto_seq_inconsist_Cons1) lemma ta_seq_consist_imp_eq_upto_seq_inconsist_refl: "ta_seq_consist P vs (llist_of obs) \ eq_upto_seq_inconsist P obs obs vs" apply(induct obs arbitrary: vs) apply(auto simp add: eq_upto_seq_inconsist_simps split: action.split obs_event.split) done context notes split_paired_Ex [simp del] eq_upto_seq_inconsist_simps [simp] begin lemma eq_upto_seq_inconsist_appendI: "\ eq_upto_seq_inconsist P obs OBS vs; \ ta_seq_consist P vs (llist_of obs) \ \ eq_upto_seq_inconsist P obs' OBS' (mrw_values P vs OBS) \ \ eq_upto_seq_inconsist P (obs @ obs') (OBS @ OBS') vs" apply(induct obs arbitrary: vs OBS) apply simp apply(auto simp add: eq_upto_seq_inconsist_Cons1) apply(simp split: action.split obs_event.split) apply auto done lemma eq_upto_seq_inconsist_trans: "\ eq_upto_seq_inconsist P obs obs' vs; eq_upto_seq_inconsist P obs' obs'' vs \ \ eq_upto_seq_inconsist P obs obs'' vs" apply(induction obs arbitrary: obs' obs'' vs) apply(clarsimp simp add: eq_upto_seq_inconsist_Cons1)+ apply(auto split!: action.split obs_event.split if_split_asm) done lemma eq_upto_seq_inconsist_append2: "\ eq_upto_seq_inconsist P obs obs' vs; \ ta_seq_consist P vs (llist_of obs) \ \ eq_upto_seq_inconsist P obs (obs' @ obs'') vs" apply(induction obs arbitrary: obs' vs) apply(clarsimp simp add: eq_upto_seq_inconsist_Cons1)+ apply(auto split!: action.split obs_event.split if_split_asm) done end context executions_sc_hb begin lemma ta_seq_consist_mrwI: assumes E: "E \ \" and wf: "P \ (E, ws) \" and mrw: "\a. \ enat a < r; a \ read_actions E \ \ P,E \ a \mrw ws a" shows "ta_seq_consist P Map.empty (lmap snd (ltake r E))" proof(rule ta_seq_consist_nthI) fix i ad al v assume i_len: "enat i < llength (lmap snd (ltake r E))" and E_i: "lnth (lmap snd (ltake r E)) i = NormalAction (ReadMem ad al v)" and sc: "ta_seq_consist P Map.empty (ltake (enat i) (lmap snd (ltake r E)))" from i_len have "enat i < r" by simp with sc have "ta_seq_consist P Map.empty (ltake (enat i) (lmap snd E))" by(simp add: min_def split: if_split_asm) hence ns: "non_speculative P (\_. {}) (ltake (enat i) (lmap snd E))" by(rule ta_seq_consist_into_non_speculative) simp from i_len have "i \ actions E" by(simp add: actions_def) moreover from E_i i_len have obs_i: "action_obs E i = NormalAction (ReadMem ad al v)" by(simp add: action_obs_def lnth_ltake) ultimately have read: "i \ read_actions E" .. 
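(* The read action i lies below r, so assumption mrw yields its most-recent write ws i,
   and mrw_before places that write strictly before i in the trace. *)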
with i_len have mrw_i: "P,E \ i \mrw ws i" by(auto intro: mrw) with E have "ws i < i" using ns by(rule mrw_before) from mrw_i obs_i obtain adal_w: "(ad, al) \ action_loc P E (ws i)" and adal_r: "(ad, al) \ action_loc P E i" and "write": "ws i \ write_actions E" by cases auto from wf have "is_write_seen P E ws" by(rule wf_exec_is_write_seenD) from is_write_seenD[OF this read obs_i] have vw_v: "value_written P E (ws i) (ad, al) = v" by simp let ?vs = "mrw_values P Map.empty (map snd (list_of (ltake (enat (ws i)) E)))" from \ws i < i\ i_len have "enat (ws i) < llength (ltake (enat i) E)" by(simp add: less_trans[where y="enat i"]) hence "ltake (enat i) E = lappend (ltake (enat (ws i)) (ltake (enat i) E)) (LCons (lnth (ltake (enat i) E) (ws i)) (ldropn (Suc (ws i)) (ltake (enat i) E)))" by(simp only: ldropn_Suc_conv_ldropn lappend_ltake_enat_ldropn) also have "\ = lappend (ltake (enat (ws i)) E) (LCons (lnth E (ws i)) (ldropn (Suc (ws i)) (ltake (enat i) E)))" using \ws i < i\ i_len \enat (ws i) < llength (ltake (enat i) E)\ by(simp add: lnth_ltake)(simp add: min_def) finally have r_E: "ltake (enat i) E = \" . have "mrw_values P Map.empty (list_of (ltake (enat i) (lmap snd (ltake r E)))) (ad, al) = mrw_values P Map.empty (map snd (list_of (ltake (enat i) E))) (ad, al)" using \enat i < r\ by(auto simp add: min_def) also have "\ = mrw_values P (mrw_value P ?vs (snd (lnth E (ws i)))) (map snd (list_of (ldropn (Suc (ws i)) (ltake (enat i) E)))) (ad, al)" by(subst r_E)(simp add: list_of_lappend) also have "\ = mrw_value P ?vs (snd (lnth E (ws i))) (ad, al)" proof(rule mrw_values_no_write_unchanged) fix wa assume wa: "wa \ set (map snd (list_of (ldropn (Suc (ws i)) (ltake (enat i) E))))" and "is_write_action wa" "(ad, al) \ action_loc_aux P wa" from wa obtain w where "w < length (map snd (list_of (ldropn (Suc (ws i)) (ltake (enat i) E))))" and "map snd (list_of (ldropn (Suc (ws i)) (ltake (enat i) E))) ! w = wa" unfolding in_set_conv_nth by blast moreover hence "Suc (ws i + w) < i" (is "?w < _") using i_len by(cases "llength E")(simp_all add: length_list_of_conv_the_enat) ultimately have obs_w': "action_obs E ?w = wa" using i_len by(simp add: action_obs_def lnth_ltake less_trans[where y="enat i"] ac_simps) from \?w < i\ i_len have "?w \ actions E" by(simp add: actions_def less_trans[where y="enat i"]) with \is_write_action wa\ obs_w' \(ad, al) \ action_loc_aux P wa\ have write': "?w \ write_actions E" and adal': "(ad, al) \ action_loc P E ?w" by(auto intro: write_actions.intros) from \?w < i\ \i \ read_actions E\ \?w \ actions E\ have "E \ ?w \a i" by(auto simp add: action_order_def elim: read_actions.cases) from mrw_i adal_r write' adal' have "E \ ?w \a ws i \ E \ i \a ?w" by(rule most_recent_write_recent) hence "E \ ?w \a ws i" proof assume "E \ i \a ?w" with \E \ ?w \a i\ have "?w = i" by(rule antisymPD[OF antisym_action_order]) with write' read have False by(auto dest: read_actions_not_write_actions) thus ?thesis .. 
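(* Any write wa strictly between ws i and i is action-ordered before ws i and hence a new
   action, whereas the entry recorded for ws i stems from a proper (non-new) write, so the
   side condition of mrw_values_no_write_unchanged is satisfied. *)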
qed from adal_w "write" have "mrw_value P ?vs (snd (lnth E (ws i))) (ad, al) \ None" by(cases "snd (lnth E (ws i))" rule: mrw_value_cases) (auto simp add: action_obs_def split: if_split_asm elim: write_actions.cases) then obtain b v where vb: "mrw_value P ?vs (snd (lnth E (ws i))) (ad, al) = Some (v, b)" by auto moreover from \E \ ?w \a ws i\ obs_w' have "is_new_action wa" "\ is_new_action (action_obs E (ws i))" by(auto elim!: action_orderE) from \\ is_new_action (action_obs E (ws i))\ "write" adal_w obtain v' where "action_obs E (ws i) = NormalAction (WriteMem ad al v')" by(auto elim!: write_actions.cases is_write_action.cases) with vb have b by(simp add: action_obs_def) with \is_new_action wa\ vb show "case mrw_value P ?vs (snd (lnth E (ws i))) (ad, al) of None \ False | \(v, b)\ \ b \ is_new_action wa" by simp qed also { fix v assume "?vs (ad, al) = Some (v, True)" and "is_new_action (action_obs E (ws i))" from mrw_values_eq_SomeD[OF this(1)] obtain wa where "wa \ set (map snd (list_of (ltake (enat (ws i)) E)))" and "is_write_action wa" and "(ad, al) \ action_loc_aux P wa" and "\ is_new_action wa" by(fastforce simp del: set_map) moreover then obtain w where w: "w < ws i" and wa: "wa = snd (lnth E w)" unfolding in_set_conv_nth by(cases "llength E")(auto simp add: lnth_ltake length_list_of_conv_the_enat) ultimately have "w \ write_actions E" "action_obs E w = wa" "(ad, al) \ action_loc P E w" using \ws i \ write_actions E\ by(auto intro!: write_actions.intros simp add: actions_def less_trans[where y="enat (ws i)"] action_obs_def elim!: write_actions.cases) with mrw_i adal_r have "E \ w \a ws i \ E \ i \a w" by -(rule most_recent_write_recent) hence False proof assume "E \ w \a ws i" moreover from \\ is_new_action wa\ \is_new_action (action_obs E (ws i))\ "write" w wa \w \ write_actions E\ have "E \ ws i \a w" by(auto simp add: action_order_def action_obs_def) ultimately have "w = ws i" by(rule antisymPD[OF antisym_action_order]) with \w < ws i\ show False by simp next assume "E \ i \a w" moreover from \w \ write_actions E\ \w < ws i\ \ws i < i\ read have "E \ w \a i" by(auto simp add: action_order_def elim: read_actions.cases) ultimately have "i = w" by(rule antisymPD[OF antisym_action_order]) with \w < ws i\ \ws i < i\ show False by simp qed } then obtain b where "\ = Some (v, b)" using vw_v "write" adal_w apply(atomize_elim) apply(auto simp add: action_obs_def value_written_def write_actions_iff) apply(erule is_write_action.cases) apply auto - apply blast+ done finally show "\b. mrw_values P Map.empty (list_of (ltake (enat i) (lmap snd (ltake r E)))) (ad, al) = \(v, b)\" by blast qed end context jmm_multithreaded begin definition complete_sc :: "('l,'thread_id,'x,'m,'w) state \ ('addr \ addr_loc \ 'addr val \ bool) \ ('thread_id \ ('l, 'thread_id, 'x, 'm, 'w, ('addr, 'thread_id) obs_event action) thread_action) llist" where "complete_sc s vs = unfold_llist (\(s, vs). \t ta s'. \ s -t\ta\ s') (\(s, vs). fst (SOME ((t, ta), s'). s -t\ta\ s' \ ta_seq_consist P vs (llist_of \ta\\<^bsub>o\<^esub>))) (\(s, vs). let ((t, ta), s') = SOME ((t, ta), s'). s -t\ta\ s' \ ta_seq_consist P vs (llist_of \ta\\<^bsub>o\<^esub>) in (s', mrw_values P vs \ta\\<^bsub>o\<^esub>)) (s, vs)" definition sc_completion :: "('l, 'thread_id, 'x, 'm, 'w) state \ ('addr \ addr_loc \ 'addr val \ bool) \ bool" where "sc_completion s vs \ (\ttas s' t x ta x' m'. s -\ttas\* s' \ ta_seq_consist P vs (llist_of (concat (map (\(t, ta). 
\ta\\<^bsub>o\<^esub>) ttas))) \ thr s' t = \(x, no_wait_locks)\ \ t \ (x, shr s') -ta\ (x', m') \ actions_ok s' t ta \ (\ta' x'' m''. t \ (x, shr s') -ta'\ (x'', m'') \ actions_ok s' t ta' \ ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>)))" lemma sc_completionD: "\ sc_completion s vs; s -\ttas\* s'; ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))); thr s' t = \(x, no_wait_locks)\; t \ (x, shr s') -ta\ (x', m'); actions_ok s' t ta \ \ \ta' x'' m''. t \ (x, shr s') -ta'\ (x'', m'') \ actions_ok s' t ta' \ ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>)" unfolding sc_completion_def by blast lemma sc_completionI: "(\ttas s' t x ta x' m'. \ s -\ttas\* s'; ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))); thr s' t = \(x, no_wait_locks)\; t \ (x, shr s') -ta\ (x', m'); actions_ok s' t ta \ \ \ta' x'' m''. t \ (x, shr s') -ta'\ (x'', m'') \ actions_ok s' t ta' \ ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>)) \ sc_completion s vs" unfolding sc_completion_def by blast lemma sc_completion_shift: assumes sc_c: "sc_completion s vs" and \Red: "s -\ttas\* s'" and sc: "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (llist_of ttas)))" shows "sc_completion s' (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))" proof(rule sc_completionI) fix ttas' s'' t x ta x' m' assume \Red': "s' -\ttas'\* s''" and sc': "ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas')))" and red: "thr s'' t = \(x, no_wait_locks)\" "t \ \x, shr s''\ -ta\ \x', m'\" "actions_ok s'' t ta" from \Red \Red' have "s -\ttas @ ttas'\* s''" unfolding RedT_def by(rule rtrancl3p_trans) moreover from sc sc' have "ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) (ttas @ ttas'))))" apply(simp add: lappend_llist_of_llist_of[symmetric] ta_seq_consist_lappend del: lappend_llist_of_llist_of) apply(simp add: lconcat_llist_of[symmetric] lmap_llist_of[symmetric] llist.map_comp o_def split_def del: lmap_llist_of) done ultimately show "\ta' x'' m''. t \ \x, shr s''\ -ta'\ \x'', m''\ \ actions_ok s'' t ta' \ ta_seq_consist P (mrw_values P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas'))) (llist_of \ta'\\<^bsub>o\<^esub>)" using red unfolding foldl_append[symmetric] concat_append[symmetric] map_append[symmetric] by(rule sc_completionD[OF sc_c]) qed lemma complete_sc_in_Runs: assumes cau: "sc_completion s vs" and ta_seq_consist_convert_RA: "\vs ln. ta_seq_consist P vs (llist_of (convert_RA ln))" shows "mthr.Runs s (complete_sc s vs)" proof - let ?ttas' = "\ttas' :: ('thread_id \ ('l,'thread_id,'x,'m,'w, ('addr, 'thread_id) obs_event action) thread_action) list. concat (map (\(t, ta). 
\ta\\<^bsub>o\<^esub>) ttas')" let "?vs ttas'" = "mrw_values P vs (?ttas' ttas')" define s' vs' and ttas :: "('thread_id \ ('l,'thread_id,'x,'m,'w, ('addr, 'thread_id) obs_event action) thread_action) list" where "s' = s" and "vs' = vs" and "ttas = []" hence "s -\ttas\* s'" "ta_seq_consist P vs (llist_of (?ttas' ttas))" by auto hence "mthr.Runs s' (complete_sc s' (?vs ttas))" proof(coinduction arbitrary: s' ttas rule: mthr.Runs.coinduct) case (Runs s' ttas') note Red = \s -\ttas'\* s'\ and sc = \ta_seq_consist P vs (llist_of (?ttas' ttas'))\ show ?case proof(cases "\t' ta' s''. s' -t'\ta'\ s''") case False hence ?Stuck by(simp add: complete_sc_def) thus ?thesis .. next case True let ?proceed = "\((t', ta'), s''). s' -t'\ta'\ s'' \ ta_seq_consist P (?vs ttas') (llist_of \ta'\\<^bsub>o\<^esub>)" from True obtain t' ta' s'' where red: "s' -t'\ta'\ s''" by(auto) then obtain ta'' s''' where "s' -t'\ta''\ s'''" and "ta_seq_consist P (?vs ttas') (llist_of \ta''\\<^bsub>o\<^esub>)" proof(cases) case (redT_normal x x' m') note red = \t' \ \x, shr s'\ -ta'\ \x', m'\\ and ts''t' = \thr s' t' = \(x, no_wait_locks)\\ and aok = \actions_ok s' t' ta'\ and s'' = \redT_upd s' t' ta' x' m' s''\ from sc_completionD[OF cau Red sc ts''t' red aok] obtain ta'' x'' m'' where red': "t' \ \x, shr s'\ -ta''\ \x'', m''\" and aok': "actions_ok s' t' ta''" and sc': "ta_seq_consist P (?vs ttas') (llist_of \ta''\\<^bsub>o\<^esub>)" by blast from redT_updWs_total obtain ws' where "redT_updWs t' (wset s') \ta''\\<^bsub>w\<^esub> ws'" .. then obtain s''' where "redT_upd s' t' ta'' x'' m'' s'''" by fastforce with red' ts''t' aok' have "s' -t'\ta''\ s'''" .. thus thesis using sc' by(rule that) next case redT_acquire thus thesis by(simp add: that[OF red] ta_seq_consist_convert_RA) qed hence "?proceed ((t', ta''), s''')" using Red by(auto) hence *: "?proceed (Eps ?proceed)" by(rule someI) moreover from Red * have "s -\ttas' @ [fst (Eps ?proceed)]\* snd (Eps ?proceed)" by(auto simp add: split_beta RedT_def intro: rtrancl3p_step) moreover from True have "complete_sc s' (?vs ttas') = LCons (fst (Eps ?proceed)) (complete_sc (snd (Eps ?proceed)) (?vs (ttas' @ [fst (Eps ?proceed)])))" unfolding complete_sc_def by(simp add: split_def) moreover from sc \?proceed (Eps ?proceed)\ have "ta_seq_consist P vs (llist_of (?ttas' (ttas' @ [fst (Eps ?proceed)])))" unfolding map_append concat_append lappend_llist_of_llist_of[symmetric] by(subst ta_seq_consist_lappend)(auto simp add: split_def) ultimately have ?Step by(fastforce intro: exI[where x="ttas' @ [fst (Eps ?proceed)]"] simp del: split_paired_Ex) thus ?thesis by simp qed qed thus ?thesis by(simp add: s'_def ttas_def) qed lemma complete_sc_ta_seq_consist: assumes cau: "sc_completion s vs" and ta_seq_consist_convert_RA: "\vs ln. ta_seq_consist P vs (llist_of (convert_RA ln))" shows "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (complete_sc s vs)))" proof - define vs' where "vs' = vs" let ?obs = "\ttas. lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) ttas)" define obs where "obs = ?obs (complete_sc s vs)" define a where "a = complete_sc s vs'" let ?ttas' = "\ttas' :: ('thread_id \ ('l,'thread_id,'x,'m,'w,('addr, 'thread_id) obs_event action) thread_action) list. concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas')" let ?vs = "\ttas'. mrw_values P vs (?ttas' ttas')" from vs'_def obs_def have "s -\[]\* s" "ta_seq_consist P vs (llist_of (?ttas' []))" "vs' = ?vs []" by(auto) hence "\s' ttas'. 
obs = ?obs (complete_sc s' vs') \ s -\ttas'\* s' \ ta_seq_consist P vs (llist_of (?ttas' ttas')) \ vs' = ?vs ttas' \ a = complete_sc s' vs'" unfolding obs_def vs'_def a_def by metis moreover have "wf (inv_image {(m, n). m < n} (llength \ ltakeWhile (\(t, ta). \ta\\<^bsub>o\<^esub> = [])))" (is "wf ?R") by(rule wf_inv_image)(rule wellorder_class.wf) ultimately show "ta_seq_consist P vs' obs" proof(coinduct vs' obs a rule: ta_seq_consist_coinduct_append_wf) case (ta_seq_consist vs' obs a) then obtain s' ttas' where obs_def: "obs = ?obs (complete_sc s' (?vs ttas'))" and Red: "s -\ttas'\* s'" and sc: "ta_seq_consist P vs (llist_of (?ttas' ttas'))" and vs'_def: "vs' = ?vs ttas'" and a_def: "a = complete_sc s' vs'" by blast show ?case proof(cases "\t' ta' s''. s' -t'\ta'\ s''") case False hence "obs = LNil" unfolding obs_def complete_sc_def by simp hence ?LNil unfolding obs_def by auto thus ?thesis .. next case True let ?proceed = "\((t', ta'), s''). s' -t'\ta'\ s'' \ ta_seq_consist P (?vs ttas') (llist_of \ta'\\<^bsub>o\<^esub>)" let ?tta = "fst (Eps ?proceed)" let ?s' = "snd (Eps ?proceed)" from True obtain t' ta' s'' where red: "s' -t'\ta'\ s''" by blast then obtain ta'' s''' where "s' -t'\ta''\ s'''" and "ta_seq_consist P (?vs ttas') (llist_of \ta''\\<^bsub>o\<^esub>)" proof(cases) case (redT_normal x x' m') note red = \t' \ \x, shr s'\ -ta'\ \x', m'\\ and ts''t' = \thr s' t' = \(x, no_wait_locks)\\ and aok = \actions_ok s' t' ta'\ and s''' = \redT_upd s' t' ta' x' m' s''\ from sc_completionD[OF cau Red sc ts''t' red aok] obtain ta'' x'' m'' where red': "t' \ \x, shr s'\ -ta''\ \x'', m''\" and aok': "actions_ok s' t' ta''" and sc': "ta_seq_consist P (?vs ttas') (llist_of \ta''\\<^bsub>o\<^esub>)" by blast from redT_updWs_total obtain ws' where "redT_updWs t' (wset s') \ta''\\<^bsub>w\<^esub> ws'" .. then obtain s''' where "redT_upd s' t' ta'' x'' m'' s'''" by fastforce with red' ts''t' aok' have "s' -t'\ta''\ s'''" .. thus thesis using sc' by(rule that) next case redT_acquire thus thesis by(simp add: that[OF red] ta_seq_consist_convert_RA) qed hence "?proceed ((t', ta''), s''')" by auto hence "?proceed (Eps ?proceed)" by(rule someI) show ?thesis proof(cases "obs = LNil") case True thus ?thesis .. next case False from True have csc_unfold: "complete_sc s' (?vs ttas') = LCons ?tta (complete_sc ?s' (?vs (ttas' @ [?tta])))" unfolding complete_sc_def by(simp add: split_def) hence "obs = lappend (llist_of \snd ?tta\\<^bsub>o\<^esub>) (?obs (complete_sc ?s' (?vs (ttas' @ [?tta]))))" using obs_def by(simp add: split_beta) moreover have "ta_seq_consist P vs' (llist_of \snd ?tta\\<^bsub>o\<^esub>)" using \?proceed (Eps ?proceed)\ vs'_def by(clarsimp simp add: split_beta) moreover { assume "llist_of \snd ?tta\\<^bsub>o\<^esub> = LNil" moreover from obs_def \obs \ LNil\ have "lfinite (ltakeWhile (\(t, ta). 
\ta\\<^bsub>o\<^esub> = []) (complete_sc s' (?vs ttas')))" unfolding lfinite_ltakeWhile by(fastforce simp add: split_def lconcat_eq_LNil) ultimately have "(complete_sc ?s' (?vs (ttas' @ [?tta])), a) \ ?R" unfolding a_def vs'_def csc_unfold by(clarsimp simp add: split_def llist_of_eq_LNil_conv)(auto simp add: lfinite_eq_range_llist_of) } moreover have "?obs (complete_sc ?s' (?vs (ttas' @ [?tta]))) = ?obs (complete_sc ?s' (mrw_values P vs' (list_of (llist_of \snd ?tta\\<^bsub>o\<^esub>))))" unfolding vs'_def by(simp add: split_def) moreover from \?proceed (Eps ?proceed)\ Red have "s -\ttas' @ [?tta]\* ?s'" by(auto simp add: RedT_def split_def intro: rtrancl3p_step) moreover from sc \?proceed (Eps ?proceed)\ have "ta_seq_consist P vs (llist_of (?ttas' (ttas' @ [?tta])))" by(clarsimp simp add: split_def ta_seq_consist_lappend lappend_llist_of_llist_of[symmetric] simp del: lappend_llist_of_llist_of) moreover have "mrw_values P vs' (list_of (llist_of \snd ?tta\\<^bsub>o\<^esub>)) = ?vs (ttas' @ [?tta])" unfolding vs'_def by(simp add: split_def) moreover have "complete_sc ?s' (?vs (ttas' @ [?tta])) = complete_sc ?s' (mrw_values P vs' (list_of (llist_of \snd ?tta\\<^bsub>o\<^esub>)))" unfolding vs'_def by(simp add: split_def) ultimately have "?lappend" by blast thus ?thesis .. qed qed qed qed lemma sequential_completion_Runs: assumes "sc_completion s vs" and "\vs ln. ta_seq_consist P vs (llist_of (convert_RA ln))" shows "\ttas. mthr.Runs s ttas \ ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) ttas))" using complete_sc_ta_seq_consist[OF assms] complete_sc_in_Runs[OF assms] by blast definition cut_and_update :: "('l, 'thread_id, 'x, 'm, 'w) state \ ('addr \ addr_loc \ 'addr val \ bool) \ bool" where "cut_and_update s vs \ (\ttas s' t x ta x' m'. s -\ttas\* s' \ ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) \ thr s' t = \(x, no_wait_locks)\ \ t \ (x, shr s') -ta\ (x', m') \ actions_ok s' t ta \ (\ta' x'' m''. t \ (x, shr s') -ta'\ (x'', m'') \ actions_ok s' t ta' \ ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>) \ eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta'\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))))" lemma cut_and_updateI[intro?]: "(\ttas s' t x ta x' m'. \ s -\ttas\* s'; ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))); thr s' t = \(x, no_wait_locks)\; t \ (x, shr s') -ta\ (x', m'); actions_ok s' t ta \ \ \ta' x'' m''. t \ (x, shr s') -ta'\ (x'', m'') \ actions_ok s' t ta' \ ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>) \ eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta'\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))) \ cut_and_update s vs" unfolding cut_and_update_def by blast lemma cut_and_updateD: "\ cut_and_update s vs; s -\ttas\* s'; ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))); thr s' t = \(x, no_wait_locks)\; t \ (x, shr s') -ta\ (x', m'); actions_ok s' t ta \ \ \ta' x'' m''. t \ (x, shr s') -ta'\ (x'', m'') \ actions_ok s' t ta' \ ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>) \ eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta'\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). 
\ta\\<^bsub>o\<^esub>) ttas)))" unfolding cut_and_update_def by blast lemma cut_and_update_imp_sc_completion: "cut_and_update s vs \ sc_completion s vs" apply(rule sc_completionI) apply(drule (5) cut_and_updateD) apply blast done lemma sequential_completion: assumes cut_and_update: "cut_and_update s vs" and ta_seq_consist_convert_RA: "\vs ln. ta_seq_consist P vs (llist_of (convert_RA ln))" and Red: "s -\ttas\* s'" and sc: "ta_seq_consist P vs (llist_of (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))" and red: "s' -t\ta\ s''" shows "\ta' ttas'. mthr.Runs s' (LCons (t, ta') ttas') \ ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (lappend (llist_of ttas) (LCons (t, ta') ttas')))) \ eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta'\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))" proof - from red obtain ta' s''' where red': "redT s' (t, ta') s'''" and sc': "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (lappend (llist_of ttas) (LCons (t, ta') LNil))))" and eq: "eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta'\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))" proof cases case (redT_normal x x' m') note ts't = \thr s' t = \(x, no_wait_locks)\\ and red = \t \ \x, shr s'\ -ta\ \x', m'\\ and aok = \actions_ok s' t ta\ and s'' = \redT_upd s' t ta x' m' s''\ from cut_and_updateD[OF cut_and_update, OF Red sc ts't red aok] obtain ta' x'' m'' where red: "t \ \x, shr s'\ -ta'\ \x'', m''\" and sc': "ta_seq_consist P (mrw_values P vs (concat (map (\(t, y). \y\\<^bsub>o\<^esub>) ttas))) (llist_of \ta'\\<^bsub>o\<^esub>)" and eq: "eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta'\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))" and aok: "actions_ok s' t ta'" by blast obtain ws''' where "redT_updWs t (wset s') \ta'\\<^bsub>w\<^esub> ws'''" using redT_updWs_total .. then obtain s''' where s''': "redT_upd s' t ta' x'' m'' s'''" by fastforce with red \thr s' t = \(x, no_wait_locks)\\ aok have "s' -t\ta'\ s'''" by(rule redT.redT_normal) moreover from sc sc' have "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (lappend (llist_of ttas) (LCons (t, ta') LNil))))" by(auto simp add: lmap_lappend_distrib ta_seq_consist_lappend split_def lconcat_llist_of[symmetric] o_def list_of_lconcat) ultimately show thesis using eq by(rule that) next case (redT_acquire x ln n) hence "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (lappend (llist_of ttas) (LCons (t, ta) LNil))))" and "eq_upto_seq_inconsist P \ta\\<^bsub>o\<^esub> \ta\\<^bsub>o\<^esub> (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) ttas)))" using sc by(simp_all add: lmap_lappend_distrib ta_seq_consist_lappend split_def lconcat_llist_of[symmetric] o_def list_of_lconcat ta_seq_consist_convert_RA ta_seq_consist_imp_eq_upto_seq_inconsist_refl) with red show thesis by(rule that) qed txt \Now, find a sequentially consistent completion from @{term "s'''"} onwards.\ from Red red' have Red': "s -\ttas @ [(t, ta')]\* s'''" unfolding RedT_def by(auto intro: rtrancl3p_step) from sc sc' have "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (llist_of (ttas @ [(t, ta')]))))" by(simp add: o_def split_def lappend_llist_of_llist_of[symmetric]) with cut_and_update_imp_sc_completion[OF cut_and_update] Red' have "sc_completion s''' (mrw_values P vs (concat (map (\(t, ta). 
\ta\\<^bsub>o\<^esub>) (ttas @ [(t, ta')]))))" by(rule sc_completion_shift) from sequential_completion_Runs[OF this ta_seq_consist_convert_RA] obtain ttas' where \Runs: "mthr.Runs s''' ttas'" and sc'': "ta_seq_consist P (mrw_values P vs (concat (map (\(t, ta). \ta\\<^bsub>o\<^esub>) (ttas @ [(t, ta')])))) (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) ttas'))" by blast from red' \Runs have "mthr.Runs s' (LCons (t, ta') ttas')" .. moreover from sc sc' sc'' have "ta_seq_consist P vs (lconcat (lmap (\(t, ta). llist_of \ta\\<^bsub>o\<^esub>) (lappend (llist_of ttas) (LCons (t, ta') ttas'))))" unfolding lmap_lappend_distrib lconcat_lappend by(simp add: o_def ta_seq_consist_lappend split_def list_of_lconcat) ultimately show ?thesis using eq by blast qed end end diff --git a/thys/LightweightJava/Lightweight_Java_Equivalence.thy b/thys/LightweightJava/Lightweight_Java_Equivalence.thy --- a/thys/LightweightJava/Lightweight_Java_Equivalence.thy +++ b/thys/LightweightJava/Lightweight_Java_Equivalence.thy @@ -1,893 +1,893 @@ (* Title: Lightweight Java, the equivalence functions Authors: Rok Strnisa , 2006 Matthew Parkinson , 2006 Maintainer: *) theory Lightweight_Java_Equivalence imports Lightweight_Java_Definition begin (* BEGIN: HELPER FUNCTIONS *) lemma map_id[simp]: "map id list = list" by (induct list) auto lemma id_map_two[simp]: "map (\(x,y). (x,y)) list = list" by (induct list) auto lemma id_image_two[simp]: "(\(x,y). (x,y)) ` set list = set list" by (induct list) auto lemma map_fst[simp]: "map (\(x, y). x) list = map fst list" by (induct list) auto lemma map_snd[simp]: "map (\(x, y). y) list = map snd list" by (induct list) auto lemma zip_map_map_two [simp]: "zip (map fst list) (map snd list) = list" by (induct list) auto lemma concat_map_singlton [simp]: "concat (map (\e. [e]) list) = list" by (induct list) simp_all lemma list_all_map_P [simp]: "list_all (\b. b) (map (\x. P x) list) = (\x \ set list. P x)" by (induct list) simp_all lemma dom_single[simp]: "a : dom (Map.empty(k \ v)) = (a = k)" by (simp add: dom_def) lemma predicted_lu[rule_format, simp]: "x \ set list \ map_of (map (\key. (key, value)) list) x = Some value" by (induct list) auto lemma key_in_map1[simp]: "k \ dom M' \ (M ++ M') k = M k" apply (subgoal_tac "M' k = None") apply (simp add: map_add_def, force simp add: domI) done lemma forall_cons [rule_format]: "(\x \ set (s#S). P x) \ y \ set S \ P y" by (induct_tac S) simp_all lemma mem_cong[rule_format]: "x \ set list \ (f x \ set (map f list))" by (induct list) auto lemma forall_union: "\\a \ dom A. P (A a); \b \ dom B. P (B b)\ \ \x \ dom A \ dom B. 
P ((B ++ A) x)" apply(safe) apply(force) apply(drule_tac x = x in bspec, simp add: domI) apply(case_tac "A x = None") apply(force simp add: map_add_def) by (force) (* END: HELPER FUNCTIONS *) definition class_name_f :: "cld \ dcl" where "class_name_f cld = (case cld of cld_def dcl cl fds mds \ dcl)" lemma [simp]: "(class_name cld dcl) = (class_name_f cld = dcl)" by (force simp add: class_name_f_def split: cld.splits intro: class_nameI elim: class_name.cases) definition superclass_name_f :: "cld \ cl" where "superclass_name_f cld = (case cld of cld_def dcl cl fds mds \ cl)" lemma [simp]: "(superclass_name cld cl) = (superclass_name_f cld = cl)" by (force simp add: superclass_name_f_def split: cld.splits intro: superclass_nameI elim: superclass_name.cases) definition class_fields_f :: "cld \ fds" where "class_fields_f cld = (case cld of cld_def dcl cl fds mds \ fds)" lemma [simp]: "(class_fields cld fds) = (class_fields_f cld = fds)" by (force simp add: class_fields_f_def split: cld.splits intro: class_fieldsI elim: class_fields.cases) definition class_methods_f :: "cld \ meth_defs" where "class_methods_f cld = (case cld of cld_def dcl cl fds mds \ mds)" lemma [simp]: "(class_methods cld fds) = (class_methods_f cld = fds)" by (force simp add: class_methods_f_def split: cld.splits intro: class_methodsI elim: class_methods.cases) definition method_name_f :: "meth_def \ meth" where "method_name_f md = (case md of meth_def_def meth_sig meth_body \ (case meth_sig of meth_sig_def cl meth vds \ meth))" lemma [simp]: "(method_name md m) = (method_name_f md = m)" by (force simp add: method_name_f_def split: meth_def.splits meth_sig.splits intro: method_nameI elim: method_name.cases) definition distinct_names_f :: "P \ bool" where "distinct_names_f P = (distinct (map class_name_f P))" lemma distinct_names_map[rule_format]: "(\x\set cld_dcl_list. case_prod (\cld. (=) (class_name_f cld)) x) \ distinct (map snd cld_dcl_list) \ distinct_names_f (map fst cld_dcl_list)" apply(induct cld_dcl_list) apply(clarsimp simp add: distinct_names_f_def)+ apply(force) done lemma [simp]: "(distinct_names P) = (distinct_names_f P)" apply(rule) apply(erule distinct_names.cases) apply(clarsimp simp add: distinct_names_map) apply(simp add: distinct_names_f_def) apply(rule_tac cld_dcl_list = "map (\cld. (cld, class_name_f cld)) P" in dn_defI) apply(simp) apply(induct P) apply(simp) apply(simp) apply(clarsimp) apply(clarsimp) apply(induct P) apply(simp) apply(force) done primrec find_cld_f :: "P \ ctx \ fqn \ ctxcld_opt" where "find_cld_f [] ctx fqn = None" | "find_cld_f (cld#clds) ctx fqn = (case cld of cld_def dcl cl fds mds \ (case fqn of fqn_def dcl' \ (if dcl = dcl' then Some (ctx, cld) else find_cld_f clds ctx fqn)))" lemma [simp]: "(find_cld P ctx fqn ctxcld_opt) = (find_cld_f P ctx fqn = ctxcld_opt)" apply(rule) apply(induct rule: find_cld.induct) apply(simp+) apply(clarsimp) apply(induct P) apply(simp add: fc_emptyI) apply(case_tac fqn) apply(rename_tac cld clds dcl) apply(case_tac cld) apply(clarsimp) apply(rename_tac dcl' cl' vds' mds') apply(rule) apply(clarsimp) apply(rule fc_cons_trueI) apply(simp) apply(force) apply(force intro: fc_cons_falseI[simplified]) done lemma find_to_mem[rule_format]: "find_cld_f P ctx fqn = Some (ctx', cld) \ cld : set P" apply(induct P) by (clarsimp split: cld.splits fqn.splits)+ lemma find_cld_name_eq[rule_format]: "\ctxcld. find_cld_f P ctx (fqn_def dcl) = Some ctxcld \ (\cl fds mds. 
(ctx, cld_def dcl cl fds mds) = ctxcld)" apply(induct P) apply(simp) apply(clarsimp split: cld.splits fqn.splits) done primrec find_type_f :: "P \ ctx \ cl \ ty_opt" where "find_type_f P ctx cl_object = Some ty_top" | "find_type_f P ctx (cl_fqn fqn) = (case fqn of fqn_def dcl \ (case find_cld_f P ctx fqn of None \ None | Some (ctx', cld) \ Some (ty_def ctx' dcl)))" lemma [simp]: "(find_type P ctx cl ty_opt) = (find_type_f P ctx cl = ty_opt)" apply(rule) apply(force elim: find_type.cases split: fqn.splits) apply(case_tac cl) apply(force intro: ft_objI) apply(rename_tac fqn) apply(case_tac fqn) apply(clarsimp) apply(split option.splits) apply(rule) apply(force intro: ft_nullI) by (force intro: ft_dclI) lemma mem_remove: "cld : set P \ length (remove1 cld P) < length P" apply(induct P) by(simp, force split: if_split_asm) lemma finite_program[rule_format, intro]: "\P cld. (\ctx ctx' fqn. find_cld_f P ctx fqn = Some (ctx', cld)) \ length (remove1 cld P) < length P" apply(clarsimp) apply(drule find_to_mem) by (simp add: mem_remove) lemma path_length_eq[rule_format]: "path_length P ctx cl nn \ \nn'. path_length P ctx cl nn' \ nn = nn'" apply(erule path_length.induct) apply(clarsimp) apply(erule path_length.cases) apply(simp) apply(simp) apply(rule) apply(rule) apply(erule_tac ?a4.0 = nn' in path_length.cases) apply(clarify) apply(clarsimp) done lemma fpr_termination[iff]: "\P cld ctx ctx' fqn. find_cld_f P ctx fqn = Some (ctx', cld) \ acyclic_clds P \ The (path_length P ctx' (superclass_name_f cld)) < The (path_length P ctx (cl_fqn fqn))" apply(clarsimp) apply(erule acyclic_clds.cases) apply(clarsimp) apply(rename_tac P) apply(erule_tac x = ctx in allE) apply(erule_tac x = fqn in allE) apply(clarsimp) apply(rule theI2) apply(simp) apply(simp add: path_length_eq) apply(erule path_length.cases) apply(simp) apply(clarsimp) apply(rule theI2) apply(simp) apply(simp add: path_length_eq) apply(drule_tac path_length_eq, simp) apply(erule path_length.cases) apply(simp) apply(clarsimp) apply(drule_tac path_length_eq, simp) apply(simp) done function find_path_rec_f :: "P \ ctx \ cl \ ctxclds \ ctxclds_opt" where "find_path_rec_f P ctx cl_object path = Some path" | "find_path_rec_f P ctx (cl_fqn fqn) path = (if ~(acyclic_clds P) then None else (case find_cld_f P ctx fqn of None \ None | Some (ctx', cld) \ find_path_rec_f P ctx' (superclass_name_f cld) (path @ [(ctx',cld)])))" by pat_completeness auto termination by (relation "measure (\(P, ctx, cl, path). (THE nn. 
path_length P ctx cl nn))") auto lemma [simp]: "(find_path_rec P ctx cl path path_opt) = (find_path_rec_f P ctx cl path = path_opt)" apply(rule) apply(erule find_path_rec.induct) apply(simp)+ apply(induct rule: find_path_rec_f.induct) apply(clarsimp simp add: fpr_objI) apply(clarsimp) apply(rule) apply(simp add: fpr_nullI) apply(clarsimp split: option.splits) apply(rule fpr_nullI) apply(simp add: fpr_nullI) apply(rule fpr_fqnI) apply(force)+ done definition find_path_f :: "P \ ctx \ cl \ ctxclds_opt" where "find_path_f P ctx cl = find_path_rec_f P ctx cl []" lemma [simp]: "(find_path P ctx cl path_opt) = (find_path_f P ctx cl = path_opt)" apply(rule) apply(erule find_path.cases) apply(unfold find_path_f_def) apply(simp) apply(simp add: fp_defI) done primrec find_path_ty_f :: "P \ ty \ ctxclds_opt" where "find_path_ty_f P ty_top = Some []" | "find_path_ty_f P (ty_def ctx dcl) = find_path_f P ctx (cl_fqn (fqn_def dcl))" lemma [simp]: "(find_path_ty P ty ctxclds_opt) = (find_path_ty_f P ty = ctxclds_opt)" apply(rule) apply(force elim: find_path_ty.cases) apply(case_tac ty) apply(clarsimp simp add: fpty_objI) apply(clarsimp simp add: fpty_dclI) done primrec fields_in_path_f :: "ctxclds \ fs" where "fields_in_path_f [] = []" | "fields_in_path_f (ctxcld#ctxclds) = (map (\fd. case fd of fd_def cl f \ f) (class_fields_f (snd ctxcld))) @ fields_in_path_f ctxclds" lemma cl_f_list_map: "map (case_fd (\cl f. f)) (map (\(x, y). fd_def x y) cl_f_list) = map (\(cl_XXX, f_XXX). f_XXX) cl_f_list" by (induct cl_f_list, auto) lemma fip_ind_to_f: "\fs. fields_in_path clds fs \ fields_in_path_f clds = fs" apply(induct clds) apply(clarsimp, erule fields_in_path.cases) apply(simp) apply(clarsimp) apply(clarsimp) apply(erule fields_in_path.cases) apply(simp) by(clarsimp simp add: cl_f_list_map) lemma fd_map_split: "map (case_fd (\cl f. f)) (map (\(x, y). fd_def x y) list) = map (\(cl, f). f) list" apply(induct list) apply(simp) apply(clarsimp) done lemma fd_map_split': "map (\(x, y). fd_def x y) (map (case_fd Pair) list) = list" apply(induct list) apply(simp split: fd.splits)+ done lemma fd_map_split'': "map ((\(x, y). fd_def x y) \ case_fd Pair) list = list" apply(induct list) apply(simp split: fd.splits)+ done lemma [simp]: "\fs. (fields_in_path ctxclds fs) = (fields_in_path_f ctxclds = fs)" apply(induct ctxclds) apply(rule) apply(rule) apply(force elim: fields_in_path.cases) apply(simp add: fip_emptyI) apply(clarsimp) apply(rule) apply(erule fields_in_path.cases) apply(simp) apply(simp add: fip_ind_to_f fd_map_split) apply(clarsimp) apply(rule_tac cld = b and ctxcld_list = ctxclds and cl_f_list = "map (case_fd (\cl f. (cl, f))) (class_fields_f b)" in fip_consI[simplified]) apply(simp add: fd_map_split'') apply(simp add: fd_map_split') apply(clarsimp split: fd.splits) done definition fields_f :: "P \ ty \ fs option" where "fields_f P ty = (case find_path_ty_f P ty of None \ None | Some ctxclds \ Some (fields_in_path_f ctxclds))" lemma [simp]: "\fs_opt. 
(fields P ty fs_opt) = (fields_f P ty = fs_opt)" apply(rule) apply(rule) apply(case_tac fs_opt) apply(clarsimp) apply(erule fields.cases) apply(clarsimp) apply(simp add: fields_f_def) apply(clarsimp) apply(erule fields.cases) apply(simp) apply(clarsimp) apply(simp add: fields_f_def) apply(simp add: fields_f_def) apply(case_tac "find_path_ty_f P ty") apply(simp) apply(simp add: fields_noneI[simplified]) -apply(clarsimp) apply(case_tac "find_path_ty_f P ty") apply(simp) apply(simp) apply(clarsimp) +apply(clarsimp) apply(case_tac "find_path_ty_f P ty") apply(simp) apply(simp) apply(rule fields_someI) apply(simp) apply(simp) done primrec methods_in_path_f :: "clds \ meths" where "methods_in_path_f [] = []" | "methods_in_path_f (cld#clds) = map (\md. case md of meth_def_def meth_sig meth_body \ case meth_sig of meth_sig_def cl meth vds \ meth) (class_methods_f cld) @ methods_in_path_f clds" lemma meth_def_map[THEN mp]: "(\x \ set list. (\(md, cl, m, vds, mb). md = meth_def_def (meth_sig_def cl m vds) mb) x) \ map (case_meth_def (\ms mb. case ms of meth_sig_def cl m vds \ m)) (map (\(md, cl, m, vds, mb). md) list) = map (\(md, cl, m, vds, mb). m) list" by (induct list, auto) lemma meth_def_map': "map ((\(md, cl, m, vds, mb). md) \ (\md. case md of meth_def_def (meth_sig_def cl m vds) mb \ (md, cl, m, vds, mb))) list = list" apply(induct list) by (auto split: meth_def.splits meth_sig.splits) lemma [simp]: "\meths. (methods_in_path clds meths) = (methods_in_path_f clds = meths)" apply(induct clds) apply(clarsimp) apply(rule) apply(erule methods_in_path.cases) apply(simp) apply(clarsimp) apply(clarsimp) apply(rule mip_emptyI) apply(clarsimp) apply(rule) apply(erule methods_in_path.cases) apply(simp) apply(force) apply(rule_tac meth_def_cl_meth_vds_meth_body_list = "(case a of cld_def dcl cl fds mds \ (map (\md. (case md of meth_def_def ms mb \ (case ms of meth_sig_def cl m vds \ (md, cl, m, vds, mb)))) mds))" in mip_consI[simplified]) apply(clarsimp) apply(case_tac a) apply(simp add: class_methods_f_def meth_def_map') apply(clarsimp) apply(clarsimp split: meth_def.splits meth_sig.splits) apply(simp) apply(clarsimp) apply(case_tac a) apply(clarsimp simp add: class_methods_f_def split: meth_def.splits meth_sig.splits) done definition methods_f :: "P \ ty \ meths option" where "methods_f P ty = (case find_path_ty_f P ty of None \ None | Some ctxclds \ Some (methods_in_path_f (map (\(ctx, cld). 
cld) ctxclds)))" lemma [simp]: "(methods P ty meths) = (methods_f P ty = Some meths)" apply(rule) apply(erule methods.cases) apply(clarsimp simp add: methods_f_def) apply(simp add: methods_f_def) apply(split option.splits) apply(simp) apply(clarsimp) apply(rule methods_methodsI) apply(simp) apply(clarsimp) done primrec ftype_in_fds_f :: "P \ ctx \ fds \ f \ ty_opt_bot" where "ftype_in_fds_f P ctx [] f = ty_opt_bot_opt None" | "ftype_in_fds_f P ctx (fd#fds) f = (case fd of fd_def cl f' \ (if f = f' then (case find_type_f P ctx cl of None \ ty_opt_bot_bot | Some ty \ ty_opt_bot_opt (Some ty)) else ftype_in_fds_f P ctx fds f))" lemma [simp]: "(ftype_in_fds P ctx fds f ty_opt) = (ftype_in_fds_f P ctx fds f = ty_opt)" apply(rule) apply(induct fds) apply(erule ftype_in_fds.cases) apply(simp) apply(simp) apply(simp) apply(simp) apply(erule ftype_in_fds.cases) apply(simp) apply(simp) apply(simp) apply(clarsimp) apply(induct fds) apply(clarsimp) apply(rule ftif_emptyI) apply(rename_tac fd fds) apply(case_tac fd, rename_tac cl f', clarsimp) apply(rule) apply(clarsimp) apply(case_tac "find_type_f P ctx cl") apply(simp add: ftif_cons_botI[simplified]) apply(simp add: ftif_cons_trueI[simplified]) apply(force intro: ftif_cons_falseI[simplified]) done primrec ftype_in_path_f :: "P \ ctxclds \ f \ ty_opt" where "ftype_in_path_f P [] f = None" | "ftype_in_path_f P (ctxcld#ctxclds) f = (case ctxcld of (ctx, cld) \ (case ftype_in_fds_f P ctx (class_fields_f cld) f of ty_opt_bot_bot \ None | ty_opt_bot_opt ty_opt \ (case ty_opt of Some ty \ Some ty | None \ ftype_in_path_f P ctxclds f)))" lemma [simp]: "(ftype_in_path P ctxclds f ty_opt) = (ftype_in_path_f P ctxclds f = ty_opt)" apply(rule) apply(induct rule: ftype_in_path.induct) apply(simp+) apply(induct ctxclds) apply(simp) apply(rule ftip_emptyI) apply(hypsubst_thin) apply(clarsimp) apply(case_tac "ftype_in_fds_f P a (class_fields_f b) f") apply(rename_tac ty_opt) apply(case_tac ty_opt) apply(simp add: ftip_cons_falseI[simplified]) apply(simp add: ftip_cons_trueI[simplified]) apply(simp add: ftip_cons_botI[simplified]) done definition ftype_f :: "P \ ty \ f \ ty_opt" where "ftype_f P ty f = (case find_path_ty_f P ty of None \ None | Some ctxclds \ ftype_in_path_f P ctxclds f)" lemma [simp]: "(ftype P ty f ty') = (ftype_f P ty f = Some ty')" apply(rule) apply(induct rule: ftype.induct) apply(clarsimp simp add: ftype_f_def) apply(clarsimp simp add: ftype_f_def split: option.splits) apply(rule ftypeI) apply(simp+) done primrec find_meth_def_in_list_f :: "meth_defs \ meth \ meth_def_opt" where "find_meth_def_in_list_f [] m = None" | "find_meth_def_in_list_f (md#mds) m = (case md of meth_def_def ms mb \ (case ms of meth_sig_def cl m' vds \ (if m = m' then Some md else find_meth_def_in_list_f mds m)))" lemma [simp]: "(find_meth_def_in_list mds m md_opt) = (find_meth_def_in_list_f mds m = md_opt)" apply(rule) apply(induct rule: find_meth_def_in_list.induct) apply(simp+) apply(induct mds) apply(simp) apply(rule fmdil_emptyI) apply(clarsimp) apply(clarsimp split: meth_def.split meth_sig.split) apply(rule) apply(clarsimp) apply(rule fmdil_cons_trueI[simplified]) apply(force) apply(clarsimp) apply(rule fmdil_cons_falseI[simplified]) apply(force+) done primrec find_meth_def_in_path_f :: "ctxclds \ meth \ ctxmeth_def_opt" where fmdip_empty: "find_meth_def_in_path_f [] m = None" | fmdip_cons: "find_meth_def_in_path_f (ctxcld#ctxclds) m = (case ctxcld of (ctx, cld) \ (case find_meth_def_in_list_f (class_methods_f cld) m of Some md \ Some (ctx, md) | None \ 
find_meth_def_in_path_f ctxclds m))" lemma [simp]: "(find_meth_def_in_path ctxclds m ctxmeth_def_opt) = (find_meth_def_in_path_f ctxclds m = ctxmeth_def_opt)" apply(rule) apply(induct rule: find_meth_def_in_path.induct) apply(simp+) apply(induct ctxclds) apply(simp) apply(rule fmdip_emptyI) apply(clarsimp) apply(case_tac "find_meth_def_in_list_f (class_methods_f b) m") apply(clarsimp) apply(simp add: fmdip_cons_falseI[simplified]) apply(simp add: fmdip_cons_trueI[simplified]) done definition find_meth_def_f :: "P \ ty \ meth \ ctxmeth_def_opt" where "find_meth_def_f P ty m = (case find_path_ty_f P ty of None \ None | Some ctxclds \ find_meth_def_in_path_f ctxclds m)" lemma [simp]: "(find_meth_def P ty m ctxmd_opt) = (find_meth_def_f P ty m = ctxmd_opt)" apply(rule) apply(induct rule: find_meth_def.induct) apply(simp add: find_meth_def_f_def)+ apply(clarsimp simp add: find_meth_def_f_def split: option.splits) apply(simp add: fmd_nullI) apply(simp add: fmd_optI) done primrec lift_opts :: "'a option list \ 'a list option" where "lift_opts [] = Some []" | "lift_opts (opt#opts) = (case opt of None \ None | Some v \ (case lift_opts opts of None \ None | Some vs \ Some (v#vs)))" definition mtype_f :: "P \ ty \ meth \ mty option" where "mtype_f P ty m = (case find_meth_def_f P ty m of None \ None | Some (ctx, md) \ (case md of meth_def_def ms mb \ (case ms of meth_sig_def cl m' vds \ (case find_type_f P ctx cl of None \ None | Some ty' \ (case lift_opts (map (\vd. case vd of vd_def clk vark \ find_type_f P ctx clk) vds) of None \ None | Some tys \ Some (mty_def tys ty'))))))" lemma lift_opts_ind[rule_format]: "(\x\set list. (\(cl, var, ty). find_type_f P ctx cl = Some ty) x) \ lift_opts (map (case_vd (\clk vark. find_type_f P ctx clk) \ (\(cl, var, ty). vd_def cl var)) list) = Some (map (\(cl, var, ty). ty) list)" by (induct list, auto) lemma find_md_m_match'[rule_format]: "find_meth_def_in_list_f mds m = Some (meth_def_def (meth_sig_def cl m' vds) mb) \ m' = m" apply(induct mds) apply(simp) apply(clarsimp split: meth_def.splits meth_sig.splits) done lemma find_md_m_match: "find_meth_def_in_path_f path m = Some (ctx, meth_def_def (meth_sig_def cl m' vds) mb) \ m' = m" apply(induct path) apply(simp) apply(clarsimp split: option.splits) by(rule find_md_m_match') lemma vds_map_length: "length (map (case_vd (\clk vark. find_type_f P ctx clk)) vds) = length vds" by (induct vds, auto) lemma lift_opts_length[rule_format]: "\tys. lift_opts ty_opts = Some tys \ length ty_opts = length tys" apply(induct ty_opts) apply(simp) by(clarsimp split: option.splits) lemma vds_tys_length_eq[rule_format]: "lift_opts (map (case_vd (\clk vark. find_type_f P ctx clk)) vds) = Some tys \ length vds = length tys" apply(rule) apply(drule lift_opts_length) apply(simp add: vds_map_length) done lemma vds_tys_length_eq'[rule_format]: "\tys. length vds = length tys \ vds = map (\(cl, var, ty). vd_def cl var) (map (\(vd, ty). case vd of vd_def cl var \ (cl, var, ty)) (zip vds tys))" apply(induct vds) apply(simp) apply(clarsimp) apply(case_tac a) apply(clarsimp) apply(case_tac tys) apply(simp) apply(clarsimp) done lemma vds_tys_length_eq''[rule_format]: "\vds. length vds = length tys \ tys = map ((\(cl, var, ty). ty) \ (\(vd, ty). case vd of vd_def cl var \ (cl, var, ty))) (zip vds tys)" apply(induct tys) apply(simp) apply(clarsimp) apply(case_tac vds) apply(clarsimp) apply(clarsimp) apply(split vd.splits) apply(simp) done lemma lift_opts_find_type[rule_format]: "\tys. lift_opts (map (case_vd (\clk vark. 
find_type_f P ctx clk)) vds) = Some tys \ (\(vd, ty) \ set (zip vds tys). case vd of vd_def cl var \ find_type_f P ctx cl = Some ty)" apply(induct vds) apply(simp) apply(clarsimp split: vd.splits option.splits) apply(rename_tac cl' var) apply(drule_tac x = "(vd_def cl' var, b)" in bspec, simp) apply(force) done lemma [simp]: "(mtype P ty m mty) = (mtype_f P ty m = Some mty)" apply(rule) apply(induct rule: mtype.induct) apply(clarsimp simp add: mtype_f_def) apply(split option.splits) apply(rule) apply(clarsimp) apply(rule_tac x = "map (\(cl, var, ty). ty) cl_var_ty_list" in exI) apply(simp add: lift_opts_ind) apply(clarsimp) apply(simp add: lift_opts_ind) apply(clarsimp simp add: mtype_f_def split: option.splits meth_def.splits meth_sig.splits) apply(rename_tac ctx mb cl m' vds ty' tys) apply(rule_tac ctx = ctx and cl = cl and meth_body = mb and ty' = ty' and cl_var_ty_list = "map (\(vd, ty). case vd of vd_def cl var \ (cl, var, ty)) (zip vds tys)" and meth_def = "meth_def_def (meth_sig_def cl m' vds) mb" in mtypeI[simplified]) apply(simp) apply(clarsimp) apply(rule) apply(clarsimp simp add: find_meth_def_f_def split: option.splits) apply(simp add: find_md_m_match) apply(drule vds_tys_length_eq) apply(frule vds_tys_length_eq') apply(clarsimp) apply(simp) apply(clarsimp) apply(split vd.splits) apply(clarsimp) apply(drule lift_opts_find_type) apply(simp) apply(clarsimp) apply(clarsimp) apply(drule vds_tys_length_eq) apply(simp add: vds_tys_length_eq'') done definition is_sty_one :: "P \ ty \ ty \ bool option" where "is_sty_one P ty ty' = (case find_path_ty_f P ty of None \ None | Some ctxclds \ (case ty' of ty_top \ Some True | ty_def ctx' dcl' \ Some ((ctx', dcl') : set (map (\(ctx, cld). (ctx, class_name_f cld)) ctxclds)) ))" lemma class_name_mem_map[rule_format]: "(ctx, cld, class_name_f cld) \ set ctx_cld_dcl_list \ (ctx, class_name_f cld) \ ((\(ctx, cld). (ctx, class_name_f cld)) \ (\(ctx, cld, dcl). (ctx, cld))) ` set ctx_cld_dcl_list" by (induct ctx_cld_dcl_list, auto) lemma map_map_three: " ctxclds = map ((\(ctx, cld, dcl). (ctx, cld)) \ (\(ctx, cld). (ctx, cld, class_name_f cld))) ctxclds" by (induct ctxclds, auto) lemma mem_el_map[rule_format]: "(ctx, dcl) \ set ctxclds \ (ctx, class_name_f dcl) \ (\(ctx_XXX, cld_XXX, y). (ctx_XXX, y)) ` set (map (\(ctx, cld). (ctx, cld, class_name_f cld)) ctxclds)" by (induct ctxclds, auto) lemma [simp]: "(sty_one P ty ty') = (is_sty_one P ty ty' = Some True)" apply(rule) apply(induct rule: sty_one.induct) apply(simp add: is_sty_one_def) apply(clarsimp simp add: is_sty_one_def) apply(rename_tac ctx cld dcl) apply(drule_tac x = "(ctx, cld, dcl)" in bspec, simp) apply(clarsimp) using case_prod_conv image_iff apply fastforce apply(clarsimp simp add: is_sty_one_def split: option.splits ty.splits) apply(simp add: sty_objI) apply(rename_tac ctxclds ctx dcl) apply(rule_tac ctx_cld_dcl_list = "map (\(ctx, cld). (ctx, cld, class_name_f cld)) ctxclds" in sty_dclI[simplified]) apply(clarsimp) apply(rule map_map_three) apply(force) apply(rule mem_el_map, assumption) done lemma path_append[rule_format]: "find_path_rec_f P ctx cl path' = Some path \ \path''. path = path' @ path''" apply(induct rule: find_path_rec_f.induct) apply(clarsimp) apply(force split: if_split_asm option.splits) done lemma all_in_path_found'[rule_format]: "find_path_rec_f P ctx cl path' = Some path \ (\ctxcld \ set path. ctxcld \ set path' \ (\ctx' fqn'. 
find_cld_f P ctx' fqn' = Some ctxcld))" apply(induct rule: find_path_rec_f.induct) apply(clarsimp) apply(rule) apply(force split: if_split_asm option.splits) done lemma all_in_path_found: "\find_path_f P ctx cl = Some path; ctxcld \ set path\ \ \ctx' fqn'. find_cld_f P ctx' fqn' = Some ctxcld" by (unfold find_path_f_def, simp only: all_in_path_found'[of _ _ _ "[]", simplified]) lemma fpr_target_is_head': "find_path_rec_f P ctx cl path' = Some path \ (\fqn ctxcld. cl = cl_fqn fqn \ find_cld_f P ctx fqn = Some ctxcld \ (\path''. path = path' @ ctxcld # path''))" apply(induct_tac P ctx cl path' rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: if_split_asm option.splits) apply(case_tac "superclass_name_f b") apply(clarsimp) apply(clarsimp split: if_split_asm option.splits) apply(force) done lemma fpr_target_is_head: "find_path_f P ctx (cl_fqn fqn) = Some path \ \ctxcld. find_cld_f P ctx fqn = Some ctxcld \ (\path''. path = ctxcld # path'')" apply(unfold find_path_f_def) apply(frule fpr_target_is_head'[of _ _ _ "[]", THEN mp]) apply(clarsimp split: option.splits) done lemma fpr_sub_path': "find_path_rec_f P ctx cl path' = Some path \ (\fqn ctxcld path'' path_fqn. cl = cl_fqn fqn \ find_cld_f P ctx fqn = Some ctxcld \ find_path_rec_f P (fst ctxcld) (superclass_name_f (snd ctxcld)) path'' = Some path_fqn \ (\path'''. path_fqn = path'' @ path''' \ path = path' @ ctxcld # path'''))" apply(induct_tac P ctx cl path' rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: if_split_asm option.splits) apply(case_tac "superclass_name_f b") apply(simp add: find_path_f_def) apply(clarsimp split: if_split_asm option.splits) apply(frule_tac path = path in path_append) apply(clarsimp) apply(force) done lemma fpr_sub_path: "\find_path_f P ctx (cl_fqn fqn) = Some path; find_cld_f P ctx fqn = Some ctxcld; find_path_f P (fst ctxcld) (superclass_name_f (snd ctxcld)) = Some path'\ \ path = ctxcld # path'" by (unfold find_path_f_def, force intro: fpr_sub_path'[rule_format, of _ _ _ "[]" _ _ _ "[]", simplified]) lemma fpr_sub_path_simp: "\find_path_rec_f P ctx (superclass_name_f cld) path'' = Some path_fqn; find_cld_f P ctx fqn = Some (ctx, cld); acyclic_clds P; find_path_rec_f P ctx (superclass_name_f cld) (path' @ [(ctx, cld)]) = Some path\ \ \path'''. path_fqn = path'' @ path''' \ path = path' @ (ctx, cld) # path'''" apply(cut_tac P = P and ctx = ctx and cl = "cl_fqn fqn" and path' = path' and path = path in fpr_sub_path') apply(clarsimp split: option.splits if_split_asm) done lemma fpr_same_suffix'[rule_format]: "find_path_rec_f P ctx cl prefix = Some path \ (\suffix prefix'. path = prefix @ suffix \ find_path_rec_f P ctx cl prefix' = Some (prefix' @ suffix))" apply(induct_tac P ctx cl prefix rule: find_path_rec_f.induct) apply(clarsimp) apply(clarsimp split: option.splits) apply(frule path_append) apply(force) done lemma fpr_same_suffix: "find_path_rec_f P ctx cl prefix = Some path \ (\suffix prefix' suffix'. path = prefix @ suffix \ find_path_rec_f P ctx cl prefix' = Some (prefix' @ suffix') \ suffix = suffix')" apply(induct_tac P ctx cl prefix rule: find_path_rec_f.induct) apply(clarsimp) by (metis fpr_same_suffix' option.inject same_append_eq) lemma fpr_mid_path'[rule_format]: "find_path_rec_f P ctx cl path' = Some path \ (\ctxcld \ set path. ctxcld \ set path' \ (\path_fqn. find_path_rec_f P (fst ctxcld) (cl_fqn (fqn_def (class_name_f (snd ctxcld)))) path'' = Some path_fqn \ (\path'''. path_fqn = path'' @ path''' \ (\path_rest. 
path = path_rest @ path'''))))" apply(induct_tac P ctx cl path' rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: option.splits) apply(frule find_cld_name_eq) apply(clarsimp) apply(rename_tac path' ctx' cld' cld'' ctx'' path''' cl fds mds) apply(subgoal_tac "find_path_rec_f P ctx' (superclass_name_f cld') (path' @ [(ctx', cld')]) = Some path \ (\ctxcld\set path. ctxcld = (ctx', cld') \ ctxcld \ set path' \ (\path_fqn. case_option None (case_prod (\ctx' cld. find_path_rec_f P ctx' (superclass_name_f cld) (path'' @ [(ctx', cld)]))) (find_cld_f P (fst ctxcld) (fqn_def (class_name_f (snd ctxcld)))) = Some path_fqn \ (\path'''. path_fqn = path'' @ path''' \ (\path_rest. path = path_rest @ path'''))))") apply(erule impE) apply simp apply(drule_tac x = "(ctx'', cld'')" in bspec, simp) apply(clarsimp) apply(simp add: superclass_name_f_def) apply(case_tac cld') apply(rename_tac dcl' cl' fds' mds') apply(clarsimp simp add: class_name_f_def) apply(case_tac fqn) apply(rename_tac dcl'') apply(clarsimp) apply(frule find_cld_name_eq) apply(clarsimp) apply(frule path_append) apply(frule_tac path = "path'' @ path'''" in path_append) apply(clarsimp) apply(rule_tac x = path' in exI) apply(clarsimp) apply(frule_tac suffix = path''a and prefix' = "path'' @ [(ctx', cld_def dcl' cl' fds' mds')]" and suffix' = path''aa in fpr_same_suffix[rule_format]) apply(simp) apply(force) apply(simp) done lemma fpr_mid_path: "\find_path_f P ctx cl = Some path; (ctx', cld') \ set path; find_path_f P ctx' (cl_fqn (fqn_def (class_name_f cld'))) = Some path'\ \ \path''. path = path'' @ path'" apply(cut_tac P = P and ctx = ctx and cl = cl and path' = "[]" and path = path and ctxcld = "(ctx', cld')" and path'' = "[]" in fpr_mid_path') apply(unfold find_path_f_def, assumption) apply(assumption) apply(simp) done lemma fpr_first_in_path'[rule_format]: "find_path_rec_f P ctx cl path' = Some path \ (\fqn ctxcld. cl = cl_fqn fqn \ find_cld_f P ctx fqn = Some ctxcld \ ctxcld \ set path)" apply(case_tac cl) apply(simp) apply(clarsimp) apply(drule path_append) apply(force) done lemma fpr_first_in_path: "\find_path_f P ctx (cl_fqn fqn) = Some path; find_cld_f P ctx fqn = Some ctxcld\ \ ctxcld \ set path" by (unfold find_path_f_def, force intro: fpr_first_in_path'[of _ _ _ "[]", simplified]) lemma cld_for_path: "find_path_f P ctx (cl_fqn fqn) = Some path \ \ctxcld. find_cld_f P ctx fqn = Some ctxcld" apply(unfold find_path_f_def) apply(clarsimp split: if_split_asm option.splits) done lemma ctx_cld_ctx_dcl[rule_format]: "(ctx, cld_def dcl cl fds mds) \ set path \ (ctx, dcl) \ (\(ctx, cld). (ctx, class_name_f cld)) ` set path" by (induct path, auto simp add: class_name_f_def) lemma ctx_dcl_ctx_cld[rule_format]: "(ctx, dcl) \ (\(ctx, cld). (ctx, class_name_f cld)) ` set path \ (\cl fds mds. (ctx, cld_def dcl cl fds mds) \ set path)" apply(induct path) apply(simp) apply(auto) apply(case_tac b) apply(force simp add: class_name_f_def) done lemma ctx_dcl_mem_path: "find_path_f P ctx (cl_fqn (fqn_def dcl)) = Some path \ (ctx, dcl) \ (\(ctx, cld). 
(ctx, class_name_f cld)) ` set path" apply(frule cld_for_path) apply(erule exE) apply(frule fpr_first_in_path) apply(assumption) apply(frule find_cld_name_eq) apply(elim exE) apply(clarsimp simp add: ctx_cld_ctx_dcl) done lemma sty_reflexiveI: "is_sty_one P ty ty' = Some True \ is_sty_one P ty ty = Some True" apply(simp add: is_sty_one_def split: option.splits) apply(case_tac ty) apply(simp) apply(clarsimp) apply(drule ctx_dcl_mem_path) apply(force) done lemma sty_transitiveI: "\is_sty_one P ty ty' = Some True; is_sty_one P ty' ty'' = Some True\ \ is_sty_one P ty ty'' = Some True" apply(clarsimp simp add: is_sty_one_def split: ty.splits option.splits) apply(rename_tac path ctx dcl path' ctx' dcl') apply(case_tac ty) apply(simp) apply(clarsimp) apply(rename_tac ctx dcl ctx' cld' ctx'' cld'') apply(frule fpr_mid_path) apply(simp) apply(simp) apply(force) done definition is_sty_many :: "P \ tys \ tys \ bool option" where "is_sty_many P tys tys' = (if length tys \ length tys' then None else (case lift_opts (map (\(ty, ty'). is_sty_one P ty ty') (zip tys tys')) of None \ None | Some bools \ Some (list_all id bools)))" lemma lift_opts_exists: "\x\set ty_ty'_list. (\(ty, ty'). is_sty_one P ty ty' = Some True) x \ \bools. lift_opts (map (\(ty, ty'). is_sty_one P ty ty') ty_ty'_list) = Some bools" by (induct ty_ty'_list, auto) lemma lift_opts_all_true[rule_format]: "\bools. (\x\set ty_ty'_list. (\(ty, ty'). is_sty_one P ty ty' = Some True) x) \ lift_opts (map (\(ty, ty'). is_sty_one P ty ty') ty_ty'_list) = Some bools \ list_all id bools" by (induct ty_ty'_list, auto split: option.splits) lemma tys_tys'_list: "\bools ty ty'. \lift_opts (map (\(x, y). is_sty_one P x y) tys_tys'_list) = Some bools; length tys = length tys'; list_all id bools; (ty, ty') \ set tys_tys'_list\ \ is_sty_one P ty ty' = Some True" apply(induct tys_tys'_list) apply(simp) apply(clarsimp split: option.splits) by force lemma [simp]: "(sty_many P tys tys') = (is_sty_many P tys tys' = Some True)" apply(rule) apply(erule sty_many.cases) apply(clarsimp simp add: is_sty_many_def split: option.splits) apply(rule) apply(simp add: lift_opts_exists) apply(force intro: lift_opts_all_true) apply(clarsimp simp add: is_sty_many_def split: option.splits if_split_asm) apply(rule_tac ty_ty'_list = "zip tys tys'" in sty_manyI) apply(simp add: map_fst_zip[THEN sym]) apply(simp add: map_snd_zip[THEN sym]) apply(clarsimp) apply(simp add: tys_tys'_list) done (* TODO: here will go all the other definitions for functions equivalent to relations concerning well-formedness *) definition tr_x :: "T \ x \ x" where "tr_x T x = (case T x of None \ x | Some x' \ x')" definition tr_var :: "T \ var \ var" where "tr_var T var = (case tr_x T (x_var var) of x_this \ var | x_var var' \ var')" primrec tr_s_f :: "T \ s \ s" and tr_ss_f :: "T \ s list \ s list" where "tr_s_f T (s_block ss) = s_block (tr_ss_f T ss)" | "tr_s_f T (s_ass var x) = s_ass (tr_var T var) (tr_x T x)" | "tr_s_f T (s_read var x f) = s_read (tr_var T var) (tr_x T x) f" | "tr_s_f T (s_write x f y) = s_write (tr_x T x) f (tr_x T y)" | "tr_s_f T (s_if x y s1 s2) = s_if (tr_x T x) (tr_x T y) (tr_s_f T s1) (tr_s_f T s2)" | "tr_s_f T (s_call var x m ys) = s_call (tr_var T var) (tr_x T x) m (map (tr_x T) ys)" | "tr_s_f T (s_new var ctx cl) = s_new (tr_var T var) ctx cl" | "tr_ss_f T [] = []" | "tr_ss_f T (s#ss) = tr_s_f T s # tr_ss_f T ss" lemma [simp]: "(\x\set s_s'_list. 
case x of (s_XXX, s_') \ tr_s T s_XXX s_' \ tr_s_f T s_XXX = s_') \ tr_ss_f T (map fst s_s'_list) = map snd s_s'_list" by (induct s_s'_list, auto) lemma [simp]: "(\x\set y_y'_list. case_prod (\y_XXX. (=) (case T y_XXX of None \ y_XXX | Some x' \ x')) x) \ map (tr_x T) (map fst y_y'_list) = map snd y_y'_list" apply(induct y_y'_list) apply(simp) apply(clarsimp) apply(simp only: tr_x_def) done lemma set_zip_tr[simp]: "(s, s') \ set (zip ss (tr_ss_f T ss)) \ s' = tr_s_f T s" by (induct ss, auto) lemma [iff]: "length ss = length (tr_ss_f T ss)" by (induct ss, auto) lemma tr_ss_map: "tr_ss_f T (map fst s_s'_list) = map snd s_s'_list \ (\x\set s_s'_list. case_prod (tr_s T) x) \ (a, b) \ set s_s'_list \ tr_s T a (tr_s_f T a)" apply(induct s_s'_list) by auto lemma tr_f_to_rel: "\s'. tr_s_f T s = s' \ tr_s T s s'" apply(induct s rule: tr_s_f.induct) apply(simp) apply(clarsimp) apply(rule tr_s_var_assignI) apply(clarsimp simp add: tr_x_def tr_var_def split: option.splits) apply(simp add: tr_x_def) apply(clarsimp) apply(rule tr_s_field_readI) apply(clarsimp simp add: tr_x_def tr_var_def split: option.splits) apply(simp add: tr_x_def) apply(clarsimp) apply(rule tr_s_field_writeI) apply(simp add: tr_x_def) apply(simp add: tr_x_def) apply(clarsimp simp only: tr_s_f.simps) apply(rule tr_s_ifI) apply(simp only: tr_x_def) apply(simp only: tr_x_def) apply(simp) apply(simp) apply(clarsimp) apply(rule tr_s_newI) apply(clarsimp simp add: tr_x_def tr_var_def split: option.splits) apply(clarsimp) apply(rename_tac var x m ys) apply(cut_tac T = T and var = var and var' = "tr_var T var" and x = x and x' = "tr_x T x" and y_y'_list = "zip ys (map (tr_x T) ys)" and meth = m in tr_s_mcallI) apply(clarsimp simp add: tr_x_def tr_var_def split: option.splits) apply(simp only: tr_x_def) apply(clarsimp simp add: set_zip tr_x_def) apply(simp) apply(simp) apply(cut_tac T = T and s_s'_list = "[]" in tr_s_blockI[simplified]) apply(simp) apply(clarsimp) apply(clarsimp) apply(rename_tac s ss) apply(cut_tac T = T and s_s'_list = "zip (s # ss) (tr_s_f T s # tr_ss_f T ss)" in tr_s_blockI[simplified]) apply(clarsimp) apply(rename_tac x x') apply(frule set_zip_tr[THEN mp]) apply(clarsimp) apply(erule in_set_zipE) apply(erule_tac ?a1.0="T" and ?a2.0="s_block ss" and ?a3.0="s_block (tr_ss_f T ss)" in tr_s.cases) apply(clarsimp) apply(rule tr_ss_map[THEN mp], force) apply(clarsimp)+ done lemma tr_rel_f_eq: "((tr_s T s s') = (tr_s_f T s = s'))" apply(rule) apply(erule tr_s.induct) apply(force simp add: tr_x_def tr_var_def split: option.splits)+ apply(cut_tac T = T and s = s in tr_f_to_rel) apply(simp) done end diff --git a/thys/LightweightJava/Lightweight_Java_Proof.thy b/thys/LightweightJava/Lightweight_Java_Proof.thy --- a/thys/LightweightJava/Lightweight_Java_Proof.thy +++ b/thys/LightweightJava/Lightweight_Java_Proof.thy @@ -1,1813 +1,1813 @@ (* Title: Lightweight Java, the proof Authors: Rok Strnisa , 2006 Matthew Parkinson , 2006 Maintainer: *) theory Lightweight_Java_Proof imports Lightweight_Java_Equivalence "HOL-Library.Infinite_Set" begin lemmas wf_intros = wf_object_wf_varstate_wf_heap_wf_config_wf_stmt_wf_meth_wf_class_common_wf_class_wf_program.intros [simplified] lemmas wf_induct = wf_object_wf_varstate_wf_heap_wf_config_wf_stmt_wf_meth_wf_class_common_wf_class_wf_program.induct [simplified] lemmas wf_config_normalI = wf_object_wf_varstate_wf_heap_wf_config_wf_stmt_wf_meth_wf_class_common_wf_class_wf_program.wf_allI [simplified] lemmas wf_objectE = wf_object.cases[simplified] lemmas wf_varstateE = 
wf_varstate.cases[simplified] lemmas wf_heapE = wf_heap.cases[simplified] lemmas wf_configE = wf_config.cases[simplified] lemmas wf_stmtE = wf_stmt.cases[simplified] lemmas wf_methE = wf_meth.cases[simplified] lemmas wf_class_cE = wf_class_common.cases[simplified] lemmas wf_classE = wf_class.cases[simplified] lemmas wf_programE = wf_program.cases[simplified] lemma wf_subvarstateI: "\wf_varstate P \ H L; \ x' = Some ty; wf_object P H (Some v) (Some ty)\ \ wf_varstate P \ H (L (x' \ v))" apply(erule wf_varstateE) apply(rule wf_varstateI) apply(simp) apply(clarsimp) done lemma finite_super_varstate: "finite (dom ((L ++ map_of (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list))(x' \ v_oid oid))) = finite (dom L)" apply(induct y_cl_var_var'_v_list) apply(clarsimp) apply(simp add: dom_def) apply(subgoal_tac "{a. a \ x' \ (\y. L a = Some y)} = {a. a = x' \ (\y. L a = Some y)}") apply(simp add: Collect_disj_eq) apply(force) apply(clarsimp simp add: map_add_def dom_def split: if_split_asm) apply(subgoal_tac "{aa. aa \ x_var a \ aa \ x' \ (\y. case_option (L aa) Some (map_of (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list) aa) = Some y)} = {aa. aa = x_var a \ (aa = x' \ (\y. case_option (L aa) Some (map_of (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list) aa) = Some y))}") apply(simp add: Collect_disj_eq) apply(force) done lemma fst_map_elem: "(y_k, ty_k, var_k, var'_k, v_k) \ set y_ty_var_var'_v_list \ x_var var'_k \ fst ` (\(y, ty, var, var', v). (x_var var', ty)) ` set y_ty_var_var'_v_list" by (force) lemma map_add_x_var[THEN mp]: "var' \ set (map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list) \ x_var var' \ set (map (\(y, cl, var, var', v). x_var var) y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma map_of_map_and_x[simp]: "\\ x = Some ty_x; L x' = None; \x\set y_cl_var_var'_v_list. (\(y, cl, var, var', v). L (x_var var') = None) x; \x\Map.dom \. wf_object Pa H (L x) (\ x)\ \ (if x = x' then Some ty else (\ ++ map_of (map (\((y, cl, var, var', v), y', ty). (x_var var', ty)) (zip y_cl_var_var'_v_list y_ty_list))) x) = Some ty_x" apply(subgoal_tac "x \ x'") apply(subgoal_tac "\(y, cl, var, var', v) \ set y_cl_var_var'_v_list. x_var var' \ x") apply(case_tac "map_of (map (\((y, cl, var, var', v), y', ty). (x_var var', ty)) (zip y_cl_var_var'_v_list y_ty_list)) x") apply(clarsimp simp add: map_add_def) apply(clarsimp) apply(drule map_of_SomeD) apply(clarsimp) apply(rename_tac y cl var var' v y' ty) apply(drule_tac x = "(y, cl, var, var', v)" in bspec) apply(force simp add: set_zip) apply(drule_tac x = "x_var var'" in bspec, simp add: domI) apply(erule wf_objectE, simp+) apply(force elim: wf_objectE)+ done lemma wf_stmt_in_G': "(L x' = None \ (\x\set y_cl_var_var'_v_list. (\(y, cl, var, var', v). L (x_var var') = None) x) \ (\x\dom \. wf_object P H (L x) (\ x)) \ wf_stmt P \ s \ wf_stmt P ((\ ++ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)))(x' \ ty)) s) \ (L x' = None \ (\x\set y_cl_var_var'_v_list. (\(y, cl, var, var', v). L (x_var var') = None) x) \ (\x\dom \. wf_object P H (L x) (\ x)) \ (\s\set ss. wf_stmt P \ s) \ (\s\set ss. wf_stmt P ((\ ++ map_of (map (\((y, cl, var, var', v), y', y). 
(x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)))(x' \ ty)) s))" apply(rule tr_s_f_tr_ss_f.induct) apply(clarsimp, erule wf_stmtE, simp_all, simp add: wf_blockI) apply(clarify, erule wf_stmtE, simp_all) apply(erule sty_option.cases, force intro: wf_var_assignI sty_optionI) apply(clarify, erule wf_stmtE, simp_all) apply(erule sty_option.cases, force intro: wf_field_readI sty_optionI) apply(clarify, erule wf_stmtE, simp_all) apply(erule sty_option.cases, force intro: wf_field_writeI sty_optionI) apply(clarify, erule wf_stmtE, simp_all) apply(clarsimp) apply(rule wf_ifI) apply(erule disjE) apply(rule disjI1, erule sty_option.cases, force intro: sty_optionI) apply(rule disjI2, erule sty_option.cases, force intro: sty_optionI) apply(assumption+) apply(clarify, erule wf_stmtE, simp_all) apply(erule sty_option.cases, force intro: wf_newI sty_optionI) apply(clarify, erule wf_stmtE, simp_all) apply(rule wf_mcallI, simp, simp, simp) apply(clarsimp split del: if_split) apply(rename_tac y_k ty_k) apply(drule_tac x = "(y_k, ty_k)" in bspec, simp, clarify) apply(erule_tac ?a3.0 = "Some ty_k" in sty_option.cases) apply(force intro: sty_optionI) apply(clarify) apply(erule sty_option.cases) apply(rule sty_optionI, simp+) done lemma map_three_in_four: "var_k \ set (map (\(y, cl, var, u). var) y_cl_var_var'_v_list) \ (\y cl u. (y, cl, var_k, u) \ set y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma map_var: "\(cl_k, var_k, ty_k) \ set cl_var_ty_list; map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list\ \ \y_k cl'_k u_k. (y_k, cl'_k, var_k, u_k) \ set y_cl_var_var'_v_list" apply(subgoal_tac "var_k \ set (map (\(y, cl, var, u). var) y_cl_var_var'_v_list)") by (force simp add: map_three_in_four)+ lemma length_one_in_two: "length y_ty_list = length (map fst y_ty_list)" by (induct y_ty_list, auto) lemma length_two_in_two: "length y_ty_list = length (map snd y_ty_list)" by (induct y_ty_list, auto) lemma length_two_in_three: "length cl_var_ty_list = length (map (\(cl, var, ty). var) cl_var_ty_list)" by (induct cl_var_ty_list, auto) lemma length_three_in_three: "length cl_var_ty_list = length (map (\(cl, var, ty). ty) cl_var_ty_list)" by (induct cl_var_ty_list, auto) lemma length_three_in_four: "length y_cl_var_var'_v_list = length (map (\(y, cl, var, u). var) y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma length_one_in_five: "length y_cl_var_var'_v_list = length (map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma length_three_in_five: "length y_cl_var_var'_v_list = length (map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma map_length_list: "length (map (\((y, y'), yy, ty). (y', ty)) list) = length list" by (induct list, auto) lemma map_length_y: "map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list \ length y_cl_var_var'_v_list = length y_ty_list" by (simp only: length_one_in_five length_one_in_two) lemma map_length_y': "map fst y_y'_list = map fst y_ty_list \ length y_y'_list = length y_ty_list" by (simp only: length_one_in_two length_two_in_two) lemma map_length_var: "map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list \ length y_cl_var_var'_v_list = length cl_var_ty_list" by (simp only: length_three_in_five length_two_in_three) lemma map_length_ty: "map (\(cl, var, ty). 
ty) cl_var_ty_list = map snd y_ty_list \ length cl_var_ty_list = length y_ty_list" by (simp only: length_three_in_three length_two_in_two) lemma map_length_var_ty: "\map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list\ \ length y_cl_var_var'_v_list = length y_ty_list" apply(rule_tac s = "length cl_var_ty_list" in trans) apply (simp only: length_three_in_four length_two_in_three) apply(simp only: length_three_in_three length_two_in_two) done lemma fun_eq_fst: "(fst \ (\((y, cl, var, var', v), y', y). (x_var var', y))) = (\((y, cl, var, var', v), y', y). (x_var var'))" by (simp add: fun_eq_iff) lemma fst_zip_eq: "length y_cl_var_var'_v_list = length y_ty_list \ (map fst (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) = (map (\(y, cl, var, var', v). x_var var') y_cl_var_var'_v_list)" apply(simp) apply(simp add: fun_eq_fst) apply(rule nth_equalityI) apply(force) apply(clarsimp) apply(frule nth_mem) apply(subgoal_tac "\x. (y_cl_var_var'_v_list ! i) = x") apply(force) apply(rule_tac x = "y_cl_var_var'_v_list ! i" in exI) apply(simp) done lemma distinct_x_var: "distinct (map (\(y, cl, var, var', v). x_var var) y_cl_var_var'_v_list) = distinct (map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list)" apply (induct y_cl_var_var'_v_list) by (force simp add: contrapos_np)+ lemma distinct_x_var': "distinct (map (\(y, cl, var, var', v). x_var var') y_cl_var_var'_v_list) = distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list)" apply(induct y_cl_var_var'_v_list) by (force simp add: contrapos_np)+ lemma map_fst_two: "map fst (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) = map (\(y, cl, var, var', v). x_var var) y_cl_var_var'_v_list" by (induct y_cl_var_var'_v_list, auto) lemma map_fst_two': "map fst (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list) = map (\(y, cl, var, var', y). x_var var') y_cl_var_var'_v_list" by (induct y_cl_var_var'_v_list, auto) lemma map_fst_var': "distinct (map fst (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list)) = distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list)" by (simp only: map_fst_two' distinct_x_var') lemma distinct_var: "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list\ \ distinct (map fst (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list))" by (simp only: map_fst_two distinct_x_var) lemma distinct_var': "\map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list\ \ distinct (map fst (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) = distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list)" by (simp only: map_length_var_ty fst_zip_eq distinct_x_var') lemma weaken_list_var: "map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(y, cl, var, u). var) y_cl_var_var'_v_list" by (induct y_cl_var_var'_v_list, auto) lemma weaken_list_fd: "map (\(y, cl, var, var', v). vd_def cl var) list = map (\(y, cl, var, u). vd_def cl var) list" by (induct list, auto) lemma weaken_list_y: "map (\(y, cl, var, var', v). 
y) y_cl_var_var'_v_list = map fst y_cl_var_var'_v_list" by (induct y_cl_var_var'_v_list, auto) lemma wf_cld_from_lu: "\wf_program P; find_cld P ctx fqn (Some (ctx', cld'))\ \ wf_class P cld'" apply(clarsimp) apply(drule find_to_mem) apply(erule wf_programE) by (simp add: member_def) lemma meth_def_in_set[rule_format]: "find_meth_def_in_list meth_defs meth (Some meth_def) \ meth_def \ set meth_defs" apply(clarsimp) apply(induct meth_defs) by (auto split: meth_def.splits meth_sig.splits if_split_asm) lemma find_meth_def_in_list_mem[rule_format, simp]: "find_meth_def_in_list_f meth_defs meth = Some meth_def \ meth_def \ set meth_defs" apply(induct meth_defs) apply(auto split: meth_def.splits meth_sig.splits) done lemma find_meth_def_in_path_ex_cld[rule_format]: "find_meth_def_in_path_f ctxclds meth = Some (ctx, meth_def) \ (\cld meth_defs. (ctx, cld) \ set ctxclds \ class_methods_f cld = meth_defs \ meth_def \ set meth_defs)" apply(induct ctxclds) apply(simp) apply(clarsimp) apply(clarsimp split: option.splits) apply(rule_tac x = cld in exI) apply(force) apply(force) done lemma map_ctx_cld_dcl_two[simp]: "ctxclds = map (\(ctx, cld, dcl). (ctx, cld)) (map (\(ctx, cld). (ctx, cld, class_name_f cld)) ctxclds)" by (induct ctxclds, auto) lemma clds_in_path_exist: "find_path_f P ctx' fqn = Some path \ (\(ctx, cld) \ set path. cld \ set P)" apply(clarify) apply(drule all_in_path_found) apply(simp) apply(clarsimp) apply(drule find_to_mem) apply(simp add: member_def) done lemma wf_meth_defs_in_wf_class[rule_format]: "meth_def \ set (class_methods_f cld) \ wf_class P cld \ wf_meth P (ty_def ctx_def (class_name_f cld)) meth_def" apply(clarsimp) apply(erule wf_classE) apply(clarsimp simp: class_methods_f_def) apply(erule wf_class_cE) apply(clarsimp simp: class_name_f_def) apply(drule(1)bspec, simp) done lemma map_ctxclds: "ctxclds = map ((\(ctx_XXX, cld_XXX, dcl_XXX). (ctx_XXX, cld_XXX)) \ (\(ctx, cld). (ctx, cld, class_name_f cld))) ctxclds" by (induct ctxclds, auto) lemma wf_method_from_find'[rule_format]: "wf_program P \ find_path_ty_f P (ty_def ctxa dcl) = Some ctxclds \ find_meth_def_in_path_f ctxclds meth = Some (ctx, meth_def) \ (\dcla. is_sty_one P (ty_def ctxa dcl) (ty_def ctx dcla) = Some True \ wf_meth P (ty_def ctx_def dcla) meth_def)" apply(clarsimp) apply(drule find_meth_def_in_path_ex_cld) apply(clarsimp) apply(rule_tac x = "class_name_f cld" in exI) apply(rule) apply(rule_tac ctx_cld_dcl_list = "map (\(ctx, cld). (ctx, cld, class_name_f cld)) ctxclds" in sty_dclI[simplified]) apply(simp add: map_ctxclds) apply(clarsimp) apply(force) apply(drule clds_in_path_exist) apply(simp) -apply(erule wf_programE) apply(clarsimp) +apply(erule wf_programE) apply(drule_tac x = "(ctx, cld)" in bspec, simp) apply(drule_tac x = cld in bspec, simp) apply(simp add: wf_meth_defs_in_wf_class) done (* fix the ctx problem here\ *) lemma wf_method_from_find: "\wf_program P; find_meth_def P ty_x_d meth (Some (ctx, meth_def))\ \ \dcl. sty_one P ty_x_d (ty_def ctx dcl) \ wf_meth P (ty_def ctx_def dcl) meth_def" apply(erule find_meth_def.cases, clarsimp+) apply(induct ty_x_d) apply(simp) apply(rename_tac ctx' dcl' ctxclds) apply(cut_tac P = P and ctxa = ctx' and dcl = dcl' and ctxclds = ctxclds and meth = meth and ctx = ctx and meth_def = meth_def in wf_method_from_find') apply(simp) apply(simp) done lemma find_type_lift_opts[rule_format]: "(\x\set cl_var_ty_list. (\(cl_k, var_k, ty_k). find_type_f P ctx cl_k = Some ty_k) x) \ lift_opts (map (case_vd (\clk vark. find_type_f P ctx clk)) (map (\(cl_k, var_k, ty_k).
vd_def cl_k var_k) cl_var_ty_list)) = Some (map (\(cl, var, ty). ty) cl_var_ty_list)" by (induct cl_var_ty_list, auto) lemma find_md_in_pre_path[rule_format]: "find_meth_def_in_path_f path m = Some ctxmd \ (\path'. find_meth_def_in_path_f (path @ path') m = Some ctxmd)" by (induct path, auto split: option.splits) lemma lift_opts_map: "\x\set cl_var_ty_list. (\(cl_k, var_k, ty_k). find_type_f P ctx cl_k = Some ty_k) x \ lift_opts (map (case_vd (\clk vark. find_type_f P ctx clk)) (map (\(cl_k, var_k, ty_k). vd_def cl_k var_k) cl_var_ty_list)) = Some (map (\(cl, var, ty). ty) cl_var_ty_list)" by (induct cl_var_ty_list, auto) lemma m_in_list[rule_format]: "(\x\set meth_def_meth_list. case_prod (\meth_def. (=) (method_name_f meth_def)) x) \ find_meth_def_in_list_f (map fst meth_def_meth_list) m = Some md \ m \ snd ` set meth_def_meth_list" by (induct meth_def_meth_list, auto simp add: method_name_f_def split: meth_def.splits meth_sig.splits) lemma m_image_eq[rule_format]: "meth_def_def (meth_sig_def cl m list2) meth_body \ set meth_defs \ m \ case_meth_def (\meth_sig meth_body. case meth_sig of meth_sig_def cl meth vds \ meth) ` set meth_defs" by (induct meth_defs, auto) lemma fmdip_to_mip[rule_format]: "find_meth_def_in_path_f path m = Some ctxmd \ m \ set (methods_in_path_f (map snd path))" apply(induct path) apply(simp) apply(clarsimp simp add: class_methods_f_def split: option.splits cld.splits) apply(rename_tac aa bb) apply(case_tac aa) apply(rename_tac meth_sig meth_body) apply(case_tac meth_sig) apply(clarsimp) apply(frule find_md_m_match') apply(clarsimp) apply(frule find_meth_def_in_list_mem) apply(frule m_image_eq) apply(assumption) done lemma lift_opts_for_all[rule_format]: "lift_opts (map (case_vd (\clk vark. find_type_f P ctx clk)) (map (\(cl, var, ty). vd_def cl var) cl_var_ty_list)) = None \ (\x\set cl_var_ty_list. (\(cl, var, ty). find_type_f P ctx_def cl = Some ty) x) \ False" apply(induct cl_var_ty_list) apply(simp) apply(clarsimp) apply(case_tac ctx) apply(simp) done lemma mty_preservation'''': "\find_cld_f P ctx (fqn_def dcl') = Some (ctx, cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds'); find_cld_f P ctx (fqn_def dcl'') = Some (ctx, cld_def dcl'' cl'' fds'' mds''); find_path_rec_f P ctx cl'' [(ctx, cld_def dcl'' cl'' fds'' mds'')] = Some path; find_meth_def_in_path_f path m = Some (ctx'', meth_def_def (meth_sig_def cl_r m' vds) mb); find_type_f P ctx'' cl_r = Some ty_r; lift_opts (map (case_vd (\clk vark. find_type_f P ctx'' clk)) vds) = Some tys; lift_opts (map (case_vd (\clk vark. 
find_type_f P ctx' clk)) vds') = Some tys'; find_type_f P ctx' cl_r' = Some ty_r'; find_meth_def_in_path_f ((ctx, cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds') # path) m = Some (ctx', meth_def_def (meth_sig_def cl_r' m'' vds') mb'); wf_program P\ \ tys' = tys \ ty_r' = ty_r" using [[hypsubst_thin = true]] apply(clarsimp simp add: class_methods_f_def split: option.splits) apply(drule wf_cld_from_lu) apply(simp) apply(erule wf_classE) apply(erule wf_class_cE) apply(clarsimp) apply(subgoal_tac "m \ snd ` set meth_def_meth_list") apply(clarsimp) apply(rename_tac md_m m) apply(drule_tac x = "(md_m, m)" in bspec, simp)+ apply(clarsimp split: option.splits) apply(case_tac ctx') apply(clarsimp) apply(case_tac md_m) apply(rename_tac meth_sig meth_body) apply(case_tac meth_sig) apply(rename_tac ms mb_m cl_r_m m vds_m) apply(clarsimp simp add: method_name_f_def) apply(erule_tac Q = "tys' = tys \ ty_r' \ ty_r" in contrapos_pp) apply(clarsimp) apply(clarsimp simp add: methods_f_def find_path_f_def superclass_name_f_def split: option.splits if_split_asm) apply(frule fmdip_to_mip) apply(clarsimp) apply(rename_tac m mty mty') apply(drule_tac x = "(m, mty, mty')" in bspec, simp)+ apply(clarsimp) apply(frule_tac f = snd in imageI) apply(simp) apply(thin_tac "m \ snd ` set meth_def_meth_list") apply(clarsimp) apply(clarsimp simp add: mtype_f_def split: option.splits meth_def.splits meth_sig.splits) apply(clarsimp simp add: find_meth_def_f_def find_path_f_def superclass_name_f_def split: option.splits if_split_asm) apply(frule path_append) apply(rename_tac ad) apply(frule_tac path = ad in path_append) apply(clarsimp) apply(frule_tac prefix = "[(ctx_def, cld_def dcl'' cl'' fds'' mds'')]" and suffix = path'' and prefix' = "[(ctx_def, cld_def dcl' (cl_fqn (fqn_def dcl'')) (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) (map fst meth_def_meth_list)), (ctx_def, cld_def dcl'' cl'' fds'' mds'')]" in fpr_same_suffix[rule_format]) apply(simp) apply(clarsimp simp add: class_methods_f_def) apply(rule m_in_list) apply(simp) done lemma mty_preservation'''[rule_format]: "\suffix dcl. find_path_rec_f P ctx cl prefix = Some (prefix @ suffix) \ wf_program P \ cl = cl_fqn (fqn_def dcl) \ (\(ctx', cld') \ set suffix. (\prefix' suffix'. find_path_rec_f P ctx' (cl_fqn (fqn_def (class_name_f cld'))) prefix' = Some (prefix' @ suffix') \ mtype_f P (ty_def ctx' (class_name_f cld')) m = Some mty \ mtype_f P (ty_def ctx dcl) m = Some mty))" apply(induct_tac P ctx cl prefix rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: option.splits) apply(frule path_append) apply(clarify) apply(clarsimp) apply(erule disjE) apply(clarify) apply(clarsimp simp add: class_name_f_def split: cld.splits) apply(frule find_cld_name_eq) apply(frule_tac ctx = a in find_cld_name_eq) apply(clarsimp simp add: class_name_f_def split: cld.splits) apply(case_tac b) apply(rename_tac dcl' cl' fds' mds') apply(clarsimp simp add: superclass_name_f_def) apply(case_tac cl') apply(simp) apply(rename_tac fqn) apply(case_tac fqn) apply(rename_tac dcl'') apply(clarsimp) apply(clarsimp split: option.splits) apply(subgoal_tac "\aa b ab bb. (a = ab \ cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds' = bb \ aa = ab \ b = bb) \ (\suffix. find_path_rec_f P ab (case bb of cld_def dcl cl fds mds \ cl) (path @ [(ab, bb)]) = Some (path @ (ab, bb) # suffix) \ (\dcl. (case bb of cld_def dcl cl fds mds \ cl) = cl_fqn (fqn_def dcl) \ (\x\set suffix. (\(ctx', cld'). (\prefix' suffix'. case_option None (case_prod (\ctx' cld. 
find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)]))) (find_cld_f P ctx' (fqn_def (class_name_f cld'))) = Some (prefix' @ suffix')) \ mtype_f P (ty_def ctx' (class_name_f cld')) m = Some mty \ mtype_f P (ty_def ab dcl) m = Some mty) x)))") defer apply(force) apply(thin_tac "\aa b x y. \a = aa \ cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds' = b; x = aa \ y = b\ \ \suffix. find_path_rec_f P aa (case b of cld_def dcl cl fds mds \ cl) (path @ [(aa, b)]) = Some (path @ (aa, b) # suffix) \ (\dcl. (case b of cld_def dcl cl fds mds \ cl) = cl_fqn (fqn_def dcl) \ (\x\set suffix. case x of (ctx', cld') \ (\prefix' suffix'. case_option None (\(ctx', cld). find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)])) (find_cld_f P ctx' (fqn_def (class_name_f cld'))) = Some (prefix' @ suffix')) \ mtype_f P (ty_def ctx' (class_name_f cld')) m = Some mty \ mtype_f P (ty_def aa dcl) m = Some mty))") apply(clarsimp) apply(drule_tac x = "(aa, ba)" in bspec, simp) apply(clarsimp) apply(subgoal_tac "(\prefix' suffix'. find_path_rec_f P ab (case bb of cld_def dcl cl fds mds \ cl) (prefix' @ [(ab, bb)]) = Some (prefix' @ suffix'))") apply(erule impE) apply(simp add: superclass_name_f_def) apply(thin_tac "\prefix suffix'. P prefix suffix'" for P) apply(thin_tac "mtype_f P (ty_def aa (class_name_f ba)) m = Some mty") apply(frule find_cld_name_eq) apply(clarsimp split: option.splits if_split_asm) apply(clarsimp simp add: mtype_f_def split: option.splits meth_def.splits meth_sig.splits) apply(rule) apply(clarsimp simp add: find_meth_def_f_def) apply(subgoal_tac "\path'. find_path_f P a (cl_fqn (fqn_def dcl')) = Some path'") apply(case_tac b) apply(clarsimp simp add: find_path_f_def superclass_name_f_def split: if_split_asm option.splits) apply(rename_tac meth_body cl list_char1 list_vd ty list_ty list_char2 cla list_fd list_md list_p1 path') apply(frule_tac path = path' in path_append) apply(rename_tac ag ah) apply(frule_tac path = ag in path_append) apply(clarify) apply(frule_tac suffix = path''a and suffix' = path''b and prefix' = "[(ac, cld_def list_char2 cla list_fd list_md)]" in fpr_same_suffix[rule_format], force) apply(simp) apply(clarify) apply(clarsimp simp add: class_methods_f_def split: option.splits) apply(clarsimp simp add: find_path_f_def superclass_name_f_def split: option.splits if_split_asm) apply(frule_tac path' = "(path @ [(a, cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds'), (ac, b)])" in path_append) apply(clarsimp) apply(frule_tac suffix = path''a and prefix' = "[(a, cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds'), (ac, b)]" in fpr_same_suffix') back apply(simp) apply(simp) apply(clarsimp) apply(rule) apply(clarsimp) apply(frule_tac ty_x_d = "(ty_def a dcl')" in wf_method_from_find[simplified]) apply(simp) apply(clarsimp) apply(erule wf_methE) apply(rename_tac ag ah ai aj ak al am an ao ap aq ar as at au av) apply(case_tac ag) apply(clarsimp) apply(clarsimp) apply(rule) apply(clarsimp) apply(frule_tac ty_x_d = "(ty_def a dcl')" in wf_method_from_find[simplified]) apply(simp) apply(clarsimp) apply(erule wf_methE) apply(clarsimp) apply(cut_tac lift_opts_for_all) apply(assumption) apply(rule) apply(simp) apply(assumption) apply(clarsimp) apply(thin_tac "find_path_rec_f P ab (case bb of cld_def dcl cl fds mds \ cl) (prefix' @ [(ab, bb)]) = Some (prefix' @ suffix')") apply(thin_tac "find_cld_f P aa (fqn_def (class_name_f ba)) = Some (ab, bb)") apply(thin_tac "(aa, ba) \ set path''") apply(case_tac b) apply(frule_tac dcl = dcl'' in find_cld_name_eq) apply(clarsimp 
simp add: superclass_name_f_def) apply(rename_tac ctx ctx'' mb cl_r m' vds ty_r tys ctx' cl_r' m'' vds' ty_r' tys' mb' dcl'' cl'' fds'' mds'') apply(clarsimp simp add: find_meth_def_f_def find_path_f_def superclass_name_f_def split: option.splits if_split_asm) apply(thin_tac "find_path_rec_f P ctx cl'' (path @ [(ctx, cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds'), (ctx, cld_def dcl'' cl'' fds'' mds'')]) = Some (path @ (ctx, cld_def dcl' (cl_fqn (fqn_def dcl'')) fds' mds') # path'')") apply(clarsimp) apply(rename_tac path'' path') apply(frule_tac path = path' in path_append) apply(frule_tac path = path'' in path_append) apply(clarify) apply(frule_tac suffix = path''a and suffix' = path''b and prefix' = "[(ctx, cld_def dcl'' cl'' fds'' mds'')]" in fpr_same_suffix[rule_format], force) apply(clarsimp simp del: find_meth_def_in_path_f.simps) apply(rename_tac path'') apply(frule_tac ctx' = ctx' and m'' = m'' and cl'' = cl'' and fds'' = fds'' and ctx'' = ctx'' and path = "(ctx, cld_def dcl'' cl'' fds'' mds'') # path''" in mty_preservation'''', simp+) apply(rule_tac x = prefix' in exI) apply(clarsimp simp add: superclass_name_f_def) done lemma mty_preservation'': "\wf_program P; find_path_f P ctx (cl_fqn (fqn_def dcl)) = Some path; (ctx', cld') \ set path\ \ \prefix' suffix'. find_path_rec_f P ctx' (cl_fqn (fqn_def (class_name_f cld'))) prefix' = Some (prefix' @ suffix') \ mtype_f P (ty_def ctx' (class_name_f cld')) m = Some mty \ mtype_f P (ty_def ctx dcl) m = Some mty" apply(cut_tac x = "(ctx', cld')" in mty_preservation'''[of _ _ _ "[]", simplified]) apply(unfold find_path_f_def) apply(assumption) apply(simp+) done lemma mty_preservation': "\wf_program P; find_path_f P ctx (cl_fqn (fqn_def dcl)) = Some path; (ctx', cld') \ set path; find_path_f P ctx' (cl_fqn (fqn_def (class_name_f cld'))) = Some path'; mtype_f P (ty_def ctx' (class_name_f cld')) m = Some mty\ \ mtype_f P (ty_def ctx dcl) m = Some mty" apply(cut_tac path = path and suffix' = path' in mty_preservation''[rule_format, of _ _ _ _ _ _ "[]"]) apply(assumption+) apply(rule) apply(simp add: find_path_f_def) apply(assumption+) done lemma lift_opts_for_all_true[rule_format]: "\y_ty_list. (\x\set cl_var_ty_list. (\(cl_k, var_k, ty_k). find_type_f P ctx cl_k = Some ty_k) x) \ lift_opts (map (case_vd (\clk vark. find_type_f P ctx clk)) (map (\(cl_k, var_k, ty_k). vd_def cl_k var_k) cl_var_ty_list)) = Some (map snd y_ty_list) \ map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list" by (induct cl_var_ty_list, auto split: option.splits) lemma mty_preservation: "\wf_program P; find_meth_def_f P ty_x_d meth = Some (ctx, meth_def_def (meth_sig_def cl_r meth (map (\(cl_k, var_k, ty_k). vd_def cl_k var_k) cl_var_ty_list)) meth_body); find_type_f P ctx cl_r = Some ty_r_d; \x\set cl_var_ty_list. (\(cl_k, var_k, ty_k). find_type_f P ctx cl_k = Some ty_k) x; mtype_f P ty_x_s meth = Some (mty_def (map snd y_ty_list) ty_r_s); is_sty_one P ty_x_d ty_x_s = Some True\ \ ty_r_d = ty_r_s \ map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list" apply(case_tac "ty_x_d = ty_x_s") apply(clarsimp) apply(clarsimp simp add: mtype_f_def find_type_lift_opts[simplified] split: option.splits) apply(simp add: is_sty_one_def split: option.splits) apply(case_tac ty_x_s) apply(clarsimp simp add: mtype_f_def find_meth_def_f_def) apply(clarsimp) apply(rename_tac path_d ctx_s cld_s) apply(case_tac ty_x_d) apply(clarsimp) apply(clarsimp) apply(rename_tac ctx_d dcl_d) apply(clarsimp simp add: find_meth_def_f_def split: option.splits) apply(subgoal_tac "\path_s. 
find_path_f P ctx_s (cl_fqn (fqn_def (class_name_f cld_s))) = Some path_s") apply(clarify) apply(frule_tac path = path_d and path' = path_s in mty_preservation') apply(simp+) apply(clarsimp simp add: mtype_f_def split: option.splits meth_def.splits meth_sig.splits) apply(clarsimp simp add: find_meth_def_f_def) apply(rule_tac P = P and ctx = ctx in lift_opts_for_all_true) apply(simp) apply(clarsimp simp add: mtype_f_def find_meth_def_f_def split: option.splits) done lemma map_not_mem: "\(y_k, ty_k, var_k, var'_k, v_k) \ set y_ty_var_var'_v_list; x' \ (\(y, ty, var, var', v). x_var var') ` set y_ty_var_var'_v_list\ \ x_var var'_k \ x'" by (force) lemma map_fst: "map fst (map (\(y, ty, var, var', v). (x_var var', ty)) y_ty_var_var'_v_list) = map (\(y, ty, var, var', v). x_var var') y_ty_var_var'_v_list" by (force) lemma supertype_top: "sty_one P ty ty' \ ty = ty_top \ ty' = ty_top" by (induct rule: sty_one.induct, auto) lemma top_not_subtype[rule_format]: "sty_one P ty_top (ty_def ctx dcl) \ False" by (rule, drule supertype_top, simp) lemma f_in_found_fds: "ftype_in_fds_f P ctx fds f = ty_opt_bot_opt (Some ty_f) \ f \ case_fd (\cl f. f) ` set fds" by (induct fds, auto split: fd.splits) lemma ftype_fields[rule_format]: "ftype_in_path_f P path_s f = Some ty_f \ f \ set (fields_in_path_f path_s)" apply(induct path_s) apply(simp) apply(clarsimp split: option.splits) apply(case_tac b) apply(clarsimp) apply(rename_tac ctx path_s dcl cl fds mds) apply(clarsimp simp add: class_fields_f_def split: ty_opt_bot.splits option.splits) apply(simp add: f_in_found_fds) done lemma no_field_hiding''': "\suffix. find_path_rec_f P ctx cl prefix = Some (prefix @ suffix) \ wf_program P \ (\(ctx, cld) \ set suffix. (\prefix' suffix'. find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) prefix' = Some (prefix' @ suffix') \ f \ set (fields_in_path_f suffix') \ f \ set (fields_in_path_f suffix)))" apply(induct_tac P ctx cl prefix rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: option.splits) apply(subgoal_tac "\aa ba aaa bb. (a = aaa \ b = bb \ aa = aaa \ ba = bb) \ (\suffix. find_path_rec_f P aaa (superclass_name_f bb) (path @ [(aaa, bb)]) = Some (path @ (aaa, bb) # suffix) \ (\x\set suffix. (\(ctx, cld). (\prefix' suffix'. case_option None (case_prod (\ctx' cld. find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)]))) (find_cld_f P ctx (fqn_def (class_name_f cld))) = Some (prefix' @ suffix') \ f \ set (fields_in_path_f suffix')) \ f \ set (fields_in_path_f suffix)) x))") defer apply(thin_tac _)+ apply(force) apply(thin_tac "\aa ba x y. \a = aa \ b = ba; x = aa \ y = ba\ \ \suffix. find_path_rec_f P aa (superclass_name_f ba) (path @ [(aa, ba)]) = Some (path @ (aa, ba) # suffix) \ (\x\set suffix. case x of (ctx, cld) \ (\prefix' suffix'. case_option None (\(ctx', cld). 
find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)])) (find_cld_f P ctx (fqn_def (class_name_f cld))) = Some (prefix' @ suffix') \ f \ set (fields_in_path_f suffix')) \ f \ set (fields_in_path_f suffix))") apply(clarsimp) apply(frule path_append) apply(clarify) apply(erule_tac x = "tl suffix" in allE) apply(clarsimp) apply(erule disjE) apply(clarify) apply(case_tac fqn) apply(clarsimp simp del: fmdip_cons simp add: class_name_f_def split: cld.splits) apply(frule find_cld_name_eq) apply(frule_tac ctx = a in find_cld_name_eq) apply(clarsimp simp del: fmdip_cons simp add: class_name_f_def split: cld.splits) apply (rename_tac list1 cl list2 list3) apply(frule_tac path = "prefix' @ suffix'" in path_append) apply(clarsimp simp del: fmdip_cons) apply(frule_tac prefix = "path @ [(ab, cld_def list1 cl list2 list3)]" and suffix = path'' and prefix' = "prefix' @ [(ab, cld_def list1 cl list2 list3)]" and suffix' = path''a in fpr_same_suffix[rule_format]) apply(clarsimp) apply(simp) apply(drule_tac x = "(aa, ba)" in bspec, simp) apply(force) done lemma no_field_hiding'': "\find_path_f P ctx' cl = Some suffix; wf_program P; (ctx, cld) \ set suffix\ \ \prefix' suffix'. find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) prefix' = Some (prefix' @ suffix') \ f \ set (fields_in_path_f suffix') \ f \ set (fields_in_path_f suffix)" apply(cut_tac x = "(ctx, cld)" in no_field_hiding'''[of _ _ _ "[]", rule_format]) apply(simp add: find_path_f_def)+ done lemma no_field_hiding': "\find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \ set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; f \ set (fields_in_path_f path')\ \ f \ set (fields_in_path_f path)" apply(cut_tac no_field_hiding''[rule_format, of _ _ _ _ _ _ "[]"]) apply(assumption+) apply(unfold find_path_f_def) apply(rule) apply(simp+) done lemma no_field_hiding: "\wf_program P; is_sty_one P ty_x_d ty_x_s = Some True; ftype_f P ty_x_s f = Some ty_f; fields_f P ty_x_d = Some fields_oid\ \ f \ set fields_oid" apply(case_tac ty_x_s) apply(simp add: ftype_f_def) apply(simp add: is_sty_one_def split: option.splits) apply(case_tac ty_x_d) apply(simp) apply(clarsimp) apply(rename_tac ctx_s dcl_s path_d ctx_d dcl_d) apply(simp add: ftype_f_def split: option.splits) apply(clarsimp simp add: fields_f_def) apply(drule ftype_fields) apply(frule no_field_hiding') apply(simp add: ftype_fields class_name_f_def)+ done lemma ftype_pre_path[rule_format]: "ftype_in_path_f P path_s f = Some ty_f \ ftype_in_path_f P (path_s @ path'') f = Some ty_f" by (induct path_s, auto split: option.splits ty_opt_bot.splits) lemma fields_non_intersection[rule_format]: " (\(cl, f, ty). f) ` set cl_f_ty_list \ set (fields_in_path_f path'') = {} \ f \ set (fields_in_path_f path'') \ ftype_in_fds_f P ctx_def (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) f = ty_opt_bot_opt None" by (induct cl_f_ty_list, auto) lemma f_notin_list[rule_format]: "f \ set (map (\(cl, f, ty). f) cl_f_ty_list) \ ftype_in_fds_f P ctx_def (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) f = ty_opt_bot_opt None" by (induct cl_f_ty_list, auto) declare find_path_rec_f.simps[simp del] lemma ftype_preservation''''': "\find_path_rec_f P ctx cl (prefix @ [cld']) = Some (prefix @ cld' # path''); find_type_f P ctx_def cl = Some ty; fields_f P ty = Some fs; ftype_in_path_f P path'' f = Some ty_f; (set (map (\(cl, f, ty). f) cl_f_ty_list)) \ (set fs) = {}\ \ ftype_in_fds_f P ctx (map (\(cl, f, ty). 
fd_def cl f) cl_f_ty_list) f = ty_opt_bot_opt None" apply(clarsimp simp add: fields_f_def split: option.splits) apply(rename_tac path) apply(case_tac cl) apply(simp add: find_path_rec_f.simps) apply(clarsimp split: fqn.splits option.splits) apply(rename_tac dcl ctx'' cld'') apply(case_tac ctx'') apply(case_tac ctx) apply(clarsimp simp add: find_path_f_def) apply(frule_tac prefix = "prefix @ [cld']" and suffix = path'' and prefix' = "[]" in fpr_same_suffix[rule_format]) apply(simp) apply(clarsimp) apply(drule ftype_fields) apply(cut_tac fields_non_intersection) apply(force) apply(force) done declare find_path_rec_f.simps[simp] lemma ftype_preservation'''': "\suffix. find_path_rec_f P ctx cl prefix = Some (prefix @ suffix) \ wf_program P \ (\(ctx, cld) \ set suffix. (\prefix' suffix'. find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) prefix' = Some (prefix' @ suffix') \ ftype_in_path_f P suffix' f = Some ty \ ftype_in_path_f P suffix f = Some ty))" apply(induct_tac P ctx cl prefix rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: option.splits) apply(subgoal_tac "\aa ba aaa bb. (a = aaa \ b = bb \ aa = aaa \ ba = bb) \ (\suffix. find_path_rec_f P aaa (superclass_name_f bb) (path @ [(aaa, bb)]) = Some (path @ (aaa, bb) # suffix) \ (\x\set suffix. (\(ctx, cld). (\prefix' suffix'. case_option None (case_prod (\ctx' cld. find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)]))) (find_cld_f P ctx (fqn_def (class_name_f cld))) = Some (prefix' @ suffix') \ ftype_in_path_f P suffix' f = Some ty) \ ftype_in_path_f P suffix f = Some ty) x))") defer apply(thin_tac _)+ apply(force) apply(clarsimp) apply(frule path_append) apply(clarify) apply(erule_tac x = "tl suffix" in allE) apply(clarsimp) apply(erule disjE) apply(clarify) apply(case_tac fqn) apply(clarsimp simp add: class_name_f_def split: cld.splits) apply(frule find_cld_name_eq) apply(frule_tac ctx = a in find_cld_name_eq) apply(clarsimp simp add: class_name_f_def split: cld.splits) apply(rename_tac list1 cl list2 list3) apply(frule_tac path = "prefix' @ suffix'" in path_append) apply(clarsimp) apply(frule_tac prefix = "path @ [(ab, cld_def list1 cl list2 list3)]" and suffix = path'' and prefix' = "prefix' @ [(ab, cld_def list1 cl list2 list3)]" and suffix' = path''a in fpr_same_suffix[rule_format]) apply(clarsimp) apply(clarsimp) apply(drule_tac x = "(aa, ba)" in bspec, simp) apply(clarsimp) apply(subgoal_tac "(\prefix' suffix'. find_path_rec_f P ab (superclass_name_f bb) (prefix' @ [(ab, bb)]) = Some (prefix' @ suffix') \ ftype_in_path_f P suffix' f = Some ty)") apply(clarsimp) apply(subgoal_tac "ftype_in_fds_f P a (class_fields_f b) f = ty_opt_bot_opt None") apply(simp) apply(frule_tac cld' = b in wf_cld_from_lu) apply(simp) apply(erule wf_classE) apply(erule wf_class_cE) apply(clarsimp simp add: superclass_name_f_def class_fields_f_def) apply(rule ftype_preservation''''') apply(simp+) apply(force) done lemma ftype_preservation''': "\find_path_f P ctx' cl = Some suffix; wf_program P; (ctx, cld) \ set suffix\ \ \prefix' suffix'. 
find_path_rec_f P ctx (cl_fqn (fqn_def (class_name_f cld))) prefix' = Some (prefix' @ suffix') \ ftype_in_path_f P suffix' f = Some ty \ ftype_in_path_f P suffix f = Some ty" apply(cut_tac x = "(ctx, cld)" in ftype_preservation''''[of _ _ _ "[]", rule_format]) apply(simp add: find_path_f_def)+ done lemma ftype_preservation'': "\find_path_f P ctx' cl = Some path; wf_program P; (ctx, cld) \ set path; find_path_f P ctx (cl_fqn (fqn_def (class_name_f cld))) = Some path'; ftype_in_path_f P path' f = Some ty\ \ ftype_in_path_f P path f = Some ty" apply(cut_tac ftype_preservation'''[rule_format, of _ _ _ _ _ _ "[]"]) apply(assumption+) apply(unfold find_path_f_def) apply(rule) apply(simp+) done lemma ftype_preservation'[simplified]: "\wf_program P; sty_one P ty_x_d ty_x_s; ftype P ty_x_s f ty_f\ \ ftype P ty_x_d f ty_f" apply(case_tac ty_x_s) apply(simp add: ftype_f_def) apply(simp add: is_sty_one_def split: option.splits) apply(case_tac ty_x_d) apply(simp) apply(clarsimp) apply(rename_tac ctx_s dcl_s path_d ctx_d dcl_d) apply(simp add: ftype_f_def split: option.splits) apply(rename_tac path_s cl_s fds_s mds_s) apply(rule ftype_preservation'') apply(simp add: class_name_f_def)+ done lemma ftype_preservation[simplified]: "\wf_program P; \x\dom \. wf_object P H (L x) (\ x); \ x = Some ty_x_s; ftype P ty_x_s f ty_f_s; L x = Some (v_oid oid_x); H oid_x = Some (ty_x_d, fs_oid); ftype P ty_x_d f ty_f_d\ \ ty_f_s = ty_f_d" apply(drule_tac x = x in bspec, simp add: domI) apply(erule wf_objectE) apply(simp) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp) apply(simp add: ftype_preservation') done lemma map_f: "\cl_f_ty_list. map (\(cl, f). fd_def cl f) cl_f_list = map (\(cl, f, ty). fd_def cl f) cl_f_ty_list \ map (\(cl, f). f) cl_f_list = map (\(cl, f, ty). f) cl_f_ty_list" by (induct cl_f_list, auto) lemma fields_to_owner[rule_format]: "f \ set (fields_in_path_f path) \ (\cld \ set (map snd path). \cl_f. fd_def cl_f f \ set (class_fields_f cld))" apply(induct path) apply(simp) apply(clarsimp) apply(rename_tac cld path fd) apply(clarsimp simp add: class_fields_f_def split: option.splits cld.splits fd.splits) apply(force) done lemma ftype_in_fds_none[rule_format]: "f \ (\(cl, f, ty). f) ` set cl_f_ty_list \ ftype_in_fds_f P ctx (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) f = ty_opt_bot_opt None" by (induct cl_f_ty_list, auto) lemma ftype_in_fds_some[rule_format]: "(cl_f, f, ty_f) \ set cl_f_ty_list \ distinct (map (\(cl, f, ty). f) cl_f_ty_list) \ find_type_f P ctx cl_f = Some ty_f \ ftype_in_fds_f P ctx (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) f = ty_opt_bot_opt (Some ty_f)" apply(induct cl_f_ty_list) apply(simp) apply(clarsimp) apply(safe) apply(force) apply(simp) apply(frule ftype_in_fds_none) apply(force) done lemma no_ty_bot[THEN mp]: "(\x\set cl_f_ty_list. (\(cl, f, ty). find_type_f P ctx_def cl = Some ty) x) \ ftype_in_fds_f P ctx' (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) f \ ty_opt_bot_bot" apply(induct cl_f_ty_list) apply(simp) apply(clarsimp split: if_split_asm option.splits) apply(case_tac ctx', simp) (* should get rid of ctx dependency here *) done lemma field_has_type'[rule_format]: "(cl_f, f, ty_f) \ set cl_f_ty_list \ (\(ctx, cld) \ set path. wf_class P cld) \ (ctx, cld_def dcl cl (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) mds) \ set path \ distinct (map (\(cl, f, ty). f) cl_f_ty_list) \ find_type_f P ctx cl_f = Some ty_f \ (\ty'. 
ftype_in_path_f P path f = Some ty')" apply(induct path) apply(simp) apply(simp) apply(rule) apply(elim conjE) apply(erule disjE) apply(clarsimp) apply(subgoal_tac "ftype_in_fds_f P ctx (class_fields_f (cld_def dcl cl (map (\(cl, f, ty). fd_def cl f) cl_f_ty_list) mds)) f = ty_opt_bot_opt (Some ty_f)") apply(simp) apply(simp add: class_fields_f_def) apply(rule ftype_in_fds_some) apply(force) apply(case_tac a) apply(rename_tac ctx' cld') apply(simp) apply(case_tac "ftype_in_fds_f P ctx' (class_fields_f cld') f") apply(rename_tac ty_opt) apply(case_tac ty_opt) apply(clarsimp) apply(clarsimp) apply(clarsimp) apply(erule wf_classE) apply(erule wf_class_cE) apply(clarsimp simp add: class_fields_f_def) apply(frule no_ty_bot) apply(force) done lemma mem_class_wf: "\wf_program P; cld \ set P\ \ wf_class P cld" apply(erule wf_programE) apply(drule_tac x = cld in bspec, simp) apply(clarsimp) done lemma field_has_type: "\wf_program P; fields P ty_oid (Some f_list); f \ set f_list\ \ \ty. ftype P ty_oid f ty" apply(simp) apply(clarsimp simp add: fields_f_def split: option.splits) apply(rename_tac path) apply(case_tac ty_oid) apply(simp) apply(clarsimp) apply(rename_tac dcl) apply(frule fields_to_owner) apply(clarsimp) apply(rename_tac ctx' cld cl_f) apply(frule clds_in_path_exist) apply(drule_tac x = "(ctx', cld)" in bspec, simp) apply(clarsimp) apply(frule mem_class_wf, simp) apply(erule wf_classE) apply(erule wf_class_cE) apply(clarsimp simp add: class_fields_f_def) apply(rename_tac cl_f ty_f) apply(drule_tac x = "(cl_f, f, ty_f)" in bspec, simp) apply(clarsimp) apply(simp add: ftype_f_def) apply(rule_tac cl_f = cl_f and ty_f = ty_f and cl_f_ty_list = cl_f_ty_list and ctx = ctx' and dcl = dclb and cl = cla and mds = "(map fst meth_def_meth_list)" in field_has_type') apply(simp) apply(case_tac ctx') apply(simp) apply(clarify) apply(drule_tac cl = "cl_fqn (fqn_def dcl)" and ctxcld = "(a, b)" in all_in_path_found) apply(simp) apply(clarsimp) apply(rule wf_cld_from_lu) apply(simp) apply(force) done lemma exists_three_in_five: "var_k \ set (map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list) \ (\y cl var' v. (y, cl, var_k, var', v) \ set y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma map_xx[rule_format]: "(y, cl, var_k, var', v) \ set y_cl_var_var'_v_list \ (x_var var_k, x_var var') \ set (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma map_of_vd[rule_format]: "\cl_var_ty_list. map (\(y, cl, var, var', v). vd_def cl var) y_cl_var_var'_v_list = map (\(cl, var, ty). vd_def cl var) cl_var_ty_list \ map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list" by (induct y_cl_var_var'_v_list, auto) lemma mem_vd: "vd_def cl_k var_k \ set (map (\(y, cl, var, var', v). vd_def cl var) y_cl_var_var'_v_list) \ var_k \ set (map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list)" by (induct y_cl_var_var'_v_list, auto) lemma exists_var': "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; map_of (map (\(cl, var, ty). (x_var var, ty)) cl_var_ty_list) var_k = Some ty_k\ \ \var'_k. map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) var_k = Some (x_var var'_k)" apply(drule map_of_SomeD) apply(clarsimp, hypsubst_thin) apply(rename_tac cl_k var_k) apply(subgoal_tac "var_k \ set (map (\(y, cl, var, var', v). 
var) y_cl_var_var'_v_list)") apply(simp (no_asm_use)) apply(clarify) apply(rename_tac y_k cl_k' var_k var'_k v_k) apply(rule_tac x = var'_k in exI) apply(rule map_of_is_SomeI) apply(drule distinct_var, assumption+) apply(rule map_xx, assumption) by force lemma map_y: "\((y_k, cl_k, var_k, var'_k, v_k), y'_k, ty_k) \ set (zip y_cl_var_var'_v_list y_ty_list); map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list\ \ (y_k, ty_k) \ set y_ty_list" apply(clarsimp simp add: set_zip) apply(drule_tac f = "\(y, cl, var, var', v). y" in arg_cong) apply(drule_tac f1 = "\(y, cl, var, var', v). y" in nth_map[THEN sym]) apply(frule_tac f = "\(y, ty). y" in arg_cong) apply(case_tac "y_ty_list ! i") apply(force simp add: in_set_conv_nth) done lemma set_zip_var'_v: "\((y_k, cl_k, var_k, var'_k, v_k), y'_k, ty_k) \ set (zip y_cl_var_var'_v_list y_ty_list)\ \ (x_var var'_k, v_k) \ set (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list)" apply(subgoal_tac "(y_k, cl_k, var_k, var'_k, v_k) \ set y_cl_var_var'_v_list") by (force simp add: set_zip)+ lemma map_of_map_zip_none: "\map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list; map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)) x_G = None\ \ map_of (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list) x_G = None" apply(drule map_length_y) apply(clarsimp simp add: map_of_eq_None_iff) apply(erule contrapos_np) apply(simp only: set_map[THEN sym] fst_zip_eq) by force lemma map_of_map_zip_some: "\distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list; map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)) x_G = Some ty_k\ \ \v_k. map_of (map (\(y, cl, var, var', y). (x_var var', y)) y_cl_var_var'_v_list) x_G = Some v_k" apply(drule map_of_SomeD) apply(clarsimp) apply(rename_tac y cl var var' v y') apply(subgoal_tac "(y, cl, var, var', v) \ set y_cl_var_var'_v_list") apply(rule_tac x = v in exI) apply(rule map_of_is_SomeI) apply(simp only: map_fst_two' distinct_x_var') apply(force) by (force simp add: set_zip) lemma x'_not_in_list: "\(y_k, cl_k, var_k, var'_k, v_k) \ set y_cl_var_var'_v_list; x' \ (\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list\ \ x_var var'_k \ x'" by (erule contrapos_nn, force) lemma nth_in_set: "\i < length y_ty_list; map fst y_ty_list ! i = y_k\ \ y_k \ set (map fst y_ty_list)" apply(subgoal_tac "\i set (map fst y_ty_list) \ (\ty. (y_k, ty) \ set y_ty_list)" by (induct y_ty_list, auto) lemma exists_i: (* can probably simplify a lot *) "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); (y_k, cl_ka, var_k, var'_k, v_k) \ set y_cl_var_var'_v_list; (cl_k, var_k, ty_k) \ set cl_var_ty_list; map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list\ \ \i < length cl_var_ty_list. cl_var_ty_list ! i = (cl_k, var_k, ty_k) \ y_cl_var_var'_v_list ! i = (y_k, cl_ka, var_k, var'_k, v_k)" apply(simp only: in_set_conv_nth) apply(erule_tac P = "\i. i < length cl_var_ty_list \ cl_var_ty_list ! i = (cl_k, var_k, ty_k)" in exE) apply(erule conjE) apply(rule_tac x = x in exI) apply(simp) apply(drule_tac f = "\(cl, var, ty). var" in arg_cong) apply(frule_tac f1 = "\(cl, var, ty). var" in nth_map[THEN sym]) apply(drule_tac t = "map (\(cl, var, ty). var) cl_var_ty_list ! x" in sym) apply(drule_tac s = "(\(cl, var, ty). var) (cl_var_ty_list ! 
x)" in trans) apply(simp) apply(thin_tac "(\(cl, var, ty). var) (cl_var_ty_list ! x) = (\(cl, var, ty). var) (cl_k, var_k, ty_k)") apply(drule sym) apply(simp) apply(erule exE) apply(rename_tac j) apply(erule conjE) apply(simp only: distinct_conv_nth) apply(erule_tac x = x in allE) apply(drule_tac s = "map (\(cl, var, ty). var) cl_var_ty_list" in sym) apply(drule map_length_var) apply(simp) apply(erule_tac x = j in allE) apply(simp) done lemma y_ty_match: "\i < length cl_var_ty_list; cl_var_ty_list ! i = (cl_k, var_k, ty_k); y_cl_var_var'_v_list ! i = (y_k, cl_ka, var_k, var'_k, v_k); map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list\ \ y_ty_list ! i = (y_k, ty_k)" apply(drule_tac f = "\(cl, var, ty). ty" in arg_cong) apply(frule_tac f1 = "\(cl, var, ty). ty" in nth_map[THEN sym]) apply(drule_tac t = "map (\(cl, var, ty). ty) cl_var_ty_list ! i" in sym) apply(frule map_length_y) apply(frule map_length_ty) apply(drule_tac t = "length y_ty_list" in sym) apply(simp) apply(drule_tac f = "\(y, cl, var, var', v). y" in arg_cong) apply(frule_tac f1 = "\(y, cl, var, var', v). y" in nth_map[THEN sym]) apply(drule_tac t = "map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list ! i" in sym) apply(case_tac "y_ty_list ! i") apply(simp) done lemma map_of_ty_k: "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); (y_k, cl_ka, var_k, var'_k, v_k) \ set y_cl_var_var'_v_list; (cl_k, var_k, ty_k) \ set cl_var_ty_list; map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list; map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list\ \ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)) (x_var var'_k) = Some ty_k" apply(frule map_length_y) apply(rule map_of_is_SomeI) apply(simp only: fst_zip_eq) apply(simp only: distinct_x_var') apply(clarsimp) apply(frule map_length_var) apply(frule exists_i, simp+) apply(erule exE) apply(drule_tac t = "length y_ty_list" in sym) apply(clarsimp) apply(frule y_ty_match, assumption+) apply(subgoal_tac "zip y_cl_var_var'_v_list y_ty_list ! i = (y_cl_var_var'_v_list ! i, y_ty_list ! i)") apply(drule_tac f = "\((y, cl, var, var', v), y', y). (x_var var', y)" in arg_cong) apply(subgoal_tac "i < length (zip y_cl_var_var'_v_list y_ty_list)") apply(frule_tac f1 = "\((y, cl, var, var', v), y', y). (x_var var', y)" in nth_map[THEN sym]) apply(drule_tac t = "map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list) ! i" in sym) apply(drule_tac s = "(\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list ! i)" in trans) apply(simp) apply(subgoal_tac "\j((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)). map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list) ! j = (x_var var'_k, ty_k)") apply(simp only: in_set_conv_nth[THEN sym]) apply(simp) apply(rule_tac x = i in exI) apply(simp) apply(force) apply(rule nth_zip) apply(simp) apply(simp) done lemma map_of_ty_k': "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); x' \ (\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list; map fst y_cl_var_var'_v_list = map fst y_ty_list; map (\(cl, var, ty). 
ty) cl_var_ty_list = map snd y_ty_list; map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; map_of (map (\(cl, var, y). (x_var var, y)) cl_var_ty_list) (x_var var) = Some ty_var\ \ (if x_var (case_option var (case_x (\var'. var') var) (map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) (x_var var))) = x' then Some ty_x_m else (\ ++ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) (x_var (case_option var (case_x (\var'. var') var) (map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) (x_var var))))) = Some ty_var" apply(frule exists_var') apply(simp add: weaken_list_var) apply(simp) apply(clarsimp split del: if_split) apply(clarsimp) apply(drule map_of_SomeD)+ apply(clarsimp split del: if_split) apply(frule x'_not_in_list, simp+) apply(frule map_of_ty_k) apply(simp add: weaken_list_y weaken_list_var)+ done lemma var_not_var'_x': "\\ (x_var var) = Some ty_var; L (x_var var) = Some v_var; \x\set y_cl_var_var'_v_list. L (x_var ((\(y, cl, var, var', v). var') x)) = None; L x' = None\ \ (\ ++ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)) ++ Map.empty(x' \ ty_x_d)) (x_var var) = Some ty_var" apply(subgoal_tac "x_var var \ x'") apply(simp add: map_add_def) apply(subgoal_tac "map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)) (x_var var) = None") apply(simp) apply(clarsimp simp add: map_of_eq_None_iff set_zip) apply(rename_tac y_k cl_k var_k v_k y'_k ty_k i) apply(drule_tac x = "(y_k, cl_k, var_k, var, v_k)" in bspec, simp) apply(force) by force lemma same_el: "\distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); i < length y_cl_var_var'_v_list; (y_i, cl_i, var_i, var'_i, v_i) = y_cl_var_var'_v_list ! i; (y_j, cl_j, var_j, var'_i, v_j) \ set y_cl_var_var'_v_list\ \ (y_i, cl_i, var_i, var'_i, v_i) = (y_j, cl_j, var_j, var'_i, v_j)" apply(simp only: in_set_conv_nth) apply(clarsimp) apply(rename_tac j) apply(simp only: distinct_conv_nth) apply(erule_tac x = i in allE) apply(simp) apply(erule_tac x = j in allE) apply(simp) apply(drule sym) apply(simp) done lemma same_y: "\map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list; i < length y_cl_var_var'_v_list; (y'_k, ty_k) = y_ty_list ! i; y_cl_var_var'_v_list ! i = (y_k, cl_k, var_k, var'_k, v_k)\ \ y'_k = y_k" apply(drule_tac f = "\(y, cl, var, var', v). y" in arg_cong) apply(frule_tac f1 = "\(y, cl, var, var', v). y" in nth_map[THEN sym]) apply(drule_tac f = fst in arg_cong) apply(simp) apply(frule map_length_y) apply(simp) done lemma map_map_zip_y': "map fst y_y'_list = map fst y_ty_list \ map snd y_y'_list = map fst (map (\((y, y'), yy, ty). (y', ty)) (zip y_y'_list y_ty_list))" apply(rule nth_equalityI) apply(induct y_y'_list) apply(simp) apply(clarsimp) apply(frule map_length_y') apply(simp) apply(clarsimp) apply(case_tac "y_y'_list ! i") apply(case_tac "y_ty_list ! i") apply(clarsimp) apply(subgoal_tac "i < length (map (\((y, y'), yy, ty). (y', ty)) (zip y_y'_list y_ty_list))") apply(frule_tac f = fst and xs = "map (\((y, y'), yy, ty). (y', ty)) (zip y_y'_list y_ty_list)" in nth_map) apply(simp) apply(subgoal_tac "length (map (\((y, y'), yy, ty). 
(y', ty)) list) = length list" for list) apply(simp) apply(frule map_length_y') apply(simp) apply(simp only: map_length_list) done lemma map_map_zip_ty: "map fst y_y'_list = map fst y_ty_list \ map snd (map (\((y, y'), yy, ty). (y', ty)) (zip y_y'_list y_ty_list)) = map snd y_ty_list" apply(frule map_length_y') apply(rule nth_equalityI) apply(induct y_ty_list) apply(simp) apply(simp) apply(clarsimp) apply(case_tac "y_y'_list ! i") apply(case_tac "y_ty_list ! i") apply(simp) done lemma map_y_yy: "\i < length y_y'_list; i < length y_ty_list; map fst y_y'_list = map fst y_ty_list; y_y'_list ! i = (y, y'); y_ty_list ! i = (yy, ty)\ \ y = yy" apply(drule_tac f = fst in arg_cong) apply(frule_tac f1 = fst in nth_map[THEN sym]) by simp lemma var_k_of_ty_k: "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); x' \ (\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list; map fst y_cl_var_var'_v_list = map fst y_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list; map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; (if x = x_this then Some ty_x_m else map_of (map (\(cl, var, y). (x_var var, y)) cl_var_ty_list) x) = Some ty_x\ \ (if (case if x = x_this then Some x' else map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) x of None \ x | Some x' \ x') = x' then Some ty_x_m else (\ ++ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) (case if x = x_this then Some x' else map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) x of None \ x | Some x' \ x')) = Some ty_x" apply(clarsimp) apply(rule) apply(rule) apply(simp) apply(clarsimp) apply(frule exists_var') apply(simp add: weaken_list_var) apply(simp) apply(clarsimp split del: if_split) apply(drule map_of_SomeD)+ apply(clarsimp split del: if_split) apply(frule x'_not_in_list, simp+) apply(frule map_of_ty_k) apply(simp add: weaken_list_y weaken_list_var)+ done lemma x_var_not_this: "(if x_var var = x_this then Some x' else Q) = Q" by simp lemma subtype_through_tr: "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); x' \ (\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list; map fst y_cl_var_var'_v_list = map fst y_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list; map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; is_sty_one P ty_x ty_var = Some True; (if x = x_this then Some ty_x_m else map_of (map (\(cl, var, y). (x_var var, y)) cl_var_ty_list) x) = Some ty_x; map_of (map (\(cl, var, y). (x_var var, y)) cl_var_ty_list) (x_var var) = Some ty_var\ \ sty_option P (if (case if x = x_this then Some x' else map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) x of None \ x | Some x' \ x') = x' then Some ty_x_m else (\ ++ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) (case if x = x_this then Some x' else map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) x of None \ x | Some x' \ x')) (if x_var (case_option var (case_x (\var'. var') var) (map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) (x_var var))) = x' then Some ty_x_m else (\ ++ map_of (map (\((y, cl, var, var', v), y', y). 
(x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) (x_var (case_option var (case_x (\var'. var') var) (if x_var var = x_this then Some x' else map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) (x_var var)))))" apply(rule_tac ty = ty_x and ty' = ty_var in sty_optionI) apply(simp add: var_k_of_ty_k) apply(simp only: x_var_not_this) apply(rule map_of_ty_k') apply(simp+) done lemma subtypes_through_tr: "\distinct (map (\(cl, var, ty). var) cl_var_ty_list); distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list); x' \ (\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list; map fst y_cl_var_var'_v_list = map fst y_ty_list; map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list; map (\(y, cl, var, u). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list; \x\set y_y'_list. case_prod (\y. (=) (case if y = x_this then Some x' else map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list) y of None \ y | Some x' \ x')) x; \x\set y_ty_lista. (\(y, ty). sty_option P (if y = x_this then Some ty_x_m else map_of (map (\(cl, var, y). (x_var var, y)) cl_var_ty_list) y) (Some ty)) x; map fst y_y'_list = map fst y_ty_lista; (y, y') = y_y'_list ! i; (yy, ty) = y_ty_lista ! i; i < length y_y'_list; i < length y_ty_lista\ \ sty_option P (if y' = x' then Some ty_x_m else (\ ++ map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list))) y') (Some ty)" apply(drule_tac x = "(y, y')" in bspec, simp) apply(drule_tac x = "(yy, ty)" in bspec, simp) apply(clarsimp split del: if_split) apply(frule_tac y_ty_list = y_ty_lista in map_y_yy, assumption+) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rule_tac ty = tya in sty_optionI, (simp add: var_k_of_ty_k)+) done lemma map_override_get: "(\ ++ (\u. if u = x' then Some ty_x_d else None)) x' = Some ty_x_d" apply(simp add: map_add_def) done lemma s_induct': "\\ss. \s\set ss. P s \ P (s_block ss); \list x. P (s_ass list x); \list1 x list2. P (s_read list1 x list2); \x1 list x2. P (s_write x1 list x2); \x1 x2 s1 s2. \P s1; P s2\ \ P (s_if x1 x2 s1 s2); \list1 x list2 list3. P (s_call list1 x list2 list3); \list ctx cl. P (s_new list ctx cl)\ \ P s" by (metis s.induct) lemma wf_tr_stmt_in_G': "\s'. distinct (map (\(cl, var, ty). var) cl_var_ty_list) \ distinct (map (\(y, cl, var, var', v). var') y_cl_var_var'_v_list) \ x' \ (\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list \ map (\(y, cl, var, var', v). y) y_cl_var_var'_v_list = map fst y_ty_list \ map (\(cl, var, ty). ty) cl_var_ty_list = map snd y_ty_list \ map (\(y, cl, var, var', v). var) y_cl_var_var'_v_list = map (\(cl, var, ty). var) cl_var_ty_list \ is_sty_one P ty_x_d ty_x_m = Some True \ wf_stmt P (map_of (map (\(cl, var, ty). (x_var var, ty)) cl_var_ty_list)(x_this \ ty_x_m)) s' \ tr_s (map_of (map (\(y, cl, var, var', v). (x_var var, x_var var')) y_cl_var_var'_v_list)(x_this \ x')) s' s'' \ wf_stmt P ((\ ++ map_of (map (\((y, cl, var, var', v), y', y). 
(x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)))(x' \ ty_x_m)) s''" apply(rule s_induct') apply(clarsimp) apply(erule tr_s.cases, simp_all) apply(clarsimp) apply(erule wf_stmtE, simp_all) apply(clarsimp) apply(rule wf_blockI) apply(clarsimp) apply(rename_tac s'_s''_list s' s'') apply(drule_tac x = "(s', s'')" in bspec, simp)+ apply(clarsimp) apply(clarsimp) apply(rename_tac var x s') apply(erule tr_s.cases, simp+) apply(erule wf_stmtE, simp_all) apply(erule sty_option.cases) apply(rule wf_var_assignI) apply(clarsimp split del: if_split) apply(rule_tac ty_x = ty in subtype_through_tr, simp+) apply(clarsimp) apply(rename_tac var x f s') apply(erule tr_s.cases, simp+) apply(erule wf_stmtE, simp_all) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rename_tac f ty_x var x ty_f ty_var) apply(rule wf_field_readI) apply(simp add: var_k_of_ty_k) apply(simp) apply(rule_tac ty' = ty_var in sty_optionI) apply(simp) apply(rule map_of_ty_k', simp+) apply(clarsimp) apply(rename_tac x f y s') apply(erule tr_s.cases, simp+) apply(erule wf_stmtE, simp_all) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rename_tac f ty_x x y ty_y ty_f) apply(rule wf_field_writeI) apply(simp add: var_k_of_ty_k) apply(simp) apply(rule sty_optionI, (simp add: var_k_of_ty_k)+) apply(clarsimp) apply(rename_tac x y s1 s2 s') apply(erule tr_s.cases, simp+) apply(erule wf_stmtE, simp_all) apply(rule wf_ifI) apply(erule disjE) apply(rule disjI1) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rule sty_optionI, (simp add: var_k_of_ty_k split del: if_split)+) apply(rule disjI2) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rule sty_optionI, (simp add: var_k_of_ty_k)+) apply(clarsimp) apply(clarsimp) apply(clarsimp) apply(rename_tac var x meth ys s') apply(erule tr_s.cases, simp+) apply(erule wf_stmtE, simp_all) apply(clarsimp split del: if_split) apply(rule_tac y_ty_list = "map (\((y, y'), (yy, ty)). (y', ty)) (zip y_y'_list y_ty_lista)" in wf_mcallI[simplified]) apply(force simp add: map_map_zip_y'[simplified]) apply(simp add: var_k_of_ty_k) apply(force simp add: map_map_zip_ty[simplified]) apply(clarsimp simp add: set_zip split del: if_split) apply(simp add: subtypes_through_tr) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rule sty_optionI, (simp add: map_of_ty_k')+) apply(clarsimp) apply(rename_tac var ctx cl s') apply(erule tr_s.cases, simp+) apply(erule wf_stmtE, simp_all) apply(clarsimp) apply(rule wf_newI) apply(simp) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rule sty_optionI, (simp add: map_of_ty_k')+) done lemma wf_object_heap: "\wf_object P H v_opt (Some ty'); is_sty_one Pa ty_y_s ty_f = Some True; H oid_x = Some (ty_x_d, fs_oid)\ \ wf_object P (H(oid_x \ (ty_x_d, fs_oid(f \ v)))) v_opt (Some ty')" apply(erule wf_objectE) apply(clarsimp) apply(rule wf_nullI, simp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rule wf_objectI) apply(force intro: sty_optionI) done lemma not_dom_is_none: "((\(y, cl, var, var', v). x_var var') ` set y_cl_var_var'_v_list \ Map.dom L = {}) \ (\x\set y_cl_var_var'_v_list. (\(y, cl, var, var', v). L (x_var var') = None) x)" by force lemma type_to_val: "\wf_varstate P \ H L; sty_option P (\ x) (\ y)\ \ \v w. 
L x = Some v \ L y = Some w" apply(erule sty_option.cases) apply(clarsimp) apply(erule wf_varstateE) apply(frule_tac x = x in bspec, simp add: domI) apply(drule_tac x = y in bspec, simp add: domI) apply(elim wf_objectE) by (simp+) lemma find_path_fields[THEN mp]: "find_path_ty_f P ty = Some path \ (\fs. fields_in_path_f path = fs)" by (force) lemma replicate_eq_length: "replicate x c = replicate y c \ x = y" apply(subgoal_tac "length (replicate x c) = length (replicate y c)") apply(thin_tac _) apply(subgoal_tac "length (replicate x c) = x") apply(subgoal_tac "length (replicate y c) = y") by simp_all lemma string_infinite: "infinite (UNIV :: string set)" apply(simp add: infinite_iff_countable_subset) apply(rule_tac x = "\n. replicate n c" in exI) apply(rule linorder_injI) apply(clarify) apply(frule replicate_eq_length) apply(simp) done lemma x_infinite: "infinite (UNIV :: x set)" apply(simp add: infinite_iff_countable_subset) apply(rule_tac x = "\n. if n = 0 then x_this else x_var (replicate n c)" in exI) apply(rule linorder_injI) apply(clarsimp) done lemma fresh_oid: "wf_heap P H \ (\oid. oid \ dom H)" -apply(erule wf_heapE) apply(clarsimp) +apply(erule wf_heapE) apply(cut_tac infinite_UNIV_nat) apply(frule_tac A = "dom H" in ex_new_if_finite) apply(assumption) apply(simp) done lemma fresh_x: "finite A \ \x::x. x \ A" apply(cut_tac x_infinite) apply(frule_tac A = A in ex_new_if_finite) apply(assumption) apply(simp) done lemma fresh_var_not_in_list: "finite (A::x set) \ \var::var. x_var var \ A \ var \ set list" apply(cut_tac x_infinite) apply(subgoal_tac "finite (insert x_this A)") apply(frule_tac F = "insert x_this A" and G = "x_var ` set list" in finite_UnI) apply(simp) apply(frule_tac A = "insert x_this A \ x_var ` set list" in ex_new_if_finite) apply(assumption) apply(erule exE) apply(clarsimp) apply(case_tac a) apply(force+) done lemma fresh_vars[rule_format]: "finite (A::x set) \ (\list::var list. length list = i \ x_var` set list \ A = {} \ distinct list)" apply(induct i) apply(simp) apply(clarsimp) apply(frule fresh_var_not_in_list) apply(erule exE) apply(rule_tac x = "var#list" in exI) apply(simp) done lemma fresh_x_not_in_vars': "finite A \ \x'. x' \ A \ x' \ (x_var ` set vars')" apply(subgoal_tac "finite (x_var ` set vars')") apply(frule_tac G = "x_var ` set vars'" in finite_UnI) apply(assumption) apply(cut_tac x_infinite) apply(frule_tac A = "A \ x_var ` set vars'" in ex_new_if_finite) apply(simp) apply(simp) apply(simp) done lemma lift_opts_ft_length[rule_format]: "\tys. lift_opts (map (case_vd (\clk vark. find_type_f P ctx_o clk)) vds) = Some tys \ length tys = length vds" by (induct vds, auto split: vd.splits option.splits) lemma fpr_mem_has_path'[rule_format]: "\suffix. find_path_rec_f P ctx cl prefix = Some (prefix @ suffix) \ (\(ctx', cld') \ set suffix. \prefix'. (\suffix'. find_path_rec_f P ctx' (cl_fqn (fqn_def (class_name_f cld'))) prefix' = Some suffix'))" apply(induct_tac P ctx cl prefix rule: find_path_rec_f.induct) apply(simp) apply(clarsimp split: option.splits) apply(subgoal_tac "\aa ba aaa bb. (a = aaa \ b = bb \ aa = aaa \ ba = bb) \ (\suffix. find_path_rec_f P aaa (superclass_name_f bb) (path @ [(aaa, bb)]) = Some (path @ (aaa, bb) # suffix) \ (\x\set suffix. (\(ctx', cld'). \prefix'. \suffix'. case_option None (case_prod (\ctx' cld. find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)]))) (find_cld_f P ctx' (fqn_def (class_name_f cld'))) = Some suffix') x))") defer apply(thin_tac _)+ apply(force) apply(thin_tac "\aa ba x y. 
\a = aa \ b = ba; x = aa \ y = ba\ \ \suffix. find_path_rec_f P aa (superclass_name_f ba) (path @ [(aa, ba)]) = Some (path @ (aa, ba) # suffix) \ (\x\set suffix. case x of (ctx', cld') \ \prefix'. \suffix'. case_option None (\(ctx', cld). find_path_rec_f P ctx' (superclass_name_f cld) (prefix' @ [(ctx', cld)])) (find_cld_f P ctx' (fqn_def (class_name_f cld'))) = Some suffix')") apply(clarsimp) apply(frule path_append) apply(clarify) apply(erule_tac x = "tl suffix" in allE) apply(clarsimp) apply(erule disjE) apply(clarify) apply(clarsimp simp add: class_name_f_def split: cld.splits) apply(rename_tac list1 cl list2 list3) apply(case_tac fqn) apply(clarsimp) apply(frule find_cld_name_eq) apply(clarsimp simp add: class_name_f_def split: cld.splits) apply(frule_tac suffix = path'' and prefix' = " prefix' @ [(a, cld_def list1 cl list2 list3)]" in fpr_same_suffix') apply(simp) apply(simp) apply(drule_tac x = "(aa, ba)" in bspec, simp) apply(clarsimp split: option.splits) done lemma fpr_mem_has_path: "\find_path_f P ctx cl = Some suffix; (ctx', cld') \ set suffix\ \ \suffix'. find_path_rec_f P ctx' (cl_fqn (fqn_def (class_name_f cld'))) [] = Some suffix'" apply(cut_tac x = "(ctx', cld')" in fpr_mem_has_path'[of _ _ _ "[]"]) apply(simp add: find_path_f_def)+ done lemma mtype_to_find_md: "\wf_program P; mtype_f P ty_x_s m = Some (mty_def tys ty_r); is_sty_one P ty_x_d ty_x_s = Some True\ \ \ctx cl_r vds ss' y. find_meth_def_f P ty_x_d m = Some (ctx, meth_def_def (meth_sig_def cl_r m vds) (meth_body_def ss' y)) \ length tys = length vds" apply(clarsimp simp add: is_sty_one_def split: option.splits ty.splits) apply(clarsimp simp add: mtype_f_def find_meth_def_f_def) apply(case_tac ty_x_d) apply(simp) apply(clarsimp) apply(rename_tac path_d ctx_s dcl_s ctx_d dcl_d) apply(rename_tac cl_s fds_s mds_s) apply(frule fpr_mem_has_path) apply(simp) apply(clarsimp simp del: find_path_rec_f.simps) apply(frule_tac path = path_d and m = m and mty = "mty_def tys ty_r" and path' = suffix' in mty_preservation') apply(simp add: class_name_f_def find_path_f_def)+ apply(clarsimp simp add: mtype_f_def split: option.splits if_split_asm meth_def.splits meth_sig.splits) apply(rename_tac meth_body a b c d e f g) apply(case_tac meth_body) apply(simp) apply(clarsimp simp add: lift_opts_ft_length) apply(clarsimp simp add: find_meth_def_f_def split: option.splits) apply(frule find_md_m_match[rule_format]) apply(assumption) done lemma exist_lifted_values: "\\x\dom \. wf_object P H (L x) (\ x); \x\set y_ty_list. (\(y, ty). sty_option P (\ y) (Some ty)) x\ \ \vs. lift_opts (map (\(y, ty). L y) y_ty_list) = Some vs" apply(induct y_ty_list) apply(simp) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp) apply(drule_tac x = a in bspec, simp add: domI) apply(erule wf_objectE) apply(simp) apply(simp) done lemma [iff]: "length (tr_ss_f (map_of (zip (map (case_vd (\cl. x_var)) vds) (map x_var vars'))(x_this \ x')) ss') = length ss'" by (induct ss', auto) lemma collect_suc_eq_lt[simp]: "{f i |i::nat. i < Suc n} = {f i |i. i = 0} \ {f (Suc i) |i. i < n}" apply(induct n) apply(simp) apply(clarsimp) apply(simp add: less_Suc_eq) apply(simp add: image_Collect[THEN sym]) apply(simp add: Collect_disj_eq) apply(blast) done lemma [iff]: "\y_ty_list vars vars' vs. length y_ty_list = length vds \ length vars = length vds \ length vars' = length vds \ length vs = length vds \ map (\(y, cl, var, var', v). vd_def cl var) (zip (map fst y_ty_list) (zip (map (case_vd (\cl var. cl)) vds) (zip (map (case_vd (\cl var. 
var)) vds) (zip vars' vs)))) = vds" apply(induct vds) apply(simp) apply(clarsimp simp add: set_zip length_Suc_conv split: vd.splits) done lemma vars'_eq[rule_format]: "\y_ty_list vds vars vs. length y_ty_list = length vars' \ length vds = length vars' \ length vars = length vars' \ length vs = length vars' \ (\(y, cl, var, var', v). x_var var') ` set (zip (map fst y_ty_list) (zip (map (case_vd (\cl var. cl)) vds) (zip vars (zip vars' vs)))) = x_var ` set vars'" apply(induct vars') apply(simp) apply(clarsimp simp add: set_zip length_Suc_conv) done lemma [iff]: "\y_ty_list vds vars vs. length y_ty_list = length vars' \ length vds = length vars' \ length vars = length vars' \ length vs = length vars' \ map (\(y, cl, var, var', v). var') (zip (map fst y_ty_list) (zip (map (case_vd (\cl var. cl)) vds) (zip vars (zip vars' vs)))) = vars'" apply(induct vars') apply(simp) apply(clarsimp simp add: set_zip length_Suc_conv) done lemma [iff]: "\y_ty_list vds vars vs. length y_ty_list = length vars' \ length vds = length vars' \ length vars = length vars' \ length vs = length vars' \ map (\(y, cl, var, var', v). (x_var var, x_var var')) (zip (map fst y_ty_list) (zip (map (case_vd (\cl var. cl)) vds) (zip (map (case_vd (\cl var. var)) vds) (zip vars' vs)))) = zip (map (case_vd (\cl. x_var)) vds) (map x_var vars')" apply(induct vars') apply(simp) apply(clarsimp simp add: set_zip length_Suc_conv split: vd.splits) done lemma [iff]: "\vds vars vars' vs. length vds = length y_ty_list \ length vars = length y_ty_list \ length vars' = length y_ty_list \ length vs = length y_ty_list \ map (\(y, cl, var, var', v). y) (zip (map fst y_ty_list) (zip (map (case_vd (\cl var. cl)) vds) (zip vars (zip vars' vs)))) = map fst y_ty_list" apply(induct y_ty_list) apply(simp) apply(clarsimp simp add: set_zip length_Suc_conv) done lemma lift_opts_mapping: "\vds vars vars' vs. length vds = length y_ty_list \ length vars = length y_ty_list \ length vars' = length y_ty_list \ length vs = length y_ty_list \ lift_opts (map (\(y, ty). L y) y_ty_list) = Some vs \ (\x\set (zip (map fst y_ty_list) (zip (map (case_vd (\cl var. cl)) vds) (zip (map (case_vd (\cl var. var)) vds) (zip vars' vs)))). (\(y, cl, var, var', v). L y = Some v) x)" apply(induct y_ty_list) apply(simp) apply(clarsimp simp add: set_zip length_Suc_conv split: vd.splits option.splits) apply(rename_tac y1 y_ty_list v2 vs cl1 var1 var'1 v1 vds vars var'2 vars' i cl2 var2) apply(erule_tac x = vds in allE) apply(erule_tac x = vars in allE) apply(erule_tac x = vars' in allE) apply(simp) apply(case_tac i) apply(simp) apply(rename_tac j) apply(clarsimp) apply(erule_tac x = "map fst y_ty_list ! j" in allE) apply(clarsimp) apply(case_tac "zip (map (case_vd (\cl var. cl)) vds) (zip (map (case_vd (\cl var. var)) vds) (zip vars' vs)) ! j") apply(rename_tac cl1 var1 var'1 v1) apply(erule_tac x = cl1 in allE) apply(erule_tac x = var1 in allE) apply(erule_tac x = var'1 in allE) apply(erule_tac x = v1 in allE) apply(clarsimp) apply(erule_tac x = j in allE) apply(simp) done lemma length_y_ty_list_vs[rule_format]: "\vs. lift_opts (map (\(y, ty). L y) y_ty_list) = Some vs \ length y_ty_list = length vs" by (induct y_ty_list, auto split: option.splits) lemma translation_eq: "\x\set (zip (tr_ss_f (map_of (zip (map (case_vd (\cl. x_var)) vds) (map x_var vars'))(x_this \ x')) ss') ss'). (\(s'', s'). tr_s (map_of (zip (map (case_vd (\cl. 
x_var)) vds) (map x_var vars'))(x_this \ x')) s' s'') x" apply(induct ss') apply(simp add: tr_rel_f_eq)+ done theorem progress: "\wf_config \ (config_normal P L H S); S \ []\ \ \config'. r_stmt (config_normal P L H S) config'" apply(case_tac S) apply(simp) apply(clarsimp) apply(rename_tac s ss) apply(case_tac s) apply(erule_tac[1-7] wf_configE) apply(simp_all) \ \block\ apply(force intro: r_blockI[simplified]) \ \variable assignment\ apply(clarsimp, erule wf_stmtE, simp_all, clarsimp) apply(frule type_to_val, simp) apply(clarify) apply(frule r_var_assignI) apply(force) \ \field read\ apply(clarsimp, erule wf_stmtE, simp_all, clarsimp) apply(erule wf_varstateE) apply(drule_tac x = x in bspec, simp add: domI) apply(erule wf_objectE) apply(clarsimp) apply(frule r_field_read_npeI) apply(force) apply(clarsimp) apply(erule_tac ?a3.0 = "Some ty" in sty_option.cases) apply(clarsimp split: option.splits) apply(erule wf_heapE) apply(drule_tac x = oid in bspec, simp add: domI) apply(clarsimp) apply(rename_tac x oid ty_x_s ty_x_d fields_oid fs) apply(frule no_field_hiding, simp+) apply(drule_tac x = f in bspec, simp) apply(clarsimp) apply(erule wf_objectE) apply(clarsimp, frule_tac H = H in r_field_readI, simp, force)+ \ \field write\ apply(clarsimp, erule wf_stmtE, simp_all, clarsimp) apply(erule wf_varstateE) apply(frule_tac x = x in bspec, simp add: domI) apply(erule wf_objectE) apply(clarsimp) apply(frule r_field_write_npeI) apply(force) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp) apply(rename_tac ty_y ty_f) apply(drule_tac x = y in bspec, simp add: domI) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(erule wf_objectE) apply(clarsimp) apply(frule_tac H = H and y = y in r_field_writeI, simp, force)+ \ \conditional branch\ apply(clarsimp, erule wf_stmtE, simp_all, clarsimp) apply(erule disjE) apply(frule type_to_val, simp, clarify) apply(case_tac "v = w") apply(frule_tac y = y in r_if_trueI, force+) apply(frule_tac y = y in r_if_falseI, force+) apply(frule type_to_val, simp, clarify) apply(case_tac "v = w") apply(frule_tac y = y and v = w in r_if_trueI, force+) apply(frule_tac y = y and v = w in r_if_falseI, force+) \ \object creation\ apply(clarsimp, erule wf_stmtE, simp_all, clarsimp) apply(rename_tac cl ctx ty var) apply(erule sty_option.cases) apply(clarsimp) apply(rename_tac ty_cl ty_var) apply(simp add: is_sty_one_def split: option.splits) apply(rename_tac path) apply(frule find_path_fields) apply(erule exE) apply(frule fresh_oid) apply(erule exE) apply(frule_tac H = H and L = L and var = var and s_list = ss and f_list = fs in r_newI[simplified]) apply(clarsimp simp add: fields_f_def split: option.splits) apply(assumption) apply(simp) apply(force) \ \method call\ apply(clarsimp, erule wf_stmtE, simp_all, clarsimp) apply(rename_tac ss y_ty_list x ty_x_s m ty_r_s var) apply(erule wf_varstateE) apply(frule_tac x = x in bspec, simp add: domI) apply(clarsimp) apply(erule wf_objectE) apply(clarsimp) apply(frule r_mcall_npeI) apply(force) apply(elim sty_option.cases) apply(clarsimp split: option.splits) apply(rename_tac ty_r_s ty_var_s ty_x_s ty_x_d fs_x) apply(frule mtype_to_find_md, simp+) apply(clarsimp) apply(frule_tac A = "dom L" and i = "length vds" in fresh_vars) apply(clarsimp) apply(rename_tac vars') apply(frule exist_lifted_values) apply(simp) apply(clarify) apply(frule_tac vars' = vars' in fresh_x_not_in_vars') apply(erule exE) apply(erule conjE) apply(subgoal_tac "\vars. vars = map (\vd. 
case vd of vd_def cl var \ var) vds") apply(erule exE) apply(subgoal_tac "length vars = length vds") apply(frule length_y_ty_list_vs) apply(subgoal_tac "\T. T = (map_of (zip (map (\vd. case vd of vd_def cl var \ x_var var) vds) (map x_var vars')))(x_this \ x')") apply(erule exE) apply(frule_tac H = H and P = P and meth = m and ctx = ctx and cl = cl_r and y = y and ty = ty_x_d and y_cl_var_var'_v_list = "zip (map fst y_ty_list) (zip (map (\vd. case vd of vd_def cl var \ cl) vds) (zip vars (zip vars' vs)))" and s''_s'_list = "zip (tr_ss_f T ss') ss'" and var = var and s_list = ss and x' = x' and T = T in r_mcallI[simplified]) apply(force) apply(simp) apply(simp) apply(simp add: vars'_eq) apply(simp) apply(assumption) apply(simp add: vars'_eq) apply(cut_tac L = L and y_ty_list = y_ty_list in lift_opts_mapping) apply(erule_tac x = vds in allE) apply(erule_tac x = vars in allE) apply(erule_tac x = vars' in allE) apply(erule_tac x = vs in allE) apply(simp) apply(simp) apply(simp) apply(simp add: translation_eq) apply(simp) apply(force) by force+ theorem wf_preservation: "\\ config. \wf_config \ config; r_stmt config config'\ \ \\'. \ \\<^sub>m \' \ wf_config \' config'" apply(erule r_stmt.cases) (* s_read_npe, s_write_npe *) (* \' = \ for all cases except for mcall (case 11) *) apply(rule_tac[1-10] x = \ in exI) apply(erule_tac[1-11] wf_configE) apply(simp_all) (* s_if s_block *) apply(clarsimp) apply(erule wf_stmtE) apply(simp_all) apply(force intro: wf_config_normalI) (* s_ass *) apply(clarsimp) apply(erule wf_stmtE) apply(simp_all) apply(rule wf_config_normalI, assumption+) apply(erule sty_option.cases) apply(rule wf_subvarstateI, assumption+, simp) apply(erule wf_varstateE) apply(drule_tac x = x in bspec, simp add: domI) apply(erule wf_objectE) apply(simp add: wf_nullI) apply(clarsimp) apply(rule wf_objectI) apply(erule sty_option.cases, simp) apply(rule sty_optionI, simp+) apply(rule_tac ty' = ty'a in sty_transitiveI, assumption+) (* s_read *) apply(force intro: wf_intros) apply(clarsimp split: option.splits) apply(erule wf_stmtE) apply(simp_all) apply(rule wf_config_normalI, assumption+) apply(clarsimp) apply(rename_tac L oid H v s_list ty_oid fs_oid \ x ty_x P f ty_f var) apply(erule sty_option.cases) apply(rule wf_subvarstateI, assumption, simp) apply(clarsimp) apply(case_tac v) apply(clarify) apply(rule wf_nullI, simp) apply(clarify) apply(rename_tac ty_f ty_var P oid_v) apply(erule wf_heapE) apply(drule_tac x = oid in bspec, simp add: domI) apply(clarsimp) apply(rename_tac H P fs_oid) apply(erule wf_varstateE) apply(frule_tac x = x in bspec, simp add: domI) apply(clarsimp) apply(erule wf_objectE) apply(simp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rename_tac L \ H oid_x ty_x_d ty_x_s P) apply(frule_tac ty_x_s = ty_x_s in no_field_hiding, simp+) apply(drule_tac x = f in bspec, simp) apply(clarsimp) apply(frule ftype_preservation[simplified], assumption+) apply(clarsimp) apply(rename_tac ty_f) apply(erule wf_objectE) apply(simp) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rename_tac H oid_v ty_f P ty_v fs_v) apply(rule wf_objectI) apply(rule sty_optionI, simp+) apply(rule sty_transitiveI, assumption+) (* s_write *) apply(force intro: wf_intros) apply(clarsimp, hypsubst_thin) apply(erule wf_stmtE) apply(simp_all) apply(clarsimp split: option.splits) apply(rule) apply(erule wf_varstateE) apply(clarsimp) apply(rename_tac \ P H) apply(drule_tac x = xa in bspec, simp add: domI) apply(erule wf_objectE) apply(simp) 
apply(clarsimp) apply(elim sty_option.cases) apply(simp) apply(erule sty_option.cases) apply(clarsimp) apply(rule wf_config_normalI) apply(assumption) apply(rename_tac L oid_x v H ss \ x ty_x_s f y ty_y_s ty_f P ty_x_d fs_oid) apply(erule wf_heapE) apply(rule wf_heapI) apply(simp) apply(rule ballI) apply(clarsimp simp add: map_add_def) apply(rule) apply(clarsimp) apply(drule_tac x = oid_x in bspec, simp add: domI) apply(clarsimp) apply(drule_tac x = fa in bspec, simp) apply(clarsimp) apply(rule) apply(clarsimp) apply(case_tac v) apply(clarsimp simp add: wf_nullI) apply(clarsimp) apply(rename_tac H P fields_x_d ty_f_d oid_v) apply(erule wf_varstateE) apply(clarsimp) apply(frule ftype_preservation, assumption+) apply(clarsimp) apply(drule_tac x = y in bspec, simp add: domI) apply(clarsimp) apply(erule_tac ?a4.0 = "Some ty_y_s" in wf_objectE) apply(simp) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rule wf_objectI) apply(force intro: sty_optionI sty_transitiveI) apply(clarsimp) apply(case_tac "fs_oid fa") apply(clarsimp) apply(force elim: wf_objectE) apply(case_tac a) apply(force intro: wf_nullI) apply(clarsimp) apply(rename_tac H P fields_x_d f ty_f_d oid_v) apply(erule wf_varstateE) apply(clarsimp) apply(drule_tac x = y in bspec, simp add: domI) apply(clarsimp) apply(erule wf_objectE) apply(simp) apply(clarsimp) apply(rule wf_objectI) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(safe) apply(clarsimp) apply(force intro: sty_optionI) apply(force intro: sty_optionI) apply(drule_tac x = oid in bspec) apply(clarsimp) apply(clarsimp) apply(drule_tac x = fa in bspec, simp) apply(clarsimp simp add: wf_object_heap) apply(rename_tac L oid_x v H ss \ x ty_x_s f y ty_y_s ty_f P ty_x_d fs_x) apply(erule wf_varstateE) apply(rule wf_varstateI) apply(simp) apply(clarsimp) apply(rename_tac L \ P H x' y') apply(drule_tac x = x' in bspec, simp add: domI) apply(clarsimp) apply(erule wf_objectE) apply(clarsimp) apply(rule wf_nullI, simp) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rename_tac H oid_x' ty_x'_s P ty_x'_d fs_x') apply(rule wf_objectI) apply(rule sty_optionI) apply(clarsimp) apply(rule conjI) apply(clarsimp) apply(simp) apply(clarsimp) apply(simp) apply(simp) (* s_if *) apply(erule wf_stmtE) apply(simp_all) apply(force intro: wf_config_normalI) apply(erule wf_stmtE) apply(simp_all) apply(force intro: wf_config_normalI) (* s_new *) apply(erule contrapos_np) apply(erule wf_stmtE, simp_all, clarsimp) apply(erule sty_option.cases, clarsimp) apply(rule wf_config_normalI) apply(assumption) apply(rename_tac H L ss ctx cl \ var ty' ty_var P) apply(erule wf_heapE) apply(rule wf_heapI) apply(simp) apply(safe) apply(simp add: map_upd_Some_unfold) apply(split if_split_asm) apply(clarsimp) apply(frule field_has_type, simp+) apply(erule exE) apply(rule_tac x = ty in exI) apply(simp) apply(force intro: wf_nullI) apply(drule_tac x = oida in bspec, simp add: domI) apply(clarsimp split: option.splits) apply(drule_tac x = f in bspec, simp) apply(safe) apply(rule_tac x = ty'a in exI) apply(simp) apply(erule wf_objectE) apply(clarsimp) apply(force intro: wf_nullI) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rule wf_objectI) apply(clarsimp) apply(rule conjI) apply(force) apply(force intro: sty_optionI) apply(rename_tac oid_new H L ss ctx cl \ var ty_new ty_var P) apply(erule wf_varstateE) apply(clarsimp) apply(rule wf_varstateI) apply(simp) apply(rule ballI) apply(drule_tac x = 
x in bspec, simp) apply(clarsimp) apply(rule conjI) apply(clarsimp) apply(rule wf_objectI) apply(clarsimp) apply(force intro: sty_optionI) apply(rule) apply(erule wf_objectE) apply(clarsimp) apply(force intro: wf_nullI) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rule wf_objectI) apply(clarsimp) apply(rule) apply(rule) apply(force) apply(rule) apply(force intro: sty_optionI) (* s_call *) apply(force intro: wf_all_exI) (* setting the new type environment *) apply(case_tac "H oid", simp) apply(clarsimp) apply(erule wf_stmtE, simp_all, clarsimp) apply(frule wf_method_from_find[simplified]) apply(simp) apply(safe) apply(erule_tac Q = "\\'. \ \\<^sub>m \' \ ~ (wf_config \' (config_normal Pa ((L ++ map_of (map (\(y_XXX, cl_XXX, var_XXX, var_', y). (x_var var_', y)) y_cl_var_var'_v_list))(x' \ v_oid oid)) H (map fst s''_s'_list @ s_ass vara (case if y = x_this then Some x' else map_of (map (\(y_XXX, cl_XXX, var_XXX, var_', v_XXX). (x_var var_XXX, x_var var_')) y_cl_var_var'_v_list) y of None \ y | Some x' \ x') # s_list)))" in contrapos_pp) apply(simp only: not_all) apply(rule_tac x = "\ ++ map_of (map (\((y, cl, var, var', v), (y', ty)). (x_var var', ty)) (zip y_cl_var_var'_v_list y_ty_list)) ++ Map.empty (x' \ ty_def ctx dcl)" in exI) apply(clarsimp split del: if_split) apply(rule) apply(clarsimp simp add: map_le_def split del: if_split) apply(rule sym) apply(rule_tac L = L and Pa = Pa and H = H in map_of_map_and_x) apply(assumption+) apply(simp add: not_dom_is_none) apply(erule wf_varstateE) apply(clarsimp) apply(rename_tac ss ty_x_d fs_x y_ty_list \ x ty_x_s P meth ty_r var dcl) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(rename_tac ty_r ty_var P) apply(erule wf_varstateE, clarify) apply(rename_tac L \ P H) apply(frule_tac x = x in bspec, simp add: domI) apply(frule_tac x = "x_var var" in bspec, simp add: domI) apply(clarsimp split del: if_split) apply(erule wf_objectE, simp, clarsimp split del: if_split) apply(rename_tac P H oid) apply(erule sty_option.cases, clarsimp split del: if_split) apply(rename_tac ty_x_d ty_x_s P) apply(subgoal_tac "x_var var \ dom \") apply(clarify) apply(rename_tac v_var) apply(drule not_dom_is_none) apply(rule wf_config_normalI) apply(assumption) apply(assumption) (* varstate *) apply(rule wf_varstateI) apply(simp only: finite_super_varstate) apply(clarsimp) apply(rule) apply(rule wf_objectI) apply(simp) apply(rule sty_optionI, simp+) apply(clarsimp, hypsubst_thin) apply(rule) apply(rule) apply(rule wf_objectI) apply(clarsimp) apply(rule sty_optionI, simp+) apply(rule) apply(erule disjE) apply(clarsimp) apply(drule map_of_SomeD) apply(clarsimp) apply(rename_tac y_k cl_k var_k var'_k v_k y'_k ty_k) apply(frule_tac x = "(y_k, cl_k, var_k, var'_k, v_k)" in bspec) apply(force simp add: set_zip) apply(clarsimp) apply(subgoal_tac "map_of (map (\(y, cl, var, var', v). 
(x_var var', v)) y_cl_var_var'_v_list) (x_var var'_k) = Some v_k") apply(clarsimp) apply(drule map_y) apply(simp) apply(drule_tac x = "(y_k, ty_k)" in bspec, assumption) apply(clarsimp) apply(erule sty_option.cases) apply(clarsimp) apply(rename_tac ty_y_k ty_k P) apply(drule_tac x = y_k in bspec, simp add: domI) apply(clarsimp) apply(case_tac v_k) apply(clarify, rule wf_nullI, simp) apply(clarsimp) apply(rename_tac oid_y_k) apply(erule_tac ?a4.0 = "Some ty_y_k" in wf_objectE) apply(simp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rule wf_objectI) apply(clarsimp) apply(rule sty_optionI, simp+) apply(rule_tac ty' = ty' in sty_transitiveI, assumption+) apply(rule map_of_is_SomeI) apply(simp add: map_fst_var'[simplified]) apply(rule set_zip_var'_v, simp) apply(clarify) apply(rename_tac x_G ty_x_G) apply(frule_tac x = x_G in bspec, simp add: domI) apply(case_tac "map_of (map (\((y, cl, var, var', v), y', y). (x_var var', y)) (zip y_cl_var_var'_v_list y_ty_list)) x_G") apply(frule map_of_map_zip_none, simp+) apply(simp add: map_add_def) apply(frule map_of_map_zip_some, simp+) apply(clarsimp) apply(drule map_of_SomeD) apply(drule map_of_SomeD) apply(clarsimp simp add: set_zip) apply(frule same_el, simp+) apply(drule_tac s = "(aa, ab, ac, ai, b)" in sym) apply(clarsimp) apply(rename_tac y_k cl_k var_k v_k y'_k ty_k var'_k i) apply(frule_tac y_k = y_k and cl_k = cl_k and var_k = var_k and var'_k = var'_k and v_k = v_k in same_y, simp+) apply(clarsimp) apply(drule_tac x = "(y_k, ty_k)" in bspec, simp) apply(clarsimp) apply(rename_tac y_k ty_k) apply(erule sty_option.cases) apply(clarsimp) apply(drule_tac x = y_k in bspec, simp add: domI) apply(clarsimp) apply(drule_tac x = "(y_k, cl_k, var_k, var'_k, v_k)" in bspec) apply(drule_tac t = "(y_k, cl_k, var_k, var'_k, v_k)" in sym) apply(simp) apply(clarsimp) apply(erule_tac ?a4.0 = "Some ty" in wf_objectE) apply(clarsimp) apply(rule wf_nullI, simp) apply(erule sty_option.cases) apply(clarsimp split: option.splits) apply(rule wf_objectI) apply(clarsimp) apply(rule sty_optionI, simp+) apply(rule_tac ty' = ty'a in sty_transitiveI, assumption+) (* assignment *) apply(clarsimp split del: if_split) apply(rule) apply(rule wf_var_assignI) apply(frule wf_method_from_find) apply(simp) apply(erule exE) apply(erule wf_methE) apply(case_tac "ya = x_this") apply(clarsimp split del: if_split) apply(rule_tac ty' = ty_var in sty_optionI) apply(simp) apply(simp) apply(case_tac ctx, clarify) apply(frule_tac ty_r_d = ty in mty_preservation, assumption+) apply(clarify) apply(erule sty_option.cases) apply(clarsimp) apply(rule_tac ty' = ty' in sty_transitiveI, assumption+) (* y != this *) apply(erule sty_option.cases) apply(clarsimp split del: if_split) apply(frule_tac var_k = ya and ty_k = tya in exists_var') apply(drule map_of_vd) apply(assumption+) apply(erule exE) apply(clarsimp) apply(drule_tac y = "x_var var'_k" in map_of_SomeD) apply(clarsimp split del: if_split) apply(rename_tac y_k cl_k var_k var'_k v_k) apply(frule x'_not_in_list, assumption+) apply(clarsimp) apply(drule map_of_SomeD) apply(clarsimp split del: if_split) apply(rename_tac cl_k' var_k ty_k) apply(case_tac ctx, clarify) apply(frule mty_preservation, assumption+) apply(clarsimp split del: if_split) apply(drule map_of_vd) apply(frule map_of_ty_k, assumption+) apply(rule_tac ty = ty_k and ty' = ty_var in sty_optionI) apply(simp add: map_add_def) apply(simp) apply(simp) apply(rule_tac ty' = ty_r in sty_transitiveI) apply(assumption+) (* wf of translated statements *) 
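(* Remaining goals of the method-call case: mty_preservation relates the method type found in the dynamic class ty_x_d to the one used for static typing, wf_tr_stmt_in_G' then gives well-formedness of the translated body s'' in the environment extended by the renamed parameters and x', and wf_stmt_in_G' below handles the non-translated continuation ss. *)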
apply(clarsimp) apply(erule disjE) apply(erule wf_methE) apply(clarsimp) apply(frule map_of_vd[rule_format]) apply(case_tac ctx, clarify) apply(frule mty_preservation, simp+) apply(clarsimp) apply(rename_tac P dcl cl y meth s'' s') apply(drule_tac x = "(s'', s')" in bspec, simp)+ apply(clarsimp) apply(cut_tac cl_var_ty_list = cl_var_ty_list and y_cl_var_var'_v_list = y_cl_var_var'_v_list and y_ty_list = y_ty_list and P = P and ty_x_d = ty_x_d and ty_x_m = "ty_def ctx_def dcl" and x' = x' and s'' = s'' and \ = \ in wf_tr_stmt_in_G') apply(clarsimp) (* wf of non-translated statements *) apply(cut_tac L = L and x' = x' and y_cl_var_var'_v_list = y_cl_var_var'_v_list and \ = \ and P = P and H = H and y_ty_list = y_ty_list and ty = "ty_def ctx dcl" and ss = ss in wf_stmt_in_G') apply(clarsimp) apply(erule wf_objectE) apply(force) apply(force) done end diff --git a/thys/List_Update/BIT.thy b/thys/List_Update/BIT.thy --- a/thys/List_Update/BIT.thy +++ b/thys/List_Update/BIT.thy @@ -1,2158 +1,2158 @@ (* Title: Competitive Analysis of BIT Author: Max Haslbeck *) section "BIT: an Online Algorithm for the List Update Problem" theory BIT imports Bit_Strings MTF2_Effects begin abbreviation "config'' A qs init n == config_rand A init (take n qs)" lemma sum_my: fixes f g::"'b \ 'a::ab_group_add" assumes "finite A" "finite B" shows "(\x\A. f x) - (\x\B. g x) = (\x\(A \ B). f x - g x) + (\x\A-B. f x) - (\x\B-A. g x)" proof - have "finite (A-B)" and "finite (A\B)" and "finite (B-A)" and "finite (B\A)" using assms by auto note finites=this have "(A-B) \ ( (A\B) ) = {}" and "(B-A) \ ( (B\A) ) = {}" by auto note inters=this have commute: "A\B=B\A" by auto have "A = (A-B) \ (A\B)" and "B = (B-A) \ ( (B\A))" by auto then have "(\x\A. f x) - (\x\B. g x) = (\x\(A-B) \ (A\B). f x) - (\x\(B-A) \ (B\A). g x)" by auto also have "\ = ( (\x\(A-B). f x) + (\x\(A\B). f x) - (\x\(A-B)\(A\B). f x) ) -( (\x\(B-A). g x) + (\x\(B\A). g x) - (\x\(B-A)\(B\A). g x))" using sum_Un[where ?f="f",OF finites(1) finites(2)] sum_Un[where ?f="g",OF finites(3) finites(4)] by(simp) also have "\ = ( (\x\(A-B). f x) + (\x\(A\B). f x) ) - (\x\(B-A). g x) - (\x\(B\A). g x) " using inters by auto also have "\ = (\x\(A-B). f x) - (\x\(A\B). g x) + (\x\(A\B). f x) - (\x\(B-A). g x) " using commute by auto also have "\ = (\x\(A\B). f x - g x) +(\x\(A-B). f x) - (\x\(B-A). g x)" using sum_subtractf[of f g "(A\B)"] by auto finally show ?thesis . qed lemma sum_my2: "(\x\A. f x = g x) \ (\x\A. f x) = (\x\A. g x)" by auto subsection "Definition of BIT" definition BIT_init :: "('a state,bool list * 'a list)alg_on_init" where "BIT_init init = map_pmf (\l. (l,init)) (bv (length init))" lemma "~ deterministic_init BIT_init" unfolding deterministic_init_def BIT_init_def apply(auto) apply(intro exI[where x="[a]"]) \ \comment in a proof\ by(auto simp: UNIV_bool set_pmf_bernoulli) definition BIT_step :: "('a state, bool list * 'a list, 'a, answer)alg_on_step" where "BIT_step s q = ( let a=((if (fst (snd s))!(index (snd (snd s)) q) then 0 else (length (fst s))),[]) in return_pmf (a , (flip (index (snd (snd s)) q) (fst (snd s)), snd (snd s))))" lemma "deterministic_step BIT_step" unfolding deterministic_step_def BIT_step_def by simp abbreviation BIT :: "('a state, bool list*'a list, 'a, answer)alg_on_rand" where "BIT == (BIT_init, BIT_step)" subsection "Properties of BIT's state distribution" lemma BIT_no_paid: "\((free,paid),_) \ (BIT_step s q). 
paid=[]" unfolding BIT_step_def by(auto) subsubsection "About the Internal State" term "(config'_rand (BIT_init, BIT_step) s0 qs) " lemma config'_n_init: fixes qs init n shows "map_pmf (snd \ snd) (config'_rand (BIT_init, BIT_step) init qs) = map_pmf (snd \ snd) init" apply (induct qs arbitrary: init) by (simp_all add: map_pmf_def bind_assoc_pmf BIT_step_def bind_return_pmf ) lemma config_n_init: "map_pmf (snd \ snd) (config_rand (BIT_init, BIT_step) s0 qs) = return_pmf s0" using config'_n_init[of "((fst (BIT_init, BIT_step) s0) \ (\is. return_pmf (s0, is)))"] by (simp_all add: map_pmf_def bind_assoc_pmf bind_return_pmf BIT_init_def ) lemma config_n_init2: "\(_,(_,x)) \ set_pmf (config_rand (BIT_init, BIT_step) init qs). x = init" proof (rule, goal_cases) case (1 z) then have 1: "snd(snd z) \ (snd \ snd) ` set_pmf (config_rand (BIT_init, BIT_step) init qs)" by force have "(snd \ snd) ` set_pmf (config_rand (BIT_init, BIT_step) init qs) = set_pmf (map_pmf (snd \ snd) (config_rand (BIT_init, BIT_step) init qs))" by(simp) also have "\ = {init}" apply(simp only: config_n_init) by simp finally have "snd(snd z) = init" using 1 by auto then show ?case by auto qed lemma config_n_init3: "\x \ set_pmf (config_rand (BIT_init, BIT_step) init qs). snd (snd x) = init" using config_n_init2 by(simp add: split_def) lemma config'_n_bv: fixes qs init n shows " map_pmf (snd \ snd) init = return_pmf s0 \ map_pmf (fst \ snd) init = bv (length s0) \ map_pmf (snd \ snd) (config'_rand (BIT_init, BIT_step) init qs) = return_pmf s0 \ map_pmf (fst \ snd) (config'_rand (BIT_init, BIT_step) init qs) = bv (length s0)" proof (induct qs arbitrary: init) case (Cons r rs) from Cons(2) have a: "map_pmf (snd \ snd) (init \ (\s. snd (BIT_init, BIT_step) s r \ (\(a, is'). return_pmf (step (fst s) r a, is')))) = return_pmf s0" apply(simp add: BIT_step_def) by (simp_all add: map_pmf_def bind_assoc_pmf BIT_step_def bind_return_pmf ) then have b: "\z\set_pmf (init \ (\s. snd (BIT_init, BIT_step) s r \ (\(a, is'). return_pmf (step (fst s) r a, is')))). snd (snd z) = s0" by (metis (mono_tags, lifting) comp_eq_dest_lhs map_pmf_eq_return_pmf_iff) show ?case apply(simp only: config'_rand.simps) proof (rule Cons(1), goal_cases) case 2 have "map_pmf (fst \ snd) (init \ (\s. snd (BIT_init, BIT_step) s r \ (\(a, is'). return_pmf (step (fst s) r a, is')))) = map_pmf (flip (index s0 r)) (bv (length s0))" using b apply(simp add: BIT_step_def Cons(3)[symmetric] bind_return_pmf map_pmf_def bind_assoc_pmf ) apply(rule bind_pmf_cong) apply(simp) by(simp add: inv_flip_bv) also have "\ = bv (length s0)" using inv_flip_bv by auto finally show ?case . qed (fact) qed simp lemma config_n_bv_2: "map_pmf (snd \ snd) (config_rand (BIT_init, BIT_step) s0 qs) = return_pmf s0 \ map_pmf (fst \ snd) (config_rand (BIT_init, BIT_step) s0 qs) = bv (length s0)" apply(rule config'_n_bv) by(simp_all add: bind_return_pmf map_pmf_def bind_assoc_pmf bind_return_pmf' BIT_init_def) lemma config_n_bv: "map_pmf (fst \ snd) (config_rand (BIT_init, BIT_step) s0 qs) = bv (length s0)" using config_n_bv_2 by auto lemma config_n_fst_init_length: "\(_,(x,_)) \ set_pmf (config_rand (BIT_init, BIT_step) s0 qs). 
length x = length s0" proof fix x::"('a list \ (bool list \ 'a list))" assume ass:"x \ set_pmf (config_rand (BIT_init, BIT_step) s0 qs)" let ?a="fst (snd x)" from ass have "(fst x,(?a,snd (snd x))) \ set_pmf (config_rand (BIT_init, BIT_step) s0 qs)" by auto with ass have "?a \ (fst \ snd) ` set_pmf (config_rand (BIT_init, BIT_step) s0 qs)" by force then have "?a \ set_pmf (map_pmf (fst \ snd) (config_rand (BIT_init, BIT_step) s0 qs))" by auto then have "?a \ bv (length s0)" by(simp only: config_n_bv) then have "length ?a = length s0" by (auto simp: len_bv_n) then show "case x of (uu_, xa, uua_) \ length xa = length s0" by(simp add: split_def) qed lemma config_n_fst_init_length2: "\x \ set_pmf (config_rand (BIT_init, BIT_step) s0 qs). length (fst (snd x)) = length s0" using config_n_fst_init_length by(simp add: split_def) lemma fperms: "finite {x::'a list. length x = length init \ distinct x \ set x = set init}" apply(rule finite_subset[where B="{xs. set xs \ set init \ length xs \ length init}"]) apply(force) apply(rule finite_lists_length_le) by auto lemma finite_config_BIT: assumes [simp]: "distinct init" shows "finite (set_pmf (config_rand (BIT_init, BIT_step) init qs))" (is "finite ?D") proof - have a: "(fst \ snd) ` ?D \ {x. length x = length init}" using config_n_fst_init_length2 by force have c: "(snd \ snd) ` ?D = {init}" proof - have "(snd \ snd) ` set_pmf (config_rand (BIT_init, BIT_step) init qs) = set_pmf (map_pmf (snd \ snd) (config_rand (BIT_init, BIT_step) init qs))" by(simp) also have "\ = {init}" apply(subst config_n_init) by simp finally show ?thesis . qed from a c have d: "snd ` ?D \ {x. length x = length init} \ {init}" by force have b: "fst ` ?D \ {x. length x = length init \ distinct x \ set x = set init}" using config_rand by fastforce from b d have "?D \ {x. length x = length init \ distinct x \ set x = set init} \ ({x. length x = length init} \ {init})" by auto then show ?thesis apply (rule finite_subset) apply(rule finite_cartesian_product) apply(rule fperms) apply(rule finite_cartesian_product) apply (rule bitstrings_finite) by(simp) qed subsection "BIT is $1.75$-competitive (a combinatorial proof)" subsubsection "Definition of the Locale and Helper Functions" locale BIT_Off = fixes acts :: "answer list" fixes qs :: "'a list" fixes init :: "'a list" assumes dist_init[simp]: "distinct init" assumes len_acts: "length acts = length qs" begin lemma setinit: "(index init) ` set init = {0.. {x. (a \ x)}) = set as" by fastforce have 2: "(\a. Suc (index as a)) ` set as = (\a. Suc a) ` ((index as) ` set as )" by auto show ?case apply(simp add: 1 2 iH) by auto qed simp definition free_A :: "nat list" where (* free exchanges of A *) "free_A = map fst acts" definition paid_A' :: "nat list list" where (* paid exchanges of A' *) "paid_A' = map snd acts" definition paid_A :: "nat list list" where (* paid exchanges of A *) "paid_A = map (filter (\x. Suc x < length init)) paid_A'" lemma len_paid_A[simp]: "length paid_A = length qs" unfolding paid_A_def paid_A'_def using len_acts by auto lemma len_paid_A'[simp]: "length paid_A' = length qs" unfolding paid_A'_def using len_acts by auto lemma paidAnm_inbound: "n < length paid_A \ m < length(paid_A!n) \ (Suc ((paid_A!n)!(length (paid_A ! n) - Suc m))) < length init" proof - assume "n < length paid_A" then have "n < length paid_A'" by auto then have a: "(paid_A!n) = filter (\x. Suc x < length init) (paid_A' ! n)" unfolding paid_A_def by auto let ?filtered="(filter (\x. Suc x < length init) (paid_A' ! 
n))" assume mtt: "m < length (paid_A!n)" with a have "(length (paid_A ! n) - Suc m) < length ?filtered" by auto with nth_mem have b: "Suc(?filtered ! (length (paid_A ! n) - Suc m)) < length init" by force show "Suc (paid_A ! n ! (length (paid_A ! n) - Suc m)) < length init" using a b by auto qed fun s_A' :: "nat \ 'a list" where "s_A' 0 = init" | "s_A'(Suc n) = step (s_A' n) (qs!n) (free_A!n, paid_A'!n)" lemma length_s_A'[simp]: "length(s_A' n) = length init" by (induction n) simp_all lemma dist_s_A'[simp]: "distinct(s_A' n)" by(induction n) (simp_all add: step_def) lemma set_s_A'[simp]: "set(s_A' n) = set init" by(induction n) (simp_all add: step_def) fun s_A :: "nat \ 'a list" where "s_A 0 = init" | "s_A(Suc n) = step (s_A n) (qs!n) (free_A!n, paid_A!n)" lemma length_s_A[simp]: "length(s_A n) = length init" by (induction n) simp_all lemma dist_s_A[simp]: "distinct(s_A n)" by(induction n) (simp_all add: step_def) lemma set_s_A[simp]: "set(s_A n) = set init" by(induction n) (simp_all add: step_def) lemma cost_paidAA': "n < length paid_A' \ length (paid_A!n) \ length (paid_A'!n)" unfolding paid_A_def by simp lemma swaps_filtered: "swaps (filter (\x. Suc x < length xs) ys) xs = swaps (ys) xs" apply (induct ys) by auto lemma sAsA': "n < length paid_A' \ s_A' n = s_A n" proof (induct n) case (Suc m) have " s_A' (Suc m) = mtf2 (free_A!m) (qs!m) (swaps (paid_A'!m) (s_A' m))" by (simp add: step_def) also from Suc(2) have "\ = mtf2 (free_A!m) (qs!m) (swaps (paid_A!m) (s_A' m))" unfolding paid_A_def by (simp only: nth_map swaps_filtered[where xs="s_A' m", simplified]) also have "\ = mtf2 (free_A!m) (qs!m) (swaps (paid_A!m) (s_A m))" using Suc by auto also have "\ = s_A (Suc m)" by (simp add: step_def) finally show ?case . qed simp lemma sAsA'': "n < length qs \ s_A n = s_A' n" using sAsA' by auto definition t_BIT :: "nat \ real" where (* BIT's cost in nth step *) "t_BIT n = T_on_rand_n BIT init qs n" definition T_BIT :: "nat \ real" where (* BIT's cost in first n steps *) "T_BIT n = (\i int" where "c_A n = index (swaps (paid_A!n) (s_A n)) (qs!n) + 1" definition f_A :: "nat \ int" where "f_A n = min (free_A!n) (index (swaps (paid_A!n) (s_A n)) (qs!n))" definition p_A :: "nat \ int" where "p_A n = size(paid_A!n)" definition t_A :: "nat \ int" where "t_A n = c_A n + p_A n" definition c_A' :: "nat \ int" where "c_A' n = index (swaps (paid_A'!n) (s_A' n)) (qs!n) + 1" definition p_A' :: "nat \ int" where "p_A' n = size(paid_A'!n)" definition t_A' :: "nat \ int" where "t_A' n = c_A' n + p_A' n" lemma t_A_A'_leq: "n < length paid_A' \ t_A n \ t_A' n" unfolding t_A_def t_A'_def c_A_def c_A'_def p_A_def p_A'_def apply(simp add: sAsA') unfolding paid_A_def by (simp add: swaps_filtered[where xs="(s_A n)", simplified]) definition T_A' :: "nat \ int" where "T_A' n = (\i int" where "T_A n = (\i length paid_A' \ T_A n \ T_A' n" unfolding T_A'_def T_A_def apply(rule sum_mono) by (simp add: t_A_A'_leq) lemma T_A_A'_leq': "n \ length qs \ T_A n \ T_A' n" using T_A_A'_leq by auto fun s'_A :: "nat \ nat \ 'a list" where "s'_A n 0 = s_A n" | "(s'_A n (Suc m)) = swap ((paid_A ! n)!(length (paid_A ! n) -(Suc m)) ) (s'_A n m)" lemma set_s'_A[simp]: "set (s'_A n m) = set init" apply(induct m) by(auto) lemma len_s'_A[simp]: "length (s'_A n m) = length init" apply(induct m) by(auto) lemma distperm_s'_A[simp]: "dist_perm (s'_A n m) init" apply(induct m) by auto lemma s'A_m_le: "m \ (length (paid_A ! n)) \ swaps (drop (length (paid_A ! n) - m) (paid_A ! 
n)) (s_A n) = s'_A n m" apply(induct m) apply(simp) proof - fix m assume iH: "(m \ length (paid_A ! n) \ swaps (drop (length (paid_A ! n) - m) (paid_A ! n)) (s_A n) = s'_A n m)" assume Suc: "Suc m \ length (paid_A ! n)" then have "m \ length (paid_A ! n)" by auto with iH have x: "swaps (drop (length (paid_A ! n) - m) (paid_A ! n)) (s_A n) = s'_A n m" by auto from Suc have mlen: "(length (paid_A ! n) - Suc m) < length (paid_A ! n)" by auto let ?l="length (paid_A ! n) - Suc m" let ?Sucl="length (paid_A ! n) - m" have Sucl: "Suc ?l = ?Sucl" using Suc by auto from mlen have yu: "((paid_A ! n)! ?l ) # (drop (Suc ?l) (paid_A ! n)) = (drop ?l (paid_A ! n))" by (rule Cons_nth_drop_Suc) from Suc have "s'_A n (Suc m) = swap ((paid_A ! n)!(length (paid_A ! n) - (Suc m)) ) (s'_A n m)" by auto also have "\ = swap ((paid_A ! n)!(length (paid_A ! n) - (Suc m)) ) (swaps (drop (length (paid_A ! n) - m) (paid_A ! n)) (s_A n))" by(simp only: x) also have "\ = (swaps (((paid_A ! n)!(length (paid_A ! n) - (Suc m)) ) # (drop (length (paid_A ! n) - m) (paid_A ! n))) (s_A n))" by auto also have "\ = (swaps (((paid_A ! n)! ?l ) # (drop (Suc ?l) (paid_A ! n))) (s_A n))" using Sucl by auto also from mlen have "\ = (swaps ((drop ?l (paid_A ! n))) (s_A n))" by (simp only: yu) finally have " s'_A n (Suc m) = swaps (drop (length (paid_A ! n) - Suc m) (paid_A ! n)) (s_A n)" . then show " swaps (drop (length (paid_A ! n) - Suc m) (paid_A ! n)) (s_A n) = s'_A n (Suc m)" by auto qed lemma s'A_m: "swaps (paid_A ! n) (s_A n) = s'_A n (length (paid_A ! n))" using s'A_m_le[of "(length (paid_A ! n))" "n", simplified] by auto definition gebub :: "nat \ nat \ nat" where "gebub n m = index init ((s'_A n m)!(Suc ((paid_A!n)!(length (paid_A ! n) - Suc m))))" lemma gebub_inBound: assumes 1: " n < length paid_A " and 2: "m < length (paid_A ! n)" shows "gebub n m < length init" proof - have "Suc (paid_A ! n ! (length (paid_A ! n) - Suc m)) < length (s'_A n m)" using paidAnm_inbound[OF 1 2] by auto then have "s'_A n m ! Suc (paid_A ! n ! (length (paid_A ! n) - Suc m)) \ set (s'_A n m)" by (rule nth_mem) then show ?thesis unfolding gebub_def using setinit by auto qed subsubsection "The Potential Function" fun phi :: "nat \'a list\ (bool list \ 'a list) \ real" ("\") where "phi n (c,(b,_)) = (\(x,y)\(Inv c (s_A n)). (if b!(index init y) then 2 else 1))" lemma phi': "phi n z = (\(x,y)\(Inv (fst z) (s_A n)). (if (fst (snd z))!(index init y) then 2 else 1))" proof - have "phi n z = phi n (fst z, (fst(snd z),snd(snd z)))" by (metis prod.collapse) also have "\ = (\(x,y)\(Inv (fst z) (s_A n)). (if (fst (snd z))!(index init y) then 2 else 1))" by(simp del: prod.collapse) finally show ?thesis . qed lemma Inv_empty2: "length d = 0 \ Inv c d = {}" unfolding Inv_def before_in_def by(auto) corollary Inv_empty3: "length init = 0 \ Inv c (s_A n) = {}" apply(rule Inv_empty2) by (metis length_s_A) lemma phi_empty2: "length init = 0 \ phi n (c,(b,i)) = 0" apply(simp only: phi.simps Inv_empty3) by auto lemma phi_nonzero: "phi n (c,(b,i)) \ 0" by (simp add: sum_nonneg split_def) (* definition of the potential function! *) definition Phi :: "nat \ real" ("\") where "Phi n = E( map_pmf (\ n) (config'' BIT qs init n))" definition PhiPlus :: "nat \ real" ("\\<^sup>+") where "PhiPlus n = (let nextconfig = bind_pmf (config'' BIT qs init n) (\(s,is). bind_pmf (BIT_step (s,is) (qs!n)) (\(a,nis). 
return_pmf (step s (qs!n) a,nis)) ) in E( map_pmf (phi (Suc n)) nextconfig) )" lemma PhiPlus_is_Phi_Suc: "n PhiPlus n = Phi (Suc n)" unfolding PhiPlus_def Phi_def apply (simp add: bind_return_pmf map_pmf_def bind_assoc_pmf split_def take_Suc_conv_app_nth ) apply(simp add: config'_rand_snoc) by(simp add: bind_assoc_pmf split_def bind_return_pmf) lemma phi0: "Phi 0 = 0" unfolding Phi_def by (simp add: bind_return_pmf map_pmf_def bind_assoc_pmf BIT_init_def) lemma phi_pos: "Phi n \ 0" unfolding Phi_def apply(rule E_nonneg_fun) using phi_nonzero by auto subsubsection "Helper lemmas" lemma swap_subs: "dist_perm X Y \ Inv X (swap z Y) \ Inv X Y \ {(Y ! z, Y ! Suc z)}" proof - assume "dist_perm X Y" note aj = Inv_swap[OF this, of z] show "Inv X (swap z Y) \ Inv X Y \ {(Y ! z, Y ! Suc z)}" proof cases assume c1: "Suc z < length X" show "Inv X (swap z Y) \ Inv X Y \ {(Y ! z, Y ! Suc z)}" proof cases assume "Y ! z < Y ! Suc z in X" with c1 have "Inv X (swap z Y) = Inv X Y \ {(Y ! z, Y ! Suc z)}" using aj by auto then show "Inv X (swap z Y) \ Inv X Y \ {(Y ! z, Y ! Suc z)}" by auto next assume "~ Y ! z < Y ! Suc z in X" with c1 have "Inv X (swap z Y) = Inv X Y - {(Y ! Suc z, Y ! z)}" using aj by auto then show "Inv X (swap z Y) \ Inv X Y \ {(Y ! z, Y ! Suc z)}" by auto qed next assume "~ (Suc z < length X)" then have "Inv X (swap z Y) = Inv X Y" using aj by auto then show "Inv X (swap z Y) \ Inv X Y \ {(Y ! z, Y ! Suc z)}" by auto qed qed subsubsection "InvOf" term "Inv" (* BIT A *) abbreviation "InvOf y bits as \ {(x,y)|x. x < y in bits \ y < x in as}" lemma "InvOf y xs ys = {(x,y)|x. (x,y)\Inv xs ys}" unfolding Inv_def by auto lemma "InvOf y xs ys \ Inv xs ys" unfolding Inv_def by auto lemma numberofIsbeschr: assumes distxsys: "dist_perm xs ys" and yinxs: "y \ set xs" shows "index xs y \ index ys y + card (InvOf y xs ys)" (is "?iBit \ ?iA + card ?I") proof - from assms have distinctxs: "distinct xs" and distinctys: "distinct ys" and yinys: "y \ set ys" by auto let ?A="fst ` ?I" have aha: "card ?A = card ?I" apply(rule card_image) unfolding inj_on_def by(auto) have "?A \ (before y xs)" by(auto) have "?A \ (after y ys)" by auto have "finite (before y ys)" by auto have bef: "(before y xs) - ?A \ before y ys" apply(auto) proof - fix x assume a: "x < y in xs" assume " x \ fst ` {(x, y) |x. x < y in xs \ y < x in ys}" then have "~ (x < y in xs \ y < x in ys)" by force with a have d: "~ y < x in ys" by auto from a have "x \ set xs" by (rule before_in_setD1) with distxsys have b: "x \ set ys" by auto from a have "y \ set xs" by (rule before_in_setD2) with distxsys have c: "y \ set ys" by auto from a have e: "~ x = y" unfolding before_in_def by auto have "(\ y < x in ys) = (x < y in ys \ y = x)" apply(rule not_before_in) using b c by auto with d e show "x < y in ys" by auto qed have "(index xs y) - card (InvOf y xs ys) = card (before y xs) - card ?A" by(simp only: aha card_before[OF distinctxs yinxs]) also have "\ = card ((before y xs) - ?A)" apply(rule card_Diff_subset[symmetric]) by auto also have "\ \ card (before y ys)" apply(rule card_mono) apply(simp) apply(rule bef) done also have "\ = (index ys y)" by(simp only: card_before[OF distinctys yinys]) finally have "index xs y - card ?I \ index ys y" . then show "index xs y \ index ys y + card ?I" by auto qed lemma "length init = 0 \ length xs = length init \ t xs q (mf, sws) = 1 + length sws" unfolding t_def by(auto) lemma integr_index: "integrable (measure_pmf (config'' (BIT_init, BIT_step) qs init n)) (\(s, is). real (Suc (index s (qs ! 
n))))" apply(rule measure_pmf.integrable_const_bound[where B="Suc (length init)"]) apply(simp add: split_def) apply (metis (mono_tags) index_le_size AE_measure_pmf_iff config_rand_length) by (auto) subsubsection "Upper Bound on the Cost of BIT" lemma t_BIT_ub2: "(qs!n) \ set init \ t_BIT n \ Suc(size init)" apply(simp add: t_BIT_def t_def BIT_step_def) apply(simp add: bind_return_pmf) proof (goal_cases) case 1 note qs=this let ?D = "(config'' (BIT_init, BIT_step) qs init n)" have absch: "(\x\ set_pmf ?D. ((\(s,is). real (Suc (index s (qs ! n)))) x) \ ((\(is,s). Suc (length init)) x))" proof (rule ballI, goal_cases) case (1 x) from 1 config_rand_length have f1: "length (fst x) = length init" by fastforce from 1 config_rand_set have 2: "set (fst x) = set init" by fastforce from qs 2 have "(qs!n) \ set (fst x)" by auto then show ?case using f1 by (simp add: split_def) qed have "integrable (measure_pmf (config'' (BIT_init, BIT_step) qs init n)) (\(s, is). Suc (length init))" by(simp) have "E(bind_pmf ?D (\(s, is). return_pmf (real (Suc (index s (qs ! n)))))) = E(map_pmf (\(s, is). real (Suc (index s (qs ! n)))) ?D)" by(simp add: split_def map_pmf_def) also have "\ \ E(map_pmf (\(s, is). Suc (length init)) ?D)" apply (rule E_mono3) apply(fact integr_index) apply(simp) using absch by auto also have "\ = Suc (length init)" by(simp add: split_def) finally show ?case by(simp add: map_pmf_def bind_assoc_pmf bind_return_pmf split_def) qed lemma t_BIT_ub: "(qs!n) \ set init \ t_BIT n \ size init" apply(simp add: t_BIT_def t_def BIT_step_def) apply(simp add: bind_return_pmf) proof (goal_cases) case 1 note qs=this let ?D = "(config'' (BIT_init, BIT_step) qs init n)" have absch: "(\x\ set_pmf ?D. ((\(s, is). real (Suc (index s (qs ! n)))) x) \ ((\(s, is). length init) x))" proof (rule ballI, goal_cases) case (1 x) from 1 config_rand_length have f1: "length (fst x) = length init" by fastforce from 1 config_rand_set have 2: "set (fst x) = set init" by fastforce from qs 2 have "(qs!n) \ set (fst x)" by auto then have "(index (fst x) (qs ! n)) < length init" apply(rule index_less) using f1 by auto then show ?case by (simp add: split_def) qed have "E(bind_pmf ?D (\(s, is). return_pmf (real (Suc (index s (qs ! n)))))) = E(map_pmf (\(s, is). real (Suc (index s (qs ! n)))) ?D)" by(simp add: split_def map_pmf_def) also have "\ \ E(map_pmf (\(s, is). length init) ?D)" apply(rule E_mono3) apply(fact integr_index) apply(simp) using absch by auto also have "\ = length init" by(simp add: split_def) finally show ?case by(simp add: map_pmf_def bind_assoc_pmf bind_return_pmf split_def) qed lemma T_BIT_ub: "\i set init \ T_BIT n \ n * size init" proof(induction n) case 0 show ?case by(simp add: T_BIT_def) next case (Suc n) thus ?case using t_BIT_ub[where n="n"] by (simp add: T_BIT_def) qed subsubsection "Main Lemma" lemma myub: "n < length qs \ t_BIT n + Phi(n + 1) - Phi n \ (7 / 4) * t_A n - 3/4" proof - assume nqs: "n < length qs" have "t_BIT n + Phi (n+1) - Phi n \ (7 / 4) * t_A n - 3/4" proof (cases "length init > 0") case False show ?thesis proof - from False have qsn: "(qs!n) \ set init" by auto from False have l0: "length init = 0" by auto then have "length (swaps (paid_A ! n) (s_A n)) = 0" using length_s_A by auto with l0 have 4: "t_A n = 1 + length (paid_A ! n)" unfolding t_A_def c_A_def p_A_def by(simp) have 1: "t_BIT n \ 1" using t_BIT_ub2[OF qsn] l0 by auto { fix m have "phi m = (\(b,(a,i)). phi m (b,(a,i)))" by auto also have "\ = (\(b,(a,i)). 
0)" by(simp only: phi_empty2[OF l0]) finally have "phi m= (\(b,(a,i)). 0)". } note phinull=this have 2: "PhiPlus n = 0" unfolding PhiPlus_def apply(simp) apply(simp only: phinull) by (auto simp: split_def) have 3:"Phi n = 0" unfolding Phi_def apply(simp only: phinull) by (auto simp: split_def) have "t_A n \ 1 \ 1 \ 7 / 4 * (t_A n) - 3 / 4" by(simp) with 4 have 5: "1 \ 7 / 4 * (t_A n) - 3 / 4" by auto from 1 2 3 have "t_BIT n + PhiPlus n - Phi n \ 1" by auto also from 5 have "\ \ 7 / 4 * (t_A n) - 3 / 4" by auto finally show ?thesis using PhiPlus_is_Phi_Suc nqs by auto qed next case True let ?l = "length init" from True obtain l' where lSuc: "?l = Suc l'" by (metis Suc_pred) have 31: "n < length paid_A" using nqs by auto define q where "q = qs!n" define D where [simp]: "D = (config'' (BIT_init, BIT_step) qs init n)" define cost where [simp]: "cost = (\(s, is).(t s q (if (fst is) ! (index (snd is) q) then 0 else length s, [])))" define \\<^sub>2 where [simp]: "\\<^sub>2 = (\(s, is). ((phi (Suc n)) (step s q (if (fst is) ! (index (snd is) q) then 0 else length s, []),(flip (index (snd is) q) (fst is), snd is))))" define \\<^sub>0 where [simp]: "\\<^sub>0 = phi n" have inEreinziehn: "t_BIT n + Phi (n+1) - Phi n = E (map_pmf (\x. (cost x) + (\\<^sub>2 x) - (\\<^sub>0 x)) D)" proof - have "bind_pmf D (\(s, is). bind_pmf (BIT_step (s, is) (q)) (\(a,nis). return_pmf (real(t s (q) a)))) = bind_pmf D (\(s, is). return_pmf (t s q (if (fst is) ! (index (snd is) q) then 0 else length s, [])))" unfolding BIT_step_def apply (auto simp: bind_return_pmf split_def) by (metis prod.collapse) also have "\ = map_pmf cost D" by (auto simp: map_pmf_def split_def) finally have rightform1: "bind_pmf D (\(s, is). bind_pmf (BIT_step (s, is) (q)) (\(a,nis). return_pmf (real(t s (q) a)))) = map_pmf cost D" . have rightform2: "map_pmf (phi (Suc n)) (bind_pmf D (\(s, is). bind_pmf (BIT_step (s, is) (q)) (\(a, nis). return_pmf (step s (q) a, nis)))) = map_pmf \\<^sub>2 D" apply(simp add: bind_return_pmf bind_assoc_pmf map_pmf_def split_def BIT_step_def) by (metis prod.collapse) have "t_BIT n + Phi (n+1) - Phi n = t_BIT n + PhiPlus n - Phi n" using PhiPlus_is_Phi_Suc nqs by auto also have "\ = T_on_rand_n BIT init qs n + E (map_pmf (phi (Suc n)) (bind_pmf D (\(s, is). bind_pmf (BIT_step (s, is) (q)) (\(a, nis). return_pmf (step s (q) a, nis))))) - E (map_pmf (phi n) D) " unfolding PhiPlus_def Phi_def t_BIT_def q_def by auto also have "\ = E (bind_pmf D (\(s, is). bind_pmf (BIT_step (s, is) (q)) (\(a,nis). return_pmf (t s (q) a)))) + E (map_pmf (phi (Suc n)) (bind_pmf D (\(s, is). bind_pmf (BIT_step (s, is) (q)) (\(a, nis). return_pmf (step s (q) a, nis))))) - E (map_pmf \\<^sub>0 D)" by (auto simp: q_def split_def) also have "\ = E (map_pmf cost D) + E (map_pmf \\<^sub>2 D) - E (map_pmf \\<^sub>0 D)" using rightform1 rightform2 split_def by auto also have "\ = E (map_pmf (\x. (cost x) + (\\<^sub>2 x)) D) - E (map_pmf (\x. (\\<^sub>0 x)) D)" unfolding D_def using E_linear_plus2[OF finite_config_BIT[OF dist_init]] by auto also have "\ = E (map_pmf (\x. (cost x) + (\\<^sub>2 x) - (\\<^sub>0 x)) D)" unfolding D_def by(simp only: E_linear_diff2[OF finite_config_BIT[OF dist_init]] split_def) finally show "t_BIT n + Phi (n+1) - Phi n = E (map_pmf (\x. 
(cost x) + (\\<^sub>2 x) - (\\<^sub>0 x)) D)" by auto qed define xs where [simp]: "xs = s_A n" define xs' where [simp]: "xs' = swaps (paid_A!n) xs" define xs'' where [simp]: "xs'' = mtf2 (free_A!n) (q) xs'" define k where [simp]: "k = index xs' q" (* position of the requested element in A's list *) define k' where [simp]: "k' = max 0 (k-free_A!n)" (* position where A moves the requested element to *) have [simp]: "length xs = length init" by auto have dp_xs_init[simp]: "dist_perm xs init" by auto text "The Transformation" have ub_cost: "\x\set_pmf D. (real (cost x)) + (\\<^sub>2 x) - (\\<^sub>0 x) \ k + 1 + (if (q) \ set init then (if (fst (snd x))!(index init q) then k-k' else (\ji<(length (paid_A!n)). (if (fst (snd x))!(gebub n i) then 2 else 1))" proof (rule, goal_cases) case (1 x) note xinD=1 then have [simp]: "snd (snd x) = init" using D_def config_n_init3 by fast define b where "b = fst (snd x)" define ys where "ys = fst x" define aBIT where [simp]: "aBIT = (if b ! (index (snd (snd x)) q) then 0 else length ys, ([]::nat list))" define ys' where "ys' = step ys (q) aBIT" define b' where "b' = flip (index init q) b" define \\<^sub>1 where "\\<^sub>1 = (\z:: 'a list\ (bool list \ 'a list) . (\(x,y)\(Inv ys xs'). (if fst (snd z)!(index init y) then 2::real else 1)))" have xs''_step: "xs'' = step xs (q) (free_A!n,paid_A!n)" unfolding xs'_def xs''_def xs_def step_def free_A_def paid_A_def by(auto simp: split_def) have gis2: "(\\<^sub>2 (ys,(b,init))) = (\(x,y)\(Inv ys' xs''). (if b'!(index init y) then 2 else 1))" apply(simp only: split_def) apply(simp only: xs''_step) apply(simp only: \\<^sub>2_def phi.simps) unfolding b'_def b_def ys'_def aBIT_def q_def unfolding s_A.simps apply(simp only: split_def) by auto then have gis: "\\<^sub>2 x = (\(x,y)\(Inv ys' xs''). (if b'!(index init y) then 2 else 1))" unfolding ys_def b_def by (auto simp: split_def) have his2: "(\\<^sub>0 (ys,(b,init))) = (\(x,y)\(Inv ys xs). (if b!(index init y) then 2 else 1))" apply(simp only: split_def) apply(simp only: \\<^sub>0_def phi.simps) by(simp add: split_def) then have his: "(\\<^sub>0 x) = (\(x,y)\(Inv ys xs). (if b!(index init y) then 2 else 1))" by(auto simp: ys_def b_def split_def phi') have dis: "\\<^sub>1 x = (\(x,y)\(Inv ys xs'). (if b!(index init y) then 2 else 1))" unfolding \\<^sub>1_def b_def by auto have "ys' = mtf2 (fst aBIT) (q) ys" by (simp add: step_def ys'_def) from config_rand_distinct[of BIT] config_rand_set[of BIT] xinD have dp_ys_init[simp]: "dist_perm ys init" unfolding D_def ys_def by force have dp_ys'_init[simp]: "dist_perm ys' init" unfolding ys'_def step_def by (auto) then have lenys'[simp]: "length ys' = length init" by (metis distinct_card) have dp_xs'_init[simp]: "dist_perm xs' init" by auto have gra: "dist_perm ys xs'" by auto have leninitb[simp]: "length b = length init" using b_def config_n_fst_init_length2 xinD[unfolded] by auto have leninitys[simp]: "length ys = length init" using dp_ys_init by (metis distinct_card) {fix m have "dist_perm ys (s'_A n m)" using dp_ys_init by auto } note dist=this text "Upper bound of the inversions created by paid exchanges of A" (* ============================================ first we adress the paid exchanges paid cost of A: p_A *) let ?paidUB="(\i<(length (paid_A!n)). (if b!(gebub n i) then 2::real else 1))" have paid_ub: "\\<^sub>1 x \ \\<^sub>0 x + ?paidUB" proof - have a: "length (paid_A ! n) \ length (paid_A ! n)" by auto have b: "xs' = (s'_A n (length (paid_A ! 
n)))" using s'A_m by auto { fix m have "m\length (paid_A!n) \ (\(x,y)\(Inv ys (s'_A n m)). (if b!(index init y) then 2::real else 1)) \ (\(x,y)\(Inv ys xs). (if b!(index init y) then 2 else 1)) + (\i length (paid_A ! n)" and m_bd: "m < length (paid_A ! n)" by auto note yeah = Suc(1)[OF m_bd2] let ?revm="(length (paid_A ! n) - Suc m)" note ah=Inv_swap[of "ys" "(s'_A n m)" "(paid_A ! n ! ?revm)", OF dist] have "(\(xa, y)\Inv ys (s'_A n (Suc m)). if b ! (index init y) then 2::real else 1) = (\(xa, y)\Inv ys (swap (paid_A ! n ! ?revm) (s'_A n m)). if b ! (index init y) then 2 else 1)" using s'_A.simps(2) by auto also have "\ = (\(xa, y)\(if Suc (paid_A ! n ! ?revm) < length ys then if s'_A n m ! (paid_A ! n ! ?revm) < s'_A n m ! Suc (paid_A ! n ! ?revm) in ys then Inv ys (s'_A n m) \ {(s'_A n m ! (paid_A ! n ! ?revm), s'_A n m ! Suc (paid_A ! n ! ?revm))} else Inv ys (s'_A n m) - {(s'_A n m ! Suc (paid_A ! n ! ?revm), s'_A n m ! (paid_A ! n ! ?revm))} else Inv ys (s'_A n m)). if b ! (index init y) then 2::real else 1)" by (simp only: ah) also have "\ \ (\(xa, y)\Inv ys (s'_A n m). if b ! (index init y) then 2::real else 1) + (if (b) ! (index init (s'_A n m ! Suc (paid_A ! n ! ?revm))) then 2::real else 1)" (is "?A \ ?B") proof(cases "Suc (paid_A ! n ! ?revm) < length ys") case False (* FIXME! can't occur! because it has already been filtered out! see: then have "False" using paidAnm_inbound apply(auto) using m_bd nqs by blast *) then have "?A = (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1)" by auto also have "\ \ (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1) + (if b ! (index init (s'_A n m ! Suc (paid_A ! n ! ?revm))) then 2::real else 1)" by auto finally show "?A \ ?B" . next case True then have "?A = (\(xa, y)\(if s'_A n m ! (paid_A ! n ! ?revm) < s'_A n m ! Suc (paid_A ! n ! ?revm) in ys then Inv ys (s'_A n m) \ {(s'_A n m ! (paid_A ! n ! ?revm), s'_A n m ! Suc (paid_A ! n ! ?revm))} else Inv ys (s'_A n m) - {(s'_A n m ! Suc (paid_A ! n ! ?revm), s'_A n m ! (paid_A ! n ! ?revm))} ). if b ! (index init y) then 2 else 1)" by auto also have "\ \ ?B" (is "?A' \ ?B") proof (cases "s'_A n m ! (paid_A ! n ! ?revm) < s'_A n m ! Suc (paid_A ! n ! ?revm) in ys") case True let ?neurein="(s'_A n m ! (paid_A ! n ! ?revm), s'_A n m ! Suc (paid_A ! n ! ?revm))" from True have "?A' = (\(xa, y)\(Inv ys (s'_A n m) \ {?neurein} ). if b ! (index init y) then 2 else 1)" by auto also have "\ = (\(xa, y)\insert ?neurein (Inv ys (s'_A n m) ). if b ! (index init y) then 2 else 1)" by auto also have "\ \ (if b ! (index init (snd ?neurein)) then 2 else 1) + (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1)" proof (cases "?neurein \ Inv ys (s'_A n m)") case True then have "insert ?neurein (Inv ys (s'_A n m)) = (Inv ys (s'_A n m))" by auto then have "(\(xa, y)\insert ?neurein (Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1) = (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1)" by auto also have "\ \ (if b ! (index init (snd ?neurein)) then 2::real else 1) + (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1)" by auto finally show ?thesis . next case False have "(\(xa, y)\insert ?neurein (Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1) = (\y\insert ?neurein (Inv ys (s'_A n m)). (\i. if b ! (index init (snd i)) then 2 else 1) y)" by(auto simp: split_def) also have "\ = (\i. if b ! (index init (snd i)) then 2 else 1) ?neurein + (\y\(Inv ys (s'_A n m)) - {?neurein}. (\i. if b ! 
(index init (snd i)) then 2 else 1) y)" apply(rule sum.insert_remove) by(auto) also have "\ = (if b ! (index init (snd ?neurein)) then 2 else 1) + (\y\(Inv ys (s'_A n m)). (\i. if b ! (index init (snd i)) then 2::real else 1) y)" using False by auto also have "\ \ (if b ! (index init (snd ?neurein)) then 2 else 1) + (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1)" by(simp only: split_def) finally show ?thesis . qed also have "\ = (\(xa, y)\Inv ys (s'_A n m). if b ! (index init y) then 2 else 1) + (if b ! (index init (s'_A n m ! Suc (paid_A ! n ! ?revm))) then 2 else 1)" by auto finally show ?thesis . next case False then have "?A' = (\(xa, y)\(Inv ys (s'_A n m) - {(s'_A n m ! Suc (paid_A ! n ! ?revm), s'_A n m ! (paid_A ! n ! ?revm))} ). if b ! (index init y) then 2 else 1)" by auto also have "\ \ (\(xa, y)\(Inv ys (s'_A n m)). if b ! (index init y) then 2 else 1)" (is "(\(xa, y)\?X-{?x}. ?g y) \ (\(xa, y)\?X. ?g y) ") proof (cases "?x \ ?X") case True have "(\(xa, y)\?X-{?x}. ?g y) \ (%(xa,y). ?g y) ?x + (\(xa, y)\?X-{?x}. ?g y)" by simp also have "\ = (\(xa, y)\?X. ?g y)" apply(rule sum.remove[symmetric]) apply simp apply(fact) done finally show ?thesis . qed simp also have "\ \ ?B" by auto finally show ?thesis . qed finally show "?A \ ?B" . qed also have "\ \ (\(xa, y)\Inv ys (s_A n). if b ! (index init y) then 2::real else 1) + (\i = (\(xa, y)\Inv ys (s_A n). if b ! (index init y) then 2::real else 1) + (\i = (\(xa, y)\Inv ys (s_A n). if b ! (index init y) then 2::real else 1) + (\i<(Suc m). if b ! gebub n i then 2 else 1)" by auto finally show ?case by simp qed (simp add: split_def) } note x = this[OF a] show ?thesis unfolding \\<^sub>1_def his apply(simp only: b) using x b_def by auto qed text "Upper bound for the costs of BIT" define inI where [simp]: "inI = InvOf (q) ys xs'" define I where [simp]: "I = card(InvOf (q) ys xs')" (* ys is BITs list, xs' is A's list after paid exchanges *) have ub_cost_BIT: "(cost x) \ k + 1 + I" proof (cases "(q) \ set init") case False (* cannot occur! ! ! OBSOLETE *) from False have 4: "I = 0" by(auto simp: before_in_def) have "(cost x) = 1 + index ys (q)" by (auto simp: ys_def t_def split_def) also have "\ = 1 + length init" using False by auto also have "\ = 1 + k" using False by auto finally show ?thesis using 4 by auto next case True then have gra2: "(q) \ set ys" using dp_ys_init by auto have "(cost x) = 1 + index ys (q)" by(auto simp: ys_def t_def split_def) also have "\ \ k + 1 + I" using numberofIsbeschr[OF gra gra2] by auto finally show"(cost x) \ k + 1 + I" . qed text "Upper bound for inversions generated by free exchanges" (* ================================================ *) (* ================================================ *) (* second part: FREE EXCHANGES *) define ub_free where "ub_free = (if (q \ set init) then (if b!(index init q) then k-k' else (\j(x,y)\(Inv ys' xs''). (if b' !(index init y) then 2 else 1 ) ) - (\(x,y)\(Inv ys xs'). (if b!(index init y) then 2 else 1) ) \ ?ub2" proof (cases "(q) \ set init") case False from False have 1: "ys' = ys" unfolding ys'_def step_def mtf2_def by(simp) from False have 2: "xs' = xs''" unfolding xs''_def mtf2_def by(simp) from False have "(index init q) \ length b" using setinit by auto then have 3: "b' = b" unfolding b'_def using flip_out_of_bounds by auto from False have 4: "I = 0" unfolding I_def before_in_def by(auto) note ubnn=False have nn: "k-k'\0" unfolding k_def k'_def by auto from 1 2 3 4 have "(\(x,y)\(Inv ys' xs''). 
(if b'!(index init y) then 2::real else 1)) - (\(x,y)\(Inv ys xs'). (if b!(index init y) then 2 else 1)) = -I" by auto with ubnn show ?thesis unfolding ub_free_def by auto next case True note queryinlist=this then have gra2: "q \ set ys" using dp_ys_init by auto have k_inbounds: "k < length init" using index_less_size_conv queryinlist by (simp) { fix y e fix X::"bool list" assume rd: "e < length X" have "y < length X \ (if flip e X ! y then 2::real else 1) - (if X ! y then 2 else 1) = (if e=y then (if X ! y then -1 else 1) else 0)" proof cases assume "y < length X" and ey: "e=y" then have "(if flip e X ! y then 2::real else 1) - (if X ! y then 2 else 1) = (if X ! y then 1::real else 2) - (if X ! y then 2 else 1)" using flip_itself by auto also have "\ = (if X ! y then -1::real else 1)" by auto finally show "(if flip e X ! y then 2::real else 1) - (if X ! y then 2 else 1) = (if e=y then (if X ! y then -1 else 1) else 0)" using ey by auto next assume len: "y < length X" and eny: "e\y" then have "(if flip e X ! y then 2::real else 1) - (if X ! y then 2 else 1) = (if X ! y then 2::real else 1) - (if X ! y then 2 else 1)" using flip_other[OF len rd eny] by auto also have "\ = 0" by auto finally show "(if flip e X ! y then 2::real else 1) - (if X ! y then 2 else 1) = (if e=y then (if X ! y then -1 else 1) else 0)" using eny by auto qed } note flipstyle=this from queryinlist setinit have qsfst: "(index init q) < length b" by simp have fA: "finite (Inv ys' xs'')" by auto have fB: "finite (Inv ys xs')" by auto define \ where [simp]: "\ = (\(x,y)\(Inv ys' xs''). (if b'!(index init y) then 2::real else 1)) - (\(x,y)\(Inv ys xs'). (if b!(index init y) then 2 else 1))" define C where [simp]: "C = (\(x,y)\(Inv ys' xs'') \ (Inv ys xs'). (if b'!(index init y) then 2::real else 1) - (if b!(index init y) then 2 else 1))" define A where [simp]: "A = (\(x,y)\(Inv ys' xs'')-(Inv ys xs'). (if b'!(index init y) then 2::real else 1))" define B where [simp]: "B = (\(x,y)\(Inv ys xs')-(Inv ys' xs''). (if b!(index init y) then 2::real else 1))" have teilen: "\ = C + A - B" (* C A B *) unfolding \_def A_def B_def C_def using sum_my[OF fA fB] by (auto simp: split_def) then have "\ = A - B + C" by auto then have teilen2: "\\<^sub>2 x - \\<^sub>1 x = A - B + C" unfolding \_def using dis gis by auto have setys': "(index init) ` (set ys') = {0.. = {0.. = {0.. -I" proof (cases "b!(index init q)") (* case distinction on whether the bit of the requested element is set *) case True then have samesame: "ys' = ys" unfolding ys'_def step_def by auto then have puh: "(Inv ys' xs') = (Inv ys xs')" by auto { fix \ \ assume "(\,\)\(Inv ys' xs'') \ (Inv ys' xs')" then have "(\,\)\(Inv ys' xs'')" by auto then have "(\< \ in ys')" unfolding Inv_def by auto then have 1: "\ \ set ys'" by (simp only: before_in_setD2) then have "index init \ < length ys'" using setys' by auto then have "index init \ < length init" using lenys' by auto then have puzzel: "index init \ < length b" using leninitb by auto have betainit: "\ \ set init" using 1 by auto have aha: "(q=\) = (index init q = index init \)" using betainit by simp have "(if b'!(index init \) then 2::real else 1) - (if b!(index init \) then 2 else 1) = (if (index init q) = (index init \) then if b !(index init \) then - 1 else 1 else 0)" unfolding b'_def apply(rule flipstyle) by(fact)+ also have "\ = (if (index init q) = (index init \) then if b ! 
(index init q) then - 1 else 1 else 0)" by auto also have "\ = (if q = \ then - 1 else 0)" using aha True by auto finally have "(if b'!(index init \) then 2::real else 1) - (if b!(index init \) then 2 else 1) = (if (q) = \ then -1::real else 0)" by auto } then have grreeeaa: "\x\(Inv ys' xs'') \ (Inv ys' xs'). (\x. (if b'! (index init (snd x)) then 2::real else 1) - (if b! (index init (snd x)) then 2 else 1)) x = (\x. (if (q) = snd x then -1::real else 0)) x" by force let ?fin="(Inv ys' xs'') \ (Inv ys' xs')" have ttt: "{(x,y). (x,y)\(Inv ys' xs'') \ (Inv ys' xs') \ y = (q)} \ {(x,y). (x,y)\(Inv ys' xs'') \ (Inv ys' xs') \ y \ (q)} = (Inv ys' xs'') \ (Inv ys' xs')" (is "?split1 \ ?split2 = ?easy") by auto have interem: "?split1 \ ?split2 = {}" by auto have split1subs: "?split1 \ ?fin" by auto have split2subs: "?split2 \ ?fin" by auto have fs1: "finite ?split1" apply(rule finite_subset[where B="?fin"]) apply(rule split1subs) by(auto) have fs2: "finite ?split2" apply(rule finite_subset[where B="?fin"]) apply(rule split2subs) by(auto) have "k - k' \ (free_A!n)" by auto have g: "InvOf (q) ys' xs'' \ InvOf (q) ys' xs'" using True apply(auto) apply(rule mtf2_mono[of "swaps (paid_A ! n) (s_A n)"]) by (auto simp: queryinlist) have h: "?split1 = (InvOf (q) ys' xs'') \ (InvOf (q) ys' xs')" unfolding Inv_def by auto also from g have "\ = InvOf (q) ys' xs'" by force also from samesame have "\ = InvOf (q) ys xs'" by simp finally have "?split1 = inI" unfolding inI_def . then have cardsp1isI: "card ?split1 = I" by auto { fix a b assume "(a,b)\?split1" then have "b = (q)" by auto then have "(if (q) = b then (-1::real) else 0) = (-1::real)" by auto } then have split1easy: "\x\?split1. (\x. (if (q) = snd x then (-1::real) else 0)) x = (\x. (-1::real)) x" by force { fix a b assume "(a,b)\?split2" then have "~ b = (q)" by auto then have "(if (q) = b then (-1::real) else 0) = 0" by auto } then have split2easy: "\x\?split2. (\x. (if (q) = snd x then (-1::real) else 0)) x = (\x. 0::real) x" by force have E0: "C = (\(x,y)\(Inv ys' xs'') \ (Inv ys xs'). (if b'!(index init y) then 2::real else 1) - (if b!(index init y) then 2 else 1))" by auto also from puh have E1: "... = (\(x,y)\(Inv ys' xs'') \ (Inv ys' xs'). (if b'!(index init y) then 2::real else 1) - (if b!(index init y) then 2 else 1))" by auto also have E2: "\ = (\(x,y)\?easy. (if (q) = y then (-1::real) else 0))" using sum_my2[OF grreeeaa] by (auto simp: split_def) also have E3: "\ = (\(x,y)\?split1 \ ?split2. (if (q) = y then (-1::real) else 0))" by(simp only: ttt) also have "\ = (\(x,y)\?split1. (if (q) = y then (-1::real) else 0)) + (\(x,y)\?split2. (if (q) = y then (-1::real) else 0)) - (\(x,y)\?split1 \ ?split2. (if (q) = y then (-1::real) else 0))" by(rule sum_Un[OF fs1 fs2]) also have "\ = (\(x,y)\?split1. (if (q) = y then (-1::real) else 0)) + (\(x,y)\?split2. (if (q) = y then (-1::real) else 0))" apply(simp only: interem) by auto also have E4: "\ = (\(x,y)\?split1. (-1::real) ) + (\(x,y)\?split2. 0)" using sum_my2[OF split1easy]sum_my2[OF split2easy] by(simp only: split_def) also have "\ = (\(x,y)\?split1. (-1::real) )" by auto also have E5: "\ = - card ?split1 " by auto also have E6: "\ = - I " using cardsp1isI by auto finally have abschC: "C = -I". 
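(* In this branch BIT does not rearrange its list (ys' = ys) and only flips the bit of q
   from True to False.  Hence every common inversion ending in q drops in weight from 2
   to 1 while all other common inversions keep their weight, so C = -I; together with
   B >= 0 (shown next) this gives the claimed bound C - B <= -I. *)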
have abschB: "B \ (0::real)" unfolding B_def apply(rule sum_nonneg) by auto from abschB abschC show "C - B \ -I" by simp next case False from leninitys False have ya: "ys' = mtf2 (length ys) q ys" unfolding step_def ys'_def by(auto) have "index ys' q = 0" unfolding ya apply(rule mtf2_moves_to_front) using gra2 by simp_all then have nixbefore: "before q ys' = {}" unfolding before_in_def by auto { fix \ \ assume "(\,\)\(Inv ys' xs'') \ (Inv ys xs')" then have "(\,\)\(Inv ys' xs'')" by auto then have "(\< \ in ys')" unfolding Inv_def by auto then have 1: "\ \ set ys'" by (simp only: before_in_setD2) then have "(index init \) < length ys'" using setys' by auto then have "(index init \) < length init" using lenys' by auto then have puzzel: "(index init \) < length b" using leninitb by auto have betainit: "\ \ set init" using 1 by auto have aha: "(q=\) = (index init q = index init \)" using betainit by simp have "(if b'!(index init \) then 2::real else 1) - (if b!(index init \) then 2 else 1) = (if (index init q) = (index init \) then if b ! (index init \) then - 1 else 1 else 0)" unfolding b'_def apply(rule flipstyle) by(fact)+ also have "\ = (if (index init q) = (index init \) then if b ! (index init q) then - 1 else 1 else 0)" by auto also have "\ = (if (q) = \ then 1 else 0)" using False aha by auto finally have "(if b'!(index init \) then 2::real else 1) - (if b!(index init \) then 2 else 1) = (if (q) = \ then 1::real else 0)" by auto } then have grreeeaa2: "\x\(Inv ys' xs'') \ (Inv ys xs'). (\x. (if b'! (index init (snd x)) then 2::real else 1) - (if b! (index init (snd x)) then 2 else 1)) x = (\x. (if (q) = snd x then 1::real else 0)) x" by force let ?fin="(Inv ys' xs'') \ (Inv ys xs')" have ttt: "{(x,y). (x,y)\(Inv ys' xs'') \ (Inv ys xs') \ y = (q)} \ {(x,y). (x,y)\(Inv ys' xs'') \ (Inv ys xs') \ y \ (q)} = (Inv ys' xs'') \ (Inv ys xs')" (is "?split1 \ ?split2 = ?easy") by auto have interem: "?split1 \ ?split2 = {}" by auto have split1subs: "?split1 \ ?fin" by auto have split2subs: "?split2 \ ?fin" by auto have fs1: "finite ?split1" apply(rule finite_subset[where B="?fin"]) apply(rule split1subs) by(auto) have fs2: "finite ?split2" apply(rule finite_subset[where B="?fin"]) apply(rule split2subs) by(auto) have split1easy : "\x\?split1. (\x. (if (q) = snd x then (1::real) else 0)) x = (\x. (1::real)) x" by force have split2easy : "\x\?split2. (\x. (if (q) = snd x then (1::real) else 0)) x = (\x. (0::real)) x" by force from nixbefore have InvOfempty: "InvOf q ys' xs'' = {}" unfolding Inv_def by auto have "?split1 = InvOf q ys' xs'' \ InvOf q ys xs'" unfolding Inv_def by auto also from InvOfempty have "\ = {}" by auto finally have split1empty: "?split1 = {}" . have "C = (\(x,y)\?easy. (if (q) = y then (1::real) else 0))" unfolding C_def by(simp only: split_def sum_my2[OF grreeeaa2]) also have "\ = (\(x,y)\?split1 \ ?split2. (if (q) = y then (1::real) else 0))" by(simp only: ttt) also have "\ = (\(x,y)\?split1. (if (q) = y then (1::real) else 0)) + (\(x,y)\?split2. (if (q) = y then (1::real) else 0)) - (\(x,y)\?split1 \ ?split2. (if (q) = y then (1::real) else 0))" by(rule sum_Un[OF fs1 fs2]) also have "\ = (\(x,y)\?split1. (if (q) = y then (1::real) else 0)) + (\(x,y)\?split2. (if (q) = y then (1::real) else 0))" apply(simp only: interem) by auto also have "\ = (\(x,y)\?split1. (1::real) ) + (\(x,y)\?split2. 0)" using sum_my2[OF split1easy] sum_my2[OF split2easy] by (simp only: split_def) also have "\ = (\(x,y)\?split1. 
(1::real) )" by auto also have "\ = card ?split1" by auto also have "\ = (0::real)" apply(simp only: split1empty) by auto finally have abschC: "C = (0::real)" . (* approx for B *) have ttt2: "{(x,y). (x,y)\(Inv ys xs') - (Inv ys' xs'') \ y = (q)} \ {(x,y). (x,y)\(Inv ys xs') - (Inv ys' xs'') \ y \ (q)} = (Inv ys xs') - (Inv ys' xs'')" (is "?split1 \ ?split2 = ?easy2") by auto have interem: "?split1 \ ?split2 = {}" by auto have split1subs: "?split1 \ ?easy2" by auto have split2subs: "?split2 \ ?easy2" by auto have fs1: "finite ?split1" apply(rule finite_subset[where B="?easy2"]) apply(rule split1subs) by(auto) have fs2: "finite ?split2" apply(rule finite_subset[where B="?easy2"]) apply(rule split2subs) by(auto) from False have split1easy2: "\x\?split1. (\x. (if b! (index init (snd x)) then 2::real else 1)) x = (\x. (1::real)) x" by force have "?split1 = (InvOf q ys xs') - (InvOf q ys' xs'')" unfolding Inv_def by auto also have "\ = inI" unfolding InvOfempty by auto finally have splI: "?split1 = inI" . have abschaway: "(\(x,y)\?split2. (if b!(index init y) then 2::real else 1)) \ 0" apply(rule sum_nonneg) by auto have "B = (\(x,y)\?split1 \ ?split2. (if b!(index init y) then 2::real else 1) )" unfolding B_def by(simp only: ttt2) also have "\ = (\(x,y)\?split1. (if b!(index init y) then 2::real else 1)) + (\(x,y)\?split2. (if b!(index init y) then 2::real else 1)) - (\(x,y)\?split1 \ ?split2. (if b!(index init y) then 2::real else 1))" by(rule sum_Un[OF fs1 fs2]) also have "\ = (\(x,y)\?split1. (if b!(index init y) then 2::real else 1)) + (\(x,y)\?split2. (if b!(index init y) then 2::real else 1))" apply(simp only: interem) by auto also have "\ = (\(x,y)\?split1. 1) + (\(x,y)\?split2. (if b!(index init y) then 2::real else 1))" using sum_my2[OF split1easy2] by (simp only: split_def) also have "\ = card ?split1 + (\(x,y)\?split2. (if b!(index init y) then 2::real else 1))" by auto also have "\ = I + (\(x,y)\?split2. (if b!(index init y) then 2::real else 1))" using splI by auto also have "\ \ I" using abschaway by auto finally have abschB: "B \ I" . from abschB abschC show "C - B \ -I" by auto qed (* ========================================== central! calculations for A ========================================== *) have A_absch: "A \ (if b!(index init q) then k-k' else (\j(x,y)\(Inv ys' xs'')-(Inv ys xs'). (if b'!(index init y) then 2::real else 1))" by auto have "index (mtf2 (free_A ! n) (q) (swaps (paid_A ! n) (s_A n))) (q) = (index (swaps (paid_A ! n) (s_A n)) (q) - free_A ! n)" apply(rule mtf2_q_after) using queryinlist by auto then have whatisk': "k' = index xs'' q" by auto have ss: "set ys' = set ys" by auto have ss2: "set xs' = set xs''" by auto have di: "distinct init" by auto have dys: "distinct ys" by auto have "(Inv ys' xs'')-(Inv ys xs') = {(x,y). x < y in ys' \ y < x in xs'' \ (~x < y in ys \ ~ y < x in xs')}" unfolding Inv_def by auto also have "\ = {(x,y). y\q \ x < y in ys' \ y < x in xs'' \ (~x < y in ys \ ~ y < x in xs') }" using nixbefore by blast also have "\ = {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ (~x < y in ys \ ~ y < x in xs') }" unfolding before_in_def by auto also have "\ = {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ ~x < y in ys } \ {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ ~ y < x in xs' }" by force also have "\ = {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ y < x in ys } \ {(x,y). 
x\y \ y\q \ x < y in ys' \ y < x in xs'' \ ~ y < x in xs' }" using before_in_setD1[where xs="ys'"] before_in_setD2[where xs="ys'"] not_before_in ss by metis also have "\ = {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ y < x in ys } \ {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ x < y in xs' }" (is "?S1 \ ?S2 = ?S1 \ ?S2'") proof - have "?S2 = ?S2'" apply(safe) proof (goal_cases) case (2 a b) from 2(5) have "~ b < a in xs'" by auto with 2(6) show "False" by auto next case (1 a b) from 1(4) have "a \ set xs'" "b \ set xs'" using before_in_setD1[where xs="xs''"] before_in_setD2[where xs="xs''"] ss2 by auto with not_before_in 1(5) have "(a < b in xs' \ a = b)" by metis with 1(1) show "a < b in xs'" by auto qed then show ?thesis by auto qed also have "\ = {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ y < x in ys } \ {(x,y). x\y \ y\q \ x < y in ys' \ ~ x < y in xs'' \ x < y in xs' }" (is "?S1 \ ?S2 = ?S1 \ ?S2'") proof - have "?S2 = ?S2'" apply(safe) proof (goal_cases) case (1 a b) from 1(4) have "~ a < b in xs''" by auto with 1(6) show "False" by auto next case (2 a b) from 2(5) have "a \ set xs''" "b \ set xs''" using before_in_setD1[where xs="xs'"] before_in_setD2[where xs="xs'"] ss2 by auto with not_before_in 2(4) have "(b < a in xs'' \ a = b)" by metis with 2(1) show "b < a in xs''" by auto qed then show ?thesis by auto qed also have "\ = {(x,y). x\y \ y\q \ x < y in ys' \ y < x in xs'' \ y < x in ys } \ {}" using x_stays_before_y_if_y_not_moved_to_front[where xs="xs'" and q="q"] before_in_setD1[where xs="xs'"] before_in_setD2[where xs="xs'"] by (auto simp: queryinlist) also have "\ = {(x,y). x\y \ x=q \ y\q \ x < y in ys' \ y < x in xs'' \ y < x in ys }" apply(simp only: ya) using swapped_by_mtf2[where xs="ys" and q="q" and n="(length ys)"] dys before_in_setD1[where xs="ys"] before_in_setD2[where xs="ys"] by (auto simp: queryinlist) also have "\ \ {(x,y). x=q \ y\q \ q < y in ys' \ y < q in xs''}" by force also have "\ = {(x,y). x=q \ y\q \ q < y in ys' \ y < q in xs'' \ y \ set xs''}" using before_in_setD1 by metis also have "\ = {(x,y). x=q \ y\q \ q < y in ys' \ index xs'' y < index xs'' q \ q \ set xs'' \ y \ set xs''}" unfolding before_in_def by auto also have "\ = {(x,y). x=q \ y\q \ q < y in ys' \ index xs'' y < index xs' q - (free_A ! n) \ q \ set xs'' \ y \ set xs''}" using mtf2_q_after[where A="xs'" and q="q"] by force also have "\ \ {(x,y). x=q \ y\q \ index xs' y < index xs' q - (free_A ! n) \ y \ set xs''}" using mtf2_backwards_effect4'[where xs="xs'" and q="q" and n="(free_A ! n)", simplified ] by auto also have "\ \ {(x,y). x=q \ y\q \ index xs' y < k'}" using mtf2_q_after[where A="xs'" and q="q"] by auto finally have subsa: "(Inv ys' xs'')-(Inv ys xs') \ {(x,y). x=q \ y\q \ index xs' y < k'}" . have k'xs': "k' < length xs''" unfolding whatisk' apply(rule index_less) by (auto simp: queryinlist) then have k'xs': "k' < length xs'" by auto have "{(x,y). x=q \ index xs' y < k'} \ {(x,y). x=q \ index xs' y < length xs'}" using k'xs' by auto also have "\ = {(x,y). x=q \ y \ set xs'}" using index_less_size_conv by fast finally have "{(x,y). x=q \ index xs' y < k'} \ {(x,y). x=q \ y \ set xs'}" . then have finia2: "finite {(x,y). x=q \ index xs' y < k'}" apply(rule finite_subset) by(simp) have lulae: "{(a,b). a=q \ index xs' b < k'} = {(q,b)|b. index xs' b < k'}" by auto have k'b: "k' < length b" using whatisk' by (auto simp: queryinlist) have asdasd: "{(\,\). \=q \ \\q \ index xs' \ < k'} = {(\,\). 
\=q \ \\q \ index xs' \ < k' \ (index init \) < length b }" proof (auto, goal_cases) case (1 b) from 1(2) have "index xs' b < index xs' (q)" by auto also have "\ < length xs'" by (auto simp: queryinlist) finally have "b \ set xs'" using index_less_size_conv by metis then show ?case using setinit by auto qed { fix \ have "\\q \ (index init \)\(index init q)" using queryinlist by auto } note ij=this have subsa2: "{(\,\). \=q \ \\q \ index xs' \ < k'} \ {(\,\). \=q \ index xs' \ < k'}" by auto then have finia: "finite {(x,y). x=q \ y\q \ index xs' y < k'}" apply(rule finite_subset) using finia2 by auto have E0: "A = (\(x,y)\(Inv ys' xs'')-(Inv ys xs'). (if b'!(index init y) then 2::real else 1))" by auto also have E1: "\ \ (\(x,y)\{(a,b). a=q \ b\q \ index xs' b < k'}. (if b'!(index init y) then 2::real else 1))" unfolding A_def apply(rule sum_mono2[OF finia subsa]) by auto also have "\ = (\(x,y)\{(\,\). \=q \ \\q \ index xs' \ < k' \ (index init \) < length b }. (if b'!(index init y) then 2::real else 1))" using asdasd by auto also have "\ = (\(x,y)\{(\,\). \=q \ \\q \ index xs' \ < k' \ (index init \) < length b }. (if b!(index init y) then 2::real else 1))" proof (rule sum.cong, goal_cases) case (2 z) then obtain \ \ where zab: "z=(\, \)" and "\ = q" and diff: "\ \ q" and "index xs' \ < k'" and i: "index init \ < length b" by auto from diff ij have "index init \ \ index init q" by auto with flip_other qsfst i have "b' ! index init \ = b ! index init \" unfolding b'_def by auto with zab show ?case by(auto simp add: split_def) qed simp also have E1a: "\ = (\(x,y)\{(a,b). a=q \ b\q \ index xs' b < k'}. (if b!(index init y) then 2::real else 1))" using asdasd by auto also have "\ \ (\(x,y)\{(a,b). a=q \ index xs' b < k'}. (if b!(index init y) then 2::real else 1))" apply(rule sum_mono2[OF finia2 subsa2]) by auto also have E2: "\ = (\(x,y)\{(q,b)|b. index xs' b < k'}. (if b!(index init y) then 2::real else 1))" by (simp only: lulae[symmetric]) finally have aa: "A \ (\(x,y)\{(q,b)|b. index xs' b < k'}. (if b!(index init y) then 2::real else 1))" . have sameset: "{y. index xs' y < k'} = {xs'!i | i. i < k'}" proof (safe, goal_cases) case (1 z) show ?case proof from 1(1) have "index xs' z < index (swaps (paid_A ! n) (s_A n)) (q)" by auto also have "\ < length xs'" using index_less_size_conv by (auto simp: queryinlist) finally have "index xs' z < length xs'" . then have zset: "z \ set xs'" using index_less_size_conv by metis have f1: "xs' ! (index xs' z) = z" apply(rule nth_index) using zset by auto show "z = xs' ! (index xs' z) \ (index xs' z) < k'" using f1 1(1) by auto qed next case (2 k i) from 2(1) have "i < index (swaps (paid_A ! n) (s_A n)) (q)" by auto also have "\ < length xs'" using index_less_size_conv by (auto simp: queryinlist) finally have iset: "i < length xs'" . have "index xs' (xs' ! i) = i" apply(rule index_nth_id) using iset by(auto) with 2 show ?case by auto qed have aaa23: "inj_on (\i. xs'!i) {i. i < k'}" apply(rule inj_on_nth) apply(simp) apply(simp) proof (safe, goal_cases) case (1 i) then have "i < index xs' (q)" by auto also have "\ < length xs'" using index_less_size_conv by (auto simp: queryinlist) also have "\ = length init" by auto finally show " i < length init" . qed have aa3: "{xs'!i | i. i < k'} = (\i. xs'!i) ` {i. i < k'}" by auto have aa4: "{(q,b)|b. index xs' b < k'} = (\b. (q,b)) ` {b. index xs' b < k'}" by auto have unbelievable: "{i::nat. i < k'} = {..b. (q,b)) {b. index xs' b < k'}" unfolding inj_on_def by(simp) have "(\(x,y)\{(q,b)|b. index xs' b < k'}. 
(if b!(index init y) then 2::real else 1)) = (\y\{y. index xs' y < k'}. (if b!(index init y) then 2::real else 1))" proof - have "(\(x,y)\{(q,b)|b. index xs' b < k'}. (if b!(index init y) then 2::real else 1)) = (\(x,y)\ (\b. (q,b)) ` {b. index xs' b < k'}. (if b!(index init y) then 2::real else 1))" using aa4 by simp also have "\ = (\z\ (\b. (q,b)) ` {b. index xs' b < k'}. (if b!(index init (snd z)) then 2::real else 1))" by (simp add: split_def) also have "\ = (\z\{b. index xs' b < k'}. (if b!(index init (snd ((\b. (q,b)) z))) then 2::real else 1))" apply(simp only: sum.reindex[OF aadad]) by auto also have "\ = (\y\{y. index xs' y < k'}. (if b!(index init y) then 2::real else 1))" by auto finally show ?thesis . qed also have "\ = (\y\{xs'!i | i. i < k'}. (if b!(index init y) then 2::real else 1))" using sameset by auto also have "\ = (\y\(\i. xs'!i) ` {i. i < k'}. (if b!(index init y) then 2::real else 1))" using aa3 by simp also have "\ = (\y\{i::nat. i < k'}. (if b!(index init (xs'!y)) then 2::real else 1))" using sum.reindex[OF aaa23] by simp also have E3: "\ = (\j::nat(x,y)\{(q,b)|b. index xs' b < k'}. (if b!(index init y) then 2::real else 1)) = (\j (\j (if b!(index init q) then k-k' else (\j y < x in xs'' \ ~ y < x in xs'}" unfolding Inv_def using samesame by auto also have "\ \ {(xs'!i,q)|i. i\{k'.. b < a in xs'" then have anb: "a \ b" using no_before_inI by(force) have a: "a \ set init" and b: "b \ set init" using before_in_setD1[OF 1] before_in_setD2[OF 1] by auto with anb 3 have 3: "a < b in xs'" by (simp add: not_before_in) note all= anb 1 2 3 a b have bq: "b=q" apply(rule swapped_by_mtf2[where xs="xs'" and x=a]) using queryinlist apply(simp_all add: all) using all(4) apply(simp) using all(3) apply(simp) done note mine=mtf2_backwards_effect3[THEN conjunct1] from bq have "q < a in xs''" using 2 by auto then have "(k' < index xs'' a \ a \ set xs'')" unfolding before_in_def using whatisk' by auto then have low : "k' \ index xs' a" unfolding whatisk' unfolding xs''_def apply(subst mtf2_q_after) apply(simp) using queryinlist apply(simp) apply(rule mine) apply (simp add: queryinlist) using bq b apply(simp) apply(simp) apply(simp del: xs'_def) apply (metis "3" a before_in_def bq dp_xs'_init k'_def k_def max_0L mtf2_forward_beforeq nth_index whatisk' xs''_def) using a by(simp)(* unfolding xs'_def xs_def sledgehammer TODO: make this step readable by (metis "3" mtf2_q_after a before_in_def bq dp_xs'_init index_less_size_conv mtf2_forward_beforeq nth_index whatisk' xs''_def xs'_def xs_def) *) from bq have "a < q in xs'" using 3 by auto then have up: "(index xs' a < k )" unfolding before_in_def by auto from a have "a \ set xs'" by simp then have aa: "a = xs'!index xs' a" using nth_index by simp have inset: "index xs' a \ {k'.. index xs' a \ {k'.. {(xs'!i,q)|i. i\{k'.. ?UB") . have card_of_UB: "card {(xs'!i,q)|i. i\{k'.. = card ((%i. xs' ! i) ` {k'.. = card {k'.. = k-k'" by auto finally show ?thesis . qed have flipit: "flip (index init q) b ! (index init q) = (~ (b) ! (index init q))" apply(rule flip_itself) using queryinlist setinit by auto have q: "{x\?UB. snd x=q} = ?UB" by auto have E0: "A = (\(x,y)\(Inv ys' xs'')-(Inv ys xs'). (if b'!(index init y) then 2::real else 1))" by auto also have E1: "\ \ (\(z,y)\?UB. if flip (index init q) (b) ! (index init y) then 2::real else 1)" unfolding b'_def apply(rule sum_mono2[OF _ a]) by(simp_all add: split_def) also have "\ = (\(z,y)\{x\?UB. snd x=q}. if flip (index init q) (b) ! 
(index init y) then 2::real else 1)" by(simp only: q) also have "\ = (\z\{x\?UB. snd x=q}. if flip (index init q) (b) ! (index init (snd z)) then 2::real else 1)" by(simp add: split_def) also have "\ = (\z\{x\?UB. snd x=q}. if flip (index init q) (b) ! (index init q) then 2::real else 1)" by simp also have E2: "\ = (\z\?UB. if flip (index init q) (b) ! (index init q) then 2::real else 1)" by(simp only: q) also have E3: "\ = (\y\?UB. 1)" using flipit True by simp also have E4: "\ = k-k'" by(simp only: real_of_card[symmetric] card_of_UB) finally have result: "A \ k-k'" . with True show ?thesis by auto qed show "(\(x,y)\(Inv ys' xs''). (if b'!(index init y) then 2::real else 1)) - (\(x,y)\(Inv ys xs'). (if b!(index init y) then 2::real else 1)) \ ?ub2" unfolding ub_free_def teilen[unfolded \_def A_def B_def C_def] using BC_absch A_absch using True by auto qed from paid_ub have kl: "\\<^sub>1 x \ \\<^sub>0 x + ?paidUB" by auto from free_ub have kl2: "\\<^sub>2 x - ?ub2 \ \\<^sub>1 x" using gis dis by auto have iub_free: "I + ?ub2 = ub_free" by auto from kl kl2 have "\\<^sub>2 x - \\<^sub>0 x \ ?ub2 + ?paidUB" by auto then have "(cost x) + (\\<^sub>2 x) - (\\<^sub>0 x) \ k + 1 + I + ?ub2 + ?paidUB" using ub_cost_BIT by auto then show ?case unfolding ub_free_def b_def by auto qed text "Approximation of the Term for Free exchanges" have free_absch: "E(map_pmf (\x. (if (q) \ set init then (if (fst (snd x))!(index init q) then k-k' else (\j 3/4 * k" (is "?EA \ ?absche") proof (cases "(q) \ set init") case False then have "?EA = 0" by auto then show ?thesis by auto next case True note queryinlist=this have "k-k' \ k" by auto have "k' \ k" by auto text "Transformation of the first term" have qsn: "{index init q} \ {} \ {0.. l!(index init q)} = {xs. Ball {(index init q)} ((!) xs) \ (\i\{}. \ xs ! i) \ length xs = ?l}" by auto then have "card {l::bool list. length l = ?l \ l!(index init q)} = card {xs. Ball {index init q} ((!) xs) \ (\i\{}. \ xs ! i) \ length xs = length init} " by auto also have "\ = 2^(length init - card {index init q} - card {})" apply(subst card2[of "{(index init q)}" "{}" "?l"]) using qsn by auto finally have lulu: "card {l::bool list. length l = ?l \ l!(index init q)} = 2^(?l-1)" by auto have "(\x\{l::bool list. length l = ?l \ l!(index init q)}. real(k-k')) = (\x\{l::bool list. length l = ?l \ l!(index init q)}. k-k')" by auto also have "\ = (k-k')*2^(?l-1)" using lulu by simp finally have absch1stterm: "(\x\{l::bool list. length l = ?l \ l!(index init q)}. real(k-k')) = real((k-k')*2^(?l-1))" . text "Transformation of the second term" let ?S="{(xs'!j)|j. j set (swaps (paid_A ! n) (s_A n))" by auto then have "index (swaps (paid_A ! n) (s_A n)) q < length xs'" by auto then have k'inbound: "k' < length xs'" by auto { fix x have a: "{..jt. (if x!(index init t) then 2::real else 1)) (xs'!j)) = sum ((\t. (if x!(index init t) then 2::real else 1)) o (%j. xs'!j)) {.. = sum ((\t. (if x!(index init t) then 2::real else 1)) o (%j. xs'!j)) {j. j = sum (\t. (if x!(index init t) then 2::real else 1)) ((%j. xs'!j) ` {j. jjt. (if x!(index init t) then 2::real else 1)) (xs'!j)) = (\j\?S. (\t. (if x!(index init t) then 2 else 1)) j)" using b by simp } note reindex=this have identS: "?S = set (take k' xs')" proof - have "index (swaps (paid_A ! n) (s_A n)) (q) \ length (swaps (paid_A ! n) (s_A n))" by (rule index_le_size) then have kxs': "k' \ length xs'" by simp have "?S = (!) xs' ` {0.. = set (take k' xs')" apply(rule nth_image) by(rule kxs') finally show "?S = set (take k' xs')" . 
qed have distinctS: "distinct (take k' xs')" using distinct_take identS by simp have lengthS: "length (take k' xs') = k'" using length_take k'inbound by simp from distinct_card[OF distinctS] lengthS have "card (set (take k' xs')) = k'" by simp then have cardS: "card ?S = k'" using identS by simp have a: "?S \ set xs'" using set_take_subset identS by metis then have Ssubso: "(index init) ` ?S \ {0.. set init" by auto note index_inj_on_S=subset_inj_on[OF inj_on_index[of "init"] s_subst_init] have l: "xs'!k = q" unfolding k_def apply(rule nth_index) using queryinlist by(auto) have "xs'!k \ set (take k' xs')" apply(rule index_take) using l by simp then have requestnotinS: "(q) \ ?S" using l identS by simp then have indexnotin: "index init q \ (index init) ` ?S" using index_inj_on_S s_subst_init by auto have lua: "{l. length l = ?l \ ~l!(index init q)} = {xs. (\i\{}. xs ! i) \ (\i\{index init q}. \ xs ! i) \ length xs = ?l}" by auto from k'inbound have k'inbound2: "Suc k' \ length init" using Suc_le_eq by auto (* rewrite from sum over indices of the list to sum over elements (thus indices of the bit vector) *) have "(\x\{l::bool list. length l = ?l \ ~l!(index init q)}. (\jx\{l. length l = ?l \ ~l!(index init q)}. (\j\?S. (\t. (if x!(index init t) then 2 else 1)) j))" using reindex by auto (* rewrite to conform the syntax of Expactation2or1 *) also have "\ = (\x\{xs. (\i\{}. xs ! i) \ (\i\{index init q}. \ xs ! i) \ length xs = ?l}. (\j\?S. (\t. (if x!(index init t) then 2 else 1)) j))" using lua by auto also have "\ = (\x\{xs. (\i\{}. xs ! i) \ (\i\{index init q}. \ xs ! i) \ length xs = ?l}. (\j\(index init) ` ?S. (\t. (if x!t then 2 else 1)) j))" proof - { fix x have "(\j\?S. (\t. (if x!(index init t) then 2 else 1)) j) = (\j\(index init) ` ?S. (\t. (if x!t then 2 else 1)) j)" apply(simp only: sum.reindex[OF index_inj_on_S, where g="(%j. if x ! j then 2 else 1)"]) by(simp) } note a=this show ?thesis by(simp only: a) qed (* use Expactation2or1, and solve all the conditions *) also have "\ = 3 / 2 * real (card ?S) * 2 ^ (?l - card {} - card {q})" apply(subst Expactation2or1) apply(simp) apply(simp) apply(simp) apply(simp only: card_image index_inj_on_S cardS ) apply(simp add: k'inbound2 del: k'_def) using indexnotin apply(simp add: ) apply(simp) using Ssubso queryinlist apply(simp) apply(simp only: card_image[OF index_inj_on_S]) by simp finally have "(\x\{l. length l = ?l \ \ l ! (index init q)}. \jx\{l. length l = ?l \ \ l ! (index init q)}. \j l!(index init q)} = 2^(?l-1)" using lulu by auto have splitie: "{l::bool list. length l = ?l} = {l::bool list. length l = ?l \ l!(index init q)} \ {l::bool list. length l = ?l \ ~l!(index init q)}" by auto have interempty: "{l::bool list. length l = ?l \ l!(index init q)} \ {l::bool list. length l = ?l \ ~l!(index init q)} = {}" by auto have fa: "finite {l::bool list. length l = ?l \ l!(index init q)}" using bitstrings_finite by auto have fb: "finite {l::bool list. length l = ?l \ ~l!(index init q)}" using bitstrings_finite by auto { fix f :: "bool list \ real" have "(\x\{l::bool list. length l = ?l}. f x) = (\x\{l::bool list. length l = ?l \ l!(index init q)} \ {l::bool list. length l = ?l \ ~l!(index init q)}. f x)" by(simp only: splitie) also have "\ = (\x\{l::bool list. length l = ?l \ l!(index init q)}. f x) + (\x\{l::bool list. length l = ?l \ ~l!(index init q)}. f x) - (\x\{l::bool list. length l = ?l \ l!(index init q)} \ {l::bool list. length l = ?l \ ~l!(index init q)}. 
f x)" using sum_Un[OF fa fb, of "f"] by simp also have "\ = (\x\{l::bool list. length l = ?l \ l!(index init q)}. f x) + (\x\{l::bool list. length l = ?l \ ~l!(index init q)}. f x)" by(simp add: interempty) finally have "sum f {l. length l = length init} = sum f {l. length l = length init \ l ! (index init q)} + sum f {l. length l = length init \ \ l ! (index init q)}" . } note darfstsplitten=this have E1: "E(map_pmf (\x. (if (fst (snd x))!(index init q) then real(k-k') else (\jx. (if x!(index init q) then real(k-k') else (\j snd) D))" proof - have triv: "\x. (fst \ snd) x = fst (snd x)" by simp have "E((map_pmf (\x. (if (fst (snd x))!(index init q) then real(k-k') else (\jx. ((\y. (if y!(index init q) then real(k-k') else (\j (fst \ snd)) x) D)" apply(auto simp: comp_assoc) by (simp only: triv) also have "\ = E((map_pmf (\x. (if x!(index init q) then real(k-k') else (\j (map_pmf (fst \ snd))) D)" using map_pmf_compose by metis also have "\ = E(map_pmf (\x. (if x!(index init q) then real(k-k') else (\j snd) D))" by auto finally show ?thesis . qed also have E2: "\ = E(map_pmf (\x. (if x!(index init q) then real(k-k') else (\j = (\x\(set_pmf (bv ?l)). (?insf x) * pmf (bv ?l) x)" by (subst E_finite_sum_fun) (auto simp: bv_finite mult_ac) also have "\ = (\x\{l::bool list. length l = ?l}. (?insf x) * pmf (bv ?l) x)" using bv_set by auto also have E4: "\ = (\x\{l::bool list. length l = ?l}. (?insf x) * (1/2)^?l)" - using list_pmf by auto + by (simp add: list_pmf) also have "\ = (\x\{l::bool list. length l = ?l}. (?insf x)) * ((1/2)^?l)" by(simp only: sum_distrib_right[where r="(1/2)^?l"]) also have E5: "\ = ((1/2)^?l) *(\x\{l::bool list. length l = ?l}. (?insf x))" by(auto) also have E6: "\ = ((1/2)^?l) * ( (\x\{l::bool list. length l = ?l \ l!(index init q)}. ?insf x) + (\x\{l::bool list. length l = ?l \ ~l!(index init q)}. ?insf x) )" using darfstsplitten by auto also have E7: "\ = ((1/2)^?l) * ( (\x\{l::bool list. length l = ?l \ l!(index init q)}. ((\x. real(k-k'))) x) + (\x\{l::bool list. length l = ?l \ ~l!(index init q)}. ((\x. (\jx. (if (fst (snd x))!(index init q) then real(k-k') else (\jx\{l::bool list. length l = ?l \ l!(index init q)}. ((\x. real(k-k'))) x) + (\x\{l::bool list. length l = ?l \ ~l!(index init q)}. ((\x. (\j = ((1/2)^?l) * ( (\x\{l::bool list. length l = ?l \ l!(index init q)}. real(k-k')) + (3/2)*(real (k'))*2^(?l-1) )" by(simp only: absch2ndterm) also have E8: "\ = ((1/2)^?l) * ( real((k-k')*2^(?l-1)) + (3/2)*(real (k'))*2^(?l-1))" by(simp only: absch1stterm) (* from here it is only arithmetic ... *) also have "\ = ((1/2)^?l) * ( ( (k-k') + (k')*(3/2) ) * 2^(?l-1) )" apply(simp only: distrib_right) by simp also have "\ = ((1/2)^?l) * 2^(?l-1) * ( (k-k') + (k')*(3/2) )" by simp also have "\ = (((1::real)/2)^(Suc l')) * 2^(l') * ( real(k-k') + (k')*(3/2) )" using lSuc by auto (* REFACTOR: the only place where I use lSuc , can I avoid it? yes, if ?l=0 then k=k' = (1/2) * ( real(k-k') + (k')*(3/2) )" proof - have "((1::real)/2)^l' * 2^l' = ((1::real)/2 * 2)^l' " by(rule power_mult_distrib[symmetric]) also have "... = 1" by auto finally have "(((1::real)/2)^(Suc l'))* 2^l'=(1/2)" by auto then show ?thesis by auto qed also have E10: "\ \ (1/2) * ( (3/2)*(k-k') + (k')*(3/2) )" by auto (* and one inequality *) also have "\ = (1/2) * ( (3/2)*(k-k'+(k')) )" by auto also have "\ = (1/2) * ( (3/2)*(k) )" by auto also have E11: "\ = (3/4)*(k )" by auto finally show "E(map_pmf (\x. 
(if q \ set init then (if (fst (snd x))!(index init q) then real( k-k' ) else (\j 3/4 * k " using True by simp qed (* free_absch *) text "Transformation of the Term for Paid Exchanges" have paid_absch: "E(map_pmf (\x. (\i<(length (paid_A!n)). (if (fst (snd x))!(gebub n i) then 2::real else 1) )) D) = 3/2 * (length (paid_A!n))" proof - { fix i assume inbound: "(index init i) < length init" have "map_pmf (\xx. if fst (snd xx) ! (index init i) then 2::real else 1) D = bind_pmf (map_pmf (fst \ snd) D) (\b. return_pmf (if b! index init i then 2::real else 1))" unfolding map_pmf_def by(simp add: bind_assoc_pmf bind_return_pmf) also have "\ = bind_pmf (bv (length init)) (\b. return_pmf (if b! index init i then 2::real else 1))" using config_n_bv[of init "take n qs"] by simp also have "\ = map_pmf (\yy. (if yy then 2 else 1)) ( map_pmf (\y. y!(index init i)) (bv (length init)))" by (simp add: map_pmf_def bind_return_pmf bind_assoc_pmf) also have "\ = map_pmf (\yy. (if yy then 2 else 1)) (bernoulli_pmf (5 / 10))" by (auto simp add: bv_comp_bernoulli[OF inbound]) finally have "map_pmf (\xx. if fst (snd xx) ! (index init i) then 2::real else 1) D = map_pmf (\yy. if yy then 2::real else 1) (bernoulli_pmf (5 / 10)) " . } note umform = this have "E(map_pmf (\x. (\i<(length (paid_A!n)). (if (fst (snd x))!(gebub n i) then 2::real else 1))) D) = (\i<(length (paid_A!n)). E(map_pmf ((\xx. (if (fst (snd xx))!(gebub n i) then 2::real else 1))) D))" apply(subst E_linear_sum2) using finite_config_BIT[OF dist_init] by(simp_all) also have "\ = (\i<(length (paid_A!n)). E(map_pmf (\y. if y then 2::real else 1) (bernoulli_pmf (5 / 10))))" using umform gebub_def gebub_inBound[OF 31] by simp also have "\ = 3/2 * (length (paid_A!n))" by(simp add: E_bernoulli) finally show "E(map_pmf (\x. (\i<(length (paid_A!n)). (if (fst (snd x))!(gebub n i) then 2::real else 1))) D) = 3/2 * (length (paid_A!n))" . qed text "Combine the Results" (* cost of A *) have costA_absch: "k+(length (paid_A!n)) + 1 = t_A n" unfolding k_def q_def c_A_def p_A_def t_A_def by (auto) (* combine *) let ?yo= "(\x. (cost x) + (\\<^sub>2 x) - (\\<^sub>0 x))" let ?yo2=" (\x. (k + 1) + (if (q)\set init then (if (fst (snd x))!(index init q) then k-k' else (\ji<(length (paid_A!n)). (if (fst (snd x))!(gebub n i) then 2 else 1)))" have E0: "t_BIT n + Phi(n+1) - Phi n = E (map_pmf ?yo D) " using inEreinziehn by auto also have "\ \ E(map_pmf ?yo2 D)" apply(rule E_mono2) unfolding D_def apply(fact finite_config_BIT[OF dist_init]) apply(fact ub_cost[unfolded D_def]) done also have E2: "\ = E(map_pmf (\x. k + 1::real) D) + (E(map_pmf (\x. (if (q)\set init then (if (fst (snd x))!(index init q) then real(k-k') else (\jx. (\i<(length (paid_A!n)). (if (fst (snd x))!(gebub n i) then 2::real else 1))) D))" unfolding D_def apply(simp only: E_linear_plus2[OF finite_config_BIT[OF dist_init]]) by(auto simp: add.assoc) also have E3: "\ \ k + 1 + (3/4 * (real (k)) + (3/2 * real (length (paid_A!n))))" using paid_absch free_absch by auto also have "\ = k + (3/4 * (real k)) + 1 + 3/2 *(length (paid_A!n)) " by auto (* arithmetic! *) also have "\ = (1+3/4) * (real k) + 1 + 3/2 *(length (paid_A!n)) " by auto (* arithmetic! *) also have E4: "\ = 7/4*(real k) + 3/2 *(length (paid_A!n)) + 1 " by auto (* arithmetic! *) also have "\ \ 7/4*(real k) + 7/4 *(length (paid_A!n)) + 1" by auto (* arithmetic! 
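spelled out, with p = length (paid_A!n): 7/4*k + 3/2*p + 1 <= 7/4*k + 7/4*p + 1 = 7/4*(k+p) + 1; by costA_absch we have k + p + 1 = t_A n, so this equals 7/4*(t_A n - 1) + 1 = 7/4*(t_A n) - 3/4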
*) also have E5:"\ = 7/4*(k+(length (paid_A!n))) + 1 " by auto also have E6:"\ = 7/4*(t_A n - (1::real)) + 1" using costA_absch by auto also have "\ = 7/4*(t_A n) - 7/4 + 1" by algebra also have E7: "\ = 7/4*(t_A n)- 3/4" by auto finally show "t_BIT n + Phi(n+1) - Phi n \ (7 / 4) * t_A n - 3/4" . qed then show "t_BIT n + Phi(n + 1) - Phi n \ (7 / 4) * t_A n - 3/4" . qed subsubsection "Lift the Result to the Whole Request List" lemma T_BIT_absch_le: assumes nqs: "n \ length qs" shows "T_BIT n \ (7 / 4) * T_A n - 3/4*n" unfolding T_BIT_def T_A_def proof - from potential2[of "Phi", OF phi0 phi_pos myub] nqs have "sum t_BIT {.. (\i = (\ii = (\ii = (\i = (7 / 4) * (\i = (7 / 4) * real_of_int (\i 7 / 4 * real_of_int (sum t_A {.. length qs" shows "T_BIT n \ (7 / 4) * T_A' n - 3/4*n" using nqs T_BIT_absch_le[of n] T_A_A'_leq[of n] by auto lemma T_A_nneg: "0 \ T_A n" by(auto simp add: sum_nonneg T_A_def t_A_def c_A_def p_A_def) lemma T_BIT_eq: "T_BIT (length qs) = T_on_rand BIT init qs" unfolding T_BIT_def T_on_rand_as_sum using t_BIT_def by auto corollary T_BIT_competitive: assumes "n \ length qs" and "init \ []" and "\i set init" shows "T_BIT n \ ((7 / 4) - 3/(4 * size init)) * T_A' n" proof cases assume 0: "real_of_int(T_A' n) \ n * (size init)" then have 1: "3/4*real_of_int(T_A' n) \ 3/4*(n * (size init))" by auto have "T_BIT n \ (7 / 4) * T_A' n - 3/4*n" using T_BIT_absch[OF assms(1)] by auto also have "\ = ((7 / 4) * real_of_int(T_A' n)) - (3/4*(n * size init)) / size init" using assms(2) by simp also have "\ \ ((7 / 4) * real_of_int(T_A' n)) - 3/4*T_A' n / size init" by(rule diff_left_mono[OF divide_right_mono[OF 1]]) simp also have "\ = ((7 / 4) - 3/4 / size init) * T_A' n" by algebra also have "\ = ((7 / 4) - 3/(4 * size init)) * T_A' n" by simp finally show ?thesis . next assume 0: "\ real_of_int(T_A' n) \ n * (size init)" have T_A'_nneg: "0 \ T_A' n" using T_A_nneg[of n] T_A_A'_leq[of n] assms(1) by auto have "2 - 1 / size init \ 1" using assms(2) by (auto simp add: field_simps neq_Nil_conv) have " T_BIT n \ n * size init" using T_BIT_ub[OF assms(3)] by linarith also have "\ < real_of_int(T_A' n)" using 0 by linarith also have "\ \ ((7 / 4) - 3/4 / size init) * T_A' n" using assms(2) T_A'_nneg by(auto simp add: mult_le_cancel_right1 field_simps neq_Nil_conv) finally show ?thesis by simp qed lemma t_A'_t: "n < length qs \ t_A' n = int (t (s_A' n) (qs!n) (acts ! n))" by (simp add: t_A'_def t_def c_A'_def p_A'_def paid_A'_def len_acts split: prod.split) lemma T_A'_eq_lem: "(\i=0.. n < length qs" thus ?case by (simp add: len_acts) qed qed lemma T_A'_eq: "T_A' (length qs) = T init qs acts" using T_A'_eq_lem by(simp add: T_A'_def atLeast0LessThan) corollary BIT_competitive3: "init \ [] \ \i set init \ T_BIT (length qs) \ ( (7/4) - 3 / (4 * length init)) * T init qs acts" using order.refl T_BIT_competitive[of "length qs"] T_A'_eq by (simp add: of_int_of_nat_eq) corollary BIT_competitive2: "init \ [] \ \i set init \ T_on_rand BIT init qs \ ( (7/4) - 3 / (4 * length init)) * T init qs acts" using BIT_competitive3 T_BIT_eq by auto corollary BIT_absch_le: "init \ [] \ T_on_rand BIT init qs \ (7 / 4) * (T init qs acts) - 3/4 * length qs" using T_BIT_absch[of "length qs", unfolded T_A'_eq T_BIT_eq] by auto end subsubsection "Generalize Competitivness of BIT" lemma setdi: "set xs = {0.. 
distinct xs" apply(rule card_distinct) by auto theorem compet_BIT: assumes "init \ []" "distinct init" "set qs \ set init" shows "T_on_rand BIT init qs \ ( (7/4) - 3 / (4 * length init)) * T_opt init qs" proof- from assms(3) have 1: "\i < length qs. qs!i \ set init" by auto { fix acts :: "answer list" assume len: "length acts = length qs" interpret BIT_Off acts qs init proof qed (auto simp: assms(2) len) from BIT_competitive2[OF assms(1) 1] assms(1) have "T_on_rand BIT init qs / ( (7/4) - 3 / (4 * length init)) \ real(T init qs acts)" by(simp add: field_simps length_greater_0_conv[symmetric] del: length_greater_0_conv) } hence "T_on_rand BIT init qs / ( (7/4) - 3 / (4 * length init)) \ T_opt init qs" apply(simp add: T_opt_def Inf_nat_def) apply(rule LeastI2_wellorder) using length_replicate[of "length qs" undefined] apply fastforce apply auto done thus ?thesis using assms by(simp add: field_simps length_greater_0_conv[symmetric] del: length_greater_0_conv) qed theorem compet_BIT4: assumes "init \ []" "distinct init" shows "T_on_rand BIT init qs \ 7/4 * T_opt init qs" proof- { fix acts :: "answer list" assume len: "length acts = length qs" interpret BIT_Off acts qs init proof qed (auto simp: assms(2) len) from BIT_absch_le[OF assms(1)] assms(1) have "(T_on_rand BIT init qs + 3 / 4 * length qs)/ (7/4) \ real(T init qs acts)" by(simp add: field_simps length_greater_0_conv[symmetric] del: length_greater_0_conv) } hence "(T_on_rand BIT init qs + 3 / 4 * length qs)/ (7/4) \ T_opt init qs" apply(simp add: T_opt_def Inf_nat_def) apply(rule LeastI2_wellorder) using length_replicate[of "length qs" undefined] apply fastforce apply auto done thus ?thesis by(simp add: field_simps length_greater_0_conv[symmetric] del: length_greater_0_conv) qed theorem compet_BIT_2: "compet_rand BIT (7/4) {init. init \ [] \ distinct init}" unfolding compet_rand_def proof fix init assume "init \ {init. init \ [] \ distinct init }" then have ne: "init \ []" and a: "distinct init" by auto { fix qs assume "init \ []" and a: "distinct init" then have "T_on_rand BIT init qs \ 7/4 * T_opt init qs" using compet_BIT4[of init qs] by simp } with a ne show "\b\0. \qs. static init qs \ T_on_rand BIT init qs \ (7 / 4) * (T_opt init qs) + b" by auto qed end diff --git a/thys/List_Update/List_Factoring.thy b/thys/List_Update/List_Factoring.thy --- a/thys/List_Update/List_Factoring.thy +++ b/thys/List_Update/List_Factoring.thy @@ -1,2389 +1,2389 @@ (* Title: List Factoring Author: Max Haslbeck *) section "List factoring technique" theory List_Factoring imports Partial_Cost_Model MTF2_Effects begin hide_const config compet subsection "Helper functions" subsubsection "Helper lemmas" lemma befaf: assumes "q\set s" "distinct s" shows "before q s \ {q} \ after q s = set s" proof - have "before q s \ {y. index s y = index s q \ q \ set s} = {y. index s y \ index s q \ q \ set s}" unfolding before_in_def apply(auto) by (simp add: le_neq_implies_less) also have "\ = {y. index s y \ index s q \ y\ set s \ q \ set s}" apply(auto) by (metis index_conv_size_if_notin index_less_size_conv not_less) also with \q \ set s\ have "\ = {y. index s y \ index s q \ y\ set s}" by auto finally have "before q s \ {y. index s y = index s q \ q \ set s} \ after q s = {y. index s y \ index s q \ y\ set s} \ {y. index s y > index s q \ y \ set s}" unfolding before_in_def by simp also have "\ = set s" by auto finally show ?thesis using assms by simp qed lemma index_sum: assumes "distinct s" "q\set s" shows "index s q = (\e\set s. 
if e < q in s then 1 else 0)" proof - from assms have bia_empty: "before q s \ ({q} \ after q s) = {}" by(auto simp: before_in_def) from befaf[OF assms(2) assms(1)] have "(\e\set s. if e < q in s then 1::nat else 0) = (\e\(before q s \ {q} \ after q s). if e < q in s then 1 else 0)" by auto also have "\ = (\e\before q s. if e < q in s then 1 else 0) + (\e\{q}. if e < q in s then 1 else 0) + (\e\after q s. if e < q in s then 1 else 0)" proof - have "(\e\(before q s \ {q} \ after q s). if e < q in s then 1::nat else 0) = (\e\(before q s \ ({q} \ after q s)). if e < q in s then 1::nat else 0)" by simp also have "\ = (\e\before q s. if e < q in s then 1 else 0) + (\e\({q} \ after q s). if e < q in s then 1 else 0) - (\e\(before q s \ ({q} \ after q s)). if e < q in s then 1 else 0)" apply(rule sum_Un_nat) by(simp_all) also have "\ = (\e\before q s. if e < q in s then 1 else 0) + (\e\({q} \ after q s). if e < q in s then 1 else 0)" using bia_empty by auto also have "\ = (\e\before q s. if e < q in s then 1 else 0) + (\e\{q}. if e < q in s then 1 else 0) + (\e\after q s. if e < q in s then 1 else 0)" by (simp add: before_in_def) finally show ?thesis . qed also have "\ = (\e\before q s. 1) + (\e\({q} \ after q s). 0)" apply(auto) unfolding before_in_def by auto also have "\ = card (before q s)" by auto also have "\ = card (set (take (index s q) s))" using before_conv_take[OF assms(2)] by simp also have "\ = length (take (index s q) s)" using distinct_card assms(1) distinct_take by metis also have "\ = min (length s) (index s q)" by simp also have "\ = index s q" using index_le_size[of s q] by(auto) finally show ?thesis by simp qed subsubsection "ALG" fun ALG :: "'a \ 'a list \ nat \ ('a list * 'is) \ nat" where "ALG x qs i s = (if x < (qs!i) in fst s then 1::nat else 0)" (* no paid exchanges, requested items in state (nice, quickcheck is awesome!) *) lemma t\<^sub>p_sumofALG: "distinct (fst s) \ snd a = [] \ (qs!i)\set (fst s) \ t\<^sub>p (fst s) (qs!i) a = (\e\set (fst s). ALG e qs i s)" unfolding t\<^sub>p_def apply(simp add: split_def ) using index_sum by metis lemma t\<^sub>p_sumofALGreal: assumes "distinct (fst s)" "snd a = []" "qs!i \ set(fst s)" shows "real(t\<^sub>p (fst s) (qs!i) a) = (\e\set (fst s). real(ALG e qs i s))" proof - from assms have "real(t\<^sub>p (fst s) (qs!i) a) = real(\e\set (fst s). ALG e qs i s)" using t\<^sub>p_sumofALG by metis also have "\ = (\e\set (fst s). real (ALG e qs i s))" by auto finally show ?thesis . 
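(* For intuition, with placeholder elements u, v, w: if fst s = [u, v, w], the request qs!i is w, and the action performs no paid exchanges, then the cost is index [u, v, w] w = 2, which matches ALG u qs i s + ALG v qs i s + ALG w qs i s = 1 + 1 + 0, since exactly u and v are before w in the list. *)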
qed subsubsection "The function steps'" fun steps' where "steps' s _ _ 0 = s" | "steps' s [] [] (Suc n) = s" | "steps' s (q#qs) (a#as) (Suc n) = steps' (step s q a) qs as n" lemma steps'_steps: "length as = length qs \ steps' s as qs (length as) = steps s as qs" by(induct arbitrary: s rule: list_induct2, simp_all) lemma steps'_length: "length qs = length as \ n \ length as \ length (steps' s qs as n) = length s" apply(induct qs as arbitrary: s n rule: list_induct2) apply(simp) apply(case_tac n) by (auto) lemma steps'_set: "length qs = length as \ n \ length as \ set (steps' s qs as n) = set s" apply(induct qs as arbitrary: s n rule: list_induct2) apply(simp) apply(case_tac n) by(auto simp: set_step) lemma steps'_distinct2: "length qs = length as \ n \ length as \ distinct s \ distinct (steps' s qs as n)" apply(induct qs as arbitrary: s n rule: list_induct2) apply(simp) apply(case_tac n) by(auto simp: distinct_step) lemma steps'_distinct: "length qs = length as \ length as = n \ distinct (steps' s qs as n) = distinct s" -apply(induct qs as arbitrary: s n rule: list_induct2) by(auto simp: distinct_step) + by (induct qs as arbitrary: s n rule: list_induct2) (fastforce simp add: distinct_step)+ lemma steps'_dist_perm: "length qs = length as \ length as = n \ dist_perm s s \ dist_perm (steps' s qs as n) (steps' s qs as n)" using steps'_set steps'_distinct by blast lemma steps'_rests: "length qs = length as \ n \ length as \ steps' s qs as n = steps' s (qs@r1) (as@r2) n" apply(induct qs as arbitrary: s n rule: list_induct2) apply(simp) apply(case_tac n) by auto lemma steps'_append: "length qs = length as \ length qs = n \ steps' s (qs@[q]) (as@[a]) (Suc n) = step (steps' s qs as n) q a" apply(induct qs as arbitrary: s n rule: list_induct2) by auto subsubsection "\ALG'_det\" definition "ALG'_det Strat qs init i x = ALG x qs i (swaps (snd (Strat!i)) (steps' init qs Strat i),())" lemma ALG'_det_append: "n < length Strat \ n < length qs \ ALG'_det Strat (qs@a) init n x = ALG'_det Strat qs init n x" proof - assume qs: "n < length qs" assume S: "n < length Strat" have tt: "(qs @ a) ! n = qs ! n" using qs by (simp add: nth_append) have "steps' init (take n qs) (take n Strat) n = steps' init ((take n qs) @ drop n qs) ((take n Strat) @ (drop n Strat)) n" apply(rule steps'_rests) using S qs by auto then have A: "steps' init (take n qs) (take n Strat) n = steps' init qs Strat n" by auto have "steps' init (take n qs) (take n Strat) n = steps' init ((take n qs) @ ((drop n qs)@a)) ((take n Strat) @((drop n Strat)@[])) n" apply(rule steps'_rests) using S qs by auto then have B: "steps' init (take n qs) (take n Strat) n = steps' init (qs@a) (Strat@[]) n" by (metis append_assoc List.append_take_drop_id) from A B have "steps' init qs Strat n = steps' init (qs@a) (Strat@[]) n" by auto then have C: "steps' init qs Strat n = steps' init (qs@a) Strat n" by auto show ?thesis unfolding ALG'_det_def C unfolding ALG.simps tt by auto qed subsubsection "ALG'" abbreviation "config'' A qs init n == config_rand A init (take n qs)" definition "ALG' A qs init i x = E( map_pmf (ALG x qs i) (config'' A qs init i))" lemma ALG'_refl: "qs!i = x \ ALG' A qs init i x = 0" unfolding ALG'_def by(simp add: split_def before_in_def) subsubsection "\ALGxy_det\" definition ALGxy_det where "ALGxy_det A qs init x y = (\i\{.. {y,x}) then ALG'_det A qs init i y + ALG'_det A qs init i x else 0::nat))" lemma ALGxy_det_alternativ: "ALGxy_det A qs init x y = (\i\{i. i (qs!i \ {y,x})}. 
ALG'_det A qs init i y + ALG'_det A qs init i x)" proof - have f: "{i. i (qs!i \ {y,x})} = {i. i {i. (qs!i \ {y,x})}" by auto have "(\i\{i. i (qs!i \ {y,x})}. ALG'_det A qs init i y + ALG'_det A qs init i x) = (\i\{i. i {i. (qs!i \ {y,x})}. ALG'_det A qs init i y + ALG'_det A qs init i x)" unfolding e by simp also have "\ = (\i\{i. i {i. (qs!i \ {y,x})} then ALG'_det A qs init i y + ALG'_det A qs init i x else 0))" apply(rule sum.inter_restrict) by auto also have "\ = (\i\{.. {i. (qs!i \ {y,x})} then ALG'_det A qs init i y + ALG'_det A qs init i x else 0))" unfolding f by auto also have "\ = ALGxy_det A qs init x y" unfolding ALGxy_det_def by auto finally show ?thesis by simp qed subsubsection "ALGxy" definition ALGxy where "ALGxy A qs init x y = (\i\{.. {i. (qs!i \ {y,x})}. ALG' A qs init i y + ALG' A qs init i x)" lemma ALGxy_def2: "ALGxy A qs init x y = (\i\{i. i (qs!i \ {y,x})}. ALG' A qs init i y + ALG' A qs init i x)" proof - have a: "{i. i (qs!i \ {y,x})} = {.. {i. (qs!i \ {y,x})}" by auto show ?thesis unfolding ALGxy_def a by simp qed lemma ALGxy_append: "ALGxy A (rs@[r]) init x y = ALGxy A rs init x y + (if (r \ {y,x}) then ALG' A (rs@[r]) init (length rs) y + ALG' A (rs@[r]) init (length rs) x else 0 )" proof - have "ALGxy A (rs@[r]) init x y = (\i\{..<(Suc (length rs))} \ {i. (rs @ [r]) ! i \ {y, x}}. ALG' A (rs @ [r]) init i y + ALG' A (rs @ [r]) init i x)" unfolding ALGxy_def by(simp) also have "\ = (\i\{..<(Suc (length rs))}. (if i\{i. (rs @ [r]) ! i \ {y, x}} then ALG' A (rs @ [r]) init i y + ALG' A (rs @ [r]) init i x else 0) )" apply(rule sum.inter_restrict) by simp also have "\ = (\i\{..{i. (rs @ [r]) ! i \ {y, x}} then ALG' A (rs @ [r]) init i y + ALG' A (rs @ [r]) init i x else 0) ) + (if length rs\{i. (rs @ [r]) ! i \ {y, x}} then ALG' A (rs @ [r]) init (length rs) y + ALG' A (rs @ [r]) init(length rs) x else 0) " by simp also have "\ = ALGxy A rs init x y + (if r \ {y, x} then ALG' A (rs @ [r]) init (length rs) y + ALG' A (rs @ [r]) init(length rs) x else 0)" apply(simp add: ALGxy_def sum.inter_restrict nth_append) unfolding ALG'_def apply(rule sum.cong) apply(simp) by(auto simp: nth_append) finally show ?thesis . qed lemma ALGxy_wholerange: "ALGxy A qs init x y = (\i<(length qs). (if qs ! i \ {y, x} then ALG' A qs init i y + ALG' A qs init i x else 0 ))" proof - have "ALGxy A qs init x y = (\i\ {i. i < length qs} \ {i. qs ! i \ {y, x}}. ALG' A qs init i y + ALG' A qs init i x)" unfolding ALGxy_def apply(rule sum.cong) apply(simp) apply(blast) by simp also have "\ = (\i\{i. i < length qs}. if i \ {i. qs ! i \ {y, x}} then ALG' A qs init i y + ALG' A qs init i x else 0)" by(rule sum.inter_restrict) simp also have "\ = (\i<(length qs). (if qs ! i \ {y, x} then ALG' A qs init i y + ALG' A qs init i x else 0 ))" apply(rule sum.cong) by(auto) finally show ?thesis . qed subsection "Transformation to Blocking Cost" lemma umformung: fixes A :: "(('a::linorder) list,'is,'a,(nat * nat list)) alg_on_rand" assumes no_paid: "\is s q. \((free,paid),_) \ (snd A (s,is) q). paid=[]" assumes inlist: "set qs \ set init" assumes dist: "distinct init" assumes "\x. x < length qs \ finite (set_pmf (config'' A qs init x))" shows "T\<^sub>p_on_rand A init qs = (\(x,y)\{(x,y). x \ set init \ y\set init \ xn. \xa \ set_pmf (config'' A qs init n). distinct (fst xa)" using dist config_rand_distinct by metis have E0: "T\<^sub>p_on_rand A init qs = (\i\{..p_on_rand_n A init qs i)" unfolding T_on_rand_as_sum by auto also have "\ = (\is. bind_pmf (snd A s (qs ! i)) (\(a, nis). 
return_pmf (real (\x\set init. ALG x qs i s))))))" apply(rule sum.cong) apply(simp) apply(simp add: bind_return_pmf bind_assoc_pmf) apply(rule arg_cong[where f=E]) apply(rule bind_pmf_cong) apply(simp) apply(rule bind_pmf_cong) apply(simp) apply(simp add: split_def) apply(subst t\<^sub>p_sumofALGreal) proof (goal_cases) case 1 then show ?case using config_dist by(metis) next case (2 a b c) then show ?case using no_paid[of "fst b" "snd b"] by(auto simp add: split_def) next case (3 a b c) with config_rand_set have a: "set (fst b) = set init" by metis with inlist have " set qs \ set (fst b)" by auto with 3 show ?case by auto next case (4 a b c) with config_rand_set have a: "set (fst b) = set init" by metis then show ?case by(simp) qed (* hier erst s, dann init *) also have "\ = (\i(is, s). (real (\x\set init. ALG x qs i (is,s)))) (config'' A qs init i)))" apply(simp only: map_pmf_def split_def) by simp also have E1: "\ = (\ix\set init. ALG' A qs init i x))" apply(rule sum.cong) apply(simp) apply(simp add: split_def ALG'_def) apply(rule E_linear_sum_allg) by(rule assms(4)) also have E2: "\ = (\x\set init. (\i = (\x\set init. (\y\set init. (\i\{i. i qs!i=y}. ALG' A qs init i x)))" proof (rule sum.cong, goal_cases) case (2 x) have "(\i = sum (%i. ALG' A qs init i x) (\y\{y. y \ set init}. {i. i < length qs \ qs ! i = y})" apply(rule sum.cong) apply(auto) using inlist by auto also have "\ = sum (%t. sum (%i. ALG' A qs init i x) {i. i qs ! i = t}) {y. y\ set init}" apply(rule sum.UNION_disjoint) apply(simp_all) by force also have "\ = (\y\set init. \i | i < length qs \ qs ! i = y. ALG' A qs init i x)" by auto finally show ?case . qed (simp) also have "\ = (\(x,y)\ (set init \ set init). (\i\{i. i qs!i=y}. ALG' A qs init i x))" by (rule sum.cartesian_product) also have "\ = (\(x,y)\ {(x,y). x\set init \ y\ set init}. (\i\{i. i qs!i=y}. ALG' A qs init i x))" by simp also have E4: "\ = (\(x,y)\{(x,y). x\set init \ y\ set init \ x\y}. (\i\{i. i qs!i=y}. ALG' A qs init i x))" (is "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R. ?f x y)") proof - let ?M = "{(x,y). x\set init \ y\ set init \ x=y}" have A: "?L = ?R \ ?M" by auto have B: "{} = ?R \ ?M" by auto have "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R \ ?M. ?f x y)" by(simp only: A) also have "\ = (\(x,y)\ ?R. ?f x y) + (\(x,y)\ ?M. ?f x y)" apply(rule sum.union_disjoint) apply(rule finite_subset[where B="set init \ set init"]) apply(auto) apply(rule finite_subset[where B="set init \ set init"]) by(auto) also have "(\(x,y)\ ?M. ?f x y) = 0" apply(rule sum.neutral) by (auto simp add: ALG'_refl) finally show ?thesis by simp qed also have "\ = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{i. i qs!i=y}. ALG' A qs init i x) + (\i\{i. i qs!i=x}. ALG' A qs init i y) )" (is "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R. ?f x y + ?f y x)") proof - let ?R' = "{(x,y). x \ set init \ y\set init \ y ?R'" by auto have "{} = ?R \ ?R'" by auto have C: "?R' = (%(x,y). (y, x)) ` ?R" by auto have D: "(\(x,y)\ ?R'. ?f x y) = (\(x,y)\ ?R. ?f y x)" proof - have "(\(x,y)\ ?R'. ?f x y) = (\(x,y)\ (%(x,y). (y, x)) ` ?R. ?f x y)" by(simp only: C) also have "(\z\ (%(x,y). (y, x)) ` ?R. (%(x,y). ?f x y) z) = (\z\?R. ((%(x,y). ?f x y) \ (%(x,y). (y, x))) z)" apply(rule sum.reindex) by(fact swap_inj_on) also have "\ = (\z\?R. (%(x,y). ?f y x) z)" apply(rule sum.cong) by(auto) finally show ?thesis . qed have "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R \ ?R'. ?f x y)" by(simp only: A) also have "\ = (\(x,y)\ ?R. ?f x y) + (\(x,y)\ ?R'. 
?f x y)" apply(rule sum.union_disjoint) apply(rule finite_subset[where B="set init \ set init"]) apply(auto) apply(rule finite_subset[where B="set init \ set init"]) by(auto) also have "\ = (\(x,y)\ ?R. ?f x y) + (\(x,y)\ ?R. ?f y x)" by(simp only: D) also have "\ = (\(x,y)\ ?R. ?f x y + ?f y x)" by(simp add: split_def sum.distrib[symmetric]) finally show ?thesis . qed also have E5: "\ = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{i. i (qs!i=y \ qs!i=x)}. ALG' A qs init i y + ALG' A qs init i x))" apply(rule sum.cong) apply(simp) proof goal_cases case (1 x) then obtain a b where x: "x=(a,b)" and a: "a \ set init" "b \ set init" "a < b" by auto then have "a\b" by simp then have disj: "{i. i < length qs \ qs ! i = b} \ {i. i < length qs \ qs ! i = a} = {}" by auto have unio: "{i. i < length qs \ (qs ! i = b \ qs ! i = a)} = {i. i < length qs \ qs ! i = b} \ {i. i < length qs \ qs ! i = a}" by auto have "(\i\{i. i < length qs \ qs ! i = b} \ {i. i < length qs \ qs ! i = a}. ALG' A qs init i b + ALG' A qs init i a) = (\i\{i. i < length qs \ qs ! i = b}. ALG' A qs init i b + ALG' A qs init i a) + (\i\ {i. i < length qs \ qs ! i = a}. ALG' A qs init i b + ALG' A qs init i a) - (\i\{i. i < length qs \ qs ! i = b} \ {i. i < length qs \ qs ! i = a}. ALG' A qs init i b + ALG' A qs init i a) " apply(rule sum_Un) by(auto) also have "\ = (\i\{i. i < length qs \ qs ! i = b}. ALG' A qs init i b + ALG' A qs init i a) + (\i\ {i. i < length qs \ qs ! i = a}. ALG' A qs init i b + ALG' A qs init i a)" using disj by auto also have "\ = (\i\{i. i < length qs \ qs ! i = b}. ALG' A qs init i a) + (\i\{i. i < length qs \ qs ! i = a}. ALG' A qs init i b)" by (auto simp: ALG'_refl) finally show ?case unfolding x apply(simp add: split_def) unfolding unio by simp qed also have E6: "\ = (\(x,y)\{(x,y). x \ set init \ y\set init \ xy" shows "(if (x < y in l) then 0 else 1) = index l x" unfolding before_in_def proof (auto, goal_cases) (* bad style! *) case 1 from assms(1) have "index l y < length l" by simp with assms(2) 1(1) show "index l x = 0" by auto next case 2 from assms(1) have a: "index l x < length l" by simp from assms(1,3) have "index l y \ index l x" by simp with assms(2) 2(1) a show "Suc 0 = index l x" by simp qed (simp add: assms) lemma before_in_index2: fixes l assumes "set l = {x,y}" and "length l = 2" and "x\y" shows "(if (x < y in l) then 1 else 0) = index l y" unfolding before_in_def proof (auto, goal_cases) (* bad style! *) case 2 from assms(1,3) have a: "index l y \ index l x" by simp from assms(1) have "index l x < length l" by simp with assms(2) a 2(1) show "index l y = 0" by auto next case 1 from assms(1) have a: "index l y < length l" by simp from assms(1,3) have "index l y \ index l x" by simp with assms(2) 1(1) a show "Suc 0 = index l y" by simp qed (simp add: assms) lemma before_in_index: fixes l assumes "set l = {x,y}" and "length l = 2" and "x\y" shows "(x < y in l) = (index l x = 0)" unfolding before_in_def proof (safe, goal_cases) case 1 from assms(1) have "index l y < length l" by simp with assms(2) 1(1) show "index l x = 0" by auto next case 2 from assms(1,3) have "index l y \ index l x" by simp with 2(1) show "index l x < index l y" by simp qed (simp add: assms) subsection "The pairwise property" definition pairwise where "pairwise A = (\init. distinct init \ (\qs\{xs. set xs \ set init}. \(x::('a::linorder),y)\{(x,y). x \ set init \ y\set init \ xp_on_rand A (Lxy init {x,y}) (Lxy qs {x,y}) = ALGxy A qs init x y))" definition "Pbefore_in x y A qs init = map_pmf (\p. 
x < y in fst p) (config_rand A init qs)" lemma T_on_n_no_paid: assumes nopaid: "\s n. map_pmf (\x. snd (fst x)) (snd A s n) = return_pmf []" shows "T_on_rand_n A init qs i = E (config'' A qs init i \ (\p. return_pmf (real(index (fst p) (qs ! i)))))" proof - have "(\s. snd A s (qs ! i) \ (\(a, is'). return_pmf (real (t\<^sub>p (fst s) (qs ! i) a)))) = (\s. (snd A s (qs ! i) \ (\x. return_pmf (snd (fst x)))) \ (\p. return_pmf (real (index (swaps p (fst s)) (qs ! i)) + real (length p))))" by(simp add: t\<^sub>p_def split_def bind_return_pmf bind_assoc_pmf) also have "\ = (\p. return_pmf (real (index (fst p) (qs ! i))))" using nopaid[unfolded map_pmf_def] by(simp add: split_def bind_return_pmf) finally show ?thesis by simp qed lemma pairwise_property_lemma: assumes relativeorder: "(\init qs. distinct init \ qs \ {xs. set xs \ set init} \ (\x y. (x,y)\ {(x,y). x \ set init \ y\set init \ x\y} \ x \ y \ Pbefore_in x y A qs init = Pbefore_in x y A (Lxy qs {x,y}) (Lxy init {x,y}) ))" and nopaid: "\xa r. \z\ set_pmf(snd A xa r). snd(fst z) = []" shows "pairwise A" unfolding pairwise_def proof (clarify, goal_cases) case (1 init rs x y) then have xny: "x\y" by auto note dinit=1(1) then have dLyx: "distinct (Lxy init {y,x})" by(rule Lxy_distinct) from dinit have dLxy: "distinct (Lxy init {x,y})" by(rule Lxy_distinct) have setLxy: "set (Lxy init {x, y}) = {x,y}" apply(subst Lxy_set_filter) using 1 by auto have setLyx: "set (Lxy init {y, x}) = {x,y}" apply(subst Lxy_set_filter) using 1 by auto have lengthLyx:" length (Lxy init {y, x}) = 2" using setLyx distinct_card[OF dLyx] xny by simp have lengthLxy:" length (Lxy init {x, y}) = 2" using setLxy distinct_card[OF dLxy] xny by simp have aee: "{x,y} = {y,x}" by auto from 1(2) show ?case proof(induct rs rule: rev_induct) case (snoc r rs) have b: "Pbefore_in x y A rs init = Pbefore_in x y A (Lxy rs {x,y}) (Lxy init {x,y})" apply(rule relativeorder) using snoc 1 xny by(simp_all) show ?case (is "?L (rs @ [r]) = ?R (rs @ [r])") proof(cases "r\{x,y}") case True note xyrequest=this let ?expr = "E (Partial_Cost_Model.config'_rand A (fst A (Lxy init {x, y}) \ (\is. return_pmf (Lxy init {x, y}, is))) (Lxy rs {x, y}) \ (\s. snd A s r \ (\(a, is'). return_pmf (real (t\<^sub>p (fst s) r a)))))" let ?expr2 = "ALG' A (rs @ [r]) init (length rs) y + ALG' A (rs @ [r]) init (length rs) x" from xyrequest have "?L (rs @ [r]) = ?L rs + ?expr" by(simp add: Lxy_snoc T_on_rand'_append) also have "\ = ?L rs + ?expr2" proof(cases "r=x") case True let ?projS ="config'_rand A (fst A (Lxy init {x, y}) \ (\is. return_pmf (Lxy init {x, y}, is))) (Lxy rs {x, y})" let ?S = "(config'_rand A (fst A init \ (\is. return_pmf (init, is))) rs)" have "?projS \ (\s. snd A s r \ (\(a, is'). return_pmf (real (t\<^sub>p (fst s) r a)))) = ?projS \ (\s. return_pmf (real (index (fst s) r)))" proof (rule bind_pmf_cong, goal_cases) case (2 z) have "snd A z r \ (\(a, is'). return_pmf (real (t\<^sub>p (fst z) r a))) = snd A z r \ (\x. return_pmf (real (index (fst z) r)))" apply(rule bind_pmf_cong) apply(simp) using nopaid[of z r] by(simp add: split_def t\<^sub>p_def) then show ?case by(simp add: bind_return_pmf) qed simp also have "\ = map_pmf (%b. 
(if b then 0::real else 1)) (Pbefore_in x y A (Lxy rs {x,y}) (Lxy init {x,y}))" unfolding Pbefore_in_def map_pmf_def apply(simp add: bind_return_pmf bind_assoc_pmf) apply(rule bind_pmf_cong) apply(simp add: aee) proof goal_cases case (1 z) have " (if x < y in fst z then 0 else 1) = (index (fst z) x)" apply(rule before_in_index1) using 1 config_rand_set setLxy apply fast using 1 config_rand_length lengthLxy apply metis using xny by simp with True show ?case by(auto) qed also have "\ = map_pmf (%b. (if b then 0::real else 1)) (Pbefore_in x y A rs init)" by(simp add: b) also have "\ = map_pmf (\xa. real (if y < x in fst xa then 1 else 0)) ?S" apply(simp add: Pbefore_in_def map_pmf_comp) proof (rule map_pmf_cong, goal_cases) case (2 z) then have set_z: "set (fst z) = set init" using config_rand_set by fast have "(\ x < y in fst z) = y < x in fst z" apply(subst not_before_in) using set_z 1(3,4) xny by(simp_all) then show ?case by(simp add: ) qed simp finally have a: "?projS \ (\s. snd A s x \ (\(a, is'). return_pmf (real (t\<^sub>p (fst s) x a)))) = map_pmf (\xa. real (if y < x in fst xa then 1 else 0)) ?S" using True by simp from True show ?thesis apply(simp add: ALG'_refl nth_append) unfolding ALG'_def by(simp add: a) next case False with xyrequest have request: "r=y" by blast let ?projS ="config'_rand A (fst A (Lxy init {x, y}) \ (\is. return_pmf (Lxy init {x, y}, is))) (Lxy rs {x, y})" let ?S = "(config'_rand A (fst A init \ (\is. return_pmf (init, is))) rs)" have "?projS \ (\s. snd A s r \ (\(a, is'). return_pmf (real (t\<^sub>p (fst s) r a)))) = ?projS \ (\s. return_pmf (real (index (fst s) r)))" proof (rule bind_pmf_cong, goal_cases) case (2 z) have "snd A z r \ (\(a, is'). return_pmf (real (t\<^sub>p (fst z) r a))) = snd A z r \ (\x. return_pmf (real (index (fst z) r)))" apply(rule bind_pmf_cong) apply(simp) using nopaid[of z r] by(simp add: split_def t\<^sub>p_def) then show ?case by(simp add: bind_return_pmf) qed simp also have "\ = map_pmf (%b. (if b then 1::real else 0)) (Pbefore_in x y A (Lxy rs {x,y}) (Lxy init {x,y}))" unfolding Pbefore_in_def map_pmf_def apply(simp add: bind_return_pmf bind_assoc_pmf) apply(rule bind_pmf_cong) apply(simp add: aee) proof goal_cases case (1 z) have " (if x < y in fst z then 1 else 0) = (index (fst z) y)" apply(rule before_in_index2) using 1 config_rand_set setLxy apply fast using 1 config_rand_length lengthLxy apply metis using xny by simp with request show ?case by(auto) qed also have "\ = map_pmf (%b. (if b then 1::real else 0)) (Pbefore_in x y A rs init)" by(simp add: b) also have "\ = map_pmf (\xa. real (if x < y in fst xa then 1 else 0)) ?S" apply(simp add: Pbefore_in_def map_pmf_comp) apply (rule map_pmf_cong) by simp_all finally have a: "?projS \ (\s. snd A s y \ (\(a, is'). return_pmf (real (t\<^sub>p (fst s) y a)))) = map_pmf (\xa. real (if x < y in fst xa then 1 else 0)) ?S" using request by simp from request show ?thesis apply(simp add: ALG'_refl nth_append) unfolding ALG'_def by(simp add: a) qed also have "\ = ?R rs + ?expr2" using snoc by simp also from True have "\ = ?R (rs@[r])" apply(subst ALGxy_append) by(auto) finally show ?thesis . next case False then have "?L (rs @ [r]) = ?L rs" apply(subst Lxy_snoc) by simp also have "\ = ?R rs" using snoc by(simp) also have "\ = ?R (rs @ [r])" apply(subst ALGxy_append) using False by(simp) finally show ?thesis . qed qed (simp add: ALGxy_def) qed lemma umf_pair: assumes 0: "pairwise A" assumes 1: "\is s q. \((free,paid),_) \ (snd A (s, is) q). 
paid=[]" assumes 2: "set qs \ set init" assumes 3: "distinct init" assumes 4: "\x. x finite (set_pmf (config'' A qs init x))" shows "T\<^sub>p_on_rand A init qs = (\(x,y)\{(x, y). x \ set init \ y \ set init \ x < y}. T\<^sub>p_on_rand A (Lxy init {x,y}) (Lxy qs {x,y}))" proof - have " T\<^sub>p_on_rand A init qs = (\(x,y)\{(x, y). x \ set init \ y \ set init \ x < y}. ALGxy A qs init x y)" by(simp only: umformung[OF 1 2 3 4]) also have "\ = (\(x,y)\{(x, y). x \ set init \ y \ set init \ x < y}. T\<^sub>p_on_rand A (Lxy init {x,y}) (Lxy qs {x,y}))" apply(rule sum.cong) apply(simp) using 0[unfolded pairwise_def] 2 3 by auto finally show ?thesis . qed subsection "List Factoring for OPT" (* calculates given a list of swaps, elements x and y and a current state how many swaps between x and y there are *) fun ALG_P :: "nat list \ 'a \ 'a \ 'a list \ nat" where "ALG_P [] x y xs = (0::nat)" | "ALG_P (s#ss) x y xs = (if Suc s < length (swaps ss xs) then (if ((swaps ss xs)!s=x \ (swaps ss xs)!(Suc s)=y) \ ((swaps ss xs)!s=y \ (swaps ss xs)!(Suc s)=x) then 1 else 0) else 0) + ALG_P ss x y xs" (* nat list ersetzen durch (a::ordered) list *) lemma ALG_P_erwischt_alle: assumes dinit: "distinct init" shows "\l< length sws. Suc (sws!l) < length init \ length sws = (\(x,y)\{(x,y). x \ set (init::('a::linorder) list) \ y\set init \ xl set ?co'" by (rule nth_mem) then have a: "?co'!s \ set init" by auto from isT7 have "?co'! (Suc s) \ set ?co'" by (rule nth_mem) then have b: "?co'!(Suc s) \ set init" by auto have "{(x,y). x \ set init \ y\set init \ x {(x,y). ?expr3 x y} = {(x,y). x \ set init \ y\set init \ x (?co'!s=x \ ?co'!(Suc s)=y \ ?co'!s=y \ ?co'!(Suc s)=x)}" by auto also have "\ = {(x,y). x \ set init \ y\set init \ x ?co'!s=x \ ?co'!(Suc s)=y } \ {(x,y). x \ set init \ y\set init \ x ?co'!s=y \ ?co'!(Suc s)=x}" by auto also have "\ = {(x,y). x ?co'!s=x \ ?co'!(Suc s)=y} \ {(x,y). x ?co'!s=y \ ?co'!(Suc s)=x}" using a b by(auto) finally have c1: "{(x,y). x \ set init \ y\set init \ x {(x,y). ?expr3 x y} = {(x,y). x ?co'!s=x \ ?co'!(Suc s)=y} \ {(x,y). x ?co'!s=y \ ?co'!(Suc s)=x}" . have c2: "card ({(x,y). x ?co'!s=x \ ?co'!(Suc s)=y} \ {(x,y). x ?co'!s=y \ ?co'!(Suc s)=x}) = 1" (is "card (?A \ ?B) = 1") proof (cases "?co'!s ?B = { (?co'!s, ?co'!(Suc s)) }" apply(simp only: a b) by simp have "card (?A \ ?B) = 1" unfolding c by auto then show ?thesis . next case False then have a: "?A = {}" by auto have b: "?B = { (?co'!(Suc s), ?co'!s) } " proof - from dco distinct_conv_nth[of "?co'"] have "swaps ss init ! s \ swaps ss init ! (Suc s)" using isT2 isT3 by simp with False show ?thesis by auto qed have c: "?A \ ?B = { (?co'!(Suc s), ?co'!s) }" apply(simp only: a b) by simp have "card (?A \ ?B) = 1" unfolding c by auto then show ?thesis . qed have yeah: "(\(x,y)\{(x,y). x \ set init \ y\set init \ x(x,y)\{(x,y). x \ set init \ y\set init \ x(x,y)\{(x,y). x \ set init \ y\set init \ x = (\z\{(x,y). x \ set init \ y\set init \ x = (\z\{(x,y). x \ set init \ y\set init \ x = (\z\{(x,y). x \ set init \ y\set init \ x{(x,y). ?expr3 x y} . 1)" apply(rule sum.inter_restrict[symmetric]) apply(rule finite_subset[where B="set init \ set init"]) by(auto) also have "\ = card ({(x,y). x \ set init \ y\set init \ x {(x,y). ?expr3 x y})" by auto also have "\ = card ({(x,y). x ?co'!s=x \ ?co'!(Suc s)=y} \ {(x,y). x ?co'!s=y \ ?co'!(Suc s)=x})" by(simp only: c1) also have "\ = (1::nat)" using c2 by auto finally show ?thesis . qed have "length (s # ss) = 1 + length ss" by auto also have "\ = 1 + (\(x,y)\{(x,y). 
x \ set init \ y\set init \ x = (\(x,y)\{(x,y). x \ set init \ y\set init \ x(x,y)\{(x,y). x \ set init \ y\set init \ x = (\(x,y)\{(x,y). x \ set init \ y\set init \ x = (\(x,y)\{(x,y). x \ set init \ y\set init \ xp_sumofALGALGP: assumes "distinct s" "(qs!i)\set s" and "\l< length (snd a). Suc ((snd a)!l) < length s" shows "t\<^sub>p s (qs!i) a = (\e\set s. ALG e qs i (swaps (snd a) s,())) + (\(x,y)\{(x::('a::linorder),y). x \ set s \ y\set s \ x(x,y)\{(x,y). x \ set s \ y\set s \ xe\set s. ALG e qs i (swaps (snd a) s,()))" proof - have "index (swaps (snd a) s) (qs ! i) = (\e\set (swaps (snd a) s). if e < (qs ! i) in (swaps (snd a) s) then 1 else 0)" apply(rule index_sum) using assms by(simp_all) also have "\ = (\e\set s. ALG e qs i (swaps (snd a) s,()))" by auto finally show ?thesis . qed show ?thesis unfolding t\<^sub>p_def apply (simp add: split_def) unfolding ac pe by (simp add: split_def) qed (* given a Strategy Strat to serve request sequence qs on initial list init how many swaps between elements x and y occur during the ith step *) definition "ALG_P' Strat qs init i x y = ALG_P (snd (Strat!i)) x y (steps' init qs Strat i)" (* if n is in bound, Strat may be too long, that does not matter *) lemma ALG_P'_rest: "n < length qs \ n < length Strat \ ALG_P' Strat (take n qs @ [qs ! n]) init n x y = ALG_P' (take n Strat @ [Strat ! n]) (take n qs @ [qs ! n]) init n x y" proof - assume qs: "n < length qs" assume S: "n < length Strat" then have lS: "length (take n Strat) = n" by auto have "(take n Strat @ [Strat ! n]) ! n = (take n Strat @ (Strat ! n) # []) ! length (take n Strat)" using lS by auto also have "\ = Strat ! n" by(rule nth_append_length) finally have tt: "(take n Strat @ [Strat ! n]) ! n = Strat ! n" . obtain rest where rest: "Strat = (take n Strat @ [Strat ! n] @ rest)" using S apply(auto) using id_take_nth_drop by blast have "steps' init (take n qs @ [qs ! n]) (take n Strat @ [Strat ! n]) n = steps' init (take n qs) (take n Strat) n" apply(rule steps'_rests[symmetric]) using S qs by auto also have "\ = steps' init (take n qs @ [qs ! n]) (take n Strat @ ([Strat ! n] @ rest)) n" apply(rule steps'_rests) using S qs by auto finally show ?thesis unfolding ALG_P'_def tt using rest by auto qed (* verallgemeinert ALG_P'_rest, sollte mergen! *) lemma ALG_P'_rest2: "n < length qs \ n < length Strat \ ALG_P' Strat qs init n x y = ALG_P' (Strat@r1) (qs@r2) init n x y" proof - assume qs: "n < length qs" assume S: "n < length Strat" have tt: "Strat ! n = (Strat @ r1) ! n" using S by (simp add: nth_append) have "steps' init (take n qs) (take n Strat) n = steps' init ((take n qs) @ drop n qs) ((take n Strat) @ (drop n Strat)) n" apply(rule steps'_rests) using S qs by auto then have A: "steps' init (take n qs) (take n Strat) n = steps' init qs Strat n" by auto have "steps' init (take n qs) (take n Strat) n = steps' init ((take n qs) @ ((drop n qs)@r2)) ((take n Strat) @((drop n Strat)@r1)) n" apply(rule steps'_rests) using S qs by auto then have B: "steps' init (take n qs) (take n Strat) n = steps' init (qs@r2) (Strat@r1) n" by (metis append_assoc List.append_take_drop_id) from A B have C: "steps' init qs Strat n = steps' init (qs@r2) (Strat@r1) n" by auto show ?thesis unfolding ALG_P'_def tt using C by auto qed (* total number of swaps of elements x and y during execution of Strategy Strat *) definition ALG_Pxy where "ALG_Pxy Strat qs init x y = (\i b \ {x,y} \ ALGxy_det Strat (A @ [b]) init x y = ALGxy_det Strat A init x y" proof - assume bn: "b \ {x,y}" have "(A @ [b]) ! 
(length A) = b" by auto assume l: "length A < length Strat" term "%i. ALG'_det Strat (A @ [b]) init i y" have e: "\i. i (A @ [b]) ! i = A ! i" by(auto simp: nth_append) have "(\i\ {..< length (A @ [b])}. if (A @ [b]) ! i \ {y, x} then ALG'_det Strat (A @ [b]) init i y + ALG'_det Strat (A @ [b]) init i x else 0) = (\i\ {..< Suc(length (A))}. if (A @ [b]) ! i \ {y, x} then ALG'_det Strat (A @ [b]) init i y + ALG'_det Strat (A @ [b]) init i x else 0)" by auto also have "\ = (\i\ {..< (length (A))}. if (A @ [b]) ! i \ {y, x} then ALG'_det Strat (A @ [b]) init i y + ALG'_det Strat (A @ [b]) init i x else 0) + ( if (A @ [b]) ! (length A) \ {y, x} then ALG'_det Strat (A @ [b]) init (length A) y + ALG'_det Strat (A @ [b]) init (length A) x else 0) " by simp (* abspalten des letzten glieds *) also have "\ = (\i\ {..< (length (A))}. if (A @ [b]) ! i \ {y, x} then ALG'_det Strat (A @ [b]) init i y + ALG'_det Strat (A @ [b]) init i x else 0)" using bn by auto also have "\ = (\i\ {..< (length (A))}. if A ! i \ {y, x} then ALG'_det Strat A init i y + ALG'_det Strat A init i x else 0)" apply(rule sum.cong) apply(simp) using l ALG'_det_append[where qs=A] e by(simp) finally show ?thesis unfolding ALGxy_det_def by simp qed lemma ALG_P_split: "length qs < length Strat \ ALG_Pxy Strat (qs@[q]) init x y = ALG_Pxy Strat qs init x y + ALG_P' Strat (qs@[q]) init (length qs) x y " unfolding ALG_Pxy_def apply(auto) apply(rule sum.cong) apply(simp) using ALG_P'_rest2[symmetric, of _ qs Strat "[]" "[q]"] by(simp) lemma swap0in2: assumes "set l = {x,y}" "x\y" "length l = 2" "dist_perm l l" shows "x < y in (swap 0) l = (~ x < y in l)" proof (cases "x < y in l") case True then have a: "index l x < index l y" unfolding before_in_def by simp from assms(1) have drin: "x\set l" "y\set l" by auto from assms(1,3) have b: "index l y < 2" by simp from a b have k: "index l x = 0" "index l y = 1" by auto have g: "x = l ! 0" "y = l ! 1" using k nth_index assms(1) by force+ have "x < y in swap 0 l = (x < y in l \ \ (x = l ! 0 \ y = l ! Suc 0) \ x = l ! Suc 0 \ y = l ! 0)" apply(rule before_in_swap) apply(fact assms(4)) using assms(3) by simp also have "\ = (\ (x = l ! 0 \ y = l ! Suc 0) \ x = l ! Suc 0 \ y = l ! 0)" using True by simp also have "\ = False" using g assms(2) by auto finally have "~ x < y in (swap 0) l" by simp then show ?thesis using True by auto next case False from assms(1,2) have "index l y \ index l x" by simp with False assms(1,2) have a: "index l y < index l x" by (metis before_in_def insert_iff linorder_neqE_nat) from assms(1) have drin: "x\set l" "y\set l" by auto from assms(1,3) have b: "index l x < 2" by simp from a b have k: "index l x = 1" "index l y = 0" by auto then have g: "x = l ! 1" "y = l ! 0" using k nth_index assms(1) by force+ have "x < y in swap 0 l = (x < y in l \ \ (x = l ! 0 \ y = l ! Suc 0) \ x = l ! Suc 0 \ y = l ! 0)" apply(rule before_in_swap) apply(fact assms(4)) using assms(3) by simp also have "\ = (x = l ! Suc 0 \ y = l ! 
0)" using False by simp also have "\ = True" using g by auto finally have "x < y in (swap 0) l" by simp then show ?thesis using False by auto qed lemma before_in_swap2: "dist_perm xs ys \ Suc n < size xs \ x\y \ x < y in (swap n xs) \ (~ x < y in xs \ (y = xs!n \ x = xs!Suc n) \ x < y in xs \ ~(y = xs!Suc n \ x = xs!n))" apply(simp add:before_in_def index_swap_distinct) by (metis Suc_lessD Suc_lessI index_nth_id less_Suc_eq nth_mem yes) lemma projected_paid_same_effect: assumes d: "dist_perm s1 s1" and ee: "x\y" and f: "set s2 = {x, y}" and g: "length s2 = 2" and h: "dist_perm s2 s2" shows "x < y in s1 = x < y in s2 \ x < y in swaps acs s1 = x < y in (swap 0 ^^ ALG_P acs x y s1) s2" proof (induct acs) case Nil then show ?case by auto next case (Cons s ss) from d have dd: "dist_perm (swaps ss s1) (swaps ss s1)" by simp from f have ff: "set ((swap 0 ^^ ALG_P ss x y s1) s2) = {x, y}" by (metis foldr_replicate swaps_inv) from g have gg: "length ((swap 0 ^^ ALG_P ss x y s1) s2) = 2" by (metis foldr_replicate swaps_inv) from h have hh: "dist_perm ((swap 0 ^^ ALG_P ss x y s1) s2) ((swap 0 ^^ ALG_P ss x y s1) s2)" by (metis foldr_replicate swaps_inv) show ?case (is "?LHS = ?RHS") proof (cases "Suc s < length (swaps ss s1) \ (((swaps ss s1)!s=x \ (swaps ss s1)!(Suc s)=y) \ ((swaps ss s1)!s=y \ (swaps ss s1)!(Suc s)=x))") case True from True have 1:" Suc s < length (swaps ss s1)" and 2: "(swaps ss s1 ! s = x \ swaps ss s1 ! Suc s = y \ swaps ss s1 ! s = y \ swaps ss s1 ! Suc s = x)" by auto from True have "ALG_P (s # ss) x y s1 = 1 + ALG_P ss x y s1" by auto then have "?RHS = x < y in (swap 0) ((swap 0 ^^ ALG_P ss x y s1) s2)" by auto also have "\ = (~ x < y in ((swap 0 ^^ ALG_P ss x y s1) s2))" apply(rule swap0in2) by(fact)+ also have "\ = (~ x < y in swaps ss s1)" using Cons by auto also have "\ = x < y in (swap s) (swaps ss s1)" using 1 2 before_in_swap by (metis Suc_lessD before_id dd lessI no_before_inI) (* bad *) also have "\ = ?LHS" by auto finally show ?thesis by simp next case False note F=this then have "ALG_P (s # ss) x y s1 = ALG_P ss x y s1" by auto then have "?RHS = x < y in ((swap 0 ^^ ALG_P ss x y s1) s2)" by auto also have "\ = x < y in swaps ss s1" using Cons by auto also have "\ = x < y in (swap s) (swaps ss s1)" proof (cases "Suc s < length (swaps ss s1)") case True with F have g: "swaps ss s1 ! s \ x \ swaps ss s1 ! Suc s \ y" and h: "swaps ss s1 ! s \ y \ swaps ss s1 ! Suc s \ x" by auto show ?thesis unfolding before_in_swap[OF dd True, of x y] apply(simp) using g h by auto next case False then show ?thesis unfolding swap_def by(simp) qed also have "\ = ?LHS" by auto finally show ?thesis by simp qed qed lemma steps_steps': "length qs = length as \ steps s qs as = steps' s qs as (length as)" by (induct qs as arbitrary: s rule: list_induct2) (auto) lemma T1_7': "T\<^sub>p init qs Strat = T\<^sub>p_opt init qs \ length Strat = length qs \ n\length qs \ x\(y::('a::linorder)) \ x\ set init \ y \ set init \ distinct init \ set qs \ set init \ (\Strat2 sws. 
\<^cancel>\T\<^sub>p_opt (Lxy init {x,y}) (Lxy (take n qs) {x,y}) \ T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x,y}) Strat2 \\ length Strat2 = length (Lxy (take n qs) {x,y}) \ (x < y in (steps' init (take n qs) (take n Strat) n)) = (x < y in (swaps sws (steps' (Lxy init {x,y}) (Lxy (take n qs) {x,y}) Strat2 (length Strat2)))) \ T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x,y}) Strat2 + length sws = ALGxy_det Strat (take n qs) init x y + ALG_Pxy Strat (take n qs) init x y)" proof(induct n) case (Suc n) from Suc(3,4) have ns: "n < length qs" by simp then have n: "n \ length qs" by simp from Suc(1)[OF Suc(2) Suc(3) n Suc(5) Suc(6) Suc(7) Suc(8) Suc(9) ] obtain Strat2 sws where (*S2: "T\<^sub>p_opt (Lxy init {x,y}) (Lxy (take n qs) {x, y}) \ T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2" and *) len: "length Strat2 = length (Lxy (take n qs) {x, y})" and iff: "x < y in steps' init (take n qs) (take n Strat) n = x < y in swaps sws (steps' (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2 (length Strat2))" and T_Strat2: "T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2 + length sws = ALGxy_det Strat (take n qs) init x y + ALG_Pxy Strat (take n qs) init x y " by (auto) from Suc(3-4) have nStrat: "n < length Strat" by auto from take_Suc_conv_app_nth[OF this] have tak2: "take (Suc n) Strat = take n Strat @ [Strat ! n]" by auto from take_Suc_conv_app_nth[OF ns] have tak: "take (Suc n) qs = take n qs @ [qs ! n]" by auto have aS: "length (take n Strat) = n" using Suc(3,4) by auto have aQ: "length (take n qs) = n" using Suc(4) by auto from aS aQ have qQS: "length (take n qs) = length (take n Strat)" by auto have xyininit: "x\ set init" "y : set init" by fact+ then have xysubs: "{x,y} \ set init" by auto have dI: "distinct init" by fact have "set qs \ set init" by fact then have qsnset: "qs ! n \ set init" using ns by auto from xyininit have ahjer: "set (Lxy init {x, y}) = {x,y}" using xysubs by (simp add: Lxy_set_filter) with Suc(5) have ah: "card (set (Lxy init {x, y})) = 2" by simp have ahjer3: "distinct (Lxy init {x,y})" apply(rule Lxy_distinct) by fact from ah have ahjer2: "length (Lxy init {x,y}) = 2" using distinct_card[OF ahjer3] by simp show ?case proof (cases "qs ! n \ {x,y}") case False with tak have nixzutun: "Lxy (take (Suc n) qs) {x,y} = Lxy (take n qs) {x,y}" unfolding Lxy_def by simp let ?m="ALG_P' (take n Strat @ [Strat ! n]) (take n qs @ [qs ! n]) init n x y" let ?L="replicate ?m 0 @ sws" { fix xs::"('a::linorder) list" fix m::nat fix q::'a assume "q \ {x,y}" then have 5: "y \ q" by auto assume 1: "q \ set xs" assume 2: "distinct xs" assume 3: "x \ set xs" assume 4: "y \ set xs" have "(x < y in xs) = (x < y in (mtf2 m q xs))" by (metis "1" "2" "3" "4" \q \ {x, y}\ insertCI not_before_in set_mtf2 swapped_by_mtf2) } note f=this (* taktik, erstmal das mtf weg bekommen, dann induct über snd (Strat!n) *) have "(x < y in steps' init (take (Suc n) qs) (take (Suc n) Strat) (Suc n)) = (x < y in mtf2 (fst (Strat ! n)) (qs ! n) (swaps (snd (Strat ! n)) (steps' init (take n qs) (take n Strat) n)))" unfolding tak2 tak apply(simp only: steps'_append[OF qQS aQ] ) by (simp add: step_def split_def) also have "\ = (x < y in (swaps (snd (Strat ! n)) (steps' init (take n qs) (take n Strat) n)))" apply(rule f[symmetric]) apply(fact) using qsnset steps'_set[OF qQS] aS apply(simp) using steps'_distinct[OF qQS] aS dI apply(simp) using steps'_set[OF qQS] aS xyininit by simp_all also have "\ = x < y in (swap 0 ^^ ALG_P (snd (Strat ! 
n)) x y (steps' init (take n qs) (take n Strat) n)) (swaps sws (steps' (Lxy init {x, y}) (Lxy (take n qs) {x, y}) Strat2 (length Strat2)))" apply(rule projected_paid_same_effect) apply(rule steps'_dist_perm) apply(fact qQS) apply(fact aS) using dI apply(simp) apply(fact Suc(5)) apply(simp) apply(rule steps'_set[where s="Lxy init {x,y}", unfolded ahjer]) using len apply(simp) apply(simp) apply(simp) apply(rule steps'_length[where s="Lxy init {x,y}", unfolded ahjer2]) using len apply(simp) apply(simp) apply(simp) apply(rule steps'_distinct2[where s="Lxy init {x,y}"]) using len apply(simp) apply(simp) apply(fact) using iff by auto finally have umfa: "x < y in steps' init (take (Suc n) qs) (take (Suc n) Strat) (Suc n) = x < y in (swap 0 ^^ ALG_P (snd (Strat ! n)) x y (steps' init (take n qs) (take n Strat) n)) (swaps sws (steps' (Lxy init {x, y}) (Lxy (take n qs) {x, y}) Strat2 (length Strat2)))" . from Suc(3,4) have lS: "length (take n Strat) = n" by auto have "(take n Strat @ [Strat ! n]) ! n = (take n Strat @ (Strat ! n) # []) ! length (take n Strat)" using lS by auto also have "\ = Strat ! n" by(rule nth_append_length) finally have tt: "(take n Strat @ [Strat ! n]) ! n = Strat ! n" . show ?thesis apply(rule exI[where x="Strat2"]) apply(rule exI[where x="?L"]) unfolding nixzutun apply(safe) apply(fact) proof goal_cases case 1 show ?case unfolding tak2 tak apply(simp add: step_def split_def) unfolding ALG_P'_def unfolding tt using aS apply(simp only: steps'_rests[OF qQS, symmetric]) using 1(1) umfa by auto next case 2 then show ?case apply(simp add: step_def split_def) unfolding ALG_P'_def unfolding tt using aS apply(simp only: steps'_rests[OF qQS, symmetric]) using umfa[symmetric] by auto next case 3 have ns2: "n < length (take n qs @ [qs ! n])" using ns by auto have er: "length (take n qs) < length Strat" using Suc.prems(2) aQ ns by linarith have "T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2 + length (replicate (ALG_P' Strat (take n qs @ [qs ! n]) init n x y) 0 @ sws) = ( T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2 + length sws) + ALG_P' Strat (take n qs @ [qs ! n]) init n x y" by simp also have "\ = ALGxy_det Strat (take n qs) init x y + ALG_Pxy Strat (take n qs) init x y + ALG_P' Strat (take n qs @ [qs ! n]) init n x y" unfolding T_Strat2 by simp also have "\ = ALGxy_det Strat (take (Suc n) qs) init x y + ALG_Pxy Strat (take (Suc n) qs) init x y" unfolding tak unfolding wegdamit[OF er False] apply(simp) unfolding ALG_P_split[of "take n qs" Strat "qs ! n" init x y, unfolded aQ, OF nStrat] by(simp) finally show ?case unfolding tak using ALG_P'_rest[OF ns nStrat] by auto qed next case True note qsinxy=this then have yeh: "Lxy (take (Suc n) qs) {x, y} = Lxy (take n qs) {x,y} @ [qs!n]" unfolding tak Lxy_def by auto from True have garar: "(take n qs @ [qs ! n]) ! n \ {y, x}" using tak[symmetric] by(auto) have aer: "\i {y, x}) = (take n qs ! i \ {y, x})" using ns by (metis less_SucI nth_take tak) (* erst definiere ich die zwischenzeitlichen Configurationen ?xs \ ?xs' \ ?xs'' und ?ys \ ?ys' \ ?ys'' \ ?ys''' und einige Eigenschaften über sie *) (* what is the mtf action taken by Strat? *) let ?Strat_mft = "fst (Strat ! n)" let ?Strat_sws = "snd (Strat ! n)" (* what is the configuration before the step? 
*) let ?xs = "steps' init (take n qs) (take n Strat) n" (* what is the configuration before the mtf *) let ?xs' = "(swaps (snd (Strat!n)) ?xs)" let ?xs'' = "steps' init (take (Suc n) qs) (take (Suc n) Strat) (Suc n)" let ?xs''2 = "mtf2 ?Strat_mft (qs!n) ?xs'" (* position of requested element *) let ?no_swap_occurs = "(x < y in ?xs') = (x < y in ?xs''2)" let ?mtf="(if ?no_swap_occurs then 0 else 1::nat)" let ?m="ALG_P' Strat (take n qs @ [qs ! n]) init n x y" let ?L="replicate ?m 0 @ sws" let ?newStrat="Strat2@[(?mtf,?L)]" have "?xs'' = step ?xs (qs!n) (Strat!n)" unfolding tak tak2 apply(rule steps'_append) by fact+ also have "\ = mtf2 (fst (Strat!n)) (qs!n) (swaps (snd (Strat!n)) ?xs)" unfolding step_def by (auto simp: split_def) finally have A: "?xs'' = mtf2 (fst (Strat!n)) (qs!n) ?xs'" . let ?ys = "(steps' (Lxy init {x, y}) (Lxy (take n qs) {x, y}) Strat2 (length Strat2))" let ?ys' = "( swaps sws (steps' (Lxy init {x, y}) (Lxy (take n qs) {x, y}) Strat2 (length Strat2)))" let ?ys'' = " (swap 0 ^^ ALG_P (snd (Strat!n)) x y ?xs) ?ys'" let ?ys''' = "(steps' (Lxy init {x, y}) (Lxy (take (Suc n) qs) {x, y}) ?newStrat (length ?newStrat))" have gr: "Lxy (take n qs @ [qs ! n]) {x, y} = Lxy (take n qs) {x, y} @ [qs ! n]" unfolding Lxy_def using True by(simp) have "steps' init (take n qs @ [qs ! n]) Strat n = steps' init (take n qs @ [qs ! n]) (take n Strat @ drop n Strat) n" by simp also have "\ = steps' init (take n qs) (take n Strat) n" apply(subst steps'_rests[symmetric]) using aS qQS by(simp_all) finally have t: "steps' init (take n qs @ [qs ! n]) Strat n = steps' init (take n qs) (take n Strat) n" . have gge: "swaps (replicate ?m 0) ?ys' = (swap 0 ^^ ALG_P (snd (Strat!n)) x y ?xs) ?ys'" unfolding ALG_P'_def t by simp have gg: "length ?newStrat = Suc (length Strat2)" by auto have "?ys''' = step ?ys (qs!n) (?mtf,?L)" unfolding tak gr unfolding gg apply(rule steps'_append) using len by auto also have "\ = mtf2 ?mtf (qs!n) (swaps ?L ?ys)" unfolding step_def by (simp add: split_def) also have "\ = mtf2 ?mtf (qs!n) (swaps (replicate ?m 0) ?ys')" by (simp) also have "\ = mtf2 ?mtf (qs!n) ?ys''" using gge by (simp) finally have B: "?ys''' = mtf2 ?mtf (qs!n) ?ys''" . 
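(* Summary of the intermediate configurations: ?xs is the list reached by Strat after the first n steps, ?xs' is ?xs after the paid exchanges of step n, and by A the list ?xs'' after step n equals mtf2 (fst (Strat!n)) (qs!n) ?xs'. On the projected side, ?ys is the two-element list reached by Strat2, ?ys' is ?ys after the pending swaps sws, ?ys'' additionally applies ALG_P-many swaps of position 0 to mirror the paid exchanges, and by B the new configuration ?ys''' equals mtf2 ?mtf (qs!n) ?ys''. *)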
have 3: "set ?ys' = {x,y}" apply(simp add: swaps_inv) apply(subst steps'_set) using ahjer len by(simp_all) have k: "?ys'' = swaps (replicate (ALG_P (snd (Strat!n)) x y ?xs) 0) ?ys'" by (auto) have 6: "set ?ys'' = {x,y}" unfolding k using 3 swaps_inv by metis have 7: "set ?ys''' = {x,y}" unfolding B using set_mtf2 6 by metis have 22: "x \ set ?ys''" "y \ set ?ys''" using 6 by auto have 23: "x \ set ?ys'''" "y \ set ?ys'''" using 7 by auto have 26: "(qs!n) \ set ?ys''" using 6 True by auto have "distinct ?ys" apply(rule steps'_distinct2) using len ahjer3 by(simp)+ then have 9: "distinct ?ys'" using swaps_inv by metis then have 27: "distinct ?ys''" unfolding k using swaps_inv by metis from 3 Suc(5) have "card (set ?ys') = 2" by auto then have 4: "length ?ys' = 2" using distinct_card[OF 9] by simp have "length ?ys'' = 2" unfolding k using 4 swaps_inv by metis have 5: "dist_perm ?ys' ?ys'" using 9 by auto have sxs: "set ?xs = set init" apply(rule steps'_set) using qQS n Suc(3) by(auto) have sxs': "set ?xs' = set ?xs" using swaps_inv by metis have sxs'': "set ?xs'' = set ?xs'" unfolding A using set_mtf2 by metis have 24: "x \ set ?xs'" "y\set ?xs'" "(qs!n) \ set ?xs'" using xysubs True sxs sxs' by auto have 28: "x \ set ?xs''" "y\set ?xs''" "(qs!n) \ set ?xs''" using xysubs True sxs sxs' sxs'' by auto have 0: "dist_perm init init" using dI by auto have 1: "dist_perm ?xs ?xs" apply(rule steps'_dist_perm) by fact+ then have 25: "distinct ?xs'" using swaps_inv by metis (* aus der Induktionsvorraussetzung (iff) weiß ich bereits dass die Ordnung erhalten wird bis zum nten Schritt, mit Theorem "projected_paid_same_effect" kann ich auch die paid exchanges abarbeiten ...*) from projected_paid_same_effect[OF 1 Suc(5) 3 4 5, OF iff, where acs="snd (Strat ! n)"] have aaa: "x < y in ?xs' = x < y in ?ys''" . (* ... was nun noch fehlt ist, dass die moveToFront anweisungen von Strat und Strat2 sich in gleicher Art auf die Ordnung von x und y auswirken *) have t: "?mtf = (if (xa b. a\{x,y} \ b\{x,y} \ set ?ys'' = {x,y} \ a\b \ distinct ?ys'' \ a ~a set ?ys''" by auto from d e have p: "mtf2 1 b ?ys'' = swap 0 ?ys''" unfolding mtf2_def by auto have q: "a < b in swap 0 ?ys'' = (\ a < b in ?ys'')" apply(rule swap0in2) by(fact)+ from 1(6) p q show ?case by metis qed show ?thesis proof (cases "xi\{.. {y, x} then ALG'_det Strat (take n qs @ [qs ! n]) init i y + ALG'_det Strat (take n qs @ [qs ! n]) init i x else 0)" unfolding ALGxy_det_def tak by auto also have "\ = (\i\{.. {y, x} then ALG'_det Strat (take n qs @ [qs ! n]) init i y + ALG'_det Strat (take n qs @ [qs ! n]) init i x else 0)" using ns by simp also have "\ = (\i\{.. {y, x} then ALG'_det Strat (take n qs @ [qs ! n]) init i y + ALG'_det Strat (take n qs @ [qs ! n]) init i x else 0) + (if (take n qs @ [qs ! n]) ! n \ {y, x} then ALG'_det Strat (take n qs @ [qs ! n]) init n y + ALG'_det Strat (take n qs @ [qs ! n]) init n x else 0)" by simp also have "\ = (\i\{..< n}. if take n qs ! i \ {y, x} then ALG'_det Strat (take n qs @ [qs ! n]) init i y + ALG'_det Strat (take n qs @ [qs ! n]) init i x else 0) + ALG'_det Strat (take n qs @ [qs ! n]) init n y + ALG'_det Strat (take n qs @ [qs ! n]) init n x " using aer using garar by simp also have "\ = (\i\{..< n}. if take n qs ! i \ {y, x} then ALG'_det Strat (take n qs @ [qs ! n]) init i y + ALG'_det Strat (take n qs @ [qs ! n]) init i x else 0) + ALG'_det Strat qs init n y + ALG'_det Strat qs init n x" proof - have "ALG'_det Strat qs init n y = ALG'_det Strat ((take n qs @ [qs ! 
n]) @ drop (Suc n) qs) init n y" unfolding tak[symmetric] by auto also have "\ = ALG'_det Strat (take n qs @ [qs ! n]) init n y " apply(rule ALG'_det_append) using nStrat ns by(auto) finally have 1: "ALG'_det Strat qs init n y = ALG'_det Strat (take n qs @ [qs ! n]) init n y" . have "ALG'_det Strat qs init n x = ALG'_det Strat ((take n qs @ [qs ! n]) @ drop (Suc n) qs) init n x" unfolding tak[symmetric] by auto also have "\ = ALG'_det Strat (take n qs @ [qs ! n]) init n x " apply(rule ALG'_det_append) using nStrat ns by(auto) finally have 2: "ALG'_det Strat qs init n x = ALG'_det Strat (take n qs @ [qs ! n]) init n x" . from 1 2 show ?thesis by auto qed also have "\ = (\i\{..< n}. if take n qs ! i \ {y, x} then ALG'_det Strat (take n qs) init i y + ALG'_det Strat (take n qs) init i x else 0) + ALG'_det Strat qs init n y + ALG'_det Strat qs init n x" apply(simp) apply(rule sum.cong) apply(simp) apply(simp) using ALG'_det_append[where qs="take n qs"] Suc.prems(2) ns by auto also have "\ = (\i\{..< length(take n qs)}. if take n qs ! i \ {y, x} then ALG'_det Strat (take n qs) init i y + ALG'_det Strat (take n qs) init i x else 0) + ALG'_det Strat qs init n y + ALG'_det Strat qs init n x" using aQ by auto also have "\ = ALGxy_det Strat (take n qs) init x y + (ALG'_det Strat qs init n y + ALG'_det Strat qs init n x)" unfolding ALGxy_det_def by(simp) finally show ?thesis . qed (* aaa: x < y in ?xs' = x < y in ?ys'' central: x < y in ?xs'' = x < y in ?ys''' *) have list: "?ys' = swaps sws (steps (Lxy init {x, y}) (Lxy (take n qs) {x, y}) Strat2)" unfolding steps_steps'[OF len[symmetric], of "(Lxy init {x, y})"] by simp have j2: "steps' init (take n qs @ [qs ! n]) Strat n = steps' init (take n qs) (take n Strat) n" proof - have "steps' init (take n qs @ [qs ! n]) Strat n = steps' init (take n qs @ [qs ! n]) (take n Strat @ drop n Strat) n" by auto also have "\ = steps' init (take n qs) (take n Strat) n" apply(rule steps'_rests[symmetric]) apply fact using aS by simp finally show ?thesis . qed have arghschonwieder: "steps' init (take n qs) (take n Strat) n = steps' init qs Strat n" proof - have "steps' init qs Strat n = steps' init (take n qs @ drop n qs) (take n Strat @ drop n Strat) n" by auto also have "\ = steps' init (take n qs) (take n Strat) n" apply(rule steps'_rests[symmetric]) apply fact using aS by simp finally show ?thesis by simp qed have indexe: "((swap 0 ^^ ?m) (swaps sws (steps (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2))) = ?ys''" unfolding ALG_P'_def unfolding list using j2 by auto have blocky: "ALG'_det Strat qs init n y = (if y < qs ! n in ?xs' then 1 else 0)" unfolding ALG'_det_def ALG.simps by(auto simp: arghschonwieder split_def) have blockx: "ALG'_det Strat qs init n x = (if x < qs ! n in ?xs' then 1 else 0)" unfolding ALG'_det_def ALG.simps by(auto simp: arghschonwieder split_def) have index_is_blocking_cost: "index ((swap 0 ^^ ?m) (swaps sws (steps (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2))) (qs ! 
n) = ALG'_det Strat qs init n y + ALG'_det Strat qs init n x" proof (cases "x= qs!n") case True then have "ALG'_det Strat qs init n x = 0" unfolding blockx apply(simp) using before_in_irefl by metis then have "ALG'_det Strat qs init n y + ALG'_det Strat qs init n x = (if y < x in ?xs' then 1 else 0)" unfolding blocky using True by simp also have "\ = (if ~y < x in ?xs' then 0 else 1)" by auto also have "\ = (if x < y in ?xs' then 0 else 1)" apply(simp) by (meson 24 Suc.prems(4) not_before_in) also have "\ = (if x < y in ?ys'' then 0 else 1)" using aaa by simp also have "\ = index ?ys'' x" apply(rule before_in_index1) by(fact)+ finally show ?thesis unfolding indexe using True by auto next case False then have q: "y = qs!n" using qsinxy by auto then have "ALG'_det Strat qs init n y = 0" unfolding blocky apply(simp) using before_in_irefl by metis then have "ALG'_det Strat qs init n y + ALG'_det Strat qs init n x = (if x < y in ?xs' then 1 else 0)" unfolding blockx using q by simp also have "\ = (if x < y in ?ys'' then 1 else 0)" using aaa by simp also have "\ = index ?ys'' y" apply(rule before_in_index2) by(fact)+ finally show ?thesis unfolding indexe using q by auto qed have jj: "ALG_Pxy Strat (take (Suc n) qs) init x y = ALG_Pxy Strat (take n qs) init x y + ALG_P' Strat (take n qs @ [qs ! n]) init n x y" proof - have "ALG_Pxy Strat (take (Suc n) qs) init x y = (\i = (\i< Suc n. ALG_P' Strat (take (Suc n) qs) init i x y)" unfolding tak using ns by simp also have "\ = (\i = (\i = (\i = ALG_Pxy Strat (take n qs) init x y + ALG_P' Strat (take n qs @ [qs ! n]) init n x y" unfolding ALG_Pxy_def by auto finally show ?thesis . qed have tw: "length (Lxy (take n qs) {x, y}) = length Strat2" using len by auto have "T\<^sub>p (Lxy init {x,y}) (Lxy (take (Suc n) qs) {x, y}) ?newStrat + length [] = T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2 + t\<^sub>p (steps (Lxy init {x, y}) (Lxy (take n qs) {x, y}) Strat2) (qs ! n) (?mtf,?L)" unfolding yeh by(simp add: T_append[OF tw, of "(Lxy init) {x,y}"]) also have "\ = T\<^sub>p (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2 + length sws + index ((swap 0 ^^ ?m) (swaps sws (steps (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2))) (qs ! n) + ALG_P' Strat (take n qs @ [qs ! n]) init n x y" by(simp add: t\<^sub>p_def) (* now use iH *) also have "\ = (ALGxy_det Strat (take n qs) init x y + index ((swap 0 ^^ ?m) (swaps sws (steps (Lxy init {x,y}) (Lxy (take n qs) {x, y}) Strat2))) (qs ! n)) + (ALG_Pxy Strat (take n qs) init x y + ALG_P' Strat (take n qs @ [qs ! n]) init n x y)" by (simp only: T_Strat2) (* the current cost are equal to the blocking costs: *) also from index_is_blocking_cost have "\ = (ALGxy_det Strat (take n qs) init x y + ALG'_det Strat qs init n y + ALG'_det Strat qs init n x) + (ALG_Pxy Strat (take n qs) init x y + ALG_P' Strat (take n qs @ [qs ! n]) init n x y)" by auto also have "\ = ALGxy_det Strat (take (Suc n) qs) init x y + (ALG_Pxy Strat (take n qs) init x y + ALG_P' Strat (take n qs @ [qs ! n]) init n x y)" using j by auto also have "\ = ALGxy_det Strat (take (Suc n) qs) init x y + ALG_Pxy Strat (take (Suc n) qs) init x y" using jj by auto finally show ?case . 
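(* This closes the induction step; the remaining case 0 below is discharged by unfolding the definitions of Lxy, ALGxy_det, ALG_Pxy and T_opt. *)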
qed qed qed next case 0 then show ?case apply (simp add: Lxy_def ALGxy_det_def ALG_Pxy_def T_opt_def) proof goal_cases case 1 show ?case apply(rule Lxy_mono[unfolded Lxy_def, simplified]) using 1 by auto qed qed lemma T1_7: assumes "T\<^sub>p init qs Strat = T\<^sub>p_opt init qs" "length Strat = length qs" "x \ (y::('a::linorder))" "x\ set init" "y \ set init" "distinct init" "set qs \ set init" shows "T\<^sub>p_opt (Lxy init {x,y}) (Lxy qs {x,y}) \ ALGxy_det Strat qs init x y + ALG_Pxy Strat qs init x y" proof - have A:"length qs \ length qs" by auto have B:" x \ y " using assms by auto from T1_7'[OF assms(1,2), of "length qs" x y, OF A B assms(4-7)] obtain Strat2 sws where len: "length Strat2 = length (Lxy qs {x, y})" and "x < y in steps' init qs (take (length qs) Strat) (length qs) = x < y in swaps sws (steps' (Lxy init {x,y}) (Lxy qs {x, y}) Strat2 (length Strat2))" and Tp: "T\<^sub>p (Lxy init {x,y}) (Lxy qs {x, y}) Strat2 + length sws = ALGxy_det Strat qs init x y + ALG_Pxy Strat qs init x y" by auto have "T\<^sub>p_opt (Lxy init {x,y}) (Lxy qs {x,y}) \ T\<^sub>p (Lxy init {x,y}) (Lxy qs {x, y}) Strat2" unfolding T_opt_def apply(rule cInf_lower) using len by auto also have "\ \ ALGxy_det Strat qs init x y + ALG_Pxy Strat qs init x y" using Tp by auto finally show ?thesis . qed lemma T_snoc: "length rs = length as \ T init (rs@[r]) (as@[a]) = T init rs as + t\<^sub>p (steps' init rs as (length rs)) r a" apply(induct rs as arbitrary: init rule: list_induct2) by simp_all lemma steps'_snoc: "length rs = length as \ n = (length as) \ steps' init (rs@[r]) (as@[a]) (Suc n) = step (steps' init rs as n) r a" apply(induct rs as arbitrary: init n r a rule: list_induct2) by (simp_all) lemma steps'_take: assumes "n = steps' init (take n qs) (take n Strat) n" apply(subst steps'_rests[symmetric]) using assms by auto finally show ?thesis by simp qed lemma Tp_darstellung: "length qs = length Strat \ T\<^sub>p init qs Strat = (\i\{..p (steps' init qs Strat i) (qs!i) (Strat!i))" proof - assume a[simp]: "length qs = length Strat" {fix n have "n\length qs \ T\<^sub>p init (take n qs) (take n Strat) = (\i\{..p (steps' init qs Strat i) (qs!i) (Strat!i))" apply(induct n) apply(simp) apply(simp add: take_Suc_conv_app_nth) apply(subst T_snoc) apply(simp) by(simp add: min_def steps'_take) } from a this[of "length qs"] show ?thesis by auto qed (* Gleichung 1.8 in Borodin *) lemma umformung_OPT': assumes inlist: "set qs \ set init" assumes dist: "distinct init" assumes qsStrat: "length qs = length Strat" assumes noStupid: "\x l. x l< length (snd (Strat ! x)) \ Suc ((snd (Strat ! x))!l) < length init" shows "T\<^sub>p init qs Strat = (\(x,y)\{(x,y::('a::linorder)). x \ set init \ y\set init \ xn. \xa \ set_pmf (config\<^sub>p (I, S) qs init n). distinct (snd xa)" using dist config_rand_distinct by metis *) (* ersten Teil umformen: *) have "(\i\{..(x,y)\{(x,y). x \ set init \ y\set init \ xi\{..z\{(x,y). x \ set init \ y\set init \ x = (\z\{(x,y). x \ set init \ y\set init \ xi\{.. = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{.. = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{..(x,y)\{(x,y). x \ set init \ y\set init \ x(x,y)\{(x,y). x \ set init \ y\set init \ xi\{..e\set init. ALG e qs i (?config i, ()))) = (\e\set init. (\i\{.. = (\e\set init. (\y\set init. (\i\{i. i qs!i=y}. ALG e qs i (?config i,()))))" proof (rule sum.cong, goal_cases) case (2 x) have "(\i = sum (%i. ALG x qs i (?config i, ())) (\y\{y. y \ set init}. {i. i < length qs \ qs ! 
i = y})" apply(rule sum.cong) proof goal_cases case 1 show ?case apply(auto) using inlist by auto qed simp also have "\ = sum (%t. sum (%i. ALG x qs i (?config i, ())) {i. i qs ! i = t}) {y. y\ set init}" apply(rule sum.UNION_disjoint) apply(simp_all) by force also have "\ = (\y\set init. \i | i < length qs \ qs ! i = y. ALG x qs i (?config i, ()))" by auto finally show ?case . qed (simp) also have "\ = (\(x,y)\ (set init \ set init). (\i\{i. i qs!i=y}. ALG x qs i (?config i, ())))" by (rule sum.cartesian_product) also have "\ = (\(x,y)\ {(x,y). x\set init \ y\ set init}. (\i\{i. i qs!i=y}. ALG x qs i (?config i, ())))" by simp also have E4: "\ = (\(x,y)\{(x,y). x\set init \ y\ set init \ x\y}. (\i\{i. i qs!i=y}. ALG x qs i (?config i, ())))" (is "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R. ?f x y)") proof goal_cases case 1 let ?M = "{(x,y). x\set init \ y\ set init \ x=y}" have A: "?L = ?R \ ?M" by auto have B: "{} = ?R \ ?M" by auto have "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R \ ?M. ?f x y)" by(simp only: A) also have "\ = (\(x,y)\ ?R. ?f x y) + (\(x,y)\ ?M. ?f x y)" apply(rule sum.union_disjoint) apply(rule finite_subset[where B="set init \ set init"]) apply(auto) apply(rule finite_subset[where B="set init \ set init"]) by(auto) also have "(\(x,y)\ ?M. ?f x y) = 0" apply(rule sum.neutral) by (auto simp add: split_def before_in_def) finally show ?case by simp qed also have "\ = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{i. i qs!i=y}. ALG x qs i (?config i, ())) + (\i\{i. i qs!i=x}. ALG y qs i (?config i, ())) )" (is "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R. ?f x y + ?f y x)") proof - let ?R' = "{(x,y). x \ set init \ y\set init \ y ?R'" by auto have "{} = ?R \ ?R'" by auto have C: "?R' = (%(x,y). (y, x)) ` ?R" by auto have D: "(\(x,y)\ ?R'. ?f x y) = (\(x,y)\ ?R. ?f y x)" proof - have "(\(x,y)\ ?R'. ?f x y) = (\(x,y)\ (%(x,y). (y, x)) ` ?R. ?f x y)" by(simp only: C) also have "(\z\ (%(x,y). (y, x)) ` ?R. (%(x,y). ?f x y) z) = (\z\?R. ((%(x,y). ?f x y) \ (%(x,y). (y, x))) z)" apply(rule sum.reindex) by(fact swap_inj_on) also have "\ = (\z\?R. (%(x,y). ?f y x) z)" apply(rule sum.cong) by(auto) finally show ?thesis . qed have "(\(x,y)\ ?L. ?f x y) = (\(x,y)\ ?R \ ?R'. ?f x y)" by(simp only: A) also have "\ = (\(x,y)\ ?R. ?f x y) + (\(x,y)\ ?R'. ?f x y)" apply(rule sum.union_disjoint) apply(rule finite_subset[where B="set init \ set init"]) apply(auto) apply(rule finite_subset[where B="set init \ set init"]) by(auto) also have "\ = (\(x,y)\ ?R. ?f x y) + (\(x,y)\ ?R. ?f y x)" by(simp only: D) also have "\ = (\(x,y)\ ?R. ?f x y + ?f y x)" by(simp add: split_def sum.distrib[symmetric]) finally show ?thesis . qed also have E5: "\ = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{i. i (qs!i=y \ qs!i=x)}. ALG y qs i (?config i, ()) + ALG x qs i (?config i, ())))" apply(rule sum.cong) apply(simp) proof goal_cases case (1 x) then obtain a b where x: "x=(a,b)" and a: "a \ set init" "b \ set init" "a < b" by auto then have "a\b" by simp then have disj: "{i. i < length qs \ qs ! i = b} \ {i. i < length qs \ qs ! i = a} = {}" by auto have unio: "{i. i < length qs \ (qs ! i = b \ qs ! i = a)} = {i. i < length qs \ qs ! i = b} \ {i. i < length qs \ qs ! i = a}" by auto let ?f="%i. ALG b qs i (?config i, ()) + ALG a qs i (?config i, ())" let ?B="{i. i < length qs \ qs ! i = b}" let ?A="{i. i < length qs \ qs ! i = a}" have "(\i\?B \ ?A. ?f i) = (\i\?B. ?f i) + (\i\?A. ?f i) - (\i\?B \ ?A. ?f i) " apply(rule sum_Un_nat) by auto also have "\ = (\i\?B. ALG b qs i (?config i, ()) + ALG a qs i (?config i, ())) + (\i\?A. 
ALG b qs i (?config i, ()) + ALG a qs i (?config i, ()))" using disj by auto also have "\ = (\i\?B. ALG a qs i (?config i, ())) + (\i\?A. ALG b qs i (?config i, ()))" by (auto simp: split_def before_in_def) finally show ?case unfolding x apply(simp add: split_def) unfolding unio by simp qed also have E6: "\ = (\(x,y)\{(x,y). x \ set init \ y\set init \ xie\set init. ALG e qs i (?config i, ())) = (\(x,y)\{(x,y). x \ set init \ y\set init \ xp init qs Strat = (\i\{..p (steps' init qs Strat i) (qs!i) (Strat!i))" by auto also have "\ = (\i\{..e\set (steps' init qs Strat i). ALG e qs i (swaps (snd (Strat!i)) (steps' init qs Strat i),())) + (\(x,y)\{(x,(y::('a::linorder))). x \ set (steps' init qs Strat i) \ y\set (steps' init qs Strat i) \ xp_sumofALGALGP) apply(rule steps'_distinct2) using dist qsStrat apply(simp_all) apply(subst steps'_set) using dist qsStrat inlist apply(simp_all) apply fastforce apply(subst steps'_length) apply(simp_all) using noStupid by auto also have "\ = (\i\{..e\set init. ALG e qs i (swaps (snd (Strat!i)) (steps' init qs Strat i),())) + (\(x,y)\{(x,y). x \ set init \ y\set init \ x = (\i\{..e\set init. ALG e qs i (swaps (snd (Strat!i)) (steps' init qs Strat i), ()))) + (\i\{..(x,y)\{(x,y). x \ set init \ y\set init \ x = (\(x,y)\{(x,y). x \ set init \ y\set init \ xi\{..(x,y)\{(x,y). x \ set init \ y\set init \ x = (\(x,y)\{(x,y). x \ set init \ y\set init \ x(x,y)\{(x,y). x \ set init \ y\set init \ x = (\(x,y)\{(x,y). x \ set init \ y\set init \ x {}" shows "Inf S \ S" using assms Inf_nat_def LeastI by force lemma steps_length: "length qs = length as \ length (steps s qs as) = length s" apply(induct qs as arbitrary: s rule: list_induct2) by simp_all (* shows that OPT does not use paid exchanges that do not have an effect *) lemma OPT_noStupid: fixes Strat assumes [simp]: "length Strat = length qs" assumes opt: "T\<^sub>p init qs Strat = T\<^sub>p_opt init qs" assumes init_nempty: "init\[]" shows "\x l. x < length Strat \ l < length (snd (Strat ! x)) \ Suc ((snd (Strat ! x))!l) < length init" proof (rule ccontr, goal_cases) case (1 x l) (* construct a Stratgy that leaves out that paid exchange *) let ?sws' = "take l (snd (Strat!x)) @ drop (Suc l) (snd (Strat!x))" let ?Strat' = "take x Strat @ (fst (Strat!x),?sws') # drop (Suc x) Strat" from 1(1) have valid: "length ?Strat' = length qs" by simp from valid have isin: "T\<^sub>p init qs ?Strat' \ {T\<^sub>p init qs as |as. length as = length qs}" by blast from 1(1,2) have lsws': "length (snd (Strat!x)) = length ?sws' + 1" by (simp) have a: "(take x ?Strat') = (take x Strat)" using 1(1) by(auto simp add: min_def take_Suc_conv_app_nth) have b: "(drop (Suc x) Strat) = (drop (Suc x) ?Strat')" using 1(1) by(auto simp add: min_def take_Suc_conv_app_nth) have aa: "(take l (snd (Strat!x))) = (take l (snd (?Strat'!x)))" using 1(1,2) by(auto simp add: min_def take_Suc_conv_app_nth nth_append) have bb: "(drop (Suc l) (snd (Strat!x))) = (drop l (snd (?Strat'!x)))" using 1(1,2) by(auto simp add: min_def take_Suc_conv_app_nth nth_append ) have "(swaps (snd (Strat ! x)) (steps init (take x qs) (take x Strat))) = (swaps (take l (snd (Strat ! x)) @ (snd (Strat ! x))!l # drop (Suc l) (snd (Strat ! x))) (steps init (take x qs) (take x Strat)))" unfolding id_take_nth_drop[OF 1(2), symmetric] by simp also have "... = (swaps (take l (snd (Strat ! x)) @ drop (Suc l) (snd (Strat ! x))) (steps init (take x qs) (take x Strat)))" using 1(3) by(simp add: swap_def steps_length) finally have noeffect: "(swaps (snd (Strat ! 
x)) (steps init (take x qs) (take x Strat))) = (swaps (take l (snd (Strat ! x)) @ drop (Suc l) (snd (Strat ! x))) (steps init (take x qs) (take x Strat)))" . have c: "t\<^sub>p (steps init (take x qs) (take x Strat)) (qs ! x) (Strat ! x) = t\<^sub>p (steps init (take x qs) (take x ?Strat')) (qs ! x) (?Strat' ! x) + 1" unfolding a t\<^sub>p_def using 1(1,2) apply(simp add: min_def split_def nth_append) unfolding noeffect by(simp) have "T\<^sub>p init (take (Suc x) qs) (take (Suc x) Strat) = T\<^sub>p init (take x qs) (take x ?Strat') + t\<^sub>p (steps init (take x qs) (take x Strat)) (qs ! x) (Strat ! x)" using 1(1) a by(simp add: take_Suc_conv_app_nth T_append) also have "\ = T\<^sub>p init (take x qs) (take x ?Strat') + t\<^sub>p (steps init (take x qs) (take x ?Strat')) (qs ! x) (?Strat' ! x) + 1" unfolding c by(simp) also have "\ = T\<^sub>p init (take (Suc x) qs) (take (Suc x) ?Strat') + 1" using 1(1) a by(simp add: min_def take_Suc_conv_app_nth T_append nth_append) finally have bef: "T\<^sub>p init (take (Suc x) qs) (take (Suc x) Strat) = T\<^sub>p init (take (Suc x) qs) (take (Suc x) ?Strat') + 1" . let ?interstate = "(steps init (take (Suc x) qs) (take (Suc x) Strat))" let ?interstate' = "(steps init (take (Suc x) qs) (take (Suc x) ?Strat'))" have state: "?interstate' = ?interstate" using 1(1) apply(simp add: take_Suc_conv_app_nth min_def) apply(simp add: steps_append step_def split_def) using noeffect by simp have "T\<^sub>p init qs Strat = T\<^sub>p init (take (Suc x) qs @ drop (Suc x) qs) (take (Suc x) Strat @ drop (Suc x) Strat)" by simp also have "\ = T\<^sub>p init (take (Suc x) qs) (take (Suc x) Strat) + T\<^sub>p ?interstate (drop (Suc x) qs) (drop (Suc x) Strat)" apply(subst T_append2) by(simp_all) also have "\ = T\<^sub>p init (take (Suc x) qs) (take (Suc x) ?Strat') + T\<^sub>p ?interstate' (drop (Suc x) qs) (drop (Suc x) ?Strat') + 1" unfolding bef state using 1(1) by(simp add: min_def nth_append) also have "\ = T\<^sub>p init (take (Suc x) qs @ drop (Suc x) qs) (take (Suc x) ?Strat' @ drop (Suc x) ?Strat') + 1" apply(subst T_append2) using 1(1) by(simp_all add: min_def) also have "\ = T\<^sub>p init qs ?Strat' + 1" by simp finally have better: "T\<^sub>p init qs ?Strat' + 1 = T\<^sub>p init qs Strat" by simp have "T\<^sub>p init qs ?Strat' + 1 = T\<^sub>p init qs Strat" by (fact better) also have "\ = T\<^sub>p_opt init qs" by (fact opt) also from cInf_lower[OF isin] have "\ \ T\<^sub>p init qs ?Strat'" unfolding T_opt_def by simp finally show "False" using init_nempty by auto qed (* Gleichung 1.8 in Borodin *) lemma umformung_OPT: assumes inlist: "set qs \ set init" assumes dist: "distinct init" assumes a: "T\<^sub>p_opt init qs = T\<^sub>p init qs Strat" assumes b: " length qs = length Strat" assumes c: "init\[]" shows "T\<^sub>p_opt init qs = (\(x,y)\{(x,y::('a::linorder)). x \ set init \ y\set init \ xp_opt init qs = T\<^sub>p init qs Strat" by(fact a) also have "\ = (\(x,y)\{(x,y::('a::linorder)). x \ set init \ y\set init \ x[]" and setqsinit: "set qs \ set init" shows "(\(x,y)\{(x,y::('a::linorder)). x \ set init \ y\set init \ xp_opt (Lxy init {x,y}) (Lxy qs {x,y}))) \ T\<^sub>p_opt init qs" proof - have "T\<^sub>p_opt init qs \ {T\<^sub>p init qs as |as. length as = length qs}" unfolding T_opt_def apply(rule nn_contains_Inf) apply(auto) by (rule Ex_list_of_length) then obtain Strat where a: "T\<^sub>p init qs Strat = T\<^sub>p_opt init qs" and b: "length Strat = length qs" unfolding T_opt_def by auto have "(\(x,y)\{(x,y). 
x \ set init \ y\set init \ xp_opt (Lxy init {x,y}) (Lxy qs {x, y})) \ (\(x,y)\{(x,y). x \ set init \ y\set init \ xb" by auto show ?case apply(rule T1_7[OF a b]) by(fact)+ qed also from umformung_OPT[OF setqsinit dist] a b c have "\ = T\<^sub>p init qs Strat" by auto also from a have "\ = T\<^sub>p_opt init qs" by simp finally show ?thesis . qed subsection "Factoring Lemma" lemma cardofpairs: "S \ [] \ sorted S \ distinct S \ card {(x,y). x \ set S \ y\set S \ x set ss \ y \ set ss \ x < y} = (length ss * (length ss-1)) / 2" by auto from cons have sss: "s \ set ss" by auto from cons have tt: "(\y\set (s#ss). s \ y)" by auto with cons have tt': "(\y\set ss. s < y)" proof - from sss have "(\y\set ss. s \ y)" by auto with tt show ?thesis by fastforce qed then have "{(x, y) . x = s \ y \ set ss \ x < y} = {(x, y) . x = s \ y \ set ss}" by auto also have "\ = {s}\(set ss)" by auto finally have "{(x, y) . x = s \ y \ set ss \ x < y} = {s}\(set ss)" . then have "card {(x, y) . x = s \ y \ set ss \ x < y} = card (set ss)" by(auto) also from cons distinct_card have "\ = length ss" by auto finally have step: "card {(x, y) . x = s \ y \ set ss \ x < y} = length ss" . have uni: "{(x, y) . x \ set (s # ss) \ y \ set (s # ss) \ x < y} = {(x, y) . x \ set ss \ y \ set ss \ x < y} \ {(x, y) . x = s \ y \ set ss \ x < y}" using tt by auto have disj: "{(x, y) . x \ set ss \ y \ set ss \ x < y} \ {(x, y) . x = s \ y \ set ss \ x < y} = {}" using sss by(auto) have "card {(x, y) . x \ set (s # ss) \ y \ set (s # ss) \ x < y} = card ({(x, y) . x \ set ss \ y \ set ss \ x < y} \ {(x, y) . x = s \ y \ set ss \ x < y})" using uni by auto also have "\ = card {(x, y) . x \ set ss \ y \ set ss \ x < y} + card {(x, y) . x = s \ y \ set ss \ x < y}" apply(rule card_Un_disjoint) apply(rule finite_subset[where B="(set ss) \ (set ss)"]) apply(force) apply(simp) apply(rule finite_subset[where B="{s} \ (set ss)"]) apply(force) apply(simp) using disj apply(simp) done also have "\ = (length ss * (length ss-1)) / 2 + length ss" using iH step by auto also have "\ = (length ss * (length ss-1) + 2*length ss) / 2" by auto also have "\ = (length ss * (length ss-1) + length ss * 2) / 2" by auto also have "\ = (length ss * (length ss-1+2)) / 2" by simp also have "\ = (length ss * (length ss+1)) / 2" using cons(1) by simp also have "\ = ((length ss+1) * length ss) / 2" by auto also have "\ = (length (s#ss) * (length (s#ss)-1)) / 2" by auto finally show ?case by auto next case single thus ?case by(simp cong: conj_cong) qed (* factoring lemma *) lemma factoringlemma_withconstant: fixes A and b::real and c::real assumes c: "c \ 1" assumes dist: "\e\S0. distinct e" assumes notempty: "\e\S0. length e > 0" (* A has pairwise property *) assumes pw: "pairwise A" (* A is c-competitive on list of length 2 *) assumes on2: "\s0\S0. \b\0. \qs\{x. set x \ set s0}. \(x,y)\{(x,y). x \ set s0 \ y\set s0 \ xp_on_rand A (Lxy s0 {x,y}) (Lxy qs {x,y}) \ c * (T\<^sub>p_opt (Lxy s0 {x,y}) (Lxy qs {x,y})) + b" assumes nopaid: "\is s q. \((free,paid),_) \ (snd A (s, is) q). paid=[]" assumes 4: "\init qs. distinct init \ set qs \ set init \ (\x. x finite (set_pmf (config'' A qs init x)))" (* then A is c-competitive on arbitrary list lengths *) shows "\s0\S0. \b\0. \qs\{x. set x \ set s0}. T\<^sub>p_on_rand A s0 qs \ c * real (T\<^sub>p_opt s0 qs) + b" proof (standard, goal_cases) case (1 init) have d: "distinct init" using dist 1 by auto have d2: "init \ []" using notempty 1 by auto obtain b where on3: "\qs\{x. set x \ set init}. \(x,y)\{(x,y). 
x \ set init \ y\set init \ xp_on_rand A (Lxy init {x,y}) (Lxy qs {x,y}) \ c * (T\<^sub>p_opt (Lxy init {x,y}) (Lxy qs {x,y})) + b" and b: "b\0" using on2 1 by auto { fix qs assume drin: "set qs \ set init" have "T\<^sub>p_on_rand A init qs = (\(x,y)\{(x, y) . x \ set init \ y \ set init \ x < y}. T\<^sub>p_on_rand A (Lxy init {x,y}) (Lxy qs {x, y})) " apply(rule umf_pair) apply(fact)+ using 4[of init qs] drin d by(simp add: split_def) (* 1.4 *) also have "\ \ (\(x,y)\{(x,y). x \ set init \ y\set init \ xp_opt (Lxy init {x,y}) (Lxy qs {x,y})) + b)" apply(rule sum_mono) using on3 drin by(simp add: split_def) also have "\ = c * (\(x,y)\{(x,y). x \ set init \ y\set init \ xp_opt (Lxy init {x,y}) (Lxy qs {x,y})) + b*(((length init)*(length init-1)) / 2)" proof - { fix S::"'a list" assume dis: "distinct S" assume d2: "S \ []" then have d3: "sort S \ []" by (metis length_0_conv length_sort) have "card {(x,y). x \ set S \ y\set S \ x set (sort S) \ y\set (sort S) \ x = (length (sort S) * (length (sort S) - 1)) / 2" apply(rule cardofpairs) using dis d2 d3 by (simp_all) finally have "card {(x, y) . x \ set S \ y \ set S \ x < y} = (length (sort S) * (length (sort S) - 1)) / 2 " . } with d d2 have e: "card {(x,y). x \ set init \ y\set init \ x(x,y)\?S. c * (?T x y) + b) = c * ?R + b*?T2") proof - have "(\(x,y)\?S. c * (?T x y) + b) = c * (\(x,y)\?S. (?T x y)) + (\(x,y)\?S. b)" by(simp add: split_def sum.distrib sum_distrib_left) also have "\ = c * (\(x,y)\?S. (?T x y)) + b*?T2" using e by(simp add: split_def) finally show ?thesis by(simp add: split_def) qed qed also have "\ \ c * T\<^sub>p_opt init qs + (b*((length init)*(length init-1)) / 2)" proof - have "(\(x, y)\{(x, y) . x \ set init \ y \ set init \ x < y}. T\<^sub>p_opt (Lxy init {x,y}) (Lxy qs {x, y})) \ T\<^sub>p_opt init qs" using OPT_zerlegen drin d d2 by auto then have " real (\(x, y)\{(x, y) . x \ set init \ y \ set init \ x < y}. T\<^sub>p_opt (Lxy init {x,y}) (Lxy qs {x, y})) \ (T\<^sub>p_opt init qs)" by linarith with c show ?thesis by(auto simp: split_def) qed finally have f: "T\<^sub>p_on_rand A init qs \ c * real (T\<^sub>p_opt init qs) + (b*((length init)*(length init-1)) / 2)" . } note all=this show ?case unfolding compet_def apply(auto) apply(rule exI[where x="(b*((length init)*(length init-1)) / 2)"]) apply(safe) using notempty 1 b apply simp using all b by simp qed lemma factoringlemma_withconstant': fixes A and b::real and c::real assumes c: "c \ 1" assumes dist: "\e\S0. distinct e" assumes notempty: "\e\S0. length e > 0" (* A has pairwise property *) assumes pw: "pairwise A" (* A is c-competitive on list of length 2 *) assumes on2: "\s0\S0. \b\0. \qs\{x. set x \ set s0}. \(x,y)\{(x,y). x \ set s0 \ y\set s0 \ xp_on_rand A (Lxy s0 {x,y}) (Lxy qs {x,y}) \ c * (T\<^sub>p_opt (Lxy s0 {x,y}) (Lxy qs {x,y})) + b" assumes nopaid: "\is s q. \((free,paid),_) \ (snd A (s, is) q). paid=[]" assumes 4: "\init qs. distinct init \ set qs \ set init \ (\x. 
x finite (set_pmf (config'' A qs init x)))" (* then A is c-competitive on arbitrary list lengths *) shows "compet_rand A c S0" unfolding compet_rand_def static_def using factoringlemma_withconstant[OF assms] by simp end diff --git a/thys/Lower_Semicontinuous/Lower_Semicontinuous.thy b/thys/Lower_Semicontinuous/Lower_Semicontinuous.thy --- a/thys/Lower_Semicontinuous/Lower_Semicontinuous.thy +++ b/thys/Lower_Semicontinuous/Lower_Semicontinuous.thy @@ -1,1859 +1,1860 @@ (* Title: thys/Lower_Semicontinuous/Lower_Semicontinuous.thy Author: Bogdan Grechuk, University of Edinburgh *) section \Lower semicontinuous functions\ theory Lower_Semicontinuous imports "HOL-Analysis.Multivariate_Analysis" begin subsection\Relative interior in one dimension\ lemma rel_interior_ereal_semiline: fixes a :: ereal shows "rel_interior {y. a \ ereal y} = {y. a < ereal y}" proof (cases a) case (real r) then show ?thesis using rel_interior_real_semiline[of r] by (simp add: atLeast_def greaterThan_def) next case PInf thus ?thesis using rel_interior_empty by auto next case MInf thus ?thesis using rel_interior_UNIV by auto qed lemma closed_ereal_semiline: fixes a :: ereal shows "closed {y. a \ ereal y}" proof (cases a) case (real r) then show ?thesis using closed_real_atLeast unfolding atLeast_def by simp qed auto lemma ereal_semiline_unique: fixes a b :: ereal shows "{y. a \ ereal y} = {y. b \ ereal y} \ a = b" by (metis mem_Collect_eq ereal_le_real order_antisym) subsection \Lower and upper semicontinuity\ definition lsc_at :: "'a \ ('a::topological_space \ 'b::order_topology) \ bool" where "lsc_at x0 f \ (\X l. X \ x0 \ (f \ X) \ l \ f x0 \ l)" definition usc_at :: "'a \ ('a::topological_space \ 'b::order_topology) \ bool" where "usc_at x0 f \ (\X l. X \ x0 \ (f \ X) \ l \ l \ f x0)" lemma lsc_at_mem: assumes "lsc_at x0 f" assumes "x \ x0" assumes "(f \ x) \ A" shows "f x0 \ A" using assms lsc_at_def[of x0 f] by blast lemma usc_at_mem: assumes "usc_at x0 f" assumes "x \ x0" assumes "(f \ x) \ A" shows "f x0 \ A" using assms usc_at_def[of x0 f] by blast lemma lsc_at_open: fixes f :: "'a::first_countable_topology \ 'b::{complete_linorder, linorder_topology}" shows "lsc_at x0 f \ (\S. open S \ f x0 \ S \ (\T. open T \ x0 \ T \ (\x'\T. f x' \ f x0 \ f x' \ S)))" (is "?lhs \ ?rhs") proof- { assume "~?rhs" from this obtain S where S_def: "open S \ f x0 : S \ (\T. (open T \ x0 \ T) \ (\x'\T. f x' \ f x0 \ f x' \ S))" by metis define X where "X = {x'. f x' \ f x0 \ f x' \ S}" hence "x0 islimpt X" unfolding islimpt_def using S_def by auto from this obtain x where x_def: "(\n. x n \ X) \ x \ x0" using islimpt_sequential[of x0 X] by auto hence not: "~(f \ x) \ (f x0)" unfolding lim_explicit using X_def S_def by auto from compact_complete_linorder[of "f \ x"] obtain l r where r_def: "strict_mono r \ ((f \ x) \ r) \ l" by auto { assume "l : S" hence "\N. \n\N. f(x(r n)) \ S" using r_def lim_explicit[of "f \ x \ r" l] S_def by auto hence False using x_def X_def by auto } hence l_prop: "l \ S \ l\f x0" using r_def x_def X_def Lim_bounded[of "f \ x \ r"] by auto { assume "f x0 \ l" hence "f x0 = l" using l_prop by auto hence False using l_prop S_def by auto } hence "\x l. 
x \ x0 \ (f \ x) \ l \ ~(f x0 \ l)" apply(rule_tac x="x \ r" in exI) apply(rule_tac x=l in exI) using r_def x_def by (auto simp add: o_assoc LIMSEQ_subseq_LIMSEQ) hence "~?lhs" unfolding lsc_at_def by blast } moreover { assume "?rhs" { fix x A assume x_def: "x \ x0" "(f \ x) \ A" { assume "A \ f x0" from this obtain S V where SV_def: "open S \ open V \ f x0 : S \ A : V \ S Int V = {}" using hausdorff[of "f x0" A] by auto from this obtain T where T_def: "open T \ x0 : T \ (\x'\T. (f x' \ f x0 \ f x' \ S))" using \?rhs\ by metis from this obtain N1 where "\n\N1. x n \ T" using x_def lim_explicit[of x x0] by auto hence *: "\n\N1. (f (x n) \ f x0 \ f(x n) \ S)" using T_def by auto from SV_def obtain N2 where "\n\N2. f(x n) \ V" using lim_explicit[of "f \ x" A] x_def by auto hence "\n\(max N1 N2). \(f(x n) \ f x0)" using SV_def * by auto hence "\n\(max N1 N2). f(x n) \ f x0" by auto hence "f x0 \ A" using Lim_bounded2[of "f \ x" A "max N1 N2" "f x0"] x_def by auto } hence "f x0 \ A" by auto } hence "?lhs" unfolding lsc_at_def by blast } ultimately show ?thesis by blast qed lemma lsc_at_open_mem: fixes f :: "'a::first_countable_topology \ 'b::{complete_linorder, linorder_topology}" assumes "lsc_at x0 f" assumes "open S \ f x0 : S" obtains T where "open T \ x0 \ T \ (\x'\T. (f x' \ f x0 \ f x' \ S))" using assms lsc_at_open[of x0 f] by blast lemma lsc_at_MInfty: fixes f :: "'a::topological_space \ ereal" assumes "f x0 = -\" shows "lsc_at x0 f" unfolding lsc_at_def using assms by auto lemma lsc_at_PInfty: fixes f :: "'a::metric_space \ ereal" assumes "f x0 = \" shows "lsc_at x0 f \ continuous (at x0) f" unfolding lsc_at_open continuous_at_open using assms by auto lemma lsc_at_real: fixes f :: "'a::metric_space \ ereal" assumes "\f x0\ \ \" shows "lsc_at x0 f \ (\e. e>0 \ (\T. open T \ x0 : T \ (\y \ T. f y > f x0 - e)))" (is "?lhs \ ?rhs") proof- obtain m where m_def: "f x0 = ereal m" using assms by (cases "f x0") auto { assume lsc: "lsc_at x0 f" { fix e assume e_def: "(e :: ereal)>0" hence *: "f x0 : {f x0 - e <..< f x0 + e}" using assms ereal_between by auto from this obtain T where T_def: "open T \ x0 : T \ (\x'\T. f x' \ f x0 \ f x' \ {f x0 - e <..< f x0 + e})" apply (subst lsc_at_open_mem[of x0 f "{f x0 - e <..< f x0 + e}"]) using lsc e_def by auto { fix y assume "y:T" { assume "f y \ f x0" hence "f y > f x0 - e" using T_def \y:T\ by auto } moreover { assume "f y > f x0" hence "\>f x0 - e" using * by auto } ultimately have "f y > f x0 - e" using not_le by blast } hence "\T. open T \ x0 \ T \ (\y \ T. f y > f x0 - e)" using T_def by auto } hence "?rhs" by auto } moreover { assume "?rhs" { fix S assume S_def: "open S \ f x0 : S" from this obtain e where e_def: "e>0 \ {f x0 - e<.. S" apply (subst ereal_open_cont_interval[of S "f x0"]) using assms by auto from this obtain T where T_def: "open T \ x0 : T \ (\y \ T. f y > f x0 - e)" using \?rhs\[rule_format, of e] by auto { fix y assume "y:T" "f y \ f x0" hence "f y < f x0 + e" using assms e_def ereal_between[of "f x0" e] by auto hence "f y \ S" using e_def T_def \y\T\ by auto } hence "\T. open T \ x0 : T \ (\y\T. (f y \ f x0 \ f y \ S))" using T_def by auto } hence "lsc_at x0 f" using lsc_at_open by auto } ultimately show ?thesis by auto qed lemma lsc_at_ereal: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ (\CT. open T \ x0 \ T \ (\y \ T. 
f y > C))" (is "?lhs \ ?rhs") proof- { assume "f x0 = -\" hence ?thesis using lsc_at_MInfty by auto } moreover { assume pinf: "f x0 = \" { assume lsc: "lsc_at x0 f" { fix C assume "C f x0 : {C<..}" by auto from this obtain T where T_def: "open T \ x0 \ T \ (\y\T. f y \ {C<..})" using pinf lsc lsc_at_PInfty[of f x0] unfolding continuous_at_open by metis hence "\T. open T \ x0 \ T \ (\y\T. C < f y)" by auto } hence "?rhs" by auto } moreover { assume "?rhs" { fix S assume S_def: "open S \ f x0 : S" then obtain C where C_def: "ereal C < f x0 \ {ereal C<..} \ S" using pinf open_PInfty by auto then obtain T where T_def: "open T \ x0 : T \ (\y \ T. f y \ S)" using \?rhs\[rule_format, of "ereal C"] by auto hence "\T. open T \ x0 \ T \ (\y\T. (f y \ f x0 \ f y \ S))" using T_def by auto } hence "lsc_at x0 f" using lsc_at_open by auto } ultimately have ?thesis by blast } moreover { assume fin: "f x0 \ -\" "f x0 \ \" { assume lsc: "lsc_at x0 f" { fix C assume "C0" using fin ereal_less_minus_iff by auto from this obtain T where T_def: "open T \ x0 \ T \ (\y\T. f x0 - (f x0-C) < f y)" using lsc_at_real[of f x0] lsc fin by auto moreover have "f x0 - (f x0-C) = C" using fin apply (cases "f x0", cases C) by auto ultimately have "\T. open T \ x0 \ T \ (\y\T. C < f y)" by auto } hence "?rhs" by auto } moreover { assume "?rhs" { fix e :: ereal assume "e>0" hence "f x0 - e < f x0" using fin apply (cases "f x0", cases e) by auto hence "\T. open T \ x0 \ T \ (\y\T. f x0 - e < f y)" using fin \?rhs\ by auto } hence "lsc_at x0 f" using lsc_at_real[of f x0] fin by auto } ultimately have ?thesis by blast } ultimately show ?thesis by blast qed lemma lst_at_ball: fixes f :: "'a::metric_space => ereal" shows "lsc_at x0 f \ (\Cd>0. \y \ (ball x0 d). C ?rhs") proof- { assume lsc: "lsc_at x0 f" { fix C :: ereal assume "C x0 : T \ (\y \ T. C < f y)" using lsc lsc_at_ereal[of x0 f] by auto hence "\d. d>0 \ (\y \ (ball x0 d). C < f y)" by (force simp add: open_contains_ball) } } moreover { assume "?rhs" { fix C :: ereal assume "C0 \ (\y \ (ball x0 d). C < f y)" using \?rhs\ by auto hence "\T. open T \ x0 \ T \ (\y \ T. C < f y)" apply (rule_tac x="ball x0 d" in exI) apply (simp add: centre_in_ball) done } hence "lsc_at x0 f" using lsc_at_ereal[of x0 f] by auto } ultimately show ?thesis by auto qed lemma lst_at_delta: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ (\Cd>0. \y. dist x0 y < d \ C < f y)" (is "?lhs \ ?rhs") proof- have "?rhs \ (\Cd>0. \y \ (ball x0 d). C < f y)" unfolding ball_def by auto thus ?thesis using lst_at_ball[of x0 f] by auto qed lemma lsc_liminf_at: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ f x0 \ Liminf (at x0) f" unfolding lst_at_ball le_Liminf_iff eventually_at by (intro arg_cong[where f=All] imp_cong refl ext ex_cong) (auto simp: dist_commute zero_less_dist_iff) lemma lsc_liminf_at_eq: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ (f x0 = min (f x0) (Liminf (at x0) f))" by (metis inf_ereal_def le_iff_inf lsc_liminf_at) lemma lsc_imp_liminf: fixes f :: "'a::metric_space \ ereal" assumes "lsc_at x0 f" assumes "x \ x0" shows "f x0 \ liminf (f \ x)" proof (cases "f x0") case PInf then show ?thesis using assms lsc_at_PInfty[of f x0] lim_imp_Liminf[of _ "f \ x"] continuous_at_sequentially[of x0 f] by auto next case (real r) { fix e assume e_def: "(e :: ereal)>0" from this obtain T where T_def: "open T \ x0 : T \ (\y \ T. f y > f x0 - e)" using lsc_at_real[of f x0] real assms by auto from this obtain N where N_def: "\n\N. 
x n \ T" apply (subst tendsto_obtains_N[of x x0 T]) using assms by auto hence "\n\N. f x0 - e < (f \ x) n" using T_def by auto hence "liminf (f \ x) \ f x0 - e" by (intro Liminf_bounded) (auto simp: eventually_sequentially intro!: exI[of _ N]) hence "f x0 \ liminf (f \ x) + e" apply (cases e) unfolding ereal_minus_le_iff by auto } then show ?thesis by (intro ereal_le_epsilon) auto qed auto lemma lsc_liminf: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ (\x. x \ x0 \ f x0 \ liminf (f \ x))" (is "?lhs \ ?rhs") proof- { assume "?rhs" { fix x A assume x_def: "x \ x0" "(f \ x) \ A" hence "f x0 \ A" using \?rhs\ lim_imp_Liminf[of sequentially] by auto } hence "?lhs" unfolding lsc_at_def by blast } from this show ?thesis using lsc_imp_liminf by auto qed lemma lsc_sequentially: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ (\x c. x \ x0 \ (\n. f(x n)\c) \ f(x0)\c)" (is "?lhs \ ?rhs") proof- { assume "?rhs" { fix x l assume "x \ x0" "(f \ x) \ l" { assume "l = \" hence "f x0 \ l" by auto } moreover { assume "l = -\" { fix B :: real obtain N where N_def: "\n\N. f(x n) \ ereal B" using Lim_MInfty[of "f \ x"] \(f \ x) \ l\ \l = -\\ by auto define g where "g n = (if n\N then x n else x N)" for n hence "g \ x0" by (intro filterlim_cong[THEN iffD1, OF refl refl _ \x \ x0\]) (auto simp: eventually_sequentially) moreover have "\n. f(g n) \ ereal B" using g_def N_def by auto ultimately have "f x0 \ ereal B" using \?rhs\ by auto } hence "f x0 = -\" using ereal_bot by auto hence "f x0 \ l" by auto } moreover { assume fin: "\l\ \ \" { fix e assume e_def: "(e :: ereal)>0" from this obtain N where N_def: "\n\N. f(x n) \ {l - e <..< l + e}" apply (subst tendsto_obtains_N[of "f \ x" l "{l - e <..< l + e}"]) using fin e_def ereal_between \(f \ x) \ l\ by auto define g where "g n = (if n\N then x n else x N)" for n hence "g \ x0" by (intro filterlim_cong[THEN iffD1, OF refl refl _ \x \ x0\]) (auto simp: eventually_sequentially) moreover have "\n. f(g n) \ l + e" using g_def N_def by auto ultimately have "f x0 \ l + e" using \?rhs\ by auto } hence "f x0 \ l" using ereal_le_epsilon by auto } ultimately have "f x0 \ l" by blast } hence "lsc_at x0 f" unfolding lsc_at_def by auto } moreover { assume lsc: "lsc_at x0 f" { fix x c assume xc_def: "x \ x0 \ (\n. f(x n)\c)" hence "liminf (f \ x) \ c" using Limsup_bounded[where F = sequentially and X = "f \ x" and C = c] Liminf_le_Limsup[of sequentially "f \ x"] by auto hence "f x0 \ c" using lsc xc_def lsc_imp_liminf[of x0 f x] by auto } hence "?rhs" by auto } ultimately show ?thesis by blast qed lemma lsc_sequentially_gen: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 f \ (\x c c0. x \ x0 \ c \ c0 \ (\n. f(x n)\c n) \ f(x0)\c0)" (is "?lhs \ ?rhs") proof- { assume "?rhs" { fix x c0 assume a: "x \ x0 \ (\n. f (x n) \ c0)" define c where "c = (\n::nat. c0)" hence "c \ c0" by auto hence "f(x0)\c0" using \?rhs\[rule_format, of x c c0] using a c_def by auto } hence "?lhs" using lsc_sequentially[of x0 f] by auto } moreover { assume lsc: "lsc_at x0 f" { fix x c c0 assume xc_def: "x \ x0 \ c \ c0 \ (\n. f(x n)\c n)" hence "liminf (f \ x) \ c0" using Liminf_mono[of "f \ x" c sequentially] lim_imp_Liminf[of sequentially] by auto hence "f x0 \ c0" using lsc xc_def lsc_imp_liminf[of x0 f x] by auto } hence "?rhs" by auto } ultimately show ?thesis by blast qed lemma lsc_sequentially_mem: fixes f :: "'a::metric_space \ ereal" assumes "lsc_at x0 f" assumes "x \ x0" "c \ c0" assumes "\n. 
f(x n)\c n" shows "f(x0)\c0" using lsc_sequentially_gen[of x0 f] assms by auto lemma lsc_uminus: fixes f :: "'a::metric_space \ ereal" shows "lsc_at x0 (\x. -f x) \ usc_at x0 f" proof- { assume lsc: "lsc_at x0 (\x. -f x)" { fix x A assume x_def: "x \ x0" "(f \ x) \ A" hence "(\i. - f (x i)) \ -A" using tendsto_uminus_ereal[of "f \ x" A] by auto hence "((\x. - f x) \ x) \ -A" unfolding o_def by auto hence " - f x0 \ - A" apply (subst lsc_at_mem[of x0 "(\x. -f x)" x]) using lsc x_def by auto hence "f x0 \ A" by auto } hence "usc_at x0 f" unfolding usc_at_def by auto } moreover { assume usc: "usc_at x0 f" { fix x A assume x_def: "x \ x0" "((\x. - f x) \ x) \ A" hence "(\i. - f (x i)) \ A" unfolding o_def by auto hence "(\i. f (x i)) \ - A" using tendsto_uminus_ereal[of "(\i. - f (x i))" A] by auto hence "(f \ x) \ -A" unfolding o_def by auto hence "f x0 \ - A" apply (subst usc_at_mem[of x0 "f" x]) using usc x_def by auto hence "-f x0 \ A" by (auto simp: ereal_uminus_le_reorder) } hence "lsc_at x0 (\x. -f x)" unfolding lsc_at_def by auto } ultimately show ?thesis by blast qed lemma usc_limsup: fixes f :: "'a::metric_space \ ereal" shows "usc_at x0 f \ (\x. x \ x0 \ f x0 \ limsup (f \ x))" (is "?lhs \ ?rhs") proof- have "usc_at x0 f \ (\x. x \ x0 \ - f x0 \ liminf ((\x. - f x) \ x))" using lsc_uminus[of x0 f] lsc_liminf[of x0 "(\x. - f x)"] by auto moreover { fix x assume "x \ x0" have "(-f x0 \ -limsup (f \ x)) \ (-f x0 \ liminf ((\x. - f x) \ x))" using ereal_Liminf_uminus[of _ "f \ x"] unfolding o_def by auto hence "(f x0 \ limsup (f \ x)) \ (-f x0 \ liminf ((\x. - f x) \ x))" by auto } ultimately show ?thesis by auto qed lemma usc_imp_limsup: fixes f :: "'a::metric_space \ ereal" assumes "usc_at x0 f" assumes "x \ x0" shows "f x0 \ limsup (f \ x)" using assms usc_limsup[of x0 f] by auto lemma usc_limsup_at: fixes f :: "'a::metric_space \ ereal" shows "usc_at x0 f \ f x0 \ Limsup (at x0) f" proof- have "usc_at x0 f \ lsc_at x0 (\x. -(f x))" by (metis lsc_uminus) also have "\ \ -(f x0) \ Liminf (at x0) (\x. 
-(f x))" by (metis lsc_liminf_at) also have "\ \ -(f x0) \ -(Limsup (at x0) f)" by (metis ereal_Liminf_uminus) finally show ?thesis by auto qed lemma continuous_iff_lsc_usc: fixes f :: "'a::metric_space \ ereal" shows "continuous (at x0) f \ (lsc_at x0 f) \ (usc_at x0 f)" proof- { assume a: "continuous (at x0) f" { fix x assume "x \ x0" hence "(f \ x) \ f x0" using a continuous_imp_tendsto[of x0 f x] by auto hence "liminf (f \ x) = f x0 \ limsup (f \ x) = f x0" using lim_imp_Liminf[of sequentially] lim_imp_Limsup[of sequentially] by auto } hence "lsc_at x0 f \ usc_at x0 f" unfolding lsc_liminf usc_limsup by auto } moreover { assume a: "(lsc_at x0 f) \ (usc_at x0 f)" { fix x assume "x \ x0" hence "limsup (f \ x) \ f x0" using a unfolding usc_limsup by auto moreover have "\ \ liminf (f \ x)" using a \x \ x0\ unfolding lsc_liminf by auto ultimately have "limsup (f \ x) = f x0 \ liminf (f \ x) = f x0" - using Liminf_le_Limsup[of sequentially "f \ x"] by auto - hence "(f \ x) \ f x0" using Liminf_eq_Limsup[of sequentially] by auto + using Liminf_le_Limsup[of sequentially "f \ x"] by auto + hence "(f \ x) \ f x0" using Liminf_eq_Limsup[of sequentially] + by (simp add: tendsto_iff_Liminf_eq_Limsup) } hence "continuous (at x0) f" using continuous_at_sequentially[of x0 f] by auto } ultimately show ?thesis by blast qed lemma continuous_lsc_compose: assumes "lsc_at (g x0) f" "continuous (at x0) g" shows "lsc_at x0 (f \ g)" proof- { fix x L assume "x \ x0" "(f \ g \ x) \ L" hence "f(g x0) \ L" apply (subst lsc_at_mem[of "g x0" f "g \ x" L]) using assms continuous_imp_tendsto[of x0 g x] unfolding o_def by auto } from this show ?thesis unfolding lsc_at_def by auto qed lemma continuous_isCont: "continuous (at x0) f \ isCont f x0" by (metis continuous_at isCont_def) lemma isCont_iff_lsc_usc: fixes f :: "'a::metric_space \ ereal" shows "isCont f x0 \ (lsc_at x0 f) \ (usc_at x0 f)" by (metis continuous_iff_lsc_usc continuous_isCont) definition lsc :: "('a::topological_space \ 'b::order_topology) \ bool" where "lsc f \ (\x. lsc_at x f)" definition usc :: "('a::topological_space \ 'b::order_topology) \ bool" where "usc f \ (\x. usc_at x f)" lemma continuous_UNIV_iff_lsc_usc: fixes f :: "'a::metric_space \ ereal" shows "(\x. continuous (at x) f) \ (lsc f) \ (usc f)" by (metis continuous_iff_lsc_usc lsc_def usc_def) subsection \Epigraphs\ definition "Epigraph S (f::_ \ ereal) = {xy. fst xy : S \ f (fst xy) \ ereal(snd xy)}" lemma mem_Epigraph: "(x, y) \ Epigraph S f \ x \ S \ f x \ ereal y" unfolding Epigraph_def by auto lemma ereal_closed_levels: fixes f :: "'a::metric_space \ ereal" shows "(\y. closed {x. f(x)\y}) \ (\r. closed {x. f(x)\ereal r})" (is "?lhs \ ?rhs") proof- { assume "?rhs" { fix y :: ereal { assume "y \ \ \ y \ -\" hence "closed {x. f(x)\y}" using \?rhs\ by (cases y) auto } moreover { assume "y=\" hence "closed {x. f(x)\y}" by auto } moreover { assume "y=-\" hence "{x. f(x)\y} = Inter {{x. f(x)\ereal r} |r. r : UNIV}" using ereal_bot by auto hence "closed {x. f(x)\y}" using closed_Inter \?rhs\ by auto } ultimately have "closed {x. f(x)\y}" by blast } hence "?lhs" by auto } from this show ?thesis by auto qed lemma lsc_iff: fixes f :: "'a::metric_space \ ereal" shows "(lsc f \ (\y. closed {x. f(x)\y})) \ (lsc f \ closed (Epigraph UNIV f))" proof- { assume "lsc f" { fix z z0 assume a: "\n. z n \ (Epigraph UNIV f) \ z \ z0" { fix n have "z n : (Epigraph UNIV f)" using a by auto hence "f (fst (z n)) \ ereal(snd (z n))" using a unfolding Epigraph_def by auto hence "\xn cn. 
z n = (xn, cn) \ f(xn)\ereal cn" apply (rule_tac x="fst (z n)" in exI) apply (rule_tac x="snd (z n)" in exI) by auto } from this obtain x c where xc_def: "\n. z n = (x n, c n) \ f(x n)\ereal (c n)" by metis from this a have "\x0 c0. z0 = (x0, c0) \ x \ x0 \ c \ c0" apply (rule_tac x="fst z0" in exI) apply (rule_tac x="snd z0" in exI) using tendsto_fst[of z z0] tendsto_snd[of z z0] by auto from this obtain x0 c0 where xc0_def: "z0 = (x0, c0) \ x \ x0 \ c \ c0" by auto hence "f(x0)\ereal c0" apply (subst lsc_sequentially_mem[of x0 f x "ereal \ c" "ereal c0"]) using \lsc f\ xc_def unfolding lsc_def unfolding o_def by auto hence "z0 : (Epigraph UNIV f)" unfolding Epigraph_def using xc0_def by auto } hence "closed (Epigraph UNIV f)" by (simp add: closed_sequential_limits) } moreover { assume "closed (Epigraph UNIV f)" hence *: "\x l. (\n. f (fst (x n)) \ ereal(snd (x n))) \ x \ l \ f (fst l) \ ereal(snd l)" unfolding Epigraph_def closed_sequential_limits by auto { fix r :: real { fix z z0 assume a: "\n. f(z n)\ereal r \ z \ z0" hence "f(z0)\ereal r" using *[rule_format, of "(\n. (z n,r))" "(z0,r)"] tendsto_Pair[of z z0] by auto } hence "closed {x. f(x)\ereal r}" by (simp add: closed_sequential_limits) } hence "\y. closed {x. f(x)\y}" using ereal_closed_levels by auto } moreover { assume a: "\y. closed {x. f(x)\ y}" { fix x0 { fix x l assume "x \ x0" "(f \ x) \ l" { assume "l = \" hence "f x0 \ l" by auto } moreover { assume mi: "l = -\" { fix B :: real obtain N where N_def: "\n\N. f(x n)\ereal B" using mi \(f \ x) \ l\ Lim_MInfty[of "f \ x"] by auto { fix d assume "(d::real)>0" from this obtain N1 where N1_def: "\n\N1. dist (x n) x0 < d" using \x \ x0\ unfolding lim_sequentially by auto hence "\y. dist y x0 < d \ y : {x. f(x)\ereal B}" apply (rule_tac x="x (max N N1)" in exI) using N_def by auto } hence "x0 : closure {x. f(x)\ereal B}" apply (subst closure_approachable) by auto hence "f x0 \ ereal B" using a by auto } hence "f x0 \ l" using ereal_bot[of "f x0"] by auto } moreover { assume fin: "\l\ \ \" { fix e assume e_def: "(e :: ereal)>0" from this obtain N where N_def: "\n\N. f(x n) : {l - e <..< l + e}" apply (subst tendsto_obtains_N[of "f \ x" l "{l - e <..< l + e}"]) using fin e_def ereal_between \(f \ x) \ l\ by auto hence *: "\n\N. x n : {x. f(x)\l+e}" using N_def by auto { fix d assume "(d::real)>0" from this obtain N1 where N1_def: "\n\N1. dist (x n) x0 < d" using \x \ x0\ unfolding lim_sequentially by auto hence "\y. dist y x0 < d \ y : {x. f(x)\l+e}" apply (rule_tac x="x (max N N1)" in exI) using * by auto } hence "x0 : closure {x. f(x)\l+e}" apply (subst closure_approachable) by auto hence "f x0 \ l+e" using a by auto } hence "f x0 \ l" using ereal_le_epsilon by auto } ultimately have "f x0 \ l" by blast } hence "lsc_at x0 f" unfolding lsc_at_def by auto } hence "lsc f" unfolding lsc_def by auto } ultimately show ?thesis by auto qed definition lsc_hull :: "('a::metric_space \ ereal) \ ('a::metric_space \ ereal)" where "lsc_hull f = (SOME g. Epigraph UNIV g = closure(Epigraph UNIV f))" lemma epigraph_mono: fixes f :: "'a::metric_space \ ereal" shows "(x,y):Epigraph UNIV f \ y\z \ (x,z):Epigraph UNIV f" unfolding Epigraph_def apply auto using ereal_less_eq(3)[of y z] order_trans_rules(23) by blast lemma closed_epigraph_lines: fixes S :: "('a::metric_space * 'b::metric_space) set" assumes "closed S" shows "closed {z. (x, z) : S}" proof- { fix z assume z_def: "z : closure {z. 
(x, z) : S}" { fix e :: real assume "e>0" from this obtain y where y_def: "(x,y) : S \ dist y z < e" using closure_approachable[of z "{z. (x, z) : S}"] z_def by auto moreover have "dist (x,y) (x,z) = dist y z" unfolding dist_prod_def by auto ultimately have "\s. s \ S \ dist s (x,z) < e" apply(rule_tac x="(x,y)" in exI) by auto } hence "(x,z):S" using closed_approachable[of S "(x,z)"] assms by auto } hence "closure {z. (x, z) : S} \ {z. (x, z) : S}" by blast from this show ?thesis using closure_subset_eq by auto qed lemma mono_epigraph: fixes S :: "('a::metric_space * real) set" assumes mono: "\x y z. (x,y):S \ y\z \ (x,z):S" assumes "closed S" shows "\g. ((Epigraph UNIV g) = S)" proof- { fix x have "closed {z. (x, z) : S}" using \closed S\ closed_epigraph_lines by auto hence "\a. {z. (x, z) : S} = {z. a \ ereal z}" apply (subst mono_closed_ereal) using mono by auto } from this obtain g where g_def: "\x. {z. (x, z) : S} = {z. g x \ ereal z}" by metis { fix s have "s:S \ (fst s, snd s) : S" by auto also have "\ \ g(fst s) \ ereal (snd s)" using g_def[rule_format, of "fst s"] by blast finally have "s:S \ g(fst s) \ ereal (snd s)" by auto } hence "(Epigraph UNIV g) = S" unfolding Epigraph_def by auto from this show ?thesis by auto qed lemma lsc_hull_exists: fixes f :: "'a::metric_space \ ereal" shows "\g. Epigraph UNIV g = closure (Epigraph UNIV f)" proof- { fix x y z assume xy: "(x,y):closure (Epigraph UNIV f) \ y\z" { fix e :: real assume "e>0" hence "\ya\Epigraph UNIV f. dist ya (x, y) < e" using xy closure_approachable[of "(x,y)" "Epigraph UNIV f"] by auto from this obtain a b where ab: "(a,b):Epigraph UNIV f \ dist (a,b) (x,y) < e" by auto moreover have "dist (a,b) (x,y) = dist (a,b+(z-y)) (x,z)" unfolding dist_prod_def dist_norm by (simp add: algebra_simps) moreover have "(a,b+(z-y)):Epigraph UNIV f" apply (subst epigraph_mono[of _ b]) using ab xy by auto ultimately have "\w\Epigraph UNIV f. dist w (x, z) < e" by auto } hence "(x,z):closure (Epigraph UNIV f)" using closure_approachable by auto } hence "\x y z. (x,y) \ closure (Epigraph UNIV f) \ y\z \ (x,z) \ closure (Epigraph UNIV f)" by auto from this show ?thesis using mono_epigraph[of "closure (Epigraph UNIV f)"] by auto qed lemma epigraph_invertible: assumes "Epigraph UNIV f = Epigraph UNIV g" shows "f=g" proof- { fix x { fix C have "f x \ ereal C \ (x,C) : Epigraph UNIV f" unfolding Epigraph_def by auto also have "\ \ (x,C) : Epigraph UNIV g" using assms by auto also have "\ \ g x \ ereal C" unfolding Epigraph_def by auto finally have "f x \ ereal C \ g x \ ereal C" by auto } hence "g x = f x" using ereal_le_real[of "g x" "f x"] ereal_le_real[of "f x" "g x"] by auto } from this show ?thesis by (simp add: ext) qed lemma lsc_hull_ex_unique: fixes f :: "'a::metric_space \ ereal" shows "\!g. Epigraph UNIV g = closure (Epigraph UNIV f)" using lsc_hull_exists epigraph_invertible by metis lemma epigraph_lsc_hull: fixes f :: "'a::metric_space \ ereal" shows "Epigraph UNIV (lsc_hull f) = closure(Epigraph UNIV f)" proof- have "\g. Epigraph UNIV g = closure (Epigraph UNIV f)" using lsc_hull_exists by auto thus ?thesis unfolding lsc_hull_def using some_eq_ex[of "(\g. 
Epigraph UNIV g = closure(Epigraph UNIV f))"] by auto qed lemma lsc_hull_expl: "(g = lsc_hull f) \ (Epigraph UNIV g = closure(Epigraph UNIV f))" proof- { assume "Epigraph UNIV g = closure(Epigraph UNIV f)" hence "lsc_hull f = g" unfolding lsc_hull_def apply (subst some1_equality[of _ g]) using lsc_hull_ex_unique by metis+ } from this show ?thesis using epigraph_lsc_hull by auto qed lemma lsc_lsc_hull: "lsc (lsc_hull f)" using epigraph_lsc_hull[of f] lsc_iff[of "lsc_hull f"] by auto lemma epigraph_subset_iff: fixes f g :: "'a::metric_space \ ereal" shows "Epigraph UNIV f \ Epigraph UNIV g \ (\x. g x \ f x)" proof- { assume epi: "Epigraph UNIV f \ Epigraph UNIV g" { fix x { fix z assume "f x \ ereal z" hence "(x,z)\Epigraph UNIV f" unfolding Epigraph_def by auto hence "(x,z)\Epigraph UNIV g" using epi by auto hence "g x \ ereal z" unfolding Epigraph_def by auto } hence "g x \ f x" apply (subst ereal_le_real) by auto } } moreover { assume le: "\x. g x \ f x" { fix x y assume "(x,y):Epigraph UNIV f" hence "f x \ ereal y" unfolding Epigraph_def by auto moreover have "g x \ f x" using le by auto ultimately have "g x \ ereal y" by auto hence "(x,y):Epigraph UNIV g" unfolding Epigraph_def by auto } } ultimately show ?thesis by auto qed lemma lsc_hull_le: "(lsc_hull f) x \ f x" using epigraph_lsc_hull[of f] closure_subset epigraph_subset_iff[of f "lsc_hull f"] by auto lemma lsc_hull_greatest: fixes f g :: "'a::metric_space \ ereal" assumes "lsc g" "\x. g x \ f x" shows "\x. g x \ (lsc_hull f) x" proof- have "closure(Epigraph UNIV f) \ Epigraph UNIV g" using lsc_iff epigraph_subset_iff assms by (metis closure_minimal) from this show ?thesis using epigraph_subset_iff lsc_hull_expl by metis qed lemma lsc_hull_iff_greatest: fixes f g :: "'a::metric_space \ ereal" shows "(g = lsc_hull f) \ lsc g \ (\x. g x \ f x) \ (\h. lsc h \ (\x. h x \ f x) \ (\x. h x \ g x))" (is "?lhs \ ?rhs") proof- { assume "?lhs" hence "?rhs" using lsc_lsc_hull lsc_hull_le lsc_hull_greatest by metis } moreover { assume "?rhs" { fix x have "(lsc_hull f) x \ g x" using \?rhs\ lsc_lsc_hull lsc_hull_le by metis moreover have "(lsc_hull f) x \ g x" using \?rhs\ lsc_hull_greatest by metis ultimately have "(lsc_hull f) x = g x" by auto } hence "?lhs" by (simp add: ext) } ultimately show ?thesis by blast qed lemma lsc_hull_mono: fixes f g :: "'a::metric_space \ ereal" assumes "\x. g x \ f x" shows "\x. (lsc_hull g) x \ (lsc_hull f) x" proof- { fix x have "(lsc_hull g) x \ g x" using lsc_hull_le[of g x] by auto also have "\ \ f x" using assms by auto finally have "(lsc_hull g) x \ f x" by auto } note * = this show ?thesis apply (subst lsc_hull_greatest) using lsc_lsc_hull[of g] * by auto qed lemma lsc_hull_lsc: "lsc f \ (f = lsc_hull f)" using lsc_hull_iff_greatest[of f f] by auto lemma lsc_hull_liminf_at: fixes f :: "'a::metric_space \ ereal" shows "\x. (lsc_hull f) x = min (f x) (Liminf (at x) f)" proof- { fix x z assume "(x,z):Epigraph UNIV (\x. min (f x) (Liminf (at x) f))" hence xz_def: "ereal z \ (SUP e\{0<..}. INF y\ball x e. f y)" unfolding Epigraph_def min_Liminf_at by auto { fix e::real assume "e>0" hence "e/sqrt 2>0" using \e>0\ by simp from this obtain e1 where e1_def: "e1 e1>0" using dense by auto hence "(SUP e\{0<..}. INF y\ball x e. f y) \ (INF y\ball x e1. f y)" by (auto intro: SUP_upper) hence "ereal z \ (INF y\ball x e1. f y)" using xz_def by auto hence *: "\y>ereal z. \t. 
t \ ball x e1 \ f t \ y" by (simp add: Bex_def Inf_le_iff_less) obtain t where t_def: "t : ball x e1 \ f t \ ereal(z+e1)" using e1_def *[rule_format, of "ereal(z+e1)"] by auto hence epi: "(t,z+e1):Epigraph UNIV f" unfolding Epigraph_def by auto have "dist x t < e1" using t_def unfolding ball_def dist_norm by auto hence "sqrt (e1 ^ 2 + dist x t ^ 2) < e" using e1_def apply (subst real_sqrt_sum_squares_less) by auto moreover have "dist (x,z) (t,z+e1) = sqrt (e1 ^ 2 + dist x t ^ 2)" unfolding dist_prod_def dist_norm by (simp add: algebra_simps) ultimately have "dist (x,z) (t,z+e1) < e" by auto hence "\y\Epigraph UNIV f. dist y (x, z) < e" apply (rule_tac x="(t,z+e1)" in bexI) apply (simp add: dist_commute) using epi by auto } hence "(x,z) : closure (Epigraph UNIV f)" using closure_approachable[of "(x,z)" "Epigraph UNIV f"] by auto } moreover { fix x z assume xz_def: "(x,z):closure (Epigraph UNIV f)" { fix e::real assume "e>0" from this obtain y where y_def: "y:(Epigraph UNIV f) \ dist y (x,z) < e" using closure_approachable[of "(x,z)" "Epigraph UNIV f"] xz_def by metis have "dist (fst y) x \ sqrt ((dist (fst y) x) ^ 2 + (dist (snd y) z) ^ 2)" by (auto intro: real_sqrt_sum_squares_ge1) also have "\ sqrt ((dist (fst y) x) ^ 2 + (dist (snd y) z) ^ 2)" by (auto intro: real_sqrt_sum_squares_ge2) also have "\ball x e. f y) \ f(fst y)" using h1 by (simp add: INF_lower) also have "\\ereal(snd y)" using y_def unfolding Epigraph_def by auto also have "\ < ereal(z+e)" using h2 unfolding dist_norm by auto finally have "(INF y\ball x e. f y) < ereal(z+e)" by auto } hence *: "\e>0. (INF y\ball x e. f y) < ereal(z+e)" by auto { fix e assume "(e::real)>0" { fix d assume "(d::real)>0" { assume "dball x d. f y) < ereal(z+d)" using * \d>0\ by auto also have "\< ereal(z+e)" using \d by auto finally have "(INF y\ball x d. f y) < ereal(z+e)" by auto } moreover { assume "~(d ball x d" by auto hence "(INF y\ball x d. f y) \ (INF y\ball x e. f y)" apply (subst INF_mono) by auto also have "\< ereal(z+e)" using * \e>0\ by auto finally have "(INF y\ball x d. f y) < ereal(z+e)" by auto } ultimately have "(INF y\ball x d. f y) < ereal(z+e)" by blast hence "(INF y\ball x d. f y) \ ereal(z+e)" by auto } hence "min (f x) (Liminf (at x) f) \ ereal (z+e)" unfolding min_Liminf_at by (auto intro: SUP_least) } hence "min (f x) (Liminf (at x) f) \ ereal z" using ereal_le_epsilon2 by auto hence "(x,z):Epigraph UNIV (\x. min (f x) (Liminf (at x) f))" unfolding Epigraph_def by auto } ultimately have "Epigraph UNIV (\x. min (f x) (Liminf (at x) f))= closure (Epigraph UNIV f)" by auto hence "(\x. min (f x) (Liminf (at x) f)) = lsc_hull f" using epigraph_invertible epigraph_lsc_hull[of f] by blast from this show ?thesis by metis qed lemma lsc_hull_same_inf: fixes f :: "'a::metric_space \ ereal" shows "(INF x. lsc_hull f x) = (INF x. f x)" proof- { fix x have "(INF x. f x) \ (INF y\ball x 1. f y)" apply (subst INF_mono) by auto also have "\ \ min (f x) (Liminf (at x) f)" unfolding min_Liminf_at by (auto intro: SUP_upper) also have "\=(lsc_hull f) x" using lsc_hull_liminf_at[of f] by auto finally have "(INF x. f x) \ (lsc_hull f) x" by auto } hence "(INF x. f x) \ (INF x. lsc_hull f x)" apply (subst INF_greatest) by auto moreover have "(INF x. lsc_hull f x) \ (INF x. f x)" apply (subst INF_mono) using lsc_hull_le by auto ultimately show ?thesis by auto qed subsection \Convex Functions\ definition convex_on :: "'a::real_vector set \ ('a \ ereal) \ bool" where "convex_on s f \ (\x\s. \y\s. \u\0. \v\0. 
u + v = 1 \ f (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * f x + ereal v * f y)" lemma convex_on_ereal_mem: assumes "convex_on s f" assumes "x:s" "y:s" assumes "u\0" "v\0" "u+v=1" shows "f (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * f x + ereal v * f y" using assms unfolding convex_on_def by auto lemma convex_on_ereal_subset: "convex_on t f \ s \ t \ convex_on s f" unfolding convex_on_def by auto lemma convex_on_ereal_univ: "convex_on UNIV f \ (\S. convex_on S f)" using convex_on_ereal_subset by auto lemma ereal_pos_sum_distrib_left: fixes f :: "'a \ ereal" assumes "r\0" "r \ \" shows "r * sum f A = sum (\n. r * f n) A" proof (cases "finite A") case True thus ?thesis proof induct case empty thus ?case by simp next case (insert x A) thus ?case using assms by (simp add: ereal_pos_distrib) qed next case False thus ?thesis by simp qed lemma convex_ereal_add: fixes f g :: "'a::real_vector \ ereal" assumes "convex_on s f" "convex_on s g" shows "convex_on s (\x. f x + g x)" proof- { fix x y assume "x:s" "y:s" moreover fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" ultimately have "f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \ (ereal u * f x + ereal v * f y) + (ereal u * g x + ereal v * g y)" using assms unfolding convex_on_def by (auto simp add: add_mono) also have "\=(ereal u * f x + ereal u * g x) + (ereal v * f y + ereal v * g y)" by (simp add: algebra_simps) also have "\= ereal u * (f x + g x) + ereal v * (f y + g y)" using uv by (simp add: ereal_pos_distrib) finally have "f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * (f x + g x) + ereal v * (f y + g y)" by auto } thus ?thesis unfolding convex_on_def by auto qed lemma convex_ereal_cmul: assumes "0 \ (c::ereal)" "convex_on s f" shows "convex_on s (\x. c * f x)" proof- { fix x y assume "x:s" "y:s" moreover fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" ultimately have "f (u *\<^sub>R x + v *\<^sub>R y) \ (ereal u * f x + ereal v * f y)" using assms unfolding convex_on_def by auto hence "c * f (u *\<^sub>R x + v *\<^sub>R y) \ c * (ereal u * f x + ereal v * f y)" using assms by (intro ereal_mult_left_mono) auto also have "\ \ c * (ereal u * f x) + c * (ereal v * f y)" using assms by (simp add: ereal_le_distrib) also have "\ = ereal u *(c* f x) + ereal v *(c* f y)" by (simp add: algebra_simps) finally have "c * f (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * (c * f x) + ereal v * (c * f y)" by auto } thus ?thesis unfolding convex_on_def by auto qed lemma convex_ereal_max: fixes f g :: "'a::real_vector \ ereal" assumes "convex_on s f" "convex_on s g" shows "convex_on s (\x. max (f x) (g x))" proof- { fix x y assume "x:s" "y:s" moreover fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" ultimately have "max (f (u *\<^sub>R x + v *\<^sub>R y)) (g (u *\<^sub>R x + v *\<^sub>R y)) \ max (ereal u * f x + ereal v * f y) (ereal u * g x + ereal v * g y)" apply (subst max.mono) using assms unfolding convex_on_def by auto also have "\\ereal u * max (f x) (g x) + ereal v * max (f y) (g y)" apply (subst max.boundedI) apply (subst add_mono) prefer 4 apply (subst add_mono) by (subst ereal_mult_left_mono, auto simp add: uv)+ finally have "max (f (u *\<^sub>R x + v *\<^sub>R y)) (g (u *\<^sub>R x + v *\<^sub>R y)) \ ereal u * max (f x) (g x) + ereal v * max (f y) (g y)" by auto } thus ?thesis unfolding convex_on_def by auto qed lemma convex_on_ereal_alt: fixes C :: "'a::real_vector set" assumes "convex C" shows "convex_on C f = (\x \ C. \y \ C. \m :: real. 
m \ 0 \ m \ 1 \ f (m *\<^sub>R x + (1 - m) *\<^sub>R y) \ (ereal m) * f x + (1 - (ereal m)) * f y)" proof safe fix x y fix m :: real have[simp]: "ereal (1 - m) = (1 - ereal m)" using ereal_minus(1)[of 1 m] by (auto simp: one_ereal_def) assume asms: "convex_on C f" "x : C" "y : C" "0 \ m" "m \ 1" from this[unfolded convex_on_def, rule_format] have "\u v. ((0 \ u \ 0 \ v \ u + v = 1) \ f (u *\<^sub>R x + v *\<^sub>R y) \ (ereal u) * f x + (ereal v) * f y)" by auto from this[rule_format, of "m" "1 - m", simplified] asms show "f (m *\<^sub>R x + (1 - m) *\<^sub>R y) \ (ereal m) * f x + (1 - ereal m) * f y" by auto next assume asm: "\x\C. \y\C. \m. 0 \ m \ m \ 1 \ f (m *\<^sub>R x + (1 - m) *\<^sub>R y) \ (ereal m) * f x + (1 - ereal m) * f y" { fix x y fix u v :: real assume lasm: "x \ C" "y \ C" "u \ 0" "v \ 0" "u + v = 1" hence[simp]: "1-u=v" "1 - ereal u = ereal v" using ereal_minus(1)[of 1 m] by (auto simp: one_ereal_def) from asm[rule_format, of x y u] have "f (u *\<^sub>R x + v *\<^sub>R y) \ (ereal u) * f x + (ereal v) * f y" using lasm by auto } thus "convex_on C f" unfolding convex_on_def by auto qed lemma convex_on_ereal_alt_mem: fixes C :: "'a::real_vector set" assumes "convex C" assumes "convex_on C f" assumes "x : C" "y : C" assumes "(m::real) \ 0" "m \ 1" shows "f (m *\<^sub>R x + (1 - m) *\<^sub>R y) \ (ereal m) * f x + (1 - (ereal m)) * f y" by (metis assms convex_on_ereal_alt) lemma ereal_add_right_mono: "(a::ereal) \ b \ a + c \ b + c" by (metis add_mono order_refl) lemma convex_on_ereal_sum_aux: assumes "1-a>0" shows "(1 - ereal a) * (ereal (c / (1 - a)) * b) = (ereal c) * b" by (metis mult.assoc mult.commute eq_divide_eq ereal_minus(1) assms one_ereal_def less_le times_ereal.simps(1)) lemma convex_on_ereal_sum: fixes a :: "'a \ real" fixes y :: "'a \ 'b::real_vector" fixes f :: "'b \ ereal" assumes "finite s" "s \ {}" assumes "convex_on C f" assumes "convex C" assumes "(SUM i : s. a i) = 1" assumes "\i. i \ s \ a i \ 0" assumes "\i. i \ s \ y i \ C" shows "f (SUM i : s. a i *\<^sub>R y i) \ (SUM i : s. ereal (a i) * f (y i))" using assms(1,2,5-) proof (induct s arbitrary:a rule:finite_ne_induct) case (singleton i) hence ai: "a i = 1" by auto thus ?case by (auto simp: one_ereal_def[symmetric]) next case (insert i s) from \convex_on C f\ have conv: "\x y m. ((x \ C \ y \ C \ 0 \ m \ m \ 1) \ f (m *\<^sub>R x + (1 - m) *\<^sub>R y) \ (ereal m) * f x + (1 - ereal m) * f y)" using convex_on_ereal_alt[of C f] \convex C\ by auto { assume "a i = 1" hence "(SUM j : s. a j) = 0" using insert by auto hence "\j. (j \ s \ a j = 0)" using insert by (simp add: sum_nonneg_eq_0_iff) hence ?case using insert.hyps(1-3) \a i = 1\ by (simp add: zero_ereal_def[symmetric] one_ereal_def[symmetric]) } moreover { assume asm: "a i \ 1" from insert have yai: "y i : C" "a i \ 0" by auto have fis: "finite (insert i s)" using insert by auto hence ai1: "a i \ 1" using sum_nonneg_leq_bound[of "insert i s" a] insert by simp hence "a i < 1" using asm by auto hence i0: "1 - a i > 0" by auto hence i1: "1 - ereal (a i) > 0" using ereal_minus(1)[of 1 "a i"] by (simp add: zero_ereal_def[symmetric] one_ereal_def[symmetric]) have i2: "1 - ereal (a i) \ \" using ereal_minus(1)[of 1] by (simp add: zero_ereal_def[symmetric] one_ereal_def[symmetric]) let "?a j" = "a j / (1 - a i)" have a_nonneg: "\j. j \ s \ 0 \ a j / (1 - a i)" using i0 insert by (metis insert_iff divide_nonneg_pos) have "(SUM j : insert i s. a j) = 1" using insert by auto hence "(SUM j : s. 
a j) = 1 - a i" using sum.insert insert by fastforce hence "(SUM j : s. a j) / (1 - a i) = 1" using i0 by auto hence a1: "(SUM j : s. ?a j) = 1" unfolding sum_divide_distrib by simp have asum: "(SUM j : s. ?a j *\<^sub>R y j) : C" using insert convex_sum[OF \finite s\ \convex C\ a1 a_nonneg] by auto have asum_le: "f (SUM j : s. ?a j *\<^sub>R y j) \ (SUM j : s. ereal (?a j) * f (y j))" using a_nonneg a1 insert by blast have "f (SUM j : insert i s. a j *\<^sub>R y j) = f ((SUM j : s. a j *\<^sub>R y j) + a i *\<^sub>R y i)" using sum.insert[of s i "\j. a j *\<^sub>R y j", OF \finite s\ \i \ s\] by (auto simp only:add.commute) also have "\ = f (((1 - a i) * inverse (1 - a i)) *\<^sub>R (SUM j : s. a j *\<^sub>R y j) + a i *\<^sub>R y i)" using i0 by auto also have "\ = f ((1 - a i) *\<^sub>R (SUM j : s. (a j * inverse (1 - a i)) *\<^sub>R y j) + a i *\<^sub>R y i)" using scaleR_right.sum[of "inverse (1 - a i)" "\j. a j *\<^sub>R y j" s, symmetric] by (auto simp:algebra_simps) also have "\ = f ((1 - a i) *\<^sub>R (SUM j : s. ?a j *\<^sub>R y j) + a i *\<^sub>R y i)" by (auto simp: divide_inverse) also have "\ \ (1 - ereal (a i)) * f ((SUM j : s. ?a j *\<^sub>R y j)) + (ereal (a i)) * f (y i)" using conv[rule_format, of "y i" "(SUM j : s. ?a j *\<^sub>R y j)" "a i"] using yai(1) asum yai(2) ai1 by (auto simp add:add.commute) also have "\ \ (1 - ereal (a i)) * (SUM j : s. ereal (?a j) * f (y j)) + (ereal (a i)) * f (y i)" using ereal_add_right_mono[OF ereal_mult_left_mono[of _ _ "1 - ereal (a i)", OF asum_le less_imp_le[OF i1]], of "(ereal (a i)) * f (y i)"] by simp also have "\ = (SUM j : s. (1 - ereal (a i)) * (ereal (?a j) * f (y j))) + (ereal (a i)) * f (y i)" unfolding ereal_pos_sum_distrib_left[of "1 - ereal (a i)" "\j. (ereal (?a j)) * f (y j)", OF less_imp_le[OF i1] i2] by auto also have "\ = (SUM j : s. (ereal (a j)) * f (y j)) + (ereal (a i)) * f (y i)" using i0 convex_on_ereal_sum_aux by auto also have "\ = (ereal (a i)) * f (y i) + (SUM j : s. (ereal (a j)) * f (y j))" by (simp add: add.commute) also have "\ = (SUM j : insert i s. (ereal (a j)) * f (y j))" using insert by auto finally have "f (SUM j : insert i s. a j *\<^sub>R y j) \ (SUM j : insert i s. (ereal (a j)) * f (y j))" by simp } ultimately show ?case by auto qed lemma sum_2: "sum u {1::nat..2} = (u 1)+(u 2)" proof- have "{1::nat..2} = {1::nat,2}" by auto thus ?thesis by auto qed lemma convex_on_ereal_iff: assumes "convex s" shows "convex_on s f \ (\k u x. (\i\{1..k::nat}. 0 \ u i \ x i : s) \ sum u {1..k} = 1 \ f (sum (\i. u i *\<^sub>R x i) {1..k} ) \ sum (\i. (ereal (u i)) * f(x i)) {1..k} )" (is "?rhs \ ?lhs") proof- { assume "?rhs" { fix k u x have zero: "~(sum u {1..0::nat} = (1::real))" by auto assume "(\i\{1..k::nat}. (0::real) \ u i \ x i \ s)" moreover assume *: "sum u {1..k} = 1" moreover from * have "k \ 0" using zero by metis ultimately have "f (sum (\i. u i *\<^sub>R x i) {1..k} ) \ sum (\i. (ereal (u i)) * f(x i)) {1..k}" using convex_on_ereal_sum[of "{1..k}" s f u x] using assms \?rhs\ by auto } hence "?lhs" by auto } moreover { assume "?lhs" { fix x y u v assume xys: "x:s" "y:s" assume uv1: "u\0" "v\0" "u + v = (1::real)" define xy where "xy = (\i::nat. if i=1 then x else y)" define uv where "uv = (\i::nat. if i=1 then u else v)" have "\i\{1..2::nat}. (0 \ uv i) \ (xy i : s)" unfolding xy_def uv_def using xys uv1 by auto moreover have "sum uv {1..2} = 1" using sum_2[of uv] unfolding uv_def using uv1 by auto moreover have "(SUM i = 1..2. 
uv i *\<^sub>R xy i) = u *\<^sub>R x + v *\<^sub>R y" using sum_2[of "(\i. uv i *\<^sub>R xy i)"] unfolding xy_def uv_def using xys uv1 by auto moreover have "(SUM i = 1..2. ereal (uv i) * f (xy i)) = ereal u * f x + ereal v * f y" using sum_2[of "(\i. ereal (uv i) * f (xy i))"] unfolding xy_def uv_def using xys uv1 by auto ultimately have "f (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * f x + ereal v * f y" using \?lhs\[rule_format, of "2" uv xy] by auto } hence "?rhs" unfolding convex_on_def by auto } ultimately show ?thesis by blast qed lemma convex_Epigraph: assumes "convex S" shows "convex(Epigraph S f) \ convex_on S f" proof- { assume rhs: "convex(Epigraph S f)" { fix x y assume xy: "x:S" "y:S" fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" have "f (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * f x + ereal v * f y" proof- { assume "u=0 | v=0" hence ?thesis using uv by (auto simp: zero_ereal_def[symmetric] one_ereal_def[symmetric]) } moreover { assume "f x = \ | f y = \" hence ?thesis using uv by (auto simp: zero_ereal_def[symmetric] one_ereal_def[symmetric]) } moreover { assume a: "f x = -\ \ (f y \ \) \ ~(u=0)" from this obtain z where "f y \ ereal z" apply (cases "f y") by auto hence yz: "(y,z) : Epigraph S f" unfolding Epigraph_def using xy by auto { fix C::real have "(x, (1/u)*(C - v * z)) : Epigraph S f" unfolding Epigraph_def using a xy by auto hence "(u *\<^sub>R x + v *\<^sub>R y, C) : Epigraph S f" using rhs convexD[of "Epigraph S f" "(x, (1/u)*(C - v * z))" "(y,z)" u v] uv yz a by auto hence "(f (u *\<^sub>R x + v *\<^sub>R y) \ ereal C)" unfolding Epigraph_def by auto } hence "f (u *\<^sub>R x + v *\<^sub>R y) = -\" using ereal_bot by auto hence ?thesis by auto } moreover { assume a: "(f x \ \) \ f y = -\ \ ~(v=0)" from this obtain z where "f x \ ereal z" apply (cases "f x") by auto hence xz: "(x,z) : Epigraph S f" unfolding Epigraph_def using xy by auto { fix C::real have "(y, (1/v)*(C - u * z)) : Epigraph S f" unfolding Epigraph_def using a xy by auto hence "(u *\<^sub>R x + v *\<^sub>R y, C) : Epigraph S f" using rhs convexD[of "Epigraph S f" "(x,z)" "(y, (1/v)*(C - u * z))" u v] uv xz a by auto hence "(f (u *\<^sub>R x + v *\<^sub>R y) \ ereal C)" unfolding Epigraph_def by auto } hence "f (u *\<^sub>R x + v *\<^sub>R y) = -\" using ereal_bot by auto hence ?thesis by auto } moreover { assume a: "f x \ \ \ f x \ -\ \ f y \ \ \ f y \ -\" from this obtain fx fy where fxy: "f x = ereal fx \ f y = ereal fy" apply (cases "f x", cases "f y") by auto hence "(x, fx) : Epigraph S f \ (y, fy) : Epigraph S f" unfolding Epigraph_def using xy by auto hence "(u *\<^sub>R x + v *\<^sub>R y, u * fx + v * fy) : Epigraph S f" using rhs convexD[of "Epigraph S f" "(x,fx)" "(y,fy)" u v] uv by auto hence ?thesis unfolding Epigraph_def using fxy by auto } ultimately show ?thesis by blast qed } hence "convex_on S f" unfolding convex_on_def by auto } moreover { assume lhs: "convex_on S f" { fix x y fx fy assume xy: "(x,fx):Epigraph S f" "(y,fy):Epigraph S f" fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" hence le: "f x \ ereal fx \ f y \ ereal fy" using xy unfolding Epigraph_def by auto have "x:S \ y:S" using xy unfolding Epigraph_def by auto moreover hence inS: "u *\<^sub>R x + v *\<^sub>R y : S" using assms uv convexD[of S] by auto ultimately have "f(u *\<^sub>R x + v *\<^sub>R y) \ (ereal u) * f x + (ereal v) * f y" using lhs convex_on_ereal_mem[of S f x y u v] uv by auto also have "\ \ (ereal u) * (ereal fx) + (ereal v) * (ereal fy)" apply (subst add_mono) apply (subst 
ereal_mult_left_mono) prefer 4 apply (subst ereal_mult_left_mono) using le uv by auto also have "\ = ereal (u * fx + v * fy)" by auto finally have "(u *\<^sub>R x + v *\<^sub>R y, u * fx + v * fy) : Epigraph S f" unfolding Epigraph_def using inS by auto } hence "convex(Epigraph S f)" unfolding convex_def by auto } ultimately show ?thesis by auto qed lemma convex_EpigraphI: "convex_on s f \ convex s \ convex(Epigraph s f)" unfolding convex_Epigraph by auto definition concave_on :: "'a::real_vector set \ ('a \ ereal) \ bool" where "concave_on S f \ convex_on S (\x. - f x)" definition finite_on :: "'a::real_vector set \ ('a \ ereal) \ bool" where "finite_on S f \ (\x\S. (f x \ \ \ f x \ -\))" definition affine_on :: "'a::real_vector set \ ('a \ ereal) \ bool" where "affine_on S f \ (convex_on S f \ concave_on S f \ finite_on S f)" definition "domain (f::_ \ ereal) = {x. f x < \}" lemma domain_Epigraph_aux: assumes "x \ \" shows "\r. x\ereal r" using assms by (cases x) auto lemma domain_Epigraph: "domain f = {x. \y. (x,y) \ Epigraph UNIV f}" unfolding domain_def Epigraph_def using domain_Epigraph_aux by auto lemma domain_Epigraph_fst: "domain f = fst ` (Epigraph UNIV f)" proof- { fix x assume "x:domain f" from this obtain y where "(x,y):Epigraph UNIV f" using domain_Epigraph[of f] by auto moreover have "x = fst (x,y)" by auto ultimately have "x:fst ` (Epigraph UNIV f)" unfolding image_def by blast } from this show ?thesis using domain_Epigraph[of f] by auto qed lemma convex_on_domain: "convex_on (domain f) f \ convex_on UNIV f" proof- { assume lhs: "convex_on (domain f) f" { fix x y fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" have "f (u *\<^sub>R x + v *\<^sub>R y) \ ereal u * f x + ereal v * f y" proof- { assume "f x = \ | f y = \" hence ?thesis using uv by (auto simp: zero_ereal_def[symmetric] one_ereal_def[symmetric]) } moreover { assume "~ (f x = \ | f y = \)" hence "x : domain f \ y : domain f" unfolding domain_def by auto hence ?thesis using lhs unfolding convex_on_def using uv by auto } ultimately show ?thesis by blast qed } hence "convex_on UNIV f" unfolding convex_on_def by auto } from this show ?thesis using convex_on_ereal_subset by auto qed lemma convex_on_domain2: "convex_on (domain f) f \ (\S. convex_on S f)" by (metis convex_on_domain convex_on_ereal_univ) lemma convex_domain: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "convex (domain f)" proof- have "convex (Epigraph UNIV f)" using assms convex_Epigraph by auto thus ?thesis unfolding domain_Epigraph_fst apply (subst convex_linear_image) using linear_fst linear_conv_bounded_linear by auto qed lemma infinite_convex_domain_iff: fixes f :: "'a::euclidean_space \ ereal" assumes "\x. (f x = \ | f x = -\)" shows "convex_on UNIV f \ convex (domain f)" proof- { assume dom: "convex (domain f)" { fix x y assume "x:domain f" "y:domain f" moreover fix u v ::real assume uv: "0 \ u" "0 \ v" "u + v = 1" ultimately have "u *\<^sub>R x + v *\<^sub>R y : domain f" using dom unfolding convex_def by auto hence "f(u *\<^sub>R x + v *\<^sub>R y) = -\" using assms unfolding domain_def by auto } hence "convex_on (domain f) f" unfolding convex_on_def by auto hence "convex_on UNIV f" by (metis convex_on_domain2) } thus ?thesis by (metis convex_domain) qed lemma convex_PInfty_outside: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" "convex S" shows "convex_on UNIV (\x. 
if x:S then (f x) else \)" proof- define g where "g x = (if x:S then -\ else \::ereal)" for x hence "convex_on UNIV g" apply (subst infinite_convex_domain_iff) using assms unfolding domain_def by auto moreover have "(\x. if x:S then (f x) else \) = (\x. max (f x) (g x))" apply (subst fun_eq_iff) unfolding g_def by auto ultimately show ?thesis using convex_ereal_max assms by auto qed definition proper_on :: "'a::real_vector set \ ('a \ ereal) \ bool" where "proper_on S f \ ((\x\S. f x \ -\) \ (\x\S. f x \ \))" definition proper :: "('a::real_vector \ ereal) \ bool" where "proper f \ proper_on UNIV f" lemma proper_iff: "proper f \ ((\x. f x \ -\) \ (\x. f x \ \))" unfolding proper_def proper_on_def by auto lemma improper_iff: "~(proper f) \ ((\x. f x = -\) | (\x. f x = \))" using proper_iff by auto lemma ereal_MInf_plus[simp]: "-\ + x = (if x = \ then \ else -\::ereal)" by (cases x) auto lemma convex_improper: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "~(proper f)" shows "\x\rel_interior(domain f). f x = -\" proof- { assume "domain f = {}" hence ?thesis using rel_interior_empty by auto } moreover { assume nemp: "domain f \ {}" then obtain u where u_def: "f u = -\" using assms improper_iff[of f] unfolding domain_def by auto hence udom: "u:domain f" unfolding domain_def by auto { fix x assume "x:rel_interior(domain f)" then obtain m where m_def: "m>1 \ (1 - m) *\<^sub>R u + m *\<^sub>R x : domain f" using convex_rel_interior_iff[of "domain f" x] nemp convex_domain[of f] assms udom by auto define v where "v = 1/m" hence v01: "0 v<1" using m_def by auto define y where "y = (1 - m) *\<^sub>R u + m *\<^sub>R x" have "x = (1-v) *\<^sub>R u+v *\<^sub>R y" unfolding v_def y_def using m_def by (simp add: algebra_simps) hence "f x \ (1-ereal v) * f u+ereal v * f y" using convex_on_ereal_alt_mem[of "UNIV" f y u v] assms convex_UNIV v01 by (simp add: add.commute) moreover have "f y < \" using m_def y_def unfolding domain_def by auto moreover have *: "0 < 1 - ereal v" using v01 by (metis diff_gt_0_iff_gt ereal_less(2) ereal_minus(1) one_ereal_def) moreover from * have "(1-ereal v) * f u = -\" using u_def by auto ultimately have "f x = -\" using v01 by simp } hence ?thesis by auto } ultimately show ?thesis by blast qed lemma convex_improper2: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "~(proper f)" shows "f x = \ | f x = -\ | x : rel_frontier (domain f)" proof- { assume a: "~(f x = \ | f x = -\)" hence "x: domain f" unfolding domain_def by auto hence "x : closure (domain f)" using closure_subset by auto moreover have "x \ rel_interior (domain f)" using assms convex_improper a by auto ultimately have "x : rel_frontier (domain f)" unfolding rel_frontier_def by auto } thus ?thesis by auto qed lemma convex_lsc_improper: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "~(proper f)" assumes "lsc f" shows "f x = \ | f x = -\" proof- { fix x assume "f x \ \" hence "lsc_at x f" using assms unfolding lsc_def by auto have "x: domain f" unfolding domain_def using \f x \ \\ by auto hence "x : closure (domain f)" using closure_subset by auto hence x_def: "x : closure (rel_interior (domain f))" by (metis assms(1) convex_closure_rel_interior convex_domain) { fix C assume "C0 \ (\y. 
dist x y < d \ C < f y)" using lst_at_delta[of x f] \lsc_at x f\ by auto from this obtain y where y_def: "y:rel_interior (domain f) \ dist y x < d" using x_def closure_approachable[of x "rel_interior (domain f)"] by auto hence "f y = -\" by (metis assms(1) assms(2) convex_improper) moreover have "C < f y" using y_def d_def by (simp add: dist_commute) ultimately have False by auto } hence "f x = -\" by auto } from this show ?thesis by auto qed lemma convex_lsc_hull: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "convex_on UNIV (lsc_hull f)" proof- have "convex(Epigraph UNIV f)" by (metis assms convex_EpigraphI convex_UNIV) hence "convex (Epigraph UNIV (lsc_hull f))" by (metis convex_closure epigraph_lsc_hull) thus ?thesis by (metis convex_Epigraph convex_UNIV) qed lemma improper_lsc_hull: fixes f :: "'a::euclidean_space \ ereal" assumes "~(proper f)" shows "~(proper (lsc_hull f))" proof- { assume *: "\x. f x = \" then have "lsc f" by (metis (full_types) UNIV_I lsc_at_open lsc_def open_UNIV) with * have "\x. (lsc_hull f) x = \" by (metis lsc_hull_lsc) } hence "(\x. f x = \) \ (\x. (lsc_hull f) x = \)" by (metis ereal_infty_less_eq(1) lsc_hull_le) moreover have "(\x. f x = -\) \ (\x. (lsc_hull f) x = -\)" by (metis ereal_infty_less_eq2(2) lsc_hull_le) ultimately show ?thesis using assms unfolding improper_iff by auto qed lemma lsc_hull_convex_improper: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "~(proper f)" shows "\x\rel_interior(domain f). (lsc_hull f) x = f x" by (metis assms convex_improper ereal_infty_less_eq(2) lsc_hull_le) lemma convex_with_rel_open_domain: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "rel_open (domain f)" shows "(\x. f x > -\) | (\x. (f x = \ | f x = -\))" proof- { assume "\(\x. f x > -\)" hence "\(proper f)" using proper_iff by auto hence "\x\rel_interior(domain f). f x = -\" by (metis assms(1) convex_improper) hence "\x\domain f. f x = -\" by (metis assms(2) rel_open_def) hence "\x. (f x = \ | f x = -\)" unfolding domain_def by auto } thus ?thesis by auto qed lemma convex_with_UNIV_domain: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "domain f = UNIV" shows "(\x. f x > -\) \ (\x. f x = -\)" by (metis assms convex_improper ereal_MInfty_lessI proper_iff rel_interior_UNIV UNIV_I) lemma rel_interior_Epigraph: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "(x,z) : rel_interior (Epigraph UNIV f) \ (x : rel_interior (domain f) \ f x < ereal z)" apply (subst rel_interior_projection[of _ "(\y. {z. (y, z) : Epigraph UNIV f})"]) apply (metis assms convex_EpigraphI convex_UNIV convex_on_ereal_univ) unfolding domain_Epigraph Epigraph_def using rel_interior_ereal_semiline by auto lemma rel_interior_EpigraphI: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "rel_interior (Epigraph UNIV f) = {(x,z) |x z. x : rel_interior (domain f) \ f x < ereal z}" proof- { fix x z have "(x,z):rel_interior (Epigraph UNIV f) \ (x : rel_interior (domain f) \ f x < ereal z)" using rel_interior_Epigraph[of f x z] assms by auto } thus ?thesis by auto qed lemma convex_less_ri_domain: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "\x. f x < a" shows "\x\rel_interior (domain f). f x < a" proof- define A where "A = {(x::'a::euclidean_space,m)|x m. 
ereal m ereal z < a" using ereal_dense2 by auto hence "(x,z):A \ (x,z):Epigraph UNIV f" unfolding A_def Epigraph_def by auto hence "A Int (Epigraph UNIV f) \ {}" unfolding A_def Epigraph_def using assms by auto moreover have "open A" proof(cases a) case real hence "A = {y. inner (0::'a, 1) y < real_of_ereal a}" using A_def by auto thus ?thesis using open_halfspace_lt by auto next case PInf thus ?thesis using A_def by auto next case MInf thus ?thesis using A_def by auto qed ultimately have "A Int rel_interior(Epigraph UNIV f) \ {}" by (metis assms(1) convex_Epigraph convex_UNIV open_Int_closure_eq_empty open_inter_closure_rel_interior) then obtain x0 z0 where "(x0,z0):A \ x0 : rel_interior (domain f) \ f x0 < ereal z0" using rel_interior_Epigraph[of f] assms by auto thus ?thesis apply(rule_tac x="x0" in bexI) using A_def by auto qed lemma rel_interior_eq_between: fixes S T :: "('m::euclidean_space) set" assumes "convex S" "convex T" shows "(rel_interior S = rel_interior T) \ (rel_interior S \ T \ T \ closure S)" by (metis assms closure_eq_between convex_closure_rel_interior convex_rel_interior_closure) lemma convex_less_in_riS: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "convex S" "rel_interior S \ domain f" assumes "\x\closure S. f x < a" shows "\x\rel_interior S. f x < a" proof- define g where "g x = (if x:closure S then (f x) else \)" for x hence "\x. g x < a" using assms by auto have convg: "convex_on UNIV g" unfolding g_def apply (subst convex_PInfty_outside) using assms convex_closure by auto hence *: "\x\rel_interior (domain g). g x < a" apply (subst convex_less_ri_domain) using assms g_def by auto have "convex (domain g)" by (metis convg convex_domain) moreover have "rel_interior S \ domain g \ domain g \ closure S" using g_def assms rel_interior_subset_closure unfolding domain_def by auto ultimately have "rel_interior (domain g) = rel_interior S" by (metis assms(2) rel_interior_eq_between) thus ?thesis by (metis "*" g_def less_ereal.simps(2)) qed lemma convex_less_inS: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "convex S" "S \ domain f" assumes "\x\closure S. f x < a" shows "\x\S. f x < a" using convex_less_in_riS[of f S a] rel_interior_subset[of S] assms by auto lemma convex_finite_geq_on_closure: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "convex S" "finite_on S f" assumes "\x\S. f x \ a" shows "\x\closure S. f x \ a" proof- have "S \ domain f" using assms unfolding finite_on_def domain_def by auto { assume "\(\x\closure S. f x \ a)" hence "\x\closure S. f x < a" by (simp add: not_le) hence False using convex_less_inS[of f S a] assms \S \ domain f\ by auto } thus ?thesis by auto qed lemma lsc_hull_of_convex_determined: fixes f g :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" "convex_on UNIV g" assumes "rel_interior (domain f) = rel_interior (domain g)" assumes "\x\rel_interior (domain f). 
f x = g x" shows "lsc_hull f = lsc_hull g" proof- have "rel_interior (Epigraph UNIV f) = rel_interior (Epigraph UNIV g)" apply (subst rel_interior_EpigraphI, metis assms)+ using assms by auto hence "closure (Epigraph UNIV f) = closure (Epigraph UNIV g)" by (metis assms convex_EpigraphI convex_UNIV convex_closure_rel_interior) thus ?thesis by (metis lsc_hull_expl) qed lemma domain_lsc_hull_between: fixes f :: "'a::euclidean_space \ ereal" shows "domain f \ domain (lsc_hull f) \ domain (lsc_hull f) \ closure (domain f)" proof- { fix x assume "x:domain f" hence "x:domain (lsc_hull f)" unfolding domain_def using lsc_hull_le[of f x] by auto } moreover { fix x assume "x:domain (lsc_hull f)" hence "min (f x) (Liminf (at x) f) < \" unfolding domain_def using lsc_hull_liminf_at[of f] by auto then obtain z where z_def: "min (f x) (Liminf (at x) f) < z \ z < \" by (metis dense) { fix e::real assume "e>0" hence "Inf (f ` ball x e) \ min (f x) (Liminf (at x) f)" unfolding min_Liminf_at apply (subst SUP_upper) by auto hence "\y. y \ ball x e \ f y \ z" using Inf_le_iff_less [of f "ball x e" "min (f x) (Liminf (at x) f)"] z_def by (auto simp add: Bex_def) hence "\y. dist x y < e \ y \ domain f" unfolding domain_def ball_def using z_def by auto } hence "x\closure(domain f)" unfolding closure_approachable by (auto simp add: dist_commute) } ultimately show ?thesis by auto qed lemma domain_vs_domain_lsc_hull: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "rel_interior(domain (lsc_hull f)) = rel_interior(domain f) \ closure(domain (lsc_hull f)) = closure(domain f) \ aff_dim(domain (lsc_hull f)) = aff_dim(domain f)" proof- have "convex (domain f)" by (metis assms convex_domain) moreover have "convex (domain (lsc_hull f))" by (metis assms convex_domain convex_lsc_hull) moreover have "rel_interior (domain f) \ domain (lsc_hull f) \ domain (lsc_hull f) \ closure (domain f)" by (metis domain_lsc_hull_between rel_interior_subset subset_trans) ultimately show ?thesis by (metis closure_eq_between rel_interior_aff_dim rel_interior_eq_between) qed lemma vertical_line_affine: fixes x :: "'a::euclidean_space" shows "affine {(x,m::real)|m. m:UNIV}" unfolding affine_def by (auto simp add: pth_8) lemma lsc_hull_of_convex_agrees_onRI: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "\x\rel_interior (domain f). (f x = (lsc_hull f) x)" proof- have cEpi: "convex (Epigraph UNIV f)" by (metis assms convex_EpigraphI convex_UNIV) { fix x assume x_def: "x : rel_interior (domain f)" hence "f x < \" unfolding domain_def using rel_interior_subset by auto then obtain r where r_def: "(x,r):rel_interior (Epigraph UNIV f)" using assms x_def rel_interior_Epigraph[of f x] by (metis ereal_dense2) define M where "M = {(x,m::real)|m. m:UNIV}" hence "affine M" using vertical_line_affine by auto moreover have "rel_interior (Epigraph UNIV f) Int M \ {}" using r_def M_def by auto ultimately have *: "closure (Epigraph UNIV f) Int M = closure (Epigraph UNIV f Int M)" using convex_affine_closure_Int[of "Epigraph UNIV f" M] cEpi by auto have "Epigraph UNIV f Int M = {x} \ {m. f x \ ereal m}" unfolding Epigraph_def M_def by auto moreover have "closed({x} \ {m. f x\ ereal m})" apply (subst closed_Times) using closed_ereal_semiline by auto ultimately have "{x} \ {m. f x \ ereal m} = closure (Epigraph UNIV f) Int M" by (metis "*" Int_commute closure_closed) also have "\=Epigraph UNIV (lsc_hull f) Int M" by (metis Int_commute epigraph_lsc_hull) also have "\={x} \ {m. 
((lsc_hull f) x) \ ereal m}" unfolding Epigraph_def M_def by auto finally have "{m. f x \ ereal m} = {m. lsc_hull f x \ ereal m}" by auto hence "f x = (lsc_hull f) x" using ereal_semiline_unique by auto } thus ?thesis by auto qed lemma lsc_hull_of_convex_agrees_outside: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "\x. x \ closure (domain f) \ (f x = (lsc_hull f) x)" proof- { fix x assume "x \ closure (domain f)" hence "x \ domain (lsc_hull f)" using domain_lsc_hull_between by auto hence "(lsc_hull f) x = \" unfolding domain_def by auto hence "f x = (lsc_hull f) x" using lsc_hull_le[of f x] by auto } thus ?thesis by auto qed lemma lsc_hull_of_convex_agrees: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" shows "\x. (f x = (lsc_hull f) x) | x : rel_frontier (domain f)" by (metis DiffI assms lsc_hull_of_convex_agrees_onRI lsc_hull_of_convex_agrees_outside rel_frontier_def) lemma lsc_hull_of_proper_convex_proper: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" "proper f" shows "proper (lsc_hull f)" proof- obtain x where x_def: "x:rel_interior (domain f) \ f x < \" by (metis assms convex_less_ri_domain ereal_less_PInfty proper_iff) hence "f x = (lsc_hull f) x" using lsc_hull_of_convex_agrees[of f] assms unfolding rel_frontier_def by auto moreover have "f x > -\" using assms proper_iff by auto ultimately have "(lsc_hull f) x < \ \ (lsc_hull f) x > -\" using x_def by auto thus ?thesis using convex_lsc_improper[of "lsc_hull f" x] lsc_lsc_hull[of f] assms convex_lsc_hull[of f] by auto qed lemma lsc_hull_of_proper_convex: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" "proper f" shows "lsc (lsc_hull f) \ proper (lsc_hull f) \ convex_on UNIV (lsc_hull f) \ (\x. (f x = (lsc_hull f) x) | x : rel_frontier (domain f))" by (metis assms convex_lsc_hull lsc_hull_of_convex_agrees lsc_hull_of_proper_convex_proper lsc_lsc_hull) lemma affine_no_rel_frontier: fixes S :: "('n::euclidean_space) set" assumes "affine S" shows "rel_frontier S = {}" unfolding rel_frontier_def using assms affine_closed[of S] closure_closed[of S] affine_rel_open[of S] rel_open_def[of S] by auto lemma convex_with_affine_domain_is_lsc: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "affine (domain f)" shows "lsc f" by (metis assms affine_no_rel_frontier emptyE lsc_def lsc_hull_liminf_at lsc_hull_of_convex_agrees lsc_liminf_at_eq) lemma convex_finite_is_lsc: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "finite_on UNIV f" shows "lsc f" proof- have "affine (domain f)" using assms affine_UNIV unfolding finite_on_def domain_def by auto thus ?thesis by (metis assms(1) convex_with_affine_domain_is_lsc) qed lemma always_eventually_within: "(\x\S. P x) \ eventually P (at x within S)" unfolding eventually_at_filter by auto lemma ereal_divide_pos: assumes "(a::ereal)>0" "b>0" shows "a/(ereal b)>0" by (metis PInfty_eq_infinity assms ereal.simps(2) ereal_less(2) ereal_less_divide_pos ereal_mult_zero) lemma real_interval_limpt: assumes "a0 \ cball b e \ T" using open_contains_cball[of T] by auto hence "(b-e):cball b e" unfolding cball_def dist_norm by auto moreover { assume "a\b-e" hence "a:cball b e" unfolding cball_def dist_norm using \a by auto } ultimately have "max a (b-e):cball b e" by (metis max.absorb1 max.absorb2 linear) hence "max a (b-e):T" using e_def by auto moreover have "max a (b-e):{a..a by auto ultimately have "\y\{a.. 
y \ b" by auto } thus ?thesis unfolding islimpt_def by auto qed lemma lsc_hull_of_convex_aux: "Limsup (at 1 within {0..<1}) (\m. ereal ((1-m)*a+m*b)) \ ereal b" proof- have nontr: "~trivial_limit (at 1 within {0..< 1::real})" apply (subst trivial_limit_within) using real_interval_limpt by auto have "((\m. ereal ((1-m)*a+m*b)) \ (1 - 1) * a + 1 * b) (at 1 within {0..<1})" unfolding lim_ereal by (intro tendsto_intros) from lim_imp_Limsup[OF nontr this] show ?thesis by simp qed lemma lsc_hull_of_convex: fixes f :: "'a::euclidean_space \ ereal" assumes "convex_on UNIV f" assumes "x : rel_interior (domain f)" shows "((\m. f((1-m)*\<^sub>R x + m *\<^sub>R y)) \ (lsc_hull f) y) (at 1 within {0..<1})" (is "(?g \ _ y) _") proof (cases "y=x") case True hence "?g = (\m. f y)" by (simp add: algebra_simps) hence "(?g \ f y) (at 1 within {0..<1})" by simp moreover have "(lsc_hull f) y = f y" by (metis \y=x\ assms lsc_hull_of_convex_agrees_onRI) ultimately show ?thesis by auto next case False have aux: "\m. y - ((1 - m) *\<^sub>R x + m *\<^sub>R y) = (1-m)*\<^sub>R (y-x)" by (simp add: algebra_simps) have "(lsc_hull f) y = min (f y) (Liminf (at y) f)" by (metis lsc_hull_liminf_at) also have "\ \ Liminf (at 1 within {0..<1}) ?g" unfolding min_Liminf_at unfolding Liminf_within apply (subst SUP_mono) apply (rule_tac x="n/norm(y-x)" in bexI) apply (subst INF_mono) apply (rule_tac x="(1 - m) *\<^sub>R x + m *\<^sub>R y" in bexI) prefer 2 unfolding ball_def dist_norm by (auto simp add: aux \y\x\ less_divide_eq) finally have *: "(lsc_hull f) y \ Liminf (at 1 within {0..<1}) ?g" by auto { fix b assume "ereal b\(lsc_hull f) y" hence yb: "(y,b):closure(Epigraph UNIV f)" by (metis epigraph_lsc_hull mem_Epigraph UNIV_I) have "x : domain f" by (metis assms(2) rel_interior_subset rev_subsetD) hence "f x<\" unfolding domain_def by auto then obtain a where"ereal a>f x" by (metis ereal_dense2) hence xa: "(x,a):rel_interior(Epigraph UNIV f)" by (metis assms rel_interior_Epigraph) { fix m :: real assume "0 \ m \ m < 1" hence "(y, b) - (1-m) *\<^sub>R ((y, b) - (x, a)) : rel_interior (Epigraph UNIV f)" apply (subst rel_interior_closure_convex_shrink) apply (metis assms(1) convex_Epigraph convex_UNIV convex_on_ereal_univ) using yb xa by auto hence "f (y - (1-m) *\<^sub>R (y-x) ) < ereal (b-(1-m)*(b-a))" using assms(1) rel_interior_Epigraph by auto hence "?g m \ ereal ((1-m)*a+m*b)" by (simp add: algebra_simps) } hence "eventually (\m. ?g m \ ereal ((1-m)*a+m*b)) (at 1 within {0..<1})" apply (subst always_eventually_within) by auto hence "Limsup (at 1 within {0..<1}) ?g \ Limsup (at 1 within {0..<1}) (\m. 
ereal ((1-m)*a+m*b))" apply (subst Limsup_mono) by auto also have "\ \ ereal b" using lsc_hull_of_convex_aux by auto finally have "Limsup (at 1 within {0..<1}) ?g \ereal b" by auto } hence "Limsup (at 1 within {0..<1}) ?g \ (lsc_hull f) y" using ereal_le_real[of "(lsc_hull f) y"] by auto moreover have nontr: "~trivial_limit (at (1::real) within {0..<1})" apply (subst trivial_limit_within) using real_interval_limpt by auto moreover hence "Liminf (at 1 within {0..<1}) ?g \ Limsup (at 1 within {0..<1}) ?g" apply (subst Liminf_le_Limsup) by auto ultimately have "Limsup (at 1 within {0..<1}) ?g = (lsc_hull f) y \ Liminf (at 1 within {0..<1}) ?g = (lsc_hull f) y" using * by auto thus ?thesis apply (subst Liminf_eq_Limsup) using nontr by auto qed end diff --git a/thys/MSO_Regex_Equivalence/Pi_Regular_Operators.thy b/thys/MSO_Regex_Equivalence/Pi_Regular_Operators.thy --- a/thys/MSO_Regex_Equivalence/Pi_Regular_Operators.thy +++ b/thys/MSO_Regex_Equivalence/Pi_Regular_Operators.thy @@ -1,548 +1,554 @@ (* Author: Dmitriy Traytel *) section "Some Useful Regular Operators" (*<*) theory Pi_Regular_Operators imports Pi_Derivatives "HOL-Library.While_Combinator" begin (*>*) primrec REV :: "'a rexp \ 'a rexp" where "REV Zero = Zero" | "REV Full = Full" | "REV One = One" | "REV (Atom a) = Atom a" | "REV (Plus r s) = Plus (REV r) (REV s)" | "REV (Times r s) = Times (REV s) (REV r)" | "REV (Star r) = Star (REV r)" | "REV (Not r) = Not (REV r)" | "REV (Inter r s) = Inter (REV r) (REV s)" | "REV (Pr r) = Pr (REV r)" lemma REV_REV[simp]: "REV (REV r) = r" by (induct r) auto lemma final_REV[simp]: "final (REV r) = final r" by (induct r) auto lemma REV_PLUS: "REV (PLUS xs) = PLUS (map REV xs)" by (induct xs rule: list_singleton_induct) auto lemma (in alphabet) wf_REV[simp]: "wf n r \ wf n (REV r)" by (induct r arbitrary: n) auto lemma (in project) lang_REV[simp]: "lang n (REV r) = rev ` lang n r" by (induct r arbitrary: n) (auto simp: image_image rev_map image_set_diff) context embed begin primrec rderiv :: "'a \ 'b rexp \ 'b rexp" where "rderiv _ Zero = Zero" | "rderiv _ Full = Full" | "rderiv _ One = Zero" | "rderiv a (Atom b) = (if lookup b a then One else Zero)" | "rderiv a (Plus r s) = Plus (rderiv a r) (rderiv a s)" | "rderiv a (Times r s) = (let rs' = Times r (rderiv a s) in if final s then Plus rs' (rderiv a r) else rs')" | "rderiv a (Star r) = Times (Star r) (rderiv a r)" | "rderiv a (Not r) = Not (rderiv a r)" | "rderiv a (Inter r s) = Inter (rderiv a r) (rderiv a s)" | "rderiv a (Pr r) = Pr (PLUS (map (\a'. 
rderiv a' r) (embed a)))" primrec rderivs where "rderivs [] r = r" | "rderivs (w#ws) r = rderivs ws (rderiv w r)" lemma rderivs_snoc: "rderivs (ws @ [w]) r = rderiv w (rderivs ws r)" by (induct ws arbitrary: r) auto lemma rderivs_append: "rderivs (ws @ ws') r = rderivs ws' (rderivs ws r)" by (induct ws arbitrary: r) auto lemma rderiv_lderiv: "rderiv as r = REV (lderiv as (REV r))" by (induct r arbitrary: as) (auto simp: Let_def o_def REV_PLUS) lemma rderivs_lderivs: "rderivs w r = REV (lderivs w (REV r))" by (induct w arbitrary: r) (auto simp: rderiv_lderiv) lemma wf_rderiv[simp]: "wf n r \ wf n (rderiv w r)" unfolding rderiv_lderiv by (rule wf_REV[OF wf_lderiv[OF wf_REV]]) lemma wf_rderivs[simp]: "wf n r \ wf n (rderivs ws r)" unfolding rderivs_lderivs by (rule wf_REV[OF wf_lderivs[OF wf_REV]]) lemma lang_rderiv: "\wf n r; as \ \ n\ \ lang n (rderiv as r) = rQuot as (lang n r)" unfolding rderiv_lderiv rQuot_rev_lQuot by (simp add: lang_lderiv) lemma lang_rderivs: "\wf n r; wf_word n w\ \ lang n (rderivs w r) = rQuots w (lang n r)" unfolding rderivs_lderivs rQuots_rev_lQuots by (simp add: lang_lderivs) corollary rderivs_final: assumes "wf n r" "wf_word n w" shows "final (rderivs w r) \ rev w \ lang n r" using lang_rderivs[OF assms] lang_final[of "rderivs w r" n] by auto lemma toplevel_summands_REV[simp]: "toplevel_summands (REV r) = REV ` toplevel_summands r" by (induct r) auto lemma ACI_norm_REV: "\REV \r\\ = \REV r\" proof (induct r) case (Plus r s) show ?case using [[unfold_abs_def = false]] unfolding REV.simps ACI_norm.simps Plus[symmetric] image_Un[symmetric] toplevel_summands.simps(1) toplevel_summands_ACI_norm toplevel_summands_REV unfolding toplevel_summands.simps(1)[symmetric] ACI_norm_flatten toplevel_summands_REV unfolding ACI_norm_flatten[symmetric] toplevel_summands_ACI_norm .. qed auto lemma ACI_norm_rderiv: "\rderiv as \r\\ = \rderiv as r\" unfolding rderiv_lderiv by (metis ACI_norm_REV ACI_norm_lderiv) lemma ACI_norm_rderivs: "\rderivs w \r\\ = \rderivs w r\" unfolding rderivs_lderivs by (metis ACI_norm_REV ACI_norm_lderivs) theorem finite_rderivs: "finite {\rderivs xs r\ | xs . True}" unfolding rderivs_lderivs by (subst ACI_norm_REV[symmetric]) (auto intro: finite_surj[OF finite_lderivs, of _ "\r. 
\REV r\"]) lemma lderiv_PLUS[simp]: "lderiv a (PLUS xs) = PLUS (map (lderiv a) xs)" by (induct xs rule: list_singleton_induct) auto lemma rderiv_PLUS[simp]: "rderiv a (PLUS xs) = PLUS (map (rderiv a) xs)" by (induct xs rule: list_singleton_induct) auto lemma lang_rderiv_lderiv: "lang n (rderiv a (lderiv b r)) = lang n (lderiv b (rderiv a r))" by (induct r arbitrary: n a b) (auto simp: Let_def conc_assoc) lemma lang_lderiv_rderiv: "lang n (lderiv a (rderiv b r)) = lang n (rderiv b (lderiv a r))" by (induct r arbitrary: n a b) (auto simp: Let_def conc_assoc) lemma lang_rderiv_lderivs[simp]: "\wf n r; wf_word n w; a \ \ n\ \ lang n (rderiv a (lderivs w r)) = lang n (lderivs w (rderiv a r))" by (induct w arbitrary: n r) (auto, auto simp: lang_lderivs lang_lderiv lang_rderiv lQuot_rQuot) lemma lang_lderiv_rderivs[simp]: "\wf n r; wf_word n w; a \ \ n\ \ lang n (lderiv a (rderivs w r)) = lang n (rderivs w (lderiv a r))" by (induct w arbitrary: n r) (auto, auto simp: lang_rderivs lang_lderiv lang_rderiv lQuot_rQuot) (* primrec bideriv :: "'a \ 'a \ 'a rexp \ 'a rexp" where "bideriv _ _ Zero = Zero" | "bideriv _ _ One = Zero" | "bideriv a b (Atom c) = Zero" | "bideriv a b (Plus r s) = Plus (bideriv a b r) (bideriv a b s)" | "bideriv a b (Times r s) = (let rs' = Times (lderiv a r) (rderiv b s) in if final r \ final s then Plus (Plus rs' (bideriv a b r)) (bideriv a b s) else if final r then Plus rs' (bideriv a b s) else if final s then Plus rs' (bideriv a b r) else rs')" | "bideriv a b (Star r) = Plus (bideriv a b r) (TIMES [lderiv a r, Star r, rderiv b r])" | "bideriv a b (Not r) = Not (bideriv a b r)" | "bideriv a b (Inter r s) = Inter (bideriv a b r) (bideriv a b s)" | "bideriv a b (Pr r) = Pr (PLUS (map (\b. PLUS (map (\a. bideriv a b r) (embed a))) (embed b)))" lemma wf_bideriv[simp]: "wf n r \ wf n (bideriv a b r)" by (induct r arbitrary: n a b) (auto simp: maps_def Let_def) lemma ACI_norm_bideriv_rderiv_lderiv: "\bideriv a b r\ = \rderiv b (lderiv a r)\" by (induct r arbitrary: a b) (auto simp: Let_def maps_def o_def list_all2_map1 list_all2_map2 intro!: ACI_PLUS list_all2_refl) lemma bideriv_rderiv_lderiv: "lang n (bideriv a b r) = lang n (rderiv b (lderiv a r))" by (induct r arbitrary: n a b) (auto simp: Let_def) lemma lang_bideriv: "\wf n r; a \ \ n; b \ \ n\ \ lang n (bideriv a b r) = biQuot a b (lang n r)" by (auto simp: bideriv_rderiv_lderiv lang_lderiv lang_rderiv biQuot_rQuot_lQuot) lemma ACI_norm_bideriv: "\bideriv a b \r\\ = \bideriv a b r\" unfolding ACI_norm_bideriv_rderiv_lderiv by (metis ACI_norm_lderiv ACI_norm_rderiv) fun biderivs where "biderivs [] [] r = r" | "biderivs as [] r = lderivs as r" | "biderivs [] bs r = rderivs bs r" | "biderivs (a#as) (b#bs) r = biderivs as bs (bideriv a b r)" lemma biderivs_rderivs_lderivs: "\wf n r; wf_word n w1; wf_word n w2\ \ lang n (biderivs w1 w2 r) = lang n (rderivs w2 (lderivs w1 r))" by (induct arbitrary: r rule: biderivs.induct) (auto simp: lang_rderivs lang_lderivs bideriv_rderiv_lderiv) lemma lang_biderivs: "\wf n r; wf_word n w1; wf_word n w2\ \ lang n (biderivs w1 w2 r) = biQuots w1 w2 (lang n r)" by (auto simp: biderivs_rderivs_lderivs lang_lderivs lang_rderivs biQuots_rQuots_lQuots) lemma wf_biderivs[simp]: "wf n r \ wf n (biderivs w1 w2 r)" by (induct arbitrary: r rule: biderivs.induct) auto *) definition "biderivs w1 w2 = rderivs w2 o lderivs w1" lemma lang_biderivs: "\wf n r; wf_word n w1; wf_word n w2\ \ lang n (biderivs w1 w2 r) = biQuots w1 w2 (lang n r)" unfolding biderivs_def by (auto simp: lang_rderivs lang_lderivs 
in_lists_conv_set) lemma wf_biderivs[simp]: "wf n r \ wf n (biderivs w1 w2 r)" unfolding biderivs_def by simp corollary biderivs_final: assumes "wf n r" "wf_word n w1" "wf_word n w2" shows "final (biderivs w1 w2 r) \ w1 @ rev w2 \ lang n r" using lang_biderivs[OF assms] lang_final[of "biderivs w1 w2 r" n] by auto lemma ACI_norm_biderivs: "\biderivs w1 w2 \r\\ = \biderivs w1 w2 r\" unfolding biderivs_def by (metis ACI_norm_lderivs ACI_norm_rderivs o_apply) lemma "finite {\biderivs w1 w2 r\ | w1 w2 . True}" proof - have "{\biderivs w1 w2 r\ | w1 w2 . True} = (\s \ {\lderivs as r\ | as . True}. {\rderivs bs s\ | bs . True})" unfolding biderivs_def by (fastforce simp: ACI_norm_rderivs) also have "finite \" by (rule iffD2[OF finite_UN[OF finite_lderivs] ballI[OF finite_rderivs]]) finally show ?thesis . qed end subsection \Quotioning by the same letter\ definition "fin_cut_same x xs = take (LEAST n. drop n xs = replicate (length xs - n) x) xs" lemma fin_cut_same_Nil[simp]: "fin_cut_same x [] = []" unfolding fin_cut_same_def by simp lemma Least_fin_cut_same: "(LEAST n. drop n xs = replicate (length xs - n) y) = length xs - length (takeWhile (\x. x = y) (rev xs))" (is "Least ?P = ?min") proof (rule Least_equality) show "?P ?min" by (induct xs rule: rev_induct) (auto simp: Suc_diff_le replicate_append_same) next fix m assume "?P m" have "length xs - m \ length (takeWhile (\x. x = y) (rev xs))" proof (intro length_takeWhile_less_P_nth) fix i assume "i < length xs - m" hence "rev xs ! i \ set (drop m xs)" by (induct xs arbitrary: i rule: rev_induct) (auto simp: nth_Cons') with \?P m\ show "rev xs ! i = y" by simp qed simp thus "?min \ m" by linarith qed lemma takeWhile_takes_all: "length xs = m \ m \ length (takeWhile P xs) \ Ball (set xs) P" by hypsubst_thin (induct xs, auto) lemma fin_cut_same_Cons[simp]: "fin_cut_same x (y # xs) = (if fin_cut_same x xs = [] then if x = y then [] else [y] else y # fin_cut_same x xs)" unfolding fin_cut_same_def Least_fin_cut_same apply auto apply (simp add: takeWhile_takes_all) apply (simp add: takeWhile_takes_all) apply auto apply (metis (full_types) Suc_diff_le length_rev length_takeWhile_le take_Suc_Cons) apply (simp add: takeWhile_takes_all) apply (subst takeWhile_append2) apply auto apply (simp add: takeWhile_takes_all) apply auto apply (metis (full_types) Suc_diff_le length_rev length_takeWhile_le take_Suc_Cons) done lemma fin_cut_same_singleton[simp]: "fin_cut_same x (xs @ [x]) = fin_cut_same x xs" by (induct xs) auto lemma fin_cut_same_replicate[simp]: "fin_cut_same x (xs @ replicate n x) = fin_cut_same x xs" by (induct n arbitrary: xs) (auto simp: replicate_append_same[symmetric] append_assoc[symmetric] simp del: append_assoc) lemma fin_cut_sameE: "fin_cut_same x xs = ys \ \m. xs = ys @ replicate m x" - by (induct xs arbitrary: ys) (auto, metis replicate_Suc) + apply (induct xs arbitrary: ys) + apply auto + apply (metis replicate_Suc) + apply metis + apply metis + done + definition "SAMEQUOT a A = {fin_cut_same a x @ replicate m a| x m. 
x \ A}" lemma SAMEQUOT_mono: "A \ B \ SAMEQUOT a A \ SAMEQUOT a B" unfolding SAMEQUOT_def by auto locale embed2 = embed \ wf_atom project lookup embed for \ :: "nat \ 'a set" and wf_atom :: "nat \ 'b :: linorder \ bool" and project :: "'a \ 'a" and lookup :: "'b \ 'a \ bool" and embed :: "'a \ 'a list" + fixes singleton :: "'a \ 'b" assumes wf_singleton[simp]: "a \ \ n \ wf_atom n (singleton a)" assumes lookup_singleton[simp]: "lookup (singleton a) a' = (a = a')" begin lemma finite_rderivs_same: "finite {\rderivs (replicate m a) r\ | m . True}" by (auto intro: finite_subset[OF _ finite_rderivs]) lemma wf_word_replicate[simp]: "a \ \ n \ wf_word n (replicate m a)" by (induct m) auto lemma star_singleton[simp]: "star {[x]} = {replicate m x | m . True}" proof (intro equalityI subsetI) fix xs assume "xs \ star {[x]}" thus "xs \ {replicate m x | m . True}" by (induct xs) (auto, metis replicate_Suc) qed (auto intro: Ball_starI) definition "samequot a r = Times (flatten PLUS {\rderivs (replicate m a) r\ | m . True}) (Star (Atom (singleton a)))" lemma wf_samequot: "\wf n r; a \ \ n\ \ wf n (samequot a r)" unfolding samequot_def wf.simps wf_flatten_PLUS[OF finite_rderivs_same] by auto lemma lang_samequot: "\wf n r; a \ \ n\ \ lang n (samequot a r) = SAMEQUOT a (lang n r)" unfolding SAMEQUOT_def samequot_def lang.simps lang_flatten_PLUS[OF finite_rderivs_same] apply (rule sym) apply (auto simp: lang_rderivs) apply (intro concI) apply auto apply (insert fin_cut_sameE[OF refl, of _ a]) apply (drule meta_spec) apply (erule exE) apply (intro exI conjI) apply (rule refl) apply (auto simp: lang_rderivs) apply (erule subst) apply assumption apply (erule concE) apply (auto simp: lang_rderivs) apply (drule meta_spec) apply (erule exE) apply (intro exI conjI) defer apply assumption unfolding fin_cut_same_replicate apply (erule trans) unfolding fin_cut_same_replicate apply (rule refl) done fun rderiv_and_add where "rderiv_and_add as (_ :: bool, rs) = (let r = \rderiv as (hd rs)\ in if r \ set rs then (False, rs) else (True, r # rs))" definition "invar_rderiv_and_add as r brs \ (if fst brs then True else \rderiv as (hd (snd brs))\ \ set (snd brs)) \ snd brs \ [] \ distinct (snd brs) \ (\i < length (snd brs). snd brs ! 
i = \rderivs (replicate (length (snd brs) - 1 - i) as) r\)" lemma invar_rderiv_and_add_init: "invar_rderiv_and_add as r (True, [\r\])" unfolding invar_rderiv_and_add_def by auto lemma invar_rderiv_and_add_step: "invar_rderiv_and_add as r brs \ fst brs \ invar_rderiv_and_add as r (rderiv_and_add as brs)" unfolding invar_rderiv_and_add_def by (cases brs) (auto simp: Let_def nth_Cons' ACI_norm_rderiv rderivs_snoc[symmetric] neq_Nil_conv replicate_append_same) lemma rderivs_replicate_mult: "\\rderivs (replicate i as) r\ = \r\; i > 0\ \ \rderivs (replicate (m * i) as) r\ = \r\" proof (induct m arbitrary: r) case (Suc m) hence "\rderivs (replicate (m * i) as) \rderivs (replicate i as) r\\ = \r\" by (auto simp: ACI_norm_rderivs) thus ?case by (auto simp: ACI_norm_rderivs replicate_add rderivs_append) qed simp lemma rderivs_replicate_mult_rest: assumes "\rderivs (replicate i as) r\ = \r\" "k < i" shows "\rderivs (replicate (m * i + k) as) r\ = \rderivs (replicate k as) r\" (is "?L = ?R") proof - have "?L = \rderivs (replicate k as) \rderivs (replicate (m * i) as) r\\" by (simp add: ACI_norm_rderivs replicate_add rderivs_append) also have "\rderivs (replicate (m * i) as) r\ = \r\" using assms by (simp add: rderivs_replicate_mult) finally show ?thesis by (simp add: ACI_norm_rderivs) qed lemma rderivs_replicate_mod: assumes "\rderivs (replicate i as) r\ = \r\" "i > 0" shows "\rderivs (replicate m as) r\ = \rderivs (replicate (m mod i) as) r\" (is "?L = ?R") by (subst div_mult_mod_eq[symmetric, of m i]) (intro rderivs_replicate_mult_rest[OF assms(1)] mod_less_divisor[OF assms(2)]) lemma rderivs_replicate_diff: "\\rderivs (replicate i as) r\ = \rderivs (replicate j as) r\; i > j\ \ \rderivs (replicate (i - j) as) (rderivs (replicate j as) r)\ = \rderivs (replicate j as) r\" unfolding rderivs_append[symmetric] replicate_add[symmetric] by auto lemma samequot_wf: assumes "wf n r" "while_option fst (rderiv_and_add as) (True, [\r\]) = Some (b, rs)" shows "wf n (PLUS rs)" proof - have "\ b" using while_option_stop[OF assms(2)] by simp from while_option_rule[where P="invar_rderiv_and_add as r", OF invar_rderiv_and_add_step assms(2) invar_rderiv_and_add_init] have *: "invar_rderiv_and_add as r (b, rs)" by simp thus "wf n (PLUS rs)" unfolding invar_rderiv_and_add_def wf_PLUS by (auto simp: in_set_conv_nth wf_rderivs[OF assms(1)]) qed lemma samequot_soundness: assumes "while_option fst (rderiv_and_add as) (True, [\r\]) = Some (b, rs)" shows "lang n (PLUS rs) = \ (lang n ` {\rderivs (replicate m as) r\ | m. 
True})" proof - have "\ b" using while_option_stop[OF assms] by simp moreover from while_option_rule[where P="invar_rderiv_and_add as r", OF invar_rderiv_and_add_step assms invar_rderiv_and_add_init] have *: "invar_rderiv_and_add as r (b, rs)" by simp ultimately obtain i where i: "i < length rs" and "\rderivs (replicate (length rs - Suc i) as) r\ = \rderivs (replicate (Suc (length rs - Suc 0)) as) r\" (is "\rderivs ?x r\ = _") unfolding invar_rderiv_and_add_def by (auto simp: in_set_conv_nth hd_conv_nth ACI_norm_rderiv rderivs_snoc[symmetric] replicate_append_same) with * have "\rderivs ?x r\ = \rderivs (replicate (length rs) as) r\" by (auto simp: invar_rderiv_and_add_def) with i have cyc: "\rderivs (replicate (Suc i) as) (rderivs ?x r)\ = \rderivs ?x r\" by (fastforce dest: rderivs_replicate_diff[OF sym]) { fix m have "\irderivs (replicate m as) r\" proof (cases "m > length rs - Suc i") case True with i obtain m' where m: "m = m' + length rs - Suc i" by atomize_elim (auto intro: exI[of _ "m - (length rs - Suc i)"]) with i have "\rderivs (replicate m as) r\ = \rderivs (replicate m' as) (rderivs ?x r)\" unfolding replicate_add[symmetric] rderivs_append[symmetric] by (simp add: add.commute) also from cyc have "\ = \rderivs (replicate (m' mod (Suc i)) as) (rderivs ?x r)\" by (elim rderivs_replicate_mod) simp also from i have "\ = \rderivs (replicate (m' mod (Suc i) + length rs - Suc i) as) r\" unfolding rderivs_append[symmetric] replicate_add[symmetric] by (simp add: add.commute) also from m i have "\ = \rderivs (replicate ((m - (length rs - Suc i)) mod (Suc i) + length rs - Suc i) as) r\" by simp also have "\ = \rderivs (replicate (length rs - Suc (i - (m - (length rs - Suc i)) mod (Suc i))) as) r\" by (subst Suc_diff_le[symmetric]) (metis less_Suc_eq_le mod_less_divisor zero_less_Suc, simp add: add.commute) finally have "\j < length rs. \rderivs (replicate m as) r\ = \rderivs (replicate (length rs - Suc j) as) r\" using i by (metis less_imp_diff_less) with * show ?thesis unfolding invar_rderiv_and_add_def by auto next case False with i have "\j < length rs. m = length rs - Suc j" by (induct m) (metis diff_self_eq_0 gr_implies_not0 lessI nat.exhaust, metis (no_types) One_nat_def Suc_diff_Suc diff_Suc_1 gr0_conv_Suc less_imp_diff_less not_less_eq not_less_iff_gr_or_eq) with * show ?thesis unfolding invar_rderiv_and_add_def by auto qed } hence "\ (lang n ` {\rderivs (replicate m as) r\ |m. True}) \ lang n (PLUS rs)" by (fastforce simp: in_set_conv_nth intro!: bexI[rotated]) moreover from * have "lang n (PLUS rs) \ \ (lang n ` {\rderivs (replicate m as) r\ |m. True})" unfolding invar_rderiv_and_add_def by (fastforce simp: in_set_conv_nth) ultimately show "lang n (PLUS rs) = \ (lang n ` {\rderivs (replicate m as) r\ |m. True})" by blast qed lemma length_subset_card: "\finite X; distinct (x # xs); set (x # xs) \ X\ \ length xs < card X" by (metis card_mono distinct_card impossible_Cons not_le_imp_less order_trans) lemma samequot_termination: assumes "while_option fst (rderiv_and_add as) (True, [\r\]) = None" (is "?cl = None") shows "False" proof - let ?D = "{\rderivs (replicate m as) r\ | m . True}" let ?f = "\(b, rs). card ?D + 1 - length rs + (if b then 1 else 0)" have "\st. 
?cl = Some st" apply (rule measure_while_option_Some[of "invar_rderiv_and_add as r" _ _ ?f]) apply (auto simp: invar_rderiv_and_add_init invar_rderiv_and_add_step) apply (auto simp: invar_rderiv_and_add_def Let_def neq_Nil_conv in_set_conv_nth intro!: diff_less_mono2 length_subset_card[OF finite_rderivs_same, simplified]) apply auto [] apply fastforce apply (metis Suc_less_eq nth_Cons_Suc) done with assms show False by auto qed definition "samequot_exec a r = Times (PLUS (snd (the (while_option fst (rderiv_and_add a) (True, [\r\]))))) (Star (Atom (singleton a)))" lemma wf_samequot_exec: "\wf n r; as \ \ n\ \ wf n (samequot_exec as r)" unfolding samequot_exec_def by (cases "while_option fst (rderiv_and_add as) (True, [\r\])") (auto dest: samequot_termination samequot_wf) lemma samequot_exec_samequot: "lang n (samequot_exec as r) = lang n (samequot as r)" unfolding samequot_exec_def samequot_def lang.simps lang_flatten_PLUS[OF finite_rderivs_same] by (cases "while_option fst (rderiv_and_add as) (True, [\r\])") (auto dest: samequot_termination dest!: samequot_soundness[of _ _ _ _ n] simp del: ACI_norm_lang) lemma lang_samequot_exec: "\wf n r; as \ \ n\ \ lang n (samequot_exec as r) = SAMEQUOT as (lang n r)" unfolding samequot_exec_samequot by (rule lang_samequot) end subsection \Suffix and Prefix Languages\ definition Suffix :: "'a lang \ 'a lang" where "Suffix L = {w. \u. u @ w \ L}" definition Prefix :: "'a lang \ 'a lang" where "Prefix L = {w. \u. w @ u \ L}" lemma Prefix_Suffix: "Prefix L = rev ` Suffix (rev ` L)" unfolding Prefix_def Suffix_def by (auto simp: rev_append_invert intro: image_eqI[of _ rev, OF rev_rev_ident[symmetric]] image_eqI[of _ rev, OF rev_append[symmetric]]) definition Root :: "'a lang \ 'a lang" where "Root L = {x . \n > 0. x ^^ n \ L}" definition Cycle :: "'a lang \ 'a lang" where "Cycle L = {u @ w | u w. w @ u \ L}" context embed begin context fixes n :: nat begin definition SUFFIX :: "'b rexp \ 'b rexp" where "SUFFIX r = flatten PLUS {\lderivs w r\| w. wf_word n w}" lemma finite_lderivs_wf: "finite {\lderivs w r\| w. 
wf_word n w}" by (auto intro: finite_subset[OF _ finite_lderivs]) definition PREFIX :: "'b rexp \ 'b rexp" where "PREFIX r = REV (SUFFIX (REV r))" lemma wf_SUFFIX[simp]: "wf n r \ wf n (SUFFIX r)" unfolding SUFFIX_def by (intro iffD2[OF wf_flatten_PLUS[OF finite_lderivs_wf]]) auto lemma lang_SUFFIX[simp]: "wf n r \ lang n (SUFFIX r) = Suffix (lang n r)" unfolding SUFFIX_def Suffix_def using lang_flatten_PLUS[OF finite_lderivs_wf] lang_lderivs wf_lang_wf_word by fastforce lemma wf_PREFIX[simp]: "wf n r \ wf n (PREFIX r)" unfolding PREFIX_def by auto lemma lang_PREFIX[simp]: "wf n r \ lang n (PREFIX r) = Prefix (lang n r)" unfolding PREFIX_def by (auto simp: Prefix_Suffix) end lemma take_drop_CycleI[intro!]: "x \ L \ drop i x @ take i x \ Cycle L" unfolding Cycle_def by fastforce lemma take_drop_CycleI'[intro!]: "drop i x @ take i x \ L \ x \ Cycle L" by (drule take_drop_CycleI[of _ _ "length x - i"]) auto end (*<*) end (*>*) diff --git a/thys/Monad_Memo_DP/example/Min_Ed_Dist0.thy b/thys/Monad_Memo_DP/example/Min_Ed_Dist0.thy --- a/thys/Monad_Memo_DP/example/Min_Ed_Dist0.thy +++ b/thys/Monad_Memo_DP/example/Min_Ed_Dist0.thy @@ -1,411 +1,409 @@ subsection "Minimum Edit Distance" theory Min_Ed_Dist0 imports "HOL-Library.IArray" "HOL-Library.Code_Target_Numeral" "HOL-Library.Product_Lexorder" "HOL-Library.RBT_Mapping" "../state_monad/State_Main" "../heap_monad/Heap_Main" Example_Misc "../util/Tracing" "../util/Ground_Function" begin subsubsection "Misc" text "Executable argmin" fun argmin :: "('a \ 'b::order) \ 'a list \ 'a" where "argmin f [a] = a" | "argmin f (a#as) = (let m = argmin f as in if f a \ f m then a else m)" (* end rm *) (* Ex: Optimization of argmin *) fun argmin2 :: "('a \ 'b::order) \ 'a list \ 'a * 'b" where "argmin2 f [a] = (a, f a)" | "argmin2 f (a#as) = (let fa = f a; (am,m) = argmin2 f as in if fa \ m then (a, fa) else (am,m))" subsubsection "Edit Distance" datatype 'a ed = Copy | Repl 'a | Ins 'a | Del fun edit :: "'a ed list \ 'a list \ 'a list" where "edit (Copy # es) (x # xs) = x # edit es xs" | "edit (Repl a # es) (x # xs) = a # edit es xs" | "edit (Ins a # es) xs = a # edit es xs" | "edit (Del # es) (x # xs) = edit es xs" | "edit (Copy # es) [] = edit es []" | "edit (Repl a # es) [] = edit es []" | "edit (Del # es) [] = edit es []" | "edit [] xs = xs" abbreviation cost where "cost es \ length [e <- es. 
e \ Copy]" subsubsection "Minimum Edit Sequence" fun min_eds :: "'a list \ 'a list \ 'a ed list" where "min_eds [] [] = []" | "min_eds [] (y#ys) = Ins y # min_eds [] ys" | "min_eds (x#xs) [] = Del # min_eds xs []" | "min_eds (x#xs) (y#ys) = argmin cost [Ins y # min_eds (x#xs) ys, Del # min_eds xs (y#ys), (if x=y then Copy else Repl y) # min_eds xs ys]" lemma "min_eds ''vintner'' ''writers'' = [Ins CHR ''w'', Repl CHR ''r'', Copy, Del, Copy, Del, Copy, Copy, Ins CHR ''s'']" by eval (* value "min_eds ''madagascar'' ''bananas''" value "min_eds ''madagascaram'' ''banananas''" *) lemma min_eds_correct: "edit (min_eds xs ys) xs = ys" by (induction xs ys rule: min_eds.induct) auto lemma min_eds_same: "min_eds xs xs = replicate (length xs) Copy" by (induction xs) auto lemma min_eds_eq_Nil_iff: "min_eds xs ys = [] \ xs = [] \ ys = []" by (induction xs ys rule: min_eds.induct) auto lemma min_eds_Nil: "min_eds [] ys = map Ins ys" by (induction ys) auto lemma min_eds_Nil2: "min_eds xs [] = replicate (length xs) Del" by (induction xs) auto lemma if_edit_Nil2: "edit es ([]::'a list) = ys \ length ys \ cost es" apply(induction es "[]::'a list" arbitrary: ys rule: edit.induct) apply auto - apply fastforce -apply fastforce done lemma if_edit_eq_Nil: "edit es xs = [] \ length xs \ cost es" by (induction es xs rule: edit.induct) auto lemma min_eds_minimal: "edit es xs = ys \ cost(min_eds xs ys) \ cost es" proof(induction xs ys arbitrary: es rule: min_eds.induct) case 1 thus ?case by simp next case 2 thus ?case by (auto simp add: min_eds_Nil dest: if_edit_Nil2) next case 3 thus ?case by(auto simp add: min_eds_Nil2 dest: if_edit_eq_Nil) next case 4 show ?case proof (cases "es") case Nil then show ?thesis using "4.prems" by (auto simp: min_eds_same) next case [simp]: (Cons e es') show ?thesis proof (cases e) case Copy thus ?thesis using "4.prems" "4.IH"(3)[of es'] by simp next case (Repl a) thus ?thesis using "4.prems" "4.IH"(3)[of es'] using [[simp_depth_limit=1]] by simp next case (Ins a) thus ?thesis using "4.prems" "4.IH"(1)[of es'] using [[simp_depth_limit=1]] by auto next case Del thus ?thesis using "4.prems" "4.IH"(2)[of es'] using [[simp_depth_limit=1]] by auto qed qed qed subsubsection "Computing the Minimum Edit Distance" fun min_ed :: "'a list \ 'a list \ nat" where "min_ed [] [] = 0" | "min_ed [] (y#ys) = 1 + min_ed [] ys" | "min_ed (x#xs) [] = 1 + min_ed xs []" | "min_ed (x#xs) (y#ys) = Min {1 + min_ed (x#xs) ys, 1 + min_ed xs (y#ys), (if x=y then 0 else 1) + min_ed xs ys}" lemma min_ed_min_eds: "min_ed xs ys = cost(min_eds xs ys)" apply(induction xs ys rule: min_ed.induct) apply (auto split!: if_splits) done lemma "min_ed ''madagascar'' ''bananas'' = 6" by eval (* value "min_ed ''madagascaram'' ''banananas''" *) text "Exercise: Optimization of the Copy case" fun min_eds2 :: "'a list \ 'a list \ 'a ed list" where "min_eds2 [] [] = []" | "min_eds2 [] (y#ys) = Ins y # min_eds2 [] ys" | "min_eds2 (x#xs) [] = Del # min_eds2 xs []" | "min_eds2 (x#xs) (y#ys) = (if x=y then Copy # min_eds2 xs ys else argmin cost [Ins y # min_eds2 (x#xs) ys, Del # min_eds2 xs (y#ys), Repl y # min_eds2 xs ys])" value "min_eds2 ''madagascar'' ''bananas''" lemma cost_Copy_Del: "cost(min_eds xs ys) \ cost (min_eds xs (x#ys)) + 1" apply(induction xs ys rule: min_eds.induct) apply(auto simp del: filter_True filter_False split!: if_splits) done lemma cost_Copy_Ins: "cost(min_eds xs ys) \ cost (min_eds (x#xs) ys) + 1" apply(induction xs ys rule: min_eds.induct) apply(auto simp del: filter_True filter_False split!: if_splits) done 
lemma "cost(min_eds2 xs ys) = cost(min_eds xs ys)" proof(induction xs ys rule: min_eds2.induct) case (4 x xs y ys) thus ?case apply (auto split!: if_split) apply (metis (mono_tags, lifting) Suc_eq_plus1 Suc_leI cost_Copy_Del cost_Copy_Ins le_imp_less_Suc le_neq_implies_less not_less) apply (metis Suc_eq_plus1 cost_Copy_Del le_antisym) by (metis Suc_eq_plus1 cost_Copy_Ins le_antisym) qed simp_all lemma "min_eds2 xs ys = min_eds xs ys" oops (* Not proveable because Copy comes last in min_eds but first in min_eds2. Can reorder, but the proof still requires the same two lemmas cost_*_* above. *) subsubsection "Indexing" text "Indexing lists" context fixes xs ys :: "'a list" fixes m n :: nat begin function (sequential) min_ed_ix' :: "nat * nat \ nat" where "min_ed_ix' (i,j) = (if i \ m then if j \ n then 0 else 1 + min_ed_ix' (i,j+1) else if j \ n then 1 + min_ed_ix' (i+1, j) else Min {1 + min_ed_ix' (i,j+1), 1 + min_ed_ix' (i+1, j), (if xs!i = ys!j then 0 else 1) + min_ed_ix' (i+1,j+1)})" by pat_completeness auto termination by(relation "measure(\(i,j). (m - i) + (n - j))") auto declare min_ed_ix'.simps[simp del] end lemma min_ed_ix'_min_ed: "min_ed_ix' xs ys (length xs) (length ys) (i, j) = min_ed (drop i xs) (drop j ys)" apply(induction "(i,j)" arbitrary: i j rule: min_ed_ix'.induct[of "length xs" "length ys"]) apply(subst min_ed_ix'.simps) apply(simp add: Cons_nth_drop_Suc[symmetric]) done text "Indexing functions" context fixes xs ys :: "nat \ 'a" fixes m n :: nat begin function (sequential) min_ed_ix :: "nat \ nat \ nat" where "min_ed_ix (i, j) = (if i \ m then if j \ n then 0 else n-j else if j \ n then m-i else min_list [1 + min_ed_ix (i, j+1), 1 + min_ed_ix (i+1, j), (if xs i = ys j then 0 else 1) + min_ed_ix (i+1, j+1)])" by pat_completeness auto termination by(relation "measure(\(i,j). (m - i) + (n - j))") auto subsubsection \Functional Memoization\ memoize_fun min_ed_ix\<^sub>m: min_ed_ix with_memory dp_consistency_mapping monadifies (state) min_ed_ix.simps thm min_ed_ix\<^sub>m'.simps memoize_correct by memoize_prover print_theorems lemmas [code] = min_ed_ix\<^sub>m.memoized_correct declare min_ed_ix.simps[simp del] subsubsection \Imperative Memoization\ context fixes mem :: "nat ref \ nat ref \ nat option array ref \ nat option array ref" assumes mem_is_init: "mem = result_of (init_state (n + 1) m (m + 1)) Heap.empty" begin interpretation iterator "\ (x, y). x \ m \ y \ n \ x > 0" "\ (x, y). if y > 0 then (x, y - 1) else (x - 1, n)" "\ (x, y). (m - x) * (n + 1) + (n - y)" by (rule table_iterator_down) lemma [intro]: "dp_consistency_heap_array_pair' (n + 1) fst snd id m (m + 1) mem" by (standard; simp add: mem_is_init injective_def) lemma [intro]: "dp_consistency_heap_array_pair_iterator (n + 1) fst snd id m (m + 1) mem (\ (x, y). if y > 0 then (x, y - 1) else (x - 1, n)) (\ (x, y). (m - x) * (n + 1) + (n - y)) (\ (x, y). x \ m \ y \ n \ x > 0) " by (standard; simp add: mem_is_init injective_def) memoize_fun min_ed_ix\<^sub>h: min_ed_ix with_memory (default_proof) dp_consistency_heap_array_pair_iterator where size = "n + 1" and key1="fst :: nat \ nat \ nat" and key2="snd :: nat \ nat \ nat" and k1="m :: nat" and k2="m + 1 :: nat" and to_index = "id :: nat \ nat" and mem = mem and cnt = "\ (x, y). x \ m \ y \ n \ x > 0" and nxt = "\ (x::nat, y). if y > 0 then (x, y - 1) else (x - 1, n)" and sizef = "\ (x, y). 
(m - x) * (n + 1) + (n - y)" monadifies (heap) min_ed_ix.simps memoize_correct by memoize_prover lemmas memoized_empty = min_ed_ix\<^sub>h.memoized_empty[OF min_ed_ix\<^sub>h.consistent_DP_iter_and_compute[OF min_ed_ix\<^sub>h.crel]] lemmas iter_heap_unfold = iter_heap_unfold end (* Fixed Memory *) end subsubsection \Test Cases\ abbreviation "slice xs i j \ map xs [i..Functional Test Cases\ definition "min_ed_list xs ys = min_ed_ix (\i. xs!i) (\i. ys!i) (length xs) (length ys) (0,0)" lemma "min_ed_list ''madagascar'' ''bananas'' = 6" by eval definition "min_ed_ia xs ys = (let a = IArray xs; b = IArray ys in min_ed_ix (\i. a!!i) (\i. b!!i) (length xs) (length ys) (0,0))" lemma "min_ed_ia ''madagascar'' ''bananas'' = 6" by eval text \Extracting an Executable Constant for the Imperative Implementation\ ground_function min_ed_ix\<^sub>h'_impl: min_ed_ix\<^sub>h'.simps termination by(relation "measure(\(xs, ys, m, n, mem, i, j). (m - i) + (n - j))") auto lemmas [simp del] = min_ed_ix\<^sub>h'_impl.simps min_ed_ix\<^sub>h'.simps lemma min_ed_ix\<^sub>h'_impl_def: includes heap_monad_syntax fixes m n :: nat fixes mem :: "nat ref \ nat ref \ nat option array ref \ nat option array ref" assumes mem_is_init: "mem = result_of (init_state (n + 1) m (m + 1)) Heap.empty" shows "min_ed_ix\<^sub>h'_impl xs ys m n mem = min_ed_ix\<^sub>h' xs ys m n mem" proof - have "min_ed_ix\<^sub>h'_impl xs ys m n mem (i, j) = min_ed_ix\<^sub>h' xs ys m n mem (i, j)" for i j apply (induction rule: min_ed_ix\<^sub>h'.induct[OF mem_is_init]) apply (subst min_ed_ix\<^sub>h'_impl.simps) apply (subst min_ed_ix\<^sub>h'.simps[OF mem_is_init]) apply (solve_cong simp) done then show ?thesis by auto qed definition "iter_min_ed_ix xs ys m n mem = iterator_defs.iter_heap (\ (x, y). x \ m \ y \ n \ x > 0) (\ (x, y). if y > 0 then (x, y - 1) else (x - 1, n)) (min_ed_ix\<^sub>h'_impl xs ys m n mem) " lemma iter_min_ed_ix_unfold[code]: "iter_min_ed_ix xs ys m n mem = (\ (i, j). (if i > 0 \ i \ m \ j \ n then do { min_ed_ix\<^sub>h'_impl xs ys m n mem (i, j); iter_min_ed_ix xs ys m n mem (if j > 0 then (i, j - 1) else (i - 1, n)) } else Heap_Monad.return ()))" unfolding iter_min_ed_ix_def by (rule ext) (safe, simp add: iter_heap_unfold) definition "min_ed_ix_impl xs ys m n i j = do { mem \ (init_state (n + 1) (m::nat) (m + 1) :: (nat ref \ nat ref \ nat option array ref \ nat option array ref) Heap); iter_min_ed_ix xs ys m n mem (m, n); min_ed_ix\<^sub>h'_impl xs ys m n mem (i, j) }" lemma bf_impl_correct: "min_ed_ix xs ys m n (i, j) = result_of (min_ed_ix_impl xs ys m n i j) Heap.empty" using memoized_empty[OF HOL.refl, of xs ys m n "(i, j)" "\ _. (m, n)"] by (simp add: execute_bind_success[OF succes_init_state] min_ed_ix_impl_def min_ed_ix\<^sub>h'_impl_def iter_min_ed_ix_def ) text \Imperative Test Case\ definition "min_ed_ia\<^sub>h xs ys = (let a = IArray xs; b = IArray ys in min_ed_ix_impl (\i. a!!i) (\i. 
b!!i) (length xs) (length ys) 0 0)" definition "test_case = min_ed_ia\<^sub>h ''madagascar'' ''bananas''" export_code min_ed_ix in SML module_name Test code_reflect Test functions test_case text \One can see a trace of the calls to the memory in the output\ ML \Test.test_case ()\ end diff --git a/thys/Monad_Memo_DP/heap_monad/Memory_Heap.thy b/thys/Monad_Memo_DP/heap_monad/Memory_Heap.thy --- a/thys/Monad_Memo_DP/heap_monad/Memory_Heap.thy +++ b/thys/Monad_Memo_DP/heap_monad/Memory_Heap.thy @@ -1,924 +1,924 @@ subsection \Heap Memory Implementations\ theory Memory_Heap imports State_Heap DP_CRelVH Pair_Memory "HOL-Eisbach.Eisbach" "../Index" begin text \Move\ abbreviation "result_of c h \ fst (the (execute c h))" abbreviation "heap_of c h \ snd (the (execute c h))" lemma map_emptyI: "m \\<^sub>m Map.empty" if "\ x. m x = None" using that unfolding map_le_def by auto lemma result_of_return[simp]: "result_of (Heap_Monad.return x) h = x" by (simp add: execute_simps) lemma get_result_of_lookup: "result_of (!r) heap = x" if "Ref.get heap r = x" using that by (auto simp: execute_simps) context fixes size :: nat and to_index :: "('k2 :: heap) \ nat" begin definition "mem_empty = (Array.new size (None :: ('v :: heap) option))" lemma success_empty[intro]: "success mem_empty heap" unfolding mem_empty_def by (auto intro: success_intros) lemma length_mem_empty: "Array.length (heap_of (mem_empty:: (('b :: heap) option array) Heap) h) (result_of (mem_empty :: ('b option array) Heap) h) = size" unfolding mem_empty_def by (auto simp: execute_simps Array.length_alloc) lemma nth_mem_empty: "result_of (Array.nth (result_of (mem_empty :: ('b option array) Heap) h) i) (heap_of (mem_empty :: (('b :: heap) option array) Heap) h) = None" if "i < size" apply (subst execute_nth(1)) apply (simp add: length_mem_empty that) apply (simp add: execute_simps mem_empty_def Array.get_alloc that) done context fixes mem :: "('v :: heap) option array" begin definition "mem_lookup k = (let i = to_index k in if i < size then Array.nth mem i else return None )" definition "mem_update k v = (let i = to_index k in if i < size then (Array.upd i (Some v) mem \ (\ _. return ())) else return () ) " context assumes injective: "injective size to_index" begin interpretation heap_correct "\heap. Array.length heap mem = size" mem_update mem_lookup apply standard subgoal lookup_inv unfolding State_Heap.lift_p_def mem_lookup_def by (simp add: Let_def execute_simps) subgoal update_inv unfolding State_Heap.lift_p_def mem_update_def by (simp add: Let_def execute_simps) subgoal for k heap unfolding heap_mem_defs.map_of_heap_def map_le_def mem_lookup_def by (auto simp: execute_simps Let_def split: if_split_asm) subgoal for heap k unfolding heap_mem_defs.map_of_heap_def map_le_def mem_lookup_def mem_update_def apply (auto simp: execute_simps Let_def length_def split: if_split_asm) apply (subst (asm) nth_list_update_neq) using injective[unfolded injective_def] apply auto done done lemmas mem_heap_correct = heap_correct_axioms context assumes [simp]: "mem = result_of mem_empty Heap.empty" begin interpretation heap_correct_empty "\heap. Array.length heap mem = size" mem_update mem_lookup "heap_of (mem_empty :: 'v option array Heap) Heap.empty" apply standard subgoal apply (rule map_emptyI) unfolding map_of_heap_def mem_lookup_def by (auto simp: Let_def nth_mem_empty) subgoal by (simp add: length_mem_empty) done lemmas array_heap_emptyI = heap_correct_empty_axioms context fixes dp :: "'k2 \ 'v" begin interpretation dp_consistency_heap_empty "\heap. 
Array.length heap mem = size" mem_update mem_lookup dp "heap_of (mem_empty :: 'v option array Heap) Heap.empty" by standard lemmas array_consistentI = dp_consistency_heap_empty_axioms end end (* Empty Memory *) end (* Injectivity *) end (* Fixed array *) lemma execute_bind_success': assumes "success f h" "execute (f \ g) h = Some (y, h'')" obtains x h' where "execute f h = Some (x, h')" "execute (g x) h' = Some (y, h'')" using assms by (auto simp: execute_simps elim: successE) lemma success_bind_I: assumes "success f h" and "\ x h'. execute f h = Some (x, h') \ success (g x) h'" shows "success (f \ g) h" by (rule successE[OF assms(1)]) (auto elim: assms(2) intro: success_bind_executeI) definition "alloc_pair a b \ do { r1 \ ref a; r2 \ ref b; return (r1, r2) }" lemma alloc_pair_alloc: "Ref.get heap' r1 = a" "Ref.get heap' r2 = b" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis Ref.get_alloc fst_conv get_alloc_neq next_present present_alloc_neq snd_conv)+ lemma alloc_pairD1: "r =!= r1 \ r =!= r2 \ Ref.present heap' r" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" "Ref.present heap r" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis next_fresh noteq_I Ref.present_alloc snd_conv)+ lemma alloc_pairD2: "r1 =!= r2 \ Ref.present heap' r2 \ Ref.present heap' r1" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis next_fresh next_present noteq_I Ref.present_alloc snd_conv)+ lemma alloc_pairD3: "Array.present heap' r" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" "Array.present heap r" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis array_present_alloc snd_conv) lemma alloc_pairD4: "Ref.get heap' r = x" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" "Ref.get heap r = x" "Ref.present heap r" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis Ref.not_present_alloc Ref.present_alloc get_alloc_neq noteq_I snd_conv) lemma alloc_pair_array_get: "Array.get heap' r = x" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" "Array.get heap r = x" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis array_get_alloc snd_conv) lemma alloc_pair_array_length: "Array.length heap' r = Array.length heap r" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" using that unfolding alloc_pair_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_refI]) (metis Ref.length_alloc snd_conv) lemma alloc_pair_nth: "result_of (Array.nth r i) heap' = result_of (Array.nth r i) heap" if "execute (alloc_pair a b) heap = Some ((r1, r2), heap')" using alloc_pair_array_get[OF that(1) HOL.refl, of r] alloc_pair_array_length[OF that(1), of r] by (cases "(\h. 
i < Array.length h r) heap"; simp add: execute_simps Array.nth_def) lemma succes_alloc_pair[intro]: "success (alloc_pair a b) heap" unfolding alloc_pair_def by (auto intro: success_intros success_bind_I) definition "init_state_inner k1 k2 m1 m2 \ do { (k_ref1, k_ref2) \ alloc_pair k1 k2; (m_ref1, m_ref2) \ alloc_pair m1 m2; return (k_ref1, k_ref2, m_ref1, m_ref2) } " lemma init_state_inner_alloc: assumes "execute (init_state_inner k1 k2 m1 m2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "Ref.get heap' k_ref1 = k1" "Ref.get heap' k_ref2 = k2" "Ref.get heap' m_ref1 = m1" "Ref.get heap' m_ref2 = m2" using assms unfolding init_state_inner_def by (auto simp: execute_simps elim!: execute_bind_success'[OF succes_alloc_pair]) (auto intro: alloc_pair_alloc dest: alloc_pairD2 elim: alloc_pairD4) lemma init_state_inner_distinct: assumes "execute (init_state_inner k1 k2 m1 m2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "m_ref1 =!= m_ref2 \ m_ref1 =!= k_ref1 \ m_ref1 =!= k_ref2 \ m_ref2 =!= k_ref1 \ m_ref2 =!= k_ref2 \ k_ref1 =!= k_ref2" using assms unfolding init_state_inner_def by (auto simp: execute_simps elim!: execute_bind_success'[OF succes_alloc_pair]) (blast dest: alloc_pairD1 alloc_pairD2 intro: noteq_sym)+ lemma init_state_inner_present: assumes "execute (init_state_inner k1 k2 m1 m2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "Ref.present heap' k_ref1" "Ref.present heap' k_ref2" "Ref.present heap' m_ref1" "Ref.present heap' m_ref2" using assms unfolding init_state_inner_def by (auto simp: execute_simps elim!: execute_bind_success'[OF succes_alloc_pair]) (blast dest: alloc_pairD1 alloc_pairD2)+ lemma inite_state_inner_present': assumes "execute (init_state_inner k1 k2 m1 m2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" "Array.present heap a" shows "Array.present heap' a" using assms unfolding init_state_inner_def by (auto simp: execute_simps elim!: execute_bind_success'[OF succes_alloc_pair] alloc_pairD3) lemma succes_init_state_inner[intro]: "success (init_state_inner k1 k2 m1 m2) heap" unfolding init_state_inner_def by (auto 4 3 intro: success_intros success_bind_I) lemma init_state_inner_nth: "result_of (Array.nth r i) heap' = result_of (Array.nth r i) heap" if "execute (init_state_inner k1 k2 m1 m2) heap = Some ((r1, r2), heap')" using that unfolding init_state_inner_def by (auto simp: execute_simps alloc_pair_nth elim!: execute_bind_success'[OF succes_alloc_pair]) definition "init_state k1 k2 \ do { m1 \ mem_empty; m2 \ mem_empty; init_state_inner k1 k2 m1 m2 }" lemma succes_init_state[intro]: "success (init_state k1 k2) heap" unfolding init_state_def by (auto intro: success_intros success_bind_I) definition "inv_distinct k_ref1 k_ref2 m_ref1 m_ref2 \ m_ref1 =!= m_ref2 \ m_ref1 =!= k_ref1 \ m_ref1 =!= k_ref2 \ m_ref2 =!= k_ref1 \ m_ref2 =!= k_ref2 \ k_ref1 =!= k_ref2 " lemma init_state_distinct: assumes "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "inv_distinct k_ref1 k_ref2 m_ref1 m_ref2" using assms unfolding init_state_def inv_distinct_def by (elim execute_bind_success'[OF success_empty] init_state_inner_distinct) lemma init_state_present: assumes "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "Ref.present heap' k_ref1" "Ref.present heap' k_ref2" "Ref.present heap' m_ref1" "Ref.present heap' m_ref2" using assms unfolding init_state_def by (auto simp: execute_simps elim!: execute_bind_success'[OF success_empty] dest: 
init_state_inner_present ) lemma empty_present: "Array.present h' x" if "execute mem_empty heap = Some (x, h')" using that unfolding mem_empty_def by (auto simp: execute_simps) (metis Array.present_alloc fst_conv snd_conv) lemma empty_present': "Array.present h' a" if "execute mem_empty heap = Some (x, h')" "Array.present heap a" using that unfolding mem_empty_def by (auto simp: execute_simps Array.present_def Array.alloc_def Array.set_def Let_def) lemma init_state_present2: assumes "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "Array.present heap' (Ref.get heap' m_ref1)" "Array.present heap' (Ref.get heap' m_ref2)" using assms unfolding init_state_def by (auto 4 3 simp: execute_simps init_state_inner_alloc elim!: execute_bind_success'[OF success_empty] dest: inite_state_inner_present' empty_present empty_present' ) lemma init_state_neq: assumes "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "Ref.get heap' m_ref1 =!!= Ref.get heap' m_ref2" using assms unfolding init_state_def by (auto 4 3 simp: execute_simps init_state_inner_alloc elim!: execute_bind_success'[OF success_empty] dest: inite_state_inner_present' empty_present empty_present' ) (metis empty_present execute_new fst_conv mem_empty_def option.inject present_alloc_noteq) lemma present_alloc_get: "Array.get heap' a = Array.get heap a" if "Array.alloc xs heap = (a', heap')" "Array.present heap a" using that by (auto simp: Array.alloc_def Array.present_def Array.get_def Let_def Array.set_def) lemma init_state_length: assumes "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "Array.length heap' (Ref.get heap' m_ref1) = size" "Array.length heap' (Ref.get heap' m_ref2) = size" using assms unfolding init_state_def apply (auto simp: execute_simps init_state_inner_alloc elim!: execute_bind_success'[OF success_empty] dest: inite_state_inner_present' empty_present empty_present' ) apply (auto simp: execute_simps init_state_inner_def alloc_pair_def mem_empty_def Array.length_def elim!: execute_bind_success'[OF success_refI] ) apply (metis Array.alloc_def Array.get_set_eq Array.present_alloc array_get_alloc fst_conv length_replicate present_alloc_get snd_conv )+ done context fixes key1 :: "'k \ ('k1 :: heap)" and key2 :: "'k \ 'k2" and m_ref1 m_ref2 :: "('v :: heap) option array ref" and k_ref1 k_ref2 :: "('k1 :: heap) ref" begin text \We assume that look-ups happen on the older row, so this is biased towards the second entry.\ definition "lookup_pair k = do { let k' = key1 k; k2 \ !k_ref2; if k' = k2 then do { m2 \ !m_ref2; mem_lookup m2 (key2 k) } else do { k1 \ !k_ref1; if k' = k1 then do { m1 \ !m_ref1; mem_lookup m1 (key2 k) } else return None } } " text \We assume that updates happen on the newer row, so this is biased towards the first entry.\ definition "update_pair k v = do { let k' = key1 k; k1 \ !k_ref1; if k' = k1 then do { m \ !m_ref1; mem_update m (key2 k) v } else do { k2 \ !k_ref2; if k' = k2 then do { m \ !m_ref2; mem_update m (key2 k) v } else do { do { k1 \ !k_ref1; m \ mem_empty; m1 \ !m_ref1; k_ref2 := k1; k_ref1 := k'; m_ref2 := m1; m_ref1 := m } ; m \ !m_ref1; mem_update m (key2 k) v } } } " definition "inv_pair_weak heap = ( let m1 = Ref.get heap m_ref1; m2 = Ref.get heap m_ref2 in Array.length heap m1 = size \ Array.length heap m2 = size \ Ref.present heap k_ref1 \ Ref.present heap k_ref2 \ Ref.present heap m_ref1 \ Ref.present heap m_ref2 \ Array.present heap m1 \ Array.present heap m2 \ m1 =!!= 
m2 )" (* TODO: Remove? *) definition "inv_pair heap \ inv_pair_weak heap \ inv_distinct k_ref1 k_ref2 m_ref1 m_ref2" lemma init_state_inv: assumes "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" shows "inv_pair_weak heap'" using assms unfolding inv_pair_weak_def Let_def by (auto intro: init_state_present init_state_present2 init_state_neq init_state_length init_state_distinct ) lemma inv_pair_lengthD1: "Array.length heap (Ref.get heap m_ref1) = size" if "inv_pair_weak heap" using that unfolding inv_pair_weak_def by (auto simp: Let_def) lemma inv_pair_lengthD2: "Array.length heap (Ref.get heap m_ref2) = size" if "inv_pair_weak heap" using that unfolding inv_pair_weak_def by (auto simp: Let_def) lemma inv_pair_presentD: "Array.present heap (Ref.get heap m_ref1)" "Array.present heap (Ref.get heap m_ref2)" if "inv_pair_weak heap" using that unfolding inv_pair_weak_def by (auto simp: Let_def) lemma inv_pair_presentD2: "Ref.present heap m_ref1" "Ref.present heap m_ref2" "Ref.present heap k_ref1" "Ref.present heap k_ref2" if "inv_pair_weak heap" using that unfolding inv_pair_weak_def by (auto simp: Let_def) lemma inv_pair_not_eqD: "Ref.get heap m_ref1 =!!= Ref.get heap m_ref2" if "inv_pair_weak heap" using that unfolding inv_pair_weak_def by (auto simp: Let_def) definition "lookup1 k \ state_of (do {m \ !m_ref1; mem_lookup m k})" definition "lookup2 k \ state_of (do {m \ !m_ref2; mem_lookup m k})" definition "update1 k v \ state_of (do {m \ !m_ref1; mem_update m k v})" definition "update2 k v \ state_of (do {m \ !m_ref2; mem_update m k v})" definition "move12 k \ state_of (do { k1 \ !k_ref1; m \ mem_empty; m1 \ !m_ref1; k_ref2 := k1; k_ref1 := k; m_ref2 := m1; m_ref1 := m }) " definition "get_k1 \ state_of (!k_ref1)" definition "get_k2 \ state_of (!k_ref2)" lemma run_state_state_of[simp]: "State_Monad.run_state (state_of p) m = the (execute p m)" unfolding state_of_def by simp context assumes injective: "injective size to_index" begin context assumes inv_distinct: "inv_distinct k_ref1 k_ref2 m_ref1 m_ref2" begin lemma disjoint[simp]: "m_ref1 =!= m_ref2" "m_ref1 =!= k_ref1" "m_ref1 =!= k_ref2" "m_ref2 =!= k_ref1" "m_ref2 =!= k_ref2" "k_ref1 =!= k_ref2" using inv_distinct unfolding inv_distinct_def by auto lemmas [simp] = disjoint[THEN noteq_sym] lemma [simp]: "Array.get (snd (Array.alloc xs heap)) a = Array.get heap a" if "Array.present heap a" using that unfolding Array.alloc_def Array.present_def apply (simp add: Let_def) apply (subst Array.get_set_neq) subgoal by (simp add: Array.noteq_def) subgoal unfolding Array.get_def by simp done lemma [simp]: "Ref.get (snd (Array.alloc xs heap)) r = Ref.get heap r" if "Ref.present heap r" using that unfolding Array.alloc_def Ref.present_def by (simp add: Let_def Ref.get_def Array.set_def) lemma alloc_present: "Array.present (snd (Array.alloc xs heap)) a" if "Array.present heap a" using that unfolding Array.present_def Array.alloc_def by (simp add: Let_def Array.set_def) lemma alloc_present': "Ref.present (snd (Array.alloc xs heap)) r" if "Ref.present heap r" using that unfolding Ref.present_def Array.alloc_def by (simp add: Let_def Array.set_def) lemma length_get_upd[simp]: "length (Array.get (Array.update a i x heap) r) = length (Array.get heap r)" unfolding Array.get_def Array.update_def Array.set_def by simp method solve1 = (frule inv_pair_lengthD1, frule inv_pair_lengthD2, frule inv_pair_not_eqD)?, auto split: if_split_asm dest: Array.noteq_sym interpretation pair: pair_mem lookup1 lookup2 update1 update2 move12 
get_k1 get_k2 inv_pair_weak supply [simp] = mem_empty_def state_mem_defs.map_of_def map_le_def move12_def update1_def update2_def lookup1_def lookup2_def get_k1_def get_k2_def mem_update_def mem_lookup_def execute_bind_success[OF success_newI] execute_simps Let_def Array.get_alloc length_def inv_pair_presentD inv_pair_presentD2 Memory_Heap.lookup1_def Memory_Heap.lookup2_def Memory_Heap.mem_lookup_def apply standard apply (solve1; fail)+ subgoal apply (rule lift_pI) unfolding inv_pair_weak_def apply (auto simp: intro: alloc_present alloc_present' elim: present_alloc_noteq[THEN Array.noteq_sym] ) done apply (rule lift_pI, unfold inv_pair_weak_def, auto split: if_split_asm; fail)+ apply (solve1; fail)+ subgoal using injective[unfolded injective_def] by - (solve1, subst (asm) nth_list_update_neq, auto) subgoal using injective[unfolded injective_def] by - (solve1, subst (asm) nth_list_update_neq, auto) apply (solve1; fail)+ done lemmas mem_correct_pair = pair.mem_correct_pair definition "mem_lookup1 k = do {m \ !m_ref1; mem_lookup m k}" definition "mem_lookup2 k = do {m \ !m_ref2; mem_lookup m k}" definition "get_k1' \ !k_ref1" definition "get_k2' \ !k_ref2" definition "update1' k v \ do {m \ !m_ref1; mem_update m k v}" definition "update2' k v \ do {m \ !m_ref2; mem_update m k v}" definition "move12' k \ do { k1 \ !k_ref1; m \ mem_empty; m1 \ !m_ref1; k_ref2 := k1; k_ref1 := k; m_ref2 := m1; m_ref1 := m }" interpretation heap_mem_defs inv_pair_weak lookup_pair update_pair . lemma rel_state_ofI: "rel_state (=) (state_of m) m" if "\ heap. inv_pair_weak heap \ success m heap" "lift_p inv_pair_weak m" using that unfolding rel_state_def by (auto split: option.split intro: lift_p_P'' simp: success_def) lemma inv_pair_iff: "inv_pair_weak = inv_pair" unfolding inv_pair_def using inv_distinct by simp lemma lift_p_inv_pairI: "State_Heap.lift_p inv_pair m" if "State_Heap.lift_p inv_pair_weak m" using that unfolding inv_pair_iff by simp lemma lift_p_success: "State_Heap.lift_p inv_pair_weak m" if "DP_CRelVS.lift_p inv_pair_weak (state_of m)" "\ heap. inv_pair_weak heap \ success m heap" using that unfolding lift_p_def DP_CRelVS.lift_p_def by (auto simp: success_def split: option.split) lemma rel_state_ofI2: "rel_state (=) (state_of m) m" if "\ heap. inv_pair_weak heap \ success m heap" "DP_CRelVS.lift_p inv_pair_weak (state_of m)" using that by (blast intro: rel_state_ofI lift_p_success) context includes lifting_syntax begin lemma [transfer_rule]: "((=) ===> rel_state (=)) move12 move12'" unfolding move12_def move12'_def apply (intro rel_funI) apply simp apply (rule rel_state_ofI2) subgoal by (auto simp: mem_empty_def inv_pair_lengthD1 execute_simps Let_def intro: success_intros intro!: success_bind_I ) subgoal using pair.move12_inv unfolding move12_def . done lemma [transfer_rule]: "((=) ===> rel_state (rel_option (=))) lookup1 mem_lookup1" unfolding lookup1_def mem_lookup1_def apply (intro rel_funI) apply (simp add: option.rel_eq) apply (rule rel_state_ofI2) subgoal by (auto 4 4 simp: mem_lookup_def inv_pair_lengthD1 execute_simps Let_def intro: success_bind_executeI success_returnI Array.success_nthI ) subgoal using pair.lookup_inv(1) unfolding lookup1_def . 
done lemma [transfer_rule]: "((=) ===> rel_state (rel_option (=))) lookup2 mem_lookup2" unfolding lookup2_def mem_lookup2_def apply (intro rel_funI) apply (simp add: option.rel_eq) apply (rule rel_state_ofI2) subgoal by (auto 4 3 simp: mem_lookup_def inv_pair_lengthD2 execute_simps Let_def intro: success_intros intro!: success_bind_I ) subgoal using pair.lookup_inv(2) unfolding lookup2_def . done lemma [transfer_rule]: "rel_state (=) get_k1 get_k1'" unfolding get_k1_def get_k1'_def apply (rule rel_state_ofI2) subgoal by (auto intro: success_lookupI) subgoal unfolding get_k1_def[symmetric] by (auto dest: pair.get_state(1) intro: lift_pI) done lemma [transfer_rule]: "rel_state (=) get_k2 get_k2'" unfolding get_k2_def get_k2'_def apply (rule rel_state_ofI2) subgoal by (auto intro: success_lookupI) subgoal unfolding get_k2_def[symmetric] by (auto dest: pair.get_state(2) intro: lift_pI) done lemma [transfer_rule]: "((=) ===> (=) ===> rel_state (=)) update1 update1'" unfolding update1_def update1'_def apply (intro rel_funI) apply simp apply (rule rel_state_ofI2) subgoal by (auto 4 3 simp: mem_update_def inv_pair_lengthD1 execute_simps Let_def intro: success_intros intro!: success_bind_I ) subgoal using pair.update_inv(1) unfolding update1_def . done lemma [transfer_rule]: "((=) ===> (=) ===> rel_state (=)) update2 update2'" unfolding update2_def update2'_def apply (intro rel_funI) apply simp apply (rule rel_state_ofI2) subgoal by (auto 4 3 simp: mem_update_def inv_pair_lengthD2 execute_simps Let_def intro: success_intros intro!: success_bind_I ) subgoal using pair.update_inv(2) unfolding update2_def . done lemma [transfer_rule]: "((=) ===> rel_state (rel_option (=))) lookup1 mem_lookup1" unfolding lookup1_def mem_lookup1_def apply (intro rel_funI) apply (simp add: option.rel_eq) apply (rule rel_state_ofI2) subgoal by (auto 4 3 simp: mem_lookup_def inv_pair_lengthD1 execute_simps Let_def intro: success_intros intro!: success_bind_I ) subgoal using pair.lookup_inv(1) unfolding lookup1_def . done lemma rel_state_lookup: "((=) ===> rel_state (=)) pair.lookup_pair lookup_pair" unfolding pair.lookup_pair_def lookup_pair_def unfolding mem_lookup1_def[symmetric] mem_lookup2_def[symmetric] get_k2_def[symmetric] get_k2'_def[symmetric] get_k1_def[symmetric] get_k1'_def[symmetric] by transfer_prover lemma rel_state_update: "((=) ===> (=) ===> rel_state (=)) pair.update_pair update_pair" unfolding pair.update_pair_def update_pair_def unfolding move12'_def[symmetric] unfolding update1'_def[symmetric] update2'_def[symmetric] get_k2_def[symmetric] get_k2'_def[symmetric] get_k1_def[symmetric] get_k1'_def[symmetric] by transfer_prover interpretation mem: heap_mem_defs pair.inv_pair lookup_pair update_pair . lemma inv_pairD: "inv_pair_weak heap" if "pair.inv_pair heap" using that unfolding pair.inv_pair_def by (auto simp: Let_def) lemma mem_rel_state_ofI: "mem.rel_state (=) m' m" if "rel_state (=) m' m" "\ heap. 
pair.inv_pair heap \ (case State_Monad.run_state m' heap of (_, heap) \ inv_pair_weak heap \ pair.inv_pair heap)" proof - show ?thesis apply (rule mem.rel_state_intro) subgoal for heap v heap' by (auto elim: rel_state_elim[OF that(1)] dest!: inv_pairD) subgoal premises prems for heap v heap' proof - from prems that(1) have "inv_pair_weak heap'" by (fastforce elim: rel_state_elim dest: inv_pairD) with prems show ?thesis by (auto dest: that(2)) qed done qed lemma mem_rel_state_ofI': "mem.rel_state (=) m' m" if "rel_state (=) m' m" "DP_CRelVS.lift_p pair.inv_pair m'" using that by (auto elim: DP_CRelVS.lift_p_P intro: mem_rel_state_ofI) context assumes keys: "\k k'. key1 k = key1 k' \ key2 k = key2 k' \ k = k'" begin interpretation mem_correct pair.lookup_pair pair.update_pair pair.inv_pair by (rule mem_correct_pair[OF keys]) lemma rel_state_lookup': "((=) ===> mem.rel_state (=)) pair.lookup_pair lookup_pair" apply (intro rel_funI) apply simp apply (rule mem_rel_state_ofI') - subgoal for x y - using rel_state_lookup by (rule rel_funD) (rule HOL.refl) - by (rule lookup_inv) + using rel_state_lookup apply (rule rel_funD) apply (rule refl) + apply (rule lookup_inv) + done lemma rel_state_update': "((=) ===> (=) ===> mem.rel_state (=)) pair.update_pair update_pair" apply (intro rel_funI) apply simp apply (rule mem_rel_state_ofI') subgoal for x y a b using rel_state_update by (blast dest: rel_funD) by (rule update_inv) interpretation heap_correct pair.inv_pair update_pair lookup_pair by (rule mem.mem_correct_heap_correct[OF _ rel_state_lookup' rel_state_update']) standard lemmas heap_correct_pairI = heap_correct_axioms (* TODO: Generalize *) lemma mem_rel_state_resultD: "result_of m heap = fst (run_state m' heap)" if "mem.rel_state (=) m' m" "pair.inv_pair heap" by (metis (mono_tags, lifting) mem.rel_state_elim option.sel that) lemma map_of_heap_eq: "mem.map_of_heap heap = pair.pair.map_of heap" if "pair.inv_pair heap" unfolding mem.map_of_heap_def pair.pair.map_of_def using that by (simp add: mem_rel_state_resultD[OF rel_state_lookup'[THEN rel_funD]]) context fixes k1 k2 heap heap' assumes init: "execute (init_state k1 k2) heap = Some ((k_ref1, k_ref2, m_ref1, m_ref2), heap')" begin lemma init_state_empty1: "pair.mem1.map_of heap' k = None" using init unfolding pair.mem1.map_of_def lookup1_def mem_lookup_def init_state_def by (auto simp: init_state_inner_nth init_state_inner_alloc(3) execute_simps Let_def elim!: execute_bind_success'[OF success_empty]) (metis Array.present_alloc Memory_Heap.length_mem_empty execute_new execute_nth(1) fst_conv length_def mem_empty_def nth_mem_empty option.sel present_alloc_get snd_conv ) lemma init_state_empty2: "pair.mem2.map_of heap' k = None" using init unfolding pair.mem2.map_of_def lookup2_def mem_lookup_def init_state_def by (auto simp: execute_simps init_state_inner_nth init_state_inner_alloc(4) Let_def elim!: execute_bind_success'[OF success_empty] ) (metis fst_conv nth_mem_empty option.sel snd_conv) lemma shows init_state_k1: "result_of (!k_ref1) heap' = k1" and init_state_k2: "result_of (!k_ref2) heap' = k2" using init init_state_inner_alloc by (auto simp: execute_simps init_state_def elim!: execute_bind_success'[OF success_empty]) context assumes neq: "k1 \ k2" begin lemma init_state_inv': "pair.inv_pair heap'" unfolding pair.inv_pair_def apply (auto simp: Let_def) subgoal using init_state_empty1 by simp subgoal using init_state_empty2 by simp subgoal using neq init by (simp add: get_k1_def get_k2_def init_state_k1 init_state_k2) subgoal by (rule 
init_state_inv[OF init]) done lemma init_state_empty: "pair.pair.map_of heap' \\<^sub>m Map.empty" using neq by (intro pair.emptyI init_state_inv' map_emptyI init_state_empty1 init_state_empty2) interpretation heap_correct_empty pair.inv_pair update_pair lookup_pair heap' apply (rule heap_correct_empty.intro) apply (rule heap_correct_pairI) apply standard subgoal by (subst map_of_heap_eq; intro init_state_inv' init_state_empty) subgoal by (rule init_state_inv') done lemmas heap_correct_empty_pairI = heap_correct_empty_axioms context fixes dp :: "'k \ 'v" begin interpretation dp_consistency_heap_empty pair.inv_pair update_pair lookup_pair dp heap' by standard lemmas consistent_empty_pairI = dp_consistency_heap_empty_axioms end (* DP *) end (* Unequal Keys *) end (* Init State *) end (* Keys injective *) end (* Lifting Syntax *) end (* Disjoint *) end (* Injectivity *) end (* Refs *) end (* Key functions & Size *) end (* Theory *) diff --git a/thys/Monad_Memo_DP/heap_monad/Pair_Memory.thy b/thys/Monad_Memo_DP/heap_monad/Pair_Memory.thy --- a/thys/Monad_Memo_DP/heap_monad/Pair_Memory.thy +++ b/thys/Monad_Memo_DP/heap_monad/Pair_Memory.thy @@ -1,668 +1,670 @@ subsection \Pair Memory\ theory Pair_Memory imports "../state_monad/Memory" begin (* XXX Move *) lemma map_add_mono: "(m1 ++ m2) \\<^sub>m (m1' ++ m2')" if "m1 \\<^sub>m m1'" "m2 \\<^sub>m m2'" "dom m1 \ dom m2' = {}" using that unfolding map_le_def map_add_def dom_def by (auto split: option.splits) lemma map_add_upd2: "f(x \ y) ++ g = (f ++ g)(x \ y)" if "dom f \ dom g = {}" "x \ dom g" apply (subst map_add_comm) defer apply simp apply (subst map_add_comm) using that by auto locale pair_mem_defs = fixes lookup1 lookup2 :: "'a \ ('mem, 'v option) state" and update1 update2 :: "'a \ 'v \ ('mem, unit) state" and move12 :: "'k1 \ ('mem, unit) state" and get_k1 get_k2 :: "('mem, 'k1) state" and P :: "'mem \ bool" fixes key1 :: "'k \ 'k1" and key2 :: "'k \ 'a" begin text \We assume that look-ups happen on the older row, so it is biased towards the second entry.\ definition "lookup_pair k = do { let k' = key1 k; k2 \ get_k2; if k' = k2 then lookup2 (key2 k) else do { k1 \ get_k1; if k' = k1 then lookup1 (key2 k) else State_Monad.return None } } " text \We assume that updates happen on the newer row, so it is biased towards the first entry.\ definition "update_pair k v = do { let k' = key1 k; k1 \ get_k1; if k' = k1 then update1 (key2 k) v else do { k2 \ get_k2; if k' = k2 then update2 (key2 k) v else (move12 k' \ update1 (key2 k) v) } } " sublocale pair: state_mem_defs lookup_pair update_pair . sublocale mem1: state_mem_defs lookup1 update1 . sublocale mem2: state_mem_defs lookup2 update2 . definition "inv_pair heap \ let k1 = fst (State_Monad.run_state get_k1 heap); k2 = fst (State_Monad.run_state get_k2 heap) in (\ k \ dom (mem1.map_of heap). \ k'. key1 k' = k1 \ key2 k' = k) \ (\ k \ dom (mem2.map_of heap). \ k'. 
key1 k' = k2 \ key2 k' = k) \ k1 \ k2 \ P heap " definition "map_of1 m k = (if key1 k = fst (State_Monad.run_state get_k1 m) then mem1.map_of m (key2 k) else None)" definition "map_of2 m k = (if key1 k = fst (State_Monad.run_state get_k2 m) then mem2.map_of m (key2 k) else None)" end (* Pair Mem Defs *) locale pair_mem = pair_mem_defs + assumes get_state: "State_Monad.run_state get_k1 m = (k, m') \ m' = m" "State_Monad.run_state get_k2 m = (k, m') \ m' = m" assumes move12_correct: "P m \ State_Monad.run_state (move12 k1) m = (x, m') \ mem1.map_of m' \\<^sub>m Map.empty" "P m \ State_Monad.run_state (move12 k1) m = (x, m') \ mem2.map_of m' \\<^sub>m mem1.map_of m" assumes move12_keys: "State_Monad.run_state (move12 k1) m = (x, m') \ fst (State_Monad.run_state get_k1 m') = k1" "State_Monad.run_state (move12 k1) m = (x, m') \ fst (State_Monad.run_state get_k2 m') = fst (State_Monad.run_state get_k1 m)" assumes move12_inv: "lift_p P (move12 k1)" assumes lookup_inv: "lift_p P (lookup1 k')" "lift_p P (lookup2 k')" assumes update_inv: "lift_p P (update1 k' v)" "lift_p P (update2 k' v)" assumes lookup_keys: "P m \ State_Monad.run_state (lookup1 k') m = (v', m') \ fst (State_Monad.run_state get_k1 m') = fst (State_Monad.run_state get_k1 m)" "P m \ State_Monad.run_state (lookup1 k') m = (v', m') \ fst (State_Monad.run_state get_k2 m') = fst (State_Monad.run_state get_k2 m)" "P m \ State_Monad.run_state (lookup2 k') m = (v', m') \ fst (State_Monad.run_state get_k1 m') = fst (State_Monad.run_state get_k1 m)" "P m \ State_Monad.run_state (lookup2 k') m = (v', m') \ fst (State_Monad.run_state get_k2 m') = fst (State_Monad.run_state get_k2 m)" assumes update_keys: "P m \ State_Monad.run_state (update1 k' v) m = (x, m') \ fst (State_Monad.run_state get_k1 m') = fst (State_Monad.run_state get_k1 m)" "P m \ State_Monad.run_state (update1 k' v) m = (x, m') \ fst (State_Monad.run_state get_k2 m') = fst (State_Monad.run_state get_k2 m)" "P m \ State_Monad.run_state (update2 k' v) m = (x, m') \ fst (State_Monad.run_state get_k1 m') = fst (State_Monad.run_state get_k1 m)" "P m \ State_Monad.run_state (update2 k' v) m = (x, m') \ fst (State_Monad.run_state get_k2 m') = fst (State_Monad.run_state get_k2 m)" assumes lookup_correct: "P m \ mem1.map_of (snd (State_Monad.run_state (lookup1 k') m)) \\<^sub>m (mem1.map_of m)" "P m \ mem2.map_of (snd (State_Monad.run_state (lookup1 k') m)) \\<^sub>m (mem2.map_of m)" "P m \ mem1.map_of (snd (State_Monad.run_state (lookup2 k') m)) \\<^sub>m (mem1.map_of m)" "P m \ mem2.map_of (snd (State_Monad.run_state (lookup2 k') m)) \\<^sub>m (mem2.map_of m)" assumes update_correct: "P m \ mem1.map_of (snd (State_Monad.run_state (update1 k' v) m)) \\<^sub>m (mem1.map_of m)(k' \ v)" "P m \ mem2.map_of (snd (State_Monad.run_state (update2 k' v) m)) \\<^sub>m (mem2.map_of m)(k' \ v)" "P m \ mem2.map_of (snd (State_Monad.run_state (update1 k' v) m)) \\<^sub>m mem2.map_of m" "P m \ mem1.map_of (snd (State_Monad.run_state (update2 k' v) m)) \\<^sub>m mem1.map_of m" begin lemma map_of_le_pair: "pair.map_of m \\<^sub>m map_of1 m ++ map_of2 m" if "inv_pair m" using that unfolding pair.map_of_def map_of1_def map_of2_def unfolding lookup_pair_def inv_pair_def map_of_def map_le_def dom_def map_add_def unfolding State_Monad.bind_def by (auto 4 4 simp: mem2.map_of_def mem1.map_of_def Let_def dest: get_state split: prod.split_asm if_split_asm ) lemma pair_le_map_of: "map_of1 m ++ map_of2 m \\<^sub>m pair.map_of m" if "inv_pair m" using that unfolding pair.map_of_def map_of1_def map_of2_def unfolding 
lookup_pair_def inv_pair_def map_of_def map_le_def dom_def map_add_def unfolding State_Monad.bind_def by (auto simp: mem2.map_of_def mem1.map_of_def State_Monad.run_state_return Let_def dest: get_state split: prod.splits if_split_asm option.split ) lemma map_of_eq_pair: "map_of1 m ++ map_of2 m = pair.map_of m" if "inv_pair m" using that unfolding pair.map_of_def map_of1_def map_of2_def unfolding lookup_pair_def inv_pair_def map_of_def map_le_def dom_def map_add_def unfolding State_Monad.bind_def by (auto 4 4 simp: mem2.map_of_def mem1.map_of_def State_Monad.run_state_return Let_def dest: get_state split: prod.splits option.split ) lemma inv_pair_neq[simp]: False if "inv_pair m" "fst (State_Monad.run_state get_k1 m) = fst (State_Monad.run_state get_k2 m)" using that unfolding inv_pair_def by auto lemma inv_pair_P_D: "P m" if "inv_pair m" using that unfolding inv_pair_def by (auto simp: Let_def) lemma inv_pair_domD[intro]: "dom (map_of1 m) \ dom (map_of2 m) = {}" if "inv_pair m" using that unfolding inv_pair_def map_of1_def map_of2_def by (auto split: if_split_asm) lemma move12_correct1: "map_of1 heap' \\<^sub>m Map.empty" if "State_Monad.run_state (move12 k1) heap = (x, heap')" "P heap" using move12_correct[OF that(2,1)] unfolding map_of1_def by (auto simp: move12_keys map_le_def) lemma move12_correct2: "map_of2 heap' \\<^sub>m map_of1 heap" if "State_Monad.run_state (move12 k1) heap = (x, heap')" "P heap" using move12_correct(2)[OF that(2,1)] that unfolding map_of1_def map_of2_def by (auto simp: move12_keys map_le_def) lemma dom_empty[simp]: "dom (map_of1 heap') = {}" if "State_Monad.run_state (move12 k1) heap = (x, heap')" "P heap" using move12_correct1[OF that] by (auto dest: map_le_implies_dom_le) lemma inv_pair_lookup1: "inv_pair m'" if "State_Monad.run_state (lookup1 k) m = (v, m')" "inv_pair m" using that lookup_inv[of k] inv_pair_P_D[OF \inv_pair m\] unfolding inv_pair_def by (auto 4 4 simp: Let_def lookup_keys dest: lift_p_P lookup_correct[of _ k, THEN map_le_implies_dom_le] ) lemma inv_pair_lookup2: "inv_pair m'" if "State_Monad.run_state (lookup2 k) m = (v, m')" "inv_pair m" using that lookup_inv[of k] inv_pair_P_D[OF \inv_pair m\] unfolding inv_pair_def by (auto 4 4 simp: Let_def lookup_keys dest: lift_p_P lookup_correct[of _ k, THEN map_le_implies_dom_le] ) lemma inv_pair_update1: "inv_pair m'" if "State_Monad.run_state (update1 (key2 k) v) m = (v', m')" "inv_pair m" "fst (State_Monad.run_state get_k1 m) = key1 k" using that update_inv[of "key2 k" v] inv_pair_P_D[OF \inv_pair m\] unfolding inv_pair_def apply (auto simp: Let_def update_keys dest: lift_p_P update_correct[of _ "key2 k" v, THEN map_le_implies_dom_le] ) apply (frule update_correct[of _ "key2 k" v, THEN map_le_implies_dom_le]; auto 13 2; fail) apply (frule update_correct[of _ "key2 k" v, THEN map_le_implies_dom_le]; auto 13 2; fail) done lemma inv_pair_update2: "inv_pair m'" if "State_Monad.run_state (update2 (key2 k) v) m = (v', m')" "inv_pair m" "fst (State_Monad.run_state get_k2 m) = key1 k" using that update_inv[of "key2 k" v] inv_pair_P_D[OF \inv_pair m\] unfolding inv_pair_def apply (auto simp: Let_def update_keys dest: lift_p_P update_correct[of _ "key2 k" v, THEN map_le_implies_dom_le] ) apply (frule update_correct[of _ "key2 k" v, THEN map_le_implies_dom_le]; auto 13 2; fail) apply (frule update_correct[of _ "key2 k" v, THEN map_le_implies_dom_le]; auto 13 2; fail) done lemma inv_pair_move12: "inv_pair m'" if "State_Monad.run_state (move12 k) m = (v', m')" "inv_pair m" "fst (State_Monad.run_state get_k1 m) \ 
k" using that move12_inv[of "k"] inv_pair_P_D[OF \inv_pair m\] unfolding inv_pair_def apply (auto simp: Let_def move12_keys dest: lift_p_P move12_correct[of _ "k", THEN map_le_implies_dom_le] ) apply (blast dest: move12_correct[of _ "k", THEN map_le_implies_dom_le]) done lemma mem_correct_pair: "mem_correct lookup_pair update_pair inv_pair" if injective: "\ k k'. key1 k = key1 k' \ key2 k = key2 k' \ k = k'" proof (standard, goal_cases) case (1 k) \ \Lookup invariant\ show ?case unfolding lookup_pair_def Let_def by (auto 4 4 intro!: lift_pI dest: get_state inv_pair_lookup1 inv_pair_lookup2 simp: State_Monad.bind_def State_Monad.run_state_return split: if_split_asm prod.split_asm ) next case (2 k v) \ \Update invariant\ show ?case unfolding update_pair_def Let_def apply (auto 4 4 intro!: lift_pI intro: inv_pair_update1 inv_pair_update2 dest: get_state simp: State_Monad.bind_def get_state State_Monad.run_state_return split: if_split_asm prod.split_asm )+ apply (elim inv_pair_update1 inv_pair_move12) apply (((subst get_state, assumption)+)?, auto intro: move12_keys dest: get_state; fail)+ done next case (3 m k) { let ?m = "snd (State_Monad.run_state (lookup2 (key2 k)) m)" have "map_of1 ?m \\<^sub>m map_of1 m" by (smt 3 domIff inv_pair_P_D local.lookup_keys lookup_correct map_le_def map_of1_def surjective_pairing) moreover have "map_of2 ?m \\<^sub>m map_of2 m" by (smt 3 domIff inv_pair_P_D local.lookup_keys lookup_correct map_le_def map_of2_def surjective_pairing) moreover have "dom (map_of1 ?m) \ dom (map_of2 m) = {}" using 3 \map_of1 ?m \\<^sub>m map_of1 m\ inv_pair_domD map_le_implies_dom_le by fastforce moreover have "inv_pair ?m" using 3 inv_pair_lookup2 surjective_pairing by metis ultimately have "pair.map_of ?m \\<^sub>m pair.map_of m" apply (subst map_of_eq_pair[symmetric]) defer apply (subst map_of_eq_pair[symmetric]) by (auto intro: 3 map_add_mono) } moreover { let ?m = "snd (State_Monad.run_state (lookup1 (key2 k)) m)" have "map_of1 ?m \\<^sub>m map_of1 m" by (smt 3 domIff inv_pair_P_D local.lookup_keys lookup_correct map_le_def map_of1_def surjective_pairing) moreover have "map_of2 ?m \\<^sub>m map_of2 m" by (smt 3 domIff inv_pair_P_D local.lookup_keys lookup_correct map_le_def map_of2_def surjective_pairing) moreover have "dom (map_of1 ?m) \ dom (map_of2 m) = {}" using 3 \map_of1 ?m \\<^sub>m map_of1 m\ inv_pair_domD map_le_implies_dom_le by fastforce moreover have "inv_pair ?m" using 3 inv_pair_lookup1 surjective_pairing by metis ultimately have "pair.map_of ?m \\<^sub>m pair.map_of m" apply (subst map_of_eq_pair[symmetric]) defer apply (subst map_of_eq_pair[symmetric]) by (auto intro: 3 map_add_mono) } ultimately show ?case by (auto split:if_split prod.split simp: Let_def lookup_pair_def State_Monad.bind_def State_Monad.run_state_return dest: get_state intro: map_le_refl ) next case prems: (4 m k v) let ?m1 = "snd (State_Monad.run_state (update1 (key2 k) v) m)" let ?m2 = "snd (State_Monad.run_state (update2 (key2 k) v) m)" from prems have disjoint: "dom (map_of1 m) \ dom (map_of2 m) = {}" by (simp add: inv_pair_domD) show ?case apply (auto intro: map_le_refl dest: get_state split: prod.split simp: Let_def update_pair_def State_Monad.bind_def State_Monad.run_state_return ) proof goal_cases case (1 m') then have "m' = m" by (rule get_state) from 1 prems have "map_of1 ?m1 \\<^sub>m map_of1 m(k \ v)" by (smt inv_pair_P_D map_le_def map_of1_def surjective_pairing domIff fst_conv fun_upd_apply injective update_correct update_keys ) moreover from prems have "map_of2 ?m1 \\<^sub>m 
map_of2 m" by (smt domIff inv_pair_P_D update_correct update_keys map_le_def map_of2_def surjective_pairing) moreover from prems have "dom (map_of1 ?m1) \ dom (map_of2 m) = {}" by (smt inv_pair_P_D[OF \inv_pair m\] domIff Int_emptyI eq_snd_iff inv_pair_neq map_of1_def map_of2_def update_keys(1) ) moreover from 1 prems have "k \ dom (map_of2 m)" using inv_pair_neq map_of2_def by fastforce moreover from 1 prems have "inv_pair ?m1" using inv_pair_update1 fst_conv surjective_pairing by metis ultimately show "pair.map_of (snd (State_Monad.run_state (update1 (key2 k) v) m')) \\<^sub>m pair.map_of m(k \ v)" unfolding \m' = m\ using disjoint apply (subst map_of_eq_pair[symmetric]) defer apply (subst map_of_eq_pair[symmetric], rule prems) apply (subst map_add_upd2[symmetric]) by (auto intro: map_add_mono) next case (2 k1 m' m'') then have "m' = m" "m'' = m" by (auto dest: get_state) from 2 prems have "map_of2 ?m2 \\<^sub>m map_of2 m(k \ v)" unfolding \m' = m\ \m'' = m\ by (smt inv_pair_P_D map_le_def map_of2_def surjective_pairing domIff fst_conv fun_upd_apply injective update_correct update_keys ) moreover from prems have "map_of1 ?m2 \\<^sub>m map_of1 m" by (smt domIff inv_pair_P_D update_correct update_keys map_le_def map_of1_def surjective_pairing) moreover from 2 have "dom (map_of1 ?m2) \ dom (map_of2 m(k \ v)) = {}" unfolding \m' = m\ by (smt domIff \map_of1 ?m2 \\<^sub>m map_of1 m\ disjoint_iff_not_equal fst_conv fun_upd_apply map_le_def map_of1_def map_of2_def ) moreover from 2 prems have "inv_pair ?m2" unfolding \m' = m\ using inv_pair_update2 fst_conv surjective_pairing by metis ultimately show "pair.map_of (snd (State_Monad.run_state (update2 (key2 k) v) m'')) \\<^sub>m pair.map_of m(k \ v)" unfolding \m' = m\ \m'' = m\ apply (subst map_of_eq_pair[symmetric]) defer apply (subst map_of_eq_pair[symmetric], rule prems) apply (subst map_add_upd[symmetric]) by (rule map_add_mono) next case (3 k1 m1 k2 m2 m3) then have "m1 = m" "m2 = m" by (auto dest: get_state) let ?m3 = "snd (State_Monad.run_state (update1 (key2 k) v) m3)" from 3 prems have "map_of1 ?m3 \\<^sub>m map_of2 m(k \ v)" unfolding \m2 = m\ by (smt inv_pair_P_D map_le_def map_of1_def surjective_pairing domIff fst_conv fun_upd_apply injective inv_pair_move12 move12_correct move12_keys update_correct update_keys ) moreover have "map_of2 ?m3 \\<^sub>m map_of1 m" proof - from prems 3 have "P m" "P m3" unfolding \m1 = m\ \m2 = m\ using inv_pair_P_D[OF prems] by (auto elim: lift_p_P[OF move12_inv]) from 3(3)[unfolded \m2 = m\] have "mem2.map_of ?m3 \\<^sub>m mem1.map_of m" by - (erule map_le_trans[OF update_correct(3)[OF \P m3\] move12_correct(2)[OF \P m\]]) with 3 prems show ?thesis unfolding \m1 = m\ \m2 = m\ map_le_def map_of2_def apply auto apply (frule move12_keys(2), simp) by (metis domI inv_pair_def map_of1_def surjective_pairing inv_pair_move12 move12_keys(2) update_keys(2) ) qed moreover from prems 3 have "dom (map_of1 ?m3) \ dom (map_of1 m) = {}" unfolding \m1 = m\ \m2 = m\ by (smt inv_pair_P_D disjoint_iff_not_equal map_of1_def surjective_pairing domIff fst_conv inv_pair_move12 move12_keys update_keys ) moreover from 3 have "k \ dom (map_of1 m)" by (simp add: domIff map_of1_def) moreover from 3 prems have "inv_pair ?m3" unfolding \m2 = m\ by (metis inv_pair_move12 inv_pair_update1 move12_keys(1) fst_conv surjective_pairing) ultimately show ?case unfolding \m1 = m\ \m2 = m\ using disjoint apply (subst map_of_eq_pair[symmetric]) defer apply (subst map_of_eq_pair[symmetric]) apply (rule prems) apply (subst (2) map_add_comm) defer 
apply (subst map_add_upd2[symmetric]) apply (auto intro: map_add_mono) done qed qed lemma emptyI: assumes "inv_pair m" "mem1.map_of m \\<^sub>m Map.empty" "mem2.map_of m \\<^sub>m Map.empty" shows "pair.map_of m \\<^sub>m Map.empty" using assms by (auto simp: map_of1_def map_of2_def map_le_def map_of_eq_pair[symmetric]) end (* Pair Mem *) datatype ('k, 'v) pair_storage = Pair_Storage 'k 'k 'v 'v context mem_correct_empty begin context fixes key :: "'a \ 'k" begin text \We assume that look-ups happen on the older row, so it is biased towards the second entry.\ definition "lookup_pair k = State (\ mem. ( case mem of Pair_Storage k1 k2 m1 m2 \ let k' = key k in if k' = k2 then case State_Monad.run_state (lookup k) m2 of (v, m) \ (v, Pair_Storage k1 k2 m1 m) else if k' = k1 then case State_Monad.run_state (lookup k) m1 of (v, m) \ (v, Pair_Storage k1 k2 m m2) else (None, mem) ) ) " text \We assume that updates happen on the newer row, so it is biased towards the first entry.\ definition "update_pair k v = State (\ mem. ( case mem of Pair_Storage k1 k2 m1 m2 \ let k' = key k in if k' = k1 then case State_Monad.run_state (update k v) m1 of (_, m) \ ((), Pair_Storage k1 k2 m m2) else if k' = k2 then case State_Monad.run_state (update k v) m2 of (_, m) \ ((),Pair_Storage k1 k2 m1 m) else case State_Monad.run_state (update k v) empty of (_, m) \ ((), Pair_Storage k' k1 m m1) ) ) " interpretation pair: state_mem_defs lookup_pair update_pair . definition "inv_pair p = (case p of Pair_Storage k1 k2 m1 m2 \ key ` dom (map_of m1) \ {k1} \ key ` dom (map_of m2) \ {k2} \ k1 \ k2 \ P m1 \ P m2 )" lemma map_of_le_pair: "pair.map_of (Pair_Storage k1 k2 m1 m2) \\<^sub>m (map_of m1 ++ map_of m2)" if "inv_pair (Pair_Storage k1 k2 m1 m2)" using that unfolding pair.map_of_def unfolding lookup_pair_def inv_pair_def map_of_def map_le_def dom_def map_add_def - by (auto 4 6 split: prod.split_asm if_split_asm option.split simp: Let_def) + apply auto + apply (auto 4 6 split: prod.split_asm if_split_asm option.split simp: Let_def) + done lemma pair_le_map_of: "map_of m1 ++ map_of m2 \\<^sub>m pair.map_of (Pair_Storage k1 k2 m1 m2)" if "inv_pair (Pair_Storage k1 k2 m1 m2)" using that unfolding pair.map_of_def unfolding lookup_pair_def inv_pair_def map_of_def map_le_def dom_def map_add_def by (auto 4 5 split: prod.split_asm if_split_asm option.split simp: Let_def) lemma map_of_eq_pair: "map_of m1 ++ map_of m2 = pair.map_of (Pair_Storage k1 k2 m1 m2)" if "inv_pair (Pair_Storage k1 k2 m1 m2)" using that unfolding pair.map_of_def unfolding lookup_pair_def inv_pair_def map_of_def map_le_def dom_def map_add_def by (auto 4 7 split: prod.split_asm if_split_asm option.split simp: Let_def) lemma inv_pair_neq[simp, dest]: False if "inv_pair (Pair_Storage k k x y)" using that unfolding inv_pair_def by auto lemma inv_pair_P_D1: "P m1" if "inv_pair (Pair_Storage k1 k2 m1 m2)" using that unfolding inv_pair_def by auto lemma inv_pair_P_D2: "P m2" if "inv_pair (Pair_Storage k1 k2 m1 m2)" using that unfolding inv_pair_def by auto lemma inv_pair_domD[intro]: "dom (map_of m1) \ dom (map_of m2) = {}" if "inv_pair (Pair_Storage k1 k2 m1 m2)" using that unfolding inv_pair_def by fastforce lemma mem_correct_pair: "mem_correct lookup_pair update_pair inv_pair" proof (standard, goal_cases) case (1 k) \ \Lookup invariant\ with lookup_inv[of k] show ?case unfolding lookup_pair_def Let_def by (auto intro!: lift_pI split: pair_storage.split_asm if_split_asm prod.split_asm) (auto dest: lift_p_P simp: inv_pair_def, (force dest!: lookup_correct[of _ k] 
map_le_implies_dom_le)+ ) next case (2 k v) \ \Update invariant\ with update_inv[of k v] update_correct[OF P_empty, of k v] P_empty show ?case unfolding update_pair_def Let_def by (auto intro!: lift_pI split: pair_storage.split_asm if_split_asm prod.split_asm) (auto dest: lift_p_P simp: inv_pair_def, (force dest: lift_p_P dest!: update_correct[of _ k v] map_le_implies_dom_le)+ ) next case (3 m k) { fix m1 v1 m1' m2 v2 m2' k1 k2 assume assms: "State_Monad.run_state (lookup k) m1 = (v1, m1')" "State_Monad.run_state (lookup k) m2 = (v2, m2')" "inv_pair (Pair_Storage k1 k2 m1 m2)" from assms have "P m1" "P m2" by (auto intro: inv_pair_P_D1 inv_pair_P_D2) have [intro]: "map_of m1' \\<^sub>m map_of m1" "map_of m2' \\<^sub>m map_of m2" using lookup_correct[OF \P m1\, of k] lookup_correct[OF \P m2\, of k] assms by auto from inv_pair_domD[OF assms(3)] have 1: "dom (map_of m1') \ dom (map_of m2) = {}" by (metis (no_types) \map_of m1' \\<^sub>m map_of m1\ disjoint_iff_not_equal domIff map_le_def) have inv1: "inv_pair (Pair_Storage (key k) k2 m1' m2)" if "k2 \ key k" "k1 = key k" using that \P m1\ \P m2\ unfolding inv_pair_def apply clarsimp apply safe subgoal for x' y using assms(1,3) lookup_correct[OF \P m1\, of k, THEN map_le_implies_dom_le] unfolding inv_pair_def by auto subgoal for x' y using assms(3) unfolding inv_pair_def by fastforce using lookup_inv[of k] assms unfolding lift_p_def by force have inv2: "inv_pair (Pair_Storage k1 (key k) m1 m2')" if "k2 = key k" "k1 \ key k" using that \P m1\ \P m2\ unfolding inv_pair_def apply clarsimp apply safe subgoal for x' y using assms(3) unfolding inv_pair_def by fastforce subgoal for x x' y using assms(2,3) lookup_correct[OF \P m2\, of k, THEN map_le_implies_dom_le] unfolding inv_pair_def by fastforce using lookup_inv[of k] assms unfolding lift_p_def by force have A: "pair.map_of (Pair_Storage (key k) k2 m1' m2) \\<^sub>m pair.map_of (Pair_Storage (key k) k2 m1 m2)" if "k2 \ key k" "k1 = key k" using inv1 assms(3) 1 by (auto intro: map_add_mono map_le_refl simp: that map_of_eq_pair[symmetric]) have B: "pair.map_of (Pair_Storage k1 (key k) m1 m2') \\<^sub>m pair.map_of (Pair_Storage k1 (key k) m1 m2)" if "k2 = key k" "k1 \ key k" using inv2 assms(3) that by (auto intro: map_add_mono map_le_refl simp: map_of_eq_pair[symmetric] dest: inv_pair_domD) note A B } with \inv_pair m\ show ?case by (auto split: pair_storage.split if_split prod.split simp: Let_def lookup_pair_def) next case (4 m k v) { fix m1 v1 m1' m2 v2 m2' m3 k1 k2 assume assms: "State_Monad.run_state (update k v) m1 = ((), m1')" "State_Monad.run_state (update k v) m2 = ((), m2')" "State_Monad.run_state (update k v) empty = ((), m3)" "inv_pair (Pair_Storage k1 k2 m1 m2)" from assms have "P m1" "P m2" by (auto intro: inv_pair_P_D1 inv_pair_P_D2) from assms(3) P_empty update_inv[of k v] have "P m3" unfolding lift_p_def by auto have [intro]: "map_of m1' \\<^sub>m map_of m1(k \ v)" "map_of m2' \\<^sub>m map_of m2(k \ v)" using update_correct[OF \P m1\, of k v] update_correct[OF \P m2\, of k v] assms by auto have "map_of m3 \\<^sub>m map_of empty(k \ v)" using assms(3) update_correct[OF P_empty, of k v] by auto also have "\ \\<^sub>m map_of m2(k \ v)" using empty_correct by (auto elim: map_le_trans intro!: map_le_upd) finally have "map_of m3 \\<^sub>m map_of m2(k \ v)" . 
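(* The fresh memory m3 comes from running the update on the empty memory, so its map carries at most the binding for k; combined with the key invariant on m1 and m2 this yields the domain-disjointness facts 1 and 2 below. *)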
have 1: "dom (map_of m1) \ dom (map_of m2(k \ v)) = {}" if "k1 \ key k" using assms(4) that by (force simp: inv_pair_def) have 2: "dom (map_of m3) \ dom (map_of m1) = {}" if "k1 \ key k" using \local.map_of m3 \\<^sub>m local.map_of empty(k \ v)\ assms(4) that by (fastforce dest!: map_le_implies_dom_le simp: inv_pair_def) have inv: "inv_pair (Pair_Storage (key k) k1 m3 m1)" if "k2 \ key k" "k1 \ key k" using that \P m1\ \P m2\ \P m3\ unfolding inv_pair_def apply clarsimp apply safe subgoal for x x' y using assms(3) update_correct[OF P_empty, of k v, THEN map_le_implies_dom_le] empty_correct by (auto dest: map_le_implies_dom_le) subgoal for x x' y using assms(4) unfolding inv_pair_def by fastforce done have A: "pair.map_of (Pair_Storage (key k) k1 m3 m1) \\<^sub>m pair.map_of (Pair_Storage k1 k2 m1 m2)(k \ v)" if "k2 \ key k" "k1 \ key k" using inv assms(4) \map_of m3 \\<^sub>m map_of m2(k \ v)\ 1 apply (simp add: that map_of_eq_pair[symmetric]) apply (subst map_add_upd[symmetric], subst Map.map_add_comm, rule 2, rule that) by (rule map_add_mono; auto) have inv1: "inv_pair (Pair_Storage (key k) k2 m1' m2)" if "k2 \ key k" "k1 = key k" using that \P m1\ \P m2\ unfolding inv_pair_def apply clarsimp apply safe subgoal for x' y using assms(1,4) update_correct[OF \P m1\, of k v, THEN map_le_implies_dom_le] unfolding inv_pair_def by auto subgoal for x' y using assms(4) unfolding inv_pair_def by fastforce using update_inv[of k v] assms unfolding lift_p_def by force have inv2: "inv_pair (Pair_Storage k1 (key k) m1 m2')" if "k2 = key k" "k1 \ key k" using that \P m1\ \P m2\ unfolding inv_pair_def apply clarsimp apply safe subgoal for x' y using assms(4) unfolding inv_pair_def by fastforce subgoal for x x' y using assms(2,4) update_correct[OF \P m2\, of k v, THEN map_le_implies_dom_le] unfolding inv_pair_def by fastforce using update_inv[of k v] assms unfolding lift_p_def by force have C: "pair.map_of (Pair_Storage (key k) k2 m1' m2) \\<^sub>m pair.map_of (Pair_Storage (key k) k2 m1 m2)(k \ v)" if "k2 \ key k" "k1 = key k" using inv1[OF that] assms(4) \inv_pair m\ by (simp add: that map_of_eq_pair[symmetric]) (subst map_add_upd2[symmetric]; force simp: inv_pair_def intro: map_add_mono map_le_refl) have B: "pair.map_of (Pair_Storage k1 (key k) m1 m2') \\<^sub>m pair.map_of (Pair_Storage k1 (key k) m1 m2)(k \ v)" if "k2 = key k" "k1 \ key k" using inv2[OF that] assms(4) by (simp add: that map_of_eq_pair[symmetric]) (subst map_add_upd[symmetric]; rule map_add_mono; force simp: inv_pair_def) note A B C } with \inv_pair m\ show ?case by (auto split: pair_storage.split if_split prod.split simp: Let_def update_pair_def) qed end (* Key function *) end (* Lookup & Update w/ Empty *) end (* Theory *) diff --git a/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_CommunicationPartners.thy b/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_CommunicationPartners.thy --- a/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_CommunicationPartners.thy +++ b/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_CommunicationPartners.thy @@ -1,152 +1,151 @@ theory SINVAR_CommunicationPartners imports "../TopoS_Helper" begin subsection \SecurityInvariant CommunicationPartners\ text\ Idea of this securityinvariant: Only some nodes can communicate with Master nodes. It constrains who may access master nodes, Master nodes can access the world (except other prohibited master nodes). A node configured as Master has a list of nodes that can access it. 
Also, in order to be able to access a Master node, the sender must be denoted as a node we Care about. By default, all nodes are set to DontCare, thus they cannot access Master nodes. But they can access all other DontCare nodes and Care nodes. TL;DR: An access control list determines who can access a master node. \ datatype 'v node_config = DontCare | Care | Master "'v list" definition default_node_properties :: "'v node_config" where "default_node_properties = DontCare" text\Unrestricted accesses among DontCare nodes!\ fun allowed_flow :: "'v node_config \ 'v \ 'v node_config \ 'v \ bool" where "allowed_flow DontCare _ DontCare _ = True" | "allowed_flow DontCare _ Care _ = True" | "allowed_flow DontCare _ (Master _) _ = False" | "allowed_flow Care _ Care _ = True" | "allowed_flow Care _ DontCare _ = True" | "allowed_flow Care s (Master M) r = (s \ set M)" | "allowed_flow (Master _) s (Master M) r = (s \ set M)" | "allowed_flow (Master _) _ Care _ = True" | "allowed_flow (Master _) _ DontCare _ = True" fun sinvar :: "'v graph \ ('v \ 'v node_config) \ bool" where "sinvar G nP = (\ (s,r) \ edges G. s \ r \ allowed_flow (nP s) s (nP r) r)" definition receiver_violation :: "bool" where "receiver_violation = False" subsubsection \Preliminaries\ lemma sinvar_mono: "SecurityInvariant_withOffendingFlows.sinvar_mono sinvar" apply(simp only: SecurityInvariant_withOffendingFlows.sinvar_mono_def) apply(clarify) by auto interpretation SecurityInvariant_preliminaries where sinvar = sinvar apply unfold_locales apply(frule_tac finite_distinct_list[OF wf_graph.finiteE]) apply(erule_tac exE) apply(rename_tac list_edges) apply(rule_tac ff="list_edges" in SecurityInvariant_withOffendingFlows.mono_imp_set_offending_flows_not_empty[OF sinvar_mono]) apply(auto)[6] apply(auto simp add: SecurityInvariant_withOffendingFlows.is_offending_flows_def graph_ops)[1] apply(fact SecurityInvariant_withOffendingFlows.sinvar_mono_imp_is_offending_flows_mono[OF sinvar_mono]) done subsubsection \ENRnr\ lemma CommunicationPartners_ENRnrSR: "SecurityInvariant_withOffendingFlows.sinvar_all_edges_normal_form_not_refl_SR sinvar allowed_flow" by(simp add: SecurityInvariant_withOffendingFlows.sinvar_all_edges_normal_form_not_refl_SR_def) lemma Unassigned_weakrefl: "\ s r. allowed_flow DontCare s DontCare r" by(simp) lemma Unassigned_botdefault: "\ s r. (nP r) \ DontCare \ \ allowed_flow (nP s) s (nP r) r \ \ allowed_flow DontCare s (nP r) r" apply(rule allI)+ apply(case_tac "nP r") apply(simp_all) apply(case_tac "nP s") apply(simp_all) done lemma "\ allowed_flow DontCare s (Master M) r" by(simp) lemma "\ allowed_flow any s (Master []) r" by(cases any, simp_all) lemma All_to_Unassigned: "\ s r. allowed_flow (nP s) s DontCare r" by (rule allI, rule allI, case_tac "nP s", simp_all) lemma Unassigned_default_candidate: "\ s r. \ allowed_flow (nP s) s (nP r) r \ \ allowed_flow DontCare s (nP r) r" apply(intro allI, rename_tac s r)+ apply(case_tac "nP s") apply(simp_all) apply(case_tac "nP r") apply(simp_all) apply(case_tac "nP r") apply(simp_all) done definition CommunicationPartners_offending_set:: "'v graph \ ('v \ 'v node_config) \ ('v \ 'v) set set" where "CommunicationPartners_offending_set G nP = (if sinvar G nP then {} else { {e \ edges G. 
case e of (e1,e2) \ e1 \ e2 \ \ allowed_flow (nP e1) e1 (nP e2) e2} })" lemma CommunicationPartners_offending_set: "SecurityInvariant_withOffendingFlows.set_offending_flows sinvar = CommunicationPartners_offending_set" apply(simp only: fun_eq_iff ENFnrSR_offending_set[OF CommunicationPartners_ENRnrSR] CommunicationPartners_offending_set_def) apply(rule allI)+ apply(rename_tac G nP) apply(auto) done interpretation CommunicationPartners: SecurityInvariant_ACS where default_node_properties = default_node_properties and sinvar = sinvar rewrites "SecurityInvariant_withOffendingFlows.set_offending_flows sinvar = CommunicationPartners_offending_set" unfolding receiver_violation_def unfolding default_node_properties_def apply unfold_locales apply(rule ballI) apply (rule_tac f="f" in SecurityInvariant_withOffendingFlows.ENFnrSR_fsts_weakrefl_instance[OF CommunicationPartners_ENRnrSR Unassigned_weakrefl Unassigned_botdefault All_to_Unassigned]) apply(simp) apply(simp) apply(erule default_uniqueness_by_counterexample_ACS) apply(rule_tac x="\ nodes={vertex_1,vertex_2}, edges = {(vertex_1,vertex_2)} \" in exI, simp) apply(rule conjI) apply(simp add: wf_graph_def) apply(simp add: CommunicationPartners_offending_set CommunicationPartners_offending_set_def delete_edges_simp2) apply(case_tac otherbot, simp_all) apply(rule_tac x="(\ x. DontCare)(vertex_1 := DontCare, vertex_2 := Master [vertex_1])" in exI, simp) apply(rule_tac x="vertex_1" in exI, simp) apply(simp split: prod.split) - apply(clarify) apply force apply(rename_tac M) (*case Master M*) apply(rule_tac x="(\ x. DontCare)(vertex_1 := DontCare, vertex_2 := (Master (vertex_1#M')))" in exI, simp) apply(simp split: prod.split) apply(clarify) apply force apply(fact CommunicationPartners_offending_set) done lemma TopoS_SubnetsInGW: "SecurityInvariant sinvar default_node_properties receiver_violation" unfolding receiver_violation_def by unfold_locales text\Example:\ lemma "sinvar \nodes = {''db1'', ''db2'', ''h1'', ''h2'', ''foo'', ''bar''}, edges = {(''h1'', ''db1''), (''h2'', ''db1''), (''h1'', ''h2''), (''db1'', ''h1''), (''db1'', ''foo''), (''db1'', ''db2''), (''db1'', ''db1''), (''h1'', ''foo''), (''foo'', ''h1''), (''foo'', ''bar'')}\ (((((\h. default_node_properties)(''h1'' := Care))(''h2'' := Care)) (''db1'' := Master [''h1'', ''h2'']))(''db2'' := Master [''db1'']))" by eval hide_fact (open) sinvar_mono hide_const (open) sinvar receiver_violation default_node_properties end diff --git a/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_SecGwExt.thy b/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_SecGwExt.thy --- a/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_SecGwExt.thy +++ b/thys/Network_Security_Policy_Verification/Security_Invariants/SINVAR_SecGwExt.thy @@ -1,134 +1,131 @@ theory SINVAR_SecGwExt imports "../TopoS_Helper" begin subsection \SecurityInvariant PolEnforcePointExtended\ text \A PolEnforcePoint is an application-level central policy enforcement point. Legacy note: The old verions called it a SecurityGateway. Hosts may belong to a certain domain. Sometimes, a pattern where intra-domain communication between domain members must be approved by a central instance is required. We call such a central instance PolEnforcePoint and present a template for this architecture. Five host roles are distinguished:. 
A PolEnforcePoint, aPolEnforcePointIN which accessible from the outside, a DomainMember, a less-restricted AccessibleMember which is accessible from the outside world, and a default value Unassigned that reflects none of these roles.\ datatype secgw_member = PolEnforcePoint | PolEnforcePointIN | DomainMember | AccessibleMember | Unassigned definition default_node_properties :: "secgw_member" where "default_node_properties \ Unassigned" fun allowed_secgw_flow :: "secgw_member \ secgw_member \ bool" where "allowed_secgw_flow PolEnforcePoint _ = True" | "allowed_secgw_flow PolEnforcePointIN _ = True" | "allowed_secgw_flow DomainMember DomainMember = False" | "allowed_secgw_flow DomainMember _ = True" | "allowed_secgw_flow AccessibleMember DomainMember = False" | "allowed_secgw_flow AccessibleMember _ = True" | "allowed_secgw_flow Unassigned Unassigned = True" | "allowed_secgw_flow Unassigned PolEnforcePointIN = True" | "allowed_secgw_flow Unassigned AccessibleMember = True" | "allowed_secgw_flow Unassigned _ = False" fun sinvar :: "'v graph \ ('v \ secgw_member) \ bool" where "sinvar G nP = (\ (e1,e2) \ edges G. e1 \ e2 \ allowed_secgw_flow (nP e1) (nP e2))" definition receiver_violation :: "bool" where "receiver_violation = False" subsubsection \Preliminaries\ lemma sinvar_mono: "SecurityInvariant_withOffendingFlows.sinvar_mono sinvar" apply(simp only: SecurityInvariant_withOffendingFlows.sinvar_mono_def) apply(clarify) by auto interpretation SecurityInvariant_preliminaries where sinvar = sinvar apply unfold_locales apply(frule_tac finite_distinct_list[OF wf_graph.finiteE]) apply(erule_tac exE) apply(rename_tac list_edges) apply(rule_tac ff="list_edges" in SecurityInvariant_withOffendingFlows.mono_imp_set_offending_flows_not_empty[OF sinvar_mono]) apply(auto)[6] apply(auto simp add: SecurityInvariant_withOffendingFlows.is_offending_flows_def graph_ops)[1] apply(fact SecurityInvariant_withOffendingFlows.sinvar_mono_imp_is_offending_flows_mono[OF sinvar_mono]) done subsubsection\ENF\ lemma PolEnforcePoint_ENFnr: "SecurityInvariant_withOffendingFlows.sinvar_all_edges_normal_form_not_refl sinvar allowed_secgw_flow" by(simp add: SecurityInvariant_withOffendingFlows.sinvar_all_edges_normal_form_not_refl_def) lemma Unassigned_botdefault: "\ e1 e2. e2 \ Unassigned \ \ allowed_secgw_flow e1 e2 \ \ allowed_secgw_flow Unassigned e2" apply(rule allI)+ apply(case_tac e2) apply(simp_all) apply(case_tac e1) apply(simp_all) apply(case_tac e1) apply(simp_all) done lemma Unassigned_not_to_Member: "\ allowed_secgw_flow Unassigned DomainMember" by(simp) lemma All_to_Unassigned: "\ e1. allowed_secgw_flow e1 Unassigned" by (rule allI, case_tac e1, simp_all) definition PolEnforcePointExtended_offending_set:: "'v graph \ ('v \ secgw_member) \ ('v \ 'v) set set" where "PolEnforcePointExtended_offending_set G nP = (if sinvar G nP then {} else { {e \ edges G. 
case e of (e1,e2) \ e1 \ e2 \ \ allowed_secgw_flow (nP e1) (nP e2)} })" lemma PolEnforcePointExtended_offending_set: "SecurityInvariant_withOffendingFlows.set_offending_flows sinvar = PolEnforcePointExtended_offending_set" apply(simp only: fun_eq_iff ENFnr_offending_set[OF PolEnforcePoint_ENFnr] PolEnforcePointExtended_offending_set_def) apply(rule allI)+ apply(rename_tac G nP) apply(auto) done interpretation PolEnforcePointExtended: SecurityInvariant_ACS where default_node_properties = default_node_properties and sinvar = sinvar rewrites "SecurityInvariant_withOffendingFlows.set_offending_flows sinvar = PolEnforcePointExtended_offending_set" unfolding default_node_properties_def apply unfold_locales apply(rule ballI) apply (rule SecurityInvariant_withOffendingFlows.ENFnr_fsts_weakrefl_instance[OF PolEnforcePoint_ENFnr Unassigned_botdefault All_to_Unassigned])[1] apply(simp) apply(simp) apply(erule default_uniqueness_by_counterexample_ACS) apply (simp add: SecurityInvariant_withOffendingFlows.set_offending_flows_def SecurityInvariant_withOffendingFlows.is_offending_flows_min_set_def SecurityInvariant_withOffendingFlows.is_offending_flows_def) apply (simp add:graph_ops) apply (simp split: prod.split_asm prod.split) apply(rule_tac x="\ nodes={vertex_1,vertex_2}, edges = {(vertex_1,vertex_2)} \" in exI, simp) apply(rule conjI) apply(simp add: wf_graph_def) apply(case_tac otherbot, simp_all) - apply(rename_tac secgwcase) apply(rule_tac x="(\ x. Unassigned)(vertex_1 := Unassigned, vertex_2 := DomainMember)" in exI, simp) apply(rule_tac x="{(vertex_1,vertex_2)}" in exI, simp) - apply(rename_tac secgwINcase) apply(rule_tac x="(\ x. Unassigned)(vertex_1 := Unassigned, vertex_2 := DomainMember)" in exI, simp) apply(rule_tac x="vertex_1" in exI, simp) apply(rule_tac x="{(vertex_1,vertex_2)}" in exI, simp) - apply(rename_tac membercase) apply(rule_tac x="(\ x. Unassigned)(vertex_1 := Unassigned, vertex_2 := PolEnforcePoint)" in exI, simp) apply(rule_tac x="vertex_1" in exI, simp) apply(rule_tac x="{(vertex_1,vertex_2)}" in exI, simp) apply(rule_tac x="(\ x. Unassigned)(vertex_1 := Unassigned, vertex_2 := PolEnforcePoint)" in exI, simp) apply(rule_tac x="vertex_1" in exI, simp) apply(rule_tac x="{(vertex_1,vertex_2)}" in exI, simp) apply(fact PolEnforcePointExtended_offending_set) done lemma TopoS_PolEnforcePointExtended: "SecurityInvariant sinvar default_node_properties receiver_violation" unfolding receiver_violation_def by unfold_locales hide_const (open) sinvar receiver_violation end diff --git a/thys/Ordered_Resolution_Prover/FO_Ordered_Resolution_Prover.thy b/thys/Ordered_Resolution_Prover/FO_Ordered_Resolution_Prover.thy --- a/thys/Ordered_Resolution_Prover/FO_Ordered_Resolution_Prover.thy +++ b/thys/Ordered_Resolution_Prover/FO_Ordered_Resolution_Prover.thy @@ -1,1359 +1,1360 @@ (* Title: An Ordered Resolution Prover for First-Order Clauses Author: Anders Schlichtkrull , 2016, 2017 Author: Jasmin Blanchette , 2014, 2017 Author: Dmitriy Traytel , 2014 Maintainer: Anders Schlichtkrull *) section \An Ordered Resolution Prover for First-Order Clauses\ theory FO_Ordered_Resolution_Prover imports FO_Ordered_Resolution begin text \ This material is based on Section 4.3 (``A Simple Resolution Prover for First-Order Clauses'') of Bachmair and Ganzinger's chapter. Specifically, it formalizes the RP prover defined in Figure 5 and its related lemmas and theorems, including Lemmas 4.10 and 4.11 and Theorem 4.13 (completeness). 
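In the formalization below, a prover state is a triple (N, P, Q) of clause sets, and the transitions of the prover are captured by the inductive predicate RP on such states.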
\ definition is_least :: "(nat \ bool) \ nat \ bool" where "is_least P n \ P n \ (\n' < n. \ P n')" lemma least_exists: "P n \ \n. is_least P n" using exists_least_iff unfolding is_least_def by auto text \ The following corresponds to page 42 and 43 of Section 4.3, from the explanation of RP to Lemma 4.10. \ type_synonym 'a state = "'a clause set \ 'a clause set \ 'a clause set" locale FO_resolution_prover = FO_resolution subst_atm id_subst comp_subst renamings_apart atm_of_atms mgu less_atm + selection S for S :: "('a :: wellorder) clause \ 'a clause" and subst_atm :: "'a \ 's \ 'a" and id_subst :: "'s" and comp_subst :: "'s \ 's \ 's" and renamings_apart :: "'a clause list \ 's list" and atm_of_atms :: "'a list \ 'a" and mgu :: "'a set set \ 's option" and less_atm :: "'a \ 'a \ bool" + assumes sel_stable: "\\ C. is_renaming \ \ S (C \ \) = S C \ \" begin fun N_of_state :: "'a state \ 'a clause set" where "N_of_state (N, P, Q) = N" fun P_of_state :: "'a state \ 'a clause set" where "P_of_state (N, P, Q) = P" text \ \O\ denotes relation composition in Isabelle, so the formalization uses \Q\ instead. \ fun Q_of_state :: "'a state \ 'a clause set" where "Q_of_state (N, P, Q) = Q" abbreviation clss_of_state :: "'a state \ 'a clause set" where "clss_of_state St \ N_of_state St \ P_of_state St \ Q_of_state St" abbreviation grounding_of_state :: "'a state \ 'a clause set" where "grounding_of_state St \ grounding_of_clss (clss_of_state St)" interpretation ord_FO_resolution: inference_system "ord_FO_\ S" . text \ The following inductive predicate formalizes the resolution prover in Figure 5. \ inductive RP :: "'a state \ 'a state \ bool" (infix "\" 50) where tautology_deletion: "Neg A \# C \ Pos A \# C \ (N \ {C}, P, Q) \ (N, P, Q)" | forward_subsumption: "D \ P \ Q \ subsumes D C \ (N \ {C}, P, Q) \ (N, P, Q)" | backward_subsumption_P: "D \ N \ strictly_subsumes D C \ (N, P \ {C}, Q) \ (N, P, Q)" | backward_subsumption_Q: "D \ N \ strictly_subsumes D C \ (N, P, Q \ {C}) \ (N, P, Q)" | forward_reduction: "D + {#L'#} \ P \ Q \ - L = L' \l \ \ D \ \ \# C \ (N \ {C + {#L#}}, P, Q) \ (N \ {C}, P, Q)" | backward_reduction_P: "D + {#L'#} \ N \ - L = L' \l \ \ D \ \ \# C \ (N, P \ {C + {#L#}}, Q) \ (N, P \ {C}, Q)" | backward_reduction_Q: "D + {#L'#} \ N \ - L = L' \l \ \ D \ \ \# C \ (N, P, Q \ {C + {#L#}}) \ (N, P \ {C}, Q)" | clause_processing: "(N \ {C}, P, Q) \ (N, P \ {C}, Q)" | inference_computation: "N = concls_of (ord_FO_resolution.inferences_between Q C) \ ({}, P \ {C}, Q) \ (N, P, Q \ {C})" lemma final_RP: "\ ({}, {}, Q) \ St" by (auto elim: RP.cases) definition Sup_state :: "'a state llist \ 'a state" where "Sup_state Sts = (Sup_llist (lmap N_of_state Sts), Sup_llist (lmap P_of_state Sts), Sup_llist (lmap Q_of_state Sts))" definition Liminf_state :: "'a state llist \ 'a state" where "Liminf_state Sts = (Liminf_llist (lmap N_of_state Sts), Liminf_llist (lmap P_of_state Sts), Liminf_llist (lmap Q_of_state Sts))" context fixes Sts Sts' :: "'a state llist" assumes Sts: "lfinite Sts" "lfinite Sts'" "\ lnull Sts" "\ lnull Sts'" "llast Sts' = llast Sts" begin lemma N_of_Liminf_state_fin: "N_of_state (Liminf_state Sts') = N_of_state (Liminf_state Sts)" and P_of_Liminf_state_fin: "P_of_state (Liminf_state Sts') = P_of_state (Liminf_state Sts)" and Q_of_Liminf_state_fin: "Q_of_state (Liminf_state Sts') = Q_of_state (Liminf_state Sts)" using Sts by (simp_all add: Liminf_state_def lfinite_Liminf_llist llast_lmap) lemma Liminf_state_fin: "Liminf_state Sts' = Liminf_state Sts" using N_of_Liminf_state_fin 
P_of_Liminf_state_fin Q_of_Liminf_state_fin by (simp add: Liminf_state_def) end context fixes Sts Sts' :: "'a state llist" assumes Sts: "\ lfinite Sts" "emb Sts Sts'" begin lemma N_of_Liminf_state_inf: "N_of_state (Liminf_state Sts') \ N_of_state (Liminf_state Sts)" and P_of_Liminf_state_inf: "P_of_state (Liminf_state Sts') \ P_of_state (Liminf_state Sts)" and Q_of_Liminf_state_inf: "Q_of_state (Liminf_state Sts') \ Q_of_state (Liminf_state Sts)" using Sts by (simp_all add: Liminf_state_def emb_Liminf_llist_infinite emb_lmap) lemma clss_of_Liminf_state_inf: "clss_of_state (Liminf_state Sts') \ clss_of_state (Liminf_state Sts)" using N_of_Liminf_state_inf P_of_Liminf_state_inf Q_of_Liminf_state_inf by blast end definition fair_state_seq :: "'a state llist \ bool" where "fair_state_seq Sts \ N_of_state (Liminf_state Sts) = {} \ P_of_state (Liminf_state Sts) = {}" text \ The following formalizes Lemma 4.10. \ context fixes Sts :: "'a state llist" begin definition S_Q :: "'a clause \ 'a clause" where "S_Q = S_M S (Q_of_state (Liminf_state Sts))" interpretation sq: selection S_Q unfolding S_Q_def using S_M_selects_subseteq S_M_selects_neg_lits selection_axioms by unfold_locales auto interpretation gr: ground_resolution_with_selection S_Q by unfold_locales interpretation sr: standard_redundancy_criterion_reductive gr.ord_\ by unfold_locales interpretation sr: standard_redundancy_criterion_counterex_reducing gr.ord_\ "ground_resolution_with_selection.INTERP S_Q" by unfold_locales text \ The extension of ordered resolution mentioned in 4.10. We let it consist of all sound rules. \ definition ground_sound_\:: "'a inference set" where "ground_sound_\ = {Infer CC D E | CC D E. (\I. I \m CC \ I \ D \ I \ E)}" text \ We prove that we indeed defined an extension. \ lemma gd_ord_\_ngd_ord_\: "gr.ord_\ \ ground_sound_\" unfolding ground_sound_\_def using gr.ord_\_def gr.ord_resolve_sound by fastforce lemma sound_ground_sound_\: "sound_inference_system ground_sound_\" unfolding sound_inference_system_def ground_sound_\_def by auto lemma sat_preserving_ground_sound_\: "sat_preserving_inference_system ground_sound_\" using sound_ground_sound_\ sat_preserving_inference_system.intro sound_inference_system.\_sat_preserving by blast definition sr_ext_Ri :: "'a clause set \ 'a inference set" where "sr_ext_Ri N = sr.Ri N \ (ground_sound_\ - gr.ord_\)" interpretation sr_ext: sat_preserving_redundancy_criterion ground_sound_\ sr.Rf sr_ext_Ri unfolding sat_preserving_redundancy_criterion_def sr_ext_Ri_def using sat_preserving_ground_sound_\ redundancy_criterion_standard_extension gd_ord_\_ngd_ord_\ sr.redundancy_criterion_axioms by auto lemma strict_subset_subsumption_redundant_clause: assumes sub: "D \ \ \# C" and ground_\: "is_ground_subst \" shows "C \ sr.Rf (grounding_of_cls D)" proof - from sub have "\I. I \ D \ \ \ I \ C" unfolding true_cls_def by blast moreover have "C > D \ \" using sub by (simp add: subset_imp_less_mset) moreover have "D \ \ \ grounding_of_cls D" using ground_\ by (metis (mono_tags) mem_Collect_eq substitution_ops.grounding_of_cls_def) ultimately have "set_mset {#D \ \#} \ grounding_of_cls D" "(\I. I \m {#D \ \#} \ I \ C)" "(\D'. 
D' \# {#D \ \#} \ D' < C)" by auto then show ?thesis using sr.Rf_def by blast qed lemma strict_subset_subsumption_redundant_clss: assumes "D \ \ \# C" and "is_ground_subst \" and "D \ CC" shows "C \ sr.Rf (grounding_of_clss CC)" using assms proof - have "C \ sr.Rf (grounding_of_cls D)" using strict_subset_subsumption_redundant_clause assms by auto then show ?thesis using assms unfolding grounding_of_clss_def by (metis (no_types) sr.Rf_mono sup_ge1 SUP_absorb contra_subsetD) qed lemma strict_subset_subsumption_grounding_redundant_clss: assumes D\_subset_C: "D \ \ \# C" and D_in_St: "D \ CC" shows "grounding_of_cls C \ sr.Rf (grounding_of_clss CC)" proof fix C\ assume "C\ \ grounding_of_cls C" then obtain \ where \_p: "C\ = C \ \ \ is_ground_subst \" unfolding grounding_of_cls_def by auto have D\\C\: "D \ \ \ \ \# C \ \" using D\_subset_C subst_subset_mono by auto then show "C\ \ sr.Rf (grounding_of_clss CC)" using \_p strict_subset_subsumption_redundant_clss[of D "\ \ \" "C \ \"] D_in_St by auto qed lemma derive_if_remove_subsumed: assumes "D \ clss_of_state St" and "subsumes D C" shows "sr_ext.derive (grounding_of_state St \ grounding_of_cls C) (grounding_of_state St)" proof - from assms obtain \ where "D \ \ = C \ D \ \ \# C" by (auto simp: subsumes_def subset_mset_def) then have "D \ \ = C \ D \ \ \# C" by (simp add: subset_mset_def) then show ?thesis proof assume "D \ \ = C" then have "grounding_of_cls C \ grounding_of_cls D" - using subst_cls_eq_grounding_of_cls_subset_eq by simp + using subst_cls_eq_grounding_of_cls_subset_eq + by (auto dest: sym) then have "(grounding_of_state St \ grounding_of_cls C) = grounding_of_state St" using assms unfolding grounding_of_clss_def by auto then show ?thesis by (auto intro: sr_ext.derive.intros) next assume a: "D \ \ \# C" then have "grounding_of_cls C \ sr.Rf (grounding_of_state St)" using strict_subset_subsumption_grounding_redundant_clss assms by auto then show ?thesis unfolding grounding_of_clss_def by (force intro: sr_ext.derive.intros) qed qed lemma reduction_in_concls_of: assumes "C\ \ grounding_of_cls C" and "D + {#L'#} \ CC" and "- L = L' \l \" and "D \ \ \# C" shows "C\ \ concls_of (sr_ext.inferences_from (grounding_of_clss (CC \ {C + {#L#}})))" proof - from assms obtain \ where \_p: "C\ = C \ \ \ is_ground_subst \" unfolding grounding_of_cls_def by auto define \ where "\ = Infer {#(C + {#L#}) \ \#} ((D + {#L'#}) \ \ \ \) (C \ \)" have "(D + {#L'#}) \ \ \ \ \ grounding_of_clss (CC \ {C + {#L#}})" unfolding grounding_of_clss_def grounding_of_cls_def by (rule UN_I[of "D + {#L'#}"], use assms(2) in simp, metis (mono_tags, lifting) \_p is_ground_comp_subst mem_Collect_eq subst_cls_comp_subst) moreover have "(C + {#L#}) \ \ \ grounding_of_clss (CC \ {C + {#L#}})" using \_p unfolding grounding_of_clss_def grounding_of_cls_def by auto moreover have "\I. I \ D \ \ \ \ + {#- (L \l \)#} \ I \ C \ \ + {#L \l \#} \ I \ D \ \ \ \ + C \ \" by auto then have "\I. I \ (D + {#L'#}) \ \ \ \ \ I \ (C + {#L#}) \ \ \ I \ D \ \ \ \ + C \ \" using assms by (metis add_mset_add_single subst_cls_add_mset subst_cls_union subst_minus) then have "\I. I \ (D + {#L'#}) \ \ \ \ \ I \ (C + {#L#}) \ \ \ I \ C \ \" using assms by (metis (no_types, lifting) subset_mset.le_iff_add subst_cls_union true_cls_union) then have "\I. 
I \m {#(D + {#L'#}) \ \ \ \#} \ I \ (C + {#L#}) \ \ \ I \ C \ \" by (meson true_cls_mset_singleton) ultimately have "\ \ sr_ext.inferences_from (grounding_of_clss (CC \ {C + {#L#}}))" unfolding sr_ext.inferences_from_def unfolding ground_sound_\_def infer_from_def \_def by auto then have "C \ \ \ concls_of (sr_ext.inferences_from (grounding_of_clss (CC \ {C + {#L#}})))" using image_iff unfolding \_def by fastforce then show "C\ \ concls_of (sr_ext.inferences_from (grounding_of_clss (CC \ {C + {#L#}})))" using \_p by auto qed lemma reduction_derivable: assumes "D + {#L'#} \ CC" and "- L = L' \l \" and "D \ \ \# C" shows "sr_ext.derive (grounding_of_clss (CC \ {C + {#L#}})) (grounding_of_clss (CC \ {C}))" proof - from assms have "grounding_of_clss (CC \ {C}) - grounding_of_clss (CC \ {C + {#L#}}) \ concls_of (sr_ext.inferences_from (grounding_of_clss (CC \ {C + {#L#}})))" using reduction_in_concls_of unfolding grounding_of_clss_def by auto moreover have "grounding_of_cls (C + {#L#}) \ sr.Rf (grounding_of_clss (CC \ {C}))" using strict_subset_subsumption_grounding_redundant_clss[of C "id_subst"] by auto then have "grounding_of_clss (CC \ {C + {#L#}}) - grounding_of_clss (CC \ {C}) \ sr.Rf (grounding_of_clss (CC \ {C}))" unfolding grounding_of_clss_def by auto ultimately show "sr_ext.derive (grounding_of_clss (CC \ {C + {#L#}})) (grounding_of_clss (CC \ {C}))" using sr_ext.derive.intros[of "grounding_of_clss (CC \ {C})" "grounding_of_clss (CC \ {C + {#L#}})"] by auto qed text \ The following corresponds the part of Lemma 4.10 that states we have a theorem proving process: \ lemma RP_ground_derive: "St \ St' \ sr_ext.derive (grounding_of_state St) (grounding_of_state St')" proof (induction rule: RP.induct) case (tautology_deletion A C N P Q) { fix C\ assume "C\ \ grounding_of_cls C" then obtain \ where "C\ = C \ \" unfolding grounding_of_cls_def by auto then have "Neg (A \a \) \# C\ \ Pos (A \a \) \# C\" using tautology_deletion Neg_Melem_subst_atm_subst_cls Pos_Melem_subst_atm_subst_cls by auto then have "C\ \ sr.Rf (grounding_of_state (N, P, Q))" using sr.tautology_Rf by auto } then have "grounding_of_state (N \ {C}, P, Q) - grounding_of_state (N, P, Q) \ sr.Rf (grounding_of_state (N, P, Q))" unfolding grounding_of_clss_def by auto moreover have "grounding_of_state (N, P, Q) - grounding_of_state (N \ {C}, P, Q) = {}" unfolding grounding_of_clss_def by auto ultimately show ?case using sr_ext.derive.intros[of "grounding_of_state (N, P, Q)" "grounding_of_state (N \ {C}, P, Q)"] by auto next case (forward_subsumption D P Q C N) then show ?case using derive_if_remove_subsumed[of D "(N, P, Q)" C] unfolding grounding_of_clss_def by (simp add: sup_commute sup_left_commute) next case (backward_subsumption_P D N C P Q) then show ?case using derive_if_remove_subsumed[of D "(N, P, Q)" C] strictly_subsumes_def unfolding grounding_of_clss_def by (simp add: sup_commute sup_left_commute) next case (backward_subsumption_Q D N C P Q) then show ?case using derive_if_remove_subsumed[of D "(N, P, Q)" C] strictly_subsumes_def unfolding grounding_of_clss_def by (simp add: sup_commute sup_left_commute) next case (forward_reduction D L' P Q L \ C N) then show ?case using reduction_derivable[of _ _ "N \ P \ Q"] by force next case (backward_reduction_P D L' N L \ C P Q) then show ?case using reduction_derivable[of _ _ "N \ P \ Q"] by force next case (backward_reduction_Q D L' N L \ C P Q) then show ?case using reduction_derivable[of _ _ "N \ P \ Q"] by force next case (clause_processing N C P Q) then show ?case using 
sr_ext.derive.intros by auto next case (inference_computation N Q C P) { fix E\ assume "E\ \ grounding_of_clss N" then obtain \ E where E_\_p: "E\ = E \ \ \ E \ N \ is_ground_subst \" unfolding grounding_of_clss_def grounding_of_cls_def by auto then have E_concl: "E \ concls_of (ord_FO_resolution.inferences_between Q C)" using inference_computation by auto then obtain \ where \_p: "\ \ ord_FO_\ S \ infer_from (Q \ {C}) \ \ C \# prems_of \ \ concl_of \ = E" unfolding ord_FO_resolution.inferences_between_def by auto then obtain CC CAs D AAs As \ where \_p2: "\ = Infer CC D E \ ord_resolve_rename S CAs D AAs As \ E \ mset CAs = CC" unfolding ord_FO_\_def by auto define \ where "\ = hd (renamings_apart (D # CAs))" define \s where "\s = tl (renamings_apart (D # CAs))" define \_ground where "\_ground = Infer (mset (CAs \\cl \s) \cm \ \cm \) (D \ \ \ \ \ \) (E \ \)" have "\I. I \m mset (CAs \\cl \s) \cm \ \cm \ \ I \ D \ \ \ \ \ \ \ I \ E \ \" using ord_resolve_rename_ground_inst_sound[of _ _ _ _ _ _ _ _ _ _ \] \_def \s_def E_\_p \_p2 by auto then have "\_ground \ {Infer cc d e | cc d e. \I. I \m cc \ I \ d \ I \ e}" unfolding \_ground_def by auto moreover have "set_mset (prems_of \_ground) \ grounding_of_state ({}, P \ {C}, Q)" proof - have "D = C \ D \ Q" unfolding \_ground_def using E_\_p \_p2 \_p unfolding infer_from_def unfolding grounding_of_clss_def grounding_of_cls_def by simp then have "D \ \ \ \ \ \ \ grounding_of_cls C \ (\x \ Q. D \ \ \ \ \ \ \ grounding_of_cls x)" using E_\_p unfolding grounding_of_cls_def by (metis (mono_tags, lifting) is_ground_comp_subst mem_Collect_eq subst_cls_comp_subst) then have "(D \ \ \ \ \ \ \ grounding_of_cls C \ (\x \ P. D \ \ \ \ \ \ \ grounding_of_cls x) \ (\x \ Q. D \ \ \ \ \ \ \ grounding_of_cls x))" by metis moreover have "\i < length (CAs \\cl \s \cl \ \cl \). (CAs \\cl \s \cl \ \cl \) ! i \ {C \ \ |\. is_ground_subst \} \ ((\C \ P. {C \ \ | \. is_ground_subst \}) \ (\C\Q. {C \ \ | \. is_ground_subst \}))" proof (rule, rule) fix i assume "i < length (CAs \\cl \s \cl \ \cl \)" then have a: "i < length CAs \ i < length \s" by simp moreover from a have "CAs ! i \ {C} \ Q" using \_p2 \_p unfolding infer_from_def by (metis (no_types, lifting) Un_subset_iff inference.sel(1) set_mset_union sup_commute nth_mem_mset subsetCE) ultimately have "(CAs \\cl \s \cl \ \cl \) ! i \ {C \ \ |\. is_ground_subst \} \ ((CAs \\cl \s \cl \ \cl \) ! i \ (\C\P. {C \ \ |\. is_ground_subst \}) \ (CAs \\cl \s \cl \ \cl \) ! i \ (\C \ Q. {C \ \ | \. is_ground_subst \}))" using E_\_p \_p2 \_p unfolding \_ground_def infer_from_def grounding_of_clss_def grounding_of_cls_def apply - apply (cases "CAs ! i = C") subgoal apply (rule disjI1) apply (rule Set.CollectI) apply (rule_tac x = "(\s ! i) \ \ \ \" in exI) using \s_def using renamings_apart_length by (auto; fail) subgoal apply (rule disjI2) apply (rule disjI2) apply (rule_tac a = "CAs ! i" in UN_I) subgoal by blast subgoal apply (rule Set.CollectI) apply (rule_tac x = "(\s ! i) \ \ \ \" in exI) using \s_def using renamings_apart_length by (auto; fail) done done then show "(CAs \\cl \s \cl \ \cl \) ! i \ {C \ \ |\. is_ground_subst \} \ ((\C \ P. {C \ \ |\. is_ground_subst \}) \ (\C \ Q. {C \ \ |\. is_ground_subst \}))" by blast qed then have "\x \# mset (CAs \\cl \s \cl \ \cl \). x \ {C \ \ |\. is_ground_subst \} \ ((\C \ P. {C \ \ |\. is_ground_subst \}) \ (\C \ Q. {C \ \ |\. 
is_ground_subst \}))" by (metis (lifting) in_set_conv_nth set_mset_mset) then have "set_mset (mset (CAs \\cl \s) \cm \ \cm \) \ grounding_of_cls C \ grounding_of_clss P \ grounding_of_clss Q" unfolding grounding_of_cls_def grounding_of_clss_def using mset_subst_cls_list_subst_cls_mset by auto ultimately show ?thesis unfolding \_ground_def grounding_of_clss_def by auto qed ultimately have "E \ \ \ concls_of (sr_ext.inferences_from (grounding_of_state ({}, P \ {C}, Q)))" unfolding sr_ext.inferences_from_def inference_system.inferences_from_def ground_sound_\_def infer_from_def using \_ground_def by (metis (mono_tags, lifting) image_eqI inference.sel(3) mem_Collect_eq) then have "E\ \ concls_of (sr_ext.inferences_from (grounding_of_state ({}, P \ {C}, Q)))" using E_\_p by auto } then have "grounding_of_state (N, P, Q \ {C}) - grounding_of_state ({}, P \ {C}, Q) \ concls_of (sr_ext.inferences_from (grounding_of_state ({}, P \ {C}, Q)))" unfolding grounding_of_clss_def by auto moreover have "grounding_of_state ({}, P \ {C}, Q) - grounding_of_state (N, P, Q \ {C}) = {}" unfolding grounding_of_clss_def by auto ultimately show ?case using sr_ext.derive.intros[of "(grounding_of_state (N, P, Q \ {C}))" "(grounding_of_state ({}, P \ {C}, Q))"] by auto qed text \ A useful consequence: \ theorem RP_model: "St \ St' \ I \s grounding_of_state St' \ I \s grounding_of_state St" proof (drule RP_ground_derive, erule sr_ext.derive.cases, hypsubst) let ?gSt = "grounding_of_state St" and ?gSt' = "grounding_of_state St'" assume deduct: "?gSt' - ?gSt \ concls_of (sr_ext.inferences_from ?gSt)" (is "_ \ ?concls") and delete: "?gSt - ?gSt' \ sr.Rf ?gSt'" show "I \s ?gSt' \ I \s ?gSt" proof assume bef: "I \s ?gSt" then have "I \s ?concls" unfolding ground_sound_\_def inference_system.inferences_from_def true_clss_def true_cls_mset_def by (auto simp add: image_def infer_from_def dest!: spec[of _ I]) then have diff: "I \s ?gSt' - ?gSt" using deduct by (blast intro: true_clss_mono) then show "I \s ?gSt'" using bef unfolding true_clss_def by blast next assume aft: "I \s ?gSt'" have "I \s ?gSt' \ sr.Rf ?gSt'" by (rule sr.Rf_model) (smt Diff_eq_empty_iff Diff_subset Un_Diff aft standard_redundancy_criterion.Rf_mono sup_bot.right_neutral sup_ge1 true_clss_mono) then have "I \s sr.Rf ?gSt'" using true_clss_union by blast then have diff: "I \s ?gSt - ?gSt'" using delete by (blast intro: true_clss_mono) then show "I \s ?gSt" using aft unfolding true_clss_def by blast qed qed text \ Another formulation of the part of Lemma 4.10 that states we have a theorem proving process: \ lemma ground_derive_chain: "chain (\) Sts \ chain sr_ext.derive (lmap grounding_of_state Sts)" using RP_ground_derive by (simp add: chain_lmap[of "(\)"]) text \ The following is used prove to Lemma 4.11: \ lemma Sup_llist_grounding_of_state_ground: assumes "C \ Sup_llist (lmap grounding_of_state Sts)" shows "is_ground_cls C" proof - have "\j. enat j < llength (lmap grounding_of_state Sts) \ C \ lnth (lmap grounding_of_state Sts) j" using assms Sup_llist_imp_exists_index by fast then show ?thesis unfolding grounding_of_clss_def grounding_of_cls_def by auto qed lemma Liminf_grounding_of_state_ground: "C \ Liminf_llist (lmap grounding_of_state Sts) \ is_ground_cls C" using Liminf_llist_subset_Sup_llist[of "lmap grounding_of_state Sts"] Sup_llist_grounding_of_state_ground by blast lemma in_Sup_llist_in_Sup_state: assumes "C \ Sup_llist (lmap grounding_of_state Sts)" shows "\D \. 
D \ clss_of_state (Sup_state Sts) \ D \ \ = C \ is_ground_subst \" proof - from assms obtain i where i_p: "enat i < llength Sts \ C \ lnth (lmap grounding_of_state Sts) i" using Sup_llist_imp_exists_index by fastforce then obtain D \ where "D \ clss_of_state (lnth Sts i) \ D \ \ = C \ is_ground_subst \" using assms unfolding grounding_of_clss_def grounding_of_cls_def by fastforce then have "D \ clss_of_state (Sup_state Sts) \ D \ \ = C \ is_ground_subst \" using i_p unfolding Sup_state_def by (metis (no_types, lifting) UnCI UnE contra_subsetD N_of_state.simps P_of_state.simps Q_of_state.simps llength_lmap lnth_lmap lnth_subset_Sup_llist) then show ?thesis by auto qed lemma N_of_state_Liminf: "N_of_state (Liminf_state Sts) = Liminf_llist (lmap N_of_state Sts)" and P_of_state_Liminf: "P_of_state (Liminf_state Sts) = Liminf_llist (lmap P_of_state Sts)" unfolding Liminf_state_def by auto lemma eventually_removed_from_N: assumes d_in: "D \ N_of_state (lnth Sts i)" and fair: "fair_state_seq Sts" and i_Sts: "enat i < llength Sts" shows "\l. D \ N_of_state (lnth Sts l) \ D \ N_of_state (lnth Sts (Suc l)) \ i \ l \ enat (Suc l) < llength Sts" proof (rule ccontr) assume a: "\ ?thesis" have "i \ l \ enat l < llength Sts \ D \ N_of_state (lnth Sts l)" for l using d_in by (induction l, blast, metis a Suc_ile_eq le_SucE less_imp_le) then have "D \ Liminf_llist (lmap N_of_state Sts)" unfolding Liminf_llist_def using i_Sts by auto then show False using fair unfolding fair_state_seq_def by (simp add: N_of_state_Liminf) qed lemma eventually_removed_from_P: assumes d_in: "D \ P_of_state (lnth Sts i)" and fair: "fair_state_seq Sts" and i_Sts: "enat i < llength Sts" shows "\l. D \ P_of_state (lnth Sts l) \ D \ P_of_state (lnth Sts (Suc l)) \ i \ l \ enat (Suc l) < llength Sts" proof (rule ccontr) assume a: "\ ?thesis" have "i \ l \ enat l < llength Sts \ D \ P_of_state (lnth Sts l)" for l using d_in by (induction l, blast, metis a Suc_ile_eq le_SucE less_imp_le) then have "D \ Liminf_llist (lmap P_of_state Sts)" unfolding Liminf_llist_def using i_Sts by auto then show False using fair unfolding fair_state_seq_def by (simp add: P_of_state_Liminf) qed lemma instance_if_subsumed_and_in_limit: assumes deriv: "chain (\) Sts" and ns: "Gs = lmap grounding_of_state Sts" and c: "C \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" and d: "D \ clss_of_state (lnth Sts i)" "enat i < llength Sts" "subsumes D C" shows "\\. D \ \ = C \ is_ground_subst \" proof - let ?Ps = "\i. P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" have ground_C: "is_ground_cls C" using c using Liminf_grounding_of_state_ground ns by auto have derivns: "chain sr_ext.derive Gs" using ground_derive_chain deriv ns by auto have "\\. D \ \ = C" proof (rule ccontr) assume "\\. 
D \ \ = C" moreover from d(3) obtain \_proto where "D \ \_proto \# C" unfolding subsumes_def by blast then obtain \ where \_p: "D \ \ \# C \ is_ground_subst \" using ground_C by (metis is_ground_cls_mono make_ground_subst subset_mset.order_refl) ultimately have subsub: "D \ \ \# C" using subset_mset.le_imp_less_or_eq by auto moreover have "is_ground_subst \" using \_p by auto moreover have "D \ clss_of_state (lnth Sts i)" using d by auto ultimately have "C \ sr.Rf (grounding_of_state (lnth Sts i))" using strict_subset_subsumption_redundant_clss by auto then have "C \ sr.Rf (Sup_llist Gs)" using d ns by (smt contra_subsetD llength_lmap lnth_lmap lnth_subset_Sup_llist sr.Rf_mono) then have "C \ sr.Rf (Liminf_llist Gs)" unfolding ns using local.sr_ext.Rf_limit_Sup derivns ns by auto then show False using c by auto qed then obtain \ where "D \ \ = C \ is_ground_subst \" using ground_C by (metis make_ground_subst) then show ?thesis by auto qed lemma from_Q_to_Q_inf: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and ns: "Gs = lmap grounding_of_state Sts" and c: "C \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" and d: "D \ Q_of_state (lnth Sts i)" "enat i < llength Sts" "subsumes D C" and d_least: "\E \ {E. E \ (clss_of_state (Sup_state Sts)) \ subsumes E C}. \ strictly_subsumes E D" shows "D \ Q_of_state (Liminf_state Sts)" proof - let ?Ps = "\i. P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" have ground_C: "is_ground_cls C" using c using Liminf_grounding_of_state_ground ns by auto have derivns: "chain sr_ext.derive Gs" using ground_derive_chain deriv ns by auto have "\\. D \ \ = C \ is_ground_subst \" using instance_if_subsumed_and_in_limit[OF deriv] c d unfolding ns by blast then obtain \ where \: "D \ \ = C" "is_ground_subst \" by auto have in_Sts_in_Sts_Suc: "\l \ i. 
enat (Suc l) < llength Sts \ D \ Q_of_state (lnth Sts l) \ D \ Q_of_state (lnth Sts (Suc l))" proof (rule, rule, rule, rule) fix l assume len: "i \ l" and llen: "enat (Suc l) < llength Sts" and d_in_q: "D \ Q_of_state (lnth Sts l)" have "lnth Sts l \ lnth Sts (Suc l)" using llen deriv chain_lnth_rel by blast then show "D \ Q_of_state (lnth Sts (Suc l))" proof (cases rule: RP.cases) case (backward_subsumption_Q D' N D_removed P Q) moreover { assume "D_removed = D" then obtain D_subsumes where D_subsumes_p: "D_subsumes \ N \ strictly_subsumes D_subsumes D" using backward_subsumption_Q by auto moreover from D_subsumes_p have "subsumes D_subsumes C" using d subsumes_trans unfolding strictly_subsumes_def by blast moreover from backward_subsumption_Q have "D_subsumes \ clss_of_state (Sup_state Sts)" using D_subsumes_p llen by (metis (no_types) UnI1 N_of_state.simps llength_lmap lnth_lmap lnth_subset_Sup_llist rev_subsetD Sup_state_def) ultimately have False using d_least unfolding subsumes_def by auto } ultimately show ?thesis using d_in_q by auto next case (backward_reduction_Q E L' N L \ D' P Q) { assume "D' + {#L#} = D" then have D'_p: "strictly_subsumes D' D \ D' \ ?Ps (Suc l)" using subset_strictly_subsumes[of D' D] backward_reduction_Q by auto then have subc: "subsumes D' C" using d(3) subsumes_trans unfolding strictly_subsumes_def by auto from D'_p have "D' \ clss_of_state (Sup_state Sts)" using llen by (metis (no_types) UnI1 P_of_state.simps llength_lmap lnth_lmap lnth_subset_Sup_llist subsetCE sup_ge2 Sup_state_def) then have False using d_least D'_p subc by auto } then show ?thesis using backward_reduction_Q d_in_q by auto qed (use d_in_q in auto) qed have D_in_Sts: "D \ Q_of_state (lnth Sts l)" and D_in_Sts_Suc: "D \ Q_of_state (lnth Sts (Suc l))" if l_i: "l \ i" and enat: "enat (Suc l) < llength Sts" for l proof - show "D \ Q_of_state (lnth Sts l)" using l_i enat apply (induction "l - i" arbitrary: l) subgoal using d by auto subgoal using d(1) in_Sts_in_Sts_Suc by (metis (no_types, lifting) Suc_ile_eq add_Suc_right add_diff_cancel_left' le_SucE le_Suc_ex less_imp_le) done then show "D \ Q_of_state (lnth Sts (Suc l))" using l_i enat in_Sts_in_Sts_Suc by blast qed have "i \ x \ enat x < llength Sts \ D \ Q_of_state (lnth Sts x)" for x apply (cases x) subgoal using d(1) by (auto intro!: exI[of _ i] simp: less_Suc_eq) subgoal for x' using d(1) D_in_Sts_Suc[of x'] by (cases \i \ x'\) (auto simp: not_less_eq_eq) done then have "D \ Liminf_llist (lmap Q_of_state Sts)" unfolding Liminf_llist_def by (auto intro!: exI[of _ i] simp: d) then show ?thesis unfolding Liminf_state_def by auto qed lemma from_P_to_Q: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and ns: "Gs = lmap grounding_of_state Sts" and c: "C \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" and d: "D \ P_of_state (lnth Sts i)" "enat i < llength Sts" "subsumes D C" and d_least: "\E \ {E. E \ (clss_of_state (Sup_state Sts)) \ subsumes E C}. \ strictly_subsumes E D" shows "\l. D \ Q_of_state (lnth Sts l) \ enat l < llength Sts" proof - let ?Ns = "\i. N_of_state (lnth Sts i)" let ?Ps = "\i. P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" have ground_C: "is_ground_cls C" using c using Liminf_grounding_of_state_ground ns by auto have derivns: "chain sr_ext.derive Gs" using ground_derive_chain deriv ns by auto have "\\. 
D \ \ = C \ is_ground_subst \" using instance_if_subsumed_and_in_limit[OF deriv] ns c d by blast then obtain \ where \: "D \ \ = C" "is_ground_subst \" by auto obtain l where l_p: "D \ P_of_state (lnth Sts l) \ D \ P_of_state (lnth Sts (Suc l)) \ i \ l \ enat (Suc l) < llength Sts" using fair using eventually_removed_from_P d unfolding ns by auto then have l_Gs: "enat (Suc l) < llength Gs" using ns by auto from l_p have "lnth Sts l \ lnth Sts (Suc l)" using deriv using chain_lnth_rel by auto then show ?thesis proof (cases rule: RP.cases) case (backward_subsumption_P D' N D_twin P Q) note lrhs = this(1,2) and D'_p = this(3,4) then have twins: "D_twin = D" "?Ns (Suc l) = N" "?Ns l = N" "?Ps (Suc l) = P" "?Ps l = P \ {D_twin}" "?Qs (Suc l) = Q" "?Qs l = Q" using l_p by auto note D'_p = D'_p[unfolded twins(1)] then have subc: "subsumes D' C" unfolding strictly_subsumes_def subsumes_def using \ by (metis subst_cls_comp_subst subst_cls_mono_mset) from D'_p have "D' \ clss_of_state (Sup_state Sts)" unfolding twins(2)[symmetric] using l_p by (metis (no_types) UnI1 N_of_state.simps llength_lmap lnth_lmap lnth_subset_Sup_llist subsetCE Sup_state_def) then have False using d_least D'_p subc by auto then show ?thesis by auto next case (backward_reduction_P E L' N L \ D' P Q) then have twins: "D' + {#L#} = D" "?Ns (Suc l) = N" "?Ns l = N" "?Ps (Suc l) = P \ {D'}" "?Ps l = P \ {D' + {#L#}}" "?Qs (Suc l) = Q" "?Qs l = Q" using l_p by auto then have D'_p: "strictly_subsumes D' D \ D' \ ?Ps (Suc l)" using subset_strictly_subsumes[of D' D] by auto then have subc: "subsumes D' C" using d(3) subsumes_trans unfolding strictly_subsumes_def by auto from D'_p have "D' \ clss_of_state (Sup_state Sts)" using l_p by (metis (no_types) UnI1 P_of_state.simps llength_lmap lnth_lmap lnth_subset_Sup_llist subsetCE sup_ge2 Sup_state_def) then have False using d_least D'_p subc by auto then show ?thesis by auto next case (inference_computation N Q D_twin P) then have twins: "D_twin = D" "?Ps (Suc l) = P" "?Ps l = P \ {D_twin}" "?Qs (Suc l) = Q \ {D_twin}" "?Qs l = Q" using l_p by auto then show ?thesis using d \ l_p by auto qed (use l_p in auto) qed lemma from_N_to_P_or_Q: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and ns: "Gs = lmap grounding_of_state Sts" and c: "C \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" and d: "D \ N_of_state (lnth Sts i)" "enat i < llength Sts" "subsumes D C" and d_least: "\E \ {E. E \ (clss_of_state (Sup_state Sts)) \ subsumes E C}. \ strictly_subsumes E D" shows "\l D' \'. D' \ P_of_state (lnth Sts l) \ Q_of_state (lnth Sts l) \ enat l < llength Sts \ (\E \ {E. E \ (clss_of_state (Sup_state Sts)) \ subsumes E C}. \ strictly_subsumes E D') \ D' \ \' = C \ is_ground_subst \' \ subsumes D' C" proof - let ?Ns = "\i. N_of_state (lnth Sts i)" let ?Ps = "\i. P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" have ground_C: "is_ground_cls C" using c using Liminf_grounding_of_state_ground ns by auto have derivns: "chain sr_ext.derive Gs" using ground_derive_chain deriv ns by auto have "\\. D \ \ = C \ is_ground_subst \" using instance_if_subsumed_and_in_limit[OF deriv] ns c d by blast then obtain \ where \: "D \ \ = C" "is_ground_subst \" by auto from c have no_taut: "\ (\A. Pos A \# C \ Neg A \# C)" using sr.tautology_Rf by auto have "\l. 
D \ N_of_state (lnth Sts l) \ D \ N_of_state (lnth Sts (Suc l)) \ i \ l \ enat (Suc l) < llength Sts" using fair using eventually_removed_from_N d unfolding ns by auto then obtain l where l_p: "D \ N_of_state (lnth Sts l) \ D \ N_of_state (lnth Sts (Suc l)) \ i \ l \ enat (Suc l) < llength Sts" by auto then have l_Gs: "enat (Suc l) < llength Gs" using ns by auto from l_p have "lnth Sts l \ lnth Sts (Suc l)" using deriv using chain_lnth_rel by auto then show ?thesis proof (cases rule: RP.cases) case (tautology_deletion A D_twin N P Q) then have "D_twin = D" using l_p by auto then have "Pos (A \a \) \# C \ Neg (A \a \) \# C" using tautology_deletion(3,4) \ by (metis Melem_subst_cls eql_neg_lit_eql_atm eql_pos_lit_eql_atm) then have False using no_taut by metis then show ?thesis by blast next case (forward_subsumption D' P Q D_twin N) note lrhs = this(1,2) and D'_p = this(3,4) then have twins: "D_twin = D" "?Ns (Suc l) = N" "?Ns l = N \ {D_twin}" "?Ps (Suc l) = P " "?Ps l = P" "?Qs (Suc l) = Q" "?Qs l = Q" using l_p by auto note D'_p = D'_p[unfolded twins(1)] from D'_p(2) have subs: "subsumes D' C" using d(3) by (blast intro: subsumes_trans) moreover have "D' \ clss_of_state (Sup_state Sts)" using twins D'_p l_p unfolding Sup_state_def by simp (metis (no_types) contra_subsetD llength_lmap lnth_lmap lnth_subset_Sup_llist) ultimately have "\ strictly_subsumes D' D" using d_least by auto then have "subsumes D D'" unfolding strictly_subsumes_def using D'_p by auto then have v: "variants D D'" using D'_p unfolding variants_iff_subsumes by auto then have mini: "\E \ {E \ clss_of_state (Sup_state Sts). subsumes E C}. \ strictly_subsumes E D'" using d_least D'_p neg_strictly_subsumes_variants[of _ D D'] by auto from v have "\\'. D' \ \' = C" using \ variants_imp_exists_substitution variants_sym by (metis subst_cls_comp_subst) then have "\\'. D' \ \' = C \ is_ground_subst \'" using ground_C by (meson make_ground_subst refl) then obtain \' where \'_p: "D' \ \' = C \ is_ground_subst \'" by metis show ?thesis using D'_p twins l_p subs mini \'_p by auto next case (forward_reduction E L' P Q L \ D' N) then have twins: "D' + {#L#} = D" "?Ns (Suc l) = N \ {D'}" "?Ns l = N \ {D' + {#L#}}" "?Ps (Suc l) = P " "?Ps l = P" "?Qs (Suc l) = Q" "?Qs l = Q" using l_p by auto then have D'_p: "strictly_subsumes D' D \ D' \ ?Ns (Suc l)" using subset_strictly_subsumes[of D' D] by auto then have subc: "subsumes D' C" using d(3) subsumes_trans unfolding strictly_subsumes_def by blast from D'_p have "D' \ clss_of_state (Sup_state Sts)" using l_p by (metis (no_types) UnI1 N_of_state.simps llength_lmap lnth_lmap lnth_subset_Sup_llist subsetCE Sup_state_def) then have False using d_least D'_p subc by auto then show ?thesis by auto next case (clause_processing N D_twin P Q) then have twins: "D_twin = D" "?Ns (Suc l) = N" "?Ns l = N \ {D}" "?Ps (Suc l) = P \ {D}" "?Ps l = P" "?Qs (Suc l) = Q" "?Qs l = Q" using l_p by auto then show ?thesis using d \ l_p d_least by blast qed (use l_p in auto) qed lemma eventually_in_Qinf: assumes deriv: "chain (\) Sts" and D_p: "D \ clss_of_state (Sup_state Sts)" "subsumes D C" "\E \ {E. E \ (clss_of_state (Sup_state Sts)) \ subsumes E C}. \ strictly_subsumes E D" and fair: "fair_state_seq Sts" and ns: "Gs = lmap grounding_of_state Sts" and c: "C \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" and ground_C: "is_ground_cls C" shows "\D' \'. D' \ Q_of_state (Liminf_state Sts) \ D' \ \' = C \ is_ground_subst \'" proof - let ?Ns = "\i. N_of_state (lnth Sts i)" let ?Ps = "\i. 
P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" from D_p obtain i where i_p: "i < llength Sts" "D \ ?Ns i \ D \ ?Ps i \ D \ ?Qs i" unfolding Sup_state_def by simp_all (metis (no_types) Sup_llist_imp_exists_index llength_lmap lnth_lmap) have derivns: "chain sr_ext.derive Gs" using ground_derive_chain deriv ns by auto have "\\. D \ \ = C \ is_ground_subst \" using instance_if_subsumed_and_in_limit[OF deriv ns c] D_p i_p by blast then obtain \ where \: "D \ \ = C" "is_ground_subst \" by blast { assume a: "D \ ?Ns i" then obtain D' \' l where D'_p: "D' \ ?Ps l \ ?Qs l" "D' \ \' = C" "enat l < llength Sts" "is_ground_subst \'" "\E \ {E. E \ (clss_of_state (Sup_state Sts)) \ subsumes E C}. \ strictly_subsumes E D'" "subsumes D' C" using from_N_to_P_or_Q deriv fair ns c i_p(1) D_p(2) D_p(3) by blast then obtain l' where l'_p: "D' \ ?Qs l'" "l' < llength Sts" using from_P_to_Q[OF deriv fair ns c _ D'_p(3) D'_p(6) D'_p(5)] by blast then have "D' \ Q_of_state (Liminf_state Sts)" using from_Q_to_Q_inf[OF deriv fair ns c _ l'_p(2)] D'_p by auto then have ?thesis using D'_p by auto } moreover { assume a: "D \ ?Ps i" then obtain l' where l'_p: "D \ ?Qs l'" "l' < llength Sts" using from_P_to_Q[OF deriv fair ns c a i_p(1) D_p(2) D_p(3)] by auto then have "D \ Q_of_state (Liminf_state Sts)" using from_Q_to_Q_inf[OF deriv fair ns c l'_p(1) l'_p(2)] D_p(3) \(1) \(2) D_p(2) by auto then have ?thesis using D_p \ by auto } moreover { assume a: "D \ ?Qs i" then have "D \ Q_of_state (Liminf_state Sts)" using from_Q_to_Q_inf[OF deriv fair ns c a i_p(1)] \ D_p(2,3) by auto then have ?thesis using D_p \ by auto } ultimately show ?thesis using i_p by auto qed text \ The following corresponds to Lemma 4.11: \ lemma fair_imp_Liminf_minus_Rf_subset_ground_Liminf_state: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and ns: "Gs = lmap grounding_of_state Sts" shows "Liminf_llist Gs - sr.Rf (Liminf_llist Gs) \ grounding_of_clss (Q_of_state (Liminf_state Sts))" proof let ?Ns = "\i. N_of_state (lnth Sts i)" let ?Ps = "\i. P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" have SQinf: "clss_of_state (Liminf_state Sts) = Liminf_llist (lmap Q_of_state Sts)" using fair unfolding fair_state_seq_def Liminf_state_def by auto fix C assume C_p: "C \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" then have "C \ Sup_llist Gs" using Liminf_llist_subset_Sup_llist[of Gs] by blast then obtain D_proto where "D_proto \ clss_of_state (Sup_state Sts) \ subsumes D_proto C" using in_Sup_llist_in_Sup_state unfolding ns subsumes_def by blast then obtain D where D_p: "D \ clss_of_state (Sup_state Sts)" "subsumes D C" "\E \ {E. E \ clss_of_state (Sup_state Sts) \ subsumes E C}. \ strictly_subsumes E D" using strictly_subsumes_has_minimum[of "{E. E \ clss_of_state (Sup_state Sts) \ subsumes E C}"] by auto have ground_C: "is_ground_cls C" using C_p using Liminf_grounding_of_state_ground ns by auto have "\D' \'. 
D' \ Q_of_state (Liminf_state Sts) \ D' \ \' = C \ is_ground_subst \'" using eventually_in_Qinf[of D C Gs] using D_p(1-3) deriv fair ns C_p ground_C by auto then obtain D' \' where D'_p: "D' \ Q_of_state (Liminf_state Sts) \ D' \ \' = C \ is_ground_subst \'" by blast then have "D' \ clss_of_state (Liminf_state Sts)" by simp then have "C \ grounding_of_state (Liminf_state Sts)" unfolding grounding_of_clss_def grounding_of_cls_def using D'_p by auto then show "C \ grounding_of_clss (Q_of_state (Liminf_state Sts))" using SQinf fair fair_state_seq_def by auto qed text \ The following corresponds to (one direction of) Theorem 4.13: \ lemma subseteq_Liminf_state_eventually_always: fixes CC assumes "finite CC" and "CC \ {}" and "CC \ Q_of_state (Liminf_state Sts)" shows "\j. enat j < llength Sts \ (\j' \ enat j. j' < llength Sts \ CC \ Q_of_state (lnth Sts j'))" proof - from assms(3) have "\C \ CC. \j. enat j < llength Sts \ (\j' \ enat j. j' < llength Sts \ C \ Q_of_state (lnth Sts j'))" unfolding Liminf_state_def Liminf_llist_def by force then obtain f where f_p: "\C \ CC. f C < llength Sts \ (\j' \ enat (f C). j' < llength Sts \ C \ Q_of_state (lnth Sts j'))" by moura define j :: nat where "j = Max (f ` CC)" have "enat j < llength Sts" unfolding j_def using f_p assms(1) by (metis (mono_tags) Max_in assms(2) finite_imageI imageE image_is_empty) moreover have "\C j'. C \ CC \ enat j \ j' \ j' < llength Sts \ C \ Q_of_state (lnth Sts j')" proof (intro allI impI) fix C :: "'a clause" and j' :: nat assume a: "C \ CC" "enat j \ enat j'" "enat j' < llength Sts" then have "f C \ j'" unfolding j_def using assms(1) Max.bounded_iff by auto then show "C \ Q_of_state (lnth Sts j')" using f_p a by auto qed ultimately show ?thesis by auto qed lemma empty_clause_in_Q_of_Liminf_state: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and empty_in: "{#} \ Liminf_llist (lmap grounding_of_state Sts)" shows "{#} \ Q_of_state (Liminf_state Sts)" proof - define Gs :: "'a clause set llist" where ns: "Gs = lmap grounding_of_state Sts" from empty_in have in_Liminf_not_Rf: "{#} \ Liminf_llist Gs - sr.Rf (Liminf_llist Gs)" unfolding ns sr.Rf_def by auto then have "{#} \ grounding_of_clss (Q_of_state (Liminf_state Sts))" using fair_imp_Liminf_minus_Rf_subset_ground_Liminf_state[OF deriv fair ns] by auto then show ?thesis unfolding grounding_of_clss_def grounding_of_cls_def by auto qed lemma grounding_of_state_Liminf_state_subseteq: "grounding_of_state (Liminf_state Sts) \ Liminf_llist (lmap grounding_of_state Sts)" proof fix C :: "'a clause" assume "C \ grounding_of_state (Liminf_state Sts)" then obtain D \ where D_\_p: "D \ clss_of_state (Liminf_state Sts)" "D \ \ = C" "is_ground_subst \" unfolding grounding_of_clss_def grounding_of_cls_def by auto then have ii: "D \ Liminf_llist (lmap N_of_state Sts) \ D \ Liminf_llist (lmap P_of_state Sts) \ D \ Liminf_llist (lmap Q_of_state Sts)" unfolding Liminf_state_def by simp then have "C \ Liminf_llist (lmap grounding_of_clss (lmap N_of_state Sts)) \ C \ Liminf_llist (lmap grounding_of_clss (lmap P_of_state Sts)) \ C \ Liminf_llist (lmap grounding_of_clss (lmap Q_of_state Sts))" unfolding Liminf_llist_def grounding_of_clss_def grounding_of_cls_def using D_\_p apply - apply (erule disjE) subgoal apply (rule disjI1) using D_\_p by auto subgoal apply (erule disjE) subgoal apply (rule disjI2) apply (rule disjI1) using D_\_p by auto subgoal apply (rule disjI2) apply (rule disjI2) using D_\_p by auto done done then show "C \ Liminf_llist (lmap grounding_of_state Sts)" 
unfolding Liminf_llist_def grounding_of_clss_def by auto qed theorem RP_sound: assumes deriv: "chain (\) Sts" and "{#} \ clss_of_state (Liminf_state Sts)" shows "\ satisfiable (grounding_of_state (lhd Sts))" proof - from assms have "{#} \ grounding_of_state (Liminf_state Sts)" unfolding grounding_of_clss_def by (force intro: ex_ground_subst) then have "{#} \ Liminf_llist (lmap grounding_of_state Sts)" using grounding_of_state_Liminf_state_subseteq by auto then have "\ satisfiable (Liminf_llist (lmap grounding_of_state Sts))" using true_clss_def by auto then have "\ satisfiable (lhd (lmap grounding_of_state Sts))" using sr_ext.sat_limit_iff ground_derive_chain deriv by blast then show ?thesis using chain_not_lnull deriv by fastforce qed theorem RP_saturated_if_fair: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and empty_Q0: "Q_of_state (lhd Sts) = {}" shows "sr.saturated_upto (Liminf_llist (lmap grounding_of_state Sts))" proof - define Gs :: "'a clause set llist" where ns: "Gs = lmap grounding_of_state Sts" let ?N = "\i. grounding_of_state (lnth Sts i)" let ?Ns = "\i. N_of_state (lnth Sts i)" let ?Ps = "\i. P_of_state (lnth Sts i)" let ?Qs = "\i. Q_of_state (lnth Sts i)" have ground_ns_in_ground_limit_st: "Liminf_llist Gs - sr.Rf (Liminf_llist Gs) \ grounding_of_clss (Q_of_state (Liminf_state Sts))" using fair deriv fair_imp_Liminf_minus_Rf_subset_ground_Liminf_state ns by blast have derivns: "chain sr_ext.derive Gs" using ground_derive_chain deriv ns by auto { fix \ :: "'a inference" assume \_p: "\ \ gr.ord_\" let ?CC = "side_prems_of \" let ?DA = "main_prem_of \" let ?E = "concl_of \" assume a: "set_mset ?CC \ {?DA} \ Liminf_llist (lmap grounding_of_state Sts) - sr.Rf (Liminf_llist (lmap grounding_of_state Sts))" have ground_ground_Liminf: "is_ground_clss (Liminf_llist (lmap grounding_of_state Sts))" using Liminf_grounding_of_state_ground unfolding is_ground_clss_def by auto have ground_cc: "is_ground_clss (set_mset ?CC)" using a ground_ground_Liminf is_ground_clss_def by auto have ground_da: "is_ground_cls ?DA" using a grounding_ground singletonI ground_ground_Liminf by (simp add: Liminf_grounding_of_state_ground) from \_p obtain CAs AAs As where CAs_p: "gr.ord_resolve CAs ?DA AAs As ?E \ mset CAs = ?CC" unfolding gr.ord_\_def by auto have DA_CAs_in_ground_Liminf: "{?DA} \ set CAs \ grounding_of_clss (Q_of_state (Liminf_state Sts))" using a CAs_p fair unfolding fair_state_seq_def by (metis (no_types, lifting) Un_empty_left ground_ns_in_ground_limit_st a ns set_mset_mset subset_trans sup_commute) then have ground_cas: "is_ground_cls_list CAs" using CAs_p unfolding is_ground_cls_list_def by auto have "\\. ord_resolve S_Q CAs ?DA AAs As \ ?E" by (rule ground_ord_resolve_imp_ord_resolve[OF ground_da ground_cas gr.ground_resolution_with_selection_axioms CAs_p[THEN conjunct1]]) then obtain \ where \_p: "ord_resolve S_Q CAs ?DA AAs As \ ?E" by auto then obtain \s' \' \2' CAs' DA' AAs' As' \' E' where s_p: "is_ground_subst \'" "is_ground_subst_list \s'" "is_ground_subst \2'" "ord_resolve_rename S CAs' DA' AAs' As' \' E'" "CAs' \\cl \s' = CAs" "DA' \ \' = ?DA" "E' \ \2' = ?E" "{DA'} \ set CAs' \ Q_of_state (Liminf_state Sts)" using ord_resolve_rename_lifting[OF sel_stable, of "Q_of_state (Liminf_state Sts)" CAs ?DA] \_p[unfolded S_Q_def] selection_axioms DA_CAs_in_ground_Liminf by metis from this(8) have "\j. 
enat j < llength Sts \ (set CAs' \ {DA'} \ ?Qs j)" unfolding Liminf_llist_def using subseteq_Liminf_state_eventually_always[of "{DA'} \ set CAs'"] by auto then obtain j where j_p: "is_least (\j. enat j < llength Sts \ set CAs' \ {DA'} \ ?Qs j) j" using least_exists[of "\j. enat j < llength Sts \ set CAs' \ {DA'} \ ?Qs j"] by force then have j_p': "enat j < llength Sts" "set CAs' \ {DA'} \ ?Qs j" unfolding is_least_def by auto then have jn0: "j \ 0" using empty_Q0 by (metis bot_eq_sup_iff gr_implies_not_zero insert_not_empty llength_lnull lnth_0_conv_lhd sup.orderE) then have j_adds_CAs': "\ set CAs' \ {DA'} \ ?Qs (j - 1)" "set CAs' \ {DA'} \ ?Qs j" using j_p unfolding is_least_def apply (metis (no_types) One_nat_def Suc_diff_Suc Suc_ile_eq diff_diff_cancel diff_zero less_imp_le less_one neq0_conv zero_less_diff) using j_p'(2) by blast have "lnth Sts (j - 1) \ lnth Sts j" using j_p'(1) jn0 deriv chain_lnth_rel[of _ _ "j - 1"] by force then obtain C' where C'_p: "?Ns (j - 1) = {}" "?Ps (j - 1) = ?Ps j \ {C'}" "?Qs j = ?Qs (j - 1) \ {C'}" "?Ns j = concls_of (ord_FO_resolution.inferences_between (?Qs (j - 1)) C')" "C' \ set CAs' \ {DA'}" "C' \ ?Qs (j - 1)" using j_adds_CAs' by (induction rule: RP.cases) auto have "E' \ ?Ns j" proof - have "E' \ concls_of (ord_FO_resolution.inferences_between (Q_of_state (lnth Sts (j - 1))) C')" unfolding infer_from_def ord_FO_\_def inference_system.inferences_between_def apply (rule_tac x = "Infer (mset CAs') DA' E'" in image_eqI) subgoal by auto subgoal unfolding infer_from_def by (rule ord_resolve_rename.cases[OF s_p(4)]) (use s_p(4) C'_p(3,5) j_p'(2) in force) done then show ?thesis using C'_p(4) by auto qed then have "E' \ clss_of_state (lnth Sts j)" using j_p' by auto then have "?E \ grounding_of_state (lnth Sts j)" using s_p(7) s_p(3) unfolding grounding_of_clss_def grounding_of_cls_def by force then have "\ \ sr.Ri (grounding_of_state (lnth Sts j))" using sr.Ri_effective \_p by auto then have "\ \ sr_ext_Ri (?N j)" unfolding sr_ext_Ri_def by auto then have "\ \ sr_ext_Ri (Sup_llist (lmap grounding_of_state Sts))" using j_p' contra_subsetD llength_lmap lnth_lmap lnth_subset_Sup_llist sr_ext.Ri_mono by smt then have "\ \ sr_ext_Ri (Liminf_llist (lmap grounding_of_state Sts))" using sr_ext.Ri_limit_Sup[of Gs] derivns ns by blast } then have "sr_ext.saturated_upto (Liminf_llist (lmap grounding_of_state Sts))" unfolding sr_ext.saturated_upto_def sr_ext.inferences_from_def infer_from_def sr_ext_Ri_def by auto then show ?thesis using gd_ord_\_ngd_ord_\ sr.redundancy_criterion_axioms redundancy_criterion_standard_extension_saturated_upto_iff[of gr.ord_\] unfolding sr_ext_Ri_def by auto qed corollary RP_complete_if_fair: assumes deriv: "chain (\) Sts" and fair: "fair_state_seq Sts" and empty_Q0: "Q_of_state (lhd Sts) = {}" and unsat: "\ satisfiable (grounding_of_state (lhd Sts))" shows "{#} \ Q_of_state (Liminf_state Sts)" proof - have "\ satisfiable (Liminf_llist (lmap grounding_of_state Sts))" using unsat sr_ext.sat_limit_iff[OF ground_derive_chain] chain_not_lnull deriv by fastforce moreover have "sr.saturated_upto (Liminf_llist (lmap grounding_of_state Sts))" by (rule RP_saturated_if_fair[OF deriv fair empty_Q0, simplified]) ultimately have "{#} \ Liminf_llist (lmap grounding_of_state Sts)" using sr.saturated_upto_complete_if by auto then show ?thesis using empty_clause_in_Q_of_Liminf_state[OF deriv fair] by auto qed end end end diff --git a/thys/POPLmark-deBruijn/POPLmark.thy b/thys/POPLmark-deBruijn/POPLmark.thy --- a/thys/POPLmark-deBruijn/POPLmark.thy 
+++ b/thys/POPLmark-deBruijn/POPLmark.thy @@ -1,1547 +1,1536 @@ (* Title: POPLmark/POPLmark.thy Author: Stefan Berghofer, TU Muenchen, 2005 *) theory POPLmark imports Basis begin section \Formalization of the basic calculus\ text \ \label{sec:basic-calculus} In this section, we describe the formalization of the basic calculus without records. As a main result, we prove {\it type safety}, presented as two separate theorems, namely {\it preservation} and {\it progress}. \ subsection \Types and Terms\ text \ The types of System \fsub{} are represented by the following datatype: \ datatype type = TVar nat | Top | Fun type type (infixr "\" 200) | TyAll type type ("(3\<:_./ _)" [0, 10] 10) text \ The subtyping and typing judgements depend on a {\it context} (or environment) @{term \} containing bindings for term and type variables. A context is a list of bindings, where the @{term i}th element @{term "\\i\"} corresponds to the variable with index @{term i}. \ datatype binding = VarB type | TVarB type type_synonym env = "binding list" text \ In contrast to the usual presentation of type systems often found in textbooks, new elements are added to the left of a context using the \Cons\ operator \\\ for lists. We write @{term is_TVarB} for the predicate that returns @{term True} when applied to a type variable binding, function @{term type_ofB} extracts the type contained in a binding, and @{term "mapB f"} applies @{term f} to the type contained in a binding. \ primrec is_TVarB :: "binding \ bool" where "is_TVarB (VarB T) = False" | "is_TVarB (TVarB T) = True" primrec type_ofB :: "binding \ type" where "type_ofB (VarB T) = T" | "type_ofB (TVarB T) = T" primrec mapB :: "(type \ type) \ binding \ binding" where "mapB f (VarB T) = VarB (f T)" | "mapB f (TVarB T) = TVarB (f T)" text \ The following datatype represents the terms of System \fsub{}: \ datatype trm = Var nat | Abs type trm ("(3\:_./ _)" [0, 10] 10) | TAbs type trm ("(3\<:_./ _)" [0, 10] 10) | App trm trm (infixl "\" 200) | TApp trm type (infixl "\\<^sub>\" 200) subsection \Lifting and Substitution\ text \ One of the central operations of $\lambda$-calculus is {\it substitution}. In order to avoid that free variables in a term or type get ``captured'' when substituting it for a variable occurring in the scope of a binder, we have to increment the indices of its free variables during substitution. This is done by the lifting functions \\\<^sub>\ n k\ and \\ n k\ for types and terms, respectively, which increment the indices of all free variables with indices \\ k\ by @{term n}. The lifting functions on types and terms are defined by \ primrec liftT :: "nat \ nat \ type \ type" ("\\<^sub>\") where "\\<^sub>\ n k (TVar i) = (if i < k then TVar i else TVar (i + n))" | "\\<^sub>\ n k Top = Top" | "\\<^sub>\ n k (T \ U) = \\<^sub>\ n k T \ \\<^sub>\ n k U" | "\\<^sub>\ n k (\<:T. U) = (\<:\\<^sub>\ n k T. \\<^sub>\ n (k + 1) U)" primrec lift :: "nat \ nat \ trm \ trm" ("\") where "\ n k (Var i) = (if i < k then Var i else Var (i + n))" | "\ n k (\:T. t) = (\:\\<^sub>\ n k T. \ n (k + 1) t)" | "\ n k (\<:T. t) = (\<:\\<^sub>\ n k T. \ n (k + 1) t)" | "\ n k (s \ t) = \ n k s \ \ n k t" | "\ n k (t \\<^sub>\ T) = \ n k t \\<^sub>\ \\<^sub>\ n k T" text \ It is useful to also define an ``unlifting'' function \\\<^sub>\ n k\ for decrementing all free variables with indices \\ k\ by @{term n}. 
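To see how the cutoff @{term k} behaves, the lifting function on types can be transcribed almost verbatim into a small executable sketch. The following Haskell fragment is only an editorial illustration and is not part of the formalization; the constructor names mirror the \type\ datatype above:

  data Type = TVar Int | Top | Fun Type Type | TyAll Type Type
    deriving (Eq, Show)

  -- liftT n k shifts every type variable with index >= k up by n and
  -- raises the cutoff by one when it passes under the binder of TyAll
  liftT :: Int -> Int -> Type -> Type
  liftT n k (TVar i)    = if i < k then TVar i else TVar (i + n)
  liftT _ _ Top         = Top
  liftT n k (Fun t u)   = Fun (liftT n k t) (liftT n k u)
  liftT n k (TyAll t u) = TyAll (liftT n k t) (liftT n (k + 1) u)

  -- For instance, lifting by one under a quantifier leaves the bound
  -- variable (index 0) untouched and shifts the free variable (index 1):
  --   liftT 1 0 (TyAll Top (Fun (TVar 0) (TVar 1)))
  --     == TyAll Top (Fun (TVar 0) (TVar 2))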
Moreover, we need several substitution functions, denoted by \mbox{\T[k \\<^sub>\ S]\<^sub>\\}, \mbox{\t[k \\<^sub>\ S]\}, and \mbox{\t[k \ s]\}, which substitute type variables in types, type variables in terms, and term variables in terms, respectively. They are defined as follows: \ primrec substTT :: "type \ nat \ type \ type" ("_[_ \\<^sub>\ _]\<^sub>\" [300, 0, 0] 300) where "(TVar i)[k \\<^sub>\ S]\<^sub>\ = (if k < i then TVar (i - 1) else if i = k then \\<^sub>\ k 0 S else TVar i)" | "Top[k \\<^sub>\ S]\<^sub>\ = Top" | "(T \ U)[k \\<^sub>\ S]\<^sub>\ = T[k \\<^sub>\ S]\<^sub>\ \ U[k \\<^sub>\ S]\<^sub>\" | "(\<:T. U)[k \\<^sub>\ S]\<^sub>\ = (\<:T[k \\<^sub>\ S]\<^sub>\. U[k+1 \\<^sub>\ S]\<^sub>\)" primrec decT :: "nat \ nat \ type \ type" ("\\<^sub>\") where "\\<^sub>\ 0 k T = T" | "\\<^sub>\ (Suc n) k T = \\<^sub>\ n k (T[k \\<^sub>\ Top]\<^sub>\)" primrec subst :: "trm \ nat \ trm \ trm" ("_[_ \ _]" [300, 0, 0] 300) where "(Var i)[k \ s] = (if k < i then Var (i - 1) else if i = k then \ k 0 s else Var i)" | "(t \ u)[k \ s] = t[k \ s] \ u[k \ s]" | "(t \\<^sub>\ T)[k \ s] = t[k \ s] \\<^sub>\ \\<^sub>\ 1 k T" | "(\:T. t)[k \ s] = (\:\\<^sub>\ 1 k T. t[k+1 \ s])" | "(\<:T. t)[k \ s] = (\<:\\<^sub>\ 1 k T. t[k+1 \ s])" primrec substT :: "trm \ nat \ type \ trm" ("_[_ \\<^sub>\ _]" [300, 0, 0] 300) where "(Var i)[k \\<^sub>\ S] = (if k < i then Var (i - 1) else Var i)" | "(t \ u)[k \\<^sub>\ S] = t[k \\<^sub>\ S] \ u[k \\<^sub>\ S]" | "(t \\<^sub>\ T)[k \\<^sub>\ S] = t[k \\<^sub>\ S] \\<^sub>\ T[k \\<^sub>\ S]\<^sub>\" | "(\:T. t)[k \\<^sub>\ S] = (\:T[k \\<^sub>\ S]\<^sub>\. t[k+1 \\<^sub>\ S])" | "(\<:T. t)[k \\<^sub>\ S] = (\<:T[k \\<^sub>\ S]\<^sub>\. t[k+1 \\<^sub>\ S])" text \ Lifting and substitution extends to typing contexts as follows: \ primrec liftE :: "nat \ nat \ env \ env" ("\\<^sub>e") where "\\<^sub>e n k [] = []" | "\\<^sub>e n k (B \ \) = mapB (\\<^sub>\ n (k + \\\)) B \ \\<^sub>e n k \" primrec substE :: "env \ nat \ type \ env" ("_[_ \\<^sub>\ _]\<^sub>e" [300, 0, 0] 300) where "[][k \\<^sub>\ T]\<^sub>e = []" | "(B \ \)[k \\<^sub>\ T]\<^sub>e = mapB (\U. U[k + \\\ \\<^sub>\ T]\<^sub>\) B \ \[k \\<^sub>\ T]\<^sub>e" primrec decE :: "nat \ nat \ env \ env" ("\\<^sub>e") where "\\<^sub>e 0 k \ = \" | "\\<^sub>e (Suc n) k \ = \\<^sub>e n k (\[k \\<^sub>\ Top]\<^sub>e)" text \ Note that in a context of the form @{term "B \ \"}, all variables in @{term B} with indices smaller than the length of @{term \} refer to entries in @{term \} and therefore must not be affected by substitution and lifting. This is the reason why an additional offset @{term "\\\"} needs to be added to the index @{term k} in the second clauses of the above functions. Some standard properties of lifting and substitution, which can be proved by structural induction on terms and types, are proved below. Properties of this kind are quite standard for encodings using de Bruijn indices and can also be found in papers by Barras and Werner \cite{Barras-Werner-JAR} and Nipkow \cite{Nipkow-JAR01}. \ lemma liftE_length [simp]: "\\\<^sub>e n k \\ = \\\" by (induct \) simp_all lemma substE_length [simp]: "\\[k \\<^sub>\ U]\<^sub>e\ = \\\" by (induct \) simp_all lemma liftE_nth [simp]: "(\\<^sub>e n k \)\i\ = map_option (mapB (\\<^sub>\ n (k + \\\ - i - 1))) (\\i\)" apply (induct \ arbitrary: i) apply simp apply simp apply (case_tac i) apply simp apply simp done lemma substE_nth [simp]: "(\[0 \\<^sub>\ T]\<^sub>e)\i\ = map_option (mapB (\U. 
U[\\\ - i - 1 \\<^sub>\ T]\<^sub>\)) (\\i\)" apply (induct \ arbitrary: i) apply simp apply simp apply (case_tac i) apply simp apply simp done lemma liftT_liftT [simp]: "i \ j \ j \ i + m \ \\<^sub>\ n j (\\<^sub>\ m i T) = \\<^sub>\ (m + n) i T" by (induct T arbitrary: i j m n) simp_all lemma liftT_liftT' [simp]: "i + m \ j \ \\<^sub>\ n j (\\<^sub>\ m i T) = \\<^sub>\ m i (\\<^sub>\ n (j - m) T)" apply (induct T arbitrary: i j m n) apply simp_all apply arith apply (subgoal_tac "Suc j - m = Suc (j - m)") apply simp apply arith done lemma lift_size [simp]: "size (\\<^sub>\ n k T) = size T" by (induct T arbitrary: k) simp_all lemma liftT0 [simp]: "\\<^sub>\ 0 i T = T" by (induct T arbitrary: i) simp_all lemma lift0 [simp]: "\ 0 i t = t" by (induct t arbitrary: i) simp_all theorem substT_liftT [simp]: "k \ k' \ k' < k + n \ (\\<^sub>\ n k T)[k' \\<^sub>\ U]\<^sub>\ = \\<^sub>\ (n - 1) k T" by (induct T arbitrary: k k') simp_all theorem liftT_substT [simp]: "k \ k' \ \\<^sub>\ n k (T[k' \\<^sub>\ U]\<^sub>\) = \\<^sub>\ n k T[k' + n \\<^sub>\ U]\<^sub>\" apply (induct T arbitrary: k k') apply simp_all done theorem liftT_substT' [simp]: "k' < k \ \\<^sub>\ n k (T[k' \\<^sub>\ U]\<^sub>\) = \\<^sub>\ n (k + 1) T[k' \\<^sub>\ \\<^sub>\ n (k - k') U]\<^sub>\" apply (induct T arbitrary: k k') apply simp_all apply arith done lemma liftT_substT_Top [simp]: "k \ k' \ \\<^sub>\ n k' (T[k \\<^sub>\ Top]\<^sub>\) = \\<^sub>\ n (Suc k') T[k \\<^sub>\ Top]\<^sub>\" apply (induct T arbitrary: k k') apply simp_all apply arith done lemma liftT_substT_strange: "\\<^sub>\ n k T[n + k \\<^sub>\ U]\<^sub>\ = \\<^sub>\ n (Suc k) T[k \\<^sub>\ \\<^sub>\ n 0 U]\<^sub>\" apply (induct T arbitrary: n k) apply simp_all apply (thin_tac "\x. PROP P x" for P :: "_ \ prop") apply (drule_tac x=n in meta_spec) apply (drule_tac x="Suc k" in meta_spec) apply simp done lemma lift_lift [simp]: "k \ k' \ k' \ k + n \ \ n' k' (\ n k t) = \ (n + n') k t" by (induct t arbitrary: k k') simp_all lemma substT_substT: "i \ j \ T[Suc j \\<^sub>\ V]\<^sub>\[i \\<^sub>\ U[j - i \\<^sub>\ V]\<^sub>\]\<^sub>\ = T[i \\<^sub>\ U]\<^sub>\[j \\<^sub>\ V]\<^sub>\" apply (induct T arbitrary: i j U V) apply (simp_all add: diff_Suc split: nat.split) apply (thin_tac "\x. PROP P x" for P :: "_ \ prop") apply (drule_tac x="Suc i" in meta_spec) apply (drule_tac x="Suc j" in meta_spec) apply simp done subsection \Well-formedness\ text \ \label{sec:wf} The subtyping and typing judgements to be defined in \secref{sec:subtyping} and \secref{sec:typing} may only operate on types and contexts that are well-formed. Intuitively, a type @{term T} is well-formed with respect to a context @{term \}, if all variables occurring in it are defined in @{term \}. More precisely, if @{term T} contains a type variable @{term "TVar i"}, then the @{term i}th element of @{term \} must exist and have the form @{term "TVarB U"}. \ inductive well_formed :: "env \ type \ bool" ("_ \\<^sub>w\<^sub>f _" [50, 50] 50) where wf_TVar: "\\i\ = \TVarB T\ \ \ \\<^sub>w\<^sub>f TVar i" | wf_Top: "\ \\<^sub>w\<^sub>f Top" | wf_arrow: "\ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ \ \\<^sub>w\<^sub>f T \ U" | wf_all: "\ \\<^sub>w\<^sub>f T \ TVarB T \ \ \\<^sub>w\<^sub>f U \ \ \\<^sub>w\<^sub>f (\<:T. 
U)" text \ A context @{term "\"} is well-formed, if all types occurring in it only refer to type variables declared ``further to the right'': \ inductive well_formedE :: "env \ bool" ("_ \\<^sub>w\<^sub>f" [50] 50) and well_formedB :: "env \ binding \ bool" ("_ \\<^sub>w\<^sub>f\<^sub>B _" [50, 50] 50) where "\ \\<^sub>w\<^sub>f\<^sub>B B \ \ \\<^sub>w\<^sub>f type_ofB B" | wf_Nil: "[] \\<^sub>w\<^sub>f" | wf_Cons: "\ \\<^sub>w\<^sub>f\<^sub>B B \ \ \\<^sub>w\<^sub>f \ B \ \ \\<^sub>w\<^sub>f" text \ The judgement \\ \\<^sub>w\<^sub>f\<^sub>B B\, which denotes well-formedness of the binding @{term B} with respect to context @{term \}, is just an abbreviation for \\ \\<^sub>w\<^sub>f type_ofB B\. We now present a number of properties of the well-formedness judgements that will be used in the proofs in the following sections. \ inductive_cases well_formed_cases: "\ \\<^sub>w\<^sub>f TVar i" "\ \\<^sub>w\<^sub>f Top" "\ \\<^sub>w\<^sub>f T \ U" "\ \\<^sub>w\<^sub>f (\<:T. U)" inductive_cases well_formedE_cases: "B \ \ \\<^sub>w\<^sub>f" lemma wf_TVarB: "\ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f \ TVarB T \ \ \\<^sub>w\<^sub>f" by (rule wf_Cons) simp_all lemma wf_VarB: "\ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f \ VarB T \ \ \\<^sub>w\<^sub>f" by (rule wf_Cons) simp_all lemma map_is_TVarb: "map is_TVarB \' = map is_TVarB \ \ \\i\ = \TVarB T\ \ \T. \'\i\ = \TVarB T\" apply (induct \ arbitrary: \' T i) apply simp apply (auto split: nat.split_asm) apply (case_tac z) apply simp_all done text \ A type that is well-formed in a context @{term \} is also well-formed in another context @{term \'} that contains type variable bindings at the same positions as @{term \}: \ lemma wf_equallength: assumes H: "\ \\<^sub>w\<^sub>f T" shows "map is_TVarB \' = map is_TVarB \ \ \' \\<^sub>w\<^sub>f T" using H by (induct arbitrary: \') (auto intro: well_formed.intros dest: map_is_TVarb) text \ A well-formed context of the form @{term "\ @ B \ \"} remains well-formed if we replace the binding @{term B} by another well-formed binding @{term B'}: \ lemma wfE_replace: "\ @ B \ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f\<^sub>B B' \ is_TVarB B' = is_TVarB B \ \ @ B' \ \ \\<^sub>w\<^sub>f" apply (induct \) apply simp apply (erule wf_Cons) apply (erule well_formedE_cases) apply assumption apply simp apply (erule well_formedE_cases) apply (rule wf_Cons) apply (case_tac a) apply simp apply (rule wf_equallength) apply assumption apply simp apply simp apply (rule wf_equallength) apply assumption apply simp apply simp done text \ The following weakening lemmas can easily be proved by structural induction on types and contexts: \ lemma wf_weaken: assumes H: "\ @ \ \\<^sub>w\<^sub>f T" shows "\\<^sub>e (Suc 0) 0 \ @ B \ \ \\<^sub>w\<^sub>f \\<^sub>\ (Suc 0) \\\ T" using H apply (induct "\ @ \" T arbitrary: \) apply simp_all apply (rule conjI) apply (rule impI) apply (rule wf_TVar) apply simp apply (rule impI) apply (rule wf_TVar) apply (subgoal_tac "Suc i - \\\ = Suc (i - \\\)") apply simp apply arith apply (rule wf_Top) apply (rule wf_arrow) apply simp apply simp apply (rule wf_all) apply simp - apply (drule_tac x="TVarB T \ \" in meta_spec) apply simp done lemma wf_weaken': "\ \\<^sub>w\<^sub>f T \ \ @ \ \\<^sub>w\<^sub>f \\<^sub>\ \\\ 0 T" apply (induct \) apply simp_all apply (drule_tac B=a in wf_weaken [of "[]", simplified]) apply simp done lemma wfE_weaken: "\ @ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f\<^sub>B B \ \\<^sub>e (Suc 0) 0 \ @ B \ \ \\<^sub>w\<^sub>f" apply (induct \) apply simp apply (rule wf_Cons) apply 
assumption+ apply simp apply (rule wf_Cons) apply (erule well_formedE_cases) apply (case_tac a) apply simp apply (rule wf_weaken) apply assumption apply simp apply (rule wf_weaken) apply assumption apply (erule well_formedE_cases) apply simp done text \ Intuitively, lemma \wf_weaken\ states that a type @{term T} which is well-formed in a context is still well-formed in a larger context, whereas lemma \wfE_weaken\ states that a well-formed context remains well-formed when extended with a well-formed binding. Owing to the encoding of variables using de Bruijn indices, the statements of the above lemmas involve additional lifting functions. The typing judgement, which will be described in \secref{sec:typing}, involves the lookup of variables in a context. It has already been pointed out earlier that each entry in a context may only depend on types declared ``further to the right''. To ensure that a type @{term T} stored at position @{term i} in an environment @{term \} is valid in the full environment, as opposed to the smaller environment consisting only of the entries in @{term \} at positions greater than @{term i}, we need to increment the indices of all free type variables in @{term T} by @{term "Suc i"}: \ lemma wf_liftB: assumes H: "\ \\<^sub>w\<^sub>f" shows "\\i\ = \VarB T\ \ \ \\<^sub>w\<^sub>f \\<^sub>\ (Suc i) 0 T" using H apply (induct arbitrary: i) apply simp apply (simp split: nat.split_asm) apply (frule_tac B="VarB T" in wf_weaken [of "[]", simplified]) apply simp+ apply (rename_tac nat) apply (drule_tac x=nat in meta_spec) apply simp apply (frule_tac T="\\<^sub>\ (Suc nat) 0 T" in wf_weaken [of "[]", simplified]) apply simp done text \ We also need lemmas stating that substitution of well-formed types preserves the well-formedness of types and contexts: \ theorem wf_subst: "\ @ B \ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ \[0 \\<^sub>\ U]\<^sub>e @ \ \\<^sub>w\<^sub>f T[\\\ \\<^sub>\ U]\<^sub>\" apply (induct T arbitrary: \) apply simp_all apply (rule conjI) apply (rule impI) apply (drule_tac \=\ and \="\[0 \\<^sub>\ U]\<^sub>e" in wf_weaken') apply simp apply (rule impI conjI)+ apply (erule well_formed_cases) apply (rule wf_TVar) apply (simp split: nat.split_asm) apply (rename_tac nat \ T nata) apply (subgoal_tac "\\\ \ nat - Suc 0") apply (subgoal_tac "nat - Suc \\\ = nata") apply (simp (no_asm_simp)) apply arith apply arith apply (rule impI) apply (erule well_formed_cases) apply (rule wf_TVar) apply simp apply (rule wf_Top) apply (erule well_formed_cases) apply (rule wf_arrow) apply simp+ apply (erule well_formed_cases) apply (rule wf_all) apply simp apply (thin_tac "\x. PROP P x" for P :: "_ \ prop") apply (drule_tac x="TVarB T1 \ \" in meta_spec) apply simp done theorem wfE_subst: "\ @ B \ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f U \ \[0 \\<^sub>\ U]\<^sub>e @ \ \\<^sub>w\<^sub>f" apply (induct \) apply simp apply (erule well_formedE_cases) apply assumption apply simp apply (case_tac a) apply (erule well_formedE_cases) apply (rule wf_Cons) apply simp apply (rule wf_subst) apply assumption+ apply simp apply (erule well_formedE_cases) apply (rule wf_Cons) apply simp apply (rule wf_subst) apply assumption+ done subsection \Subtyping\ text \ \label{sec:subtyping} We now come to the definition of the subtyping judgement \\ \ T <: U\. 
\ inductive subtyping :: "env \ type \ type \ bool" ("_ \ _ <: _" [50, 50, 50] 50) where SA_Top: "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f S \ \ \ S <: Top" | SA_refl_TVar: "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f TVar i \ \ \ TVar i <: TVar i" | SA_trans_TVar: "\\i\ = \TVarB U\ \ \ \ \\<^sub>\ (Suc i) 0 U <: T \ \ \ TVar i <: T" | SA_arrow: "\ \ T\<^sub>1 <: S\<^sub>1 \ \ \ S\<^sub>2 <: T\<^sub>2 \ \ \ S\<^sub>1 \ S\<^sub>2 <: T\<^sub>1 \ T\<^sub>2" | SA_all: "\ \ T\<^sub>1 <: S\<^sub>1 \ TVarB T\<^sub>1 \ \ \ S\<^sub>2 <: T\<^sub>2 \ \ \ (\<:S\<^sub>1. S\<^sub>2) <: (\<:T\<^sub>1. T\<^sub>2)" text \ The rules \SA_Top\ and \SA_refl_TVar\, which appear at the leaves of the derivation tree for a judgement @{term "\ \ T <: U"}, contain additional side conditions ensuring the well-formedness of the contexts and types involved. In order for the rule \SA_trans_TVar\ to be applicable, the context @{term \} must be of the form \mbox{@{term "\\<^sub>1 @ B \ \\<^sub>2"}}, where @{term "\\<^sub>1"} has the length @{term i}. Since the indices of variables in @{term B} can only refer to variables defined in @{term "\\<^sub>2"}, they have to be incremented by @{term "Suc i"} to ensure that they point to the right variables in the larger context \\\. \ lemma wf_subtype_env: assumes PQ: "\ \ P <: Q" shows "\ \\<^sub>w\<^sub>f" using PQ by induct assumption+ lemma wf_subtype: assumes PQ: "\ \ P <: Q" shows "\ \\<^sub>w\<^sub>f P \ \ \\<^sub>w\<^sub>f Q" using PQ by induct (auto intro: well_formed.intros elim!: wf_equallength) lemma wf_subtypeE: assumes H: "\ \ T <: U" and H': "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ P" shows "P" apply (rule H') apply (rule wf_subtype_env) apply (rule H) apply (rule wf_subtype [OF H, THEN conjunct1]) apply (rule wf_subtype [OF H, THEN conjunct2]) done text \ By induction on the derivation of @{term "\ \ T <: U"}, it can easily be shown that all types and contexts occurring in a subtyping judgement must be well-formed: \ lemma wf_subtype_conj: "\ \ T <: U \ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U" by (erule wf_subtypeE) iprover text \ By induction on types, we can prove that the subtyping relation is reflexive: \ lemma subtype_refl: \ \A.1\ "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f T \ \ \ T <: T" by (induct T arbitrary: \) (blast intro: subtyping.intros wf_Nil wf_TVarB elim: well_formed_cases)+ text \ The weakening lemma for the subtyping relation is proved in two steps: by induction on the derivation of the subtyping relation, we first prove that inserting a single type into the context preserves subtyping: \ lemma subtype_weaken: assumes H: "\ @ \ \ P <: Q" and wf: "\ \\<^sub>w\<^sub>f\<^sub>B B" shows "\\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>\ 1 \\\ P <: \\<^sub>\ 1 \\\ Q" using H proof (induct "\ @ \" P Q arbitrary: \) case SA_Top with wf show ?case by (auto intro: subtyping.SA_Top wfE_weaken wf_weaken) next case SA_refl_TVar with wf show ?case by (auto intro!: subtyping.SA_refl_TVar wfE_weaken dest: wf_weaken) next case (SA_trans_TVar i U T) thus ?case proof (cases "i < \\\") case True with SA_trans_TVar have "(\\<^sub>e 1 0 \ @ B \ \)\i\ = \TVarB (\\<^sub>\ 1 (\\\ - Suc i) U)\" by simp moreover from True SA_trans_TVar have "\\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>\ (Suc i) 0 (\\<^sub>\ 1 (\\\ - Suc i) U) <: \\<^sub>\ 1 \\\ T" by simp ultimately have "\\<^sub>e 1 0 \ @ B \ \ \ TVar i <: \\<^sub>\ 1 \\\ T" by (rule subtyping.SA_trans_TVar) with True show ?thesis by simp next case False then have "Suc i - \\\ = Suc (i - 
\\\)" by arith with False SA_trans_TVar have "(\\<^sub>e 1 0 \ @ B \ \)\Suc i\ = \TVarB U\" by simp moreover from False SA_trans_TVar have "\\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>\ (Suc (Suc i)) 0 U <: \\<^sub>\ 1 \\\ T" by simp ultimately have "\\<^sub>e 1 0 \ @ B \ \ \ TVar (Suc i) <: \\<^sub>\ 1 \\\ T" by (rule subtyping.SA_trans_TVar) with False show ?thesis by simp qed next case SA_arrow thus ?case by simp (iprover intro: subtyping.SA_arrow) next case (SA_all T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2 \) with SA_all(4) [of "TVarB T\<^sub>1 \ \"] show ?case by simp (iprover intro: subtyping.SA_all) qed text \ All cases are trivial, except for the \SA_trans_TVar\ case, which requires a case distinction on whether the index of the variable is smaller than @{term "\\\"}. The stronger result that appending a new context @{term \} to a context @{term \} preserves subtyping can be proved by induction on @{term \}, using the previous result in the induction step: \ lemma subtype_weaken': \ \A.2\ "\ \ P <: Q \ \ @ \ \\<^sub>w\<^sub>f \ \ @ \ \ \\<^sub>\ \\\ 0 P <: \\<^sub>\ \\\ 0 Q" apply (induct \) apply simp_all apply (erule well_formedE_cases) apply simp apply (drule_tac B="a" and \="\ @ \" in subtype_weaken [of "[]", simplified]) apply simp_all done text \ An unrestricted transitivity rule has the disadvantage that it can be applied in any situation. In order to make the above definition of the subtyping relation {\it syntax-directed}, the transitivity rule \SA_trans_TVar\ is restricted to the case where the type on the left-hand side of the \<:\ operator is a variable. However, the unrestricted transitivity rule can be derived from this definition. In order for the proof to go through, we have to simultaneously prove another property called {\it narrowing}. The two properties are proved by nested induction. The outer induction is on the size of the type @{term Q}, whereas the two inner inductions for proving transitivity and narrowing are on the derivation of the subtyping judgements. The transitivity property is needed in the proof of narrowing, which is by induction on the derivation of \mbox{@{term "\ @ TVarB Q \ \ \ M <: N"}}. In the case corresponding to the rule \SA_trans_TVar\, we must prove \mbox{@{term "\ @ TVarB P \ \ \ TVar i <: T"}}. The only interesting case is the one where @{term "i = \\\"}. By induction hypothesis, we know that @{term "\ @ TVarB P \ \ \ \\<^sub>\ (i+1) 0 Q <: T"} and @{term "(\ @ TVarB Q \ \)\i\ = \TVarB Q\"}. By assumption, we have @{term "\ \ P <: Q"} and hence \mbox{@{term "\ @ TVarB P \ \ \ \\<^sub>\ (i+1) 0 P <: \\<^sub>\ (i+1) 0 Q"}} by weakening. Since @{term "\\<^sub>\ (i+1) 0 Q"} has the same size as @{term Q}, we can use the transitivity property, which yields @{term "\ @ TVarB P \ \ \ \\<^sub>\ (i+1) 0 P <: T"}. The claim then follows easily by an application of \SA_trans_TVar\. 
\ lemma subtype_trans: \ \A.3\ "\ \ S <: Q \ \ \ Q <: T \ \ \ S <: T" "\ @ TVarB Q \ \ \ M <: N \ \ \ P <: Q \ \ @ TVarB P \ \ \ M <: N" using wf_measure_size proof (induct Q arbitrary: \ S T \ P M N rule: wf_induct_rule) case (less Q) { fix \ S T Q' assume "\ \ S <: Q'" then have "\ \ Q' <: T \ size Q = size Q' \ \ \ S <: T" proof (induct arbitrary: T) case SA_Top from SA_Top(3) show ?case by cases (auto intro: subtyping.SA_Top SA_Top) next case SA_refl_TVar show ?case by fact next case SA_trans_TVar thus ?case by (auto intro: subtyping.SA_trans_TVar) next case (SA_arrow \ T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) note SA_arrow' = SA_arrow from SA_arrow(5) show ?case proof cases case SA_Top with SA_arrow show ?thesis by (auto intro: subtyping.SA_Top wf_arrow elim: wf_subtypeE) next case (SA_arrow T\<^sub>1' T\<^sub>2') from SA_arrow SA_arrow' have "\ \ S\<^sub>1 \ S\<^sub>2 <: T\<^sub>1' \ T\<^sub>2'" by (auto intro!: subtyping.SA_arrow intro: less(1) [of "T\<^sub>1"] less(1) [of "T\<^sub>2"]) with SA_arrow show ?thesis by simp qed next case (SA_all \ T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) note SA_all' = SA_all from SA_all(5) show ?case proof cases case SA_Top with SA_all show ?thesis by (auto intro!: subtyping.SA_Top wf_all intro: wf_equallength elim: wf_subtypeE) next case (SA_all T\<^sub>1' T\<^sub>2') from SA_all SA_all' have "\ \ T\<^sub>1' <: S\<^sub>1" by - (rule less(1), simp_all) moreover from SA_all SA_all' have "TVarB T\<^sub>1' \ \ \ S\<^sub>2 <: T\<^sub>2" by - (rule less(2) [of _ "[]", simplified], simp_all) with SA_all SA_all' have "TVarB T\<^sub>1' \ \ \ S\<^sub>2 <: T\<^sub>2'" by - (rule less(1), simp_all) ultimately have "\ \ (\<:S\<^sub>1. S\<^sub>2) <: (\<:T\<^sub>1'. T\<^sub>2')" by (rule subtyping.SA_all) with SA_all show ?thesis by simp qed qed } note tr = this { case 1 thus ?case using refl by (rule tr) next case 2 from 2(1) show "\ @ TVarB P \ \ \ M <: N" proof (induct "\ @ TVarB Q \ \" M N arbitrary: \) case SA_Top with 2 show ?case by (auto intro!: subtyping.SA_Top intro: wf_equallength wfE_replace elim!: wf_subtypeE) next case SA_refl_TVar with 2 show ?case by (auto intro!: subtyping.SA_refl_TVar intro: wf_equallength wfE_replace elim!: wf_subtypeE) next case (SA_trans_TVar i U T) show ?case proof (cases "i < \\\") case True with SA_trans_TVar show ?thesis by (auto intro!: subtyping.SA_trans_TVar) next case False note False' = False show ?thesis proof (cases "i = \\\") case True from SA_trans_TVar have "(\ @ [TVarB P]) @ \ \\<^sub>w\<^sub>f" by (auto elim!: wf_subtypeE) with \\ \ P <: Q\ have "(\ @ [TVarB P]) @ \ \ \\<^sub>\ \\ @ [TVarB P]\ 0 P <: \\<^sub>\ \\ @ [TVarB P]\ 0 Q" by (rule subtype_weaken') with SA_trans_TVar True False have "\ @ TVarB P \ \ \ \\<^sub>\ (Suc \\\) 0 P <: T" by - (rule tr, simp+) with True and False and SA_trans_TVar show ?thesis by (auto intro!: subtyping.SA_trans_TVar) next case False with False' have "i - \\\ = Suc (i - \\\ - 1)" by arith with False False' SA_trans_TVar show ?thesis by - (rule subtyping.SA_trans_TVar, simp+) qed qed next case SA_arrow thus ?case by (auto intro!: subtyping.SA_arrow) next case (SA_all T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) thus ?case by (auto intro: subtyping.SA_all SA_all(4) [of "TVarB T\<^sub>1 \ \", simplified]) qed } qed text \ In the proof of the preservation theorem presented in \secref{sec:evaluation}, we will also need a substitution theorem, which is proved by induction on the subtyping derivation: \ lemma substT_subtype: \ \A.10\ assumes H: "\ @ TVarB Q \ \ \ S <: T" shows "\ \ P <: Q 
\ \[0 \\<^sub>\ P]\<^sub>e @ \ \ S[\\\ \\<^sub>\ P]\<^sub>\ <: T[\\\ \\<^sub>\ P]\<^sub>\" using H apply (induct "\ @ TVarB Q \ \" S T arbitrary: \) apply simp_all apply (rule SA_Top) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (rule wf_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (rule impI conjI)+ apply (rule subtype_refl) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply (drule_tac T=P and \="\[0 \\<^sub>\ P]\<^sub>e" in wf_weaken') apply simp apply (rule conjI impI)+ apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply (drule wf_subst) apply assumption apply simp apply (rule impI) apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply (drule wf_subst) apply assumption apply simp apply (rule conjI impI)+ apply simp apply (drule_tac \=\ and \="\[0 \\<^sub>\ P]\<^sub>e" in subtype_weaken') apply (erule wf_subtypeE)+ apply assumption apply simp apply (rule subtype_trans(1)) apply assumption+ apply (rule conjI impI)+ apply (rule SA_trans_TVar) apply (simp split: nat.split_asm) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (rule SA_trans_TVar) apply (simp split: nat.split_asm) apply (subgoal_tac "Suc (\\\ - Suc 0) = \\\") apply (simp (no_asm_simp)) apply arith apply (rule SA_arrow) apply simp+ apply (rule SA_all) apply simp - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp done lemma subst_subtype: assumes H: "\ @ VarB V \ \ \ T <: U" shows "\\<^sub>e 1 0 \ @ \ \ \\<^sub>\ 1 \\\ T <: \\<^sub>\ 1 \\\ U" using H apply (induct "\ @ VarB V \ \" T U arbitrary: \) apply simp_all apply (rule SA_Top) apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply (rule wf_subst) apply assumption apply (rule wf_Top) apply (rule impI conjI)+ apply (rule SA_Top) apply (rule wfE_subst) apply assumption apply (rule wf_Top)+ apply (rule conjI impI)+ apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply (drule wf_subst) apply (rule wf_Top) apply simp apply (rule impI) apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply (drule wf_subst) apply (rule wf_Top) apply simp apply (rule conjI impI)+ apply simp apply (rule conjI impI)+ apply (simp split: nat.split_asm) apply (rule SA_trans_TVar) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (rule SA_trans_TVar) apply simp apply (subgoal_tac "0 < \\\") apply simp apply arith apply (rule SA_arrow) apply simp+ apply (rule SA_all) apply simp - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp done subsection \Typing\ text \ \label{sec:typing} We are now ready to give a definition of the typing judgement \\ \ t : T\. \ inductive typing :: "env \ trm \ type \ bool" ("_ \ _ : _" [50, 50, 50] 50) where T_Var: "\ \\<^sub>w\<^sub>f \ \\i\ = \VarB U\ \ T = \\<^sub>\ (Suc i) 0 U \ \ \ Var i : T" | T_Abs: "VarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2 \ \ \ (\:T\<^sub>1. 
t\<^sub>2) : T\<^sub>1 \ \\<^sub>\ 1 0 T\<^sub>2" | T_App: "\ \ t\<^sub>1 : T\<^sub>1\<^sub>1 \ T\<^sub>1\<^sub>2 \ \ \ t\<^sub>2 : T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1 \ t\<^sub>2 : T\<^sub>1\<^sub>2" | T_TAbs: "TVarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2 \ \ \ (\<:T\<^sub>1. t\<^sub>2) : (\<:T\<^sub>1. T\<^sub>2)" | T_TApp: "\ \ t\<^sub>1 : (\<:T\<^sub>1\<^sub>1. T\<^sub>1\<^sub>2) \ \ \ T\<^sub>2 <: T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1 \\<^sub>\ T\<^sub>2 : T\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]\<^sub>\" | T_Sub: "\ \ t : S \ \ \ S <: T \ \ \ t : T" text \ Note that in the rule \T_Var\, the indices of the type @{term U} looked up in the context @{term \} need to be incremented in order for the type to be well-formed with respect to @{term \}. In the rule \T_Abs\, the type @{term "T\<^sub>2"} of the abstraction body @{term "t\<^sub>2"} may not contain the variable with index \0\, since it is a term variable. To compensate for the disappearance of the context element @{term "VarB T\<^sub>1"} in the conclusion of the typing rule, the indices of all free type variables in @{term "T\<^sub>2"} have to be decremented by \1\. \ theorem wf_typeE1: assumes H: "\ \ t : T" shows "\ \\<^sub>w\<^sub>f" using H by induct (blast elim: well_formedE_cases)+ theorem wf_typeE2: assumes H: "\ \ t : T" shows "\ \\<^sub>w\<^sub>f T" using H apply induct apply simp apply (rule wf_liftB) apply assumption+ apply (drule wf_typeE1)+ apply (erule well_formedE_cases)+ apply (rule wf_arrow) apply simp apply simp apply (rule wf_subst [of "[]", simplified]) apply assumption apply (rule wf_Top) apply (erule well_formed_cases) apply assumption apply (rule wf_all) apply (drule wf_typeE1) apply (erule well_formedE_cases) apply simp apply assumption apply (erule well_formed_cases) apply (rule wf_subst [of "[]", simplified]) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply assumption done text \ As with the subtyping judgement, we can again prove that all types and contexts involved in a typing judgement are well-formed: \ lemma wf_type_conj: "\ \ t : T \ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f T" by (frule wf_typeE1, drule wf_typeE2) iprover text \ The narrowing theorem for the typing judgement states that replacing the type of a variable in the context by a subtype preserves typability: \ lemma narrow_type: \ \A.7\ assumes H: "\ @ TVarB Q \ \ \ t : T" shows "\ \ P <: Q \ \ @ TVarB P \ \ \ t : T" using H apply (induct "\ @ TVarB Q \ \" t T arbitrary: \) apply simp_all apply (rule T_Var) apply (erule wfE_replace) apply (erule wf_subtypeE) apply simp+ apply (case_tac "i < \\\") apply simp apply (case_tac "i = \\\") apply simp apply (simp split: nat.split nat.split_asm)+ apply (rule T_Abs [simplified]) - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule_tac T\<^sub>1\<^sub>1=T\<^sub>1\<^sub>1 in T_App) apply simp+ apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule_tac T\<^sub>1\<^sub>1=T\<^sub>1\<^sub>1 in T_TApp) apply simp apply (rule subtype_trans(2)) apply assumption+ apply (rule_tac S=S in T_Sub) apply simp apply (rule subtype_trans(2)) apply assumption+ done lemma subtype_refl': assumes t: "\ \ t : T" shows "\ \ T <: T" proof (rule subtype_refl) from t show "\ \\<^sub>w\<^sub>f" by (rule wf_typeE1) from t show "\ \\<^sub>w\<^sub>f T" by (rule wf_typeE2) qed lemma Abs_type: \ \A.13(1)\ assumes H: "\ \ (\:S. s) : T" shows "\ \ T <: U \ U' \ (\S'. 
\ \ U <: S \ VarB S \ \ \ s : S' \ \ \ \\<^sub>\ 1 0 S' <: U' \ P) \ P" using H proof (induct \ "\:S. s" T arbitrary: U U' S s P) case (T_Abs T\<^sub>1 \ t\<^sub>2 T\<^sub>2) from \\ \ T\<^sub>1 \ \\<^sub>\ 1 0 T\<^sub>2 <: U \ U'\ obtain ty1: "\ \ U <: T\<^sub>1" and ty2: "\ \ \\<^sub>\ 1 0 T\<^sub>2 <: U'" by cases simp_all from ty1 \VarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2\ ty2 show ?case by (rule T_Abs) next case (T_Sub \ S' T) from \\ \ S' <: T\ and \\ \ T <: U \ U'\ have "\ \ S' <: U \ U'" by (rule subtype_trans(1)) then show ?case by (rule T_Sub) (rule T_Sub(5)) qed lemma Abs_type': assumes H: "\ \ (\:S. s) : U \ U'" and R: "\S'. \ \ U <: S \ VarB S \ \ \ s : S' \ \ \ \\<^sub>\ 1 0 S' <: U' \ P" shows "P" using H subtype_refl' [OF H] by (rule Abs_type) (rule R) lemma TAbs_type: \ \A.13(2)\ assumes H: "\ \ (\<:S. s) : T" shows "\ \ T <: (\<:U. U') \ (\S'. \ \ U <: S \ TVarB U \ \ \ s : S' \ TVarB U \ \ \ S' <: U' \ P) \ P" using H proof (induct \ "\<:S. s" T arbitrary: U U' S s P) case (T_TAbs T\<^sub>1 \ t\<^sub>2 T\<^sub>2) from \\ \ (\<:T\<^sub>1. T\<^sub>2) <: (\<:U. U')\ obtain ty1: "\ \ U <: T\<^sub>1" and ty2: "TVarB U \ \ \ T\<^sub>2 <: U'" by cases simp_all from \TVarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2\ have "TVarB U \ \ \ t\<^sub>2 : T\<^sub>2" using ty1 by (rule narrow_type [of "[]", simplified]) with ty1 show ?case using ty2 by (rule T_TAbs) next case (T_Sub \ S' T) from \\ \ S' <: T\ and \\ \ T <: (\<:U. U')\ have "\ \ S' <: (\<:U. U')" by (rule subtype_trans(1)) then show ?case by (rule T_Sub) (rule T_Sub(5)) qed lemma TAbs_type': assumes H: "\ \ (\<:S. s) : (\<:U. U')" and R: "\S'. \ \ U <: S \ TVarB U \ \ \ s : S' \ TVarB U \ \ \ S' <: U' \ P" shows "P" using H subtype_refl' [OF H] by (rule TAbs_type) (rule R) lemma T_eq: "\ \ t : T \ T = T' \ \ \ t : T'" by simp text \ The weakening theorem states that inserting a binding @{term B} does not affect typing: \ lemma type_weaken: assumes H: "\ @ \ \ t : T" shows "\ \\<^sub>w\<^sub>f\<^sub>B B \ \\<^sub>e 1 0 \ @ B \ \ \ \ 1 \\\ t : \\<^sub>\ 1 \\\ T" using H apply (induct "\ @ \" t T arbitrary: \) apply simp_all apply (rule conjI) apply (rule impI) apply (rule T_Var) apply (erule wfE_weaken) apply simp+ apply (rule impI) apply (rule T_Var) apply (erule wfE_weaken) apply assumption apply (subgoal_tac "Suc i - \\\ = Suc (i - \\\)") apply simp apply arith apply (rule refl) apply (rule T_Abs [THEN T_eq]) - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply simp apply simp apply (rule_tac T\<^sub>1\<^sub>1="\\<^sub>\ (Suc 0) \\\ T\<^sub>1\<^sub>1" in T_App) apply simp apply simp apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (erule_tac T_TApp [THEN T_eq]) apply (drule subtype_weaken) apply simp+ apply (case_tac \) apply (simp add: liftT_substT_strange [of _ 0, simplified])+ apply (rule_tac S="\\<^sub>\ (Suc 0) \\\ S" in T_Sub) apply simp apply (drule subtype_weaken) apply simp+ done text \ We can strengthen this result, so as to mean that concatenating a new context @{term \} to the context @{term \} preserves typing: \ lemma type_weaken': \ \A.5(6)\ "\ \ t : T \ \ @ \ \\<^sub>w\<^sub>f \ \ @ \ \ \ \\\ 0 t : \\<^sub>\ \\\ 0 T" apply (induct \) apply simp apply simp apply (erule well_formedE_cases) apply simp apply (drule_tac B=a in type_weaken [of "[]", simplified]) apply simp+ done text \ This property is proved by structural induction on the context @{term \}, using the previous result in the induction step. 
In the proof of the preservation theorem, we will need two substitution theorems for term and type variables, both of which are proved by induction on the typing derivation. Since term and type variables are stored in the same context, we again have to decrement the free type variables in @{term \} and @{term T} by \1\ in the substitution rule for term variables in order to compensate for the disappearance of the variable. \ theorem subst_type: \ \A.8\ assumes H: "\ @ VarB U \ \ \ t : T" shows "\ \ u : U \ \\<^sub>e 1 0 \ @ \ \ t[\\\ \ u] : \\<^sub>\ 1 \\\ T" using H apply (induct "\ @ VarB U \ \" t T arbitrary: \) apply simp apply (rule conjI) apply (rule impI) apply simp apply (drule_tac \="\[0 \\<^sub>\ Top]\<^sub>e" in type_weaken') apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply simp apply (rule impI conjI)+ apply (simp split: nat.split_asm) apply (rule T_Var) apply (erule wfE_subst) apply (rule wf_Top) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (rule T_Var) apply (erule wfE_subst) apply (rule wf_Top) apply simp apply (subgoal_tac "Suc (\\\ - Suc 0) = \\\") apply (simp (no_asm_simp)) apply arith apply simp - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply (rule T_Abs [THEN T_eq]) apply simp apply (simp add: substT_substT [symmetric]) apply simp apply (rule_tac T\<^sub>1\<^sub>1="T\<^sub>1\<^sub>1[\\\ \\<^sub>\ Top]\<^sub>\" in T_App) apply simp+ apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply simp apply (rule T_TApp [THEN T_eq]) apply simp apply (rule subst_subtype [simplified]) apply assumption apply (simp add: substT_substT [symmetric]) apply (rule_tac S="S[\\\ \\<^sub>\ Top]\<^sub>\" in T_Sub) apply simp apply simp apply (rule subst_subtype [simplified]) apply assumption done theorem substT_type: \ \A.11\ assumes H: "\ @ TVarB Q \ \ \ t : T" shows "\ \ P <: Q \ \[0 \\<^sub>\ P]\<^sub>e @ \ \ t[\\\ \\<^sub>\ P] : T[\\\ \\<^sub>\ P]\<^sub>\" using H apply (induct "\ @ TVarB Q \ \" t T arbitrary: \) apply simp_all apply (rule impI conjI)+ apply simp apply (rule T_Var) apply (erule wfE_subst) apply (erule wf_subtypeE) apply assumption apply (simp split: nat.split_asm) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (case_tac "i = \\\") apply simp apply (rule T_Var) apply (erule wfE_subst) apply (erule wf_subtypeE) apply assumption apply simp apply (subgoal_tac "i < \\\") apply (subgoal_tac "Suc (\\\ - Suc 0) = \\\") apply (simp (no_asm_simp)) apply arith apply arith apply (rule T_Abs [THEN T_eq]) - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply simp apply (simp add: substT_substT [symmetric]) apply (rule_tac T\<^sub>1\<^sub>1="T\<^sub>1\<^sub>1[\\\ \\<^sub>\ P]\<^sub>\" in T_App) apply simp+ apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule T_TApp [THEN T_eq]) apply simp apply (rule substT_subtype) apply assumption apply assumption apply (simp add: substT_substT [symmetric]) apply (rule_tac S="S[\\\ \\<^sub>\ P]\<^sub>\" in T_Sub) apply simp apply (rule substT_subtype) apply assumption apply assumption done subsection \Evaluation\ text \ \label{sec:evaluation} For the formalization of the evaluation strategy, it is useful to first define a set of {\it canonical values} 
that are not evaluated any further. The canonical values of call-by-value \fsub{} are exactly the abstractions over term and type variables: \ inductive_set "value" :: "trm set" where Abs: "(\:T. t) \ value" | TAbs: "(\<:T. t) \ value" text \ The notion of a @{term value} is now used in the definition of the evaluation relation \mbox{\t \ t'\}. There are several ways of defining this evaluation relation: Aydemir et al.\ \cite{PoplMark} advocate the use of {\it evaluation contexts} that allow one to separate the description of the ``immediate'' reduction rules, i.e.\ $\beta$-reduction, from the description of the context in which these reductions may occur. The rationale behind this approach is to keep the formalization more modular. We will take a closer look at this style of presentation in section \secref{sec:evaluation-ctxt}. For the rest of this section, we will use a different approach: both the ``immediate'' reductions and the reduction context are described within the same inductive definition, where the context is captured by additional congruence rules. \ inductive eval :: "trm \ trm \ bool" (infixl "\" 50) where E_Abs: "v\<^sub>2 \ value \ (\:T\<^sub>1\<^sub>1. t\<^sub>1\<^sub>2) \ v\<^sub>2 \ t\<^sub>1\<^sub>2[0 \ v\<^sub>2]" | E_TAbs: "(\<:T\<^sub>1\<^sub>1. t\<^sub>1\<^sub>2) \\<^sub>\ T\<^sub>2 \ t\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]" | E_App1: "t \ t' \ t \ u \ t' \ u" | E_App2: "v \ value \ t \ t' \ v \ t \ v \ t'" | E_TApp: "t \ t' \ t \\<^sub>\ T \ t' \\<^sub>\ T" text \ Here, the rules \E_Abs\ and \E_TAbs\ describe the ``immediate'' reductions, whereas \E_App1\, \E_App2\, and \E_TApp\ are additional congruence rules describing reductions in a context. The most important theorems of this section are the {\it preservation} theorem, stating that the reduction of a well-typed term does not change its type, and the {\it progress} theorem, stating that reduction of a well-typed term does not ``get stuck'' -- in other words, every well-typed, closed term @{term t} is either a value, or there is a term @{term t'} to which @{term t} can be reduced. The preservation theorem is proved by induction on the derivation of @{term "\ \ t : T"}, followed by a case distinction on the last rule used in the derivation of @{term "t \ t'"}. \ theorem preservation: \ \A.20\ assumes H: "\ \ t : T" shows "t \ t' \ \ \ t' : T" using H proof (induct arbitrary: t') case (T_Var \ i U T t') from \Var i \ t'\ show ?case by cases next case (T_Abs T\<^sub>1 \ t\<^sub>2 T\<^sub>2 t') from \(\:T\<^sub>1. t\<^sub>2) \ t'\ show ?case by cases next case (T_App \ t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 t\<^sub>2 t') from \t\<^sub>1 \ t\<^sub>2 \ t'\ show ?case proof cases case (E_Abs T\<^sub>1\<^sub>1' t\<^sub>1\<^sub>2) with T_App have "\ \ (\:T\<^sub>1\<^sub>1'. 
t\<^sub>1\<^sub>2) : T\<^sub>1\<^sub>1 \ T\<^sub>1\<^sub>2" by simp then obtain S' where T\<^sub>1\<^sub>1: "\ \ T\<^sub>1\<^sub>1 <: T\<^sub>1\<^sub>1'" and t\<^sub>1\<^sub>2: "VarB T\<^sub>1\<^sub>1' \ \ \ t\<^sub>1\<^sub>2 : S'" and S': "\ \ S'[0 \\<^sub>\ Top]\<^sub>\ <: T\<^sub>1\<^sub>2" by (rule Abs_type' [simplified]) blast from \\ \ t\<^sub>2 : T\<^sub>1\<^sub>1\ have "\ \ t\<^sub>2 : T\<^sub>1\<^sub>1'" using T\<^sub>1\<^sub>1 by (rule T_Sub) with t\<^sub>1\<^sub>2 have "\ \ t\<^sub>1\<^sub>2[0 \ t\<^sub>2] : S'[0 \\<^sub>\ Top]\<^sub>\" by (rule subst_type [where \="[]", simplified]) hence "\ \ t\<^sub>1\<^sub>2[0 \ t\<^sub>2] : T\<^sub>1\<^sub>2" using S' by (rule T_Sub) with E_Abs show ?thesis by simp next case (E_App1 t'') from \t\<^sub>1 \ t''\ have "\ \ t'' : T\<^sub>1\<^sub>1 \ T\<^sub>1\<^sub>2" by (rule T_App) hence "\ \ t'' \ t\<^sub>2 : T\<^sub>1\<^sub>2" using \\ \ t\<^sub>2 : T\<^sub>1\<^sub>1\ by (rule typing.T_App) with E_App1 show ?thesis by simp next case (E_App2 t'') from \t\<^sub>2 \ t''\ have "\ \ t'' : T\<^sub>1\<^sub>1" by (rule T_App) with T_App(1) have "\ \ t\<^sub>1 \ t'' : T\<^sub>1\<^sub>2" by (rule typing.T_App) with E_App2 show ?thesis by simp qed next case (T_TAbs T\<^sub>1 \ t\<^sub>2 T\<^sub>2 t') from \(\<:T\<^sub>1. t\<^sub>2) \ t'\ show ?case by cases next case (T_TApp \ t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 t') from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ t'\ show ?case proof cases case (E_TAbs T\<^sub>1\<^sub>1' t\<^sub>1\<^sub>2) with T_TApp have "\ \ (\<:T\<^sub>1\<^sub>1'. t\<^sub>1\<^sub>2) : (\<:T\<^sub>1\<^sub>1. T\<^sub>1\<^sub>2)" by simp then obtain S' where "TVarB T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1\<^sub>2 : S'" and "TVarB T\<^sub>1\<^sub>1 \ \ \ S' <: T\<^sub>1\<^sub>2" by (rule TAbs_type') blast hence "TVarB T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1\<^sub>2 : T\<^sub>1\<^sub>2" by (rule T_Sub) hence "\ \ t\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2] : T\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]\<^sub>\" using T_TApp(3) by (rule substT_type [where \="[]", simplified]) with E_TAbs show ?thesis by simp next case (E_TApp t'') from \t\<^sub>1 \ t''\ have "\ \ t'' : (\<:T\<^sub>1\<^sub>1. T\<^sub>1\<^sub>2)" by (rule T_TApp) hence "\ \ t'' \\<^sub>\ T\<^sub>2 : T\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]\<^sub>\" using \\ \ T\<^sub>2 <: T\<^sub>1\<^sub>1\ by (rule typing.T_TApp) with E_TApp show ?thesis by simp qed next case (T_Sub \ t S T t') from \t \ t'\ have "\ \ t' : S" by (rule T_Sub) then show ?case using \\ \ S <: T\ by (rule typing.T_Sub) qed text \ The progress theorem is also proved by induction on the derivation of @{term "[] \ t : T"}. In the induction steps, we need the following two lemmas about {\it canonical forms} stating that closed values of types @{term "T\<^sub>1 \ T\<^sub>2"} and @{term "\<:T\<^sub>1. T\<^sub>2"} must be abstractions over term and type variables, respectively. \ lemma Fun_canonical: \ \A.14(1)\ assumes ty: "[] \ v : T\<^sub>1 \ T\<^sub>2" shows "v \ value \ \t S. v = (\:S. 
t)" using ty proof (induct "[]::env" v "T\<^sub>1 \ T\<^sub>2" arbitrary: T\<^sub>1 T\<^sub>2) case T_Abs show ?case by iprover next case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 t\<^sub>2 T\<^sub>1 T\<^sub>2) from \t\<^sub>1 \ t\<^sub>2 \ value\ show ?case by cases next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 T\<^sub>1 T\<^sub>2') from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ value\ show ?case by cases next case (T_Sub t S T\<^sub>1 T\<^sub>2) from \[] \ S <: T\<^sub>1 \ T\<^sub>2\ obtain S\<^sub>1 S\<^sub>2 where S: "S = S\<^sub>1 \ S\<^sub>2" by cases (auto simp add: T_Sub) show ?case by (rule T_Sub S)+ qed simp lemma TyAll_canonical: \ \A.14(3)\ assumes ty: "[] \ v : (\<:T\<^sub>1. T\<^sub>2)" shows "v \ value \ \t S. v = (\<:S. t)" using ty proof (induct "[]::env" v "\<:T\<^sub>1. T\<^sub>2" arbitrary: T\<^sub>1 T\<^sub>2) case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 t\<^sub>2 T\<^sub>1 T\<^sub>2) from \t\<^sub>1 \ t\<^sub>2 \ value\ show ?case by cases next case T_TAbs show ?case by iprover next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 T\<^sub>1 T\<^sub>2') from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ value\ show ?case by cases next case (T_Sub t S T\<^sub>1 T\<^sub>2) from \[] \ S <: (\<:T\<^sub>1. T\<^sub>2)\ obtain S\<^sub>1 S\<^sub>2 where S: "S = (\<:S\<^sub>1. S\<^sub>2)" by cases (auto simp add: T_Sub) show ?case by (rule T_Sub S)+ qed simp theorem progress: assumes ty: "[] \ t : T" shows "t \ value \ (\t'. t \ t')" using ty proof (induct "[]::env" t T) case T_Var thus ?case by simp next case T_Abs from value.Abs show ?case .. next case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 t\<^sub>2) hence "t\<^sub>1 \ value \ (\t'. t\<^sub>1 \ t')" by simp thus ?case proof assume t\<^sub>1_val: "t\<^sub>1 \ value" with T_App obtain t S where t\<^sub>1: "t\<^sub>1 = (\:S. t)" by (auto dest!: Fun_canonical) from T_App have "t\<^sub>2 \ value \ (\t'. t\<^sub>2 \ t')" by simp thus ?thesis proof assume "t\<^sub>2 \ value" with t\<^sub>1 have "t\<^sub>1 \ t\<^sub>2 \ t[0 \ t\<^sub>2]" by simp (rule eval.intros) thus ?thesis by iprover next assume "\t'. t\<^sub>2 \ t'" then obtain t' where "t\<^sub>2 \ t'" by iprover with t\<^sub>1_val have "t\<^sub>1 \ t\<^sub>2 \ t\<^sub>1 \ t'" by (rule eval.intros) thus ?thesis by iprover qed next assume "\t'. t\<^sub>1 \ t'" then obtain t' where "t\<^sub>1 \ t'" .. hence "t\<^sub>1 \ t\<^sub>2 \ t' \ t\<^sub>2" by (rule eval.intros) thus ?thesis by iprover qed next case T_TAbs from value.TAbs show ?case .. next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2) hence "t\<^sub>1 \ value \ (\t'. t\<^sub>1 \ t')" by simp thus ?case proof assume "t\<^sub>1 \ value" with T_TApp obtain t S where "t\<^sub>1 = (\<:S. t)" by (auto dest!: TyAll_canonical) hence "t\<^sub>1 \\<^sub>\ T\<^sub>2 \ t[0 \\<^sub>\ T\<^sub>2]" by simp (rule eval.intros) thus ?thesis by iprover next assume "\t'. t\<^sub>1 \ t'" then obtain t' where "t\<^sub>1 \ t'" .. 
hence "t\<^sub>1 \\<^sub>\ T\<^sub>2 \ t' \\<^sub>\ T\<^sub>2" by (rule eval.intros) thus ?thesis by iprover qed next case (T_Sub t S T) show ?case by (rule T_Sub) qed end diff --git a/thys/POPLmark-deBruijn/POPLmarkRecord.thy b/thys/POPLmark-deBruijn/POPLmarkRecord.thy --- a/thys/POPLmark-deBruijn/POPLmarkRecord.thy +++ b/thys/POPLmark-deBruijn/POPLmarkRecord.thy @@ -1,2520 +1,2500 @@ (* Title: POPLmark/POPLmarkRecord.thy Author: Stefan Berghofer, TU Muenchen, 2005 *) theory POPLmarkRecord imports Basis begin section \Extending the calculus with records\ text \ \label{sec:record-calculus} We now describe how the calculus introduced in the previous section can be extended with records. An important point to note is that many of the definitions and proofs developed for the simple calculus can be reused. \ subsection \Types and Terms\ text \ In order to represent records, we also need a type of {\it field names}. For this purpose, we simply use the type of {\it strings}. We extend the datatype of types of System \fsub{} by a new constructor \RcdT\ representing record types. \ type_synonym name = string datatype type = TVar nat | Top | Fun type type (infixr "\" 200) | TyAll type type ("(3\<:_./ _)" [0, 10] 10) | RcdT "(name \ type) list" type_synonym fldT = "name \ type" type_synonym rcdT = "(name \ type) list" datatype binding = VarB type | TVarB type type_synonym env = "binding list" primrec is_TVarB :: "binding \ bool" where "is_TVarB (VarB T) = False" | "is_TVarB (TVarB T) = True" primrec type_ofB :: "binding \ type" where "type_ofB (VarB T) = T" | "type_ofB (TVarB T) = T" primrec mapB :: "(type \ type) \ binding \ binding" where "mapB f (VarB T) = VarB (f T)" | "mapB f (TVarB T) = TVarB (f T)" text \ A record type is essentially an association list, mapping names of record fields to their types. The types of bindings and environments remain unchanged. The datatype \trm\ of terms is extended with three new constructors \Rcd\, \Proj\, and \LET\, denoting construction of a new record, selection of a specific field of a record (projection), and matching of a record against a pattern, respectively. A pattern, represented by datatype \pat\, can be either a variable matching any value of a given type, or a nested record pattern. Due to the encoding of variables using de Bruijn indices, a variable pattern only consists of a type. \ datatype pat = PVar type | PRcd "(name \ pat) list" datatype trm = Var nat | Abs type trm ("(3\:_./ _)" [0, 10] 10) | TAbs type trm ("(3\<:_./ _)" [0, 10] 10) | App trm trm (infixl "\" 200) | TApp trm type (infixl "\\<^sub>\" 200) | Rcd "(name \ trm) list" | Proj trm name ("(_.._)" [90, 91] 90) | LET pat trm trm ("(LET (_ =/ _)/ IN (_))" 10) type_synonym fld = "name \ trm" type_synonym rcd = "(name \ trm) list" type_synonym fpat = "name \ pat" type_synonym rpat = "(name \ pat) list" text \ In order to motivate the typing and evaluation rules for the \LET\, it is important to note that an expression of the form @{text [display] "LET PRcd [(l\<^sub>1, PVar T\<^sub>1), \, (l\<^sub>n, PVar T\<^sub>n)] = Rcd [(l\<^sub>1, v\<^sub>1), \, (l\<^sub>n, v\<^sub>n)] IN t"} can be treated like a nested abstraction \(\:T\<^sub>1. \ \:T\<^sub>n. 
t) \ v\<^sub>1 \ \ \ v\<^sub>n\ \ subsection \Lifting and Substitution\ primrec psize :: "pat \ nat" ("\_\\<^sub>p") and rsize :: "rpat \ nat" ("\_\\<^sub>r") and fsize :: "fpat \ nat" ("\_\\<^sub>f") where "\PVar T\\<^sub>p = 1" | "\PRcd fs\\<^sub>p = \fs\\<^sub>r" | "\[]\\<^sub>r = 0" | "\f \ fs\\<^sub>r = \f\\<^sub>f + \fs\\<^sub>r" | "\(l, p)\\<^sub>f = \p\\<^sub>p" primrec liftT :: "nat \ nat \ type \ type" ("\\<^sub>\") and liftrT :: "nat \ nat \ rcdT \ rcdT" ("\\<^sub>r\<^sub>\") and liftfT :: "nat \ nat \ fldT \ fldT" ("\\<^sub>f\<^sub>\") where "\\<^sub>\ n k (TVar i) = (if i < k then TVar i else TVar (i + n))" | "\\<^sub>\ n k Top = Top" | "\\<^sub>\ n k (T \ U) = \\<^sub>\ n k T \ \\<^sub>\ n k U" | "\\<^sub>\ n k (\<:T. U) = (\<:\\<^sub>\ n k T. \\<^sub>\ n (k + 1) U)" | "\\<^sub>\ n k (RcdT fs) = RcdT (\\<^sub>r\<^sub>\ n k fs)" | "\\<^sub>r\<^sub>\ n k [] = []" | "\\<^sub>r\<^sub>\ n k (f \ fs) = \\<^sub>f\<^sub>\ n k f \ \\<^sub>r\<^sub>\ n k fs" | "\\<^sub>f\<^sub>\ n k (l, T) = (l, \\<^sub>\ n k T)" primrec liftp :: "nat \ nat \ pat \ pat" ("\\<^sub>p") and liftrp :: "nat \ nat \ rpat \ rpat" ("\\<^sub>r\<^sub>p") and liftfp :: "nat \ nat \ fpat \ fpat" ("\\<^sub>f\<^sub>p") where "\\<^sub>p n k (PVar T) = PVar (\\<^sub>\ n k T)" | "\\<^sub>p n k (PRcd fs) = PRcd (\\<^sub>r\<^sub>p n k fs)" | "\\<^sub>r\<^sub>p n k [] = []" | "\\<^sub>r\<^sub>p n k (f \ fs) = \\<^sub>f\<^sub>p n k f \ \\<^sub>r\<^sub>p n k fs" | "\\<^sub>f\<^sub>p n k (l, p) = (l, \\<^sub>p n k p)" primrec lift :: "nat \ nat \ trm \ trm" ("\") and liftr :: "nat \ nat \ rcd \ rcd" ("\\<^sub>r") and liftf :: "nat \ nat \ fld \ fld" ("\\<^sub>f") where "\ n k (Var i) = (if i < k then Var i else Var (i + n))" | "\ n k (\:T. t) = (\:\\<^sub>\ n k T. \ n (k + 1) t)" | "\ n k (\<:T. t) = (\<:\\<^sub>\ n k T. \ n (k + 1) t)" | "\ n k (s \ t) = \ n k s \ \ n k t" | "\ n k (t \\<^sub>\ T) = \ n k t \\<^sub>\ \\<^sub>\ n k T" | "\ n k (Rcd fs) = Rcd (\\<^sub>r n k fs)" | "\ n k (t..a) = (\ n k t)..a" | "\ n k (LET p = t IN u) = (LET \\<^sub>p n k p = \ n k t IN \ n (k + \p\\<^sub>p) u)" | "\\<^sub>r n k [] = []" | "\\<^sub>r n k (f \ fs) = \\<^sub>f n k f \ \\<^sub>r n k fs" | "\\<^sub>f n k (l, t) = (l, \ n k t)" primrec substTT :: "type \ nat \ type \ type" ("_[_ \\<^sub>\ _]\<^sub>\" [300, 0, 0] 300) and substrTT :: "rcdT \ nat \ type \ rcdT" ("_[_ \\<^sub>\ _]\<^sub>r\<^sub>\" [300, 0, 0] 300) and substfTT :: "fldT \ nat \ type \ fldT" ("_[_ \\<^sub>\ _]\<^sub>f\<^sub>\" [300, 0, 0] 300) where "(TVar i)[k \\<^sub>\ S]\<^sub>\ = (if k < i then TVar (i - 1) else if i = k then \\<^sub>\ k 0 S else TVar i)" | "Top[k \\<^sub>\ S]\<^sub>\ = Top" | "(T \ U)[k \\<^sub>\ S]\<^sub>\ = T[k \\<^sub>\ S]\<^sub>\ \ U[k \\<^sub>\ S]\<^sub>\" | "(\<:T. U)[k \\<^sub>\ S]\<^sub>\ = (\<:T[k \\<^sub>\ S]\<^sub>\. 
U[k+1 \\<^sub>\ S]\<^sub>\)" | "(RcdT fs)[k \\<^sub>\ S]\<^sub>\ = RcdT (fs[k \\<^sub>\ S]\<^sub>r\<^sub>\)" | "[][k \\<^sub>\ S]\<^sub>r\<^sub>\ = []" | "(f \ fs)[k \\<^sub>\ S]\<^sub>r\<^sub>\ = f[k \\<^sub>\ S]\<^sub>f\<^sub>\ \ fs[k \\<^sub>\ S]\<^sub>r\<^sub>\" | "(l, T)[k \\<^sub>\ S]\<^sub>f\<^sub>\ = (l, T[k \\<^sub>\ S]\<^sub>\)" primrec substpT :: "pat \ nat \ type \ pat" ("_[_ \\<^sub>\ _]\<^sub>p" [300, 0, 0] 300) and substrpT :: "rpat \ nat \ type \ rpat" ("_[_ \\<^sub>\ _]\<^sub>r\<^sub>p" [300, 0, 0] 300) and substfpT :: "fpat \ nat \ type \ fpat" ("_[_ \\<^sub>\ _]\<^sub>f\<^sub>p" [300, 0, 0] 300) where "(PVar T)[k \\<^sub>\ S]\<^sub>p = PVar (T[k \\<^sub>\ S]\<^sub>\)" | "(PRcd fs)[k \\<^sub>\ S]\<^sub>p = PRcd (fs[k \\<^sub>\ S]\<^sub>r\<^sub>p)" | "[][k \\<^sub>\ S]\<^sub>r\<^sub>p = []" | "(f \ fs)[k \\<^sub>\ S]\<^sub>r\<^sub>p = f[k \\<^sub>\ S]\<^sub>f\<^sub>p \ fs[k \\<^sub>\ S]\<^sub>r\<^sub>p" | "(l, p)[k \\<^sub>\ S]\<^sub>f\<^sub>p = (l, p[k \\<^sub>\ S]\<^sub>p)" primrec decp :: "nat \ nat \ pat \ pat" ("\\<^sub>p") where "\\<^sub>p 0 k p = p" | "\\<^sub>p (Suc n) k p = \\<^sub>p n k (p[k \\<^sub>\ Top]\<^sub>p)" text \ In addition to the lifting and substitution functions already needed for the basic calculus, we also have to define lifting and substitution functions for patterns, which we denote by @{term "\\<^sub>p n k p"} and @{term "T[k \\<^sub>\ S]\<^sub>p"}, respectively. The extension of the existing lifting and substitution functions to records is fairly standard. \ primrec subst :: "trm \ nat \ trm \ trm" ("_[_ \ _]" [300, 0, 0] 300) and substr :: "rcd \ nat \ trm \ rcd" ("_[_ \ _]\<^sub>r" [300, 0, 0] 300) and substf :: "fld \ nat \ trm \ fld" ("_[_ \ _]\<^sub>f" [300, 0, 0] 300) where "(Var i)[k \ s] = (if k < i then Var (i - 1) else if i = k then \ k 0 s else Var i)" | "(t \ u)[k \ s] = t[k \ s] \ u[k \ s]" | "(t \\<^sub>\ T)[k \ s] = t[k \ s] \\<^sub>\ T[k \\<^sub>\ Top]\<^sub>\" | "(\:T. t)[k \ s] = (\:T[k \\<^sub>\ Top]\<^sub>\. t[k+1 \ s])" | "(\<:T. t)[k \ s] = (\<:T[k \\<^sub>\ Top]\<^sub>\. t[k+1 \ s])" | "(Rcd fs)[k \ s] = Rcd (fs[k \ s]\<^sub>r)" | "(t..a)[k \ s] = (t[k \ s])..a" | "(LET p = t IN u)[k \ s] = (LET \\<^sub>p 1 k p = t[k \ s] IN u[k + \p\\<^sub>p \ s])" | "[][k \ s]\<^sub>r = []" | "(f \ fs)[k \ s]\<^sub>r = f[k \ s]\<^sub>f \ fs[k \ s]\<^sub>r" | "(l, t)[k \ s]\<^sub>f = (l, t[k \ s])" text \ Note that the substitution function on terms is defined simultaneously with a substitution function @{term "fs[k \ s]\<^sub>r"} on records (i.e.\ lists of fields), and a substitution function @{term "f[k \ s]\<^sub>f"} on fields. To avoid conflicts with locally bound variables, we have to add an offset @{term "\p\\<^sub>p"} to @{term k} when performing substitution in the body of the \LET\ binder, where @{term "\p\\<^sub>p"} is the number of variables in the pattern @{term p}. \ primrec substT :: "trm \ nat \ type \ trm" ("_[_ \\<^sub>\ _]" [300, 0, 0] 300) and substrT :: "rcd \ nat \ type \ rcd" ("_[_ \\<^sub>\ _]\<^sub>r" [300, 0, 0] 300) and substfT :: "fld \ nat \ type \ fld" ("_[_ \\<^sub>\ _]\<^sub>f" [300, 0, 0] 300) where "(Var i)[k \\<^sub>\ S] = (if k < i then Var (i - 1) else Var i)" | "(t \ u)[k \\<^sub>\ S] = t[k \\<^sub>\ S] \ u[k \\<^sub>\ S]" | "(t \\<^sub>\ T)[k \\<^sub>\ S] = t[k \\<^sub>\ S] \\<^sub>\ T[k \\<^sub>\ S]\<^sub>\" | "(\:T. t)[k \\<^sub>\ S] = (\:T[k \\<^sub>\ S]\<^sub>\. t[k+1 \\<^sub>\ S])" | "(\<:T. t)[k \\<^sub>\ S] = (\<:T[k \\<^sub>\ S]\<^sub>\. 
t[k+1 \\<^sub>\ S])" | "(Rcd fs)[k \\<^sub>\ S] = Rcd (fs[k \\<^sub>\ S]\<^sub>r)" | "(t..a)[k \\<^sub>\ S] = (t[k \\<^sub>\ S])..a" | "(LET p = t IN u)[k \\<^sub>\ S] = (LET p[k \\<^sub>\ S]\<^sub>p = t[k \\<^sub>\ S] IN u[k + \p\\<^sub>p \\<^sub>\ S])" | "[][k \\<^sub>\ S]\<^sub>r = []" | "(f \ fs)[k \\<^sub>\ S]\<^sub>r = f[k \\<^sub>\ S]\<^sub>f \ fs[k \\<^sub>\ S]\<^sub>r" | "(l, t)[k \\<^sub>\ S]\<^sub>f = (l, t[k \\<^sub>\ S])" primrec liftE :: "nat \ nat \ env \ env" ("\\<^sub>e") where "\\<^sub>e n k [] = []" | "\\<^sub>e n k (B \ \) = mapB (\\<^sub>\ n (k + \\\)) B \ \\<^sub>e n k \" primrec substE :: "env \ nat \ type \ env" ("_[_ \\<^sub>\ _]\<^sub>e" [300, 0, 0] 300) where "[][k \\<^sub>\ T]\<^sub>e = []" | "(B \ \)[k \\<^sub>\ T]\<^sub>e = mapB (\U. U[k + \\\ \\<^sub>\ T]\<^sub>\) B \ \[k \\<^sub>\ T]\<^sub>e" text \ For the formalization of the reduction rules for \LET\, we need a function \mbox{\t[k \\<^sub>s us]\} for simultaneously substituting terms @{term us} for variables with consecutive indices: \ primrec substs :: "trm \ nat \ trm list \ trm" ("_[_ \\<^sub>s _]" [300, 0, 0] 300) where "t[k \\<^sub>s []] = t" | "t[k \\<^sub>s u \ us] = t[k + \us\ \ u][k \\<^sub>s us]" primrec decT :: "nat \ nat \ type \ type" ("\\<^sub>\") where "\\<^sub>\ 0 k T = T" | "\\<^sub>\ (Suc n) k T = \\<^sub>\ n k (T[k \\<^sub>\ Top]\<^sub>\)" primrec decE :: "nat \ nat \ env \ env" ("\\<^sub>e") where "\\<^sub>e 0 k \ = \" | "\\<^sub>e (Suc n) k \ = \\<^sub>e n k (\[k \\<^sub>\ Top]\<^sub>e)" primrec decrT :: "nat \ nat \ rcdT \ rcdT" ("\\<^sub>r\<^sub>\") where "\\<^sub>r\<^sub>\ 0 k fTs = fTs" | "\\<^sub>r\<^sub>\ (Suc n) k fTs = \\<^sub>r\<^sub>\ n k (fTs[k \\<^sub>\ Top]\<^sub>r\<^sub>\)" text \ The lemmas about substitution and lifting are very similar to those needed for the simple calculus without records, with the difference that most of them have to be proved simultaneously with a suitable property for records. \ lemma liftE_length [simp]: "\\\<^sub>e n k \\ = \\\" by (induct \) simp_all lemma substE_length [simp]: "\\[k \\<^sub>\ U]\<^sub>e\ = \\\" by (induct \) simp_all lemma liftE_nth [simp]: "(\\<^sub>e n k \)\i\ = map_option (mapB (\\<^sub>\ n (k + \\\ - i - 1))) (\\i\)" apply (induct \ arbitrary: i) apply simp apply simp apply (case_tac i) apply simp apply simp done lemma substE_nth [simp]: "(\[0 \\<^sub>\ T]\<^sub>e)\i\ = map_option (mapB (\U. 
U[\\\ - i - 1 \\<^sub>\ T]\<^sub>\)) (\\i\)" apply (induct \ arbitrary: i) apply simp apply simp apply (case_tac i) apply simp apply simp done lemma liftT_liftT [simp]: "i \ j \ j \ i + m \ \\<^sub>\ n j (\\<^sub>\ m i T) = \\<^sub>\ (m + n) i T" "i \ j \ j \ i + m \ \\<^sub>r\<^sub>\ n j (\\<^sub>r\<^sub>\ m i rT) = \\<^sub>r\<^sub>\ (m + n) i rT" "i \ j \ j \ i + m \ \\<^sub>f\<^sub>\ n j (\\<^sub>f\<^sub>\ m i fT) = \\<^sub>f\<^sub>\ (m + n) i fT" by (induct T and rT and fT arbitrary: i j m n and i j m n and i j m n rule: liftT.induct liftrT.induct liftfT.induct) simp_all lemma liftT_liftT' [simp]: "i + m \ j \ \\<^sub>\ n j (\\<^sub>\ m i T) = \\<^sub>\ m i (\\<^sub>\ n (j - m) T)" "i + m \ j \ \\<^sub>r\<^sub>\ n j (\\<^sub>r\<^sub>\ m i rT) = \\<^sub>r\<^sub>\ m i (\\<^sub>r\<^sub>\ n (j - m) rT)" "i + m \ j \ \\<^sub>f\<^sub>\ n j (\\<^sub>f\<^sub>\ m i fT) = \\<^sub>f\<^sub>\ m i (\\<^sub>f\<^sub>\ n (j - m) fT)" apply (induct T and rT and fT arbitrary: i j m n and i j m n and i j m n rule: liftT.induct liftrT.induct liftfT.induct) apply simp_all apply arith apply (subgoal_tac "Suc j - m = Suc (j - m)") apply simp apply arith done lemma lift_size [simp]: "size (\\<^sub>\ n k T) = size T" "size_list (size_prod (\x. 0) size) (\\<^sub>r\<^sub>\ n k rT) = size_list (size_prod (\x. 0) size) rT" "size_prod (\x. 0) size (\\<^sub>f\<^sub>\ n k fT) = size_prod (\x. 0) size fT" by (induct T and rT and fT arbitrary: k and k and k rule: liftT.induct liftrT.induct liftfT.induct) simp_all lemma liftT0 [simp]: "\\<^sub>\ 0 i T = T" "\\<^sub>r\<^sub>\ 0 i rT = rT" "\\<^sub>f\<^sub>\ 0 i fT = fT" by (induct T and rT and fT arbitrary: i and i and i rule: liftT.induct liftrT.induct liftfT.induct) simp_all lemma liftp0 [simp]: "\\<^sub>p 0 i p = p" "\\<^sub>r\<^sub>p 0 i fs = fs" "\\<^sub>f\<^sub>p 0 i f = f" by (induct p and fs and f arbitrary: i and i and i rule: liftp.induct liftrp.induct liftfp.induct) simp_all lemma lift0 [simp]: "\ 0 i t = t" "\\<^sub>r 0 i fs = fs" "\\<^sub>f 0 i f = f" by (induct t and fs and f arbitrary: i and i and i rule: lift.induct liftr.induct liftf.induct) simp_all theorem substT_liftT [simp]: "k \ k' \ k' < k + n \ (\\<^sub>\ n k T)[k' \\<^sub>\ U]\<^sub>\ = \\<^sub>\ (n - 1) k T" "k \ k' \ k' < k + n \ (\\<^sub>r\<^sub>\ n k rT)[k' \\<^sub>\ U]\<^sub>r\<^sub>\ = \\<^sub>r\<^sub>\ (n - 1) k rT" "k \ k' \ k' < k + n \ (\\<^sub>f\<^sub>\ n k fT)[k' \\<^sub>\ U]\<^sub>f\<^sub>\ = \\<^sub>f\<^sub>\ (n - 1) k fT" by (induct T and rT and fT arbitrary: k k' and k k' and k k' rule: liftT.induct liftrT.induct liftfT.induct) simp_all theorem liftT_substT [simp]: "k \ k' \ \\<^sub>\ n k (T[k' \\<^sub>\ U]\<^sub>\) = \\<^sub>\ n k T[k' + n \\<^sub>\ U]\<^sub>\" "k \ k' \ \\<^sub>r\<^sub>\ n k (rT[k' \\<^sub>\ U]\<^sub>r\<^sub>\) = \\<^sub>r\<^sub>\ n k rT[k' + n \\<^sub>\ U]\<^sub>r\<^sub>\" "k \ k' \ \\<^sub>f\<^sub>\ n k (fT[k' \\<^sub>\ U]\<^sub>f\<^sub>\) = \\<^sub>f\<^sub>\ n k fT[k' + n \\<^sub>\ U]\<^sub>f\<^sub>\" apply (induct T and rT and fT arbitrary: k k' and k k' and k k' rule: liftT.induct liftrT.induct liftfT.induct) apply simp_all done theorem liftT_substT' [simp]: "k' < k \ \\<^sub>\ n k (T[k' \\<^sub>\ U]\<^sub>\) = \\<^sub>\ n (k + 1) T[k' \\<^sub>\ \\<^sub>\ n (k - k') U]\<^sub>\" "k' < k \ \\<^sub>r\<^sub>\ n k (rT[k' \\<^sub>\ U]\<^sub>r\<^sub>\) = \\<^sub>r\<^sub>\ n (k + 1) rT[k' \\<^sub>\ \\<^sub>\ n (k - k') U]\<^sub>r\<^sub>\" "k' < k \ \\<^sub>f\<^sub>\ n k (fT[k' \\<^sub>\ U]\<^sub>f\<^sub>\) = \\<^sub>f\<^sub>\ n (k + 1) fT[k' \\<^sub>\ \\<^sub>\ n (k 
- k') U]\<^sub>f\<^sub>\" apply (induct T and rT and fT arbitrary: k k' and k k' and k k' rule: liftT.induct liftrT.induct liftfT.induct) apply simp_all apply arith done lemma liftT_substT_Top [simp]: "k \ k' \ \\<^sub>\ n k' (T[k \\<^sub>\ Top]\<^sub>\) = \\<^sub>\ n (Suc k') T[k \\<^sub>\ Top]\<^sub>\" "k \ k' \ \\<^sub>r\<^sub>\ n k' (rT[k \\<^sub>\ Top]\<^sub>r\<^sub>\) = \\<^sub>r\<^sub>\ n (Suc k') rT[k \\<^sub>\ Top]\<^sub>r\<^sub>\" "k \ k' \ \\<^sub>f\<^sub>\ n k' (fT[k \\<^sub>\ Top]\<^sub>f\<^sub>\) = \\<^sub>f\<^sub>\ n (Suc k') fT[k \\<^sub>\ Top]\<^sub>f\<^sub>\" apply (induct T and rT and fT arbitrary: k k' and k k' and k k' rule: liftT.induct liftrT.induct liftfT.induct) apply simp_all apply arith done theorem liftE_substE [simp]: "k \ k' \ \\<^sub>e n k (\[k' \\<^sub>\ U]\<^sub>e) = \\<^sub>e n k \[k' + n \\<^sub>\ U]\<^sub>e" apply (induct \ arbitrary: k k' and k k' and k k') apply simp_all apply (case_tac a) apply (simp_all add: ac_simps) done lemma liftT_decT [simp]: "k \ k' \ \\<^sub>\ n k' (\\<^sub>\ m k T) = \\<^sub>\ m k (\\<^sub>\ n (m + k') T)" by (induct m arbitrary: T) simp_all lemma liftT_substT_strange: "\\<^sub>\ n k T[n + k \\<^sub>\ U]\<^sub>\ = \\<^sub>\ n (Suc k) T[k \\<^sub>\ \\<^sub>\ n 0 U]\<^sub>\" "\\<^sub>r\<^sub>\ n k rT[n + k \\<^sub>\ U]\<^sub>r\<^sub>\ = \\<^sub>r\<^sub>\ n (Suc k) rT[k \\<^sub>\ \\<^sub>\ n 0 U]\<^sub>r\<^sub>\" "\\<^sub>f\<^sub>\ n k fT[n + k \\<^sub>\ U]\<^sub>f\<^sub>\ = \\<^sub>f\<^sub>\ n (Suc k) fT[k \\<^sub>\ \\<^sub>\ n 0 U]\<^sub>f\<^sub>\" apply (induct T and rT and fT arbitrary: n k and n k and n k rule: liftT.induct liftrT.induct liftfT.induct) apply simp_all apply (thin_tac "\x. PROP P x" for P :: "_ \ prop") apply (drule_tac x=n in meta_spec) apply (drule_tac x="Suc k" in meta_spec) apply simp done lemma liftp_liftp [simp]: "k \ k' \ k' \ k + n \ \\<^sub>p n' k' (\\<^sub>p n k p) = \\<^sub>p (n + n') k p" "k \ k' \ k' \ k + n \ \\<^sub>r\<^sub>p n' k' (\\<^sub>r\<^sub>p n k rp) = \\<^sub>r\<^sub>p (n + n') k rp" "k \ k' \ k' \ k + n \ \\<^sub>f\<^sub>p n' k' (\\<^sub>f\<^sub>p n k fp) = \\<^sub>f\<^sub>p (n + n') k fp" by (induct p and rp and fp arbitrary: k k' and k k' and k k' rule: liftp.induct liftrp.induct liftfp.induct) simp_all lemma liftp_psize[simp]: "\\\<^sub>p n k p\\<^sub>p = \p\\<^sub>p" "\\\<^sub>r\<^sub>p n k fs\\<^sub>r = \fs\\<^sub>r" "\\\<^sub>f\<^sub>p n k f\\<^sub>f = \f\\<^sub>f" by (induct p and fs and f rule: liftp.induct liftrp.induct liftfp.induct) simp_all lemma lift_lift [simp]: "k \ k' \ k' \ k + n \ \ n' k' (\ n k t) = \ (n + n') k t" "k \ k' \ k' \ k + n \ \\<^sub>r n' k' (\\<^sub>r n k fs) = \\<^sub>r (n + n') k fs" "k \ k' \ k' \ k + n \ \\<^sub>f n' k' (\\<^sub>f n k f) = \\<^sub>f (n + n') k f" by (induct t and fs and f arbitrary: k k' and k k' and k k' rule: lift.induct liftr.induct liftf.induct) simp_all lemma liftE_liftE [simp]: "k \ k' \ k' \ k + n \ \\<^sub>e n' k' (\\<^sub>e n k \) = \\<^sub>e (n + n') k \" apply (induct \ arbitrary: k k') apply simp_all apply (case_tac a) apply simp_all done lemma liftE_liftE' [simp]: "i + m \ j \ \\<^sub>e n j (\\<^sub>e m i \) = \\<^sub>e m i (\\<^sub>e n (j - m) \)" apply (induct \ arbitrary: i j m n) apply simp_all apply (case_tac a) apply simp_all done lemma substT_substT: "i \ j \ T[Suc j \\<^sub>\ V]\<^sub>\[i \\<^sub>\ U[j - i \\<^sub>\ V]\<^sub>\]\<^sub>\ = T[i \\<^sub>\ U]\<^sub>\[j \\<^sub>\ V]\<^sub>\" "i \ j \ rT[Suc j \\<^sub>\ V]\<^sub>r\<^sub>\[i \\<^sub>\ U[j - i \\<^sub>\ V]\<^sub>\]\<^sub>r\<^sub>\ = rT[i \\<^sub>\ 
U]\<^sub>r\<^sub>\[j \\<^sub>\ V]\<^sub>r\<^sub>\" "i \ j \ fT[Suc j \\<^sub>\ V]\<^sub>f\<^sub>\[i \\<^sub>\ U[j - i \\<^sub>\ V]\<^sub>\]\<^sub>f\<^sub>\ = fT[i \\<^sub>\ U]\<^sub>f\<^sub>\[j \\<^sub>\ V]\<^sub>f\<^sub>\" apply (induct T and rT and fT arbitrary: i j U V and i j U V and i j U V rule: liftT.induct liftrT.induct liftfT.induct) apply (simp_all add: diff_Suc split: nat.split) apply (thin_tac "\x. PROP P x" for P :: "_ \ prop") apply (drule_tac x="Suc i" in meta_spec) apply (drule_tac x="Suc j" in meta_spec) apply simp done lemma substT_decT [simp]: "k \ j \ (\\<^sub>\ i k T)[j \\<^sub>\ U]\<^sub>\ = \\<^sub>\ i k (T[i + j \\<^sub>\ U]\<^sub>\)" by (induct i arbitrary: T j) (simp_all add: substT_substT [symmetric]) lemma substT_decT' [simp]: "i \ j \ \\<^sub>\ k (Suc j) T[i \\<^sub>\ Top]\<^sub>\ = \\<^sub>\ k j (T[i \\<^sub>\ Top]\<^sub>\)" by (induct k arbitrary: i j T) (simp_all add: substT_substT [of _ _ _ _ Top, simplified]) lemma substE_substE: "i \ j \ \[Suc j \\<^sub>\ V]\<^sub>e[i \\<^sub>\ U[j - i \\<^sub>\ V]\<^sub>\]\<^sub>e = \[i \\<^sub>\ U]\<^sub>e[j \\<^sub>\ V]\<^sub>e" apply (induct \) apply (case_tac [2] a) apply (simp_all add: substT_substT [symmetric]) done lemma substT_decE [simp]: "i \ j \ \\<^sub>e k (Suc j) \[i \\<^sub>\ Top]\<^sub>e = \\<^sub>e k j (\[i \\<^sub>\ Top]\<^sub>e)" by (induct k arbitrary: i j \) (simp_all add: substE_substE [of _ _ _ _ Top, simplified]) lemma liftE_app [simp]: "\\<^sub>e n k (\ @ \) = \\<^sub>e n (k + \\\) \ @ \\<^sub>e n k \" by (induct \ arbitrary: k) (simp_all add: ac_simps) lemma substE_app [simp]: "(\ @ \)[k \\<^sub>\ T]\<^sub>e = \[k + \\\ \\<^sub>\ T]\<^sub>e @ \[k \\<^sub>\ T]\<^sub>e" by (induct \) (simp_all add: ac_simps) lemma substs_app [simp]: "t[k \\<^sub>s ts @ us] = t[k + \us\ \\<^sub>s ts][k \\<^sub>s us]" by (induct ts arbitrary: t k) (simp_all add: ac_simps) theorem decE_Nil [simp]: "\\<^sub>e n k [] = []" by (induct n) simp_all theorem decE_Cons [simp]: "\\<^sub>e n k (B \ \) = mapB (\\<^sub>\ n (k + \\\)) B \ \\<^sub>e n k \" apply (induct n arbitrary: B \) apply (case_tac B) apply (case_tac [3] B) apply simp_all done theorem decE_app [simp]: "\\<^sub>e n k (\ @ \) = \\<^sub>e n (k + \\\) \ @ \\<^sub>e n k \" by (induct n arbitrary: \ \) simp_all theorem decT_liftT [simp]: "k \ k' \ k' + m \ k + n \ \\<^sub>\ m k' (\\<^sub>\ n k \) = \\<^sub>\ (n - m) k \" apply (induct m arbitrary: n) apply (subgoal_tac [2] "k' + m \ k + (n - Suc 0)") apply simp_all done theorem decE_liftE [simp]: "k \ k' \ k' + m \ k + n \ \\<^sub>e m k' (\\<^sub>e n k \) = \\<^sub>e (n - m) k \" apply (induct \ arbitrary: k k') apply (case_tac [2] a) apply simp_all done theorem liftE0 [simp]: "\\<^sub>e 0 k \ = \" apply (induct \) apply (case_tac [2] a) apply simp_all done lemma decT_decT [simp]: "\\<^sub>\ n k (\\<^sub>\ n' (k + n) T) = \\<^sub>\ (n + n') k T" by (induct n arbitrary: k T) simp_all lemma decE_decE [simp]: "\\<^sub>e n k (\\<^sub>e n' (k + n) \) = \\<^sub>e (n + n') k \" by (induct n arbitrary: k \) simp_all lemma decE_length [simp]: "\\\<^sub>e n k \\ = \\\" by (induct \) simp_all lemma liftrT_assoc_None [simp]: "(\\<^sub>r\<^sub>\ n k fs\l\\<^sub>? = \) = (fs\l\\<^sub>? = \)" by (induct fs rule: list.induct) auto lemma liftrT_assoc_Some: "fs\l\\<^sub>? = \T\ \ \\<^sub>r\<^sub>\ n k fs\l\\<^sub>? = \\\<^sub>\ n k T\" by (induct fs rule: list.induct) auto lemma liftrp_assoc_None [simp]: "(\\<^sub>r\<^sub>p n k fps\l\\<^sub>? = \) = (fps\l\\<^sub>? 
= \)" by (induct fps rule: list.induct) auto lemma liftr_assoc_None [simp]: "(\\<^sub>r n k fs\l\\<^sub>? = \) = (fs\l\\<^sub>? = \)" by (induct fs rule: list.induct) auto lemma unique_liftrT [simp]: "unique (\\<^sub>r\<^sub>\ n k fs) = unique fs" by (induct fs rule: list.induct) auto lemma substrTT_assoc_None [simp]: "(fs[k \\<^sub>\ U]\<^sub>r\<^sub>\\a\\<^sub>? = \) = (fs\a\\<^sub>? = \)" by (induct fs rule: list.induct) auto lemma substrTT_assoc_Some [simp]: "fs\a\\<^sub>? = \T\ \ fs[k \\<^sub>\ U]\<^sub>r\<^sub>\\a\\<^sub>? = \T[k \\<^sub>\ U]\<^sub>\\" by (induct fs rule: list.induct) auto lemma substrT_assoc_None [simp]: "(fs[k \\<^sub>\ P]\<^sub>r\l\\<^sub>? = \) = (fs\l\\<^sub>? = \)" by (induct fs rule: list.induct) auto lemma substrp_assoc_None [simp]: "(fps[k \\<^sub>\ U]\<^sub>r\<^sub>p\l\\<^sub>? = \) = (fps\l\\<^sub>? = \)" by (induct fps rule: list.induct) auto lemma substr_assoc_None [simp]: "(fs[k \ u]\<^sub>r\l\\<^sub>? = \) = (fs\l\\<^sub>? = \)" by (induct fs rule: list.induct) auto lemma unique_substrT [simp]: "unique (fs[k \\<^sub>\ U]\<^sub>r\<^sub>\) = unique fs" by (induct fs rule: list.induct) auto lemma liftrT_set: "(a, T) \ set fs \ (a, \\<^sub>\ n k T) \ set (\\<^sub>r\<^sub>\ n k fs)" by (induct fs rule: list.induct) auto lemma liftrT_setD: "(a, T) \ set (\\<^sub>r\<^sub>\ n k fs) \ \T'. (a, T') \ set fs \ T = \\<^sub>\ n k T'" by (induct fs rule: list.induct) auto lemma substrT_set: "(a, T) \ set fs \ (a, T[k \\<^sub>\ U]\<^sub>\) \ set (fs[k \\<^sub>\ U]\<^sub>r\<^sub>\)" by (induct fs rule: list.induct) auto lemma substrT_setD: "(a, T) \ set (fs[k \\<^sub>\ U]\<^sub>r\<^sub>\) \ \T'. (a, T') \ set fs \ T = T'[k \\<^sub>\ U]\<^sub>\" by (induct fs rule: list.induct) auto subsection \Well-formedness\ text \ The definition of well-formedness is extended with a rule stating that a record type @{term "RcdT fs"} is well-formed, if for all fields @{term "(l, T)"} contained in the list @{term fs}, the type @{term T} is well-formed, and all labels @{term l} in @{term fs} are {\it unique}. \ inductive well_formed :: "env \ type \ bool" ("_ \\<^sub>w\<^sub>f _" [50, 50] 50) where wf_TVar: "\\i\ = \TVarB T\ \ \ \\<^sub>w\<^sub>f TVar i" | wf_Top: "\ \\<^sub>w\<^sub>f Top" | wf_arrow: "\ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ \ \\<^sub>w\<^sub>f T \ U" | wf_all: "\ \\<^sub>w\<^sub>f T \ TVarB T \ \ \\<^sub>w\<^sub>f U \ \ \\<^sub>w\<^sub>f (\<:T. U)" | wf_RcdT: "unique fs \ \(l, T)\set fs. \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f RcdT fs" inductive well_formedE :: "env \ bool" ("_ \\<^sub>w\<^sub>f" [50] 50) and well_formedB :: "env \ binding \ bool" ("_ \\<^sub>w\<^sub>f\<^sub>B _" [50, 50] 50) where "\ \\<^sub>w\<^sub>f\<^sub>B B \ \ \\<^sub>w\<^sub>f type_ofB B" | wf_Nil: "[] \\<^sub>w\<^sub>f" | wf_Cons: "\ \\<^sub>w\<^sub>f\<^sub>B B \ \ \\<^sub>w\<^sub>f \ B \ \ \\<^sub>w\<^sub>f" inductive_cases well_formed_cases: "\ \\<^sub>w\<^sub>f TVar i" "\ \\<^sub>w\<^sub>f Top" "\ \\<^sub>w\<^sub>f T \ U" "\ \\<^sub>w\<^sub>f (\<:T. U)" "\ \\<^sub>w\<^sub>f (RcdT fTs)" inductive_cases well_formedE_cases: "B \ \ \\<^sub>w\<^sub>f" lemma wf_TVarB: "\ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f \ TVarB T \ \ \\<^sub>w\<^sub>f" by (rule wf_Cons) simp_all lemma wf_VarB: "\ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f \ VarB T \ \ \\<^sub>w\<^sub>f" by (rule wf_Cons) simp_all lemma map_is_TVarb: "map is_TVarB \' = map is_TVarB \ \ \\i\ = \TVarB T\ \ \T. 
\'\i\ = \TVarB T\" apply (induct \ arbitrary: \' T i) apply simp apply (auto split: nat.split_asm) apply (case_tac z) apply simp_all done lemma wf_equallength: assumes H: "\ \\<^sub>w\<^sub>f T" shows "map is_TVarB \' = map is_TVarB \ \ \' \\<^sub>w\<^sub>f T" using H apply (induct arbitrary: \') apply (auto intro: well_formed.intros dest: map_is_TVarb)+ apply (fastforce intro: well_formed.intros) done lemma wfE_replace: "\ @ B \ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f\<^sub>B B' \ is_TVarB B' = is_TVarB B \ \ @ B' \ \ \\<^sub>w\<^sub>f" apply (induct \) apply simp apply (erule wf_Cons) apply (erule well_formedE_cases) apply assumption apply simp apply (erule well_formedE_cases) apply (rule wf_Cons) apply (case_tac a) apply simp apply (rule wf_equallength) apply assumption apply simp apply simp apply (rule wf_equallength) apply assumption apply simp apply simp done lemma wf_weaken: assumes H: "\ @ \ \\<^sub>w\<^sub>f T" shows "\\<^sub>e (Suc 0) 0 \ @ B \ \ \\<^sub>w\<^sub>f \\<^sub>\ (Suc 0) \\\ T" using H apply (induct "\ @ \" T arbitrary: \) apply simp_all apply (rule conjI) apply (rule impI) apply (rule wf_TVar) apply simp apply (rule impI) apply (rule wf_TVar) apply (subgoal_tac "Suc i - \\\ = Suc (i - \\\)") apply simp apply arith apply (rule wf_Top) apply (rule wf_arrow) apply simp apply simp apply (rule wf_all) apply simp - apply (drule_tac x="TVarB T \ \" in meta_spec) apply simp \ \records\ apply (rule wf_RcdT) apply simp apply (rule ballpI) apply (drule liftrT_setD) apply (erule exE conjE)+ apply (drule_tac x=l and y="T[\\\ \\<^sub>\ Top]\<^sub>\" in bpspec) apply simp+ done lemma wf_weaken': "\ \\<^sub>w\<^sub>f T \ \ @ \ \\<^sub>w\<^sub>f \\<^sub>\ \\\ 0 T" apply (induct \) apply simp_all apply (drule_tac B=a in wf_weaken [of "[]", simplified]) apply simp done lemma wfE_weaken: "\ @ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f\<^sub>B B \ \\<^sub>e (Suc 0) 0 \ @ B \ \ \\<^sub>w\<^sub>f" apply (induct \) apply simp apply (rule wf_Cons) apply assumption+ apply simp apply (rule wf_Cons) apply (erule well_formedE_cases) apply (case_tac a) apply simp apply (rule wf_weaken) apply assumption apply simp apply (rule wf_weaken) apply assumption apply (erule well_formedE_cases) apply simp done lemma wf_liftB: assumes H: "\ \\<^sub>w\<^sub>f" shows "\\i\ = \VarB T\ \ \ \\<^sub>w\<^sub>f \\<^sub>\ (Suc i) 0 T" using H apply (induct arbitrary: i) apply simp apply (simp split: nat.split_asm) apply (frule_tac B="VarB T" in wf_weaken [of "[]", simplified]) apply simp+ apply (rename_tac nat) apply (drule_tac x=nat in meta_spec) apply simp apply (frule_tac T="\\<^sub>\ (Suc nat) 0 T" in wf_weaken [of "[]", simplified]) apply simp done theorem wf_subst: "\ @ B \ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ \[0 \\<^sub>\ U]\<^sub>e @ \ \\<^sub>w\<^sub>f T[\\\ \\<^sub>\ U]\<^sub>\" "\(l, T) \ set (rT::rcdT). \ @ B \ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ \(l, T) \ set rT. 
\[0 \\<^sub>\ U]\<^sub>e @ \ \\<^sub>w\<^sub>f T[\\\ \\<^sub>\ U]\<^sub>\" "\ @ B \ \ \\<^sub>w\<^sub>f snd (fT::fldT) \ \ \\<^sub>w\<^sub>f U \ \[0 \\<^sub>\ U]\<^sub>e @ \ \\<^sub>w\<^sub>f snd fT[\\\ \\<^sub>\ U]\<^sub>\" apply (induct T and rT and fT arbitrary: \ and \ and \ rule: liftT.induct liftrT.induct liftfT.induct) apply (rename_tac nat \) apply simp_all apply (rule conjI) apply (rule impI) apply (drule_tac \=\ and \="\[0 \\<^sub>\ U]\<^sub>e" in wf_weaken') apply simp apply (rule impI conjI)+ apply (erule well_formed_cases) apply (rule wf_TVar) apply (simp split: nat.split_asm) apply (subgoal_tac "\\\ \ nat - Suc 0") apply (rename_tac nata) apply (subgoal_tac "nat - Suc \\\ = nata") apply (simp (no_asm_simp)) apply arith apply arith apply (rule impI) apply (erule well_formed_cases) apply (rule wf_TVar) apply simp apply (rule wf_Top) apply (erule well_formed_cases) apply (rule wf_arrow) apply simp+ apply (rename_tac type1 type2 \) apply (erule well_formed_cases) apply (rule wf_all) apply simp apply (thin_tac "\x. PROP P x" for P :: "_ \ prop") apply (drule_tac x="TVarB type1 \ \" in meta_spec) apply simp apply (erule well_formed_cases) apply (rule wf_RcdT) apply simp apply (rule ballpI) apply (drule substrT_setD) apply (erule exE conjE)+ apply (drule meta_spec) apply (drule meta_mp) apply assumption apply (thin_tac "\x \ S. P x" for S P) apply (drule bpspec) apply assumption apply simp apply (simp add: split_paired_all) done theorem wf_dec: "\ @ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f \\<^sub>\ \\\ 0 T" apply (induct \ arbitrary: T) apply simp apply simp apply (drule wf_subst(1) [of "[]", simplified]) apply (rule wf_Top) apply simp done theorem wfE_subst: "\ @ B \ \ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f U \ \[0 \\<^sub>\ U]\<^sub>e @ \ \\<^sub>w\<^sub>f" apply (induct \) apply simp apply (erule well_formedE_cases) apply assumption apply simp apply (case_tac a) apply (erule well_formedE_cases) apply (rule wf_Cons) apply simp apply (rule wf_subst) apply assumption+ apply simp apply (erule well_formedE_cases) apply (rule wf_Cons) apply simp apply (rule wf_subst) apply assumption+ done subsection \Subtyping\ text \ The definition of the subtyping judgement is extended with a rule \SA_Rcd\ stating that a record type @{term "RcdT fs"} is a subtype of @{term "RcdT fs'"}, if for all fields \mbox{@{term "(l, T)"}} contained in @{term fs'}, there exists a corresponding field @{term "(l, S)"} such that @{term S} is a subtype of @{term T}. If the list @{term fs'} is empty, \SA_Rcd\ can appear as a leaf in the derivation tree of the subtyping judgement. Therefore, the introduction rule needs an additional premise @{term "\ \\<^sub>w\<^sub>f"} to make sure that only subtyping judgements with well-formed contexts are derivable. Moreover, since @{term fs} can contain additional fields not present in @{term fs'}, we also have to require that the type @{term "RcdT fs"} is well-formed. In order to ensure that the type @{term "RcdT fs'"} is well-formed, too, we only have to require that labels in @{term fs'} are unique, since, by induction on the subtyping derivation, all types contained in @{term fs'} are already well-formed. 
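For instance, assuming the labels @{term "l\<^sub>1"}, @{term "l\<^sub>2"}, and @{term "l\<^sub>3"} are distinct and the well-formedness premises are satisfied, \SA_Rcd\ allows a record type both to forget fields and to permute the remaining ones: the judgement @{text [display] "RcdT [(l\<^sub>1, T\<^sub>1), (l\<^sub>2, T\<^sub>2), (l\<^sub>3, T\<^sub>3)] <: RcdT [(l\<^sub>2, T\<^sub>2), (l\<^sub>1, T\<^sub>1)]"} is derivable in any context in which the left-hand record type is well-formed, since every field required on the right also occurs on the left with an identical type, so the corresponding subtyping premises follow by reflexivity.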
\ inductive subtyping :: "env \ type \ type \ bool" ("_ \ _ <: _" [50, 50, 50] 50) where SA_Top: "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f S \ \ \ S <: Top" | SA_refl_TVar: "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f TVar i \ \ \ TVar i <: TVar i" | SA_trans_TVar: "\\i\ = \TVarB U\ \ \ \ \\<^sub>\ (Suc i) 0 U <: T \ \ \ TVar i <: T" | SA_arrow: "\ \ T\<^sub>1 <: S\<^sub>1 \ \ \ S\<^sub>2 <: T\<^sub>2 \ \ \ S\<^sub>1 \ S\<^sub>2 <: T\<^sub>1 \ T\<^sub>2" | SA_all: "\ \ T\<^sub>1 <: S\<^sub>1 \ TVarB T\<^sub>1 \ \ \ S\<^sub>2 <: T\<^sub>2 \ \ \ (\<:S\<^sub>1. S\<^sub>2) <: (\<:T\<^sub>1. T\<^sub>2)" | SA_Rcd: "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f RcdT fs \ unique fs' \ \(l, T)\set fs'. \S. (l, S)\set fs \ \ \ S <: T \ \ \ RcdT fs <: RcdT fs'" lemma wf_subtype_env: assumes PQ: "\ \ P <: Q" shows "\ \\<^sub>w\<^sub>f" using PQ by induct assumption+ lemma wf_subtype: assumes PQ: "\ \ P <: Q" shows "\ \\<^sub>w\<^sub>f P \ \ \\<^sub>w\<^sub>f Q" using PQ by induct (auto intro: well_formed.intros elim!: wf_equallength) lemma wf_subtypeE: assumes H: "\ \ T <: U" and H': "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f T \ \ \\<^sub>w\<^sub>f U \ P" shows "P" apply (rule H') apply (rule wf_subtype_env) apply (rule H) apply (rule wf_subtype [OF H, THEN conjunct1]) apply (rule wf_subtype [OF H, THEN conjunct2]) done lemma subtype_refl: \ \A.1\ "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f T \ \ \ T <: T" "\ \\<^sub>w\<^sub>f \ \(l::name, T)\set fTs. \ \\<^sub>w\<^sub>f T \ \ \ T <: T" "\ \\<^sub>w\<^sub>f \ \ \\<^sub>w\<^sub>f snd (fT::fldT) \ \ \ snd fT <: snd fT" by (induct T and fTs and fT arbitrary: \ and \ and \ rule: liftT.induct liftrT.induct liftfT.induct, simp_all add: split_paired_all, simp_all) (blast intro: subtyping.intros wf_Nil wf_TVarB bexpI intro!: ballpI elim: well_formed_cases ballpE elim!: bexpE)+ lemma subtype_weaken: assumes H: "\ @ \ \ P <: Q" and wf: "\ \\<^sub>w\<^sub>f\<^sub>B B" shows "\\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>\ 1 \\\ P <: \\<^sub>\ 1 \\\ Q" using H proof (induct "\ @ \" P Q arbitrary: \) case SA_Top with wf show ?case by (auto intro: subtyping.SA_Top wfE_weaken wf_weaken) next case SA_refl_TVar with wf show ?case by (auto intro!: subtyping.SA_refl_TVar wfE_weaken dest: wf_weaken) next case (SA_trans_TVar i U T) thus ?case proof (cases "i < \\\") case True with SA_trans_TVar have "(\\<^sub>e 1 0 \ @ B \ \)\i\ = \TVarB (\\<^sub>\ 1 (\\\ - Suc i) U)\" by simp moreover from True SA_trans_TVar have "\\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>\ (Suc i) 0 (\\<^sub>\ 1 (\\\ - Suc i) U) <: \\<^sub>\ 1 \\\ T" by simp ultimately have "\\<^sub>e 1 0 \ @ B \ \ \ TVar i <: \\<^sub>\ 1 \\\ T" by (rule subtyping.SA_trans_TVar) with True show ?thesis by simp next case False then have "Suc i - \\\ = Suc (i - \\\)" by arith with False SA_trans_TVar have "(\\<^sub>e 1 0 \ @ B \ \)\Suc i\ = \TVarB U\" by simp moreover from False SA_trans_TVar have "\\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>\ (Suc (Suc i)) 0 U <: \\<^sub>\ 1 \\\ T" by simp ultimately have "\\<^sub>e 1 0 \ @ B \ \ \ TVar (Suc i) <: \\<^sub>\ 1 \\\ T" by (rule subtyping.SA_trans_TVar) with False show ?thesis by simp qed next case SA_arrow thus ?case by simp (iprover intro: subtyping.SA_arrow) next case (SA_all T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) with SA_all(4) [of "TVarB T\<^sub>1 \ \"] show ?case by simp (iprover intro: subtyping.SA_all) next case (SA_Rcd fs fs') with wf have "\\<^sub>e (Suc 0) 0 \ @ B \ \ \\<^sub>w\<^sub>f" by simp (rule wfE_weaken) moreover from \\ @ \ \\<^sub>w\<^sub>f RcdT fs\ have "\\<^sub>e (Suc 0) 0 \ @ B \ \ 
\\<^sub>w\<^sub>f \\<^sub>\ (Suc 0) \\\ (RcdT fs)" by (rule wf_weaken) hence "\\<^sub>e (Suc 0) 0 \ @ B \ \ \\<^sub>w\<^sub>f RcdT (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs)" by simp moreover from SA_Rcd have "unique (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs')" by simp moreover have "\(l, T)\set (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs'). \S. (l, S)\set (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs) \ \\<^sub>e (Suc 0) 0 \ @ B \ \ \ S <: T" proof (rule ballpI) fix l T assume "(l, T) \ set (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs')" then obtain T' where "(l, T') \ set fs'" and T: "T = \\<^sub>\ (Suc 0) \\\ T'" by (blast dest: liftrT_setD) with SA_Rcd obtain S where lS: "(l, S) \ set fs" and ST: "\\<^sub>e (Suc 0) 0 \ @ B \ \ \ \\<^sub>\ (Suc 0) \\\ S <: \\<^sub>\ (Suc 0) \\\ (T[\\\ \\<^sub>\ Top]\<^sub>\)" by fastforce with T have "\\<^sub>e (Suc 0) 0 \ @ B \ \ \ \\<^sub>\ (Suc 0) \\\ S <: \\<^sub>\ (Suc 0) \\\ T'" by simp moreover from lS have "(l, \\<^sub>\ (Suc 0) \\\ S) \ set (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs)" by (rule liftrT_set) moreover note T ultimately show "\S. (l, S)\set (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs) \ \\<^sub>e (Suc 0) 0 \ @ B \ \ \ S <: T" by auto qed ultimately have "\\<^sub>e (Suc 0) 0 \ @ B \ \ \ RcdT (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs) <: RcdT (\\<^sub>r\<^sub>\ (Suc 0) \\\ fs')" by (rule subtyping.SA_Rcd) thus ?case by simp qed lemma subtype_weaken': \ \A.2\ "\ \ P <: Q \ \ @ \ \\<^sub>w\<^sub>f \ \ @ \ \ \\<^sub>\ \\\ 0 P <: \\<^sub>\ \\\ 0 Q" apply (induct \) apply simp_all apply (erule well_formedE_cases) apply simp apply (drule_tac B="a" and \="\ @ \" in subtype_weaken [of "[]", simplified]) apply simp_all done lemma fieldT_size [simp]: "(a, T) \ set fs \ size T < Suc (size_list (size_prod (\x. 0) size) fs)" by (induct fs arbitrary: a T rule: list.induct) fastforce+ lemma subtype_trans: \ \A.3\ "\ \ S <: Q \ \ \ Q <: T \ \ \ S <: T" "\ @ TVarB Q \ \ \ M <: N \ \ \ P <: Q \ \ @ TVarB P \ \ \ M <: N" using wf_measure_size proof (induct Q arbitrary: \ S T \ P M N rule: wf_induct_rule) case (less Q) { fix \ S T Q' assume "\ \ S <: Q'" then have "\ \ Q' <: T \ size Q = size Q' \ \ \ S <: T" proof (induct arbitrary: T) case SA_Top from SA_Top(3) show ?case by cases (auto intro: subtyping.SA_Top SA_Top) next case SA_refl_TVar show ?case by fact next case SA_trans_TVar thus ?case by (auto intro: subtyping.SA_trans_TVar) next case (SA_arrow \ T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) note SA_arrow' = SA_arrow from SA_arrow(5) show ?case proof cases case SA_Top with SA_arrow show ?thesis by (auto intro: subtyping.SA_Top wf_arrow elim: wf_subtypeE) next case (SA_arrow T\<^sub>1' T\<^sub>2') from SA_arrow SA_arrow' have "\ \ S\<^sub>1 \ S\<^sub>2 <: T\<^sub>1' \ T\<^sub>2'" by (auto intro!: subtyping.SA_arrow intro: less(1) [of "T\<^sub>1"] less(1) [of "T\<^sub>2"]) with SA_arrow show ?thesis by simp qed next case (SA_all \ T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) note SA_all' = SA_all from SA_all(5) show ?case proof cases case SA_Top with SA_all show ?thesis by (auto intro!: subtyping.SA_Top wf_all intro: wf_equallength elim: wf_subtypeE) next case (SA_all T\<^sub>1' T\<^sub>2') from SA_all SA_all' have "\ \ T\<^sub>1' <: S\<^sub>1" by - (rule less(1), simp_all) moreover from SA_all SA_all' have "TVarB T\<^sub>1' \ \ \ S\<^sub>2 <: T\<^sub>2" by - (rule less(2) [of _ "[]", simplified], simp_all) with SA_all SA_all' have "TVarB T\<^sub>1' \ \ \ S\<^sub>2 <: T\<^sub>2'" by - (rule less(1), simp_all) ultimately have "\ \ (\<:S\<^sub>1. S\<^sub>2) <: (\<:T\<^sub>1'. 
T\<^sub>2')" by (rule subtyping.SA_all) with SA_all show ?thesis by simp qed next case (SA_Rcd \ fs\<^sub>1 fs\<^sub>2) note SA_Rcd' = SA_Rcd from SA_Rcd(5) show ?case proof cases case SA_Top with SA_Rcd show ?thesis by (auto intro!: subtyping.SA_Top) next case (SA_Rcd fs\<^sub>2') note \\ \\<^sub>w\<^sub>f\ moreover note \\ \\<^sub>w\<^sub>f RcdT fs\<^sub>1\ moreover note \unique fs\<^sub>2'\ moreover have "\(l, T)\set fs\<^sub>2'. \S. (l, S)\set fs\<^sub>1 \ \ \ S <: T" proof (rule ballpI) fix l T assume lT: "(l, T) \ set fs\<^sub>2'" with SA_Rcd obtain U where fs2: "(l, U) \ set fs\<^sub>2" and UT: "\ \ U <: T" by blast with SA_Rcd SA_Rcd' obtain S where fs1: "(l, S) \ set fs\<^sub>1" and SU: "\ \ S <: U" by blast from SA_Rcd SA_Rcd' fs2 have "(U, Q) \ measure size" by simp hence "\ \ S <: T" using SU UT by (rule less(1)) with fs1 show "\S. (l, S)\set fs\<^sub>1 \ \ \ S <: T" by blast qed ultimately have "\ \ RcdT fs\<^sub>1 <: RcdT fs\<^sub>2'" by (rule subtyping.SA_Rcd) with SA_Rcd show ?thesis by simp qed qed } note tr = this { case 1 thus ?case using refl by (rule tr) next case 2 from 2(1) show "\ @ TVarB P \ \ \ M <: N" proof (induct "\ @ TVarB Q \ \" M N arbitrary: \) case SA_Top with 2 show ?case by (auto intro!: subtyping.SA_Top intro: wf_equallength wfE_replace elim!: wf_subtypeE) next case SA_refl_TVar with 2 show ?case by (auto intro!: subtyping.SA_refl_TVar intro: wf_equallength wfE_replace elim!: wf_subtypeE) next case (SA_trans_TVar i U T) show ?case proof (cases "i < \\\") case True with SA_trans_TVar show ?thesis by (auto intro!: subtyping.SA_trans_TVar) next case False note False' = False show ?thesis proof (cases "i = \\\") case True from SA_trans_TVar have "(\ @ [TVarB P]) @ \ \\<^sub>w\<^sub>f" by (auto intro: wfE_replace elim!: wf_subtypeE) with \\ \ P <: Q\ have "(\ @ [TVarB P]) @ \ \ \\<^sub>\ \\ @ [TVarB P]\ 0 P <: \\<^sub>\ \\ @ [TVarB P]\ 0 Q" by (rule subtype_weaken') with SA_trans_TVar True False have "\ @ TVarB P \ \ \ \\<^sub>\ (Suc \\\) 0 P <: T" by - (rule tr, simp+) with True and False and SA_trans_TVar show ?thesis by (auto intro!: subtyping.SA_trans_TVar) next case False with False' have "i - \\\ = Suc (i - \\\ - 1)" by arith with False False' SA_trans_TVar show ?thesis by - (rule subtyping.SA_trans_TVar, simp+) qed qed next case SA_arrow thus ?case by (auto intro!: subtyping.SA_arrow) next case (SA_all T\<^sub>1 S\<^sub>1 S\<^sub>2 T\<^sub>2) thus ?case by (auto intro: subtyping.SA_all SA_all(4) [of "TVarB T\<^sub>1 \ \", simplified]) next case (SA_Rcd fs fs') from \\ \ P <: Q\ have "\ \\<^sub>w\<^sub>f P" by (rule wf_subtypeE) with SA_Rcd have "\ @ TVarB P \ \ \\<^sub>w\<^sub>f" by - (rule wfE_replace, simp+) moreover from SA_Rcd have "\ @ TVarB Q \ \ \\<^sub>w\<^sub>f RcdT fs" by simp hence "\ @ TVarB P \ \ \\<^sub>w\<^sub>f RcdT fs" by (rule wf_equallength) simp_all moreover note \unique fs'\ moreover from SA_Rcd have "\(l, T)\set fs'. \S. 
(l, S)\set fs \ \ @ TVarB P \ \ \ S <: T" by blast ultimately show ?case by (rule subtyping.SA_Rcd) qed } qed lemma substT_subtype: \ \A.10\ assumes H: "\ @ TVarB Q \ \ \ S <: T" shows "\ \ P <: Q \ \[0 \\<^sub>\ P]\<^sub>e @ \ \ S[\\\ \\<^sub>\ P]\<^sub>\ <: T[\\\ \\<^sub>\ P]\<^sub>\" using H apply (induct "\ @ TVarB Q \ \" S T arbitrary: \) apply simp_all apply (rule SA_Top) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (rule wf_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (rule impI conjI)+ apply (rule subtype_refl) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply (drule_tac T=P and \="\[0 \\<^sub>\ P]\<^sub>e" in wf_weaken') apply simp apply (rule conjI impI)+ apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply (drule wf_subst) apply assumption apply simp apply (rule impI) apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply (drule wf_subst) apply assumption apply simp apply (rule conjI impI)+ apply simp apply (drule_tac \=\ and \="\[0 \\<^sub>\ P]\<^sub>e" in subtype_weaken') apply (erule wf_subtypeE)+ apply assumption apply simp apply (rule subtype_trans(1)) apply assumption+ apply (rule conjI impI)+ apply (rule SA_trans_TVar) apply (simp split: nat.split_asm) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (rule SA_trans_TVar) apply (simp split: nat.split_asm) apply (subgoal_tac "Suc (\\\ - Suc 0) = \\\") apply (simp (no_asm_simp)) apply arith apply (rule SA_arrow) apply simp+ apply (rule SA_all) apply simp - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (erule wf_subtypeE) apply (rule SA_Rcd) apply (erule wfE_subst) apply assumption apply (drule wf_subst) apply assumption apply simp apply simp apply (rule ballpI) apply (drule substrT_setD) apply (erule exE conjE)+ apply (drule bpspec) apply assumption apply simp apply (erule exE) apply (erule conjE)+ apply (rule exI) apply (rule conjI) apply (erule substrT_set) apply assumption done lemma subst_subtype: assumes H: "\ @ VarB V \ \ \ T <: U" shows "\\<^sub>e 1 0 \ @ \ \ \\<^sub>\ 1 \\\ T <: \\<^sub>\ 1 \\\ U" using H apply (induct "\ @ VarB V \ \" T U arbitrary: \) apply simp_all apply (rule SA_Top) apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply (rule wf_subst) apply assumption apply (rule wf_Top) apply (rule impI conjI)+ apply (rule SA_Top) apply (rule wfE_subst) apply assumption apply (rule wf_Top)+ apply (rule conjI impI)+ apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply (drule wf_subst) apply (rule wf_Top) apply simp apply (rule impI) apply (rule SA_refl_TVar) apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply (drule wf_subst) apply (rule wf_Top) apply simp apply (rule conjI impI)+ apply simp apply (rule conjI impI)+ apply (simp split: nat.split_asm) apply (rule SA_trans_TVar) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (rule SA_trans_TVar) apply simp apply (subgoal_tac "0 < \\\") apply simp apply arith apply (rule SA_arrow) apply simp+ apply 
(rule SA_all) apply simp - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule SA_Rcd) apply (erule wfE_subst) apply (rule wf_Top) apply (drule wf_subst) apply (rule wf_Top) apply simp apply simp apply (rule ballpI) apply (drule substrT_setD) apply (erule exE conjE)+ apply (drule bpspec) apply assumption apply simp apply (erule exE) apply (erule conjE)+ apply (rule exI) apply (rule conjI) apply (erule substrT_set) apply assumption done subsection \Typing\ text \ In the formalization of the type checking rule for the \LET\ binder, we use an additional judgement \\ p : T \ \\ for checking whether a given pattern @{term p} is compatible with the type @{term T} of an object that is to be matched against this pattern. The judgement will be defined simultaneously with a judgement \mbox{\\ ps [:] Ts \ \\} for type checking field patterns. Apart from checking the type, the judgement also returns a list of bindings @{term \}, which can be thought of as a ``flattened'' list of types of the variables occurring in the pattern. Since typing environments are extended ``to the left'', the bindings in @{term \} appear in reverse order. \ inductive ptyping :: "pat \ type \ env \ bool" ("\ _ : _ \ _" [50, 50, 50] 50) and ptypings :: "rpat \ rcdT \ env \ bool" ("\ _ [:] _ \ _" [50, 50, 50] 50) where P_Var: "\ PVar T : T \ [VarB T]" | P_Rcd: "\ fps [:] fTs \ \ \ \ PRcd fps : RcdT fTs \ \" | P_Nil: "\ [] [:] [] \ []" | P_Cons: "\ p : T \ \\<^sub>1 \ \ fps [:] fTs \ \\<^sub>2 \ fps\l\\<^sub>? = \ \ \ ((l, p) \ fps) [:] ((l, T) \ fTs) \ \\<^sub>e \\\<^sub>1\ 0 \\<^sub>2 @ \\<^sub>1" text \ The definition of the typing judgement for terms is extended with the rules \T_Let\, @{term "T_Rcd"}, and @{term "T_Proj"} for pattern matching, record construction and field selection, respectively. The above typing judgement for patterns is used in the rule \T_Let\. The typing judgement for terms is defined simultaneously with a typing judgement \\ \ fs [:] fTs\ for record fields. \ inductive typing :: "env \ trm \ type \ bool" ("_ \ _ : _" [50, 50, 50] 50) and typings :: "env \ rcd \ rcdT \ bool" ("_ \ _ [:] _" [50, 50, 50] 50) where T_Var: "\ \\<^sub>w\<^sub>f \ \\i\ = \VarB U\ \ T = \\<^sub>\ (Suc i) 0 U \ \ \ Var i : T" | T_Abs: "VarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2 \ \ \ (\:T\<^sub>1. t\<^sub>2) : T\<^sub>1 \ \\<^sub>\ 1 0 T\<^sub>2" | T_App: "\ \ t\<^sub>1 : T\<^sub>1\<^sub>1 \ T\<^sub>1\<^sub>2 \ \ \ t\<^sub>2 : T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1 \ t\<^sub>2 : T\<^sub>1\<^sub>2" | T_TAbs: "TVarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2 \ \ \ (\<:T\<^sub>1. t\<^sub>2) : (\<:T\<^sub>1. T\<^sub>2)" | T_TApp: "\ \ t\<^sub>1 : (\<:T\<^sub>1\<^sub>1. T\<^sub>1\<^sub>2) \ \ \ T\<^sub>2 <: T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1 \\<^sub>\ T\<^sub>2 : T\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]\<^sub>\" | T_Sub: "\ \ t : S \ \ \ S <: T \ \ \ t : T" | T_Let: "\ \ t\<^sub>1 : T\<^sub>1 \ \ p : T\<^sub>1 \ \ \ \ @ \ \ t\<^sub>2 : T\<^sub>2 \ \ \ (LET p = t\<^sub>1 IN t\<^sub>2) : \\<^sub>\ \\\ 0 T\<^sub>2" | T_Rcd: "\ \ fs [:] fTs \ \ \ Rcd fs : RcdT fTs" | T_Proj: "\ \ t : RcdT fTs \ fTs\l\\<^sub>? = \T\ \ \ \ t..l : T" | T_Nil: "\ \\<^sub>w\<^sub>f \ \ \ [] [:] []" | T_Cons: "\ \ t : T \ \ \ fs [:] fTs \ fs\l\\<^sub>? = \ \ \ \ (l, t) \ fs [:] (l, T) \ fTs" theorem wf_typeE1: "\ \ t : T \ \ \\<^sub>w\<^sub>f" "\ \ fs [:] fTs \ \ \\<^sub>w\<^sub>f" by (induct set: typing typings) (blast elim: well_formedE_cases)+ theorem wf_typeE2: "\ \ t : T \ \ \\<^sub>w\<^sub>f T" "\' \ fs [:] fTs \ (\(l, T) \ set fTs. 
\' \\<^sub>w\<^sub>f T) \ unique fTs \ (\l. (fs\l\\<^sub>? = \) = (fTs\l\\<^sub>? = \))" apply (induct set: typing typings) apply simp apply (rule wf_liftB) apply assumption+ apply (drule wf_typeE1)+ apply (erule well_formedE_cases)+ apply (rule wf_arrow) apply simp apply simp apply (rule wf_subst [of "[]", simplified]) apply assumption apply (rule wf_Top) apply (erule well_formed_cases) apply assumption apply (rule wf_all) apply (drule wf_typeE1) apply (erule well_formedE_cases) apply simp apply assumption apply (erule well_formed_cases) apply (rule wf_subst [of "[]", simplified]) apply assumption apply (erule wf_subtypeE) apply assumption apply (erule wf_subtypeE) apply assumption \ \records\ apply (erule wf_dec) apply (erule conjE)+ apply (rule wf_RcdT) apply assumption+ apply (erule well_formed_cases) apply (blast dest: assoc_set) apply simp apply simp done lemmas ptyping_induct = ptyping_ptypings.inducts(1) [of _ _ _ _ "\x y z. True", simplified True_simps, consumes 1, case_names P_Var P_Rcd] lemmas ptypings_induct = ptyping_ptypings.inducts(2) [of _ _ _ "\x y z. True", simplified True_simps, consumes 1, case_names P_Nil P_Cons] lemmas typing_induct = typing_typings.inducts(1) [of _ _ _ _ "\x y z. True", simplified True_simps, consumes 1, case_names T_Var T_Abs T_App T_TAbs T_TApp T_Sub T_Let T_Rcd T_Proj] lemmas typings_induct = typing_typings.inducts(2) [of _ _ _ "\x y z. True", simplified True_simps, consumes 1, case_names T_Nil T_Cons] lemma narrow_type: \ \A.7\ "\ @ TVarB Q \ \ \ t : T \ \ \ P <: Q \ \ @ TVarB P \ \ \ t : T" "\ @ TVarB Q \ \ \ ts [:] Ts \ \ \ P <: Q \ \ @ TVarB P \ \ \ ts [:] Ts" apply (induct "\ @ TVarB Q \ \" t T and "\ @ TVarB Q \ \" ts Ts arbitrary: \ and \ set: typing typings) apply simp_all apply (rule T_Var) apply (erule wfE_replace) apply (erule wf_subtypeE) apply simp+ apply (case_tac "i < \\\") apply simp apply (case_tac "i = \\\") apply simp apply (simp split: nat.split nat.split_asm)+ apply (rule T_Abs [simplified]) - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule_tac T\<^sub>1\<^sub>1=T\<^sub>1\<^sub>1 in T_App) apply simp+ apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule_tac T\<^sub>1\<^sub>1=T\<^sub>1\<^sub>1 in T_TApp) apply simp apply (rule subtype_trans(2)) apply assumption+ apply (rule_tac S=S in T_Sub) apply simp apply (rule subtype_trans(2)) apply assumption+ \ \records\ apply (rule T_Let) apply blast apply assumption - apply (drule_tac x="\ @ \'" in meta_spec, drule meta_mp, rule refl) apply simp apply (rule T_Rcd) apply simp apply (rule T_Proj) apply blast apply assumption apply (rule T_Nil) apply (erule wfE_replace) apply (erule wf_subtypeE) apply simp+ apply (rule T_Cons) apply simp+ done lemma typings_setD: assumes H: "\ \ fs [:] fTs" shows "(l, T) \ set fTs \ \t. fs\l\\<^sub>? = \t\ \ \ \ t : T" using H by (induct arbitrary: l T rule: typings_induct) fastforce+ lemma subtype_refl': assumes t: "\ \ t : T" shows "\ \ T <: T" proof (rule subtype_refl) from t show "\ \\<^sub>w\<^sub>f" by (rule wf_typeE1) from t show "\ \\<^sub>w\<^sub>f T" by (rule wf_typeE2) qed lemma Abs_type: \ \A.13(1)\ assumes H: "\ \ (\:S. s) : T" shows "\ \ T <: U \ U' \ (\S'. \ \ U <: S \ VarB S \ \ \ s : S' \ \ \ \\<^sub>\ 1 0 S' <: U' \ P) \ P" using H proof (induct \ "\:S. 
s" T arbitrary: U U' S s P) case (T_Abs T\<^sub>1 \ t\<^sub>2 T\<^sub>2) from \\ \ T\<^sub>1 \ \\<^sub>\ 1 0 T\<^sub>2 <: U \ U'\ obtain ty1: "\ \ U <: T\<^sub>1" and ty2: "\ \ \\<^sub>\ 1 0 T\<^sub>2 <: U'" by cases simp_all from ty1 \VarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2\ ty2 show ?case by (rule T_Abs) next case (T_Sub \ S' T) from \\ \ S' <: T\ and \\ \ T <: U \ U'\ have "\ \ S' <: U \ U'" by (rule subtype_trans(1)) then show ?case by (rule T_Sub) (rule T_Sub(5)) qed lemma Abs_type': assumes H: "\ \ (\:S. s) : U \ U'" and R: "\S'. \ \ U <: S \ VarB S \ \ \ s : S' \ \ \ \\<^sub>\ 1 0 S' <: U' \ P" shows "P" using H subtype_refl' [OF H] by (rule Abs_type) (rule R) lemma TAbs_type: \ \A.13(2)\ assumes H: "\ \ (\<:S. s) : T" shows "\ \ T <: (\<:U. U') \ (\S'. \ \ U <: S \ TVarB U \ \ \ s : S' \ TVarB U \ \ \ S' <: U' \ P) \ P" using H proof (induct \ "\<:S. s" T arbitrary: U U' S s P) case (T_TAbs T\<^sub>1 \ t\<^sub>2 T\<^sub>2) from \\ \ (\<:T\<^sub>1. T\<^sub>2) <: (\<:U. U')\ obtain ty1: "\ \ U <: T\<^sub>1" and ty2: "TVarB U \ \ \ T\<^sub>2 <: U'" by cases simp_all from \TVarB T\<^sub>1 \ \ \ t\<^sub>2 : T\<^sub>2\ have "TVarB U \ \ \ t\<^sub>2 : T\<^sub>2" using ty1 by (rule narrow_type [of "[]", simplified]) with ty1 show ?case using ty2 by (rule T_TAbs) next case (T_Sub \ S' T) from \\ \ S' <: T\ and \\ \ T <: (\<:U. U')\ have "\ \ S' <: (\<:U. U')" by (rule subtype_trans(1)) then show ?case by (rule T_Sub) (rule T_Sub(5)) qed lemma TAbs_type': assumes H: "\ \ (\<:S. s) : (\<:U. U')" and R: "\S'. \ \ U <: S \ TVarB U \ \ \ s : S' \ TVarB U \ \ \ S' <: U' \ P" shows "P" using H subtype_refl' [OF H] by (rule TAbs_type) (rule R) text \ In the proof of the preservation theorem, the following elimination rule for typing judgements on record types will be useful: \ lemma Rcd_type1: \ \A.13(3)\ assumes H: "\ \ t : T" shows "t = Rcd fs \ \ \ T <: RcdT fTs \ \(l, U) \ set fTs. \u. fs\l\\<^sub>? = \u\ \ \ \ u : U" using H apply (induct arbitrary: fs fTs rule: typing_induct, simp_all) apply (drule meta_spec)+ apply (drule meta_mp) apply (rule refl) apply (erule meta_mp) apply (rule subtype_trans(1)) apply assumption+ apply (erule subtyping.cases) apply simp_all apply (rule ballpI) apply (drule bpspec) apply assumption apply (erule exE conjE)+ apply (drule typings_setD) apply assumption apply (erule exE conjE)+ apply (rule exI conjI)+ apply simp apply (erule T_Sub) apply assumption done lemma Rcd_type1': assumes H: "\ \ Rcd fs : RcdT fTs" shows "\(l, U) \ set fTs. \u. fs\l\\<^sub>? = \u\ \ \ \ u : U" using H refl subtype_refl' [OF H] by (rule Rcd_type1) text \ Intuitively, this means that for a record @{term "Rcd fs"} of type @{term "RcdT fTs"}, each field with name @{term l} associated with a type @{term U} in @{term "fTs"} must correspond to a field in @{term fs} with value @{term u}, where @{term u} has type @{term U}. Thanks to the subsumption rule \T_Sub\, the typing judgement for terms is not sensitive to the order of record fields. For example, @{term [display] "\ \ Rcd [(l\<^sub>1, t\<^sub>1), (l\<^sub>2, t\<^sub>2), (l\<^sub>3, t\<^sub>3)] : RcdT [(l\<^sub>2, T\<^sub>2), (l\<^sub>1, T\<^sub>1)]"} provided that \\ \ t\<^sub>i : T\<^sub>i\. 
Note however that this does not imply @{term [display] "\ \ [(l\<^sub>1, t\<^sub>1), (l\<^sub>2, t\<^sub>2), (l\<^sub>3, t\<^sub>3)] [:] [(l\<^sub>2, T\<^sub>2), (l\<^sub>1, T\<^sub>1)]"} In order for this statement to hold, we need to remove the field @{term "l\<^sub>3"} and exchange the order of the fields @{term "l\<^sub>1"} and @{term "l\<^sub>2"}. This gives rise to the following variant of the above elimination rule: \ lemma Rcd_type2: "\ \ Rcd fs : T \ \ \ T <: RcdT fTs \ \ \ map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs [:] fTs" apply (drule Rcd_type1) apply (rule refl) apply assumption apply (induct fTs rule: list.induct) apply simp apply (rule T_Nil) apply (erule wf_subtypeE) apply assumption apply (simp add: split_paired_all) apply (rule T_Cons) apply (drule_tac x=a and y=b in bpspec) apply simp apply (erule exE conjE)+ apply simp apply (rename_tac list) apply (subgoal_tac "\ \ RcdT ((a, b) \ list) <: RcdT list") apply (erule meta_mp) apply (erule subtype_trans(1)) apply assumption apply (erule wf_subtypeE) apply (rule SA_Rcd) apply assumption+ apply (erule well_formed_cases) apply simp apply (rule ballpI) apply (rule exI) apply (rule conjI) apply (rule_tac [2] subtype_refl) apply simp apply assumption apply (erule well_formed_cases) apply (erule_tac x=l and y=Ta in bpspec) apply simp apply (erule wf_subtypeE) apply (erule well_formed_cases) apply simp done lemma Rcd_type2': assumes H: "\ \ Rcd fs : RcdT fTs" shows "\ \ map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs [:] fTs" using H subtype_refl' [OF H] by (rule Rcd_type2) lemma T_eq: "\ \ t : T \ T = T' \ \ \ t : T'" by simp lemma ptyping_length [simp]: "\ p : T \ \ \ \p\\<^sub>p = \\\" "\ fps [:] fTs \ \ \ \fps\\<^sub>r = \\\" by (induct set: ptyping ptypings) simp_all lemma lift_ptyping: "\ p : T \ \ \ \ \\<^sub>p n k p : \\<^sub>\ n k T \ \\<^sub>e n k \" "\ fps [:] fTs \ \ \ \ \\<^sub>r\<^sub>p n k fps [:] \\<^sub>r\<^sub>\ n k fTs \ \\<^sub>e n k \" apply (induct set: ptyping ptypings) apply simp_all apply (rule P_Var) apply (erule P_Rcd) apply (rule P_Nil) apply (drule_tac p="\\<^sub>p n k p" and fps="\\<^sub>r\<^sub>p n k fps" in P_Cons) apply simp_all done lemma type_weaken: "\ @ \ \ t : T \ \ \\<^sub>w\<^sub>f\<^sub>B B \ \\<^sub>e 1 0 \ @ B \ \ \ \ 1 \\\ t : \\<^sub>\ 1 \\\ T" "\ @ \ \ fs [:] fTs \ \ \\<^sub>w\<^sub>f\<^sub>B B \ \\<^sub>e 1 0 \ @ B \ \ \ \\<^sub>r 1 \\\ fs [:] \\<^sub>r\<^sub>\ 1 \\\ fTs" apply (induct "\ @ \" t T and "\ @ \" fs fTs arbitrary: \ and \ set: typing typings) apply simp_all apply (rule conjI) apply (rule impI) apply (rule T_Var) apply (erule wfE_weaken) apply simp+ apply (rule impI) apply (rule T_Var) apply (erule wfE_weaken) apply assumption apply (subgoal_tac "Suc i - \\\ = Suc (i - \\\)") apply simp apply arith apply (rule refl) apply (rule T_Abs [simplified]) - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule_tac T\<^sub>1\<^sub>1="\\<^sub>\ (Suc 0) \\\ T\<^sub>1\<^sub>1" in T_App) apply simp apply simp apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (erule_tac T_TApp [THEN T_eq]) apply (drule subtype_weaken) apply simp+ apply (case_tac \) apply (simp add: liftT_substT_strange [of _ 0, simplified])+ apply (rule_tac S="\\<^sub>\ (Suc 0) \\\ S" in T_Sub) apply simp apply (drule subtype_weaken) apply simp+ \ \records\ - apply (drule_tac x="\ @ \'" in meta_spec) - apply (drule meta_mp) - apply simp - apply simp apply (drule_tac \="\\<^sub>e (Suc 0) 0 \' @ B \ \" in T_Let) apply (erule lift_ptyping) apply assumption 
apply (simp add: ac_simps) apply (rule T_Rcd) apply simp apply (rule_tac fTs="\\<^sub>r\<^sub>\ (Suc 0) \\\ fTs" in T_Proj) apply simp apply (erule_tac liftrT_assoc_Some) apply (rule T_Nil) apply (erule wfE_weaken) apply assumption apply (rule T_Cons) apply simp+ done lemma type_weaken': \ \A.5(6)\ "\ \ t : T \ \ @ \ \\<^sub>w\<^sub>f \ \ @ \ \ \ \\\ 0 t : \\<^sub>\ \\\ 0 T" apply (induct \) apply simp apply simp apply (erule well_formedE_cases) apply simp apply (drule_tac B=a in type_weaken(1) [of "[]", simplified]) apply simp+ done text \ The substitution lemmas are now proved by mutual induction on the derivations of the typing derivations for terms and lists of fields. \ lemma subst_ptyping: "\ p : T \ \ \ \ p[k \\<^sub>\ U]\<^sub>p : T[k \\<^sub>\ U]\<^sub>\ \ \[k \\<^sub>\ U]\<^sub>e" "\ fps [:] fTs \ \ \ \ fps[k \\<^sub>\ U]\<^sub>r\<^sub>p [:] fTs[k \\<^sub>\ U]\<^sub>r\<^sub>\ \ \[k \\<^sub>\ U]\<^sub>e" apply (induct set: ptyping ptypings) apply simp_all apply (rule P_Var) apply (erule P_Rcd) apply (rule P_Nil) apply (drule_tac p="p[k \\<^sub>\ U]\<^sub>p" and fps="fps[k \\<^sub>\ U]\<^sub>r\<^sub>p" in P_Cons) apply simp+ done theorem subst_type: \ \A.8\ "\ @ VarB U \ \ \ t : T \ \ \ u : U \ \\<^sub>e 1 0 \ @ \ \ t[\\\ \ u] : \\<^sub>\ 1 \\\ T" "\ @ VarB U \ \ \ fs [:] fTs \ \ \ u : U \ \\<^sub>e 1 0 \ @ \ \ fs[\\\ \ u]\<^sub>r [:] \\<^sub>r\<^sub>\ 1 \\\ fTs" apply (induct "\ @ VarB U \ \" t T and "\ @ VarB U \ \" fs fTs arbitrary: \ and \ set: typing typings) apply simp apply (rule conjI) apply (rule impI) apply simp apply (drule_tac \="\[0 \\<^sub>\ Top]\<^sub>e" in type_weaken') apply (rule wfE_subst) apply assumption apply (rule wf_Top) apply simp apply (rule impI conjI)+ apply (simp split: nat.split_asm) apply (rule T_Var) apply (erule wfE_subst) apply (rule wf_Top) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (rule T_Var) apply (erule wfE_subst) apply (rule wf_Top) apply simp apply (subgoal_tac "Suc (\\\ - Suc 0) = \\\") apply (simp (no_asm_simp)) apply arith apply simp - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply (rule T_Abs [THEN T_eq]) apply simp apply (simp add: substT_substT [symmetric]) apply simp apply (rule_tac T\<^sub>1\<^sub>1="T\<^sub>1\<^sub>1[\\\ \\<^sub>\ Top]\<^sub>\" in T_App) apply simp+ apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply simp apply (rule T_TApp [THEN T_eq]) apply simp apply (rule subst_subtype [simplified]) apply assumption apply (simp add: substT_substT [symmetric]) apply (rule_tac S="S[\\\ \\<^sub>\ Top]\<^sub>\" in T_Sub) apply simp apply simp apply (rule subst_subtype [simplified]) apply assumption \ \records\ apply simp - apply (drule meta_spec)+ - apply (drule meta_mp, rule refl)+ apply (drule_tac \="\'[0 \\<^sub>\ Top]\<^sub>e @ \" in T_Let) apply (erule subst_ptyping) apply simp apply (simp add: ac_simps) apply simp apply (rule T_Rcd) apply simp apply simp apply (rule_tac fTs="fTs[\\\ \\<^sub>\ Top]\<^sub>r\<^sub>\" in T_Proj) apply simp apply (erule_tac substrTT_assoc_Some) apply simp apply (rule T_Nil) apply (erule wfE_subst) apply (rule wf_Top) apply simp apply (rule T_Cons) apply simp+ done theorem substT_type: \ \A.11\ "\ @ TVarB Q \ \ \ t : T \ \ \ P <: Q \ \[0 \\<^sub>\ P]\<^sub>e @ \ \ t[\\\ \\<^sub>\ P] : T[\\\ \\<^sub>\ P]\<^sub>\" "\ @ TVarB Q \ \ \ fs [:] fTs \ \ \ P <: Q \ \[0 \\<^sub>\ P]\<^sub>e @ \ \ fs[\\\ \\<^sub>\ 
P]\<^sub>r [:] fTs[\\\ \\<^sub>\ P]\<^sub>r\<^sub>\" apply (induct "\ @ TVarB Q \ \" t T and "\ @ TVarB Q \ \" fs fTs arbitrary: \ and \ set: typing typings) apply simp_all apply (rule impI conjI)+ apply (rule T_Var) apply (erule wfE_subst) apply (erule wf_subtypeE) apply assumption apply (simp split: nat.split_asm) apply (subgoal_tac "\\\ \ i - Suc 0") apply (rename_tac nat) apply (subgoal_tac "i - Suc \\\ = nat") apply (simp (no_asm_simp)) apply arith apply arith apply simp apply (rule impI) apply (case_tac "i = \\\") apply simp apply (rule T_Var) apply (erule wfE_subst) apply (erule wf_subtypeE) apply assumption apply simp apply (subgoal_tac "i < \\\") apply (subgoal_tac "Suc (\\\ - Suc 0) = \\\") apply (simp (no_asm_simp)) apply arith apply arith apply (rule T_Abs [THEN T_eq]) - apply (drule_tac x="VarB T\<^sub>1 \ \" in meta_spec) apply simp apply (simp add: substT_substT [symmetric]) apply (rule_tac T\<^sub>1\<^sub>1="T\<^sub>1\<^sub>1[\\\ \\<^sub>\ P]\<^sub>\" in T_App) apply simp+ apply (rule T_TAbs) - apply (drule_tac x="TVarB T\<^sub>1 \ \" in meta_spec) apply simp apply (rule T_TApp [THEN T_eq]) apply simp apply (rule substT_subtype) apply assumption apply assumption apply (simp add: substT_substT [symmetric]) apply (rule_tac S="S[\\\ \\<^sub>\ P]\<^sub>\" in T_Sub) apply simp apply (rule substT_subtype) apply assumption apply assumption \ \records\ - apply (drule meta_spec)+ - apply (drule meta_mp, rule refl)+ apply (drule_tac \="\'[0 \\<^sub>\ P]\<^sub>e @ \" in T_Let) apply (erule subst_ptyping) apply simp apply (simp add: ac_simps) apply (rule T_Rcd) apply simp apply (rule_tac fTs="fTs[\\\ \\<^sub>\ P]\<^sub>r\<^sub>\" in T_Proj) apply simp apply (erule_tac substrTT_assoc_Some) apply (rule T_Nil) apply (erule wfE_subst) apply (erule wf_subtypeE) apply assumption apply (rule T_Cons) apply simp+ done subsection \Evaluation\ text \ \label{sec:evaluation-rcd} The definition of canonical values is extended with a clause saying that a record @{term "Rcd fs"} is a canonical value if all fields contain canonical values: \ inductive_set "value" :: "trm set" where Abs: "(\:T. t) \ value" | TAbs: "(\<:T. t) \ value" | Rcd: "\(l, t) \ set fs. t \ value \ Rcd fs \ value" text \ In order to formalize the evaluation rule for \LET\, we introduce another relation \\ p \ t \ ts\ expressing that a pattern @{term p} matches a term @{term t}. The relation also yields a list of terms @{term ts} corresponding to the variables in the pattern. The relation is defined simultaneously with another relation \\ fps [\] fs \ ts\ for matching a list of field patterns @{term fps} against a list of fields @{term fs}: \ inductive match :: "pat \ trm \ trm list \ bool" ("\ _ \ _ \ _" [50, 50, 50] 50) and matchs :: "rpat \ rcd \ trm list \ bool" ("\ _ [\] _ \ _" [50, 50, 50] 50) where M_PVar: "\ PVar T \ t \ [t]" | M_Rcd: "\ fps [\] fs \ ts \ \ PRcd fps \ Rcd fs \ ts" | M_Nil: "\ [] [\] fs \ []" | M_Cons: "fs\l\\<^sub>? = \t\ \ \ p \ t \ ts \ \ fps [\] fs \ us \ \ (l, p) \ fps [\] fs \ ts @ us" text \ The rules of the evaluation relation for the calculus with records are as follows: \ inductive eval :: "trm \ trm \ bool" (infixl "\" 50) and evals :: "rcd \ rcd \ bool" (infixl "[\]" 50) where E_Abs: "v\<^sub>2 \ value \ (\:T\<^sub>1\<^sub>1. t\<^sub>1\<^sub>2) \ v\<^sub>2 \ t\<^sub>1\<^sub>2[0 \ v\<^sub>2]" | E_TAbs: "(\<:T\<^sub>1\<^sub>1. 
t\<^sub>1\<^sub>2) \\<^sub>\ T\<^sub>2 \ t\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]" | E_App1: "t \ t' \ t \ u \ t' \ u" | E_App2: "v \ value \ t \ t' \ v \ t \ v \ t'" | E_TApp: "t \ t' \ t \\<^sub>\ T \ t' \\<^sub>\ T" | E_LetV: "v \ value \ \ p \ v \ ts \ (LET p = v IN t) \ t[0 \\<^sub>s ts]" | E_ProjRcd: "fs\l\\<^sub>? = \v\ \ v \ value \ Rcd fs..l \ v" | E_Proj: "t \ t' \ t..l \ t'..l" | E_Rcd: "fs [\] fs' \ Rcd fs \ Rcd fs'" | E_Let: "t \ t' \ (LET p = t IN u) \ (LET p = t' IN u)" | E_hd: "t \ t' \ (l, t) \ fs [\] (l, t') \ fs" | E_tl: "v \ value \ fs [\] fs' \ (l, v) \ fs [\] (l, v) \ fs'" text \ The relation @{term "t \ t'"} is defined simultaneously with a relation \mbox{@{term "fs [\] fs'"}} for evaluating record fields. The ``immediate'' reductions, namely pattern matching and projection, are described by the rules \E_LetV\ and \E_ProjRcd\, respectively, whereas \E_Proj\, \E_Rcd\, \E_Let\, \E_hd\ and \E_tl\ are congruence rules. \ lemmas matchs_induct = match_matchs.inducts(2) [of _ _ _ "\x y z. True", simplified True_simps, consumes 1, case_names M_Nil M_Cons] lemmas evals_induct = eval_evals.inducts(2) [of _ _ "\x y. True", simplified True_simps, consumes 1, case_names E_hd E_tl] lemma matchs_mono: assumes H: "\ fps [\] fs \ ts" shows "fps\l\\<^sub>? = \ \ \ fps [\] (l, t) \ fs \ ts" using H apply (induct rule: matchs_induct) apply (rule M_Nil) apply (simp split: if_split_asm) apply (rule M_Cons) apply simp_all done lemma matchs_eq: assumes H: "\ fps [\] fs \ ts" shows "\(l, p) \ set fps. fs\l\\<^sub>? = fs'\l\\<^sub>? \ \ fps [\] fs' \ ts" using H apply (induct rule: matchs_induct) apply (rule M_Nil) apply (rule M_Cons) apply auto done lemma reorder_eq: assumes H: "\ fps [:] fTs \ \" shows "\(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ \(l, p) \ set fps. fs\l\\<^sub>? = (map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs)\l\\<^sub>?" using H by (induct rule: ptypings_induct) auto lemma matchs_reorder: "\ fps [:] fTs \ \ \ \(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ \ fps [\] fs \ ts \ \ fps [\] map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs \ ts" by (rule matchs_eq [OF _ reorder_eq], assumption+) lemma matchs_reorder': "\ fps [:] fTs \ \ \ \(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ \ fps [\] map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs \ ts \ \ fps [\] fs \ ts" by (rule matchs_eq [OF _ reorder_eq [THEN ball_eq_sym]], assumption+) theorem matchs_tl: assumes H: "\ fps [\] (l, t) \ fs \ ts" shows "fps\l\\<^sub>? = \ \ \ fps [\] fs \ ts" using H apply (induct fps "(l, t) \ fs" ts arbitrary: l t fs rule: matchs_induct) apply (simp_all split: if_split_asm) apply (rule M_Nil) apply (rule M_Cons) apply auto done theorem match_length: "\ p \ t \ ts \ \ p : T \ \ \ \ts\ = \\\" "\ fps [\] ft \ ts \ \ fps [:] fTs \ \ \ \ts\ = \\\" by (induct arbitrary: T \ and fTs \ set: match matchs) (erule ptyping.cases ptypings.cases, simp+)+ text \ In the proof of the preservation theorem for the calculus with records, we need the following lemma relating the matching and typing judgements for patterns, which means that well-typed matching preserves typing. Although this property will only be used for @{term "\\<^sub>1 = []"} later, the statement must be proved in a more general form in order for the induction to go through. 
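As a rough orientation, the special case with an empty prefix context (the only instance needed later, in the \LET\ case of the preservation proof) can be sketched as follows, in ad-hoc notation rather than the de Bruijn operators of the formalisation: \[ \vdash p : T \Rightarrow \Delta \qquad \Gamma \vdash t_1 : T \qquad \Delta \mathbin{@} \Gamma \vdash t_2 : T_2 \qquad \vdash p \rhd t_1 \Rightarrow ts \;\Longrightarrow\; \Gamma \vdash t_2[0 \mapsto_s ts] : T_2' \] where $T_2'$ stands for $T_2$ with its indices shifted down by the number $|\Delta|$ of pattern variables. This display is only an informal sketch of the statement proved below, not its formal counterpart.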
\ theorem match_type: \ \A.17\ "\ p : T\<^sub>1 \ \ \ \\<^sub>2 \ t\<^sub>1 : T\<^sub>1 \ \\<^sub>1 @ \ @ \\<^sub>2 \ t\<^sub>2 : T\<^sub>2 \ \ p \ t\<^sub>1 \ ts \ \\<^sub>e \\\ 0 \\<^sub>1 @ \\<^sub>2 \ t\<^sub>2[\\\<^sub>1\ \\<^sub>s ts] : \\<^sub>\ \\\ \\\<^sub>1\ T\<^sub>2" "\ fps [:] fTs \ \ \ \\<^sub>2 \ fs [:] fTs \ \\<^sub>1 @ \ @ \\<^sub>2 \ t\<^sub>2 : T\<^sub>2 \ \ fps [\] fs \ ts \ \\<^sub>e \\\ 0 \\<^sub>1 @ \\<^sub>2 \ t\<^sub>2[\\\<^sub>1\ \\<^sub>s ts] : \\<^sub>\ \\\ \\\<^sub>1\ T\<^sub>2" proof (induct arbitrary: \\<^sub>1 \\<^sub>2 t\<^sub>1 t\<^sub>2 T\<^sub>2 ts and \\<^sub>1 \\<^sub>2 fs t\<^sub>2 T\<^sub>2 ts set: ptyping ptypings) case (P_Var T \\<^sub>1 \\<^sub>2 t\<^sub>1 t\<^sub>2 T\<^sub>2 ts) from P_Var have "\\<^sub>1[0 \\<^sub>\ Top]\<^sub>e @ \\<^sub>2 \ t\<^sub>2[\\\<^sub>1\ \ t\<^sub>1] : T\<^sub>2[\\\<^sub>1\ \\<^sub>\ Top]\<^sub>\" by - (rule subst_type [simplified], simp_all) moreover from P_Var(3) have "ts = [t\<^sub>1]" by cases simp_all ultimately show ?case by simp next case (P_Rcd fps fTs \ \\<^sub>1 \\<^sub>2 t\<^sub>1 t\<^sub>2 T\<^sub>2 ts) from P_Rcd(5) obtain fs where t\<^sub>1: "t\<^sub>1 = Rcd fs" and fps: "\ fps [\] fs \ ts" by cases simp_all with P_Rcd have fs: "\\<^sub>2 \ Rcd fs : RcdT fTs" by simp hence "\\<^sub>2 \ map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs [:] fTs" by (rule Rcd_type2') moreover note P_Rcd(4) moreover from fs have "\(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ \\<^sub>2 \ u : U" by (rule Rcd_type1') hence "\(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\" by blast with P_Rcd(1) have "\ fps [\] map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs \ ts" using fps by (rule matchs_reorder) ultimately show ?case by (rule P_Rcd) next case (P_Nil \\<^sub>1 \\<^sub>2 fs t\<^sub>2 T\<^sub>2 ts) from P_Nil(3) have "ts = []" by cases simp_all with P_Nil show ?case by simp next case (P_Cons p T \\<^sub>1 fps fTs \\<^sub>2 l \\<^sub>1 \\<^sub>2 fs t\<^sub>2 T\<^sub>2 ts) from P_Cons(8) obtain t ts\<^sub>1 ts\<^sub>2 where t: "fs\l\\<^sub>? = \t\" and p: "\ p \ t \ ts\<^sub>1" and fps: "\ fps [\] fs \ ts\<^sub>2" and ts: "ts = ts\<^sub>1 @ ts\<^sub>2" by cases simp_all from P_Cons(6) t fps obtain fs' where fps': "\ fps [\] (l, t) \ fs' \ ts\<^sub>2" and tT: "\\<^sub>2 \ t : T" and fs': "\\<^sub>2 \ fs' [:] fTs" and l: "fs'\l\\<^sub>? = \" by cases auto from P_Cons have "(\\<^sub>1 @ \\<^sub>e \\\<^sub>1\ 0 \\<^sub>2) @ \\<^sub>1 @ \\<^sub>2 \ t\<^sub>2 : T\<^sub>2" by simp with tT have ts\<^sub>1: "\\<^sub>e \\\<^sub>1\ 0 (\\<^sub>1 @ \\<^sub>e \\\<^sub>1\ 0 \\<^sub>2) @ \\<^sub>2 \ t\<^sub>2[\\\<^sub>1 @ \\<^sub>e \\\<^sub>1\ 0 \\<^sub>2\ \\<^sub>s ts\<^sub>1] : \\<^sub>\ \\\<^sub>1\ \\\<^sub>1 @ \\<^sub>e \\\<^sub>1\ 0 \\<^sub>2\ T\<^sub>2" using p by (rule P_Cons) from fps' P_Cons(5) have "\ fps [\] fs' \ ts\<^sub>2" by (rule matchs_tl) with fs' ts\<^sub>1 [simplified] have "\\<^sub>e \\\<^sub>2\ 0 (\\<^sub>e \\\<^sub>1\ \\\<^sub>2\ \\<^sub>1) @ \\<^sub>2 \ t\<^sub>2[\\\<^sub>1\ + \\\<^sub>2\ \\<^sub>s ts\<^sub>1][\\\<^sub>e \\\<^sub>1\ \\\<^sub>2\ \\<^sub>1\ \\<^sub>s ts\<^sub>2] : \\<^sub>\ \\\<^sub>2\ \\\<^sub>e \\\<^sub>1\ \\\<^sub>2\ \\<^sub>1\ (\\<^sub>\ \\\<^sub>1\ (\\\<^sub>1\ + \\\<^sub>2\) T\<^sub>2)" by (rule P_Cons(4)) thus ?case by (simp add: decE_decE [of _ 0, simplified] match_length(2) [OF fps P_Cons(3)] ts) qed lemma evals_labels [simp]: assumes H: "fs [\] fs'" shows "(fs\l\\<^sub>? = \) = (fs'\l\\<^sub>? 
= \)" using H by (induct rule: evals_induct) simp_all theorem preservation: \ \A.20\ "\ \ t : T \ t \ t' \ \ \ t' : T" "\ \ fs [:] fTs \ fs [\] fs' \ \ \ fs' [:] fTs" proof (induct arbitrary: t' and fs' set: typing typings) case (T_Var \ i U T t') from \Var i \ t'\ show ?case by cases next case (T_Abs T\<^sub>1 \ t\<^sub>2 T\<^sub>2 t') from \(\:T\<^sub>1. t\<^sub>2) \ t'\ show ?case by cases next case (T_App \ t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 t\<^sub>2 t') from \t\<^sub>1 \ t\<^sub>2 \ t'\ show ?case proof cases case (E_Abs T\<^sub>1\<^sub>1' t\<^sub>1\<^sub>2) with T_App have "\ \ (\:T\<^sub>1\<^sub>1'. t\<^sub>1\<^sub>2) : T\<^sub>1\<^sub>1 \ T\<^sub>1\<^sub>2" by simp then obtain S' where T\<^sub>1\<^sub>1: "\ \ T\<^sub>1\<^sub>1 <: T\<^sub>1\<^sub>1'" and t\<^sub>1\<^sub>2: "VarB T\<^sub>1\<^sub>1' \ \ \ t\<^sub>1\<^sub>2 : S'" and S': "\ \ S'[0 \\<^sub>\ Top]\<^sub>\ <: T\<^sub>1\<^sub>2" by (rule Abs_type' [simplified]) blast from \\ \ t\<^sub>2 : T\<^sub>1\<^sub>1\ have "\ \ t\<^sub>2 : T\<^sub>1\<^sub>1'" using T\<^sub>1\<^sub>1 by (rule T_Sub) with t\<^sub>1\<^sub>2 have "\ \ t\<^sub>1\<^sub>2[0 \ t\<^sub>2] : S'[0 \\<^sub>\ Top]\<^sub>\" by (rule subst_type [where \="[]", simplified]) hence "\ \ t\<^sub>1\<^sub>2[0 \ t\<^sub>2] : T\<^sub>1\<^sub>2" using S' by (rule T_Sub) with E_Abs show ?thesis by simp next case (E_App1 t'') from \t\<^sub>1 \ t''\ have "\ \ t'' : T\<^sub>1\<^sub>1 \ T\<^sub>1\<^sub>2" by (rule T_App) hence "\ \ t'' \ t\<^sub>2 : T\<^sub>1\<^sub>2" using \\ \ t\<^sub>2 : T\<^sub>1\<^sub>1\ by (rule typing_typings.T_App) with E_App1 show ?thesis by simp next case (E_App2 t'') from \t\<^sub>2 \ t''\ have "\ \ t'' : T\<^sub>1\<^sub>1" by (rule T_App) with T_App(1) have "\ \ t\<^sub>1 \ t'' : T\<^sub>1\<^sub>2" by (rule typing_typings.T_App) with E_App2 show ?thesis by simp qed next case (T_TAbs T\<^sub>1 \ t\<^sub>2 T\<^sub>2 t') from \(\<:T\<^sub>1. t\<^sub>2) \ t'\ show ?case by cases next case (T_TApp \ t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 t') from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ t'\ show ?case proof cases case (E_TAbs T\<^sub>1\<^sub>1' t\<^sub>1\<^sub>2) with T_TApp have "\ \ (\<:T\<^sub>1\<^sub>1'. t\<^sub>1\<^sub>2) : (\<:T\<^sub>1\<^sub>1. T\<^sub>1\<^sub>2)" by simp then obtain S' where "TVarB T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1\<^sub>2 : S'" and "TVarB T\<^sub>1\<^sub>1 \ \ \ S' <: T\<^sub>1\<^sub>2" by (rule TAbs_type') blast hence "TVarB T\<^sub>1\<^sub>1 \ \ \ t\<^sub>1\<^sub>2 : T\<^sub>1\<^sub>2" by (rule T_Sub) hence "\ \ t\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2] : T\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]\<^sub>\" using T_TApp(3) by (rule substT_type [where \="[]", simplified]) with E_TAbs show ?thesis by simp next case (E_TApp t'') from \t\<^sub>1 \ t''\ have "\ \ t'' : (\<:T\<^sub>1\<^sub>1. 
T\<^sub>1\<^sub>2)" by (rule T_TApp) hence "\ \ t'' \\<^sub>\ T\<^sub>2 : T\<^sub>1\<^sub>2[0 \\<^sub>\ T\<^sub>2]\<^sub>\" using \\ \ T\<^sub>2 <: T\<^sub>1\<^sub>1\ by (rule typing_typings.T_TApp) with E_TApp show ?thesis by simp qed next case (T_Sub \ t S T t') from \t \ t'\ have "\ \ t' : S" by (rule T_Sub) then show ?case using \\ \ S <: T\ by (rule typing_typings.T_Sub) next case (T_Let \ t\<^sub>1 T\<^sub>1 p \ t\<^sub>2 T\<^sub>2 t') from \(LET p = t\<^sub>1 IN t\<^sub>2) \ t'\ show ?case proof cases case (E_LetV ts) from T_Let (3,1,4) \\ p \ t\<^sub>1 \ ts\ have "\ \ t\<^sub>2[0 \\<^sub>s ts] : \\<^sub>\ \\\ 0 T\<^sub>2" by (rule match_type(1) [of _ _ _ _ _ "[]", simplified]) with E_LetV show ?thesis by simp next case (E_Let t'') from \t\<^sub>1 \ t''\ have "\ \ t'' : T\<^sub>1" by (rule T_Let) hence "\ \ (LET p = t'' IN t\<^sub>2) : \\<^sub>\ \\\ 0 T\<^sub>2" using T_Let(3,4) by (rule typing_typings.T_Let) with E_Let show ?thesis by simp qed next case (T_Rcd \ fs fTs t') from \Rcd fs \ t'\ obtain fs' where t': "t' = Rcd fs'" and fs: "fs [\] fs'" by cases simp_all from fs have "\ \ fs' [:] fTs" by (rule T_Rcd) hence "\ \ Rcd fs' : RcdT fTs" by (rule typing_typings.T_Rcd) with t' show ?case by simp next case (T_Proj \ t fTs l T t') from \t..l \ t'\ show ?case proof cases case (E_ProjRcd fs) with T_Proj have "\ \ Rcd fs : RcdT fTs" by simp hence "\(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ \ \ u : U" by (rule Rcd_type1') with E_ProjRcd T_Proj show ?thesis by (fastforce dest: assoc_set) next case (E_Proj t'') from \t \ t''\ have "\ \ t'' : RcdT fTs" by (rule T_Proj) hence "\ \ t''..l : T" using T_Proj(3) by (rule typing_typings.T_Proj) with E_Proj show ?thesis by simp qed next case (T_Nil \ fs') from \[] [\] fs'\ show ?case by cases next case (T_Cons \ t T fs fTs l fs') from \(l, t) \ fs [\] fs'\ show ?case proof cases case (E_hd t') from \t \ t'\ have "\ \ t' : T" by (rule T_Cons) hence "\ \ (l, t') \ fs [:] (l, T) \ fTs" using T_Cons(3,5) by (rule typing_typings.T_Cons) with E_hd show ?thesis by simp next case (E_tl fs'') note fs = \fs [\] fs''\ note T_Cons(1) moreover from fs have "\ \ fs'' [:] fTs" by (rule T_Cons) moreover from fs T_Cons have "fs''\l\\<^sub>? = \" by simp ultimately have "\ \ (l, t) \ fs'' [:] (l, T) \ fTs" by (rule typing_typings.T_Cons) with E_tl show ?thesis by simp qed qed lemma Fun_canonical: \ \A.14(1)\ assumes ty: "[] \ v : T\<^sub>1 \ T\<^sub>2" shows "v \ value \ \t S. v = (\:S. t)" using ty proof (induct "[]::env" v "T\<^sub>1 \ T\<^sub>2" arbitrary: T\<^sub>1 T\<^sub>2 rule: typing_induct) case T_Abs show ?case by iprover next case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 t\<^sub>2 T\<^sub>1 T\<^sub>2) from \t\<^sub>1 \ t\<^sub>2 \ value\ show ?case by cases next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 T\<^sub>1 T\<^sub>2') from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ value\ show ?case by cases next case (T_Sub t S T\<^sub>1 T\<^sub>2) from \[] \ S <: T\<^sub>1 \ T\<^sub>2\ obtain S\<^sub>1 S\<^sub>2 where S: "S = S\<^sub>1 \ S\<^sub>2" by cases (auto simp add: T_Sub) show ?case by (rule T_Sub S)+ next case (T_Let t\<^sub>1 T\<^sub>1 p \ t\<^sub>2 T\<^sub>2 T\<^sub>1' T\<^sub>2') from \(LET p = t\<^sub>1 IN t\<^sub>2) \ value\ show ?case by cases next case (T_Proj t fTs l T\<^sub>1 T\<^sub>2) from \t..l \ value\ show ?case by cases qed simp_all lemma TyAll_canonical: \ \A.14(3)\ assumes ty: "[] \ v : (\<:T\<^sub>1. T\<^sub>2)" shows "v \ value \ \t S. v = (\<:S. t)" using ty proof (induct "[]::env" v "\<:T\<^sub>1. 
T\<^sub>2" arbitrary: T\<^sub>1 T\<^sub>2 rule: typing_induct) case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 t\<^sub>2 T\<^sub>1 T\<^sub>2) from \t\<^sub>1 \ t\<^sub>2 \ value\ show ?case by cases next case T_TAbs show ?case by iprover next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 T\<^sub>1 T\<^sub>2') from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ value\ show ?case by cases next case (T_Sub t S T\<^sub>1 T\<^sub>2) from \[] \ S <: (\<:T\<^sub>1. T\<^sub>2)\ obtain S\<^sub>1 S\<^sub>2 where S: "S = (\<:S\<^sub>1. S\<^sub>2)" by cases (auto simp add: T_Sub) show ?case by (rule T_Sub S)+ next case (T_Let t\<^sub>1 T\<^sub>1 p \ t\<^sub>2 T\<^sub>2 T\<^sub>1' T\<^sub>2') from \(LET p = t\<^sub>1 IN t\<^sub>2) \ value\ show ?case by cases next case (T_Proj t fTs l T\<^sub>1 T\<^sub>2) from \t..l \ value\ show ?case by cases qed simp_all text \ Like in the case of the simple calculus, we also need a canonical values theorem for record types: \ lemma RcdT_canonical: \ \A.14(2)\ assumes ty: "[] \ v : RcdT fTs" shows "v \ value \ \fs. v = Rcd fs \ (\(l, t) \ set fs. t \ value)" using ty proof (induct "[]::env" v "RcdT fTs" arbitrary: fTs rule: typing_induct) case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 t\<^sub>2 fTs) from \t\<^sub>1 \ t\<^sub>2 \ value\ show ?case by cases next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2 fTs) from \t\<^sub>1 \\<^sub>\ T\<^sub>2 \ value\ show ?case by cases next case (T_Sub t S fTs) from \[] \ S <: RcdT fTs\ obtain fTs' where S: "S = RcdT fTs'" by cases (auto simp add: T_Sub) show ?case by (rule T_Sub S)+ next case (T_Let t\<^sub>1 T\<^sub>1 p \ t\<^sub>2 T\<^sub>2 fTs) from \(LET p = t\<^sub>1 IN t\<^sub>2) \ value\ show ?case by cases next case (T_Rcd fs fTs) from \Rcd fs \ value\ show ?case using T_Rcd by cases simp_all next case (T_Proj t fTs l fTs') from \t..l \ value\ show ?case by cases qed simp_all theorem reorder_prop: "\(l, t) \ set fs. P t \ \(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ \(l, t) \ set (map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs). P t" apply (induct fs) apply simp apply (simp add: split_paired_all) apply simp apply (rule ballI) apply (simp add: split_paired_all) apply (drule bpspec) apply assumption apply (erule exE) apply (simp split: if_split_asm) apply (auto dest: assoc_set) done text \ Another central property needed in the proof of the progress theorem is that well-typed matching is defined. This means that if the pattern @{term p} is compatible with the type @{term T} of the closed term @{term t} that it has to match, then it is always possible to extract a list of terms @{term ts} corresponding to the variables in @{term p}. Interestingly, this important property is missing in the description of the {\sc PoplMark} Challenge \cite{PoplMark}. \ theorem ptyping_match: "\ p : T \ \ \ [] \ t : T \ t \ value \ \ts. \ p \ t \ ts" "\ fps [:] fTs \ \ \ [] \ fs [:] fTs \ \(l, t) \ set fs. t \ value \ \us. \ fps [\] fs \ us" proof (induct arbitrary: t and fs set: ptyping ptypings) case (P_Var T t) show ?case by (iprover intro: M_PVar) next case (P_Rcd fps fTs \ t) then obtain fs where t: "t = Rcd fs" and fs: "\(l, t) \ set fs. t \ value" by (blast dest: RcdT_canonical) with P_Rcd have fs': "[] \ Rcd fs : RcdT fTs" by simp hence "[] \ map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs [:] fTs" by (rule Rcd_type2') moreover from Rcd_type1' [OF fs'] have assoc: "\(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\" by blast with fs have "\(l, t) \ set (map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs). 
t \ value" by (rule reorder_prop) ultimately have "\us. \ fps [\] map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs \ us" by (rule P_Rcd) then obtain us where "\ fps [\] map (\(l, T). (l, the (fs\l\\<^sub>?))) fTs \ us" .. with P_Rcd(1) assoc have "\ fps [\] fs \ us" by (rule matchs_reorder') hence "\ PRcd fps \ Rcd fs \ us" by (rule M_Rcd) with t show ?case by fastforce next case (P_Nil fs) show ?case by (iprover intro: M_Nil) next case (P_Cons p T \\<^sub>1 fps fTs \\<^sub>2 l fs) from \[] \ fs [:] (l, T) \ fTs\ obtain t fs' where fs: "fs = (l, t) \ fs'" and t: "[] \ t : T" and fs': "[] \ fs' [:] fTs" by cases auto have "((l, t) \ fs')\l\\<^sub>? = \t\" by simp moreover from fs P_Cons have "t \ value" by simp with t have "\ts. \ p \ t \ ts" by (rule P_Cons) then obtain ts where "\ p \ t \ ts" .. moreover from P_Cons fs have "\(l, t)\set fs'. t \ value" by auto with fs' have "\us. \ fps [\] fs' \ us" by (rule P_Cons) then obtain us where "\ fps [\] fs' \ us" .. hence "\ fps [\] (l, t) \ fs' \ us" using P_Cons(5) by (rule matchs_mono) ultimately have "\ (l, p) \ fps [\] (l, t) \ fs' \ ts @ us" by (rule M_Cons) with fs show ?case by iprover qed theorem progress: \ \A.16\ "[] \ t : T \ t \ value \ (\t'. t \ t')" "[] \ fs [:] fTs \ (\(l, t) \ set fs. t \ value) \ (\fs'. fs [\] fs')" proof (induct "[]::env" t T and "[]::env" fs fTs set: typing typings) case T_Var thus ?case by simp next case T_Abs from value.Abs show ?case .. next case (T_App t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 t\<^sub>2) hence "t\<^sub>1 \ value \ (\t'. t\<^sub>1 \ t')" by simp thus ?case proof assume t\<^sub>1_val: "t\<^sub>1 \ value" with T_App obtain t S where t\<^sub>1: "t\<^sub>1 = (\:S. t)" by (auto dest!: Fun_canonical) from T_App have "t\<^sub>2 \ value \ (\t'. t\<^sub>2 \ t')" by simp thus ?thesis proof assume "t\<^sub>2 \ value" with t\<^sub>1 have "t\<^sub>1 \ t\<^sub>2 \ t[0 \ t\<^sub>2]" by simp (rule eval_evals.intros) thus ?thesis by iprover next assume "\t'. t\<^sub>2 \ t'" then obtain t' where "t\<^sub>2 \ t'" by iprover with t\<^sub>1_val have "t\<^sub>1 \ t\<^sub>2 \ t\<^sub>1 \ t'" by (rule eval_evals.intros) thus ?thesis by iprover qed next assume "\t'. t\<^sub>1 \ t'" then obtain t' where "t\<^sub>1 \ t'" .. hence "t\<^sub>1 \ t\<^sub>2 \ t' \ t\<^sub>2" by (rule eval_evals.intros) thus ?thesis by iprover qed next case T_TAbs from value.TAbs show ?case .. next case (T_TApp t\<^sub>1 T\<^sub>1\<^sub>1 T\<^sub>1\<^sub>2 T\<^sub>2) hence "t\<^sub>1 \ value \ (\t'. t\<^sub>1 \ t')" by simp thus ?case proof assume "t\<^sub>1 \ value" with T_TApp obtain t S where "t\<^sub>1 = (\<:S. t)" by (auto dest!: TyAll_canonical) hence "t\<^sub>1 \\<^sub>\ T\<^sub>2 \ t[0 \\<^sub>\ T\<^sub>2]" by simp (rule eval_evals.intros) thus ?thesis by iprover next assume "\t'. t\<^sub>1 \ t'" then obtain t' where "t\<^sub>1 \ t'" .. hence "t\<^sub>1 \\<^sub>\ T\<^sub>2 \ t' \\<^sub>\ T\<^sub>2" by (rule eval_evals.intros) thus ?thesis by iprover qed next case (T_Sub t S T) show ?case by (rule T_Sub) next case (T_Let t\<^sub>1 T\<^sub>1 p \ t\<^sub>2 T\<^sub>2) hence "t\<^sub>1 \ value \ (\t'. t\<^sub>1 \ t')" by simp thus ?case proof assume t\<^sub>1: "t\<^sub>1 \ value" with T_Let have "\ts. \ p \ t\<^sub>1 \ ts" by (auto intro: ptyping_match) with t\<^sub>1 show ?thesis by (blast intro: eval_evals.intros) next assume "\t'. 
t\<^sub>1 \ t'" thus ?thesis by (blast intro: eval_evals.intros) qed next case (T_Rcd fs fTs) thus ?case by (blast intro: value.intros eval_evals.intros) next case (T_Proj t fTs l T) hence "t \ value \ (\t'. t \ t')" by simp thus ?case proof assume tv: "t \ value" with T_Proj obtain fs where t: "t = Rcd fs" and fs: "\(l, t) \ set fs. t \ value" by (auto dest: RcdT_canonical) with T_Proj have "[] \ Rcd fs : RcdT fTs" by simp hence "\(l, U)\set fTs. \u. fs\l\\<^sub>? = \u\ \ [] \ u : U" by (rule Rcd_type1') with T_Proj obtain u where u: "fs\l\\<^sub>? = \u\" by (blast dest: assoc_set) with fs have "u \ value" by (blast dest: assoc_set) with u t show ?case by (blast intro: eval_evals.intros) next assume "\t'. t \ t'" thus ?case by (blast intro: eval_evals.intros) qed next case T_Nil show ?case by simp next case (T_Cons t T fs fTs l) thus ?case by (auto intro: eval_evals.intros) qed end diff --git a/thys/Poincare_Disc/Poincare_Distance.thy b/thys/Poincare_Disc/Poincare_Distance.thy --- a/thys/Poincare_Disc/Poincare_Distance.thy +++ b/thys/Poincare_Disc/Poincare_Distance.thy @@ -1,1567 +1,1567 @@ theory Poincare_Distance imports Poincare_Lines_Ideal_Points Hyperbolic_Functions begin (* ------------------------------------------------------------------ *) section \H-distance in the Poincar\'e model\ (* ------------------------------------------------------------------ *) text\Informally, the \emph{h-distance} between two h-points is defined as the absolute value of the logarithm of the cross ratio between those two points and the two ideal points.\ abbreviation Re_cross_ratio where "Re_cross_ratio z u v w \ Re (to_complex (cross_ratio z u v w))" definition calc_poincare_distance :: "complex_homo \ complex_homo \ complex_homo \ complex_homo \ real" where [simp]: "calc_poincare_distance u i1 v i2 = abs (ln (Re_cross_ratio u i1 v i2))" definition poincare_distance_pred :: "complex_homo \ complex_homo \ real \ bool" where [simp]: "poincare_distance_pred u v d \ (u = v \ d = 0) \ (u \ v \ (\ i1 i2. ideal_points (poincare_line u v) = {i1, i2} \ d = calc_poincare_distance u i1 v i2))" definition poincare_distance :: "complex_homo \ complex_homo \ real" where "poincare_distance u v = (THE d. poincare_distance_pred u v d)" text\We show that the described cross-ratio is always a finite, positive real number.\ lemma distance_cross_ratio_real_positive: assumes "u \ unit_disc" and "v \ unit_disc" and "u \ v" shows "\ i1 i2.
ideal_points (poincare_line u v) = {i1, i2} \ cross_ratio u i1 v i2 \ \\<^sub>h \ is_real (to_complex (cross_ratio u i1 v i2)) \ Re_cross_ratio u i1 v i2 > 0" (is "?P u v") proof (rule wlog_positive_x_axis[OF assms]) fix x assume *: "is_real x" "0 < Re x" "Re x < 1" hence "x \ -1" "x \ 1" by auto hence **: "of_complex x \ \\<^sub>h" "of_complex x \ 0\<^sub>h" "of_complex x \ of_complex (-1)" "of_complex 1 \ of_complex x" "of_complex x \ circline_set x_axis" using * unfolding circline_set_x_axis by (auto simp add: of_complex_inj) have ***: "0\<^sub>h \ of_complex (-1)" "0\<^sub>h \ of_complex 1" by (metis of_complex_zero_iff zero_neq_neg_one, simp) have ****: "- x - 1 \ 0" "x - 1 \ 0" using \x \ -1\ \x \ 1\ by (metis add.inverse_inverse eq_iff_diff_eq_0, simp) have "poincare_line 0\<^sub>h (of_complex x) = x_axis" using ** by (simp add: poincare_line_0_real_is_x_axis) thus "?P 0\<^sub>h (of_complex x)" using * ** *** **** using cross_ratio_not_inf[of "0\<^sub>h" "of_complex 1" "of_complex (-1)" "of_complex x"] using cross_ratio_not_inf[of "0\<^sub>h" "of_complex (-1)" "of_complex 1" "of_complex x"] using cross_ratio_real[of 0 "-1" x 1] cross_ratio_real[of 0 1 x "-1"] apply (auto simp add: poincare_line_0_real_is_x_axis doubleton_eq_iff circline_set_x_axis) apply (subst cross_ratio, simp_all, subst Re_complex_div_gt_0, simp, subst mult_neg_neg, simp_all)+ done next fix M u v let ?Mu = "moebius_pt M u" and ?Mv = "moebius_pt M v" assume *: "unit_disc_fix M" "u \ unit_disc" "v \ unit_disc" "u \ v" "?P ?Mu ?Mv" show "?P u v" proof safe fix i1 i2 let ?cr = "cross_ratio u i1 v i2" assume **: "ideal_points (poincare_line u v) = {i1, i2}" have "i1 \ u" "i1 \ v" "i2 \ u" "i2 \ v" "i1 \ i2" using ideal_points_different[OF *(2-3), of i1 i2] ** \u \ v\ by auto hence "0 < Re (to_complex ?cr) \ is_real (to_complex ?cr) \ ?cr \ \\<^sub>h" using * ** apply (erule_tac x="moebius_pt M i1" in allE) apply (erule_tac x="moebius_pt M i2" in allE) apply (subst (asm) ideal_points_poincare_line_moebius[of M u v i1 i2], simp_all) done thus "0 < Re (to_complex ?cr)" "is_real (to_complex ?cr)" "?cr = \\<^sub>h \ False" by simp_all qed qed text\Next we show that for any two different points of the unit disc there is exactly one number that satisfies the h-distance predicate.\ lemma distance_unique: assumes "u \ unit_disc" and "v \ unit_disc" shows "\! d.
poincare_distance_pred u v d" proof (cases "u = v") case True thus ?thesis by auto next case False obtain i1 i2 where *: "i1 \ i2" "ideal_points (poincare_line u v) = {i1, i2}" using obtain_ideal_points[OF is_poincare_line_poincare_line] \u \ v\ by blast let ?d = "calc_poincare_distance u i1 v i2" show ?thesis proof (rule ex1I) show "poincare_distance_pred u v ?d" using * \u \ v\ proof (simp del: calc_poincare_distance_def, safe) fix i1' i2' assume "{i1, i2} = {i1', i2'}" hence **: "(i1' = i1 \ i2' = i2) \ (i1' = i2 \ i2' = i1)" using doubleton_eq_iff[of i1 i2 i1' i2'] by blast have all_different: "u \ i1" "u \ i2" "v \ i1" "v \ i2" "u \ i1'" "u \ i2'" "v \ i1'" "v \ i2'" "i1 \ i2" using ideal_points_different[OF assms, of i1 i2] * ** \u \ v\ by auto show "calc_poincare_distance u i1 v i2 = calc_poincare_distance u i1' v i2'" proof- let ?cr = "cross_ratio u i1 v i2" let ?cr' = "cross_ratio u i1' v i2'" have "Re (to_complex ?cr) > 0" "is_real (to_complex ?cr)" "Re (to_complex ?cr') > 0" "is_real (to_complex ?cr')" using False distance_cross_ratio_real_positive[OF assms(1-2)] * ** by auto thus ?thesis using ** using cross_ratio_not_zero cross_ratio_not_inf all_different by auto (subst cross_ratio_commute_24, subst reciprocal_real, simp_all add: ln_div) qed qed next fix d assume "poincare_distance_pred u v d" thus "d = ?d" using * \u \ v\ by auto qed qed lemma poincare_distance_satisfies_pred [simp]: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance_pred u v (poincare_distance u v)" using distance_unique[OF assms] theI'[of "poincare_distance_pred u v"] unfolding poincare_distance_def by blast lemma poincare_distance_I: assumes "u \ unit_disc" and "v \ unit_disc" and "u \ v" and "ideal_points (poincare_line u v) = {i1, i2}" shows "poincare_distance u v = calc_poincare_distance u i1 v i2" using assms using poincare_distance_satisfies_pred[OF assms(1-2)] by simp lemma poincare_distance_refl [simp]: assumes "u \ unit_disc" shows "poincare_distance u u = 0" using assms using poincare_distance_satisfies_pred[OF assms assms] by simp text\Unit disc preserving Möbius transformations preserve h-distance. 
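Informally, writing $d_h$ for @{term poincare_distance} and $(z_1, z_2; z_3, z_4)$ for the cross ratio, the argument of the following lemma is that a unit disc preserving Möbius transformation $M$ maps the ideal points $i_1$, $i_2$ of the h-line through $u$ and $v$ to the ideal points of the h-line through $M\,u$ and $M\,v$, while leaving the cross ratio unchanged, so that \[ d_h(M\,u, M\,v) = \bigl|\ln\,(M\,u, M\,i_1; M\,v, M\,i_2)\bigr| = \bigl|\ln\,(u, i_1; v, i_2)\bigr| = d_h(u, v). \] This display is only a sketch of the proof carried out below.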
\ lemma unit_disc_fix_preserve_poincare_distance [simp]: assumes "unit_disc_fix M" and "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance (moebius_pt M u) (moebius_pt M v) = poincare_distance u v" proof (cases "u = v") case True have "moebius_pt M u \ unit_disc" "moebius_pt M v \ unit_disc" using unit_disc_fix_iff[OF assms(1), symmetric] assms by blast+ thus ?thesis using assms \u = v\ by simp next case False obtain i1 i2 where *: "ideal_points (poincare_line u v) = {i1, i2}" using \u \ v\ by (rule obtain_ideal_points[OF is_poincare_line_poincare_line[of u v]]) let ?Mu = "moebius_pt M u" and ?Mv = "moebius_pt M v" and ?Mi1 = "moebius_pt M i1" and ?Mi2 = "moebius_pt M i2" have **: "?Mu \ unit_disc" "?Mv \ unit_disc" using assms using unit_disc_fix_iff by blast+ have ***: "?Mu \ ?Mv" using \u \ v\ by simp have "poincare_distance u v = calc_poincare_distance u i1 v i2" using poincare_distance_I[OF assms(2-3) \u \ v\ *] by auto moreover have "unit_circle_fix M" using assms by simp hence ++: "ideal_points (poincare_line ?Mu ?Mv) = {?Mi1, ?Mi2}" using \u \ v\ assms * by simp have "poincare_distance ?Mu ?Mv = calc_poincare_distance ?Mu ?Mi1 ?Mv ?Mi2" by (rule poincare_distance_I[OF ** *** ++]) moreover have "calc_poincare_distance ?Mu ?Mi1 ?Mv ?Mi2 = calc_poincare_distance u i1 v i2" using ideal_points_different[OF assms(2-3) \u \ v\ *] unfolding calc_poincare_distance_def by (subst moebius_preserve_cross_ratio[symmetric], simp_all) ultimately show ?thesis by simp qed text\Knowing ideal points for x-axis, we can easily explicitly calculate distances.\ lemma poincare_distance_x_axis_x_axis: assumes "x \ unit_disc" and "y \ unit_disc" and "x \ circline_set x_axis" and "y \ circline_set x_axis" shows "poincare_distance x y = (let x' = to_complex x; y' = to_complex y in abs (ln (Re (((1 + x') * (1 - y')) / ((1 - x') * (1 + y'))))))" proof- obtain x' y' where *: "x = of_complex x'" "y = of_complex y'" using inf_or_of_complex[of x] inf_or_of_complex[of y] \x \ unit_disc\ \y \ unit_disc\ by auto have "cmod x' < 1" "cmod y' < 1" using \x \ unit_disc\ \y \ unit_disc\ * by (metis unit_disc_iff_cmod_lt_1)+ hence **: "x' \ 1" "x' \ 1" "y' \ -1" "y' \ 1" by auto have "1 + y' \ 0" using ** by (metis add.left_cancel add_neg_numeral_special(7)) show ?thesis proof (cases "x = y") case True thus ?thesis using assms(1-2) using unit_disc_iff_cmod_lt_1[of "to_complex x"] * ** `1 + y' \ 0` by auto next case False hence "poincare_line x y = x_axis" using poincare_line_x_axis[OF assms] by simp hence "ideal_points (poincare_line x y) = {of_complex (-1), of_complex 1}" by simp hence "poincare_distance x y = calc_poincare_distance x (of_complex (-1)) y (of_complex 1)" using poincare_distance_I assms \x \ y\ by auto also have "... 
= abs (ln (Re (((x' + 1) * (y' - 1)) / ((x' - 1) * (y' + 1)))))" using * \cmod x' < 1\ \cmod y' < 1\ by (simp, transfer, transfer, auto) finally show ?thesis using * by (metis (no_types, lifting) add.commute minus_diff_eq minus_divide_divide mult_minus_left mult_minus_right to_complex_of_complex) qed qed lemma poincare_distance_zero_x_axis: assumes "x \ unit_disc" and "x \ circline_set x_axis" shows "poincare_distance 0\<^sub>h x = (let x' = to_complex x in abs (ln (Re ((1 - x') / (1 + x')))))" using assms using poincare_distance_x_axis_x_axis[of "0\<^sub>h" x] by (simp add: Let_def) lemma poincare_distance_zero: assumes "x \ unit_disc" shows "poincare_distance 0\<^sub>h x = (let x' = to_complex x in abs (ln (Re ((1 - cmod x') / (1 + cmod x')))))" (is "?P x") proof (cases "x = 0\<^sub>h") case True thus ?thesis by auto next case False show ?thesis proof (rule wlog_rotation_to_positive_x_axis) show "x \ unit_disc" "x \ 0\<^sub>h" by fact+ next fix \ u assume "u \ unit_disc" "u \ 0\<^sub>h" "?P (moebius_pt (moebius_rotation \) u)" thus "?P u" using unit_disc_fix_preserve_poincare_distance[of "moebius_rotation \" "0\<^sub>h" u] by (cases "u = \\<^sub>h") (simp_all add: Let_def) next fix x assume "is_real x" "0 < Re x" "Re x < 1" thus "?P (of_complex x)" using poincare_distance_zero_x_axis[of "of_complex x"] by simp (auto simp add: circline_set_x_axis cmod_eq_Re complex_is_Real_iff) qed qed lemma poincare_distance_zero_opposite [simp]: assumes "of_complex z \ unit_disc" shows "poincare_distance 0\<^sub>h (of_complex (- z)) = poincare_distance 0\<^sub>h (of_complex z)" proof- have *: "of_complex (-z) \ unit_disc" using assms by auto show ?thesis using poincare_distance_zero[OF assms] using poincare_distance_zero[OF *] by simp qed (* ------------------------------------------------------------------ *) subsection\Distance explicit formula\ (* ------------------------------------------------------------------ *) text\Instead of the h-distance itself, very frequently its hyperbolic cosine is analyzed.\ abbreviation "cosh_dist u v \ cosh (poincare_distance u v)" lemma cosh_poincare_distance_cross_ratio_average: assumes "u \ unit_disc" "v \ unit_disc" "u \ v" "ideal_points (poincare_line u v) = {i1, i2}" shows "cosh_dist u v = ((Re_cross_ratio u i1 v i2) + (Re_cross_ratio v i1 u i2)) / 2" proof- let ?cr = "cross_ratio u i1 v i2" let ?crRe = "Re (to_complex ?cr)" have "?cr \ \\<^sub>h" "is_real (to_complex ?cr)" "?crRe > 0" using distance_cross_ratio_real_positive[OF assms(1-3)] assms(4) by simp_all then obtain cr where *: "cross_ratio u i1 v i2 = of_complex cr" "cr \ 0" "is_real cr" "Re cr > 0" using inf_or_of_complex[of "cross_ratio u i1 v i2"] by (smt to_complex_of_complex zero_complex.simps(1)) thus ?thesis using * using assms cross_ratio_commute_13[of v i1 u i2] unfolding poincare_distance_I[OF assms] calc_poincare_distance_def cosh_def by (cases "Re cr \ 1") (auto simp add: ln_div[of 0] exp_minus field_simps Re_divide power2_eq_square complex.expand) qed definition poincare_distance_formula' :: "complex \ complex \ real" where [simp]: "poincare_distance_formula' u v = 1 + 2 * ((cmod (u - v))\<^sup>2 / ((1 - (cmod u)\<^sup>2) * (1 - (cmod v)\<^sup>2)))" text\Next we show that the following formula expresses h-distance between any two h-points (note that the ideal points do not figure anymore).\ definition poincare_distance_formula :: "complex \ complex \ real" where [simp]: "poincare_distance_formula u v = arcosh (poincare_distance_formula' u v)" lemma blaschke_preserve_distance_formula [simp]: 
assumes "of_complex k \ unit_disc" "u \ unit_disc" "v \ unit_disc" shows "poincare_distance_formula (to_complex (moebius_pt (blaschke k) u)) (to_complex (moebius_pt (blaschke k) v)) = poincare_distance_formula (to_complex u) (to_complex v)" proof (cases "k = 0") case True thus ?thesis by simp next case False obtain u' v' where *: "u' = to_complex u" "v' = to_complex v" by auto have "cmod u' < 1" "cmod v' < 1" "cmod k < 1" using assms * using inf_or_of_complex[of u] inf_or_of_complex[of v] by auto obtain nu du nv dv d kk ddu ddv where **: "nu = u' - k" "du = 1 - cnj k *u'" "nv = v' - k" "dv = 1 - cnj k * v'" "d = u' - v'" "ddu = 1 - u'*cnj u'" "ddv = 1 - v'*cnj v'" "kk = 1 - k*cnj k" by auto have d: "nu*dv - nv*du = d*kk" by (subst **)+ (simp add: field_simps) have ddu: "du*cnj du - nu*cnj nu = ddu*kk" by (subst **)+ (simp add: field_simps) have ddv: "dv*cnj dv - nv*cnj nv = ddv*kk" by (subst **)+ (simp add: field_simps) have "du \ 0" proof (rule ccontr) assume "\ ?thesis" hence "cmod (1 - cnj k * u') = 0" using \du = 1 - cnj k * u'\ by auto hence "cmod (cnj k * u') = 1" by auto hence "cmod k * cmod u' = 1" by auto thus False using \cmod k < 1\ \cmod u' < 1\ using mult_strict_mono[of "cmod k" 1 "cmod u'" 1] by simp qed have "dv \ 0" proof (rule ccontr) assume "\ ?thesis" hence "cmod (1 - cnj k * v') = 0" using \dv = 1 - cnj k * v'\ by auto hence "cmod (cnj k * v') = 1" by auto hence "cmod k * cmod v' = 1" by auto thus False using \cmod k < 1\ \cmod v' < 1\ using mult_strict_mono[of "cmod k" 1 "cmod v'" 1] by simp qed have "kk \ 0" proof (rule ccontr) assume "\ ?thesis" hence "cmod (1 - k * cnj k) = 0" using \kk = 1 - k * cnj k\ by auto hence "cmod (k * cnj k) = 1" by auto hence "cmod k * cmod k = 1" by auto thus False using \cmod k < 1\ using mult_strict_mono[of "cmod k" 1 "cmod k" 1] by simp qed note nz = \du \ 0\ \dv \ 0\ \kk \ 0\ have "nu / du - nv / dv = (nu*dv - nv*du) / (du * dv)" using nz by (simp add: field_simps) hence "(cmod (nu/du - nv/dv))\<^sup>2 = cmod ((d*kk) / (du*dv) * (cnj ((d*kk) / (du*dv))))" (is "?lhs = _") unfolding complex_mod_mult_cnj[symmetric] by (subst (asm) d) simp also have "... = cmod ((d*cnj d*kk*kk) / (du*cnj du*dv*cnj dv))" by (simp add: field_simps) finally have 1: "?lhs = cmod ((d*cnj d*kk*kk) / (du*cnj du*dv*cnj dv))" . have "(1 - ((cmod nu) / (cmod du))\<^sup>2)*(1 - ((cmod nv) / (cmod dv))\<^sup>2) = (1 - cmod((nu * cnj nu) / (du * cnj du)))*(1 - cmod((nv * cnj nv) / (dv * cnj dv)))" (is "?rhs = _") by (metis cmod_divide complex_mod_mult_cnj power_divide) also have "... 
= cmod(((du*cnj du - nu*cnj nu) / (du * cnj du)) * ((dv*cnj dv - nv*cnj nv) / (dv * cnj dv)))" proof- have "u' \ 1 / cnj k" "v' \ 1 / cnj k" using \cmod u' < 1\ \cmod v' < 1\ \cmod k < 1\ by (auto simp add: False) moreover have "cmod k \ 1" using \cmod k < 1\ by linarith ultimately have "cmod (nu/du) < 1" "cmod (nv/dv) < 1" using **(1-4) using unit_disc_fix_discI[OF blaschke_unit_disc_fix[OF \cmod k < 1\] \u \ unit_disc\] \u' = to_complex u\ using unit_disc_fix_discI[OF blaschke_unit_disc_fix[OF \cmod k < 1\] \v \ unit_disc\] \v' = to_complex v\ using inf_or_of_complex[of u] \u \ unit_disc\ inf_or_of_complex[of v] \v \ unit_disc\ using moebius_pt_blaschke[of k u'] using moebius_pt_blaschke[of k v'] by auto hence "(cmod (nu/du))\<^sup>2 < 1" "(cmod (nv/dv))\<^sup>2 < 1" by (simp_all add: cmod_def) hence "cmod (nu * cnj nu / (du * cnj du)) < 1" "cmod (nv * cnj nv / (dv * cnj dv)) < 1" by (metis complex_mod_mult_cnj norm_divide power_divide)+ moreover have "is_real (nu * cnj nu / (du * cnj du))" "is_real (nv * cnj nv / (dv * cnj dv))" using eq_cnj_iff_real[of "nu * cnj nu / (du * cnj du)"] using eq_cnj_iff_real[of "nv * cnj nv / (dv * cnj dv)"] by (auto simp add: mult.commute) moreover have "Re (nu * cnj nu / (du * cnj du)) \ 0" "Re (nv * cnj nv / (dv * cnj dv)) \ 0" using \du \ 0\ \dv \ 0\ unfolding complex_mult_cnj_cmod by simp_all ultimately have "1 - cmod (nu * cnj nu / (du * cnj du)) = cmod (1 - nu * cnj nu / (du * cnj du))" "1 - cmod (nv * cnj nv / (dv * cnj dv)) = cmod (1 - nv * cnj nv / (dv * cnj dv))" by (simp_all add: cmod_def) thus ?thesis using nz apply simp apply (subst diff_divide_eq_iff, simp, simp) apply (subst diff_divide_eq_iff, simp, simp) done qed also have "... = cmod(((ddu * kk) / (du * cnj du)) * ((ddv * kk) / (dv * cnj dv)))" by (subst ddu, subst ddv, simp) also have "... = cmod((ddu*ddv*kk*kk) / (du*cnj du*dv*cnj dv))" by (simp add: field_simps) finally have 2: "?rhs = cmod((ddu*ddv*kk*kk) / (du*cnj du*dv*cnj dv))" . have "?lhs / ?rhs = cmod ((d*cnj d*kk*kk) / (du*cnj du*dv*cnj dv)) / cmod((ddu*ddv*kk*kk) / (du*cnj du*dv*cnj dv))" by (subst 1, subst 2, simp) also have "... = cmod ((d*cnj d)/(ddu*ddv))" using nz by simp also have "... = (cmod d)\<^sup>2 / ((1 - (cmod u')\<^sup>2)*(1 - (cmod v')\<^sup>2))" proof- have "(cmod u')\<^sup>2 < 1" "(cmod v')\<^sup>2 < 1" using \cmod u' < 1\ \cmod v' < 1\ by (simp_all add: cmod_def) hence "cmod (1 - u' * cnj u') = 1 - (cmod u')\<^sup>2" "cmod (1 - v' * cnj v') = 1 - (cmod v')\<^sup>2" by (auto simp add: cmod_eq_Re cmod_power2 power2_eq_square[symmetric]) thus ?thesis using nz apply (subst **)+ unfolding complex_mod_mult_cnj[symmetric] by simp qed finally have 3: "?lhs / ?rhs = (cmod d)\<^sup>2 / ((1 - (cmod u')\<^sup>2)*(1 - (cmod v')\<^sup>2))" . have "cmod k \ 1" "u' \ 1 / cnj k" "v' \ 1 / cnj k" "u \ \\<^sub>h" "v \ \\<^sub>h" using \cmod k < 1\ \u \ unit_disc\ \v \ unit_disc\ * \k \ 0\ ** \kk \ 0\ nz by auto thus ?thesis using assms using * ** 3 using moebius_pt_blaschke[of k u'] using moebius_pt_blaschke[of k v'] by simp qed text \To prove the equivalence between the h-distance definition and the distance formula, we shall employ the without loss of generality principle. 
Therefore, we must show that the distance formula is preserved by h-isometries.\ text\Rotations preserve @{term poincare_distance_formula}.\ lemma rotation_preserve_distance_formula [simp]: assumes "u \ unit_disc" "v \ unit_disc" shows "poincare_distance_formula (to_complex (moebius_pt (moebius_rotation \) u)) (to_complex (moebius_pt (moebius_rotation \) v)) = poincare_distance_formula (to_complex u) (to_complex v)" using assms using inf_or_of_complex[of u] inf_or_of_complex[of v] by auto text\Unit disc fixing Möbius transformations preserve @{term poincare_distance_formula}.\ lemma unit_disc_fix_preserve_distance_formula [simp]: assumes "unit_disc_fix M" "u \ unit_disc" "v \ unit_disc" shows "poincare_distance_formula (to_complex (moebius_pt M u)) (to_complex (moebius_pt M v)) = poincare_distance_formula (to_complex u) (to_complex v)" (is "?P' u v M") proof- have "\ u \ unit_disc. \ v \ unit_disc. ?P' u v M" (is "?P M") proof (rule wlog_unit_disc_fix[OF assms(1)]) fix k assume "cmod k < 1" hence "of_complex k \ unit_disc" by simp thus "?P (blaschke k)" using blaschke_preserve_distance_formula by simp next fix \ show "?P (moebius_rotation \)" using rotation_preserve_distance_formula by simp next fix M1 M2 assume *: "?P M1" and **: "?P M2" and u11: "unit_disc_fix M1" "unit_disc_fix M2" thus "?P (M1 + M2)" by (auto simp del: poincare_distance_formula_def) qed thus ?thesis using assms by simp qed text\The equivalence between the two h-distance representations.\ lemma poincare_distance_formula: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance u v = poincare_distance_formula (to_complex u) (to_complex v)" (is "?P u v") proof (rule wlog_x_axis) fix x assume *: "is_real x" "0 \ Re x" "Re x < 1" show "?P 0\<^sub>h (of_complex x)" (is "?lhs = ?rhs") proof- have "of_complex x \ unit_disc" "of_complex x \ circline_set x_axis" "cmod x < 1" using * cmod_eq_Re by (auto simp add: circline_set_x_axis) hence "?lhs = \ln (Re ((1 - x) / (1 + x)))\" using poincare_distance_zero_x_axis[of "of_complex x"] by simp moreover have "?rhs = \ln (Re ((1 - x) / (1 + x)))\" proof- let ?x = "1 + 2 * (cmod x)\<^sup>2 / (1 - (cmod x)\<^sup>2)" have "0 \ 2 * (cmod x)\<^sup>2 / (1 - (cmod x)\<^sup>2)" by (smt \cmod x < 1\ divide_nonneg_nonneg norm_ge_zero power_le_one zero_le_power2) hence arcosh_real_gt: "1 \ ?x" by auto have "?rhs = arcosh ?x" by simp also have "... = ln ((1 + (cmod x)\<^sup>2) / (1 - (cmod x)\<^sup>2) + 2 * (cmod x) / (1 - (cmod x)\<^sup>2))" proof- have "1 - (cmod x)\<^sup>2 > 0" using \cmod x < 1\ by (smt norm_not_less_zero one_power2 power2_eq_imp_eq power_mono) hence 1: "?x = (1 + (cmod x)\<^sup>2) / (1 - (cmod x)\<^sup>2)" by (simp add: field_simps) have 2: "?x\<^sup>2 - 1 = (4 * (cmod x)\<^sup>2) / (1 - (cmod x)\<^sup>2)\<^sup>2" using \1 - (cmod x)\<^sup>2 > 0\ apply (subst 1) unfolding power_divide by (subst divide_diff_eq_iff, simp, simp add: power2_eq_square field_simps) show ?thesis using \1 - (cmod x)\<^sup>2 > 0\ apply (subst arcosh_real_def[OF arcosh_real_gt]) apply (subst 2) apply (subst 1) apply (subst real_sqrt_divide) apply (subst real_sqrt_mult) apply simp done qed also have "... = ln (((1 + (cmod x))\<^sup>2) / (1 - (cmod x)\<^sup>2))" apply (subst add_divide_distrib[symmetric]) apply (simp add: field_simps power2_eq_square) done also have "... = ln ((1 + cmod x) / (1 - (cmod x)))" using \cmod x < 1\ using square_diff_square_factored[of 1 "cmod x"] by (simp add: power2_eq_square) also have "...
= \ln (Re ((1 - x) / (1 + x)))\" proof- have *: "Re ((1 - x) / (1 + x)) \ 1" "Re ((1 - x) / (1 + x)) > 0" using \is_real x\ \Re x \ 0\ \Re x < 1\ using complex_is_Real_iff by auto hence "\ln (Re ((1 - x) / (1 + x)))\ = - ln (Re ((1 - x) / (1 + x)))" by auto hence "\ln (Re ((1 - x) / (1 + x)))\ = ln (Re ((1 + x) / (1 - x)))" using ln_div[of 1 "Re ((1 - x)/(1 + x))"] * \is_real x\ by (simp add: complex_is_Real_iff) moreover have "ln ((1 + cmod x) / (1 - cmod x)) = ln ((1 + Re x) / (1 - Re x))" using \Re x \ 0\ \is_real x\ using cmod_eq_Re by auto moreover have "(1 + Re x) / (1 - Re x) = Re ((1 + x) / (1 - x))" using \is_real x\ \Re x < 1\ by (smt Re_divide_real eq_iff_diff_eq_0 minus_complex.simps one_complex.simps plus_complex.simps) ultimately show ?thesis by simp qed finally show ?thesis . qed ultimately show ?thesis by simp qed next fix M u v assume *: "unit_disc_fix M" "u \ unit_disc" "v \ unit_disc" assume "?P (moebius_pt M u) (moebius_pt M v)" thus "?P u v" using *(1-3) by (simp del: poincare_distance_formula_def) next show "u \ unit_disc" "v \ unit_disc" by fact+ qed text\Some additional properties proved easily using the distance formula.\ text \@{term poincare_distance} is symmetric.\ lemma poincare_distance_sym: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance u v = poincare_distance v u" using assms using poincare_distance_formula[OF assms(1) assms(2)] using poincare_distance_formula[OF assms(2) assms(1)] by (simp add: mult.commute norm_minus_commute) lemma poincare_distance_formula'_ge_1: assumes "u \ unit_disc" and "v \ unit_disc" shows "1 \ poincare_distance_formula' (to_complex u) (to_complex v)" using unit_disc_cmod_square_lt_1[OF assms(1)] unit_disc_cmod_square_lt_1[OF assms(2)] by auto text\@{term poincare_distance} is non-negative.\ lemma poincare_distance_ge0: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance u v \ 0" using poincare_distance_formula'_ge_1 unfolding poincare_distance_formula[OF assms(1) assms(2)] unfolding poincare_distance_formula_def unfolding poincare_distance_formula'_def by (rule arcosh_ge_0, simp_all add: assms) lemma cosh_dist: assumes "u \ unit_disc" and "v \ unit_disc" shows "cosh_dist u v = poincare_distance_formula' (to_complex u) (to_complex v)" using poincare_distance_formula[OF assms] poincare_distance_formula'_ge_1[OF assms] by simp text\@{term poincare_distance} is zero only if the two points are equal.\ lemma poincare_distance_eq_0_iff: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance u v = 0 \ u = v" using assms apply auto using poincare_distance_formula'_ge_1[OF assms] using unit_disc_cmod_square_lt_1[OF assms(1)] unit_disc_cmod_square_lt_1[OF assms(2)] unfolding poincare_distance_formula[OF assms(1) assms(2)] unfolding poincare_distance_formula_def unfolding poincare_distance_formula'_def apply (subst (asm) arcosh_eq_0_iff) apply assumption apply (simp add: unit_disc_to_complex_inj) done text\Conjugation preserves @{term poincare_distance}.\ lemma conjugate_preserve_poincare_distance [simp]: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance (conjugate u) (conjugate v) = poincare_distance u v" proof- obtain u' v' where *: "u = of_complex u'" "v = of_complex v'" using assms inf_or_of_complex[of u] inf_or_of_complex[of v] by auto have **: "conjugate u \ unit_disc" "conjugate v \ unit_disc" using * assms by auto show ?thesis using * using poincare_distance_formula[OF assms] using poincare_distance_formula[OF **] by (metis complex_cnj_diff complex_mod_cnj
conjugate_of_complex poincare_distance_def poincare_distance_formula'_def poincare_distance_formula_def to_complex_of_complex) qed (* ------------------------------------------------------------------ *) subsection\Existence and uniqueness of points with a given distance\ (* ------------------------------------------------------------------ *) lemma ex_x_axis_poincare_distance_negative': fixes d :: real assumes "d \ 0" shows "let z = (1 - exp d) / (1 + exp d) in is_real z \ Re z \ 0 \ Re z > -1 \ of_complex z \ unit_disc \ of_complex z \ circline_set x_axis \ poincare_distance 0\<^sub>h (of_complex z) = d" proof- have "exp d \ 1" using assms using one_le_exp_iff[of d, symmetric] by blast hence "1 + exp d \ 0" by linarith let ?z = "(1 - exp d) / (1 + exp d)" have "?z \ 0" using \exp d \ 1\ by (simp add: divide_nonpos_nonneg) moreover have "?z > -1" using exp_gt_zero[of d] by (smt divide_less_eq_1_neg nonzero_minus_divide_right) moreover hence "abs ?z < 1" using \?z \ 0\ by simp hence "cmod ?z < 1" by (metis norm_of_real) hence "of_complex ?z \ unit_disc" by simp moreover have "of_complex ?z \ circline_set x_axis" unfolding circline_set_x_axis by simp moreover have "(1 - ?z) / (1 + ?z) = exp d" proof- have "1 + ?z = 2 / (1 + exp d)" using \1 + exp d \ 0\ by (subst add_divide_eq_iff, auto) moreover have "1 - ?z = 2 * exp d / (1 + exp d)" using \1 + exp d \ 0\ by (subst diff_divide_eq_iff, auto) ultimately show ?thesis using \1 + exp d \ 0\ by simp qed ultimately show ?thesis using poincare_distance_zero_x_axis[of "of_complex ?z"] using \d \ 0\ \exp d \ 1\ by simp (simp add: cmod_eq_Re) qed lemma ex_x_axis_poincare_distance_negative: assumes "d \ 0" shows "\ z. is_real z \ Re z \ 0 \ Re z > -1 \ of_complex z \ unit_disc \ of_complex z \ circline_set x_axis \ poincare_distance 0\<^sub>h (of_complex z) = d" (is "\ z. ?P z") using ex_x_axis_poincare_distance_negative'[OF assms] unfolding Let_def by blast text\For each real number $d$ there is exactly one point on the positive x-axis such that h-distance between 0 and that point is $d$.\ lemma unique_x_axis_poincare_distance_negative: assumes "d \ 0" shows "\! z. is_real z \ Re z \ 0 \ Re z > -1 \ poincare_distance 0\<^sub>h (of_complex z) = d" (is "\! z. ?P z") proof- let ?z = "(1 - exp d) / (1 + exp d)" have "?P ?z" using ex_x_axis_poincare_distance_negative'[OF assms] unfolding Let_def by blast moreover have "\ z'. ?P z' \ z' = ?z" proof- let ?g = "\ x'. \ln (Re ((1 - x') / (1 + x')))\" let ?A = "{x. is_real x \ Re x > -1 \ Re x \ 0}" have "inj_on (poincare_distance 0\<^sub>h \ of_complex) ?A" proof (rule comp_inj_on) show "inj_on of_complex ?A" using of_complex_inj unfolding inj_on_def by blast next show "inj_on (poincare_distance 0\<^sub>h) (of_complex ` ?A)" (is "inj_on ?f (of_complex ` ?A)") proof (subst inj_on_cong) have *: "of_complex ` ?A = {z. 
z \ unit_disc \ z \ circline_set x_axis \ Re (to_complex z) \ 0}" (is "_ = ?B") by (auto simp add: cmod_eq_Re circline_set_x_axis) fix x assume "x \ of_complex ` ?A" hence "x \ ?B" using * by simp thus "poincare_distance 0\<^sub>h x = (?g \ to_complex) x" using poincare_distance_zero_x_axis by (simp add: Let_def) next have *: "to_complex ` of_complex ` ?A = ?A" by (auto simp add: image_iff) show "inj_on (?g \ to_complex) (of_complex ` ?A)" proof (rule comp_inj_on) show "inj_on to_complex (of_complex ` ?A)" unfolding inj_on_def by auto next have "inj_on ?g ?A" unfolding inj_on_def proof(safe) fix x y assume hh: "is_real x" "is_real y" "- 1 < Re x" "Re x \ 0" "- 1 < Re y" "Re y \ 0" "\ln (Re ((1 - x) / (1 + x)))\ = \ln (Re ((1 - y) / (1 + y)))\" have "is_real ((1 - x)/(1 + x))" using \is_real x\ div_reals[of "1-x" "1+x"] by auto have "is_real ((1 - y)/(1 + y))" using \is_real y\ div_reals[of "1-y" "1+y"] by auto have "Re (1 + x) > 0" using \- 1 < Re x\ by auto hence "1 + x \ 0" by force have "Re (1 - x) \ 0" using \Re x \ 0\ by auto hence "Re ((1 - x)/(1 + x)) > 0" using Re_divide_real \0 < Re (1 + x)\ complex_eq_if_Re_eq hh(1) hh(4) by auto have "Re(1 - x) \ Re ( 1 + x)" using hh by auto hence "Re ((1 - x)/(1 + x)) \ 1" using \Re (1 + x) > 0\ \is_real ((1 - x)/(1 + x))\ by (smt Re_divide_real arg_0_iff hh(1) le_divide_eq_1_pos one_complex.simps(2) plus_complex.simps(2)) have "Re (1 + y) > 0" using \- 1 < Re y\ by auto hence "1 + y \ 0" by force have "Re (1 - y) \ 0" using \Re y \ 0\ by auto hence "Re ((1 - y)/(1 + y)) > 0" using Re_divide_real \0 < Re (1 + y)\ complex_eq_if_Re_eq hh by auto have "Re(1 - y) \ Re ( 1 + y)" using hh by auto hence "Re ((1 - y)/(1 + y)) \ 1" using \Re (1 + y) > 0\ \is_real ((1 - y)/(1 + y))\ by (smt Re_divide_real arg_0_iff hh le_divide_eq_1_pos one_complex.simps(2) plus_complex.simps(2)) have "ln (Re ((1 - x) / (1 + x))) = ln (Re ((1 - y) / (1 + y)))" using \Re ((1 - y)/(1 + y)) \ 1\ \Re ((1 - x)/(1 + x)) \ 1\ hh by auto hence "Re ((1 - x) / (1 + x)) = Re ((1 - y) / (1 + y))" using \Re ((1 - y)/(1 + y)) > 0\ \Re ((1 - x)/(1 + x)) > 0\ by auto hence "(1 - x) / (1 + x) = (1 - y) / (1 + y)" using \is_real ((1 - y)/(1 + y))\ \is_real ((1 - x)/(1 + x))\ using complex_eq_if_Re_eq by blast hence "(1 - x) * (1 + y) = (1 - y) * (1 + x)" using \1 + y \ 0\ \1 + x \ 0\ by (simp add:field_simps) thus "x = y" by (simp add:field_simps) qed thus "inj_on ?g (to_complex ` of_complex ` ?A)" using * by simp qed qed qed thus ?thesis using \?P ?z\ unfolding inj_on_def by auto qed ultimately show ?thesis by blast qed lemma ex_x_axis_poincare_distance_positive: assumes "d \ 0" shows "\ z. is_real z \ Re z \ 0 \ Re z < 1 \ of_complex z \ unit_disc \ of_complex z \ circline_set x_axis \ poincare_distance 0\<^sub>h (of_complex z) = d" (is "\ z. is_real z \ Re z \ 0 \ Re z < 1 \ ?P z") proof- obtain z where *: "is_real z" "Re z \ 0" "Re z > -1" "?P z" using ex_x_axis_poincare_distance_negative[OF assms] by auto hence **: "of_complex z \ unit_disc" "of_complex z \ circline_set x_axis" by (auto simp add: cmod_eq_Re) have "is_real (-z) \ Re (-z) \ 0 \ Re (-z) < 1 \ ?P (-z)" using * ** by (simp add: circline_set_x_axis) thus ?thesis by blast qed lemma unique_x_axis_poincare_distance_positive: assumes "d \ 0" shows "\! z. is_real z \ Re z \ 0 \ Re z < 1 \ poincare_distance 0\<^sub>h (of_complex z) = d" (is "\! z. 
is_real z \ Re z \ 0 \ Re z < 1 \ ?P z") proof- obtain z where *: "is_real z" "Re z \ 0" "Re z > -1" "?P z" using unique_x_axis_poincare_distance_negative[OF assms] by auto hence **: "of_complex z \ unit_disc" "of_complex z \ circline_set x_axis" by (auto simp add: cmod_eq_Re circline_set_x_axis) show ?thesis proof show "is_real (-z) \ Re (-z) \ 0 \ Re (-z) < 1 \ ?P (-z)" using * ** by simp next fix z' assume "is_real z' \ Re z' \ 0 \ Re z' < 1 \ ?P z'" hence "is_real (-z') \ Re (-z') \ 0 \ Re (-z') > -1 \ ?P (-z')" by (auto simp add: circline_set_x_axis cmod_eq_Re) hence "-z' = z" using unique_x_axis_poincare_distance_negative[OF assms] * by blast thus "z' = -z" by auto qed qed text\Equal distance implies that segments are isometric - this means that congruence could be defined either by two segments having the same distance or by requiring existence of an isometry that maps one segment to the other.\ lemma poincare_distance_eq_ex_moebius: assumes in_disc: "u \ unit_disc" and "v \ unit_disc" and "u' \ unit_disc" and "v' \ unit_disc" assumes "poincare_distance u v = poincare_distance u' v'" shows "\ M. unit_disc_fix M \ moebius_pt M u = u' \ moebius_pt M v = v'" (is "?P' u v u' v'") proof (cases "u = v") case True thus ?thesis using assms poincare_distance_eq_0_iff[of u' v'] by (simp add: unit_disc_fix_transitive) next case False have "\ u' v'. u \ v \ u' \ unit_disc \ v' \ unit_disc \ poincare_distance u v = poincare_distance u' v' \ ?P' u' v' u v" (is "?P u v") proof (rule wlog_positive_x_axis[where P="?P"]) fix x assume "is_real x" "0 < Re x" "Re x < 1" hence "of_complex x \ unit_disc" "of_complex x \ circline_set x_axis" unfolding circline_set_x_axis by (auto simp add: cmod_eq_Re) show "?P 0\<^sub>h (of_complex x)" proof safe fix u' v' assume "0\<^sub>h \ of_complex x" and in_disc: "u' \ unit_disc" "v' \ unit_disc" and "poincare_distance 0\<^sub>h (of_complex x) = poincare_distance u' v'" hence "u' \ v'" "poincare_distance u' v' > 0" using poincare_distance_eq_0_iff[of "0\<^sub>h" "of_complex x"] \of_complex x \ unit_disc\ using poincare_distance_ge0[of "0\<^sub>h" "of_complex x"] by auto then obtain M where M: "unit_disc_fix M" "moebius_pt M u' = 0\<^sub>h" "moebius_pt M v' \ positive_x_axis" using ex_unit_disc_fix_to_zero_positive_x_axis[of u' v'] in_disc by auto then obtain Mv' where Mv': "moebius_pt M v' = of_complex Mv'" using inf_or_of_complex[of "moebius_pt M v'"] in_disc unit_disc_fix_iff[of M] by (metis image_eqI inf_notin_unit_disc) have "moebius_pt M v' \ unit_disc" using M(1) \v' \ unit_disc\ by auto have "Re Mv' > 0" "is_real Mv'" "Re Mv' < 1" using M Mv' of_complex_inj \moebius_pt M v' \ unit_disc\ unfolding positive_x_axis_def circline_set_x_axis using cmod_eq_Re by auto fastforce have "poincare_distance 0\<^sub>h (moebius_pt M v') = poincare_distance u' v'" using M(1) using in_disc by (subst M(2)[symmetric], simp) have "Mv' = x" using \poincare_distance 0\<^sub>h (moebius_pt M v') = poincare_distance u' v'\ Mv' using \poincare_distance 0\<^sub>h (of_complex x) = poincare_distance u' v'\ using unique_x_axis_poincare_distance_positive[of "poincare_distance u' v'"] \poincare_distance u' v' > 0\ using \Re Mv' > 0\ \Re Mv' < 1\ \is_real Mv'\ using \is_real x\ \Re x > 0\ \Re x < 1\ unfolding positive_x_axis_def by auto thus "?P' u' v' 0\<^sub>h (of_complex x)" using M Mv' by auto qed next show "u \ unit_disc" "v \ unit_disc" "u \ v" by fact+ next fix M u v let ?Mu = "moebius_pt M u" and ?Mv = "moebius_pt M v" assume 1: "unit_disc_fix M" "u \ unit_disc" "v \ unit_disc" "u \ v" 
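(* Informal note on the invariance step carried out below: from the hypothesis for
   the transformed pair (moebius_pt M u, moebius_pt M v) one obtains some M' taking
   (u', v') to that pair, and composing with the inverse of M, written (-M) + M',
   gives a unit-disc automorphism taking (u', v') back to (u, v). *)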
hence 2: "?Mu \ ?Mv" "?Mu \ unit_disc" "?Mv \ unit_disc" by auto assume 3: "?P (moebius_pt M u) (moebius_pt M v)" show "?P u v" proof safe fix u' v' assume 4: "u' \ unit_disc" "v' \ unit_disc" "poincare_distance u v = poincare_distance u' v'" hence "poincare_distance ?Mu ?Mv = poincare_distance u v" using 1 by simp then obtain M' where 5: "unit_disc_fix M'" "moebius_pt M' u' = ?Mu" "moebius_pt M' v' = ?Mv" using 2 3 4 by auto let ?M = "(-M) + M'" have "unit_disc_fix ?M \ moebius_pt ?M u' = u \ moebius_pt ?M v' = v" using 5 \unit_disc_fix M\ using unit_disc_fix_moebius_comp[of "-M" "M'"] using unit_disc_fix_moebius_inv[of M] by simp thus "\M. unit_disc_fix M \ moebius_pt M u' = u \ moebius_pt M v' = v" by blast qed qed then obtain M where "unit_disc_fix M \ moebius_pt M u' = u \ moebius_pt M v' = v" using assms \u \ v\ by blast hence "unit_disc_fix (-M) \ moebius_pt (-M) u = u' \ moebius_pt (-M) v = v'" using unit_disc_fix_moebius_inv[of M] by auto thus ?thesis by blast qed lemma unique_midpoint_x_axis: assumes x: "is_real x" "-1 < Re x" "Re x < 1" and y: "is_real y" "-1 < Re y" "Re y < 1" and "x \ y" shows "\! z. -1 < Re z \ Re z < 1 \ is_real z \ poincare_distance (of_complex z) (of_complex x) = poincare_distance (of_complex z) (of_complex y)" (is "\! z. ?R z (of_complex x) (of_complex y)") proof- let ?x = "of_complex x" and ?y = "of_complex y" let ?P = "\ x y. \! z. ?R z x y" have "\ x. -1 < Re x \ Re x < 1 \ is_real x \ of_complex x \ ?y \ ?P (of_complex x) ?y" (is "?Q (of_complex y)") proof (rule wlog_real_zero) show "?y \ unit_disc" using y by (simp add: cmod_eq_Re) next show "is_real (to_complex ?y)" using y by simp next show "?Q 0\<^sub>h" proof (rule allI, rule impI, (erule conjE)+) fix x assume x: "-1 < Re x" "Re x < 1" "is_real x" let ?x = "of_complex x" assume "?x \ 0\<^sub>h" hence "x \ 0" by auto hence "Re x \ 0" using x using complex_neq_0 by auto have *: "\ a. -1 < a \ a < 1 \ (poincare_distance (of_complex (cor a)) ?x = poincare_distance (of_complex (cor a)) 0\<^sub>h \ (Re x) * a * a - 2 * a + Re x = 0)" proof (rule allI, rule impI) fix a :: real assume "-1 < a \ a < 1" hence "of_complex (cor a) \ unit_disc" by auto moreover have "(a - Re x)\<^sup>2 / ((1 - a\<^sup>2) * (1 - (Re x)\<^sup>2)) = a\<^sup>2 / (1 - a\<^sup>2) \ (Re x) * a * a - 2 * a + Re x = 0" (is "?lhs \ ?rhs") proof- have "1 - a\<^sup>2 \ 0" using \-1 < a \ a < 1\ by (metis cancel_comm_monoid_add_class.diff_cancel diff_eq_diff_less less_numeral_extra(4) power2_eq_1_iff right_minus_eq) hence "?lhs \ (a - Re x)\<^sup>2 / (1 - (Re x)\<^sup>2) = a\<^sup>2" by (smt divide_cancel_right divide_divide_eq_left mult.commute) also have "... \ (a - Re x)\<^sup>2 = a\<^sup>2 * (1 - (Re x)\<^sup>2)" proof- have "1 - (Re x)\<^sup>2 \ 0" using x by (smt power2_eq_1_iff) thus ?thesis by (simp add: divide_eq_eq) qed also have "... \ a\<^sup>2 * (Re x)\<^sup>2 - 2*a*Re x + (Re x)\<^sup>2 = 0" by (simp add: power2_diff field_simps) also have "... \ Re x * (a\<^sup>2 * Re x - 2 * a + Re x) = 0" by (simp add: power2_eq_square field_simps) also have "... \ ?rhs" using \Re x \ 0\ by (simp add: mult.commute mult.left_commute power2_eq_square) finally show ?thesis . 
qed moreover have "arcosh (1 + 2 * ((a - Re x)\<^sup>2 / ((1 - a\<^sup>2) * (1 - (Re x)\<^sup>2)))) = arcosh (1 + 2 * a\<^sup>2 / (1 - a\<^sup>2)) \ ?lhs" using \-1 < a \ a < 1\ x mult_left_cancel[of "2::real" "(a - Re x)\<^sup>2 / ((1 - a\<^sup>2) * (1 - (Re x)\<^sup>2))" "a\<^sup>2 / (1 - a\<^sup>2)"] by (subst arcosh_eq_iff, simp_all add: square_le_1) ultimately show "poincare_distance (of_complex (cor a)) (of_complex x) = poincare_distance (of_complex (cor a)) 0\<^sub>h \ (Re x) * a * a - 2 * a + Re x = 0" using x by (auto simp add: poincare_distance_formula cmod_eq_Re) qed show "?P ?x 0\<^sub>h" proof let ?a = "(1 - sqrt(1 - (Re x)\<^sup>2)) / (Re x)" let ?b = "(1 + sqrt(1 - (Re x)\<^sup>2)) / (Re x)" have "is_real ?a" by simp moreover have "1 - (Re x)\<^sup>2 > 0" using x by (smt power2_eq_1_iff square_le_1) have "\?a\ < 1" proof (cases "Re x > 0") case True have "(1 - Re x)\<^sup>2 < 1 - (Re x)\<^sup>2" using \Re x > 0\ x by (simp add: power2_eq_square field_simps) hence "1 - Re x < sqrt (1 - (Re x)\<^sup>2)" using real_less_rsqrt by fastforce thus ?thesis using \1 - (Re x)\<^sup>2 > 0\ \Re x > 0\ by simp next case False hence "Re x < 0" using \Re x \ 0\ by simp have "1 + Re x > 0" using \Re x > -1\ by simp hence "2*Re x + 2*Re x*Re x < 0" using \Re x < 0\ by (metis comm_semiring_class.distrib mult.commute mult_2_right mult_less_0_iff one_add_one zero_less_double_add_iff_zero_less_single_add) hence "(1 + Re x)\<^sup>2 < 1 - (Re x)\<^sup>2" by (simp add: power2_eq_square field_simps) hence "1 + Re x < sqrt (1 - (Re x)\<^sup>2)" using \1 - (Re x)\<^sup>2 > 0\ using real_less_rsqrt by blast thus ?thesis using \Re x < 0\ by (simp add: field_simps) qed hence "-1 < ?a" "?a < 1" by linarith+ moreover have "(Re x) * ?a * ?a - 2 * ?a + Re x = 0" using \Re x \ 0\ \1 - (Re x)\<^sup>2 > 0\ by (simp add: field_simps power2_eq_square) ultimately show "-1 < Re (cor ?a) \ Re (cor ?a) < 1 \ is_real ?a \ poincare_distance (of_complex ?a) (of_complex x) = poincare_distance (of_complex ?a) 0\<^sub>h" using * by auto fix z assume **: "- 1 < Re z \ Re z < 1 \ is_real z \ poincare_distance (of_complex z) (of_complex x) = poincare_distance (of_complex z) 0\<^sub>h" hence "Re x * Re z * Re z - 2 * Re z + Re x = 0" using *[rule_format, of "Re z"] x by auto moreover have "sqrt (4 - 4 * Re x * Re x) = 2 * sqrt(1 - Re x * Re x)" proof- have "sqrt (4 - 4 * Re x * Re x) = sqrt(4 * (1 - Re x * Re x))" by simp thus ?thesis by (simp only: real_sqrt_mult, simp) qed moreover have "(2 - 2 * sqrt (1 - Re x * Re x)) / (2 * Re x) = ?a" proof- have "(2 - 2 * sqrt (1 - Re x * Re x)) / (2 * Re x) = (2 * (1 - sqrt (1 - Re x * Re x))) / (2 * Re x)" by simp thus ?thesis by (subst (asm) mult_divide_mult_cancel_left) (auto simp add: power2_eq_square) qed moreover have "(2 + 2 * sqrt (1 - Re x * Re x)) / (2 * Re x) = ?b" proof- have "(2 + 2 * sqrt (1 - Re x * Re x)) / (2 * Re x) = (2 * (1 + sqrt (1 - Re x * Re x))) / (2 * Re x)" by simp thus ?thesis by (subst (asm) mult_divide_mult_cancel_left) (auto simp add: power2_eq_square) qed ultimately have "Re z = ?a \ Re z = ?b" using discriminant_nonneg[of "Re x" "-2" "Re x" "Re z"] discrim_def[of "Re x" "-2" "Re x"] using \Re x \ 0\ \-1 < Re x\ \Re x < 1\ \1 - (Re x)\<^sup>2 > 0\ by (auto simp add:power2_eq_square) have "\?b\ > 1" proof (cases "Re x > 0") case True have "(Re x - 1)\<^sup>2 < 1 - (Re x)\<^sup>2" using \Re x > 0\ x by (simp add: power2_eq_square field_simps) hence "Re x - 1 < sqrt (1 - (Re x)\<^sup>2)" using real_less_rsqrt by simp thus ?thesis using \1 - (Re x)\<^sup>2 > 
0\ \Re x > 0\ by simp next case False hence "Re x < 0" using \Re x \ 0\ by simp have "1 + Re x > 0" using \Re x > -1\ by simp hence "2*Re x + 2*Re x*Re x < 0" using \Re x < 0\ by (metis comm_semiring_class.distrib mult.commute mult_2_right mult_less_0_iff one_add_one zero_less_double_add_iff_zero_less_single_add) hence "1 - (Re x)\<^sup>2 > (- 1 - (Re x))\<^sup>2" by (simp add: field_simps power2_eq_square) hence "sqrt (1 - (Re x)\<^sup>2) > -1 - Re x" using real_less_rsqrt by simp thus ?thesis using \Re x < 0\ by (simp add: field_simps) qed hence "?b < -1 \ ?b > 1" by auto hence "Re z = ?a" using \Re z = ?a \ Re z = ?b\ ** by auto thus "z = ?a" using ** complex_of_real_Re by fastforce qed qed next fix a u let ?M = "moebius_pt (blaschke a)" let ?Mu = "?M u" assume "u \ unit_disc" "is_real a" "cmod a < 1" assume *: "?Q ?Mu" show "?Q u" proof (rule allI, rule impI, (erule conjE)+) fix x assume x: "-1 < Re x" "Re x < 1" "is_real x" "of_complex x \ u" let ?Mx = "?M (of_complex x)" have "of_complex x \ unit_disc" using x cmod_eq_Re by auto hence "?Mx \ unit_disc" using \is_real a\ \cmod a < 1\ blaschke_unit_disc_fix[of a] using unit_disc_fix_discI by blast hence "?Mx \ \\<^sub>h" by auto moreover have "of_complex x \ circline_set x_axis" using x by auto hence "?Mx \ circline_set x_axis" using blaschke_real_preserve_x_axis[OF \is_real a\ \cmod a < 1\, of "of_complex x"] by auto hence "-1 < Re (to_complex ?Mx) \ Re (to_complex ?Mx) < 1 \ is_real (to_complex ?Mx)" using \?Mx \ \\<^sub>h\ \?Mx \ unit_disc\ unfolding circline_set_x_axis by (auto simp add: cmod_eq_Re) moreover have "?Mx \ ?Mu" using \of_complex x \ u\ by simp ultimately have "?P ?Mx ?Mu" using *[rule_format, of "to_complex ?Mx"] \?Mx \ \\<^sub>h\ by simp then obtain Mz where "?R Mz ?Mx ?Mu" by blast have "of_complex Mz \ unit_disc" "of_complex Mz \ circline_set x_axis" using \?R Mz ?Mx ?Mu\ using cmod_eq_Re by auto let ?Minv = "- (blaschke a)" let ?z = "moebius_pt ?Minv (of_complex Mz)" have "?z \ unit_disc" using \of_complex Mz \ unit_disc\ \cmod a < 1\ by auto moreover have "?z \ circline_set x_axis" using \of_complex Mz \ circline_set x_axis\ using blaschke_real_preserve_x_axis \is_real a\ \cmod a < 1\ by fastforce ultimately have z1: "-1 < Re (to_complex ?z)" "Re (to_complex ?z) < 1" "is_real (to_complex ?z)" using inf_or_of_complex[of "?z"] unfolding circline_set_x_axis by (auto simp add: cmod_eq_Re) have z2: "poincare_distance ?z (of_complex x) = poincare_distance ?z u" using \?R Mz ?Mx ?Mu\ \cmod a < 1\ \?z \ unit_disc\ \of_complex x \ unit_disc\ \u \ unit_disc\ by (metis blaschke_preserve_distance_formula blaschke_unit_disc_fix moebius_pt_comp_inv_right poincare_distance_formula uminus_moebius_def unit_disc_fix_discI unit_disc_iff_cmod_lt_1) show "?P (of_complex x) u" proof show "?R (to_complex ?z) (of_complex x) u" using z1 z2 \?z \ unit_disc\ inf_or_of_complex[of ?z] by auto next fix z' assume "?R z' (of_complex x) u" hence "of_complex z' \ unit_disc" "of_complex z' \ circline_set x_axis" by (auto simp add: cmod_eq_Re) let ?Mz' = "?M (of_complex z')" have "?Mz' \ unit_disc" "?Mz' \ circline_set x_axis" using \of_complex z' \ unit_disc\ \of_complex z' \ circline_set x_axis\ \cmod a < 1\ \is_real a\ using blaschke_unit_disc_fix unit_disc_fix_discI using blaschke_real_preserve_x_axis circline_set_x_axis by blast+ hence "-1 < Re (to_complex ?Mz')" "Re (to_complex ?Mz') < 1" "is_real (to_complex ?Mz')" unfolding circline_set_x_axis by (auto simp add: cmod_eq_Re) moreover have "poincare_distance ?Mz' ?Mx = poincare_distance ?Mz' 
?Mu" using \?R z' (of_complex x) u\ using \cmod a < 1\ \of_complex x \ unit_disc\ \of_complex z' \ unit_disc\ \u \ unit_disc\ by auto ultimately have "?R (to_complex ?Mz') ?Mx ?Mu" using \?Mz' \ unit_disc\ inf_or_of_complex[of ?Mz'] by auto hence "?Mz' = of_complex Mz" using \?P ?Mx ?Mu\ \?R Mz ?Mx ?Mu\ by (metis \moebius_pt (blaschke a) (of_complex z') \ unit_disc\ \of_complex Mz \ unit_disc\ to_complex_of_complex unit_disc_to_complex_inj) thus "z' = to_complex ?z" - using moebius_pt_invert by auto + by (simp add: moebius_pt_invert) qed qed qed thus ?thesis using assms by (metis to_complex_of_complex) qed (* ------------------------------------------------------------------ *) subsection\Triangle inequality\ (* ------------------------------------------------------------------ *) lemma poincare_distance_formula_zero_sum: assumes "u \ unit_disc" and "v \ unit_disc" shows "poincare_distance u 0\<^sub>h + poincare_distance 0\<^sub>h v = (let u' = cmod (to_complex u); v' = cmod (to_complex v) in arcosh (((1 + u'\<^sup>2) * (1 + v'\<^sup>2) + 4 * u' * v') / ((1 - u'\<^sup>2) * (1 - v'\<^sup>2))))" proof- obtain u' v' where uv: "u' = to_complex u" "v' = to_complex v" by auto have uv': "u = of_complex u'" "v = of_complex v'" using uv assms inf_or_of_complex[of u] inf_or_of_complex[of v] by auto let ?u' = "cmod u'" and ?v' = "cmod v'" have disc: "?u'\<^sup>2 < 1" "?v'\<^sup>2 < 1" using unit_disc_cmod_square_lt_1[OF \u \ unit_disc\] using unit_disc_cmod_square_lt_1[OF \v \ unit_disc\] uv by auto thm arcosh_add have "arcosh (1 + 2 * ?u'\<^sup>2 / (1 - ?u'\<^sup>2)) + arcosh (1 + 2 * ?v'\<^sup>2 / (1 - ?v'\<^sup>2)) = arcosh (((1 + ?u'\<^sup>2) * (1 + ?v'\<^sup>2) + 4 * ?u' * ?v') / ((1 - ?u'\<^sup>2) * (1 - ?v'\<^sup>2)))" (is "arcosh ?ll + arcosh ?rr = arcosh ?r") proof (subst arcosh_add) show "?ll \ 1" "?rr \ 1" using disc by auto next show "arcosh ((1 + 2 * ?u'\<^sup>2 / (1 - ?u'\<^sup>2)) * (1 + 2 * ?v'\<^sup>2 / (1 - ?v'\<^sup>2)) + sqrt (((1 + 2 * ?u'\<^sup>2 / (1 - ?u'\<^sup>2))\<^sup>2 - 1) * ((1 + 2 * ?v'\<^sup>2 / (1 - ?v'\<^sup>2))\<^sup>2 - 1))) = arcosh ?r" (is "arcosh ?l = _") proof- have "1 + 2 * ?u'\<^sup>2 / (1 - ?u'\<^sup>2) = (1 + ?u'\<^sup>2) / (1 - ?u'\<^sup>2)" using disc by (subst add_divide_eq_iff, simp_all) moreover have "1 + 2 * ?v'\<^sup>2 / (1 - ?v'\<^sup>2) = (1 + ?v'\<^sup>2) / (1 - ?v'\<^sup>2)" using disc by (subst add_divide_eq_iff, simp_all) moreover have "sqrt (((1 + 2 * ?u'\<^sup>2 / (1 - ?u'\<^sup>2))\<^sup>2 - 1) * ((1 + 2 * ?v'\<^sup>2 / (1 - ?v'\<^sup>2))\<^sup>2 - 1)) = (4 * ?u' * ?v') / ((1 - ?u'\<^sup>2) * (1 - ?v'\<^sup>2))" (is "sqrt ?s = ?t") proof- have "?s = ?t\<^sup>2" using disc apply (subst add_divide_eq_iff, simp)+ apply (subst power_divide)+ apply simp apply (subst divide_diff_eq_iff, simp)+ apply (simp add: power2_eq_square field_simps) done thus ?thesis using disc by simp qed ultimately have "?l = ?r" using disc by simp (subst add_divide_distrib, simp) thus ?thesis by simp qed qed thus ?thesis using uv' assms using poincare_distance_formula by (simp add: Let_def) qed lemma poincare_distance_triangle_inequality: assumes "u \ unit_disc" and "v \ unit_disc" and "w \ unit_disc" shows "poincare_distance u v + poincare_distance v w \ poincare_distance u w" (is "?P' u v w") proof- have "\ w. 
w \ unit_disc \ ?P' u v w" (is "?P v u") proof (rule wlog_x_axis[where P="?P"]) fix x assume "is_real x" "0 \ Re x" "Re x < 1" hence "of_complex x \ unit_disc" by (simp add: cmod_eq_Re) show "?P 0\<^sub>h (of_complex x)" proof safe fix w assume "w \ unit_disc" then obtain w' where w: "w = of_complex w'" using inf_or_of_complex[of w] by auto let ?x = "cmod x" and ?w = "cmod w'" and ?xw = "cmod (x - w')" have disc: "?x\<^sup>2 < 1" "?w\<^sup>2 < 1" using unit_disc_cmod_square_lt_1[OF \of_complex x \ unit_disc\] using unit_disc_cmod_square_lt_1[OF \w \ unit_disc\] w by auto have "poincare_distance (of_complex x) 0\<^sub>h + poincare_distance 0\<^sub>h w = arcosh (((1 + ?x\<^sup>2) * (1 + ?w\<^sup>2) + 4 * ?x * ?w) / ((1 - ?x\<^sup>2) * (1 - ?w\<^sup>2)))" (is "_ = arcosh ?r1") using poincare_distance_formula_zero_sum[OF \of_complex x \ unit_disc\ \w \ unit_disc\] w by (simp add: Let_def) moreover have "poincare_distance (of_complex x) (of_complex w') = arcosh (((1 - ?x\<^sup>2) * (1 - ?w\<^sup>2) + 2 * ?xw\<^sup>2) / ((1 - ?x\<^sup>2) * (1 - ?w\<^sup>2)))" (is "_ = arcosh ?r2") using disc using poincare_distance_formula[OF \of_complex x \ unit_disc\ \w \ unit_disc\] w by (subst add_divide_distrib) simp moreover have *: "(1 - ?x\<^sup>2) * (1 - ?w\<^sup>2) + 2 * ?xw\<^sup>2 \ (1 + ?x\<^sup>2) * (1 + ?w\<^sup>2) + 4 * ?x * ?w" proof- have "(cmod (x - w'))\<^sup>2 \ (cmod x + cmod w')\<^sup>2" using norm_triangle_ineq4[of x w'] by (simp add: power_mono) thus ?thesis by (simp add: field_simps power2_sum) qed have "arcosh ?r1 \ arcosh ?r2" proof (subst arcosh_mono) show "?r1 \ 1" using disc by (smt "*" le_divide_eq_1_pos mult_pos_pos zero_le_power2) next show "?r2 \ 1" using disc by simp next show "?r1 \ ?r2" using disc using * by (subst divide_right_mono, simp_all) qed ultimately show "poincare_distance (of_complex x) w \ poincare_distance (of_complex x) 0\<^sub>h + poincare_distance 0\<^sub>h w" using \of_complex x \ unit_disc\ \w \ unit_disc\ w using poincare_distance_formula by simp qed next show "v \ unit_disc" "u \ unit_disc" by fact+ next fix M u v assume *: "unit_disc_fix M" "u \ unit_disc" "v \ unit_disc" assume **: "?P (moebius_pt M u) (moebius_pt M v)" show "?P u v" proof safe fix w assume "w \ unit_disc" thus "?P' v u w" using * **[rule_format, of "moebius_pt M w"] by simp qed qed thus ?thesis using assms by auto qed end \ No newline at end of file diff --git a/thys/QHLProver/Complex_Matrix.thy b/thys/QHLProver/Complex_Matrix.thy --- a/thys/QHLProver/Complex_Matrix.thy +++ b/thys/QHLProver/Complex_Matrix.thy @@ -1,2346 +1,2346 @@ section \Complex matrices\ theory Complex_Matrix imports "Jordan_Normal_Form.Matrix" "Jordan_Normal_Form.Conjugate" "Jordan_Normal_Form.Jordan_Normal_Form_Existence" begin subsection \Trace of a matrix\ definition trace :: "'a::ring mat \ 'a" where "trace A = (\ i \ {0 ..< dim_row A}. A $$ (i,i))" lemma trace_zero [simp]: "trace (0\<^sub>m n n) = 0" by (simp add: trace_def) lemma trace_id [simp]: "trace (1\<^sub>m n) = n" by (simp add: trace_def) lemma trace_comm: fixes A B :: "'a::comm_ring mat" assumes A: "A \ carrier_mat n n" and B: "B \ carrier_mat n n" shows "trace (A * B) = trace (B * A)" proof (simp add: trace_def) have "(\i = 0..i = 0..j = 0.. = (\j = 0..i = 0.. = (\j = 0.. row B j)" by (metis (no_types, lifting) A B atLeastLessThan_iff carrier_matD index_col index_row scalar_prod_def sum.cong) also have "\ = (\j = 0.. 
col A j)" apply (rule sum.cong) apply auto apply (subst comm_scalar_prod[where n=n]) apply auto using assms by auto also have "\ = (\j = 0..i = 0..i = 0.. carrier_mat n n" and B: "B \ carrier_mat n n" shows "trace (A + B) = trace A + trace B" (is "?lhs = ?rhs") proof - have "?lhs = (\i=0.. = (\i=0..i=0..i=0..i=0..i=0..i=0.. carrier_mat n n" and B: "B \ carrier_mat n n" shows "trace (A - B) = trace A - trace B" (is "?lhs = ?rhs") proof - have "?lhs = (\i=0.. = (\i=0..i=0..i=0..i=0..i=0..i=0.. carrier_mat n n" shows "trace (c \\<^sub>m A) = c * trace A" proof - have "trace (c \\<^sub>m A) = (\i = 0.. = c * (\i = 0.. = c * trace A" unfolding trace_def by auto ultimately show ?thesis by auto qed subsection \Conjugate of a vector\ lemma conjugate_scalar_prod: fixes v w :: "'a::conjugatable_ring vec" assumes "dim_vec v = dim_vec w" shows "conjugate (v \ w) = conjugate v \ conjugate w" using assms by (simp add: scalar_prod_def sum_conjugate conjugate_dist_mul) subsection \Inner product\ abbreviation inner_prod :: "'a vec \ 'a vec \ 'a :: conjugatable_ring" where "inner_prod v w \ w \c v" lemma conjugate_scalar_prod_Im [simp]: "Im (v \c v) = 0" by (simp add: scalar_prod_def conjugate_vec_def sum.neutral) lemma conjugate_scalar_prod_Re [simp]: "Re (v \c v) \ 0" by (simp add: scalar_prod_def conjugate_vec_def sum_nonneg) lemma self_cscalar_prod_geq_0: fixes v :: "'a::conjugatable_ordered_field vec" shows "v \c v \ 0" by (auto simp add: scalar_prod_def, rule sum_nonneg, rule conjugate_square_positive) lemma inner_prod_distrib_left: fixes u v w :: "('a::conjugatable_field) vec" assumes dimu: "u \ carrier_vec n" and dimv:"v \ carrier_vec n" and dimw: "w \ carrier_vec n" shows "inner_prod (v + w) u = inner_prod v u + inner_prod w u" (is "?lhs = ?rhs") proof - have dimcv: "conjugate v \ carrier_vec n" and dimcw: "conjugate w \ carrier_vec n" using assms by auto have dimvw: "conjugate (v + w) \ carrier_vec n" using assms by auto have "u \ (conjugate (v + w)) = u \ conjugate v + u \ conjugate w" using dimv dimw dimu dimcv dimcw by (metis conjugate_add_vec scalar_prod_add_distrib) then show ?thesis by auto qed lemma inner_prod_distrib_right: fixes u v w :: "('a::conjugatable_field) vec" assumes dimu: "u \ carrier_vec n" and dimv:"v \ carrier_vec n" and dimw: "w \ carrier_vec n" shows "inner_prod u (v + w) = inner_prod u v + inner_prod u w" (is "?lhs = ?rhs") proof - have dimvw: "v + w \ carrier_vec n" using assms by auto have dimcu: "conjugate u \ carrier_vec n" using assms by auto have "(v + w) \ (conjugate u) = v \ conjugate u + w \ conjugate u" apply (simp add: comm_scalar_prod[OF dimvw dimcu]) apply (simp add: scalar_prod_add_distrib[OF dimcu dimv dimw]) apply (insert dimv dimw dimcu, simp add: comm_scalar_prod[of _ n]) done then show ?thesis by auto qed lemma inner_prod_minus_distrib_right: fixes u v w :: "('a::conjugatable_field) vec" assumes dimu: "u \ carrier_vec n" and dimv:"v \ carrier_vec n" and dimw: "w \ carrier_vec n" shows "inner_prod u (v - w) = inner_prod u v - inner_prod u w" (is "?lhs = ?rhs") proof - have dimvw: "v - w \ carrier_vec n" using assms by auto have dimcu: "conjugate u \ carrier_vec n" using assms by auto have "(v - w) \ (conjugate u) = v \ conjugate u - w \ conjugate u" apply (simp add: comm_scalar_prod[OF dimvw dimcu]) apply (simp add: scalar_prod_minus_distrib[OF dimcu dimv dimw]) apply (insert dimv dimw dimcu, simp add: comm_scalar_prod[of _ n]) done then show ?thesis by auto qed lemma inner_prod_smult_right: fixes u v :: "complex vec" assumes dimu: "u \ carrier_vec n" and 
dimv:"v \ carrier_vec n" shows "inner_prod (a \\<^sub>v u) v = conjugate a * inner_prod u v" (is "?lhs = ?rhs") using assms apply (simp add: scalar_prod_def conjugate_dist_mul) apply (subst sum_distrib_left) by (rule sum.cong, auto) lemma inner_prod_smult_left: fixes u v :: "complex vec" assumes dimu: "u \ carrier_vec n" and dimv: "v \ carrier_vec n" shows "inner_prod u (a \\<^sub>v v) = a * inner_prod u v" (is "?lhs = ?rhs") using assms apply (simp add: scalar_prod_def) apply (subst sum_distrib_left) by (rule sum.cong, auto) lemma inner_prod_smult_left_right: fixes u v :: "complex vec" assumes dimu: "u \ carrier_vec n" and dimv: "v \ carrier_vec n" shows "inner_prod (a \\<^sub>v u) (b \\<^sub>v v) = conjugate a * b * inner_prod u v" (is "?lhs = ?rhs") using assms apply (simp add: scalar_prod_def) apply (subst sum_distrib_left) by (rule sum.cong, auto) lemma inner_prod_swap: fixes x y :: "complex vec" assumes "y \ carrier_vec n" and "x \ carrier_vec n" shows "inner_prod y x = conjugate (inner_prod x y)" apply (simp add: scalar_prod_def) apply (rule sum.cong) using assms by auto text \Cauchy-Schwarz theorem for complex vectors. This is analogous to aux\_Cauchy and Cauchy\_Schwarz\_ineq in Generalizations2.thy in QR\_Decomposition. Consider merging and moving to Isabelle library.\ lemma aux_Cauchy: fixes x y :: "complex vec" assumes "x \ carrier_vec n" and "y \ carrier_vec n" shows "0 \ inner_prod x x + a * (inner_prod x y) + (cnj a) * ((cnj (inner_prod x y)) + a * (inner_prod y y))" proof - have "(inner_prod (x+ a \\<^sub>v y) (x+a \\<^sub>v y)) = (inner_prod (x+a \\<^sub>v y) x) + (inner_prod (x+a \\<^sub>v y) (a \\<^sub>v y))" apply (subst inner_prod_distrib_right) using assms by auto also have "\ = inner_prod x x + (a) * (inner_prod x y) + cnj a * ((cnj (inner_prod x y)) + (a) * (inner_prod y y))" apply (subst (1 2) inner_prod_distrib_left[of _ n]) apply (auto simp add: assms) apply (subst (1 2) inner_prod_smult_right[of _ n]) apply (auto simp add: assms) apply (subst inner_prod_smult_left[of _ n]) apply (auto simp add: assms) apply (subst inner_prod_swap[of y n x]) apply (auto simp add: assms) unfolding distrib_left by auto finally show ?thesis by (metis self_cscalar_prod_geq_0) qed lemma Cauchy_Schwarz_complex_vec: fixes x y :: "complex vec" assumes "x \ carrier_vec n" and "y \ carrier_vec n" shows "inner_prod x y * inner_prod y x \ inner_prod x x * inner_prod y y" proof - define cnj_a where "cnj_a = - (inner_prod x y)/ cnj (inner_prod y y)" define a where "a = cnj (cnj_a)" have cnj_rw: "(cnj a) = cnj_a" unfolding a_def by (simp) have rw_0: "cnj (inner_prod x y) + a * (inner_prod y y) = 0" unfolding a_def cnj_a_def using assms(1) assms(2) conjugate_square_eq_0_vec by fastforce have "0 \ (inner_prod x x + a * (inner_prod x y) + (cnj a) * ((cnj (inner_prod x y)) + a * (inner_prod y y)))" using aux_Cauchy assms by auto also have "\ = (inner_prod x x + a * (inner_prod x y))" unfolding rw_0 by auto also have "\ = (inner_prod x x - (inner_prod x y) * cnj (inner_prod x y) / (inner_prod y y))" unfolding a_def cnj_a_def by simp finally have " 0 \ (inner_prod x x - (inner_prod x y) * cnj (inner_prod x y) / (inner_prod y y)) " . 
hence "0 \ (inner_prod x x - (inner_prod x y) * cnj (inner_prod x y) / (inner_prod y y)) * (inner_prod y y)" by auto also have "\ = ((inner_prod x x)*(inner_prod y y) - (inner_prod x y) * cnj (inner_prod x y))" by (smt add.inverse_neutral add_diff_cancel diff_0 diff_divide_eq_iff divide_cancel_right mult_eq_0_iff nonzero_mult_div_cancel_right rw_0) finally have "(inner_prod x y) * cnj (inner_prod x y) \ (inner_prod x x)*(inner_prod y y)" by auto then show ?thesis apply (subst inner_prod_swap[of y n x]) by (auto simp add: assms) qed subsection \Hermitian adjoint of a matrix\ abbreviation adjoint where "adjoint \ mat_adjoint" lemma adjoint_dim_row [simp]: "dim_row (adjoint A) = dim_col A" by (simp add: mat_adjoint_def) lemma adjoint_dim_col [simp]: "dim_col (adjoint A) = dim_row A" by (simp add: mat_adjoint_def) lemma adjoint_dim: "A \ carrier_mat n n \ adjoint A \ carrier_mat n n" using adjoint_dim_col adjoint_dim_row by blast lemma adjoint_def: "adjoint A = mat (dim_col A) (dim_row A) (\(i,j). conjugate (A $$ (j,i)))" unfolding mat_adjoint_def mat_of_rows_def by auto lemma adjoint_eval: assumes "i < dim_col A" "j < dim_row A" shows "(adjoint A) $$ (i,j) = conjugate (A $$ (j,i))" using assms by (simp add: adjoint_def) lemma adjoint_row: assumes "i < dim_col A" shows "row (adjoint A) i = conjugate (col A i)" apply (rule eq_vecI) using assms by (auto simp add: adjoint_eval) lemma adjoint_col: assumes "i < dim_row A" shows "col (adjoint A) i = conjugate (row A i)" apply (rule eq_vecI) using assms by (auto simp add: adjoint_eval) text \The identity = \ lemma adjoint_def_alter: fixes v w :: "'a::conjugatable_field vec" and A :: "'a::conjugatable_field mat" assumes dims: "v \ carrier_vec n" "w \ carrier_vec m" "A \ carrier_mat n m" shows "inner_prod v (A *\<^sub>v w) = inner_prod (adjoint A *\<^sub>v v) w" (is "?lhs = ?rhs") proof - from dims have "?lhs = (\i=0..j=0..i=0..j=0..m n) = (1\<^sub>m n::complex mat)" apply (rule eq_matI) by (auto simp add: adjoint_eval) lemma adjoint_scale: fixes A :: "'a::conjugatable_field mat" shows "adjoint (a \\<^sub>m A) = (conjugate a) \\<^sub>m adjoint A" apply (rule eq_matI) using conjugatable_ring_class.conjugate_dist_mul by (auto simp add: adjoint_eval) lemma adjoint_add: fixes A B :: "'a::conjugatable_field mat" assumes "A \ carrier_mat n m" "B \ carrier_mat n m" shows "adjoint (A + B) = adjoint A + adjoint B" apply (rule eq_matI) using assms conjugatable_ring_class.conjugate_dist_add by( auto simp add: adjoint_eval) lemma adjoint_minus: fixes A B :: "'a::conjugatable_field mat" assumes "A \ carrier_mat n m" "B \ carrier_mat n m" shows "adjoint (A - B) = adjoint A - adjoint B" apply (rule eq_matI) using assms apply(auto simp add: adjoint_eval) by (metis add_uminus_conv_diff conjugate_dist_add conjugate_neg) lemma adjoint_mult: fixes A B :: "'a::conjugatable_field mat" assumes "A \ carrier_mat n m" "B \ carrier_mat m l" shows "adjoint (A * B) = adjoint B * adjoint A" proof (rule eq_matI, auto simp add: adjoint_eval adjoint_row adjoint_col) fix i j assume "i < dim_col B" "j < dim_row A" show "conjugate (row A j \ col B i) = conjugate (col B i) \ conjugate (row A j)" using assms apply (simp add: conjugate_scalar_prod) apply (subst comm_scalar_prod[where n="dim_row B"]) by (auto simp add: carrier_vecI) qed lemma adjoint_adjoint: fixes A :: "'a::conjugatable_field mat" shows "adjoint (adjoint A) = A" by (rule eq_matI, auto simp add: adjoint_eval) lemma trace_adjoint_positive: fixes A :: "complex mat" shows "trace (A * adjoint A) \ 0" apply (auto simp add: 
trace_def adjoint_col) apply (rule sum_nonneg) by auto subsection \Algebraic manipulations on matrices\ lemma right_add_zero_mat[simp]: "(A :: 'a :: monoid_add mat) \ carrier_mat nr nc \ A + 0\<^sub>m nr nc = A" by (intro eq_matI, auto) lemma add_carrier_mat': "A \ carrier_mat nr nc \ B \ carrier_mat nr nc \ A + B \ carrier_mat nr nc" by simp lemma minus_carrier_mat': "A \ carrier_mat nr nc \ B \ carrier_mat nr nc \ A - B \ carrier_mat nr nc" by auto lemma swap_plus_mat: fixes A B C :: "'a::semiring_1 mat" assumes "A \ carrier_mat n n" "B \ carrier_mat n n" "C \ carrier_mat n n" shows "A + B + C = A + C + B" by (metis assms assoc_add_mat comm_add_mat) lemma uminus_mat: fixes A :: "complex mat" assumes "A \ carrier_mat n n" shows "-A = (-1) \\<^sub>m A" by auto ML_file "mat_alg.ML" method_setup mat_assoc = {* mat_assoc_method *} "Normalization of expressions on matrices" lemma mat_assoc_test: fixes A B C D :: "complex mat" assumes "A \ carrier_mat n n" "B \ carrier_mat n n" "C \ carrier_mat n n" "D \ carrier_mat n n" shows "(A * B) * (C * D) = A * B * C * D" "adjoint (A * adjoint B) * C = B * (adjoint A * C)" "A * 1\<^sub>m n * 1\<^sub>m n * B * 1\<^sub>m n = A * B" "(A - B) + (B - C) = A + (-B) + B + (-C)" "A + (B - C) = A + B - C" "A - (B + C + D) = A - B - C - D" "(A + B) * (B + C) = A * B + B * B + A * C + B * C" "A - B = A + (-1) \\<^sub>m B" "A * (B - C) * D = A * B * D - A * C * D" "trace (A * B * C) = trace (B * C * A)" "trace (A * B * C * D) = trace (C * D * A * B)" "trace (A + B * C) = trace A + trace (C * B)" "A + B = B + A" "A + B + C = C + B + A" "A + B + (C + D) = A + C + (B + D)" using assms by (mat_assoc n)+ subsection \Hermitian matrices\ text \A Hermitian matrix is a matrix that is equal to its Hermitian adjoint.\ definition hermitian :: "'a::conjugatable_field mat \ bool" where "hermitian A \ (adjoint A = A)" lemma hermitian_one: shows "hermitian ((1\<^sub>m n)::('a::conjugatable_field mat))" unfolding hermitian_def proof- have "conjugate (1::'a) = 1" apply (subst mult_1_right[symmetric, of "conjugate 1"]) apply (subst conjugate_id[symmetric, of "conjugate 1 * 1"]) apply (subst conjugate_dist_mul) apply auto done then show "adjoint ((1\<^sub>m n)::('a::conjugatable_field mat)) = (1\<^sub>m n)" by (auto simp add: adjoint_eval) qed subsection \Inverse matrices\ lemma inverts_mat_symm: fixes A B :: "'a::field mat" assumes dim: "A \ carrier_mat n n" "B \ carrier_mat n n" and AB: "inverts_mat A B" shows "inverts_mat B A" proof - have "A * B = 1\<^sub>m n" using dim AB unfolding inverts_mat_def by auto with dim have "B * A = 1\<^sub>m n" by (rule mat_mult_left_right_inverse) then show "inverts_mat B A" using dim inverts_mat_def by auto qed lemma inverts_mat_unique: fixes A B C :: "'a::field mat" assumes dim: "A \ carrier_mat n n" "B \ carrier_mat n n" "C \ carrier_mat n n" and AB: "inverts_mat A B" and AC: "inverts_mat A C" shows "B = C" proof - have AB1: "A * B = 1\<^sub>m n" using AB dim unfolding inverts_mat_def by auto have "A * C = 1\<^sub>m n" using AC dim unfolding inverts_mat_def by auto then have CA1: "C * A = 1\<^sub>m n" using mat_mult_left_right_inverse[of A n C] dim by auto then have "C = C * 1\<^sub>m n" using dim by auto also have "\ = C * (A * B)" using AB1 by auto also have "\ = (C * A) * B" using dim by auto also have "\ = 1\<^sub>m n * B" using CA1 by auto also have "\ = B" using dim by auto finally show "B = C" .. 
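(* The calculation above is the usual uniqueness-of-inverse argument:
   C = C * (A * B) = (C * A) * B = B, using associativity together with
   A * B = 1 and C * A = 1. *)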
qed subsection \Unitary matrices\ text \A unitary matrix is a matrix whose Hermitian adjoint is also its inverse.\ definition unitary :: "'a::conjugatable_field mat \ bool" where "unitary A \ A \ carrier_mat (dim_row A) (dim_row A) \ inverts_mat A (adjoint A)" lemma unitaryD2: assumes "A \ carrier_mat n n" shows "unitary A \ inverts_mat (adjoint A) A" using assms adjoint_dim inverts_mat_symm unitary_def by blast lemma unitary_simps [simp]: "A \ carrier_mat n n \ unitary A \ adjoint A * A = 1\<^sub>m n" "A \ carrier_mat n n \ unitary A \ A * adjoint A = 1\<^sub>m n" apply (metis adjoint_dim_row carrier_matD(2) inverts_mat_def unitaryD2) by (simp add: inverts_mat_def unitary_def) lemma unitary_adjoint [simp]: assumes "A \ carrier_mat n n" "unitary A" shows "unitary (adjoint A)" unfolding unitary_def using adjoint_dim[OF assms(1)] assms by (auto simp add: unitaryD2[OF assms] adjoint_adjoint) lemma unitary_one: shows "unitary ((1\<^sub>m n)::('a::conjugatable_field mat))" unfolding unitary_def proof - define I where I_def[simp]: "I \ ((1\<^sub>m n)::('a::conjugatable_field mat))" have dim: "I \ carrier_mat n n" by auto have "hermitian I" using hermitian_one by auto hence "adjoint I = I" using hermitian_def by auto with dim show "I \ carrier_mat (dim_row I) (dim_row I) \ inverts_mat I (adjoint I)" unfolding inverts_mat_def using dim by auto qed lemma unitary_zero: fixes A :: "'a::conjugatable_field mat" assumes "A \ carrier_mat 0 0" shows "unitary A" unfolding unitary_def inverts_mat_def Let_def using assms by auto lemma unitary_elim: assumes dims: "A \ carrier_mat n n" "B \ carrier_mat n n" "P \ carrier_mat n n" and uP: "unitary P" and eq: "P * A * adjoint P = P * B * adjoint P" shows "A = B" proof - have dimaP: "adjoint P \ carrier_mat n n" using dims by auto have iv: "inverts_mat P (adjoint P)" using uP unitary_def by auto then have "P * (adjoint P) = 1\<^sub>m n" using inverts_mat_def dims by auto then have aPP: "adjoint P * P = 1\<^sub>m n" using mat_mult_left_right_inverse[OF dims(3) dimaP] by auto have "adjoint P * (P * A * adjoint P) * P = (adjoint P * P) * A * (adjoint P * P)" using dims dimaP by (mat_assoc n) also have "\ = 1\<^sub>m n * A * 1\<^sub>m n" using aPP by auto also have "\ = A" using dims by auto finally have eqA: "A = adjoint P * (P * A * adjoint P) * P" .. have "adjoint P * (P * B * adjoint P) * P = (adjoint P * P) * B * (adjoint P * P)" using dims dimaP by (mat_assoc n) also have "\ = 1\<^sub>m n * B * 1\<^sub>m n" using aPP by auto also have "\ = B" using dims by auto finally have eqB: "B = adjoint P * (P * B * adjoint P) * P" .. 
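(* With both A and B expressed as adjoint P * (P * _ * adjoint P) * P (facts eqA
   and eqB), the assumption P * A * adjoint P = P * B * adjoint P immediately
   forces A = B. *)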
then show ?thesis using eqA eqB eq by auto qed lemma unitary_is_corthogonal: fixes U :: "'a::conjugatable_field mat" assumes dim: "U \ carrier_mat n n" and U: "unitary U" shows "corthogonal_mat U" unfolding corthogonal_mat_def Let_def proof (rule conjI) have dima: "adjoint U \ carrier_mat n n" using dim by auto have aUU: "mat_adjoint U * U = (1\<^sub>m n)" apply (insert U[unfolded unitary_def] dim dima, drule conjunct2) apply (drule inverts_mat_symm[of "U", OF dim dima], unfold inverts_mat_def, auto) done then show "diagonal_mat (mat_adjoint U * U)" by (simp add: diagonal_mat_def) show "\i 0" using dim by (simp add: aUU) qed lemma unitary_times_unitary: fixes P Q :: "'a:: conjugatable_field mat" assumes dim: "P \ carrier_mat n n" "Q \ carrier_mat n n" and uP: "unitary P" and uQ: "unitary Q" shows "unitary (P * Q)" proof - have dim_pq: "P * Q \ carrier_mat n n" using dim by auto have "(P * Q) * adjoint (P * Q) = P * (Q * adjoint Q) * adjoint P" using dim by (mat_assoc n) also have "\ = P * (1\<^sub>m n) * adjoint P" using uQ dim by auto also have "\ = P * adjoint P" using dim by (mat_assoc n) also have "\ = 1\<^sub>m n" using uP dim by simp finally have "(P * Q) * adjoint (P * Q) = 1\<^sub>m n" by auto hence "inverts_mat (P * Q) (adjoint (P * Q))" using inverts_mat_def dim_pq by auto thus "unitary (P*Q)" using unitary_def dim_pq by auto qed lemma unitary_operator_keep_trace: fixes U A :: "complex mat" assumes dU: "U \ carrier_mat n n" and dA: "A \ carrier_mat n n" and u: "unitary U" shows "trace A = trace (adjoint U * A * U)" proof - have u': "U * adjoint U = 1\<^sub>m n" using u unfolding unitary_def inverts_mat_def using dU by auto have "trace (adjoint U * A * U) = trace (U * adjoint U * A)" using dU dA by (mat_assoc n) also have "\ = trace A" using u' dA by auto finally show ?thesis by auto qed subsection \Normalization of vectors\ definition vec_norm :: "complex vec \ complex" where "vec_norm v \ csqrt (v \c v)" lemma vec_norm_geq_0: fixes v :: "complex vec" shows "vec_norm v \ 0" unfolding vec_norm_def by (insert self_cscalar_prod_geq_0[of v], simp) lemma vec_norm_zero: fixes v :: "complex vec" assumes dim: "v \ carrier_vec n" shows "vec_norm v = 0 \ v = 0\<^sub>v n" unfolding vec_norm_def by (subst conjugate_square_eq_0_vec[OF dim, symmetric], rule csqrt_eq_0) lemma vec_norm_ge_0: fixes v :: "complex vec" assumes dim_v: "v \ carrier_vec n" and neq0: "v \ 0\<^sub>v n" shows "vec_norm v > 0" proof - have geq: "vec_norm v \ 0" using vec_norm_geq_0 by auto have neq: "vec_norm v \ 0" apply (insert dim_v neq0) apply (drule vec_norm_zero, auto) done show ?thesis using neq geq by (rule dual_order.not_eq_order_implies_strict) qed definition vec_normalize :: "complex vec \ complex vec" where "vec_normalize v = (if (v = 0\<^sub>v (dim_vec v)) then v else 1 / (vec_norm v) \\<^sub>v v)" lemma normalized_vec_dim[simp]: assumes "(v::complex vec) \ carrier_vec n" shows "vec_normalize v \ carrier_vec n" unfolding vec_normalize_def using assms by auto lemma vec_eq_norm_smult_normalized: shows "v = vec_norm v \\<^sub>v vec_normalize v" proof (cases "v = 0\<^sub>v (dim_vec v)") define n where "n = dim_vec v" then have dimv: "v \ carrier_vec n" by auto then have dimnv: "vec_normalize v \ carrier_vec n" by auto { case True then have v0: "v = 0\<^sub>v n" using n_def by auto then have n0: "vec_norm v = 0" using vec_norm_def by auto have "vec_norm v \\<^sub>v vec_normalize v = 0\<^sub>v n" unfolding smult_vec_def by (auto simp add: n0 carrier_vecD[OF dimnv]) then show ?thesis using v0 by auto next case False 
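(* Nonzero case: here vec_norm v > 0 and vec_normalize v scales v by
   1 / vec_norm v, so multiplying back by vec_norm v cancels the factor and
   recovers v.  Purely as an illustration (not used in the proof): for the
   vector v = [3, 4] one gets vec_norm v = csqrt (9 + 16) = 5 and
   vec_normalize v = [3/5, 4/5]. *)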
then have v: "v \ 0\<^sub>v n" using n_def by auto then have ge0: "vec_norm v > 0" using vec_norm_ge_0 dimv by auto have "vec_normalize v = (1 / vec_norm v) \\<^sub>v v" using False vec_normalize_def by auto then have "vec_norm v \\<^sub>v vec_normalize v = (vec_norm v * (1 / vec_norm v)) \\<^sub>v v" using smult_smult_assoc by auto also have "\ = v" using ge0 by auto finally have "v = vec_norm v \\<^sub>v vec_normalize v".. then show "v = vec_norm v \\<^sub>v vec_normalize v" using v by auto } qed lemma normalized_cscalar_prod: fixes v w :: "complex vec" assumes dim_v: "v \ carrier_vec n" and dim_w: "w \ carrier_vec n" shows "v \c w = (vec_norm v * vec_norm w) * (vec_normalize v \c vec_normalize w)" unfolding vec_normalize_def apply (split if_split, split if_split) proof (intro conjI impI) note dim0 = dim_v dim_w have dim: "dim_vec v = n" "dim_vec w = n" using dim0 by auto { assume "w = 0\<^sub>v n" "v = 0\<^sub>v n" then have lhs: "v \c w = 0" by auto then moreover have rhs: "vec_norm v * vec_norm w * (v \c w) = 0" by auto ultimately have "v \c w = vec_norm v * vec_norm w * (v \c w)" by auto } with dim show "w = 0\<^sub>v (dim_vec w) \ v = 0\<^sub>v (dim_vec v) \ v \c w = vec_norm v * vec_norm w * (v \c w)" by auto { assume asm: "w = 0\<^sub>v n" "v \ 0\<^sub>v n" then have w0: "conjugate w = 0\<^sub>v n" by auto with dim0 have "(1 / vec_norm v \\<^sub>v v) \c w = 0" by auto then moreover have rhs: "vec_norm v * vec_norm w * ((1 / vec_norm v \\<^sub>v v) \c w) = 0" by auto moreover have "v \c w = 0" using w0 dim0 by auto ultimately have "v \c w = vec_norm v * vec_norm w * ((1 / vec_norm v \\<^sub>v v) \c w)" by auto } with dim show "w = 0\<^sub>v (dim_vec w) \ v \ 0\<^sub>v (dim_vec v) \ v \c w = vec_norm v * vec_norm w * ((1 / vec_norm v \\<^sub>v v) \c w)" by auto { assume asm: "w \ 0\<^sub>v n" "v = 0\<^sub>v n" with dim0 have "v \c (1 / vec_norm w \\<^sub>v w) = 0" by auto then moreover have rhs: "vec_norm v * vec_norm w * (v \c (1 / vec_norm w \\<^sub>v w)) = 0" by auto moreover have "v \c w = 0" using asm dim0 by auto ultimately have "v \c w = vec_norm v * vec_norm w * (v \c (1 / vec_norm w \\<^sub>v w))" by auto } with dim show "w \ 0\<^sub>v (dim_vec w) \ v = 0\<^sub>v (dim_vec v) \ v \c w = vec_norm v * vec_norm w * (v \c (1 / vec_norm w \\<^sub>v w))" by auto { assume asmw: "w \ 0\<^sub>v n" and asmv: "v \ 0\<^sub>v n" have "vec_norm w > 0" by (insert asmw dim0, rule vec_norm_ge_0, auto) then have cw: "conjugate (1 / vec_norm w) = 1 / vec_norm w" by (simp add: complex_eq_iff complex_is_Real_iff) from dim0 have "((1 / vec_norm v \\<^sub>v v) \c (1 / vec_norm w \\<^sub>v w)) = 1 / vec_norm v * (v \c (1 / vec_norm w \\<^sub>v w))" by auto also have "\ = 1 / vec_norm v * (v \ (conjugate (1 / vec_norm w) \\<^sub>v conjugate w))" by (subst conjugate_smult_vec, auto) also have "\ = 1 / vec_norm v * conjugate (1 / vec_norm w) * (v \ conjugate w)" using dim by auto also have "\ = 1 / vec_norm v * (1 / vec_norm w) * (v \c w)" using vec_norm_ge_0 cw by auto finally have eq1: "(1 / vec_norm v \\<^sub>v v) \c (1 / vec_norm w \\<^sub>v w) = 1 / vec_norm v * (1 / vec_norm w) * (v \c w)" . 
then have "vec_norm v * vec_norm w * ((1 / vec_norm v \\<^sub>v v) \c (1 / vec_norm w \\<^sub>v w)) = (v \c w)" by (subst eq1, insert vec_norm_ge_0[of v n, OF dim_v asmv] vec_norm_ge_0[of w n, OF dim_w asmw], auto) } with dim show " w \ 0\<^sub>v (dim_vec w) \ v \ 0\<^sub>v (dim_vec v) \ v \c w = vec_norm v * vec_norm w * ((1 / vec_norm v \\<^sub>v v) \c (1 / vec_norm w \\<^sub>v w))" by auto qed lemma normalized_vec_norm : fixes v :: "complex vec" assumes dim_v: "v \ carrier_vec n" and neq0: "v \ 0\<^sub>v n" shows "vec_normalize v \c vec_normalize v = 1" unfolding vec_normalize_def proof (simp, rule conjI) show "v = 0\<^sub>v (dim_vec v) \ v \c v = 1" using neq0 dim_v by auto have dim_a: "(vec_normalize v) \ carrier_vec n" "conjugate (vec_normalize v) \ carrier_vec n" using dim_v vec_normalize_def by auto note dim = dim_v dim_a have nvge0: "vec_norm v > 0" using vec_norm_ge_0 neq0 dim_v by auto then have vvvv: "v \c v = (vec_norm v) * (vec_norm v)" unfolding vec_norm_def by (metis power2_csqrt power2_eq_square) from nvge0 have "conjugate (vec_norm v) = vec_norm v" by (simp add: complex_eq_iff complex_is_Real_iff) then have "v \c (1 / vec_norm v \\<^sub>v v) = 1 / vec_norm v * (v \c v)" by (subst conjugate_smult_vec, auto) also have "\ = 1 / vec_norm v * vec_norm v * vec_norm v" using vvvv by auto also have "\ = vec_norm v" by auto finally have "v \c (1 / vec_norm v \\<^sub>v v) = vec_norm v". then show "v \ 0\<^sub>v (dim_vec v) \ vec_norm v \ 0 \ v \c (1 / vec_norm v \\<^sub>v v) = vec_norm v" using neq0 nvge0 by auto qed lemma normalize_zero: assumes "v \ carrier_vec n" shows "vec_normalize v = 0\<^sub>v n \ v = 0\<^sub>v n" proof show "v = 0\<^sub>v n \ vec_normalize v = 0\<^sub>v n" unfolding vec_normalize_def by auto next have "v \ 0\<^sub>v n \ vec_normalize v \ 0\<^sub>v n" unfolding vec_normalize_def proof (simp, rule impI) assume asm: "v \ 0\<^sub>v n" then have "vec_norm v > 0" using vec_norm_ge_0 assms by auto then have nvge0: "1 / vec_norm v > 0" by (simp add: complex_is_Real_iff) have "\k < n. 
v $ k \ 0" using asm assms by auto then obtain k where kn: "k < n" and vkneq0: "v $ k \ 0" by auto then have "(1 / vec_norm v \\<^sub>v v) $ k = (1 / vec_norm v) * (v $ k)" using assms carrier_vecD index_smult_vec(1) by blast with nvge0 vkneq0 have "(1 / vec_norm v \\<^sub>v v) $ k \ 0" by auto then show "1 / vec_norm v \\<^sub>v v \ 0\<^sub>v n" using assms kn by fastforce qed then show "vec_normalize v = 0\<^sub>v n \ v = 0\<^sub>v n" by auto qed lemma normalize_normalize[simp]: "vec_normalize (vec_normalize v) = vec_normalize v" proof (rule disjE[of "v = 0\<^sub>v (dim_vec v)" "v \ 0\<^sub>v (dim_vec v)"], auto) let ?n = "dim_vec v" { assume "v = 0\<^sub>v ?n" then have "vec_normalize v = v" unfolding vec_normalize_def by auto then show "vec_normalize (vec_normalize v) = vec_normalize v" by auto } assume neq0: "v \ 0\<^sub>v ?n" have dim: "v \ carrier_vec ?n" by auto have "vec_norm (vec_normalize v) = 1" unfolding vec_norm_def using normalized_vec_norm[OF dim neq0] by auto then show "vec_normalize (vec_normalize v) = vec_normalize v" by (subst (1) vec_normalize_def, simp) qed subsection \Spectral decomposition of normal complex matrices\ lemma normalize_keep_corthogonal: fixes vs :: "complex vec list" assumes cor: "corthogonal vs" and dims: "set vs \ carrier_vec n" shows "corthogonal (map vec_normalize vs)" unfolding corthogonal_def proof (rule allI, rule impI, rule allI, rule impI, goal_cases) case c: (1 i j) let ?m = "length vs" have len: "length (map vec_normalize vs) = ?m" by auto have dim: "\k. k < ?m \ (vs ! k) \ carrier_vec n" using dims by auto have map: "\k. k < ?m \ map vec_normalize vs ! k = vec_normalize (vs ! k)" by auto have eq1: "\j k. j < ?m \ k < ?m \ ((vs ! j) \c (vs ! k) = 0) = (j \ k)" using assms unfolding corthogonal_def by auto then have "\k. k < ?m \ (vs ! k) \c (vs ! k) \ 0 " by auto then have "\k. k < ?m \ (vs ! k) \ (0\<^sub>v n)" using dim by (auto simp add: conjugate_square_eq_0_vec[of _ n, OF dim]) then have vnneq0: "\k. k < ?m \ vec_norm (vs ! k) \ 0" using vec_norm_zero[OF dim] by auto then have i0: "vec_norm (vs ! i) \ 0" and j0: "vec_norm (vs ! j) \ 0" using c by auto have "(vs ! i) \c (vs ! j) = vec_norm (vs ! i) * vec_norm (vs ! j) * (vec_normalize (vs ! i) \c vec_normalize (vs ! j))" by (subst normalized_cscalar_prod[of "vs ! i" n "vs ! j"], auto, insert dim c, auto) with i0 j0 have "(vec_normalize (vs ! i) \c vec_normalize (vs ! j) = 0) = ((vs ! i) \c (vs ! j) = 0)" by auto with eq1 c have "(vec_normalize (vs ! i) \c vec_normalize (vs ! j) = 0) = (i \ j)" by auto with map c show "(map vec_normalize vs ! i \c map vec_normalize vs ! j = 0) = (i \ j)" by auto qed lemma normalized_corthogonal_mat_is_unitary: assumes W: "set ws \ carrier_vec n" and orth: "corthogonal ws" and len: "length ws = n" shows "unitary (mat_of_cols n (map vec_normalize ws))" (is "unitary ?W") proof - define vs where "vs = map vec_normalize ws" define W where "W = mat_of_cols n vs" have W': "set vs \ carrier_vec n" using assms vs_def by auto then have W'': "\k. k < length vs \ vs ! k \ carrier_vec n" by auto have orth': "corthogonal vs" using assms normalize_keep_corthogonal vs_def by auto have len'[simp]: "length vs = n" using assms vs_def by auto have dimW: "W \ carrier_mat n n" using W_def len by auto have "adjoint W \ carrier_mat n n" using dimW by auto then have dimaW: "mat_adjoint W \ carrier_mat n n" by auto { fix i j assume i: "i < n" and j: "j < n" have dimws: "(ws ! i) \ carrier_vec n" "(ws ! j) \ carrier_vec n" using W len i j by auto have "(ws ! i) \c (ws ! 
i) \ 0" "(ws ! j) \c (ws ! j) \ 0" using orth corthogonal_def[of ws] len i j by auto then have neq0: "(ws ! i) \ 0\<^sub>v n" "(ws ! j) \ 0\<^sub>v n" by (auto simp add: conjugate_square_eq_0_vec[of "ws ! i" n]) then have "vec_norm (ws ! i) > 0" "vec_norm (ws ! j) > 0" using vec_norm_ge_0 dimws by auto then have ge0: "vec_norm (ws ! i) * vec_norm (ws ! j) > 0" by auto have ws': "vs ! i = vec_normalize (ws ! i)" "vs ! j = vec_normalize (ws ! j)" using len i j vs_def by auto have ii1: "(vs ! i) \c (vs ! i) = 1" apply (simp add: ws') apply (rule normalized_vec_norm[of "ws ! i"], rule dimws, rule neq0) done have ij0: "i \ j \ (ws ! i) \c (ws ! j) = 0" using i j by (insert orth, auto simp add: corthogonal_def[of ws] len) have "i \ j \ (ws ! i) \c (ws ! j) = (vec_norm (ws ! i) * vec_norm (ws ! j)) * ((vs ! i) \c (vs ! j))" apply (auto simp add: ws') apply (rule normalized_cscalar_prod) apply (rule dimws, rule dimws) done with ij0 have ij0': "i \ j \ (vs ! i) \c (vs ! j) = 0" using ge0 by auto have cWk: "\k. k < n \ col W k = vs ! k" unfolding W_def apply (subst col_mat_of_cols) apply (auto simp add: W'') done have "(mat_adjoint W * W) $$ (j, i) = row (mat_adjoint W) j \ col W i" by (insert dimW i j dimaW, auto) also have "\ = conjugate (col W j) \ col W i" by (insert dimW i j dimaW, auto simp add: mat_adjoint_def) also have "\ = col W i \ conjugate (col W j)" using comm_scalar_prod[of "col W i" n] dimW by auto also have "\ = (vs ! i) \c (vs ! j)" using W_def col_mat_of_cols i j len cWk by auto finally have "(mat_adjoint W * W) $$ (j, i) = (vs ! i) \c (vs ! j)". then have "(mat_adjoint W * W) $$ (j, i) = (if (j = i) then 1 else 0)" by (auto simp add: ii1 ij0') } note maWW = this then have "mat_adjoint W * W = 1\<^sub>m n" unfolding one_mat_def using dimW dimaW by (auto simp add: maWW adjoint_def) then have iv0: "adjoint W * W = 1\<^sub>m n" by auto have dimaW: "adjoint W \ carrier_mat n n" using dimaW by auto then have iv1: "W * adjoint W = 1\<^sub>m n" using mat_mult_left_right_inverse dimW iv0 by auto then show "unitary W" unfolding unitary_def inverts_mat_def using dimW dimaW iv0 iv1 by auto qed lemma normalize_keep_eigenvector: assumes ev: "eigenvector A v e" and dim: "A \ carrier_mat n n" "v \ carrier_vec n" shows "eigenvector A (vec_normalize v) e" unfolding eigenvector_def proof show "vec_normalize v \ carrier_vec (dim_row A)" using dim by auto have eg: "A *\<^sub>v v = e \\<^sub>v v" using ev dim eigenvector_def by auto have vneq0: "v \ 0\<^sub>v n" using ev dim unfolding eigenvector_def by auto then have s0: "vec_normalize v \ 0\<^sub>v n" by (insert dim, subst normalize_zero[of v], auto) from vneq0 have vvge0: "vec_norm v > 0" using vec_norm_ge_0 dim by auto have s1: "A *\<^sub>v vec_normalize v = e \\<^sub>v vec_normalize v" unfolding vec_normalize_def using vneq0 dim apply (auto, simp add: mult_mat_vec) apply (subst eg, auto) done with s0 dim show "vec_normalize v \ 0\<^sub>v (dim_row A) \ A *\<^sub>v vec_normalize v = e \\<^sub>v vec_normalize v" by auto qed lemma four_block_mat_adjoint: fixes A B C D :: "'a::conjugatable_field mat" assumes dim: "A \ carrier_mat nr1 nc1" "B \ carrier_mat nr1 nc2" "C \ carrier_mat nr2 nc1" "D \ carrier_mat nr2 nc2" shows "adjoint (four_block_mat A B C D) = four_block_mat (adjoint A) (adjoint C) (adjoint B) (adjoint D)" by (rule eq_matI, insert dim, auto simp add: adjoint_eval) fun unitary_schur_decomposition :: "complex mat \ complex list \ complex mat \ complex mat \ complex mat" where "unitary_schur_decomposition A [] = (A, 1\<^sub>m (dim_row A), 
1\<^sub>m (dim_row A))" | "unitary_schur_decomposition A (e # es) = (let n = dim_row A; n1 = n - 1; v' = find_eigenvector A e; v = vec_normalize v'; ws0 = gram_schmidt n (basis_completion v); ws = map vec_normalize ws0; W = mat_of_cols n ws; W' = corthogonal_inv W; A' = W' * A * W; (A1,A2,A0,A3) = split_block A' 1 1; (B,P,Q) = unitary_schur_decomposition A3 es; z_row = (0\<^sub>m 1 n1); z_col = (0\<^sub>m n1 1); one_1 = 1\<^sub>m 1 in (four_block_mat A1 (A2 * P) A0 B, W * four_block_mat one_1 z_row z_col P, four_block_mat one_1 z_row z_col Q * W'))" theorem unitary_schur_decomposition: assumes A: "(A::complex mat) \ carrier_mat n n" and c: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" and B: "unitary_schur_decomposition A es = (B,P,Q)" shows "similar_mat_wit A B P Q \ upper_triangular B \ diag_mat B = es \ unitary P \ (Q = adjoint P)" using assms proof (induct es arbitrary: n A B P Q) case Nil with degree_monic_char_poly[of A n] show ?case by (auto intro: similar_mat_wit_refl simp: diag_mat_def unitary_zero) next case (Cons e es n A C P Q) let ?n1 = "n - 1" from Cons have A: "A \ carrier_mat n n" and dim: "dim_row A = n" by auto let ?cp = "char_poly A" from Cons(3) have cp: "?cp = [: -e, 1 :] * (\e \ es. [:- e, 1:])" by auto have mon: "monic (\e\ es. [:- e, 1:])" by (rule monic_prod_list, auto) have deg: "degree ?cp = Suc (degree (\e\ es. [:- e, 1:]))" unfolding cp by (subst degree_mult_eq, insert mon, auto) with degree_monic_char_poly[OF A] have n: "n \ 0" by auto define v' where "v' = find_eigenvector A e" define v where "v = vec_normalize v'" define b where "b = basis_completion v" define ws0 where "ws0 = gram_schmidt n b" define ws where "ws = map vec_normalize ws0" define W where "W = mat_of_cols n ws" define W' where "W' = corthogonal_inv W" define A' where "A' = W' * A * W" obtain A1 A2 A0 A3 where splitA': "split_block A' 1 1 = (A1,A2,A0,A3)" by (cases "split_block A' 1 1", auto) obtain B P' Q' where schur: "unitary_schur_decomposition A3 es = (B,P',Q')" by (cases "unitary_schur_decomposition A3 es", auto) let ?P' = "four_block_mat (1\<^sub>m 1) (0\<^sub>m 1 ?n1) (0\<^sub>m ?n1 1) P'" let ?Q' = "four_block_mat (1\<^sub>m 1) (0\<^sub>m 1 ?n1) (0\<^sub>m ?n1 1) Q'" have C: "C = four_block_mat A1 (A2 * P') A0 B" and P: "P = W * ?P'" and Q: "Q = ?Q' * W'" using Cons(4) unfolding unitary_schur_decomposition.simps Let_def list.sel dim v'_def[symmetric] v_def[symmetric] b_def[symmetric] ws0_def[symmetric] ws_def[symmetric] W'_def[symmetric] W_def[symmetric] A'_def[symmetric] split splitA' schur by auto have e: "eigenvalue A e" unfolding eigenvalue_root_char_poly[OF A] cp by simp from find_eigenvector[OF A e] have ev': "eigenvector A v' e" unfolding v'_def . then have "v' \ carrier_vec n" unfolding eigenvector_def using A by auto with ev' have ev: "eigenvector A v e" unfolding v_def using A dim normalize_keep_eigenvector by auto from this[unfolded eigenvector_def] have v[simp]: "v \ carrier_vec n" and v0: "v \ 0\<^sub>v n" using A by auto interpret cof_vec_space n "TYPE(complex)" . 
from basis_completion[OF v v0, folded b_def] have span_b: "span (set b) = carrier_vec n" and dist_b: "distinct b" and indep: "\ lin_dep (set b)" and b: "set b \ carrier_vec n" and hdb: "hd b = v" and len_b: "length b = n" by auto from hdb len_b n obtain vs where bv: "b = v # vs" by (cases b, auto) from gram_schmidt_result[OF b dist_b indep refl, folded ws0_def] have ws0: "set ws0 \ carrier_vec n" "corthogonal ws0" "length ws0 = n" by (auto simp: len_b) then have ws: "set ws \ carrier_vec n" "corthogonal ws" "length ws = n" unfolding ws_def using normalize_keep_corthogonal by auto have ws0ne: "ws0 \ []" using `length ws0 = n` n by auto from gram_schmidt_hd[OF v, of vs, folded bv] have hdws0: "hd ws0 = (vec_normalize v')" unfolding ws0_def v_def . have "hd ws = vec_normalize (hd ws0)" unfolding ws_def using hd_map[OF ws0ne] by auto then have hdws: "hd ws = v" unfolding v_def using normalize_normalize[of v'] hdws0 by auto have orth_W: "corthogonal_mat W" using orthogonal_mat_of_cols ws unfolding W_def. have W: "W \ carrier_mat n n" using ws unfolding W_def using mat_of_cols_carrier(1)[of n ws] by auto have W': "W' \ carrier_mat n n" unfolding W'_def corthogonal_inv_def using W by (auto simp: mat_of_rows_def) from corthogonal_inv_result[OF orth_W] have W'W: "inverts_mat W' W" unfolding W'_def . hence WW': "inverts_mat W W'" using mat_mult_left_right_inverse[OF W' W] W' W unfolding inverts_mat_def by auto have A': "A' \ carrier_mat n n" using W W' A unfolding A'_def by auto have A'A_wit: "similar_mat_wit A' A W' W" by (rule similar_mat_witI[of _ _ n], insert W W' A A' W'W WW', auto simp: A'_def inverts_mat_def) hence A'A: "similar_mat A' A" unfolding similar_mat_def by blast from similar_mat_wit_sym[OF A'A_wit] have simAA': "similar_mat_wit A A' W W'" by auto have eigen[simp]: "A *\<^sub>v v = e \\<^sub>v v" and v0: "v \ 0\<^sub>v n" using v_def v'_def find_eigenvector[OF A e] A normalize_keep_eigenvector unfolding eigenvector_def by auto let ?f = "(\ i. if i = 0 then e else 0)" have col0: "col A' 0 = vec n ?f" unfolding A'_def W'_def W_def using corthogonal_col_ev_0[OF A v v0 eigen n hdws ws]. from A' n have "dim_row A' = 1 + ?n1" "dim_col A' = 1 + ?n1" by auto from split_block[OF splitA' this] have A2: "A2 \ carrier_mat 1 ?n1" and A3: "A3 \ carrier_mat ?n1 ?n1" and A'block: "A' = four_block_mat A1 A2 A0 A3" by auto have A1id: "A1 = mat 1 1 (\ _. e)" using splitA'[unfolded split_block_def Let_def] arg_cong[OF col0, of "\ v. v $ 0"] A' n by (auto simp: col_def) have A1: "A1 \ carrier_mat 1 1" unfolding A1id by auto { fix i assume "i < ?n1" with arg_cong[OF col0, of "\ v. v $ Suc i"] A' have "A' $$ (Suc i, 0) = 0" by auto } note A'0 = this have A0id: "A0 = 0\<^sub>m ?n1 1" using splitA'[unfolded split_block_def Let_def] A'0 A' by auto have A0: "A0 \ carrier_mat ?n1 1" unfolding A0id by auto from cp char_poly_similar[OF A'A] have cp: "char_poly A' = [: -e,1 :] * (\ e \ es. [:- e, 1:])" by simp also have "char_poly A' = char_poly A1 * char_poly A3" unfolding A'block A0id by (rule char_poly_four_block_zeros_col[OF A1 A2 A3]) also have "char_poly A1 = [: -e,1 :]" by (simp add: A1id char_poly_defs det_def signof_def sign_def) finally have cp: "char_poly A3 = (\ e \ es. 
[:- e, 1:])" by (metis mult_cancel_left pCons_eq_0_iff zero_neq_one) from Cons(1)[OF A3 cp schur] have simIH: "similar_mat_wit A3 B P' Q'" and ut: "upper_triangular B" and diag: "diag_mat B = es" and uP': "unitary P'" and Q'P': "Q' = adjoint P'" by auto from similar_mat_witD2[OF A3 simIH] have B: "B \ carrier_mat ?n1 ?n1" and P': "P' \ carrier_mat ?n1 ?n1" and Q': "Q' \ carrier_mat ?n1 ?n1" and PQ': "P' * Q' = 1\<^sub>m ?n1" by auto have A0_eq: "A0 = P' * A0 * 1\<^sub>m 1" unfolding A0id using P' by auto have simA'C: "similar_mat_wit A' C ?P' ?Q'" unfolding A'block C by (rule similar_mat_wit_four_block[OF similar_mat_wit_refl[OF A1] simIH _ A0_eq A1 A3 A0], insert PQ' A2 P' Q', auto) have ut1: "upper_triangular A1" unfolding A1id by auto have ut: "upper_triangular C" unfolding C A0id by (intro upper_triangular_four_block[OF _ B ut1 ut], auto simp: A1id) from A1id have diagA1: "diag_mat A1 = [e]" unfolding diag_mat_def by auto from diag_four_block_mat[OF A1 B] have diag: "diag_mat C = e # es" unfolding diag diagA1 C by simp have aW: "adjoint W \ carrier_mat n n" using W by auto have aW': "adjoint W' \ carrier_mat n n" using W' by auto have "unitary W" using W_def ws_def ws0 normalized_corthogonal_mat_is_unitary by auto then have ivWaW: "inverts_mat W (adjoint W)" using unitary_def W aW by auto with WW' have W'aW: "W' = (adjoint W)" using inverts_mat_unique W W' aW by auto then have "adjoint W' = W" using adjoint_adjoint by auto with ivWaW have "inverts_mat W' (adjoint W')" using inverts_mat_symm W aW W'aW by auto then have "unitary W'" using unitary_def W' by auto have newP': "P' \ carrier_mat (n - Suc 0) (n - Suc 0)" using P' by auto have rl: "\ x1 x2 x3 x4 y1 y2 y3 y4 f. x1 = y1 \ x2 = y2 \ x3 = y3 \ x4 = y4 \ f x1 x2 x3 x4 = f y1 y2 y3 y4" by simp have Q'aP': "?Q' = adjoint ?P'" apply (subst four_block_mat_adjoint, auto simp add: newP') apply (rule rl[where f2 = four_block_mat]) apply (auto simp add: eq_matI adjoint_eval Q'P') done have "adjoint P = adjoint ?P' * adjoint W" using W newP' n apply (simp add: P) apply (subst adjoint_mult[of W, symmetric]) apply (auto simp add: W P' carrier_matD[of W n n]) done also have "\ = ?Q' * W'" using Q'aP' W'aW by auto also have "\ = Q" using Q by auto finally have QaP: "Q = adjoint P" .. from similar_mat_wit_trans[OF simAA' simA'C, folded P Q] have smw: "similar_mat_wit A C P Q" by blast then have dimP: "P \ carrier_mat n n" and dimQ: "Q \ carrier_mat n n" unfolding similar_mat_wit_def using A by auto from smw have "P * Q = 1\<^sub>m n" unfolding similar_mat_wit_def using A by auto then have "inverts_mat P Q" using inverts_mat_def dimP by auto then have uP: "unitary P" using QaP unitary_def dimP by auto from ut similar_mat_wit_trans[OF simAA' simA'C, folded P Q] diag uP QaP show ?case by blast qed lemma complex_mat_char_poly_factorizable: fixes A :: "complex mat" assumes "A \ carrier_mat n n" shows "\as. char_poly A = (\ a \ as. [:- a, 1:]) \ length as = n" proof - let ?ca = "char_poly A" have ex0: "\bs. Polynomial.smult (lead_coeff ?ca) (\b\bs. [:- b, 1:]) = ?ca \ length bs = degree ?ca" by (simp add: fundamental_theorem_algebra_factorized) then obtain bs where " Polynomial.smult (lead_coeff ?ca) (\b\bs. [:- b, 1:]) = ?ca \ length bs = degree ?ca" by auto moreover have "lead_coeff ?ca = (1::complex)" using assms degree_monic_char_poly by blast ultimately have ex1: "?ca = (\b\bs. 
[:- b, 1:]) \ length bs = degree ?ca" by auto moreover have "degree ?ca = n" by (simp add: assms degree_monic_char_poly) ultimately show ?thesis by auto qed lemma complex_mat_has_unitary_schur_decomposition: fixes A :: "complex mat" assumes "A \ carrier_mat n n" shows "\B P es. similar_mat_wit A B P (adjoint P) \ unitary P \ char_poly A = (\ (e :: complex) \ es. [:- e, 1:]) \ diag_mat B = es" proof - have "\es. char_poly A = (\ e \ es. [:- e, 1:]) \ length es = n" using assms by (simp add: complex_mat_char_poly_factorizable) then obtain es where es: "char_poly A = (\ e \ es. [:- e, 1:]) \ length es = n" by auto obtain B P Q where B: "unitary_schur_decomposition A es = (B,P,Q)" by (cases "unitary_schur_decomposition A es", auto) have "similar_mat_wit A B P Q \ upper_triangular B \ unitary P \ (Q = adjoint P) \ char_poly A = (\ (e :: complex) \ es. [:- e, 1:]) \ diag_mat B = es" using assms es B by (auto simp add: unitary_schur_decomposition) then show ?thesis by auto qed lemma normal_upper_triangular_matrix_is_diagonal: fixes A :: "'a::conjugatable_ordered_field mat" assumes "A \ carrier_mat n n" and tri: "upper_triangular A" and norm: "A * adjoint A = adjoint A * A" shows "diagonal_mat A" proof (rule disjE[of "n = 0" "n > 0"], blast) have dim: "dim_row A = n" "dim_col A = n" using assms by auto from norm have eq0: "\i j. (A * adjoint A)$$(i,j) = (adjoint A * A)$$(i,j)" by auto have nat_induct_strong: "\P. (P::nat\bool) 0 \ (\i. i < n \ (\k. k < i \ P k) \ P i) \ (\i. i < n \ P i)" by (metis dual_order.strict_trans infinite_descent0 linorder_neqE_nat) show "n = 0 \ ?thesis" using dim unfolding diagonal_mat_def by auto show "n > 0 \ ?thesis" unfolding diagonal_mat_def dim apply (rule allI, rule impI) apply (rule nat_induct_strong) proof (rule allI, rule impI, rule impI) assume asm: "n > 0" from tri upper_triangularD[of A 0 j] dim have z0: "\j. 0< j \ j < n \ A$$(j, 0) = 0" by auto then have ada00: "(adjoint A * A)$$(0,0) = conjugate (A$$(0,0)) * A$$(0,0)" using asm dim by (auto simp add: scalar_prod_def adjoint_eval sum.atLeast_Suc_lessThan) have aad00: "(A * adjoint A)$$(0,0) = (\k=0.. = A$$(0,0) * conjugate (A$$(0,0)) + (\k=1..k. A$$(0, k) * conjugate (A$$(0, k))"], auto) ultimately have f1tneq0: "(\k=(Suc 0)..k. k < n \ A$$(0, k) * conjugate (A$$(0, k)) \ 0" using conjugate_square_positive by auto have "\k. 1 \ k \ k < n \ A$$(0, k) * conjugate (A$$(0, k)) = 0" by (rule sum_nonneg_0[of "{1..j. 0 < n \ j < n \ 0 \ j \ A $$ (0, j) = 0" by auto { fix i assume asm: "n > 0" "i < n" "i > 0" and ih: "\k. k < i \ \j j \ A $$ (k, j) = 0" then have "\j. j i \ j \ A $$ (i, j) = 0" proof - have inter_part: "\b m e. (b::nat) < e \ b < m \ m < e \ {b.. {m..b m e f. (b::nat) < e \ b < m \ m < e \ (\k=b..k\{b..{m..b m e f. (b::nat) < e \ b < m \ m < e \ (\k=b..k=b..k=m..j. j < i \ A$$(i, j) = 0" by auto from tri upper_triangularD[of A j i] asm dim have zsi1: "\k. i < k \ k < n \ A$$(k, i) = 0" by auto have "(A * adjoint A)$$(i, i) = (\k=0.. = (\k=0..k=i.. = (\k=i.. = conjugate (A$$(i, i)) * A$$(i, i) + (\k=(Suc i)..k=(Suc i)..k=0.. = (\k=0..k=i.. = (\k=i.. = conjugate (A$$(i, i)) * A$$(i, i)" using asm zsi1 by (auto simp add: sum.atLeast_Suc_lessThan) finally have "(adjoint A * A)$$(i, i) = conjugate (A$$(i, i)) * A$$(i, i)" . with adaii eq0 have fsitoneq0: "(\k=(Suc i)..k. k i < k \ conjugate (A$$(i, k)) * A$$(i, k) = 0" by (rule sum_nonneg_0[of "{(Suc i)..k. k i A $$ (i, k) = 0" by auto with zsi0 show "\j. j i \ j \ A $$ (i, j) = 0" by (metis linorder_neqE_nat) qed } with case0 show "\i ia. 
0 < n \ i < n \ ia < n \ (\k. k < ia \ \j j \ A $$ (k, j) = 0) \ \j j \ A $$ (ia, j) = 0" by auto qed qed lemma normal_complex_mat_has_spectral_decomposition: assumes A: "(A::complex mat) \ carrier_mat n n" and normal: "A * adjoint A = adjoint A * A" and c: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" and B: "unitary_schur_decomposition A es = (B,P,Q)" shows "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P" proof - have smw: "similar_mat_wit A B P (adjoint P)" and ut: "upper_triangular B" and uP: "unitary P" and dB: "diag_mat B = es" and "(Q = adjoint P)" using assms by (auto simp add: unitary_schur_decomposition) from smw have dimP: "P \ carrier_mat n n" and dimB: "B \ carrier_mat n n" and dimaP: "adjoint P \ carrier_mat n n" unfolding similar_mat_wit_def using A by auto have dimaB: "adjoint B \ carrier_mat n n" using dimB by auto note dims = dimP dimB dimaP dimaB have "inverts_mat P (adjoint P)" using unitary_def uP dims by auto then have iaPP: "inverts_mat (adjoint P) P" using inverts_mat_symm using dims by auto have aPP: "adjoint P * P = 1\<^sub>m n" using dims iaPP unfolding inverts_mat_def by auto from smw have A: "A = P * B * (adjoint P)" unfolding similar_mat_wit_def Let_def by auto then have aA: "adjoint A = P * adjoint B * adjoint P" by (insert A dimP dimB dimaP, auto simp add: adjoint_mult[of _ n n _ n] adjoint_adjoint) have "A * adjoint A = (P * B * adjoint P) * (P * adjoint B * adjoint P)" using A aA by auto also have "\ = P * B * (adjoint P * P) * (adjoint B * adjoint P)" using dims by (mat_assoc n) also have "\ = P * B * 1\<^sub>m n * (adjoint B * adjoint P)" using dims aPP by (auto) also have "\ = P * B * adjoint B * adjoint P" using dims by (mat_assoc n) finally have "A * adjoint A = P * B * adjoint B * adjoint P". then have "adjoint P * (A * adjoint A) * P = (adjoint P * P) * B * adjoint B * (adjoint P * P)" using dims by (simp add: assoc_mult_mat[of _ n n _ n _ n]) also have "\ = 1\<^sub>m n * B * adjoint B * 1\<^sub>m n" using aPP by auto also have "\ = B * adjoint B" using dims by auto finally have eq0: "adjoint P * (A * adjoint A) * P = B * adjoint B". have "adjoint A * A = (P * adjoint B * adjoint P) * (P * B * adjoint P)" using A aA by auto also have "\ = P * adjoint B * (adjoint P * P) * (B * adjoint P)" using dims by (mat_assoc n) also have "\ = P * adjoint B * 1\<^sub>m n * (B * adjoint P)" using dims aPP by (auto) also have "\ = P * adjoint B * B * adjoint P" using dims by (mat_assoc n) finally have "adjoint A * A = P * adjoint B * B * adjoint P" by auto then have "adjoint P * (adjoint A * A) * P = (adjoint P * P) * adjoint B * B * (adjoint P * P)" using dims by (simp add: assoc_mult_mat[of _ n n _ n _ n]) also have "\ = 1\<^sub>m n * adjoint B * B * 1\<^sub>m n" using aPP by auto also have "\ = adjoint B * B" using dims by auto finally have eq1: "adjoint P * (adjoint A * A) * P = adjoint B * B". from normal have "adjoint P * (adjoint A * A) * P = adjoint P * (A * adjoint A) * P" by auto with eq0 eq1 have "B * adjoint B = adjoint B * B" by auto with ut dims have "diagonal_mat B" using normal_upper_triangular_matrix_is_diagonal by auto with smw uP dB show "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P" by auto qed lemma complex_mat_has_jordan_nf: fixes A :: "complex mat" assumes "A \ carrier_mat n n" shows "\n_as. jordan_nf A n_as" proof - have "\as. char_poly A = (\ a \ as. 
[:- a, 1:]) \ length as = n" using assms by (simp add: complex_mat_char_poly_factorizable) then show ?thesis using assms by (auto simp add: jordan_nf_iff_linear_factorization) qed lemma hermitian_is_normal: assumes "hermitian A" shows "A * adjoint A = adjoint A * A" using assms by (auto simp add: hermitian_def) lemma hermitian_eigenvalue_real: assumes dim: "(A::complex mat) \ carrier_mat n n" and hA: "hermitian A" and c: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" and B: "unitary_schur_decomposition A es = (B,P,Q)" shows "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P \ (\i < n. B$$(i, i) \ Reals)" proof - have normal: "A * adjoint A = adjoint A * A" using hA hermitian_is_normal by auto then have schur: "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P" using normal_complex_mat_has_spectral_decomposition[OF dim normal c B] by (simp) then have "similar_mat_wit A B P (adjoint P)" and uP: "unitary P" and dB: "diag_mat B = es" using assms by auto then have A: "A = P * B * (adjoint P)" and dimB: "B \ carrier_mat n n" and dimP: "P \ carrier_mat n n" unfolding similar_mat_wit_def Let_def using dim by auto then have dimaB: "adjoint B \ carrier_mat n n" by auto have "adjoint A = adjoint (adjoint P) * adjoint (P * B)" apply (subst A) apply (subst adjoint_mult[of "P * B" n n "adjoint P" n]) apply (insert dimB dimP, auto) done also have "\ = P * adjoint (P * B)" by (auto simp add: adjoint_adjoint) also have "\ = P * (adjoint B * adjoint P)" using dimB dimP by (auto simp add: adjoint_mult) also have "\ = P * adjoint B * adjoint P" using dimB dimP by (subst assoc_mult_mat[symmetric, of P n n "adjoint B" n "adjoint P" n], auto) finally have aA: "adjoint A = P * adjoint B * adjoint P" . have "A = adjoint A" using hA hermitian_def[of A] by auto then have "P * B * adjoint P = P * adjoint B * adjoint P" using A aA by auto then have BaB: "B = adjoint B" using unitary_elim[OF dimB dimaB dimP] uP by auto { fix i assume "i < n" then have "B$$(i, i) = conjugate (B$$(i, i))" apply (subst BaB) by (insert dimB, simp add: adjoint_eval) then have "B$$(i, i) \ Reals" unfolding conjugate_complex_def using Reals_cnj_iff by auto } then have "\i Reals" by auto with schur show ?thesis by auto qed lemma hermitian_inner_prod_real: assumes dimA: "(A::complex mat) \ carrier_mat n n" and dimv: "v \ carrier_vec n" and hA: "hermitian A" shows "inner_prod v (A *\<^sub>v v) \ Reals" proof - obtain es where es: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" using complex_mat_char_poly_factorizable dimA by auto obtain B P Q where "unitary_schur_decomposition A es = (B,P,Q)" by (cases "unitary_schur_decomposition A es", auto) then have "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P \ (\i < n. B$$(i, i) \ Reals)" using hermitian_eigenvalue_real dimA es hA by auto then have A: "A = P * B * (adjoint P)" and dB: "diagonal_mat B" and Bii: "\i. 
i < n \ B$$(i, i) \ Reals" and dimB: "B \ carrier_mat n n" and dimP: "P \ carrier_mat n n" and dimaP: "adjoint P \ carrier_mat n n" unfolding similar_mat_wit_def Let_def using dimA by auto define w where "w = (adjoint P) *\<^sub>v v" then have dimw: "w \ carrier_vec n" using dimaP dimv by auto from A have "inner_prod v (A *\<^sub>v v) = inner_prod v ((P * B * (adjoint P)) *\<^sub>v v)" by auto also have "\ = inner_prod v ((P * B) *\<^sub>v ((adjoint P) *\<^sub>v v))" using dimP dimB dimv by (subst assoc_mult_mat_vec[of _ n n "adjoint P" n], auto) also have "\ = inner_prod v (P *\<^sub>v (B *\<^sub>v ((adjoint P) *\<^sub>v v)))" using dimP dimB dimv dimaP by (subst assoc_mult_mat_vec[of _ n n "B" n], auto) also have "\ = inner_prod w (B *\<^sub>v w)" unfolding w_def apply (rule adjoint_def_alter[OF _ _ dimP]) apply (insert mult_mat_vec_carrier[OF dimB mult_mat_vec_carrier[OF dimaP dimv]], auto simp add: dimv) done also have "\ = (\i=0..j=0.. = (\i=0..v v) = (\i=0..i. i < n \ B$$(i, i) * conjugate (w$i) * w$i \ Reals" using Bii by (simp add: Reals_cnj_iff) then have "(\i=0.. Reals" by auto then show ?thesis using sum by auto qed lemma unit_vec_bracket: fixes A :: "complex mat" assumes dimA: "A \ carrier_mat n n" and i: "i < n" shows "inner_prod (unit_vec n i) (A *\<^sub>v (unit_vec n i)) = A$$(i, i)" proof - define w where "(w::complex vec) = unit_vec n i" have "A *\<^sub>v w = col A i" using i dimA w_def by auto then have 1: "inner_prod w (A *\<^sub>v w) = inner_prod w (col A i)" using w_def by auto have "conjugate w = w" unfolding w_def unit_vec_def conjugate_vec_def using i by auto then have 2: "inner_prod w (col A i) = A$$(i, i)" using i dimA w_def by auto from 1 2 show "inner_prod w (A *\<^sub>v w) = A$$(i, i)" by auto qed lemma spectral_decomposition_extract_diag: fixes P B :: "complex mat" assumes dimP: "P \ carrier_mat n n" and dimB: "B \ carrier_mat n n" and uP: "unitary P" and dB: "diagonal_mat B" and i: "i < n" shows "inner_prod (col P i) (P * B * (adjoint P) *\<^sub>v (col P i)) = B$$(i, i)" proof - have dimaP: "adjoint P\ carrier_mat n n" using dimP by auto have uaP: "unitary (adjoint P)" using unitary_adjoint uP dimP by auto then have "inverts_mat (adjoint P) P" by (simp add: unitary_def adjoint_adjoint) then have iv: "(adjoint P) * P = 1\<^sub>m n" using dimaP inverts_mat_def by auto define v where "v = col P i" then have dimv: "v \ carrier_vec n" using dimP by auto define w where "(w::complex vec) = unit_vec n i" then have dimw: "w \ carrier_vec n" by auto have BaPv: "B *\<^sub>v (adjoint P *\<^sub>v v) \ carrier_vec n" using dimB dimaP dimv by auto have "(adjoint P) *\<^sub>v v = (col (adjoint P * P) i)" by (simp add: col_mult2[OF dimaP dimP i, symmetric] v_def) then have aPv: "(adjoint P) *\<^sub>v v = w" by (auto simp add: iv i w_def) have "inner_prod v (P * B * (adjoint P) *\<^sub>v v) = inner_prod v ((P * B) *\<^sub>v ((adjoint P) *\<^sub>v v))" using dimP dimB dimv by (subst assoc_mult_mat_vec[of _ n n "adjoint P" n], auto) also have "\ = inner_prod v (P *\<^sub>v (B *\<^sub>v ((adjoint P) *\<^sub>v v)))" using dimP dimB dimv dimaP by (subst assoc_mult_mat_vec[of _ n n "B" n], auto) also have "\ = inner_prod (adjoint P *\<^sub>v v) (B *\<^sub>v (adjoint P *\<^sub>v v))" by (simp add: adjoint_def_alter[OF dimv BaPv dimP]) also have "\ = inner_prod w (B *\<^sub>v w)" using aPv by auto also have "\ = B$$(i, i)" using w_def unit_vec_bracket dimB i by auto finally show "inner_prod v (P * B * (adjoint P) *\<^sub>v v) = B$$(i, i)". 
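(* Worked illustration, an added comment rather than part of the original theory:
   for n = 2 take P = (1 / sqrt 2) times the matrix [[1, 1], [1, -1]] (unitary and
   real symmetric, so adjoint P = P) and B = diag(b1, b2).  The column col P 0 is
   v = (1/sqrt 2, 1/sqrt 2); then adjoint P *\<^sub>v v = unit_vec 2 0, and hence
     inner_prod v ((P * B * adjoint P) *\<^sub>v v)
       = inner_prod (unit_vec 2 0) (B *\<^sub>v unit_vec 2 0) = b1,
   i.e. the statement just proved, instantiated with i = 0. *)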
qed lemma hermitian_inner_prod_zero: fixes A :: "complex mat" assumes dimA: "A \ carrier_mat n n" and hA: "hermitian A" and zero: "\v\carrier_vec n. inner_prod v (A *\<^sub>v v) = 0" shows "A = 0\<^sub>m n n" proof - obtain es where es: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" using complex_mat_char_poly_factorizable dimA by auto obtain B P Q where "unitary_schur_decomposition A es = (B,P,Q)" by (cases "unitary_schur_decomposition A es", auto) then have "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P \ (\i < n. B$$(i, i) \ Reals)" using hermitian_eigenvalue_real dimA es hA by auto then have A: "A = P * B * (adjoint P)" and dB: "diagonal_mat B" and Bii: "\i. i < n \ B$$(i, i) \ Reals" and dimB: "B \ carrier_mat n n" and dimP: "P \ carrier_mat n n" and dimaP: "adjoint P \ carrier_mat n n" and uP: "unitary P" unfolding similar_mat_wit_def Let_def unitary_def using dimA by auto then have uaP: "unitary (adjoint P)" using unitary_adjoint by auto then have "inverts_mat (adjoint P) P" by (simp add: unitary_def adjoint_adjoint) then have iv: "adjoint P * P = 1\<^sub>m n" using dimaP inverts_mat_def by auto have "B = 0\<^sub>m n n" proof- { fix i assume i: "i < n" define v where "v = col P i" then have dimv: "v \ carrier_vec n" using v_def dimP by auto have "inner_prod v (A *\<^sub>v v) = B$$(i, i)" unfolding A v_def using spectral_decomposition_extract_diag[OF dimP dimB uP dB i] by auto moreover have "inner_prod v (A *\<^sub>v v) = 0" using dimv zero by auto ultimately have "B$$(i, i) = 0" by auto } note zB = this show "B = 0\<^sub>m n n" by (insert zB dB dimB, rule eq_matI, auto simp add: diagonal_mat_def) qed then show "A = 0\<^sub>m n n" using A dimB dimP dimaP by auto qed lemma complex_mat_decomposition_to_hermitian: fixes A :: "complex mat" assumes dim: "A \ carrier_mat n n" shows "\B C. hermitian B \ hermitian C \ A = B + \ \\<^sub>m C \ B \ carrier_mat n n \ C \ carrier_mat n n" proof - obtain B C where B: "B = (1 / 2) \\<^sub>m (A + adjoint A)" and C: "C = (-\ / 2) \\<^sub>m (A - adjoint A)" by auto then have dimB: "B \ carrier_mat n n" and dimC: "C \ carrier_mat n n" using dim by auto have "hermitian B" unfolding B hermitian_def using dim by (auto simp add: adjoint_eval) moreover have "hermitian C" unfolding C hermitian_def using dim apply (subst eq_matI) apply (auto simp add: adjoint_eval algebra_simps) done moreover have "A = B + \ \\<^sub>m C" using dim B C apply (subst eq_matI) apply (auto simp add: adjoint_eval algebra_simps) done ultimately show ?thesis using dimB dimC by auto qed subsection \Outer product\ definition outer_prod :: "'a::conjugatable_field vec \ 'a vec \ 'a mat" where "outer_prod v w = mat (dim_vec v) 1 (\(i, j). v $ i) * mat 1 (dim_vec w) (\(i, j). (conjugate w) $ j)" lemma outer_prod_dim[simp]: fixes v w :: "'a::conjugatable_field vec" assumes v: "v \ carrier_vec n" and w: "w \ carrier_vec m" shows "outer_prod v w \ carrier_mat n m" unfolding outer_prod_def using assms mat_of_cols_carrier mat_of_rows_carrier by auto lemma mat_of_vec_mult_eq_scalar_prod: fixes v w :: "'a::conjugatable_field vec" assumes "v \ carrier_vec n" and "w \ carrier_vec n" shows "mat 1 (dim_vec v) (\(i, j). (conjugate v) $ j) * mat (dim_vec w) 1 (\(i, j). w $ i) = mat 1 1 (\k. inner_prod v w)" apply (rule eq_matI) using assms apply (simp add: scalar_prod_def) apply (rule sum.cong) by auto lemma one_dim_mat_mult_is_scale: fixes A B :: "('a::conjugatable_field mat)" assumes "B \ carrier_mat 1 n" shows "(mat 1 1 (\k. 
a)) * B = a \\<^sub>m B" apply (rule eq_matI) using assms by (auto simp add: scalar_prod_def) lemma outer_prod_mult_outer_prod: fixes a b c d :: "'a::conjugatable_field vec" assumes a: "a \ carrier_vec d1" and b: "b \ carrier_vec d2" and c: "c \ carrier_vec d2" and d: "d \ carrier_vec d3" shows "outer_prod a b * outer_prod c d = inner_prod b c \\<^sub>m outer_prod a d" proof - let ?ma = "mat (dim_vec a) 1 (\(i, j). a $ i)" let ?mb = "mat 1 (dim_vec b) (\(i, j). (conjugate b) $ j)" let ?mc = "mat (dim_vec c) 1 (\(i, j). c $ i)" let ?md = "mat 1 (dim_vec d) (\(i, j). (conjugate d) $ j)" have "(?ma * ?mb) * (?mc * ?md) = ?ma * (?mb * (?mc * ?md))" apply (subst assoc_mult_mat[of "?ma" d1 1 "?mb" d2 "?mc * ?md" d3] ) using assms by auto also have "\ = ?ma * ((?mb * ?mc) * ?md)" apply (subst assoc_mult_mat[symmetric, of "?mb" 1 d2 "?mc" 1 "?md" d3]) using assms by auto also have "\ = ?ma * ((mat 1 1 (\k. inner_prod b c)) * ?md)" apply (subst mat_of_vec_mult_eq_scalar_prod[of b d2 c]) using assms by auto also have "\ = ?ma * (inner_prod b c \\<^sub>m ?md)" apply (subst one_dim_mat_mult_is_scale) using assms by auto also have "\ = (inner_prod b c) \\<^sub>m (?ma * ?md)" using assms by auto finally show ?thesis unfolding outer_prod_def by auto qed lemma index_outer_prod: fixes v w :: "'a::conjugatable_field vec" assumes v: "v \ carrier_vec n" and w: "w \ carrier_vec m" and ij: "i < n" "j < m" shows "(outer_prod v w)$$(i, j) = v $ i * conjugate (w $ j)" unfolding outer_prod_def using assms by (simp add: scalar_prod_def) lemma mat_of_vec_mult_vec: fixes a b c :: "'a::conjugatable_field vec" assumes a: "a \ carrier_vec d" and b: "b \ carrier_vec d" shows "mat 1 d (\(i, j). (conjugate a) $ j) *\<^sub>v b = vec 1 (\k. inner_prod a b)" apply (rule eq_vecI) apply (simp add: scalar_prod_def carrier_vecD[OF a] carrier_vecD[OF b]) apply (rule sum.cong) by auto lemma mat_of_vec_mult_one_dim_vec: fixes a b :: "'a::conjugatable_field vec" assumes a: "a \ carrier_vec d" shows "mat d 1 (\(i, j). a $ i) *\<^sub>v vec 1 (\k. c) = c \\<^sub>v a" apply (rule eq_vecI) by (auto simp add: scalar_prod_def carrier_vecD[OF a]) lemma outer_prod_mult_vec: fixes a b c :: "'a::conjugatable_field vec" assumes a: "a \ carrier_vec d1" and b: "b \ carrier_vec d2" and c: "c \ carrier_vec d2" shows "outer_prod a b *\<^sub>v c = inner_prod b c \\<^sub>v a" proof - have "outer_prod a b *\<^sub>v c = mat d1 1 (\(i, j). a $ i) * mat 1 d2 (\(i, j). (conjugate b) $ j) *\<^sub>v c" unfolding outer_prod_def using assms by auto also have "\ = mat d1 1 (\(i, j). a $ i) *\<^sub>v (mat 1 d2 (\(i, j). (conjugate b) $ j) *\<^sub>v c)" apply (subst assoc_mult_mat_vec) using assms by auto also have "\ = mat d1 1 (\(i, j). a $ i) *\<^sub>v vec 1 (\k. inner_prod b c)" using mat_of_vec_mult_vec[of b] assms by auto also have "\ = inner_prod b c \\<^sub>v a" using mat_of_vec_mult_one_dim_vec assms by auto finally show ?thesis by auto qed lemma trace_outer_prod_right: fixes A :: "'a::conjugatable_field mat" and v w :: "'a vec" assumes A: "A \ carrier_mat n n" and v: "v \ carrier_vec n" and w: "w \ carrier_vec n" shows "trace (A * outer_prod v w) = inner_prod w (A *\<^sub>v v)" (is "?lhs = ?rhs") proof - define B where "B = outer_prod v w" then have B: "B \ carrier_mat n n" using assms by auto have "trace(A * B) = (\i = 0..j = 0.. = (\i = 0..j = 0..i = 0..j = 0..i = 0..j = 0.. 
carrier_vec n" and w: "w \ carrier_vec n" shows "trace (outer_prod v w) = inner_prod w v" (is "?lhs = ?rhs") proof - have "(1\<^sub>m n) * (outer_prod v w) = outer_prod v w" apply (subst left_mult_one_mat) using outer_prod_dim assms by auto moreover have "1\<^sub>m n *\<^sub>v v = v" using assms by auto ultimately show ?thesis using trace_outer_prod_right[of "1\<^sub>m n" n v w] assms by auto qed lemma inner_prod_outer_prod: fixes a b c d :: "'a::conjugatable_field vec" assumes a: "a \ carrier_vec n" and b: "b \ carrier_vec n" and c: "c \ carrier_vec m" and d: "d \ carrier_vec m" shows "inner_prod a (outer_prod b c *\<^sub>v d) = inner_prod a b * inner_prod c d" (is "?lhs = ?rhs") proof - define P where "P = outer_prod b c" then have dimP: "P \ carrier_mat n m" using assms by auto have "inner_prod a (P *\<^sub>v d) = (\i=0..j=0.. = (\i=0..j=0..i=0..j=0..i=0..j=0.. = (\i=0..j=0..Semi-definite matrices\ definition positive :: "complex mat \ bool" where "positive A \ A \ carrier_mat (dim_col A) (dim_col A) \ (\v. dim_vec v = dim_col A \ inner_prod v (A *\<^sub>v v) \ 0)" lemma positive_iff_normalized_vec: "positive A \ A \ carrier_mat (dim_col A) (dim_col A) \ (\v. (dim_vec v = dim_col A \ vec_norm v = 1) \ inner_prod v (A *\<^sub>v v) \ 0)" proof (rule) assume "positive A" then show "A \ carrier_mat (dim_col A) (dim_col A) \ (\v. dim_vec v = dim_col A \ vec_norm v = 1 \ 0 \ inner_prod v (A *\<^sub>v v))" unfolding positive_def by auto next define n where "n = dim_col A" assume "A \ carrier_mat (dim_col A) (dim_col A) \ (\v. dim_vec v = dim_col A \ vec_norm v = 1 \ 0 \ inner_prod v (A *\<^sub>v v))" then have A: "A \ carrier_mat (dim_col A) (dim_col A)" and geq0: "\v. dim_vec v = dim_col A \ vec_norm v = 1 \ 0 \ inner_prod v (A *\<^sub>v v)" by auto then have dimA: "A \ carrier_mat n n" using n_def[symmetric] by auto { fix v assume dimv: "(v::complex vec) \ carrier_vec n" have "0 \ inner_prod v (A *\<^sub>v v)" proof (cases "v = 0\<^sub>v n") case True then show "0 \ inner_prod v (A *\<^sub>v v)" using dimA by auto next case False then have 1: "vec_norm v > 0" using vec_norm_ge_0 dimv by auto then have cnv: "cnj (vec_norm v) = vec_norm v" using Reals_cnj_iff complex_is_Real_iff by auto define w where "w = vec_normalize v" then have dimw: "w \ carrier_vec n" using dimv by auto have nvw: "v = vec_norm v \\<^sub>v w" using w_def vec_eq_norm_smult_normalized by auto have "vec_norm w = 1" using normalized_vec_norm[OF dimv False] vec_norm_def w_def by auto then have 2: "0 \ inner_prod w (A *\<^sub>v w)" using geq0 dimw dimA by auto have "inner_prod v (A *\<^sub>v v) = vec_norm v * vec_norm v * inner_prod w (A *\<^sub>v w)" using dimA dimv dimw apply (subst (1 2) nvw) apply (subst mult_mat_vec, simp, simp) apply (subst scalar_prod_smult_left[of "(A *\<^sub>v w)" "conjugate (vec_norm v \\<^sub>v w)" "vec_norm v"], simp) apply (simp add: conjugate_smult_vec cnv) done also have "\ \ 0" using 1 2 by auto finally show "0 \ inner_prod v (A *\<^sub>v v)" by auto qed } then have geq: "\v. 
dim_vec v = dim_col A \ 0 \ inner_prod v (A *\<^sub>v v)" using dimA by auto show "positive A" unfolding positive_def by (rule, simp add: A, rule geq) qed lemma positive_is_hermitian: fixes A :: "complex mat" assumes pA: "positive A" shows "hermitian A" proof - define n where "n = dim_col A" then have dimA: "A \ carrier_mat n n" using positive_def pA by auto obtain B C where B: "hermitian B" and C: "hermitian C" and A: "A = B + \ \\<^sub>m C" and dimB: "B \ carrier_mat n n" and dimC: "C \ carrier_mat n n" and dimiC: "\ \\<^sub>m C \ carrier_mat n n" using complex_mat_decomposition_to_hermitian[OF dimA] by auto { fix v :: "complex vec" assume dimv: "v \ carrier_vec n" have dimvA: "dim_vec v = dim_col A" using dimv dimA by auto have "inner_prod v (A *\<^sub>v v) = inner_prod v (B *\<^sub>v v) + inner_prod v ((\ \\<^sub>m C) *\<^sub>v v)" unfolding A using dimB dimiC dimv by (simp add: add_mult_distrib_mat_vec inner_prod_distrib_right) moreover have "inner_prod v ((\ \\<^sub>m C) *\<^sub>v v) = \ * inner_prod v (C *\<^sub>v v)" using dimv dimC apply (simp add: scalar_prod_def sum_distrib_left cong: sum.cong) apply (rule sum.cong, auto) done ultimately have ABC: "inner_prod v (A *\<^sub>v v) = inner_prod v (B *\<^sub>v v) + \ * inner_prod v (C *\<^sub>v v)" by auto moreover have "inner_prod v (B *\<^sub>v v) \ Reals" using B dimB dimv hermitian_inner_prod_real by auto moreover have "inner_prod v (C *\<^sub>v v) \ Reals" using C dimC dimv hermitian_inner_prod_real by auto moreover have "inner_prod v (A *\<^sub>v v) \ Reals" using pA unfolding positive_def apply (rule) apply (fold n_def) apply (simp add: complex_is_Real_iff[of "inner_prod v (A *\<^sub>v v)"]) apply (auto simp add: dimvA) done ultimately have "inner_prod v (C *\<^sub>v v) = 0" using of_real_Re by fastforce } then have "C = 0\<^sub>m n n" using hermitian_inner_prod_zero dimC C by auto then have "A = B" using A dimC dimB by auto then show "hermitian A" using B by auto qed lemma positive_eigenvalue_positive: assumes dimA: "(A::complex mat) \ carrier_mat n n" and pA: "positive A" and c: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" and B: "unitary_schur_decomposition A es = (B,P,Q)" shows "\i. i < n \ B$$(i, i) \ 0" proof - have hA: "hermitian A" using positive_is_hermitian pA by auto have "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P \ (\i < n. B$$(i, i) \ Reals)" using hermitian_eigenvalue_real dimA hA B c by auto then have A: "A = P * B * (adjoint P)" and dB: "diagonal_mat B" and Bii: "\i. i < n \ B$$(i, i) \ Reals" and dimB: "B \ carrier_mat n n" and dimP: "P \ carrier_mat n n" and dimaP: "adjoint P \ carrier_mat n n" and uP: "unitary P" unfolding similar_mat_wit_def Let_def unitary_def using dimA by auto { fix i assume i: "i < n" define v where "v = col P i" then have dimv: "v \ carrier_vec n" using v_def dimP by auto have "inner_prod v (A *\<^sub>v v) = B$$(i, i)" unfolding A v_def using spectral_decomposition_extract_diag[OF dimP dimB uP dB i] by auto moreover have "inner_prod v (A *\<^sub>v v) \ 0" using dimv pA dimA positive_def by auto ultimately show "B$$(i, i) \ 0" by auto } qed lemma diag_mat_mult_diag_mat: fixes B D :: "'a::semiring_0 mat" assumes dimB: "B \ carrier_mat n n" and dimD: "D \ carrier_mat n n" and dB: "diagonal_mat B" and dD: "diagonal_mat D" shows "B * D = mat n n (\(i,j). (if i = j then (B$$(i, i)) * (D$$(i, i)) else 0))" proof(rule eq_matI, auto) have Bij: "\x y. x < n \ y < n \ x \ y \ B$$(x, y) = 0" using dB diagonal_mat_def dimB by auto have Dij: "\x y. 
x < n \ y < n \ x \ y \ D$$(x, y) = 0" using dD diagonal_mat_def dimD by auto { fix i j assume ij: "i < n" "j < n" have "(B * D) $$ (i, j) = (\k=0.. = B$$(i, i) * D$$(i, j)" apply (simp add: sum.remove[of _i] ij) apply (simp add: Bij Dij ij) done finally have "(B * D) $$ (i, j) = B$$(i, i) * D$$(i, j)". } note BDij = this from BDij show "\j. j < n \ (B * D) $$ (j, j) = B $$ (j, j) * D $$ (j, j)" by auto from BDij show "\i j. i < n \ j < n \ i \ j \ (B * D) $$ (i, j) = 0" using Bij Dij by auto from assms show "dim_row B = n" "dim_col D = n" by auto qed lemma positive_only_if_decomp: assumes dimA: "A \ carrier_mat n n" and pA: "positive A" shows "\M \ carrier_mat n n. M * adjoint M = A" proof - from pA have hA: "hermitian A" using positive_is_hermitian by auto obtain es where es: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" using complex_mat_char_poly_factorizable dimA by auto obtain B P Q where schur: "unitary_schur_decomposition A es = (B,P,Q)" by (cases "unitary_schur_decomposition A es", auto) then have "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ diag_mat B = es \ unitary P \ (\i < n. B$$(i, i) \ Reals)" using hermitian_eigenvalue_real dimA es hA by auto then have A: "A = P * B * (adjoint P)" and dB: "diagonal_mat B" and Bii: "\i. i < n \ B$$(i, i) \ Reals" and dimB: "B \ carrier_mat n n" and dimP: "P \ carrier_mat n n" and dimaP: "adjoint P \ carrier_mat n n" unfolding similar_mat_wit_def Let_def using dimA by auto have Bii: "\i. i < n \ B$$(i, i) \ 0" using pA dimA es schur positive_eigenvalue_positive by auto define D where "D = mat n n (\(i, j). (if (i = j) then csqrt (B$$(i, i)) else 0))" then have dimD: "D \ carrier_mat n n" and dimaD: "adjoint D \ carrier_mat n n" using dimB by auto have dD: "diagonal_mat D" using dB D_def unfolding diagonal_mat_def by auto then have daD: "diagonal_mat (adjoint D)" by (simp add: adjoint_eval diagonal_mat_def) have Dii: "\i. i < n \ D$$(i, i) = csqrt (B$$(i, i))" using dimD D_def by auto { fix i assume i: "i < n" define c where "c = csqrt (B$$(i, i))" have c: "c \ 0" using Bii i c_def by auto then have "conjugate c = c" using Reals_cnj_iff complex_is_Real_iff by auto then have "c * cnj c = B$$(i, i)" using c_def c unfolding conjugate_complex_def by (metis power2_csqrt power2_eq_square) } note cBii = this have "D * adjoint D = mat n n (\(i,j). (if (i = j) then B$$(i, i) else 0))" apply (simp add: diag_mat_mult_diag_mat[OF dimD dimaD dD daD]) apply (rule eq_matI, auto simp add: D_def adjoint_eval cBii) done also have "\ = B" using dimB dB[unfolded diagonal_mat_def] by auto finally have DaDB: "D * adjoint D = B". define M where "M = P * D" then have dimM: "M \ carrier_mat n n" using dimP dimD by auto have "M * adjoint M = (P * D) * (adjoint D * adjoint P)" using M_def adjoint_mult[OF dimP dimD] by auto also have "\ = P * (D * adjoint D) * (adjoint P)" using dimP dimD by (mat_assoc n) also have "\ = P * B * (adjoint P)" using DaDB by auto finally have "M * adjoint M = A" using A by auto with dimM show "\M \ carrier_mat n n. M * adjoint M = A" by auto qed lemma positive_if_decomp: assumes dimA: "A \ carrier_mat n n" and "\M. 
M * adjoint M = A" shows "positive A" proof - from assms obtain M where M: "M * adjoint M = A" by auto define m where "m = dim_col M" have dimM: "M \ carrier_mat n m" using M dimA m_def by auto { fix v assume dimv: "(v::complex vec) \ carrier_vec n" have dimaM: "adjoint M \ carrier_mat m n" using dimM by auto have dimaMv: "(adjoint M) *\<^sub>v v \ carrier_vec m" using dimaM dimv by auto have "inner_prod v (A *\<^sub>v v) = inner_prod v (M * adjoint M *\<^sub>v v)" using M by auto also have "\ = inner_prod v (M *\<^sub>v (adjoint M *\<^sub>v v))" using assoc_mult_mat_vec dimM dimaM dimv by auto also have "\ = inner_prod (adjoint M *\<^sub>v v) (adjoint M *\<^sub>v v)" using adjoint_def_alter[OF dimv dimaMv dimM] by auto also have "\ \ 0" using self_cscalar_prod_geq_0 by auto finally have "inner_prod v (A *\<^sub>v v) \ 0". } note geq0 = this from dimA geq0 show "positive A" using positive_def by auto qed lemma positive_iff_decomp: assumes dimA: "A \ carrier_mat n n" shows "positive A \ (\M\carrier_mat n n. M * adjoint M = A)" proof assume pA: "positive A" then show "\M\carrier_mat n n. M * adjoint M = A" using positive_only_if_decomp assms by auto next assume "\M\carrier_mat n n. M * adjoint M = A" then obtain M where M: "M * adjoint M = A" by auto then show "positive A" using M positive_if_decomp assms by auto qed lemma positive_dim_eq: assumes "positive A" shows "dim_row A = dim_col A" using carrier_matD(1)[of A "dim_col A" "dim_col A"] assms[unfolded positive_def] by simp lemma positive_zero: "positive (0\<^sub>m n n)" by (simp add: positive_def zero_mat_def mult_mat_vec_def scalar_prod_def) lemma positive_one: "positive (1\<^sub>m n)" proof (rule positive_if_decomp) show "1\<^sub>m n \ carrier_mat n n" by auto have "adjoint (1\<^sub>m n) = 1\<^sub>m n" using hermitian_one hermitian_def by auto then have "1\<^sub>m n * adjoint (1\<^sub>m n) = 1\<^sub>m n" by auto then show "\M. M * adjoint M = 1\<^sub>m n" by fastforce qed lemma positive_antisym: assumes pA: "positive A" and pnA: "positive (-A)" shows "A = 0\<^sub>m (dim_col A) (dim_col A)" proof - define n where "n = dim_col A" from pA have dimA: "A \ carrier_mat n n" and dimnA: "-A \ carrier_mat n n" using positive_def n_def by auto from pA have hA: "hermitian A" using positive_is_hermitian by auto obtain es where es: "char_poly A = (\ (e :: complex) \ es. [:- e, 1:])" using complex_mat_char_poly_factorizable dimA by auto obtain B P Q where schur: "unitary_schur_decomposition A es = (B,P,Q)" by (cases "unitary_schur_decomposition A es", auto) then have "similar_mat_wit A B P (adjoint P) \ diagonal_mat B \ unitary P" using hermitian_eigenvalue_real dimA es hA by auto then have A: "A = P * B * (adjoint P)" and dB: "diagonal_mat B" and uP: "unitary P" and dimB: "B \ carrier_mat n n" and dimnB: "-B \ carrier_mat n n" and dimP: "P \ carrier_mat n n" and dimaP: "adjoint P \ carrier_mat n n" unfolding similar_mat_wit_def Let_def using dimA by auto from es schur have geq0: "\i. 
i < n \ B$$(i, i) \ 0" using positive_eigenvalue_positive dimA pA by auto from A have nA: "-A = P * (-B) * (adjoint P)" using mult_smult_assoc_mat dimB dimP dimaP by auto from dB have dnB: "diagonal_mat (-B)" by (simp add: diagonal_mat_def) { fix i assume i: "i < n" define v where "v = col P i" then have dimv: "v \ carrier_vec n" using v_def dimP by auto have "inner_prod v ((-A) *\<^sub>v v) = (-B)$$(i, i)" unfolding nA v_def using spectral_decomposition_extract_diag[OF dimP dimnB uP dnB i] by auto moreover have "inner_prod v ((-A) *\<^sub>v v) \ 0" using dimv pnA dimnA positive_def by auto ultimately have "B$$(i, i) \ 0" using dimB i by auto moreover have "B$$(i, i) \ 0" using i geq0 by auto ultimately have "B$$(i, i) = 0" by (metis no_atp(10)) } then have "B = 0\<^sub>m n n" using dimB dB[unfolded diagonal_mat_def] by (subst eq_matI, auto) then show "A = 0\<^sub>m n n" using A dimB dimP dimaP by auto qed lemma positive_add: assumes pA: "positive A" and pB: "positive B" and dimA: "A \ carrier_mat n n" and dimB: "B \ carrier_mat n n" shows "positive (A + B)" unfolding positive_def proof have dimApB: "A + B \ carrier_mat n n" using dimA dimB by auto then show "A + B \ carrier_mat (dim_col (A + B)) (dim_col (A + B))" using carrier_matD[of "A+B"] by auto { fix v assume dimv: "(v::complex vec) \ carrier_vec n" have 1: "inner_prod v (A *\<^sub>v v) \ 0" using dimv pA[unfolded positive_def] dimA by auto have 2: "inner_prod v (B *\<^sub>v v) \ 0" using dimv pB[unfolded positive_def] dimB by auto have "inner_prod v ((A + B) *\<^sub>v v) = inner_prod v (A *\<^sub>v v) + inner_prod v (B *\<^sub>v v)" using dimA dimB dimv by (simp add: add_mult_distrib_mat_vec inner_prod_distrib_right) also have "\ \ 0" using 1 2 by auto finally have "inner_prod v ((A + B) *\<^sub>v v) \ 0". } note geq0 = this then have "\v. dim_vec v = n \ 0 \ inner_prod v ((A + B) *\<^sub>v v)" by auto then show "\v. dim_vec v = dim_col (A + B) \ 0 \ inner_prod v ((A + B) *\<^sub>v v)" using dimApB by auto qed lemma positive_trace: assumes "A \ carrier_mat n n" and "positive A" shows "trace A \ 0" using assms positive_iff_decomp trace_adjoint_positive by auto lemma positive_close_under_left_right_mult_adjoint: fixes M A :: "complex mat" assumes dM: "M \ carrier_mat n n" and dA: "A \ carrier_mat n n" and pA: "positive A" shows "positive (M * A * adjoint M)" unfolding positive_def proof (rule, simp add: mult_carrier_mat[OF mult_carrier_mat[OF dM dA] adjoint_dim[OF dM]] carrier_matD[OF dM], rule, rule) have daM: "adjoint M \ carrier_mat n n" using dM by auto fix v::"complex vec" assume "dim_vec v = dim_col (M * A * adjoint M)" then have dv: "v \ carrier_vec n" using assms by auto then have "adjoint M *\<^sub>v v \ carrier_vec n" using daM by auto have assoc: "M * A * adjoint M *\<^sub>v v = M *\<^sub>v (A *\<^sub>v (adjoint M *\<^sub>v v))" using dA dM daM dv by (auto simp add: assoc_mult_mat_vec[of _ n n _ n]) have "inner_prod v (M * A * adjoint M *\<^sub>v v) = inner_prod (adjoint M *\<^sub>v v) (A *\<^sub>v (adjoint M *\<^sub>v v))" apply (subst assoc) apply (subst adjoint_def_alter[where ?A = "M"]) by (auto simp add: dv dA daM dM carrier_matD[OF dM] mult_mat_vec_carrier[of _ n n]) also have "\ \ 0" using dA dv daM pA positive_def by auto finally show "inner_prod v (M * A * adjoint M *\<^sub>v v) \ 0" by auto qed lemma positive_same_outer_prod: fixes v w :: "complex vec" assumes v: "v \ carrier_vec n" shows "positive (outer_prod v v)" proof - have d1: "adjoint (mat (dim_vec v) 1 (\(i, j). 
v $ i)) \ carrier_mat 1 n" using assms by auto have d2: "mat 1 (dim_vec v) (\(i, y). conjugate v $ y) \ carrier_mat 1 n" using assms by auto have dv: "dim_vec v = n" using assms by auto have "mat 1 (dim_vec v) (\(i, y). conjugate v $ y) = adjoint (mat (dim_vec v) 1 (\(i, j). v $ i))" (is "?r = adjoint ?l") apply (rule eq_matI) subgoal for i j by (simp add: dv adjoint_eval) using d1 d2 by auto then have "outer_prod v v = ?l * adjoint ?l" unfolding outer_prod_def by auto then have "\M. M * adjoint M = outer_prod v v" by auto then show "positive (outer_prod v v)" using positive_if_decomp[OF outer_prod_dim[OF v v]] by auto qed lemma smult_smult_mat: fixes k :: complex and l :: complex assumes "A \ carrier_mat nr n" shows "k \\<^sub>m (l \\<^sub>m A) = (k * l) \\<^sub>m A" by auto lemma positive_smult: assumes "A \ carrier_mat n n" and "positive A" and "c \ 0" shows "positive (c \\<^sub>m A)" proof - have sc: "csqrt c \ 0" using assms(3) by fastforce obtain M where dimM: "M \ carrier_mat n n" and A: "M * adjoint M = A" using assms(1-2) positive_iff_decomp by auto have "c \\<^sub>m A = c \\<^sub>m (M * adjoint M)" using A by auto have ccsq: "conjugate (csqrt c) = (csqrt c)" using sc Reals_cnj_iff[of "csqrt c"] complex_is_Real_iff by auto have MM: "(M * adjoint M) \ carrier_mat n n" using A assms by fastforce have leftd: "c \\<^sub>m (M * adjoint M) \ carrier_mat n n" using A assms by fastforce have rightd: "(csqrt c \\<^sub>m M) * (adjoint (csqrt c \\<^sub>m M))\ carrier_mat n n" using A assms by fastforce have "(csqrt c \\<^sub>m M) * (adjoint (csqrt c \\<^sub>m M)) = (csqrt c \\<^sub>m M) * ((conjugate (csqrt c)) \\<^sub>m adjoint M)" using adjoint_scale assms(1) by (metis adjoint_scale) also have "\ = (csqrt c \\<^sub>m M) * (csqrt c \\<^sub>m adjoint M)" using sc ccsq by fastforce also have "\ = csqrt c \\<^sub>m (M * (csqrt c \\<^sub>m adjoint M))" using mult_smult_assoc_mat index_smult_mat(2,3) by fastforce also have "\ = csqrt c \\<^sub>m ((csqrt c) \\<^sub>m (M * adjoint M))" using mult_smult_distrib by fastforce also have "\ = c \\<^sub>m (M * adjoint M)" using smult_smult_mat[of "M * adjoint M" n n "(csqrt c)" "(csqrt c)"] MM sc by (metis power2_csqrt power2_eq_square ) also have "\ = c \\<^sub>m A" using A by auto finally have "(csqrt c \\<^sub>m M) * (adjoint (csqrt c \\<^sub>m M)) = c \\<^sub>m A" by auto moreover have "c \\<^sub>m A \ carrier_mat n n" using assms(1) by auto moreover have "csqrt c \\<^sub>m M \ carrier_mat n n" using dimM by auto ultimately show ?thesis using positive_iff_decomp by auto qed text \Version of previous theorem for real numbers\ lemma positive_scale: fixes c :: real assumes "A \ carrier_mat n n" and "positive A" and "c \ 0" shows "positive (c \\<^sub>m A)" apply (rule positive_smult) using assms by auto subsection \L\"{o}wner partial order\ definition lowner_le :: "complex mat \ complex mat \ bool" (infix "\\<^sub>L" 50) where "A \\<^sub>L B \ dim_row A = dim_row B \ dim_col A = dim_col B \ positive (B - A)" lemma lowner_le_refl: assumes "A \ carrier_mat n n" shows "A \\<^sub>L A" unfolding lowner_le_def apply (simp add: minus_r_inv_mat[OF assms]) by (rule positive_zero) lemma lowner_le_antisym: assumes A: "A \ carrier_mat n n" and B: "B \ carrier_mat n n" and L1: "A \\<^sub>L B" and L2: "B \\<^sub>L A" shows "A = B" proof - from L1 have P1: "positive (B - A)" by (simp add: lowner_le_def) from L2 have P2: "positive (A - B)" by (simp add: lowner_le_def) have "A - B = - (B - A)" using A B by auto then have P3: "positive (- (B - A))" using P2 by auto have 
BA: "B - A \ carrier_mat n n" using A B by auto have "B - A = 0\<^sub>m n n" using BA by (subst positive_antisym[OF P1 P3], auto) then have "B + (-A) + A = 0\<^sub>m n n + A" using A B minus_add_uminus_mat[OF B A] by auto then have "B + (-A + A) = 0\<^sub>m n n + A" using A B by auto then show "A = B" using A B BA uminus_l_inv_mat[OF A] by auto qed lemma lowner_le_inner_prod_le: fixes A B :: "complex mat" and v :: "complex vec" assumes A: "A \ carrier_mat n n" and B: "B \ carrier_mat n n" and v: "v \ carrier_vec n" and "A \\<^sub>L B" shows "inner_prod v (A *\<^sub>v v) \ inner_prod v (B *\<^sub>v v)" proof - from assms have "positive (B-A)" by (auto simp add: lowner_le_def) with assms have geq: "inner_prod v ((B-A) *\<^sub>v v) \ 0" unfolding positive_def by auto have "inner_prod v ((B-A) *\<^sub>v v) = inner_prod v (B *\<^sub>v v) - inner_prod v (A *\<^sub>v v)" unfolding minus_add_uminus_mat[OF B A] by (subst add_mult_distrib_mat_vec[OF B _ v], insert A B v, auto simp add: inner_prod_distrib_right[OF v]) then show ?thesis using geq by auto qed lemma lowner_le_trans: fixes A B C :: "complex mat" assumes A: "A \ carrier_mat n n" and B: "B \ carrier_mat n n" and C: "C \ carrier_mat n n" and L1: "A \\<^sub>L B" and L2: "B \\<^sub>L C" shows "A \\<^sub>L C" unfolding lowner_le_def proof (auto simp add: carrier_matD[OF A] carrier_matD[OF C]) have dim: "C - A \ carrier_mat n n" using A C by auto { fix v assume v: "(v::complex vec) \ carrier_vec n" from L1 have "inner_prod v (A *\<^sub>v v) \ inner_prod v (B *\<^sub>v v)" using lowner_le_inner_prod_le A B v by auto also from L2 have "\ \ inner_prod v (C *\<^sub>v v)" using lowner_le_inner_prod_le B C v by auto finally have "inner_prod v (A *\<^sub>v v) \ inner_prod v (C *\<^sub>v v)". then have "inner_prod v (C *\<^sub>v v) - inner_prod v (A *\<^sub>v v) \ 0" by auto then have "inner_prod v ((C - A) *\<^sub>v v) \ 0" using A C v apply (subst minus_add_uminus_mat[OF C A]) apply (subst add_mult_distrib_mat_vec[OF C _ v], simp) apply (simp add: inner_prod_distrib_right[OF v]) done } note leq = this show "positive (C - A)" unfolding positive_def apply (rule, simp add: carrier_matD[OF A] dim) apply (subst carrier_matD[OF dim], insert leq, auto) done qed lemma lowner_le_imp_trace_le: assumes "A \ carrier_mat n n" and "B \ carrier_mat n n" and "A \\<^sub>L B" shows "trace A \ trace B" proof - have "positive (B - A)" using assms lowner_le_def by auto moreover have "B - A \ carrier_mat n n" using assms by auto ultimately have "trace (B - A) \ 0" using positive_trace by auto moreover have "trace (B - A) = trace B - trace A" using trace_minus_linear assms by auto ultimately have "trace B - trace A \ 0" by auto then show "trace A \ trace B" by auto qed lemma lowner_le_add: assumes "A \ carrier_mat n n" "B \ carrier_mat n n" "C \ carrier_mat n n" "D \ carrier_mat n n" and "A \\<^sub>L B" "C \\<^sub>L D" shows "A + C \\<^sub>L B + D" proof - have "B + D - (A + C) = B - A + (D - C) " using assms by auto then have "positive (B + D - (A + C))" using assms unfolding lowner_le_def using positive_add by (metis minus_carrier_mat) then show "A + C \\<^sub>L B + D" unfolding lowner_le_def using assms by fastforce qed lemma lowner_le_swap: assumes "A \ carrier_mat n n" "B \ carrier_mat n n" and "A \\<^sub>L B" shows "-B \\<^sub>L -A" proof - have "positive (B - A)" using assms lowner_le_def by fastforce moreover have "B - A = (-A) - (-B)" using assms by fastforce ultimately have "positive ((-A) - (-B))" by auto then show ?thesis using lowner_le_def assms by fastforce 
qed lemma lowner_le_minus: assumes "A \ carrier_mat n n" "B \ carrier_mat n n" "C \ carrier_mat n n" "D \ carrier_mat n n" and "A \\<^sub>L B" "C \\<^sub>L D" shows "A - D \\<^sub>L B - C" proof - have "positive (D - C)" using assms lowner_le_def by auto then have "-D \\<^sub>L -C" using lowner_le_swap assms by auto then have "A + (-D) \\<^sub>L B + (-C)" using lowner_le_add[of "A" n "B"] assms by auto moreover have "A + (-D) = A - D" and "B + (-C) = B - C" by auto ultimately show ?thesis by auto qed lemma outer_prod_le_one: assumes "v \ carrier_vec n" and "inner_prod v v \ 1" shows "outer_prod v v \\<^sub>L 1\<^sub>m n" proof - let ?o = "outer_prod v v" have do: "?o \ carrier_mat n n" using assms by auto { fix u :: "complex vec" assume "dim_vec u = n" then have du: "u \ carrier_vec n" by auto have r: "inner_prod u u \ Reals" apply (simp add: scalar_prod_def carrier_vecD[OF du]) using complex_In_mult_cnj_zero complex_is_Real_iff by blast have geq0: "inner_prod u u \ 0" using self_cscalar_prod_geq_0 by auto have "inner_prod u (?o *\<^sub>v u) = inner_prod u v * inner_prod v u" apply (subst inner_prod_outer_prod) using du assms by auto also have "\ \ inner_prod u u * inner_prod v v" using Cauchy_Schwarz_complex_vec du assms by auto also have "\ \ inner_prod u u" using assms(2) r geq0 by (simp add: mult_right_le_one_le) finally have le: "inner_prod u (?o *\<^sub>v u) \ inner_prod u u". have "inner_prod u ((1\<^sub>m n - ?o) *\<^sub>v u) = inner_prod u ((1\<^sub>m n *\<^sub>v u) - ?o *\<^sub>v u)" apply (subst minus_mult_distrib_mat_vec) using do du by auto also have "\ = inner_prod u u - inner_prod u (?o *\<^sub>v u)" apply (subst inner_prod_minus_distrib_right) using du do by auto also have "\ \ 0" using le by auto finally have "inner_prod u ((1\<^sub>m n - ?o) *\<^sub>v u) \ 0" by auto } then have "positive (1\<^sub>m n - outer_prod v v)" unfolding positive_def using do by auto then show ?thesis unfolding lowner_le_def using do by auto qed lemma zero_lowner_le_positiveD: fixes A :: "complex mat" assumes dA: "A \ carrier_mat n n" and le: "0\<^sub>m n n \\<^sub>L A" shows "positive A" using assms unfolding lowner_le_def by (subgoal_tac "A - 0\<^sub>m n n = A", auto) lemma zero_lowner_le_positiveI: fixes A :: "complex mat" assumes dA: "A \ carrier_mat n n" and le: "positive A" shows "0\<^sub>m n n \\<^sub>L A" using assms unfolding lowner_le_def by (subgoal_tac "A - 0\<^sub>m n n = A", auto) lemma lowner_le_trans_positiveI: fixes A B :: "complex mat" assumes dA: "A \ carrier_mat n n" and pA: "positive A" and le: "A \\<^sub>L B" shows "positive B" proof - have dB: "B \ carrier_mat n n" using le dA lowner_le_def by auto have "0\<^sub>m n n \\<^sub>L A" using zero_lowner_le_positiveI dA pA by auto then have "0\<^sub>m n n \\<^sub>L B" using dA dB le by (simp add: lowner_le_trans[of _ n A B]) then show ?thesis using dB zero_lowner_le_positiveD by auto qed lemma lowner_le_keep_under_measurement: fixes M A B :: "complex mat" assumes dM: "M \ carrier_mat n n" and dA: "A \ carrier_mat n n" and dB: "B \ carrier_mat n n" and le: "A \\<^sub>L B" shows "adjoint M * A * M \\<^sub>L adjoint M * B * M" unfolding lowner_le_def proof (rule conjI, fastforce)+ have daM: "adjoint M \ carrier_mat n n" using dM by auto have dBmA: "B - A \ carrier_mat n n" using dB dA by fastforce have "positive (B - A)" using le lowner_le_def by auto then have p: "positive (adjoint M * (B - A) * M)" using positive_close_under_left_right_mult_adjoint[OF daM dBmA] adjoint_adjoint[of M] by auto moreover have e: "adjoint M * (B - A) * 
M = adjoint M * B * M - adjoint M * A * M" using dM dB dA by (mat_assoc n) ultimately show "positive (adjoint M * B * M - adjoint M * A * M)" by auto qed lemma smult_distrib_left_minus_mat: fixes A B :: "'a::comm_ring_1 mat" assumes "A \ carrier_mat n n" "B \ carrier_mat n n" shows "c \\<^sub>m (B - A) = c \\<^sub>m B - c \\<^sub>m A" using assms by (auto simp add: minus_add_uminus_mat add_smult_distrib_left_mat) lemma lowner_le_smultc: fixes c :: complex assumes "c \ 0" "A \\<^sub>L B" "A \ carrier_mat n n" "B \ carrier_mat n n" shows "c \\<^sub>m A \\<^sub>L c \\<^sub>m B" proof - have eqBA: "c \\<^sub>m (B - A) = c \\<^sub>m B - c \\<^sub>m A" using assms by (auto simp add: smult_distrib_left_minus_mat) have "positive (B - A)" using assms(2) unfolding lowner_le_def by auto then have "positive (c \\<^sub>m (B - A))" using positive_smult[of "B-A" n c] assms by fastforce moreover have "c \\<^sub>m A \ carrier_mat n n" using index_smult_mat(2,3) assms(3) by auto moreover have "c \\<^sub>m B \ carrier_mat n n" using index_smult_mat(2,3) assms(4) by auto ultimately show ?thesis unfolding lowner_le_def using eqBA by fastforce qed lemma lowner_le_smult: fixes c :: real assumes "c \ 0" "A \\<^sub>L B" "A \ carrier_mat n n" "B \ carrier_mat n n" shows "c \\<^sub>m A \\<^sub>L c \\<^sub>m B" apply (rule lowner_le_smultc) using assms by auto lemma minus_smult_vec_distrib: fixes w :: "'a::comm_ring_1 vec" shows "(a - b) \\<^sub>v w = a \\<^sub>v w - b \\<^sub>v w" apply (rule eq_vecI) by (auto simp add: scalar_prod_def algebra_simps) lemma smult_mat_mult_mat_vec_assoc: fixes A :: "'a::comm_ring_1 mat" assumes A: "A \ carrier_mat n m" and w: "w \ carrier_vec m" shows "a \\<^sub>m A *\<^sub>v w = a \\<^sub>v (A *\<^sub>v w)" apply (rule eq_vecI) apply (simp add: scalar_prod_def carrier_matD[OF A] carrier_vecD[OF w]) apply (subst sum_distrib_left) apply (rule sum.cong, simp) by auto lemma mult_mat_vec_smult_vec_assoc: fixes A :: "'a::comm_ring_1 mat" assumes A: "A \ carrier_mat n m" and w: "w \ carrier_vec m" shows "A *\<^sub>v (a \\<^sub>v w) = a \\<^sub>v (A *\<^sub>v w)" apply (rule eq_vecI) apply (simp add: scalar_prod_def carrier_matD[OF A] carrier_vecD[OF w]) apply (subst sum_distrib_left) apply (rule sum.cong, simp) by auto lemma outer_prod_left_right_mat: fixes A B :: "complex mat" assumes du: "u \ carrier_vec d2" and dv: "v \ carrier_vec d3" and dA: "A \ carrier_mat d1 d2" and dB: "B \ carrier_mat d3 d4" shows "A * (outer_prod u v) * B = (outer_prod (A *\<^sub>v u) (adjoint B *\<^sub>v v))" unfolding outer_prod_def proof - have eq1: "A * (mat (dim_vec u) 1 (\(i, j). u $ i)) = mat (dim_vec (A *\<^sub>v u)) 1 (\(i, j). (A *\<^sub>v u) $ i)" apply (rule eq_matI) by (auto simp add: dA du scalar_prod_def) have conj: "conjugate a * b = conjugate ((a::complex) * conjugate b) " for a b by auto have eq2: "mat 1 (dim_vec v) (\(i, y). conjugate v $ y) * B = mat 1 (dim_vec (adjoint B *\<^sub>v v)) (\(i, y). conjugate (adjoint B *\<^sub>v v) $ y)" apply (rule eq_matI) apply (auto simp add: carrier_matD[OF dB] carrier_vecD[OF dv] scalar_prod_def adjoint_def conjugate_vec_def sum_conjugate ) apply (rule sum.cong) by (auto simp add: conj) have "A * (mat (dim_vec u) 1 (\(i, j). u $ i) * mat 1 (dim_vec v) (\(i, y). conjugate v $ y)) * B = (A * (mat (dim_vec u) 1 (\(i, j). u $ i))) *(mat 1 (dim_vec v) (\(i, y). conjugate v $ y)) * B" using dA du dv dB assoc_mult_mat[OF dA, of "mat (dim_vec u) 1 (\(i, j). u $ i)" 1 "mat 1 (dim_vec v) (\(i, y). 
conjugate v $ y)"] by fastforce also have "\ = (A * (mat (dim_vec u) 1 (\(i, j). u $ i))) *((mat 1 (dim_vec v) (\(i, y). conjugate v $ y)) * B)" using dA du dv dB assoc_mult_mat[OF _ _ dB, of "(A * (mat (dim_vec u) 1 (\(i, j). u $ i)))" d1 1] by fastforce finally show "A * (mat (dim_vec u) 1 (\(i, j). u $ i) * mat 1 (dim_vec v) (\(i, y). conjugate v $ y)) * B = mat (dim_vec (A *\<^sub>v u)) 1 (\(i, j). (A *\<^sub>v u) $ i) * mat 1 (dim_vec (adjoint B *\<^sub>v v)) (\(i, y). conjugate (adjoint B *\<^sub>v v) $ y)" using eq1 eq2 by auto qed subsection \Density operators\ definition density_operator :: "complex mat \ bool" where "density_operator A \ positive A \ trace A = 1" definition partial_density_operator :: "complex mat \ bool" where "partial_density_operator A \ positive A \ trace A \ 1" lemma pure_state_self_outer_prod_is_partial_density_operator: fixes v :: "complex vec" assumes dimv: "v \ carrier_vec n" and nv: "vec_norm v = 1" shows "partial_density_operator (outer_prod v v)" unfolding partial_density_operator_def proof have dimov: "outer_prod v v \ carrier_mat n n" using dimv by auto show "positive (outer_prod v v)" unfolding positive_def proof (rule, simp add: carrier_matD(2)[OF dimov] dimov, rule allI, rule impI) fix w assume "dim_vec (w::complex vec) = dim_col (outer_prod v v)" then have dimw: "w \ carrier_vec n" using dimov carrier_vecI by auto then have "inner_prod w ((outer_prod v v) *\<^sub>v w) = inner_prod w v * inner_prod v w" using inner_prod_outer_prod dimw dimv by auto also have "\ = inner_prod w v * conjugate (inner_prod w v)" using dimw dimv apply (subst conjugate_scalar_prod[of v "conjugate w"], simp) apply (subst conjugate_vec_sprod_comm[of "conjugate v" _ "conjugate w"], auto) apply (rule carrier_vec_conjugate[OF dimv]) apply (rule carrier_vec_conjugate[OF dimw]) done also have "\ \ 0" by auto finally show "inner_prod w ((outer_prod v v) *\<^sub>v w) \ 0". qed have eq: "trace (outer_prod v v) = (\i=0..i=0..i=0.. 1" by auto qed (* Lemma 2.1 *) lemma lowner_le_trace: assumes A: "A \ carrier_mat n n" and B: "B \ carrier_mat n n" shows "A \\<^sub>L B \ (\\\carrier_mat n n. partial_density_operator \ \ trace (A * \) \ trace (B * \))" proof (rule iffI) have dimBmA: "B - A \ carrier_mat n n" using A B by auto { assume "A \\<^sub>L B" then have pBmA: "positive (B - A)" using lowner_le_def by auto moreover have "B - A \ carrier_mat n n" using assms by auto ultimately have "\M\carrier_mat n n. M * adjoint M = B - A" using positive_iff_decomp[of "B - A"] by auto then obtain M where dimM: "M \ carrier_mat n n" and M: "M * adjoint M = B - A" by auto { fix \ assume dimr: "\ \ carrier_mat n n" and pdr: "partial_density_operator \" have eq: "trace(B * \) - trace(A * \) = trace((B - A) * \)" using A B dimr apply (subst minus_mult_distrib_mat, auto) apply (subst trace_minus_linear, auto) done have pr: "positive \" using pdr partial_density_operator_def by auto then have "\P\carrier_mat n n. \ = P * adjoint P" using positive_iff_decomp dimr by auto then obtain P where dimP: "P \ carrier_mat n n" and P: "\ = P * adjoint P" by auto have "trace((B - A) * \) = trace(M * adjoint M * (P * adjoint P))" using P M by auto also have "\ = trace((adjoint P * M) * adjoint (adjoint P * M))" using dimM dimP by (mat_assoc n) also have "\ \ 0" using trace_adjoint_positive by auto finally have "trace((B - A) * \) \ 0". with eq have " trace (B * \) - trace (A * \) \ 0" by auto } then show "\\\carrier_mat n n. 
partial_density_operator \ \ trace (A * \) \ trace (B * \)" by auto } { assume asm: "\\\carrier_mat n n. partial_density_operator \ \ trace (A * \) \ trace (B * \)" have "positive (B - A)" proof - { fix v assume "dim_vec (v::complex vec) = dim_col (B - A) \ vec_norm v = 1" then have dimv: "v \ carrier_vec n" and nv: "vec_norm v = 1" - using carrier_matD[OF dimBmA] carrier_vecI by auto + using carrier_matD[OF dimBmA] by (auto intro: carrier_vecI) have dimov: "outer_prod v v \ carrier_mat n n" using dimv by auto then have "partial_density_operator (outer_prod v v)" using dimv nv pure_state_self_outer_prod_is_partial_density_operator by auto then have leq: "trace(A * (outer_prod v v)) \ trace(B * (outer_prod v v))" using asm dimov by auto have "trace((B - A) * (outer_prod v v)) = trace(B * (outer_prod v v)) - trace(A * (outer_prod v v))" using A B dimov apply (subst minus_mult_distrib_mat, auto) apply (subst trace_minus_linear, auto) done then have "trace((B - A) * (outer_prod v v)) \ 0" using leq by auto then have "inner_prod v ((B - A) *\<^sub>v v) \ 0" using trace_outer_prod_right[OF dimBmA dimv dimv] by auto } then show "positive (B - A)" using positive_iff_normalized_vec[of "B - A"] dimBmA A by simp qed then show "A \\<^sub>L B" using lowner_le_def A B by auto } qed lemma lowner_le_traceI: assumes "A \ carrier_mat n n" and "B \ carrier_mat n n" and "\\. \ \ carrier_mat n n \ partial_density_operator \ \ trace (A * \) \ trace (B * \)" shows "A \\<^sub>L B" using lowner_le_trace assms by auto lemma trace_pdo_eq_imp_eq: assumes A: "A \ carrier_mat n n" and B: "B \ carrier_mat n n" and teq: "\\. \ \ carrier_mat n n \ partial_density_operator \ \ trace (A * \) = trace (B * \)" shows "A = B" proof - from teq have "A \\<^sub>L B" using lowner_le_trace[OF A B] teq by auto moreover from teq have "B \\<^sub>L A" using lowner_le_trace[OF B A] teq by auto ultimately show "A = B" using lowner_le_antisym A B by auto qed lemma lowner_le_traceD: assumes "A \ carrier_mat n n" "B \ carrier_mat n n" "\ \ carrier_mat n n" and "A \\<^sub>L B" and "partial_density_operator \" shows "trace (A * \) \ trace (B * \)" using lowner_le_trace assms by blast lemma sum_only_one_neq_0: assumes "finite A" and "j \ A" and "\i. i \ A \ i \ j \ g i = 0" shows "sum g A = g j" proof - have "{j} \ A" using assms by auto moreover have "\i\A - {j}. g i = 0" using assms by simp ultimately have "sum g A = sum g {j}" using assms by (auto simp add: comm_monoid_add_class.sum.mono_neutral_right[of A "{j}" g]) moreover have "sum g {j} = g j" by simp ultimately show ?thesis by auto qed end diff --git a/thys/QHLProver/Grover.thy b/thys/QHLProver/Grover.thy --- a/thys/QHLProver/Grover.thy +++ b/thys/QHLProver/Grover.thy @@ -1,3184 +1,3184 @@ section \Grover's algorithm\ theory Grover imports Partial_State Gates Quantum_Hoare begin subsection \Basic definitions\ locale grover_state = fixes n :: nat (* number of qubits *) and f :: "nat \ bool" (* characteristic function, only need values in [0,N). *) assumes n: "n > 1" and dimM: "card {i. i < (2::nat) ^ n \ f i} > 0" "card {i. i < (2::nat) ^ n \ f i} < (2::nat) ^ n" begin definition N where "N = (2::nat) ^ n" definition M where "M = card {i. i < N \ f i}" lemma N_ge_0 [simp]: "0 < N" by (simp add: N_def) lemma M_ge_0 [simp]: "0 < M" by (simp add: M_def dimM N_def) lemma M_neq_0 [simp]: "M \ 0" by simp lemma M_le_N [simp]: "M < N" by (simp add: M_def dimM N_def) lemma M_not_ge_N [simp]: "\ M \ N" using M_le_N by arith definition \ :: "complex vec" where "\ = Matrix.vec N (\i. 
1 / sqrt N)" lemma \_dim [simp]: "\ \ carrier_vec N" "dim_vec \ = N" by (simp add: \_def)+ lemma \_eval: "i < N \ \ $ i = 1 / sqrt N" by (simp add: \_def) lemma \_inner: "inner_prod \ \ = 1" apply (simp add: \_eval scalar_prod_def) by (smt of_nat_less_0_iff of_real_mult of_real_of_nat_eq real_sqrt_mult_self) lemma \_norm: "vec_norm \ = 1" by (simp add: \_eval vec_norm_def scalar_prod_def) definition \ :: "complex vec" where "\ = Matrix.vec N (\i. if f i then 0 else 1 / sqrt (N - M))" lemma \_dim [simp]: "\ \ carrier_vec N" "dim_vec \ = N" by (simp add: \_def)+ lemma \_eval: "i < N \ \ $ i = (if f i then 0 else 1 / sqrt (N - M))" by (simp add: \_def) lemma \_inner: "inner_prod \ \ = 1" apply (simp add: scalar_prod_def \_eval) apply (subst sum.mono_neutral_cong_right[of "{0.. f i}"]) apply auto apply (subgoal_tac "card ({0.. f i}) = N - M") subgoal by (metis of_nat_0_le_iff of_real_of_nat_eq of_real_power power2_eq_square real_sqrt_pow2) unfolding N_def M_def by (metis (no_types, lifting) atLeastLessThan_iff card.infinite card_Diff_subset card_atLeastLessThan diff_zero dimM(1) mem_Collect_eq neq0_conv subsetI zero_order(1)) definition \ :: "complex vec" where "\ = Matrix.vec N (\i. if f i then 1 / sqrt M else 0)" lemma \_dim [simp]: "\ \ carrier_vec N" "dim_vec \ = N" by (simp add: \_def)+ lemma \_eval: "i < N \ \ $ i = (if f i then 1 / sqrt M else 0)" by (simp add: \_def) lemma \_inner: "inner_prod \ \ = 1" apply (simp add: scalar_prod_def \_eval) apply (subst sum.mono_neutral_cong_right[of "{0.. f i}"]) apply auto apply (fold M_def) by (metis of_nat_0_le_iff of_real_of_nat_eq of_real_power power2_eq_square real_sqrt_pow2) lemma alpha_beta_orth: "inner_prod \ \ = 0" unfolding \_def \_def by (simp add: scalar_prod_def) lemma beta_alpha_orth: "inner_prod \ \ = 0" unfolding \_def \_def by (simp add: scalar_prod_def) definition \ :: real where "\ = 2 * arccos (sqrt ((N - M) / N))" lemma cos_theta_div_2: "cos (\ / 2) = sqrt ((N - M) / N)" proof - have "\ / 2 = arccos (sqrt ((N - M) / N))" using \_def by simp then show "cos (\ / 2) = sqrt ((N - M) / N)" by (simp add: cos_arccos_abs) qed lemma sin_theta_div_2: "sin (\ / 2) = sqrt (M / N)" proof - have a: "\ / 2 = arccos (sqrt ((N - M) / N))" using \_def by simp have N: "N > 0" using N_def by auto have M: "M < N" using M_def dimM N_def by auto then show "sin (\ / 2) = sqrt (M / N)" unfolding a apply (simp add: sin_arccos_abs) proof - have eq: "real (N - M) = real N - real M" using N M using M_not_ge_N nat_le_linear of_nat_diff by blast have "1 - real (N - M) / real N = (real N - (real N - real M)) / real N" unfolding eq using N by (metis diff_divide_distrib divide_self_if eq gr_implies_not0 of_nat_0_eq_iff) then show "1 - real (N - M) / real N = real M / real N" by auto qed qed lemma \_neq_0: "\ \ 0" proof - { assume "\ = 0" then have "\ / 2 = 0" by auto then have "sin (\ / 2) = 0" by auto } note z = this have "sin (\ / 2) = sqrt (M / N)" using sin_theta_div_2 by auto moreover have "M > 0" unfolding M_def N_def using dimM by auto ultimately have "sin (\ / 2) > 0" by auto with z show ?thesis by auto qed abbreviation ccos where "ccos \ \ complex_of_real (cos \)" abbreviation csin where "csin \ \ complex_of_real (sin \)" lemma \_eq: "\ = ccos (\ / 2) \\<^sub>v \ + csin (\ / 2) \\<^sub>v \" apply (simp add: cos_theta_div_2 sin_theta_div_2) apply (rule eq_vecI) by (auto simp add: \_def \_def \_def real_sqrt_divide) lemma psi_inner_alpha: "inner_prod \ \ = ccos (\ / 2)" unfolding \_eq proof - have "inner_prod (ccos (\ / 2) \\<^sub>v \) \ = ccos (\ / 2)" 
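(* Reading aid for this proof and for psi_inner_beta below; it is not part of the
   formal proof. Writing psi = cos(theta/2) * alpha + sin(theta/2) * beta and using
   the orthonormality facts alpha_inner, beta_inner, alpha_beta_orth established
   above (coefficients are real, so conjugation is harmless), one expects
     <psi, alpha> = cos(theta/2) * <alpha,alpha> + sin(theta/2) * <beta,alpha> = cos(theta/2)
     <psi, beta>  = cos(theta/2) * <alpha,beta>  + sin(theta/2) * <beta,beta>  = sin(theta/2).
   The Isar steps that follow carry this computation out term by term. *)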
apply (subst inner_prod_smult_right[of _ N]) using \_dim \_inner by auto moreover have "inner_prod (csin (\ / 2) \\<^sub>v \) \ = 0" apply (subst inner_prod_smult_right[of _ N]) using \_dim \_dim beta_alpha_orth by auto ultimately show "inner_prod (ccos (\ / 2) \\<^sub>v \ + csin (\ / 2) \\<^sub>v \) \ = ccos (\ / 2)" apply (subst inner_prod_distrib_left[of _ N]) using \_dim \_dim by auto qed lemma psi_inner_beta: "inner_prod \ \ = csin (\ / 2)" unfolding \_eq proof - have "inner_prod (ccos (\ / 2) \\<^sub>v \) \ = 0" apply (subst inner_prod_smult_right[of _ N]) using \_dim \_dim alpha_beta_orth by auto moreover have "inner_prod (csin (\ / 2) \\<^sub>v \) \ = csin (\ / 2)" apply (subst inner_prod_smult_right[of _ N]) using \_dim \_inner by auto ultimately show "inner_prod (ccos (\ / 2) \\<^sub>v \ + csin (\ / 2) \\<^sub>v \) \ = csin (\ / 2)" apply (subst inner_prod_distrib_left[of _ N]) using \_dim \_dim by auto qed definition alpha_l :: "nat \ complex" where "alpha_l l = ccos ((l + 1 / 2) * \)" lemma alpha_l_real: "alpha_l l \ Reals" unfolding alpha_l_def by auto lemma cnj_alpha_l: "conjugate (alpha_l l) = alpha_l l" using alpha_l_real Reals_cnj_iff by auto definition beta_l :: "nat \ complex" where "beta_l l = csin ((l + 1 / 2) * \)" lemma beta_l_real: "beta_l l \ Reals" unfolding beta_l_def by auto lemma cnj_beta_l: "conjugate (beta_l l) = beta_l l" using beta_l_real Reals_cnj_iff by auto lemma csin_ccos_squared_add: "ccos (a::real) * ccos a + csin a * csin a = 1" by (smt cos_diff cos_zero of_real_add of_real_hom.hom_one of_real_mult) lemma alpha_l_beta_l_add_norm: "alpha_l l * alpha_l l + beta_l l * beta_l l = 1" using alpha_l_def beta_l_def csin_ccos_squared_add by auto definition psi_l where "psi_l l = (alpha_l l) \\<^sub>v \ + (beta_l l) \\<^sub>v \" lemma psi_l_dim: "psi_l l \ carrier_vec N" unfolding psi_l_def \_def \_def by auto lemma inner_psi_l: "inner_prod (psi_l l) (psi_l l) = 1" proof - have eq0: "inner_prod (psi_l l) (psi_l l) = inner_prod ((alpha_l l) \\<^sub>v \) (psi_l l) + inner_prod ((beta_l l) \\<^sub>v \) (psi_l l)" unfolding psi_l_def apply (subst inner_prod_distrib_left) using \_def \_def by auto have "inner_prod ((alpha_l l) \\<^sub>v \) (psi_l l) = inner_prod ((alpha_l l) \\<^sub>v \) ((alpha_l l) \\<^sub>v \) + inner_prod ((alpha_l l) \\<^sub>v \) ((beta_l l) \\<^sub>v \)" unfolding psi_l_def apply (subst inner_prod_distrib_right) using \_def \_def by auto also have "\ = (conjugate (alpha_l l)) * (alpha_l l) * inner_prod \ \ + (conjugate (alpha_l l)) * (beta_l l) * inner_prod \ \" apply (subst (1 2) inner_prod_smult_left_right) using \_def \_def by auto also have "\ = conjugate (alpha_l l) * (alpha_l l) " by (simp add: alpha_beta_orth \_inner) also have "\ = (alpha_l l) * (alpha_l l)" using cnj_alpha_l by simp finally have eq1: "inner_prod (alpha_l l \\<^sub>v \) (psi_l l) = alpha_l l * alpha_l l". 
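(* Informal summary of the remaining computation, using only facts already proved
   above (cnj_alpha_l, cnj_beta_l, orthonormality of alpha and beta, and
   alpha_l_beta_l_add_norm): the symmetric step for the beta component gives the
   inner product of (beta_l l) smult beta with psi_l l as beta_l l * beta_l l, so
     <psi_l l, psi_l l> = alpha_l l * alpha_l l + beta_l l * beta_l l
                        = cos((l + 1/2)*theta)^2 + sin((l + 1/2)*theta)^2 = 1,
   i.e. every Grover iterate psi_l l is a unit vector. *)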
have "inner_prod ((beta_l l) \\<^sub>v \) (psi_l l) = inner_prod ((beta_l l) \\<^sub>v \) ((alpha_l l) \\<^sub>v \) + inner_prod ((beta_l l) \\<^sub>v \) ((beta_l l) \\<^sub>v \)" unfolding psi_l_def apply (subst inner_prod_distrib_right) using \_def \_def by auto also have "\ = (conjugate (beta_l l)) * (alpha_l l) * inner_prod \ \ + (conjugate (beta_l l)) * (beta_l l) * inner_prod \ \" apply (subst (1 2) inner_prod_smult_left_right) using \_def \_def by auto also have "\ = (conjugate (beta_l l)) * (beta_l l)" using \_inner beta_alpha_orth by auto also have "\ = (beta_l l) * (beta_l l)" using cnj_beta_l by auto finally have eq2: "inner_prod (beta_l l \\<^sub>v \) (psi_l l) = beta_l l * beta_l l". show ?thesis unfolding eq0 eq1 eq2 using alpha_l_beta_l_add_norm by auto qed abbreviation proj :: "complex vec \ complex mat" where "proj v \ outer_prod v v" definition psi'_l where "psi'_l l = (alpha_l l) \\<^sub>v \ - (beta_l l) \\<^sub>v \" lemma psi'_l_dim: "psi'_l l \ carrier_vec N" unfolding psi'_l_def \_def \_def by auto definition proj_psi'_l where "proj_psi'_l l = proj (psi'_l l)" lemma proj_psi'_dim: "proj_psi'_l l \ carrier_mat N N" unfolding proj_psi'_l_def using psi'_l_dim by auto lemma psi_inner_psi'_l: "inner_prod \ (psi'_l l) = (alpha_l l * ccos (\ / 2) - beta_l l * csin (\ / 2))" proof - have "inner_prod \ (psi'_l l) = inner_prod \ (alpha_l l \\<^sub>v \) - inner_prod \ (beta_l l \\<^sub>v \)" unfolding psi'_l_def apply (subst inner_prod_minus_distrib_right[of _ N]) by auto also have "\ = alpha_l l * (inner_prod \ \) - beta_l l * (inner_prod \ \)" using \_dim \_dim \_dim by auto also have "\ = alpha_l l * (ccos (\ / 2)) - beta_l l * (csin (\ / 2))" using psi_inner_alpha psi_inner_beta by auto finally show ?thesis by auto qed lemma double_ccos_square: "2 * ccos (a::real) * ccos a = ccos (2 * a) + 1" proof - have eq: "ccos (2 * a) = ccos a * ccos a - csin a * csin a" using cos_add[of a a] by auto have "csin a * csin a = 1 - ccos a * ccos a" using csin_ccos_squared_add[of a] by (metis add_diff_cancel_left') then have "ccos a * ccos a - csin a * csin a = 2 * ccos a * ccos a - 1" by simp with eq show ?thesis by simp qed lemma double_csin_square: "2 * csin (a::real) * csin a = 1 - ccos (2 * a)" proof - have eq: "ccos (2 * a) = ccos a * ccos a - csin a * csin a" using cos_add[of a a] by auto have "ccos a * ccos a = 1 - csin a * csin a" using csin_ccos_squared_add[of a] - cancel_comm_monoid_add_class.add_implies_diff by auto + by (auto intro: add_implies_diff) then have "ccos a * ccos a - csin a * csin a = 1 - 2 * csin (a::real) * csin a" by simp with eq show ?thesis by simp qed lemma csin_double: "2 * csin (a::real) * ccos a = csin(2 * a)" using sin_add[of a a] by simp lemma ccos_add: "ccos (x + y) = ccos x * ccos y - csin x * csin y" using cos_add[of x y] by simp lemma alpha_l_Suc_l_derive: "2 * (alpha_l l * ccos (\ / 2) - beta_l l * csin (\ / 2)) * ccos (\ / 2) - alpha_l l = alpha_l (l + 1)" (is "?lhs = ?rhs") proof - have "2 * ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) * ccos (\ / 2) = (alpha_l l) * (2 * ccos (\ / 2)* ccos (\ / 2)) - (beta_l l) * (2 * csin (\ / 2) * ccos (\ / 2))" by (simp add: left_diff_distrib) also have "\ = (alpha_l l) * (ccos (\) + 1) - (beta_l l) * csin \" using double_ccos_square csin_double by auto finally have "2 * ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) * ccos (\ / 2) = (alpha_l l) * (ccos (\) + 1) - (beta_l l) * csin \". 
then have "?lhs = (alpha_l l) * ccos (\) - (beta_l l) * csin \" by (simp add: algebra_simps) also have "\ = (alpha_l (l + 1))" unfolding alpha_l_def beta_l_def apply (subst ccos_add[of "(real l + 1 / 2) * \" "\", symmetric]) by (simp add: algebra_simps) finally show ?thesis by auto qed lemma csin_add: "csin (x + y) = ccos x * csin y + csin x * ccos y" using sin_add[of x y] by simp lemma beta_l_Suc_l_derive: "2 * (alpha_l l * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) * csin (\ / 2) + beta_l l = beta_l (l + 1)" (is "?lhs = ?rhs") proof - have "2 * ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) * csin (\ / 2) = (alpha_l l) * (2 * csin (\ / 2)* ccos (\ / 2)) - (beta_l l) * (2 * csin (\ / 2) * csin (\ / 2))" by (simp add: left_diff_distrib) also have "\ = (alpha_l l) * (csin \) - (beta_l l) * (1 - ccos (\))" using double_csin_square csin_double by auto finally have "2 * ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) * csin (\ / 2) = (alpha_l l) * (csin \) - (beta_l l) * (1 - ccos (\))". then have "?lhs = (alpha_l l) * (csin \) + (beta_l l) * ccos \" by (simp add: algebra_simps) also have "\ = (beta_l (l + 1))" unfolding alpha_l_def beta_l_def apply (subst csin_add[of "(real l + 1 / 2) * \" "\", symmetric]) by (simp add: algebra_simps) finally show ?thesis by auto qed lemma psi_l_Suc_l_derive: "2 * (alpha_l l * ccos (\ / 2) - beta_l l * csin (\ / 2)) \\<^sub>v \ - psi'_l l = psi_l (l + 1)" (is "?lhs = ?rhs") proof - let ?l = "2 * ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2))" have "?l \\<^sub>v \ = ?l \\<^sub>v (ccos (\ / 2) \\<^sub>v \ + csin (\ / 2) \\<^sub>v \)" unfolding \_eq by auto also have "\ = ?l \\<^sub>v (ccos (\ / 2) \\<^sub>v \) + ?l \\<^sub>v (csin (\ / 2) \\<^sub>v \)" apply (subst smult_add_distrib_vec[of _ N]) using \_dim \_dim by auto also have "\ = (?l * ccos (\ / 2)) \\<^sub>v \ + (?l * csin (\ / 2)) \\<^sub>v \" by auto finally have "?l \\<^sub>v \ = (?l * ccos (\ / 2)) \\<^sub>v \ + (?l * csin (\ / 2)) \\<^sub>v \". then have "?l \\<^sub>v \ - (psi'_l l) = ((?l * ccos (\ / 2)) \\<^sub>v \ - (alpha_l l) \\<^sub>v \) + ((?l * csin (\ / 2)) \\<^sub>v \ + (beta_l l) \\<^sub>v \)" unfolding psi'_l_def by auto also have "\ = (?l * ccos (\ / 2) - alpha_l l) \\<^sub>v \ + (?l * csin (\ / 2) + beta_l l) \\<^sub>v \" apply (subst minus_smult_vec_distrib) apply (subst add_smult_distrib_vec) by auto also have "\ = (alpha_l (l + 1)) \\<^sub>v \ + (beta_l (l + 1)) \\<^sub>v \" using alpha_l_Suc_l_derive beta_l_Suc_l_derive by auto finally have "?l \\<^sub>v \ - (psi'_l l) = (alpha_l (l + 1)) \\<^sub>v \ + (beta_l (l + 1)) \\<^sub>v \". then show ?thesis unfolding psi_l_def by auto qed subsection \Grover operator\ text \Oracle O\ definition proj_O :: "complex mat" where "proj_O = mat N N (\(i, j). if i = j then (if f i then 1 else 0) else 0)" lemma proj_O_dim: "proj_O \ carrier_mat N N" unfolding proj_O_def by auto lemma proj_O_mult_alpha: "proj_O *\<^sub>v \ = zero_vec N" by (auto simp add: proj_O_def \_def scalar_prod_def) lemma proj_O_mult_beta: "proj_O *\<^sub>v \ = \" by (auto simp add: proj_O_def \_def scalar_prod_def sum_only_one_neq_0) definition mat_O :: "complex mat" where "mat_O = mat N N (\(i,j). 
if i = j then (if f i then -1 else 1) else 0)" lemma mat_O_dim: "mat_O \ carrier_mat N N" unfolding mat_O_def by auto lemma mat_O_mult_alpha: "mat_O *\<^sub>v \ = \" by (auto simp add: mat_O_def \_def scalar_prod_def sum_only_one_neq_0) lemma mat_O_mult_beta: "mat_O *\<^sub>v \ = - \" by (auto simp add: mat_O_def \_def scalar_prod_def sum_only_one_neq_0) lemma hermitian_mat_O: "hermitian mat_O" by (auto simp add: hermitian_def mat_O_def adjoint_eval) lemma unitary_mat_O: "unitary mat_O" proof - have "mat_O \ carrier_mat N N" unfolding mat_O_def by auto moreover have "mat_O * adjoint mat_O = mat_O * mat_O" using hermitian_mat_O unfolding hermitian_def by auto moreover have "mat_O * mat_O = 1\<^sub>m N" apply (rule eq_matI) unfolding mat_O_def apply (simp add: scalar_prod_def) subgoal for i j apply (rule) subgoal apply (subst sum_only_one_neq_0[of "{0..(i,j). if i = j then if i = 0 then 1 else -1 else 0)" lemma hermitian_mat_Ph: "hermitian mat_Ph" unfolding hermitian_def mat_Ph_def apply (rule eq_matI) by (auto simp add: adjoint_eval) lemma unitary_mat_Ph: "unitary mat_Ph" proof - have "mat_Ph \ carrier_mat N N" unfolding mat_Ph_def by auto moreover have "mat_Ph * adjoint mat_Ph = mat_Ph * mat_Ph" using hermitian_mat_Ph unfolding hermitian_def by auto moreover have "mat_Ph * mat_Ph = 1\<^sub>m N" apply (rule eq_matI) unfolding mat_Ph_def apply (simp add: scalar_prod_def) subgoal for i j apply (rule) subgoal apply (subst sum_only_one_neq_0[of "{0..(i,j). if i = j then 2 / N - 1 else 2 / N)" text \Geometrically, the Grover operator G is a rotation\ definition mat_G :: "complex mat" where "mat_G = mat_G' * mat_O" end subsection \State of Grover's algorithm\ text \The dimensions are [2, 2, ..., 2, n]. We work with a very special case as in the paper\ locale grover_state_sig = grover_state + state_sig + fixes R :: nat fixes K :: nat assumes dims_def: "dims = replicate n 2 @ [K]" assumes R: "R = pi / (2 * \) - 1 / 2" assumes K: "K > R" begin lemma K_gt_0: "K > 0" using K by auto text \Bits q0 to q\_(n-1)\ definition vars1 :: "nat set" where "vars1 = {0 ..< n}" text \Bit r\ definition vars2 :: "nat set" where "vars2 = {n}" lemma length_dims: "length dims = n + 1" unfolding dims_def by auto lemma dims_nth_lt_n: "l < n \ nth dims l = 2" unfolding dims_def by (simp add: nth_append) lemma nths_Suc_n_dims: "nths dims {0..<(Suc n)} = dims" using length_dims nths_upt_eq_take by (metis add_Suc_right add_Suc_shift lessThan_atLeast0 less_add_eq_less less_numeral_extra(4) not_less plus_1_eq_Suc take_all) interpretation ps2_P: partial_state2 dims vars1 vars2 apply unfold_locales unfolding vars1_def vars2_def by auto interpretation ps_P: partial_state ps2_P.dims0 ps2_P.vars1'. abbreviation tensor_P where "tensor_P A B \ ps2_P.ptensor_mat A B" lemma tensor_P_dim: "tensor_P A B \ carrier_mat d d" proof - have "ps2_P.d0 = prod_list (nths dims ({0.. {n}))" unfolding ps2_P.d0_def ps2_P.dims0_def ps2_P.vars0_def by (simp add: vars1_def vars2_def) also have "\ = prod_list (nths dims ({0.. {n} = {0..<(Suc n)}") by auto also have "\ = prod_list dims" using nths_Suc_n_dims by auto also have "\ = d" unfolding d_def by auto finally show ?thesis using ps2_P.ptensor_mat_carrier by auto qed lemma dims_nths_le_n: assumes "l \ n" shows "nths dims {0.. n \ (i < Suc n \ i < l) = (i < l)" for i using less_trans by fastforce then show l: "length (nths dims {0..i. i < l \ {a. a < i \ a \ {0..i. i < l \ card {j. j < i \ j \ {0.. i \ {l}} = {l}" using assms length_dims by auto then have "nths dims {l} = [dims ! 
l]" using nths_only_one[of dims "{l}" l] by auto moreover have "dims ! l = 2" unfolding dims_def using assms by (simp add: nth_append) ultimately show ?thesis by auto qed lemma dims_vars1: "nths dims vars1 = replicate n 2" proof (rule nth_equalityI, auto) show l: "length (nths dims vars1) = n" apply (auto simp add: length_nths vars1_def length_dims) by (metis (no_types, lifting) Collect_cong Suc_lessD card_Collect_less_nat not_less_eq) have v1: "\i. i < n \ {a. a < i \ a \ vars1} = {0..i. i < n \ card {j. j < i \ j \ vars1} = i" by auto then have "nths dims vars1 ! i = dims ! i" if "i < n" for i using nth_nths_card[of i dims vars1] that length_dims vars1_def by auto moreover have "dims ! i = replicate n 2 ! i" if "i < n" for i unfolding dims_def by (simp add: nth_append that) ultimately show "nths dims vars1 ! i = replicate n 2 ! i" if "i < length (nths dims vars1)" for i using l that by auto qed lemma nths_rep_2_n: "nths (replicate n 2) {n} = []" by (metis (no_types, lifting) Collect_empty_eq card_empty length_0_conv length_replicate less_Suc_eq not_less_eq nths_replicate singletonD) lemma dims_vars2: "nths dims vars2 = [K]" unfolding dims_def vars2_def apply (subst nths_append) apply (subst nths_rep_2_n) by simp lemma d_vars1: "prod_list (nths dims vars1) = N" proof - have eq: "{0.. vars2 = {0.. vars2) = dims" unfolding vars1_def vars2_def using nths_Suc_n_dims by auto then show ?thesis unfolding ps2_P.dims0_def ps2_P.vars0_def apply (subst dims) by auto qed lemma ps2_P_vars1': "ps2_P.vars1' = vars1" unfolding ps2_P.vars1'_def ps2_P.vars0_def proof - have eq: "vars1 \ vars2 = {0..<(Suc n)}" unfolding vars1_def vars2_def by auto have "x < Suc n \ {i \ {0.. ind_in_set {0..<(Suc n)} x = x" for x unfolding ind_in_set_def by auto then have "x \ vars1 \ ind_in_set {0..<(Suc n)} x = x" for x unfolding vars1_def by auto then have "ind_in_set {0..<(Suc n)} ` vars1 = vars1" by force with eq show "ind_in_set (vars1 \ vars2) ` vars1 = vars1" by auto qed lemma ps2_P_d0: "ps2_P.d0 = d" unfolding ps2_P.d0_def using ps2_P_dims0 d_def by auto lemma ps2_P_d1: "ps2_P.d1 = N" unfolding ps2_P.d1_def ps2_P.dims1_def by (simp add: dims_vars1 N_def) lemma ps2_P_d2: "ps2_P.d2 = K" unfolding ps2_P.d2_def ps2_P.dims2_def by (simp add: dims_vars2) lemma ps_P_d: "ps_P.d = d" unfolding ps_P.d_def ps2_P_dims0 by auto lemma ps_P_d1: "ps_P.d1 = N" unfolding ps_P.d1_def ps_P.dims1_def ps2_P.nths_vars1' using ps2_P_d1 unfolding ps2_P.d1_def by auto lemma ps_P_d2: "ps_P.d2 = K" unfolding ps_P.d2_def ps_P.dims2_def ps2_P.nths_vars2' using ps2_P_d2 unfolding ps2_P.d2_def by auto lemma nths_uminus_vars1: "nths dims (- vars1) = nths dims vars2" using ps2_P.nths_vars2' unfolding ps2_P_dims0 ps2_P_vars1' ps2_P.dims2_def by auto lemma tensor_P_mult: assumes "m1 \ carrier_mat (2^n) (2^n)" and "m2 \ carrier_mat (2^n) (2^n)" and "m3 \ carrier_mat K K" and "m4 \ carrier_mat K K" shows "(tensor_P m1 m3) * (tensor_P m2 m4) = tensor_P (m1 * m2) (m3 * m4)" proof - have eq:"{0..m K)" unfolding Utrans_P_def ps2_P.ptensor_mat_def partial_state.mat_extension_def partial_state.d2_def partial_state.dims2_def ps2_P.nths_vars2'[simplified ps2_P_dims0 ps2_P_vars1'] using ps2_P_d2 unfolding ps2_P.d2_def using ps2_P_dims0 ps2_P_vars1' by auto lemma Utrans_P_is_tensor_P1: "Utrans_P vars1 A = Utrans (tensor_P A (1\<^sub>m K))" unfolding Utrans_P_def ps2_P.ptensor_mat_def partial_state.mat_extension_def partial_state.d2_def partial_state.dims2_def ps2_P.nths_vars2'[simplified ps2_P_dims0 ps2_P_vars1'] using ps2_P_d2 unfolding ps2_P.d2_def using 
ps2_P_dims0 ps2_P_vars1' by auto lemma nths_dims_uminus_vars2: "nths dims (-vars2) = nths dims vars1" proof - have "nths dims (-vars2) = nths dims ({0.. = nths dims vars1" unfolding vars1_def vars2_def length_dims apply (subgoal_tac "{0.. carrier_mat K K" shows "mat_extension dims vars2 A = tensor_P (1\<^sub>m N) A" proof - have "mat_extension dims vars2 A = tensor_mat dims vars2 A (1\<^sub>m N)" unfolding Utrans_P_def partial_state.mat_extension_def partial_state.d2_def partial_state.dims2_def nths_dims_uminus_vars2 dims_vars1 N_def by auto also have "\ = tensor_mat dims vars1 (1\<^sub>m N) A" apply (subst tensor_mat_comm[of vars1 vars2]) subgoal unfolding vars1_def vars2_def by auto subgoal unfolding length_dims vars1_def vars2_def by auto subgoal unfolding dims_vars1 N_def by auto unfolding dims_vars2 using assms by auto finally show "mat_extension dims vars2 A = tensor_P (1\<^sub>m N) A" unfolding ps2_P.ptensor_mat_def ps2_P_dims0 ps2_P_vars1' by auto qed lemma Utrans_P_is_tensor_P2: assumes "A \ carrier_mat K K" shows "Utrans_P vars2 A = Utrans (tensor_P (1\<^sub>m N) A)" unfolding Utrans_P_def using mat_ext_vars2 assms by auto subsection \Grover's algorithm\ text \Apply hadamard operator to first n variables\ definition hadamard_on_i :: "nat \ complex mat" where "hadamard_on_i i = pmat_extension dims {i} (vars1 - {i}) hadamard" declare hadamard_on_i_def [simp] fun hadamard_n :: "nat \ com" where "hadamard_n 0 = SKIP" | "hadamard_n (Suc i) = hadamard_n i ;; Utrans (tensor_P (hadamard_on_i i) (1\<^sub>m K))" text \Body of the loop\ definition D :: com where "D = Utrans_P vars1 mat_O ;; hadamard_n n ;; Utrans_P vars1 mat_Ph ;; hadamard_n n ;; Utrans_P vars2 (mat_incr K)" lemma unitary_ex_mat_O: "unitary (tensor_P mat_O (1\<^sub>m K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_unitary) subgoal using ps_P_d1 mat_O_def by auto subgoal using ps_P_d2 by auto subgoal using unitary_mat_O by auto using unitary_one by auto lemma unitary_ex_mat_Ph: "unitary (tensor_P mat_Ph (1\<^sub>m K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_unitary) subgoal using ps_P_d1 mat_Ph_def by auto subgoal using ps_P_d2 by auto subgoal using unitary_mat_Ph by auto using unitary_one by auto lemma unitary_hadamard_on_i: assumes "k < n" shows "unitary (hadamard_on_i k)" proof - interpret st2: partial_state2 dims "{k}" "vars1 - {k}" apply unfold_locales by auto show ?thesis unfolding hadamard_on_i_def st2.pmat_extension_def st2.ptensor_mat_def apply (rule partial_state.tensor_mat_unitary) subgoal unfolding partial_state.d1_def partial_state.dims1_def st2.nths_vars1' st2.dims1_def using dims_nths_one_lt_n assms hadamard_dim by auto subgoal unfolding st2.d2_def st2.dims2_def partial_state.d2_def partial_state.dims2_def st2.nths_vars2' st2.dims1_def by auto subgoal using unitary_hadamard by auto subgoal using unitary_one by auto done qed lemma unitary_exhadamard_on_i: assumes "k < n" shows "unitary (tensor_P (hadamard_on_i k) (1\<^sub>m K))" proof - interpret st2: partial_state2 dims "{k}" "vars1 - {k}" apply unfold_locales by auto have d1: "st2.d0 = partial_state.d1 ps2_P.dims0 ps2_P.vars1'" unfolding partial_state.d1_def partial_state.dims1_def ps2_P.nths_vars1' ps2_P.dims1_def st2.d0_def st2.dims0_def st2.vars0_def using assms apply (subgoal_tac "{k} \ (vars1 - {k}) = vars1") apply simp unfolding vars1_def by auto show ?thesis unfolding ps2_P.ptensor_mat_def apply (rule partial_state.tensor_mat_unitary) subgoal unfolding hadamard_on_i_def st2.pmat_extension_def using 
st2.ptensor_mat_carrier[of hadamard "1\<^sub>m st2.d2"] using d1 by auto subgoal unfolding partial_state.d2_def partial_state.dims2_def ps2_P.nths_vars2' ps2_P.dims2_def dims_vars2 by auto using unitary_hadamard_on_i unitary_one assms by auto qed lemma hadamard_on_i_dim: assumes "k < n" shows "hadamard_on_i k \ carrier_mat N N" proof - interpret st: partial_state2 dims "{k}" "(vars1 - {k})" apply unfold_locales by auto have vars1: "{k} \ (vars1 - {k}) = vars1" unfolding vars1_def using assms by auto show ?thesis unfolding hadamard_on_i_def N_def using st.pmat_extension_carrier unfolding st.d0_def st.dims0_def st.vars0_def using vars1 dims_vars1 by auto qed lemma well_com_hadamard_k: "k \ n \ well_com (hadamard_n k)" proof (induct k) case 0 then show ?case by auto next case (Suc n) then have "well_com (hadamard_n n)" by auto then show ?case unfolding hadamard_n.simps well_com.simps using tensor_P_dim unitary_exhadamard_on_i Suc by auto qed lemma well_com_hadamard_n: "well_com (hadamard_n n)" using well_com_hadamard_k by auto lemma well_com_mat_O: "well_com (Utrans_P vars1 mat_O)" apply (subst Utrans_P_is_tensor_P1) apply simp using tensor_P_dim unitary_ex_mat_O by auto lemma well_com_mat_Ph: "well_com (Utrans_P vars1 mat_Ph)" apply (subst Utrans_P_is_tensor_P1) apply simp using tensor_P_dim unitary_ex_mat_Ph by auto lemma unitary_exmat_incr: "unitary (tensor_P (1\<^sub>m N) (mat_incr K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_unitary) using unitary_mat_incr K unitary_one by (auto simp add: ps_P_d1 ps_P_d2 mat_incr_def) lemma well_com_mat_incr: "well_com (Utrans_P vars2 (mat_incr K))" apply (subst Utrans_P_is_tensor_P2) apply (simp add: mat_incr_def) using tensor_P_dim unitary_exmat_incr by auto lemma well_com_D: "well_com D" unfolding D_def apply auto using well_com_hadamard_n well_com_mat_incr well_com_mat_O well_com_mat_Ph by auto text \Test at while loop\ definition M0 :: "complex mat" where "M0 = mat K K (\(i,j). if i = j \ i \ R then 1 else 0)" lemma hermitian_M0: "hermitian M0" by (auto simp add: hermitian_def M0_def adjoint_eval) lemma M0_dim: "M0 \ carrier_mat K K" unfolding M0_def by auto lemma M0_mult_M0: "M0 * M0 = M0" by (auto simp add: M0_def scalar_prod_def sum_only_one_neq_0) definition M1 :: "complex mat" where "M1 = mat K K (\(i,j). if i = j \ i < R then 1 else 0)" lemma M1_dim: "M1 \ carrier_mat K K" unfolding M1_def by auto lemma hermitian_M1: "hermitian M1" by (auto simp add: hermitian_def M1_def adjoint_eval) lemma M1_mult_M1: "M1 * M1 = M1" by (auto simp add: M1_def scalar_prod_def sum_only_one_neq_0) lemma M1_add_M0: "M1 + M0 = 1\<^sub>m K" unfolding M0_def M1_def by auto text \Test at the end\ definition testN :: "nat \ complex mat" where "testN k = mat N N (\(i,j). if i = k \ j = k then 1 else 0)" lemma hermitian_testN: "hermitian (testN k)" unfolding hermitian_def testN_def by (auto simp add: scalar_prod_def adjoint_eval) lemma testN_mult_testN: "testN k * testN k = testN k" unfolding testN_def by (auto simp add: scalar_prod_def sum_only_one_neq_0) lemma testN_dim: "testN k \ carrier_mat N N" unfolding testN_def by auto definition test_fst_k :: "nat \ complex mat" where "test_fst_k k = mat N N (\(i, j). if (i = j \ i < k) then 1 else 0)" lemma sum_test_k: assumes "m \ N" shows "matrix_sum N (\k. testN k) m = test_fst_k m" proof - have "m \ N \ matrix_sum N (\k. testN k) m = mat N N (\(i, j). 
if (i = j \ i < m) then 1 else 0)" for m proof (induct m) case 0 then show ?case apply simp apply (rule eq_matI) by auto next case (Suc m) then have m: "m < N" by auto then have m': "m \ N" by auto have "matrix_sum N testN (Suc m) = testN m + matrix_sum N testN m" by simp also have "\ = mat N N (\(i, j). if (i = j \ i < (Suc m)) then 1 else 0)" unfolding testN_def Suc(1)[OF m'] apply (rule eq_matI) by auto finally show ?case by auto qed then show ?thesis unfolding test_fst_k_def using assms by auto qed lemma test_fst_kN: "test_fst_k N = 1\<^sub>m N" apply (rule eq_matI) unfolding test_fst_k_def by auto lemma matrix_sum_tensor_P1: "(\k. k < m \ g k \ carrier_mat N N) \ (A \ carrier_mat K K) \ matrix_sum d (\k. tensor_P (g k) A) m = tensor_P (matrix_sum N g m) A" proof (induct m) case 0 show ?case apply (simp) unfolding ps2_P.ptensor_mat_def using ps_P.tensor_mat_zero1[simplified ps_P_d ps_P_d1, of A] by auto next case (Suc m) then have ind: "matrix_sum d (\k. tensor_P (g k) A) m = tensor_P (matrix_sum N g m) A" and dk: "\k. k < m \ g k \ carrier_mat N N" and "A \ carrier_mat K K" by auto have ds: "matrix_sum N g m \ carrier_mat N N" apply (subst matrix_sum_dim) using dk by auto show ?case apply simp apply (subst ind) unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_add1) unfolding ps_P_d1 ps_P_d2 using Suc ds by auto qed text \Grover's algorithm. Assume we start in the zero state\ definition Grover :: com where "Grover = hadamard_n n ;; While_P vars2 M0 M1 D ;; Measure_P vars1 N testN (replicate N SKIP)" lemma well_com_if: "well_com (Measure_P vars1 N testN (replicate N SKIP))" unfolding Measure_P_def apply auto proof - have eq0: "\n. mat_extension dims vars1 (testN n) = tensor_P (testN n) (1\<^sub>m K)" unfolding mat_ext_vars1 by auto have eq1: "adjoint (tensor_P (testN j) (1\<^sub>m K)) * tensor_P (testN j) (1\<^sub>m K) = tensor_P (testN j) (1\<^sub>m K)" for j unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) apply (auto simp add: ps_P_d1 ps_P_d2 testN_dim hermitian_testN[unfolded hermitian_def] hermitian_one[unfolded hermitian_def]) apply (subst ps_P.tensor_mat_mult[symmetric]) by (auto simp add: ps_P_d1 ps_P_d2 testN_dim testN_mult_testN) have "measurement d N (\n. tensor_P (testN n) (1\<^sub>m K))" unfolding measurement_def apply (simp add: tensor_P_dim) apply (subst eq1) apply (subst matrix_sum_tensor_P1) apply (auto simp add: testN_dim) apply (subst sum_test_k, simp) apply (subst test_fst_kN) unfolding ps2_P.ptensor_mat_def using ps_P.tensor_mat_id ps_P_d ps_P_d1 ps_P_d2 by auto then show "measurement d N (\n. 
mat_extension dims vars1 (testN n))" using eq0 by auto show "list_all well_com (replicate N SKIP)" apply (subst list_all_length) by simp qed lemma well_com_while: "well_com (While_P vars2 M0 M1 D)" unfolding While_P_def apply auto apply (subst (1 2) mat_ext_vars2) apply (auto simp add: M1_dim M0_dim) proof - have 2: "2 = Suc (Suc 0)" by auto have ad0: "adjoint (tensor_P (1\<^sub>m N) M0) = (tensor_P (1\<^sub>m N) M0)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) unfolding ps_P_d1 ps_P_d2 by (auto simp add: M0_dim adjoint_one hermitian_M0[unfolded hermitian_def]) have ad1: "adjoint (tensor_P (1\<^sub>m N) M1) = (tensor_P (1\<^sub>m N) M1)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) unfolding ps_P_d1 ps_P_d2 by (auto simp add: M1_dim adjoint_one hermitian_M1[unfolded hermitian_def]) have m0: "tensor_P (1\<^sub>m N) M0 * tensor_P (1\<^sub>m N) M0 = tensor_P (1\<^sub>m N) M0" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[symmetric]) unfolding ps_P_d1 ps_P_d2 using M0_dim M0_mult_M0 by auto have m1: "tensor_P (1\<^sub>m N) M1 * tensor_P (1\<^sub>m N) M1 = tensor_P (1\<^sub>m N) M1" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[symmetric]) unfolding ps_P_d1 ps_P_d2 using M1_dim M1_mult_M1 by auto have s: "tensor_P (1\<^sub>m N) M1 + tensor_P (1\<^sub>m N) M0 = 1\<^sub>m d" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_add2[symmetric]) unfolding ps_P_d1 ps_P_d2 by (auto simp add: M1_dim M0_dim M1_add_M0 ps_P.tensor_mat_id[simplified ps_P_d1 ps_P_d2 ps_P_d]) show "measurement d 2 (\n. if n = 0 then tensor_P (1\<^sub>m N) M0 else if n = 1 then tensor_P (1\<^sub>m N) M1 else undefined)" unfolding measurement_def apply (auto simp add: tensor_P_dim) apply (subst 2) apply (simp add: ad0 ad1 m0 m1) apply (subst assoc_add_mat[symmetric, of _ d d]) using tensor_P_dim s by auto show "well_com D" using well_com_D by auto qed lemma well_com_Grover: "well_com Grover" unfolding Grover_def apply auto using well_com_hadamard_n well_com_if well_com_while by auto subsection \Correctness\ text \Pre-condition: assume in the zero state\ definition ket_pre :: "complex vec" where "ket_pre = Matrix.vec N (\k. if k = 0 then 1 else 0)" lemma ket_pre_dim: "ket_pre \ carrier_vec N" using ket_pre_def by auto definition pre :: "complex mat" where "pre = proj ket_pre" lemma pre_dim: "pre \ carrier_mat N N" using pre_def ket_pre_def by auto lemma norm_pre: "inner_prod ket_pre ket_pre = 1" unfolding ket_pre_def scalar_prod_def using sum_only_one_neq_0[of "{0..i. (if i = 0 then 1 else 0) * cnj (if i = 0 then 1 else 0)"] by auto lemma pre_trace: "trace pre = 1" unfolding pre_def apply (subst trace_outer_prod[of _ N]) subgoal unfolding ket_pre_def by auto using norm_pre by auto lemma positive_pre: "positive pre" using positive_same_outer_prod unfolding pre_def ket_pre_def by auto lemma pre_le_one: "pre \\<^sub>L 1\<^sub>m N" unfolding pre_def using outer_prod_le_one norm_pre ket_pre_def by auto text \Post-condition: should be in a state i with f i = 1\ definition post :: "complex mat" where "post = mat N N (\(i, j). if (i = j \ f i) then 1 else 0)" lemma post_dim: "post \ carrier_mat N N" unfolding post_def by auto lemma hermitian_post: "hermitian post" unfolding hermitian_def post_def by (auto simp add: adjoint_eval) text \Hoare triples of initialization\ definition ket_zero :: "complex vec" where "ket_zero = Matrix.vec 2 (\k. 
if k = 0 then 1 else 0)" lemma ket_zero_dim: "ket_zero \ carrier_vec 2" unfolding ket_zero_def by auto definition proj_zero where "proj_zero = proj ket_zero" definition ket_one where "ket_one = Matrix.vec 2 (\k. if k = 1 then 1 else 0)" definition proj_one where "proj_one = proj ket_one" definition ket_plus where "ket_plus = Matrix.vec 2 (\k.1 / csqrt 2) " lemma ket_plus_dim: "ket_plus \ carrier_vec 2" unfolding ket_plus_def by auto lemma ket_plus_eval [simp]: "i < 2 \ ket_plus $ i = 1 / csqrt 2" apply (simp only: ket_plus_def) using index_vec less_2_cases by force lemma csqrt_2_sq [simp]: "complex_of_real (sqrt 2) * complex_of_real (sqrt 2) = 2" by (smt of_real_add of_real_hom.hom_one of_real_power one_add_one power2_eq_square real_sqrt_pow2) lemma ket_plus_tensor_n: "partial_state.tensor_vec [2, 2] {0} ket_plus ket_plus = Matrix.vec 4 (\k. 1 / 2)" unfolding partial_state.tensor_vec_def state_sig.d_def proof (rule eq_vecI, auto) fix i :: nat assume i: "i < 4" interpret st: partial_state "[2, 2]" "{0}" . have d1_eq: "st.d1 = 2" by (simp add: st.d1_def st.dims1_def nths_def) have "st.encode1 i < st.d1" by (simp add: st.d_def i) then have i1_lt: "st.encode1 i < 2" using d1_eq by auto have d2_eq: "st.d2 = 2" by (simp add: st.d2_def st.dims2_def nths_def) have "st.encode2 i < st.d2" by (simp add: st.d_def i) then have i2_lt: "st.encode2 i < 2" using d2_eq by auto show "ket_plus $ st.encode1 i * ket_plus $ st.encode2 i * 2 = 1" by (auto simp add: i1_lt i2_lt) qed definition proj_plus where "proj_plus = proj ket_plus" lemma hadamard_on_zero: "hadamard *\<^sub>v ket_zero = ket_plus" unfolding hadamard_def ket_zero_def ket_plus_def mat_of_rows_list_def apply (rule eq_vecI, auto simp add: scalar_prod_def) subgoal for i apply (drule less_2_cases) apply (drule disjE, auto) by (subst sum_le_2, auto)+. fun exH_k :: "nat \ complex mat" where "exH_k 0 = hadamard_on_i 0" | "exH_k (Suc k) = exH_k k * hadamard_on_i (Suc k)" fun H_k :: "nat \ complex mat" where "H_k 0 = hadamard" | "H_k (Suc k) = ptensor_mat dims {0.. H_k k \ carrier_mat (2^(Suc k)) (2^(Suc k))" proof (induct k) case 0 then show ?case using hadamard_dim by auto next case (Suc k) interpret st: partial_state2 dims "{0..<(Suc k)}" "{Suc k}" apply unfold_locales by auto have "Suc (Suc k) \ n" using Suc by auto then have "nths dims ({0.. {Suc k} = {0..<(Suc (Suc k))}" by auto ultimately have plssk: "prod_list (nths dims ({0.. {Suc k})) = 2^(Suc (Suc k))" by auto have "dim_col (H_k (Suc k)) = 2^(Suc (Suc k))" using st.ptensor_mat_dim_col unfolding st.d0_def st.dims0_def st.vars0_def using plssk by auto moreover have "dim_row (H_k (Suc k)) = 2^(Suc (Suc k))" using st.ptensor_mat_dim_row unfolding st.d0_def st.dims0_def st.vars0_def using plssk by auto ultimately show ?case by auto qed lemma exH_k_eq_H_k: "k < n \ exH_k k = pmat_extension dims {0..<(Suc k)} {(Suc k)..m st.d2)" using st.pmat_extension_def by auto from dims_nths_one_lt_n[OF Suc(2)] have st1d1: "st1.d1 = 2" unfolding st1.d1_def st1.dims1_def by fastforce have "{Suc k} \ {Suc (Suc k)..m 2) (1\<^sub>m st1.d2) = 1\<^sub>m st.d2" using st1.ptensor_mat_id st1d1 by auto have eql3: "st.ptensor_mat (H_k k) (1\<^sub>m st.d2) = st.ptensor_mat (H_k k) (st1.ptensor_mat (1\<^sub>m 2) (1\<^sub>m st1.d2))" apply (subst eql2[symmetric]) by auto have eqr1: "(st2.pmat_extension hadamard) = st2.ptensor_mat hadamard (1\<^sub>m st2.d2)" using st2.pmat_extension_def by auto have splitset: "{0.. {Suc (Suc k).. 
{Suc (Suc k)..{Suc k} = {0..m 2) (1\<^sub>m st1.d2)) = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m 2)) (1\<^sub>m st1.d2)" apply (subst ptensor_mat_assoc[symmetric, of "{0..m 2" "1\<^sub>m st1.d2", simplified Sksplit]) using Suc length_dims by auto also have "\ = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m 2) (H_k k)) (1\<^sub>m st1.d2)" using ptensor_mat_comm[of "{0.. = ptensor_mat dims {Suc k} ({0.. {Suc (Suc k)..m 2) (ptensor_mat dims {0..m st1.d2))" apply (subst sup_commute) apply (subst ptensor_mat_assoc[of "{Suc k}" "{0..m 2)" "H_k k" "1\<^sub>m st1.d2"]) using Suc length_dims by auto finally have eql4: "st.pmat_extension (H_k k) = st2.ptensor_mat (1\<^sub>m 2) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))" using eql1 eql3 splitset by auto have "st2.ptensor_mat (1\<^sub>m 2) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2)) * st2.ptensor_mat hadamard (1\<^sub>m st2.d2) = st2.ptensor_mat ((1\<^sub>m 2)*hadamard) ((st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))*(1\<^sub>m st2.d2))" apply (rule st2.ptensor_mat_mult[symmetric, of "1\<^sub>m 2" "hadamard" "(st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))" "(1\<^sub>m st2.d2)"]) subgoal unfolding st2.d1_def st2.dims1_def by (simp add: dims_nths_one_lt_n Suc(2)) subgoal unfolding st2.d1_def st2.dims1_def apply (simp add: dims_nths_one_lt_n Suc(2)) using hadamard_dim by auto subgoal unfolding st2.d2_def[unfolded st2.dims2_def] using st3.ptensor_mat_dim_col[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] st3.ptensor_mat_dim_row[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] by auto by auto also have "\ = st2.ptensor_mat (hadamard) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))" unfolding st2.d2_def[unfolded st2.dims2_def] using hadamard_dim st3.ptensor_mat_dim_col[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] st3.ptensor_mat_dim_row[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] by auto also have "\ = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m st3.d2)" apply (subst ptensor_mat_assoc[symmetric, of "{Suc k}" "{0..m st3.d2", simplified splitset]) using Suc length_dims by auto also have "\ = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m st3.d2)" using ptensor_mat_comm[of "{Suc k}"] Sksplit1 by auto also have "\ = ptensor_mat dims ({0..m st3.d2)" using Sksplit1 by auto also have "\ = pmat_extension dims {0..m st.d2)" using st.pmat_extension_def by auto from dims_nths_one_lt_n[OF assms] have st1d1: "st1.d1 = 2" unfolding st1.d1_def st1.dims1_def by fastforce have "{Suc k} \ {Suc (Suc k)..m 2) (1\<^sub>m st1.d2) = 1\<^sub>m st.d2" using st1.ptensor_mat_id st1d1 by auto have eql3: "st.ptensor_mat (H_k k) (1\<^sub>m st.d2) = st.ptensor_mat (H_k k) (st1.ptensor_mat (1\<^sub>m 2) (1\<^sub>m st1.d2))" apply (subst eql2[symmetric]) by auto have eqr1: "(st2.pmat_extension hadamard) = st2.ptensor_mat hadamard (1\<^sub>m st2.d2)" using st2.pmat_extension_def by auto have splitset: "{0.. {Suc (Suc k).. {Suc (Suc k)..{Suc k} = {0..m 2) (1\<^sub>m st1.d2)) = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m 2)) (1\<^sub>m st1.d2)" apply (subst ptensor_mat_assoc[symmetric, of "{0..m 2" "1\<^sub>m st1.d2", simplified Sksplit]) using assms length_dims by auto also have "\ = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m 2) (H_k k)) (1\<^sub>m st1.d2)" using ptensor_mat_comm[of "{0.. = ptensor_mat dims {Suc k} ({0.. 
{Suc (Suc k)..m 2) (ptensor_mat dims {0..m st1.d2))" apply (subst sup_commute) apply (subst ptensor_mat_assoc[of "{Suc k}" "{0..m 2)" "H_k k" "1\<^sub>m st1.d2"]) using assms length_dims by auto finally have "st.pmat_extension (H_k k) = st2.ptensor_mat (1\<^sub>m 2) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))" using eql1 eql3 splitset by auto moreover have "st.pmat_extension (H_k k) = exH_k k" using exH_k_eq_H_k assms by auto ultimately have eql4: "exH_k k = st2.ptensor_mat (1\<^sub>m 2) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))" by auto have "st2.ptensor_mat hadamard (1\<^sub>m st2.d2) * st2.ptensor_mat (1\<^sub>m 2) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2)) = st2.ptensor_mat (hadamard*(1\<^sub>m 2)) ((1\<^sub>m st2.d2)* (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2)))" apply (rule st2.ptensor_mat_mult[symmetric, of "hadamard" "1\<^sub>m 2" "(1\<^sub>m st2.d2)" "(st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))"]) subgoal unfolding st2.d1_def st2.dims1_def apply (simp add: dims_nths_one_lt_n assms) using hadamard_dim by auto subgoal unfolding st2.d1_def st2.dims1_def by (simp add: dims_nths_one_lt_n assms) subgoal by auto subgoal unfolding st2.d2_def[unfolded st2.dims2_def] using st3.ptensor_mat_dim_col[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] st3.ptensor_mat_dim_row[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] by auto done also have "\ = st2.ptensor_mat (hadamard) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2))" unfolding st2.d2_def[unfolded st2.dims2_def] using hadamard_dim st3.ptensor_mat_dim_col[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] st3.ptensor_mat_dim_row[unfolded st3.d0_def st3.dims0_def st3.vars0_def, simplified splitset] by auto also have "\ = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m st3.d2)" apply (subst ptensor_mat_assoc[symmetric, of "{Suc k}" "{0..m st3.d2", simplified splitset]) using assms length_dims by auto also have "\ = ptensor_mat dims ({0..{Suc k}) {Suc (Suc k)..m st3.d2)" using ptensor_mat_comm[of "{Suc k}"] Sksplit1 by auto also have "\ = ptensor_mat dims ({0..m st3.d2)" using Sksplit1 by auto also have "\ = pmat_extension dims {0.. = exH_k (Suc k)" using exH_k_eq_H_k[of "Suc k"] assms by auto finally have "st2.ptensor_mat hadamard (1\<^sub>m st2.d2) * st2.ptensor_mat (1\<^sub>m 2) (st3.ptensor_mat (H_k k) (1\<^sub>m st3.d2)) =exH_k (Suc k)". then show ?thesis unfolding hadamard_on_i_def using eql4 eqr1 by auto qed lemma exH_eq_H: "exH_k (n - 1) = H_k (n - 1)" proof - have "\m. n = Suc (Suc m)" using n by presburger then obtain m where m: "n = Suc (Suc m)" using n by auto then have "exH_k m = pmat_extension dims {0..<(Suc m)} {(Suc m).. carrier_mat (2^(Suc m)) (2^(Suc m))" using H_k_dim by auto then have Hkm1: "(H_k m) * (1\<^sub>m stm2.d1) = (H_k m)" unfolding stm2d1 by auto have eqd12: "stm1.d2 = stm2.d1" unfolding stm1.d2_def stm1.dims2_def stm2.d1_def stm2.dims1_def by auto have "pmat_extension dims {Suc m} {0..m stm1.d2)" using stm1.pmat_extension_def by auto also have "\ = stm2.ptensor_mat (1\<^sub>m stm2.d1) hadamard" using ptensor_mat_comm eqd12 by auto finally have eqr: "(pmat_extension dims {Suc m} {0..m stm2.d1) hadamard". 
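(* Reading aid for the final chain of equalities below, writing (x) for the partial
   tensor product ptensor_mat over the variable split {0..<Suc m} versus {Suc m}.
   The mixed-product property (stm2.ptensor_mat_mult) gives
     (H_k m (x) 1) * (1 (x) hadamard) = (H_k m * 1) (x) (1 * hadamard)
                                      = H_k m (x) hadamard
                                      = H_k (Suc m),
   which is exactly how exH_k (Suc m) is rewritten to H_k (Suc m). This is an
   informal summary, not an additional proof obligation. *)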
then have "exH_k (Suc m) = stm2.ptensor_mat (H_k m) (1\<^sub>m stm2.d2) * stm2.ptensor_mat (1\<^sub>m stm2.d1) hadamard" using eqSm unfolding stm2.pmat_extension_def by auto also have "\ = stm2.ptensor_mat ((H_k m) * (1\<^sub>m stm2.d1)) (1\<^sub>m stm2.d2 * hadamard)" apply (rule stm2.ptensor_mat_mult[symmetric, of "H_k m" "1\<^sub>m stm2.d1" "1\<^sub>m stm2.d2" "hadamard"]) unfolding stm2d1 stm2d2 using H_k_dim m hadamard_dim by auto also have "\ = stm2.ptensor_mat (H_k m) (hadamard)" using H_k_dim hadamard_dim stm2d1 stm2d2 Hkm1 by auto also have "\ = H_k (Suc m)" unfolding stm2.ptensor_mat_def H_k.simps by auto finally have "exH_k (Suc m) = H_k (Suc m)" by auto moreover have "Suc m = n - 1" using m by auto ultimately show ?thesis by auto qed fun ket_zero_k :: "nat \ complex vec" where "ket_zero_k 0 = ket_zero" | "ket_zero_k (Suc k) = ptensor_vec dims {0..<(Suc k)} {Suc k} (ket_zero_k k) ket_zero" lemma ket_zero_k_dim: assumes "k < n" shows "ket_zero_k k \ carrier_vec (2^(Suc k))" proof (cases k) case 0 show ?thesis using ket_zero_dim 0 by auto next case (Suc k) interpret st: partial_state2 dims "{0..<(Suc k)}" "{Suc k}" apply unfold_locales by auto have "Suc (Suc k) \ n" using assms Suc by auto then have "nths dims ({0.. {Suc k} = {0..<(Suc (Suc k))}" by auto ultimately have plssk: "prod_list (nths dims ({0.. {Suc k})) = 2^(Suc (Suc k))" by auto show ?thesis apply (rule carrier_vecI) unfolding ket_zero_k.simps Suc using st.ptensor_vec_dim[of "ket_zero_k k" ket_zero] plssk unfolding st.d0_def st.dims0_def st.vars0_def by auto qed fun ket_plus_k where "ket_plus_k 0 = ket_plus" | "ket_plus_k (Suc k) = ptensor_vec dims {0..<(Suc k)} {Suc k} (ket_plus_k k) ket_plus" lemma ket_plus_k_dim: assumes "k < n" shows "ket_plus_k k \ carrier_vec (2^(Suc k))" proof (cases k) case 0 show ?thesis using ket_plus_dim 0 by auto next case (Suc k) interpret st: partial_state2 dims "{0..<(Suc k)}" "{Suc k}" apply unfold_locales by auto have "Suc (Suc k) \ n" using assms Suc by auto then have "nths dims ({0.. {Suc k} = {0..<(Suc (Suc k))}" by auto ultimately have plssk: "prod_list (nths dims ({0.. {Suc k})) = 2^(Suc (Suc k))" by auto show ?thesis apply (rule carrier_vecI) unfolding ket_zero_k.simps Suc using st.ptensor_vec_dim plssk unfolding st.d0_def st.dims0_def st.vars0_def by auto qed lemma H_k_ket_zero_k: "k < n \ (H_k k) *\<^sub>v (ket_zero_k k) = (ket_plus_k k)" proof (induct k) case 0 show ?case using hadamard_on_zero unfolding H_k.simps ket_zero_k.simps ket_plus_k.simps by auto next case (Suc k) then have k: "k < n" by auto interpret st: partial_state2 dims "{0..<(Suc k)}" "{Suc k}" apply unfold_locales by auto have "nths dims {0..v ket_zero_k (Suc k) = st.ptensor_mat (H_k k) hadamard *\<^sub>v st.ptensor_vec (ket_zero_k k) ket_zero" by auto also have "\ = st.ptensor_vec ((H_k k) *\<^sub>v (ket_zero_k k)) (hadamard *\<^sub>v ket_zero)" using st.ptensor_mat_mult_vec[unfolded std1 std2, OF H_k_dim[OF k] ket_zero_k_dim[OF k] hadamard_dim ket_zero_dim] by auto also have "\ = st.ptensor_vec (ket_plus_k k) ket_plus" using Suc hadamard_on_zero by auto finally show ?case by auto qed lemma encode1_replicate_2: "partial_state.encode1 (replicate (Suc k) 2) {0.. ket_zero_k k = Matrix.vec (2^(Suc k)) (\k. if k = 0 then 1 else 0)" proof (induct k) case 0 show ?case apply (rule eq_vecI) by (auto simp add: ket_zero_def) next case (Suc k) then have k: "k < n" by auto have kzkk: "ket_zero_k k = Matrix.vec (2 ^ Suc k) (\k. 
if (k = 0) then 1 else 0)" using Suc(1)[OF k] by auto have dSk: "ket_zero_k (Suc k) \ carrier_vec (2^(Suc (Suc k)))" using ket_zero_k_dim[OF Suc(2)] by auto interpret st: partial_state "replicate (Suc (Suc k)) 2" "{0.. {Suc k}) = {0..x. (x \ {0.. {y \ {0..x. (x \ {0.. card {y \ {0..g h I. (\x. (x \ I \ g x = h x)) \ {g x | x. x \ I} = {h x | x. x \ I}" by metis have "{card {y \ {0.. {0.. {0.. = {0.. 0" and ile: "i < 2^(Suc (Suc k))" for i proof (cases "i mod (2 ^ Suc k) \ 0") case True then have "ket_zero_k k $ st.encode1 i = 0" unfolding kzkk using encode1_replicate_2[of "Suc k" i] ile by auto then show ?thesis unfolding ket_zero_k.simps st2pvsttv st.tensor_vec_def ket_zero_def std using ile by auto next case False have "i div (2 ^ Suc k) \ 0 \ i mod (2 ^ Suc k) \ 0" using ine0 by fastforce then have "i div (2 ^ Suc k) \ 0" using False by auto moreover have "i div (2 ^ Suc k) < 2" using ile less_mult_imp_div_less by auto ultimately have "i div (2 ^ Suc k) = 1" by auto then have "st.encode2 i = 1" using encode2_replicate_2[of i "Suc k"] ile by auto then have "Matrix.vec 2 (\k. if k = 0 then 1 else 0) $ st.encode2 i = 0" unfolding kzkk by fastforce then show ?thesis unfolding ket_zero_k.simps st2pvsttv st.tensor_vec_def ket_zero_def std using ile by auto qed show ?case apply (rule eq_vecI) subgoal for i using kzkk0 kzkki by auto using carrier_vecD[OF dSk] by auto qed lemma ket_plus_k_decode: "k < n \ ket_plus_k k = Matrix.vec (2^(Suc k)) (\l. 1 / csqrt (2^(Suc k)))" proof (induct k) case 0 then show ?case unfolding ket_plus_k.simps ket_plus_def by auto next case (Suc k) then have kpkk: "ket_plus_k k = Matrix.vec (2 ^ Suc k) (\l. 1 / csqrt (2 ^ Suc k))" by auto have dSk: "ket_plus_k (Suc k) \ carrier_vec (2^(Suc (Suc k)))" using ket_plus_k_dim[OF Suc(2)] by auto interpret st: partial_state "replicate (Suc (Suc k)) 2" "{0.. {Suc k}) = {0..x. (x \ {0.. {y \ {0..x. (x \ {0.. card {y \ {0..g h I. (\x. (x \ I \ g x = h x)) \ {g x | x. x \ I} = {h x | x. x \ I}" by metis have "{card {y \ {0.. {0.. {0.. = {0..{card {0.. {0.. atLeastLessThan_iff card_atLeastLessThan diff_zero less_SucI) then have std1: "st.d1 = 2^(Suc k)" unfolding st.d1_def st.dims1_def nthsSSk2 by auto have "{i. i < Suc (Suc k) \ i \ {Suc k..}} = {Suc k}" by auto then have "nths (replicate (Suc (Suc k)) 2) ({Suc k..}) = replicate 1 2" unfolding nths_replicate by auto moreover have "(- {0..v ket_pre = \" proof - have "exH_k (n - 1) = H_k (n - 1)" using exH_eq_H by auto moreover have "ket_zero_k (n - 1) = ket_pre" using ket_zero_k_decode[of "n - 1"] ket_pre_def N_def n by auto moreover have "ket_plus_k (n - 1) = \" using ket_plus_k_decode[of "n - 1"] \_def N_def n by auto moreover have "H_k (n - 1) *\<^sub>v ket_zero_k (n - 1) = ket_plus_k (n - 1)" using H_k_ket_zero_k n by auto ultimately show ?thesis by auto qed definition ket_k :: "nat \ complex vec" where "ket_k x = Matrix.vec K (\k. 
if k = x then 1 else 0)" lemma ket_k_dim: "ket_k k \ carrier_vec K" unfolding ket_k_def by auto lemma mat_incr_mult_ket_k: "k < K \ (mat_incr K) *\<^sub>v (ket_k k) = (ket_k ((k + 1) mod K))" apply (rule eq_vecI) unfolding mat_incr_def ket_k_def apply (simp add: scalar_prod_def) apply (case_tac "k = K - 1") subgoal for i apply auto by (simp add: sum_only_one_neq_0[of _ "K - 1"]) subgoal for i apply auto by (simp add: sum_only_one_neq_0[of _ "i - 1"]) by auto definition proj_k where "proj_k x = proj (ket_k x)" lemma proj_k_dim: "proj_k k \ carrier_mat K K" unfolding proj_k_def using ket_k_dim by auto lemma norm_ket_k_lt_K: "k < K \ inner_prod (ket_k k) (ket_k k) = 1" unfolding ket_k_def apply (simp add: scalar_prod_def) using sum_only_one_neq_0[of "{0..i. (if i = k then 1 else 0) * cnj (if i = k then 1 else 0)"] by auto lemma norm_ket_k_ge_K: "k \ K \ inner_prod (ket_k k) (ket_k k) = 0" unfolding ket_k_def by (simp add: scalar_prod_def) lemma norm_ket_k: "inner_prod (ket_k k) (ket_k k) \ 1" apply (case_tac "k < K") using norm_ket_k_lt_K norm_ket_k_ge_K by auto lemma proj_k_mat: assumes "k < K" shows "proj_k k = mat K K (\(i, j). if (i = j \ i = k) then 1 else 0)" apply (rule eq_matI) apply (simp add: proj_k_def ket_k_def index_outer_prod) using proj_k_dim by auto lemma positive_proj_k: "positive (proj_k k)" using positive_same_outer_prod unfolding proj_k_def ket_k_def by auto lemma proj_k_le_one: "(proj_k k) \\<^sub>L 1\<^sub>m K" unfolding proj_k_def using outer_prod_le_one norm_ket_k ket_k_def by auto definition proj_psi where "proj_psi = proj \" lemma proj_psi_dim: "proj_psi \ carrier_mat N N" unfolding proj_psi_def \_def by auto lemma norm_psi: "inner_prod \ \ = 1" apply (simp add: \_eval scalar_prod_def) by (metis norm_of_nat norm_of_real of_real_mult of_real_of_nat_eq real_sqrt_mult_self) lemma proj_psi_mat: "proj_psi = mat N N (\k. 1 / N)" unfolding proj_psi_def apply (rule eq_matI, simp_all) apply (simp add: \_def index_outer_prod) apply (smt of_nat_less_0_iff of_real_of_nat_eq of_real_power power2_eq_square real_sqrt_pow2) by (auto simp add: carrier_matD[OF outer_prod_dim[OF \_dim(1) \_dim(1)]]) lemma hermitian_proj_psi: "hermitian proj_psi" unfolding hermitian_def proj_psi_mat apply (rule eq_matI) by (auto simp add: adjoint_eval) lemma hermitian_exproj_psi: "hermitian (tensor_P proj_psi (1\<^sub>m K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_hermitian) using proj_psi_dim ps_P_d1 ps_P_d2 hermitian_proj_psi hermitian_one by auto lemma proj_psi_is_projection: "proj_psi * proj_psi = proj_psi" proof - have "proj_psi * proj_psi = inner_prod \ \ \\<^sub>m proj_psi" unfolding proj_psi_def apply (subst outer_prod_mult_outer_prod) using \_def by auto also have "\ = proj_psi" using \_inner by auto finally show ?thesis. 
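(* Informal remark: the calculation above is the standard rank-one projection
   identity |psi><psi| * |psi><psi| = <psi|psi> * |psi><psi| = |psi><psi|, using
   the normalisation <psi,psi> = 1 established earlier. Together with
   proj_psi_trace and positive_proj_psi below, proj_psi is a positive, trace-one
   orthogonal projection, i.e. the pure state determined by psi. *)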
qed lemma proj_psi_trace: "trace (proj_psi) = 1" unfolding proj_psi_def apply (subst trace_outer_prod[of _ N]) subgoal unfolding \_def by auto using norm_psi by auto lemma positive_proj_psi: "positive (proj_psi)" using positive_same_outer_prod unfolding proj_psi_def \_def by auto lemma proj_psi_le_one: "(proj_psi) \\<^sub>L 1\<^sub>m N" unfolding proj_psi_def using outer_prod_le_one norm_psi \_def by auto lemma hermitian_hadamard_on_k: assumes "k < n" shows "hermitian (hadamard_on_i k)" proof - interpret st2: partial_state2 dims "{k}" "(vars1 - {k})" apply unfold_locales by auto have st2d1: "st2.dims1 = [2]" unfolding st2.dims1_def dims_def using assms dims_nths_one_lt_n local.dims_def st2.dims1_def by auto show "hermitian (hadamard_on_i k)" unfolding hadamard_on_i_def st2.pmat_extension_def st2.ptensor_mat_def apply (rule partial_state.tensor_mat_hermitian) subgoal unfolding partial_state.d1_def partial_state.dims1_def st2.nths_vars1' hadamard_def by (simp add: st2d1) subgoal unfolding partial_state.d2_def partial_state.dims2_def st2.nths_vars2' st2.d2_def by auto subgoal unfolding hermitian_def hadamard_def apply (rule eq_matI) by (auto simp add: adjoint_dim adjoint_eval) using hermitian_one by auto qed lemma hermitian_H_k: "k < n \ hermitian (H_k k)" proof (induct k) case 0 show ?case unfolding H_k.simps hermitian_def hadamard_def apply (rule eq_matI) by (auto simp add: adjoint_dim adjoint_eval) next case (Suc k) interpret st2: partial_state2 dims "{0.. unitary (H_k k)" proof (induct k) case 0 show ?case using unitary_hadamard by auto next case (Suc k) then have k: "k < n" by auto interpret st2: partial_state2 dims "{0.. exH_k k \ carrier_mat N N" apply (induct k) using hadamard_on_i_dim by auto lemma exH_n_dim: shows "exH_k (n - 1) \ carrier_mat N N" using exH_k_dim n by auto lemma unitary_exH_k: shows "k < n \ unitary (exH_k k)" proof (induct k) case 0 then show ?case unfolding exH_k.simps using unitary_hadamard_on_i 0 by auto next case (Suc k) show ?case unfolding exH_k.simps apply (subst unitary_times_unitary[of _ N]) subgoal using exH_k_dim Suc by auto subgoal using hadamard_on_i_dim Suc by auto subgoal using Suc by auto using unitary_hadamard_on_i Suc by auto qed lemma hermitian_exH_n: "hermitian (exH_k (n - 1))" using hermitian_H_k exH_eq_H n by auto lemma exH_k_mult_psi_is_pre: "exH_k (n - 1) *\<^sub>v \ = ket_pre" proof - let ?H = "exH_k (n - 1)" have "hermitian ?H" using hermitian_H_k exH_eq_H n by auto then have eqad: "adjoint ?H = ?H" unfolding hermitian_def by auto have d: "?H \ carrier_mat N N" using exH_k_dim n by auto have "unitary ?H" using unitary_exH_k n by auto then have id: "?H * ?H = 1\<^sub>m N" unfolding unitary_def inverts_mat_def using d apply (subst (2) eqad[symmetric]) by auto have "?H *\<^sub>v \ = ?H *\<^sub>v (?H *\<^sub>v ket_pre)" using exH_k_mult_pre_is_psi by auto also have "\ = (?H * ?H) *\<^sub>v ket_pre" using d ket_pre_def by auto also have "\ = ket_pre" using id ket_pre_def by auto finally show ?thesis by auto qed fun exexH_k :: "nat \ complex mat" where "exexH_k k = tensor_P (exH_k k) (1\<^sub>m K)" lemma unitary_exexH_k: "k < n \ unitary (exexH_k k)" unfolding exexH_k.simps ps2_P.ptensor_mat_def apply (subst partial_state.tensor_mat_unitary) subgoal using exH_k_dim unfolding partial_state.d1_def partial_state.dims1_def ps2_P.nths_vars1' ps2_P.dims1_def dims_vars1 N_def by auto subgoal unfolding partial_state.d2_def partial_state.dims2_def ps2_P.nths_vars2' ps2_P.dims2_def dims_vars2 by auto using unitary_exH_k unitary_one by auto lemma 
exexH_k_dim: "k < n \ exexH_k k \ carrier_mat d d" unfolding exexH_k.simps using ps2_P.ptensor_mat_carrier ps2_P_d0 by auto lemma hoare_seq_utrans: fixes P :: "complex mat" assumes "unitary U1" and "unitary U2" and "is_quantum_predicate P" and dU1: "U1 \ carrier_mat d d" and dU2: "U2 \ carrier_mat d d" shows " \\<^sub>p {adjoint (U2 * U1) * P * (U2 * U1)} Utrans U1;; Utrans U2 {P}" proof - have hp0: "\\<^sub>p {adjoint (U2) * P * (U2)} Utrans U2 {P}" using assms hoare_partial.intros by auto have qp: "is_quantum_predicate (adjoint (U2) * P * (U2))" using qp_close_under_unitary_operator assms by auto then have hp1: "\\<^sub>p {adjoint U1 * (adjoint (U2) * P * (U2)) * U1} Utrans U1 {adjoint (U2) * P * (U2)}" using hoare_partial.intros by auto have dP: "P \ carrier_mat d d" using assms is_quantum_predicate_def by auto have eq: "adjoint U1 * (adjoint U2 * P * U2) * U1 = adjoint (U2 * U1) * P * (U2 * U1)" using dU1 dU2 dP by (mat_assoc d) with hp1 have hp2: "\\<^sub>p {adjoint (U2 * U1) * P * (U2 * U1)} Utrans U1 {adjoint (U2) * P * (U2)}" by auto have "is_quantum_predicate (adjoint U1 * (adjoint U2 * P * U2) * U1)" using qp qp_close_under_unitary_operator assms by auto then have "is_quantum_predicate (adjoint (U2 * U1) * P * (U2 * U1))" using eq by auto then show ?thesis using hoare_partial.intros(3)[OF _ qp assms(3)] hp0 hp2 by auto qed lemma qp_close_after_exexH_k: fixes P :: "complex mat" assumes "is_quantum_predicate P" shows "k < n \ is_quantum_predicate (adjoint (exexH_k k) * P * exexH_k k)" apply (subst qp_close_under_unitary_operator) subgoal using exexH_k_dim by auto subgoal using unitary_exexH_k by auto using assms by auto lemma hoare_hadamard_n: fixes P :: "complex mat" shows "is_quantum_predicate P \ k < n \ \\<^sub>p {adjoint (exexH_k k) * P * exexH_k k} hadamard_n (Suc k) {P}" proof (induct k arbitrary: P) case 0 have qp: "is_quantum_predicate (adjoint (exexH_k 0) * P * exexH_k 0)" using qp_close_under_unitary_operator[OF _ unitary_exhadamard_on_i[of 0]] tensor_P_dim 0 by auto then have "\\<^sub>p {adjoint (exexH_k 0) * P * exexH_k 0} SKIP {adjoint (exexH_k 0) * P * exexH_k 0}" using hoare_partial.intros(1) by auto moreover have "\\<^sub>p {adjoint (exexH_k 0) * P * exexH_k 0} Utrans (tensor_P (hadamard_on_i 0) (1\<^sub>m K)) {P}" using hoare_partial.intros(2) 0 by auto ultimately have "\\<^sub>p {adjoint (exexH_k 0) * P * exexH_k 0} SKIP;; Utrans (tensor_P (hadamard_on_i 0) (1\<^sub>m K)) {P}" using hoare_partial.intros(3) qp 0 by auto then show ?case using qp by auto next case (Suc k) have h1: "\\<^sub>p {adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * P * (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K))} Utrans (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) {P}" using hoare_partial.intros Suc by auto have qp: "is_quantum_predicate (adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * P * (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)))" apply (subst qp_close_under_unitary_operator) subgoal using ps2_P.ptensor_mat_carrier ps2_P_d0 by auto subgoal unfolding ps2_P.ptensor_mat_def apply (subst partial_state.tensor_mat_unitary ) subgoal unfolding partial_state.d1_def partial_state.dims1_def ps2_P.nths_vars1' ps2_P.dims1_def d_vars1 using hadamard_on_i_dim Suc by auto subgoal unfolding partial_state.d2_def partial_state.dims2_def ps2_P.nths_vars2' ps2_P.dims2_def using dims_vars2 by auto using unitary_hadamard_on_i unitary_one Suc by auto using Suc by auto then have h2: "\\<^sub>p {adjoint (exexH_k k) * (adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * P 
* (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K))) * exexH_k k} hadamard_n (Suc k) {adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * P * (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K))}" using Suc by auto have "(tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * exexH_k k = (tensor_P (hadamard_on_i (Suc k) * (exH_k k)) (1\<^sub>m K * (1\<^sub>m K)))" apply (subst ps2_P.ptensor_mat_mult) subgoal using hadamard_on_i_dim ps2_P_d1 Suc by auto subgoal using exH_k_dim ps2_P_d1 Suc by auto using ps2_P_d2 by auto also have "\ = exexH_k (Suc k)" using mult_exH_k_left Suc by auto finally have eq1: "(tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * exexH_k k = exexH_k (Suc k)". then have eq2: "adjoint (exexH_k k) * adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) = adjoint (exexH_k (Suc k))" apply (subst adjoint_mult[symmetric, of _ d d _ d]) subgoal using tensor_P_dim by auto using exexH_k_dim Suc by auto have dP: "P \ carrier_mat d d" using is_quantum_predicate_def Suc by auto moreover have dH: "exexH_k k \ carrier_mat d d" using exexH_k_dim Suc by auto moreover have dHi: "tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K) \ carrier_mat d d" using tensor_P_dim by auto ultimately have eq3: "adjoint (exexH_k k) * (adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * P * tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * exexH_k k = (adjoint (exexH_k k) * adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K))) * P * (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K) * exexH_k k)" by (mat_assoc d) show ?case apply (subst hadamard_n.simps) apply (subst hoare_partial.intros(3)[of _ "adjoint (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K)) * P * (tensor_P (hadamard_on_i (Suc k)) (1\<^sub>m K))"]) subgoal using qp_close_after_exexH_k[of P "Suc k"] Suc by auto subgoal using qp by auto subgoal using Suc by auto subgoal using h2[simplified eq3 eq1 eq2] by auto using h1 by auto qed lemma qp_pre: "is_quantum_predicate (tensor_P pre (proj_k 0))" unfolding is_quantum_predicate_def proof (intro conjI) show "tensor_P pre (proj_k 0) \ carrier_mat d d" using tensor_P_dim by auto interpret st: partial_state dims vars1 . have d1: "st.d1 = N" unfolding st.d1_def st.dims1_def using d_vars1 by auto have d2: "st.d2 = K" unfolding st.d2_def st.dims2_def nths_uminus_vars1 dims_vars2 by auto show "positive (tensor_P pre (proj_k 0))" unfolding ps2_P.ptensor_mat_def ps2_P_dims0 ps2_P_vars1' apply (subst st.tensor_mat_positive) subgoal unfolding pre_def using outer_prod_dim ket_pre_def d1 by auto subgoal unfolding proj_k_def using outer_prod_dim ket_k_def d2 by auto subgoal using positive_pre by auto using positive_proj_k[of 0] K_gt_0 by auto show "tensor_P pre (proj_k 0) \\<^sub>L 1\<^sub>m d" unfolding ps2_P.ptensor_mat_def ps2_P_dims0 ps2_P_vars1' apply (subst st.tensor_mat_le_one) subgoal using pre_def ket_pre_def outer_prod_dim d1 by auto subgoal using proj_k_def K_gt_0 ket_k_def outer_prod_dim d2 by auto using d1 d2 K_gt_0 outer_prod_dim positive_pre positive_proj_k pre_le_one proj_k_le_one by auto qed lemma qp_init_post: "is_quantum_predicate (tensor_P proj_psi (proj_k 0))" unfolding is_quantum_predicate_def proof (intro conjI) show "tensor_P proj_psi (proj_k 0) \ carrier_mat d d" using tensor_P_dim by auto interpret st: partial_state dims vars1 . 
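(* In this tensor decomposition the first factor acts on the register vars1 (dimension N) and the second on the loop counter vars2 (dimension K). *)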
have d1: "st.d1 = N" unfolding st.d1_def st.dims1_def using d_vars1 by auto have d2: "st.d2 = K" unfolding st.d2_def st.dims2_def nths_uminus_vars1 dims_vars2 by auto show "positive (tensor_P proj_psi (proj_k 0))" unfolding ps2_P.ptensor_mat_def ps2_P_dims0 ps2_P_vars1' apply (subst st.tensor_mat_positive) subgoal unfolding proj_psi_def using outer_prod_dim \_def d1 by auto subgoal unfolding proj_k_def using outer_prod_dim ket_k_def d2 by auto subgoal using positive_proj_psi by auto using positive_proj_k[of 0] K_gt_0 by auto show "tensor_P proj_psi (proj_k 0) \\<^sub>L 1\<^sub>m d" unfolding ps2_P.ptensor_mat_def ps2_P_dims0 ps2_P_vars1' apply (subst st.tensor_mat_le_one) subgoal using proj_psi_def outer_prod_dim d1 by auto subgoal using proj_k_def K_gt_0 ket_k_def outer_prod_dim d2 by auto using d1 d2 K_gt_0 outer_prod_dim positive_proj_psi positive_proj_k proj_psi_le_one proj_k_le_one by auto qed lemma tensor_P_adjoint_left_right: assumes "m1 \ carrier_mat N N" and "m2 \ carrier_mat K K" and "m3 \ carrier_mat N N" and "m4 \ carrier_mat K K" shows "adjoint (tensor_P m1 m2) * tensor_P m3 m4 * tensor_P m1 m2 = tensor_P (adjoint m1 * m3 * m1) (adjoint m2 * m4 * m2)" proof - have eq1: "adjoint (tensor_P m1 m2) = tensor_P (adjoint m1) (adjoint m2)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) using ps_P_d1 ps_P_d2 assms by auto have eq2: "adjoint (tensor_P m1 m2) * tensor_P m3 m4 = tensor_P (adjoint m1 * m3) (adjoint m2 * m4)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult) using ps_P_d1 ps_P_d2 assms eq1 unfolding ps2_P.ptensor_mat_def by (auto simp add: adjoint_dim) have eq3: "tensor_P (adjoint m1 * m3) (adjoint m2 * m4) * (tensor_P m1 m2) = tensor_P (adjoint m1 * m3 * m1) (adjoint m2 * m4 * m2)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[of "adjoint m1 * m3"]) using ps_P_d1 ps_P_d2 assms by (auto simp add: adjoint_dim) show ?thesis using eq1 eq2 eq3 by auto qed abbreviation exH_n where "exH_n \ exH_k (n - 1)" lemma hoare_triple_init: "\\<^sub>p {tensor_P pre (proj_k 0)} hadamard_n n {tensor_P proj_psi (proj_k 0)}" proof - have h: "\\<^sub>p {adjoint (exexH_k (n - 1)) * (tensor_P proj_psi (proj_k 0)) * (exexH_k (n - 1))} hadamard_n n {tensor_P proj_psi (proj_k 0)}" using hoare_hadamard_n[OF qp_init_post, of "n - 1"] qp_init_post n by auto have "adjoint (exexH_k (n - 1)) * tensor_P proj_psi (proj_k 0) * exexH_k (n - 1) = tensor_P (adjoint exH_n * proj_psi * exH_n) (adjoint (1\<^sub>m K) * proj_k 0 * 1\<^sub>m K)" unfolding exexH_k.simps apply (subst tensor_P_adjoint_left_right) using exH_k_dim proj_psi_def \_def proj_k_def ket_k_def n by (auto) moreover have "adjoint exH_n * proj_psi * exH_n = pre" unfolding proj_psi_def pre_def apply (subst outer_prod_left_right_mat[of _ N _ N _ N _ N]) subgoal using \_def by auto subgoal using exH_k_dim n by (simp add: adjoint_dim) subgoal using exH_k_dim n by simp apply (subst (1 2) hermitian_exH_n[simplified hermitian_def]) apply (subst (1 2) exH_k_mult_psi_is_pre) by auto moreover have "adjoint (1\<^sub>m K) * (proj_k 0) * (1\<^sub>m K) = proj_k 0" apply (subst adjoint_one) using proj_k_dim[of 0] K_gt_0 by auto ultimately have "adjoint (exexH_k (n - 1)) * tensor_P proj_psi (proj_k 0) * exexH_k (n - 1) = tensor_P pre (proj_k 0)" by auto with h show ?thesis by auto qed text \Hoare triples of while loop\ definition proj_psi_l where "proj_psi_l l = proj (psi_l l)" lemma positive_psi_l: "k < K \ positive (proj_psi_l k)" unfolding proj_psi_l_def apply (subst positive_same_outer_prod) using 
psi_l_dim by auto lemma hermitian_proj_psi_l: "k < K \ hermitian (proj_psi_l k)" using positive_psi_l positive_is_hermitian by auto definition P' where "P' = tensor_P (proj_psi_l R) (proj_k R)" lemma proj_psi_l_dim: "proj_psi_l l \ carrier_mat N N" unfolding proj_psi_l_def using psi_l_def by auto definition Q :: "complex mat" where "Q = matrix_sum d (\l. tensor_P (proj_psi_l l) (proj_k l)) R" lemma psi_l_le_id: shows "proj_psi_l l \\<^sub>L 1\<^sub>m N" proof - have "inner_prod (psi_l l) (psi_l l) = 1" using inner_psi_l by auto then show ?thesis using outer_prod_le_one psi_l_def proj_psi_l_def by auto qed lemma positive_proj_psi_l: shows "positive (proj_psi_l l)" using positive_same_outer_prod proj_psi_l_def psi_l_dim by auto definition proj_fst_k :: "nat \ complex mat" where "proj_fst_k k = mat K K (\(i, j). if (i = j \ i < k) then 1 else 0)" lemma hermitian_proj_fst_k: "adjoint (proj_fst_k k) = proj_fst_k k" by (auto simp add: proj_fst_k_def adjoint_eval) lemma proj_fst_k_is_projection: "proj_fst_k k * proj_fst_k k = proj_fst_k k" by (auto simp add: proj_fst_k_def scalar_prod_def sum_only_one_neq_0) lemma positive_proj_fst_k: "positive (proj_fst_k k)" proof - have "(proj_fst_k k) * adjoint (proj_fst_k k) = (proj_fst_k k)" using hermitian_proj_fst_k proj_fst_k_is_projection by auto then have "\M. M * adjoint M = (proj_fst_k k)" by auto then show ?thesis apply (subst positive_if_decomp) using proj_fst_k_def by auto qed lemma proj_fst_k_le_one: "proj_fst_k k \\<^sub>L 1\<^sub>m K" proof - define M where "M l = mat K K (\(i, j). if (i = j \ i \ l) then (1::complex) else 0)" for l have eq: "1\<^sub>m K - proj_fst_k k = M k" unfolding M_def proj_fst_k_def apply (rule eq_matI) by auto have "M k * M k = M k" unfolding M_def apply (rule eq_matI) apply (simp add: scalar_prod_def) apply (subst sum_only_one_neq_0[of _ j]) by auto moreover have "adjoint (M k) = M k" unfolding M_def apply (rule eq_matI) by (auto simp add: adjoint_eval) ultimately have "M k * adjoint (M k) = M k" by auto then have "\M. M * adjoint M = 1\<^sub>m K - proj_fst_k k" using eq by auto then have "positive (1\<^sub>m K - proj_fst_k k)" apply (subst positive_if_decomp) using proj_fst_k_def by auto then show ?thesis unfolding lowner_le_def using proj_fst_k_def by auto qed lemma sum_proj_k: assumes "m \ K" shows "matrix_sum K (\k. proj_k k) m = proj_fst_k m" proof - have "m \ K \ matrix_sum K (\k. proj_k k) m = mat K K (\(i, j). if (i = j \ i < m) then 1 else 0)" for m proof (induct m) case 0 then show ?case apply simp apply (rule eq_matI) by auto next case (Suc m) then have m: "m < K" by auto then have m': "m \ K" by auto have "matrix_sum K proj_k (Suc m) = proj_k m + matrix_sum K proj_k m" by simp also have "\ = mat K K (\(i, j). if (i = j \ i < (Suc m)) then 1 else 0)" unfolding proj_k_mat[OF m] Suc(1)[OF m'] apply (rule eq_matI) by auto finally show ?case by auto qed then show ?thesis unfolding proj_fst_k_def using assms by auto qed lemma proj_psi_proj_k_le_exproj_k: shows "tensor_P (proj_psi_l k) (proj_k l) \\<^sub>L tensor_P (1\<^sub>m N) (proj_k l)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive_le) subgoal using proj_psi_l_def psi_l_dim ps_P_d1 by auto subgoal using proj_k_def ket_k_def ps_P_d2 by auto subgoal using positive_proj_psi_l by auto subgoal using positive_same_outer_prod proj_k_def ket_k_def by auto subgoal using psi_l_le_id by auto apply (subst lowner_le_refl[of _ K]) by (auto simp add: proj_k_def ket_k_def) definition Q1 :: "complex mat" where "Q1 = matrix_sum d (\l. 
tensor_P (proj_psi'_l l) (proj_k l)) R" lemma tensor_P_left_right_partial1: assumes "m1 \ carrier_mat N N" and "m2 \ carrier_mat N N" and "m3 \ carrier_mat K K" and "m4 \ carrier_mat N N" shows "tensor_P m1 (1\<^sub>m K) * tensor_P m2 m3 * tensor_P m4 (1\<^sub>m K) = tensor_P (m1 * m2 * m4) m3" proof - have "tensor_P m1 (1\<^sub>m K) * tensor_P m2 m3 = tensor_P (m1 * m2) m3" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[symmetric]) using assms ps_P_d1 ps_P_d2 by auto moreover have "tensor_P (m1 * m2) m3 * tensor_P m4 (1\<^sub>m K) = tensor_P (m1 * m2 * m4) m3" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[symmetric]) using assms ps_P_d1 ps_P_d2 by auto ultimately show ?thesis by auto qed lemma tensor_P_left_right_partial2: assumes "m1 \ carrier_mat K K" and "m2 \ carrier_mat K K" and "m3 \ carrier_mat N N" and "m4 \ carrier_mat K K" shows "tensor_P (1\<^sub>m N) m1 * tensor_P m3 m2 * tensor_P (1\<^sub>m N) m4 = tensor_P m3 (m1 * m2 * m4)" proof - have "tensor_P (1\<^sub>m N) m1 * tensor_P m3 m2 = tensor_P m3 (m1 * m2)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[symmetric]) using assms ps_P_d1 ps_P_d2 by auto moreover have "tensor_P m3 (m1 * m2) * tensor_P (1\<^sub>m N) m4 = tensor_P m3 (m1 * m2 * m4)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_mult[symmetric]) using assms ps_P_d1 ps_P_d2 by auto ultimately show ?thesis by auto qed lemma matrix_sum_mult_left_right: fixes A B :: "complex mat" assumes dg: "(\k. k < l \ g k \ carrier_mat m m) " and dA: "A \ carrier_mat m m" and dB: "B \ carrier_mat m m" shows "matrix_sum m (\k. A * g k * B) l = A * matrix_sum m g l * B" proof - have eq: "A * matrix_sum m g l = matrix_sum m (\k. A * g k) l" using matrix_sum_distrib_left assms by auto have "A * matrix_sum m g l * B = matrix_sum m (\k. A * g k * B) l" apply (subst eq) using matrix_sum_mult_right[of l "\k. 
A * g k"] assms by auto then show ?thesis by auto qed lemma mat_O_split: "mat_O = 1\<^sub>m N - 2 \\<^sub>m proj_O" apply (rule eq_matI) unfolding mat_O_def proj_O_def by auto lemma mat_O_mult_psi'_l: "mat_O *\<^sub>v (psi'_l l) = psi_l l" proof - have "mat_O *\<^sub>v (psi'_l l) = mat_O *\<^sub>v ((alpha_l l) \\<^sub>v \) - mat_O *\<^sub>v ((beta_l l) \\<^sub>v \)" unfolding psi'_l_def apply (subst mult_minus_distrib_mat_vec) using mat_O_dim \_dim \_dim by auto also have "\ = (alpha_l l) \\<^sub>v (mat_O *\<^sub>v \) - (beta_l l) \\<^sub>v (mat_O *\<^sub>v \)" using mult_mat_vec_smult_vec_assoc[of mat_O N N] mat_O_dim \_dim \_dim by auto also have "\ = (alpha_l l) \\<^sub>v \ - (beta_l l) \\<^sub>v (- \)" using mat_O_mult_alpha mat_O_mult_beta by auto also have "\ = (alpha_l l) \\<^sub>v \ + (beta_l l) \\<^sub>v \" by auto finally show ?thesis unfolding psi_l_def by auto qed lemma mat_O_times_Q1: "adjoint (tensor_P mat_O (1\<^sub>m K)) * Q1 * (tensor_P mat_O (1\<^sub>m K)) = Q" proof - let ?m1 = "tensor_P mat_O (1\<^sub>m K)" have eq:"adjoint ?m1 = ?m1" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) apply (auto simp add: mat_O_dim ps_P_d1 ps_P_d2) by (simp add: hermitian_mat_O[unfolded hermitian_def] hermitian_one[unfolded hermitian_def]) { fix l let ?m2 = "tensor_P (proj_psi'_l l) (proj_k l)" have "?m1 * ?m2 * ?m1 = tensor_P (mat_O * (proj_psi'_l l) * mat_O) (proj_k l)" apply (subst tensor_P_left_right_partial1) using mat_O_dim proj_psi'_dim proj_k_dim by auto moreover have "mat_O * (proj_psi'_l l) * mat_O = outer_prod (psi_l l) (psi_l l)" unfolding proj_psi'_l_def apply (subst outer_prod_left_right_mat[of _ N _ N _ N _ N]) using psi'_l_dim mat_O_dim mat_O_mult_psi'_l hermitian_mat_O[unfolded hermitian_def] by auto ultimately have "?m1 * ?m2 * ?m1 = tensor_P (proj_psi_l l) (proj_k l)" unfolding proj_psi_l_def by auto } note p1 = this have "adjoint (tensor_P mat_O (1\<^sub>m K)) * Q1 * (tensor_P mat_O (1\<^sub>m K)) = ?m1 * Q1 * ?m1" using eq by auto also have "\ = matrix_sum d (\l. ?m1 * (tensor_P (proj_psi'_l l) (proj_k l)) * ?m1) R" unfolding Q1_def apply (subst matrix_sum_mult_left_right) using tensor_P_dim by auto also have "\ = Q" unfolding Q_def using p1 by auto finally show ?thesis by auto qed definition Q2 where "Q2 = matrix_sum d (\l. tensor_P (proj_psi_l (l + 1)) (proj_k l)) R" lemma Q2_dim: "Q2 \ carrier_mat d d" unfolding Q2_def apply (subst matrix_sum_dim) using tensor_P_dim by auto lemma Q2_le_one: "Q2 \\<^sub>L 1\<^sub>m d" proof - have leq: "Q2 \\<^sub>L matrix_sum d (\k. tensor_P (1\<^sub>m N) (proj_k k)) R" unfolding Q2_def apply (subst lowner_le_matrix_sum) subgoal using tensor_P_dim by auto subgoal using tensor_P_dim by auto using proj_psi_proj_k_le_exproj_k by auto have "matrix_sum d (\k. 
tensor_P (1\<^sub>m N) (proj_k k)) R = tensor_P (1\<^sub>m N) (matrix_sum K proj_k R)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_matrix_sum2[simplified ps_P_d ps_P_d2]) subgoal using ps_P_d1 by auto using proj_k_dim by auto also have "\ = tensor_P (1\<^sub>m N) (proj_fst_k R)" using sum_proj_k K by auto also have "\ \\<^sub>L tensor_P (1\<^sub>m N) (1\<^sub>m K)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive_le) subgoal using ps_P_d1 by auto subgoal using ps_P_d2 proj_fst_k_def by auto subgoal using positive_one by auto subgoal using positive_proj_fst_k by auto subgoal using lowner_le_refl[of "1\<^sub>m N" N] by auto using proj_fst_k_le_one by auto also have "\ = 1\<^sub>m d" unfolding ps2_P.ptensor_mat_def using ps_P.tensor_mat_id ps_P_d1 ps_P_d2 ps_P_d by auto finally have leq2: "matrix_sum d (\k. tensor_P (1\<^sub>m N) (proj_k k)) R \\<^sub>L 1\<^sub>m d" by auto have ds: "matrix_sum d (\k. tensor_P (1\<^sub>m N) (proj_k k)) R \ carrier_mat d d" apply (subst matrix_sum_dim) using tensor_P_dim by auto then show ?thesis using leq leq2 lowner_le_trans[OF Q2_dim ds, of "1\<^sub>m d"] by auto qed lemma qp_Q2: "is_quantum_predicate Q2" unfolding is_quantum_predicate_def proof (intro conjI) show "Q2 \ carrier_mat d d" unfolding Q2_def apply (subst matrix_sum_dim) using tensor_P_dim by auto next show "positive Q2" unfolding Q2_def apply (subst matrix_sum_positive) subgoal using tensor_P_dim by auto subgoal for k unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) subgoal using proj_psi_l_def psi_l_dim ps_P_d1 by auto subgoal using proj_k_dim ps_P_d2 K by auto subgoal using positive_proj_psi_l by auto using positive_proj_k K by auto by auto next show "Q2 \\<^sub>L 1\<^sub>m d" using Q2_le_one by auto qed lemma pre_mat: "pre = mat N N (\(i, j). 
if i = j \ i = 0 then 1 else 0)" apply (rule eq_matI) subgoal for i j unfolding pre_def apply (subst index_outer_prod[OF ket_pre_dim ket_pre_dim]) apply simp_all unfolding ket_pre_def by auto using outer_prod_dim[OF ket_pre_dim ket_pre_dim, folded pre_def] by auto lemma mat_Ph_split: "mat_Ph = 2 \\<^sub>m pre - 1\<^sub>m N" unfolding mat_Ph_def pre_mat apply (rule eq_matI) by auto lemma H_Ph_H: "exexH_k (n-1) * tensor_P mat_Ph (1\<^sub>m K) * exexH_k (n - 1) = 2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d" unfolding mat_Ph_split exexH_k.simps apply (subst tensor_P_left_right_partial1) subgoal using exH_k_dim[of "n - 1"] n by auto subgoal using pre_dim by auto subgoal by auto proof - have eq1: "exH_n * exH_n = 1\<^sub>m N" using unitary_exH_k[of "n - 1"] unfolding unitary_def inverts_mat_def using n hermitian_exH_n[simplified hermitian_def] exH_n_dim by auto have eq2: "exH_n * pre * exH_n = proj_psi" unfolding pre_def proj_psi_def apply (subst outer_prod_left_right_mat[of _ N _ N _ N _ N]) subgoal using ket_pre_dim by auto subgoal using exH_n_dim by auto apply (subst hermitian_exH_n[simplified hermitian_def]) using exH_k_mult_pre_is_psi by auto have eq3: "exH_n * (2 \\<^sub>m pre) * exH_n = 2 \\<^sub>m (exH_n * pre * exH_n)" using pre_dim exH_n_dim by (mat_assoc N) have "exH_n * (2 \\<^sub>m pre - 1\<^sub>m N) * exH_n = exH_n * (2 \\<^sub>m pre) * exH_n - exH_n * exH_n" using pre_dim exH_n_dim apply (mat_assoc N) by auto also have "\ = 2 \\<^sub>m (exH_n * pre * exH_n) - 1\<^sub>m N" using eq1 eq3 by auto finally have eq4: "exH_n * (2 \\<^sub>m pre - 1\<^sub>m N) * exH_n = 2 \\<^sub>m proj_psi - 1\<^sub>m N" using eq2 by auto show "tensor_P (exH_n * (2 \\<^sub>m pre - 1\<^sub>m N) * exH_n) (1\<^sub>m K) = 2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d" unfolding eq4 unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_minus1) unfolding ps_P_d1 ps_P_d2 apply (auto simp add: proj_psi_dim) apply (subst ps_P.tensor_mat_scale1) unfolding ps_P_d1 ps_P_d2 apply (auto simp add: proj_psi_dim) apply (subst ps_P.tensor_mat_id[simplified ps_P_d1 ps_P_d2 ps_P_d]) by auto qed lemma hermitian_proj_psi_minus_1: "hermitian (2 \\<^sub>m proj_psi - 1\<^sub>m N)" unfolding hermitian_def apply (subst adjoint_minus[of _ N N]) apply (auto simp add: proj_psi_dim) apply (subst adjoint_scale) using hermitian_proj_psi[simplified hermitian_def] hermitian_def adjoint_one by auto lemma unitary_proj_psi_minus_1: "unitary (2 \\<^sub>m proj_psi - 1\<^sub>m N)" proof - have a: "adjoint (2 \\<^sub>m proj_psi) = 2 \\<^sub>m proj_psi" apply (subst adjoint_scale) using hermitian_proj_psi[simplified hermitian_def] by simp have eq: "adjoint (2 \\<^sub>m proj_psi - 1\<^sub>m N) = 2 \\<^sub>m proj_psi - 1\<^sub>m N" apply (subst adjoint_minus) using proj_psi_dim a adjoint_one by auto have "(2 \\<^sub>m proj_psi) * (2 \\<^sub>m proj_psi) = 4 \\<^sub>m (proj_psi * proj_psi)" using proj_psi_dim by auto also have "\ = 4 \\<^sub>m proj_psi" using proj_psi_is_projection by auto finally have sq: "(2 \\<^sub>m proj_psi) * (2 \\<^sub>m proj_psi) = 4 \\<^sub>m proj_psi". 
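(* Expanding (2 * proj_psi - 1) squared with the identity above gives 4 * proj_psi - 2 * proj_psi - 2 * proj_psi + 1 = 1, so the reflection about the uniform superposition is self-inverse and hence unitary. *)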
have l: "(2 \\<^sub>m proj_psi) * (2 \\<^sub>m proj_psi - 1\<^sub>m N) = 4 \\<^sub>m proj_psi - (2 \\<^sub>m proj_psi)" apply (subst mult_minus_distrib_mat) using proj_psi_dim sq by auto have "(2 \\<^sub>m proj_psi - 1\<^sub>m N) * adjoint (2 \\<^sub>m proj_psi - 1\<^sub>m N) = (2 \\<^sub>m proj_psi - 1\<^sub>m N) * (2 \\<^sub>m proj_psi - 1\<^sub>m N)" using eq by auto also have "\ = (2 \\<^sub>m proj_psi) * (2 \\<^sub>m proj_psi - 1\<^sub>m N) - 2 \\<^sub>m proj_psi + 1\<^sub>m N" apply (subst minus_mult_distrib_mat[of _ N N]) using proj_psi_dim by auto also have "\ = 4 \\<^sub>m proj_psi - (2 \\<^sub>m proj_psi) - 2 \\<^sub>m proj_psi + 1\<^sub>m N" using l by auto also have "\ = 1\<^sub>m N" using proj_psi_dim by auto finally have "(2 \\<^sub>m proj_psi - 1\<^sub>m N) * adjoint (2 \\<^sub>m proj_psi - 1\<^sub>m N) = 1\<^sub>m N". then show ?thesis unfolding unitary_def inverts_mat_def using proj_psi_dim by auto qed lemma proj_psi_minus_1_mult_psi'_l: "(2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v psi'_l l = psi_l (l + 1)" proof - have eq1: "(2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v psi'_l l = 2 \\<^sub>m proj_psi *\<^sub>v psi'_l l - psi'_l l" apply (subst minus_mult_distrib_mat_vec) using psi'_l_dim proj_psi'_dim proj_psi_dim by auto have eq2: "2 \\<^sub>m proj_psi *\<^sub>v (psi'_l l) = 2 \\<^sub>v (proj_psi *\<^sub>v (psi'_l l))" apply (subst smult_mat_mult_mat_vec_assoc) using proj_psi_dim psi'_l_dim by auto have "proj_psi *\<^sub>v (psi'_l l) = inner_prod \ (psi'_l l) \\<^sub>v \" unfolding proj_psi_def apply (subst outer_prod_mult_vec[of _ N _ N]) using \_dim psi'_l_dim by auto also have "\ = ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) \\<^sub>v \" using psi_inner_psi'_l by auto finally have "proj_psi *\<^sub>v (psi'_l l) = ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) \\<^sub>v \" by auto then have eq3: "2 \\<^sub>v (proj_psi *\<^sub>v (psi'_l l)) = 2 * ((alpha_l l) * ccos (\ / 2) - (beta_l l) * csin (\ / 2)) \\<^sub>v \" by auto then show "(2 \\<^sub>m proj_psi - (1\<^sub>m N)) *\<^sub>v (psi'_l l) = psi_l (l + 1)" using eq1 eq2 eq3 psi_l_Suc_l_derive by simp qed lemma proj_psi_minus_1_mult_psi_Suc_l: "(2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v psi_l (l + 1) = psi'_l l" proof - have id: "(2 \\<^sub>m proj_psi - 1\<^sub>m N) * (2 \\<^sub>m proj_psi - 1\<^sub>m N) = 1\<^sub>m N" using unitary_proj_psi_minus_1 unfolding unitary_def hermitian_proj_psi_minus_1[simplified hermitian_def] unfolding inverts_mat_def by auto have "(2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v psi_l (l + 1) = (2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v ((2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v psi'_l l)" using proj_psi_minus_1_mult_psi'_l by auto also have "\ = ((2 \\<^sub>m proj_psi - 1\<^sub>m N) * (2 \\<^sub>m proj_psi - 1\<^sub>m N) *\<^sub>v psi'_l l)" apply (subst assoc_mult_mat_vec) using proj_psi_dim psi'_l_dim by auto also have "\ = psi'_l l" using psi'_l_dim id by auto finally show ?thesis by auto qed lemma exproj_psi_minus_1_tensor: "(2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K)) - 1\<^sub>m d = tensor_P (2 \\<^sub>m proj_psi - (1\<^sub>m N)) (1\<^sub>m K)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_id[symmetric, simplified ps_P_d]) apply (auto simp add: ps_P_d1 ps_P_d2) apply (subst ps_P.tensor_mat_scale1[symmetric]) apply (auto simp add: ps_P_d1 ps_P_d2 proj_psi_dim) apply (subst ps_P.tensor_mat_minus1) by (auto simp add: ps_P_d1 ps_P_d2 proj_psi_dim) lemma unitary_exproj_psi_minus_1: "unitary (2 \\<^sub>m tensor_P proj_psi 
(1\<^sub>m K) - 1\<^sub>m d)" unfolding exproj_psi_minus_1_tensor unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_unitary) using ps_P_d1 ps_P_d2 unitary_proj_psi_minus_1 unitary_one by auto lemma proj_psi_minus_1_Q2: "adjoint (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) * Q2 * (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) = Q1" proof - have eq1: "adjoint (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) = 2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d" apply (subst adjoint_minus[of _ d d]) subgoal using tensor_P_dim[of proj_psi] by auto subgoal by auto apply (subst adjoint_one) apply (subst adjoint_scale) using hermitian_exproj_psi[simplified hermitian_def] by auto let ?m1 = "tensor_P (2 \\<^sub>m proj_psi - (1\<^sub>m N)) (1\<^sub>m K)" { fix l let ?m2 = "tensor_P (proj_psi_l (l + 1)) (proj_k l)" have 121: "?m1 * ?m2 * ?m1 = tensor_P ((2 \\<^sub>m proj_psi - (1\<^sub>m N)) * (proj_psi_l (l + 1)) * (2 \\<^sub>m proj_psi - (1\<^sub>m N))) (proj_k l)" apply (subst tensor_P_left_right_partial1) using proj_psi_dim proj_psi_l_dim proj_k_dim by auto have "(2 \\<^sub>m proj_psi - (1\<^sub>m N)) * (proj_psi_l (l + 1)) * (2 \\<^sub>m proj_psi - (1\<^sub>m N)) = outer_prod ((2 \\<^sub>m proj_psi - (1\<^sub>m N)) *\<^sub>v (psi_l (l + 1))) ((2 \\<^sub>m proj_psi - (1\<^sub>m N)) *\<^sub>v (psi_l (l + 1)))" unfolding proj_psi_l_def apply (subst outer_prod_left_right_mat[of _ N _ N _ N _ N]) using proj_psi_dim psi_l_dim hermitian_proj_psi_minus_1[simplified hermitian_def] by auto also have "\ = outer_prod (psi'_l l) (psi'_l l)" using proj_psi_minus_1_mult_psi_Suc_l by auto finally have "(2 \\<^sub>m proj_psi - (1\<^sub>m N)) * (proj_psi_l (l + 1)) * (2 \\<^sub>m proj_psi - (1\<^sub>m N)) = outer_prod (psi'_l l) (psi'_l l)". then have "?m1 * ?m2 * ?m1 = tensor_P (proj_psi'_l l) (proj_k l)" using 121 proj_psi'_l_def by auto } note p1 = this have "adjoint (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) * Q2 * (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) = (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) * Q2 * (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d)" using eq1 by auto also have "\ = matrix_sum d (\l. (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d) * tensor_P (proj_psi_l (l + 1)) (proj_k l) * (2 \\<^sub>m tensor_P proj_psi (1\<^sub>m K) - 1\<^sub>m d)) R" unfolding Q2_def apply (subst matrix_sum_mult_left_right) using tensor_P_dim by auto also have "\ = matrix_sum d (\l. 
tensor_P (proj_psi'_l l) (proj_k l)) R" using p1 exproj_psi_minus_1_tensor by auto also have "\ = Q1" unfolding Q1_def by auto finally show ?thesis using eq1 by auto qed lemma qp_Q1: "is_quantum_predicate Q1" unfolding proj_psi_minus_1_Q2[symmetric] apply (subst qp_close_under_unitary_operator) using tensor_P_dim unitary_exproj_psi_minus_1 qp_Q2 by auto lemma qp_Q: "is_quantum_predicate Q" proof - have u: "unitary (tensor_P mat_O (1\<^sub>m K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_unitary) subgoal unfolding ps_P_d1 mat_O_def by auto subgoal unfolding ps_P_d2 by auto subgoal using unitary_mat_O by auto using unitary_one by auto then show ?thesis using tensor_P_dim qp_Q1 using qp_close_under_unitary_operator[OF tensor_P_dim u qp_Q1] by (simp add: mat_O_times_Q1 ) qed lemma hoare_triple_D1: "\\<^sub>p {Q} Utrans_P vars1 mat_O {Q1}" unfolding Utrans_P_is_tensor_P1 mat_O_times_Q1[symmetric] apply (subst hoare_partial.intros(2)) using qp_Q1 by auto lemma hoare_triple_D2: "\\<^sub>p {Q1} hadamard_n n ;; Utrans_P vars1 mat_Ph ;; hadamard_n n {Q2}" proof - let ?H = "exexH_k (n - 1)" let ?Ph = "tensor_P mat_Ph (1\<^sub>m K)" let ?O = "tensor_P mat_O (1\<^sub>m K)" have h1: "\\<^sub>p {adjoint ?H * Q2 * ?H} hadamard_n n {Q2}" using hoare_hadamard_n[OF qp_Q2, of "n - 1"] n by auto have qp1: "is_quantum_predicate ((adjoint ?H) * Q2 * ?H)" using qp_close_under_unitary_operator unitary_exexH_k n exexH_k_dim qp_Q2 by auto then have h2: "\\<^sub>p {adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph} Utrans_P vars1 mat_Ph {adjoint ?H * Q2 * ?H}" using qp1 Utrans_P_is_tensor_P1 hoare_partial.intros by auto have qp2: "is_quantum_predicate (adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph)" using qp_close_under_unitary_operator[of "tensor_P mat_Ph (1\<^sub>m K)"] ps2_P.ptensor_mat_carrier ps2_P_d0 unitary_ex_mat_Ph qp1 by auto then have h3: "\\<^sub>p {adjoint ?H * (adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph) * ?H} hadamard_n n {adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph}" using hoare_hadamard_n[OF qp2, of "n - 1"] n by auto have qp3: "is_quantum_predicate (adjoint ?H * (adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph) * ?H)" using qp_close_under_unitary_operator[of "?H"] exexH_k_dim unitary_exexH_k qp2 n by auto have h4: "\\<^sub>p {adjoint ?H * (adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph) * ?H} hadamard_n n ;; Utrans_P vars1 mat_Ph {adjoint ?H * Q2 * ?H}" using h2 h3 qp1 qp2 qp3 hoare_partial.intros by auto then have h5: "\\<^sub>p {adjoint ?H * (adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph) * ?H} hadamard_n n ;; Utrans_P vars1 mat_Ph ;; hadamard_n n {Q2}" using h1 qp_Q2 qp3 qp1 hoare_partial.intros(3)[OF qp3 qp1 qp_Q2 h4 h1] by auto have "adjoint ?H * (adjoint ?Ph * (adjoint ?H * Q2 * ?H) * ?Ph) * ?H = adjoint (?H * ?Ph * ?H) * Q2 * (?H * ?Ph * ?H)" apply (mat_assoc d) using exexH_k_dim n tensor_P_dim Q2_dim by auto also have "\ = Q1" using H_Ph_H proj_psi_minus_1_Q2 by auto finally show ?thesis using h5 by auto qed definition exM0 where "exM0 = tensor_P (1\<^sub>m N) M0" lemma M0_mult_ket_k_R: "M0 *\<^sub>v ket_k R = ket_k R" apply (rule eq_vecI) unfolding M0_def ket_k_def by (auto simp add: scalar_prod_def sum_only_one_neq_0) lemma exP0_P': "adjoint exM0 * P' * exM0 = P'" proof - have eq: "adjoint exM0 = exM0" unfolding exM0_def ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) unfolding ps_P_d1 ps_P_d2 using M0_dim adjoint_one hermitian_M0[unfolded hermitian_def] by auto have eq2: "M0 * (proj_k R) * M0 = (proj_k R)" unfolding proj_k_def apply (subst outer_prod_left_right_mat[of _ K _ K _ K 
_ K]) unfolding hermitian_M0[unfolded hermitian_def] M0_mult_ket_k_R using ket_k_dim M0_dim by auto show ?thesis unfolding eq unfolding exM0_def P'_def apply (subst tensor_P_left_right_partial2) using M0_dim proj_k_dim eq2 proj_psi_l_dim by auto qed definition exM1 where "exM1 = tensor_P (1\<^sub>m N) M1" lemma M1_mult_ket_k: assumes "k < R" shows "M1 *\<^sub>v ket_k k = ket_k k" apply (rule eq_vecI) unfolding M1_def ket_k_def by (auto simp add: scalar_prod_def assms R sum_only_one_neq_0) lemma exP1_Q: "adjoint exM1 * Q * exM1 = Q" proof - have eq: "adjoint exM1 = exM1" unfolding exM1_def ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) unfolding ps_P_d1 ps_P_d2 using M1_dim adjoint_one hermitian_M1[unfolded hermitian_def] by auto { fix k assume k: "k < R" let ?m = "tensor_P (proj_psi_l k) (proj_k k)" have "exM1 * ?m * exM1 = tensor_P (proj_psi_l k) (M1 * (proj_k k) * M1)" unfolding exM1_def apply (subst tensor_P_left_right_partial2) using M1_dim proj_k_dim proj_psi_l_dim by auto also have "\ = tensor_P (proj_psi_l k) (outer_prod (M1 *\<^sub>v ket_k k) (M1 *\<^sub>v ket_k k))" unfolding proj_k_def apply (subst outer_prod_left_right_mat[of _ K _ K _ K _ K]) unfolding hermitian_M1[unfolded hermitian_def] using ket_k_dim M1_dim by auto finally have "exM1 * ?m * exM1 = ?m" unfolding proj_k_def using k M1_mult_ket_k by auto } note p1 = this have "adjoint exM1 * Q * exM1 = exM1 * Q * exM1" using eq by auto also have "\ = matrix_sum d (\k. exM1 * (tensor_P (proj_psi_l k) (proj_k k)) * exM1) R" unfolding Q_def apply (subst matrix_sum_mult_left_right) using tensor_P_dim exM1_def by auto also have "\ = matrix_sum d (\k. tensor_P (proj_psi_l k) (proj_k k)) R" apply (subst matrix_sum_cong) using p1 by auto finally show ?thesis using Q_def by auto qed lemma qp_P': "is_quantum_predicate P'" unfolding is_quantum_predicate_def proof (intro conjI) show "P' \ carrier_mat d d" unfolding P'_def using tensor_P_dim by auto show "positive P'" unfolding P'_def ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) apply (auto simp add: ps_P_d1 ps_P_d2 proj_O_dim proj_k_dim) using proj_psi_l_dim positive_proj_psi_l positive_proj_k K by auto show "P' \\<^sub>L 1\<^sub>m d" unfolding P'_def ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_le_one[simplified ps_P_d]) by (auto simp add: ps_P_d1 ps_P_d2 proj_psi_l_dim K proj_k_dim positive_proj_psi_l positive_proj_k proj_k_le_one psi_l_le_id) qed lemma P'_add_Q: "P' + Q = matrix_sum d (\l. tensor_P (proj_psi_l l) (proj_k l)) (R + 1)" apply simp unfolding P'_def Q_def by auto lemma positive_Qk: "positive (tensor_P (proj_psi_l l) (proj_k l))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) unfolding ps_P_d1 ps_P_d2 using proj_psi_l_dim proj_k_dim positive_proj_psi_l positive_proj_k by auto lemma P'_Q_dim: "P' + Q \ carrier_mat d d" unfolding P'_add_Q apply (subst matrix_sum_dim) using tensor_P_dim by auto lemma P'_add_Q_le_one: "P' + Q \\<^sub>L 1\<^sub>m d" proof - have leq: "matrix_sum d (\l. tensor_P (proj_psi_l l) (proj_k l)) (R + 1) \\<^sub>L matrix_sum d (\k. tensor_P (1\<^sub>m N) (proj_k k)) (R + 1)" unfolding Q2_def apply (subst lowner_le_matrix_sum) subgoal using tensor_P_dim by auto subgoal using tensor_P_dim by auto using proj_psi_proj_k_le_exproj_k by auto have "matrix_sum d (\k. 
tensor_P (1\<^sub>m N) (proj_k k)) (R + 1) = tensor_P (1\<^sub>m N) (matrix_sum K proj_k (R + 1))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_matrix_sum2[simplified ps_P_d ps_P_d2]) subgoal using ps_P_d1 by auto using proj_k_dim by auto also have "\ = tensor_P (1\<^sub>m N) (proj_fst_k (R + 1))" using sum_proj_k[of "R + 1"] K by auto also have "\ \\<^sub>L tensor_P (1\<^sub>m N) (1\<^sub>m K)" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive_le) subgoal using ps_P_d1 by auto subgoal using ps_P_d2 proj_fst_k_def by auto subgoal using positive_one by auto subgoal using positive_proj_fst_k by auto subgoal using lowner_le_refl[of "1\<^sub>m N" N] by auto using proj_fst_k_le_one by auto also have "\ = 1\<^sub>m d" unfolding ps2_P.ptensor_mat_def using ps_P.tensor_mat_id ps_P_d1 ps_P_d2 ps_P_d by auto finally have leq2: "matrix_sum d (\k. tensor_P (1\<^sub>m N) (proj_k k)) (R + 1) \\<^sub>L 1\<^sub>m d" by auto have ds: "matrix_sum d (\k. tensor_P (1\<^sub>m N) (proj_k k)) (R + 1) \ carrier_mat d d" apply (subst matrix_sum_dim) using tensor_P_dim by auto then show ?thesis using leq leq2 lowner_le_trans[OF P'_Q_dim ds, of "1\<^sub>m d"] unfolding P'_add_Q by auto qed lemma qp_P'_Q: "is_quantum_predicate (P' + Q)" unfolding is_quantum_predicate_def proof (intro conjI) show "P' + Q \ carrier_mat d d" unfolding P'_add_Q apply (subst matrix_sum_dim) using tensor_P_dim by auto show "positive (P' + Q)" unfolding P'_add_Q apply (subst matrix_sum_positive) using tensor_P_dim positive_Qk by auto show " P' + Q \\<^sub>L 1\<^sub>m d" using P'_add_Q_le_one by auto qed lemma Q2_leq_lemma: "tensor_P (1\<^sub>m N) (mat_incr K) * Q2 * adjoint (tensor_P (1\<^sub>m N) (mat_incr K)) \\<^sub>L P' + Q" proof - have ad: "adjoint (tensor_P (1\<^sub>m N) (mat_incr K)) = tensor_P (1\<^sub>m N) (adjoint (mat_incr K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) using ps_P_d1 ps_P_d2 mat_incr_dim adjoint_one by auto let ?m1 = "tensor_P (1\<^sub>m N) (mat_incr K)" let ?m3 = "tensor_P (1\<^sub>m N) (adjoint (mat_incr K))" { fix l assume "l < R" then have "l < K - 1" using K by auto then have m: "(mat_incr K) *\<^sub>v (ket_k l) = (ket_k (l + 1))" using mat_incr_mult_ket_k by auto let ?m2 = "tensor_P (proj_psi_l (l + 1)) (proj_k l)" have eq: "?m1 * ?m2 * ?m3 = tensor_P (proj_psi_l (l + 1)) ((mat_incr K) * (proj_k l) * adjoint (mat_incr K))" apply (subst tensor_P_left_right_partial2) using proj_k_dim proj_psi_l_dim mat_incr_dim adjoint_dim[OF mat_incr_dim] by auto have "(mat_incr K) * (proj_k l) * adjoint (mat_incr K) = outer_prod ((mat_incr K) *\<^sub>v (ket_k l)) ((mat_incr K) *\<^sub>v (ket_k l))" unfolding proj_k_def apply (subst outer_prod_left_right_mat[of _ K _ K _ K _ K]) using ket_k_dim mat_incr_dim adjoint_dim[OF mat_incr_dim] adjoint_adjoint[of "mat_incr K"] by auto also have "\ = proj_k (l + 1)" unfolding proj_k_def using m by auto finally have "?m1 * ?m2 * ?m3 = tensor_P (proj_psi_l (l + 1)) (proj_k (l + 1))" using eq by auto } note p1 = this have "?m1 * Q2 * ?m3 = matrix_sum d (\l. ?m1 * (tensor_P (proj_psi_l (l + 1)) (proj_k l)) * ?m3) R" unfolding Q2_def apply(subst matrix_sum_mult_left_right) using tensor_P_dim by auto also have "\ = matrix_sum d (\l. tensor_P (proj_psi_l (l + 1)) (proj_k (l + 1))) R" apply (subst matrix_sum_cong) using p1 by auto finally have eq1: "?m1 * Q2 * ?m3 = matrix_sum d (\l. tensor_P (proj_psi_l (l + 1)) (proj_k (l + 1))) R" (is "_=?r") . 
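(* Conjugation by the counter increment shifts each summand from proj_k l to proj_k (l + 1); adding back the l = 0 head term below bounds the result by P' + Q. *)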
have eq2: "P' + Q = tensor_P (proj_psi_l 0) (proj_k 0) + ?r" unfolding P'_add_Q apply (subst matrix_sum_Suc_remove_head) using tensor_P_dim by auto have "tensor_P (proj_psi_l 0) (proj_k 0) + ?r \\<^sub>L P' + Q" unfolding eq2[symmetric] apply (subst lowner_le_refl) using P'_Q_dim by auto moreover have "positive (tensor_P (proj_psi_l 0) (proj_k 0))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) unfolding ps_P_d1 ps_P_d2 using proj_psi_l_dim proj_k_dim positive_proj_psi_l positive_proj_k by auto moreover have "matrix_sum d (\l. tensor_P (proj_psi_l (l + 1)) (proj_k (l + 1))) R \ carrier_mat d d" apply (subst matrix_sum_dim) using tensor_P_dim by auto ultimately have "?r \\<^sub>L P' + Q" apply (subst add_positive_le_reduce2[of ?r d "tensor_P (proj_psi_l 0) (proj_k 0)" "P' + Q"]) using tensor_P_dim P'_Q_dim by auto then show ?thesis using eq1 ad by auto qed lemma Q2_leq: "Q2 \\<^sub>L adjoint (tensor_P (1\<^sub>m N) (mat_incr K)) * (P' + Q) * tensor_P (1\<^sub>m N) (mat_incr K)" proof - let ?m1 = "tensor_P (1\<^sub>m N) (mat_incr K)" let ?m2 = "adjoint (tensor_P (1\<^sub>m N) (mat_incr K))" have "?m1 * ?m2 = 1\<^sub>m d" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_adjoint) unfolding ps_P_d1 ps_P_d2 apply (auto simp add: mat_incr_dim adjoint_one) apply (subst ps_P.tensor_mat_mult[symmetric]) unfolding ps_P_d1 ps_P_d2 apply (auto simp add: mat_incr_dim adjoint_dim mat_incr_mult_adjoint_mat_incr) using ps_P.tensor_mat_id ps_P_d ps_P_d1 ps_P_d2 by auto then have inv: "?m2 * ?m1 = 1\<^sub>m d" using mat_mult_left_right_inverse[of ?m1 d ?m2] tensor_P_dim adjoint_dim by auto have d: "?m1 * Q2 * ?m2 \ carrier_mat d d" using tensor_P_dim adjoint_dim[OF tensor_P_dim] Q2_dim by fastforce have le: "?m2 * (?m1 * Q2 * ?m2) * ?m1 \\<^sub>L ?m2 * (P' + Q) * ?m1" (is "lowner_le ?l ?r") apply (subst lowner_le_keep_under_measurement[of _ d]) using Q2_leq_lemma tensor_P_dim P'_Q_dim d by auto have "?l = (?m2 * ?m1) * Q2 * (?m2 * ?m1)" apply (mat_assoc d) using tensor_P_dim Q2_dim by auto also have "\ = 1\<^sub>m d * Q2 * 1\<^sub>m d" using inv by auto also have "\ = Q2" using Q2_dim by auto finally have eq: "?l = Q2". 
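(* Since the increment operator is unitary, conjugating back with its adjoint recovers Q2, so the Loewner bound follows from the previous lemma. *)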
show ?thesis using eq le by auto qed lemma hoare_triple_D3: "\\<^sub>p {Q2} Utrans_P vars2 (mat_incr K) {adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1}" unfolding exP0_P' exP1_Q proof - let ?m = "tensor_P (1\<^sub>m N) (mat_incr K)" have h1: "\\<^sub>p {adjoint ?m * (P' + Q) * ?m} Utrans ?m {P' + Q}" using qp_P'_Q hoare_partial.intros by auto have qp: "is_quantum_predicate (adjoint ?m * (P' + Q) * ?m)" using qp_close_under_unitary_operator tensor_P_dim qp_P'_Q unitary_exmat_incr by auto then have "\\<^sub>p {Q2} Utrans ?m {P' + Q}" using hoare_partial.intros(6)[OF qp_Q2 qp_P'_Q qp qp_P'_Q] Q2_leq h1 lowner_le_refl[OF P'_Q_dim] by auto moreover have "Utrans ?m = Utrans_P vars2 (mat_incr K)" apply (subst Utrans_P_is_tensor_P2) unfolding mat_incr_def by auto ultimately show "\\<^sub>p {Q2} Utrans_P vars2 (mat_incr K) {P' + Q}" by auto qed lemma qp_D3_post: "is_quantum_predicate (adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1)" unfolding exP0_P' exP1_Q using qp_P'_Q by auto lemma hoare_triple_D: "\\<^sub>p {Q} D {adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1}" proof - have "\\<^sub>p {Q1} hadamard_n n;; (Utrans_P vars1 mat_Ph;; hadamard_n n) {Q2}" using well_com_hadamard_n well_com_mat_Ph hoare_triple_D2 qp_Q1 qp_Q2 by (auto simp add: hoare_patial_seq_assoc) then have "\\<^sub>p {Q} Utrans_P vars1 mat_O;; (hadamard_n n;; (Utrans_P vars1 mat_Ph;; hadamard_n n)) {Q2}" using hoare_triple_D1 qp_Q qp_Q1 qp_Q2 hoare_partial.intros(3) by auto moreover have "well_com (Utrans_P vars1 mat_Ph;; hadamard_n n)" using well_com_hadamard_n well_com_mat_Ph by auto ultimately have "\\<^sub>p {Q} (Utrans_P vars1 mat_O;; hadamard_n n);; (Utrans_P vars1 mat_Ph;; hadamard_n n) {Q2}" using well_com_hadamard_n well_com_mat_O qp_Q qp_Q2 by (auto simp add: hoare_patial_seq_assoc) moreover have "well_com (Utrans_P vars1 mat_O;; hadamard_n n)" using well_com_mat_O well_com_hadamard_n by auto ultimately have "\\<^sub>p {Q} Utrans_P vars1 mat_O;; hadamard_n n;; Utrans_P vars1 mat_Ph;; hadamard_n n {Q2}" using well_com_hadamard_n well_com_mat_Ph qp_Q qp_Q2 by (auto simp add: hoare_patial_seq_assoc) with qp_Q qp_Q2 qp_D3_post hoare_triple_D3 show "\\<^sub>p {Q} D {adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1}" unfolding D_def using hoare_partial.intros(3) by auto qed lemma psi_is_psi_l0: "\ = psi_l 0" unfolding \_eq psi_l_def alpha_l_def beta_l_def by auto lemma proj_psi_is_proj_psi_l0: "proj_psi = proj_psi_l 0" unfolding proj_psi_def psi_is_psi_l0 proj_psi_l_def by auto lemma lowner_le_Q: "tensor_P proj_psi (proj_k 0) \\<^sub>L adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1" proof - let ?r = "matrix_sum d (\l. tensor_P (proj_psi_l l) (proj_k l)) (R + 1)" let ?l = "tensor_P (proj_psi_l 0) (proj_k 0)" have eq: "?r = ?l + matrix_sum d (\l. 
tensor_P (proj_psi_l (l + 1)) (proj_k (l + 1))) R" (is "_ = _ + ?s") apply (subst matrix_sum_Suc_remove_head) using tensor_P_dim by auto have d: "?s \ carrier_mat d d" apply (subst matrix_sum_dim) using tensor_P_dim by auto have pt: "positive (tensor_P (proj_psi_l l) (proj_k l))" for l unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) unfolding ps_P_d1 ps_P_d2 using proj_psi_l_dim proj_k_dim positive_proj_psi_l positive_proj_k by auto have ps: "positive ?s" apply (subst matrix_sum_positive) subgoal using tensor_P_dim by auto using pt by auto have "?l \\<^sub>L ?r" unfolding eq apply (subst add_positive_le_reduce1[of ?l d ?s]) subgoal using tensor_P_dim by auto subgoal using d by auto subgoal using tensor_P_dim d by auto subgoal using ps by auto apply (subst lowner_le_refl[of _ d]) using tensor_P_dim d by auto then show ?thesis unfolding exP0_P' exP1_Q P'_add_Q proj_psi_is_proj_psi_l0 by auto qed lemma hoare_triple_while: "\\<^sub>p {adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1} While_P vars2 M0 M1 D {P'}" proof - let ?m = "\(n::nat). if n = 0 then mat_extension dims vars2 M0 else if n = 1 then mat_extension dims vars2 M1 else undefined" have dM0: "M0 \ carrier_mat K K" unfolding M0_def by auto have dM1: "M1 \ carrier_mat K K" unfolding M1_def by auto have m0: "?m 0 = exM0" apply (simp) unfolding exM0_def ps2_P.ptensor_mat_def mat_ext_vars2[OF dM0] by auto have m1: "?m 1 = exM1" unfolding exM1_def ps2_P.ptensor_mat_def mat_ext_vars2[OF dM1] by auto have "\\<^sub>p {Q} D {adjoint (?m 0) * P' * (?m 0) + adjoint (?m 1) * Q * (?m 1)}" using hoare_triple_D m0 m1 by auto then show ?thesis unfolding While_P_def using qp_D3_post qp_P' hoare_partial.intros(5)[OF qp_P' qp_Q, of D ?m] m0 m1 by auto qed lemma R_and_a_half_\: "(R + 1/2) * \ = pi / 2" using R \_neq_0 by auto lemma psi_lR_is_beta: "psi_l R = \" unfolding psi_l_def alpha_l_def beta_l_def R_and_a_half_\ by auto lemma post_mult_beta: "post *\<^sub>v \ = \" by (auto simp add: post_def \_def scalar_prod_def sum_only_one_neq_0) lemma post_mult_post: "post * post = post" by (auto simp add: post_def scalar_prod_def sum_only_one_neq_0) lemma post_mult_proj_psi_lR: "post * proj_psi_l R = proj_psi_l R" proof - let ?R = "proj_psi_l R" have "post * ?R = post * ?R * 1\<^sub>m N" using post_dim proj_psi_l_dim[of R] by auto also have "\ = outer_prod (post *\<^sub>v psi_l R) ((1\<^sub>m N) *\<^sub>v psi_l R)" unfolding proj_psi_l_def apply (subst outer_prod_left_right_mat[of _ N _ N _ N _ N]) by (auto simp add: psi_l_dim post_dim adjoint_one) also have "\ = ?R" unfolding proj_psi_l_def unfolding psi_lR_is_beta unfolding post_mult_beta using \_dim by auto finally show "post * ?R = ?R". qed lemma proj_psi_lR_mult_post: "proj_psi_l R * post = proj_psi_l R" proof - let ?R = "proj_psi_l R" have "?R * post = 1\<^sub>m N * ?R * post" using post_dim proj_psi_l_dim[of R] by auto also have "\ = outer_prod ((1\<^sub>m N) *\<^sub>v psi_l R) (post *\<^sub>v psi_l R)" unfolding proj_psi_l_def apply (subst outer_prod_left_right_mat[of _ N _ N _ N _ N]) by (auto simp add: psi_l_dim post_dim hermitian_post[unfolded hermitian_def]) also have "\ = ?R" unfolding proj_psi_l_def unfolding psi_lR_is_beta unfolding post_mult_beta using \_dim by auto finally show "?R * post = ?R". 
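(* After R iterations the rotated state psi_l R coincides with beta, which post leaves fixed, so post absorbs proj_psi_l R on either side. *)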
qed lemma proj_psi_lR_mult_proj_psi_lR: "proj_psi_l R * proj_psi_l R = proj_psi_l R" unfolding proj_psi_l_def psi_lR_is_beta apply (subst outer_prod_mult_outer_prod[of _ N _ N _ _ N]) by (auto simp add: \_inner) lemma proj_psi_lR_le_post: "proj_psi_l R \\<^sub>L post" proof - let ?R = "proj_psi_l R" let ?s = "post - ?R" have eq1: "post * (post - ?R) = post - ?R" apply (subst mult_minus_distrib_mat[of _ N N _ N]) apply (auto simp add: post_dim proj_psi_l_dim[of R]) using post_mult_post post_mult_proj_psi_lR by auto have eq2: "?R * (post - ?R) = 0\<^sub>m N N" apply (subst mult_minus_distrib_mat[of _ N N _ N]) apply (auto simp add: post_dim proj_psi_l_dim[of R]) unfolding proj_psi_lR_mult_post proj_psi_lR_mult_proj_psi_lR using proj_psi_l_dim[of R] by auto have "adjoint ?s = ?s" apply (subst adjoint_minus[of _ N N]) using post_dim proj_psi_l_dim hermitian_post hermitian_proj_psi_l K by (auto simp add: hermitian_def) then have "?s * adjoint ?s = ?s * ?s" by auto also have "\ = post * (post - ?R) - ?R * (post - ?R)" using post_dim proj_psi_l_dim[of R] by (mat_assoc N) also have "\ = post - ?R" unfolding eq1 eq2 using post_dim proj_psi_l_dim[of R] by auto finally have "?s * adjoint ?s = ?s". then have "\M. M * adjoint M = ?s" by auto then have "positive ?s" apply (subst positive_if_decomp[of ?s N]) using post_dim proj_psi_l_dim[of R] by auto then show ?thesis unfolding lowner_le_def using post_dim proj_psi_l_dim[of R] by auto qed lemma P'_le_post_R: "P' \\<^sub>L (tensor_P post (proj_k R))" proof - let ?r = "tensor_P post (proj_k R)" have "?r - P' = tensor_P (post - proj_psi_l R) (proj_k R)" unfolding P'_def ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_minus1) unfolding ps_P_d1 ps_P_d2 using post_dim proj_psi_l_dim proj_k_dim by auto moreover have "positive (tensor_P (post - proj_psi_l R) (proj_k R))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) unfolding ps_P_d1 ps_P_d2 using proj_psi_lR_le_post[unfolded lowner_le_def] post_dim proj_psi_l_dim[of R] proj_k_dim positive_proj_k by auto ultimately show "P' \\<^sub>L ?r" unfolding lowner_le_def P'_def using tensor_P_dim by auto qed lemma positive_post: "positive post" proof - have ad: "adjoint post = post" using hermitian_post[unfolded hermitian_def] by auto then have "post * adjoint post = post" unfolding ad post_mult_post by auto then have "\M. 
M * adjoint M = post" by auto then show ?thesis using positive_if_decomp post_dim by auto qed lemma lowner_le_P': "P' \\<^sub>L tensor_P post (1\<^sub>m K)" proof - let ?r = "tensor_P post (1\<^sub>m K)" let ?m = "tensor_P post (proj_k R)" have "?m \\<^sub>L ?r" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive_le) unfolding ps_P_d1 ps_P_d2 using post_dim proj_k_dim positive_post positive_proj_k lowner_le_refl[of post] proj_k_le_one by auto then show "P' \\<^sub>L ?r" using lowner_le_trans[of P' d ?m ?r] P'_le_post_R unfolding P'_def using tensor_P_dim by auto qed lemma post_mult_testNk: assumes "f k" shows "post * (testN k) = testN k" using assms by (auto simp add: post_def testN_def scalar_prod_def sum_only_one_neq_0) lemma post_mult_testNk_neg: assumes "\ f k" shows "post * testN k = 0\<^sub>m N N" using assms by (auto simp add: post_def testN_def scalar_prod_def sum_only_one_neq_0) lemma testN_post1: "f k \ adjoint (testN k) * post * testN k = testN k" apply (subst assoc_mult_mat[of _ N N _ N _ N]) apply (auto simp add: adjoint_dim testN_dim post_dim) apply (subst post_mult_testNk, simp) unfolding hermitian_testN[unfolded hermitian_def] using testN_mult_testN by auto lemma testN_post2: "\ f k \ adjoint (testN k) * post * testN k = 0\<^sub>m N N" apply (subst assoc_mult_mat[of _ N N _ N _ N]) apply (auto simp add: adjoint_dim testN_dim post_dim) apply (subst post_mult_testNk_neg, simp) unfolding hermitian_testN[unfolded hermitian_def] using testN_dim[of k] by auto definition post_fst_k :: "nat \ complex mat" where "post_fst_k k = mat N N (\(i, j). if (i = j \ f i \ i < k) then 1 else 0)" lemma post_fst_kN: "post_fst_k N = post" unfolding post_fst_k_def post_def by auto lemma post_fst_k_Suc: "f i \ post_fst_k (Suc i) = testN i + post_fst_k i" apply (rule eq_matI) unfolding post_fst_k_def testN_def by auto lemma post_fst_k_Suc_neg: "\ f i \ post_fst_k (Suc i) = post_fst_k i" apply (rule eq_matI) unfolding post_fst_k_def apply auto using less_antisym by fastforce lemma testN_sum: "matrix_sum N (\k. adjoint (testN k) * post * testN k) N = post" proof - have "m \ N \ matrix_sum N (\k. adjoint (testN k) * post * testN k) m = post_fst_k m" for m proof (induct m) case 0 then show ?case apply simp unfolding post_fst_k_def by auto next case (Suc m) then have m: "m \ N" by auto show ?case proof (cases "f m") case True show ?thesis apply simp apply (subst testN_post1[OF True]) apply (subst Suc(1)[OF m]) using post_fst_k_Suc True by auto next case False show ?thesis apply simp apply (subst testN_post2[OF False]) apply (subst Suc(1)[OF m]) using post_fst_k_Suc_neg False post_fst_k_def by auto qed qed then show ?thesis using post_fst_kN by auto qed lemma tensor_P_testN_sum: "matrix_sum d (\k. adjoint (tensor_P (testN k) (1\<^sub>m K)) * tensor_P post (1\<^sub>m K) * tensor_P (testN k) (1\<^sub>m K)) N = tensor_P post (1\<^sub>m K)" proof - have eq: "adjoint (tensor_P (testN k) (1\<^sub>m K)) * tensor_P post (1\<^sub>m K) * tensor_P (testN k) (1\<^sub>m K) = tensor_P (adjoint (testN k) * post * (testN k)) (1\<^sub>m K)" for k apply (subst tensor_P_adjoint_left_right) subgoal unfolding testN_def by auto subgoal by auto subgoal using post_dim by auto using adjoint_one by auto moreover have "matrix_sum N (\k. 
adjoint (testN k) * post * testN k) N = post" using testN_sum by auto show ?thesis unfolding eq apply (subst matrix_sum_tensor_P1) subgoal unfolding testN_def by auto subgoal by auto using testN_sum by auto qed lemma post_le_one: "post \\<^sub>L 1\<^sub>m N" proof - let ?s = "1\<^sub>m N - post" have eq1: "1\<^sub>m N * (1\<^sub>m N - post) = 1\<^sub>m N - post" apply (mat_assoc N) using post_dim by auto have eq2: "post * (1\<^sub>m N - post) = 0\<^sub>m N N" apply (subst mult_minus_distrib_mat[of _ N N]) using post_dim by (auto simp add: post_mult_post) have "adjoint ?s = ?s" apply (subst adjoint_minus) apply (auto simp add: post_dim adjoint_dim) using adjoint_one hermitian_post[unfolded hermitian_def] by auto then have "?s * adjoint ?s = ?s * ?s" by auto also have "\ = 1\<^sub>m N * (1\<^sub>m N - post) - post * (1\<^sub>m N - post)" apply (mat_assoc N) using post_dim by auto also have "\ = ?s" unfolding eq1 eq2 using post_dim by auto finally have "?s * adjoint ?s = ?s". then have "\M. M * adjoint M = ?s" by auto then have "positive ?s" apply (subst positive_if_decomp[of ?s N]) using post_dim by auto then show ?thesis unfolding lowner_le_def using post_dim by auto qed lemma qp_post: "is_quantum_predicate (tensor_P post (1\<^sub>m K))" unfolding is_quantum_predicate_def proof (intro conjI) show "tensor_P post (1\<^sub>m K) \ carrier_mat d d" using tensor_P_dim by auto show "positive (tensor_P post (1\<^sub>m K))" unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive) by (auto simp add: ps_P_d1 ps_P_d2 post_dim positive_post positive_one) show "tensor_P post (1\<^sub>m K) \\<^sub>L 1\<^sub>m d" unfolding ps_P.tensor_mat_id[symmetric, unfolded ps_P_d ps_P_d1 ps_P_d2] unfolding ps2_P.ptensor_mat_def apply (subst ps_P.tensor_mat_positive_le) unfolding ps_P_d1 ps_P_d2 using post_dim positive_post positive_one post_le_one lowner_le_refl[of "1\<^sub>m K" K] by auto qed lemma hoare_triple_if: "\\<^sub>p {tensor_P post (1\<^sub>m K)} Measure_P vars1 N testN (replicate N SKIP) {tensor_P post (1\<^sub>m K)}" proof - define M where "M = (\n. mat_extension dims vars1 (testN n))" define Post where "Post = (\(k::nat). tensor_P post (1\<^sub>m K))" have M: "M = (\n. tensor_P (testN n) (1\<^sub>m K))" unfolding M_def using mat_ext_vars1 by auto have skip: "\k. k < N \ (replicate N SKIP) ! k = SKIP" by simp have h: "\k. k < N \ \\<^sub>p {Post k} replicate N SKIP ! k {tensor_P post (1\<^sub>m K)}" unfolding Post_def skip using qp_post hoare_partial.intros by auto moreover have "\k. 
k < N \ is_quantum_predicate (Post k)" unfolding Post_def using qp_post by auto ultimately show ?thesis unfolding Measure_P_def apply (fold M_def) using hoare_partial.intros(4)[of N Post "tensor_P post (1\<^sub>m K)" "replicate N SKIP" M] unfolding M Post_def using tensor_P_testN_sum qp_post by auto qed theorem grover_partial_deduct: "\\<^sub>p {tensor_P pre (proj_k 0)} Grover {tensor_P post (1\<^sub>m K)}" unfolding Grover_def proof - have "\\<^sub>p {tensor_P pre (proj_k 0)} hadamard_n n {adjoint exM0 * P' * exM0 + adjoint exM1 * Q * exM1}" using hoare_partial.intros(6)[OF qp_pre qp_D3_post qp_pre qp_init_post] hoare_triple_init lowner_le_refl[OF tensor_P_dim] lowner_le_Q by auto then have "\\<^sub>p {tensor_P pre (proj_k 0)} hadamard_n n;; While_P vars2 M0 M1 D {P'}" using hoare_triple_while hoare_partial.intros(3) qp_pre qp_D3_post qp_P' by auto then have "\\<^sub>p {tensor_P pre (proj_k 0)} hadamard_n n;; While_P vars2 M0 M1 D {tensor_P post (1\<^sub>m K)}" using lowner_le_P' hoare_partial.intros(6)[OF qp_pre qp_post qp_pre qp_P'] lowner_le_P' lowner_le_refl[OF tensor_P_dim] by auto then show " \\<^sub>p {tensor_P pre (proj_k 0)} hadamard_n n;; While_P vars2 M0 M1 D;; Measure_P vars1 N testN (replicate N SKIP) {tensor_P post (1\<^sub>m K)}" using hoare_triple_if qp_pre qp_post hoare_partial.intros(3) by auto qed theorem grover_partial_correct: "\\<^sub>p {tensor_P pre (proj_k 0)} Grover {tensor_P post (1\<^sub>m K)}" using grover_partial_deduct well_com_Grover qp_pre qp_post hoare_partial_sound by auto end end diff --git a/thys/Rank_Nullity_Theorem/Mod_Type.thy b/thys/Rank_Nullity_Theorem/Mod_Type.thy --- a/thys/Rank_Nullity_Theorem/Mod_Type.thy +++ b/thys/Rank_Nullity_Theorem/Mod_Type.thy @@ -1,557 +1,557 @@ (* Title: Mod_Type.thy Author: Jose Divasón Author: Jesús Aransay *) section\Class for modular arithmetic\ theory Mod_Type imports "HOL-Library.Numeral_Type" "HOL-Analysis.Cartesian_Euclidean_Space" Dual_Order begin subsection\Definition and properties\ text\Class for modular arithmetic. It is inspired by the locale mod\_type.\ class mod_type = times + wellorder + neg_numeral + fixes Rep :: "'a => int" and Abs :: "int => 'a" assumes type: "type_definition Rep Abs {0.. int CARD ('a)" by (rule Rep_less_n [THEN order_less_imp_le]) lemma Rep_inject_sym: "x = y \ Rep x = Rep y" by (rule type_definition.Rep_inject [OF type, symmetric]) lemma Rep_inverse: "Abs (Rep x) = x" by (rule type_definition.Rep_inverse [OF type]) lemma Abs_inverse: "m \ {0.. 
Rep (Abs m) = m" by (rule type_definition.Abs_inverse [OF type]) lemma Rep_Abs_mod: "Rep (Abs (m mod int CARD ('a))) = m mod int CARD ('a)" by (simp add: Abs_inverse pos_mod_conj [OF size0]) lemma Rep_Abs_0: "Rep (Abs 0) = 0" apply (rule Abs_inverse [of 0]) using size0 by simp lemma Rep_0: "Rep 0 = 0" by (simp add: zero_def Rep_Abs_0) lemma Rep_Abs_1: "Rep (Abs 1) = 1" by (simp add: Abs_inverse size1) lemma Rep_1: "Rep 1 = 1" by (simp add: one_def Rep_Abs_1) lemma Rep_mod: "Rep x mod int CARD ('a) = Rep x" apply (rule_tac x=x in type_definition.Abs_cases [OF type]) apply (simp add: type_definition.Abs_inverse [OF type]) done lemmas Rep_simps = Rep_inject_sym Rep_inverse Rep_Abs_mod Rep_mod Rep_Abs_0 Rep_Abs_1 subsection\Conversion between a modular class and the subset of natural numbers associated.\ text\Definitions to make transformations among elements of a modular class and naturals\ definition to_nat :: "'a => nat" where "to_nat = nat \ Rep" definition Abs' :: "int => 'a" where "Abs' x = Abs(x mod int CARD ('a))" definition from_nat :: "nat \ 'a" where "from_nat = (Abs' \ int)" lemma bij_Rep: "bij_betw (Rep) (UNIV::'a set) {0.. Rep x" using bij_Rep unfolding bij_betw_def by auto lemma bij_Abs: "bij_betw (Abs) {0..xa\{0.. x" and x2: "x < int CARD('a)" show "x \ int ` {0::nat.. {0::nat..'a) {0::nat..'a) (int ` {0::nat..'a)` {0::nat..'a) = (to_nat::'a=>nat)" proof (unfold the_inv_into_def fun_eq_iff from_nat_def to_nat_def o_def, clarify) fix x::"'a" show "(THE y::nat. y \ {0::nat.. Abs' (int y) = x) = nat (Rep x)" proof (rule the_equality, auto) show " Abs' (Rep x) = x" by (metis Abs'_def Rep_inverse Rep_mod) show "nat (Rep x) < CARD('a)" by (metis (full_types) Rep_less_n nat_int size0 zless_nat_conj) assume x: "\ (0::int) \ Rep x" show "(0::nat) < CARD('a)" and "Abs' (0::int) = x" using Rep_ge_0 x by auto next fix y::nat assume y: "y < CARD('a)" have "(Rep(Abs'(int y)::'a)) = (Rep((Abs(int y mod int CARD('a)))::'a))" unfolding Abs'_def .. also have "... = (Rep (Abs (int y)::'a))" using zmod_int[of y "CARD('a)"] using y mod_less by auto also have "... = (int y)" proof (rule Abs_inverse) show "int y \ {0::int.. (UNIV::'a set)) = 0" proof (rule Least_equality, auto) fix y::"'a" have "(0::'a) \ Abs (Rep y mod int CARD('a))" using strict_mono_Rep unfolding strict_mono_def by (metis (hide_lams, mono_tags) Rep_0 Rep_ge_0 strict_mono_Rep strict_mono_less_eq) also have "... = y" by (metis Rep_inverse Rep_mod) finally show "(0::'a) \ y" . qed lemma add_to_nat_def: "x + y = from_nat (to_nat x + to_nat y)" unfolding from_nat_def to_nat_def o_def using Rep_ge_0[of x] using Rep_ge_0[of y] using Rep_less_n[of x] Rep_less_n[of y] unfolding Abs'_def unfolding add_def[of x y] by auto lemma to_nat_1: "to_nat 1 = 1" by (simp add: to_nat_def Rep_1) lemma add_def': shows "x + y = Abs' (Rep x + Rep y)" unfolding Abs'_def using add_def by simp lemma Abs'_0: shows "Abs' (CARD('a))=(0::'a)" by (metis (hide_lams, mono_tags) Abs'_def mod_self zero_def) lemma Rep_plus_one_le_card: assumes a: "a + 1 \ 0" shows "(Rep a) + 1 < CARD ('a)" proof (rule ccontr) assume "\ Rep a + 1 < CARD('a)" hence to_nat_eq_card: "Rep a + 1 = CARD('a)" using Rep_less_n by (simp add: add1_zle_eq order_class.less_le) have "a+1 = Abs' (Rep a + Rep (1::'a))" using add_def' by auto also have "... = Abs' ((Rep a) + 1)" using Rep_1 by simp also have "... = Abs' (CARD('a))" unfolding to_nat_eq_card .. also have "... = 0" using Abs'_0 by auto finally show False using a by contradiction qed lemma to_nat_plus_one_less_card: "\a. 
a+1 \ 0 --> to_nat a + 1 < CARD('a)" proof (clarify) fix a assume a: "a + 1 \ 0" have "Rep a + 1 < int CARD('a)" using Rep_plus_one_le_card[OF a] by auto hence "nat (Rep a + 1) < nat (int CARD('a))" unfolding zless_nat_conj using size0 by fast thus "to_nat a + 1 < CARD('a)" unfolding to_nat_def o_def using nat_add_distrib[OF Rep_ge_0] by simp qed corollary to_nat_plus_one_less_card': assumes "a+1 \ 0" shows "to_nat a + 1 < CARD('a)" using to_nat_plus_one_less_card assms by simp lemma strict_mono_to_nat: "strict_mono to_nat" using strict_mono_Rep unfolding strict_mono_def to_nat_def using Rep_ge_0 by (metis comp_apply nat_less_eq_zless) lemma to_nat_eq [simp]: "to_nat x = to_nat y \ x = y" using injD [OF bij_betw_imp_inj_on[OF bij_to_nat]] by blast lemma mod_type_forall_eq [simp]: "(\j::'a. (to_nat j) P j) = (\a. P a)" proof (auto) fix a assume a: "\j. (to_nat::'a=>nat) j < CARD('a) \ P j" have "(to_nat::'a=>nat) a < CARD('a)" using bij_to_nat unfolding bij_betw_def by auto thus "P a" using a by auto qed lemma to_nat_from_nat: assumes t:"to_nat j = k" shows "from_nat k = j" proof - have "from_nat k = from_nat (to_nat j)" unfolding t .. also have "... = from_nat (the_inv_into {0.. from_nat ` {0.. b" shows "to_nat a \ to_nat b" proof (cases "a=b") case True thus ?thesis by auto next case False hence "a (n::'a)" using least_0 by (metis (full_types) Least_le UNIV_I) lemma to_nat_from_nat_id: assumes x: "x'a) {0.. {0.. j" and "j from_nat j" proof (cases "i=j") case True have "(from_nat i::'a) = from_nat j" using True by simp thus ?thesis by simp next case False hence "i b" proof - have "a + 1 = (from_nat (to_nat (a + 1))::'a)" using from_nat_to_nat_id [of "a+1",symmetric] . also have "... \ (from_nat (to_nat (b::'a))::'a)" proof (rule from_nat_mono') have "to_nat a < to_nat b" using ab by (metis to_nat_mono) hence "to_nat a + 1 \ to_nat b" by simp thus "to_nat b < CARD ('a)" using bij_to_nat unfolding bij_betw_def by auto hence "to_nat a + 1 < CARD ('a)" by (metis \to_nat a + 1 \ to_nat b\ preorder_class.le_less_trans) thus "to_nat (a + 1) \ to_nat b" by (metis \to_nat a + 1 \ to_nat b\ to_nat_suc) qed also have "... = b" by (metis from_nat_to_nat_id) finally show "a + (1::'a) \ b" . qed lemma le_Suc': assumes ab: "a + 1 \ b" and less_card: "(to_nat a) + 1 < CARD ('a)" shows "a < b" proof - have "a = (from_nat (to_nat a)::'a)" using from_nat_to_nat_id [of "a",symmetric] . also have "... 
< (from_nat (to_nat b)::'a)" proof (rule from_nat_mono) show "to_nat b < CARD('a)" using bij_to_nat unfolding bij_betw_def by auto have "to_nat (a + 1) \ to_nat b" using ab by (metis to_nat_mono') hence "to_nat (a) + 1 \ to_nat b" using to_nat_suc[OF less_card] by auto thus "to_nat a < to_nat b" by simp qed finally show "a < b" by (metis to_nat_from_nat) qed lemma Suc_le: assumes less_card: "(to_nat a) + 1 < CARD ('a)" shows "a < a + 1" proof - have "(to_nat a) < (to_nat a) + 1" by simp hence "(to_nat a) < to_nat (a + 1)" by (metis less_card to_nat_suc) hence "(from_nat (to_nat a)::'a) < from_nat (to_nat (a + 1))" by (rule from_nat_mono, metis less_card to_nat_suc) thus "a < a + 1" by (metis to_nat_from_nat) qed lemma Suc_le': fixes a::'a assumes "a + 1 \ 0" shows "a < a + 1" using Suc_le to_nat_plus_one_less_card assms by blast lemma from_nat_not_eq: assumes a_eq_to_nat: "a \ to_nat b" and a_less_card: "a b" proof (rule ccontr) assume "\ from_nat a \ b" hence "from_nat a = b" by simp hence "to_nat ((from_nat a)::'a) = to_nat b" by auto thus False by (metis a_eq_to_nat a_less_card to_nat_from_nat_id) qed lemma Suc_less: fixes i::'a assumes "i j" shows "i+1a::'a. a \ -1" proof (clarify) fix a::'a have zero_ge_card_1: "0 \ int CARD('a) - 1" using size1 by auto have card_less: "int CARD('a) - 1 < int CARD('a)" by auto have not_zero: "1 mod int CARD('a) \ 0" by (metis (hide_lams, mono_tags) Rep_Abs_1 Rep_mod zero_neq_one) have int_card: "int (CARD('a) - 1) = int CARD('a) - 1" using of_nat_diff[of 1 "CARD ('a)"] using size1 by simp have "a = Abs' (Rep a)" by (metis (hide_lams, mono_tags) Rep_0 add_0_right add_def' monoid_add_class.add.right_neutral) also have "... = Abs' (int (nat (Rep a)))" by (metis Rep_ge_0 int_nat_eq) also have "... \ Abs' (int (CARD('a) - 1))" proof (rule from_nat_mono'[unfolded from_nat_def o_def, of "nat (Rep a)" "CARD('a) - 1"]) show "nat (Rep a) \ CARD('a) - 1" using Rep_less_n using int_card nat_le_iff by auto show "CARD('a) - 1 < CARD('a)" using finite_UNIV_card_ge_0 finite_mod_type by fastforce qed also have "... = - 1" unfolding Abs'_def unfolding minus_def zmod_zminus1_eq_if unfolding Rep_1 apply (rule cong [of Abs], rule refl) unfolding if_not_P [OF not_zero] unfolding int_card unfolding mod_pos_pos_trivial[OF zero_ge_card_1 card_less] using mod_pos_pos_trivial[OF _ size1] by presburger finally show "a \ -1" by fastforce qed lemma a_eq_minus_1: "\a::'a. a+1 = 0 \ a = -1" by (metis eq_neg_iff_add_eq_0) lemma forall_from_nat_rw: shows "(\x\{0..x. P (from_nat x))" proof (auto) fix y assume *: "\x\{0.. (UNIV::'a set)" by auto from this obtain x where x1: "from_nat y = (from_nat x::'a)" and x2: "x\{0.. CARD('a)" shows "a+1 \ 0" proof (rule ccontr, simp) assume a_plus_one_zero: "a + 1 = 0" hence rep_eq_card: "Rep a + 1 = CARD('a)" using assms to_nat_0 Suc_eq_plus1 Suc_lessI Zero_not_Suc to_nat_less_card to_nat_suc by (metis (hide_lams, mono_tags)) moreover have "Rep a + 1 < CARD('a)" using Abs'_0 Rep_1 Suc_eq_plus1 Suc_lessI Suc_neq_Zero add_def' assms rep_eq_card to_nat_0 to_nat_less_card to_nat_suc by (metis (hide_lams, mono_tags)) ultimately show False by fastforce qed lemma from_nat_suc: shows "from_nat (j + 1) = from_nat j + 1" unfolding from_nat_def o_def Abs'_def add_def' Rep_1 Rep_Abs_mod unfolding of_nat_add apply (subst mod_add_left_eq) unfolding of_nat_1 .. 
lemma to_nat_plus_1_set: shows "to_nat a + 1 \ {1..Instantiations\ instantiation bit0 and bit1:: (finite) mod_type begin definition "(Rep::'a bit0 => int) x = Rep_bit0 x" definition "(Abs::int => 'a bit0) x = Abs_bit0' x" definition "(Rep::'a bit1 => int) x = Rep_bit1 x" definition "(Abs::int => 'a bit1) x = Abs_bit1' x" instance proof show "(0::'a bit0) = Abs (0::int)" unfolding Abs_bit0_def Abs_bit0'_def zero_bit0_def by auto show "(1::int) < int CARD('a bit0)" by (metis bit0.size1) show "type_definition (Rep::'a bit0 => int) (Abs:: int => 'a bit0) {0::int..x::'a bit0. Rep_bit0 x \ {0::int..x::'a bit0. Abs_bit0 (Rep_bit0 x mod int CARD('a bit0)) = x" by (metis Rep_bit0_inverse bit0.Rep_mod) show "\y::int. y \ {0::int.. Rep_bit0 ((Abs_bit0::int => 'a bit0) (y mod int CARD('a bit0))) = y" by (metis bit0.Abs_inverse bit0.Rep_mod) qed show "(1::'a bit0) = Abs (1::int)" unfolding Abs_bit0_def Abs_bit0'_def one_bit0_def by (metis bit0.of_nat_eq of_nat_1 one_bit0_def) fix x y :: "'a bit0" show "x + y = Abs ((Rep x + Rep y) mod int CARD('a bit0))" unfolding Abs_bit0_def Rep_bit0_def plus_bit0_def Abs_bit0'_def by fastforce show "x * y = Abs (Rep x * Rep y mod int CARD('a bit0))" unfolding Abs_bit0_def Rep_bit0_def times_bit0_def Abs_bit0'_def by fastforce show "x - y = Abs ((Rep x - Rep y) mod int CARD('a bit0))" unfolding Abs_bit0_def Rep_bit0_def minus_bit0_def Abs_bit0'_def by fastforce show "- x = Abs (- Rep x mod int CARD('a bit0))" unfolding Abs_bit0_def Rep_bit0_def uminus_bit0_def Abs_bit0'_def by fastforce show "(0::'a bit1) = Abs (0::int)" unfolding Abs_bit1_def Abs_bit1'_def zero_bit1_def by auto show "(1::int) < int CARD('a bit1)" by (metis bit1.size1) show "(1::'a bit1) = Abs (1::int)" unfolding Abs_bit1_def Abs_bit1'_def one_bit1_def by (metis bit1.of_nat_eq of_nat_1 one_bit1_def) fix x y :: "'a bit1" show "x + y = Abs ((Rep x + Rep y) mod int CARD('a bit1))" unfolding Abs_bit1_def Abs_bit1'_def Rep_bit1_def plus_bit1_def by fastforce show "x * y = Abs (Rep x * Rep y mod int CARD('a bit1))" unfolding Abs_bit1_def Rep_bit1_def times_bit1_def Abs_bit1'_def by fastforce show "x - y = Abs ((Rep x - Rep y) mod int CARD('a bit1))" unfolding Abs_bit1_def Rep_bit1_def minus_bit1_def Abs_bit1'_def by fastforce show "- x = Abs (- Rep x mod int CARD('a bit1))" unfolding Abs_bit1_def Rep_bit1_def uminus_bit1_def Abs_bit1'_def by fastforce show "type_definition (Rep::'a bit1 => int) (Abs:: int => 'a bit1) {0::int..x::'a bit1. Rep_bit1 x \ {0::int..x::'a bit1. Abs_bit1 (Rep_bit1 x mod int CARD('a bit1)) = x" by (metis Rep_bit1_inverse bit1.Rep_mod) show "\y::int. y \ {0::int.. 
Rep_bit1 ((Abs_bit1::int => 'a bit1) (y mod int CARD('a bit1))) = y" by (metis bit1.Abs_inverse bit1.Rep_mod) qed show "strict_mono (Rep::'a bit0 => int)" unfolding strict_mono_def by (metis Rep_bit0_def less_bit0_def) show "strict_mono (Rep::'a bit1 => int)" unfolding strict_mono_def by (metis Rep_bit1_def less_bit1_def) qed end end diff --git a/thys/Root_Balanced_Tree/Root_Balanced_Tree.thy b/thys/Root_Balanced_Tree/Root_Balanced_Tree.thy --- a/thys/Root_Balanced_Tree/Root_Balanced_Tree.thy +++ b/thys/Root_Balanced_Tree/Root_Balanced_Tree.thy @@ -1,1990 +1,1990 @@ section "Root Balanced Tree" theory Root_Balanced_Tree imports Amortized_Complexity.Amortized_Framework0 "HOL-Library.Tree_Multiset" "HOL-Data_Structures.Tree_Set" "HOL-Data_Structures.Balance" Time_Monad begin declare Let_def[simp] subsection \Time Prelude\ text\Redefinition of some auxiliary functions, but now with \tm\ monad:\ subsubsection \@{const size_tree}\ fun size_tree_tm :: "'a tree \ nat tm" where "size_tree_tm \\ =1 return 0" | "size_tree_tm \l, x, r\ =1 do { m \ size_tree_tm l; n \ size_tree_tm r; return (m+n+1)}" definition size_tree :: "'a tree \ nat" where "size_tree t = val(size_tree_tm t)" lemma size_tree_Leaf[simp,code]: "size_tree \\ = 0" using val_cong[OF size_tree_tm.simps(1)] by(simp only: size_tree_def val_simps) lemma size_tree_Node[simp,code]: "size_tree \l, x, r\ = (let m = size_tree l; n = size_tree r in m+n+1)" using val_cong[OF size_tree_tm.simps(2)] by(simp only: size_tree_def val_simps) lemma size_tree: "size_tree t = size t" by(induction t rule: size_tree_tm.induct)(auto) definition t_size_tree :: "'a tree \ nat" where "t_size_tree t = time(size_tree_tm t)" lemma t_size_tree_Leaf: "t_size_tree \\ = 1" by(simp add: t_size_tree_def tm_simps) lemma t_size_tree_Node: "t_size_tree \l, x, r\ = t_size_tree l + t_size_tree r + 1" by(simp add: t_size_tree_def size_tree_def tm_simps split: tm.split) lemma t_size_tree: "t_size_tree t = 2 * size t + 1" by(induction t)(auto simp: t_size_tree_Leaf t_size_tree_Node) subsubsection \@{const inorder}\ fun inorder2_tm :: "'a tree \ 'a list \ 'a list tm" where "inorder2_tm \\ xs =1 return xs" | "inorder2_tm \l, x, r\ xs =1 do { rs \ inorder2_tm r xs; inorder2_tm l (x#rs) }" definition inorder2 :: "'a tree \ 'a list \ 'a list" where "inorder2 t xs = val(inorder2_tm t xs)" lemma inorder2_Leaf[simp,code]: "inorder2 \\ xs = xs" using val_cong[OF inorder2_tm.simps(1)] by(simp only: inorder2_def val_simps) lemma inorder2_Node[simp,code]: "inorder2 \l, x, r\ xs = (let rs = inorder2 r xs in inorder2 l (x # rs))" using val_cong[OF inorder2_tm.simps(2), of l] by(simp only: inorder2_def val_simps) lemma inorder2: "inorder2 t xs = Tree.inorder2 t xs" by(induction t xs rule: inorder2_tm.induct)(auto) definition t_inorder2 :: "'a tree \ 'a list \ nat" where "t_inorder2 t xs = time(inorder2_tm t xs)" lemma t_inorder2_Leaf: "t_inorder2 \\ xs = 1" by(simp add: t_inorder2_def tm_simps) lemma t_inorder2_Node: "t_inorder2 \l, x, r\ xs = t_inorder2 r xs + t_inorder2 l (x # inorder2 r xs) + 1" by(simp add: t_inorder2_def inorder2_def tm_simps split: tm.split) lemma t_inorder2: "t_inorder2 t xs = 2*size t + 1" by(induction t arbitrary: xs)(auto simp: t_inorder2_Leaf t_inorder2_Node) subsubsection \@{const split_min}\ fun split_min_tm :: "'a tree \ ('a * 'a tree) tm" where "split_min_tm Leaf =1 return undefined" | "split_min_tm (Node l x r) =1 (if l = Leaf then return (x,r) else do { (y,l') \ split_min_tm l; return (y, Node l' x r)})" definition split_min :: "'a tree \ ('a * 'a tree)" 
where "split_min t = val (split_min_tm t)" lemma split_min_Node[simp,code]: "split_min (Node l x r) = (if l = Leaf then (x,r) else let (y,l') = split_min l in (y, Node l' x r))" using val_cong[OF split_min_tm.simps(2)] by(simp only: split_min_def val_simps) definition t_split_min :: "'a tree \ nat" where "t_split_min t = time (split_min_tm t)" lemma t_split_min_Node[simp]: "t_split_min (Node l x r) = (if l = Leaf then 1 else t_split_min l + 1)" using val_cong[OF split_min_tm.simps(2)] by(simp add: t_split_min_def tm_simps split: tm.split) lemma split_minD: "split_min t = (x,t') \ t \ Leaf \ x # inorder t' = inorder t" by(induction t arbitrary: t' rule: split_min.induct) (auto simp: sorted_lems split: prod.splits if_splits) subsubsection \Balancing\ fun bal_tm :: "nat \ 'a list \ ('a tree * 'a list) tm" where "bal_tm n xs =1 (if n=0 then return (Leaf,xs) else (let m = n div 2 in do { (l, ys) \ bal_tm m xs; (r, zs) \ bal_tm (n-1-m) (tl ys); return (Node l (hd ys) r, zs)}))" declare bal_tm.simps[simp del] lemma bal_tm_simps: "bal_tm 0 xs =1 return(Leaf, xs)" "n > 0 \ bal_tm n xs =1 (let m = n div 2 in do { (l, ys) \ bal_tm m xs; (r, zs) \ bal_tm (n-1-m) (tl ys); return (Node l (hd ys) r, zs)})" by(simp_all add: bal_tm.simps) definition bal :: "nat \ 'a list \ ('a tree * 'a list)" where "bal n xs = val (bal_tm n xs)" lemma bal_def2[code]: "bal n xs = (if n=0 then (Leaf,xs) else (let m = n div 2; (l, ys) = bal m xs; (r, zs) = bal (n-1-m) (tl ys) in (Node l (hd ys) r, zs)))" using val_cong[OF bal_tm.simps(1)] by(simp only: bal_def val_simps) lemma bal_simps: "bal 0 xs = (Leaf, xs)" "n > 0 \ bal n xs = (let m = n div 2; (l, ys) = bal m xs; (r, zs) = bal (n-1-m) (tl ys) in (Node l (hd ys) r, zs))" by(simp_all add: bal_def2) lemma bal_eq: "bal n xs = Balance.bal n xs" apply(induction n xs rule: bal.induct) apply(case_tac "n=0") apply(simp add: bal_simps Balance.bal_simps) apply(simp add: bal_simps Balance.bal_simps split: prod.split) done definition t_bal :: "nat \ 'a list \ nat" where "t_bal n xs = time (bal_tm n xs)" lemma t_bal: "t_bal n xs = 2*n+1" unfolding t_bal_def apply(induction n xs rule: bal_tm.induct) apply(case_tac "n=0") apply(simp add: bal_tm_simps) -apply(auto simp add: bal_tm_simps tm_simps split: tm.split) + apply(auto simp add: bal_tm_simps tm_simps simp del: subst_all split: tm.split) subgoal premises p for n xs t1 xs1 using p(2)[OF refl,of xs1] p(3-) by(simp) done definition bal_list_tm :: "nat \ 'a list \ 'a tree tm" where "bal_list_tm n xs = do { (t,_) \ bal_tm n xs; return t }" definition bal_list :: "nat \ 'a list \ 'a tree" where "bal_list n xs = val (bal_list_tm n xs)" lemma bal_list_def2[code]: "bal_list n xs = (let (t,ys) = bal n xs in t)" using val_cong[OF bal_list_tm_def] by(simp only: bal_list_def bal_def val_simps) lemma bal_list: "bal_list n xs = Balance.bal_list n xs" by(auto simp add: bal_list_def2 Balance.bal_list_def bal_eq split: prod.split) definition bal_tree_tm :: "nat \ 'a tree \ 'a tree tm" where "bal_tree_tm n t =1 do { xs \ inorder2_tm t []; bal_list_tm n xs }" definition bal_tree :: "nat \ 'a tree \ 'a tree" where "bal_tree n t = val (bal_tree_tm n t)" lemma bal_tree_def2[code]: "bal_tree n t = (let xs = inorder2 t [] in bal_list n xs)" using val_cong[OF bal_tree_tm_def, of n t] by(simp only: bal_tree_def bal_list_def inorder2_def val_simps) lemma bal_tree: "bal_tree n t = Balance.bal_tree n t" by(simp add: bal_tree_def2 Balance.bal_tree_def bal_list inorder2 inorder2_inorder) definition t_bal_tree :: "nat \ 'a tree \ nat" where "t_bal_tree n xs = time 
(bal_tree_tm n xs)" lemma t_bal_tree: "n = size xs \ t_bal_tree n xs = 4*n+3" by(simp add: t_bal_tree_def bal_tree_tm_def tm_simps bal_list_tm_def surj_TM[OF inorder2_def t_inorder2_def] t_inorder2 surj_TM[OF bal_def t_bal_def] t_bal size1_size split: tm.split prod.split) subsection "Naive implementation (insert only)" fun node :: "bool \ 'a tree \ 'a \ 'a tree \ 'a tree" where "node twist s x t = (if twist then Node t x s else Node s x t)" datatype 'a up = Same | Bal "'a tree" | Unbal "'a tree" locale RBTi1 = fixes bal_i :: "nat \ nat \ bool" assumes bal_i_balance: "bal_i (size t) (height (balance_tree (t::'a::linorder tree)))" assumes mono_bal_i: "\ bal_i n h; n \ n'; h' \ h \ \ bal_i n' h'" begin subsubsection \Functions\ definition up :: "'a \ 'a tree \ bool \ 'a up \ 'a up" where "up x sib twist u = (case u of Same \ Same | Bal t \ Bal(node twist t x sib) | Unbal t \ let t' = node twist t x sib; h' = height t'; n' = size t' in if bal_i n' h' then Unbal t' else Bal(balance_tree t'))" declare up_def[simp] fun ins :: "nat \ nat \ 'a::linorder \ 'a tree \ 'a up" where "ins n d x Leaf = (if bal_i (n+1) (d+1) then Bal (Node Leaf x Leaf) else Unbal (Node Leaf x Leaf))" | "ins n d x (Node l y r) = (case cmp x y of LT \ up y r False (ins n (d+1) x l) | EQ \ Same | GT \ up y l True (ins n (d+1) x r))" fun insert :: "'a::linorder \ 'a tree \ 'a tree" where "insert x t = (case ins (size t) 0 x t of Same \ t | Bal t' \ t')" (* Unbal unreachable *) subsubsection \Functional Correctness and Invariants\ lemma height_balance: "\ \ bal_i (size t) h \ \ height (balance_tree (t::'a::linorder tree)) < h" by (meson bal_i_balance leI le_refl mono_bal_i) lemma mono_bal_i': "\ ASSUMPTION(bal_i n h); n \ n'; h' \ h \ \ bal_i n' h'" unfolding ASSUMPTION_def by(rule mono_bal_i) lemma inorder_ins: "sorted(inorder t) \ (ins n d x t = Same \ ins_list x (inorder t) = inorder t) \ (ins n d x t = Bal t' \ ins_list x (inorder t) = inorder t') \ (ins n d x t = Unbal t' \ ins_list x (inorder t) = inorder t')" by(induction t arbitrary: d t') (auto simp: ins_list_simps bal.simps[of "Suc 0"] bal.simps[of 0] split!: if_splits prod.splits up.splits) lemma ins_size: shows "ins n d x t = Bal t' \ size t' = size t + 1" and "ins n d x t = Unbal t' \ size t' = size t + 1" by(induction t arbitrary: d t') (auto split: if_splits up.splits) lemma ins_height: shows "ins n d x t = Bal t' \ height t' \ height t + 1" and "ins n d x t = Unbal t' \ height t \ height t' \ height t' \ height t + 1" proof(induction t arbitrary: d t') case Leaf { case 1 thus ?case by (auto split: if_splits) next case 2 thus ?case by (auto split: if_splits) } next case (Node l y r) { case 1 consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls show ?thesis proof (cases "ins n (d+1) x l") case Same thus ?thesis using 1 ls by (simp) next case Bal thus ?thesis using 1 ls by (auto simp: max_def dest: Node) next case (Unbal l') let ?t = "Node l' y r" let ?h = "height ?t" let ?n = "size ?t" have "\ bal_i ?n ?h" using 1 ls Unbal by (auto) thus ?thesis using 1 ls Unbal Node.IH(2)[OF Unbal] height_balance[of ?t "height ?t"] by(auto) qed next case eq thus ?thesis using 1 by(simp) next case gr show ?thesis proof (cases "ins n (d+1) x r") case Same thus ?thesis using 1 gr by (simp) next case Bal thus ?thesis using 1 gr by (auto simp: max_def dest: Node) next case (Unbal r') let ?t = "Node l y r'" let ?h = "height ?t" let ?n = "size ?t" have "\ bal_i ?n ?h" using 1 gr Unbal by (auto) thus ?thesis using 1 gr Unbal 
Node.IH(4)[OF Unbal] height_balance[of ?t "height ?t"] by(auto) qed qed next case 2 thus ?case by(auto simp: max_def dest: Node split: if_splits up.splits) } qed lemma bal_i0: "bal_i 0 0" using bal_i_balance[of Leaf] by(auto simp add: Balance.bal_tree_def balance_tree_def Balance.bal_list_def Balance.bal_simps) lemma bal_i1: "bal_i 1 1" using bal_i_balance[of "Node Leaf undefined Leaf"] by(auto simp add: balance_tree_def Balance.bal_tree_def Balance.bal_list_def Balance.bal_simps) lemma bal_i_ins_Unbal: assumes "ins n d x t = Unbal t'" shows "bal_i (size t') (height t')" proof(cases t) case Leaf thus ?thesis using assms bal_i1 by(auto split: if_splits) next case Node thus ?thesis using assms by(auto split: if_splits up.splits) qed lemma unbal_ins_Unbal: "ins n d x t = Unbal t' \ \ bal_i (n+1) (height t' + d)" proof(induction t arbitrary: d t') case Leaf thus ?case by (auto split: if_splits) next case Node thus ?case by(fastforce simp: mono_bal_i' split: if_splits up.splits) qed lemma height_Unbal_l: assumes "ins n (d+1) x l = Unbal l'" "bal_i n (height \l, y, r\ + d)" shows "height r < height l'" (is ?P) proof(rule ccontr) assume "\ ?P" thus False using assms(2) unbal_ins_Unbal[OF assms(1)] by (auto simp: mono_bal_i') qed lemma height_Unbal_r: assumes "ins n (d+1) x r = Unbal r'" "bal_i n (height \l, y, r\ + d)" shows "height l < height r'" (is ?P) proof(rule ccontr) assume "\ ?P" thus False using assms(2) unbal_ins_Unbal[OF assms(1)] by (auto simp: mono_bal_i' split: if_splits) qed lemma ins_bal_i_Bal: "\ ins n d x t = Bal t'; bal_i n (height t + d) \ \ bal_i (n+1) (height t' + d)" proof(induction t arbitrary: d t') case Leaf thus ?case by (auto split: if_splits) next case (Node l y r) consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls have 2: "bal_i n (height l + (d + 1))" using Node.prems(2) by (simp add: mono_bal_i') show ?thesis proof (cases "ins n (d+1) x l") case Same thus ?thesis using Node.prems ls by (simp) next case Bal thus ?thesis using Node.prems ls ins_height(1)[OF Bal] Node.IH(1)[OF Bal 2] by (auto simp: max_def mono_bal_i') next case (Unbal l') let ?t = "Node l' y r" let ?h = "height ?t" let ?n = "size ?t" have "\ bal_i ?n ?h" using Node.prems ls Unbal by (auto) thus ?thesis using Node.prems ls Unbal height_balance[of ?t "height ?t"] ins_height(2)[OF Unbal] by (auto simp: mono_bal_i') qed next case eq thus ?thesis using Node.prems by(simp) next case gr have 2: "bal_i n (height r + (d + 1))" using Node.prems(2) by(simp add: mono_bal_i') show ?thesis proof (cases "ins n (d+1) x r") case Same thus ?thesis using Node.prems gr by (simp) next case Bal thus ?thesis using Node.prems gr ins_height(1)[OF Bal] Node.IH(2)[OF Bal 2] by (auto simp: max_def mono_bal_i') next case (Unbal r') let ?t = "Node l y r'" let ?h = "height ?t" let ?n = "size ?t" have "\ bal_i ?n ?h" using Node.prems gr Unbal by (auto) thus ?thesis using Node.prems gr Unbal height_balance[of ?t "height ?t"] ins_height(2)[OF Unbal] by (auto simp: mono_bal_i') qed qed qed lemma ins0_neq_Unbal: assumes "n \ size t" shows "ins n 0 a t \ Unbal t'" proof(cases t) case Leaf thus ?thesis using bal_i1 by(simp add: numeral_eq_Suc mono_bal_i') next case Node thus ?thesis using unbal_ins_Unbal[of "n" 0 a t t'] assms by(auto simp: ins_size mono_bal_i' split: up.splits) qed lemma inorder_insert: "sorted(inorder t) \ inorder (insert x t) = ins_list x (inorder t)" using ins0_neq_Unbal by(auto simp add: insert_def inorder_ins split: prod.split up.split) lemma bal_i_insert: 
assumes "bal_i (size t) (height t)" shows "bal_i (size(insert x t)) (height(insert x t))" proof (cases "ins (size t) 0 x t") case Same with assms show ?thesis by simp next case Bal thus ?thesis using ins_bal_i_Bal[OF Bal] assms ins_size by(simp add: size1_size) next case (Unbal t') hence False using ins0_neq_Unbal by blast thus ?thesis .. qed end (* RBTi1 *) text \This is just a test that (a simplified version of) the intended interpretation works (so far):\ interpretation Test: RBTi1 "\n h. h \ log 2 (real(n + 1)) + 1" proof (standard, goal_cases) case (1 t) have *: "log 2 (1 + real(size t)) \ 0" by (simp) show ?case apply(simp add: height_balance_tree) using * by linarith next case (2 n h n' h') have "real h' \ real h" by(simp add: 2) also have "\ \ log 2 (n+1) + 1" by(rule 2) also have "\ \ log 2 (n'+1) + 1" using "2"(2,3) by(simp) finally show ?case . qed subsection "Efficient Implementation (insert only)" fun imbal :: "'a tree \ nat" where "imbal Leaf = 0" | "imbal (Node l _ r) = nat(abs(int(size l) - int(size r))) - 1" declare imbal.simps [simp del] lemma imbal0_if_wbalanced: "wbalanced t \ imbal t = 0" by (cases t) (auto simp add: imbal.simps) text \The degree of imbalance allowed: how far from the perfect balance may the tree degenerate.\ axiomatization c :: real where c1: "c > 1" definition bal_log :: "'a tree \ bool" where "bal_log t = (height t \ ceiling(c * log 2 (size1 t)))" fun hchild :: "'a tree \ 'a tree" where "hchild (Node l _ r) = (if height l \ height r then r else l)" lemma size1_imbal: assumes "\ bal_log t" and "bal_log (hchild t)" and "t \ Leaf" shows "imbal t > (2 powr (1 - 1/c) - 1) * size1 (t) - 1" proof - obtain l x r where t: "t = Node l x r" using \t \ Leaf\ by(auto simp: neq_Leaf_iff) let ?sh = "hchild t" have *: "c * log 2 (size1 ?sh) \ 0" using c1 apply(simp add: zero_le_mult_iff) using size1_ge0[of ?sh] by linarith have "(2 powr (1 - 1/c) - 1) * size1 t - 1 = 2 powr (1 - 1/c) * size1 t - size1 t - 1" by (simp add: ring_distribs) also have "\ = 2 * (2 powr (- 1/c) * size1 t) - size1 t - 1" using c1 by(simp add: powr_minus powr_add[symmetric] field_simps) also have "2 powr (- 1/c) * size1 t < size1 ?sh" proof - have "ceiling(c * log 2 (size1 t)) < ceiling (c * log 2 (size1 ?sh)) + 1" proof - have "ceiling(c * log 2 (size1 t)) < height t" using assms(1) by (simp add: bal_log_def) also have "\ = height(?sh) + 1" by(simp add: t max_def) finally show ?thesis using assms(2) unfolding bal_log_def by linarith qed hence "c * log 2 (size1 t) < c * log 2 (size1 ?sh) + 1" using * by linarith hence "log 2 (size1 t) - 1/c < log 2 (size1 ?sh)" using c1 by(simp add: field_simps) from powr_less_mono[OF this, of 2] show ?thesis by (simp add: powr_diff powr_minus field_simps) qed also have "2 * real(size1 ?sh) - size1 t - 1 = real(size1 ?sh) - (real(size1 t) - size1 ?sh) - 1" by (simp add: assms(1)) also have "\ \ imbal t" by (auto simp add: t assms(1) imbal.simps size1_size) finally show ?thesis by simp qed text\The following key lemma shows that \imbal\ is a suitable potential because it can pay for the linear-time cost of restructuring a tree that is not balanced but whose higher son is.\ lemma size1_imbal2: assumes "\ bal_log t" and "bal_log (hchild t)" and "t \ Leaf" shows "size1 (t) < (2 powr (1/c) / (2 - 2 powr (1/c))) * (imbal t + 1)" proof - have *: "2 powr (1 - 1 / c) - 1 > 0" using c1 by(simp add: field_simps log_less_iff[symmetric]) have "(2 powr (1 - 1 / c) - 1) * size1 t < imbal t + 1" using size1_imbal[OF assms] by linarith hence "size1 t < 1 / (2 powr (1 - 1 / 
c) - 1) * (imbal t + 1)" using * by(simp add: field_simps) also have "1 / (2 powr (1 - 1 / c) - 1) = 2 powr (1/c) / (2 - 2 powr (1/c))" proof - have "1 / (2 powr (1 - 1 / c) - 1) = 1 / (2 / 2 powr (1/c) - 1)" by(simp add: powr_diff) also have "\ = 2 powr (1/c) / (2 - 2 powr (1/c))" by(simp add: field_simps) finally show ?thesis . qed finally show ?thesis . qed datatype 'a up2 = Same2 | Bal2 "'a tree" | Unbal2 "'a tree" nat nat type_synonym 'a rbt1 = "'a tree * nat" text \An implementation where size and height are computed incrementally:\ locale RBTi2 = RBTi1 + fixes e :: real assumes e0: "e > 0" assumes imbal_size: "\ \ bal_i (size t) (height t); bal_i (size(hchild t)) (height(hchild t)); t \ Leaf \ \ e * (imbal t + 1) \ size1 (t::'a::linorder tree)" begin subsubsection \Functions\ definition up2 :: "'a \ 'a tree \ bool \ 'a up2 \ 'a up2" where "up2 x sib twist u = (case u of Same2 \ Same2 | Bal2 t \ Bal2(node twist t x sib) | Unbal2 t n1 h1 \ let n2 = size sib; h2 = height sib; t' = node twist t x sib; n' = n1+n2+1; h' = max h1 h2 + 1 in if bal_i n' h' then Unbal2 t' n' h' else Bal2(bal_tree n' t'))" declare up2_def[simp] text\@{const up2} traverses \sib\ twice; unnecessarily, as it turns out: \ definition up3_tm :: "'a \ 'a tree \ bool \ 'a up2 \ 'a up2 tm" where "up3_tm x sib twist u =1 (case u of Same2 \ return Same2 | Bal2 t \ return (Bal2(node twist t x sib)) | Unbal2 t n1 h1 \ do { n2 \ size_tree_tm sib; let t' = node twist t x sib; n' = n1+n2+1; h' = h1 + 1 in if bal_i n' h' then return (Unbal2 t' n' h') else do { t'' \ bal_tree_tm n' t'; return (Bal2 t'')}})" definition up3 :: "'a \ 'a tree \ bool \ 'a up2 \ 'a up2" where "up3 a sib twist u = val (up3_tm a sib twist u)" lemma up3_def2[simp,code]: "up3 x sib twist u = (case u of Same2 \ Same2 | Bal2 t \ Bal2 (node twist t x sib) | Unbal2 t n1 h1 \ let n2 = size_tree sib; t' = node twist t x sib; n' = n1 + n2 + 1; h' = h1 + 1 in if bal_i n' h' then Unbal2 t' n' h' else let t'' = bal_tree n' t' in Bal2 t'')" using val_cong[OF up3_tm_def] by(simp only: up3_def size_tree_def bal_tree_def val_simps up2.case_distrib[of val]) definition t_up3 :: "'a \ 'a tree \ bool \ 'a up2 \ nat" where "t_up3 x sib twist u = time (up3_tm x sib twist u)" lemma t_up3_def2[simp]: "t_up3 x sib twist u = (case u of Same2 \ 1 | Bal2 t \ 1 | Unbal2 t n1 h1 \ let n2 = size sib; t' = node twist t x sib; h' = h1 + 1; n' = n1+n2+1 in 2 * size sib + 1 + (if bal_i n' h' then 1 else t_bal_tree n' t' + 1))" by(simp add: t_up3_def up3_tm_def surj_TM[OF size_tree_def t_size_tree_def] size_tree t_size_tree t_bal_tree_def tm_simps split: tm.split up2.split) fun ins2 :: "nat \ nat \ 'a::linorder \ 'a tree \ 'a up2" where "ins2 n d x Leaf = (if bal_i (n+1) (d+1) then Bal2 (Node Leaf x Leaf) else Unbal2 (Node Leaf x Leaf) 1 1)" | "ins2 n d x (Node l y r) = (case cmp x y of LT \ up2 y r False (ins2 n (d+1) x l) | EQ \ Same2 | GT \ up2 y l True (ins2 n (d+1) x r))" text\Definition of timed final insertion function:\ fun ins3_tm :: "nat \ nat \ 'a::linorder \ 'a tree \ 'a up2 tm" where "ins3_tm n d x Leaf =1 (if bal_i (n+1) (d+1) then return(Bal2 (Node Leaf x Leaf)) else return(Unbal2 (Node Leaf x Leaf) 1 1))" | "ins3_tm n d x (Node l y r) =1 (case cmp x y of LT \ do {l' \ ins3_tm n (d+1) x l; up3_tm y r False l'} | EQ \ return Same2 | GT \ do {r' \ ins3_tm n (d+1) x r; up3_tm y l True r'})" definition ins3 :: "nat \ nat \ 'a::linorder \ 'a tree \ 'a up2" where "ins3 n d x t = val(ins3_tm n d x t)" lemma ins3_Leaf[simp,code]: "ins3 n d x Leaf = (if bal_i (n+1) (d+1) then Bal2 
(Node Leaf x Leaf) else Unbal2 (Node Leaf x Leaf) 1 1)" using val_cong[OF ins3_tm.simps(1)] by(simp only: ins3_def val_simps cmp_val.case_distrib[of val]) lemma ins3_Node[simp,code]: "ins3 n d x (Node l y r) = (case cmp x y of LT \ let l' = ins3 n (d+1) x l in up3 y r False l' | EQ \ Same2 | GT \ let r' = ins3 n (d+1) x r in up3 y l True r')" using val_cong[OF ins3_tm.simps(2)] by(simp only: ins3_def up3_def val_simps cmp_val.case_distrib[of val]) definition t_ins3 :: "nat \ nat \ 'a::linorder \ 'a tree \ nat" where "t_ins3 n d x t = time(ins3_tm n d x t)" lemma t_ins3_Leaf[simp]: "t_ins3 n d x Leaf = 1" by(simp add: tm_simps t_ins3_def) lemma t_ins3_Node[simp]: "t_ins3 n d x (Node l y r) = (case cmp x y of LT \ t_ins3 n (d+1) x l + t_up3 y r False (ins3 n (d+1) x l) | EQ \ 0 | GT \ t_ins3 n (d+1) x r + t_up3 y l True (ins3 n (d+1) x r)) + 1" apply(subst t_ins3_def) apply(subst ins3_tm.simps) apply(auto simp add: tm_simps surj_TM[OF ins3_def t_ins3_def] surj_TM[OF up3_def t_up3_def] simp del: t_up3_def2 split: tm.splits up2.split) done (*FIXME simp del: t_up3_def2 t_ins3_Node[simp] *) fun insert2 :: "'a::linorder \ 'a rbt1 \ 'a rbt1" where "insert2 x (t,n) = (case ins2 n 0 x t of Same2 \ (t,n) | Bal2 t' \ (t',n+1))" (* Unbal unreachable *) fun insert3_tm :: "'a::linorder \ 'a rbt1 \ 'a rbt1 tm" where "insert3_tm x (t,n) =1 (do { u \ ins3_tm n 0 x t; case u of Same2 \ return (t,n) | Bal2 t' \ return (t',n+1) | Unbal2 _ _ _ \ return undefined })" (* Unbal unreachable *) definition insert3 :: "'a::linorder \ 'a rbt1 \ 'a rbt1" where "insert3 a t = val (insert3_tm a t)" lemma insert3_def2[simp]: "insert3 x (t,n) = (let t' = ins3 n 0 x t in case t' of Same2 \ (t,n) | Bal2 t' \ (t',n+1))" using val_cong[OF insert3_tm.simps(1)] by(simp only: insert3_def ins3_def val_simps up2.case_distrib[of val]) definition t_insert3 :: "'a::linorder \ 'a rbt1 \ nat" where "t_insert3 a t = time (insert3_tm a t)" lemma t_insert3_def2: "t_insert3 x (t,n) = t_ins3 n 0 x t + 1" by(simp add: t_insert3_def ins3_def t_ins3_def tm_simps split: tm.split up2.split) subsubsection "Equivalence Proofs" lemma ins_ins2: shows "ins2 n d x t = Same2 \ ins n d x t = Same" and "ins2 n d x t = Bal2 t' \ ins n d x t = Bal t'" and "ins2 n d x t = Unbal2 t' n' h' \ ins n d x t = Unbal t' \ n' = size t' \ h' = height t'" by(induction t arbitrary: d t' n' h') (auto simp: size_height add.commute max.commute balance_tree_def bal_tree split: if_splits up2.splits prod.splits) lemma ins2_ins: shows "ins n d x t = Same \ ins2 n d x t = Same2" and "ins n d x t = Bal t' \ ins2 n d x t = Bal2 t'" and "ins n d x t = Unbal t' \ ins2 n d x t = Unbal2 t' (size t') (height t')" by(induction t arbitrary: d t') (auto simp: size_height add.commute max.commute balance_tree_def bal_tree split: if_splits up.splits prod.splits) corollary ins2_iff_ins: shows "ins2 n d x t = Same2 \ ins n d x t = Same" and "ins2 n d x t = Bal2 t' \ ins n d x t = Bal t'" and "ins2 n d x t = Unbal2 t' n' h' \ ins n d x t = Unbal t' \ n' = size t' \ h' = height t'" using ins2_ins(1) ins_ins2(1) apply blast using ins2_ins(2) ins_ins2(2) apply blast using ins2_ins(3) ins_ins2(3) by blast lemma ins3_ins2: "bal_i n (height t + d) \ ins3 n d x t = ins2 n d x t" proof(induction t arbitrary: d) case Leaf thus ?case by (auto) next case (Node l y r) consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls have *: "bal_i n (height l + (d + 1))" using Node.prems by (simp add: mono_bal_i') note IH = Node.IH(1)[OF *] show ?thesis proof (cases 
"ins2 n (d+1) x l") case Same2 thus ?thesis using IH ls by (simp) next case Bal2 thus ?thesis using IH ls by (simp) next case (Unbal2 l' nl' hl') let ?t' = "Node l' y r" let ?h' = "height ?t'" let ?n' = "size ?t'" have ins: "ins n (d+1) x l = Unbal l'" and "nl' = size l' \ hl' = height l'" using ins2_iff_ins(3)[THEN iffD1, OF Unbal2] by auto thus ?thesis using ls IH Unbal2 height_Unbal_l[OF ins Node.prems(1)] by(auto simp add: size_height mono_bal_i size_tree) qed next case eq thus ?thesis using Node.prems by(simp) next case gr have *: "bal_i n (height r + (d + 1))" using Node.prems by (simp add: mono_bal_i') note IH = Node.IH(2)[OF *] show ?thesis proof (cases "ins2 n (d+1) x r") case Same2 thus ?thesis using IH gr by (simp) next case Bal2 thus ?thesis using IH gr by (simp) next case (Unbal2 r' nr' hr') let ?t' = "Node r' y r" let ?h' = "height ?t'" let ?n' = "size ?t'" have ins: "ins n (d+1) x r = Unbal r'" and "nr' = size r' \ hr' = height r'" using ins2_iff_ins(3)[THEN iffD1, OF Unbal2] by auto thus ?thesis using gr IH Unbal2 height_Unbal_r[OF ins Node.prems] by(auto simp add: size_height mono_bal_i size_tree) qed qed qed lemma insert2_insert: "insert2 x (t,size t) = (t',n') \ t' = insert x t \ n' = size t'" using ins0_neq_Unbal by(auto simp: ins2_iff_ins ins_size split: up2.split up.split) lemma insert3_insert2: "bal_i n (height t) \ insert3 x (t,n) = insert2 x (t,n)" by(simp add: ins3_ins2 split: up2.split) subsubsection \Amortized Complexity\ fun \ :: "'a tree \ real" where "\ Leaf = 0" | "\ (Node l x r) = 6 * e * imbal (Node l x r) + \ l + \ r" lemma \_nn: "\ t \ 0" by(induction t) (use e0 in auto) lemma \_sum_mset: "\ t = (\s \# subtrees_mset t. 6*e*imbal s)" proof(induction t) case Leaf show ?case by(simp add: imbal.simps) next case Node thus ?case by(auto) qed lemma \_wbalanced: assumes "wbalanced t" shows "\ t = 0" proof - have "\ t = 6*e * (\s\#subtrees_mset t. real (imbal s))" by(simp add: \_sum_mset sum_mset_distrib_left) also have "\ = (6*e) * real(\s\#subtrees_mset t. imbal s)" using e0 by (simp add: multiset.map_comp o_def) also have "\ = 0" using e0 assms by(simp add: imbal0_if_wbalanced wbalanced_subtrees del: of_nat_sum_mset) finally show ?thesis . 
qed lemma imbal_ins_Bal: "ins n d x t = Bal t' \ real(imbal (node tw t' y s)) - imbal (node tw t y s) \ 1" apply(drule ins_size) apply(auto simp add: size1_size imbal.simps) done lemma imbal_ins_Unbal: "ins n d x t = Unbal t' \ real(imbal (node tw t' y s)) - imbal (node tw t y s) \ 1" apply(drule ins_size) apply(auto simp add: size1_size imbal.simps) done lemma t_ins3_Same: "ins3 n d x t = Same2 \ t_ins3 n d x t \ 2 * height t + 1" apply(induction t arbitrary: d) apply simp apply (force simp: max_def split!: up2.splits if_splits) done lemma t_ins3_Unbal: "\ ins3 n d x t = Unbal2 t' n' h'; bal_i n (height t + d) \ \ t_ins3 n d x t \ 2 * size t + 1 + height t" apply(induction t arbitrary: d t' n' h') apply simp apply (auto simp: ins3_ins2 ins2_iff_ins ins_height size_tree size1_size max_def mono_bal_i' dest: unbal_ins_Unbal split!: up2.splits if_splits) apply (fastforce simp: mono_bal_i')+ done lemma Phi_diff_Unbal: "\ ins3 n d x t = Unbal2 t' n' h'; bal_i n (height t + d) \ \ \ t' - \ t \ 6*e * height t" proof(induction t arbitrary: d t' n' h') case Leaf thus ?case by (auto simp: imbal.simps split: if_splits) next case (Node l y r) have ins: "ins n d x \l, y, r\ = Unbal t'" using Node.prems(1) by (simp only: ins2_iff_ins(3) ins3_ins2[OF Node.prems(2)]) consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls with Node.prems obtain l' nl' nh' where rec: "ins3 n (d+1) x l = Unbal2 l' nl' nh'" and t': "t' = Node l' y r" by (auto split: up2.splits if_splits) have bal: "bal_i n (height l + (d+1))" using Node.prems(2) by (simp add: mono_bal_i' split: if_splits) have rec': "ins n (d+1) x l = Unbal l'" using rec ins_ins2(3) ins3_ins2[OF bal] by simp have "\ t' - \ \l,y,r\ = 6*e*imbal\l',y,r\ - 6*e*imbal\l,y,r\ + \ l' - \ l" using t' by simp also have "\ = 6*e * (real(imbal\l',y,r\) - imbal\l,y,r\) + \ l' - \ l" by (simp add: ring_distribs) also have "\ \ 6*e + \ l' - \ l" using imbal_ins_Unbal[OF rec', of False y r] e0 t' by (simp) also have "\ \ 6*e * (height l + 1)" using Node.IH(1)[OF rec bal] by (simp add: ring_distribs) also have "\ \ 6*e * height \l, y, r\" using e0 by(simp del: times_divide_eq_left) finally show ?thesis . next case eq thus ?thesis using Node.prems by(simp) next case gr with Node.prems obtain r' rn' rh' where rec: "ins3 n (d+1) x r = Unbal2 r' rn' rh'" and t': "t' = Node l y r'" by (auto split: up2.splits if_splits) have bal: "bal_i n (height r + (d+1))" using Node.prems(2) by (simp add: mono_bal_i' split: if_splits) have rec': "ins n (d+1) x r = Unbal r'" using rec ins_ins2(3) ins3_ins2[OF bal] by simp have "\ t' - \ \l,y,r\ = 6*e*imbal\l,y,r'\ - 6*e*imbal\l,y,r\ + \ r' - \ r" using t' by simp also have "\ = 6*e * (real(imbal\l,y,r'\) - imbal\l,y,r\) + \ r' - \ r" by (simp add: ring_distribs) also have "\ \ 6*e + \ r' - \ r" using imbal_ins_Unbal[OF rec', of True y l] e0 t' by (simp) also have "\ \ 6*e * (height r + 1)" using Node.IH(2)[OF rec bal] by (simp add: ring_distribs) also have "\ \ 6*e * height \l, y, r\" using e0 by(simp del: times_divide_eq_left) finally show ?thesis . 
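(* So as long as ins3 keeps returning Unbal2, each level of the insertion path
   increases the potential by at most 6*e, yielding the 6*e * height t bound that
   amor_Unbal below combines with the running-time bound t_ins3_Unbal. *)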
qed qed lemma amor_Unbal: "\ ins3 n d x t = Unbal2 t' n' h'; bal_i n (height t + d) \ \ t_ins3 n d x t + \ t' - \ t \ 2*size1 t + (6*e + 1) * height t" apply(frule (1) t_ins3_Unbal) apply(drule (1) Phi_diff_Unbal) by(simp add: ring_distribs size1_size) lemma t_ins3_Bal: "\ ins3 n d x t = Bal2 t'; bal_i n (height t + d) \ \ t_ins3 n d x t + \ t' - \ t \ (6*e+2) * (height t + 1)" proof(induction t arbitrary: d t') case Leaf thus ?case using e0 by (auto simp: imbal.simps split: if_splits) next case (Node l y r) have Bal: "ins n d x \l, y, r\ = Bal t'" by (metis Node.prems ins3_ins2 ins_ins2(2)) consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls have *: "bal_i n (height l + (d+1))" using Node.prems(2) by (simp add: mono_bal_i') show ?thesis proof (cases "ins3 n (d+1) x l") case Same2 thus ?thesis using Node ls by (simp) next case (Bal2 l') have Bal: "ins n (d + 1) x l = Bal l'" using "*" Bal2 by (auto simp: ins3_ins2 ins2_iff_ins(2)) let ?t = "Node l y r" let ?t' = "Node l' y r" from Bal2 have t': "t' = ?t'" using Node.prems ls by (simp) have "t_ins3 n d x ?t + \ t' - \ ?t = t_ins3 n (d+1) x l + 2 + \ t' - \ ?t" using ls Bal2 by simp also have "\ = t_ins3 n (d+1) x l + 6*e*imbal ?t' + \ l' - 6*e*imbal ?t - \ l + 2" using t' by simp also have "\ \ t_ins3 n (d+1) x l + \ l' - \ l + 6*e*imbal ?t' - 6*e*imbal ?t + 2" by linarith also have "\ \ (6*e+2) * height l + 6*e*imbal ?t' - 6*e*imbal ?t + 6*e + 4" using Node.IH(1)[OF Bal2 *] by(simp add: ring_distribs) also have "\ = (6*e+2) * height l + 6*e*(real(imbal ?t') - imbal ?t) + 6*e + 4" by(simp add: algebra_simps) also have "\ \ (6*e+2) * height l + 6*e + 6*e + 4" using imbal_ins_Bal[OF Bal, of False y r] e0 by (simp del: times_divide_eq_left) also have "\ = (6*e+2) * (height l + 1) + 6*e + 2" by (simp add: ring_distribs) also have "\ \ (6*e+2) * (max (height l) (height r) + 1) + 6*e + 2" using e0 by (simp add: mult_left_mono) also have "\ \ (6*e+2) * (height ?t + 1)" using e0 by(simp add: field_simps) finally show ?thesis . 
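(* The Unbal2 case below is the expensive one: the whole subtree Node l' y r is
   rebuilt, and imbal_size turns the released potential 6*e*imbal into a bound of
   the order of size1, which pays for the linear-time t_bal_tree call. *)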
next case (Unbal2 l' nl' hl') have Unbal: "ins n (d + 1) x l = Unbal l'" and inv: "nl' = size l'" "hl' = height l'" using Unbal2 ins3_ins2[OF *] by(auto simp add: ins2_iff_ins(3)) have bal_l': "bal_i (size l') (height l')" by(fact bal_i_ins_Unbal[OF Unbal]) let ?t = "Node l y r" let ?h = "height ?t" let ?n = "size ?t" let ?t' = "Node l' y r" let ?h' = "height ?t'" let ?n' = "size ?t'" have bal_t': "\ bal_i ?n' ?h'" using ls Unbal Bal by (auto) hence t': "t' = balance_tree ?t'" using ls Unbal Bal by (auto) have hl': "height r < height l'" by(fact height_Unbal_l[OF Unbal Node.prems(2)]) have "t_ins3 n d x ?t + \ t' - \ ?t = t_ins3 n d x ?t - \ ?t" by(simp add: t' \_wbalanced wbalanced_balance_tree) also have "\ = t_ins3 n d x ?t - 6*e * imbal ?t - \ l - \ r" by simp also have "\ \ t_ins3 n d x ?t - 6*e * imbal ?t - \ l" using \_nn[of r] by linarith also have "\ \ t_ins3 n d x ?t - 6*e * imbal ?t' - \ l + 6*e" using mult_left_mono[OF imbal_ins_Unbal[OF Unbal, of False y r], of "4*e"] e0 apply (simp only: node.simps if_False ring_distribs) by (simp) also have "\ \ real(t_ins3 n d x ?t) - 6*(size1 ?t' - e) - \ l + 6*e + 1" using imbal_size[OF bal_t'] hl' bal_l' by(simp add: ring_distribs) also have "\ = real(t_ins3 n (d+1) x l) + 2*size1 l' + 4*size1 r - 4*size1 ?t' - \ l + 6*e + 6*e + 1" using ls Unbal2 inv bal_t' hl' by (simp add: t_bal_tree max_def size1_size) also have "\ = real(t_ins3 n (d+1) x l) - 2*size1 l' - \ l + 6*e + 6*e + 1" by simp also have "\ \ (6*e + 2) * height l + 6*e + 6*e" using amor_Unbal[OF Unbal2 *] ins_size(2)[OF Unbal] \_nn[of l'] by(simp add: ring_distribs size1_size) also have "\ \ (6*e + 2) * (height l + 2)" by (simp add: ring_distribs) also have "\ \ (6*e+2) * (height \l, y, r\ + 1)" using e0 by (simp add: mult_mono del: times_divide_eq_left) finally show ?thesis by linarith qed next case eq thus ?thesis using Node.prems by(simp) next case gr have *: "bal_i n (height r + (d+1))" using Node.prems(2) by (simp add: mono_bal_i') show ?thesis proof (cases "ins3 n (d+1) x r") case Same2 thus ?thesis using Node gr by (simp) next case (Bal2 r') have Bal: "ins n (d + 1) x r = Bal r'" using "*" Bal2 by (auto simp: ins3_ins2 ins2_iff_ins(2)) let ?t = "Node l y r" let ?t' = "Node l y r'" from Bal2 have t': "t' = ?t'" using Node.prems gr by (simp) have "t_ins3 n d x ?t + \ t' - \ ?t = t_ins3 n (d+1) x r + 2 + \ t' - \ ?t" using gr Bal2 by simp also have "\ = t_ins3 n (d+1) x r + 6*e*imbal ?t' + \ r' - 6*e*imbal ?t - \ r + 2" using t' by simp also have "\ \ t_ins3 n (d+1) x r + \ r' - \ r + 6*e*imbal ?t' - 6*e*imbal ?t + 2" by linarith also have "\ \ (6*e+2) * height r + 6*e*imbal ?t' - 6*e*imbal ?t + 6*e + 4" using Node.IH(2)[OF Bal2 *] by(simp add: ring_distribs) also have "\ = (6*e+2) * height r + 6*e*(real(imbal ?t') - imbal ?t) + 6*e + 4" by(simp add: algebra_simps) also have "\ \ (6*e+2) * height r + 6*e + 6*e + 4" using imbal_ins_Bal[OF Bal, of True y l] e0 by (simp del: times_divide_eq_left) also have "\ = (6*e+2) * (height r + 1) + 6*e + 2" by (simp add: ring_distribs) also have "\ \ (6*e+2) * (max (height l) (height r) + 1) + 6*e + 2" using e0 by (simp add: mult_left_mono) also have "\ = (6*e+2) * (height ?t + 1)" using e0 by(simp add: field_simps) finally show ?thesis . 
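(* Symmetric to the ls branch above: an insertion into the right subtree that comes
   back Unbal2 triggers a rebuild of Node l y r', paid for in the same way. *)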
next case (Unbal2 r' nr' hr') have Unbal: "ins n (d + 1) x r = Unbal r'" and inv: "nr' = size r'" "hr' = height r'" using Unbal2 ins3_ins2[OF *] by(auto simp add: ins2_iff_ins(3)) have bal_r': "bal_i (size r') (height r')" by(fact bal_i_ins_Unbal[OF Unbal]) let ?t = "Node l y r" let ?h = "height ?t" let ?n = "size ?t" let ?t' = "Node l y r'" let ?h' = "height ?t'" let ?n' = "size ?t'" have bal_t': "\ bal_i ?n' ?h'" using gr Unbal Bal by (auto) hence t': "t' = balance_tree ?t'" using gr Unbal Bal by (auto) have hr': "height l < height r'" by(fact height_Unbal_r[OF Unbal Node.prems(2)]) have "t_ins3 n d x ?t + \ t' - \ ?t = t_ins3 n d x ?t - \ ?t" by(simp add: t' \_wbalanced wbalanced_balance_tree) also have "\ = t_ins3 n d x ?t - 6*e * imbal ?t - \ r - \ l" by simp also have "\ \ t_ins3 n d x ?t - 6*e * imbal ?t - \ r" using \_nn[of l] by linarith also have "\ \ t_ins3 n d x ?t - 6*e * imbal ?t' - \ r + 6*e" using mult_left_mono[OF imbal_ins_Unbal[OF Unbal, of True y l], of "4*e"] e0 apply (simp only: node.simps if_True ring_distribs) by (simp) also have "\ \ real(t_ins3 n d x ?t) - 6*(size1 ?t' - e) - \ r + 6*e + 1" using imbal_size[OF bal_t'] hr' bal_r' by (simp add: ring_distribs) also have "\ = real(t_ins3 n (d+1) x r) + 2*size1 r' + 4*size1 l - 4*size1 ?t' - \ r + 6*e + 6*e + 1" using gr Unbal2 inv bal_t' hr' by (simp add: t_bal_tree max_def size1_size add_ac) also have "\ = real(t_ins3 n (d+1) x r) - 2*size1 r' - \ r + 6*e + 6*e + 1" by simp also have "\ \ (6*e + 2) * height r + 6*e + 6*e" using amor_Unbal[OF Unbal2 *] ins_size(2)[OF Unbal] \_nn[of r'] by(simp add: ring_distribs size1_size) also have "\ \ (6*e + 2) * (height r + 2)" by (simp add: ring_distribs) also have "\ \ (6*e+2) * (height \l, y, r\ + 1)" using e0 by (simp add: mult_mono del: times_divide_eq_left) finally show ?thesis by linarith qed qed qed lemma t_insert3_amor: assumes "n = size t" "bal_i (size t) (height t)" "insert3 a (t,n) = (t',n')" shows "t_insert3 a (t,n) + \ t' - \ t \ (6*e+2) * (height t + 1) + 1" proof (cases "ins3 (size t) 0 a t") case Same2 have *: "5*e * real (height t') \ 0" using e0 by simp show ?thesis using Same2 assms(1,3) e0 t_ins3_Same[OF Same2] apply (simp add: ring_distribs t_insert3_def2) using * by linarith next case (Bal2 t') thus ?thesis using t_ins3_Bal[OF Bal2] assms by(simp add: ins_size t_insert3_def2) next case Unbal2 hence False using ins0_neq_Unbal using assms(1,2) ins3_ins2[of n t 0] by (fastforce simp: ins2_iff_ins(3)) thus ?thesis .. qed end (* RBTi2 *) text \The insert-only version is shown to have the desired logarithmic amortized complexity. First it is shown to be linear in the height of the tree.\ locale RBTi2_Amor = RBTi2 begin fun nxt :: "'a \ 'a rbt1 \ 'a rbt1" where "nxt x tn = insert3 x tn" fun t\<^sub>s :: "'a \ 'a rbt1 \ real" where "t\<^sub>s x tn = t_insert3 x tn" interpretation I_RBTi2_Amor: Amortized where init = "(Leaf,0)" and nxt = nxt and inv = "\(t,n). n = size t \ bal_i (size t) (height t)" and t = t\<^sub>s and \ = "\(t,n). \ t" and U = "\x (t,_). 
(6*e+2) * (height t + 1) + 1" proof (standard, goal_cases) case 1 show ?case using bal_i0 by (simp split: prod.split) next case (2 s x) thus ?case using insert2_insert[of x "fst s"] bal_i_insert[of "fst s"] by (simp del: insert2.simps insert3_def2 insert.simps add: insert3_insert2 split: prod.splits) next case (3 s) thus ?case using \_nn[of "fst s" ] by (auto split: prod.splits) next case 4 show ?case by(simp) next case (5 s x) thus ?case using t_insert3_amor[of "snd s" "fst s" x] by (auto simp del: insert3_def2 split: prod.splits) qed end text\Now it is shown that a certain instantiation of \bal_i\ that guarantees logarithmic height satisfies the assumptions of locale @{locale RBTi2}.\ interpretation I_RBTi2: RBTi2 where bal_i = "\n h. h \ ceiling(c * log 2 (n+1))" and e = "2 powr (1/c) / (2 - 2 powr (1/c))" proof (standard, goal_cases) case (1 t) have 0: "log 2 (1 + real (size t)) \ 0" by simp have 1: "log 2 (1 + real (size t)) \ c * log 2 (1 + real (size t))" using c1 "0" less_eq_real_def by auto thus ?case apply(simp add: height_balance_tree add_ac ceiling_mono) using 0 by linarith next case (2 n h n' h') have "int h' \ int h" by(simp add: 2) also have "\ \ ceiling(c * log 2 (real n + 1))" by(rule 2) also have "\ \ ceiling(c * log 2 (real n' + 1))" using c1 "2"(2,3) by (simp add: ceiling_mono) finally show ?case . next case 3 have "2 powr (1/c) < 2 powr 1" using c1 by (simp only: powr_less_cancel_iff) simp hence "2 - 2 powr (1 / c) > 0" by simp thus ?case by(simp) next case (4 t) thus ?case using size1_imbal2[of t] by(simp add: bal_log_def size1_size add_ac ring_distribs) qed subsection "Naive implementation (with delete)" axiomatization cd :: real where cd0: "cd > 0" definition bal_d :: "nat \ nat \ bool" where "bal_d n dl = (dl < cd*(n+1))" lemma bal_d0: "bal_d n 0" using cd0 by(simp add: bal_d_def) lemma mono_bal_d: "\ bal_d n dl; n \ n' \ \ bal_d n' dl" unfolding bal_d_def using cd0 mult_left_mono[of "real n + 1" "real n' + 1" cd] by linarith locale RBTid1 = RBTi1 begin subsubsection \Functions\ fun insert_d :: "'a::linorder \ 'a rbt1 \ 'a rbt1" where "insert_d x (t,dl) = (case ins (size t + dl) 0 x t of Same \ t | Bal t' \ t', dl)" (* Unbal unreachable *) definition up_d :: "'a \ 'a tree \ bool \ 'a tree option \ 'a tree option" where "up_d x sib twist u = (case u of None \ None | Some t \ Some(node twist t x sib))" declare up_d_def[simp] (* FIXME tdel \ height fun t_split_min :: "'a tree \ nat" where "t_split_min t = height t" *) fun del_tm :: "'a::linorder \ 'a tree \ 'a tree option tm" where "del_tm x Leaf =1 return None" | "del_tm x (Node l y r) =1 (case cmp x y of LT \ do { l' \ del_tm x l; return (up_d y r False l')} | EQ \ if r = Leaf then return (Some l) else do { (a',r') \ split_min_tm r; return (Some(Node l a' r'))} | GT \ do { r' \ del_tm x r; return (up_d y l True r')})" definition del :: "'a::linorder \ 'a tree \ 'a tree option" where "del x t = val(del_tm x t)" lemma del_Leaf[simp]: "del x Leaf = None" using val_cong[OF del_tm.simps(1)] by(simp only: del_def val_simps) lemma del_Node[simp]: "del x (Node l y r) = (case cmp x y of LT \ let l' = del x l in up_d y r False l' | EQ \ if r = Leaf then Some l else let (a',r') = split_min r in Some(Node l a' r') | GT \ let r' = del x r in up_d y l True r')" using val_cong[OF del_tm.simps(2)] by(simp only: del_def split_min_def val_simps cmp_val.case_distrib[of val]) definition t_del :: "'a::linorder \ 'a tree \ nat" where "t_del x t = time(del_tm x t)" lemma t_del_Leaf[simp]: "t_del x Leaf = 1" by(simp add: t_del_def 
tm_simps) lemma t_del_Node[simp]: "t_del x (Node l y r) = (case cmp x y of LT \ t_del x l + 1 | EQ \ if r = Leaf then 1 else t_split_min r + 1 | GT \ t_del x r + 1)" by(simp add: t_del_def t_split_min_def tm_simps split: tm.split prod.split) fun delete :: "'a::linorder \ 'a rbt1 \ 'a rbt1" where "delete x (t,dl) = (case del x t of None \ (t,dl) | Some t' \ if bal_d (size t') (dl+1) then (t',dl+1) else (balance_tree t', 0))" declare delete.simps [simp del] subsubsection \Functional Correctness\ lemma size_insert_d: "insert_d x (t,dl) = (t',dl') \ size t \ size t'" by(auto simp: ins_size ins0_neq_Unbal split: if_splits up.splits) lemma inorder_insert_d: "insert_d x (t,dl) = (t',dl') \ sorted(inorder t) \ inorder t' = ins_list x (inorder t)" by(auto simp add: ins0_neq_Unbal insert_def inorder_ins split: prod.split up.split) lemma bal_i_insert_d: assumes "insert_d x (t,dl) = (t',dl')" "bal_i (size t + dl) (height t)" shows "bal_i (size t' + dl) (height t')" proof (cases "ins (size t + dl) 0 x t") case Same with assms show ?thesis by (simp) next case Bal thus ?thesis using ins_bal_i_Bal[OF Bal] assms ins_size by(simp add: size1_size) next case (Unbal t') hence False by(simp add: ins0_neq_Unbal) thus ?thesis .. qed lemma inorder_del: "sorted(inorder t) \ inorder(case del x t of None \ t | Some t' \ t') = del_list x (inorder t)" by(induction t) (auto simp add: del_list_simps split_minD split: option.splits prod.splits) lemma inorder_delete: "\ delete x (t,dl) = (t',dl'); sorted(inorder t) \ \ inorder t' = del_list x (inorder t)" using inorder_del[of t x] by(auto simp add: delete.simps split: option.splits if_splits) lemma size_split_min: "\ split_min t = (a,t'); t \ Leaf \ \ size t' = size t - 1" by(induction t arbitrary: t') (auto simp add: zero_less_iff_neq_zero split: if_splits prod.splits) lemma height_split_min: "\ split_min t = (a,t'); t \ Leaf \ \ height t' \ height t" apply(induction t arbitrary: t') apply simp by(fastforce split: if_splits prod.splits) lemma size_del: "del x t = Some t' \ size t' = size t - 1" proof(induction x t arbitrary: t' rule: del_tm.induct) case 1 thus ?case by simp next case (2 x l y r) consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls with "2.prems" obtain l' where l': "del x l = Some l'" by(auto split: option.splits) hence [arith]: "size l \ 0" by(cases l) auto show ?thesis using ls 2 l' by(auto) next case eq show ?thesis proof (cases "r = Leaf") case True thus ?thesis using eq "2.prems" by(simp) next case False thus ?thesis using eq "2.prems" eq_size_0[of r] by (auto simp add: size_split_min simp del: eq_size_0 split: prod.splits) qed next case gr with "2.prems" obtain r' where r': "del x r = Some r'" by(auto split: option.splits) hence [arith]: "size r \ 0" by(cases r) auto show ?thesis using gr 2 r' by(auto) qed qed lemma height_del: "del x t = Some t' \ height t' \ height t" proof(induction x t arbitrary: t' rule: del_tm.induct) case 1 thus ?case by simp next case (2 x l y r) consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls thus ?thesis using 2 by(fastforce split: option.splits) next case eq thus ?thesis using "2.prems" by (auto dest: height_split_min split: if_splits prod.splits) next case gr thus ?thesis using 2 by(fastforce split: option.splits) qed qed lemma bal_i_delete: assumes "bal_i (size t + dl) (height t)" "delete x (t,dl) = (t',dl')" shows "bal_i (size t' + dl') (height t')" proof (cases "del x t") case None with assms show ?thesis by 
(simp add: delete.simps) next case Some hence "size t \ 0" by(cases t) auto thus ?thesis using Some assms size_del height_del[OF Some] by(force simp add: delete.simps bal_i_balance mono_bal_i' split: if_splits) qed lemma bal_d_delete: "\ bal_d (size t) dl; delete x (t,dl) = (t',dl') \ \ bal_d (size t') dl'" by (auto simp add: delete.simps bal_d0 size_del split: option.splits if_splits) text \Full functional correctness of the naive implementation:\ interpretation Set_by_Ordered where empty = "(Leaf,0)" and isin = "\(t,n). isin t" and insert = insert_d and delete = delete and inorder = "\(t,n). inorder t" and inv = "\_. True" proof (standard, goal_cases) case 1 show ?case by simp next case 2 thus ?case by(simp add: isin_set split: prod.splits) next case (3 t) thus ?case by(auto simp del: insert_d.simps simp add: inorder_insert_d split: prod.splits) next case (4 tn x) obtain t n where "tn = (t,n)" by fastforce thus ?case using 4 by(auto simp: inorder_delete split: prod.splits) qed (rule TrueI)+ end (* RBTid1 *) interpretation I_RBTid1: RBTid1 where bal_i = "\n h. h \ log 2 (real(n + 1)) + 1" .. subsection "Efficient Implementation (with delete)" type_synonym 'a rbt2 = "'a tree * nat * nat" locale RBTid2 = RBTi2 + RBTid1 begin subsubsection \Functions\ fun insert2_d :: "'a::linorder \ 'a rbt2 \ 'a rbt2" where "insert2_d x (t,n,dl) = (case ins2 (n+dl) 0 x t of Same2 \ (t,n,dl) | Bal2 t' \ (t',n+1,dl))" (* Unbal unreachable *) fun insert3_d_tm :: "'a::linorder \ 'a rbt2 \ 'a rbt2 tm" where "insert3_d_tm x (t,n,dl) =1 do { t' \ ins3_tm (n+dl) 0 x t; case t' of Same2 \ return (t,n,dl) | Bal2 t' \ return (t',n+1,dl) | Unbal2 _ _ _ \ return undefined}" (* Unbal unreachable *) definition insert3_d :: "'a::linorder \ 'a rbt2 \ 'a rbt2" where "insert3_d a t = val (insert3_d_tm a t)" lemma insert3_d_def2[simp,code]: "insert3_d x (t,n,dl) = (let t' = ins3 (n+dl) 0 x t in case t' of Same2 \ (t,n,dl) | Bal2 t' \ (t',n+1,dl) | Unbal2 _ _ _ \ undefined)" using val_cong[OF insert3_d_tm.simps(1)] by(simp only: insert3_d_def ins3_def val_simps up2.case_distrib[of val]) definition t_insert3_d :: "'a::linorder \ 'a rbt2 \ nat" where "t_insert3_d x t = time(insert3_d_tm x t)" lemma t_insert3_d_def2[simp]: "t_insert3_d x (t,n,dl) = (t_ins3 (n+dl) 0 x t + 1)" by(simp add: t_insert3_d_def t_ins3_def tm_simps split: tm.split up2.split) fun delete2_tm :: "'a::linorder \ 'a rbt2 \ 'a rbt2 tm" where "delete2_tm x (t,n,dl) =1 do { t' \ del_tm x t; case t' of None \ return (t,n,dl) | Some t' \ (let n' = n-1; dl' = dl + 1 in if bal_d n' dl' then return (t',n',dl') else do { t'' \ bal_tree_tm n' t'; return (t'', n', 0)})}" definition delete2 :: "'a::linorder \ 'a rbt2 \ 'a rbt2" where "delete2 x t = val(delete2_tm x t)" lemma delete2_def2: "delete2 x (t,n,dl) = (let t' = del x t in case t' of None \ (t,n,dl) | Some t' \ (let n' = n-1; dl' = dl + 1 in if bal_d n' dl' then (t',n',dl') else let t'' = bal_tree n' t' in (t'', n', 0)))" using val_cong[OF delete2_tm.simps(1)] by(simp only: delete2_def ins3_def del_def bal_tree_def val_simps option.case_distrib[of val]) definition t_delete2 :: "'a::linorder \ 'a rbt2 \ nat" where "t_delete2 x t = time(delete2_tm x t)" lemma t_delete2_def2: "t_delete2 x (t,n,dl) = (t_del x t + (case del x t of None \ 1 | Some t' \ (let n' = n-1; dl' = dl + 1 in if bal_d n' dl' then 1 else t_bal_tree n' t' + 1)))" by(auto simp add: t_delete2_def tm_simps t_del_def del_def t_bal_tree_def split: tm.split option.split) subsubsection \Equivalence proofs\ lemma insert2_insert_d: "insert2_d x (t,size t,dl) 
= (t',n',dl') \ (t',dl') = insert_d x (t,dl) \ n' = size t'" by(auto simp: ins2_iff_ins ins_size ins0_neq_Unbal split: up2.split up.split) lemma insert3_insert2_d: "bal_i (n+dl) (height t) \ insert3_d x (t,n,dl) = insert2_d x (t,n,dl)" by(simp add: ins3_ins2 split: up2.split) lemma delete2_delete: "delete2 x (t,size t,dl) = (t',n',dl') \ (t',dl') = delete x (t,dl) \ n' = size t'" by(auto simp: delete2_def2 delete.simps size_del balance_tree_def bal_tree split: option.splits) subsubsection \Amortized complexity\ fun \\<^sub>d :: "'a rbt2 \ real" where "\\<^sub>d (t,n,dl) = \ t + 4*dl/cd" lemma \\<^sub>d_case: "\\<^sub>d tndl = (case tndl of (t,n,dl) \ \ t + 4*dl/cd)" by(simp split: prod.split) lemma imbal_diff_decr: "size r' = size r - 1 \ real(imbal (Node l x' r')) - imbal (Node l x r) \ 1" by(simp add: imbal.simps) lemma tinsert_d_amor: assumes "n = size t" "insert_d a (t,dl) = (t',dl')" "bal_i (size t + dl) (height t)" shows "t_insert3_d a (t,n,dl) + \ t' - \ t \ (6*e+2) * (height t + 1) + 1" proof (cases "ins (size t + dl) 0 a t") case Same have *: "5*e * real (height t) \ 0" using e0 by simp show ?thesis using t_ins3_Same[of "size t + dl" 0 a t] Same assms apply (auto simp add: ring_distribs ins3_ins2 ins2_ins) using * e0 apply safe by linarith next case (Bal t') thus ?thesis using t_ins3_Bal[of "size t + dl" 0 a t t'] Bal assms by(simp add: ins_size ins3_ins2 ins2_ins) next case Unbal hence False by(simp add: ins0_neq_Unbal) thus ?thesis .. qed lemma t_split_min_ub: "t \ Leaf \ t_split_min t \ height t + 1" by(induction t) auto lemma t_del_ub: "t_del x t \ height t + 1" by(induction t) (auto dest: t_split_min_ub) lemma imbal_split_min: "split_min t = (x,t') \ t \ Leaf \ real(imbal t') - imbal t \ 1" proof(induction t arbitrary: t') case Leaf thus ?case by simp next case (Node l y r) thus ?case using size_split_min[OF Node.prems] apply(auto split: if_splits option.splits prod.splits) apply(auto simp: imbal.simps) apply(cases t') apply (simp add: imbal.simps) apply (simp add: imbal.simps) done qed lemma imbal_del_Some: "del x t = Some t' \ real(imbal t') - imbal t \ 1" proof(induction t arbitrary: t') case Leaf thus ?case by (auto simp add: imbal.simps split!: if_splits) next case (Node t1 x2 t2) thus ?case using size_del[OF Node.prems] apply(auto split: if_splits option.splits prod.splits) apply(auto simp: imbal.simps) apply(cases t') apply (simp add: imbal.simps) apply (simp add: imbal.simps) done qed lemma Phi_diff_split_min: "split_min t = (x, t') \ t \ Leaf \ \ t' - \ t \ 6*e*height t" proof(induction t arbitrary: t') case Leaf thus ?case by simp next case (Node l y r) note [arith] = e0 thus ?case proof (cases "l = Leaf") have *: "- a \ b \ 0 \ a+b" for a b :: real by linarith case True thus ?thesis using Node.prems by(cases r)(auto simp: imbal.simps *) next case False with Node.prems obtain l' where rec: "split_min l = (x,l')" and t': "t' = Node l' y r" by (auto split: prod.splits) hence "\ t' - \ \l,y,r\ = 6*e*imbal\l',y,r\ - 6*e*imbal\l,y,r\ + \ l' - \ l" by simp also have "\ = 6*e * (real(imbal\l',y,r\) - imbal\l,y,r\) + \ l' - \ l" by (simp add: ring_distribs) also have "\ \ 6*e + \ l' - \ l" using imbal_split_min[OF Node.prems(1)] t' by (simp) also have "\ \ 6*e * (height l + 1)" using Node.IH(1)[OF rec False] by (simp add: ring_distribs) also have "\ \ 6*e * height \l, y, r\" by(simp del: times_divide_eq_left) finally show ?thesis . 
qed qed lemma Phi_diff_del_Some: "del x t = Some t' \ \ t' - \ t \ 6*e * height t" proof(induction t arbitrary: t') case Leaf thus ?case by (auto simp: imbal.simps) next case (Node l y r) note [arith] = e0 consider (ls) "x < y" | (eq) "x = y" | (gr) "x > y" by(metis less_linear) thus ?case proof cases case ls with Node.prems obtain l' where rec: "del x l = Some l'" and t': "t' = Node l' y r" by (auto split: option.splits) hence "\ t' - \ \l,y,r\ = 6*e*imbal\l',y,r\ - 6*e*imbal\l,y,r\ + \ l' - \ l" by simp also have "\ = 6*e * (real(imbal\l',y,r\) - imbal\l,y,r\) + \ l' - \ l" by (simp add: ring_distribs) also have "\ \ 6*e + \ l' - \ l" using imbal_del_Some[OF Node.prems] t' by (simp) also have "\ \ 6*e * (height l + 1)" using Node.IH(1)[OF rec] by (simp add: ring_distribs) also have "\ \ 6*e * height \l, y, r\" by(simp del: times_divide_eq_left) finally show ?thesis . next case [simp]: eq show ?thesis proof (cases "r = Leaf") case [simp]: True show ?thesis proof (cases "size t' = 0") case True thus ?thesis using Node.prems by(auto simp: imbal.simps of_nat_diff) next case [arith]: False show ?thesis using Node.prems by(simp add: imbal.simps of_nat_diff algebra_simps) qed next case False then obtain a r' where *: "split_min r = (a,r')" using Node.prems by(auto split: prod.splits) from mult_left_mono[OF imbal_diff_decr[OF size_split_min[OF this False], of l a y], of "5*e"] have "6*e*real (imbal \l, a, r'\) - 6*e*real (imbal \l, y, r\) \ 6*e" by(simp add: ring_distribs) thus ?thesis using Node.prems * False Phi_diff_split_min[OF *] apply(auto simp add: max_def ring_distribs) using mult_less_cancel_left_pos[of "6*e" "height r" "height l"] by linarith qed next case gr with Node.prems obtain r' where rec: "del x r = Some r'" and t': "t' = Node l y r'" by (auto split: option.splits) hence "\ t' - \ \l,y,r\ = 6*e*imbal\l,y,r'\ - 6*e*imbal\l,y,r\ + \ r' - \ r" by simp also have "\ = 6*e * (real(imbal\l,y,r'\) - imbal\l,y,r\) + \ r' - \ r" by (simp add: ring_distribs) also have "\ \ 6*e + \ r' - \ r" using imbal_del_Some[OF Node.prems] t' by (simp) also have "\ \ 6*e * (height r + 1)" using Node.IH(2)[OF rec] by (simp add: ring_distribs) also have "\ \ 6*e * height \l, y, r\" by(simp del: times_divide_eq_left) finally show ?thesis . 
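(* Informal reading, not part of the formal argument: by imbal_del_Some, a deletion below a node increases that node's imbalance by at most 1, so its contribution to the potential grows by at most 6*e; summing over the at most height t nodes on the search path gives the bound 6*e * height t stated by this lemma. *)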
qed qed lemma amor_del_Some: "del x t = Some t' \ t_del x t + \ t' - \ t \ (6*e + 1) * height t + 1" apply(drule Phi_diff_del_Some) using t_del_ub[of x t] by (simp add: ring_distribs) lemma cd1: "1/cd > 0" by(simp add: cd0) lemma t_delete_amor: assumes "n = size t" shows "t_delete2 x (t,n,dl) + \\<^sub>d (delete2 x (t,n,dl)) - \\<^sub>d (t,n,dl) \ (6*e+1) * height t + 4/cd + 4" proof (cases "del x t") case None have *: "6*e * real (height t) \ 0" using e0 by simp show ?thesis using None apply (simp add: delete2_def2 t_delete2_def2 ring_distribs) using * t_del_ub[of x t] cd1 by linarith next case (Some t') show ?thesis proof (cases "bal_d (n-1) (dl+1)") case True thus ?thesis using assms Some amor_del_Some[OF Some] by(simp add: size_del delete2_def2 t_delete2_def2 algebra_simps add_divide_distrib) next case False from Some have [arith]: "size t \ 0" by(cases t) (auto) have "t_delete2 x (t, n, dl) + \\<^sub>d (delete2 x (t,n,dl)) - \\<^sub>d (t,n,dl) = t_delete2 x (t, n, dl) - \ t - 4*dl/cd" using False Some by(simp add: delete2_def2 t_delete2_def2 \_wbalanced bal_tree assms size_del) also have "\ = t_del x t + 4 * size t - \ t - 4*dl/cd" using False assms Some by(simp add: t_delete2_def2 t_bal_tree size_del size1_size) also have "\ \ (6*e+1)*height t + 4*(size t - dl/cd + 1)" using amor_del_Some[OF Some] \_nn[of t] \_nn[of t'] by(simp add: ring_distribs) also have "size t - dl/cd + 1 \ 1/cd + 1" using assms False cd0 unfolding bal_d_def by(simp add: algebra_simps of_nat_diff)(simp add: field_simps) finally show ?thesis by(simp add: ring_distribs) qed qed datatype (plugins del: lifting) 'b ops = Insert 'b | Delete 'b fun nxt :: "'a ops \ 'a rbt2 \ 'a rbt2" where "nxt (Insert x) t = insert3_d x t" | "nxt (Delete x) t = delete2 x t" fun t\<^sub>s :: "'a ops \ 'a rbt2 \ real" where "t\<^sub>s (Insert x) t = t_insert3_d x t" | "t\<^sub>s (Delete x) t = t_delete2 x t" interpretation RBTid2_Amor: Amortized where init = "(Leaf,0,0)" and nxt = nxt and inv = "\(t,n,dl). n = size t \ bal_i (size t+dl) (height t) \ bal_d (size t) dl" and t = t\<^sub>s and \ = \\<^sub>d and U = "\f (t,_). 
case f of Insert _ \ (6*e+2) * (height t + 1) + 1 | Delete _ \ (6*e+1) * height t + 4/cd + 4" proof (standard, goal_cases) case 1 show ?case using bal_i0 bal_d0 by (simp split: prod.split) next case (2 s f) obtain t n dl where [simp]: "s = (t,n,dl)" using prod_cases3 by blast show ?case proof (cases f) case (Insert x) thus ?thesis using 2 insert2_insert_d[of x t dl] bal_i_insert_d[of x t dl] mono_bal_d[OF _ size_insert_d] by (simp del: insert2_d.simps insert3_d_def2 insert_d.simps add: insert3_insert2_d split: prod.splits) fastforce next case (Delete x) thus ?thesis using 2 bal_i_delete[of t dl x] bal_d_delete[of t dl x] by (auto simp: delete2_delete) qed next case (3 s) thus ?case using \_nn[of "fst s" ] cd0 by (auto split: prod.splits) next case 4 show ?case by(simp) next case (5 s f) obtain t n dl where [simp]: "s = (t,n,dl)" using prod_cases3 by blast show ?case proof (cases f) case (Insert x) thus ?thesis using 5 insert2_insert_d[of x t dl] tinsert_d_amor[of n t x dl] by (fastforce simp del: insert2_d.simps insert3_d_def2 insert.simps simp add: insert3_insert2_d \\<^sub>d_case split: prod.split) next case (Delete x) then show ?thesis using 5 delete2_delete[of x t dl] t_delete_amor[of n t x dl] by (simp) qed qed end (* RBTid2 *) axiomatization b :: real where b0: "b > 0" axiomatization where cd_le_log: "cd \ 2 powr (b/c) - 1" text\This axiom is only used to prove that the height remains logarithmic in the size.\ interpretation I_RBTid2: RBTid2 where bal_i = "\n h. h \ ceiling(c * log 2 (n+1))" and e = "2 powr (1/c) / (2 - 2 powr (1/c))" .. (* For code generation: defines insert_id2 = I_RBTid2.insert3_d and ins3_id2 = I_RBTid2.ins3 and up3_id2 = I_RBTid2.up3 *) (* Code Generation *) (* (* FIXME why not done in Code_Float? *) lemmas [code del] = real_less_code real_less_eq_code real_floor_code lemma [code]: "bal_i n h = (h \ ceiling(c * log (real_of_integer 2) (n+1)))" by (simp add: bal_i_def real_of_integer_def) lemma myc[code_unfold]: "c = real_of_integer 3 / real_of_integer 2" sorry definition "floor_integer (x::real) = integer_of_int (floor x)" code_printing constant "floor_integer :: real \ integer" \ (SML) "Real.floor" lemma [code]: "(floor::real \ int) = (\x. int_of_integer (floor_integer x))" by (auto simp: floor_integer_def) definition "ceiling_integer (x::real) = integer_of_int (ceiling x)" code_printing constant "ceiling_integer :: real \ integer" \ (SML) "Real.ceil" code_printing constant "0 :: real" \ (SML) "0.0" code_printing constant "1 :: real" \ (SML) "1.0" lemma [code_unfold del]: "0 \ real_of_rat 0" by simp lemma [code_unfold del]: "1 \ real_of_rat 1" by simp lemma [code_abbrev]: "real_of_integer (integer_of_nat x) = real x" sorry lemma [code_abbrev]: "(\x. 
int_of_integer (ceiling_integer x)) = (ceiling::real \ int)" by (auto simp: ceiling_integer_def) print_codeproc export_code insert_id2 in SML module_name Root_Balanced *) text\Finally we show that under the above interpretation of \bal_i\ the height is logarithmic:\ definition bal_i :: "nat \ nat \ bool" where "bal_i n h = (h \ ceiling(c * log 2 (n+1)))" lemma assumes "bal_i (size t + dl) (height t)" "bal_d (size t) dl" shows "height t \ ceiling(c * log 2 (size1 t) + b)" proof - have *: "0 < real (size t + 1) + cd * real (size t + 1)" using cd0 by (simp add: add_pos_pos) have "0 < 2 powr (b / c) - 1" using b0 c1 by(auto simp: less_powr_iff) hence **: "0 < real (size t + 1) + (2 powr (b / c) - 1) * real (size t + 1)" by (simp add: add_pos_pos) have "height t \ ceiling(c * log 2 (size t + 1 + dl))" using assms(1) by(simp add: bal_i_def add_ac) also have "\ \ ceiling(c * log 2 (size t + 1 + cd * (size t + 1)))" using c1 cd0 assms(2) by(simp add: ceiling_mono add_pos_nonneg bal_d_def add_ac) also have "\ \ ceiling(c * log 2 (size t + 1 + (2 powr (b/c) - 1) * (size t + 1)))" using * ** cd_le_log c1 by(simp add: ceiling_mono mult_left_mono) also have "\ = ceiling(c * log 2 (2 powr (b/c) * (size1 t)))" by(simp add:algebra_simps size1_size) also have "\ = ceiling(c * (b/c + log 2 (size1 t)))" by(simp add: log_mult) also have "\ = ceiling(c * log 2 (size1 t) + b)" using c1 by(simp add: algebra_simps) finally show ?thesis . qed end diff --git a/thys/Simplex/Simplex.thy b/thys/Simplex/Simplex.thy --- a/thys/Simplex/Simplex.thy +++ b/thys/Simplex/Simplex.thy @@ -1,8323 +1,8324 @@ (* Authors: F. Maric, M. Spasic, R. Thiemann *) section \The Simplex Algorithm\ theory Simplex imports Linear_Poly_Maps QDelta Rel_Chain Simplex_Algebra "HOL-Library.RBT_Mapping" "HOL-Library.Permutation" "HOL-Library.Code_Target_Numeral" begin text\Linear constraints are of the form \p \ c\ or \p\<^sub>1 \ p\<^sub>2\, where \p\, \p\<^sub>1\, and \p\<^sub>2\, are linear polynomials, \c\ is a rational constant and \\ \ {<, >, \, \, =}\. Their abstract syntax is given by the \constraint\ type, and semantics is given by the relation \\\<^sub>c\, defined straightforwardly by primitive recursion over the \constraint\ type. A set of constraints is satisfied, denoted by \\\<^sub>c\<^sub>s\, if all constraints are. There is also an indexed version \\\<^sub>i\<^sub>c\<^sub>s\ which takes an explicit set of indices and then only demands that these constraints are satisfied.\ datatype constraint = LT linear_poly rat | GT linear_poly rat | LEQ linear_poly rat | GEQ linear_poly rat | EQ linear_poly rat | LTPP linear_poly linear_poly | GTPP linear_poly linear_poly | LEQPP linear_poly linear_poly | GEQPP linear_poly linear_poly | EQPP linear_poly linear_poly text \Indexed constraints are just pairs of indices and constraints. 
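For instance, pairing the index $1$ with the constraint $x + y \leq 3$ yields the indexed constraint $(1,\ x + y \leq 3)$; this pair is purely illustrative.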
Indices will be used to identify constraints, e.g., to easily specify an unsatisfiable core by a list of indices.\ type_synonym 'i i_constraint = "'i \ constraint" abbreviation (input) restrict_to :: "'i set \ ('i \ 'a) set \ 'a set" where "restrict_to I xs \ snd ` (xs \ (I \ UNIV))" text \The operation @{const restrict_to} is used to select constraints for a given index set.\ abbreviation (input) flat :: "('i \ 'a) set \ 'a set" where "flat xs \ snd ` xs" text \The operation @{const flat} is used to drop indices from a set of indexed constraints.\ abbreviation (input) flat_list :: "('i \ 'a) list \ 'a list" where "flat_list xs \ map snd xs" primrec satisfies_constraint :: "'a :: lrv valuation \ constraint \ bool" (infixl "\\<^sub>c" 100) where "v \\<^sub>c (LT l r) \ (l\v\) < r *R 1" | "v \\<^sub>c GT l r \ (l\v\) > r *R 1" | "v \\<^sub>c LEQ l r \ (l\v\) \ r *R 1" | "v \\<^sub>c GEQ l r \ (l\v\) \ r *R 1" | "v \\<^sub>c EQ l r \ (l\v\) = r *R 1" | "v \\<^sub>c LTPP l1 l2 \ (l1\v\) < (l2\v\)" | "v \\<^sub>c GTPP l1 l2 \ (l1\v\) > (l2\v\)" | "v \\<^sub>c LEQPP l1 l2 \ (l1\v\) \ (l2\v\)" | "v \\<^sub>c GEQPP l1 l2 \ (l1\v\) \ (l2\v\)" | "v \\<^sub>c EQPP l1 l2 \ (l1\v\) = (l2\v\)" abbreviation satisfies_constraints :: "rat valuation \ constraint set \ bool" (infixl "\\<^sub>c\<^sub>s" 100) where "v \\<^sub>c\<^sub>s cs \ \ c \ cs. v \\<^sub>c c" lemma unsat_mono: assumes "\ (\ v. v \\<^sub>c\<^sub>s cs)" and "cs \ ds" shows "\ (\ v. v \\<^sub>c\<^sub>s ds)" using assms by auto fun i_satisfies_cs (infixl "\\<^sub>i\<^sub>c\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>c\<^sub>s cs \ v \\<^sub>c\<^sub>s restrict_to I cs" definition distinct_indices :: "('i \ 'c) list \ bool" where "distinct_indices as = (distinct (map fst as))" lemma distinct_indicesD: "distinct_indices as \ (i,x) \ set as \ (i,y) \ set as \ x = y" unfolding distinct_indices_def by (rule eq_key_imp_eq_value) text \For the unsat-core predicate we only demand minimality in case that the indices are distinct. Otherwise, minimality does in general not hold. For instance, consider the input constraints $c_1: x < 0$, $c_2: x > 2$ and $c_2: x < 1$ where the index $c_2$ occurs twice. If the simplex-method first encounters constraint $c_1$, then it will detect that there is a conflict between $c_1$ and the first $c_2$-constraint. Consequently, the index-set $\{c_1,c_2\}$ will be returned, but this set is not minimal since $\{c_2\}$ is already unsatisfiable.\ definition minimal_unsat_core :: "'i set \ 'i i_constraint list \ bool" where "minimal_unsat_core I ics = ((I \ fst ` set ics) \ (\ (\ v. (I,v) \\<^sub>i\<^sub>c\<^sub>s set ics)) \ (distinct_indices ics \ (\ J. J \ I \ (\ v. (J,v) \\<^sub>i\<^sub>c\<^sub>s set ics))))" subsection \Procedure Specification\ abbreviation (input) Unsat where "Unsat \ Inl" abbreviation (input) Sat where "Sat \ Inr" text\The specification for the satisfiability check procedure is given by:\ locale Solve = \ \Decide if the given list of constraints is satisfiable. 
Return either an unsat core, or a satisfying valuation.\ fixes solve :: "'i i_constraint list \ 'i list + rat valuation" \ \If the status @{const Sat} is returned, then returned valuation satisfies all constraints.\ assumes simplex_sat: "solve cs = Sat v \ v \\<^sub>c\<^sub>s flat (set cs)" \ \If the status @{const Unsat} is returned, then constraints are unsatisfiable, i.e., an unsatisfiable core is returned.\ assumes simplex_unsat: "solve cs = Unsat I \ minimal_unsat_core (set I) cs" abbreviation (input) look where "look \ Mapping.lookup" abbreviation (input) upd where "upd \ Mapping.update" lemma look_upd: "look (upd k v m) = (look m)(k \ v)" by (transfer, auto) lemmas look_upd_simps[simp] = look_upd Mapping.lookup_empty definition map2fun:: "(var, 'a :: zero) mapping \ var \ 'a" where "map2fun v \ \x. case look v x of None \ 0 | Some y \ y" syntax "_map2fun" :: "(var, 'a) mapping \ var \ 'a" ("\_\") translations "\v\" == "CONST map2fun v" lemma map2fun_def': "\v\ x \ case Mapping.lookup v x of None \ 0 | Some y \ y" by (auto simp add: map2fun_def) text\Note that the above specification requires returning a valuation (defined as a HOL function), which is not efficiently executable. In order to enable more efficient data structures for representing valuations, a refinement of this specification is needed and the function \solve\ is replaced by the function \solve_exec\ returning optional \(var, rat) mapping\ instead of \var \ rat\ function. This way, efficient data structures for representing mappings can be easily plugged-in during code generation \cite{florian-refinement}. A conversion from the \mapping\ datatype to HOL function is denoted by \\_\\ and given by: @{thm map2fun_def'[no_vars]}.\ locale SolveExec = fixes solve_exec :: "'i i_constraint list \ 'i list + (var, rat) mapping" assumes simplex_sat0: "solve_exec cs = Sat v \ \v\ \\<^sub>c\<^sub>s flat (set cs)" assumes simplex_unsat0: "solve_exec cs = Unsat I \ minimal_unsat_core (set I) cs" begin definition solve where "solve cs \ case solve_exec cs of Sat v \ Sat \v\ | Unsat c \ Unsat c" end sublocale SolveExec < Solve solve by (unfold_locales, insert simplex_sat0 simplex_unsat0, auto simp: solve_def split: sum.splits) subsection \Handling Strict Inequalities\ text\The first step of the procedure is removing all equalities and strict inequalities. Equalities can be easily rewritten to non-strict inequalities. Removing strict inequalities can be done by replacing the list of constraints by a new one, formulated over an extension \\'\ of the space of rationals \\\. \\'\ must have a structure of a linearly ordered vector space over \\\ (represented by the type class \lrv\) and must guarantee that if some non-strict constraints are satisfied in \\'\, then there is a satisfying valuation for the original constraints in \\\. Our final implementation uses the \\\<^sub>\\ space, defined in \cite{simplex-rad} (basic idea is to replace \p < c\ by \p \ c - \\ and \p > c\ by \p \ c + \\ for a symbolic parameter \\\). So, all constraints are reduced to the form \p \ b\, where \p\ is a linear polynomial (still over \\\), \b\ is constant from \\'\ and \\ \ {\, \}\. The non-strict constraints are represented by the type \'a ns_constraint\, and their semantics is denoted by \\\<^sub>n\<^sub>s\ and \\\<^sub>n\<^sub>s\<^sub>s\. 
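As a purely illustrative instance of this translation, the strict constraint $x + y < 3$ becomes the non-strict constraint $x + y \leq 3 - \delta$, and $x > 0$ becomes $x \geq 0 + \delta$, where $\delta$ is the symbolic parameter mentioned above.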
The indexed variant is \\\<^sub>i\<^sub>n\<^sub>s\<^sub>s\.\ datatype 'a ns_constraint = LEQ_ns linear_poly 'a | GEQ_ns linear_poly 'a type_synonym ('i,'a) i_ns_constraint = "'i \ 'a ns_constraint" primrec satisfiable_ns_constraint :: "'a::lrv valuation \ 'a ns_constraint \ bool" (infixl "\\<^sub>n\<^sub>s" 100) where "v \\<^sub>n\<^sub>s LEQ_ns l r \ l\v\ \ r" | "v \\<^sub>n\<^sub>s GEQ_ns l r \ l\v\ \ r" abbreviation satisfies_ns_constraints :: "'a::lrv valuation \ 'a ns_constraint set \ bool" (infixl "\\<^sub>n\<^sub>s\<^sub>s " 100) where "v \\<^sub>n\<^sub>s\<^sub>s cs \ \ c \ cs. v \\<^sub>n\<^sub>s c" fun i_satisfies_ns_constraints :: "'i set \ 'a::lrv valuation \ ('i,'a) i_ns_constraint set \ bool" (infixl "\\<^sub>i\<^sub>n\<^sub>s\<^sub>s " 100) where "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs \ v \\<^sub>n\<^sub>s\<^sub>s restrict_to I cs" lemma i_satisfies_ns_constraints_mono: "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs \ J \ I \ (J,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs" by auto primrec poly :: "'a ns_constraint \ linear_poly" where "poly (LEQ_ns p a) = p" | "poly (GEQ_ns p a) = p" primrec ns_constraint_const :: "'a ns_constraint \ 'a" where "ns_constraint_const (LEQ_ns p a) = a" | "ns_constraint_const (GEQ_ns p a) = a" definition distinct_indices_ns :: "('i,'a :: lrv) i_ns_constraint set \ bool" where "distinct_indices_ns ns = ((\ n1 n2 i. (i,n1) \ ns \ (i,n2) \ ns \ poly n1 = poly n2 \ ns_constraint_const n1 = ns_constraint_const n2))" definition minimal_unsat_core_ns :: "'i set \ ('i,'a :: lrv) i_ns_constraint set \ bool" where "minimal_unsat_core_ns I cs = ((I \ fst ` cs) \ (\ (\ v. (I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs)) \ (distinct_indices_ns cs \ (\ J \ I. \ v. (J,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs)))" text\Specification of reduction of constraints to non-strict form is given by:\ locale To_ns = \ \Convert a constraint to an equisatisfiable non-strict constraint list. The conversion must work for arbitrary subsets of constraints -- selected by some index set I -- in order to carry over unsat-cores and in order to support incremental simplex solving.\ fixes to_ns :: "'i i_constraint list \ ('i,'a::lrv) i_ns_constraint list" \ \Convert the valuation that satisfies all non-strict constraints to the valuation that satisfies all initial constraints.\ fixes from_ns :: "(var, 'a) mapping \ 'a ns_constraint list \ (var, rat) mapping" assumes to_ns_unsat: "minimal_unsat_core_ns I (set (to_ns cs)) \ minimal_unsat_core I cs" assumes i_to_ns_sat: "(I,\v'\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs) \ (I,\from_ns v' (flat_list (to_ns cs))\) \\<^sub>i\<^sub>c\<^sub>s set cs" assumes to_ns_indices: "fst ` set (to_ns cs) = fst ` set cs" assumes distinct_cond: "distinct_indices cs \ distinct_indices_ns (set (to_ns cs))" begin lemma to_ns_sat: "\v'\ \\<^sub>n\<^sub>s\<^sub>s flat (set (to_ns cs)) \ \from_ns v' (flat_list (to_ns cs))\ \\<^sub>c\<^sub>s flat (set cs)" using i_to_ns_sat[of UNIV v' cs] by auto end locale Solve_exec_ns = fixes solve_exec_ns :: "('i,'a::lrv) i_ns_constraint list \ 'i list + (var, 'a) mapping" assumes simplex_ns_sat: "solve_exec_ns cs = Sat v \ \v\ \\<^sub>n\<^sub>s\<^sub>s flat (set cs)" assumes simplex_ns_unsat: "solve_exec_ns cs = Unsat I \ minimal_unsat_core_ns (set I) (set cs)" text\After the transformation, the procedure is reduced to solving only the non-strict constraints, implemented in the \solve_exec_ns\ function having an analogous specification to the \solve\ function. 
If \to_ns\, \from_ns\ and \solve_exec_ns\ are available, the \solve_exec\ function can be easily defined and it can be easily shown that this definition satisfies its specification (also analogous to \solve\). \ locale SolveExec' = To_ns to_ns from_ns + Solve_exec_ns solve_exec_ns for to_ns:: "'i i_constraint list \ ('i,'a::lrv) i_ns_constraint list" and from_ns :: "(var, 'a) mapping \ 'a ns_constraint list \ (var, rat) mapping" and solve_exec_ns :: "('i,'a) i_ns_constraint list \ 'i list + (var, 'a) mapping" begin definition solve_exec where "solve_exec cs \ let cs' = to_ns cs in case solve_exec_ns cs' of Sat v \ Sat (from_ns v (flat_list cs')) | Unsat is \ Unsat is" end sublocale SolveExec' < SolveExec solve_exec by (unfold_locales, insert simplex_ns_sat simplex_ns_unsat to_ns_sat to_ns_unsat, (force simp: solve_exec_def Let_def split: sum.splits)+) subsection \Preprocessing\ text\The next step in the procedure rewrites a list of non-strict constraints into an equisatisfiable form consisting of a list of linear equations (called the \emph{tableau}) and of a list of \emph{atoms} of the form \x\<^sub>i \ b\<^sub>i\ where \x\<^sub>i\ is a variable and \b\<^sub>i\ is a constant (from the extension field). The transformation is straightforward and introduces auxiliary variables for linear polynomials occurring in the initial formula. For example, \[x\<^sub>1 + x\<^sub>2 \ b\<^sub>1, x\<^sub>1 + x\<^sub>2 \ b\<^sub>2, x\<^sub>2 \ b\<^sub>3]\ can be transformed to the tableau \[x\<^sub>3 = x\<^sub>1 + x\<^sub>2]\ and atoms \[x\<^sub>3 \ b\<^sub>1, x\<^sub>3 \ b\<^sub>2, x\<^sub>2 \ b\<^sub>3]\.\ type_synonym eq = "var \ linear_poly" primrec lhs :: "eq \ var" where "lhs (l, r) = l" primrec rhs :: "eq \ linear_poly" where "rhs (l, r) = r" abbreviation rvars_eq :: "eq \ var set" where "rvars_eq eq \ vars (rhs eq)" definition satisfies_eq :: "'a::rational_vector valuation \ eq \ bool" (infixl "\\<^sub>e" 100) where "v \\<^sub>e eq \ v (lhs eq) = (rhs eq)\v\" lemma satisfies_eq_iff: "v \\<^sub>e (x, p) \ v x = p\v\" by (simp add: satisfies_eq_def) type_synonym tableau ="eq list" definition satisfies_tableau ::"'a::rational_vector valuation \ tableau \ bool" (infixl "\\<^sub>t" 100) where "v \\<^sub>t t \ \ e \ set t. v \\<^sub>e e" definition lvars :: "tableau \ var set" where "lvars t = set (map lhs t)" definition rvars :: "tableau \ var set" where "rvars t = \ (set (map rvars_eq t))" abbreviation tvars where "tvars t \ lvars t \ rvars t" text \The condition that the rhss are non-zero is required to obtain minimal unsatisfiable cores. To observe the problem with 0 as rhs, consider the tableau $x = 0$ in combination with atom $(A: x \leq 0)$ where then $(B: x \geq 1)$ is asserted. In this case, the unsat core would be computed as $\{A,B\}$, although already $\{B\}$ is unsatisfiable.\ definition normalized_tableau :: "tableau \ bool" ("\") where "normalized_tableau t \ distinct (map lhs t) \ lvars t \ rvars t = {} \ 0 \ rhs ` set t" text\Equations are of the form \x = p\, where \x\ is a variable and \p\ is a polynomial, and are represented by the type \eq = var \ linear_poly\. Semantics of equations is given by @{thm satisfies_eq_iff[no_vars]}. Tableau is represented as a list of equations, by the type \tableau = eq list\. Semantics for a tableau is given by @{thm satisfies_tableau_def[no_vars]}. Functions \lvars\ and \rvars\ return sets of variables appearing on the left hand side (lhs) and the right hand side (rhs) of a tableau. 
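For the illustrative tableau \[x\<^sub>3 = x\<^sub>1 + x\<^sub>2]\ from the preprocessing example above, \lvars\ yields the singleton \{x\<^sub>3}\ and \rvars\ yields \{x\<^sub>1, x\<^sub>2}\.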
Lhs variables are called \emph{basic} while rhs variables are called \emph{non-basic} variables. A tableau \t\ is \emph{normalized} (denoted by @{term "\ t"}) iff no variable occurs on the lhs of two equations in a tableau and if sets of lhs and rhs variables are distinct.\ lemma normalized_tableau_unique_eq_for_lvar: assumes "\ t" shows "\ x \ lvars t. \! p. (x, p) \ set t" proof (safe) fix x assume "x \ lvars t" then show "\p. (x, p) \ set t" unfolding lvars_def by auto next fix x p1 p2 assume *: "(x, p1) \ set t" "(x, p2) \ set t" then show "p1 = p2" using \\ t\ unfolding normalized_tableau_def by (force simp add: distinct_map inj_on_def) qed lemma recalc_tableau_lvars: assumes "\ t" shows "\ v. \ v'. (\ x \ rvars t. v x = v' x) \ v' \\<^sub>t t" proof fix v let ?v' = "\ x. if x \ lvars t then (THE p. (x, p) \ set t) \ v \ else v x" show "\ v'. (\ x \ rvars t. v x = v' x) \ v' \\<^sub>t t" proof (rule_tac x="?v'" in exI, rule conjI) show "\x\rvars t. v x = ?v' x" using \\ t\ unfolding normalized_tableau_def by auto show "?v' \\<^sub>t t" unfolding satisfies_tableau_def satisfies_eq_def proof fix e assume "e \ set t" obtain l r where e: "e = (l,r)" by force show "?v' (lhs e) = rhs e \ ?v' \" proof- have "(lhs e, rhs e) \ set t" using \e \ set t\ e by auto have "\!p. (lhs e, p) \ set t" using \\ t\ normalized_tableau_unique_eq_for_lvar[of t] using \e \ set t\ unfolding lvars_def by auto let ?p = "THE p. (lhs e, p) \ set t" have "(lhs e, ?p) \ set t" apply (rule theI') using \\!p. (lhs e, p) \ set t\ by auto then have "?p = rhs e" using \(lhs e, rhs e) \ set t\ using \\!p. (lhs e, p) \ set t\ by auto moreover have "?v' (lhs e) = ?p \ v \" using \e \ set t\ unfolding lvars_def by simp moreover have "rhs e \ ?v' \ = rhs e \ v \" apply (rule valuate_depend) using \\ t\ \e \ set t\ unfolding normalized_tableau_def by (auto simp add: lvars_def rvars_def) ultimately show ?thesis by auto qed qed qed qed lemma tableau_perm: assumes "lvars t1 = lvars t2" "rvars t1 = rvars t2" "\ t1" "\ t2" "\ v::'a::lrv valuation. v \\<^sub>t t1 \ v \\<^sub>t t2" shows "t1 <~~> t2" proof- { fix t1 t2 assume "lvars t1 = lvars t2" "rvars t1 = rvars t2" "\ t1" "\ v::'a::lrv valuation. v \\<^sub>t t1 \ v \\<^sub>t t2" have "set t1 \ set t2" proof (safe) fix a b assume "(a, b) \ set t1" then have "a \ lvars t1" unfolding lvars_def by force then have "a \ lvars t2" using \lvars t1 = lvars t2\ by simp then obtain b' where "(a, b') \ set t2" unfolding lvars_def by force have "\v::'a valuation. \v'. (\x\vars (b - b'). v' x = v x) \ (b - b') \ v' \ = 0" proof fix v::"'a valuation" obtain v' where "v' \\<^sub>t t1" "\ x \ rvars t1. v x = v' x" using recalc_tableau_lvars[of t1] \\ t1\ by auto have "v' \\<^sub>t t2" using \v' \\<^sub>t t1\ \\ v. v \\<^sub>t t1 \ v \\<^sub>t t2\ by simp have "b \v'\ = b' \v'\" using \(a, b) \ set t1\ \v' \\<^sub>t t1\ using \(a, b') \ set t2\ \v' \\<^sub>t t2\ unfolding satisfies_tableau_def satisfies_eq_def by force then have "(b - b') \v'\ = 0" using valuate_minus[of b b' v'] by auto moreover have "vars b \ rvars t1" "vars b' \ rvars t1" using \(a, b) \ set t1\ \(a, b') \ set t2\ \rvars t1 = rvars t2\ unfolding rvars_def by force+ then have "vars (b - b') \ rvars t1" using vars_minus[of b b'] by blast then have "\x\vars (b - b'). v' x = v x" using \\ x \ rvars t1. v x = v' x\ by auto ultimately show "\v'. (\x\vars (b - b'). 
v' x = v x) \ (b - b') \ v' \ = 0" by auto qed then have "b = b'" using all_val[of "b - b'"] by simp then show "(a, b) \ set t2" using \(a, b') \ set t2\ by simp qed } note * = this have "set t1 = set t2" using *[of t1 t2] *[of t2 t1] using assms by auto moreover have "distinct t1" "distinct t2" using \\ t1\ \\ t2\ unfolding normalized_tableau_def by (auto simp add: distinct_map) ultimately show ?thesis by (auto simp add: set_eq_iff_mset_eq_distinct mset_eq_perm) qed text\Elementary atoms are represented by the type \'a atom\ and semantics for atoms and sets of atoms is denoted by \\\<^sub>a\ and \\\<^sub>a\<^sub>s\ and given by: \ datatype 'a atom = Leq var 'a | Geq var 'a primrec atom_var::"'a atom \ var" where "atom_var (Leq var a) = var" | "atom_var (Geq var a) = var" primrec atom_const::"'a atom \ 'a" where "atom_const (Leq var a) = a" | "atom_const (Geq var a) = a" primrec satisfies_atom :: "'a::linorder valuation \ 'a atom \ bool" (infixl "\\<^sub>a" 100) where "v \\<^sub>a Leq x c \ v x \ c" | "v \\<^sub>a Geq x c \ v x \ c" definition satisfies_atom_set :: "'a::linorder valuation \ 'a atom set \ bool" (infixl "\\<^sub>a\<^sub>s" 100) where "v \\<^sub>a\<^sub>s as \ \ a \ as. v \\<^sub>a a" definition satisfies_atom' :: "'a::linorder valuation \ 'a atom \ bool" (infixl "\\<^sub>a\<^sub>e" 100) where "v \\<^sub>a\<^sub>e a \ v (atom_var a) = atom_const a" lemma satisfies_atom'_stronger: "v \\<^sub>a\<^sub>e a \ v \\<^sub>a a" by (cases a, auto simp: satisfies_atom'_def) abbreviation satisfies_atom_set' :: "'a::linorder valuation \ 'a atom set \ bool" (infixl "\\<^sub>a\<^sub>e\<^sub>s" 100) where "v \\<^sub>a\<^sub>e\<^sub>s as \ \ a \ as. v \\<^sub>a\<^sub>e a" lemma satisfies_atom_set'_stronger: "v \\<^sub>a\<^sub>e\<^sub>s as \ v \\<^sub>a\<^sub>s as" using satisfies_atom'_stronger unfolding satisfies_atom_set_def by auto text \There is also the indexed variant of an atom\ type_synonym ('i,'a) i_atom = "'i \ 'a atom" fun i_satisfies_atom_set :: "'i set \ 'a::linorder valuation \ ('i,'a) i_atom set \ bool" (infixl "\\<^sub>i\<^sub>a\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>a\<^sub>s as \ v \\<^sub>a\<^sub>s restrict_to I as" fun i_satisfies_atom_set' :: "'i set \ 'a::linorder valuation \ ('i,'a) i_atom set \ bool" (infixl "\\<^sub>i\<^sub>a\<^sub>e\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as \ v \\<^sub>a\<^sub>e\<^sub>s restrict_to I as" lemma i_satisfies_atom_set'_stronger: "Iv \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as \ Iv \\<^sub>i\<^sub>a\<^sub>s as" using satisfies_atom_set'_stronger[of _ "snd Iv"] by (cases Iv, auto) lemma satisifies_atom_restrict_to_Cons: "v \\<^sub>a\<^sub>s restrict_to I (set as) \ (i \ I \ v \\<^sub>a a) \ v \\<^sub>a\<^sub>s restrict_to I (set ((i,a) # as))" unfolding satisfies_atom_set_def by auto lemma satisfies_tableau_Cons: "v \\<^sub>t t \ v \\<^sub>e e \ v \\<^sub>t (e # t)" unfolding satisfies_tableau_def by auto definition distinct_indices_atoms :: "('i,'a) i_atom set \ bool" where "distinct_indices_atoms as = (\ i a b. 
(i,a) \ as \ (i,b) \ as \ atom_var a = atom_var b \ atom_const a = atom_const b)" text\ The specification of the preprocessing function is given by:\ locale Preprocess = fixes preprocess::"('i,'a::lrv) i_ns_constraint list \ tableau\ ('i,'a) i_atom list \ ((var,'a) mapping \ (var,'a) mapping) \ 'i list" assumes \ \The returned tableau is always normalized.\ preprocess_tableau_normalized: "preprocess cs = (t,as,trans_v,U) \ \ t" and \ \Tableau and atoms are equisatisfiable with starting non-strict constraints.\ i_preprocess_sat: "\ v. preprocess cs = (t,as,trans_v,U) \ I \ set U = {} \ (I,\v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t \ (I,\trans_v v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" and preprocess_unsat: "preprocess cs = (t, as,trans_v,U) \ (I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs \ \ v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t" and \ \distinct indices on ns-constraints ensures distinct indices in atoms\ preprocess_distinct: "preprocess cs = (t, as,trans_v, U) \ distinct_indices_ns (set cs) \ distinct_indices_atoms (set as)" and \ \unsat indices\ preprocess_unsat_indices: "preprocess cs = (t, as,trans_v, U) \ i \ set U \ \ (\ v. ({i},v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs)" and \ \preprocessing cannot introduce new indices\ preprocess_index: "preprocess cs = (t,as,trans_v, U) \ fst ` set as \ set U \ fst ` set cs" begin lemma preprocess_sat: "preprocess cs = (t,as,trans_v,U) \ U = [] \ \v\ \\<^sub>a\<^sub>s flat (set as) \ \v\ \\<^sub>t t \ \trans_v v\ \\<^sub>n\<^sub>s\<^sub>s flat (set cs)" using i_preprocess_sat[of cs t as trans_v U UNIV v] by auto end definition minimal_unsat_core_tabl_atoms :: "'i set \ tableau \ ('i,'a::lrv) i_atom set \ bool" where "minimal_unsat_core_tabl_atoms I t as = ( I \ fst ` as \ (\ (\ v. v \\<^sub>t t \ (I,v) \\<^sub>i\<^sub>a\<^sub>s as)) \ (distinct_indices_atoms as \ (\ J \ I. \ v. v \\<^sub>t t \ (J,v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as)))" lemma minimal_unsat_core_tabl_atomsD: assumes "minimal_unsat_core_tabl_atoms I t as" shows "I \ fst ` as" "\ (\ v. v \\<^sub>t t \ (I,v) \\<^sub>i\<^sub>a\<^sub>s as)" "distinct_indices_atoms as \ J \ I \ \ v. v \\<^sub>t t \ (J,v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as" using assms unfolding minimal_unsat_core_tabl_atoms_def by auto locale AssertAll = fixes assert_all :: "tableau \ ('i,'a::lrv) i_atom list \ 'i list + (var, 'a)mapping" assumes assert_all_sat: "\ t \ assert_all t as = Sat v \ \v\ \\<^sub>t t \ \v\ \\<^sub>a\<^sub>s flat (set as)" assumes assert_all_unsat: "\ t \ assert_all t as = Unsat I \ minimal_unsat_core_tabl_atoms (set I) t (set as)" text\Once the preprocessing is done and tableau and atoms are obtained, their satisfiability is checked by the \assert_all\ function. Its precondition is that the starting tableau is normalized, and its specification is analogue to the one for the \solve\ function. 
If \preprocess\ and \assert_all\ are available, the \solve_exec_ns\ can be defined, and it can easily be shown that this definition satisfies the specification.\ locale Solve_exec_ns' = Preprocess preprocess + AssertAll assert_all for preprocess:: "('i,'a::lrv) i_ns_constraint list \ tableau \ ('i,'a) i_atom list \ ((var,'a)mapping \ (var,'a)mapping) \ 'i list" and assert_all :: "tableau \ ('i,'a::lrv) i_atom list \ 'i list + (var, 'a) mapping" begin definition solve_exec_ns where "solve_exec_ns s \ case preprocess s of (t,as,trans_v,ui) \ (case ui of i # _ \ Inl [i] | _ \ (case assert_all t as of Inl I \ Inl I | Inr v \ Inr (trans_v v))) " end context Preprocess begin lemma preprocess_unsat_index: assumes prep: "preprocess cs = (t,as,trans_v,ui)" and i: "i \ set ui" shows "minimal_unsat_core_ns {i} (set cs)" proof - from preprocess_index[OF prep] have "set ui \ fst ` set cs" by auto with i have i': "i \ fst ` set cs" by auto from preprocess_unsat_indices[OF prep i] show ?thesis unfolding minimal_unsat_core_ns_def using i' by auto qed lemma preprocess_minimal_unsat_core: assumes prep: "preprocess cs = (t,as,trans_v,ui)" and unsat: "minimal_unsat_core_tabl_atoms I t (set as)" and inter: "I \ set ui = {}" shows "minimal_unsat_core_ns I (set cs)" proof - from preprocess_tableau_normalized[OF prep] have t: "\ t" . from preprocess_index[OF prep] have index: "fst ` set as \ set ui \ fst ` set cs" by auto from minimal_unsat_core_tabl_atomsD(1,2)[OF unsat] preprocess_unsat[OF prep, of I] have 1: "I \ fst ` set as" "\ (\ v. (I, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs)" by force+ show "minimal_unsat_core_ns I (set cs)" unfolding minimal_unsat_core_ns_def proof (intro conjI impI allI 1(2)) show "I \ fst ` set cs" using 1 index by auto fix J assume "distinct_indices_ns (set cs)" "J \ I" with preprocess_distinct[OF prep] have "distinct_indices_atoms (set as)" "J \ I" by auto from minimal_unsat_core_tabl_atomsD(3)[OF unsat this] obtain v where model: "v \\<^sub>t t" "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s set as" by auto from i_satisfies_atom_set'_stronger[OF model(2)] have model': "(J, v) \\<^sub>i\<^sub>a\<^sub>s set as" . define w where "w = Mapping.Mapping (\ x. Some (v x))" have "v = \w\" unfolding w_def map2fun_def by (intro ext, transfer, auto) with model model' have "\w\ \\<^sub>t t" "(J, \w\) \\<^sub>i\<^sub>a\<^sub>s set as" by auto from i_preprocess_sat[OF prep _ this(2,1)] \J \ I\ inter have "(J, \trans_v w\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by auto then show "\ w. (J, w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by auto qed qed end sublocale Solve_exec_ns' < Solve_exec_ns solve_exec_ns proof fix cs obtain t as trans_v ui where prep: "preprocess cs = (t,as,trans_v,ui)" by (cases "preprocess cs") from preprocess_tableau_normalized[OF prep] have t: "\ t" . 
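(* Informal sketch of the argument below, added as a comment only: if preprocessing already reports an unsatisfiable index (ui = i # _), then solve_exec_ns returns [i] and preprocess_unsat_index shows that {i} is a minimal unsat core; if ui = [], the answer of assert_all is used, satisfying valuations are translated back via trans_v, and unsat cores are transferred to the original constraints via preprocess_minimal_unsat_core. *)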
from preprocess_index[OF prep] have index: "fst ` set as \ set ui \ fst ` set cs" by auto note solve = solve_exec_ns_def[of cs, unfolded prep split] { fix v assume "solve_exec_ns cs = Sat v" from this[unfolded solve] t assert_all_sat[OF t] preprocess_sat[OF prep] show " \v\ \\<^sub>n\<^sub>s\<^sub>s flat (set cs)" by (auto split: sum.splits list.splits) } { fix I assume res: "solve_exec_ns cs = Unsat I" show "minimal_unsat_core_ns (set I) (set cs)" proof (cases ui) case (Cons i uis) hence I: "I = [i]" using res[unfolded solve] by auto from preprocess_unsat_index[OF prep, of i] I Cons index show ?thesis by auto next case Nil from res[unfolded solve Nil] have assert: "assert_all t as = Unsat I" by (auto split: sum.splits) from assert_all_unsat[OF t assert] have "minimal_unsat_core_tabl_atoms (set I) t (set as)" . from preprocess_minimal_unsat_core[OF prep this] Nil show "minimal_unsat_core_ns (set I) (set cs)" by simp qed } qed subsection\Incrementally Asserting Atoms\ text\The function @{term assert_all} can be implemented by iteratively asserting one by one atom from the given list of atoms. \ type_synonym 'a bounds = "var \ 'a" text\Asserted atoms will be stored in a form of \emph{bounds} for a given variable. Bounds are of the form \l\<^sub>i \ x\<^sub>i \ u\<^sub>i\, where \l\<^sub>i\ and \u\<^sub>i\ and are either scalars or $\pm \infty$. Each time a new atom is asserted, a bound for the corresponding variable is updated (checking for conflict with the previous bounds). Since bounds for a variable can be either finite or $\pm \infty$, they are represented by (partial) maps from variables to values (\'a bounds = var \ 'a\). Upper and lower bounds are represented separately. Infinite bounds map to @{term None} and this is reflected in the semantics: \begin{small} \c \\<^sub>u\<^sub>b b \ case b of None \ False | Some b' \ c \ b'\ \c \\<^sub>u\<^sub>b b \ case b of None \ True | Some b' \ c \ b'\ \end{small} \noindent Strict comparisons, and comparisons with lower bounds are performed similarly. 
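For instance, the finite upper bound \Some 3\ is respected by the value \2\ but violated by \5\, whereas the infinite upper bound \None\ is respected by every value; this is meant only as an informal illustration of the comparison operators defined below.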
\ abbreviation (input) le where "le lt x y \ lt x y \ x = y" definition geub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ False | Some b' \ le lt b' c" definition gtub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ False | Some b' \ lt b' c" definition leub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ True | Some b' \ le lt c b'" definition ltub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ True | Some b' \ lt c b'" definition lelb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ False | Some b' \ le lt c b'" definition ltlb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ False | Some b' \ lt c b'" definition gelb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ True | Some b' \ le lt b' c" definition gtlb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ True | Some b' \ lt b' c" definition ge_ubound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>u\<^sub>b" 100) where "c \\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition gt_ubound :: "'a::linorder \ 'a option \ bool" (infixl ">\<^sub>u\<^sub>b" 100) where "c >\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition le_ubound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>u\<^sub>b" 100) where "c \\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition lt_ubound :: "'a::linorder \ 'a option \ bool" (infixl "<\<^sub>u\<^sub>b" 100) where "c <\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition le_lbound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>l\<^sub>b" 100) where "c \\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" definition lt_lbound :: "'a::linorder \ 'a option \ bool" (infixl "<\<^sub>l\<^sub>b" 100) where "c <\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" definition ge_lbound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>l\<^sub>b" 100) where "c \\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" definition gt_lbound :: "'a::linorder \ 'a option \ bool" (infixl ">\<^sub>l\<^sub>b" 100) where "c >\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" lemmas bound_compare'_defs = geub_def gtub_def leub_def ltub_def gelb_def gtlb_def lelb_def ltlb_def lemmas bound_compare''_defs = ge_ubound_def gt_ubound_def le_ubound_def lt_ubound_def le_lbound_def lt_lbound_def ge_lbound_def gt_lbound_def lemmas bound_compare_defs = bound_compare'_defs bound_compare''_defs lemma opposite_dir [simp]: "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" by (case_tac[!] 
b) (auto simp add: bound_compare'_defs) (* Auxiliary lemmas about bound comparison *) lemma [simp]: "\ c \\<^sub>u\<^sub>b None " "\ c \\<^sub>l\<^sub>b None" by (auto simp add: bound_compare_defs) lemma neg_bounds_compare: "(\ (c \\<^sub>u\<^sub>b b)) \ c <\<^sub>u\<^sub>b b" "(\ (c \\<^sub>u\<^sub>b b)) \ c >\<^sub>u\<^sub>b b" "(\ (c >\<^sub>u\<^sub>b b)) \ c \\<^sub>u\<^sub>b b" "(\ (c <\<^sub>u\<^sub>b b)) \ c \\<^sub>u\<^sub>b b" "(\ (c \\<^sub>l\<^sub>b b)) \ c >\<^sub>l\<^sub>b b" "(\ (c \\<^sub>l\<^sub>b b)) \ c <\<^sub>l\<^sub>b b" "(\ (c <\<^sub>l\<^sub>b b)) \ c \\<^sub>l\<^sub>b b" "(\ (c >\<^sub>l\<^sub>b b)) \ c \\<^sub>l\<^sub>b b" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma bounds_compare_contradictory [simp]: "\c \\<^sub>u\<^sub>b b; c <\<^sub>u\<^sub>b b\ \ False" "\c \\<^sub>u\<^sub>b b; c >\<^sub>u\<^sub>b b\ \ False" "\c >\<^sub>u\<^sub>b b; c \\<^sub>u\<^sub>b b\ \ False" "\c <\<^sub>u\<^sub>b b; c \\<^sub>u\<^sub>b b\ \ False" "\c \\<^sub>l\<^sub>b b; c >\<^sub>l\<^sub>b b\ \ False" "\c \\<^sub>l\<^sub>b b; c <\<^sub>l\<^sub>b b\ \ False" "\c <\<^sub>l\<^sub>b b; c \\<^sub>l\<^sub>b b\ \ False" "\c >\<^sub>l\<^sub>b b; c \\<^sub>l\<^sub>b b\ \ False" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma compare_strict_nonstrict: "x <\<^sub>u\<^sub>b b \ x \\<^sub>u\<^sub>b b" "x >\<^sub>u\<^sub>b b \ x \\<^sub>u\<^sub>b b" "x <\<^sub>l\<^sub>b b \ x \\<^sub>l\<^sub>b b" "x >\<^sub>l\<^sub>b b \ x \\<^sub>l\<^sub>b b" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma [simp]: "\ x \ c; c <\<^sub>u\<^sub>b b \ \ x <\<^sub>u\<^sub>b b" "\ x < c; c \\<^sub>u\<^sub>b b \ \ x <\<^sub>u\<^sub>b b" "\ x \ c; c \\<^sub>u\<^sub>b b \ \ x \\<^sub>u\<^sub>b b" "\ x \ c; c >\<^sub>l\<^sub>b b \ \ x >\<^sub>l\<^sub>b b" "\ x > c; c \\<^sub>l\<^sub>b b \ \ x >\<^sub>l\<^sub>b b" "\ x \ c; c \\<^sub>l\<^sub>b b \ \ x \\<^sub>l\<^sub>b b" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma bounds_lg [simp]: "\ c >\<^sub>u\<^sub>b b; x \\<^sub>u\<^sub>b b\ \ x < c" "\ c \\<^sub>u\<^sub>b b; x <\<^sub>u\<^sub>b b\ \ x < c" "\ c \\<^sub>u\<^sub>b b; x \\<^sub>u\<^sub>b b\ \ x \ c" "\ c <\<^sub>l\<^sub>b b; x \\<^sub>l\<^sub>b b\ \ x > c" "\ c \\<^sub>l\<^sub>b b; x >\<^sub>l\<^sub>b b\ \ x > c" "\ c \\<^sub>l\<^sub>b b; x \\<^sub>l\<^sub>b b\ \ x \ c" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma bounds_compare_Some [simp]: "x \\<^sub>u\<^sub>b Some c \ x \ c" "x \\<^sub>u\<^sub>b Some c \ x \ c" "x <\<^sub>u\<^sub>b Some c \ x < c" "x >\<^sub>u\<^sub>b Some c \ x > c" "x \\<^sub>l\<^sub>b Some c \ x \ c" "x \\<^sub>l\<^sub>b Some c \ x \ c" "x >\<^sub>l\<^sub>b Some c \ x > c" "x <\<^sub>l\<^sub>b Some c \ x < c" by (auto simp add: bound_compare_defs) fun in_bounds where "in_bounds x v (lb, ub) = (v x \\<^sub>l\<^sub>b lb x \ v x \\<^sub>u\<^sub>b ub x)" fun satisfies_bounds :: "'a::linorder valuation \ 'a bounds \ 'a bounds \ bool" (infixl "\\<^sub>b" 100) where "v \\<^sub>b b \ (\ x. in_bounds x v b)" declare satisfies_bounds.simps [simp del] lemma satisfies_bounds_iff: "v \\<^sub>b (lb, ub) \ (\ x. 
v x \\<^sub>l\<^sub>b lb x \ v x \\<^sub>u\<^sub>b ub x)" by (auto simp add: satisfies_bounds.simps) lemma not_in_bounds: "\ (in_bounds x v (lb, ub)) = (v x <\<^sub>l\<^sub>b lb x \ v x >\<^sub>u\<^sub>b ub x)" using bounds_compare_contradictory(7) using bounds_compare_contradictory(2) using neg_bounds_compare(7)[of "v x" "lb x"] using neg_bounds_compare(2)[of "v x" "ub x"] by auto fun atoms_equiv_bounds :: "'a::linorder atom set \ 'a bounds \ 'a bounds \ bool" (infixl "\" 100) where "as \ (lb, ub) \ (\ v. v \\<^sub>a\<^sub>s as \ v \\<^sub>b (lb, ub))" declare atoms_equiv_bounds.simps [simp del] lemma atoms_equiv_bounds_simps: "as \ (lb, ub) \ \ v. v \\<^sub>a\<^sub>s as \ v \\<^sub>b (lb, ub)" by (simp add: atoms_equiv_bounds.simps) text\A valuation satisfies bounds iff the value of each variable respects both its lower and upper bound, i.e, @{thm satisfies_bounds_iff[no_vars]}. Asserted atoms are precisely encoded by the current bounds in a state (denoted by \\\) if every valuation satisfies them iff it satisfies the bounds, i.e., @{thm atoms_equiv_bounds_simps[no_vars, iff]}.\ text\The procedure also keeps track of a valuation that is a candidate solution. Whenever a new atom is asserted, it is checked whether the valuation is still satisfying. If not, the procedure tries to fix that by changing it and changing the tableau if necessary (but so that it remains equivalent to the initial tableau).\ text\Therefore, the state of the procedure stores the tableau (denoted by \\\), lower and upper bounds (denoted by \\\<^sub>l\ and \\\<^sub>u\, and ordered pair of lower and upper bounds denoted by \\\), candidate solution (denoted by \\\) and a flag (denoted by \\\) indicating if unsatisfiability has been detected so far:\ text\Since we also need to now about the indices of atoms, actually, the bounds are also indexed, and in addition to the flag for unsatisfiability, we also store an optional unsat core.\ type_synonym 'i bound_index = "var \ 'i" type_synonym ('i,'a) bounds_index = "(var, ('i \ 'a))mapping" datatype ('i,'a) state = State (\: "tableau") (\\<^sub>i\<^sub>l: "('i,'a) bounds_index") (\\<^sub>i\<^sub>u: "('i,'a) bounds_index") (\: "(var, 'a) mapping") (\: bool) (\\<^sub>c: "'i list option") definition indexl :: "('i,'a) state \ 'i bound_index" ("\\<^sub>l") where "\\<^sub>l s = (fst o the) o look (\\<^sub>i\<^sub>l s)" definition boundsl :: "('i,'a) state \ 'a bounds" ("\\<^sub>l") where "\\<^sub>l s = map_option snd o look (\\<^sub>i\<^sub>l s)" definition indexu :: "('i,'a) state \ 'i bound_index" ("\\<^sub>u") where "\\<^sub>u s = (fst o the) o look (\\<^sub>i\<^sub>u s)" definition boundsu :: "('i,'a) state \ 'a bounds" ("\\<^sub>u") where "\\<^sub>u s = map_option snd o look (\\<^sub>i\<^sub>u s)" abbreviation BoundsIndicesMap ("\\<^sub>i") where "\\<^sub>i s \ (\\<^sub>i\<^sub>l s, \\<^sub>i\<^sub>u s)" abbreviation Bounds :: "('i,'a) state \ 'a bounds \ 'a bounds" ("\") where "\ s \ (\\<^sub>l s, \\<^sub>u s)" abbreviation Indices :: "('i,'a) state \ 'i bound_index \ 'i bound_index" ("\") where "\ s \ (\\<^sub>l s, \\<^sub>u s)" abbreviation BoundsIndices :: "('i,'a) state \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index)" ("\\") where "\\ s \ (\ s, \ s)" fun satisfies_bounds_index :: "'i set \ 'a::lrv valuation \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index) \ bool" (infixl "\\<^sub>i\<^sub>b" 100) where "(I,v) \\<^sub>i\<^sub>b ((BL,BU),(IL,IU)) \ ( (\ x c. BL x = Some c \ IL x \ I \ v x \ c) \ (\ x c. 
BU x = Some c \ IU x \ I \ v x \ c))" declare satisfies_bounds_index.simps[simp del] fun satisfies_bounds_index' :: "'i set \ 'a::lrv valuation \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index) \ bool" (infixl "\\<^sub>i\<^sub>b\<^sub>e" 100) where "(I,v) \\<^sub>i\<^sub>b\<^sub>e ((BL,BU),(IL,IU)) \ ( (\ x c. BL x = Some c \ IL x \ I \ v x = c) \ (\ x c. BU x = Some c \ IU x \ I \ v x = c))" declare satisfies_bounds_index'.simps[simp del] fun atoms_imply_bounds_index :: "('i,'a::lrv) i_atom set \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index) \ bool" (infixl "\\<^sub>i" 100) where "as \\<^sub>i bi \ (\ I v. (I,v) \\<^sub>i\<^sub>a\<^sub>s as \ (I,v) \\<^sub>i\<^sub>b bi)" declare atoms_imply_bounds_index.simps[simp del] lemma i_satisfies_atom_set_mono: "as \ as' \ v \\<^sub>i\<^sub>a\<^sub>s as' \ v \\<^sub>i\<^sub>a\<^sub>s as" by (cases v, auto simp: satisfies_atom_set_def) lemma atoms_imply_bounds_index_mono: "as \ as' \ as \\<^sub>i bi \ as' \\<^sub>i bi" unfolding atoms_imply_bounds_index.simps using i_satisfies_atom_set_mono by blast definition satisfies_state :: "'a::lrv valuation \ ('i,'a) state \ bool" (infixl "\\<^sub>s" 100) where "v \\<^sub>s s \ v \\<^sub>b \ s \ v \\<^sub>t \ s" definition curr_val_satisfies_state :: "('i,'a::lrv) state \ bool" ("\") where "\ s \ \\ s\ \\<^sub>s s" fun satisfies_state_index :: "'i set \ 'a::lrv valuation \ ('i,'a) state \ bool" (infixl "\\<^sub>i\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>s s \ (v \\<^sub>t \ s \ (I,v) \\<^sub>i\<^sub>b \\ s)" declare satisfies_state_index.simps[simp del] fun satisfies_state_index' :: "'i set \ 'a::lrv valuation \ ('i,'a) state \ bool" (infixl "\\<^sub>i\<^sub>s\<^sub>e" 100) where "(I,v) \\<^sub>i\<^sub>s\<^sub>e s \ (v \\<^sub>t \ s \ (I,v) \\<^sub>i\<^sub>b\<^sub>e \\ s)" declare satisfies_state_index'.simps[simp del] definition indices_state :: "('i,'a)state \ 'i set" where "indices_state s = { i. \ x b. look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ look (\\<^sub>i\<^sub>u s) x = Some (i,b)}" text \distinctness requires that for each index $i$, there is at most one variable $x$ and bound $b$ such that $x \leq b$ or $x \geq b$ or both are enforced.\ definition distinct_indices_state :: "('i,'a)state \ bool" where "distinct_indices_state s = (\ i x b x' b'. ((look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ look (\\<^sub>i\<^sub>u s) x = Some (i,b)) \ (look (\\<^sub>i\<^sub>l s) x' = Some (i,b') \ look (\\<^sub>i\<^sub>u s) x' = Some (i,b')) \ (x = x' \ b = b')))" lemma distinct_indices_stateD: assumes "distinct_indices_state s" shows "look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ look (\\<^sub>i\<^sub>u s) x = Some (i,b) \ look (\\<^sub>i\<^sub>l s) x' = Some (i,b') \ look (\\<^sub>i\<^sub>u s) x' = Some (i,b') \ x = x' \ b = b'" using assms unfolding distinct_indices_state_def by blast+ definition unsat_state_core :: "('i,'a::lrv) state \ bool" where "unsat_state_core s = (set (the (\\<^sub>c s)) \ indices_state s \ (\ (\ v. (set (the (\\<^sub>c s)),v) \\<^sub>i\<^sub>s s)))" definition subsets_sat_core :: "('i,'a::lrv) state \ bool" where "subsets_sat_core s = ((\ I. I \ set (the (\\<^sub>c s)) \ (\ v. 
(I,v) \\<^sub>i\<^sub>s\<^sub>e s)))" definition minimal_unsat_state_core :: "('i,'a::lrv) state \ bool" where "minimal_unsat_state_core s = (unsat_state_core s \ (distinct_indices_state s \ subsets_sat_core s))" lemma minimal_unsat_core_tabl_atoms_mono: assumes sub: "as \ bs" and unsat: "minimal_unsat_core_tabl_atoms I t as" shows "minimal_unsat_core_tabl_atoms I t bs" unfolding minimal_unsat_core_tabl_atoms_def proof (intro conjI impI allI) note min = unsat[unfolded minimal_unsat_core_tabl_atoms_def] from min have I: "I \ fst ` as" by blast with sub show "I \ fst ` bs" by blast from min have "(\v. v \\<^sub>t t \ (I, v) \\<^sub>i\<^sub>a\<^sub>s as)" by blast with i_satisfies_atom_set_mono[OF sub] show "(\v. v \\<^sub>t t \ (I, v) \\<^sub>i\<^sub>a\<^sub>s bs)" by blast fix J assume J: "J \ I" and dist_bs: "distinct_indices_atoms bs" hence dist: "distinct_indices_atoms as" using sub unfolding distinct_indices_atoms_def by blast from min dist J obtain v where v: "v \\<^sub>t t" "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as" by blast have "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s bs" unfolding i_satisfies_atom_set'.simps proof (intro ballI) fix a assume "a \ snd ` (bs \ J \ UNIV)" then obtain i where ia: "(i,a) \ bs" and i: "i \ J" by force with J have "i \ I" by auto with I obtain b where ib: "(i,b) \ as" by force with sub have ib': "(i,b) \ bs" by auto from dist_bs[unfolded distinct_indices_atoms_def, rule_format, OF ia ib'] have id: "atom_var a = atom_var b" "atom_const a = atom_const b" by auto from v(2)[unfolded i_satisfies_atom_set'.simps] i ib have "v \\<^sub>a\<^sub>e b" by force thus "v \\<^sub>a\<^sub>e a" using id unfolding satisfies_atom'_def by auto qed with v show "\v. v \\<^sub>t t \ (J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s bs" by blast qed lemma state_satisfies_index: assumes "v \\<^sub>s s" shows "(I,v) \\<^sub>i\<^sub>s s" unfolding satisfies_state_index.simps satisfies_bounds_index.simps proof (intro conjI impI allI) fix x c from assms[unfolded satisfies_state_def satisfies_bounds.simps, simplified] have "v \\<^sub>t \ s" and bnd: "v x \\<^sub>l\<^sub>b \\<^sub>l s x" "v x \\<^sub>u\<^sub>b \\<^sub>u s x" by auto show "v \\<^sub>t \ s" by fact show "\\<^sub>l s x = Some c \ \\<^sub>l s x \ I \ c \ v x" using bnd(1) by auto show "\\<^sub>u s x = Some c \ \\<^sub>u s x \ I \ v x \ c" using bnd(2) by auto qed lemma unsat_state_core_unsat: "unsat_state_core s \ \ (\ v. v \\<^sub>s s)" unfolding unsat_state_core_def using state_satisfies_index by blast definition tableau_valuated ("\") where "\ s \ \ x \ tvars (\ s). Mapping.lookup (\ s) x \ None" definition index_valid where "index_valid as (s :: ('i,'a) state) = (\ x b i. 
(look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ ((i, Geq x b) \ as)) \ (look (\\<^sub>i\<^sub>u s) x = Some (i,b) \ ((i, Leq x b) \ as)))" lemma index_valid_indices_state: "index_valid as s \ indices_state s \ fst ` as" unfolding index_valid_def indices_state_def by force lemma index_valid_mono: "as \ bs \ index_valid as s \ index_valid bs s" unfolding index_valid_def by blast lemma index_valid_distinct_indices: assumes "index_valid as s" and "distinct_indices_atoms as" shows "distinct_indices_state s" unfolding distinct_indices_state_def proof (intro allI impI, goal_cases) case (1 i x b y c) note valid = assms(1)[unfolded index_valid_def, rule_format] from 1(1) valid[of x i b] have "(i, Geq x b) \ as \ (i, Leq x b) \ as" by auto then obtain a where a: "(i,a) \ as" "atom_var a = x" "atom_const a = b" by auto from 1(2) valid[of y i c] have y: "(i, Geq y c) \ as \ (i, Leq y c) \ as" by auto then obtain a' where a': "(i,a') \ as" "atom_var a' = y" "atom_const a' = c" by auto from assms(2)[unfolded distinct_indices_atoms_def, rule_format, OF a(1) a'(1)] show ?case using a a' by auto qed text\To be a solution of the initial problem, a valuation should satisfy the initial tableau and list of atoms. Since tableau is changed only by equivalency preserving transformations and asserted atoms are encoded in the bounds, a valuation is a solution if it satisfies both the tableau and the bounds in the final state (when all atoms have been asserted). So, a valuation \v\ satisfies a state \s\ (denoted by \\\<^sub>s\) if it satisfies the tableau and the bounds, i.e., @{thm satisfies_state_def [no_vars]}. Since \\\ should be a candidate solution, it should satisfy the state (unless the \\\ flag is raised). This is denoted by \\ s\ and defined by @{thm curr_val_satisfies_state_def[no_vars]}. 
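As a purely informal illustration, not part of the formal development, the components of a state and this satisfaction check can be mirrored in plain Python; all names below are hypothetical, floating-point numbers stand in for the linearly ordered vector type, and the tableau is represented as a list of equations given as a pair of the lhs variable and a coefficient map for the rhs polynomial:

\begin{verbatim}
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple

@dataclass
class State:
    # tableau: equations  x = c1*y1 + ... + cn*yn, one pair per equation
    tableau: List[Tuple[str, Dict[str, float]]]
    lower: Dict[str, Tuple[int, float]] = field(default_factory=dict)  # indexed lower bounds
    upper: Dict[str, Tuple[int, float]] = field(default_factory=dict)  # indexed upper bounds
    valuation: Dict[str, float] = field(default_factory=dict)          # candidate solution
    unsat: bool = False                                                # unsatisfiability flag
    unsat_core: Optional[List[int]] = None                             # optional unsat core

def value(val, x):
    # unmapped variables are treated as 0
    return val.get(x, 0.0)

def in_bounds(s, x):
    # x respects its (optional) lower and upper bound
    lo, hi = s.lower.get(x), s.upper.get(x)
    lo_ok = lo is None or value(s.valuation, x) >= lo[1]
    hi_ok = hi is None or value(s.valuation, x) <= hi[1]
    return lo_ok and hi_ok

def satisfies_state(s):
    # the candidate valuation satisfies every equation and every bound
    eqs_ok = all(abs(value(s.valuation, x)
                     - sum(c * value(s.valuation, y) for y, c in rhs.items())) < 1e-9
                 for x, rhs in s.tableau)
    return eqs_ok and all(in_bounds(s, x) for x in set(s.lower) | set(s.upper))
\end{verbatim}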
\\ s\ will denote that all variables of \\ s\ are explicitly valuated in \\ s\.\ definition update\\ where [simp]: "update\\ field_update i x c s = field_update (upd x (i,c)) s" fun \\<^sub>i\<^sub>u_update where "\\<^sub>i\<^sub>u_update up (State T BIL BIU V U UC) = State T BIL (up BIU) V U UC" fun \\<^sub>i\<^sub>l_update where "\\<^sub>i\<^sub>l_update up (State T BIL BIU V U UC) = State T (up BIL) BIU V U UC" fun \_update where "\_update V (State T BIL BIU V_old U UC) = State T BIL BIU V U UC" fun \_update where "\_update T (State T_old BIL BIU V U UC) = State T BIL BIU V U UC" lemma update_simps[simp]: "\\<^sub>i\<^sub>u (\\<^sub>i\<^sub>u_update up s) = up (\\<^sub>i\<^sub>u s)" "\\<^sub>i\<^sub>l (\\<^sub>i\<^sub>u_update up s) = \\<^sub>i\<^sub>l s" "\ (\\<^sub>i\<^sub>u_update up s) = \ s" "\ (\\<^sub>i\<^sub>u_update up s) = \ s" "\ (\\<^sub>i\<^sub>u_update up s) = \ s" "\\<^sub>c (\\<^sub>i\<^sub>u_update up s) = \\<^sub>c s" "\\<^sub>i\<^sub>l (\\<^sub>i\<^sub>l_update up s) = up (\\<^sub>i\<^sub>l s)" "\\<^sub>i\<^sub>u (\\<^sub>i\<^sub>l_update up s) = \\<^sub>i\<^sub>u s" "\ (\\<^sub>i\<^sub>l_update up s) = \ s" "\ (\\<^sub>i\<^sub>l_update up s) = \ s" "\ (\\<^sub>i\<^sub>l_update up s) = \ s" "\\<^sub>c (\\<^sub>i\<^sub>l_update up s) = \\<^sub>c s" "\ (\_update V s) = V" "\\<^sub>i\<^sub>l (\_update V s) = \\<^sub>i\<^sub>l s" "\\<^sub>i\<^sub>u (\_update V s) = \\<^sub>i\<^sub>u s" "\ (\_update V s) = \ s" "\ (\_update V s) = \ s" "\\<^sub>c (\_update V s) = \\<^sub>c s" "\ (\_update T s) = T" "\\<^sub>i\<^sub>l (\_update T s) = \\<^sub>i\<^sub>l s" "\\<^sub>i\<^sub>u (\_update T s) = \\<^sub>i\<^sub>u s" "\ (\_update T s) = \ s" "\ (\_update T s) = \ s" "\\<^sub>c (\_update T s) = \\<^sub>c s" by (atomize(full), cases s, auto) declare \\<^sub>i\<^sub>u_update.simps[simp del] \\<^sub>i\<^sub>l_update.simps[simp del] fun set_unsat :: "'i list \ ('i,'a) state \ ('i,'a) state" where "set_unsat I (State T BIL BIU V U UC) = State T BIL BIU V True (Some (remdups I))" lemma set_unsat_simps[simp]: "\\<^sub>i\<^sub>l (set_unsat I s) = \\<^sub>i\<^sub>l s" "\\<^sub>i\<^sub>u (set_unsat I s) = \\<^sub>i\<^sub>u s" "\ (set_unsat I s) = \ s" "\ (set_unsat I s) = \ s" "\ (set_unsat I s) = True" "\\<^sub>c (set_unsat I s) = Some (remdups I)" by (atomize(full), cases s, auto) datatype ('i,'a) Direction = Direction (lt: "'a::linorder \ 'a \ bool") (LBI: "('i,'a) state \ ('i,'a) bounds_index") (UBI: "('i,'a) state \ ('i,'a) bounds_index") (LB: "('i,'a) state \ 'a bounds") (UB: "('i,'a) state \ 'a bounds") (LI: "('i,'a) state \ 'i bound_index") (UI: "('i,'a) state \ 'i bound_index") (UBI_upd: "(('i,'a) bounds_index \ ('i,'a) bounds_index) \ ('i,'a) state \ ('i,'a) state") (LE: "var \ 'a \ 'a atom") (GE: "var \ 'a \ 'a atom") (le_rat: "rat \ rat \ bool") definition Positive where [simp]: "Positive \ Direction (<) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>u_update Leq Geq (\)" definition Negative where [simp]: "Negative \ Direction (>) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>l_update Geq Leq (\)" text\Assuming that the \\\ flag and the current valuation \\\ in the final state determine the solution of a problem, the \assert_all\ function can be reduced to the \assert_all_state\ function that operates on the states: @{text[display] "assert_all t as \ let s = assert_all_state t as in if (\ s) then (False, None) else (True, Some (\ s))" } \ text\Specification for the \assert_all_state\ can 
be directly obtained from the specification of \assert_all\, and it describes the connection between the valuation in the final state and the initial tableau and atoms. However, we will make an additional refinement step and give stronger assumptions about the \assert_all_state\ function that describes the connection between the initial tableau and atoms with the tableau and bounds in the final state.\ locale AssertAllState = fixes assert_all_state::"tableau \ ('i,'a::lrv) i_atom list \ ('i,'a) state" assumes \ \The final and the initial tableau are equivalent.\ assert_all_state_tableau_equiv: "\ t \ assert_all_state t as = s' \ (v::'a valuation) \\<^sub>t t \ v \\<^sub>t \ s'" and \ \If @{term \} is not raised, then the valuation in the final state satisfies its tableau and its bounds (that are, in this case, equivalent to the set of all asserted bounds).\ assert_all_state_sat: "\ t \ assert_all_state t as = s' \ \ \ s' \ \ s'" and assert_all_state_sat_atoms_equiv_bounds: "\ t \ assert_all_state t as = s' \ \ \ s' \ flat (set as) \ \ s'" and \ \If @{term \} is raised, then there is no valuation satisfying the tableau and the bounds in the final state (that are, in this case, equivalent to a subset of asserted atoms).\ assert_all_state_unsat: "\ t \ assert_all_state t as = s' \ \ s' \ minimal_unsat_state_core s'" and assert_all_state_unsat_atoms_equiv_bounds: "\ t \ assert_all_state t as = s' \ \ s' \ set as \\<^sub>i \\ s'" and \ \The set of indices is taken from the constraints\ assert_all_state_indices: "\ t \ assert_all_state t as = s \ indices_state s \ fst ` set as" and assert_all_index_valid: "\ t \ assert_all_state t as = s \ index_valid (set as) s" begin definition assert_all where "assert_all t as \ let s = assert_all_state t as in if (\ s) then Unsat (the (\\<^sub>c s)) else Sat (\ s)" end text\The \assert_all_state\ function can be implemented by first applying the \init\ function that creates an initial state based on the starting tableau, and then by iteratively applying the \assert\ function for each atom in the starting atoms list.\ text\ \begin{small} \assert_loop as s \ foldl (\ s' a. if (\ s') then s' else assert a s') s as\ \assert_all_state t as \ assert_loop ats (init t)\ \end{small} \ locale Init' = fixes init :: "tableau \ ('i,'a::lrv) state" assumes init'_tableau_normalized: "\ t \ \ (\ (init t))" assumes init'_tableau_equiv: "\ t \ (v::'a valuation) \\<^sub>t t = v \\<^sub>t \ (init t)" assumes init'_sat: "\ t \ \ \ (init t) \ \ (init t)" assumes init'_unsat: "\ t \ \ (init t) \ minimal_unsat_state_core (init t)" assumes init'_atoms_equiv_bounds: "\ t \ {} \ \ (init t)" assumes init'_atoms_imply_bounds_index: "\ t \ {} \\<^sub>i \\ (init t)" text\Specification for \init\ can be obtained from the specification of \asser_all_state\ since all its assumptions must also hold for \init\ (when the list of atoms is empty). Also, since \init\ is the first step in the \assert_all_state\ implementation, the precondition for \init\ the same as for the \assert_all_state\. However, unsatisfiability is never going to be detected during initialization and @{term \} flag is never going to be raised. Also, the tableau in the initial state can just be initialized with the starting tableau. The condition @{term "{} \ \ (init t)"} is equivalent to asking that initial bounds are empty. 
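As an informal sketch only, reusing the hypothetical Python state record from above, an implementation meeting these requirements could take the tableau as given, leave both bound maps empty, keep the flag unset, and valuate every tableau variable with zero; since the tableau equations are homogeneous linear, the zero valuation satisfies them:

\begin{verbatim}
def init(tableau):
    # collect every tableau variable (lhs and rhs) and valuate it with 0
    vs = set()
    for lhs, rhs in tableau:
        vs.add(lhs)
        vs.update(rhs.keys())
    return State(tableau=tableau,
                 lower={}, upper={},                 # no atoms asserted yet
                 valuation={v: 0.0 for v in vs},
                 unsat=False, unsat_core=None)
\end{verbatim}

Any concrete implementation along these lines meets the constraints just discussed.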
Therefore, specification for \init\ can be refined to:\ locale Init = fixes init::"tableau \ ('i,'a::lrv) state" assumes \ \Tableau in the initial state for @{text t} is @{text t}:\ init_tableau_id: "\ (init t) = t" and \ \Since unsatisfiability is not detected, @{text \} flag must not be set:\ init_unsat_flag: "\ \ (init t)" and \ \The current valuation must satisfy the tableau:\ init_satisfies_tableau: "\\ (init t)\ \\<^sub>t t" and \ \In an inital state no atoms are yet asserted so the bounds must be empty:\ init_bounds: "\\<^sub>i\<^sub>l (init t) = Mapping.empty" "\\<^sub>i\<^sub>u (init t) = Mapping.empty" and \ \All tableau vars are valuated:\ init_tableau_valuated: "\ (init t)" begin lemma init_satisfies_bounds: "\\ (init t)\ \\<^sub>b \ (init t)" using init_bounds unfolding satisfies_bounds.simps in_bounds.simps bound_compare_defs by (auto simp: boundsl_def boundsu_def) lemma init_satisfies: "\ (init t)" using init_satisfies_tableau init_satisfies_bounds init_tableau_id unfolding curr_val_satisfies_state_def satisfies_state_def by simp lemma init_atoms_equiv_bounds: "{} \ \ (init t)" using init_bounds unfolding atoms_equiv_bounds.simps satisfies_bounds.simps in_bounds.simps satisfies_atom_set_def unfolding bound_compare_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma init_atoms_imply_bounds_index: "{} \\<^sub>i \\ (init t)" using init_bounds unfolding atoms_imply_bounds_index.simps satisfies_bounds_index.simps in_bounds.simps i_satisfies_atom_set.simps satisfies_atom_set_def unfolding bound_compare_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma init_tableau_normalized: "\ t \ \ (\ (init t))" using init_tableau_id by simp lemma init_index_valid: "index_valid as (init t)" using init_bounds unfolding index_valid_def by auto lemma init_indices: "indices_state (init t) = {}" unfolding indices_state_def init_bounds by auto end sublocale Init < Init' init using init_tableau_id init_satisfies init_unsat_flag init_atoms_equiv_bounds init_atoms_imply_bounds_index by unfold_locales auto abbreviation vars_list where "vars_list t \ remdups (map lhs t @ (concat (map (Abstract_Linear_Poly.vars_list \ rhs) t)))" lemma "tvars t = set (vars_list t)" by (auto simp add: set_vars_list lvars_def rvars_def) text\\smallskip The \assert\ function asserts a single atom. Since the \init\ function does not raise the \\\ flag, from the definition of \assert_loop\, it is clear that the flag is not raised when the \assert\ function is called. Moreover, the assumptions about the \assert_all_state\ imply that the loop invariant must be that if the \\\ flag is not raised, then the current valuation must satisfy the state (i.e., \\ s\). The \assert\ function will be more easily implemented if it is always applied to a state with a normalized and valuated tableau, so we make this another loop invariant. Therefore, the precondition for the \assert a s\ function call is that \\ \ s\, \\ s\, \\ (\ s)\ and \\ s\ hold. 
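Continuing the informal Python sketch under the same assumptions, the loop simply folds a hypothetical single-atom assertion function over the list of atoms and ignores the remaining atoms once the flag has been raised; the loop invariant discussed above is made explicit as an assertion:

\begin{verbatim}
def assert_all_state(assert_fn, tableau, atoms):
    # assert_fn asserts a single (indexed) atom in a state and returns the new state
    s = init(tableau)
    for atom in atoms:
        if s.unsat:
            break                      # unsatisfiability already detected
        assert satisfies_state(s)      # invariant: candidate solution still valid
        s = assert_fn(atom, s)
    return s
\end{verbatim}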
The specification for \assert\ directly follows from the specification of \assert_all_state\ (except that it is additionally required that bounds reflect asserted atoms also when unsatisfiability is detected, and that it is required that \assert\ keeps the tableau normalized and valuated).\ locale Assert = fixes assert::"('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" assumes \ \Tableau remains equivalent to the previous one and normalized and valuated.\ assert_tableau: "\\ \ s; \ s; \ (\ s); \ s\ \ let s' = assert a s in ((v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ s') \ \ (\ s') \ \ s'" and \ \If the @{term \} flag is not raised, then the current valuation is updated so that it satisfies the current tableau and the current bounds.\ assert_sat: "\\ \ s; \ s; \ (\ s); \ s\ \ \ \ (assert a s) \ \ (assert a s)" and \ \The set of asserted atoms remains equivalent to the bounds in the state.\ assert_atoms_equiv_bounds: "\\ \ s; \ s; \ (\ s); \ s\ \ flat ats \ \ s \ flat (ats \ {a}) \ \ (assert a s)" and \ \There is a subset of asserted atoms which remains index-equivalent to the bounds in the state.\ assert_atoms_imply_bounds_index: "\\ \ s; \ s; \ (\ s); \ s\ \ ats \\<^sub>i \\ s \ insert a ats \\<^sub>i \\ (assert a s)" and \ \If the @{term \} flag is raised, then there is no valuation that satisfies both the current tableau and the current bounds.\ assert_unsat: "\\ \ s; \ s; \ (\ s); \ s; index_valid ats s\ \ \ (assert a s) \ minimal_unsat_state_core (assert a s)" and assert_index_valid: "\\ \ s; \ s; \ (\ s); \ s\ \ index_valid ats s \ index_valid (insert a ats) (assert a s)" begin lemma assert_tableau_equiv: "\\ \ s; \ s; \ (\ s); \ s\ \ (v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ (assert a s)" using assert_tableau by (simp add: Let_def) lemma assert_tableau_normalized: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (\ (assert a s))" using assert_tableau by (simp add: Let_def) lemma assert_tableau_valuated: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (assert a s)" using assert_tableau by (simp add: Let_def) end locale AssertAllState' = Init init + Assert assert for init :: "tableau \ ('i,'a::lrv) state" and assert :: "('i,'a) i_atom \ ('i,'a) state \ ('i,'a) state" begin definition assert_loop where "assert_loop as s \ foldl (\ s' a. if (\ s') then s' else assert a s') s as" definition assert_all_state where [simp]: "assert_all_state t as \ assert_loop as (init t)" lemma AssertAllState'_precond: "\ t \ \ (\ (assert_all_state t as)) \ (\ (assert_all_state t as)) \ (\ \ (assert_all_state t as) \ \ (assert_all_state t as))" unfolding assert_all_state_def assert_loop_def using init_satisfies init_tableau_normalized init_index_valid using assert_sat assert_tableau_normalized init_tableau_valuated assert_tableau_valuated by (induct as rule: rev_induct) auto lemma AssertAllState'Induct: assumes "\ t" "P {} (init t)" "\ as bs t. as \ bs \ P as t \ P bs t" "\ s a as. \\ \ s; \ s; \ (\ s); \ s; P as s; index_valid as s\ \ P (insert a as) (assert a s)" shows "P (set as) (assert_all_state t as)" proof - have "P (set as) (assert_all_state t as) \ index_valid (set as) (assert_all_state t as)" proof (induct as rule: rev_induct) case Nil then show ?case unfolding assert_all_state_def assert_loop_def using assms(2) init_index_valid by auto next case (snoc a as') let ?f = "\s' a. 
if \ s' then s' else assert a s'" let ?s = "foldl ?f (init t) as'" show ?case proof (cases "\ ?s") case True from snoc index_valid_mono[of _ "set (a # as')" "(assert_all_state t as')"] have index: "index_valid (set (a # as')) (assert_all_state t as')" by auto from snoc assms(3)[of "set as'" "set (a # as')"] have P: "P (set (a # as')) (assert_all_state t as')" by auto show ?thesis using True P index unfolding assert_all_state_def assert_loop_def by simp next case False then show ?thesis using snoc using assms(1) assms(4) using AssertAllState'_precond assert_index_valid unfolding assert_all_state_def assert_loop_def by auto qed qed then show ?thesis .. qed lemma AssertAllState_index_valid: "\ t \ index_valid (set as) (assert_all_state t as)" by (rule AssertAllState'Induct, auto intro: assert_index_valid init_index_valid index_valid_mono) lemma AssertAllState'_sat_atoms_equiv_bounds: "\ t \ \ \ (assert_all_state t as) \ flat (set as) \ \ (assert_all_state t as)" using AssertAllState'_precond using init_atoms_equiv_bounds assert_atoms_equiv_bounds unfolding assert_all_state_def assert_loop_def by (induct as rule: rev_induct) auto lemma AssertAllState'_unsat_atoms_implies_bounds: assumes "\ t" shows "set as \\<^sub>i \\ (assert_all_state t as)" proof (induct as rule: rev_induct) case Nil then show ?case using assms init_atoms_imply_bounds_index unfolding assert_all_state_def assert_loop_def by simp next case (snoc a as') let ?s = "assert_all_state t as'" show ?case proof (cases "\ ?s") case True then show ?thesis using snoc atoms_imply_bounds_index_mono[of "set as'" "set (as' @ [a])"] unfolding assert_all_state_def assert_loop_def by auto next case False then have id: "assert_all_state t (as' @ [a]) = assert a ?s" unfolding assert_all_state_def assert_loop_def by simp from snoc have as': "set as' \\<^sub>i \\ ?s" by auto from AssertAllState'_precond[of t as'] assms False have "\ ?s" "\ (\ ?s)" "\ ?s" by auto from assert_atoms_imply_bounds_index[OF False this as', of a] show ?thesis unfolding id by auto qed qed end text\Under these assumptions, it can easily be shown (mainly by induction) that the previously shown implementation of \assert_all_state\ satisfies its specification.\ sublocale AssertAllState' < AssertAllState assert_all_state proof fix v::"'a valuation" and t as s' assume *: "\ t" and id: "assert_all_state t as = s'" note idsym = id[symmetric] show "v \\<^sub>t t = v \\<^sub>t \ s'" unfolding idsym using init_tableau_id[of t] assert_tableau_equiv[of _ v] by (induct rule: AssertAllState'Induct) (auto simp add: * ) show "\ \ s' \ \ s'" unfolding idsym using AssertAllState'_precond by (simp add: * ) show "\ \ s' \ flat (set as) \ \ s'" unfolding idsym using * by (rule AssertAllState'_sat_atoms_equiv_bounds) show "\ s' \ set as \\<^sub>i \\ s'" using * unfolding idsym by (rule AssertAllState'_unsat_atoms_implies_bounds) show "\ s' \ minimal_unsat_state_core s'" using init_unsat_flag assert_unsat assert_index_valid unfolding idsym by (induct rule: AssertAllState'Induct) (auto simp add: * ) show "indices_state s' \ fst ` set as" unfolding idsym using * by (intro index_valid_indices_state, induct rule: AssertAllState'Induct, auto simp: init_index_valid index_valid_mono assert_index_valid) show "index_valid (set as) s'" using "*" AssertAllState_index_valid idsym by blast qed subsection\Asserting Single Atoms\ text\The @{term assert} function is split in two phases. First, @{term assert_bound} updates the bounds and checks only for conflicts cheap to detect. 
Next, \check\ performs the full simplex algorithm. The \assert\ function can be implemented as \assert a s = check (assert_bound a s)\. Note that it is also possible to do the first phase for several asserted atoms, and only then to let the expensive second phase work. \medskip Asserting an atom \x \ b\ begins with the function \assert_bound\. If the atom is subsumed by the current bounds, then no changes are performed. Otherwise, bounds for \x\ are changed to incorporate the atom. If the atom is inconsistent with the previous bounds for \x\, the @{term \} flag is raised. If \x\ is not a lhs variable in the current tableau and if the value for \x\ in the current valuation violates the new bound \b\, the value for \x\ can be updated and set to \b\, meanwhile updating the values for lhs variables of the tableau so that it remains satisfied. Otherwise, no changes to the current valuation are performed.\ fun satisfies_bounds_set :: "'a::linorder valuation \ 'a bounds \ 'a bounds \ var set \ bool" where "satisfies_bounds_set v (lb, ub) S \ (\ x \ S. in_bounds x v (lb, ub))" declare satisfies_bounds_set.simps [simp del] syntax "_satisfies_bounds_set" :: "(var \ 'a::linorder) \ 'a bounds \ 'a bounds \ var set \ bool" ("_ \\<^sub>b _ \/ _") translations "v \\<^sub>b b \ S" == "CONST satisfies_bounds_set v b S" lemma satisfies_bounds_set_iff: "v \\<^sub>b (lb, ub) \ S \ (\ x \ S. v x \\<^sub>l\<^sub>b lb x \ v x \\<^sub>u\<^sub>b ub x)" by (simp add: satisfies_bounds_set.simps) definition curr_val_satisfies_no_lhs ("\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s") where "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s \ \\ s\ \\<^sub>t (\ s) \ (\\ s\ \\<^sub>b (\ s) \ (- lvars (\ s)))" lemma satisfies_satisfies_no_lhs: "\ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" by (simp add: curr_val_satisfies_state_def satisfies_state_def curr_val_satisfies_no_lhs_def satisfies_bounds.simps satisfies_bounds_set.simps) definition bounds_consistent :: "('i,'a::linorder) state \ bool" ("\") where "\ s \ \ x. if \\<^sub>l s x = None \ \\<^sub>u s x = None then True else the (\\<^sub>l s x) \ the (\\<^sub>u s x)" text\So, the \assert_bound\ function must ensure that the given atom is included in the bounds, that the tableau remains satisfied by the valuation and that all variables except the lhs variables in the tableau are within their bounds. To formalize this, we introduce the notation \v \\<^sub>b (lb, ub) \ S\, and define @{thm satisfies_bounds_set_iff[no_vars]}, and @{thm curr_val_satisfies_no_lhs_def[no_vars]}. The \assert_bound\ function raises the \\\ flag if and only if lower and upper bounds overlap. 
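As a rough illustration in the hypothetical Python setting introduced earlier, and only for an upper-bound atom (the lower-bound case is symmetric), this case analysis could be sketched as follows; the helpers shown here are ad hoc, and the valuation repair anticipates the update operation treated formally further below:

\begin{verbatim}
from dataclasses import replace

def is_lhs(s, x):
    # is x a lhs variable of the tableau?
    return any(lhs == x for lhs, _ in s.tableau)

def update_valuation(x, c, s):
    # set x to c and recompute all lhs variables in the changed valuation;
    # in a normalized tableau no lhs variable occurs on any rhs, so a
    # single pass over the equations suffices
    val = {**s.valuation, x: c}
    for lhs, rhs in s.tableau:
        val[lhs] = sum(k * value(val, y) for y, k in rhs.items())
    return replace(s, valuation=val)

def assert_upper(index, x, c, s):
    # the atom is subsumed by the current upper bound: no change at all
    if x in s.upper and s.upper[x][1] <= c:
        return s
    # otherwise record the new, indexed upper bound
    s = replace(s, upper={**s.upper, x: (index, c)})
    # the new upper bound lies below the current lower bound: raise the
    # flag and record an unsat core from the two bound indices
    if x in s.lower and c < s.lower[x][1]:
        return replace(s, unsat=True, unsat_core=[index, s.lower[x][0]])
    # if x is not a lhs variable and its current value violates the new
    # bound, repair the candidate valuation
    if not is_lhs(s, x) and value(s.valuation, x) > c:
        s = update_valuation(x, c, s)
    return s
\end{verbatim}

In the sketch the flag is raised exactly when the newly asserted bound and the existing opposite bound overlap.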
This is formalized as @{thm bounds_consistent_def[no_vars]}.\ lemma satisfies_bounds_consistent: "(v::'a::linorder valuation) \\<^sub>b \ s \ \ s" unfolding satisfies_bounds.simps in_bounds.simps bounds_consistent_def bound_compare_defs by (auto split: option.split) force lemma satisfies_consistent: "\ s \ \ s" by (auto simp add: curr_val_satisfies_state_def satisfies_state_def satisfies_bounds_consistent) lemma bounds_consistent_geq_lb: "\\ s; \\<^sub>u s x\<^sub>i = Some c\ \ c \\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i" unfolding bounds_consistent_def by (cases "\\<^sub>l s x\<^sub>i", auto simp add: bound_compare_defs split: if_splits) (erule_tac x="x\<^sub>i" in allE, auto) lemma bounds_consistent_leq_ub: "\\ s; \\<^sub>l s x\<^sub>i = Some c\ \ c \\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i" unfolding bounds_consistent_def by (cases "\\<^sub>u s x\<^sub>i", auto simp add: bound_compare_defs split: if_splits) (erule_tac x="x\<^sub>i" in allE, auto) lemma bounds_consistent_gt_ub: "\\ s; c <\<^sub>l\<^sub>b \\<^sub>l s x \ \ \ c >\<^sub>u\<^sub>b \\<^sub>u s x" unfolding bounds_consistent_def by (case_tac[!] "\\<^sub>l s x", case_tac[!] "\\<^sub>u s x") (auto simp add: bound_compare_defs, erule_tac x="x" in allE, simp) lemma bounds_consistent_lt_lb: "\\ s; c >\<^sub>u\<^sub>b \\<^sub>u s x \ \ \ c <\<^sub>l\<^sub>b \\<^sub>l s x" unfolding bounds_consistent_def by (case_tac[!] "\\<^sub>l s x", case_tac[!] "\\<^sub>u s x") (auto simp add: bound_compare_defs, erule_tac x="x" in allE, simp) text\Since the \assert_bound\ is the first step in the \assert\ function implementation, the preconditions for \assert_bound\ are the same as preconditions for the \assert\ function. The specifiction for the \assert_bound\ is:\ locale AssertBound = fixes assert_bound::"('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" assumes \ \The tableau remains unchanged and valuated.\ assert_bound_tableau: "\\ \ s; \ s; \ (\ s); \ s\ \ assert_bound a s = s' \ \ s' = \ s \ \ s'" and \ \If the @{term \} flag is not set, all but the lhs variables in the tableau remain within their bounds, the new valuation satisfies the tableau, and bounds do not overlap.\ assert_bound_sat: "\\ \ s; \ s; \ (\ s); \ s\ \ assert_bound a s = s' \ \ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ s'" and \ \The set of asserted atoms remains equivalent to the bounds in the state.\ assert_bound_atoms_equiv_bounds: "\\ \ s; \ s; \ (\ s); \ s\ \ flat ats \ \ s \ flat (ats \ {a}) \ \ (assert_bound a s)" and assert_bound_atoms_imply_bounds_index: "\\ \ s; \ s; \ (\ s); \ s\ \ ats \\<^sub>i \\ s \ insert a ats \\<^sub>i \\ (assert_bound a s)" and \ \@{term \} flag is raised, only if the bounds became inconsistent:\ assert_bound_unsat: "\\ \ s; \ s; \ (\ s); \ s\ \ index_valid as s \ assert_bound a s = s' \ \ s' \ minimal_unsat_state_core s'" and assert_bound_index_valid: "\\ \ s; \ s; \ (\ s); \ s\ \ index_valid as s \ index_valid (insert a as) (assert_bound a s)" begin lemma assert_bound_tableau_id: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (assert_bound a s) = \ s" using assert_bound_tableau by blast lemma assert_bound_tableau_valuated: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (assert_bound a s)" using assert_bound_tableau by blast end locale AssertBoundNoLhs = fixes assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" assumes assert_bound_nolhs_tableau_id: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ \ (assert_bound a s) = \ s" assumes assert_bound_nolhs_sat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; 
\ (\ s); \ s; \ s\ \ \ \ (assert_bound a s) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound a s) \ \ (assert_bound a s)" assumes assert_bound_nolhs_atoms_equiv_bounds: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ flat ats \ \ s \ flat (ats \ {a}) \ \ (assert_bound a s)" assumes assert_bound_nolhs_atoms_imply_bounds_index: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ ats \\<^sub>i \\ s \ insert a ats \\<^sub>i \\ (assert_bound a s)" assumes assert_bound_nolhs_unsat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ index_valid as s \ \ (assert_bound a s) \ minimal_unsat_state_core (assert_bound a s)" assumes assert_bound_nolhs_tableau_valuated: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ \ (assert_bound a s)" assumes assert_bound_nolhs_index_valid: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ index_valid as s \ index_valid (insert a as) (assert_bound a s)" sublocale AssertBoundNoLhs < AssertBound by unfold_locales ((metis satisfies_satisfies_no_lhs satisfies_consistent assert_bound_nolhs_tableau_id assert_bound_nolhs_sat assert_bound_nolhs_atoms_equiv_bounds assert_bound_nolhs_index_valid assert_bound_nolhs_atoms_imply_bounds_index assert_bound_nolhs_unsat assert_bound_nolhs_tableau_valuated)+) text\The second phase of \assert\, the \check\ function, is the heart of the Simplex algorithm. It is always called after \assert_bound\, but in two different situations. In the first case \assert_bound\ raised the \\\ flag and then \check\ should retain the flag and should not perform any changes. In the second case \assert_bound\ did not raise the \\\ flag, so \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\, \\ s\, \\ (\ s)\, and \\ s\ hold.\ locale Check = fixes check::"('i,'a::lrv) state \ ('i,'a) state" assumes \ \If @{text check} is called from an inconsistent state, the state is unchanged.\ check_unsat_id: "\ s \ check s = s" and \ \The tableau remains equivalent to the previous one, normalized and valuated.\ check_tableau: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ let s' = check s in ((v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ s') \ \ (\ s') \ \ s'" and \ \The bounds remain unchanged.\ check_bounds_id: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \\<^sub>i (check s) = \\<^sub>i s" and \ \If @{term \} flag is not raised, the current valuation @{text "\"} satisfies both the tableau and the bounds and if it is raised, there is no valuation that satisfies them.\ check_sat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ \ (check s) \ \ (check s)" and check_unsat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ (check s) \ minimal_unsat_state_core (check s)" begin lemma check_tableau_equiv: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ (v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ (check s)" using check_tableau by (simp add: Let_def) lemma check_tableau_index_valid: assumes "\ \ s" " \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" shows "index_valid as (check s) = index_valid as s" unfolding index_valid_def using check_bounds_id[OF assms] by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma check_tableau_normalized: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ (\ (check s))" using check_tableau by (simp add: Let_def) lemma 
check_tableau_valuated: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ (check s)" using check_tableau by (simp add: Let_def) lemma check_indices_state: assumes "\ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ \ s \ \ s" "\ \ s \ \ (\ s)" "\ \ s \ \ s" shows "indices_state (check s) = indices_state s" using check_bounds_id[OF _ assms] check_unsat_id[of s] unfolding indices_state_def by (cases "\ s", auto) lemma check_distinct_indices_state: assumes "\ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ \ s \ \ s" "\ \ s \ \ (\ s)" "\ \ s \ \ s" shows "distinct_indices_state (check s) = distinct_indices_state s" using check_bounds_id[OF _ assms] check_unsat_id[of s] unfolding distinct_indices_state_def by (cases "\ s", auto) end locale Assert' = AssertBound assert_bound + Check check for assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" and check :: "('i,'a::lrv) state \ ('i,'a) state" begin definition assert :: "('i,'a) i_atom \ ('i,'a) state \ ('i,'a) state" where "assert a s \ check (assert_bound a s)" lemma Assert'Precond: assumes "\ \ s" "\ s" "\ (\ s)" "\ s" shows "\ (\ (assert_bound a s))" "\ \ (assert_bound a s) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound a s) \ \ (assert_bound a s)" "\ (assert_bound a s)" using assms using assert_bound_tableau_id assert_bound_sat assert_bound_tableau_valuated by (auto simp add: satisfies_bounds_consistent Let_def) end sublocale Assert' < Assert assert proof fix s::"('i,'a) state" and v::"'a valuation" and a::"('i,'a) i_atom" assume *: "\ \ s" "\ s" "\ (\ s)" "\ s" have "\ (\ (assert a s))" using check_tableau_normalized[of "assert_bound a s"] check_unsat_id[of "assert_bound a s"] * using assert_bound_sat[of s a] Assert'Precond[of s a] by (force simp add: assert_def) moreover have "v \\<^sub>t \ s = v \\<^sub>t \ (assert a s)" using check_tableau_equiv[of "assert_bound a s" v] * using check_unsat_id[of "assert_bound a s"] by (force simp add: assert_def Assert'Precond assert_bound_sat assert_bound_tableau_id) moreover have "\ (assert a s)" using assert_bound_tableau_valuated[of s a] * using check_tableau_valuated[of "assert_bound a s"] using check_unsat_id[of "assert_bound a s"] by (cases "\ (assert_bound a s)") (auto simp add: Assert'Precond assert_def) ultimately show "let s' = assert a s in (v \\<^sub>t \ s = v \\<^sub>t \ s') \ \ (\ s') \ \ s'" by (simp add: Let_def) next fix s::"('i,'a) state" and a::"('i,'a) i_atom" assume "\ \ s" "\ s" "\ (\ s)" "\ s" then show "\ \ (assert a s) \ \ (assert a s)" unfolding assert_def using check_unsat_id[of "assert_bound a s"] using check_sat[of "assert_bound a s"] by (force simp add: Assert'Precond) next fix s::"('i,'a) state" and a::"('i,'a) i_atom" and ats::"('i,'a) i_atom set" assume "\ \ s" "\ s" "\ (\ s)" "\ s" then show "flat ats \ \ s \ flat (ats \ {a}) \ \ (assert a s)" using assert_bound_atoms_equiv_bounds using check_unsat_id[of "assert_bound a s"] check_bounds_id by (cases "\ (assert_bound a s)") (auto simp add: Assert'Precond assert_def simp: indexl_def indexu_def boundsl_def boundsu_def) next fix s::"('i,'a) state" and a::"('i,'a) i_atom" and ats assume *: "\ \ s" "\ s" "\ (\ s)" "\ s" "\ (assert a s)" "index_valid ats s" show "minimal_unsat_state_core (assert a s)" proof (cases "\ (assert_bound a s)") case True then show ?thesis unfolding assert_def using * assert_bound_unsat check_tableau_equiv[of "assert_bound a s"] check_bounds_id using check_unsat_id[of "assert_bound a s"] by (auto simp add: Assert'Precond 
satisfies_state_def Let_def) next case False then show ?thesis unfolding assert_def using * assert_bound_sat[of s a] check_unsat Assert'Precond by (metis assert_def) qed next fix ats fix s::"('i,'a) state" and a::"('i,'a) i_atom" assume *: "index_valid ats s" and **: "\ \ s" "\ s" "\ (\ s)" "\ s" have *: "index_valid (insert a ats) (assert_bound a s)" using assert_bound_index_valid[OF ** *] . show "index_valid (insert a ats) (assert a s)" proof (cases "\ (assert_bound a s)") case True show ?thesis unfolding assert_def check_unsat_id[OF True] using * . next case False show ?thesis unfolding assert_def using Assert'Precond[OF **, of a] False * by (subst check_tableau_index_valid[OF False], auto) qed next fix s ats a let ?s = "assert_bound a s" assume *: "\ \ s" "\ s" "\ (\ s)" "\ s" "ats \\<^sub>i \\ s" from assert_bound_atoms_imply_bounds_index[OF this, of a] have as: "insert a ats \\<^sub>i \\ (assert_bound a s)" by auto show "insert a ats \\<^sub>i \\ (assert a s)" proof (cases "\ ?s") case True from check_unsat_id[OF True] as show ?thesis unfolding assert_def by auto next case False from assert_bound_tableau_id[OF *(1-4)] * have t: "\ (\ ?s)" by simp from assert_bound_tableau_valuated[OF *(1-4)] have v: "\ ?s" . from assert_bound_sat[OF *(1-4) refl False] have "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s" "\ ?s" by auto from check_bounds_id[OF False this t v] as show ?thesis unfolding assert_def by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) qed qed text\Under these assumptions for \assert_bound\ and \check\, it can be easily shown that the implementation of \assert\ (previously given) satisfies its specification.\ locale AssertAllState'' = Init init + AssertBoundNoLhs assert_bound + Check check for init :: "tableau \ ('i,'a::lrv) state" and assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" and check :: "('i,'a::lrv) state \ ('i,'a) state" begin definition assert_bound_loop where "assert_bound_loop ats s \ foldl (\ s' a. if (\ s') then s' else assert_bound a s') s ats" definition assert_all_state where [simp]: "assert_all_state t ats \ check (assert_bound_loop ats (init t))" text\However, for efficiency reasons, we want to allow implementations that delay the \check\ function call and call it after several \assert_bound\ calls. For example: \smallskip \begin{small} @{thm assert_bound_loop_def[no_vars]} @{thm assert_all_state_def[no_vars]} \end{small} \smallskip Then, the loop consists only of \assert_bound\ calls, so \assert_bound\ postcondition must imply its precondition. This is not the case, since variables on the lhs may be out of their bounds. 
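For instance, suppose the tableau contains an equation with lhs variable \x\ and rhs \y\, an upper bound of 0 has already been asserted for \x\, and the current valuation assigns 0 to both variables. Asserting a lower bound of 1 for \y\ makes \assert_bound\ raise the value of \y\ to 1 and recompute the lhs variables, so \x\ also becomes 1 and violates its own upper bound, even though every variable outside the lhs is still within its bounds; only a later \check\ call can repair this, by pivoting.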
Therefore, we make a refinement and specify weaker preconditions (replace \\ s\, by \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ and \\ s\) for \assert_bound\, and show that these preconditions are still good enough to prove the correctnes of this alternative \assert_all_state\ definition.\ lemma AssertAllState''_precond': assumes "\ (\ s)" "\ s" "\ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s \ \ s" shows "let s' = assert_bound_loop ats s in \ (\ s') \ \ s' \ (\ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ s')" using assms using assert_bound_nolhs_tableau_id assert_bound_nolhs_sat assert_bound_nolhs_tableau_valuated by (induct ats rule: rev_induct) (auto simp add: assert_bound_loop_def Let_def) lemma AssertAllState''_precond: assumes "\ t" shows "let s' = assert_bound_loop ats (init t) in \ (\ s') \ \ s' \ (\ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ s')" using assms using AssertAllState''_precond'[of "init t" ats] by (simp add: Let_def init_tableau_id init_unsat_flag init_satisfies satisfies_consistent satisfies_satisfies_no_lhs init_tableau_valuated) lemma AssertAllState''Induct: assumes "\ t" "P {} (init t)" "\ as bs t. as \ bs \ P as t \ P bs t" "\ s a ats. \\ \ s; \\ s\ \\<^sub>t \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s; P (set ats) s; index_valid (set ats) s\ \ P (insert a (set ats)) (assert_bound a s)" shows "P (set ats) (assert_bound_loop ats (init t))" proof - have "P (set ats) (assert_bound_loop ats (init t)) \ index_valid (set ats) (assert_bound_loop ats (init t))" proof (induct ats rule: rev_induct) case Nil then show ?case unfolding assert_bound_loop_def using assms(2) init_index_valid by simp next case (snoc a as') let ?s = "assert_bound_loop as' (init t)" from snoc index_valid_mono[of _ "set (a # as')" "assert_bound_loop as' (init t)"] have index: "index_valid (set (a # as')) (assert_bound_loop as' (init t))" by auto from snoc assms(3)[of "set as'" "set (a # as')"] have P: "P (set (a # as')) (assert_bound_loop as' (init t))" by auto show ?case proof (cases "\ ?s") case True then show ?thesis using P index unfolding assert_bound_loop_def by simp next case False have id': "set (as' @ [a]) = insert a (set as')" by simp have id: "assert_bound_loop (as' @ [a]) (init t) = assert_bound a (assert_bound_loop as' (init t))" using False unfolding assert_bound_loop_def by auto from snoc have index: "index_valid (set as') ?s" by simp show ?thesis unfolding id unfolding id' using False snoc AssertAllState''_precond[OF assms(1)] by (intro conjI assert_bound_nolhs_index_valid index assms(4); (force simp: Let_def curr_val_satisfies_no_lhs_def)?) qed qed then show ?thesis .. 
qed lemma AssertAllState''_tableau_id: "\ t \ \ (assert_bound_loop ats (init t)) = \ (init t)" by (rule AssertAllState''Induct) (auto simp add: init_tableau_id assert_bound_nolhs_tableau_id) lemma AssertAllState''_sat: "\ t \ \ \ (assert_bound_loop ats (init t)) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound_loop ats (init t)) \ \ (assert_bound_loop ats (init t))" by (rule AssertAllState''Induct) (auto simp add: init_unsat_flag init_satisfies satisfies_consistent satisfies_satisfies_no_lhs assert_bound_nolhs_sat) lemma AssertAllState''_unsat: "\ t \ \ (assert_bound_loop ats (init t)) \ minimal_unsat_state_core (assert_bound_loop ats (init t))" by (rule AssertAllState''Induct) (auto simp add: init_tableau_id assert_bound_nolhs_unsat init_unsat_flag) lemma AssertAllState''_sat_atoms_equiv_bounds: "\ t \ \ \ (assert_bound_loop ats (init t)) \ flat (set ats) \ \ (assert_bound_loop ats (init t))" using AssertAllState''_precond using assert_bound_nolhs_atoms_equiv_bounds init_atoms_equiv_bounds by (induct ats rule: rev_induct) (auto simp add: Let_def assert_bound_loop_def) lemma AssertAllState''_atoms_imply_bounds_index: assumes "\ t" shows "set ats \\<^sub>i \\ (assert_bound_loop ats (init t))" proof (induct ats rule: rev_induct) case Nil then show ?case unfolding assert_bound_loop_def using init_atoms_imply_bounds_index assms by simp next case (snoc a ats') let ?s = "assert_bound_loop ats' (init t)" show ?case proof (cases "\ ?s") case True then show ?thesis using snoc atoms_imply_bounds_index_mono[of "set ats'" "set (ats' @ [a])"] unfolding assert_bound_loop_def by auto next case False then have id: "assert_bound_loop (ats' @ [a]) (init t) = assert_bound a ?s" unfolding assert_bound_loop_def by auto from snoc have ats: "set ats' \\<^sub>i \\ ?s" by auto from AssertAllState''_precond[of t ats', OF assms, unfolded Let_def] False have *: "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s" "\ (\ ?s)" "\ ?s" "\ ?s" by auto show ?thesis unfolding id using assert_bound_nolhs_atoms_imply_bounds_index[OF False * ats, of a] by auto qed qed lemma AssertAllState''_index_valid: "\ t \ index_valid (set ats) (assert_bound_loop ats (init t))" by (rule AssertAllState''Induct, auto simp: init_index_valid index_valid_mono assert_bound_nolhs_index_valid) end sublocale AssertAllState'' < AssertAllState assert_all_state proof fix v::"'a valuation" and t ats s' assume *: "\ t" and "assert_all_state t ats = s'" then have s': "s' = assert_all_state t ats" by simp let ?s' = "assert_bound_loop ats (init t)" show "v \\<^sub>t t = v \\<^sub>t \ s'" unfolding assert_all_state_def s' using * check_tableau_equiv[of ?s' v] AssertAllState''_tableau_id[of t ats] init_tableau_id[of t] using AssertAllState''_sat[of t ats] check_unsat_id[of ?s'] using AssertAllState''_precond[of t ats] by force show "\ \ s' \ \ s'" unfolding assert_all_state_def s' using * AssertAllState''_precond[of t ats] using check_sat check_unsat_id by (force simp add: Let_def) show "\ s' \ minimal_unsat_state_core s'" using * check_unsat check_unsat_id[of ?s'] check_bounds_id using AssertAllState''_unsat[of t ats] AssertAllState''_precond[of t ats] s' by (force simp add: Let_def satisfies_state_def) show "\ \ s' \ flat (set ats) \ \ s'" unfolding assert_all_state_def s' using * AssertAllState''_precond[of t ats] using check_bounds_id[of ?s'] check_unsat_id[of ?s'] using AssertAllState''_sat_atoms_equiv_bounds[of t ats] by (force simp add: Let_def simp: indexl_def indexu_def boundsl_def boundsu_def) show "\ s' \ set ats \\<^sub>i \\ s'" unfolding 
assert_all_state_def s' using * AssertAllState''_precond[of t ats] unfolding Let_def using check_bounds_id[of ?s'] using AssertAllState''_atoms_imply_bounds_index[of t ats] using check_unsat_id[of ?s'] by (cases "\ ?s'") (auto simp add: Let_def simp: indexl_def indexu_def boundsl_def boundsu_def) show "index_valid (set ats) s'" unfolding assert_all_state_def s' using * AssertAllState''_precond[of t ats] AssertAllState''_index_valid[OF *, of ats] unfolding Let_def using check_tableau_index_valid[of ?s'] using check_unsat_id[of ?s'] by (cases "\ ?s'", auto) show "indices_state s' \ fst ` set ats" by (intro index_valid_indices_state, fact) qed subsection\Update and Pivot\ text\Both \assert_bound\ and \check\ need to update the valuation so that the tableau remains satisfied. If the value for a variable not on the lhs of the tableau is changed, this can be done rather easily (once the value of that variable is changed, one should recalculate and change the values for all lhs variables of the tableau). The \update\ function does this, and it is specified by:\ locale Update = fixes update::"var \ 'a::lrv \ ('i,'a) state \ ('i,'a) state" assumes \ \Tableau, bounds, and the unsatisfiability flag are preserved.\ update_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ let s' = update x c s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" and \ \Tableau remains valuated.\ update_tableau_valuated: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \ (update x v s)" and \ \The given variable @{text "x"} in the updated valuation is set to the given value @{text "v"} while all other variables (except those on the lhs of the tableau) are unchanged.\ update_valuation_nonlhs: "\\ (\ s); \ s; x \ lvars (\ s)\ \ x' \ lvars (\ s) \ look (\ (update x v s)) x' = (if x = x' then Some v else look (\ s) x')" and \ \Updated valuation continues to satisfy the tableau.\ update_satisfies_tableau: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \\ s\ \\<^sub>t \ s \ \\ (update x c s)\ \\<^sub>t \ s" begin lemma update_bounds_id: assumes "\ (\ s)" "\ s" "x \ lvars (\ s)" shows "\\<^sub>i (update x c s) = \\<^sub>i s" "\\ (update x c s) = \\ s" "\\<^sub>l (update x c s) = \\<^sub>l s" "\\<^sub>u (update x c s) = \\<^sub>u s" using update_id assms by (auto simp add: Let_def simp: indexl_def indexu_def boundsl_def boundsu_def) lemma update_indices_state_id: assumes "\ (\ s)" "\ s" "x \ lvars (\ s)" shows "indices_state (update x c s) = indices_state s" using update_bounds_id[OF assms] unfolding indices_state_def by auto lemma update_tableau_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \ (update x c s) = \ s" using update_id by (auto simp add: Let_def) lemma update_unsat_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \ (update x c s) = \ s" using update_id by (auto simp add: Let_def) lemma update_unsat_core_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \\<^sub>c (update x c s) = \\<^sub>c s" using update_id by (auto simp add: Let_def) definition assert_bound' where [simp]: "assert_bound' dir i x c s \ (if (\\<^sub>u\<^sub>b (lt dir)) c (UB dir s x) then s else let s' = update\\ (UBI_upd dir) i x c s in if (\\<^sub>l\<^sub>b (lt dir)) c ((LB dir) s x) then set_unsat [i, ((LI dir) s x)] s' else if x \ lvars (\ s') \ (lt dir) c (\\ s\ x) then update x c s' else s')" fun assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" where "assert_bound (i,Leq x c) s = assert_bound' Positive i x c s" | "assert_bound (i,Geq x c) s = assert_bound' Negative i x c s" lemma assert_bound'_cases: assumes "\\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \ 
P s" assumes "\\ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ \ P (set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s))" assumes "\x \ lvars (\ s); (lt dir) c (\\ s\ x); \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x))\ \ P (update x c (update\\ (UBI_upd dir) i x c s))" assumes "\\ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)); x \ lvars (\ s)\ \ P (update\\ (UBI_upd dir) i x c s)" assumes "\\ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)); \ ((lt dir) c (\\ s\ x))\ \ P (update\\ (UBI_upd dir) i x c s)" assumes "dir = Positive \ dir = Negative" shows "P (assert_bound' dir i x c s)" proof (cases "\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)") case True then show ?thesis using assms(1) by simp next case False show ?thesis proof (cases "\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)") case True then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ using assms(2) by simp next case False let ?s = "update\\ (UBI_upd dir) i x c s" show ?thesis proof (cases "x \ lvars (\ ?s) \ (lt dir) c (\\ s\ x)") case True then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \\ \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ using assms(3) assms(6) by auto next case False then have "x \ lvars (\ ?s) \ \ ((lt dir) c (\\ s\ x))" by simp then show ?thesis proof assume "x \ lvars (\ ?s)" then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \\ \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ using assms(4) assms(6) by auto next assume "\ (lt dir) c (\\ s\ x)" then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \\ \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ using assms(5) assms(6) by simp qed qed qed qed lemma assert_bound_cases: assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) s" assumes "\ c x dir. \dir = Positive \ dir = Negative; a = LE dir x c; \ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x); \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s))" assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; x \ lvars (\ s); (lt dir) c (\\ s\ x); \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (update x c ((update\\ (UBI_upd dir) i x c s)))" assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; x \ lvars (\ s); \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) ((update\\ (UBI_upd dir) i x c s))" assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)); \ ((lt dir) c (\\ s\ x)) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) ((update\\ (UBI_upd dir) i x c s))" assumes "\ s. 
P s = P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s" assumes "\ s. P s = P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s" shows "P (assert_bound (i,a) s)" proof (cases a) case (Leq x c) then show ?thesis apply (simp del: assert_bound'_def) apply (rule assert_bound'_cases, simp_all) using assms(1)[of Positive x c] using assms(2)[of Positive x c] using assms(3)[of Positive x c] using assms(4)[of Positive x c] using assms(5)[of Positive x c] using assms(7) by auto next case (Geq x c) then show ?thesis apply (simp del: assert_bound'_def) apply (rule assert_bound'_cases) using assms(1)[of Negative x c] using assms(2)[of Negative x c] using assms(3)[of Negative x c] using assms(4)[of Negative x c] using assms(5)[of Negative x c] using assms(6) by auto qed end lemma set_unsat_bounds_id: "\ (set_unsat I s) = \ s" unfolding boundsl_def boundsu_def by auto lemma decrease_ub_satisfied_inverse: assumes lt: "\\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" and dir: "dir = Positive \ dir = Negative" assumes v: "v \\<^sub>b \ (update\\ (UBI_upd dir) i x c s)" shows "v \\<^sub>b \ s" unfolding satisfies_bounds.simps proof fix x' show "in_bounds x' v (\ s)" proof (cases "x = x'") case False then show ?thesis using v dir unfolding satisfies_bounds.simps by (auto split: if_splits simp: boundsl_def boundsu_def) next case True then show ?thesis using v dir unfolding satisfies_bounds.simps using lt by (erule_tac x="x'" in allE) (auto simp add: lt_ubound_def[THEN sym] gt_lbound_def[THEN sym] compare_strict_nonstrict boundsl_def boundsu_def) qed qed lemma atoms_equiv_bounds_extend: fixes x c dir assumes "dir = Positive \ dir = Negative" "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "ats \ \ s" shows "(ats \ {LE dir x c}) \ \ (update\\ (UBI_upd dir) i x c s)" unfolding atoms_equiv_bounds.simps proof fix v let ?s = "update\\ (UBI_upd dir) i x c s" show "v \\<^sub>a\<^sub>s (ats \ {LE dir x c}) = v \\<^sub>b \ ?s" proof assume "v \\<^sub>a\<^sub>s (ats \ {LE dir x c})" then have "v \\<^sub>a\<^sub>s ats" "le (lt dir) (v x) c" using \dir = Positive \ dir = Negative\ unfolding satisfies_atom_set_def by auto show "v \\<^sub>b \ ?s" unfolding satisfies_bounds.simps proof fix x' show "in_bounds x' v (\ ?s)" using \v \\<^sub>a\<^sub>s ats\ \le (lt dir) (v x) c\ \ats \ \ s\ using \dir = Positive \ dir = Negative\ unfolding atoms_equiv_bounds.simps satisfies_bounds.simps by (cases "x = x'") (auto simp: boundsl_def boundsu_def) qed next assume "v \\<^sub>b \ ?s" then have "v \\<^sub>b \ s" using \\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)\ using \dir = Positive \ dir = Negative\ using decrease_ub_satisfied_inverse[of dir c s x v] using neg_bounds_compare(1)[of c "\\<^sub>u s x"] using neg_bounds_compare(5)[of c "\\<^sub>l s x"] by (auto simp add: lt_ubound_def[THEN sym] ge_ubound_def[THEN sym] le_lbound_def[THEN sym] gt_lbound_def[THEN sym]) show "v \\<^sub>a\<^sub>s (ats \ {LE dir x c})" unfolding satisfies_atom_set_def proof fix a assume "a \ ats \ {LE dir x c}" then show "v \\<^sub>a a" proof assume "a \ {LE dir x c}" then show ?thesis using \v \\<^sub>b \ ?s\ using \dir = Positive \ dir = Negative\ unfolding satisfies_bounds.simps by (auto split: if_splits simp: boundsl_def boundsu_def) next assume "a \ ats" then show ?thesis using \ats \ \ s\ using \v \\<^sub>b \ s\ unfolding atoms_equiv_bounds.simps satisfies_atom_set_def by auto qed qed qed qed lemma bounds_updates: "\\<^sub>l 
(\\<^sub>i\<^sub>u_update u s) = \\<^sub>l s" "\\<^sub>u (\\<^sub>i\<^sub>l_update u s) = \\<^sub>u s" "\\<^sub>u (\\<^sub>i\<^sub>u_update (upd x (i,c)) s) = (\\<^sub>u s) (x \ c)" "\\<^sub>l (\\<^sub>i\<^sub>l_update (upd x (i,c)) s) = (\\<^sub>l s) (x \ c)" by (auto simp: boundsl_def boundsu_def) locale EqForLVar = fixes eq_idx_for_lvar :: "tableau \ var \ nat" assumes eq_idx_for_lvar: "\x \ lvars t\ \ eq_idx_for_lvar t x < length t \ lhs (t ! eq_idx_for_lvar t x) = x" begin definition eq_for_lvar :: "tableau \ var \ eq" where "eq_for_lvar t v \ t ! (eq_idx_for_lvar t v)" lemma eq_for_lvar: "\x \ lvars t\ \ eq_for_lvar t x \ set t \ lhs (eq_for_lvar t x) = x" unfolding eq_for_lvar_def using eq_idx_for_lvar by auto abbreviation rvars_of_lvar where "rvars_of_lvar t x \ rvars_eq (eq_for_lvar t x)" lemma rvars_of_lvar_rvars: assumes "x \ lvars t" shows "rvars_of_lvar t x \ rvars t" using assms eq_for_lvar[of x t] unfolding rvars_def by auto end text \Updating changes the value of \x\ and then updates values of all lhs variables so that the tableau remains satisfied. This can be based on a function that recalculates rhs polynomial values in the changed valuation:\ locale RhsEqVal = fixes rhs_eq_val::"(var, 'a::lrv) mapping \ var \ 'a \ eq \ 'a" \ \@{text rhs_eq_val} computes the value of the rhs of @{text e} in @{text "\v\(x := c)"}.\ assumes rhs_eq_val: "\v\ \\<^sub>e e \ rhs_eq_val v x c e = rhs e \ \v\ (x := c) \" begin text\\noindent Then, the next implementation of \update\ satisfies its specification:\ abbreviation update_eq where "update_eq v x c v' e \ upd (lhs e) (rhs_eq_val v x c e) v'" definition update :: "var \ 'a \ ('i,'a) state \ ('i,'a) state" where "update x c s \ \_update (upd x c (foldl (update_eq (\ s) x c) (\ s) (\ s))) s" lemma update_no_set_none: shows "look (\ s) y \ None \ look (foldl (update_eq (\ s) x v) (\ s) t) y \ None" by (induct t rule: rev_induct, auto simp: lookup_update') lemma update_no_left: assumes "y \ lvars t" shows "look (\ s) y = look (foldl (update_eq (\ s) x v) (\ s) t) y" using assms by (induct t rule: rev_induct) (auto simp add: lvars_def lookup_update') lemma update_left: assumes "y \ lvars t" shows "\ eq \ set t. 
lhs eq = y \ look (foldl (update_eq (\ s) x v) (\ s) t) y = Some (rhs_eq_val (\ s) x v eq)" using assms by (induct t rule: rev_induct) (auto simp add: lvars_def lookup_update') lemma update_valuate_rhs: assumes "e \ set (\ s)" "\ (\ s)" shows "rhs e \ \\ (update x c s)\ \ = rhs e \ \\ s\ (x := c) \" proof (rule valuate_depend, safe) fix y assume "y \ rvars_eq e" then have "y \ lvars (\ s)" using \\ (\ s)\ \e \ set (\ s)\ by (auto simp add: normalized_tableau_def rvars_def) then show "\\ (update x c s)\ y = (\\ s\(x := c)) y" using update_no_left[of y "\ s" s x c] by (auto simp add: update_def map2fun_def lookup_update') qed end sublocale RhsEqVal < Update update proof fix s::"('i,'a) state" and x c show "let s' = update x c s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" by (simp add: Let_def update_def add: boundsl_def boundsu_def indexl_def indexu_def) next fix s::"('i,'a) state" and x c assume "\ (\ s)" "\ s" "x \ lvars (\ s)" then show "\ (update x c s)" using update_no_set_none[of s] by (simp add: Let_def update_def tableau_valuated_def lookup_update') next fix s::"('i,'a) state" and x x' c assume "\ (\ s)" "\ s" "x \ lvars (\ s)" show "x' \ lvars (\ s) \ look (\ (update x c s)) x' = (if x = x' then Some c else look (\ s) x')" using update_no_left[of x' "\ s" s x c] unfolding update_def lvars_def Let_def by (auto simp: lookup_update') next fix s::"('i,'a) state" and x c assume "\ (\ s)" "\ s" "x \ lvars (\ s)" have "\\ s\ \\<^sub>t \ s \ \e \ set (\ s). \\ (update x c s)\ \\<^sub>e e" proof fix e assume "e \ set (\ s)" "\\ s\ \\<^sub>t \ s" then have "\\ s\ \\<^sub>e e" by (simp add: satisfies_tableau_def) have "x \ lhs e" using \x \ lvars (\ s)\ \e \ set (\ s)\ by (auto simp add: lvars_def) then have "\\ (update x c s)\ (lhs e) = rhs_eq_val (\ s) x c e" using update_left[of "lhs e" "\ s" s x c] \e \ set (\ s)\ \\ (\ s)\ by (auto simp add: lvars_def lookup_update' update_def Let_def map2fun_def normalized_tableau_def distinct_map inj_on_def) then show "\\ (update x c s)\ \\<^sub>e e" using \\\ s\ \\<^sub>e e\ \e \ set (\ s)\ \x \ lvars (\ s)\ \\ (\ s)\ using rhs_eq_val by (simp add: satisfies_eq_def update_valuate_rhs) qed then show "\\ s\ \\<^sub>t \ s \ \\ (update x c s)\ \\<^sub>t \ s" by(simp add: satisfies_tableau_def update_def) qed text\To update the valuation for a variable that is on the lhs of the tableau it should first be swapped with some rhs variable of its equation, in an operation called \emph{pivoting}. Pivoting has the precondition that the tableau is normalized and that it is always called for a lhs variable of the tableau, and a rhs variable in the equation with that lhs variable. 
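(As a small worked instance, not part of the formal development: if the tableau contains the equation x\<^sub>i = 2*x\<^sub>j + x\<^sub>k and the value of the lhs variable x\<^sub>i has to be changed, the equation is first pivoted at one of its rhs variables, say x\<^sub>j, i.e., solved for x\<^sub>j, giving x\<^sub>j = 1/2*x\<^sub>i - 1/2*x\<^sub>k; every other occurrence of x\<^sub>j in the tableau is then substituted by this right-hand side, so x\<^sub>j becomes a lhs variable, x\<^sub>i becomes a rhs variable, and x\<^sub>i can afterwards be updated like any other rhs variable.)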
The set of rhs variables for the given lhs variable is found using the \rvars_of_lvar\ function (specified in a very simple locale \EqForLVar\, that we do not print).\ locale Pivot = EqForLVar + fixes pivot::"var \ var \ ('i,'a::lrv) state \ ('i,'a) state" assumes \ \Valuation, bounds, and the unsatisfiability flag are not changed.\ pivot_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ let s' = pivot x\<^sub>i x\<^sub>j s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" and \ \The tableau remains equivalent to the previous one and normalized.\ pivot_tableau: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ let s' = pivot x\<^sub>i x\<^sub>j s in ((v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ s') \ \ (\ s') " and \ \@{text "x\<^sub>i"} and @{text "x\<^sub>j"} are swapped, while the other variables do not change sides.\ pivot_vars': "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ let s' = pivot x\<^sub>i x\<^sub>j s in rvars(\ s') = rvars(\ s)-{x\<^sub>j}\{x\<^sub>i} \ lvars(\ s') = lvars(\ s)-{x\<^sub>i}\{x\<^sub>j}" begin lemma pivot_bounds_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>i (pivot x\<^sub>i x\<^sub>j s) = \\<^sub>i s" using pivot_id by (simp add: Let_def) lemma pivot_bounds_id': assumes "\ (\ s)" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\\ (pivot x\<^sub>i x\<^sub>j s) = \\ s" "\ (pivot x\<^sub>i x\<^sub>j s) = \ s" "\ (pivot x\<^sub>i x\<^sub>j s) = \ s" using pivot_bounds_id[OF assms] by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma pivot_valuation_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot x\<^sub>i x\<^sub>j s) = \ s" using pivot_id by (simp add: Let_def) lemma pivot_unsat_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot x\<^sub>i x\<^sub>j s) = \ s" using pivot_id by (simp add: Let_def) lemma pivot_unsat_core_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>c (pivot x\<^sub>i x\<^sub>j s) = \\<^sub>c s" using pivot_id by (simp add: Let_def) lemma pivot_tableau_equiv: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ (v::'a valuation) \\<^sub>t \ s = v \\<^sub>t \ (pivot x\<^sub>i x\<^sub>j s)" using pivot_tableau by (simp add: Let_def) lemma pivot_tableau_normalized: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (\ (pivot x\<^sub>i x\<^sub>j s))" using pivot_tableau by (simp add: Let_def) lemma pivot_rvars: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ rvars (\ (pivot x\<^sub>i x\<^sub>j s)) = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" using pivot_vars' by (simp add: Let_def) lemma pivot_lvars: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ lvars (\ (pivot x\<^sub>i x\<^sub>j s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" using pivot_vars' by (simp add: Let_def) lemma pivot_vars: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ tvars (\ (pivot x\<^sub>i x\<^sub>j s)) = tvars (\ s) " using pivot_lvars[of s x\<^sub>i x\<^sub>j] pivot_rvars[of s x\<^sub>i x\<^sub>j] using rvars_of_lvar_rvars[of x\<^sub>i "\ s"] by auto lemma pivot_tableau_valuated: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i; \ s\ \ \ 
(pivot x\<^sub>i x\<^sub>j s)" using pivot_valuation_id pivot_vars by (auto simp add: tableau_valuated_def) end text\Functions \pivot\ and \update\ can be used to implement the \check\ function. In its context, \pivot\ and \update\ functions are always called together, so the following definition can be used: @{prop "pivot_and_update x\<^sub>i x\<^sub>j c s = update x\<^sub>i c (pivot x\<^sub>i x\<^sub>j s)"}. It is possible to make a more efficient implementation of \pivot_and_update\ that does not use separate implementations of \pivot\ and \update\. To allow this, a separate specification for \pivot_and_update\ can be given. It can be easily shown that the \pivot_and_update\ definition above satisfies this specification.\ locale PivotAndUpdate = EqForLVar + fixes pivot_and_update :: "var \ var \ 'a::lrv \ ('i,'a) state \ ('i,'a) state" assumes pivotandupdate_unsat_id: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" assumes pivotandupdate_unsat_core_id: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>c (pivot_and_update x\<^sub>i x\<^sub>j c s) = \\<^sub>c s" assumes pivotandupdate_bounds_id: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>i (pivot_and_update x\<^sub>i x\<^sub>j c s) = \\<^sub>i s" assumes pivotandupdate_tableau_normalized: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (\ (pivot_and_update x\<^sub>i x\<^sub>j c s))" assumes pivotandupdate_tableau_equiv: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ (v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ (pivot_and_update x\<^sub>i x\<^sub>j c s)" assumes pivotandupdate_satisfies_tableau: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\ s\ \\<^sub>t \ s \ \\ (pivot_and_update x\<^sub>i x\<^sub>j c s)\ \\<^sub>t \ s" assumes pivotandupdate_rvars: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ rvars (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" assumes pivotandupdate_lvars: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ lvars (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" assumes pivotandupdate_valuation_nonlhs: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ x \ lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j} \ look (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) x = (if x = x\<^sub>i then Some c else look (\ s) x)" assumes pivotandupdate_tableau_valuated: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot_and_update x\<^sub>i x\<^sub>j c s)" begin lemma pivotandupdate_bounds_id': assumes "\ (\ s)" "\ s" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \\ s" "\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" "\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" using pivotandupdate_bounds_id[OF assms] by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma pivotandupdate_valuation_xi: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ look (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) x\<^sub>i = Some c" using pivotandupdate_valuation_nonlhs[of s x\<^sub>i x\<^sub>j x\<^sub>i 
c] using rvars_of_lvar_rvars by (auto simp add: normalized_tableau_def) lemma pivotandupdate_valuation_other_nolhs: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i; x \ lvars (\ s); x \ x\<^sub>j\ \ look (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) x = look (\ s) x" using pivotandupdate_valuation_nonlhs[of s x\<^sub>i x\<^sub>j x c] by auto lemma pivotandupdate_nolhs: "\ \ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \\<^sub>l s x\<^sub>i = Some c \ \\<^sub>u s x\<^sub>i = Some c\ \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (pivot_and_update x\<^sub>i x\<^sub>j c s)" using pivotandupdate_satisfies_tableau[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_tableau_equiv[of s x\<^sub>i x\<^sub>j _ c] using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j _ c] using pivotandupdate_lvars[of s x\<^sub>i x\<^sub>j c] by (auto simp add: curr_val_satisfies_no_lhs_def satisfies_bounds.simps satisfies_bounds_set.simps bounds_consistent_geq_lb bounds_consistent_leq_ub map2fun_def pivotandupdate_bounds_id') lemma pivotandupdate_bounds_consistent: assumes "\ (\ s)" "\ s" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" using assms pivotandupdate_bounds_id'[of s x\<^sub>i x\<^sub>j c] by (simp add: bounds_consistent_def) end locale PivotUpdate = Pivot eq_idx_for_lvar pivot + Update update for eq_idx_for_lvar :: "tableau \ var \ nat" and pivot :: "var \ var \ ('i,'a::lrv) state \ ('i,'a) state" and update :: "var \ 'a \ ('i,'a) state \ ('i,'a) state" begin definition pivot_and_update :: "var \ var \ 'a \ ('i,'a) state \ ('i,'a) state" where [simp]: "pivot_and_update x\<^sub>i x\<^sub>j c s \ update x\<^sub>i c (pivot x\<^sub>i x\<^sub>j s)" lemma pivot_update_precond: assumes "\ (\ s)" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\ (\ (pivot x\<^sub>i x\<^sub>j s))" "x\<^sub>i \ lvars (\ (pivot x\<^sub>i x\<^sub>j s))" proof- from assms have "x\<^sub>i \ x\<^sub>j" using rvars_of_lvar_rvars[of x\<^sub>i "\ s"] by (auto simp add: normalized_tableau_def) then show "\ (\ (pivot x\<^sub>i x\<^sub>j s))" "x\<^sub>i \ lvars (\ (pivot x\<^sub>i x\<^sub>j s))" using assms using pivot_tableau_normalized[of s x\<^sub>i x\<^sub>j] using pivot_lvars[of s x\<^sub>i x\<^sub>j] by auto qed end sublocale PivotUpdate < PivotAndUpdate eq_idx_for_lvar pivot_and_update using pivot_update_precond using update_unsat_id pivot_unsat_id pivot_unsat_core_id update_bounds_id pivot_bounds_id update_tableau_id pivot_tableau_normalized pivot_tableau_equiv update_satisfies_tableau pivot_valuation_id pivot_lvars pivot_rvars update_valuation_nonlhs update_valuation_nonlhs pivot_tableau_valuated update_tableau_valuated update_unsat_core_id by (unfold_locales, auto) text\Given the @{term update} function, \assert_bound\ can be implemented as follows. \vspace{-2mm} @{text[display] "assert_bound (Leq x c) s \ if c \\<^sub>u\<^sub>b \\<^sub>u s x then s else let s' = s \ \\<^sub>u := (\\<^sub>u s) (x := Some c) \ in if c <\<^sub>l\<^sub>b \\<^sub>l s x then s' \ \ := True \ else if x \ lvars (\ s') \ c < \\ s\ x then update x c s' else s'" } \vspace{-2mm} \noindent The case of \Geq x c\ atoms is analogous (a systematic way to avoid symmetries is discussed in Section \ref{sec:symmetries}). 
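To make the case distinction of the displayed definition concrete, the following is a small executable sketch of the Leq case in Python. It is an editorial illustration only and not part of the formal development: the dictionary-based state (with invented keys 'val', 'upper', 'lower', 'lvars', 'tableau'), the helper update below, and the omission of bound indices and unsat cores are all simplified stand-ins for the abstract locale operations.

    # Illustrative sketch; all names are invented stand-ins, not the formal definitions.
    def update(x, c, s):
        # Set the non-basic variable x to c and recompute every basic (lhs)
        # variable from its defining equation, so the tableau stays satisfied.
        # In a normalized tableau no lhs variable occurs on any rhs, so the
        # order of recomputation is irrelevant (cf. the RhsEqVal-based update).
        val = dict(s['val'])
        val[x] = c
        for lhs, rhs in s['tableau']:        # rhs maps variables to coefficients
            val[lhs] = sum(coeff * val[v] for v, coeff in rhs.items())
        return {**s, 'val': val}

    def assert_bound_leq(x, c, s):
        upper, lower = s['upper'].get(x), s['lower'].get(x)
        if upper is not None and upper <= c:
            return s                                  # new bound already implied
        s = {**s, 'upper': {**s['upper'], x: c}}      # store the new upper bound
        if lower is not None and c < lower:
            return {**s, 'unsat': True}               # contradicts the lower bound
        if x not in s['lvars'] and c < s['val'][x]:
            return update(x, c, s)                    # current value violates the new bound
        return s

The formal development only uses the abstract definition displayed above; the sketch merely traces its four cases.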
This implementation satisfies both its specifications. \ lemma indices_state_set_unsat: "indices_state (set_unsat I s) = indices_state s" by (cases s, auto simp: indices_state_def) lemma \\_set_unsat: "\\ (set_unsat I s) = \\ s" by (cases s, auto simp: boundsl_def boundsu_def indexl_def indexu_def) lemma satisfies_tableau_cong: assumes "\ x. x \ tvars t \ v x = w x" shows "(v \\<^sub>t t) = (w \\<^sub>t t)" unfolding satisfies_tableau_def satisfies_eq_def by (intro ball_cong[OF refl] arg_cong2[of _ _ _ _ "(=)"] valuate_depend, insert assms, auto simp: lvars_def rvars_def) lemma satisfying_state_valuation_to_atom_tabl: assumes J: "J \ indices_state s" and model: "(J, v) \\<^sub>i\<^sub>s\<^sub>e s" and ivalid: "index_valid as s" and dist: "distinct_indices_atoms as" shows "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as" "v \\<^sub>t \ s" unfolding i_satisfies_atom_set'.simps proof (intro ballI) from model[unfolded satisfies_state_index'.simps] have model: "v \\<^sub>t \ s" "(J, v) \\<^sub>i\<^sub>b\<^sub>e \\ s" by auto show "v \\<^sub>t \ s" by fact fix a assume "a \ restrict_to J as" then obtain i where iJ: "i \ J" and mem: "(i,a) \ as" by auto with J have "i \ indices_state s" by auto from this[unfolded indices_state_def] obtain x c where look: "look (\\<^sub>i\<^sub>l s) x = Some (i, c) \ look (\\<^sub>i\<^sub>u s) x = Some (i, c)" by auto with ivalid[unfolded index_valid_def] obtain b where "(i,b) \ as" "atom_var b = x" "atom_const b = c" by force with dist[unfolded distinct_indices_atoms_def, rule_format, OF this(1) mem] have a: "atom_var a = x" "atom_const a = c" by auto from model(2)[unfolded satisfies_bounds_index'.simps] look iJ have "v x = c" by (auto simp: boundsu_def boundsl_def indexu_def indexl_def) thus "v \\<^sub>a\<^sub>e a" unfolding satisfies_atom'_def a . qed text \Note that in order to ensure minimality of the unsat cores, pivoting is required.\ sublocale AssertAllState < AssertAll assert_all proof fix t as v I assume D: "\ t" from D show "assert_all t as = Sat v \ \v\ \\<^sub>t t \ \v\ \\<^sub>a\<^sub>s flat (set as)" unfolding Let_def assert_all_def using assert_all_state_tableau_equiv[OF D refl] using assert_all_state_sat[OF D refl] using assert_all_state_sat_atoms_equiv_bounds[OF D refl, of as] unfolding atoms_equiv_bounds.simps curr_val_satisfies_state_def satisfies_state_def satisfies_atom_set_def by (auto simp: Let_def split: if_splits) let ?s = "assert_all_state t as" assume "assert_all t as = Unsat I" then have i: "I = the (\\<^sub>c ?s)" and U: "\ ?s" unfolding assert_all_def Let_def by (auto split: if_splits) from assert_all_index_valid[OF D refl, of as] have ivalid: "index_valid (set as) ?s" . note unsat = assert_all_state_unsat[OF D refl U, unfolded minimal_unsat_state_core_def unsat_state_core_def i[symmetric]] from unsat have "set I \ indices_state ?s" by auto also have "\ \ fst ` set as" using assert_all_state_indices[OF D refl] . finally have indices: "set I \ fst ` set as" . 
show "minimal_unsat_core_tabl_atoms (set I) t (set as)" unfolding minimal_unsat_core_tabl_atoms_def proof (intro conjI impI allI indices, clarify) fix v assume model: "v \\<^sub>t t" "(set I, v) \\<^sub>i\<^sub>a\<^sub>s set as" from unsat have no_model: "\ ((set I, v) \\<^sub>i\<^sub>s ?s)" by auto from assert_all_state_unsat_atoms_equiv_bounds[OF D refl U] have equiv: "set as \\<^sub>i \\ ?s" by auto from assert_all_state_tableau_equiv[OF D refl, of v] model have model_t: "v \\<^sub>t \ ?s" by auto have model_as': "(set I, v) \\<^sub>i\<^sub>a\<^sub>s set as" using model(2) by (auto simp: satisfies_atom_set_def) with equiv model_t have "(set I, v) \\<^sub>i\<^sub>s ?s" unfolding satisfies_state_index.simps atoms_imply_bounds_index.simps by simp with no_model show False by simp next fix J assume dist: "distinct_indices_atoms (set as)" and J: "J \ set I" from J unsat[unfolded subsets_sat_core_def, folded i] have J': "J \ indices_state ?s" by auto from index_valid_distinct_indices[OF ivalid dist] J unsat[unfolded subsets_sat_core_def, folded i] obtain v where model: "(J, v) \\<^sub>i\<^sub>s\<^sub>e ?s" by blast have "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s set as" "v \\<^sub>t t" using satisfying_state_valuation_to_atom_tabl[OF J' model ivalid dist] assert_all_state_tableau_equiv[OF D refl] by auto then show "\ v. v \\<^sub>t t \ (J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s set as" by blast qed qed lemma (in Update) update_to_assert_bound_no_lhs: assumes pivot: "Pivot eqlvar (pivot :: var \ var \ ('i,'a) state \ ('i,'a) state)" shows "AssertBoundNoLhs assert_bound" proof fix s::"('i,'a) state" and a assume "\ \ s" "\ (\ s)" "\ s" then show "\ (assert_bound a s) = \ s" by (cases a, cases "snd a") (auto simp add: Let_def update_tableau_id tableau_valuated_def) next fix s::"('i,'a) state" and ia and as assume *: "\ \ s" "\ (\ s)" "\ s" and **: "\ (assert_bound ia s)" and index: "index_valid as s" and consistent: "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" obtain i a where ia: "ia = (i,a)" by force let ?modelU = "\ lt UB UI s v x c i. UB s x = Some c \ UI s x = i \ i \ set (the (\\<^sub>c s)) \ (lt (v x) c \ v x = c)" let ?modelL = "\ lt LB LI s v x c i. LB s x = Some c \ LI s x = i \ i \ set (the (\\<^sub>c s)) \ (lt c (v x) \ c = v x)" let ?modelIU = "\ I lt UB UI s v x c i. UB s x = Some c \ UI s x = i \ i \ I \ (v x = c)" let ?modelIL = "\ I lt LB LI s v x c i. LB s x = Some c \ LI s x = i \ i \ I \ (v x = c)" let ?P' = "\ lt UBI LBI UB LB UBI_upd UI LI LE GE s. \ s \ (set (the (\\<^sub>c s)) \ indices_state s \ \ (\v. (v \\<^sub>t \ s \ (\ x c i. ?modelU lt UB UI s v x c i) \ (\ x c i. ?modelL lt LB LI s v x c i)))) \ (distinct_indices_state s \ (\ I. I \ set (the (\\<^sub>c s)) \ (\ v. v \\<^sub>t \ s \ (\ x c i. ?modelIU I lt UB UI s v x c i) \ (\ x c i. ?modelIL I lt LB LI s v x c i))))" have "\ (assert_bound ia s) \ (unsat_state_core (assert_bound ia s) \ (distinct_indices_state (assert_bound ia s) \ subsets_sat_core (assert_bound ia s)))" (is "?P (assert_bound ia s)") unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix s' :: "('i,'a) state" have id: "((x :: 'a) < y \ x = y) \ x \ y" "((x :: 'a) > y \ x = y) \ x \ y" for x y by auto have id': "?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u undefined \\<^sub>l \\<^sub>u Geq Leq s' = ?P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l undefined \\<^sub>u \\<^sub>l Leq Geq s'" by (intro arg_cong[of _ _ "\ y. _ \ y"] arg_cong[of _ _ "\ x. 
_ \ x"], intro arg_cong2[of _ _ _ _ "(\)"] arg_cong[of _ _ "\ y. _ \ y"] arg_cong[of _ _ "\ y. \ x \ set (the (\\<^sub>c s')). y x"] ext arg_cong[of _ _ Not], unfold id, auto) show "?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u undefined \\<^sub>l \\<^sub>u Geq Leq s'" unfolding satisfies_state_def satisfies_bounds_index.simps satisfies_bounds.simps in_bounds.simps unsat_state_core_def satisfies_state_index.simps subsets_sat_core_def satisfies_state_index'.simps satisfies_bounds_index'.simps unfolding bound_compare''_defs id by ((intro arg_cong[of _ _ "\ x. _ \ x"] arg_cong[of _ _ "\ x. _ \ x"], intro arg_cong2[of _ _ _ _ "(\)"] refl arg_cong[of _ _ "\ x. _ \ x"] arg_cong[of _ _ Not] arg_cong[of _ _ "\ y. \ x \ set (the (\\<^sub>c s')). y x"] ext; intro arg_cong[of _ _ Ex] ext), auto) then show "?P s' = ?P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l undefined \\<^sub>u \\<^sub>l Leq Geq s'" unfolding id' . next fix c::'a and x::nat and dir assume "\\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" and dir: "dir = Positive \ dir = Negative" then obtain d where some: "LB dir s x = Some d" and lt: "lt dir c d" by (auto simp: bound_compare'_defs split: option.splits) from index[unfolded index_valid_def, rule_format, of x _ d] some dir obtain j where ind: "LI dir s x = j" "look (LBI dir s) x = Some (j,d)" and ge: "(j, GE dir x d) \ as" by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) let ?s = "set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s)" let ?ss = "update\\ (UBI_upd dir) i x c s" show "?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) ?s" proof (intro conjI impI allI, goal_cases) case 1 thus ?case using dir ind ge lt some by (force simp: indices_state_def split: if_splits) next case 2 { fix v assume vU: "\ x c i. ?modelU (lt dir) (UB dir) (UI dir) ?s v x c i" assume vL: "\ x c i. ?modelL (lt dir) (LB dir) (LI dir) ?s v x c i" from dir have "UB dir ?s x = Some c" "UI dir ?s x = i" by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) from vU[rule_format, OF this] have vx_le_c: "lt dir (v x) c \ v x = c" by auto from dir ind some have *: "LB dir ?s x = Some d" "LI dir ?s x = j" by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) have d_le_vx: "lt dir d (v x) \ d = v x" by (intro vL[rule_format, OF *], insert some ind, auto) from dir d_le_vx vx_le_c lt have False by auto } thus ?case by blast next case (3 I) then obtain j where I: "I \ {j}" by (auto split: if_splits) from 3 have dist: "distinct_indices_state ?ss" unfolding distinct_indices_state_def by auto have id1: "UB dir ?s y = UB dir ?ss y" "LB dir ?s y = LB dir ?ss y" "UI dir ?s y = UI dir ?ss y" "LI dir ?s y = LI dir ?ss y" "\ ?s = \ s" "set (the (\\<^sub>c ?s)) = {i,LI dir s x}" for y using dir by (auto simp: boundsu_def boundsl_def indexu_def indexl_def) from I have id: "(\ k. P1 k \ P2 k \ k \ I \ Q k) \ (I = {} \ (P1 j \ P2 j \ Q j))" for P1 P2 Q by auto have id2: "(UB dir s xa = Some ca \ UI dir s xa = j \ P) = (look (UBI dir s) xa = Some (j,ca) \ P)" "(LB dir s xa = Some ca \ LI dir s xa = j \ P) = (look (LBI dir s) xa = Some (j,ca) \ P)" for xa ca P s using dir by (auto simp: boundsu_def indexu_def boundsl_def indexl_def) have "\v. v \\<^sub>t \ s \ (\xa ca ia. UB dir ?ss xa = Some ca \ UI dir ?ss xa = ia \ ia \ I \ v xa = ca) \ (\xa ca ia. LB dir ?ss xa = Some ca \ LI dir ?ss xa = ia \ ia \ I \ v xa = ca)" proof (cases "\ xa ca. 
look (UBI dir ?ss) xa = Some (j,ca) \ look (LBI dir ?ss) xa = Some (j,ca)") case False thus ?thesis unfolding id id2 using consistent unfolding curr_val_satisfies_no_lhs_def by (intro exI[of _ "\\ s\"], auto) next case True from consistent have val: " \\ s\ \\<^sub>t \ s" unfolding curr_val_satisfies_no_lhs_def by auto define ss where ss: "ss = ?ss" from True obtain y b where "look (UBI dir ?ss) y = Some (j,b) \ look (LBI dir ?ss) y = Some (j,b)" by force then have id3: "(look (LBI dir ss) yy = Some (j,bb) \ look (UBI dir ss) yy = Some (j,bb)) \ (yy = y \ bb = b)" for yy bb using distinct_indices_stateD(1)[OF dist, of y j b yy bb] using dir unfolding ss[symmetric] by (auto simp: boundsu_def boundsl_def indexu_def indexl_def) have "\v. v \\<^sub>t \ s \ v y = b" proof (cases "y \ lvars (\ s)") case False let ?v = "\\ (update y b s)\" show ?thesis proof (intro exI[of _ ?v] conjI) from update_satisfies_tableau[OF *(2,3) False] val show "?v \\<^sub>t \ s" by simp from update_valuation_nonlhs[OF *(2,3) False, of y b] False show "?v y = b" by (simp add: map2fun_def') qed next case True from *(2)[unfolded normalized_tableau_def] have zero: "0 \ rhs ` set (\ s)" by auto interpret Pivot eqlvar pivot by fact interpret PivotUpdate eqlvar pivot update .. let ?eq = "eq_for_lvar (\ s) y" from eq_for_lvar[OF True] have "?eq \ set (\ s)" "lhs ?eq = y" by auto with zero have rhs: "rhs ?eq \ 0" by force hence "rvars_eq ?eq \ {}" by (simp add: vars_empty_zero) then obtain z where z: "z \ rvars_eq ?eq" by auto let ?v = "\ (pivot_and_update y z b s)" let ?vv = "\?v\" from pivotandupdate_valuation_xi[OF *(2,3) True z] have "look ?v y = Some b" . hence vv: "?vv y = b" unfolding map2fun_def' by auto show ?thesis proof (intro exI[of _ ?vv] conjI vv) show "?vv \\<^sub>t \ s" using pivotandupdate_satisfies_tableau[OF *(2,3) True z] val by auto qed qed thus ?thesis unfolding id id2 ss[symmetric] using id3 by metis qed thus ?case unfolding id1 . qed next fix c::'a and x::nat and dir assume **: "dir = Positive \ dir = Negative" "a = LE dir x c" "x \ lvars (\ s)" "lt dir c (\\ s\ x)" "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" let ?s = "update\\ (UBI_upd dir) i x c s" show "?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (update x c ?s)" using * ** by (auto simp add: update_unsat_id tableau_valuated_def) qed (auto simp add: * update_unsat_id tableau_valuated_def) with ** show "minimal_unsat_state_core (assert_bound ia s)" by (auto simp: minimal_unsat_state_core_def) next fix s::"('i,'a) state" and ia assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" and **: "\ \ (assert_bound ia s)" (is ?lhs) obtain i a where ia: "ia = (i,a)" by force have "\\ (assert_bound ia s)\ \\<^sub>t \ (assert_bound ia s)" proof- let ?P = "\ lt UBI LBI UB LB UBI_upd UI LI LE GE s. 
\\ s\ \\<^sub>t \ s" show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P]) fix c x and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "x \ lvars (\ s)" "(lt dir) c (\\ s\ x)" "dir = Positive \ dir = Negative" then show "\\ (update x c ?s')\ \\<^sub>t \ (update x c ?s')" using * using update_satisfies_tableau[of ?s' x c] update_tableau_id by (auto simp add: curr_val_satisfies_no_lhs_def tableau_valuated_def) qed (insert *, auto simp add: curr_val_satisfies_no_lhs_def) qed moreover have "\ \ (assert_bound ia s) \ \\ (assert_bound ia s)\ \\<^sub>b \ (assert_bound ia s) \ - lvars (\ (assert_bound ia s))" (is "?P (assert_bound ia s)") proof- let ?P' = "\ lt UBI LBI UB LB UB_upd UI LI LE GE s. \ \ s \ (\x\- lvars (\ s). \\<^sub>l\<^sub>b lt (\\ s\ x) (LB s x) \ \\<^sub>u\<^sub>b lt (\\ s\ x) (UB s x))" let ?P'' = "\ dir. ?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have x: "\ s'. ?P s' = ?P' (<) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" unfolding satisfies_bounds_set.simps in_bounds.simps bound_compare_defs by (auto split: option.split) show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix dir :: "('i,'a) Direction" assume "dir = Positive \ dir = Negative" then show "?P'' dir s" using x[of s] xx[of s] \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: curr_val_satisfies_no_lhs_def) next fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "x \ lvars (\ s)" "dir = Positive \ dir = Negative" then have "?P ?s'" using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: satisfies_bounds_set.simps curr_val_satisfies_no_lhs_def boundsl_def boundsu_def indexl_def indexu_def) then show "?P'' dir ?s'" using x[of ?s'] xx[of ?s'] \dir = Positive \ dir = Negative\ by auto next fix c x and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "\ lt dir c (\\ s\ x)" "dir = Positive \ dir = Negative" then show "?P'' dir ?s'" using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: satisfies_bounds_set.simps curr_val_satisfies_no_lhs_def simp: boundsl_def boundsu_def indexl_def indexu_def) (auto simp add: bound_compare_defs) next fix c x and dir :: "('i,'a) Direction" let ?s' = "update x c (update\\ (UBI_upd dir) i x c s)" assume "x \ lvars (\ s)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" "dir = Positive \ dir = Negative" show "?P'' dir ?s'" proof (rule impI, rule ballI) fix y assume "\ \ ?s'" "y \ - lvars (\ ?s')" show "\\<^sub>l\<^sub>b (lt dir) (\\ ?s'\ y) (LB dir ?s' y) \ \\<^sub>u\<^sub>b (lt dir) (\\ ?s'\ y) (UB dir ?s' y)" proof (cases "x = y") case True then show ?thesis using \x \ lvars (\ s)\ using \y \ - lvars (\ ?s')\ using \\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)\ using \dir = Positive \ dir = Negative\ using neg_bounds_compare(7) neg_bounds_compare(3) using * by (auto simp add: update_valuation_nonlhs update_tableau_id update_bounds_id bound_compare''_defs map2fun_def tableau_valuated_def bounds_updates) (force simp add: bound_compare'_defs)+ next case False then show ?thesis using \x \ lvars (\ s)\ \y \ - lvars (\ ?s')\ using \dir = Positive \ dir = Negative\ * by (auto simp add: update_valuation_nonlhs update_tableau_id update_bounds_id bound_compare''_defs 
satisfies_bounds_set.simps curr_val_satisfies_no_lhs_def map2fun_def tableau_valuated_def bounds_updates) qed qed qed (auto simp add: x xx) qed moreover have "\ \ (assert_bound ia s) \ \ (assert_bound ia s)" (is "?P (assert_bound ia s)") proof- let ?P' = "\ lt UBI LBI UB LB UBI_upd UI LI LE GE s. \ \ s \ (\x. if LB s x = None \ UB s x = None then True else lt (the (LB s x)) (the (UB s x)) \ (the (LB s x) = the (UB s x)))" let ?P'' = "\ dir. ?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have x: "\ s'. ?P s' = ?P' (<) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" unfolding bounds_consistent_def by auto show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix dir :: "('i,'a) Direction" assume "dir = Positive \ dir = Negative" then show "?P'' dir s" using \\ s\ by (auto simp add: bounds_consistent_def) (erule_tac x=x in allE, auto)+ next fix x c and dir :: "('i,'a) Direction" let ?s' = "update x c (update\\ (UBI_upd dir) i x c s)" assume "dir = Positive \ dir = Negative" "x \ lvars (\ s)" "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" then show "?P'' dir ?s'" using \\ s\ * unfolding bounds_consistent_def by (auto simp add: update_bounds_id tableau_valuated_def bounds_updates split: if_splits) (force simp add: bound_compare'_defs, erule_tac x=xa in allE, simp)+ next fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" "dir = Positive \ dir = Negative" then have "?P'' dir ?s'" using \\ s\ unfolding bounds_consistent_def by (auto split: if_splits simp: bounds_updates) (force simp add: bound_compare'_defs, erule_tac x=xa in allE, simp)+ then show "?P'' dir ?s'" "?P'' dir ?s'" by simp_all qed (auto simp add: x xx) qed ultimately show "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound ia s) \ \ (assert_bound ia s)" using \?lhs\ unfolding curr_val_satisfies_no_lhs_def by simp next fix s :: "('i,'a) state" and ats and ia :: "('i,'a) i_atom" assume "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" obtain i a where ia: "ia = (i,a)" by force { fix ats let ?P' = "\ lt UBI LBI UB LB UB_upd UI LI LE GE s'. ats \ \ s \ (ats \ {a}) \ \ s'" let ?P'' = "\ dir. 
?P' (lt dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have "ats \ \ s \ (ats \ {a}) \ \ (assert_bound ia s)" (is "?P (assert_bound ia s)") unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix x c and dir :: "('i,'a) Direction" assume "dir = Positive \ dir = Negative" "a = LE dir x c" "\\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" then show "?P s" unfolding atoms_equiv_bounds.simps satisfies_atom_set_def satisfies_bounds.simps by auto (erule_tac x=x in allE, force simp add: bound_compare_defs)+ next fix x c and dir :: "('i,'a) Direction" let ?s' = "set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s)" assume "dir = Positive \ dir = Negative" "a = LE dir x c" "\ (\\<^sub>u\<^sub>b (lt dir) c (UB dir s x))" then show "?P ?s'" unfolding set_unsat_bounds_id using atoms_equiv_bounds_extend[of dir c s x ats i] by auto next fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "dir = Positive \ dir = Negative" "a = LE dir x c" "\ (\\<^sub>u\<^sub>b (lt dir) c (UB dir s x))" then have "?P ?s'" using atoms_equiv_bounds_extend[of dir c s x ats i] by auto then show "?P ?s'" "?P ?s'" by simp_all next fix x c and dir :: "('i,'a) Direction" let ?s = "update\\ (UBI_upd dir) i x c s" let ?s' = "update x c ?s" assume *: "dir = Positive \ dir = Negative" "a = LE dir x c" "\ (\\<^sub>u\<^sub>b (lt dir) c (UB dir s x))" "x \ lvars (\ s)" then have "\ (\ ?s)" "\ ?s" "x \ lvars (\ ?s)" using \\ (\ s)\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ \\ s\ by (auto simp: tableau_valuated_def) from update_bounds_id[OF this, of c] have "\\<^sub>i ?s' = \\<^sub>i ?s" by blast then have id: "\ ?s' = \ ?s" unfolding boundsl_def boundsu_def by auto show "?P ?s'" unfolding id \a = LE dir x c\ by (intro impI atoms_equiv_bounds_extend[rule_format] *(1,3)) qed simp_all } then show "flat ats \ \ s \ flat (ats \ {ia}) \ \ (assert_bound ia s)" unfolding ia by auto next fix s :: "('i,'a) state" and ats and ia :: "('i,'a) i_atom" obtain i a where ia: "ia = (i,a)" by force assume "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" have *: "\ dir x c s. dir = Positive \ dir = Negative \ \ (update\\ (UBI_upd dir) i x c s) = \ s" "\ s y I . \ (set_unsat I s) = \ s" by (auto simp add: tableau_valuated_def) show "\ (assert_bound ia s)" (is "?P (assert_bound ia s)") proof- let ?P' = "\ lt UBI LBI UB LB UB_upd UI LI LE GE s'. \ s'" let ?P'' = "\ dir. ?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "dir = Positive \ dir = Negative" then have "\ ?s'" using *(1)[of dir x c s] \\ s\ by simp then show "\ (set_unsat [i, ((LI dir) s x)] ?s')" using *(2) by auto next fix x c and dir :: "('i,'a) Direction" assume *: "x \ lvars (\ s)" "dir = Positive \ dir = Negative" let ?s = "update\\ (UBI_upd dir) i x c s" let ?s' = "update x c ?s" from * show "\ ?s'" using \\ (\ s)\ \\ s\ using update_tableau_valuated[of ?s x c] by (auto simp add: tableau_valuated_def) qed (insert \\ s\ *(1), auto) qed next fix s :: "('i,'a) state" and as and ia :: "('i,'a) i_atom" obtain i a where ia: "ia = (i,a)" by force assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" and valid: "index_valid as s" have id: "\ dir x c s. dir = Positive \ dir = Negative \ \ (update\\ (UBI_upd dir) i x c s) = \ s" "\ s y I. 
\ (set_unsat I s) = \ s" by (auto simp add: tableau_valuated_def) let ?I = "insert (i,a) as" define I where "I = ?I" from index_valid_mono[OF _ valid] have valid: "index_valid I s" unfolding I_def by auto have I: "(i,a) \ I" unfolding I_def by auto let ?P = "\ s. index_valid I s" let ?P' = "\ (lt :: 'a \ 'a \ bool) (UBI :: ('i,'a) state \ ('i,'a) bounds_index) (LBI :: ('i,'a) state \ ('i,'a) bounds_index) (UB :: ('i,'a) state \ 'a bounds) (LB :: ('i,'a) state \ 'a bounds) (UBI_upd :: (('i,'a) bounds_index \ ('i,'a) bounds_index) \ ('i,'a) state \ ('i,'a) state) (UI :: ('i,'a) state \ 'i bound_index) (LI :: ('i,'a) state \ 'i bound_index) LE GE s'. (\ x c i. look (UBI s') x = Some (i,c) \ (i,LE (x :: var) c) \ I) \ (\ x c i. look (LBI s') x = Some (i,c) \ (i,GE (x :: nat) c) \ I)" define P where "P = ?P'" let ?P'' = "\ (dir :: ('i,'a) Direction). P (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have x: "\ s'. ?P s' = P (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = P (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" unfolding satisfies_bounds_set.simps in_bounds.simps bound_compare_defs index_valid_def P_def by (auto split: option.split simp: indexl_def indexu_def boundsl_def boundsu_def) from valid have P'': "dir = Positive \ dir = Negative \ ?P'' dir s" for dir using x[of s] xx[of s] by auto have UTrue: "dir = Positive \ dir = Negative \ ?P'' dir s \ ?P'' dir (set_unsat I s)" for dir s I unfolding P_def by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) have updateIB: "a = LE dir x c \ dir = Positive \ dir = Negative \ ?P'' dir s \ ?P'' dir (update\\ (UBI_upd dir) i x c s)" for dir x c s unfolding P_def using I by (auto split: if_splits simp: simp: boundsl_def boundsu_def indexl_def indexu_def) show "index_valid (insert ia as) (assert_bound ia s)" unfolding ia I_def[symmetric] proof ((rule assert_bound_cases[of _ _ P]; (intro UTrue x xx updateIB P'')?)) fix x c and dir :: "('i,'a) Direction" assume **: "dir = Positive \ dir = Negative" "a = LE dir x c" "x \ lvars (\ s)" let ?s = "(update\\ (UBI_upd dir) i x c s)" define s' where "s' = ?s" have 1: "\ (\ ?s)" using * ** by auto have 2: "\ ?s" using id(1) ** * \\ s\ by auto have 3: "x \ lvars (\ ?s)" using id(1) ** * \\ s\ by auto have "?P'' dir ?s" using ** by (intro updateIB P'') auto with update_id[of ?s x c, OF 1 2 3, unfolded Let_def] **(1) show "P (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (update x c (update\\ (UBI_upd dir) i x c s))" unfolding P_def s'_def[symmetric] by auto qed auto next fix s and ia :: "('i,'a) i_atom" and ats :: "('i,'a) i_atom set" assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" "\ s" and ats: "ats \\<^sub>i \\ s" obtain i a where ia: "ia = (i,a)" by force have id: "\ dir x c s. dir = Positive \ dir = Negative \ \ (update\\ (UBI_upd dir) i x c s) = \ s" "\ s I. \ (set_unsat I s) = \ s" by (auto simp add: tableau_valuated_def) have idlt: "(c < (a :: 'a) \ c = a) = (c \ a)" "(a < c \ c = a) = (c \ a)" for a c by auto define A where "A = insert (i,a) ats" let ?P = "\ (s :: ('i,'a) state). 
A \\<^sub>i \\ s" let ?Q = "\ bs (lt :: 'a \ 'a \ bool) (UBI :: ('i,'a) state \ ('i,'a) bounds_index) (LBI :: ('i,'a) state \ ('i,'a) bounds_index) (UB :: ('i,'a) state \ 'a bounds) (LB :: ('i,'a) state \ 'a bounds) (UBI_upd :: (('i,'a) bounds_index \ ('i,'a) bounds_index) \ ('i,'a) state \ ('i,'a) state) UI LI (LE :: nat \ 'a \ 'a atom) (GE :: nat \ 'a \ 'a atom) s'. (\ I v. (I :: 'i set,v) \\<^sub>i\<^sub>a\<^sub>s bs \ ((\ x c. LB s' x = Some c \ LI s' x \ I \ lt c (v x) \ c = v x) \ (\ x c. UB s' x = Some c \ UI s' x \ I \ lt (v x) c \ v x = c)))" define Q where "Q = ?Q" let ?P' = "Q A" have equiv: "bs \\<^sub>i \\ s' \ Q bs (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" "bs \\<^sub>i \\ s' \ Q bs (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" for bs s' unfolding satisfies_bounds_set.simps in_bounds.simps bound_compare_defs index_valid_def Q_def atoms_imply_bounds_index.simps by (atomize(full), (intro conjI iff_exI allI arg_cong2[of _ _ _ _ "(\)"] refl iff_allI arg_cong2[of _ _ _ _ "(=)"]; unfold satisfies_bounds_index.simps idlt), auto) have x: "\ s'. ?P s' = ?P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" using equiv by blast+ from ats equiv[of ats s] have Q_ats: "Q ats (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s" "Q ats (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s" by auto let ?P'' = "\ (dir :: ('i,'a) Direction). 
?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have P_upd: "dir = Positive \ dir = Negative \ ?P'' dir (set_unsat I s) = ?P'' dir s" for s I dir unfolding Q_def by (intro iff_exI arg_cong2[of _ _ _ _ "(\)"] refl iff_allI arg_cong2[of _ _ _ _ "(=)"] arg_cong2[of _ _ _ _ "(\)"], auto simp: boundsl_def boundsu_def indexl_def indexu_def) have P_upd: "dir = Positive \ dir = Negative \ ?P'' dir s \ ?P'' dir (set_unsat I s)" for s I dir using P_upd[of dir] by blast have ats_sub: "ats \ A" unfolding A_def by auto { fix x c and dir :: "('i,'a) Direction" assume dir: "dir = Positive \ dir = Negative" and a: "a = LE dir x c" from Q_ats dir have Q_ats: "Q ats (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) s" by auto have "?P'' dir (update\\ (UBI_upd dir) i x c s)" unfolding Q_def proof (intro allI impI conjI) fix I v y d assume IvA: "(I, v) \\<^sub>i\<^sub>a\<^sub>s A" from i_satisfies_atom_set_mono[OF ats_sub this] have "(I, v) \\<^sub>i\<^sub>a\<^sub>s ats" by auto from Q_ats[unfolded Q_def, rule_format, OF this] have s_bnds: "LB dir s x = Some c \ LI dir s x \ I \ lt dir c (v x) \ c = v x" "UB dir s x = Some c \ UI dir s x \ I \ lt dir (v x) c \ v x = c" for x c by auto from IvA[unfolded A_def, unfolded i_satisfies_atom_set.simps satisfies_atom_set_def, simplified] have va: "i \ I \ v \\<^sub>a a" by auto with a dir have vc: "i \ I \ lt dir (v x) c \ v x = c" by auto let ?s = "(update\\ (UBI_upd dir) i x c s)" show "LB dir ?s y = Some d \ LI dir ?s y \ I \ lt dir d (v y) \ d = v y" "UB dir ?s y = Some d \ UI dir ?s y \ I \ lt dir (v y) d \ v y = d" proof (atomize(full), goal_cases) case 1 consider (main) "y = x" "UI dir ?s x = i" | (easy1) "x \ y" | (easy2) "x = y" "UI dir ?s y \ i" by blast then show ?case proof cases case easy1 then show ?thesis using s_bnds[of y d] dir by (fastforce simp: boundsl_def boundsu_def indexl_def indexu_def) next case easy2 then show ?thesis using s_bnds[of y d] dir by (fastforce simp: boundsl_def boundsu_def indexl_def indexu_def) next case main note s_bnds = s_bnds[of x] - show ?thesis unfolding main using s_bnds dir vc by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) blast+ + show ?thesis unfolding main using s_bnds dir vc + by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) qed qed qed } note main = this have Ps: "dir = Positive \ dir = Negative \ ?P'' dir s" for dir using Q_ats unfolding Q_def using i_satisfies_atom_set_mono[OF ats_sub] by fastforce have "?P (assert_bound (i,a) s)" proof ((rule assert_bound_cases[of _ _ ?P']; (intro x xx P_upd main Ps)?)) fix c x and dir :: "('i,'a) Direction" assume **: "dir = Positive \ dir = Negative" "a = LE dir x c" "x \ lvars (\ s)" let ?s = "update\\ (UBI_upd dir) i x c s" define s' where "s' = ?s" from main[OF **(1-2)] have P: "?P'' dir s'" unfolding s'_def . 
have 1: "\ (\ ?s)" using * ** by auto have 2: "\ ?s" using id(1) ** * \\ s\ by auto have 3: "x \ lvars (\ ?s)" using id(1) ** * \\ s\ by auto have "\ (\ s')" "\ s'" "x \ lvars (\ s')" using 1 2 3 unfolding s'_def by auto from update_bounds_id[OF this, of c] **(1) have "?P'' dir (update x c s') = ?P'' dir s'" unfolding Q_def by (intro iff_allI arg_cong2[of _ _ _ _ "(\)"] arg_cong2[of _ _ _ _ "(\)"] refl, auto) with P show "?P'' dir (update x c ?s)" unfolding s'_def by blast qed auto then show "insert ia ats \\<^sub>i \\ (assert_bound ia s)" unfolding ia A_def by blast qed text \Pivoting the tableau can be reduced to pivoting single equations, and substituting variable by polynomials. These operations are specified by:\ locale PivotEq = fixes pivot_eq::"eq \ var \ eq" assumes \ \Lhs var of @{text eq} and @{text x\<^sub>j} are swapped, while the other variables do not change sides.\ vars_pivot_eq:" \x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ let eq' = pivot_eq eq x\<^sub>j in lhs eq' = x\<^sub>j \ rvars_eq eq' = {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" and \ \Pivoting keeps the equation equisatisfiable.\ equiv_pivot_eq: "\x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ (v::'a::lrv valuation) \\<^sub>e pivot_eq eq x\<^sub>j \ v \\<^sub>e eq" begin lemma lhs_pivot_eq: "\x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ lhs (pivot_eq eq x\<^sub>j) = x\<^sub>j" using vars_pivot_eq by (simp add: Let_def) lemma rvars_pivot_eq: "\x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ rvars_eq (pivot_eq eq x\<^sub>j) = {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" using vars_pivot_eq by (simp add: Let_def) end abbreviation doublesub (" _ \s _ \s _" [50,51,51] 50) where "doublesub a b c \ a \ b \ b \ c" locale SubstVar = fixes subst_var::"var \ linear_poly \ linear_poly \ linear_poly" assumes \ \Effect of @{text "subst_var x\<^sub>j lp' lp"} on @{text lp} variables.\ vars_subst_var': "(vars lp - {x\<^sub>j}) - vars lp' \s vars (subst_var x\<^sub>j lp' lp) \s (vars lp - {x\<^sub>j}) \ vars lp'"and subst_no_effect: "x\<^sub>j \ vars lp \ subst_var x\<^sub>j lp' lp = lp" and subst_with_effect: "x\<^sub>j \ vars lp \ x \ vars lp' - vars lp \ x \ vars (subst_var x\<^sub>j lp' lp)" and \ \Effect of @{text "subst_var x\<^sub>j lp' lp"} on @{text lp} value.\ equiv_subst_var: "(v::'a :: lrv valuation) x\<^sub>j = lp' \v\ \ lp \v\ = (subst_var x\<^sub>j lp' lp) \v\" begin lemma vars_subst_var: "vars (subst_var x\<^sub>j lp' lp) \ (vars lp - {x\<^sub>j}) \ vars lp'" using vars_subst_var' by simp lemma vars_subst_var_supset: "vars (subst_var x\<^sub>j lp' lp) \ (vars lp - {x\<^sub>j}) - vars lp'" using vars_subst_var' by simp definition subst_var_eq :: "var \ linear_poly \ eq \ eq" where "subst_var_eq v lp' eq \ (lhs eq, subst_var v lp' (rhs eq))" lemma rvars_eq_subst_var_eq: shows "rvars_eq (subst_var_eq x\<^sub>j lp eq) \ (rvars_eq eq - {x\<^sub>j}) \ vars lp" unfolding subst_var_eq_def by (auto simp add: vars_subst_var) lemma rvars_eq_subst_var_eq_supset: "rvars_eq (subst_var_eq x\<^sub>j lp eq) \ (rvars_eq eq) - {x\<^sub>j} - (vars lp)" unfolding subst_var_eq_def by (simp add: vars_subst_var_supset) lemma equiv_subst_var_eq: assumes "(v::'a valuation) \\<^sub>e (x\<^sub>j, lp')" shows "v \\<^sub>e eq \ v \\<^sub>e subst_var_eq x\<^sub>j lp' eq" using assms unfolding subst_var_eq_def unfolding satisfies_eq_def using equiv_subst_var[of v x\<^sub>j lp' "rhs eq"] by auto end locale Pivot' = EqForLVar + PivotEq + SubstVar begin definition pivot_tableau' :: "var \ var \ tableau \ tableau" where "pivot_tableau' 
x\<^sub>i x\<^sub>j t \ let x\<^sub>i_idx = eq_idx_for_lvar t x\<^sub>i; eq = t ! x\<^sub>i_idx; eq' = pivot_eq eq x\<^sub>j in map (\ idx. if idx = x\<^sub>i_idx then eq' else subst_var_eq x\<^sub>j (rhs eq') (t ! idx) ) [0.. var \ ('i,'a::lrv) state \ ('i,'a) state" where "pivot' x\<^sub>i x\<^sub>j s \ \_update (pivot_tableau' x\<^sub>i x\<^sub>j (\ s)) s" text\\noindent Then, the next implementation of \pivot\ satisfies its specification:\ definition pivot_tableau :: "var \ var \ tableau \ tableau" where "pivot_tableau x\<^sub>i x\<^sub>j t \ let eq = eq_for_lvar t x\<^sub>i; eq' = pivot_eq eq x\<^sub>j in map (\ e. if lhs e = lhs eq then eq' else subst_var_eq x\<^sub>j (rhs eq') e) t" definition pivot :: "var \ var \ ('i,'a::lrv) state \ ('i,'a) state" where "pivot x\<^sub>i x\<^sub>j s \ \_update (pivot_tableau x\<^sub>i x\<^sub>j (\ s)) s" lemma pivot_tableau'pivot_tableau: assumes "\ t" "x\<^sub>i \ lvars t" shows "pivot_tableau' x\<^sub>i x\<^sub>j t = pivot_tableau x\<^sub>i x\<^sub>j t" proof- let ?f = "\idx. if idx = eq_idx_for_lvar t x\<^sub>i then pivot_eq (t ! eq_idx_for_lvar t x\<^sub>i) x\<^sub>j else subst_var_eq x\<^sub>j (rhs (pivot_eq (t ! eq_idx_for_lvar t x\<^sub>i) x\<^sub>j)) (t ! idx)" let ?f' = "\e. if lhs e = lhs (eq_for_lvar t x\<^sub>i) then pivot_eq (eq_for_lvar t x\<^sub>i) x\<^sub>j else subst_var_eq x\<^sub>j (rhs (pivot_eq (eq_for_lvar t x\<^sub>i) x\<^sub>j)) e" have "\ i < length t. ?f' (t ! i) = ?f i" proof(safe) fix i assume "i < length t" then have "t ! i \ set t" "i < length t" by auto moreover have "t ! eq_idx_for_lvar t x\<^sub>i \ set t" "eq_idx_for_lvar t x\<^sub>i < length t" using eq_for_lvar[of x\<^sub>i t] \x\<^sub>i \ lvars t\ eq_idx_for_lvar[of x\<^sub>i t] by (auto simp add: eq_for_lvar_def) ultimately have "lhs (t ! i) = lhs (t ! eq_idx_for_lvar t x\<^sub>i) \ t ! i = t ! (eq_idx_for_lvar t x\<^sub>i)" "distinct t" using \\ t\ unfolding normalized_tableau_def by (auto simp add: distinct_map inj_on_def) then have "lhs (t ! i) = lhs (t ! eq_idx_for_lvar t x\<^sub>i) \ i = eq_idx_for_lvar t x\<^sub>i" using \i < length t\ \eq_idx_for_lvar t x\<^sub>i < length t\ by (auto simp add: distinct_conv_nth) then show "?f' (t ! i) = ?f i" by (auto simp add: eq_for_lvar_def) qed then show "pivot_tableau' x\<^sub>i x\<^sub>j t = pivot_tableau x\<^sub>i x\<^sub>j t" unfolding pivot_tableau'_def pivot_tableau_def unfolding Let_def by (auto simp add: map_reindex) qed lemma pivot'pivot: fixes s :: "('i,'a::lrv)state" assumes "\ (\ s)" "x\<^sub>i \ lvars (\ s)" shows "pivot' x\<^sub>i x\<^sub>j s = pivot x\<^sub>i x\<^sub>j s" using pivot_tableau'pivot_tableau[OF assms] unfolding pivot_def pivot'_def by auto end sublocale Pivot' < Pivot eq_idx_for_lvar pivot proof fix s::"('i,'a) state" and x\<^sub>i x\<^sub>j and v::"'a valuation" assume "\ (\ s)" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" show "let s' = pivot x\<^sub>i x\<^sub>j s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" unfolding pivot_def by (auto simp add: Let_def simp: boundsl_def boundsu_def indexl_def indexu_def) let ?t = "\ s" let ?idx = "eq_idx_for_lvar ?t x\<^sub>i" let ?eq = "?t ! ?idx" let ?eq' = "pivot_eq ?eq x\<^sub>j" have "?idx < length ?t" "lhs (?t ! 
?idx) = x\<^sub>i" using \x\<^sub>i \ lvars ?t\ using eq_idx_for_lvar by auto have "distinct (map lhs ?t)" using \\ ?t\ unfolding normalized_tableau_def by simp have "x\<^sub>j \ rvars_eq ?eq" using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ unfolding eq_for_lvar_def by simp then have "x\<^sub>j \ rvars ?t" using \?idx < length ?t\ using in_set_conv_nth[of ?eq ?t] by (auto simp add: rvars_def) then have "x\<^sub>j \ lvars ?t" using \\ ?t\ unfolding normalized_tableau_def by auto have "x\<^sub>i \ rvars ?t" using \x\<^sub>i \ lvars ?t\ \\ ?t\ unfolding normalized_tableau_def rvars_def by auto then have "x\<^sub>i \ rvars_eq ?eq" unfolding rvars_def using \?idx < length ?t\ using in_set_conv_nth[of ?eq ?t] by auto have "x\<^sub>i \ x\<^sub>j" using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ rvars_eq ?eq\ by auto have "?eq' = (x\<^sub>j, rhs ?eq')" using lhs_pivot_eq[of x\<^sub>j ?eq] using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ \lhs (?t ! ?idx) = x\<^sub>i\ \x\<^sub>i \ rvars_eq ?eq\ by (auto simp add: eq_for_lvar_def) (cases "?eq'", simp)+ let ?I1 = "[0..?idx < length ?t\ by (rule interval_3split) then have map_lhs_pivot: "map lhs (\ (pivot' x\<^sub>i x\<^sub>j s)) = map (\idx. lhs (?t ! idx)) ?I1 @ [x\<^sub>j] @ map (\idx. lhs (?t ! idx)) ?I2" using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ \lhs (?t ! ?idx) = x\<^sub>i\ \x\<^sub>i \ rvars_eq ?eq\ by (auto simp add: Let_def subst_var_eq_def eq_for_lvar_def lhs_pivot_eq pivot'_def pivot_tableau'_def) have lvars_pivot: "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" proof- have "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = {x\<^sub>j} \ (\idx. lhs (?t ! idx)) ` ({0..?idx < length ?t\ \?eq' = (x\<^sub>j, rhs ?eq')\ by (cases ?eq', auto simp add: Let_def pivot'_def pivot_tableau'_def lvars_def subst_var_eq_def)+ also have "... = {x\<^sub>j} \ (((\idx. lhs (?t ! idx)) ` {0..?idx < length ?t\ \distinct (map lhs ?t)\ by (auto simp add: distinct_conv_nth) also have "... = {x\<^sub>j} \ (set (map lhs ?t) - {x\<^sub>i})" using \lhs (?t ! ?idx) = x\<^sub>i\ by (auto simp add: in_set_conv_nth rev_image_eqI) (auto simp add: image_def) finally show "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" by (simp add: lvars_def) qed moreover have rvars_pivot: "rvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" proof- have "rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})" using rvars_pivot_eq[of x\<^sub>j ?eq] using \lhs (?t ! ?idx) = x\<^sub>i\ using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ rvars_eq ?eq\ by simp let ?S1 = "rvars_eq ?eq'" let ?S2 = "\idx\({0..j (rhs ?eq') (?t ! idx))" have "rvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = ?S1 \ ?S2" unfolding pivot'_def pivot_tableau'_def rvars_def using \?idx < length ?t\ by (auto simp add: Let_def split: if_splits) also have "... = {x\<^sub>i} \ (rvars ?t - {x\<^sub>j})" (is "?S1 \ ?S2 = ?rhs") proof show "?S1 \ ?S2 \ ?rhs" proof- have "?S1 \ ?rhs" using \?idx < length ?t\ unfolding rvars_def using \rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})\ by (force simp add: in_set_conv_nth) moreover have "?S2 \ ?rhs" proof- have "?S2 \ (\idx\{0..j}) \ rvars_eq ?eq')" apply (rule UN_mono) using rvars_eq_subst_var_eq by auto also have "... \ rvars_eq ?eq' \ (\idx\{0..j})" by auto also have "... 
= rvars_eq ?eq' \ (rvars ?t - {x\<^sub>j})" unfolding rvars_def by (force simp add: in_set_conv_nth) finally show ?thesis using \rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})\ unfolding rvars_def using \?idx < length ?t\ by auto qed ultimately show ?thesis by simp qed next show "?rhs \ ?S1 \ ?S2" proof fix x assume "x \ ?rhs" show "x \ ?S1 \ ?S2" proof (cases "x \ rvars_eq ?eq'") case True then show ?thesis by auto next case False let ?S2' = "\idx\({0..j}) - rvars_eq ?eq'" have "x \ ?S2'" using False \x \ ?rhs\ using \rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})\ unfolding rvars_def by (force simp add: in_set_conv_nth) moreover have "?S2 \ ?S2'" apply (rule UN_mono) using rvars_eq_subst_var_eq_supset[of _ x\<^sub>j "rhs ?eq'" ] by auto ultimately show ?thesis by auto qed qed qed ultimately show ?thesis by simp qed ultimately show "let s' = pivot x\<^sub>i x\<^sub>j s in rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i} \ lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" using pivot'pivot[where ?'i = 'i] using \\ (\ s)\ \x\<^sub>i \ lvars (\ s)\ by (simp add: Let_def) have "\ (\ (pivot' x\<^sub>i x\<^sub>j s))" unfolding normalized_tableau_def proof have "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) \ rvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = {}" (is ?g1) using \\ (\ s)\ unfolding normalized_tableau_def using lvars_pivot rvars_pivot using \x\<^sub>i \ x\<^sub>j\ by auto moreover have "0 \ rhs ` set (\ (pivot' x\<^sub>i x\<^sub>j s))" (is ?g2) proof let ?eq = "eq_for_lvar (\ s) x\<^sub>i" from eq_for_lvar[OF \x\<^sub>i \ lvars (\ s)\] have "?eq \ set (\ s)" and var: "lhs ?eq = x\<^sub>i" by auto have "lhs ?eq \ rvars_eq ?eq" using \\ (\ s)\ \?eq \ set (\ s)\ using \x\<^sub>i \ rvars_eq (\ s ! eq_idx_for_lvar (\ s) x\<^sub>i)\ eq_for_lvar_def var by auto from vars_pivot_eq[OF \x\<^sub>j \ rvars_eq ?eq\ this] have vars_pivot: "lhs (pivot_eq ?eq x\<^sub>j) = x\<^sub>j" "rvars_eq (pivot_eq ?eq x\<^sub>j) = {lhs (eq_for_lvar (\ s) x\<^sub>i)} \ (rvars_eq (eq_for_lvar (\ s) x\<^sub>i) - {x\<^sub>j})" unfolding Let_def by auto from vars_pivot(2) have rhs_pivot0: "rhs (pivot_eq ?eq x\<^sub>j) \ 0" using vars_zero by auto assume "0 \ rhs ` set (\ (pivot' x\<^sub>i x\<^sub>j s))" from this[unfolded pivot'pivot[OF \\ (\ s)\ \x\<^sub>i \ lvars (\ s)\] pivot_def] have "0 \ rhs ` set (pivot_tableau x\<^sub>i x\<^sub>j (\ s))" by simp from this[unfolded pivot_tableau_def Let_def var, unfolded var] rhs_pivot0 obtain e where "e \ set (\ s)" "lhs e \ x\<^sub>i" and rvars_eq: "rvars_eq (subst_var_eq x\<^sub>j (rhs (pivot_eq ?eq x\<^sub>j)) e) = {}" by (auto simp: vars_zero) from rvars_eq[unfolded subst_var_eq_def] have empty: "vars (subst_var x\<^sub>j (rhs (pivot_eq ?eq x\<^sub>j)) (rhs e)) = {}" by auto show False proof (cases "x\<^sub>j \ vars (rhs e)") case False from empty[unfolded subst_no_effect[OF False]] have "rvars_eq e = {}" by auto hence "rhs e = 0" using zero_coeff_zero coeff_zero by auto with \e \ set (\ s)\ \\ (\ s)\ show False unfolding normalized_tableau_def by auto next case True from \e \ set (\ s)\ have "rvars_eq e \ rvars (\ s)" unfolding rvars_def by auto hence "x\<^sub>i \ vars (rhs (pivot_eq ?eq x\<^sub>j)) - rvars_eq e" unfolding vars_pivot(2) var using \\ (\ s)\[unfolded normalized_tableau_def] \x\<^sub>i \ lvars (\ s)\ by auto from subst_with_effect[OF True this] rvars_eq show ?thesis by (simp add: subst_var_eq_def) qed qed ultimately show "?g1 \ ?g2" .. 
show "distinct (map lhs (\ (pivot' x\<^sub>i x\<^sub>j s)))" using map_parametrize_idx[of lhs ?t] using map_lhs_pivot using \distinct (map lhs ?t)\ using interval_3split[of ?idx "length ?t"] \?idx < length ?t\ using \x\<^sub>j \ lvars ?t\ unfolding lvars_def by auto qed moreover have "v \\<^sub>t ?t = v \\<^sub>t \ (pivot' x\<^sub>i x\<^sub>j s)" unfolding satisfies_tableau_def proof assume "\e\set (?t). v \\<^sub>e e" show "\e\set (\ (pivot' x\<^sub>i x\<^sub>j s)). v \\<^sub>e e" proof- have "v \\<^sub>e ?eq'" using \x\<^sub>i \ rvars_eq ?eq\ using \?idx < length ?t\ \\e\set (?t). v \\<^sub>e e\ using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ lvars ?t\ by (simp add: equiv_pivot_eq eq_idx_for_lvar) moreover { fix idx assume "idx < length ?t" "idx \ ?idx" have "v \\<^sub>e subst_var_eq x\<^sub>j (rhs ?eq') (?t ! idx)" using \?eq' = (x\<^sub>j, rhs ?eq')\ using \v \\<^sub>e ?eq'\ \idx < length ?t\ \\e\set (?t). v \\<^sub>e e\ using equiv_subst_var_eq[of v x\<^sub>j "rhs ?eq'" "?t ! idx"] by auto } ultimately show ?thesis by (auto simp add: pivot'_def pivot_tableau'_def Let_def) qed next assume "\e\set (\ (pivot' x\<^sub>i x\<^sub>j s)). v \\<^sub>e e" then have "v \\<^sub>e ?eq'" "\ idx. \idx < length ?t; idx \ ?idx \ \ v \\<^sub>e subst_var_eq x\<^sub>j (rhs ?eq') (?t ! idx)" using \?idx < length ?t\ unfolding pivot'_def pivot_tableau'_def by (auto simp add: Let_def) show "\e\set (\ s). v \\<^sub>e e" proof- { fix idx assume "idx < length ?t" have "v \\<^sub>e (?t ! idx)" proof (cases "idx = ?idx") case True then show ?thesis using \v \\<^sub>e ?eq'\ using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ lvars ?t\ \x\<^sub>i \ rvars_eq ?eq\ by (simp add: eq_idx_for_lvar equiv_pivot_eq) next case False then show ?thesis using \idx < length ?t\ using \\idx < length ?t; idx \ ?idx \ \ v \\<^sub>e subst_var_eq x\<^sub>j (rhs ?eq') (?t ! idx)\ using \v \\<^sub>e ?eq'\ \?eq' = (x\<^sub>j, rhs ?eq')\ using equiv_subst_var_eq[of v x\<^sub>j "rhs ?eq'" "?t ! idx"] by auto qed } then show ?thesis by (force simp add: in_set_conv_nth) qed qed ultimately show "let s' = pivot x\<^sub>i x\<^sub>j s in v \\<^sub>t \ s = v \\<^sub>t \ s' \ \ (\ s')" using pivot'pivot[where ?'i = 'i] using \\ (\ s)\ \x\<^sub>i \ lvars (\ s)\ by (simp add: Let_def) qed subsection\Check implementation\ text\The \check\ function is called when all rhs variables are in bounds, and it checks if there is a lhs variable that is not. If there is no such variable, then satisfiability is detected and \check\ succeeds. If there is a lhs variable \x\<^sub>i\ out of its bounds, a rhs variable \x\<^sub>j\ is sought which allows pivoting with \x\<^sub>i\ and updating \x\<^sub>i\ to its violated bound. If \x\<^sub>i\ is under its lower bound it must be increased, and if \x\<^sub>j\ has a positive coefficient it must be increased so it must be under its upper bound and if it has a negative coefficient it must be decreased so it must be above its lower bound. The case when \x\<^sub>i\ is above its upper bound is symmetric (avoiding symmetries is discussed in Section \ref{sec:symmetries}). If there is no such \x\<^sub>j\, unsatisfiability is detected and \check\ fails. The procedure is recursively repeated, until it either succeeds or fails. To ensure termination, variables \x\<^sub>i\ and \x\<^sub>j\ must be chosen with respect to a fixed variable ordering. For choosing these variables auxiliary functions \min_lvar_not_in_bounds\, \min_rvar_inc\ and \min_rvar_dec\ are specified (each in its own locale). 
For, example: \ locale MinLVarNotInBounds = fixes min_lvar_not_in_bounds::"('i,'a::lrv) state \ var option" assumes min_lvar_not_in_bounds_None: "min_lvar_not_in_bounds s = None \ (\x\lvars (\ s). in_bounds x \\ s\ (\ s))" and min_lvar_not_in_bounds_Some': "min_lvar_not_in_bounds s = Some x\<^sub>i \ x\<^sub>i\lvars (\ s) \ \in_bounds x\<^sub>i \\ s\ (\ s) \ (\x\lvars (\ s). x < x\<^sub>i \ in_bounds x \\ s\ (\ s))" begin lemma min_lvar_not_in_bounds_None': "min_lvar_not_in_bounds s = None \ (\\ s\ \\<^sub>b \ s \ lvars (\ s))" unfolding satisfies_bounds_set.simps by (rule min_lvar_not_in_bounds_None) lemma min_lvar_not_in_bounds_lvars:"min_lvar_not_in_bounds s = Some x\<^sub>i \ x\<^sub>i \ lvars (\ s)" using min_lvar_not_in_bounds_Some' by simp lemma min_lvar_not_in_bounds_Some: "min_lvar_not_in_bounds s = Some x\<^sub>i \ \ in_bounds x\<^sub>i \\ s\ (\ s)" using min_lvar_not_in_bounds_Some' by simp lemma min_lvar_not_in_bounds_Some_min: "min_lvar_not_in_bounds s = Some x\<^sub>i \ (\ x \ lvars (\ s). x < x\<^sub>i \ in_bounds x \\ s\ (\ s))" using min_lvar_not_in_bounds_Some' by simp end abbreviation reasable_var where "reasable_var dir x eq s \ (coeff (rhs eq) x > 0 \ \\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x)) \ (coeff (rhs eq) x < 0 \ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x))" locale MinRVarsEq = fixes min_rvar_incdec_eq :: "('i,'a) Direction \ ('i,'a::lrv) state \ eq \ 'i list + var" assumes min_rvar_incdec_eq_None: "min_rvar_incdec_eq dir s eq = Inl is \ (\ x \ rvars_eq eq. \ reasable_var dir x eq s) \ (set is = {LI dir s (lhs eq)} \ {LI dir s x | x. x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ {UI dir s x | x. x \ rvars_eq eq \ coeff (rhs eq) x > 0}) \ ((dir = Positive \ dir = Negative) \ LI dir s (lhs eq) \ indices_state s \ set is \ indices_state s)" assumes min_rvar_incdec_eq_Some_rvars: "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j \ x\<^sub>j \ rvars_eq eq" assumes min_rvar_incdec_eq_Some_incdec: "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j \ reasable_var dir x\<^sub>j eq s" assumes min_rvar_incdec_eq_Some_min: "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j \ (\ x \ rvars_eq eq. x < x\<^sub>j \ \ reasable_var dir x eq s)" begin lemma min_rvar_incdec_eq_None': assumes *: "dir = Positive \ dir = Negative" and min: "min_rvar_incdec_eq dir s eq = Inl is" and sub: "I = set is" and Iv: "(I,v) \\<^sub>i\<^sub>b \\ s" shows "le (lt dir) ((rhs eq) \v\) ((rhs eq) \\\ s\\)" proof - have "\ x \ rvars_eq eq. \ reasable_var dir x eq s" using min using min_rvar_incdec_eq_None by simp have "\ x \ rvars_eq eq. (0 < coeff (rhs eq) x \ le (lt dir) 0 (\\ s\ x - v x)) \ (coeff (rhs eq) x < 0 \ le (lt dir) (\\ s\ x - v x) 0)" proof (safe) fix x assume x: "x \ rvars_eq eq" "0 < coeff (rhs eq) x" "0 \ \\ s\ x - v x" then have "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x))" using \\ x \ rvars_eq eq. 
\ reasable_var dir x eq s\ by auto then have "\\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x)" using * by (cases "UB dir s x") (auto simp add: bound_compare_defs) moreover from min_rvar_incdec_eq_None[OF min] x sub have "UI dir s x \ I" by auto from Iv * this have "\\<^sub>u\<^sub>b (lt dir) (v x) (UB dir s x)" unfolding satisfies_bounds_index.simps by (cases "UB dir s x", auto simp: indexl_def indexu_def boundsl_def boundsu_def bound_compare'_defs) (fastforce)+ ultimately have "le (lt dir) (v x) (\\ s\ x)" using * by (cases "UB dir s x") (auto simp add: bound_compare_defs) then show "lt dir 0 (\\ s\ x - v x)" using \0 \ \\ s\ x - v x\ * using minus_gt[of "v x" "\\ s\ x"] minus_lt[of "\\ s\ x" "v x"] by auto next fix x assume x: "x \ rvars_eq eq" "0 > coeff (rhs eq) x" "\\ s\ x - v x \ 0" then have "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x))" using \\ x \ rvars_eq eq. \ reasable_var dir x eq s\ by auto then have "\\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x)" using * by (cases "LB dir s x") (auto simp add: bound_compare_defs) moreover from min_rvar_incdec_eq_None[OF min] x sub have "LI dir s x \ I" by auto from Iv * this have "\\<^sub>l\<^sub>b (lt dir) (v x) (LB dir s x)" unfolding satisfies_bounds_index.simps by (cases "LB dir s x", auto simp: indexl_def indexu_def boundsl_def boundsu_def bound_compare'_defs) (fastforce)+ ultimately have "le (lt dir) (\\ s\ x) (v x)" using * by (cases "LB dir s x") (auto simp add: bound_compare_defs) then show "lt dir (\\ s\ x - v x) 0" using \\\ s\ x - v x \ 0\ * using minus_lt[of "\\ s\ x" "v x"] minus_gt[of "v x" "\\ s\ x"] by auto qed then have "le (lt dir) 0 (rhs eq \ \ x. \\ s\ x - v x\)" using * apply auto using valuate_nonneg[of "rhs eq" "\x. \\ s\ x - v x"] apply force using valuate_nonpos[of "rhs eq" "\x. \\ s\ x - v x"] apply force done then show "le (lt dir) rhs eq \ v \ rhs eq \ \\ s\ \" using \dir = Positive \ dir = Negative\ using minus_gt[of "rhs eq \ v \" "rhs eq \ \\ s\ \"] by (auto simp add: valuate_diff[THEN sym]) qed end locale MinRVars = EqForLVar + MinRVarsEq min_rvar_incdec_eq for min_rvar_incdec_eq :: "('i, 'a :: lrv) Direction \ _" begin abbreviation min_rvar_incdec :: "('i,'a) Direction \ ('i,'a) state \ var \ 'i list + var" where "min_rvar_incdec dir s x\<^sub>i \ min_rvar_incdec_eq dir s (eq_for_lvar (\ s) x\<^sub>i)" end locale MinVars = MinLVarNotInBounds min_lvar_not_in_bounds + MinRVars eq_idx_for_lvar min_rvar_incdec_eq for min_lvar_not_in_bounds :: "('i,'a::lrv) state \ _" and eq_idx_for_lvar and min_rvar_incdec_eq :: "('i, 'a :: lrv) Direction \ _" locale PivotUpdateMinVars = PivotAndUpdate eq_idx_for_lvar pivot_and_update + MinVars min_lvar_not_in_bounds eq_idx_for_lvar min_rvar_incdec_eq for eq_idx_for_lvar :: "tableau \ var \ nat" and min_lvar_not_in_bounds :: "('i,'a::lrv) state \ var option" and min_rvar_incdec_eq :: "('i,'a) Direction \ ('i,'a) state \ eq \ 'i list + var" and pivot_and_update :: "var \ var \ 'a \ ('i,'a) state \ ('i,'a) state" begin definition check' where "check' dir x\<^sub>i s \ let l\<^sub>i = the (LB dir s x\<^sub>i); x\<^sub>j' = min_rvar_incdec dir s x\<^sub>i in case x\<^sub>j' of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s" lemma check'_cases: assumes "\ I. \min_rvar_incdec dir s x\<^sub>i = Inl I; check' dir x\<^sub>i s = set_unsat I s\ \ P (set_unsat I s)" assumes "\ x\<^sub>j l\<^sub>i. 
\min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j; l\<^sub>i = the (LB dir s x\<^sub>i); check' dir x\<^sub>i s = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s\ \ P (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" shows "P (check' dir x\<^sub>i s)" using assms unfolding check'_def by (cases "min_rvar_incdec dir s x\<^sub>i", auto) partial_function (tailrec) check where "check s = (if \ s then s else let x\<^sub>i' = min_lvar_not_in_bounds s in case x\<^sub>i' of None \ s | Some x\<^sub>i \ let dir = if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative in check (check' dir x\<^sub>i s))" declare check.simps[code] inductive check_dom where step: "\\x\<^sub>i. \\ \ s; Some x\<^sub>i = min_lvar_not_in_bounds s; \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ \ check_dom (check' Positive x\<^sub>i s); \x\<^sub>i. \\ \ s; Some x\<^sub>i = min_lvar_not_in_bounds s; \ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ \ check_dom (check' Negative x\<^sub>i s)\ \ check_dom s" text\ The definition of \check\ can be given by: @{text[display] "check s \ if \ s then s else let x\<^sub>i' = min_lvar_not_in_bounds s in case x\<^sub>i' of None \ s | Some x\<^sub>i \ if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then check (check_inc x\<^sub>i s) else check (check_dec x\<^sub>i s)" } @{text[display] "check_inc x\<^sub>i s \ let l\<^sub>i = the (\\<^sub>l s x\<^sub>i); x\<^sub>j' = min_rvar_inc s x\<^sub>i in case x\<^sub>j' of None \ s \ \ := True \ | Some x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s" } The definition of \check_dec\ is analogous. It is shown (mainly by induction) that this definition satisfies the \check\ specification. Note that this definition uses general recursion, so its termination is non-trivial. It has been shown that it terminates for all states satisfying the check preconditions. The proof is based on the proof outline given in \cite{simplex-rad}. It is very technically involved, but conceptually uninteresting so we do not discuss it in more details.\ lemma pivotandupdate_check_precond: assumes "dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative)" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j" "l\<^sub>i = the (LB dir s x\<^sub>i)" "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" " \ s" shows "\ (\ (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s) \ \ (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s) \ \ (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" proof- have "\\<^sub>l s x\<^sub>i = Some l\<^sub>i \ \\<^sub>u s x\<^sub>i = Some l\<^sub>i" using \l\<^sub>i = the (LB dir s x\<^sub>i)\ \dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative)\ using \min_lvar_not_in_bounds s = Some x\<^sub>i\ min_lvar_not_in_bounds_Some[of s x\<^sub>i] using \\ s\ by (case_tac[!] "\\<^sub>l s x\<^sub>i", case_tac[!] 
"\\<^sub>u s x\<^sub>i") (auto simp add: bounds_consistent_def bound_compare_defs) then show ?thesis using assms using pivotandupdate_tableau_normalized[of s x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_nolhs[of s x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_bounds_consistent[of s x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_tableau_valuated[of s x\<^sub>i x\<^sub>j l\<^sub>i] by (auto simp add: min_lvar_not_in_bounds_lvars min_rvar_incdec_eq_Some_rvars) qed (* -------------------------------------------------------------------------- *) (* Termination *) (* -------------------------------------------------------------------------- *) abbreviation gt_state' where "gt_state' dir s s' x\<^sub>i x\<^sub>j l\<^sub>i \ min_lvar_not_in_bounds s = Some x\<^sub>i \ l\<^sub>i = the (LB dir s x\<^sub>i) \ min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j \ s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s" definition gt_state :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\\<^sub>x" 100) where "s \\<^sub>x s' \ \ x\<^sub>i x\<^sub>j l\<^sub>i. let dir = if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative in gt_state' dir s s' x\<^sub>i x\<^sub>j l\<^sub>i" abbreviation succ :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\" 100) where "s \ s' \ \ (\ s) \ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s \ \ s \ s \\<^sub>x s' \ \\<^sub>i s' = \\<^sub>i s \ \\<^sub>c s' = \\<^sub>c s" abbreviation succ_rel :: "('i,'a) state rel" where "succ_rel \ {(s, s'). s \ s'}" abbreviation succ_rel_trancl :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\\<^sup>+" 100) where "s \\<^sup>+ s' \ (s, s') \ succ_rel\<^sup>+" abbreviation succ_rel_rtrancl :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\\<^sup>*" 100) where "s \\<^sup>* s' \ (s, s') \ succ_rel\<^sup>*" lemma succ_vars: assumes "s \ s'" obtains x\<^sub>i x\<^sub>j where "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" "x\<^sub>j \ rvars (\ s)" "lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" "lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j] using pivotandupdate_lvars[of s x\<^sub>i x\<^sub>j] by auto moreover have "x\<^sub>j \ rvars (\ s)" using \x\<^sub>i \ lvars (\ s)\ using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ using eq_for_lvar[of x\<^sub>i "\ s"] unfolding rvars_def by auto ultimately have "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" "x\<^sub>j \ rvars (\ s)" "lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" by auto then show thesis .. 
qed lemma succ_vars_id: assumes "s \ s'" shows "lvars (\ s) \ rvars (\ s) = lvars (\ s') \ rvars (\ s')" using assms by (rule succ_vars) auto lemma succ_inv: assumes "s \ s'" shows "\ (\ s')" "\ s'" "\ s'" "\\<^sub>i s = \\<^sub>i s'" "(v::'a valuation) \\<^sub>t (\ s) \ v \\<^sub>t (\ s')" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then show "\ (\ s')" "\ s'" "\ s'" "\\<^sub>i s = \\<^sub>i s'" "(v::'a valuation) \\<^sub>t (\ s) \ v \\<^sub>t (\ s')" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using pivotandupdate_tableau_normalized[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_bounds_consistent[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_bounds_id[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_tableau_equiv using pivotandupdate_tableau_valuated by auto qed lemma succ_rvar_valuation_id: assumes "s \ s'" "x \ rvars (\ s)" "x \ rvars (\ s')" shows "\\ s\ x = \\ s'\ x" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then show ?thesis using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using \x \ rvars (\ s)\ \x \ rvars (\ s')\ using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j x c] by (force simp add: normalized_tableau_def map2fun_def) qed lemma succ_min_lvar_not_in_bounds: assumes "s \ s'" "xr \ lvars (\ s)" "xr \ rvars (\ s')" shows "\ in_bounds xr (\\ s\) (\ s)" "\ x \ lvars (\ s). x < xr \ in_bounds x (\\ s\) (\ s)" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then have "x\<^sub>i = xr" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using \xr \ lvars (\ s)\ \xr \ rvars (\ s')\ using pivotandupdate_rvars by (auto simp add: normalized_tableau_def) then show "\ in_bounds xr (\\ s\) (\ s)" "\ x \ lvars (\ s). 
x < xr \ in_bounds x (\\ s\) (\ s)" using \min_lvar_not_in_bounds s = Some x\<^sub>i\ using min_lvar_not_in_bounds_Some min_lvar_not_in_bounds_Some_min by simp_all qed lemma succ_min_rvar: assumes "s \ s'" "xs \ lvars (\ s)" "xs \ rvars (\ s')" "xr \ rvars (\ s)" "xr \ lvars (\ s')" "eq = eq_for_lvar (\ s) xs" and dir: "dir = Positive \ dir = Negative" shows "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs) \ reasable_var dir xr eq s \ (\ x \ rvars_eq eq. x < xr \ \ reasable_var dir x eq s)" proof- from assms(1) obtain x\<^sub>i x\<^sub>j c where"\ (\ s) \ \ s \ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "gt_state' (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative) s s' x\<^sub>i x\<^sub>j c" by (auto simp add: gt_state_def Let_def) then have "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" and *: "(\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i \ min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j) \ (\ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j)" by (auto split: if_splits) then have "xr = x\<^sub>j" "xs = x\<^sub>i" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using \xr \ rvars (\ s)\ \xr \ lvars (\ s')\ using \xs \ lvars (\ s)\ \xs \ rvars (\ s')\ using pivotandupdate_lvars pivotandupdate_rvars by (auto simp add: normalized_tableau_def) show "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs)) \ reasable_var dir xr eq s \ (\ x \ rvars_eq eq. x < xr \ \ reasable_var dir x eq s)" proof assume "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs)" then have "\\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs)" using dir by (cases "LB dir s xs") (auto simp add: bound_compare_defs) moreover then have "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s\ xs) (UB dir s xs))" using \\ s\ dir using bounds_consistent_gt_ub bounds_consistent_lt_lb by (force simp add: bound_compare''_defs) ultimately have "min_rvar_incdec dir s xs = Inr xr" using * \xr = x\<^sub>j\ \xs = x\<^sub>i\ dir by (auto simp add: bound_compare''_defs) then show "reasable_var dir xr eq s \ (\ x \ rvars_eq eq. 
x < xr \ \ reasable_var dir x eq s)" using \eq = eq_for_lvar (\ s) xs\ using min_rvar_incdec_eq_Some_min[of dir s eq xr] using min_rvar_incdec_eq_Some_incdec[of dir s eq xr] by simp qed qed lemma succ_set_on_bound: assumes "s \ s'" "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ s')" and dir: "dir = Positive \ dir = Negative" shows "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (LB dir s x\<^sub>i)" "\\ s'\ x\<^sub>i = the (\\<^sub>l s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (\\<^sub>u s x\<^sub>i)" proof- from assms(1) obtain x\<^sub>i' x\<^sub>j c where"\ (\ s) \ \ s \ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "gt_state' (if \\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' then Positive else Negative) s s' x\<^sub>i' x\<^sub>j c" by (auto simp add: gt_state_def Let_def) then have "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i'" "s' = pivot_and_update x\<^sub>i' x\<^sub>j c s" and *: "(\\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' \ c = the (\\<^sub>l s x\<^sub>i') \ min_rvar_incdec Positive s x\<^sub>i' = Inr x\<^sub>j) \ (\ \\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' \ c = the (\\<^sub>u s x\<^sub>i') \ min_rvar_incdec Negative s x\<^sub>i' = Inr x\<^sub>j)" by (auto split: if_splits) then have "x\<^sub>i = x\<^sub>i'" "x\<^sub>i' \ lvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i')" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i'] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i'" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i'" x\<^sub>j] using \x\<^sub>i \ lvars (\ s)\ \x\<^sub>i \ rvars (\ s')\ using pivotandupdate_rvars by (auto simp add: normalized_tableau_def) show "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (LB dir s x\<^sub>i)" proof assume "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)" then have "\\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)" using dir by (cases "LB dir s x\<^sub>i") (auto simp add: bound_compare_defs) moreover then have "\ \\<^sub>u\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (UB dir s x\<^sub>i)" using \\ s\ dir using bounds_consistent_gt_ub bounds_consistent_lt_lb by (force simp add: bound_compare''_defs) ultimately show "\\ s'\ x\<^sub>i = the (LB dir s x\<^sub>i)" using * \x\<^sub>i = x\<^sub>i'\ \s' = pivot_and_update x\<^sub>i' x\<^sub>j c s\ using \\ (\ s)\ \\ s\ \x\<^sub>i' \ lvars (\ s)\ \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i')\ using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] dir by (case_tac[!] "\\<^sub>l s x\<^sub>i'", case_tac[!] "\\<^sub>u s x\<^sub>i'") (auto simp add: bound_compare_defs map2fun_def) qed have "\ \\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' \ \\ s\ x\<^sub>i' >\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i'" using \min_lvar_not_in_bounds s = Some x\<^sub>i'\ using min_lvar_not_in_bounds_Some[of s x\<^sub>i'] using not_in_bounds[of x\<^sub>i' "\\ s\" "\\<^sub>l s" "\\<^sub>u s"] by auto then show "\\ s'\ x\<^sub>i = the (\\<^sub>l s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (\\<^sub>u s x\<^sub>i)" using \\ (\ s)\ \\ s\ \x\<^sub>i' \ lvars (\ s)\ \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i')\ using \s' = pivot_and_update x\<^sub>i' x\<^sub>j c s\ \x\<^sub>i = x\<^sub>i'\ using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] using * by (case_tac[!] "\\<^sub>l s x\<^sub>i'", case_tac[!] 
"\\<^sub>u s x\<^sub>i'") (auto simp add: map2fun_def bound_compare_defs) qed lemma succ_rvar_valuation: assumes "s \ s'" "x \ rvars (\ s')" shows "\\ s'\ x = \\ s\ x \ \\ s'\ x = the (\\<^sub>l s x) \ \\ s'\ x = the (\\<^sub>u s x)" proof- from assms obtain x\<^sub>i x\<^sub>j b where "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)" "s' = pivot_and_update x\<^sub>i x\<^sub>j b s" unfolding gt_state_def by (auto simp add: Let_def split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" "x\<^sub>j \ rvars (\ s)" "x\<^sub>j \ lvars (\ s)" "x\<^sub>i \ x\<^sub>j" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using rvars_of_lvar_rvars \\ (\ s)\ by (auto simp add: normalized_tableau_def) then have "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" "x \ rvars (\ s) \ x = x\<^sub>i" "x \ x\<^sub>j" "x \ x\<^sub>i \ x \ lvars (\ s)" using \x \ rvars (\ s')\ using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j] using \\ (\ s)\ \\ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ by (auto simp add: normalized_tableau_def) then show ?thesis using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j b] using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j x b] using \x\<^sub>i \ lvars (\ s)\ \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ using \\ (\ s)\ \\ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ \b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)\ by (auto simp add: map2fun_def) qed lemma succ_no_vars_valuation: assumes "s \ s'" "x \ tvars (\ s')" shows "look (\ s') x = look (\ s) x" proof- from assms obtain x\<^sub>i x\<^sub>j b where "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)" "s' = pivot_and_update x\<^sub>i x\<^sub>j b s" unfolding gt_state_def by (auto simp add: Let_def split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" "x\<^sub>j \ rvars (\ s)" "x\<^sub>j \ lvars (\ s)" "x\<^sub>i \ x\<^sub>j" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using rvars_of_lvar_rvars \\ (\ s)\ by (auto simp add: normalized_tableau_def) then show ?thesis using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j x b] using \\ (\ s)\ \\ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ using \x \ tvars (\ s')\ using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j] using pivotandupdate_lvars[of s x\<^sub>i x\<^sub>j] by (auto simp add: map2fun_def) qed lemma succ_valuation_satisfies: assumes "s \ s'" "\\ s\ \\<^sub>t \ s" shows "\\ s'\ \\<^sub>t \ s'" proof- from \s \ s'\ obtain x\<^sub>i x\<^sub>j b where "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr 
x\<^sub>j" "b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)" "s' = pivot_and_update x\<^sub>i x\<^sub>j b s" unfolding gt_state_def by (auto simp add: Let_def split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] \\ (\ s)\ by (auto simp add: normalized_tableau_def) then show ?thesis using pivotandupdate_satisfies_tableau[of s x\<^sub>i x\<^sub>j b] using pivotandupdate_tableau_equiv[of s x\<^sub>i x\<^sub>j ] using \\ (\ s)\ \\ s\ \\\ s\ \\<^sub>t \ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ by auto qed lemma succ_tableau_valuated: assumes "s \ s'" "\ s" shows "\ s'" using succ_inv(2) assms by blast (* -------------------------------------------------------------------------- *) abbreviation succ_chain where "succ_chain l \ rel_chain l succ_rel" lemma succ_chain_induct: assumes *: "succ_chain l" "i \ j" "j < length l" assumes base: "\ i. P i i" assumes step: "\ i. l ! i \ (l ! (i + 1)) \ P i (i + 1)" assumes trans: "\ i j k. \P i j; P j k; i < j; j \ k\ \ P i k" shows "P i j" using * proof (induct "j - i" arbitrary: i) case 0 then show ?case by (simp add: base) next case (Suc k) have "P (i + 1) j" using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) by auto moreover have "P i (i + 1)" proof (rule step) show "l ! i \ (l ! (i + 1))" using Suc(2) Suc(3) Suc(5) unfolding rel_chain_def by auto qed ultimately show ?case using trans[of i "i + 1" j] Suc(2) by simp qed lemma succ_chain_bounds_id: assumes "succ_chain l" "i \ j" "j < length l" shows "\\<^sub>i (l ! i) = \\<^sub>i (l ! j)" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "\\<^sub>i (l ! i) = \\<^sub>i (l ! (i + 1))" by (rule succ_inv(4)) qed simp_all lemma succ_chain_vars_id': assumes "succ_chain l" "i \ j" "j < length l" shows "lvars (\ (l ! i)) \ rvars (\ (l ! i)) = lvars (\ (l ! j)) \ rvars (\ (l ! j))" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "tvars (\ (l ! i)) = tvars (\ (l ! (i + 1)))" by (rule succ_vars_id) qed simp_all lemma succ_chain_vars_id: assumes "succ_chain l" "i < length l" "j < length l" shows "lvars (\ (l ! i)) \ rvars (\ (l ! i)) = lvars (\ (l ! j)) \ rvars (\ (l ! j))" proof (cases "i \ j") case True then show ?thesis using assms succ_chain_vars_id'[of l i j] by simp next case False then have "j \ i" by simp then show ?thesis using assms succ_chain_vars_id'[of l j i] by simp qed lemma succ_chain_tableau_equiv': assumes "succ_chain l" "i \ j" "j < length l" shows "(v::'a valuation) \\<^sub>t \ (l ! i) \ v \\<^sub>t \ (l ! j)" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "v \\<^sub>t \ (l ! i) = v \\<^sub>t \ (l ! (i + 1))" by (rule succ_inv(5)) qed simp_all lemma succ_chain_tableau_equiv: assumes "succ_chain l" "i < length l" "j < length l" shows "(v::'a valuation) \\<^sub>t \ (l ! i) \ v \\<^sub>t \ (l ! j)" proof (cases "i \ j") case True then show ?thesis using assms succ_chain_tableau_equiv'[of l i j v] by simp next case False then have "j \ i" by auto then show ?thesis using assms succ_chain_tableau_equiv'[of l j i v] by simp qed lemma succ_chain_no_vars_valuation: assumes "succ_chain l" "i \ j" "j < length l" shows "\ x. x \ tvars (\ (l ! i)) \ look (\ (l ! i)) x = look (\ (l ! 
j)) x" (is "?P i j") using assms proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc k) have "?P (i + 1) j" using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) by auto moreover have "?P (i + 1) i" proof (rule+, rule succ_no_vars_valuation) show "l ! i \ (l ! (i + 1))" using Suc(2) Suc(3) Suc(5) unfolding rel_chain_def by auto qed moreover have "tvars (\ (l ! i)) = tvars (\ (l ! (i + 1)))" proof (rule succ_vars_id) show "l ! i \ (l ! (i + 1))" using Suc(2) Suc(3) Suc(5) unfolding rel_chain_def by simp qed ultimately show ?case by simp qed lemma succ_chain_rvar_valuation: assumes "succ_chain l" "i \ j" "j < length l" shows "\x\rvars (\ (l ! j)). \\ (l ! j)\ x = \\ (l ! i)\ x \ \\ (l ! j)\ x = the (\\<^sub>l (l ! i) x) \ \\ (l ! j)\ x = the (\\<^sub>u (l ! i) x)" (is "?P i j") using assms proof (induct "j - i" arbitrary: j) case 0 then show ?case by simp next case (Suc k) have "k = j - 1 - i" "succ_chain l" "i \ j - 1" "j - 1 < length l" "j > 0" using Suc(2) Suc(3) Suc(4) Suc(5) by auto then have ji: "?P i (j - 1)" using Suc(1) by simp have "l ! (j - 1) \ (l ! j)" using Suc(3) \j < length l\ \j > 0\ unfolding rel_chain_def by (erule_tac x="j - 1" in allE) simp then have jj: "?P (j - 1) j" using succ_rvar_valuation by auto obtain x\<^sub>i x\<^sub>j where vars: "x\<^sub>i \ lvars (\ (l ! (j - 1)))" "x\<^sub>j \ rvars (\ (l ! (j - 1)))" "rvars (\ (l ! j)) = rvars (\ (l ! (j - 1))) - {x\<^sub>j} \ {x\<^sub>i}" using \l ! (j - 1) \ (l ! j)\ by (rule succ_vars) simp then have bounds: "\\<^sub>l (l ! (j - 1)) = \\<^sub>l (l ! i)" "\\<^sub>l (l ! j) = \\<^sub>l (l ! i)" "\\<^sub>u (l ! (j - 1)) = \\<^sub>u (l ! i)" "\\<^sub>u (l ! j) = \\<^sub>u (l ! i)" using \succ_chain l\ using succ_chain_bounds_id[of l i "j - 1", THEN sym] \j - 1 < length l\ \i \ j - 1\ using succ_chain_bounds_id[of l "j - 1" j, THEN sym] \j < length l\ by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) show ?case proof fix x assume "x \ rvars (\ (l ! j))" then have "x \ x\<^sub>j \ x \ rvars (\ (l ! (j - 1))) \ x = x\<^sub>i" using vars by auto then show "\\ (l ! j)\ x = \\ (l ! i)\ x \ \\ (l ! j)\ x = the (\\<^sub>l (l ! i) x) \ \\ (l ! j)\ x = the (\\<^sub>u (l ! i) x)" proof assume "x \ x\<^sub>j \ x \ rvars (\ (l ! (j - 1)))" then show ?thesis using jj \x \ rvars (\ (l ! j))\ ji using bounds by force next assume "x = x\<^sub>i" then show ?thesis using succ_set_on_bound(2)[of "l ! (j - 1)" "l ! j" x\<^sub>i] \l ! (j - 1) \ (l ! j)\ using vars bounds by auto qed qed qed lemma succ_chain_valuation_satisfies: assumes "succ_chain l" "i \ j" "j < length l" shows "\\ (l ! i)\ \\<^sub>t \ (l ! i) \ \\ (l ! j)\ \\<^sub>t \ (l ! j)" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "\\ (l ! i)\ \\<^sub>t \ (l ! i) \ \\ (l ! (i + 1))\ \\<^sub>t \ (l ! (i + 1))" using succ_valuation_satisfies by auto qed simp_all lemma succ_chain_tableau_valuated: assumes "succ_chain l" "i \ j" "j < length l" shows "\ (l ! i) \ \ (l ! j)" using assms proof(rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "\ (l ! i) \ \ (l ! (i + 1))" using succ_tableau_valuated by auto qed simp_all abbreviation swap_lr where "swap_lr l i x \ i + 1 < length l \ x \ lvars (\ (l ! i)) \ x \ rvars (\ (l ! (i + 1)))" abbreviation swap_rl where "swap_rl l i x \ i + 1 < length l \ x \ rvars (\ (l ! i)) \ x \ lvars (\ (l ! (i + 1)))" abbreviation always_r where "always_r l i j x \ \ k. i \ k \ k \ j \ x \ rvars (\ (l ! 
k))" lemma succ_chain_always_r_valuation_id: assumes "succ_chain l" "i \ j" "j < length l" shows "always_r l i j x \ \\ (l ! i)\ x = \\ (l ! j)\ x" (is "?P i j") using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "?P i (i + 1)" using succ_rvar_valuation_id by simp qed simp_all lemma succ_chain_swap_rl_exists: assumes "succ_chain l" "i < j" "j < length l" "x \ rvars (\ (l ! i))" "x \ lvars (\ (l ! j))" shows "\ k. i \ k \ k < j \ swap_rl l k x" using assms proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc k) have "l ! i \ (l ! (i + 1))" using Suc(3) Suc(4) Suc(5) unfolding rel_chain_def by auto then have "\ (\ (l ! (i + 1)))" by (rule succ_inv) show ?case proof (cases "x \ rvars (\ (l ! (i + 1)))") case True then have "j \ i + 1" using Suc(7) \\ (\ (l ! (i + 1)))\ by (auto simp add: normalized_tableau_def) have "k = j - Suc i" using Suc(2) by simp then obtain k where "k \ i + 1" "k < j" "swap_rl l k x" using \x \ rvars (\ (l ! (i + 1)))\ \j \ i + 1\ using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) Suc(6) Suc(7) by auto then show ?thesis by (rule_tac x="k" in exI) simp next case False then have "x \ lvars (\ (l ! (i + 1)))" using Suc(6) using \l ! i \ (l ! (i + 1))\ succ_vars_id by auto then show ?thesis using Suc(4) Suc(5) Suc(6) by force qed qed lemma succ_chain_swap_lr_exists: assumes "succ_chain l" "i < j" "j < length l" "x \ lvars (\ (l ! i))" "x \ rvars (\ (l ! j))" shows "\ k. i \ k \ k < j \ swap_lr l k x" using assms proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc k) have "l ! i \ (l ! (i + 1))" using Suc(3) Suc(4) Suc(5) unfolding rel_chain_def by auto then have "\ (\ (l ! (i + 1)))" by (rule succ_inv) show ?case proof (cases "x \ lvars (\ (l ! (i + 1)))") case True then have "j \ i + 1" using Suc(7) \\ (\ (l ! (i + 1)))\ by (auto simp add: normalized_tableau_def) have "k = j - Suc i" using Suc(2) by simp then obtain k where "k \ i + 1" "k < j" "swap_lr l k x" using \x \ lvars (\ (l ! (i + 1)))\ \j \ i + 1\ using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) Suc(6) Suc(7) by auto then show ?thesis by (rule_tac x="k" in exI) simp next case False then have "x \ rvars (\ (l ! (i + 1)))" using Suc(6) using \l ! i \ (l ! (i + 1))\ succ_vars_id by auto then show ?thesis using Suc(4) Suc(5) Suc(6) by force qed qed (* -------------------------------------------------------------------------- *) lemma finite_tableaus_aux: shows "finite {t. lvars t = L \ rvars t = V - L \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" (is "finite (?Al L)") proof (cases "?Al L = {}") case True show ?thesis by (subst True) simp next case False then have "\ t. t \ ?Al L" by auto let ?t = "SOME t. t \ ?Al L" have "?t \ ?Al L" using \\ t. t \ ?Al L\ by (rule someI_ex) have "?Al L \ {t. t <~~> ?t}" proof fix x assume "x \ ?Al L" have "x <~~> ?t" apply (rule tableau_perm) using \?t \ ?Al L\ \x \ ?Al L\ by auto then show "x \ {t. t <~~> ?t}" by simp qed moreover have "finite {t. t <~~> ?t}" by (rule perm_finite) ultimately show ?thesis by (rule finite_subset) qed lemma finite_tableaus: assumes "finite V" shows "finite {t. tvars t = V \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" (is "finite ?A") proof- let ?Al = "\ L. {t. lvars t = L \ rvars t = V - L \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" have "?A = \ (?Al ` {L. 
L \ V})" by (auto simp add: normalized_tableau_def) then show ?thesis using \finite V\ using finite_tableaus_aux by auto qed lemma finite_accessible_tableaus: shows "finite (\ ` {s'. s \\<^sup>* s'})" proof- have "{s'. s \\<^sup>* s'} = {s'. s \\<^sup>+ s'} \ {s}" by (auto simp add: rtrancl_eq_or_trancl) moreover have "finite (\ ` {s'. s \\<^sup>+ s'})" (is "finite ?A") proof- let ?T = "{t. tvars t = tvars (\ s) \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t(\ s))}" have "?A \ ?T" proof fix t assume "t \ ?A" then obtain s' where "s \\<^sup>+ s'" "t = \ s'" by auto then obtain l where *: "l \ []" "1 < length l" "hd l = s" "last l = s'" "succ_chain l" using trancl_rel_chain[of s s' succ_rel] by auto show "t \ ?T" proof- have "tvars (\ s') = tvars (\ s)" using succ_chain_vars_id[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by simp moreover have "\ (\ s')" using \s \\<^sup>+ s'\ using succ_inv(1)[of _ s'] by (auto dest: tranclD2) moreover have "\v::'a valuation. v \\<^sub>t \ s' = v \\<^sub>t \ s" using succ_chain_tableau_equiv[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto ultimately show ?thesis using \t = \ s'\ by simp qed qed moreover have "finite (tvars (\ s))" by (auto simp add: lvars_def rvars_def finite_vars) ultimately show ?thesis using finite_tableaus[of "tvars (\ s)" "\ s"] by (auto simp add: finite_subset) qed ultimately show ?thesis by simp qed abbreviation check_valuation where "check_valuation (v::'a valuation) v0 bl0 bu0 t0 V \ \ t. tvars t = V \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0) \ v \\<^sub>t t \ (\ x \ rvars t. v x = v0 x \ v x = bl0 x \ v x = bu0 x) \ (\ x. x \ V \ v x = v0 x)" lemma finite_valuations: assumes "finite V" shows "finite {v::'a valuation. check_valuation v v0 bl0 bu0 t0 V}" (is "finite ?A") proof- let ?Al = "\ L. {t. lvars t = L \ rvars t = V - L \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" let ?Vt = "\ t. {v::'a valuation. v \\<^sub>t t \ (\ x \ rvars t. v x = v0 x \ v x = bl0 x \ v x = bu0 x) \ (\ x. x \ V \ v x = v0 x)}" have "finite {L. L \ V}" using \finite V\ by auto have "\ L. L \ V \ finite (?Al L)" using finite_tableaus_aux by auto have "\ L t. L \ V \ t \ ?Al L \ finite (?Vt t)" proof (safe) fix L t assume "lvars t \ V" "rvars t = V - lvars t" "\ t" "\v. v \\<^sub>t t = v \\<^sub>t t0" then have "rvars t \ lvars t = V" by auto let ?f = "\ v x. if x \ rvars t then v x else 0" have "inj_on ?f (?Vt t)" unfolding inj_on_def proof (safe, rule ext) fix v1 v2 x assume "(\x. if x \ rvars t then v1 x else (0 :: 'a)) = (\x. if x \ rvars t then v2 x else (0 :: 'a))" (is "?f1 = ?f2") have "\x\rvars t. v1 x = v2 x" proof fix x assume "x \ rvars t" then show "v1 x = v2 x" using \?f1 = ?f2\ fun_cong[of ?f1 ?f2 x] by auto qed assume *: "v1 \\<^sub>t t" "v2 \\<^sub>t t" "\x. x \ V \ v1 x = v0 x" "\x. x \ V \ v2 x = v0 x" show "v1 x = v2 x" proof (cases "x \ lvars t") case False then show ?thesis using * \\x\rvars t. v1 x = v2 x\ \rvars t \ lvars t = V\ by auto next case True let ?eq = "eq_for_lvar t x" have "?eq \ set t \ lhs ?eq = x" using eq_for_lvar \x \ lvars t\ by simp then have "v1 x = rhs ?eq \ v1 \" "v2 x = rhs ?eq \ v2 \" using \v1 \\<^sub>t t\ \v2 \\<^sub>t t\ unfolding satisfies_tableau_def satisfies_eq_def by auto moreover have "rhs ?eq \ v1 \ = rhs ?eq \ v2 \" apply (rule valuate_depend) using \\x\rvars t. v1 x = v2 x\ \?eq \ set t \ lhs ?eq = x\ unfolding rvars_def by auto ultimately show ?thesis by simp qed qed let ?R = "{v. \ x. 
if x \ rvars t then v x = v0 x \ v x = bl0 x \ v x = bu0 x else v x = 0 }" have "?f ` (?Vt t) \ ?R" by auto moreover have "finite ?R" proof- have "finite (rvars t)" using \finite V\ \rvars t \ lvars t = V\ using finite_subset[of "rvars t" V] by auto moreover let ?R' = "{v. \ x. if x \ rvars t then v x \ {v0 x, bl0 x, bu0 x} else v x = 0}" have "?R = ?R'" by auto ultimately show ?thesis using finite_fun_args[of "rvars t" "\ x. {v0 x, bl0 x, bu0 x}" "\ x. 0"] by auto qed ultimately have "finite (?f ` (?Vt t))" by (simp add: finite_subset) then show "finite (?Vt t)" using \inj_on ?f (?Vt t)\ by (auto dest: finite_imageD) qed have "?A = \ (\ (((`) ?Vt) ` (?Al ` {L. L \ V})))" (is "?A = ?A'") by (auto simp add: normalized_tableau_def cong del: image_cong_simp) moreover have "finite ?A'" proof (rule finite_Union) show "finite (\ (((`) ?Vt) ` (?Al ` {L. L \ V})))" using \finite {L. L \ V}\ \\ L. L \ V \ finite (?Al L)\ by auto next fix M assume "M \ \ (((`) ?Vt) ` (?Al ` {L. L \ V}))" then obtain L t where "L \ V" "t \ ?Al L" "M = ?Vt t" by blast then show "finite M" using \\ L t. L \ V \ t \ ?Al L \ finite (?Vt t)\ by blast qed ultimately show ?thesis by simp qed lemma finite_accessible_valuations: shows "finite (\ ` {s'. s \\<^sup>* s'})" proof- have "{s'. s \\<^sup>* s'} = {s'. s \\<^sup>+ s'} \ {s}" by (auto simp add: rtrancl_eq_or_trancl) moreover have "finite (\ ` {s'. s \\<^sup>+ s'})" (is "finite ?A") proof- let ?P = "\ v. check_valuation v (\\ s\) (\ x. the (\\<^sub>l s x)) (\ x. the (\\<^sub>u s x)) (\ s) (tvars (\ s))" let ?P' = "\ v::(var, 'a) mapping. \ t. tvars t = tvars (\ s) \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t \ s) \ \v\ \\<^sub>t t \ (\ x \ rvars t. \v\ x = \\ s\ x \ \v\ x = the (\\<^sub>l s x) \ \v\ x = the (\\<^sub>u s x)) \ (\ x. x \ tvars (\ s) \ look v x = look (\ s) x) \ (\ x. x \ tvars (\ s) \ look v x \ None)" have "finite (tvars (\ s))" by (auto simp add: lvars_def rvars_def finite_vars) then have "finite {v. ?P v}" using finite_valuations[of "tvars (\ s)" "\ s" "\\ s\" "\ x. the (\\<^sub>l s x)" "\ x. the (\\<^sub>u s x)"] by auto moreover have "map2fun ` {v. ?P' v} \ {v. ?P v}" by (auto simp add: map2fun_def) ultimately have "finite (map2fun ` {v. ?P' v})" by (auto simp add: finite_subset) moreover have "inj_on map2fun {v. ?P' v}" unfolding inj_on_def proof (safe) fix x y assume "\x\ = \y\" and *: "\x. x \ Simplex.tvars (\ s) \ look y x = look (\ s) x" "\xa. xa \ Simplex.tvars (\ s) \ look x xa = look (\ s) xa" "\x. x \ Simplex.tvars (\ s) \ look y x \ None" "\xa. xa \ Simplex.tvars (\ s) \ look x xa \ None" show "x = y" proof (rule mapping_eqI) fix k have "\x\ k = \y\ k" using \\x\ = \y\\ by simp then show "look x k = look y k" using * by (cases "k \ tvars (\ s)") (auto simp add: map2fun_def split: option.split) qed qed ultimately have "finite {v. ?P' v}" by (rule finite_imageD) moreover have "?A \ {v. ?P' v}" proof (safe) fix s' assume "s \\<^sup>+ s'" then obtain l where *: "l \ []" "1 < length l" "hd l = s" "last l = s'" "succ_chain l" using trancl_rel_chain[of s s' succ_rel] by auto show "?P' (\ s')" proof- have "\ s" "\ (\ s)" "\\ s\ \\<^sub>t \ s" using \s \\<^sup>+ s'\ using tranclD[of s s' succ_rel] by (auto simp add: curr_val_satisfies_no_lhs_def) have "tvars (\ s') = tvars (\ s)" using succ_chain_vars_id[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by simp moreover have "\(\ s')" using \s \\<^sup>+ s'\ using succ_inv(1)[of _ s'] by (auto dest: tranclD2) moreover have "\v::'a valuation. 
v \\<^sub>t \ s' = v \\<^sub>t \ s" using succ_chain_tableau_equiv[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto moreover have "\\ s'\ \\<^sub>t \ s'" using succ_chain_valuation_satisfies[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] \\\ s\ \\<^sub>t \ s\ by simp moreover have "\x\rvars (\ s'). \\ s'\ x = \\ s\ x \ \\ s'\ x = the (\\<^sub>l s x) \ \\ s'\ x = the (\\<^sub>u s x)" using succ_chain_rvar_valuation[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto moreover have "\x. x \ tvars (\ s) \ look (\ s') x = look (\ s) x" using succ_chain_no_vars_valuation[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto moreover have "\x. x \ Simplex.tvars (\ s') \ look (\ s') x \ None" using succ_chain_tableau_valuated[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] using \tvars (\ s') = tvars (\ s)\ \\ s\ by (auto simp add: tableau_valuated_def) ultimately show ?thesis by (rule_tac x="\ s'" in exI) auto qed qed ultimately show ?thesis by (auto simp add: finite_subset) qed ultimately show ?thesis by simp qed lemma accessible_bounds: shows "\\<^sub>i ` {s'. s \\<^sup>* s'} = {\\<^sub>i s}" proof - have "s \\<^sup>* s' \ \\<^sub>i s' = \\<^sub>i s" for s' by (induct s s' rule: rtrancl.induct, auto) then show ?thesis by blast qed lemma accessible_unsat_core: shows "\\<^sub>c ` {s'. s \\<^sup>* s'} = {\\<^sub>c s}" proof - have "s \\<^sup>* s' \ \\<^sub>c s' = \\<^sub>c s" for s' by (induct s s' rule: rtrancl.induct, auto) then show ?thesis by blast qed lemma state_eqI: "\\<^sub>i\<^sub>l s = \\<^sub>i\<^sub>l s' \ \\<^sub>i\<^sub>u s = \\<^sub>i\<^sub>u s' \ \ s = \ s' \ \ s = \ s' \ \ s = \ s' \ \\<^sub>c s = \\<^sub>c s' \ s = s'" by (cases s, cases s', auto) lemma finite_accessible_states: shows "finite {s'. s \\<^sup>* s'}" (is "finite ?A") proof- let ?V = "\ ` ?A" let ?T = "\ ` ?A" let ?P = "?V \ ?T \ {\\<^sub>i s} \ {True, False} \ {\\<^sub>c s}" have "finite ?P" using finite_accessible_valuations finite_accessible_tableaus by auto moreover let ?f = "\ s. (\ s, \ s, \\<^sub>i s, \ s, \\<^sub>c s)" have "?f ` ?A \ ?P" using accessible_bounds[of s] accessible_unsat_core[of s] by auto moreover have "inj_on ?f ?A" unfolding inj_on_def by (auto intro: state_eqI) ultimately show ?thesis using finite_imageD [of ?f ?A] using finite_subset by auto qed (* -------------------------------------------------------------------------- *) lemma acyclic_suc_rel: "acyclic succ_rel" proof (rule acyclicI, rule allI) fix s show "(s, s) \ succ_rel\<^sup>+" proof assume "s \\<^sup>+ s" then obtain l where "l \ []" "length l > 1" "hd l = s" "last l = s" "succ_chain l" using trancl_rel_chain[of s s succ_rel] by auto have "l ! 0 = s" using \l \ []\ \hd l = s\ by (simp add: hd_conv_nth) then have "s \ (l ! 1)" using \succ_chain l\ unfolding rel_chain_def using \length l > 1\ by auto then have "\ (\ s)" by simp let ?enter_rvars = "{x. \ sl. swap_lr l sl x}" have "finite ?enter_rvars" proof- let ?all_vars = "\ (set (map (\ t. lvars t \ rvars t) (map \ l)))" have "finite ?all_vars" by (auto simp add: lvars_def rvars_def finite_vars) moreover have "?enter_rvars \ ?all_vars" by force ultimately show ?thesis by (simp add: finite_subset) qed let ?xr = "Max ?enter_rvars" have "?xr \ ?enter_rvars" proof (rule Max_in) show "?enter_rvars \ {}" proof- from \s \ (l ! 1)\ obtain x\<^sub>i x\<^sub>j :: var where "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ (l ! 
1))" by (rule succ_vars) auto then have "x\<^sub>i \ ?enter_rvars" using \hd l = s\ \l \ []\ \length l > 1\ by (auto simp add: hd_conv_nth) then show ?thesis by auto qed next show "finite ?enter_rvars" using \finite ?enter_rvars\ . qed then obtain xr sl where "xr = ?xr" "swap_lr l sl xr" by auto then have "sl + 1 < length l" by simp have "(l ! sl) \ (l ! (sl + 1))" using \sl + 1 < length l\ \succ_chain l\ unfolding rel_chain_def by auto have "length l > 2" proof (rule ccontr) assume "\ ?thesis" with \length l > 1\ have "length l = 2" by auto then have "last l = l ! 1" by (cases l) (auto simp add: last_conv_nth nth_Cons split: nat.split) then have "xr \ lvars (\ s)" "xr \ rvars (\ s)" using \length l = 2\ using \swap_lr l sl xr\ using \hd l = s\ \last l = s\ \l \ []\ by (auto simp add: hd_conv_nth) then show False using \\ (\ s)\ unfolding normalized_tableau_def by auto qed obtain l' where "hd l' = l ! (sl + 1)" "last l' = l ! sl" "length l' = length l - 1" "succ_chain l'" and l'_l: "\ i. i + 1 < length l' \ (\ j. j + 1 < length l \ l' ! i = l ! j \ l' ! (i + 1) = l ! (j + 1))" using \length l > 2\ \sl + 1 < length l\ \hd l = s\ \last l = s\ \succ_chain l\ using reorder_cyclic_list[of l s sl] by blast then have "xr \ rvars (\ (hd l'))" "xr \ lvars (\ (last l'))" "length l' > 1" "l' \ []" using \swap_lr l sl xr\ \length l > 2\ by auto then have "\ sp. swap_rl l' sp xr" using \succ_chain l'\ using succ_chain_swap_rl_exists[of l' 0 "length l' - 1" xr] by (auto simp add: hd_conv_nth last_conv_nth) then have "\ sp. swap_rl l' sp xr \ (\ sp'. sp' < sp \ \ swap_rl l' sp' xr)" by (rule min_element) then obtain sp where "swap_rl l' sp xr" "\ sp'. sp' < sp \ \ swap_rl l' sp' xr" by blast then have "sp + 1 < length l'" by simp have "\\ (l' ! 0)\ xr = \\ (l' ! sp)\ xr" proof- have "always_r l' 0 sp xr" using \xr \ rvars (\ (hd l'))\ \sp + 1 < length l'\ \\ sp'. sp' < sp \ \ swap_rl l' sp' xr\ proof (induct sp) case 0 then have "l' \ []" by auto then show ?case using 0(1) by (auto simp add: hd_conv_nth) next case (Suc sp') show ?case proof (safe) fix k assume "k \ Suc sp'" show "xr \ rvars (\ (l' ! k))" proof (cases "k = sp' + 1") case False then show ?thesis using Suc \k \ Suc sp'\ by auto next case True then have "xr \ rvars (\ (l' ! (k - 1)))" using Suc by auto moreover then have "xr \ lvars (\ (l' ! k))" using True Suc(3) Suc(4) by auto moreover have "(l' ! (k - 1)) \ (l' ! k)" using \succ_chain l'\ using Suc(3) True by (simp add: rel_chain_def) ultimately show ?thesis using succ_vars_id[of "l' ! (k - 1)" "l' ! k"] by auto qed qed qed then show ?thesis using \sp + 1 < length l'\ using \succ_chain l'\ using succ_chain_always_r_valuation_id by simp qed have "(l' ! sp) \ (l' ! (sp+1))" using \sp + 1 < length l'\ \succ_chain l'\ unfolding rel_chain_def by simp then obtain xs xr' :: var where "xs \ lvars (\ (l' ! sp))" "xr \ rvars (\ (l' ! sp))" "swap_lr l' sp xs" apply (rule succ_vars) using \swap_rl l' sp xr\ \sp + 1 < length l'\ by auto then have "xs \ xr" using \(l' ! sp) \ (l' ! (sp+1))\ by (auto simp add: normalized_tableau_def) obtain sp' where "l' ! sp = l ! sp'" "l' ! (sp + 1) = l ! (sp' + 1)" "sp' + 1 < length l" using \sp + 1 < length l'\ l'_l by auto have "xs \ ?enter_rvars" using \swap_lr l' sp xs\ l'_l by force have "xs < xr" proof- have "xs \ ?xr" using \finite ?enter_rvars\ \xs \ ?enter_rvars\ by (rule Max_ge) then show ?thesis using \xr = ?xr\ \xs \ xr\ by simp qed let ?sl = "l ! sl" let ?sp = "l' ! 
sp" let ?eq = "eq_for_lvar (\ ?sp) xs" let ?bl = "\ ?sl" let ?bp = "\ ?sp" have "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sl" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sp" using \l ! sl \ (l ! (sl + 1))\ using \l' ! sp \ (l' ! (sp+ 1))\ by simp_all have "\\<^sub>i ?sp = \\<^sub>i ?sl" proof- have "\\<^sub>i (l' ! sp) = \\<^sub>i (l' ! (length l' - 1))" using \sp + 1 < length l'\ \succ_chain l'\ using succ_chain_bounds_id by auto then have "\\<^sub>i (last l') = \\<^sub>i (l' ! sp)" using \l' \ []\ by (simp add: last_conv_nth) then show ?thesis using \last l' = l ! sl\ by simp qed have diff_satified: "\?bl\ xs - \?bp\ xs = ((rhs ?eq) \ \?bl\ \) - ((rhs ?eq) \ \?bp\ \)" proof- have "\?bp\ \\<^sub>e ?eq" using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sp\ using eq_for_lvar[of xs "\ ?sp"] using \xs \ lvars (\ (l' ! sp))\ unfolding curr_val_satisfies_no_lhs_def satisfies_tableau_def by auto moreover have "\?bl\ \\<^sub>e ?eq" proof- have "\\ (l ! sl)\ \\<^sub>t \ (l' ! sp)" using \l' ! sp = l ! sp'\ \sp' + 1 < length l\ \sl + 1 < length l\ using \succ_chain l\ using succ_chain_tableau_equiv[of l sl sp'] using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sl\ unfolding curr_val_satisfies_no_lhs_def by simp then show ?thesis unfolding satisfies_tableau_def using eq_for_lvar using \xs \ lvars (\ (l' ! sp))\ by simp qed moreover have "lhs ?eq = xs" using \xs \ lvars (\ (l' ! sp))\ using eq_for_lvar by simp ultimately show ?thesis unfolding satisfies_eq_def by auto qed have "\ in_bounds xr \?bl\ (\ ?sl)" using \l ! sl \ (l ! (sl + 1))\ \swap_lr l sl xr\ using succ_min_lvar_not_in_bounds(1)[of ?sl "l ! (sl + 1)" xr] by simp have "\ x. x < xr \ in_bounds x \?bl\ (\ ?sl)" proof (safe) fix x assume "x < xr" show "in_bounds x \?bl\ (\ ?sl)" proof (cases "x \ lvars (\ ?sl)") case True then show ?thesis using succ_min_lvar_not_in_bounds(2)[of ?sl "l ! (sl + 1)" xr] using \l ! sl \ (l ! (sl + 1))\ \swap_lr l sl xr\ \x < xr\ by simp next case False then show ?thesis using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sl\ unfolding curr_val_satisfies_no_lhs_def by (simp add: satisfies_bounds_set.simps) qed qed then have "in_bounds xs \?bl\ (\ ?sl)" using \xs < xr\ by simp have "\ in_bounds xs \?bp\ (\ ?sp)" using \l' ! sp \ (l' ! (sp + 1))\ \swap_lr l' sp xs\ using succ_min_lvar_not_in_bounds(1)[of ?sp "l' ! (sp + 1)" xs] by simp have "\ x \ rvars_eq ?eq. x > xr \ \?bp\ x = \?bl\ x" proof (safe) fix x assume "x \ rvars_eq ?eq" "x > xr" then have "always_r l' 0 (length l' - 1) x" proof (safe) fix k assume "x \ rvars_eq ?eq" "x > xr" "0 \ k" "k \ length l' - 1" obtain k' where "l ! k' = l' ! k" "k' < length l" using l'_l \k \ length l' - 1\ \length l' > 1\ apply (cases "k > 0") apply (erule_tac x="k - 1" in allE) apply (drule mp) by auto let ?eq' = "eq_for_lvar (\ (l ! sp')) xs" have "\ x \ rvars_eq ?eq'. x > xr \ always_r l 0 (length l - 1) x" proof (safe) fix x k assume "x \ rvars_eq ?eq'" "xr < x" "0 \ k" "k \ length l - 1" then have "x \ rvars (\ (l ! sp'))" using eq_for_lvar[of xs "\ (l ! sp')"] using \swap_lr l' sp xs\ \l' ! sp = l ! sp'\ by (auto simp add: rvars_def) have *: "\ i. i < sp' \ x \ rvars (\ (l ! i))" proof (safe, rule ccontr) fix i assume "i < sp'" "x \ rvars (\ (l ! i))" then have "x \ lvars (\ (l ! i))" using \x \ rvars (\ (l ! sp'))\ using \sp' + 1 < length l\ using \succ_chain l\ using succ_chain_vars_id[of l i sp'] by auto obtain i' where "swap_lr l i' x" using \x \ lvars (\ (l ! i))\ using \x \ rvars (\ (l ! 
sp'))\ using \i < sp'\ \sp' + 1 < length l\ using \succ_chain l\ using succ_chain_swap_lr_exists[of l i sp' x] by auto then have "x \ ?enter_rvars" by auto then have "x \ ?xr" using \finite ?enter_rvars\ using Max_ge[of ?enter_rvars x] by simp then show False using \x > xr\ using \xr = ?xr\ by simp qed then have "x \ rvars (\ (last l))" using \hd l = s\ \last l = s\ \l \ []\ using \x \ rvars (\ (l ! sp'))\ by (auto simp add: hd_conv_nth) show "x \ rvars (\ (l ! k))" proof (cases "k = length l - 1") case True then show ?thesis using \x \ rvars (\ (last l))\ using \l \ []\ by (simp add: last_conv_nth) next case False then have "k < length l - 1" using \k \ length l - 1\ by simp then have "k < length l" using \length l > 1\ by auto show ?thesis proof (rule ccontr) assume "\ ?thesis" then have "x \ lvars (\ (l ! k))" using \x \ rvars (\ (l ! sp'))\ using \sp' + 1 < length l\ \k < length l\ using succ_chain_vars_id[of l k sp'] using \succ_chain l\ \l \ []\ by auto obtain i' where "swap_lr l i' x" using \succ_chain l\ using \x \ lvars (\ (l ! k))\ using \x \ rvars (\ (last l))\ using \k < length l - 1\ \l \ []\ using succ_chain_swap_lr_exists[of l k "length l - 1" x] by (auto simp add: last_conv_nth) then have "x \ ?enter_rvars" by auto then have "x \ ?xr" using \finite ?enter_rvars\ using Max_ge[of ?enter_rvars x] by simp then show False using \x > xr\ using \xr = ?xr\ by simp qed qed qed then have "x \ rvars (\ (l ! k'))" using \x \ rvars_eq ?eq\ \x > xr\ \k' < length l\ using \l' ! sp = l ! sp'\ by simp then show "x \ rvars (\ (l' ! k))" using \l ! k' = l' ! k\ by simp qed then have "\?bp\ x = \\ (l' ! (length l' - 1))\ x" using \succ_chain l'\ \sp + 1 < length l'\ by (auto intro!: succ_chain_always_r_valuation_id[rule_format]) then have "\?bp\ x = \\ (last l')\ x" using \l' \ []\ by (simp add: last_conv_nth) then show "\?bp\ x = \?bl\ x" using \last l' = l ! sl\ by simp qed have "\?bp\ xr = \\ (l ! (sl + 1))\ xr" using \\\ (l' ! 0)\ xr = \\ (l' ! sp)\ xr\ using \hd l' = l ! (sl + 1)\ \l' \ []\ by (simp add: hd_conv_nth) { fix dir1 dir2 :: "('i,'a) Direction" assume dir1: "dir1 = (if \?bl\ xr <\<^sub>l\<^sub>b \\<^sub>l ?sl xr then Positive else Negative)" then have "\\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)" using \\ in_bounds xr \?bl\ (\ ?sl)\ using neg_bounds_compare(7) neg_bounds_compare(3) by (auto simp add: bound_compare''_defs) then have "\ \\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)" using bounds_compare_contradictory(7) bounds_compare_contradictory(3) neg_bounds_compare(6) dir1 unfolding bound_compare''_defs by auto force have "LB dir1 ?sl xr \ None" using \\\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)\ by (cases "LB dir1 ?sl xr") (auto simp add: bound_compare_defs) assume dir2: "dir2 = (if \?bp\ xs <\<^sub>l\<^sub>b \\<^sub>l ?sp xs then Positive else Negative)" then have "\\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)" using \\ in_bounds xs \?bp\ (\ ?sp)\ using neg_bounds_compare(2) neg_bounds_compare(6) by (auto simp add: bound_compare''_defs) then have "\ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)" using bounds_compare_contradictory(3) bounds_compare_contradictory(7) neg_bounds_compare(6) dir2 unfolding bound_compare''_defs by auto force then have "\ x \ rvars_eq ?eq. x < xr \ \ reasable_var dir2 x ?eq ?sp" using succ_min_rvar[of ?sp "l' ! (sp + 1)" xs xr ?eq] using \l' ! sp \ (l' ! 
(sp + 1))\ using \swap_lr l' sp xs\ \swap_rl l' sp xr\ dir2 unfolding bound_compare''_defs by auto have "LB dir2 ?sp xs \ None" using \\\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)\ by (cases "LB dir2 ?sp xs") (auto simp add: bound_compare_defs) have *: "\ x \ rvars_eq ?eq. x < xr \ ((coeff (rhs ?eq) x > 0 \ \\<^sub>u\<^sub>b (lt dir2) (\?bp\ x) (UB dir2 ?sp x)) \ (coeff (rhs ?eq) x < 0 \ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ x) (LB dir2 ?sp x)))" proof (safe) fix x assume "x \ rvars_eq ?eq" "x < xr" "coeff (rhs ?eq) x > 0" then have "\ \\<^sub>u\<^sub>b (lt dir2) (\?bp\ x) (UB dir2 ?sp x)" using \\ x \ rvars_eq ?eq. x < xr \ \ reasable_var dir2 x ?eq ?sp\ by simp then show "\\<^sub>u\<^sub>b (lt dir2) (\?bp\ x) (UB dir2 ?sp x)" using dir2 neg_bounds_compare(4) neg_bounds_compare(8) unfolding bound_compare''_defs by force next fix x assume "x \ rvars_eq ?eq" "x < xr" "coeff (rhs ?eq) x < 0" then have "\ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ x) (LB dir2 ?sp x)" using \\ x \ rvars_eq ?eq. x < xr \ \ reasable_var dir2 x ?eq ?sp\ by simp then show "\\<^sub>l\<^sub>b (lt dir2) (\?bp\ x) (LB dir2 ?sp x)" using dir2 neg_bounds_compare(4) neg_bounds_compare(8) dir2 unfolding bound_compare''_defs by force qed have "(lt dir2) (\?bp\ xs) (\?bl\ xs)" using \\\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)\ using \\\<^sub>i ?sp = \\<^sub>i ?sl\ dir2 using \in_bounds xs \?bl\ (\ ?sl)\ by (auto simp add: bound_compare''_defs simp: indexl_def indexu_def boundsl_def boundsu_def) then have "(lt dir2) 0 (\?bl\ xs - \?bp\ xs)" using dir2 by (auto simp add: minus_gt[THEN sym] minus_lt[THEN sym]) moreover have "le (lt dir2) ((rhs ?eq) \ \?bl\ \ - (rhs ?eq) \ \?bp\ \) 0" proof- have "\ x \ rvars_eq ?eq. (0 < coeff (rhs ?eq) x \ le (lt dir2) 0 (\?bp\ x - \?bl\ x)) \ (coeff (rhs ?eq) x < 0 \ le (lt dir2) (\?bp\ x - \?bl\ x) 0)" proof fix x assume "x \ rvars_eq ?eq" show "(0 < coeff (rhs ?eq) x \ le (lt dir2) 0 (\?bp\ x - \?bl\ x)) \ (coeff (rhs ?eq) x < 0 \ le (lt dir2) (\?bp\ x - \?bl\ x) 0)" proof (cases "x < xr") case True then have "in_bounds x \?bl\ (\ ?sl)" using \\ x. x < xr \ in_bounds x \?bl\ (\ ?sl)\ by simp show ?thesis proof (safe) assume "coeff (rhs ?eq) x > 0" "0 \ \?bp\ x - \?bl\ x" then have "\\<^sub>u\<^sub>b (lt dir2) (\\ (l' ! sp)\ x) (UB dir2 (l' ! sp) x)" using * \x < xr\ \x \ rvars_eq ?eq\ by simp then have "le (lt dir2) (\?bl\ x) (\?bp\ x)" using \in_bounds x \?bl\ (\ ?sl)\ \\\<^sub>i ?sp = \\<^sub>i ?sl\ dir2 apply (auto simp add: bound_compare''_defs) using bounds_lg(3)[of "\?bp\ x" "\\<^sub>u (l ! sl) x" "\?bl\ x"] using bounds_lg(6)[of "\?bp\ x" "\\<^sub>l (l ! sl) x" "\?bl\ x"] unfolding bound_compare''_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) then show "lt dir2 0 (\?bp\ x - \?bl\ x)" using \0 \ \?bp\ x - \?bl\ x\ using minus_gt[of "\?bl\ x" "\?bp\ x"] minus_lt[of "\?bp\ x" "\?bl\ x"] dir2 by auto next assume "coeff (rhs ?eq) x < 0" "\?bp\ x - \?bl\ x \ 0" then have "\\<^sub>l\<^sub>b (lt dir2) (\\ (l' ! sp)\ x) (LB dir2 (l' ! sp) x)" using * \x < xr\ \x \ rvars_eq ?eq\ by simp then have "le (lt dir2) (\?bp\ x) (\?bl\ x)" using \in_bounds x \?bl\ (\ ?sl)\ \\\<^sub>i ?sp = \\<^sub>i ?sl\ dir2 apply (auto simp add: bound_compare''_defs) using bounds_lg(3)[of "\?bp\ x" "\\<^sub>u (l ! sl) x" "\?bl\ x"] using bounds_lg(6)[of "\?bp\ x" "\\<^sub>l (l ! 
sl) x" "\?bl\ x"] unfolding bound_compare''_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) then show "lt dir2 (\?bp\ x - \?bl\ x) 0" using \\?bp\ x - \?bl\ x \ 0\ using minus_gt[of "\?bl\ x" "\?bp\ x"] minus_lt[of "\?bp\ x" "\?bl\ x"] dir2 by auto qed next case False show ?thesis proof (cases "x = xr") case True have "\\ (l ! (sl + 1))\ xr = the (LB dir1 ?sl xr)" using \l ! sl \ (l ! (sl + 1))\ using \swap_lr l sl xr\ using succ_set_on_bound(1)[of "l ! sl" "l ! (sl + 1)" xr] using \\ \\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)\ dir1 unfolding bound_compare''_defs by auto then have "\?bp\ xr = the (LB dir1 ?sl xr)" using \\?bp\ xr = \\ (l ! (sl + 1))\ xr\ by simp then have "lt dir1 (\?bl\ xr) (\?bp\ xr)" using \LB dir1 ?sl xr \ None\ using \\\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)\ dir1 by (auto simp add: bound_compare_defs) moreover have "reasable_var dir2 xr ?eq ?sp" using \\ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)\ using \l' ! sp \ (l' ! (sp + 1))\ using \swap_lr l' sp xs\ \swap_rl l' sp xr\ using succ_min_rvar[of "l' ! sp" "l' ! (sp + 1)"xs xr ?eq] dir2 unfolding bound_compare''_defs by auto then have "if dir1 = dir2 then coeff (rhs ?eq) xr > 0 else coeff (rhs ?eq) xr < 0" using \\?bp\ xr = the (LB dir1 ?sl xr)\ using \\\<^sub>i ?sp = \\<^sub>i ?sl\[THEN sym] dir1 using \LB dir1 ?sl xr \ None\ dir1 dir2 by (auto split: if_splits simp add: bound_compare_defs indexl_def indexu_def boundsl_def boundsu_def) moreover have "dir1 = Positive \ dir1 = Negative" "dir2 = Positive \ dir2 = Negative" using dir1 dir2 by auto ultimately show ?thesis using \x = xr\ using minus_lt[of "\?bp\ xr" "\?bl\ xr"] minus_gt[of "\?bl\ xr" "\?bp\ xr"] by (auto split: if_splits) next case False then have "x > xr" using \\ x < xr\ by simp then have "\?bp\ x = \?bl\ x" using \\ x \ rvars_eq ?eq. x > xr \ \?bp\ x = \?bl\ x\ using \x \ rvars_eq ?eq\ by simp then show ?thesis by simp qed qed qed then have "le (lt dir2) 0 (rhs ?eq \ \ x. \?bp\ x - \?bl\ x \)" using dir2 apply auto using valuate_nonneg[of "rhs ?eq" "\ x. \?bp\ x - \?bl\ x"] apply force using valuate_nonpos[of "rhs ?eq" "\ x. \?bp\ x - \?bl\ x"] apply force done then have "le (lt dir2) 0 ((rhs ?eq) \ \?bp\ \ - (rhs ?eq) \ \?bl\ \)" by (subst valuate_diff)+ simp then have "le (lt dir2) ((rhs ?eq) \ \?bl\ \) ((rhs ?eq) \ \?bp\ \)" using minus_lt[of "(rhs ?eq) \ \?bp\ \" "(rhs ?eq) \ \?bl\ \"] dir2 by auto then show ?thesis using dir2 using minus_lt[of "(rhs ?eq) \ \?bl\ \" "(rhs ?eq) \ \?bp\ \"] using minus_gt[of "(rhs ?eq) \ \?bp\ \" "(rhs ?eq) \ \?bl\ \"] by auto qed ultimately have False using diff_satified dir2 by (auto split: if_splits) } then show False by auto qed qed (* -------------------------------------------------------------------------- *) lemma check_unsat_terminates: assumes "\ s" shows "check_dom s" by (rule check_dom.intros) (auto simp add: assms) lemma check_sat_terminates'_aux: assumes dir: "dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative)" and *: "\ s'. 
\s \ s'; \ s'; \ (\ s'); \ s'; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ check_dom s'" and "\ s" "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ \ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "\\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)" shows "check_dom (case min_rvar_incdec dir s x\<^sub>i of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j (the (LB dir s x\<^sub>i)) s)" proof (cases "min_rvar_incdec dir s x\<^sub>i") case Inl then show ?thesis using check_unsat_terminates by simp next case (Inr x\<^sub>j) then have xj: "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" using min_rvar_incdec_eq_Some_rvars[of _ s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using dir by simp let ?s' = "pivot_and_update x\<^sub>i x\<^sub>j (the (LB dir s x\<^sub>i)) s" have "check_dom ?s'" proof (rule * ) show **: "\ ?s'" "\ (\ ?s')" "\ ?s'" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s'" using \min_lvar_not_in_bounds s = Some x\<^sub>i\ Inr using \\ s\ \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ dir using pivotandupdate_check_precond by auto have xi: "x\<^sub>i \ lvars (\ s)" using assms(8) min_lvar_not_in_bounds_lvars by blast show "s \ ?s'" unfolding gt_state_def using \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ \\ s\ using \min_lvar_not_in_bounds s = Some x\<^sub>i\ \\\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)\ Inr dir by (intro conjI pivotandupdate_bounds_id pivotandupdate_unsat_core_id, auto intro!: xj xi) qed then show ?thesis using Inr by simp qed lemma check_sat_terminates': assumes "\ s" "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "s\<^sub>0 \\<^sup>* s" shows "check_dom s" using assms proof (induct s rule: wf_induct[of "{(y, x). s\<^sub>0 \\<^sup>* x \ x \ y}"]) show "wf {(y, x). s\<^sub>0 \\<^sup>* x \ x \ y}" proof (rule finite_acyclic_wf) let ?A = "{(s', s). s\<^sub>0 \\<^sup>* s \ s \ s'}" let ?B = "{s. s\<^sub>0 \\<^sup>* s}" have "?A \ ?B \ ?B" proof fix p assume "p \ ?A" then have "fst p \ ?B" "snd p \ ?B" using rtrancl_into_trancl1[of s\<^sub>0 "snd p" succ_rel "fst p"] by auto then show "p \ ?B \ ?B" using mem_Sigma_iff[of "fst p" "snd p"] by auto qed then show "finite ?A" using finite_accessible_states[of s\<^sub>0] using finite_subset[of ?A "?B \ ?B"] by simp show "acyclic ?A" proof- have "?A \ succ_rel\" by auto then show ?thesis using acyclic_converse acyclic_subset using acyclic_suc_rel by auto qed qed next fix s assume "\ s'. (s', s) \ {(y, x). s\<^sub>0 \\<^sup>* x \ x \ y} \ \ s' \ \ (\ s') \ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ s\<^sub>0 \\<^sup>* s' \ check_dom s'" "\ s" "\ (\ s)" "\ s" " \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "s\<^sub>0 \\<^sup>* s" then have *: "\ s'. 
\s \ s'; \ s'; \ (\ s'); \ s'; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ check_dom s'" using rtrancl_into_trancl1[of s\<^sub>0 s succ_rel] using trancl_into_rtrancl[of s\<^sub>0 _ succ_rel] by auto show "check_dom s" proof (rule check_dom.intros, simp_all add: check'_def, unfold Positive_def[symmetric], unfold Negative_def[symmetric]) fix x\<^sub>i assume "\ \ s" "Some x\<^sub>i = min_lvar_not_in_bounds s" "\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i" have "\\<^sub>l s x\<^sub>i = LB Positive s x\<^sub>i" by simp show "check_dom (case min_rvar_incdec Positive s x\<^sub>i of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j (the (\\<^sub>l s x\<^sub>i)) s)" apply (subst \\\<^sub>l s x\<^sub>i = LB Positive s x\<^sub>i\) apply (rule check_sat_terminates'_aux[of Positive s x\<^sub>i]) using \\ s\ \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ * using \\ \ s\ \Some x\<^sub>i = min_lvar_not_in_bounds s\ \\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ by (simp_all add: bound_compare''_defs) next fix x\<^sub>i assume "\ \ s" "Some x\<^sub>i = min_lvar_not_in_bounds s" "\ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i" then have "\\ s\ x\<^sub>i >\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i" using min_lvar_not_in_bounds_Some[of s x\<^sub>i] using neg_bounds_compare(7) neg_bounds_compare(2) by auto have "\\<^sub>u s x\<^sub>i = LB Negative s x\<^sub>i" by simp show "check_dom (case min_rvar_incdec Negative s x\<^sub>i of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j (the (\\<^sub>u s x\<^sub>i)) s)" apply (subst \\\<^sub>u s x\<^sub>i = LB Negative s x\<^sub>i\) apply (rule check_sat_terminates'_aux) using \\ s\ \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ * using \\ \ s\ \Some x\<^sub>i = min_lvar_not_in_bounds s\ \\\ s\ x\<^sub>i >\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i\ \\ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ by (simp_all add: bound_compare''_defs) qed qed lemma check_sat_terminates: assumes "\ s" "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" shows "check_dom s" using assms using check_sat_terminates'[of s s] by simp lemma check_cases: assumes "\ s \ P s" assumes "\\ \ s; min_lvar_not_in_bounds s = None\ \ P s" assumes "\ x\<^sub>i dir I. \dir = Positive \ dir = Negative; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I\ \ P (set_unsat I s)" assumes "\ x\<^sub>i x\<^sub>j l\<^sub>i dir. 
\dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative); \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j; l\<^sub>i = the (LB dir s x\<^sub>i); check' dir x\<^sub>i s = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s\ \ P (check (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s))" assumes "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" shows "P (check s)" proof (cases "\ s") case True then show ?thesis using assms(1) using check.simps[of s] by simp next case False show ?thesis proof (cases "min_lvar_not_in_bounds s") case None then show ?thesis using \\ \ s\ using assms(2) \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ using check.simps[of s] by simp next case (Some x\<^sub>i) let ?dir = "if (\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i) then (Positive :: ('i,'a)Direction) else Negative" let ?s' = "check' ?dir x\<^sub>i s" have "\\<^sub>l\<^sub>b (lt ?dir) (\\ s\ x\<^sub>i) (LB ?dir s x\<^sub>i)" using \min_lvar_not_in_bounds s = Some x\<^sub>i\ using min_lvar_not_in_bounds_Some[of s x\<^sub>i] using not_in_bounds[of x\<^sub>i "\\ s\" "\\<^sub>l s" "\\<^sub>u s"] by (auto split: if_splits simp add: bound_compare''_defs) have "P (check ?s')" apply (rule check'_cases) using \\ \ s\ \min_lvar_not_in_bounds s = Some x\<^sub>i\ \\\<^sub>l\<^sub>b (lt ?dir) (\\ s\ x\<^sub>i) (LB ?dir s x\<^sub>i)\ using assms(3)[of ?dir x\<^sub>i] using assms(4)[of ?dir x\<^sub>i] using check.simps[of "set_unsat (_ :: 'i list) s"] using \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: bounds_consistent_def curr_val_satisfies_no_lhs_def) then show ?thesis using \\ \ s\ \min_lvar_not_in_bounds s = Some x\<^sub>i\ using check.simps[of s] using \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by auto qed qed lemma check_induct: fixes s :: "('i,'a) state" assumes *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" assumes **: "\ s. \ s \ P s s" "\ s. \\ \ s; min_lvar_not_in_bounds s = None\ \ P s s" "\ s x\<^sub>i dir I. \dir = Positive \ dir = Negative; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I\ \ P s (set_unsat I s)" assumes step': "\ s x\<^sub>i x\<^sub>j l\<^sub>i. \\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ \ P s (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" assumes trans': "\ si sj sk. 
\P si sj; P sj sk\ \ P si sk" shows "P s (check s)" proof- have "check_dom s" using * by (simp add: check_sat_terminates) then show ?thesis using * proof (induct s rule: check_dom.induct) case (step s') show ?case proof (rule check_cases) fix x\<^sub>i x\<^sub>j l\<^sub>i dir let ?dir = "if \\ s'\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s' x\<^sub>i then Positive else Negative" let ?s' = "check' dir x\<^sub>i s'" assume "\ \ s'" "min_lvar_not_in_bounds s' = Some x\<^sub>i" "min_rvar_incdec dir s' x\<^sub>i = Inr x\<^sub>j" "l\<^sub>i = the (LB dir s' x\<^sub>i)" "?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'" "dir = ?dir" moreover then have "\ ?s'" "\ (\ ?s')" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s'" "\ ?s'" using \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ using pivotandupdate_check_precond[of dir s' x\<^sub>i x\<^sub>j l\<^sub>i] by auto ultimately have "P (check' dir x\<^sub>i s') (check (check' dir x\<^sub>i s'))" using step(2)[of x\<^sub>i] step(4)[of x\<^sub>i] \\ (\ s')\ \\ s'\ by auto then show "P s' (check (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'))" using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ \\ (\ s')\ \\ s'\ using \min_lvar_not_in_bounds s' = Some x\<^sub>i\ \min_rvar_incdec dir s' x\<^sub>i = Inr x\<^sub>j\ using step'[of s' x\<^sub>i x\<^sub>j l\<^sub>i] using trans'[of s' ?s' "check ?s'"] by (auto simp add: min_lvar_not_in_bounds_lvars min_rvar_incdec_eq_Some_rvars) qed (simp_all add: \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ **) qed qed lemma check_induct': fixes s :: "('i,'a) state" assumes "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" assumes "\ s x\<^sub>i dir I. \dir = Positive \ dir = Negative; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I; P s\ \ P (set_unsat I s)" assumes "\ s x\<^sub>i x\<^sub>j l\<^sub>i. \\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i); P s\ \ P (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" assumes "P s" shows "P (check s)" proof- have "P s \ P (check s)" by (rule check_induct) (simp_all add: assms) then show ?thesis using \P s\ by simp qed lemma check_induct'': fixes s :: "('i,'a) state" assumes *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" assumes **: "\ s \ P s" "\ s. \\ s; \ (\ s); \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ \ s; min_lvar_not_in_bounds s = None\ \ P s" "\ s x\<^sub>i dir I. 
\dir = Positive \ dir = Negative; \ s; \ (\ s); \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I\ \ P (set_unsat I s)" shows "P (check s)" proof (cases "\ s") case True then show ?thesis using \\ s \ P s\ by (simp add: check.simps) next case False have "check_dom s" using * by (simp add: check_sat_terminates) then show ?thesis using * False proof (induct s rule: check_dom.induct) case (step s') show ?case proof (rule check_cases) fix x\<^sub>i x\<^sub>j l\<^sub>i dir let ?dir = "if \\ s'\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s' x\<^sub>i then Positive else Negative" let ?s' = "check' dir x\<^sub>i s'" assume "\ \ s'" "min_lvar_not_in_bounds s' = Some x\<^sub>i" "min_rvar_incdec dir s' x\<^sub>i = Inr x\<^sub>j" "l\<^sub>i = the (LB dir s' x\<^sub>i)" "?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'" "dir = ?dir" moreover then have "\ ?s'" "\ (\ ?s')" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s'" "\ ?s'" "\ \ ?s'" using \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ using pivotandupdate_check_precond[of dir s' x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_unsat_id[of s' x\<^sub>i x\<^sub>j l\<^sub>i] by (auto simp add: min_lvar_not_in_bounds_lvars min_rvar_incdec_eq_Some_rvars) ultimately have "P (check (check' dir x\<^sub>i s'))" using step(2)[of x\<^sub>i] step(4)[of x\<^sub>i] \\ (\ s')\ \\ s'\ by auto then show "P (check (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'))" using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ by simp qed (simp_all add: \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ \\ \ s'\ ** ) qed qed end lemma poly_eval_update: "(p \ v ( x := c :: 'a :: lrv) \) = (p \ v \) + coeff p x *R (c - v x)" proof (transfer, simp, goal_cases) case (1 p v x c) hence fin: "finite {v. p v \ 0}" by simp have "(\y\{v. p v \ 0}. p y *R (if y = x then c else v y)) = (\y\{v. p v \ 0} \ {x}. p y *R (if y = x then c else v y)) + (\y\{v. p v \ 0} \ (UNIV - {x}). p y *R (if y = x then c else v y))" (is "?l = ?a + ?b") by (subst sum.union_disjoint[symmetric], auto intro: sum.cong fin) also have "?a = (if p x = 0 then 0 else p x *R c)" by auto also have "\ = p x *R c" by auto also have "?b = (\y\{v. p v \ 0} \ (UNIV - {x}). p y *R v y)" (is "_ = ?c") by (rule sum.cong, auto) finally have l: "?l = p x *R c + ?c" . define r where "r = (\y\{v. p v \ 0}. p y *R v y) + p x *R (c - v x)" have "r = (\y\{v. p v \ 0}. p y *R v y) + p x *R (c - v x)" by (simp add: r_def) also have "(\y\{v. p v \ 0}. p y *R v y) = (\y\{v. p v \ 0} \ {x}. p y *R v y) + ?c" (is "_ = ?d + _") by (subst sum.union_disjoint[symmetric], auto intro: sum.cong fin) also have "?d = (if p x = 0 then 0 else p x *R v x)" by auto also have "\ = p x *R v x" by auto finally have "(p x *R (c - v x) + p x *R v x) + ?c = r" by simp also have "(p x *R (c - v x) + p x *R v x) = p x *R c" unfolding scaleRat_right_distrib[symmetric] by simp finally have r: "p x *R c + ?c = r" . show ?case unfolding l r r_def .. 
qed context PivotUpdateMinVars begin context fixes rhs_eq_val :: "(var, 'a::lrv) mapping \ var \ 'a \ eq \ 'a" assumes "RhsEqVal rhs_eq_val" begin lemma check_minimal_unsat_state_core: assumes *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" shows "\ (check s) \ minimal_unsat_state_core (check s)" (is "?P (check s)") proof (rule check_induct'') fix s' :: "('i,'a) state" and x\<^sub>i dir I assume nolhs: "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'" and min_rvar: "min_rvar_incdec dir s' x\<^sub>i = Inl I" and sat: "\ \ s'" and min_lvar: "min_lvar_not_in_bounds s' = Some x\<^sub>i" and dir: "dir = Positive \ dir = Negative" and lt: "\\<^sub>l\<^sub>b (lt dir) (\\ s'\ x\<^sub>i) (LB dir s' x\<^sub>i)" and norm: "\ (\ s')" and valuated: "\ s'" let ?eq = "eq_for_lvar (\ s') x\<^sub>i" have unsat_core: "set (the (\\<^sub>c (set_unsat I s'))) = set I" by auto obtain l\<^sub>i where LB_Some: "LB dir s' x\<^sub>i = Some l\<^sub>i" and lt: "lt dir (\\ s'\ x\<^sub>i) l\<^sub>i" using lt by (cases "LB dir s' x\<^sub>i") (auto simp add: bound_compare_defs) from LB_Some dir obtain i where LBI: "look (LBI dir s') x\<^sub>i = Some (i,l\<^sub>i)" and LI: "LI dir s' x\<^sub>i = i" by (auto simp: simp: indexl_def indexu_def boundsl_def boundsu_def) from min_rvar_incdec_eq_None[OF min_rvar] dir have Is': "LI dir s' (lhs (eq_for_lvar (\ s') x\<^sub>i)) \ indices_state s' \ set I \ indices_state s'" and reasable: "\ x. x \ rvars_eq ?eq \ \ reasable_var dir x ?eq s'" and setI: "set I = {LI dir s' (lhs ?eq)} \ {LI dir s' x |x. x \ rvars_eq ?eq \ coeff (rhs ?eq) x < 0} \ {UI dir s' x |x. x \ rvars_eq ?eq \ 0 < coeff (rhs ?eq) x}" (is "_ = ?L \ ?R1 \ ?R2") by auto note setI also have id: "lhs ?eq = x\<^sub>i" by (simp add: EqForLVar.eq_for_lvar EqForLVar_axioms min_lvar min_lvar_not_in_bounds_lvars) finally have iI: "i \ set I" unfolding LI by auto note setI = setI[unfolded id] have "LI dir s' x\<^sub>i \ indices_state s'" using LBI LI unfolding indices_state_def using dir by force from Is'[unfolded id, OF this] have Is': "set I \ indices_state s'" . 
have "x\<^sub>i \ lvars (\ s')" using min_lvar by (simp add: min_lvar_not_in_bounds_lvars) then have **: "?eq \ set (\ s')" "lhs ?eq = x\<^sub>i" by (auto simp add: eq_for_lvar) have Is': "set I \ indices_state (set_unsat I s')" using Is' * unfolding indices_state_def by auto have "\\ s'\ \\<^sub>t \ s'" and b: "\\ s'\ \\<^sub>b \ s' \ - lvars (\ s')" using nolhs[unfolded curr_val_satisfies_no_lhs_def] by auto from norm[unfolded normalized_tableau_def] have lvars_rvars: "lvars (\ s') \ rvars (\ s') = {}" by auto hence in_bnds: "x \ rvars (\ s') \ in_bounds x \\ s'\ (\ s')" for x by (intro b[unfolded satisfies_bounds_set.simps, rule_format, of x], auto) { assume dist: "distinct_indices_state (set_unsat I s')" hence "distinct_indices_state s'" unfolding distinct_indices_state_def by auto note dist = this[unfolded distinct_indices_state_def, rule_format] { fix x c i y assume c: "look (\\<^sub>i\<^sub>l s') x = Some (i,c) \ look (\\<^sub>i\<^sub>u s') x = Some (i,c)" and y: "y \ rvars_eq ?eq" and coeff: "coeff (rhs ?eq) y < 0 \ i = LI dir s' y \ coeff (rhs ?eq) y > 0 \ i = UI dir s' y" { assume coeff: "coeff (rhs ?eq) y < 0" and i: "i = LI dir s' y" from reasable[OF y] coeff have not_gt: "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s'\ y) (LB dir s' y))" by auto then obtain d where LB: "LB dir s' y = Some d" using dir by (cases "LB dir s' y", auto simp: bound_compare_defs) with not_gt have le: "le (lt dir) (\\ s'\ y) d" using dir by (auto simp: bound_compare_defs) from LB have "look (LBI dir s') y = Some (i, d)" unfolding i using dir by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) with c dist[of x i c y d] dir have yx: "y = x" "d = c" by auto from y[unfolded yx] have "x \ rvars (\ s')" using **(1) unfolding rvars_def by force from in_bnds[OF this] le LB not_gt i have "\\ s'\ x = c" unfolding yx using dir by auto note yx(1) this } moreover { assume coeff: "coeff (rhs ?eq) y > 0" and i: "i = UI dir s' y" from reasable[OF y] coeff have not_gt: "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s'\ y) (UB dir s' y))" by auto then obtain d where UB: "UB dir s' y = Some d" using dir by (cases "UB dir s' y", auto simp: bound_compare_defs) with not_gt have le: "le (lt dir) d (\\ s'\ y)" using dir by (auto simp: bound_compare_defs) from UB have "look (UBI dir s') y = Some (i, d)" unfolding i using dir by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) with c dist[of x i c y d] dir have yx: "y = x" "d = c" by auto from y[unfolded yx] have "x \ rvars (\ s')" using **(1) unfolding rvars_def by force from in_bnds[OF this] le UB not_gt i have "\\ s'\ x = c" unfolding yx using dir by auto note yx(1) this } ultimately have "y = x" "\\ s'\ x = c" using coeff by blast+ } note x_vars_main = this { fix x c i assume c: "look (\\<^sub>i\<^sub>l s') x = Some (i,c) \ look (\\<^sub>i\<^sub>u s') x = Some (i,c)" and i: "i \ ?R1 \ ?R2" from i obtain y where y: "y \ rvars_eq ?eq" and coeff: "coeff (rhs ?eq) y < 0 \ i = LI dir s' y \ coeff (rhs ?eq) y > 0 \ i = UI dir s' y" by auto from x_vars_main[OF c y coeff] have "y = x" "\\ s'\ x = c" using coeff by blast+ with y have "x \ rvars_eq ?eq" "x \ rvars (\ s')" "\\ s'\ x = c" using **(1) unfolding rvars_def by force+ } note x_rvars = this have R1R2: "(?R1 \ ?R2, \\ s'\) \\<^sub>i\<^sub>s\<^sub>e s'" unfolding satisfies_state_index'.simps proof (intro conjI) show "\\ s'\ \\<^sub>t \ s'" by fact show "(?R1 \ ?R2, \\ s'\) \\<^sub>i\<^sub>b\<^sub>e \\ s'" unfolding satisfies_bounds_index'.simps proof (intro conjI impI allI) fix x c assume c: "\\<^sub>l s' x = Some c" and i: 
"\\<^sub>l s' x \ ?R1 \ ?R2" from c have ci: "look (\\<^sub>i\<^sub>l s') x = Some (\\<^sub>l s' x, c)" unfolding boundsl_def indexl_def by auto from x_rvars[OF _ i] ci show "\\ s'\ x = c" by auto next fix x c assume c: "\\<^sub>u s' x = Some c" and i: "\\<^sub>u s' x \ ?R1 \ ?R2" from c have ci: "look (\\<^sub>i\<^sub>u s') x = Some (\\<^sub>u s' x, c)" unfolding boundsu_def indexu_def by auto from x_rvars[OF _ i] ci show "\\ s'\ x = c" by auto qed qed have id1: "set (the (\\<^sub>c (set_unsat I s'))) = set I" "\ x. x \\<^sub>i\<^sub>s\<^sub>e set_unsat I s' \ x \\<^sub>i\<^sub>s\<^sub>e s'" by (auto simp: satisfies_state_index'.simps boundsl_def boundsu_def indexl_def indexu_def) have "subsets_sat_core (set_unsat I s')" unfolding subsets_sat_core_def id1 proof (intro allI impI) fix J assume sub: "J \ set I" show "\v. (J, v) \\<^sub>i\<^sub>s\<^sub>e s'" proof (cases "J \ ?R1 \ ?R2") case True with R1R2 have "(J, \\ s'\) \\<^sub>i\<^sub>s\<^sub>e s'" unfolding satisfies_state_index'.simps satisfies_bounds_index'.simps by blast thus ?thesis by blast next case False with sub obtain k where k: "k \ ?R1 \ ?R2" "k \ J" "k \ set I" unfolding setI by auto from k(1) obtain y where y: "y \ rvars_eq ?eq" and coeff: "coeff (rhs ?eq) y < 0 \ k = LI dir s' y \ coeff (rhs ?eq) y > 0 \ k = UI dir s' y" by auto hence cy0: "coeff (rhs ?eq) y \ 0" by auto from y **(1) have ry: "y \ rvars (\ s')" unfolding rvars_def by force hence yl: "y \ lvars (\ s')" using lvars_rvars by blast interpret rev: RhsEqVal rhs_eq_val by fact note update = rev.update_valuation_nonlhs[THEN mp, OF norm valuated yl] define diff where "diff = l\<^sub>i - \\ s'\ x\<^sub>i" have "\\ s'\ x\<^sub>i < l\<^sub>i \ 0 < l\<^sub>i - \\ s'\ x\<^sub>i" "l\<^sub>i < \\ s'\ x\<^sub>i \ l\<^sub>i - \\ s'\ x\<^sub>i < 0" using minus_gt by (blast, insert minus_lt, blast) with lt dir have diff: "lt dir 0 diff" by (auto simp: diff_def) define up where "up = inverse (coeff (rhs ?eq) y) *R diff" define v where "v = \\ (rev.update y (\\ s'\ y + up) s')\" show ?thesis unfolding satisfies_state_index'.simps proof (intro exI[of _ v] conjI) show "v \\<^sub>t \ s'" unfolding v_def using rev.update_satisfies_tableau[OF norm valuated yl] \\\ s'\ \\<^sub>t \ s'\ by auto with **(1) have "v \\<^sub>e ?eq" unfolding satisfies_tableau_def by auto from this[unfolded satisfies_eq_def id] have v_xi: "v x\<^sub>i = (rhs ?eq \ v \)" . from \\\ s'\ \\<^sub>t \ s'\ **(1) have "\\ s'\ \\<^sub>e ?eq" unfolding satisfies_tableau_def by auto hence V_xi: "\\ s'\ x\<^sub>i = (rhs ?eq \ \\ s'\ \)" unfolding satisfies_eq_def id . have "v x\<^sub>i = \\ s'\ x\<^sub>i + coeff (rhs ?eq) y *R up" unfolding v_xi unfolding v_def rev.update_valuate_rhs[OF **(1) norm] poly_eval_update V_xi by simp also have "\ = l\<^sub>i" unfolding up_def diff_def scaleRat_scaleRat using cy0 by simp finally have v_xi_l: "v x\<^sub>i = l\<^sub>i" . 
{ assume both: "\\<^sub>u s' y \ ?R1 \ ?R2" "\\<^sub>u s' y \ None" "\\<^sub>l s' y \ ?R1 \ ?R2" "\\<^sub>l s' y \ None" and diff: "\\<^sub>l s' y \ \\<^sub>u s' y" from both(1) dir obtain xu cu where looku: "look (\\<^sub>i\<^sub>l s') xu = Some (\\<^sub>u s' y, cu) \ look (\\<^sub>i\<^sub>u s') xu = Some (\\<^sub>u s' y,cu)" by (smt Is' indices_state_def le_sup_iff mem_Collect_eq setI set_unsat_simps subsetCE) from both(1) obtain xu' where "xu' \ rvars_eq ?eq" "coeff (rhs ?eq) xu' < 0 \ \\<^sub>u s' y = LI dir s' xu' \ coeff (rhs ?eq) xu' > 0 \ \\<^sub>u s' y = UI dir s' xu'" by blast with x_vars_main(1)[OF looku this] have xu: "xu \ rvars_eq ?eq" "coeff (rhs ?eq) xu < 0 \ \\<^sub>u s' y = LI dir s' xu \ coeff (rhs ?eq) xu > 0 \ \\<^sub>u s' y = UI dir s' xu" by auto { assume "xu \ y" with dist[OF looku, of y] have "look (\\<^sub>i\<^sub>u s') y = None" by (cases "look (\\<^sub>i\<^sub>u s') y", auto simp: boundsu_def indexu_def, blast) with both(2) have False by (simp add: boundsu_def) } hence xu_y: "xu = y" by blast from both(3) dir obtain xl cl where lookl: "look (\\<^sub>i\<^sub>l s') xl = Some (\\<^sub>l s' y, cl) \ look (\\<^sub>i\<^sub>u s') xl = Some (\\<^sub>l s' y,cl)" by (smt Is' indices_state_def le_sup_iff mem_Collect_eq setI set_unsat_simps subsetCE) from both(3) obtain xl' where "xl' \ rvars_eq ?eq" "coeff (rhs ?eq) xl' < 0 \ \\<^sub>l s' y = LI dir s' xl' \ coeff (rhs ?eq) xl' > 0 \ \\<^sub>l s' y = UI dir s' xl'" by blast with x_vars_main(1)[OF lookl this] have xl: "xl \ rvars_eq ?eq" "coeff (rhs ?eq) xl < 0 \ \\<^sub>l s' y = LI dir s' xl \ coeff (rhs ?eq) xl > 0 \ \\<^sub>l s' y = UI dir s' xl" by auto { assume "xl \ y" with dist[OF lookl, of y] have "look (\\<^sub>i\<^sub>l s') y = None" by (cases "look (\\<^sub>i\<^sub>l s') y", auto simp: boundsl_def indexl_def, blast) with both(4) have False by (simp add: boundsl_def) } hence xl_y: "xl = y" by blast from xu(2) xl(2) diff have diff: "xu \ xl" by auto with xu_y xl_y have False by simp } note both_y_False = this show "(J, v) \\<^sub>i\<^sub>b\<^sub>e \\ s'" unfolding satisfies_bounds_index'.simps proof (intro conjI allI impI) fix x c assume x: "\\<^sub>l s' x = Some c" "\\<^sub>l s' x \ J" with k have not_k: "\\<^sub>l s' x \ k" by auto from x have ci: "look (\\<^sub>i\<^sub>l s') x = Some (\\<^sub>l s' x, c)" unfolding boundsl_def indexl_def by auto show "v x = c" proof (cases "\\<^sub>l s' x = i") case False hence iR12: "\\<^sub>l s' x \ ?R1 \ ?R2" using sub x unfolding setI LI by blast from x_rvars(2-3)[OF _ iR12] ci have xr: "x \ rvars (\ s')" and val: "\\ s'\ x = c" by auto with lvars_rvars have xl: "x \ lvars (\ s')" by auto show ?thesis proof (cases "x = y") case False thus ?thesis using val unfolding v_def map2fun_def' update[OF xl] using val by auto next case True note coeff = coeff[folded True] from coeff not_k dir ci have Iu: "\\<^sub>u s' x = k" by auto with ci Iu x(2) k sub False True have both: "\\<^sub>u s' y \ ?R1 \ ?R2" "\\<^sub>l s' y \ ?R1 \ ?R2" and diff: "\\<^sub>l s' y \ \\<^sub>u s' y" unfolding setI LI by auto have "\\<^sub>l s' y \ None" using x True by simp from both_y_False[OF both(1) _ both(2) this diff] have "\\<^sub>u s' y = None" by metis with reasable[OF y] dir coeff True have "dir = Negative \ 0 < coeff (rhs ?eq) y" "dir = Positive \ 0 > coeff (rhs ?eq) y" by (auto simp: bound_compare_defs) with dir coeff[unfolded True] have "k = \\<^sub>l s' y" by auto with diff Iu False True have False by auto thus ?thesis .. 
qed next case True from LBI ci[unfolded True] dir dist[unfolded distinct_indices_state_def, rule_format, of x i c x\<^sub>i l\<^sub>i] have xxi: "x = x\<^sub>i" and c: "c = l\<^sub>i" by auto have vxi: "v x = l\<^sub>i" unfolding xxi v_xi_l .. thus ?thesis unfolding c by simp qed next fix x c assume x: "\\<^sub>u s' x = Some c" "\\<^sub>u s' x \ J" with k have not_k: "\\<^sub>u s' x \ k" by auto from x have ci: "look (\\<^sub>i\<^sub>u s') x = Some (\\<^sub>u s' x, c)" unfolding boundsu_def indexu_def by auto show "v x = c" proof (cases "\\<^sub>u s' x = i") case False hence iR12: "\\<^sub>u s' x \ ?R1 \ ?R2" using sub x unfolding setI LI by blast from x_rvars(2-3)[OF _ iR12] ci have xr: "x \ rvars (\ s')" and val: "\\ s'\ x = c" by auto with lvars_rvars have xl: "x \ lvars (\ s')" by auto show ?thesis proof (cases "x = y") case False thus ?thesis using val unfolding v_def map2fun_def' update[OF xl] using val by auto next case True note coeff = coeff[folded True] from coeff not_k dir ci have Iu: "\\<^sub>l s' x = k" by auto with ci Iu x(2) k sub False True have both: "\\<^sub>u s' y \ ?R1 \ ?R2" "\\<^sub>l s' y \ ?R1 \ ?R2" and diff: "\\<^sub>l s' y \ \\<^sub>u s' y" unfolding setI LI by auto have "\\<^sub>u s' y \ None" using x True by simp from both_y_False[OF both(1) this both(2) _ diff] have "\\<^sub>l s' y = None" by metis with reasable[OF y] dir coeff True have "dir = Negative \ 0 > coeff (rhs ?eq) y" "dir = Positive \ 0 < coeff (rhs ?eq) y" by (auto simp: bound_compare_defs) with dir coeff[unfolded True] have "k = \\<^sub>u s' y" by auto with diff Iu False True have False by auto thus ?thesis .. qed next case True from LBI ci[unfolded True] dir dist[unfolded distinct_indices_state_def, rule_format, of x i c x\<^sub>i l\<^sub>i] have xxi: "x = x\<^sub>i" and c: "c = l\<^sub>i" by auto have vxi: "v x = l\<^sub>i" unfolding xxi v_xi_l .. thus ?thesis unfolding c by simp qed qed qed qed qed } note minimal_core = this have unsat_core: "unsat_state_core (set_unsat I s')" unfolding unsat_state_core_def unsat_core proof (intro impI conjI Is', clarify) fix v assume "(set I, v) \\<^sub>i\<^sub>s set_unsat I s'" then have Iv: "(set I, v) \\<^sub>i\<^sub>s s'" unfolding satisfies_state_index.simps by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) from Iv have vt: "v \\<^sub>t \ s'" and Iv: "(set I, v) \\<^sub>i\<^sub>b \\ s'" unfolding satisfies_state_index.simps by auto have lt_le_eq: "\ x y :: 'a. (x < y) \ (x \ y \ x \ y)" by auto from Iv dir have lb: "\ x i c l. look (LBI dir s') x = Some (i,l) \ i \ set I \ le (lt dir) l (v x)" unfolding satisfies_bounds_index.simps by (auto simp: lt_le_eq indexl_def indexu_def boundsl_def boundsu_def) from lb[OF LBI iI] have li_x: "le (lt dir) l\<^sub>i (v x\<^sub>i)" . have "\\ s'\ \\<^sub>e ?eq" using nolhs \?eq \ set (\ s')\ unfolding curr_val_satisfies_no_lhs_def by (simp add: satisfies_tableau_def) then have "\\ s'\ x\<^sub>i = (rhs ?eq) \ \\ s'\ \" using \lhs ?eq = x\<^sub>i\ by (simp add: satisfies_eq_def) moreover have "v \\<^sub>e ?eq" using vt \?eq \ set (\ s')\ by (simp add: satisfies_state_def satisfies_tableau_def) then have "v x\<^sub>i = (rhs ?eq) \ v \" using \lhs ?eq = x\<^sub>i\ by (simp add: satisfies_eq_def) moreover have "\\<^sub>l\<^sub>b (lt dir) (v x\<^sub>i) (LB dir s' x\<^sub>i)" using li_x dir unfolding LB_Some by (auto simp: bound_compare'_defs) moreover from min_rvar_incdec_eq_None'[rule_format, OF dir min_rvar refl Iv] have "le (lt dir) (rhs (?eq) \v\) (rhs (?eq) \ \\ s'\ \)" . 
ultimately show False using dir lt LB_Some by (auto simp add: bound_compare_defs) qed thus "\ (set_unsat I s') \ minimal_unsat_state_core (set_unsat I s')" using minimal_core by (auto simp: minimal_unsat_state_core_def) qed (simp_all add: *) lemma Check_check: "Check check" proof fix s :: "('i,'a) state" assume "\ s" then show "check s = s" by (simp add: check.simps) next fix s :: "('i,'a) state" and v :: "'a valuation" assume *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" then have "v \\<^sub>t \ s = v \\<^sub>t \ (check s)" by (rule check_induct, simp_all add: pivotandupdate_tableau_equiv) moreover have "\ (\ (check s))" by (rule check_induct', simp_all add: * pivotandupdate_tableau_normalized) moreover have "\ (check s)" proof (rule check_induct', simp_all add: * pivotandupdate_tableau_valuated) fix s I show "\ s \ \ (set_unsat I s)" by (simp add: tableau_valuated_def) qed ultimately show "let s' = check s in v \\<^sub>t \ s = v \\<^sub>t \ s' \ \ (\ s') \ \ s'" by (simp add: Let_def) next fix s :: "('i,'a) state" assume *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" from * show "\\<^sub>i (check s) = \\<^sub>i s" by (rule check_induct, simp_all add: pivotandupdate_bounds_id) next fix s :: "('i,'a) state" assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" have "\ \ (check s) \ \ (check s)" proof (rule check_induct'', simp_all add: *) fix s assume "min_lvar_not_in_bounds s = None" "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" then show " \ s" using min_lvar_not_in_bounds_None[of s] unfolding curr_val_satisfies_state_def satisfies_state_def unfolding curr_val_satisfies_no_lhs_def by (auto simp add: satisfies_bounds_set.simps satisfies_bounds.simps) qed then show "\ \ (check s) \ \ (check s)" by blast next fix s :: "('i,'a) state" assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" have "\ (check s) \ minimal_unsat_state_core (check s)" by (rule check_minimal_unsat_state_core[OF *]) then show "\ (check s) \ minimal_unsat_state_core (check s)" by blast qed end end subsection\Symmetries\ text\\label{sec:symmetries} Simplex algorithm exhibits many symmetric cases. For example, \assert_bound\ treats atoms \Leq x c\ and \Geq x c\ in a symmetric manner, \check_inc\ and \check_dec\ are symmetric, etc. These symmetric cases differ only in several aspects: order relations between numbers (\<\ vs \>\ and \\\ vs \\\), the role of lower and upper bounds (\\\<^sub>l\ vs \\\<^sub>u\) and their updating functions, comparisons with bounds (e.g., \\\<^sub>u\<^sub>b\ vs \\\<^sub>l\<^sub>b\ or \<\<^sub>l\<^sub>b\ vs \>\<^sub>u\<^sub>b\), and atom constructors (\Leq\ and \Geq\). These can be attributed to two different orientations (positive and negative) of rational axis. To avoid duplicating definitions and proofs, \assert_bound\ definition cases for \Leq\ and \Geq\ are replaced by a call to a newly introduced function parametrized by a \Direction\ --- a record containing minimal set of aspects listed above that differ in two definition cases such that other aspects can be derived from them (e.g., only \<\ need to be stored while \\\ can be derived from it). Two constants of the type \Direction\ are defined: \Positive\ (with \<\, \\\ orders, \\\<^sub>l\ for lower and \\\<^sub>u\ for upper bounds and their corresponding updating functions, and \Leq\ constructor) and \Negative\ (completely opposite from the previous one). 
Similarly, \check_inc\ and \check_dec\ are replaced by a new function \check_incdec\ parametrized by a \Direction\. All lemmas, previously repeated for each symmetric instance, were replaced by a more abstract one, again parametrized by a \Direction\ parameter. \vspace{-3mm} \ (*-------------------------------------------------------------------------- *) subsection\Concrete implementation\ (*-------------------------------------------------------------------------- *) (* -------------------------------------------------------------------------- *) (* Init init_state *) (* -------------------------------------------------------------------------- *) text\It is easy to give a concrete implementation of the initial state constructor, which satisfies the specification of the @{term Init} locale. For example:\ definition init_state :: "_ \ ('i,'a :: zero)state" where "init_state t = State t Mapping.empty Mapping.empty (Mapping.tabulate (vars_list t) (\ v. 0)) False None" interpretation Init "init_state :: _ \ ('i,'a :: lrv)state" proof fix t let ?init = "init_state t :: ('i,'a)state" show "\\ ?init\ \\<^sub>t t" unfolding satisfies_tableau_def satisfies_eq_def proof (safe) fix l r assume "(l, r) \ set t" then have "l \ set (vars_list t)" "vars r \ set (vars_list t)" by (auto simp: set_vars_list) (transfer, force) then have *: "vars r \ lhs ` set t \ (\x\set t. rvars_eq x)" by (auto simp: set_vars_list) have "\\ ?init\ l = (0::'a)" using \l \ set (vars_list t)\ unfolding init_state_def by (auto simp: map2fun_def lookup_tabulate) moreover have "r \ \\ ?init\ \ = (0::'a)" using * proof (transfer fixing: t, goal_cases) case (1 r) { fix x assume "x\{v. r v \ 0}" then have "r x *R \\ ?init\ x = (0::'a)" using 1 unfolding init_state_def by (auto simp add: map2fun_def lookup_tabulate comp_def restrict_map_def set_vars_list Abstract_Linear_Poly.vars_def) } then show ?case by auto qed ultimately show "\\ ?init\ (lhs (l, r)) = rhs (l, r) \ \\ ?init\ \" by auto qed next fix t show "\ (init_state t)" unfolding init_state_def by (auto simp add: lookup_tabulate tableau_valuated_def comp_def restrict_map_def set_vars_list lvars_def rvars_def) qed (simp_all add: init_state_def add: boundsl_def boundsu_def indexl_def indexu_def) (* -------------------------------------------------------------------------- *) (* MinVars min_lvar_not_in_bounds min_rvar_incdec_eq *) (* -------------------------------------------------------------------------- *) definition min_lvar_not_in_bounds :: "('i,'a::{linorder,zero}) state \ var option" where "min_lvar_not_in_bounds s \ min_satisfying (\ x. \ in_bounds x (\\ s\) (\ s)) (map lhs (\ s))" interpretation MinLVarNotInBounds "min_lvar_not_in_bounds :: ('i,'a::lrv) state \ _" proof fix s::"('i,'a) state" show "min_lvar_not_in_bounds s = None \ (\x\lvars (\ s). in_bounds x (\\ s\) (\ s))" unfolding min_lvar_not_in_bounds_def lvars_def using min_satisfying_None by blast next fix s x\<^sub>i show "min_lvar_not_in_bounds s = Some x\<^sub>i \ x\<^sub>i \ lvars (\ s) \ \ in_bounds x\<^sub>i \\ s\ (\ s) \ (\x\lvars (\ s). x < x\<^sub>i \ in_bounds x \\ s\ (\ s))" unfolding min_lvar_not_in_bounds_def lvars_def using min_satisfying_Some by blast+ qed \ \all variables in vs have either a positive or a negative coefficient, so no equal-zero test required.\ definition unsat_indices :: "('i,'a :: linorder) Direction \ ('i,'a) state \ var list \ eq \ 'i list" where "unsat_indices dir s vs eq = (let r = rhs eq; li = LI dir s; ui = UI dir s in remdups (li (lhs eq) # map (\ x. 
if coeff r x < 0 then li x else ui x) vs))" definition min_rvar_incdec_eq :: "('i,'a) Direction \ ('i,'a::lrv) state \ eq \ 'i list + var" where "min_rvar_incdec_eq dir s eq = (let rvars = Abstract_Linear_Poly.vars_list (rhs eq) in case min_satisfying (\ x. reasable_var dir x eq s) rvars of None \ Inl (unsat_indices dir s rvars eq) | Some x\<^sub>j \ Inr x\<^sub>j)" interpretation MinRVarsEq "min_rvar_incdec_eq :: ('i,'a :: lrv) Direction \ _" proof fix s eq "is" and dir :: "('i,'a) Direction" let ?min = "min_satisfying (\ x. reasable_var dir x eq s) (Abstract_Linear_Poly.vars_list (rhs eq))" let ?vars = "Abstract_Linear_Poly.vars_list (rhs eq)" { assume "min_rvar_incdec_eq dir s eq = Inl is" from this[unfolded min_rvar_incdec_eq_def Let_def, simplified] have "?min = None" and I: "set is = set (unsat_indices dir s ?vars eq)" by (cases ?min, auto)+ from this min_satisfying_None set_vars_list have 1: "\ x. x \ rvars_eq eq \ \ reasable_var dir x eq s" by blast { fix i assume "i \ set is" and dir: "dir = Positive \ dir = Negative" and lhs_eq: "LI dir s (lhs eq) \ indices_state s" from this[unfolded I unsat_indices_def Let_def] consider (lhs) "i = LI dir s (lhs eq)" | (LI_rhs) x where "i = LI dir s x" "x \ rvars_eq eq" "coeff (rhs eq) x < 0" | (UI_rhs) x where "i = UI dir s x" "x \ rvars_eq eq" "coeff (rhs eq) x \ 0" by (auto split: if_splits simp: set_vars_list) then have "i \ indices_state s" proof cases case lhs show ?thesis unfolding lhs using lhs_eq by auto next case LI_rhs from 1[OF LI_rhs(2)] LI_rhs(3) have "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x))" by auto then show ?thesis unfolding LI_rhs(1) unfolding indices_state_def using dir by (auto simp: bound_compare'_defs boundsl_def boundsu_def indexl_def indexu_def split: option.splits intro!: exI[of _ x]) auto next case UI_rhs from UI_rhs(2) have "coeff (rhs eq) x \ 0" by (simp add: coeff_zero) with UI_rhs(3) have "0 < coeff (rhs eq) x" by auto from 1[OF UI_rhs(2)] this have "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x))" by auto then show ?thesis unfolding UI_rhs(1) unfolding indices_state_def using dir by (auto simp: bound_compare'_defs boundsl_def boundsu_def indexl_def indexu_def split: option.splits intro!: exI[of _ x]) auto qed } then have 2: "dir = Positive \ dir = Negative \ LI dir s (lhs eq) \ indices_state s \ set is \ indices_state s" by auto show " (\ x \ rvars_eq eq. \ reasable_var dir x eq s) \ set is = {LI dir s (lhs eq)} \ {LI dir s x |x. x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ {UI dir s x |x. x \ rvars_eq eq \ 0 < coeff (rhs eq) x} \ (dir = Positive \ dir = Negative \ LI dir s (lhs eq) \ indices_state s \ set is \ indices_state s)" proof (intro conjI impI 2, goal_cases) case 2 have "set is = {LI dir s (lhs eq)} \ LI dir s ` (rvars_eq eq \ {x. coeff (rhs eq) x < 0}) \ UI dir s ` (rvars_eq eq \ {x. \ coeff (rhs eq) x < 0})" unfolding I unsat_indices_def Let_def by (auto simp add: set_vars_list) also have "\ = {LI dir s (lhs eq)} \ LI dir s ` {x. x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ UI dir s ` { x. x \ rvars_eq eq \ 0 < coeff (rhs eq) x}" proof (intro arg_cong2[of _ _ _ _ "(\)"] arg_cong[of _ _ "\ x. _ ` x"] refl, goal_cases) case 2 { fix x assume "x \ rvars_eq eq" hence "coeff (rhs eq) x \ 0" by (simp add: coeff_zero) hence or: "coeff (rhs eq) x < 0 \ coeff (rhs eq) x > 0" by auto assume "\ coeff (rhs eq) x < 0" hence "coeff (rhs eq) x > 0" using or by simp } note [dest] = this show ?case by auto qed auto finally show "set is = {LI dir s (lhs eq)} \ {LI dir s x |x. 
x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ {UI dir s x |x. x \ rvars_eq eq \ 0 < coeff (rhs eq) x}" by auto qed (insert 1, auto) } fix x\<^sub>j assume "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j" from this[unfolded min_rvar_incdec_eq_def Let_def] have "?min = Some x\<^sub>j" by (cases ?min, auto) then show "x\<^sub>j \ rvars_eq eq" "reasable_var dir x\<^sub>j eq s" "(\ x' \ rvars_eq eq. x' < x\<^sub>j \ \ reasable_var dir x' eq s)" using min_satisfying_Some set_vars_list by blast+ qed (* -------------------------------------------------------------------------- *) (* EqForLVar eq_idx_for_lvar *) (* -------------------------------------------------------------------------- *) primrec eq_idx_for_lvar_aux :: "tableau \ var \ nat \ nat" where "eq_idx_for_lvar_aux [] x i = i" | "eq_idx_for_lvar_aux (eq # t) x i = (if lhs eq = x then i else eq_idx_for_lvar_aux t x (i+1))" definition eq_idx_for_lvar where "eq_idx_for_lvar t x \ eq_idx_for_lvar_aux t x 0" lemma eq_idx_for_lvar_aux: assumes "x \ lvars t" shows "let idx = eq_idx_for_lvar_aux t x i in i \ idx \ idx < i + length t \ lhs (t ! (idx - i)) = x" using assms proof (induct t arbitrary: i) case Nil then show ?case by (simp add: lvars_def) next case (Cons eq t) show ?case using Cons(1)[of "i+1"] Cons(2) by (cases "x = lhs eq") (auto simp add: Let_def lvars_def nth_Cons') qed global_interpretation EqForLVarDefault: EqForLVar eq_idx_for_lvar defines eq_for_lvar_code = EqForLVarDefault.eq_for_lvar proof (unfold_locales) fix x t assume "x \ lvars t" then show "eq_idx_for_lvar t x < length t \ lhs (t ! eq_idx_for_lvar t x) = x" using eq_idx_for_lvar_aux[of x t 0] by (simp add: Let_def eq_idx_for_lvar_def) qed (* -------------------------------------------------------------------------- *) (* PivotEq pivot_eq *) (* -------------------------------------------------------------------------- *) definition pivot_eq :: "eq \ var \ eq" where "pivot_eq e y \ let cy = coeff (rhs e) y in (y, (-1/cy) *R ((rhs e) - cy *R (Var y)) + (1/cy) *R (Var (lhs e)))" lemma pivot_eq_satisfies_eq: assumes "y \ rvars_eq e" shows "v \\<^sub>e e = v \\<^sub>e pivot_eq e y" using assms using scaleRat_right_distrib[of "1 / Rep_linear_poly (rhs e) y" "- (rhs e \ v \)" "v (lhs e)"] using Groups.group_add_class.minus_unique[of "- ((rhs e) \ v \)" "v (lhs e)"] unfolding coeff_def vars_def by (simp add: coeff_def vars_def Let_def pivot_eq_def satisfies_eq_def) (auto simp add: rational_vector.scale_right_diff_distrib valuate_add valuate_minus valuate_uminus valuate_scaleRat valuate_Var) lemma pivot_eq_rvars: assumes "x \ vars (rhs (pivot_eq e v))" "x \ lhs e" "coeff (rhs e) v \ 0" "v \ lhs e" shows "x \ vars (rhs e)" proof- have "v \ vars ((1 / coeff (rhs e) v) *R (rhs e - coeff (rhs e) v *R Var v))" using coeff_zero by force then have "x \ v" using assms(1) assms(3) assms(4) using vars_plus[of "(-1 / coeff (rhs e) v) *R (rhs e - coeff (rhs e) v *R Var v)" "(1 / coeff (rhs e) v) *R Var (lhs e)"] by (auto simp add: Let_def vars_scaleRat pivot_eq_def) then show ?thesis using assms using vars_plus[of "(-1 / coeff (rhs e) v) *R (rhs e - coeff (rhs e) v *R Var v)" "(1 / coeff (rhs e) v) *R Var (lhs e)"] using vars_minus[of "rhs e" "coeff (rhs e) v *R Var v"] by (auto simp add: vars_scaleRat Let_def pivot_eq_def) qed interpretation PivotEq pivot_eq proof fix eq x\<^sub>j assume "x\<^sub>j \ rvars_eq eq" "lhs eq \ rvars_eq eq" have "lhs (pivot_eq eq x\<^sub>j) = x\<^sub>j" unfolding pivot_eq_def by (simp add: Let_def) moreover have "rvars_eq (pivot_eq eq x\<^sub>j) = {lhs eq} \ (rvars_eq 
eq - {x\<^sub>j})" proof show "rvars_eq (pivot_eq eq x\<^sub>j) \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" proof fix x assume "x \ rvars_eq (pivot_eq eq x\<^sub>j)" have *: "coeff (rhs (pivot_eq eq x\<^sub>j)) x\<^sub>j = 0" using \x\<^sub>j \ rvars_eq eq\ \lhs eq \ rvars_eq eq\ using coeff_Var2[of "lhs eq" x\<^sub>j] by (auto simp add: Let_def pivot_eq_def) have "coeff (rhs eq) x\<^sub>j \ 0" using \x\<^sub>j \ rvars_eq eq\ using coeff_zero by (cases eq) (auto simp add:) then show "x \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" using pivot_eq_rvars[of x eq x\<^sub>j] using \x \ rvars_eq (pivot_eq eq x\<^sub>j)\ \x\<^sub>j \ rvars_eq eq\ \lhs eq \ rvars_eq eq\ using coeff_zero * by auto qed show "{lhs eq} \ (rvars_eq eq - {x\<^sub>j}) \ rvars_eq (pivot_eq eq x\<^sub>j)" proof fix x assume "x \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" have *: "coeff (rhs eq) (lhs eq) = 0" using coeff_zero using \lhs eq \ rvars_eq eq\ by auto have **: "coeff (rhs eq) x\<^sub>j \ 0" using \x\<^sub>j \ rvars_eq eq\ by (simp add: coeff_zero) have ***: "x \ rvars_eq eq \ coeff (Var (lhs eq)) x = 0" using \lhs eq \ rvars_eq eq\ using coeff_Var2[of "lhs eq" x] by auto have "coeff (Var x\<^sub>j) (lhs eq) = 0" using \x\<^sub>j \ rvars_eq eq\ \lhs eq \ rvars_eq eq\ using coeff_Var2[of x\<^sub>j "lhs eq"] by auto then have "coeff (rhs (pivot_eq eq x\<^sub>j)) x \ 0" using \x \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})\ * ** *** using coeff_zero[of "rhs eq" x] by (auto simp add: Let_def coeff_Var2 pivot_eq_def) then show "x \ rvars_eq (pivot_eq eq x\<^sub>j)" by (simp add: coeff_zero) qed qed ultimately show "let eq' = pivot_eq eq x\<^sub>j in lhs eq' = x\<^sub>j \ rvars_eq eq' = {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" by (simp add: Let_def) next fix v eq x\<^sub>j assume "x\<^sub>j \ rvars_eq eq" then show "v \\<^sub>e pivot_eq eq x\<^sub>j = v \\<^sub>e eq" using pivot_eq_satisfies_eq by blast qed (* -------------------------------------------------------------------------- *) (* SubstVar subst_var *) (* -------------------------------------------------------------------------- *) definition subst_var:: "var \ linear_poly \ linear_poly \ linear_poly" where "subst_var v lp' lp \ lp + (coeff lp v) *R lp' - (coeff lp v) *R (Var v)" definition "subst_var_eq_code = SubstVar.subst_var_eq subst_var" global_interpretation SubstVar subst_var rewrites "SubstVar.subst_var_eq subst_var = subst_var_eq_code" proof (unfold_locales) fix x\<^sub>j lp' lp have *: "\x. \x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j); x \ vars lp'\ \ x \ vars lp" proof- fix x assume "x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j)" then have "coeff (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j) x \ 0" using coeff_zero by force assume "x \ vars lp'" then have "coeff lp' x = 0" using coeff_zero by auto show "x \ vars lp" proof(rule ccontr) assume "x \ vars lp" then have "coeff lp x = 0" using coeff_zero by auto then show False using \coeff (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j) x \ 0\ using \coeff lp' x = 0\ by (cases "x = x\<^sub>j") (auto simp add: coeff_Var2) qed qed have "vars (subst_var x\<^sub>j lp' lp) \ (vars lp - {x\<^sub>j}) \ vars lp'" unfolding subst_var_def using coeff_zero[of "lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j" x\<^sub>j] using coeff_zero[of lp' x\<^sub>j] using * by auto moreover have "\x. 
\x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j); x \ vars lp; x \ vars lp'\ \ x = x\<^sub>j" proof- fix x assume "x \ vars lp" "x \ vars lp'" then have "coeff lp x \ 0" "coeff lp' x = 0" using coeff_zero by auto assume "x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j)" then have "coeff (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j) x = 0" using coeff_zero by force then show "x = x\<^sub>j" using \coeff lp x \ 0\ \coeff lp' x = 0\ by (cases "x = x\<^sub>j") (auto simp add: coeff_Var2) qed then have "vars lp - {x\<^sub>j} - vars lp' \ vars (subst_var x\<^sub>j lp' lp)" by (auto simp add: subst_var_def) ultimately show "vars lp - {x\<^sub>j} - vars lp' \s vars (subst_var x\<^sub>j lp' lp) \s vars lp - {x\<^sub>j} \ vars lp'" by simp next fix v x\<^sub>j lp' lp show "v x\<^sub>j = lp' \ v \ \ lp \ v \ = (subst_var x\<^sub>j lp' lp) \ v \" unfolding subst_var_def using valuate_minus[of "lp + coeff lp x\<^sub>j *R lp'" "coeff lp x\<^sub>j *R Var x\<^sub>j" v] using valuate_add[of lp "coeff lp x\<^sub>j *R lp'" v] using valuate_scaleRat[of "coeff lp x\<^sub>j" lp' v] valuate_scaleRat[of "coeff lp x\<^sub>j" "Var x\<^sub>j" v] using valuate_Var[of x\<^sub>j v] by auto next fix x\<^sub>j lp lp' assume "x\<^sub>j \ vars lp" hence 0: "coeff lp x\<^sub>j = 0" using coeff_zero by blast show "subst_var x\<^sub>j lp' lp = lp" unfolding subst_var_def 0 by simp next fix x\<^sub>j lp x lp' assume "x\<^sub>j \ vars lp" "x \ vars lp' - vars lp" hence x: "x \ x\<^sub>j" and 0: "coeff lp x = 0" and no0: "coeff lp x\<^sub>j \ 0" "coeff lp' x \ 0" using coeff_zero by blast+ from x have 00: "coeff (Var x\<^sub>j) x = 0" using coeff_Var2 by auto show "x \ vars (subst_var x\<^sub>j lp' lp)" unfolding subst_var_def coeff_zero[symmetric] by (simp add: 0 00 no0) qed (simp_all add: subst_var_eq_code_def) (* -------------------------------------------------------------------------- *) (* Update update *) (* -------------------------------------------------------------------------- *) definition rhs_eq_val where "rhs_eq_val v x\<^sub>i c e \ let x\<^sub>j = lhs e; a\<^sub>i\<^sub>j = coeff (rhs e) x\<^sub>i in \v\ x\<^sub>j + a\<^sub>i\<^sub>j *R (c - \v\ x\<^sub>i)" definition "update_code = RhsEqVal.update rhs_eq_val" definition "assert_bound'_code = Update.assert_bound' update_code" definition "assert_bound_code = Update.assert_bound update_code" global_interpretation RhsEqValDefault': RhsEqVal rhs_eq_val rewrites "RhsEqVal.update rhs_eq_val = update_code" and "Update.assert_bound update_code = assert_bound_code" and "Update.assert_bound' update_code = assert_bound'_code" proof unfold_locales fix v x c e assume "\v\ \\<^sub>e e" then show "rhs_eq_val v x c e = rhs e \ \v\(x := c) \" unfolding rhs_eq_val_def Let_def using valuate_update_x[of "rhs e" x "\v\" "\v\(x := c)"] by (auto simp add: satisfies_eq_def) qed (auto simp: update_code_def assert_bound'_code_def assert_bound_code_def) sublocale PivotUpdateMinVars < Check check proof (rule Check_check) show "RhsEqVal rhs_eq_val" .. 
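(* The only obligation left by Check_check is that the concrete rhs_eq_val meets the
   RhsEqVal specification, which the interpretation RhsEqValDefault' above provides;
   consequently every PivotUpdateMinVars instance also satisfies the Check
   specification for its check function. *)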
qed definition "pivot_code = Pivot'.pivot eq_idx_for_lvar pivot_eq subst_var" definition "pivot_tableau_code = Pivot'.pivot_tableau eq_idx_for_lvar pivot_eq subst_var" global_interpretation Pivot'Default: Pivot' eq_idx_for_lvar pivot_eq subst_var rewrites "Pivot'.pivot eq_idx_for_lvar pivot_eq subst_var = pivot_code" and "Pivot'.pivot_tableau eq_idx_for_lvar pivot_eq subst_var = pivot_tableau_code" and "SubstVar.subst_var_eq subst_var = subst_var_eq_code" by (unfold_locales, auto simp: pivot_tableau_code_def pivot_code_def subst_var_eq_code_def) definition "pivot_and_update_code = PivotUpdate.pivot_and_update pivot_code update_code" global_interpretation PivotUpdateDefault: PivotUpdate eq_idx_for_lvar pivot_code update_code rewrites "PivotUpdate.pivot_and_update pivot_code update_code = pivot_and_update_code" by (unfold_locales, auto simp: pivot_and_update_code_def) sublocale Update < AssertBoundNoLhs assert_bound proof (rule update_to_assert_bound_no_lhs) show "Pivot eq_idx_for_lvar pivot_code" .. qed definition "check_code = PivotUpdateMinVars.check eq_idx_for_lvar min_lvar_not_in_bounds min_rvar_incdec_eq pivot_and_update_code" definition "check'_code = PivotUpdateMinVars.check' eq_idx_for_lvar min_rvar_incdec_eq pivot_and_update_code" global_interpretation PivotUpdateMinVarsDefault: PivotUpdateMinVars eq_idx_for_lvar min_lvar_not_in_bounds min_rvar_incdec_eq pivot_and_update_code rewrites "PivotUpdateMinVars.check eq_idx_for_lvar min_lvar_not_in_bounds min_rvar_incdec_eq pivot_and_update_code = check_code" and "PivotUpdateMinVars.check' eq_idx_for_lvar min_rvar_incdec_eq pivot_and_update_code = check'_code" by (unfold_locales) (simp_all add: check_code_def check'_code_def) definition "assert_code = Assert'.assert assert_bound_code check_code" global_interpretation Assert'Default: Assert' assert_bound_code check_code rewrites "Assert'.assert assert_bound_code check_code = assert_code" by (unfold_locales, auto simp: assert_code_def) definition "assert_bound_loop_code = AssertAllState''.assert_bound_loop assert_bound_code" definition "assert_all_state_code = AssertAllState''.assert_all_state init_state assert_bound_code check_code" definition "assert_all_code = AssertAllState.assert_all assert_all_state_code" global_interpretation AssertAllStateDefault: AssertAllState'' init_state assert_bound_code check_code rewrites "AssertAllState''.assert_bound_loop assert_bound_code = assert_bound_loop_code" and "AssertAllState''.assert_all_state init_state assert_bound_code check_code = assert_all_state_code" and "AssertAllState.assert_all assert_all_state_code = assert_all_code" by unfold_locales (simp_all add: assert_bound_loop_code_def assert_all_state_code_def assert_all_code_def) (* -------------------------------------------------------------------------- *) (* Preprocess preprocess *) (* -------------------------------------------------------------------------- *) primrec monom_to_atom:: "QDelta ns_constraint \ QDelta atom" where "monom_to_atom (LEQ_ns l r) = (if (monom_coeff l < 0) then (Geq (monom_var l) (r /R monom_coeff l)) else (Leq (monom_var l) (r /R monom_coeff l)))" | "monom_to_atom (GEQ_ns l r) = (if (monom_coeff l < 0) then (Leq (monom_var l) (r /R monom_coeff l)) else (Geq (monom_var l) (r /R monom_coeff l)))" primrec qdelta_constraint_to_atom:: "QDelta ns_constraint \ var \ QDelta atom" where "qdelta_constraint_to_atom (LEQ_ns l r) v = (if (is_monom l) then (monom_to_atom (LEQ_ns l r)) else (Leq v r))" | "qdelta_constraint_to_atom (GEQ_ns l r) v = (if (is_monom l) then 
(monom_to_atom (GEQ_ns l r)) else (Geq v r))" primrec qdelta_constraint_to_atom':: "QDelta ns_constraint \ var \ QDelta atom" where "qdelta_constraint_to_atom' (LEQ_ns l r) v = (Leq v r)" | "qdelta_constraint_to_atom' (GEQ_ns l r) v = (Geq v r)" fun linear_poly_to_eq:: "linear_poly \ var \ eq" where "linear_poly_to_eq p v = (v, p)" datatype 'i istate = IState (FirstFreshVariable: var) (Tableau: tableau) (Atoms: "('i,QDelta) i_atom list") (Poly_Mapping: "linear_poly \ var") (UnsatIndices: "'i list") primrec zero_satisfies :: "'a :: lrv ns_constraint \ bool" where "zero_satisfies (LEQ_ns l r) \ 0 \ r" | "zero_satisfies (GEQ_ns l r) \ 0 \ r" lemma zero_satisfies: "poly c = 0 \ zero_satisfies c \ v \\<^sub>n\<^sub>s c" by (cases c, auto simp: valuate_zero) lemma not_zero_satisfies: "poly c = 0 \ \ zero_satisfies c \ \ v \\<^sub>n\<^sub>s c" by (cases c, auto simp: valuate_zero) fun preprocess' :: "('i,QDelta) i_ns_constraint list \ var \ 'i istate" where "preprocess' [] v = IState v [] [] (\ p. None) []" | "preprocess' ((i,h) # t) v = (let s' = preprocess' t v; p = poly h; is_monom_h = is_monom p; v' = FirstFreshVariable s'; t' = Tableau s'; a' = Atoms s'; m' = Poly_Mapping s'; u' = UnsatIndices s' in if is_monom_h then IState v' t' ((i,qdelta_constraint_to_atom h v') # a') m' u' else if p = 0 then if zero_satisfies h then s' else IState v' t' a' m' (i # u') else (case m' p of Some v \ IState v' t' ((i,qdelta_constraint_to_atom h v) # a') m' u' | None \ IState (v' + 1) (linear_poly_to_eq p v' # t') ((i,qdelta_constraint_to_atom h v') # a') (m' (p \ v')) u') )" lemma preprocess'_simps: "preprocess' ((i,h) # t) v = (let s' = preprocess' t v; p = poly h; is_monom_h = is_monom p; v' = FirstFreshVariable s'; t' = Tableau s'; a' = Atoms s'; m' = Poly_Mapping s'; u' = UnsatIndices s' in if is_monom_h then IState v' t' ((i,monom_to_atom h) # a') m' u' else if p = 0 then if zero_satisfies h then s' else IState v' t' a' m' (i # u') else (case m' p of Some v \ IState v' t' ((i,qdelta_constraint_to_atom' h v) # a') m' u' | None \ IState (v' + 1) (linear_poly_to_eq p v' # t') ((i,qdelta_constraint_to_atom' h v') # a') (m' (p \ v')) u') )" by (cases h, auto simp add: Let_def split: option.splits) lemmas preprocess'_code = preprocess'.simps(1) preprocess'_simps declare preprocess'_code[code] text \Normalization of constraints helps to identify same polynomials, e.g., the constraints $x + y \leq 5$ and $-2x-2y \leq -12$ will be normalized to $x + y \leq 5$ and $x + y \geq 6$, so that only one slack-variable will be introduced for the polynomial $x+y$, and not another one for $-2x-2y$. 
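As a sketch of the underlying arithmetic: in $-2x - 2y \leq -12$ the maximal variable has coefficient $-2$, so both sides are multiplied by its inverse $-\frac{1}{2}$; since this factor is negative the inequality is flipped, giving the normalized constraint $x + y \geq 6$ mentioned above.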
Normalization will take care that the max-var of the polynomial in the constraint will have coefficient 1 (if the polynomial is non-zero)\ fun normalize_ns_constraint :: "'a :: lrv ns_constraint \ 'a ns_constraint" where "normalize_ns_constraint (LEQ_ns l r) = (let v = max_var l; c = coeff l v in if c = 0 then LEQ_ns l r else let ic = inverse c in if c < 0 then GEQ_ns (ic *R l) (scaleRat ic r) else LEQ_ns (ic *R l) (scaleRat ic r))" | "normalize_ns_constraint (GEQ_ns l r) = (let v = max_var l; c = coeff l v in if c = 0 then GEQ_ns l r else let ic = inverse c in if c < 0 then LEQ_ns (ic *R l) (scaleRat ic r) else GEQ_ns (ic *R l) (scaleRat ic r))" lemma normalize_ns_constraint[simp]: "v \\<^sub>n\<^sub>s (normalize_ns_constraint c) \ v \\<^sub>n\<^sub>s (c :: 'a :: lrv ns_constraint)" proof - let ?c = "coeff (poly c) (max_var (poly c))" consider (0) "?c = 0" | (pos) "?c > 0" | (neg) "?c < 0" by linarith thus ?thesis proof cases case 0 thus ?thesis by (cases c, auto) next case pos from pos have id: "a /R ?c \ b /R ?c \ (a :: 'a) \ b" for a b using scaleRat_leq1 by fastforce show ?thesis using pos id by (cases c, auto simp: Let_def valuate_scaleRat id) next case neg from neg have id: "a /R ?c \ b /R ?c \ (a :: 'a) \ b" for a b using scaleRat_leq2 by fastforce show ?thesis using neg id by (cases c, auto simp: Let_def valuate_scaleRat id) qed qed declare normalize_ns_constraint.simps[simp del] lemma i_satisfies_normalize_ns_constraint[simp]: "Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s (map_prod id normalize_ns_constraint ` cs) \ Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs" by (cases Iv, force) abbreviation max_var:: "QDelta ns_constraint \ var" where "max_var C \ Abstract_Linear_Poly.max_var (poly C)" fun start_fresh_variable :: "('i,QDelta) i_ns_constraint list \ var" where "start_fresh_variable [] = 0" | "start_fresh_variable ((i,h)#t) = max (max_var h + 1) (start_fresh_variable t)" definition preprocess_part_1 :: "('i,QDelta) i_ns_constraint list \ tableau \ (('i,QDelta) i_atom list) \ 'i list" where "preprocess_part_1 l \ let start = start_fresh_variable l; is = preprocess' l start in (Tableau is, Atoms is, UnsatIndices is)" lemma lhs_linear_poly_to_eq [simp]: "lhs (linear_poly_to_eq h v) = v" by (cases h) auto lemma rvars_eq_linear_poly_to_eq [simp]: "rvars_eq (linear_poly_to_eq h v) = vars h" by simp lemma fresh_var_monoinc: "FirstFreshVariable (preprocess' cs start) \ start" by (induct cs) (auto simp add: Let_def split: option.splits) abbreviation vars_constraints where "vars_constraints cs \ \ (set (map vars (map poly cs)))" lemma start_fresh_variable_fresh: "\ var \ vars_constraints (flat_list cs). var < start_fresh_variable cs" using max_var_max by (induct cs, auto simp add: max_def) force+ lemma vars_tableau_vars_constraints: "rvars (Tableau (preprocess' cs start)) \ vars_constraints (flat_list cs)" by (induct cs start rule: preprocess'.induct) (auto simp add: rvars_def Let_def split: option.splits) lemma lvars_tableau_ge_start: "\ var \ lvars (Tableau (preprocess' cs start)). var \ start" by (induct cs start rule: preprocess'.induct) (auto simp add: Let_def lvars_def fresh_var_monoinc split: option.splits) lemma rhs_no_zero_tableau_start: "0 \ rhs ` set (Tableau (preprocess' cs start))" by (induct cs start rule: preprocess'.induct, auto simp add: Let_def rvars_def fresh_var_monoinc split: option.splits) lemma first_fresh_variable_not_in_lvars: "\var \ lvars (Tableau (preprocess' cs start)). 
FirstFreshVariable (preprocess' cs start) > var" by (induct cs start rule: preprocess'.induct) (auto simp add: Let_def lvars_def split: option.splits) lemma sat_atom_sat_eq_sat_constraint_non_monom: assumes "v \\<^sub>a qdelta_constraint_to_atom h var" "v \\<^sub>e linear_poly_to_eq (poly h) var" "\ is_monom (poly h)" shows "v \\<^sub>n\<^sub>s h" using assms by (cases h) (auto simp add: satisfies_eq_def split: if_splits) lemma qdelta_constraint_to_atom_monom: assumes "is_monom (poly h)" shows "v \\<^sub>a qdelta_constraint_to_atom h var \ v \\<^sub>n\<^sub>s h" proof (cases h) case (LEQ_ns l a) then show ?thesis using assms using monom_valuate[of _ v] apply auto using scaleRat_leq2[of "a /R monom_coeff l" "v (monom_var l)" "monom_coeff l"] using divide_leq1[of "monom_coeff l" "v (monom_var l)" a] apply (force, simp add: divide_rat_def) using scaleRat_leq1[of "v (monom_var l)" "a /R monom_coeff l" "monom_coeff l"] using is_monom_monom_coeff_not_zero[of l] using divide_leq[of "monom_coeff l" "v (monom_var l)" a] using is_monom_monom_coeff_not_zero[of l] by (simp_all add: divide_rat_def) next case (GEQ_ns l a) then show ?thesis using assms using monom_valuate[of _ v] apply auto using scaleRat_leq2[of "v (monom_var l)" "a /R monom_coeff l" "monom_coeff l"] using divide_geq1[of a "monom_coeff l" "v (monom_var l)"] apply (force, simp add: divide_rat_def) using scaleRat_leq1[of "a /R monom_coeff l" "v (monom_var l)" "monom_coeff l"] using is_monom_monom_coeff_not_zero[of l] using divide_geq[of a "monom_coeff l" "v (monom_var l)"] using is_monom_monom_coeff_not_zero[of l] by (simp_all add: divide_rat_def) qed lemma preprocess'_Tableau_Poly_Mapping_None: "(Poly_Mapping (preprocess' cs start)) p = None \ linear_poly_to_eq p v \ set (Tableau (preprocess' cs start))" by (induct cs start rule: preprocess'.induct, auto simp: Let_def split: option.splits if_splits) lemma preprocess'_Tableau_Poly_Mapping_Some: "(Poly_Mapping (preprocess' cs start)) p = Some v \ linear_poly_to_eq p v \ set (Tableau (preprocess' cs start))" by (induct cs start rule: preprocess'.induct, auto simp: Let_def split: option.splits if_splits) lemma preprocess'_Tableau_Poly_Mapping_Some': "(Poly_Mapping (preprocess' cs start)) p = Some v \ \ h. poly h = p \ \ is_monom (poly h) \ qdelta_constraint_to_atom h v \ flat (set (Atoms (preprocess' cs start)))" by (induct cs start rule: preprocess'.induct, auto simp: Let_def split: option.splits if_splits) lemma not_one_le_zero_qdelta: "\ (1 \ (0 :: QDelta))" by code_simp lemma one_zero_contra[dest,consumes 2]: "1 \ x \ (x :: QDelta) \ 0 \ False" using order.trans[of 1 x 0] not_one_le_zero_qdelta by simp lemma i_preprocess'_sat: assumes "(I,v) \\<^sub>i\<^sub>a\<^sub>s set (Atoms (preprocess' s start))" "v \\<^sub>t Tableau (preprocess' s start)" "I \ set (UnsatIndices (preprocess' s start)) = {}" shows "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set s" using assms by (induct s start rule: preprocess'.induct) (auto simp add: Let_def satisfies_atom_set_def satisfies_tableau_def qdelta_constraint_to_atom_monom sat_atom_sat_eq_sat_constraint_non_monom split: if_splits option.splits dest!: preprocess'_Tableau_Poly_Mapping_Some zero_satisfies) lemma preprocess'_sat: assumes "v \\<^sub>a\<^sub>s flat (set (Atoms (preprocess' s start)))" "v \\<^sub>t Tableau (preprocess' s start)" "set (UnsatIndices (preprocess' s start)) = {}" shows "v \\<^sub>n\<^sub>s\<^sub>s flat (set s)" using i_preprocess'_sat[of UNIV v s start] assms by simp lemma sat_constraint_valuation: assumes "\ var \ vars (poly c). 
v1 var = v2 var" shows "v1 \\<^sub>n\<^sub>s c \ v2 \\<^sub>n\<^sub>s c" using assms using valuate_depend by (cases c) (force)+ lemma atom_var_first: assumes "a \ flat (set (Atoms (preprocess' cs start)))" "\ var \ vars_constraints (flat_list cs). var < start" shows "atom_var a < FirstFreshVariable (preprocess' cs start)" using assms proof(induct cs arbitrary: a) case (Cons hh t a) obtain i h where hh: "hh = (i,h)" by force let ?s = "preprocess' t start" show ?case proof(cases "a \ flat (set (Atoms ?s))") case True then show ?thesis using Cons(1)[of a] Cons(3) hh by (auto simp add: Let_def split: option.splits) next case False consider (monom) "is_monom (poly h)" | (normal) "\ is_monom (poly h)" "poly h \ 0" "(Poly_Mapping ?s) (poly h) = None" | (old) var where "\ is_monom (poly h)" "poly h \ 0" "(Poly_Mapping ?s) (poly h) = Some var" | (zero) "\ is_monom (poly h)" "poly h = 0" by auto then show ?thesis proof cases case monom from Cons(3) monom_var_in_vars hh monom have "monom_var (poly h) < start" by auto moreover from False have "a = qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" using Cons(2) hh monom by (auto simp: Let_def) ultimately show ?thesis using fresh_var_monoinc[of start t] hh monom by (cases a; cases h) (auto simp add: Let_def ) next case normal have "a = qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" using False normal Cons(2) hh by (auto simp: Let_def) then show ?thesis using hh normal by (cases a; cases h) (auto simp add: Let_def ) next case (old var) from preprocess'_Tableau_Poly_Mapping_Some'[OF old(3)] obtain h' where "poly h' = poly h" "qdelta_constraint_to_atom h' var \ flat (set (Atoms ?s))" by blast from Cons(1)[OF this(2)] Cons(3) this(1) old(1) have var: "var < FirstFreshVariable ?s" by (cases h', auto) have "a = qdelta_constraint_to_atom h var" using False old Cons(2) hh by (auto simp: Let_def) then have a: "atom_var a = var" using old by (cases a; cases h; auto simp: Let_def) show ?thesis unfolding a hh by (simp add: old Let_def var) next case zero from False show ?thesis using Cons(2) hh zero by (auto simp: Let_def split: if_splits) qed qed qed simp lemma satisfies_tableau_satisfies_tableau: assumes "v1 \\<^sub>t t" "\ var \ tvars t. v1 var = v2 var" shows "v2 \\<^sub>t t" using assms using valuate_depend[of _ v1 v2] by (force simp add: lvars_def rvars_def satisfies_eq_def satisfies_tableau_def) lemma preprocess'_unsat_indices: assumes "i \ set (UnsatIndices (preprocess' s start))" shows "\ ({i},v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set s" using assms proof (induct s start rule: preprocess'.induct) case (2 j h t v) then show ?case by (auto simp: Let_def not_zero_satisfies split: if_splits option.splits) qed simp lemma preprocess'_unsat: assumes "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set s" "vars_constraints (flat_list s) \ V" "\var \ V. var < start" shows "\v'. (\var \ V. v var = v' var) \ v' \\<^sub>a\<^sub>s restrict_to I (set (Atoms (preprocess' s start))) \ v' \\<^sub>t Tableau (preprocess' s start)" using assms proof(induct s) case Nil show ?case by (auto simp add: satisfies_atom_set_def satisfies_tableau_def) next case (Cons hh t) obtain i h where hh: "hh = (i,h)" by force from Cons hh obtain v' where var: "(\var\V. 
v var = v' var)" and v'_as: "v' \\<^sub>a\<^sub>s restrict_to I (set (Atoms (preprocess' t start)))" and v'_t: "v' \\<^sub>t Tableau (preprocess' t start)" and vars_h: "vars_constraints [h] \ V" by auto from Cons(2)[unfolded hh] have i: "i \ I \ v \\<^sub>n\<^sub>s h" by auto have "\ var \ vars (poly h). v var = v' var" using \(\var\V. v var = v' var)\ Cons(3) hh by auto then have vh_v'h: "v \\<^sub>n\<^sub>s h \ v' \\<^sub>n\<^sub>s h" by (rule sat_constraint_valuation) show ?case proof(cases "is_monom (poly h)") case True then have id: "is_monom (poly h) = True" by simp show ?thesis unfolding hh preprocess'.simps Let_def id if_True istate.simps istate.sel proof (intro exI[of _ v'] conjI v'_t var satisifies_atom_restrict_to_Cons[OF v'_as]) assume "i \ I" from i[OF this] var vh_v'h show "v' \\<^sub>a qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" unfolding qdelta_constraint_to_atom_monom[OF True] by auto qed next case False then have id: "is_monom (poly h) = False" by simp let ?s = "preprocess' t start" let ?x = "FirstFreshVariable ?s" show ?thesis proof (cases "poly h = 0") case zero: False hence id': "(poly h = 0) = False" by simp let ?look = "(Poly_Mapping ?s) (poly h)" show ?thesis proof (cases ?look) case None let ?y = "poly h \ v'\" let ?v' = "v'(?x:=?y)" show ?thesis unfolding preprocess'.simps hh Let_def id id' if_False istate.simps istate.sel None option.simps proof (rule exI[of _ ?v'], intro conjI satisifies_atom_restrict_to_Cons satisfies_tableau_Cons) show vars': "(\var\V. v var = ?v' var)" using \(\var\V. v var = v' var)\ using fresh_var_monoinc[of start t] using Cons(4) by auto { assume "i \ I" from vh_v'h i[OF this] False show "?v' \\<^sub>a qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" by (cases h, auto) } let ?atoms = "restrict_to I (set (Atoms (preprocess' t start)))" show "?v' \\<^sub>a\<^sub>s ?atoms" unfolding satisfies_atom_set_def proof fix a assume "a \ ?atoms" then have "v' \\<^sub>a a" using \v' \\<^sub>a\<^sub>s ?atoms\ hh by (force simp add: satisfies_atom_set_def) then show "?v' \\<^sub>a a" using \a \ ?atoms\ atom_var_first[of a t start] using Cons(3) Cons(4) by (cases a) auto qed show "?v' \\<^sub>e linear_poly_to_eq (poly h) (FirstFreshVariable (preprocess' t start))" using Cons(3) Cons(4) using valuate_depend[of "poly h" v' "v'(FirstFreshVariable (preprocess' t start) := (poly h) \ v' \)"] using fresh_var_monoinc[of start t] hh by (cases h) (force simp add: satisfies_eq_def)+ have "FirstFreshVariable (preprocess' t start) \ tvars (Tableau (preprocess' t start))" using first_fresh_variable_not_in_lvars[of t start] using Cons(3) Cons(4) using vars_tableau_vars_constraints[of t start] using fresh_var_monoinc[of start t] by force then show "?v' \\<^sub>t Tableau (preprocess' t start)" using \v' \\<^sub>t Tableau (preprocess' t start)\ using satisfies_tableau_satisfies_tableau[of v' "Tableau (preprocess' t start)" ?v'] by auto qed next case (Some var) from preprocess'_Tableau_Poly_Mapping_Some[OF Some] have "linear_poly_to_eq (poly h) var \ set (Tableau ?s)" by auto with v'_t[unfolded satisfies_tableau_def] have v'_h_var: "v' \\<^sub>e linear_poly_to_eq (poly h) var" by auto show ?thesis unfolding preprocess'.simps hh Let_def id id' if_False istate.simps istate.sel Some option.simps proof (intro exI[of _ v'] conjI var v'_t satisifies_atom_restrict_to_Cons satisfies_tableau_Cons v'_as) assume "i \ I" from vh_v'h i[OF this] False v'_h_var show "v' \\<^sub>a qdelta_constraint_to_atom h var" by (cases h, auto simp: 
satisfies_eq_iff) qed qed next case zero: True hence id': "(poly h = 0) = True" by simp show ?thesis proof (cases "zero_satisfies h") case True hence id'': "zero_satisfies h = True" by simp show ?thesis unfolding hh preprocess'.simps Let_def id id' id'' if_True if_False istate.simps istate.sel by (intro exI[of _ v'] conjI v'_t var v'_as) next case False hence id'': "zero_satisfies h = False" by simp { assume "i \ I" from i[OF this] not_zero_satisfies[OF zero False] have False by simp } note no_I = this show ?thesis unfolding hh preprocess'.simps Let_def id id' id'' if_True if_False istate.simps istate.sel proof (rule Cons(1)[OF _ _ Cons(4)]) show "(I, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set t" using Cons(2) by auto show "vars_constraints (map snd t) \ V" using Cons(3) by force qed qed qed qed qed lemma lvars_distinct: "distinct (map lhs (Tableau (preprocess' cs start)))" using first_fresh_variable_not_in_lvars[where ?'a = 'a] by (induct cs, auto simp add: Let_def lvars_def) (force split: option.splits) lemma normalized_tableau_preprocess': "\ (Tableau (preprocess' cs (start_fresh_variable cs)))" proof - let ?s = "start_fresh_variable cs" show ?thesis using lvars_distinct[of cs ?s] using lvars_tableau_ge_start[of cs ?s] using vars_tableau_vars_constraints[of cs ?s] using start_fresh_variable_fresh[of cs] unfolding normalized_tableau_def Let_def by (smt disjoint_iff_not_equal inf.absorb_iff2 inf.strict_order_iff rhs_no_zero_tableau_start subsetD) qed text \Improved preprocessing: Deletion. An equation x = p can be deleted from the tableau, if x does not occur in the atoms.\ lemma delete_lhs_var: assumes norm: "\ t" and t: "t = t1 @ (x,p) # t2" and t': "t' = t1 @ t2" and tv: "tv = (\ v. upd x (p \ \v\ \) v)" and x_as: "x \ atom_var ` snd ` set as" shows "\ t'" \ \new tableau is normalized\ "\w\ \\<^sub>t t' \ \tv w\ \\<^sub>t t" \ \solution of new tableau is translated to solution of old tableau\ "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" \ \solution translation also works for bounds\ "v \\<^sub>t t \ v \\<^sub>t t'" \ \solution of old tableau is also solution for new tableau\ proof - have tv: "\tv w\ = \w\ (x := p \ \w\ \)" unfolding tv map2fun_def' by auto from norm show "\ t'" unfolding t t' normalized_tableau_def by (auto simp: lvars_def rvars_def) show "v \\<^sub>t t \ v \\<^sub>t t'" unfolding t t' satisfies_tableau_def by auto from norm have dist: "distinct (map lhs t)" "lvars t \ rvars t = {}" unfolding normalized_tableau_def by auto from x_as have x_as: "x \ atom_var ` snd ` (set as \ I \ UNIV)" by auto have "(I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as" unfolding i_satisfies_atom_set.simps satisfies_atom_set_def proof (intro ball_cong[OF refl]) fix a assume "a \ snd ` (set as \ I \ UNIV)" with x_as have "x \ atom_var a" by auto then show "\tv w\ \\<^sub>a a = \w\ \\<^sub>a a" unfolding tv by (cases a, auto) qed then show "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" by blast assume w: "\w\ \\<^sub>t t'" from dist(2)[unfolded t] have xp: "x \ vars p" unfolding lvars_def rvars_def by auto { fix eq assume mem: "eq \ set t1 \ set t2" then have "eq \ set t'" unfolding t' by auto with w have w: "\w\ \\<^sub>e eq" unfolding satisfies_tableau_def by auto obtain y q where eq: "eq = (y,q)" by force from mem[unfolded eq] dist(1)[unfolded t] have xy: "x \ y" by force from mem[unfolded eq] dist(2)[unfolded t] have xq: "x \ vars q" unfolding lvars_def rvars_def 
by auto from w have "\tv w\ \\<^sub>e eq" unfolding tv eq satisfies_eq_iff using xy xq by (auto intro!: valuate_depend) } moreover have "\tv w\ \\<^sub>e (x,p)" unfolding satisfies_eq_iff tv using xp by (auto intro!: valuate_depend) ultimately show "\tv w\ \\<^sub>t t" unfolding t satisfies_tableau_def by auto qed definition pivot_tableau_eq :: "tableau \ eq \ tableau \ var \ tableau \ eq \ tableau" where "pivot_tableau_eq t1 eq t2 x \ let eq' = pivot_eq eq x; m = map (\ e. subst_var_eq x (rhs eq') e) in (m t1, eq', m t2)" lemma pivot_tableau_eq: assumes t: "t = t1 @ eq # t2" "t' = t1' @ eq' # t2'" and x: "x \ rvars_eq eq" and norm: "\ t" and pte: "pivot_tableau_eq t1 eq t2 x = (t1',eq',t2')" shows "\ t'" "lhs eq' = x" "(v :: 'a :: lrv valuation) \\<^sub>t t' \ v \\<^sub>t t" proof - let ?s = "\ t. State t undefined undefined undefined undefined undefined" let ?y = "lhs eq" have yl: "?y \ lvars t" unfolding t lvars_def by auto from norm have eq_t12: "?y \ lhs ` (set t1 \ set t2)" unfolding normalized_tableau_def t lvars_def by auto have eq: "eq_for_lvar_code t ?y = eq" by (metis (mono_tags, lifting) EqForLVarDefault.eq_for_lvar Un_insert_right eq_t12 image_iff insert_iff list.set(2) set_append t(1) yl) have *: "(?y, b) \ set t1 \ ?y \ lhs ` (set t1)" for b t1 by (metis image_eqI lhs.simps) have pivot: "pivot_tableau_code ?y x t = t'" unfolding Pivot'Default.pivot_tableau_def Let_def eq using pte[symmetric] unfolding t pivot_tableau_eq_def Let_def using eq_t12 by (auto dest!: *) note thms = Pivot'Default.pivot_vars' Pivot'Default.pivot_tableau note thms = thms[unfolded Pivot'Default.pivot_def, of "?s t", simplified, OF norm yl, unfolded eq, OF x, unfolded pivot] from thms(1) thms(2)[of v] show "\ t'" "v \\<^sub>t t' \ v \\<^sub>t t" by auto show "lhs eq' = x" using pte[symmetric] unfolding t pivot_tableau_eq_def Let_def pivot_eq_def by auto qed function preprocess_opt :: "var set \ tableau \ tableau \ tableau \ ((var,'a :: lrv)mapping \ (var,'a)mapping)" where "preprocess_opt X t1 [] = (t1,id)" | "preprocess_opt X t1 ((x,p) # t2) = (if x \ X then case preprocess_opt X t1 t2 of (t,tv) \ (t, (\ v. upd x (p \ \v\ \) v) o tv) else case find (\ x. x \ X) (Abstract_Linear_Poly.vars_list p) of None \ preprocess_opt X ((x,p) # t1) t2 | Some y \ case pivot_tableau_eq t1 (x,p) t2 y of (tt1,(z,q),tt2) \ case preprocess_opt X tt1 tt2 of (t,tv) \ (t, (\ v. upd z (q \ \v\ \) v) o tv))" by pat_completeness auto termination by (relation "measure (\ (X,t1,t2). length t2)", auto simp: pivot_tableau_eq_def Let_def) lemma preprocess_opt: assumes "X = atom_var ` snd ` set as" "preprocess_opt X t1 t2 = (t',tv)" "\ t" "t = rev t1 @ t2" shows "\ t'" "(\w\ :: 'a :: lrv valuation) \\<^sub>t t' \ \tv w\ \\<^sub>t t" "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" "v \\<^sub>t t \ (v :: 'a valuation) \\<^sub>t t'" using assms proof (atomize(full), induct X t1 t2 arbitrary: t tv w rule: preprocess_opt.induct) case (1 X t1 t tv) then show ?case by (auto simp: normalized_tableau_def lvars_def rvars_def satisfies_tableau_def simp flip: rev_map) next case (2 X t1 x p t2 t tv w) note IH = 2(1-3) note X = 2(4) note res = 2(5) have norm: "\ t" by fact have t: "t = rev t1 @ (x, p) # t2" by fact show ?case proof (cases "x \ X") case False with res obtain tv' where res: "preprocess_opt X t1 t2 = (t', tv')" and tv: "tv = (\v. 
Mapping.update x (p \ \v\ \) v) o tv'" by (auto split: prod.splits) note delete = delete_lhs_var[OF norm t refl refl False[unfolded X]] note IH = IH(1)[OF False X res delete(1) refl] from delete(2)[of "tv' w"] delete(3)[of I "tv' w"] delete(4)[of v] IH[of w] show ?thesis unfolding tv o_def by auto next case True then have "\ x \ X" by simp note IH = IH(2-3)[OF this] show ?thesis proof (cases "find (\x. x \ X) (Abstract_Linear_Poly.vars_list p)") case None with res True have pre: "preprocess_opt X ((x, p) # t1) t2 = (t', tv)" by auto from t have t: "t = rev ((x, p) # t1) @ t2" by simp from IH(1)[OF None X pre norm t] show ?thesis . next case (Some z) from Some[unfolded find_Some_iff] have zX: "z \ X" and "z \ set (Abstract_Linear_Poly.vars_list p)" unfolding set_conv_nth by auto then have z: "z \ rvars_eq (x, p)" by (simp add: set_vars_list) obtain tt1 z' q tt2 where pte: "pivot_tableau_eq t1 (x, p) t2 z = (tt1,(z',q),tt2)" by (cases "pivot_tableau_eq t1 (x, p) t2 z", auto) then have pte_rev: "pivot_tableau_eq (rev t1) (x, p) t2 z = (rev tt1,(z',q),tt2)" unfolding pivot_tableau_eq_def Let_def by (auto simp: rev_map) note eq = pivot_tableau_eq[OF t refl z norm pte_rev] then have z': "z' = z" by auto note eq = eq(1,3)[unfolded z'] note pte = pte[unfolded z'] note pte_rev = pte_rev[unfolded z'] note delete = delete_lhs_var[OF eq(1) refl refl refl zX[unfolded X]] from res[unfolded preprocess_opt.simps Some option.simps pte] True obtain tv' where res: "preprocess_opt X tt1 tt2 = (t', tv')" and tv: "tv = (\v. Mapping.update z (q \ \v\ \) v) o tv'" by (auto split: prod.splits) note IH = IH(2)[OF Some, unfolded pte, OF refl refl refl X res delete(1) refl] from IH[of w] delete(2)[of "tv' w"] delete(3)[of I "tv' w"] delete(4)[of v] show ?thesis unfolding tv o_def eq(2) by auto qed qed qed definition "preprocess_part_2 as t = preprocess_opt (atom_var ` snd ` set as) [] t" lemma preprocess_part_2: assumes "preprocess_part_2 as t = (t',tv)" "\ t" shows "\ t'" "(\w\ :: 'a :: lrv valuation) \\<^sub>t t' \ \tv w\ \\<^sub>t t" "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" "v \\<^sub>t t \ (v :: 'a valuation) \\<^sub>t t'" using preprocess_opt[OF refl assms(1)[unfolded preprocess_part_2_def] assms(2)] by auto definition preprocess :: "('i,QDelta) i_ns_constraint list \ _ \ _ \ (_ \ (var,QDelta)mapping) \ 'i list" where "preprocess l = (case preprocess_part_1 (map (map_prod id normalize_ns_constraint) l) of (t,as,ui) \ case preprocess_part_2 as t of (t,tv) \ (t,as,tv,ui))" lemma preprocess: assumes id: "preprocess cs = (t, as, trans_v, ui)" shows "\ t" "fst ` set as \ set ui \ fst ` set cs" "distinct_indices_ns (set cs) \ distinct_indices_atoms (set as)" "I \ set ui = {} \ (I, \v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t \ (I, \trans_v v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" "i \ set ui \ \v. ({i}, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" "\ v. (I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs \ \v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t" proof - define ncs where "ncs = map (map_prod id normalize_ns_constraint) cs" have ncs: "fst ` set ncs = fst ` set cs" "\ Iv. 
Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set ncs \ Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" unfolding ncs_def by force auto from id obtain t1 where part1: "preprocess_part_1 ncs = (t1,as,ui)" unfolding preprocess_def by (auto simp: ncs_def split: prod.splits) from id[unfolded preprocess_def part1 split ncs_def[symmetric]] have part_2: "preprocess_part_2 as t1 = (t,trans_v)" by (auto split: prod.splits) have norm: "\ t1" using normalized_tableau_preprocess' part1 by (auto simp: preprocess_part_1_def Let_def) note part_2 = preprocess_part_2[OF part_2 norm] show "\ t" by fact have unsat: "(I,\v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t1 \ I \ set ui = {} \ (I,\v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set ncs" for v using part1[unfolded preprocess_part_1_def Let_def, simplified] i_preprocess'_sat[of I] by blast with part_2(2,3) show "I \ set ui = {} \ (I,\v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t \ (I,\trans_v v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by (auto simp: ncs) from part1[unfolded preprocess_part_1_def Let_def] obtain var where as: "as = Atoms (preprocess' ncs var)" and ui: "ui = UnsatIndices (preprocess' ncs var)" by auto note min_defs = distinct_indices_atoms_def distinct_indices_ns_def have min1: "(distinct_indices_ns (set ncs) \ (\ k a. (k,a) \ set as \ (\ v p. a = qdelta_constraint_to_atom p v \ (k,p) \ set ncs \ (\ is_monom (poly p) \ Poly_Mapping (preprocess' ncs var) (poly p) = Some v) ))) \ fst ` set as \ set ui \ fst ` set ncs" unfolding as ui proof (induct ncs var rule: preprocess'.induct) case (2 i h t v) hence sub: "fst ` set (Atoms (preprocess' t v)) \ set (UnsatIndices (preprocess' t v)) \ fst ` set t" by auto show ?case proof (intro conjI impI allI, goal_cases) show "fst ` set (Atoms (preprocess' ((i, h) # t) v)) \ set (UnsatIndices (preprocess' ((i,h) #t) v)) \ fst ` set ((i, h) # t)" using sub by (auto simp: Let_def split: option.splits) next case (1 k a) hence min': "distinct_indices_ns (set t)" unfolding min_defs list.simps by blast note IH = 2[THEN conjunct1, rule_format, OF min'] show ?case proof (cases "(k,a) \ set (Atoms (preprocess' t v))") case True from IH[OF this] show ?thesis by (force simp: Let_def split: option.splits if_split) next case new: False with 1(2) have ki: "k = i" by (auto simp: Let_def split: if_splits option.splits) show ?thesis proof (cases "is_monom (poly h)") case True thus ?thesis using new 1(2) by (auto simp: Let_def True intro!: exI) next case no_monom: False thus ?thesis using new 1(2) by (auto simp: Let_def no_monom split: option.splits if_splits intro!: exI) qed qed qed qed (auto simp: min_defs) then show "fst ` set as \ set ui \ fst ` set cs" by (auto simp: ncs) { assume mini: "distinct_indices_ns (set cs)" have mini: "distinct_indices_ns (set ncs)" unfolding distinct_indices_ns_def proof (intro impI allI, goal_cases) case (1 n1 n2 i) from 1(1) obtain c1 where c1: "(i,c1) \ set cs" and n1: "n1 = normalize_ns_constraint c1" unfolding ncs_def by auto from 1(2) obtain c2 where c2: "(i,c2) \ set cs" and n2: "n2 = normalize_ns_constraint c2" unfolding ncs_def by auto from mini[unfolded distinct_indices_ns_def, rule_format, OF c1 c2] show ?case unfolding n1 n2 by (cases c1; cases c2; auto simp: normalize_ns_constraint.simps Let_def) qed note min = min1[THEN conjunct1, rule_format, OF this] show "distinct_indices_atoms (set as)" unfolding distinct_indices_atoms_def proof (intro allI impI) fix i a b assume a: "(i,a) \ set as" and b: "(i,b) \ set as" from min[OF a] obtain v p where aa: "a = 
qdelta_constraint_to_atom p v" "(i, p) \ set ncs" "\ is_monom (poly p) \ Poly_Mapping (preprocess' ncs var) (poly p) = Some v" by auto from min[OF b] obtain w q where bb: "b = qdelta_constraint_to_atom q w" "(i, q) \ set ncs" "\ is_monom (poly q) \ Poly_Mapping (preprocess' ncs var) (poly q) = Some w" by auto from mini[unfolded distinct_indices_ns_def, rule_format, OF aa(2) bb(2)] have *: "poly p = poly q" "ns_constraint_const p = ns_constraint_const q" by auto show "atom_var a = atom_var b \ atom_const a = atom_const b" proof (cases "is_monom (poly q)") case True thus ?thesis unfolding aa(1) bb(1) using * by (cases p; cases q, auto) next case False thus ?thesis unfolding aa(1) bb(1) using * aa(3) bb(3) by (cases p; cases q, auto) qed qed } show "i \ set ui \ \v. ({i}, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" using preprocess'_unsat_indices[of i ncs] part1 unfolding preprocess_part_1_def Let_def by (auto simp: ncs) assume "\ w. (I,w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" then obtain w where "(I,w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by blast hence "(I,w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set ncs" unfolding ncs . from preprocess'_unsat[OF this _ start_fresh_variable_fresh, of ncs] have "\v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t1" using part1 unfolding preprocess_part_1_def Let_def by auto then show "\v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t" using part_2(4) by auto qed interpretation PreprocessDefault: Preprocess preprocess by (unfold_locales; rule preprocess, auto) global_interpretation Solve_exec_ns'Default: Solve_exec_ns' preprocess assert_all_code defines solve_exec_ns_code = Solve_exec_ns'Default.solve_exec_ns by unfold_locales (* -------------------------------------------------------------------------- *) (* To_ns to_ns from_ns *) (* -------------------------------------------------------------------------- *) primrec constraint_to_qdelta_constraint:: "constraint \ QDelta ns_constraint list" where "constraint_to_qdelta_constraint (LT l r) = [LEQ_ns l (QDelta.QDelta r (-1))]" | "constraint_to_qdelta_constraint (GT l r) = [GEQ_ns l (QDelta.QDelta r 1)]" | "constraint_to_qdelta_constraint (LEQ l r) = [LEQ_ns l (QDelta.QDelta r 0)]" | "constraint_to_qdelta_constraint (GEQ l r) = [GEQ_ns l (QDelta.QDelta r 0)]" | "constraint_to_qdelta_constraint (EQ l r) = [LEQ_ns l (QDelta.QDelta r 0), GEQ_ns l (QDelta.QDelta r 0)]" | "constraint_to_qdelta_constraint (LTPP l1 l2) = [LEQ_ns (l1 - l2) (QDelta.QDelta 0 (-1))]" | "constraint_to_qdelta_constraint (GTPP l1 l2) = [GEQ_ns (l1 - l2) (QDelta.QDelta 0 1)]" | "constraint_to_qdelta_constraint (LEQPP l1 l2) = [LEQ_ns (l1 - l2) 0]" | "constraint_to_qdelta_constraint (GEQPP l1 l2) = [GEQ_ns (l1 - l2) 0]" | "constraint_to_qdelta_constraint (EQPP l1 l2) = [LEQ_ns (l1 - l2) 0, GEQ_ns (l1 - l2) 0]" primrec i_constraint_to_qdelta_constraint:: "'i i_constraint \ ('i,QDelta) i_ns_constraint list" where "i_constraint_to_qdelta_constraint (i,c) = map (Pair i) (constraint_to_qdelta_constraint c)" definition to_ns :: "'i i_constraint list \ ('i,QDelta) i_ns_constraint list" where "to_ns l \ concat (map i_constraint_to_qdelta_constraint l)" primrec \0_val :: "QDelta ns_constraint \ QDelta valuation \ rat" where "\0_val (LEQ_ns lll rrr) vl = \0 lll\vl\ rrr" | "\0_val (GEQ_ns lll rrr) vl = \0 rrr lll\vl\" primrec \0_val_min :: "QDelta ns_constraint list \ QDelta valuation \ rat" where "\0_val_min [] vl = 1" | "\0_val_min (h#t) vl = min (\0_val_min t vl) (\0_val h vl)" abbreviation vars_list_constraints where 
"vars_list_constraints cs \ remdups (concat (map Abstract_Linear_Poly.vars_list (map poly cs)))" definition from_ns ::"(var, QDelta) mapping \ QDelta ns_constraint list \ (var, rat) mapping" where "from_ns vl cs \ let \ = \0_val_min cs \vl\ in Mapping.tabulate (vars_list_constraints cs) (\ var. val (\vl\ var) \)" global_interpretation SolveExec'Default: SolveExec' to_ns from_ns solve_exec_ns_code defines solve_exec_code = SolveExec'Default.solve_exec and solve_code = SolveExec'Default.solve proof unfold_locales { fix ics :: "'i i_constraint list" and v' and I let ?to_ns = "to_ns ics" let ?flat = "set ?to_ns" assume sat: "(I,\v'\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s ?flat" define cs where "cs = map snd (filter (\ ic. fst ic \ I) ics)" define to_ns' where to_ns: "to_ns' = (\ l. concat (map constraint_to_qdelta_constraint l))" show "(I,\from_ns v' (flat_list ?to_ns)\) \\<^sub>i\<^sub>c\<^sub>s set ics" unfolding i_satisfies_cs.simps proof let ?listf = "map (\C. case C of (LEQ_ns l r) \ (l\\v'\\, r) | (GEQ_ns l r) \ (r, l\\v'\\) )" let ?to_ns = "\ ics. to_ns' (map snd (filter (\ic. fst ic \ I) ics))" let ?list = "?listf (to_ns' cs)" (* index-filtered list *) let ?f_list = "flat_list (to_ns ics)" let ?flist = "?listf ?f_list" (* full list *) obtain i_list where i_list: "?list = i_list" by force obtain f_list where f_list: "?flist = f_list" by force have if_list: "set i_list \ set f_list" unfolding i_list[symmetric] f_list[symmetric] to_ns_def to_ns set_map set_concat cs_def by (intro image_mono, force) have "\ qd1 qd2. (qd1, qd2) \ set ?list \ qd1 \ qd2" proof- fix qd1 qd2 assume "(qd1, qd2) \ set ?list" then show "qd1 \ qd2" using sat unfolding cs_def proof(induct ics) case Nil then show ?case by (simp add: to_ns) next case (Cons h t) obtain i c where h: "h = (i,c)" by force from Cons(2) consider (ic) "(qd1,qd2) \ set (?listf (?to_ns [(i,c)]))" | (t) "(qd1,qd2) \ set (?listf (?to_ns t))" unfolding to_ns h set_map set_concat by fastforce then show ?case proof cases case t from Cons(1)[OF this] Cons(3) show ?thesis unfolding to_ns_def by auto next case ic note ic = ic[unfolded to_ns, simplified] from ic have i: "(i \ I) = True" by (cases "i \ I", auto) note ic = ic[unfolded i if_True, simplified] from Cons(3)[unfolded h] i have "\v'\ \\<^sub>n\<^sub>s\<^sub>s set (to_ns' [c])" unfolding i_satisfies_ns_constraints.simps unfolding to_ns to_ns_def by force with ic show ?thesis by (induct c) (auto simp add: to_ns) qed qed qed then have l1: "\ > 0 \ \ \ (\_min ?list) \ \qd1 qd2. (qd1, qd2) \ set ?list \ val qd1 \ \ val qd2 \" for \ unfolding i_list by (simp add: delta_gt_zero delta_min[of i_list]) have "\_min ?flist \ \_min ?list" unfolding f_list i_list by (rule delta_min_mono[OF if_list]) from l1[OF delta_gt_zero this] have l1: "\qd1 qd2. (qd1, qd2) \ set ?list \ val qd1 (\_min f_list) \ val qd2 (\_min f_list)" unfolding f_list . have "\0_val_min (flat_list (to_ns ics)) \v'\ = \_min f_list" unfolding f_list[symmetric] proof(induct ics) case Nil show ?case by (simp add: to_ns_def) next case (Cons h t) then show ?case by (cases h; cases "snd h") (auto simp add: to_ns_def) qed then have l2: "from_ns v' ?f_list = Mapping.tabulate (vars_list_constraints ?f_list) (\ var. 
val (\v'\ var) (\_min f_list))" by (auto simp add: from_ns_def) fix c assume "c \ restrict_to I (set ics)" then obtain i where i: "i \ I" and mem: "(i,c) \ set ics" by auto from mem show "\from_ns v' ?f_list\ \\<^sub>c c" proof (induct c) case (LT lll rrr) then have "(lll\\v'\\, (QDelta.QDelta rrr (-1))) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr (-1)) (\_min f_list)" using l1 by simp moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LT by (auto simp add: comp_def lookup_tabulate restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ (val (QDelta.QDelta rrr (-1)) (\_min f_list))" by (auto simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def) next case (GT lll rrr) then have "((QDelta.QDelta rrr 1), lll\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 1) (\_min f_list)" using l1 by simp moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GT by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 1) (\_min f_list)" using l2 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def) next case (LEQ lll rrr) then have "(lll\\v'\\, (QDelta.QDelta rrr 0) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by simp moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LEQ by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l2 by (simp add: valuate_rat_valuate) then show ?case by (simp add: val_def) next case (GEQ lll rrr) then have "((QDelta.QDelta rrr 0), lll\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by simp moreover have "lll\(\x. 
val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GEQ by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l2 by (simp add: valuate_rat_valuate) then show ?case by (simp add: val_def) next case (EQ lll rrr) then have "((QDelta.QDelta rrr 0), lll\\v'\\) \ set ?list" and "(lll\\v'\\, (QDelta.QDelta rrr 0) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns)+ then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" and "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by simp_all moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using EQ by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" and "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by (auto simp add: valuate_rat_valuate) then show ?case by (simp add: val_def) next case (LTPP ll1 ll2) then have "((ll1-ll2)\\v'\\, (QDelta.QDelta 0 (-1)) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 (-1)) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LTPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 (-1)) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (GTPP ll1 ll2) then have "((QDelta.QDelta 0 1), (ll1-ll2)\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 1) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GTPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 1) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (LEQPP ll1 ll2) then have "((ll1-ll2)\\v'\\, (QDelta.QDelta 0 0) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns zero_QDelta_def) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. 
val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LEQPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (GEQPP ll1 ll2) then have "((QDelta.QDelta 0 0), (ll1-ll2)\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns zero_QDelta_def) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GEQPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (EQPP ll1 ll2) then have "((ll1-ll2)\\v'\\, (QDelta.QDelta 0 0) ) \ set ?list" and "((QDelta.QDelta 0 0), (ll1-ll2)\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns zero_QDelta_def)+ then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" and "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by simp_all moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using EQPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" and "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by (auto simp add: valuate_rat_valuate) then show ?case by (simp add: val_def valuate_minus) qed qed } note sat = this fix cs :: "('i \ constraint) list" have set_to_ns: "set (to_ns cs) = { (i,n) | i n c. 
(i,c) \ set cs \ n \ set (constraint_to_qdelta_constraint c)}" unfolding to_ns_def by auto show indices: "fst ` set (to_ns cs) = fst ` set cs" proof show "fst ` set (to_ns cs) \ fst ` set cs" unfolding set_to_ns by force { fix i assume "i \ fst ` set cs" then obtain c where "(i,c) \ set cs" by force hence "i \ fst ` set (to_ns cs)" unfolding set_to_ns by (cases c; force) } thus "fst ` set cs \ fst ` set (to_ns cs)" by blast qed { assume dist: "distinct_indices cs" show "distinct_indices_ns (set (to_ns cs))" unfolding distinct_indices_ns_def proof (intro allI impI conjI notI) fix n1 n2 i assume "(i,n1) \ set (to_ns cs)" "(i,n2) \ set (to_ns cs)" then obtain c1 c2 where i: "(i,c1) \ set cs" "(i,c2) \ set cs" and n: "n1 \ set (constraint_to_qdelta_constraint c1)" "n2 \ set (constraint_to_qdelta_constraint c2)" unfolding set_to_ns by auto from dist have "distinct (map fst cs)" unfolding distinct_indices_def by auto with i have c12: "c1 = c2" by (metis eq_key_imp_eq_value) note n = n[unfolded c12] show "poly n1 = poly n2" using n by (cases c2, auto) show "ns_constraint_const n1 = ns_constraint_const n2" using n by (cases c2, auto) qed } note mini = this fix I mode assume unsat: "minimal_unsat_core_ns I (set (to_ns cs))" note unsat = unsat[unfolded minimal_unsat_core_ns_def indices] hence indices: "I \ fst ` set cs" by auto show "minimal_unsat_core I cs" unfolding minimal_unsat_core_def proof (intro conjI indices impI allI, clarify) fix v assume v: "(I,v) \\<^sub>i\<^sub>c\<^sub>s set cs" let ?v = "\var. QDelta.QDelta (v var) 0" have "(I,?v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s (set (to_ns cs))" using v proof(induct cs) case (Cons ic cs) obtain i c where ic: "ic = (i,c)" by force from Cons(2-) ic have rec: "(I,v) \\<^sub>i\<^sub>c\<^sub>s set cs" and c: "i \ I \ v \\<^sub>c c" by auto { fix jn assume i: "i \ I" and "jn \ set (to_ns [(i,c)])" then have "jn \ set (i_constraint_to_qdelta_constraint (i,c))" unfolding to_ns_def by auto then obtain n where n: "n \ set (constraint_to_qdelta_constraint c)" and jn: "jn = (i,n)" by force from c[OF i] have c: "v \\<^sub>c c" by force from c n jn have "?v \\<^sub>n\<^sub>s snd jn" by (cases c) (auto simp add: less_eq_QDelta_def to_ns_def valuate_valuate_rat valuate_minus zero_QDelta_def) } note main = this from Cons(1)[OF rec] have IH: "(I,?v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs)" . show ?case unfolding i_satisfies_ns_constraints.simps proof (intro ballI) fix x assume "x \ snd ` (set (to_ns (ic # cs)) \ I \ UNIV)" then consider (1) "x \ snd ` (set (to_ns cs) \ I \ UNIV)" | (2) "x \ snd ` (set (to_ns [(i,c)]) \ I \ UNIV)" unfolding ic to_ns_def by auto then show "?v \\<^sub>n\<^sub>s x" proof cases case 1 then show ?thesis using IH by auto next case 2 then obtain jn where x: "snd jn = x" and "jn \ set (to_ns [(i,c)]) \ I \ UNIV" by auto with main[of jn] show ?thesis unfolding to_ns_def by auto qed qed qed (simp add: to_ns_def) with unsat show False unfolding minimal_unsat_core_ns_def by simp blast next fix J assume *: "distinct_indices cs" "J \ I" hence "distinct_indices_ns (set (to_ns cs))" using mini by auto with * unsat obtain v where model: "(J, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs)" by blast define w where "w = Mapping.Mapping (\ x. Some (v x))" have "v = \w\" unfolding w_def map2fun_def by (intro ext, transfer, auto) with model have model: "(J, \w\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs)" by auto from sat[OF this] show " \v. 
(J, v) \\<^sub>i\<^sub>c\<^sub>s set cs" by blast qed qed (* cleanup *) hide_const (open) le lt LE GE LB UB LI UI LBI UBI UBI_upd le_rat inv zero Var add flat flat_list restrict_to look upd (* -------------------------------------------------------------------------- *) (* Main soundness lemma and executability *) (* -------------------------------------------------------------------------- *) text \Simplex version with indexed constraints as input\ definition simplex_index :: "'i i_constraint list \ 'i list + (var, rat) mapping" where "simplex_index = solve_exec_code" lemma simplex_index: "simplex_index cs = Unsat I \ set I \ fst ` set cs \ \ (\ v. (set I, v) \\<^sub>i\<^sub>c\<^sub>s set cs) \ (distinct_indices cs \ (\ J \ set I. (\ v. (J, v) \\<^sub>i\<^sub>c\<^sub>s set cs)))" \ \minimal unsat core\ "simplex_index cs = Sat v \ \v\ \\<^sub>c\<^sub>s (snd ` set cs)" \ \satisfying assingment\ proof (unfold simplex_index_def) assume "solve_exec_code cs = Unsat I" from SolveExec'Default.simplex_unsat0[OF this] have core: "minimal_unsat_core (set I) cs" by auto then show "set I \ fst ` set cs \ \ (\ v. (set I, v) \\<^sub>i\<^sub>c\<^sub>s set cs) \ (distinct_indices cs \ (\J\set I. \v. (J, v) \\<^sub>i\<^sub>c\<^sub>s set cs))" unfolding minimal_unsat_core_def by auto next assume "solve_exec_code cs = Sat v" from SolveExec'Default.simplex_sat0[OF this] show "\v\ \\<^sub>c\<^sub>s (snd ` set cs)" . qed text \Simplex version where indices will be created\ definition simplex where "simplex cs = simplex_index (zip [0.. \ (\ v. v \\<^sub>c\<^sub>s set cs)" \ \unsat of original constraints\ "simplex cs = Unsat I \ set I \ {0.. \ (\ v. v \\<^sub>c\<^sub>s {cs ! i | i. i \ set I}) \ (\J\set I. \v. v \\<^sub>c\<^sub>s {cs ! i |i. i \ J})" \ \minimal unsat core\ "simplex cs = Sat v \ \v\ \\<^sub>c\<^sub>s set cs" \ \satisfying assignment\ proof (unfold simplex_def) let ?cs = "zip [0.. {0 ..< length cs}" and core: "\v. v \\<^sub>c\<^sub>s (snd ` (set ?cs \ set I \ UNIV))" "(distinct_indices (zip [0.. (\ J \ set I. \v. v \\<^sub>c\<^sub>s (snd ` (set ?cs \ J \ UNIV))))" by (auto simp flip: set_map) note core(2) also have "distinct_indices (zip [0.. J \ set I. \v. v \\<^sub>c\<^sub>s (snd ` (set ?cs \ J \ UNIV))) = (\ J \ set I. \v. v \\<^sub>c\<^sub>s { cs ! i | i. i \ J})" using index by (intro all_cong1 imp_cong ex_cong1 arg_cong[of _ _ "\ x. _ \\<^sub>c\<^sub>s x"] refl, force simp: set_zip) finally have core': "(\J\set I. \v. v \\<^sub>c\<^sub>s {cs ! i |i. i \ J}) " . note unsat = unsat_mono[OF core(1)] show "\ (\ v. v \\<^sub>c\<^sub>s set cs)" by (rule unsat, auto simp: set_zip) show "set I \ {0.. \ (\ v. v \\<^sub>c\<^sub>s {cs ! i | i. i \ set I}) \ (\J\set I. \v. v \\<^sub>c\<^sub>s {cs ! i |i. 
i \ J})" by (intro conjI index core', rule unsat, auto simp: set_zip) next assume "simplex_index (zip [0..v\ \\<^sub>c\<^sub>s set cs" by (auto simp flip: set_map) qed text \check executability\ lemma "case simplex [LTPP (lp_monom 2 1) (lp_monom 3 2 - lp_monom 3 0), GT (lp_monom 1 1) 5] of Sat _ \ True | Unsat _ \ False" by eval text \check unsat core\ lemma "case simplex_index [ (1 :: int, LT (lp_monom 1 1) 4), (2, GTPP (lp_monom 2 1) (lp_monom 1 2)), (3, EQPP (lp_monom 1 1) (lp_monom 2 2)), (4, GT (lp_monom 2 2) 5), (5, GT (lp_monom 3 0) 7)] of Sat _ \ False | Unsat I \ set I = {1,3,4}" \ \Constraints 1,3,4 are unsat core\ by eval end \ No newline at end of file diff --git a/thys/TortoiseHare/Brent.thy b/thys/TortoiseHare/Brent.thy --- a/thys/TortoiseHare/Brent.thy +++ b/thys/TortoiseHare/Brent.thy @@ -1,232 +1,237 @@ (*<*) theory Brent imports Basis begin (*>*) section\ Brent's algorithm \label{sec:brent} \ text\ @{cite "Brent:1980"} improved on the Tortoise and Hare algorithm and used it to factor large primes. In practice it makes significantly fewer calls to the function \f\ before detecting a loop. We begin by defining the base-2 logarithm. \ fun lg :: "nat \ nat" where [simp del]: "lg x = (if x \ 1 then 0 else 1 + lg (x div 2))" lemma lg_safe: "lg 0 = 0" "lg (Suc 0) = 0" "lg (Suc (Suc 0)) = 1" "0 < x \ lg (x + x) = 1 + lg x" by (simp_all add: lg.simps) lemma lg_inv: "0 < x \ lg (2 ^ x) = x" proof(induct x) case (Suc x) then show ?case by (cases x, simp_all add: lg.simps Suc_lessI not_le) qed simp lemma lg_inv2: - "2 ^ i = x \ 2 ^ lg x = x" -by (induct i arbitrary: x) (auto simp: lg.simps, arith) + \2 ^ lg x = x\ if \2 ^ i = x\ for x +proof - + have \2 ^ lg (2 ^ i) = (2::nat) ^ i\ + by (induction i) (simp_all add: lg_safe mult_2) + with that show ?thesis + by simp +qed lemmas lg_simps = lg_safe lg_inv lg_inv2 subsection\ Finding \lambda\ \ text (in properties) \ Imagine now that the Tortoise carries an unbounded number of carrots, which he passes to the Hare when they meet, and the Hare has a teleporter. The Hare eats a carrot each time she waits for the function @{term "f"} to execute, and initially has just one. If she runs out of carrots before meeting the Tortoise again, she teleports him to her position, and he gives her twice as many carrots as the last time they met (tracked by the variable \carrots\). By counting how many carrots she has eaten from when she last teleported the Tortoise (recorded in \l\) until she finally has surplus carrots when she meets him again, the Hare directly discovers @{term "lambda"}. \ record 'a state = m :: nat \ \\\\\ l :: nat \ \\\\\ carrots :: nat hare :: "'a" tortoise :: "'a" context properties begin definition (in fx0) find_lambda :: "'a state \ 'a state" where "find_lambda \ (\s. s\ carrots := 1, l := 1, tortoise := x0, hare := f x0 \) ;; while (hare \<^bold>\ tortoise) ( ( \<^bold>if carrots \<^bold>= l \<^bold>then (\s. s\ tortoise := hare s, carrots := 2 * carrots s, l := 0 \) \<^bold>else SKIP ) ;; (\s. s\ hare := f (hare s), l := l s + 1 \) )" text\ The termination argument goes intuitively as follows. The Hare eats as many carrots as it takes to teleport the Tortoise into the loop. Afterwards she continues the teleportation dance until the Tortoise has given her enough carrots to make it all the way around the loop and back to him. We can calculate the Tortoise's position as a function of \carrots\. 
\ definition carrots_total :: "nat \ nat" where "carrots_total c \ \i carrots_total (c + c) = c + carrots_total c" by (auto simp: carrots_total_def lg_simps) definition find_lambda_measures :: "( (nat \ nat) \ (nat \ nat) ) set" where "find_lambda_measures \ measures [\(l, c). mu - carrots_total c, \(l, c). LEAST i. lambda \ c * 2^i, \(l, c). c - l]" lemma find_lambda_measures_wellfounded: "wf find_lambda_measures" by (simp add: find_lambda_measures_def) lemma find_lambda_measures_decreases1: assumes "c = 2 ^ i" assumes "mu \ carrots_total c \ c \ lambda" assumes "seq (carrots_total c) \ seq (carrots_total c + c)" shows "( (c', 2 * c), (c, c) ) \ find_lambda_measures" proof(cases "mu \ carrots_total c") case False with assms show ?thesis by (auto simp: find_lambda_measures_def carrots_total_simps mult_2 field_simps diff_less_mono2) next case True { fix x assume x: "(0::nat) < x" have "\n. lambda \ x * 2 ^ n" proof(induct lambda) case (Suc i) then obtain n where "i \ x * 2 ^ n" by blast with x show ?case by (clarsimp intro!: exI[where x="Suc n"] simp: field_simps mult_2) (metis Nat.add_0_right Suc_leI linorder_neqE_nat mult_eq_0_iff add_left_cancel not_le numeral_2_eq_2 old.nat.distinct(2) power_not_zero trans_le_add2) qed simp } note ex = this have "(LEAST j. lambda \ 2 ^ (i + 1) * 2 ^ j) < (LEAST j. lambda \ 2 ^ i * 2 ^ j)" proof(rule LeastI2_wellorder_ex[OF ex, rotated], rule LeastI2_wellorder_ex[OF ex, rotated]) fix x y assume "lambda \ 2 ^ i * 2 ^ y" "lambda \ 2 ^ (i + 1) * 2 ^ x" "\z. lambda \ 2 ^ (i + 1) * 2 ^ z \ x \ z" with True assms properties_loop[where i="carrots_total c" and j=1] show "x < y" by (cases y, auto simp: less_Suc_eq_le) qed simp_all with True \c = 2 ^ i\ show ?thesis by (clarsimp simp: find_lambda_measures_def mult_2 carrots_total_simps field_simps power_add) qed lemma find_lambda_measures_decreases2: assumes "ls < c" shows "( (Suc ls, c), (ls, c) ) \ find_lambda_measures" using assms by (simp add: find_lambda_measures_def) lemma find_lambda: "\\True\\ find_lambda \l \<^bold>= \lambda\\" apply (simp add: find_lambda_def) apply (rule hoare_pre) apply (rule whileI[where I="\0\ \<^bold>< l \<^bold>\ l \<^bold>\ carrots \<^bold>\ (\mu\ \<^bold>\ carrots_total \ carrots \<^bold>\ l \<^bold>\ \lambda\) \<^bold>\ (\<^bold>\i. 
carrots \<^bold>= \2^i\) \<^bold>\ tortoise \<^bold>= seq \ carrots_total \ carrots \<^bold>\ hare \<^bold>= seq \ (l \<^bold>+ (carrots_total \ carrots))" and r="inv_image find_lambda_measures (l \<^bold>\ carrots)"] wp_intro)+ using properties_lambda_gt_0 apply (clarsimp simp: field_simps mult_2_right carrots_total_simps) apply (intro conjI impI) apply (metis mult_2 power_Suc) apply (case_tac "mu \ carrots_total (l s)") apply (cut_tac i="carrots_total (l s)" and j="l s" in properties_distinct_contrapos, simp_all add: field_simps)[1] apply (cut_tac i="carrots_total (l s)" and j="l s" in properties_loops_ge_mu, simp_all add: field_simps)[1] apply (cut_tac i="carrots_total (2 ^ x)" and j=1 in properties_loop, simp) apply (fastforce simp: le_eq_less_or_eq field_simps) apply (cut_tac i="carrots_total (2 ^ x)" and j="l s" in properties_loops_ge_mu, simp_all add: field_simps)[1] apply (cut_tac i="carrots_total (2 ^ x)" and j="l s" in properties_distinct_contrapos, simp_all add: field_simps)[1] apply (simp add: find_lambda_measures_wellfounded) apply (clarsimp simp: add.commute find_lambda_measures_decreases1 find_lambda_measures_decreases2) apply (rule wp_intro) using properties_lambda_gt_0 apply (simp add: carrots_total_simps exI[where x=0]) done subsection\ Finding \mu\ \ text\ With @{term "lambda"} in hand, we can find \mu\ using the same approach as for the Tortoise and Hare (\S\ref{sec:th-finding-mu}), after we first move the Hare to @{term "lambda"}. \ definition (in fx0) find_mu :: "'a state \ 'a state" where "find_mu \ (\s. s\ m := 0, tortoise := x0, hare := seq (l s) \) ;; while (hare \<^bold>\ tortoise) (\s. s\ tortoise := f (tortoise s), hare := f (hare s), m := m s + 1 \)" lemma find_mu: "\l \<^bold>= \lambda\\ find_mu \l \<^bold>= \lambda\ \<^bold>\ m \<^bold>= \mu\\" apply (simp add: find_mu_def) apply (rule hoare_pre) apply (rule whileI[where I="l \<^bold>= \lambda\ \<^bold>\ m \<^bold>\ \mu\ \<^bold>\ tortoise \<^bold>= seq \ m \<^bold>\ hare \<^bold>= seq \ (m \<^bold>+ l)" and r="measure (\mu\ \<^bold>- m)"] wp_intro)+ using properties_lambda_gt_0 properties_loop[where i=mu and j=1] apply (fastforce simp: le_less dest: properties_loops_ge_mu) apply simp using properties_loop[where i=mu and j=1, simplified] apply (fastforce simp: le_eq_less_or_eq) apply (rule wp_intro) apply simp done subsection\ Top level \ definition (in fx0) brent :: "'a state \ 'a state" where "brent \ find_lambda ;; find_mu" theorem brent: "\\True\\ brent \l \<^bold>= \lambda\ \<^bold>\ m \<^bold>= \mu\\" unfolding brent_def by (rule find_lambda find_mu wp_intro)+ end corollary brent_correct: assumes s': "s' = fx0.brent f x arbitrary" shows "fx0.properties f x (l s') (m s')" using assms properties.brent[where f=f and ?x0.0=x] by (fastforce intro: fx0.properties_existence[where f=f and ?x0.0=x] simp: Basis.properties_def valid_def) schematic_goal brent_code[code]: "fx0.brent f x = ?code" unfolding fx0.brent_def fx0.find_lambda_def fx0.find_mu_def fcomp_assoc[symmetric] fcomp_comp by (rule refl) export_code fx0.brent in SML (*<*) end (*>*) diff --git a/thys/Universal_Turing_Machine/UTM.thy b/thys/Universal_Turing_Machine/UTM.thy --- a/thys/Universal_Turing_Machine/UTM.thy +++ b/thys/Universal_Turing_Machine/UTM.thy @@ -1,4708 +1,4708 @@ (* Title: thys/UTM.thy Author: Jian Xu, Xingyuan Zhang, and Christian Urban Modifications: Sebastiaan Joosten *) chapter \Construction of a Universal Turing Machine\ theory UTM imports Recursive Abacus UF HOL.GCD Turing_Hoare begin section \Wang coding of input arguments\ 
text \ The direct compilation of the universal function \rec_F\ cannot give us the UTM, because \rec_F\ is of arity 2, where the first argument represents the Godel coding of the TM being simulated and the second argument represents the right number (in Wang's coding) of the TM tape. (Notice that the left number is always \0\ at the very beginning.) However, UTM needs to simulate the execution of any TM which may very well take many input arguments. Therefore, an initialization TM needs to run before the TM compiled from \rec_F\, and the sequential composition of these two TMs will give rise to the UTM we are seeking. The purpose of this initialization TM is to transform the multiple input arguments of the TM being simulated into Wang's coding, so that it can be consumed by the TM compiled from \rec_F\ as the second argument. However, this initialization TM (named \t_wcode\) cannot be constructed by compiling any recursive function, because every recursive function takes a fixed number of input arguments, while \t_wcode\ needs to take a varying number of arguments and transform them into Wang's coding. Therefore, this section gives a direct construction of \t_wcode\, with just some parts being obtained from recursive functions. \newlength{\basewidth} \settowidth{\basewidth}{xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} \newlength{\baseheight} \settoheight{\baseheight}{$B:R$} \newcommand{\vsep}{5\baseheight} The TM used to generate Wang's code of the input arguments is divided into three TMs executed sequentially, namely $prepare$, $mainwork$ and $adjust$. According to the convention, the start state of every TM is fixed to state $1$ while the final state is fixed to $0$. The input and output of $prepare$ are illustrated by Figures \ref{prepare_input} and \ref{prepare_output}, respectively. \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} [tbox/.style = {draw, thick, inner sep = 5pt}] \node (0) {}; \node (1) [tbox, text height = 3.5pt, right = -0.9pt of 0] {$m$}; \node (2) [tbox, right = -0.9pt of 1] {$0$}; \node (3) [tbox, right = -0.9pt of 2] {$a_1$}; \node (4) [tbox, right = -0.9pt of 3] {$0$}; \node (5) [tbox, right = -0.9pt of 4] {$a_2$}; \node (6) [right = -0.9pt of 5] {\ldots \ldots}; \node (7) [tbox, right = -0.9pt of 6] {$a_n$}; \draw [->, >=latex, thick] (1)+(0, -4\baseheight) -- (1); \end{tikzpicture}} \caption{The input of TM $prepare$} \label{prepare_input} \end{figure} \begin{figure}[h!]
\centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.5pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$a_1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [draw, right = -0.9pt of 5, thick, inner sep = 5pt] {$a_2$}; \node (7) [right = -0.9pt of 6] {\ldots \ldots}; \node (8) [draw, right = -0.9pt of 7, thick, inner sep = 5pt] {$a_n$}; \node (9) [draw, right = -0.9pt of 8, thick, inner sep = 5pt] {$0$}; \node (10) [draw, right = -0.9pt of 9, thick, inner sep = 5pt] {$0$}; \node (11) [draw, right = -0.9pt of 10, thick, inner sep = 5pt] {$1$}; \draw [->, >=latex, thick] (10)+(0, -4\baseheight) -- (10); \end{tikzpicture}} \caption{The output of TM $prepare$} \label{prepare_output} \end{figure} As shown in Figure \ref{prepare_input}, the input of $prepare$ is the same as the input of UTM, where $m$ is the Godel coding of the TM being interpreted and $a_1$ through $a_n$ are the $n$ input arguments of the TM under interpretation. The purpose of $prepare$ is to transform this initial tape layout into the one shown in Figure \ref{prepare_output}, which is convenient for the generation of Wang's coding of $a_1, \ldots, a_n$. The coding procedure starts from $a_n$ and ends after $a_1$ is encoded. The coding result is stored in an accumulator at the end of the tape (initially represented by the $1$ located two blanks to the right of $a_n$ in Figure \ref{prepare_output}). In Figure \ref{prepare_output}, the arguments $a_1, \ldots, a_n$ are separated from the rest of the tape by two blanks on either end, so that movement conditions can be implemented conveniently in subsequent TMs: by convention, two consecutive blanks are usually used to signal the end or start of a large chunk of data. The diagram of $prepare$ is given in Figure \ref{prepare_diag}. \begin{figure}[h!]
\centering \scalebox{0.9}{ \begin{tikzpicture} \node[circle,draw] (1) {$1$}; \node[circle,draw] (2) at ($(1)+(0.3\basewidth, 0)$) {$2$}; \node[circle,draw] (3) at ($(2)+(0.3\basewidth, 0)$) {$3$}; \node[circle,draw] (4) at ($(3)+(0.3\basewidth, 0)$) {$4$}; \node[circle,draw] (5) at ($(4)+(0.3\basewidth, 0)$) {$5$}; \node[circle,draw] (6) at ($(5)+(0.3\basewidth, 0)$) {$6$}; \node[circle,draw] (7) at ($(6)+(0.3\basewidth, 0)$) {$7$}; \node[circle,draw] (8) at ($(7)+(0.3\basewidth, 0)$) {$0$}; \draw [->, >=latex] (1) edge [loop above] node[above] {$S_1:L$} (1) ; \draw [->, >=latex] (1) -- node[above] {$S_0:S_1$} (2) ; \draw [->, >=latex] (2) edge [loop above] node[above] {$S_1:R$} (2) ; \draw [->, >=latex] (2) -- node[above] {$S_0:L$} (3) ; \draw [->, >=latex] (3) edge[loop above] node[above] {$S_1:S_0$} (3) ; \draw [->, >=latex] (3) -- node[above] {$S_0:R$} (4) ; \draw [->, >=latex] (4) edge[loop above] node[above] {$S_0:R$} (4) ; \draw [->, >=latex] (4) -- node[above] {$S_0:R$} (5) ; \draw [->, >=latex] (5) edge[loop above] node[above] {$S_1:R$} (5) ; \draw [->, >=latex] (5) -- node[above] {$S_0:R$} (6) ; \draw [->, >=latex] (6) edge[bend left = 50] node[below] {$S_1:R$} (5) ; \draw [->, >=latex] (6) -- node[above] {$S_0:R$} (7) ; \draw [->, >=latex] (7) edge[loop above] node[above] {$S_0:S_1$} (7) ; \draw [->, >=latex] (7) -- node[above] {$S_1:L$} (8) ; \end{tikzpicture}} \caption{The diagram of TM $prepare$} \label{prepare_diag} \end{figure} The purpose of TM $mainwork$ is to compute the Wang's encoding of $a_1, \ldots, a_n$. Every bit of $a_1, \ldots, a_n$, including the separating bits, is processed from left to right. In order to detect the termination condition when the left most bit of $a_1$ is reached, TM $mainwork$ needs to look ahead and consider three different situations at the start of every iteration: \begin{enumerate} \item The TM configuration for the first situation is shown in Figure \ref{mainwork_case_one_input}, where the accumulator is stored in $r$, both of the next two bits to be encoded are $1$. The configuration at the end of the iteration is shown in Figure \ref{mainwork_case_one_output}, where the first 1-bit has been encoded and cleared. Notice that the accumulator has been changed to $(r+1) \times 2$ to reflect the encoded bit. \item The TM configuration for the second situation is shown in Figure \ref{mainwork_case_two_input}, where the accumulator is stored in $r$, the next two bits to be encoded are $1$ and $0$. After the first $1$-bit was encoded and cleared, the second $0$-bit is difficult to detect and process. To solve this problem, these two consecutive bits are encoded in one iteration. In this situation, only the first $1$-bit needs to be cleared since the second one is cleared by definition. The configuration at the end of the iteration is shown in Figure \ref{mainwork_case_two_output}. Notice that the accumulator has been changed to $(r+1) \times 4$ to reflect the two encoded bits. \item The third situation corresponds to the case when the last bit of $a_1$ is reached. The TM configurations at the start and end of the iteration are shown in Figure \ref{mainwork_case_three_input} and \ref{mainwork_case_three_output} respectively. For this situation, only the read write head needs to be moved to the left to prepare a initial configuration for TM $adjust$ to start with. \end{enumerate} The diagram of $mainwork$ is given in Figure \ref{mainwork_diag}. 
The two rectangular nodes labeled with $2 \times x$ and $4 \times x$ are two TMs compiling from recursive functions so that we do not have to design and verify two quite complicated TMs. \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$a_1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [draw, right = -0.9pt of 5, thick, inner sep = 5pt] {$a_2$}; \node (7) [right = -0.9pt of 6] {\ldots \ldots}; \node (8) [draw, right = -0.9pt of 7, thick, inner sep = 5pt] {$a_i$}; \node (9) [draw, right = -0.9pt of 8, thick, inner sep = 5pt] {$1$}; \node (10) [draw, right = -0.9pt of 9, thick, inner sep = 5pt] {$1$}; \node (11) [draw, right = -0.9pt of 10, thick, inner sep = 5pt] {$0$}; \node (12) [right = -0.9pt of 11] {\ldots \ldots}; \node (13) [draw, right = -0.9pt of 12, thick, inner sep = 5pt] {$0$}; \node (14) [draw, text height = 3.9pt, right = -0.9pt of 13, thick, inner sep = 5pt] {$r$}; \draw [->, >=latex, thick] (13)+(0, -4\baseheight) -- (13); \end{tikzpicture}} \caption{The first situation for TM $mainwork$ to consider} \label{mainwork_case_one_input} \end{figure} \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$a_1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [draw, right = -0.9pt of 5, thick, inner sep = 5pt] {$a_2$}; \node (7) [right = -0.9pt of 6] {\ldots \ldots}; \node (8) [draw, right = -0.9pt of 7, thick, inner sep = 5pt] {$a_i$}; \node (9) [draw, right = -0.9pt of 8, thick, inner sep = 5pt] {$1$}; \node (10) [draw, right = -0.9pt of 9, thick, inner sep = 5pt] {$0$}; \node (11) [draw, right = -0.9pt of 10, thick, inner sep = 5pt] {$0$}; \node (12) [right = -0.9pt of 11] {\ldots \ldots}; \node (13) [draw, right = -0.9pt of 12, thick, inner sep = 5pt] {$0$}; \node (14) [draw, text height = 2.7pt, right = -0.9pt of 13, thick, inner sep = 5pt] {$(r+1) \times 2$}; \draw [->, >=latex, thick] (13)+(0, -4\baseheight) -- (13); \end{tikzpicture}} \caption{The output for the first case of TM $mainwork$'s processing} \label{mainwork_case_one_output} \end{figure} \begin{figure}[h!] 
\centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$a_1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [draw, right = -0.9pt of 5, thick, inner sep = 5pt] {$a_2$}; \node (7) [right = -0.9pt of 6] {\ldots \ldots}; \node (8) [draw, right = -0.9pt of 7, thick, inner sep = 5pt] {$a_i$}; \node (9) [draw, right = -0.9pt of 8, thick, inner sep = 5pt] {$1$}; \node (10) [draw, right = -0.9pt of 9, thick, inner sep = 5pt] {$0$}; \node (11) [draw, right = -0.9pt of 10, thick, inner sep = 5pt] {$1$}; \node (12) [draw, right = -0.9pt of 11, thick, inner sep = 5pt] {$0$}; \node (13) [right = -0.9pt of 12] {\ldots \ldots}; \node (14) [draw, right = -0.9pt of 13, thick, inner sep = 5pt] {$0$}; \node (15) [draw, text height = 3.9pt, right = -0.9pt of 14, thick, inner sep = 5pt] {$r$}; \draw [->, >=latex, thick] (14)+(0, -4\baseheight) -- (14); \end{tikzpicture}} \caption{The second situation for TM $mainwork$ to consider} \label{mainwork_case_two_input} \end{figure} \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$a_1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [draw, right = -0.9pt of 5, thick, inner sep = 5pt] {$a_2$}; \node (7) [right = -0.9pt of 6] {\ldots \ldots}; \node (8) [draw, right = -0.9pt of 7, thick, inner sep = 5pt] {$a_i$}; \node (9) [draw, right = -0.9pt of 8, thick, inner sep = 5pt] {$1$}; \node (10) [draw, right = -0.9pt of 9, thick, inner sep = 5pt] {$0$}; \node (11) [draw, right = -0.9pt of 10, thick, inner sep = 5pt] {$0$}; \node (12) [draw, right = -0.9pt of 11, thick, inner sep = 5pt] {$0$}; \node (13) [right = -0.9pt of 12] {\ldots \ldots}; \node (14) [draw, right = -0.9pt of 13, thick, inner sep = 5pt] {$0$}; \node (15) [draw, text height = 2.7pt, right = -0.9pt of 14, thick, inner sep = 5pt] {$(r+1) \times 4$}; \draw [->, >=latex, thick] (14)+(0, -4\baseheight) -- (14); \end{tikzpicture}} \caption{The output for the second case of TM $mainwork$'s processing} \label{mainwork_case_two_output} \end{figure} \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [right = -0.9pt of 5] {\ldots \ldots}; \node (7) [draw, right = -0.9pt of 6, thick, inner sep = 5pt] {$0$}; \node (8) [draw, text height = 3.9pt, right = -0.9pt of 7, thick, inner sep = 5pt] {$r$}; \draw [->, >=latex, thick] (7)+(0, -4\baseheight) -- (7); \end{tikzpicture}} \caption{The third situation for TM $mainwork$ to consider} \label{mainwork_case_three_input} \end{figure} \begin{figure}[h!] 
\centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [right = -0.9pt of 5] {\ldots \ldots}; \node (7) [draw, right = -0.9pt of 6, thick, inner sep = 5pt] {$0$}; \node (8) [draw, text height = 3.9pt, right = -0.9pt of 7, thick, inner sep = 5pt] {$r$}; \draw [->, >=latex, thick] (3)+(0, -4\baseheight) -- (3); \end{tikzpicture}} \caption{The output for the third case of TM $mainwork$'s processing} \label{mainwork_case_three_output} \end{figure} \begin{figure}[h!] \centering \scalebox{0.9}{ \begin{tikzpicture} \node[circle,draw] (1) {$1$}; \node[circle,draw] (2) at ($(1)+(0.3\basewidth, 0)$) {$2$}; \node[circle,draw] (3) at ($(2)+(0.3\basewidth, 0)$) {$3$}; \node[circle,draw] (4) at ($(3)+(0.3\basewidth, 0)$) {$4$}; \node[circle,draw] (5) at ($(4)+(0.3\basewidth, 0)$) {$5$}; \node[circle,draw] (6) at ($(5)+(0.3\basewidth, 0)$) {$6$}; \node[circle,draw] (7) at ($(2)+(0, -7\baseheight)$) {$7$}; \node[circle,draw] (8) at ($(7)+(0, -7\baseheight)$) {$8$}; \node[circle,draw] (9) at ($(8)+(0.3\basewidth, 0)$) {$9$}; \node[circle,draw] (10) at ($(9)+(0.3\basewidth, 0)$) {$10$}; \node[circle,draw] (11) at ($(10)+(0.3\basewidth, 0)$) {$11$}; \node[circle,draw] (12) at ($(11)+(0.3\basewidth, 0)$) {$12$}; \node[draw] (13) at ($(6)+(0.3\basewidth, 0)$) {$2 \times x$}; \node[circle,draw] (14) at ($(13)+(0.3\basewidth, 0)$) {$j_1$}; \node[draw] (15) at ($(12)+(0.3\basewidth, 0)$) {$4 \times x$}; \node[draw] (16) at ($(15)+(0.3\basewidth, 0)$) {$j_2$}; \node[draw] (17) at ($(7)+(0.3\basewidth, 0)$) {$0$}; \draw [->, >=latex] (1) edge[loop left] node[above] {$S_0:L$} (1) ; \draw [->, >=latex] (1) -- node[above] {$S_1:L$} (2) ; \draw [->, >=latex] (2) -- node[above] {$S_1:R$} (3) ; \draw [->, >=latex] (2) -- node[left] {$S_1:L$} (7) ; \draw [->, >=latex] (3) edge[loop above] node[above] {$S_1:S_0$} (3) ; \draw [->, >=latex] (3) -- node[above] {$S_0:R$} (4) ; \draw [->, >=latex] (4) edge[loop above] node[above] {$S_0:R$} (4) ; \draw [->, >=latex] (4) -- node[above] {$S_1:R$} (5) ; \draw [->, >=latex] (5) edge[loop above] node[above] {$S_1:R$} (5) ; \draw [->, >=latex] (5) -- node[above] {$S_0:S_1$} (6) ; \draw [->, >=latex] (6) edge[loop above] node[above] {$S_1:L$} (6) ; \draw [->, >=latex] (6) -- node[above] {$S_0:R$} (13) ; \draw [->, >=latex] (13) -- (14) ; \draw (14) -- ($(14)+(0, 6\baseheight)$) -- ($(1) + (0, 6\baseheight)$) node [above,midway] {$S_1:L$} ; \draw [->, >=latex] ($(1) + (0, 6\baseheight)$) -- (1) ; \draw [->, >=latex] (7) -- node[above] {$S_0:R$} (17) ; \draw [->, >=latex] (7) -- node[left] {$S_1:R$} (8) ; \draw [->, >=latex] (8) -- node[above] {$S_0:R$} (9) ; \draw [->, >=latex] (9) -- node[above] {$S_0:R$} (10) ; \draw [->, >=latex] (10) -- node[above] {$S_1:R$} (11) ; \draw [->, >=latex] (10) edge[loop above] node[above] {$S_0:R$} (10) ; \draw [->, >=latex] (11) edge[loop above] node[above] {$S_1:R$} (11) ; \draw [->, >=latex] (11) -- node[above] {$S_0:S_1$} (12) ; \draw [->, >=latex] (12) -- node[above] {$S_0:R$} (15) ; \draw [->, >=latex] (12) edge[loop above] node[above] {$S_1:L$} (12) ; \draw [->, >=latex] (15) -- (16) ; \draw (16) -- ($(16)+(0, -4\baseheight)$) -- ($(1) + (0, -18\baseheight)$) node 
[below,midway] {$S_1:L$} ; \draw [->, >=latex] ($(1) + (0, -18\baseheight)$) -- (1) ; \end{tikzpicture}} \caption{The diagram of TM $mainwork$} \label{mainwork_diag} \end{figure} The purpose of TM $adjust$ is to encode the last bit of $a_1$. The initial and final configuration of this TM are shown in Figure \ref{adjust_initial} and \ref{adjust_final} respectively. The diagram of TM $adjust$ is shown in Figure \ref{adjust_diag}. \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, right = -0.9pt of 2, thick, inner sep = 5pt] {$0$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$1$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [right = -0.9pt of 5] {\ldots \ldots}; \node (7) [draw, right = -0.9pt of 6, thick, inner sep = 5pt] {$0$}; \node (8) [draw, text height = 3.9pt, right = -0.9pt of 7, thick, inner sep = 5pt] {$r$}; \draw [->, >=latex, thick] (3)+(0, -4\baseheight) -- (3); \end{tikzpicture}} \caption{Initial configuration of TM $adjust$} \label{adjust_initial} \end{figure} \begin{figure}[h!] \centering \scalebox{1.2}{ \begin{tikzpicture} \node (0) {}; \node (1) [draw, text height = 3.9pt, right = -0.9pt of 0, thick, inner sep = 5pt] {$m$}; \node (2) [draw, right = -0.9pt of 1, thick, inner sep = 5pt] {$0$}; \node (3) [draw, text height = 2.9pt, right = -0.9pt of 2, thick, inner sep = 5pt] {$r+1$}; \node (4) [draw, right = -0.9pt of 3, thick, inner sep = 5pt] {$0$}; \node (5) [draw, right = -0.9pt of 4, thick, inner sep = 5pt] {$0$}; \node (6) [right = -0.9pt of 5] {\ldots \ldots}; \draw [->, >=latex, thick] (1)+(0, -4\baseheight) -- (1); \end{tikzpicture}} \caption{Final configuration of TM $adjust$} \label{adjust_final} \end{figure} \begin{figure}[h!] 
\centering \scalebox{0.9}{ \begin{tikzpicture} \node[circle,draw] (1) {$1$}; \node[circle,draw] (2) at ($(1)+(0.3\basewidth, 0)$) {$2$}; \node[circle,draw] (3) at ($(2)+(0.3\basewidth, 0)$) {$3$}; \node[circle,draw] (4) at ($(3)+(0.3\basewidth, 0)$) {$4$}; \node[circle,draw] (5) at ($(4)+(0.3\basewidth, 0)$) {$5$}; \node[circle,draw] (6) at ($(5)+(0.3\basewidth, 0)$) {$6$}; \node[circle,draw] (7) at ($(6)+(0.3\basewidth, 0)$) {$7$}; \node[circle,draw] (8) at ($(4)+(0, -7\baseheight)$) {$8$}; \node[circle,draw] (9) at ($(8)+(0.3\basewidth, 0)$) {$9$}; \node[circle,draw] (10) at ($(9)+(0.3\basewidth, 0)$) {$10$}; \node[circle,draw] (11) at ($(10)+(0.3\basewidth, 0)$) {$11$}; \node[circle,draw] (12) at ($(11)+(0.3\basewidth, 0)$) {$0$}; \draw [->, >=latex] (1) -- node[above] {$S_1:R$} (2) ; \draw [->, >=latex] (1) edge[loop above] node[above] {$S_0:S_1$} (1) ; \draw [->, >=latex] (2) -- node[above] {$S_1:R$} (3) ; \draw [->, >=latex] (3) edge[loop above] node[above] {$S_0:R$} (3) ; \draw [->, >=latex] (3) -- node[above] {$S_1:R$} (4) ; \draw [->, >=latex] (4) -- node[above] {$S_1:L$} (5) ; \draw [->, >=latex] (4) -- node[right] {$S_0:L$} (8) ; \draw [->, >=latex] (5) -- node[above] {$S_0:L$} (6) ; \draw [->, >=latex] (5) edge[loop above] node[above] {$S_1:S_0$} (5) ; \draw [->, >=latex] (6) -- node[above] {$S_1:R$} (7) ; \draw [->, >=latex] (6) edge[loop above] node[above] {$S_0:L$} (6) ; \draw (7) -- ($(7)+(0, 6\baseheight)$) -- ($(2) + (0, 6\baseheight)$) node [above,midway] {$S_0:S_1$} ; \draw [->, >=latex] ($(2) + (0, 6\baseheight)$) -- (2) ; \draw [->, >=latex] (8) edge[loop left] node[left] {$S_1:S_0$} (8) ; \draw [->, >=latex] (8) -- node[above] {$S_0:L$} (9) ; \draw [->, >=latex] (9) edge[loop above] node[above] {$S_0:L$} (9) ; \draw [->, >=latex] (9) -- node[above] {$S_1:L$} (10) ; \draw [->, >=latex] (10) edge[loop above] node[above] {$S_0:L$} (10) ; \draw [->, >=latex] (10) -- node[above] {$S_0:L$} (11) ; \draw [->, >=latex] (11) edge[loop above] node[above] {$S_1:L$} (11) ; \draw [->, >=latex] (11) -- node[above] {$S_0:R$} (12) ; \end{tikzpicture}} \caption{Diagram of TM $adjust$} \label{adjust_diag} \end{figure} \ definition rec_twice :: "recf" where "rec_twice = Cn 1 rec_mult [id 1 0, constn 2]" definition rec_fourtimes :: "recf" where "rec_fourtimes = Cn 1 rec_mult [id 1 0, constn 4]" definition abc_twice :: "abc_prog" where "abc_twice = (let (aprog, ary, fp) = rec_ci rec_twice in aprog [+] dummy_abc ((Suc 0)))" definition abc_fourtimes :: "abc_prog" where "abc_fourtimes = (let (aprog, ary, fp) = rec_ci rec_fourtimes in aprog [+] dummy_abc ((Suc 0)))" definition twice_ly :: "nat list" where "twice_ly = layout_of abc_twice" definition fourtimes_ly :: "nat list" where "fourtimes_ly = layout_of abc_fourtimes" definition t_twice_compile :: "instr list" where "t_twice_compile= (tm_of abc_twice @ (shift (mopup 1) (length (tm_of abc_twice) div 2)))" definition t_twice :: "instr list" where "t_twice = adjust0 t_twice_compile" definition t_fourtimes_compile :: "instr list" where "t_fourtimes_compile= (tm_of abc_fourtimes @ (shift (mopup 1) (length (tm_of abc_fourtimes) div 2)))" definition t_fourtimes :: "instr list" where "t_fourtimes = adjust0 t_fourtimes_compile" definition t_twice_len :: "nat" where "t_twice_len = length t_twice div 2" definition t_wcode_main_first_part:: "instr list" where "t_wcode_main_first_part \ [(L, 1), (L, 2), (L, 7), (R, 3), (R, 4), (W0, 3), (R, 4), (R, 5), (W1, 6), (R, 5), (R, 13), (L, 6), (R, 0), (R, 8), (R, 9), (Nop, 8), (R, 10), (W0, 9), (R, 10), (R, 
11), (W1, 12), (R, 11), (R, t_twice_len + 14), (L, 12)]" definition t_wcode_main :: "instr list" where "t_wcode_main = (t_wcode_main_first_part @ shift t_twice 12 @ [(L, 1), (L, 1)] @ shift t_fourtimes (t_twice_len + 13) @ [(L, 1), (L, 1)])" fun bl_bin :: "cell list \ nat" where "bl_bin [] = 0" | "bl_bin (Bk # xs) = 2 * bl_bin xs" | "bl_bin (Oc # xs) = Suc (2 * bl_bin xs)" declare bl_bin.simps[simp del] type_synonym bin_inv_t = "cell list \ nat \ tape \ bool" fun wcode_before_double :: "bin_inv_t" where "wcode_before_double ires rs (l, r) = (\ ln rn. l = Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\((Suc (Suc rs))) @ Bk\(rn ))" declare wcode_before_double.simps[simp del] fun wcode_after_double :: "bin_inv_t" where "wcode_after_double ires rs (l, r) = (\ ln rn. l = Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\(Suc (Suc (Suc 2*rs))) @ Bk\(rn))" declare wcode_after_double.simps[simp del] fun wcode_on_left_moving_1_B :: "bin_inv_t" where "wcode_on_left_moving_1_B ires rs (l, r) = (\ ml mr rn. l = Bk\(ml) @ Oc # Oc # ires \ r = Bk\(mr) @ Oc\(Suc rs) @ Bk\(rn) \ ml + mr > Suc 0 \ mr > 0)" declare wcode_on_left_moving_1_B.simps[simp del] fun wcode_on_left_moving_1_O :: "bin_inv_t" where "wcode_on_left_moving_1_O ires rs (l, r) = (\ ln rn. l = Oc # ires \ r = Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" declare wcode_on_left_moving_1_O.simps[simp del] fun wcode_on_left_moving_1 :: "bin_inv_t" where "wcode_on_left_moving_1 ires rs (l, r) = (wcode_on_left_moving_1_B ires rs (l, r) \ wcode_on_left_moving_1_O ires rs (l, r))" declare wcode_on_left_moving_1.simps[simp del] fun wcode_on_checking_1 :: "bin_inv_t" where "wcode_on_checking_1 ires rs (l, r) = (\ ln rn. l = ires \ r = Oc # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_erase1 :: "bin_inv_t" where "wcode_erase1 ires rs (l, r) = (\ ln rn. l = Oc # ires \ tl r = Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" declare wcode_erase1.simps [simp del] fun wcode_on_right_moving_1 :: "bin_inv_t" where "wcode_on_right_moving_1 ires rs (l, r) = (\ ml mr rn. l = Bk\(ml) @ Oc # ires \ r = Bk\(mr) @ Oc\(Suc rs) @ Bk\(rn) \ ml + mr > Suc 0)" declare wcode_on_right_moving_1.simps [simp del] declare wcode_on_right_moving_1.simps[simp del] fun wcode_goon_right_moving_1 :: "bin_inv_t" where "wcode_goon_right_moving_1 ires rs (l, r) = (\ ml mr ln rn. l = Oc\(ml) @ Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\(mr) @ Bk\(rn) \ ml + mr = Suc rs)" declare wcode_goon_right_moving_1.simps[simp del] fun wcode_backto_standard_pos_B :: "bin_inv_t" where "wcode_backto_standard_pos_B ires rs (l, r) = (\ ln rn. l = Bk # Bk\(ln) @ Oc # ires \ r = Bk # Oc\((Suc (Suc rs))) @ Bk\(rn ))" declare wcode_backto_standard_pos_B.simps[simp del] fun wcode_backto_standard_pos_O :: "bin_inv_t" where "wcode_backto_standard_pos_O ires rs (l, r) = (\ ml mr ln rn. 
l = Oc\(ml) @ Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\(mr) @ Bk\(rn) \ ml + mr = Suc (Suc rs) \ mr > 0)" declare wcode_backto_standard_pos_O.simps[simp del] fun wcode_backto_standard_pos :: "bin_inv_t" where "wcode_backto_standard_pos ires rs (l, r) = (wcode_backto_standard_pos_B ires rs (l, r) \ wcode_backto_standard_pos_O ires rs (l, r))" declare wcode_backto_standard_pos.simps[simp del] lemma bin_wc_eq: "bl_bin xs = bl2wc xs" proof(induct xs) show " bl_bin [] = bl2wc []" apply(simp add: bl_bin.simps) done next fix a xs assume "bl_bin xs = bl2wc xs" thus " bl_bin (a # xs) = bl2wc (a # xs)" apply(case_tac a, simp_all add: bl_bin.simps bl2wc.simps) apply(simp_all add: bl2nat.simps bl2nat_double) done qed lemma tape_of_nl_append_one: "lm \ [] \ = @ Bk # Oc\Suc a" apply(induct lm, auto simp: tape_of_nl_cons split:if_splits) done lemma tape_of_nl_rev: "rev () = ()" apply(induct lm, simp, auto) apply(auto simp: tape_of_nl_cons tape_of_nl_append_one split: if_splits) apply(simp add: exp_ind[THEN sym]) done lemma exp_1[simp]: "a\(Suc 0) = [a]" by(simp) lemma tape_of_nl_cons_app1: "() = (Oc\(Suc a) @ Bk # ())" apply(case_tac xs; simp add: tape_of_list_def tape_of_nat_list.simps tape_of_nat_def) done lemma bl_bin_bk_oc[simp]: "bl_bin (xs @ [Bk, Oc]) = bl_bin xs + 2*2^(length xs)" apply(simp add: bin_wc_eq) using bl2nat_cons_oc[of "xs @ [Bk]"] apply(simp add: bl2nat_cons_bk bl2wc.simps) done lemma tape_of_nat[simp]: "() = Oc\(Suc a)" apply(simp add: tape_of_nat_def) done lemma tape_of_nl_cons_app2: "() = ( @ Bk # Oc\(Suc b))" proof(induct "length xs" arbitrary: xs c, simp add: tape_of_list_def) fix x xs c assume ind: "\xs c. x = length xs \ = @ Bk # Oc\(Suc b)" and h: "Suc x = length (xs::nat list)" show " = @ Bk # Oc\(Suc b)" proof(cases xs, simp add: tape_of_list_def) fix a list assume g: "xs = a # list" hence k: " = @ Bk # Oc\(Suc b)" apply(rule_tac ind) using h apply(simp) done from g and k show " = @ Bk # Oc\(Suc b)" apply(simp add: tape_of_list_def) done qed qed lemma length_2_elems[simp]: "length () = Suc (Suc aa) + length ()" apply(simp add: tape_of_list_def) done lemma bl_bin_addition[simp]: "bl_bin (Oc\(Suc aa) @ Bk # tape_of_nat_list (a # lista) @ [Bk, Oc]) = bl_bin (Oc\(Suc aa) @ Bk # tape_of_nat_list (a # lista)) + 2* 2^(length (Oc\(Suc aa) @ Bk # tape_of_nat_list (a # lista)))" using bl_bin_bk_oc[of "Oc\(Suc aa) @ Bk # tape_of_nat_list (a # lista)"] apply(simp) done declare replicate_Suc[simp del] lemma bl_bin_2[simp]: "bl_bin () + (4 * rs + 4) * 2 ^ (length () - Suc 0) = bl_bin (Oc\(Suc aa) @ Bk # ) + rs * (2 * 2 ^ (aa + length ()))" apply(case_tac "list", simp add: add_mult_distrib) apply(simp add: tape_of_nl_cons_app2 add_mult_distrib) apply(simp add: tape_of_list_def) done lemma tape_of_nl_app_Suc: "(()) = () @ [Oc]" proof(induct list) case (Cons a list) then show ?case by(cases list;simp_all add:tape_of_list_def exp_ind) qed (simp add: tape_of_list_def exp_ind) lemma bl_bin_3[simp]: "bl_bin (Oc # Oc\(aa) @ Bk # @ [Oc]) = bl_bin (Oc # Oc\(aa) @ Bk # ) + 2^(length (Oc # Oc\(aa) @ Bk # ))" apply(simp add: bin_wc_eq) apply(simp add: bl2nat_cons_oc bl2wc.simps) using bl2nat_cons_oc[of "Oc # Oc\(aa) @ Bk # "] apply(simp) done lemma bl_bin_4[simp]: "bl_bin (Oc # Oc\(aa) @ Bk # ) + (4 * 2 ^ (aa + length ()) + 4 * (rs * 2 ^ (aa + length ()))) = bl_bin (Oc # Oc\(aa) @ Bk # ) + rs * (2 * 2 ^ (aa + length ()))" apply(simp add: tape_of_nl_app_Suc) done declare tape_of_nat[simp del] fun wcode_double_case_inv :: "nat \ bin_inv_t" where "wcode_double_case_inv st ires rs (l, r) = (if st = Suc 0 
then wcode_on_left_moving_1 ires rs (l, r) else if st = Suc (Suc 0) then wcode_on_checking_1 ires rs (l, r) else if st = 3 then wcode_erase1 ires rs (l, r) else if st = 4 then wcode_on_right_moving_1 ires rs (l, r) else if st = 5 then wcode_goon_right_moving_1 ires rs (l, r) else if st = 6 then wcode_backto_standard_pos ires rs (l, r) else if st = 13 then wcode_before_double ires rs (l, r) else False)" declare wcode_double_case_inv.simps[simp del] fun wcode_double_case_state :: "config \ nat" where "wcode_double_case_state (st, l, r) = 13 - st" fun wcode_double_case_step :: "config \ nat" where "wcode_double_case_step (st, l, r) = (if st = Suc 0 then (length l) else if st = Suc (Suc 0) then (length r) else if st = 3 then if hd r = Oc then 1 else 0 else if st = 4 then (length r) else if st = 5 then (length r) else if st = 6 then (length l) else 0)" fun wcode_double_case_measure :: "config \ nat \ nat" where "wcode_double_case_measure (st, l, r) = (wcode_double_case_state (st, l, r), wcode_double_case_step (st, l, r))" definition wcode_double_case_le :: "(config \ config) set" where "wcode_double_case_le \ (inv_image lex_pair wcode_double_case_measure)" lemma wf_lex_pair[intro]: "wf lex_pair" by(auto intro:wf_lex_prod simp:lex_pair_def) lemma wf_wcode_double_case_le[intro]: "wf wcode_double_case_le" by(auto intro:wf_inv_image simp: wcode_double_case_le_def ) lemma fetch_t_wcode_main[simp]: "fetch t_wcode_main (Suc 0) Bk = (L, Suc 0)" "fetch t_wcode_main (Suc 0) Oc = (L, Suc (Suc 0))" "fetch t_wcode_main (Suc (Suc 0)) Oc = (R, 3)" "fetch t_wcode_main (Suc (Suc 0)) Bk = (L, 7)" "fetch t_wcode_main (Suc (Suc (Suc 0))) Bk = (R, 4)" "fetch t_wcode_main (Suc (Suc (Suc 0))) Oc = (W0, 3)" "fetch t_wcode_main 4 Bk = (R, 4)" "fetch t_wcode_main 4 Oc = (R, 5)" "fetch t_wcode_main 5 Oc = (R, 5)" "fetch t_wcode_main 5 Bk = (W1, 6)" "fetch t_wcode_main 6 Bk = (R, 13)" "fetch t_wcode_main 6 Oc = (L, 6)" "fetch t_wcode_main 7 Oc = (R, 8)" "fetch t_wcode_main 7 Bk = (R, 0)" "fetch t_wcode_main 8 Bk = (R, 9)" "fetch t_wcode_main 9 Bk = (R, 10)" "fetch t_wcode_main 9 Oc = (W0, 9)" "fetch t_wcode_main 10 Bk = (R, 10)" "fetch t_wcode_main 10 Oc = (R, 11)" "fetch t_wcode_main 11 Bk = (W1, 12)" "fetch t_wcode_main 11 Oc = (R, 11)" "fetch t_wcode_main 12 Oc = (L, 12)" "fetch t_wcode_main 12 Bk = (R, t_twice_len + 14)" by(auto simp: t_wcode_main_def t_wcode_main_first_part_def fetch.simps numeral) declare wcode_on_checking_1.simps[simp del] lemmas wcode_double_case_inv_simps = wcode_on_left_moving_1.simps wcode_on_left_moving_1_O.simps wcode_on_left_moving_1_B.simps wcode_on_checking_1.simps wcode_erase1.simps wcode_on_right_moving_1.simps wcode_goon_right_moving_1.simps wcode_backto_standard_pos.simps lemma wcode_on_left_moving_1[simp]: "wcode_on_left_moving_1 ires rs (b, []) = False" "wcode_on_left_moving_1 ires rs (b, r) \ b \ []" by(auto simp: wcode_on_left_moving_1.simps wcode_on_left_moving_1_B.simps wcode_on_left_moving_1_O.simps) lemma wcode_on_left_moving_1E[elim]: "\wcode_on_left_moving_1 ires rs (b, Bk # list); tl b = aa \ hd b # Bk # list = ba\ \ wcode_on_left_moving_1 ires rs (aa, ba)" apply(simp only: wcode_on_left_moving_1.simps wcode_on_left_moving_1_O.simps wcode_on_left_moving_1_B.simps) apply(erule_tac disjE) apply(erule_tac exE)+ apply(rename_tac ml mr rn) apply(case_tac ml, simp) apply(rule_tac x = "mr - Suc (Suc 0)" in exI, rule_tac x = rn in exI) apply (smt One_nat_def Suc_diff_Suc append_Cons empty_replicate list.sel(3) neq0_conv replicate_Suc replicate_app_Cons_same tl_append2 tl_replicate) 
apply(rule_tac disjI1) apply (metis add_Suc_shift less_SucI list.exhaust_sel list.inject list.simps(3) replicate_Suc_iff_anywhere) by simp declare replicate_Suc[simp] lemma wcode_on_moving_1_Elim[elim]: "\wcode_on_left_moving_1 ires rs (b, Oc # list); tl b = aa \ hd b # Oc # list = ba\ \ wcode_on_checking_1 ires rs (aa, ba)" apply(simp only: wcode_double_case_inv_simps) apply(erule_tac disjE) apply (metis cell.distinct(1) empty_replicate hd_append2 hd_replicate list.sel(1) not_gr_zero) apply force. lemma wcode_on_checking_1_Elim[elim]: "\wcode_on_checking_1 ires rs (b, Oc # ba);Oc # b = aa \ list = ba\ \ wcode_erase1 ires rs (aa, ba)" apply(simp only: wcode_double_case_inv_simps) apply(erule_tac exE)+ by auto lemma wcode_on_checking_1_simp[simp]: "wcode_on_checking_1 ires rs (b, []) = False" "wcode_on_checking_1 ires rs (b, Bk # list) = False" by(auto simp: wcode_double_case_inv_simps) lemma wcode_erase1_nonempty_snd[simp]: "wcode_erase1 ires rs (b, []) = False" apply(simp add: wcode_double_case_inv_simps) done lemma wcode_on_right_moving_1_nonempty_snd[simp]: "wcode_on_right_moving_1 ires rs (b, []) = False" apply(simp add: wcode_double_case_inv_simps) done lemma wcode_on_right_moving_1_BkE[elim]: "\wcode_on_right_moving_1 ires rs (b, Bk # ba); Bk # b = aa \ list = b\ \ wcode_on_right_moving_1 ires rs (aa, ba)" apply(simp only: wcode_double_case_inv_simps) apply(erule_tac exE)+ apply(rename_tac ml mr rn) apply(rule_tac x = "Suc ml" in exI, rule_tac x = "mr - Suc 0" in exI, rule_tac x = rn in exI) apply(simp) apply(case_tac mr, simp, simp) done lemma wcode_on_right_moving_1_OcE[elim]: "\wcode_on_right_moving_1 ires rs (b, Oc # ba); Oc # b = aa \ list = ba\ \ wcode_goon_right_moving_1 ires rs (aa, ba)" apply(simp only: wcode_double_case_inv_simps) apply(erule_tac exE)+ apply(rename_tac ml mr rn) apply(rule_tac x = "Suc 0" in exI, rule_tac x = "rs" in exI, rule_tac x = "ml - Suc (Suc 0)" in exI, rule_tac x = rn in exI) apply(case_tac mr, simp_all) apply(case_tac ml, simp, case_tac nat, simp, simp) done lemma wcode_erase1_BkE[elim]: assumes "wcode_erase1 ires rs (b, Bk # ba)" "Bk # b = aa \ list = ba" "c = Bk # ba" shows "wcode_on_right_moving_1 ires rs (aa, ba)" proof - from assms obtain rn ln where "b = Oc # ires" "tl (Bk # ba) = Bk \ ln @ Bk # Bk # Oc \ Suc rs @ Bk \ rn" unfolding wcode_double_case_inv_simps by auto thus ?thesis using assms(2-) unfolding wcode_double_case_inv_simps apply(rule_tac x = "Suc 0" in exI, rule_tac x = "Suc (Suc ln)" in exI, rule_tac x = rn in exI, simp add: exp_ind del: replicate_Suc) done qed lemma wcode_erase1_OcE[elim]: "\wcode_erase1 ires rs (aa, Oc # list); b = aa \ Bk # list = ba\ \ wcode_erase1 ires rs (aa, ba)" unfolding wcode_double_case_inv_simps by auto auto lemma wcode_goon_right_moving_1_emptyE[elim]: assumes "wcode_goon_right_moving_1 ires rs (aa, [])" "b = aa \ [Oc] = ba" shows "wcode_backto_standard_pos ires rs (aa, ba)" proof - from assms obtain ml ln rn mr where "aa = Oc \ ml @ Bk # Bk # Bk \ ln @ Oc # ires" "[] = Oc \ mr @ Bk \ rn" "ml + mr = Suc rs" by(auto simp:wcode_double_case_inv_simps) thus ?thesis using assms(2) apply(simp only: wcode_double_case_inv_simps) apply(rule_tac disjI2) apply(simp only:wcode_backto_standard_pos_O.simps) apply(rule_tac x = ml in exI, rule_tac x = "Suc 0" in exI, rule_tac x = ln in exI, rule_tac x = rn in exI, simp) done qed lemma wcode_goon_right_moving_1_BkE[elim]: assumes "wcode_goon_right_moving_1 ires rs (aa, Bk # list)" "b = aa \ Oc # list = ba" shows "wcode_backto_standard_pos ires rs (aa, ba)" proof - 
from assms obtain ln rn where "aa = Oc \ Suc rs @ Bk \ Suc (Suc ln) @ Oc # ires" "Bk # list = Bk \ rn" "b = Oc \ Suc rs @ Bk \ Suc (Suc ln) @ Oc # ires" "ba = Oc # list" by(auto simp:wcode_double_case_inv_simps) thus ?thesis using assms(2) apply(simp only: wcode_double_case_inv_simps wcode_backto_standard_pos_O.simps) apply(rule_tac disjI2) apply(rule exI[of _ "Suc rs"], rule exI[of _ "Suc 0"], rule_tac x = ln in exI, rule_tac x = "rn - Suc 0" in exI, simp) apply(cases rn;auto) done qed lemma wcode_goon_right_moving_1_OcE[elim]: assumes "wcode_goon_right_moving_1 ires rs (b, Oc # ba)" "Oc # b = aa \ list = ba" shows "wcode_goon_right_moving_1 ires rs (aa, ba)" proof - from assms obtain ml mr ln rn where "b = Oc \ ml @ Bk # Bk # Bk \ ln @ Oc # ires \ Oc # ba = Oc \ mr @ Bk \ rn \ ml + mr = Suc rs" unfolding wcode_double_case_inv_simps by auto with assms(2) show ?thesis unfolding wcode_double_case_inv_simps apply(rule_tac x = "Suc ml" in exI, rule_tac x = "mr - Suc 0" in exI, rule_tac x = ln in exI, rule_tac x = rn in exI) apply(simp) apply(case_tac mr, simp, case_tac rn, simp_all) done qed lemma wcode_backto_standard_pos_BkE[elim]: "\wcode_backto_standard_pos ires rs (b, Bk # ba); Bk # b = aa \ list = ba\ \ wcode_before_double ires rs (aa, ba)" apply(simp only: wcode_double_case_inv_simps wcode_backto_standard_pos_B.simps wcode_backto_standard_pos_O.simps wcode_before_double.simps) apply(erule_tac disjE) apply(erule_tac exE)+ by auto lemma wcode_backto_standard_pos_no_Oc[simp]: "wcode_backto_standard_pos ires rs ([], Oc # list) = False" apply(auto simp: wcode_backto_standard_pos.simps wcode_backto_standard_pos_B.simps wcode_backto_standard_pos_O.simps) done lemma wcode_backto_standard_pos_nonempty_snd[simp]: "wcode_backto_standard_pos ires rs (b, []) = False" apply(auto simp: wcode_backto_standard_pos.simps wcode_backto_standard_pos_B.simps wcode_backto_standard_pos_O.simps) done lemma wcode_backto_standard_pos_OcE[elim]: "\wcode_backto_standard_pos ires rs (b, Oc # list); tl b = aa; hd b # Oc # list = ba\ \ wcode_backto_standard_pos ires rs (aa, ba)" apply(simp only: wcode_backto_standard_pos.simps wcode_backto_standard_pos_B.simps wcode_backto_standard_pos_O.simps) apply(erule_tac disjE) apply(simp) apply(erule_tac exE)+ apply(simp) apply (rename_tac ml mr ln rn) apply(case_tac ml) apply(rule_tac disjI1, rule_tac conjI) apply(rule_tac x = ln in exI, force, rule_tac x = rn in exI, force, force). declare nth_of.simps[simp del] fetch.simps[simp del] lemma wcode_double_case_first_correctness: "let P = (\ (st, l, r). st = 13) in let Q = (\ (st, l, r). wcode_double_case_inv st ires rs (l, r)) in let f = (\ stp. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp) in \ n .P (f n) \ Q (f (n::nat))" proof - let ?P = "(\ (st, l, r). st = 13)" let ?Q = "(\ (st, l, r). wcode_double_case_inv st ires rs (l, r))" let ?f = "(\ stp. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp)" have "\ n. ?P (?f n) \ ?Q (?f (n::nat))" proof(rule_tac halt_lemma2) show "wf wcode_double_case_le" by auto next show "\ na. 
\ ?P (?f na) \ ?Q (?f na) \ ?Q (?f (Suc na)) \ (?f (Suc na), ?f na) \ wcode_double_case_le" proof(rule_tac allI, case_tac "?f na", simp) fix na a b c show "a \ 13 \ wcode_double_case_inv a ires rs (b, c) \ (case step0 (a, b, c) t_wcode_main of (st, x) \ wcode_double_case_inv st ires rs x) \ (step0 (a, b, c) t_wcode_main, a, b, c) \ wcode_double_case_le" apply(rule_tac impI, simp add: wcode_double_case_inv.simps) apply(auto split: if_splits simp: step.simps, case_tac [!] c, simp_all, case_tac [!] "(c::cell list)!0") apply(simp_all add: wcode_double_case_inv.simps wcode_double_case_le_def lex_pair_def) apply(auto split: if_splits) done qed next show "?Q (?f 0)" apply(simp add: steps.simps wcode_double_case_inv.simps wcode_on_left_moving_1.simps wcode_on_left_moving_1_B.simps) apply(rule_tac disjI1) apply(rule_tac x = "Suc m" in exI, simp) apply(rule_tac x = "Suc 0" in exI, simp) done next show "\ ?P (?f 0)" apply(simp add: steps.simps) done qed thus "let P = \(st, l, r). st = 13; Q = \(st, l, r). wcode_double_case_inv st ires rs (l, r); f = steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main in \n. P (f n) \ Q (f n)" apply(simp) done qed lemma tm_append_shift_append_steps: "\steps0 (st, l, r) tp stp = (st', l', r'); 0 < st'; length tp1 mod 2 = 0 \ \ steps0 (st + length tp1 div 2, l, r) (tp1 @ shift tp (length tp1 div 2) @ tp2) stp = (st' + length tp1 div 2, l', r')" proof - assume h: "steps0 (st, l, r) tp stp = (st', l', r')" "0 < st'" "length tp1 mod 2 = 0 " from h have "steps (st + length tp1 div 2, l, r) (tp1 @ shift tp (length tp1 div 2), 0) stp = (st' + length tp1 div 2, l', r')" by(rule_tac tm_append_second_steps_eq, simp_all) then have "steps (st + length tp1 div 2, l, r) ((tp1 @ shift tp (length tp1 div 2)) @ tp2, 0) stp = (st' + length tp1 div 2, l', r')" using h apply(rule_tac tm_append_first_steps_eq, simp_all) done thus "?thesis" by simp qed declare start_of.simps[simp del] lemma twice_lemma: "rec_exec rec_twice [rs] = 2*rs" by(auto simp: rec_twice_def rec_exec.simps) lemma t_twice_correct: "\stp ln rn. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (tm_of abc_twice @ shift (mopup (Suc 0)) ((length (tm_of abc_twice) div 2))) stp = (0, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" proof(case_tac "rec_ci rec_twice") fix a b c assume h: "rec_ci rec_twice = (a, b, c)" have "\stp m l. steps0 (Suc 0, Bk # Bk # ires, <[rs]> @ Bk\(n)) (tm_of abc_twice @ shift (mopup (length [rs])) (length (tm_of abc_twice) div 2)) stp = (0, Bk\(m) @ Bk # Bk # ires, Oc\(Suc (rec_exec rec_twice [rs])) @ Bk\(l))" thm recursive_compile_to_tm_correct1 proof(rule_tac recursive_compile_to_tm_correct1) show "rec_ci rec_twice = (a, b, c)" by (simp add: h) next show "terminate rec_twice [rs]" apply(rule_tac primerec_terminate, auto) apply(simp add: rec_twice_def, auto simp: constn.simps numeral_2_eq_2) by(auto) next show "tm_of abc_twice = tm_of (a [+] dummy_abc (length [rs]))" using h by(simp add: abc_twice_def) qed thus "?thesis" apply(simp add: tape_of_list_def tape_of_nat_def rec_exec.simps twice_lemma) done qed declare adjust.simps[simp] lemma adjust_fetch0: "\0 < a; a \ length ap div 2; fetch ap a b = (aa, 0)\ \ fetch (adjust0 ap) a b = (aa, Suc (length ap div 2))" apply(case_tac b, auto simp: fetch.simps nth_of.simps nth_map split: if_splits) apply(case_tac [!] 
a, auto simp: fetch.simps nth_of.simps) done lemma adjust_fetch_norm: "\st > 0; st \ length tp div 2; fetch ap st b = (aa, ns); ns \ 0\ \ fetch (adjust0 ap) st b = (aa, ns)" apply(case_tac b, auto simp: fetch.simps nth_of.simps nth_map split: if_splits) apply(case_tac [!] st, auto simp: fetch.simps nth_of.simps) done declare adjust.simps[simp del] lemma adjust_step_eq: assumes exec: "step0 (st,l,r) ap = (st', l', r')" and wf_tm: "tm_wf (ap, 0)" and notfinal: "st' > 0" shows "step0 (st, l, r) (adjust0 ap) = (st', l', r')" using assms proof - have "st > 0" using assms by(case_tac st, simp_all add: step.simps fetch.simps) moreover hence "st \ (length ap) div 2" using assms apply(case_tac "st \ (length ap) div 2", simp) apply(case_tac st, auto simp: step.simps fetch.simps) apply(case_tac "read r", simp_all add: fetch.simps nth_of.simps adjust.simps tm_wf.simps split: if_splits) apply(auto simp: mod_ex2) done ultimately have "fetch (adjust0 ap) st (read r) = fetch ap st (read r)" using assms apply(case_tac "fetch ap st (read r)") apply(drule_tac adjust_fetch_norm, simp_all) apply(simp add: step.simps) done thus "?thesis" using exec by(simp add: step.simps) qed declare adjust.simps[simp del] lemma adjust_steps_eq: assumes exec: "steps0 (st,l,r) ap stp = (st', l', r')" and wf_tm: "tm_wf (ap, 0)" and notfinal: "st' > 0" shows "steps0 (st, l, r) (adjust0 ap) stp = (st', l', r')" using exec notfinal proof(induct stp arbitrary: st' l' r') case 0 thus "?case" by(simp add: steps.simps) next case (Suc stp st' l' r') have ind: "\st' l' r'. \steps0 (st, l, r) ap stp = (st', l', r'); 0 < st'\ \ steps0 (st, l, r) (adjust0 ap) stp = (st', l', r')" by fact have h: "steps0 (st, l, r) ap (Suc stp) = (st', l', r')" by fact have g: "0 < st'" by fact obtain st'' l'' r'' where a: "steps0 (st, l, r) ap stp = (st'', l'', r'')" by (metis prod_cases3) hence c:"0 < st''" using h g apply(simp add: step_red) apply(case_tac st'', auto) done hence b: "steps0 (st, l, r) (adjust0 ap) stp = (st'', l'', r'')" using a by(rule_tac ind, simp_all) thus "?case" using assms a b h g apply(simp add: step_red) apply(rule_tac adjust_step_eq, simp_all) done qed lemma adjust_halt_eq: assumes exec: "steps0 (1, l, r) ap stp = (0, l', r')" and tm_wf: "tm_wf (ap, 0)" shows "\ stp. steps0 (Suc 0, l, r) (adjust0 ap) stp = (Suc (length ap div 2), l', r')" proof - have "\ stp. \ is_final (steps0 (1, l, r) ap stp) \ (steps0 (1, l, r) ap (Suc stp) = (0, l', r'))" using exec by(erule_tac before_final) then obtain stpa where a: "\ is_final (steps0 (1, l, r) ap stpa) \ (steps0 (1, l, r) ap (Suc stpa) = (0, l', r'))" .. 
obtain sa la ra where b:"steps0 (1, l, r) ap stpa = (sa, la, ra)" by (metis prod_cases3) hence c: "steps0 (Suc 0, l, r) (adjust0 ap) stpa = (sa, la, ra)" using assms a apply(rule_tac adjust_steps_eq, simp_all) done have d: "sa \ length ap div 2" using steps_in_range[of "(l, r)" ap stpa] a tm_wf b by(simp) obtain ac ns where e: "fetch ap sa (read ra) = (ac, ns)" by (metis prod.exhaust) hence f: "ns = 0" using b a apply(simp add: step_red step.simps) done have k: "fetch (adjust0 ap) sa (read ra) = (ac, Suc (length ap div 2))" using a b c d e f apply(rule_tac adjust_fetch0, simp_all) done from a b e f k and c show "?thesis" apply(rule_tac x = "Suc stpa" in exI) apply(simp add: step_red, auto) apply(simp add: step.simps) done qed declare tm_wf.simps[simp del] lemma tm_wf_t_twice_compile [simp]: "tm_wf (t_twice_compile, 0)" apply(simp only: t_twice_compile_def) apply(rule_tac wf_tm_from_abacus, simp) done lemma t_twice_change_term_state: "\ stp ln rn. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) t_twice stp = (Suc t_twice_len, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" proof - have "\stp ln rn. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (tm_of abc_twice @ shift (mopup (Suc 0)) ((length (tm_of abc_twice) div 2))) stp = (0, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" by(rule_tac t_twice_correct) then obtain stp ln rn where " steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (tm_of abc_twice @ shift (mopup (Suc 0)) ((length (tm_of abc_twice) div 2))) stp = (0, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" by blast hence "\ stp. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (adjust0 t_twice_compile) stp = (Suc (length t_twice_compile div 2), Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" apply(rule_tac stp = stp in adjust_halt_eq) apply(simp add: t_twice_compile_def, auto) done then obtain stpb where "steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (adjust0 t_twice_compile) stpb = (Suc (length t_twice_compile div 2), Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" .. thus "?thesis" apply(simp add: t_twice_def t_twice_len_def) by metis qed lemma length_t_wcode_main_first_part_even[intro]: "length t_wcode_main_first_part mod 2 = 0" apply(auto simp: t_wcode_main_first_part_def) done lemma t_twice_append_pre: "steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) t_twice stp = (Suc t_twice_len, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn)) \ steps0 (Suc 0 + length t_wcode_main_first_part div 2, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ ([(L, 1), (L, 1)] @ shift t_fourtimes (t_twice_len + 13) @ [(L, 1), (L, 1)])) stp = (Suc (t_twice_len) + length t_wcode_main_first_part div 2, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" by(rule_tac tm_append_shift_append_steps, auto) lemma t_twice_append: "\ stp ln rn. 
steps0 (Suc 0 + length t_wcode_main_first_part div 2, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ ([(L, 1), (L, 1)] @ shift t_fourtimes (t_twice_len + 13) @ [(L, 1), (L, 1)])) stp = (Suc (t_twice_len) + length t_wcode_main_first_part div 2, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(rn))" using t_twice_change_term_state[of ires rs n] apply(erule_tac exE) apply(erule_tac exE) apply(erule_tac exE) apply(drule_tac t_twice_append_pre) apply(rename_tac stp ln rn) apply(rule_tac x = stp in exI, rule_tac x = ln in exI, rule_tac x = rn in exI) apply(simp) done lemma mopup_mod2: "length (mopup k) mod 2 = 0" by(auto simp: mopup.simps) lemma fetch_t_wcode_main_Oc[simp]: "fetch t_wcode_main (Suc (t_twice_len + length t_wcode_main_first_part div 2)) Oc = (L, Suc 0)" apply(subgoal_tac "length (t_twice) mod 2 = 0") apply(simp add: t_wcode_main_def nth_append fetch.simps t_wcode_main_first_part_def nth_of.simps t_twice_len_def, auto) apply(simp add: t_twice_def t_twice_compile_def) using mopup_mod2[of 1] apply(simp) done lemma wcode_jump1: "\ stp ln rn. steps0 (Suc (t_twice_len) + length t_wcode_main_first_part div 2, Bk\(m) @ Bk # Bk # ires, Oc\(Suc (2 * rs)) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk\(ln) @ Bk # ires, Bk # Oc\(Suc (2 * rs)) @ Bk\(rn))" apply(rule_tac x = "Suc 0" in exI, rule_tac x = "m" in exI, rule_tac x = n in exI) apply(simp add: steps.simps step.simps exp_ind) apply(case_tac m, simp_all) apply(simp add: exp_ind[THEN sym]) done lemma wcode_main_first_part_len[simp]: "length t_wcode_main_first_part = 24" apply(simp add: t_wcode_main_first_part_def) done lemma wcode_double_case: shows "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc # ires, Bk # Oc\(Suc (2 * rs + 2)) @ Bk\(rn))" (is "\stp ln rn. ?tm stp ln rn") proof - from wcode_double_case_first_correctness[of ires rs m n] obtain na ln rn where "steps0 (Suc 0, Bk # Bk \ m @ Oc # Oc # ires, Bk # Oc # Oc \ rs @ Bk \ n) t_wcode_main na = (13, Bk # Bk # Bk \ ln @ Oc # ires, Oc # Oc # Oc \ rs @ Bk \ rn)" by(auto simp: wcode_double_case_inv.simps wcode_before_double.simps) hence "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (13, Bk # Bk # Bk\(ln) @ Oc # ires, Oc\(Suc (Suc rs)) @ Bk\(rn))" by(case_tac "steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main na", auto) from this obtain stpa lna rna where stp1: "steps0 (Suc 0, Bk # Bk\(m) @ Oc # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stpa = (13, Bk # Bk # Bk\(lna) @ Oc # ires, Oc\(Suc (Suc rs)) @ Bk\(rna))" by blast from t_twice_append[of "Bk\(lna) @ Oc # ires" "Suc rs" rna] obtain stp ln rn where "steps0 (Suc 0 + length t_wcode_main_first_part div 2, Bk # Bk # Bk \ lna @ Oc # ires, Oc \ Suc (Suc rs) @ Bk \ rna) (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)] @ shift t_fourtimes (t_twice_len + 13) @ [(L, 1), (L, 1)]) stp = (Suc t_twice_len + length t_wcode_main_first_part div 2, Bk \ ln @ Bk # Bk # Bk \ lna @ Oc # ires, Oc \ Suc (2 * Suc rs) @ Bk \ rn)" by blast hence "\ stp ln rn. 
steps0 (13, Bk # Bk # Bk\(lna) @ Oc # ires, Oc\(Suc (Suc rs)) @ Bk\(rna)) t_wcode_main stp = (13 + t_twice_len, Bk # Bk # Bk\(ln) @ Oc # ires, Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rn))" using t_twice_append[of "Bk\(lna) @ Oc # ires" "Suc rs" rna] apply(simp) apply(rule_tac x = stp in exI, rule_tac x = "ln + lna" in exI, rule_tac x = rn in exI) apply(simp add: t_wcode_main_def) apply(simp add: replicate_Suc[THEN sym] replicate_add [THEN sym] del: replicate_Suc) done from this obtain stpb lnb rnb where stp2: "steps0 (13, Bk # Bk # Bk\(lna) @ Oc # ires, Oc\(Suc (Suc rs)) @ Bk\(rna)) t_wcode_main stpb = (13 + t_twice_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rnb))" by blast from wcode_jump1[of lnb "Oc # ires" "Suc rs" rnb] obtain stp ln rn where "steps0 (Suc t_twice_len + length t_wcode_main_first_part div 2, Bk \ lnb @ Bk # Bk # Oc # ires, Oc \ Suc (2 * Suc rs) @ Bk \ rnb) t_wcode_main stp = (Suc 0, Bk \ ln @ Bk # Oc # ires, Bk # Oc \ Suc (2 * Suc rs) @ Bk \ rn)" by metis hence "steps0 (13 + t_twice_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rnb)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc # ires, Bk # Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rn))" apply(auto simp add: t_wcode_main_def) apply(subgoal_tac "Bk\(lnb) @ Bk # Bk # Oc # ires = Bk # Bk # Bk\(lnb) @ Oc # ires", simp) apply(simp add: replicate_Suc[THEN sym] exp_ind[THEN sym] del: replicate_Suc) apply(simp) apply(simp add: replicate_Suc[THEN sym] exp_ind del: replicate_Suc) done hence "\stp ln rn. steps0 (13 + t_twice_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rnb)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc # ires, Bk # Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rn))" by blast from this obtain stpc lnc rnc where stp3: "steps0 (13 + t_twice_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rnb)) t_wcode_main stpc = (Suc 0, Bk # Bk\(lnc) @ Oc # ires, Bk # Oc\(Suc (Suc (Suc (2 *rs)))) @ Bk\(rnc))" by blast from stp1 stp2 stp3 have "?tm (stpa + stpb + stpc) lnc rnc" by simp thus "?thesis" by blast qed (* Begin: fourtime_case*) fun wcode_on_left_moving_2_B :: "bin_inv_t" where "wcode_on_left_moving_2_B ires rs (l, r) = (\ ml mr rn. l = Bk\(ml) @ Oc # Bk # Oc # ires \ r = Bk\(mr) @ Oc\(Suc rs) @ Bk\(rn) \ ml + mr > Suc 0 \ mr > 0)" fun wcode_on_left_moving_2_O :: "bin_inv_t" where "wcode_on_left_moving_2_O ires rs (l, r) = (\ ln rn. l = Bk # Oc # ires \ r = Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_on_left_moving_2 :: "bin_inv_t" where "wcode_on_left_moving_2 ires rs (l, r) = (wcode_on_left_moving_2_B ires rs (l, r) \ wcode_on_left_moving_2_O ires rs (l, r))" fun wcode_on_checking_2 :: "bin_inv_t" where "wcode_on_checking_2 ires rs (l, r) = (\ ln rn. l = Oc#ires \ r = Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_goon_checking :: "bin_inv_t" where "wcode_goon_checking ires rs (l, r) = (\ ln rn. l = ires \ r = Oc # Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_right_move :: "bin_inv_t" where "wcode_right_move ires rs (l, r) = (\ ln rn. l = Oc # ires \ r = Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_erase2 :: "bin_inv_t" where "wcode_erase2 ires rs (l, r) = (\ ln rn. l = Bk # Oc # ires \ tl r = Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_on_right_moving_2 :: "bin_inv_t" where "wcode_on_right_moving_2 ires rs (l, r) = (\ ml mr rn. 
l = Bk\(ml) @ Oc # ires \ r = Bk\(mr) @ Oc\(Suc rs) @ Bk\(rn) \ ml + mr > Suc 0)" fun wcode_goon_right_moving_2 :: "bin_inv_t" where "wcode_goon_right_moving_2 ires rs (l, r) = (\ ml mr ln rn. l = Oc\(ml) @ Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\(mr) @ Bk\(rn) \ ml + mr = Suc rs)" fun wcode_backto_standard_pos_2_B :: "bin_inv_t" where "wcode_backto_standard_pos_2_B ires rs (l, r) = (\ ln rn. l = Bk # Bk\(ln) @ Oc # ires \ r = Bk # Oc\(Suc (Suc rs)) @ Bk\(rn))" fun wcode_backto_standard_pos_2_O :: "bin_inv_t" where "wcode_backto_standard_pos_2_O ires rs (l, r) = (\ ml mr ln rn. l = Oc\(ml )@ Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\(mr) @ Bk\(rn) \ ml + mr = (Suc (Suc rs)) \ mr > 0)" fun wcode_backto_standard_pos_2 :: "bin_inv_t" where "wcode_backto_standard_pos_2 ires rs (l, r) = (wcode_backto_standard_pos_2_O ires rs (l, r) \ wcode_backto_standard_pos_2_B ires rs (l, r))" fun wcode_before_fourtimes :: "bin_inv_t" where "wcode_before_fourtimes ires rs (l, r) = (\ ln rn. l = Bk # Bk # Bk\(ln) @ Oc # ires \ r = Oc\(Suc (Suc rs)) @ Bk\(rn))" declare wcode_on_left_moving_2_B.simps[simp del] wcode_on_left_moving_2.simps[simp del] wcode_on_left_moving_2_O.simps[simp del] wcode_on_checking_2.simps[simp del] wcode_goon_checking.simps[simp del] wcode_right_move.simps[simp del] wcode_erase2.simps[simp del] wcode_on_right_moving_2.simps[simp del] wcode_goon_right_moving_2.simps[simp del] wcode_backto_standard_pos_2_B.simps[simp del] wcode_backto_standard_pos_2_O.simps[simp del] wcode_backto_standard_pos_2.simps[simp del] lemmas wcode_fourtimes_invs = wcode_on_left_moving_2_B.simps wcode_on_left_moving_2.simps wcode_on_left_moving_2_O.simps wcode_on_checking_2.simps wcode_goon_checking.simps wcode_right_move.simps wcode_erase2.simps wcode_on_right_moving_2.simps wcode_goon_right_moving_2.simps wcode_backto_standard_pos_2_B.simps wcode_backto_standard_pos_2_O.simps wcode_backto_standard_pos_2.simps fun wcode_fourtimes_case_inv :: "nat \ bin_inv_t" where "wcode_fourtimes_case_inv st ires rs (l, r) = (if st = Suc 0 then wcode_on_left_moving_2 ires rs (l, r) else if st = Suc (Suc 0) then wcode_on_checking_2 ires rs (l, r) else if st = 7 then wcode_goon_checking ires rs (l, r) else if st = 8 then wcode_right_move ires rs (l, r) else if st = 9 then wcode_erase2 ires rs (l, r) else if st = 10 then wcode_on_right_moving_2 ires rs (l, r) else if st = 11 then wcode_goon_right_moving_2 ires rs (l, r) else if st = 12 then wcode_backto_standard_pos_2 ires rs (l, r) else if st = t_twice_len + 14 then wcode_before_fourtimes ires rs (l, r) else False)" declare wcode_fourtimes_case_inv.simps[simp del] fun wcode_fourtimes_case_state :: "config \ nat" where "wcode_fourtimes_case_state (st, l, r) = 13 - st" fun wcode_fourtimes_case_step :: "config \ nat" where "wcode_fourtimes_case_step (st, l, r) = (if st = Suc 0 then length l else if st = 9 then (if hd r = Oc then 1 else 0) else if st = 10 then length r else if st = 11 then length r else if st = 12 then length l else 0)" fun wcode_fourtimes_case_measure :: "config \ nat \ nat" where "wcode_fourtimes_case_measure (st, l, r) = (wcode_fourtimes_case_state (st, l, r), wcode_fourtimes_case_step (st, l, r))" definition wcode_fourtimes_case_le :: "(config \ config) set" where "wcode_fourtimes_case_le \ (inv_image lex_pair wcode_fourtimes_case_measure)" lemma wf_wcode_fourtimes_case_le[intro]: "wf wcode_fourtimes_case_le" by(auto simp: wcode_fourtimes_case_le_def) lemma nonempty_snd [simp]: "wcode_on_left_moving_2 ires rs (b, []) = False" "wcode_on_checking_2 ires rs (b, []) = 
False" "wcode_goon_checking ires rs (b, []) = False" "wcode_right_move ires rs (b, []) = False" "wcode_erase2 ires rs (b, []) = False" "wcode_on_right_moving_2 ires rs (b, []) = False" "wcode_backto_standard_pos_2 ires rs (b, []) = False" "wcode_on_checking_2 ires rs (b, Oc # list) = False" by(auto simp: wcode_fourtimes_invs) lemma wcode_on_left_moving_2[simp]: "wcode_on_left_moving_2 ires rs (b, Bk # list) \ wcode_on_left_moving_2 ires rs (tl b, hd b # Bk # list)" apply(simp only: wcode_fourtimes_invs) apply(erule_tac disjE) apply(erule_tac exE)+ apply(simp add: gr1_conv_Suc exp_ind replicate_app_Cons_same split:hd_repeat_cases) apply (auto simp add: gr0_conv_Suc[symmetric] replicate_app_Cons_same split:hd_repeat_cases) by force+ lemma wcode_goon_checking_via_2 [simp]: "wcode_on_checking_2 ires rs (b, Bk # list) \ wcode_goon_checking ires rs (tl b, hd b # Bk # list)" unfolding wcode_fourtimes_invs by auto lemma wcode_erase2_via_move [simp]: "wcode_right_move ires rs (b, Bk # list) \ wcode_erase2 ires rs (Bk # b, list)" by (auto simp:wcode_fourtimes_invs ) auto lemma wcode_on_right_moving_2_via_erase2[simp]: "wcode_erase2 ires rs (b, Bk # list) \ wcode_on_right_moving_2 ires rs (Bk # b, list)" apply(auto simp:wcode_fourtimes_invs ) apply(rule_tac x = "Suc (Suc 0)" in exI, simp add: exp_ind) by (metis replicate_Suc_iff_anywhere replicate_app_Cons_same) lemma wcode_on_right_moving_2_move_Bk[simp]: "wcode_on_right_moving_2 ires rs (b, Bk # list) \ wcode_on_right_moving_2 ires rs (Bk # b, list)" apply(auto simp: wcode_fourtimes_invs) apply(rename_tac ml mr rn) apply(rule_tac x = "Suc ml" in exI, simp) apply(rule_tac x = "mr - 1" in exI, case_tac mr,auto) done lemma wcode_backto_standard_pos_2_via_right[simp]: "wcode_goon_right_moving_2 ires rs (b, Bk # list) \ wcode_backto_standard_pos_2 ires rs (b, Oc # list)" apply(simp add: wcode_fourtimes_invs, auto) by (metis add.right_neutral add_Suc_shift append_Cons list.sel(3) replicate.simps(1) replicate_Suc replicate_Suc_iff_anywhere self_append_conv2 tl_replicate zero_less_Suc) lemma wcode_on_checking_2_via_left[simp]: "wcode_on_left_moving_2 ires rs (b, Oc # list) \ wcode_on_checking_2 ires rs (tl b, hd b # Oc # list)" by(auto simp: wcode_fourtimes_invs) lemma wcode_backto_standard_pos_2_empty_via_right[simp]: "wcode_goon_right_moving_2 ires rs (b, []) \ wcode_backto_standard_pos_2 ires rs (b, [Oc])" apply(simp only: wcode_fourtimes_invs) apply(erule_tac exE)+ by(rule_tac disjI1,auto) lemma wcode_goon_checking_cases[simp]: "wcode_goon_checking ires rs (b, Oc # list) \ (b = [] \ wcode_right_move ires rs ([Oc], list)) \ (b \ [] \ wcode_right_move ires rs (Oc # b, list))" apply(simp only: wcode_fourtimes_invs) apply(erule_tac exE)+ apply(auto) done lemma wcode_right_move_no_Oc[simp]: "wcode_right_move ires rs (b, Oc # list) = False" apply(auto simp: wcode_fourtimes_invs) done lemma wcode_erase2_Bk_via_Oc[simp]: "wcode_erase2 ires rs (b, Oc # list) \ wcode_erase2 ires rs (b, Bk # list)" apply(auto simp: wcode_fourtimes_invs) done lemma wcode_goon_right_moving_2_Oc_move[simp]: "wcode_on_right_moving_2 ires rs (b, Oc # list) \ wcode_goon_right_moving_2 ires rs (Oc # b, list)" apply(auto simp: wcode_fourtimes_invs) apply(rule_tac x = "Suc 0" in exI, auto) apply(rule_tac x = "ml - 2" in exI) apply(case_tac ml, simp, case_tac "ml - 1", simp_all) done lemma wcode_backto_standard_pos_2_exists[simp]: "wcode_backto_standard_pos_2 ires rs (b, Bk # list) \ (\ln. b = Bk # Bk\(ln) @ Oc # ires) \ (\rn. 
list = Oc\(Suc (Suc rs)) @ Bk\(rn))" by(simp add: wcode_fourtimes_invs) lemma wcode_goon_right_moving_2_move_Oc[simp]: "wcode_goon_right_moving_2 ires rs (b, Oc # list) \ wcode_goon_right_moving_2 ires rs (Oc # b, list)" apply(simp only:wcode_fourtimes_invs, auto) apply(rename_tac ml ln mr rn) apply(case_tac mr;force) done lemma wcode_backto_standard_pos_2_Oc_mv_hd[simp]: "wcode_backto_standard_pos_2 ires rs (b, Oc # list) \ wcode_backto_standard_pos_2 ires rs (tl b, hd b # Oc # list)" apply(simp only: wcode_fourtimes_invs) apply(erule_tac disjE) apply(erule_tac exE)+ apply(rename_tac ml mr ln rn) by (case_tac ml, force,force,force) lemma nonempty_fst[simp]: "wcode_on_left_moving_2 ires rs (b, Bk # list) \ b \ []" "wcode_on_checking_2 ires rs (b, Bk # list) \ b \ []" "wcode_goon_checking ires rs (b, Bk # list) = False" "wcode_right_move ires rs (b, Bk # list) \ b\ []" "wcode_erase2 ires rs (b, Bk # list) \ b \ []" "wcode_on_right_moving_2 ires rs (b, Bk # list) \ b \ []" "wcode_goon_right_moving_2 ires rs (b, Bk # list) \ b \ []" "wcode_backto_standard_pos_2 ires rs (b, Bk # list) \ b \ []" "wcode_on_left_moving_2 ires rs (b, Oc # list) \ b \ []" "wcode_goon_right_moving_2 ires rs (b, []) \ b \ []" "wcode_erase2 ires rs (b, Oc # list) \ b \ []" "wcode_on_right_moving_2 ires rs (b, Oc # list) \ b \ []" "wcode_goon_right_moving_2 ires rs (b, Oc # list) \ b \ []" "wcode_backto_standard_pos_2 ires rs (b, Oc # list) \ b \ []" by(auto simp: wcode_fourtimes_invs) lemma wcode_fourtimes_case_first_correctness: shows "let P = (\ (st, l, r). st = t_twice_len + 14) in let Q = (\ (st, l, r). wcode_fourtimes_case_inv st ires rs (l, r)) in let f = (\ stp. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp) in \ n .P (f n) \ Q (f (n::nat))" proof - let ?P = "(\ (st, l, r). st = t_twice_len + 14)" let ?Q = "(\ (st, l, r). wcode_fourtimes_case_inv st ires rs (l, r))" let ?f = "(\ stp. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp)" have "\ n . ?P (?f n) \ ?Q (?f (n::nat))" proof(rule_tac halt_lemma2) show "wf wcode_fourtimes_case_le" by auto next have "\ ?P (?f na) \ ?Q (?f na) \ ?Q (?f (Suc na)) \ (?f (Suc na), ?f na) \ wcode_fourtimes_case_le" for na apply(cases "?f na", rule_tac impI) apply(simp add: step_red step.simps) apply(case_tac "snd (snd (?f na))", simp, case_tac [2] "hd (snd (snd (?f na)))", simp_all) apply(simp_all add: wcode_fourtimes_case_inv.simps wcode_fourtimes_case_le_def lex_pair_def split: if_splits) by(auto simp: wcode_backto_standard_pos_2.simps wcode_backto_standard_pos_2_O.simps wcode_backto_standard_pos_2_B.simps gr0_conv_Suc) thus "\ na. 
\ ?P (?f na) \ ?Q (?f na) \ ?Q (?f (Suc na)) \ (?f (Suc na), ?f na) \ wcode_fourtimes_case_le" by auto next show "?Q (?f 0)" apply(simp add: steps.simps wcode_fourtimes_case_inv.simps) apply(simp add: wcode_on_left_moving_2.simps wcode_on_left_moving_2_B.simps wcode_on_left_moving_2_O.simps) apply(rule_tac x = "Suc m" in exI, simp ) apply(rule_tac x ="Suc 0" in exI, auto) done next show "\ ?P (?f 0)" apply(simp add: steps.simps) done qed thus "?thesis" apply(erule_tac exE, simp) done qed definition t_fourtimes_len :: "nat" where "t_fourtimes_len = (length t_fourtimes div 2)" lemma primerec_rec_fourtimes_1[intro]: "primerec rec_fourtimes (Suc 0)" apply(auto simp: rec_fourtimes_def numeral_4_eq_4 constn.simps) by auto lemma fourtimes_lemma: "rec_exec rec_fourtimes [rs] = 4 * rs" by(simp add: rec_exec.simps rec_fourtimes_def) lemma t_fourtimes_correct: "\stp ln rn. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (tm_of abc_fourtimes @ shift (mopup 1) (length (tm_of abc_fourtimes) div 2)) stp = (0, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" proof(case_tac "rec_ci rec_fourtimes") fix a b c assume h: "rec_ci rec_fourtimes = (a, b, c)" have "\stp m l. steps0 (Suc 0, Bk # Bk # ires, <[rs]> @ Bk\(n)) (tm_of abc_fourtimes @ shift (mopup (length [rs])) (length (tm_of abc_fourtimes) div 2)) stp = (0, Bk\(m) @ Bk # Bk # ires, Oc\(Suc (rec_exec rec_fourtimes [rs])) @ Bk\(l))" thm recursive_compile_to_tm_correct1 proof(rule_tac recursive_compile_to_tm_correct1) show "rec_ci rec_fourtimes = (a, b, c)" by (simp add: h) next show "terminate rec_fourtimes [rs]" apply(rule_tac primerec_terminate) by auto next show "tm_of abc_fourtimes = tm_of (a [+] dummy_abc (length [rs]))" using h by(simp add: abc_fourtimes_def) qed thus "?thesis" apply(simp add: tape_of_list_def tape_of_nat_def fourtimes_lemma) done qed lemma wf_fourtimes[intro]: "tm_wf (t_fourtimes_compile, 0)" apply(simp only: t_fourtimes_compile_def) apply(rule_tac wf_tm_from_abacus, simp) done lemma t_fourtimes_change_term_state: "\ stp ln rn. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) t_fourtimes stp = (Suc t_fourtimes_len, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" proof - have "\stp ln rn. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (tm_of abc_fourtimes @ shift (mopup 1) ((length (tm_of abc_fourtimes) div 2))) stp = (0, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" by(rule_tac t_fourtimes_correct) then obtain stp ln rn where "steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (tm_of abc_fourtimes @ shift (mopup 1) ((length (tm_of abc_fourtimes) div 2))) stp = (0, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" by blast hence "\ stp. steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (adjust0 t_fourtimes_compile) stp = (Suc (length t_fourtimes_compile div 2), Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" apply(rule_tac stp = stp in adjust_halt_eq) apply(simp add: t_fourtimes_compile_def, auto) done then obtain stpb where "steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) (adjust0 t_fourtimes_compile) stpb = (Suc (length t_fourtimes_compile div 2), Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" .. 
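(* As in t_twice_change_term_state above, the remaining step only rewrites with
   t_fourtimes_def and t_fourtimes_len_def, so that the halting configuration of the
   adjusted compiled machine in state Suc (length t_fourtimes_compile div 2) becomes
   the claimed halting configuration of t_fourtimes in state Suc t_fourtimes_len. *)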
thus "?thesis" apply(simp add: t_fourtimes_def t_fourtimes_len_def) by metis qed lemma length_t_twice_even[intro]: "is_even (length t_twice)" by(auto simp: t_twice_def t_twice_compile_def intro!:mopup_mod2) lemma t_fourtimes_append_pre: "steps0 (Suc 0, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) t_fourtimes stp = (Suc t_fourtimes_len, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn)) \ steps0 (Suc 0 + length (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) div 2, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) ((t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) @ shift t_fourtimes (length (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) div 2) @ ([(L, 1), (L, 1)])) stp = ((Suc t_fourtimes_len) + length (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) div 2, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" using length_t_twice_even by(intro tm_append_shift_append_steps, auto) lemma split_26_even[simp]: "(26 + l::nat) div 2 = l div 2 + 13" by(simp) lemma t_twice_len_plust_14[simp]: "t_twice_len + 14 = 14 + length (shift t_twice 12) div 2" apply(simp add: t_twice_def t_twice_len_def) done lemma t_fourtimes_append: "\ stp ln rn. steps0 (Suc 0 + length (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) div 2, Bk # Bk # ires, Oc\(Suc rs) @ Bk\(n)) ((t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) @ shift t_fourtimes (t_twice_len + 13) @ [(L, 1), (L, 1)]) stp = (Suc t_fourtimes_len + length (t_wcode_main_first_part @ shift t_twice (length t_wcode_main_first_part div 2) @ [(L, 1), (L, 1)]) div 2, Bk\(ln) @ Bk # Bk # ires, Oc\(Suc (4 * rs)) @ Bk\(rn))" using t_fourtimes_change_term_state[of ires rs n] apply(erule_tac exE) apply(erule_tac exE) apply(erule_tac exE) apply(drule_tac t_fourtimes_append_pre) apply(rule_tac x = stp in exI, rule_tac x = ln in exI, rule_tac x = rn in exI) apply(simp add: t_twice_len_def) done lemma even_fourtimes_len: "length t_fourtimes mod 2 = 0" apply(auto simp: t_fourtimes_def t_fourtimes_compile_def) by (metis mopup_mod2) lemma t_twice_even[simp]: "2 * (length t_twice div 2) = length t_twice" using length_t_twice_even by arith lemma t_fourtimes_even[simp]: "2 * (length t_fourtimes div 2) = length t_fourtimes" using even_fourtimes_len by arith lemma fetch_t_wcode_14_Oc: "fetch t_wcode_main (14 + length t_twice div 2 + t_fourtimes_len) Oc = (L, Suc 0)" apply(subgoal_tac "14 = Suc 13") apply(simp only: fetch.simps add_Suc nth_of.simps t_wcode_main_def) apply(simp add:length_t_twice_even t_fourtimes_len_def nth_append) by arith lemma fetch_t_wcode_14_Bk: "fetch t_wcode_main (14 + length t_twice div 2 + t_fourtimes_len) Bk = (L, Suc 0)" apply(subgoal_tac "14 = Suc 13") apply(simp only: fetch.simps add_Suc nth_of.simps t_wcode_main_def) apply(simp add:length_t_twice_even t_fourtimes_len_def nth_append) by arith lemma fetch_t_wcode_14 [simp]: "fetch t_wcode_main (14 + length t_twice div 2 + t_fourtimes_len) b = (L, Suc 0)" apply(case_tac b, simp_all add:fetch_t_wcode_14_Bk fetch_t_wcode_14_Oc) done lemma wcode_jump2: "\ stp ln rn. 
steps0 (t_twice_len + 14 + t_fourtimes_len , Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (4 * rs + 4)) @ Bk\(rnb)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc # ires, Bk # Oc\(Suc (4 * rs + 4)) @ Bk\(rn))" apply(rule_tac x = "Suc 0" in exI) apply(simp add: steps.simps) apply(rule_tac x = lnb in exI, rule_tac x = rnb in exI) apply(simp add: step.simps) done lemma wcode_fourtimes_case: shows "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rn))" proof - have "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (t_twice_len + 14, Bk # Bk # Bk\(ln) @ Oc # ires, Oc\(Suc (rs + 1)) @ Bk\(rn))" using wcode_fourtimes_case_first_correctness[of ires rs m n] by (auto simp add: wcode_fourtimes_case_inv.simps) auto from this obtain stpa lna rna where stp1: "steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Oc # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stpa = (t_twice_len + 14, Bk # Bk # Bk\(lna) @ Oc # ires, Oc\(Suc (rs + 1)) @ Bk\(rna))" by blast have "\stp ln rn. steps0 (t_twice_len + 14, Bk # Bk # Bk\(lna) @ Oc # ires, Oc\(Suc (rs + 1)) @ Bk\(rna)) t_wcode_main stp = (t_twice_len + 14 + t_fourtimes_len, Bk # Bk # Bk\(ln) @ Oc # ires, Oc\(Suc (4*rs + 4)) @ Bk\(rn))" using t_fourtimes_append[of " Bk\(lna) @ Oc # ires" "rs + 1" rna] apply(erule_tac exE) apply(erule_tac exE) apply(erule_tac exE) apply(simp add: t_wcode_main_def) apply(rename_tac stp ln rn) apply(rule_tac x = stp in exI, rule_tac x = "ln + lna" in exI, rule_tac x = rn in exI, simp) apply(simp add: replicate_Suc[THEN sym] replicate_add[THEN sym] del: replicate_Suc) done from this obtain stpb lnb rnb where stp2: "steps0 (t_twice_len + 14, Bk # Bk # Bk\(lna) @ Oc # ires, Oc\(Suc (rs + 1)) @ Bk\(rna)) t_wcode_main stpb = (t_twice_len + 14 + t_fourtimes_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (4*rs + 4)) @ Bk\(rnb))" by blast have "\stp ln rn. steps0 (t_twice_len + 14 + t_fourtimes_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (4*rs + 4)) @ Bk\(rnb)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rn))" apply(rule wcode_jump2) done from this obtain stpc lnc rnc where stp3: "steps0 (t_twice_len + 14 + t_fourtimes_len, Bk # Bk # Bk\(lnb) @ Oc # ires, Oc\(Suc (4*rs + 4)) @ Bk\(rnb)) t_wcode_main stpc = (Suc 0, Bk # Bk\(lnc) @ Oc # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rnc))" by blast from stp1 stp2 stp3 show "?thesis" apply(rule_tac x = "stpa + stpb + stpc" in exI, rule_tac x = lnc in exI, rule_tac x = rnc in exI) apply(simp) done qed fun wcode_on_left_moving_3_B :: "bin_inv_t" where "wcode_on_left_moving_3_B ires rs (l, r) = (\ ml mr rn. l = Bk\(ml) @ Oc # Bk # Bk # ires \ r = Bk\(mr) @ Oc\(Suc rs) @ Bk\(rn) \ ml + mr > Suc 0 \ mr > 0 )" fun wcode_on_left_moving_3_O :: "bin_inv_t" where "wcode_on_left_moving_3_O ires rs (l, r) = (\ ln rn. l = Bk # Bk # ires \ r = Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_on_left_moving_3 :: "bin_inv_t" where "wcode_on_left_moving_3 ires rs (l, r) = (wcode_on_left_moving_3_B ires rs (l, r) \ wcode_on_left_moving_3_O ires rs (l, r))" fun wcode_on_checking_3 :: "bin_inv_t" where "wcode_on_checking_3 ires rs (l, r) = (\ ln rn. l = Bk # ires \ r = Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_goon_checking_3 :: "bin_inv_t" where "wcode_goon_checking_3 ires rs (l, r) = (\ ln rn. 
l = ires \ r = Bk # Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_stop :: "bin_inv_t" where "wcode_stop ires rs (l, r) = (\ ln rn. l = Bk # ires \ r = Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" fun wcode_halt_case_inv :: "nat \ bin_inv_t" where "wcode_halt_case_inv st ires rs (l, r) = (if st = 0 then wcode_stop ires rs (l, r) else if st = Suc 0 then wcode_on_left_moving_3 ires rs (l, r) else if st = Suc (Suc 0) then wcode_on_checking_3 ires rs (l, r) else if st = 7 then wcode_goon_checking_3 ires rs (l, r) else False)" fun wcode_halt_case_state :: "config \ nat" where "wcode_halt_case_state (st, l, r) = (if st = 1 then 5 else if st = Suc (Suc 0) then 4 else if st = 7 then 3 else 0)" fun wcode_halt_case_step :: "config \ nat" where "wcode_halt_case_step (st, l, r) = (if st = 1 then length l else 0)" fun wcode_halt_case_measure :: "config \ nat \ nat" where "wcode_halt_case_measure (st, l, r) = (wcode_halt_case_state (st, l, r), wcode_halt_case_step (st, l, r))" definition wcode_halt_case_le :: "(config \ config) set" where "wcode_halt_case_le \ (inv_image lex_pair wcode_halt_case_measure)" lemma wf_wcode_halt_case_le[intro]: "wf wcode_halt_case_le" by(auto simp: wcode_halt_case_le_def) declare wcode_on_left_moving_3_B.simps[simp del] wcode_on_left_moving_3_O.simps[simp del] wcode_on_checking_3.simps[simp del] wcode_goon_checking_3.simps[simp del] wcode_on_left_moving_3.simps[simp del] wcode_stop.simps[simp del] lemmas wcode_halt_invs = wcode_on_left_moving_3_B.simps wcode_on_left_moving_3_O.simps wcode_on_checking_3.simps wcode_goon_checking_3.simps wcode_on_left_moving_3.simps wcode_stop.simps lemma wcode_on_left_moving_3_mv_Bk[simp]: "wcode_on_left_moving_3 ires rs (b, Bk # list) \ wcode_on_left_moving_3 ires rs (tl b, hd b # Bk # list)" apply(simp only: wcode_halt_invs) apply(erule_tac disjE) apply(erule_tac exE)+ apply(rename_tac ml mr rn) apply(case_tac ml, simp) apply(rule_tac x = "mr - 2" in exI, rule_tac x = rn in exI) apply(case_tac mr, force, simp add: exp_ind del: replicate_Suc) apply(case_tac "mr - 1", force, simp add: exp_ind del: replicate_Suc) apply force apply force done lemma wcode_goon_checking_3_cases[simp]: "wcode_goon_checking_3 ires rs (b, Bk # list) \ (b = [] \ wcode_stop ires rs ([Bk], list)) \ (b \ [] \ wcode_stop ires rs (Bk # b, list))" apply(auto simp: wcode_halt_invs) done lemma wcode_on_checking_3_mv_Oc[simp]: "wcode_on_left_moving_3 ires rs (b, Oc # list) \ wcode_on_checking_3 ires rs (tl b, hd b # Oc # list)" by(simp add:wcode_halt_invs) lemma wcode_3_nonempty[simp]: "wcode_on_left_moving_3 ires rs (b, []) = False" "wcode_on_checking_3 ires rs (b, []) = False" "wcode_goon_checking_3 ires rs (b, []) = False" "wcode_on_left_moving_3 ires rs (b, Oc # list) \ b \ []" "wcode_on_checking_3 ires rs (b, Oc # list) = False" "wcode_on_left_moving_3 ires rs (b, Bk # list) \ b \ []" "wcode_on_checking_3 ires rs (b, Bk # list) \ b \ []" "wcode_goon_checking_3 ires rs (b, Oc # list) = False" by(auto simp: wcode_halt_invs) lemma wcode_goon_checking_3_mv_Bk[simp]: "wcode_on_checking_3 ires rs (b, Bk # list) \ wcode_goon_checking_3 ires rs (tl b, hd b # Bk # list)" apply(auto simp: wcode_halt_invs) done lemma t_halt_case_correctness: shows "let P = (\ (st, l, r). st = 0) in let Q = (\ (st, l, r). wcode_halt_case_inv st ires rs (l, r)) in let f = (\ stp. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp) in \ n .P (f n) \ Q (f (n::nat))" proof - let ?P = "(\ (st, l, r). st = 0)" let ?Q = "(\ (st, l, r). 
wcode_halt_case_inv st ires rs (l, r))" let ?f = "(\ stp. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp)" have "\ n. ?P (?f n) \ ?Q (?f (n::nat))" proof(rule_tac halt_lemma2) show "wf wcode_halt_case_le" by auto next { fix na obtain a b c where abc:"?f na = (a,b,c)" by(cases "?f na",auto) hence "\ ?P (?f na) \ ?Q (?f na) \ ?Q (?f (Suc na)) \ (?f (Suc na), ?f na) \ wcode_halt_case_le" apply(simp add: step.simps) apply(cases c;cases "hd c") apply(auto simp: wcode_halt_case_le_def lex_pair_def split: if_splits) done } thus "\ na. \ ?P (?f na) \ ?Q (?f na) \ ?Q (?f (Suc na)) \ (?f (Suc na), ?f na) \ wcode_halt_case_le" by blast next show "?Q (?f 0)" apply(simp add: steps.simps wcode_halt_invs) apply(rule_tac x = "Suc m" in exI, simp) apply(rule_tac x = "Suc 0" in exI, auto) done next show "\ ?P (?f 0)" apply(simp add: steps.simps) done qed thus "?thesis" apply(auto) done qed declare wcode_halt_case_inv.simps[simp del] lemma leading_Oc[intro]: "\ xs. ( :: cell list) = Oc # xs" apply(case_tac "rev list", simp) apply(simp add: tape_of_nl_cons) done lemma wcode_halt_case: "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(Suc rs) @ Bk\(rn))" proof - let ?P = "\(st, l, r). st = 0" let ?Q = "\(st, l, r). wcode_halt_case_inv st ires rs (l, r)" let ?f = "steps0 (Suc 0, Bk # Bk \ m @ Oc # Bk # Bk # ires, Bk # Oc \ Suc rs @ Bk \ n) t_wcode_main" from t_halt_case_correctness[of ires rs m n] obtain n where "?P (?f n) \ ?Q (?f n)" by metis thus ?thesis apply(simp add: wcode_halt_case_inv.simps wcode_stop.simps) apply(case_tac "steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main n") apply(auto simp: wcode_halt_case_inv.simps wcode_stop.simps) by auto qed lemma bl_bin_one[simp]: "bl_bin [Oc] = 1" apply(simp add: bl_bin.simps) done lemma twice_power[intro]: "2 * 2 ^ a = Suc (Suc (2 * bl_bin (Oc \ a)))" apply(induct a, auto simp: bl_bin.simps) done declare replicate_Suc[simp del] lemma t_wcode_main_lemma_pre: "\args \ []; lm = \ \ \ stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ rev lm @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin lm + rs * 2^(length lm - 1) ) @ Bk\(rn))" proof(induct "length args" arbitrary: args lm rs m n, simp) fix x args lm rs m n assume ind: "\args lm rs m n. \x = length args; (args::nat list) \ []; lm = \ \ \stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ rev lm @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin lm + rs * 2 ^ (length lm - 1)) @ Bk\(rn))" and h: "Suc x = length args" "(args::nat list) \ []" "lm = " from h have "\ (a::nat) xs. args = xs @ [a]" apply(rule_tac x = "last args" in exI) apply(rule_tac x = "butlast args" in exI, auto) done from this obtain a xs where "args = xs @ [a]" by blast from h and this show "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ rev lm @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin lm + rs * 2 ^ (length lm - 1)) @ Bk\(rn))" proof(case_tac "xs::nat list", simp) show "\stp ln rn. steps0 (Suc 0, Bk # Bk \ m @ Oc \ Suc a @ Bk # Bk # ires, Bk # Oc \ Suc rs @ Bk \ n) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ (bl_bin (Oc \ Suc a) + rs * 2 ^ a) @ Bk \ rn)" proof(induct "a" arbitrary: m n rs ires, simp) fix m n rs ires show "\stp ln rn. 
steps0 (Suc 0, Bk # Bk \ m @ Oc # Bk # Bk # ires, Bk # Oc \ Suc rs @ Bk \ n) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ Suc rs @ Bk \ rn)" apply(rule_tac wcode_halt_case) done next fix a m n rs ires assume ind2: "\m n rs ires. \stp ln rn. steps0 (Suc 0, Bk # Bk \ m @ Oc \ Suc a @ Bk # Bk # ires, Bk # Oc \ Suc rs @ Bk \ n) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ (bl_bin (Oc \ Suc a) + rs * 2 ^ a) @ Bk \ rn)" show " \stp ln rn. steps0 (Suc 0, Bk # Bk \ m @ Oc \ Suc (Suc a) @ Bk # Bk # ires, Bk # Oc \ Suc rs @ Bk \ n) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ (bl_bin (Oc \ Suc (Suc a)) + rs * 2 ^ Suc a) @ Bk \ rn)" proof - have "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc (2 * rs + 2)) @ Bk\(rn))" apply(simp add: tape_of_nat) using wcode_double_case[of m "Oc\(a) @ Bk # Bk # ires" rs n] apply(simp add: replicate_Suc) done from this obtain stpa lna rna where stp1: "steps0 (Suc 0, Bk # Bk\(m) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stpa = (Suc 0, Bk # Bk\(lna) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc (2 * rs + 2)) @ Bk\(rna))" by blast moreover have "\stp ln rn. steps0 (Suc 0, Bk # Bk\(lna) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc (2 * rs + 2)) @ Bk\(rna)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin () + (2*rs + 2) * 2 ^ a) @ Bk\(rn))" using ind2[of lna ires "2*rs + 2" rna] by(simp add: tape_of_list_def tape_of_nat_def) from this obtain stpb lnb rnb where stp2: "steps0 (Suc 0, Bk # Bk\(lna) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc (2 * rs + 2)) @ Bk\(rna)) t_wcode_main stpb = (0, Bk # ires, Bk # Oc # Bk\(lnb) @ Bk # Bk # Oc\(bl_bin () + (2*rs + 2) * 2 ^ a) @ Bk\(rnb))" by blast from stp1 and stp2 show "?thesis" apply(rule_tac x = "stpa + stpb" in exI, rule_tac x = lnb in exI, rule_tac x = rnb in exI, simp add: tape_of_nat_def) apply(simp add: bl_bin.simps replicate_Suc) apply(auto) done qed qed next fix aa list assume g: "Suc x = length args" "args \ []" "lm = " "args = xs @ [a::nat]" "xs = (aa::nat) # list" thus "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ rev lm @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin lm + rs * 2 ^ (length lm - 1)) @ Bk\(rn))" - proof(induct a arbitrary: m n rs args lm, simp_all add: tape_of_nl_rev, - simp only: tape_of_nl_cons_app1, simp) + proof(induct a arbitrary: m n rs args lm, simp_all add: tape_of_nl_rev del: subst_all subst_all', + simp only: tape_of_nl_cons_app1, simp del: subst_all subst_all') fix m n rs args lm have "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # rev (<(aa::nat) # list>) @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rn))" proof(simp add: tape_of_nl_rev) have "\ xs. () = Oc # xs" by auto from this obtain xs where "() = Oc # xs" .. thus "\stp ln rn. 
steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ @ Bk # Bk # ires, Bk # Oc\(5 + 4 * rs) @ Bk\(rn))" apply(simp) using wcode_fourtimes_case[of m "xs @ Bk # Bk # ires" rs n] apply(simp) done qed from this obtain stpa lna rna where stp1: "steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # rev () @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stpa = (Suc 0, Bk # Bk\(lna) @ rev () @ Bk # Bk # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rna))" by blast from g have "\ stp ln rn. steps0 (Suc 0, Bk # Bk\(lna) @ rev (<(aa::nat) # list>) @ Bk # Bk # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rna)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin ()+ (4*rs + 4) * 2^(length () - 1) ) @ Bk\(rn))" apply(rule_tac args = "(aa::nat)#list" in ind, simp_all) done from this obtain stpb lnb rnb where stp2: "steps0 (Suc 0, Bk # Bk\(lna) @ rev (<(aa::nat) # list>) @ Bk # Bk # ires, Bk # Oc\(Suc (4*rs + 4)) @ Bk\(rna)) t_wcode_main stpb = (0, Bk # ires, Bk # Oc # Bk\(lnb) @ Bk # Bk # Oc\(bl_bin ()+ (4*rs + 4) * 2^(length () - 1) ) @ Bk\(rnb))" by blast from stp1 and stp2 and h show "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc # Bk # @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin (Oc\(Suc aa) @ Bk # ) + rs * (2 * 2 ^ (aa + length ()))) @ Bk\(rn))" apply(rule_tac x = "stpa + stpb" in exI, rule_tac x = lnb in exI, rule_tac x = rnb in exI, simp add: steps_add tape_of_nl_rev) done next fix ab m n rs args lm assume ind2: "\ m n rs args lm. \lm = ; args = aa # list @ [ab]\ \ \stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin () + rs * 2 ^ (length () - Suc 0)) @ Bk\(rn))" and k: "args = aa # list @ [Suc ab]" "lm = " show "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires,Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin () + rs * 2 ^ (length () - Suc 0)) @ Bk\(rn))" proof(simp add: tape_of_nl_cons_app1) have "\stp ln rn. steps0 (Suc 0, Bk # Bk\(m) @ Oc\(Suc (Suc ab)) @ Bk # @ Bk # Bk # ires, Bk # Oc # Oc\(rs) @ Bk\(n)) t_wcode_main stp = (Suc 0, Bk # Bk\(ln) @ Oc\(Suc ab) @ Bk # @ Bk # Bk # ires, Bk # Oc\(Suc (2*rs + 2)) @ Bk\(rn))" using wcode_double_case[of m "Oc\(ab) @ Bk # @ Bk # Bk # ires" rs n] apply(simp add: replicate_Suc) done from this obtain stpa lna rna where stp1: "steps0 (Suc 0, Bk # Bk\(m) @ Oc\(Suc (Suc ab)) @ Bk # @ Bk # Bk # ires, Bk # Oc # Oc\(rs) @ Bk\(n)) t_wcode_main stpa = (Suc 0, Bk # Bk\(lna) @ Oc\(Suc ab) @ Bk # @ Bk # Bk # ires, Bk # Oc\(Suc (2*rs + 2)) @ Bk\(rna))" by blast from k have "\ stp ln rn. steps0 (Suc 0, Bk # Bk\(lna) @ @ Bk # Bk # ires, Bk # Oc\(Suc (2*rs + 2)) @ Bk\(rna)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin ( ) + (2*rs + 2)* 2^(length () - Suc 0)) @ Bk\(rn))" apply(rule_tac ind2, simp_all) done from this obtain stpb lnb rnb where stp2: "steps0 (Suc 0, Bk # Bk\(lna) @ @ Bk # Bk # ires, Bk # Oc\(Suc (2*rs + 2)) @ Bk\(rna)) t_wcode_main stpb = (0, Bk # ires, Bk # Oc # Bk\(lnb) @ Bk # Bk # Oc\(bl_bin ( ) + (2*rs + 2)* 2^(length () - Suc 0)) @ Bk\(rnb))" by blast from stp1 and stp2 show "\stp ln rn. 
steps0 (Suc 0, Bk # Bk\(m) @ Oc\(Suc (Suc ab)) @ Bk # @ Bk # Bk # ires, Bk # Oc\(Suc rs) @ Bk\(n)) t_wcode_main stp = (0, Bk # ires, Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin (Oc\(Suc aa) @ Bk # ) + rs * (2 * 2 ^ (aa + length ()))) @ Bk\(rn))" apply(rule_tac x = "stpa + stpb" in exI, rule_tac x = lnb in exI, rule_tac x = rnb in exI, simp add: steps_add tape_of_nl_cons_app1 replicate_Suc) done qed qed qed qed definition t_wcode_prepare :: "instr list" where "t_wcode_prepare \ [(W1, 2), (L, 1), (L, 3), (R, 2), (R, 4), (W0, 3), (R, 4), (R, 5), (R, 6), (R, 5), (R, 7), (R, 5), (W1, 7), (L, 0)]" fun wprepare_add_one :: "nat \ nat list \ tape \ bool" where "wprepare_add_one m lm (l, r) = (\ rn. l = [] \ (r = @ Bk\(rn) \ r = Bk # @ Bk\(rn)))" fun wprepare_goto_first_end :: "nat \ nat list \ tape \ bool" where "wprepare_goto_first_end m lm (l, r) = (\ ml mr rn. l = Oc\(ml) \ r = Oc\(mr) @ Bk # @ Bk\(rn) \ ml + mr = Suc (Suc m))" fun wprepare_erase :: "nat \ nat list \ tape \ bool" where "wprepare_erase m lm (l, r) = (\ rn. l = Oc\(Suc m) \ tl r = Bk # @ Bk\(rn))" fun wprepare_goto_start_pos_B :: "nat \ nat list \ tape \ bool" where "wprepare_goto_start_pos_B m lm (l, r) = (\ rn. l = Bk # Oc\(Suc m) \ r = Bk # @ Bk\(rn))" fun wprepare_goto_start_pos_O :: "nat \ nat list \ tape \ bool" where "wprepare_goto_start_pos_O m lm (l, r) = (\ rn. l = Bk # Bk # Oc\(Suc m) \ r = @ Bk\(rn))" fun wprepare_goto_start_pos :: "nat \ nat list \ tape \ bool" where "wprepare_goto_start_pos m lm (l, r) = (wprepare_goto_start_pos_B m lm (l, r) \ wprepare_goto_start_pos_O m lm (l, r))" fun wprepare_loop_start_on_rightmost :: "nat \ nat list \ tape \ bool" where "wprepare_loop_start_on_rightmost m lm (l, r) = (\ rn mr. rev l @ r = Oc\(Suc m) @ Bk # Bk # @ Bk\(rn) \ l \ [] \ r = Oc\(mr) @ Bk\(rn))" fun wprepare_loop_start_in_middle :: "nat \ nat list \ tape \ bool" where "wprepare_loop_start_in_middle m lm (l, r) = (\ rn (mr:: nat) (lm1::nat list). rev l @ r = Oc\(Suc m) @ Bk # Bk # @ Bk\(rn) \ l \ [] \ r = Oc\(mr) @ Bk # @ Bk\(rn) \ lm1 \ [])" fun wprepare_loop_start :: "nat \ nat list \ tape \ bool" where "wprepare_loop_start m lm (l, r) = (wprepare_loop_start_on_rightmost m lm (l, r) \ wprepare_loop_start_in_middle m lm (l, r))" fun wprepare_loop_goon_on_rightmost :: "nat \ nat list \ tape \ bool" where "wprepare_loop_goon_on_rightmost m lm (l, r) = (\ rn. l = Bk # @ Bk # Bk # Oc\(Suc m) \ r = Bk\(rn))" fun wprepare_loop_goon_in_middle :: "nat \ nat list \ tape \ bool" where "wprepare_loop_goon_in_middle m lm (l, r) = (\ rn (mr:: nat) (lm1::nat list). rev l @ r = Oc\(Suc m) @ Bk # Bk # @ Bk\(rn) \ l \ [] \ (if lm1 = [] then r = Oc\(mr) @ Bk\(rn) else r = Oc\(mr) @ Bk # @ Bk\(rn)) \ mr > 0)" fun wprepare_loop_goon :: "nat \ nat list \ tape \ bool" where "wprepare_loop_goon m lm (l, r) = (wprepare_loop_goon_in_middle m lm (l, r) \ wprepare_loop_goon_on_rightmost m lm (l, r))" fun wprepare_add_one2 :: "nat \ nat list \ tape \ bool" where "wprepare_add_one2 m lm (l, r) = (\ rn. l = Bk # Bk # @ Bk # Bk # Oc\(Suc m) \ (r = [] \ tl r = Bk\(rn)))" fun wprepare_stop :: "nat \ nat list \ tape \ bool" where "wprepare_stop m lm (l, r) = (\ rn. 
l = Bk # @ Bk # Bk # Oc\(Suc m) \ r = Bk # Oc # Bk\(rn))" fun wprepare_inv :: "nat \ nat \ nat list \ tape \ bool" where "wprepare_inv st m lm (l, r) = (if st = 0 then wprepare_stop m lm (l, r) else if st = Suc 0 then wprepare_add_one m lm (l, r) else if st = Suc (Suc 0) then wprepare_goto_first_end m lm (l, r) else if st = Suc (Suc (Suc 0)) then wprepare_erase m lm (l, r) else if st = 4 then wprepare_goto_start_pos m lm (l, r) else if st = 5 then wprepare_loop_start m lm (l, r) else if st = 6 then wprepare_loop_goon m lm (l, r) else if st = 7 then wprepare_add_one2 m lm (l, r) else False)" fun wprepare_stage :: "config \ nat" where "wprepare_stage (st, l, r) = (if st \ 1 \ st \ 4 then 3 else if st = 5 \ st = 6 then 2 else 1)" fun wprepare_state :: "config \ nat" where "wprepare_state (st, l, r) = (if st = 1 then 4 else if st = Suc (Suc 0) then 3 else if st = Suc (Suc (Suc 0)) then 2 else if st = 4 then 1 else if st = 7 then 2 else 0)" fun wprepare_step :: "config \ nat" where "wprepare_step (st, l, r) = (if st = 1 then (if hd r = Oc then Suc (length l) else 0) else if st = Suc (Suc 0) then length r else if st = Suc (Suc (Suc 0)) then (if hd r = Oc then 1 else 0) else if st = 4 then length r else if st = 5 then Suc (length r) else if st = 6 then (if r = [] then 0 else Suc (length r)) else if st = 7 then (if (r \ [] \ hd r = Oc) then 0 else 1) else 0)" fun wcode_prepare_measure :: "config \ nat \ nat \ nat" where "wcode_prepare_measure (st, l, r) = (wprepare_stage (st, l, r), wprepare_state (st, l, r), wprepare_step (st, l, r))" definition wcode_prepare_le :: "(config \ config) set" where "wcode_prepare_le \ (inv_image lex_triple wcode_prepare_measure)" lemma wf_wcode_prepare_le[intro]: "wf wcode_prepare_le" by(auto intro:wf_inv_image simp: wcode_prepare_le_def lex_triple_def) declare wprepare_add_one.simps[simp del] wprepare_goto_first_end.simps[simp del] wprepare_erase.simps[simp del] wprepare_goto_start_pos.simps[simp del] wprepare_loop_start.simps[simp del] wprepare_loop_goon.simps[simp del] wprepare_add_one2.simps[simp del] lemmas wprepare_invs = wprepare_add_one.simps wprepare_goto_first_end.simps wprepare_erase.simps wprepare_goto_start_pos.simps wprepare_loop_start.simps wprepare_loop_goon.simps wprepare_add_one2.simps declare wprepare_inv.simps[simp del] lemma fetch_t_wcode_prepare[simp]: "fetch t_wcode_prepare (Suc 0) Bk = (W1, 2)" "fetch t_wcode_prepare (Suc 0) Oc = (L, 1)" "fetch t_wcode_prepare (Suc (Suc 0)) Bk = (L, 3)" "fetch t_wcode_prepare (Suc (Suc 0)) Oc = (R, 2)" "fetch t_wcode_prepare (Suc (Suc (Suc 0))) Bk = (R, 4)" "fetch t_wcode_prepare (Suc (Suc (Suc 0))) Oc = (W0, 3)" "fetch t_wcode_prepare 4 Bk = (R, 4)" "fetch t_wcode_prepare 4 Oc = (R, 5)" "fetch t_wcode_prepare 5 Oc = (R, 5)" "fetch t_wcode_prepare 5 Bk = (R, 6)" "fetch t_wcode_prepare 6 Oc = (R, 5)" "fetch t_wcode_prepare 6 Bk = (R, 7)" "fetch t_wcode_prepare 7 Oc = (L, 0)" "fetch t_wcode_prepare 7 Bk = (W1, 7)" unfolding fetch.simps t_wcode_prepare_def nth_of.simps numeral by auto lemma wprepare_add_one_nonempty_snd[simp]: "lm \ [] \ wprepare_add_one m lm (b, []) = False" apply(simp add: wprepare_invs) done lemma wprepare_goto_first_end_nonempty_snd[simp]: "lm \ [] \ wprepare_goto_first_end m lm (b, []) = False" apply(simp add: wprepare_invs) done lemma wprepare_erase_nonempty_snd[simp]: "lm \ [] \ wprepare_erase m lm (b, []) = False" apply(simp add: wprepare_invs) done lemma wprepare_goto_start_pos_nonempty_snd[simp]: "lm \ [] \ wprepare_goto_start_pos m lm (b, []) = False" apply(simp add: wprepare_invs) 
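(* as in the preceding nonempty lemmas: for nonempty lm, wprepare_goto_start_pos
   forces a nonempty right half of the tape, so unfolding wprepare_invs closes the goal *)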
done lemma wprepare_loop_start_empty_nonempty_fst[simp]: "\lm \ []; wprepare_loop_start m lm (b, [])\ \ b \ []" apply(simp add: wprepare_invs) done lemma rev_eq: "rev xs = rev ys \ xs = ys" by auto lemma wprepare_loop_goon_Bk_empty_snd[simp]: "\lm \ []; wprepare_loop_start m lm (b, [])\ \ wprepare_loop_goon m lm (Bk # b, [])" apply(simp only: wprepare_invs) apply(erule_tac disjE) apply(rule_tac disjI2) apply(simp add: wprepare_loop_start_on_rightmost.simps wprepare_loop_goon_on_rightmost.simps, auto) apply(rule_tac rev_eq, simp add: tape_of_nl_rev) done lemma wprepare_loop_goon_nonempty_fst[simp]: "\lm \ []; wprepare_loop_goon m lm (b, [])\ \ b \ []" apply(simp only: wprepare_invs, auto) done lemma wprepare_add_one2_Bk_empty[simp]:"\lm \ []; wprepare_loop_goon m lm (b, [])\ \ wprepare_add_one2 m lm (Bk # b, [])" apply(simp only: wprepare_invs, auto split: if_splits) done lemma wprepare_add_one2_nonempty_fst[simp]: "wprepare_add_one2 m lm (b, []) \ b \ []" apply(simp only: wprepare_invs, auto) done lemma wprepare_add_one2_Oc[simp]: "wprepare_add_one2 m lm (b, []) \ wprepare_add_one2 m lm (b, [Oc])" apply(simp only: wprepare_invs, auto) done lemma Bk_not_tape_start[simp]: "(Bk # list = <(m::nat) # lm> @ ys) = False" apply(case_tac lm, auto simp: tape_of_nl_cons replicate_Suc) done lemma wprepare_goto_first_end_cases[simp]: "\lm \ []; wprepare_add_one m lm (b, Bk # list)\ \ (b = [] \ wprepare_goto_first_end m lm ([], Oc # list)) \ (b \ [] \ wprepare_goto_first_end m lm (b, Oc # list))" apply(simp only: wprepare_invs) apply(auto simp: tape_of_nl_cons split: if_splits) apply(cases lm, auto simp add:tape_of_list_def replicate_Suc) done lemma wprepare_goto_first_end_Bk_nonempty_fst[simp]: "wprepare_goto_first_end m lm (b, Bk # list) \ b \ []" apply(simp only: wprepare_invs , auto simp: replicate_Suc) done declare replicate_Suc[simp] lemma wprepare_erase_elem_Bk_rest[simp]: "wprepare_goto_first_end m lm (b, Bk # list) \ wprepare_erase m lm (tl b, hd b # Bk # list)" by(simp add: wprepare_invs) lemma wprepare_erase_Bk_nonempty_fst[simp]: "wprepare_erase m lm (b, Bk # list) \ b \ []" by(simp add: wprepare_invs) lemma wprepare_goto_start_pos_Bk[simp]: "wprepare_erase m lm (b, Bk # list) \ wprepare_goto_start_pos m lm (Bk # b, list)" apply(simp only: wprepare_invs, auto) done lemma wprepare_add_one_Bk_nonempty_snd[simp]: "\wprepare_add_one m lm (b, Bk # list)\ \ list \ []" apply(simp only: wprepare_invs) apply(case_tac lm, simp_all add: tape_of_list_def tape_of_nat_def, auto) done lemma wprepare_goto_first_end_nonempty_snd_tl[simp]: "\lm \ []; wprepare_goto_first_end m lm (b, Bk # list)\ \ list \ []" by(simp only: wprepare_invs, auto) lemma wprepare_erase_Bk_nonempty_list[simp]: "\lm \ []; wprepare_erase m lm (b, Bk # list)\ \ list \ []" apply(simp only: wprepare_invs, auto) done lemma wprepare_goto_start_pos_Bk_nonempty[simp]: "\lm \ []; wprepare_goto_start_pos m lm (b, Bk # list)\ \ list \ []" by(cases lm;cases list;simp only: wprepare_invs, auto) lemma wprepare_goto_start_pos_Bk_nonempty_fst[simp]: "\lm \ []; wprepare_goto_start_pos m lm (b, Bk # list)\ \ b \ []" apply(simp only: wprepare_invs) apply(auto) done lemma wprepare_loop_goon_Bk_nonempty[simp]: "\lm \ []; wprepare_loop_goon m lm (b, Bk # list)\ \ b \ []" apply(simp only: wprepare_invs, auto) done lemma wprepare_loop_goon_wprepare_add_one2_cases[simp]: "\lm \ []; wprepare_loop_goon m lm (b, Bk # list)\ \ (list = [] \ wprepare_add_one2 m lm (Bk # b, [])) \ (list \ [] \ wprepare_add_one2 m lm (Bk # b, list))" unfolding wprepare_invs 
apply(cases list;auto split:nat.split if_splits) by (metis list.sel(3) tl_replicate) lemma wprepare_add_one2_nonempty[simp]: "wprepare_add_one2 m lm (b, Bk # list) \ b \ []" apply(simp only: wprepare_invs, simp) done lemma wprepare_add_one2_cases[simp]: "wprepare_add_one2 m lm (b, Bk # list) \ (list = [] \ wprepare_add_one2 m lm (b, [Oc])) \ (list \ [] \ wprepare_add_one2 m lm (b, Oc # list))" apply(simp only: wprepare_invs, auto) done lemma wprepare_goto_first_end_cases_Oc[simp]: "wprepare_goto_first_end m lm (b, Oc # list) \ (b = [] \ wprepare_goto_first_end m lm ([Oc], list)) \ (b \ [] \ wprepare_goto_first_end m lm (Oc # b, list))" apply(simp only: wprepare_invs, auto) apply(rule_tac x = 1 in exI, auto) apply(rename_tac ml mr rn) apply(case_tac mr, simp_all add: ) apply(case_tac ml, simp_all add: ) apply(rule_tac x = "Suc ml" in exI, simp_all add: ) apply(rule_tac x = "mr - 1" in exI, simp) done lemma wprepare_erase_nonempty[simp]: "wprepare_erase m lm (b, Oc # list) \ b \ []" apply(simp only: wprepare_invs, auto simp: ) done lemma wprepare_erase_Bk[simp]: "wprepare_erase m lm (b, Oc # list) \ wprepare_erase m lm (b, Bk # list)" apply(simp only:wprepare_invs, auto simp: ) done lemma wprepare_goto_start_pos_Bk_move[simp]: "\lm \ []; wprepare_goto_start_pos m lm (b, Bk # list)\ \ wprepare_goto_start_pos m lm (Bk # b, list)" apply(simp only:wprepare_invs, auto) apply(case_tac [!] lm, simp, simp_all) done lemma wprepare_loop_start_b_nonempty[simp]: "wprepare_loop_start m lm (b, aa) \ b \ []" apply(simp only:wprepare_invs, auto) done lemma exists_exp_of_Bk[elim]: "Bk # list = Oc\(mr) @ Bk\(rn) \ \rn. list = Bk\(rn)" apply(case_tac mr, simp_all) apply(case_tac rn, simp_all) done lemma wprepare_loop_start_in_middle_Bk_False[simp]: "wprepare_loop_start_in_middle m lm (b, [Bk]) = False" by(auto) declare wprepare_loop_start_in_middle.simps[simp del] declare wprepare_loop_start_on_rightmost.simps[simp del] wprepare_loop_goon_in_middle.simps[simp del] wprepare_loop_goon_on_rightmost.simps[simp del] lemma wprepare_loop_goon_in_middle_Bk_False[simp]: "wprepare_loop_goon_in_middle m lm (Bk # b, []) = False" apply(simp add: wprepare_loop_goon_in_middle.simps, auto) done lemma wprepare_loop_goon_Bk[simp]: "\lm \ []; wprepare_loop_start m lm (b, [Bk])\ \ wprepare_loop_goon m lm (Bk # b, [])" unfolding wprepare_invs apply(auto simp add: wprepare_loop_goon_on_rightmost.simps wprepare_loop_start_on_rightmost.simps) apply(rule_tac rev_eq) apply(simp add: tape_of_nl_rev) apply(simp add: exp_ind replicate_Suc[THEN sym] del: replicate_Suc) done lemma wprepare_loop_goon_in_middle_Bk_False2[simp]: "wprepare_loop_start_on_rightmost m lm (b, Bk # a # lista) \ wprepare_loop_goon_in_middle m lm (Bk # b, a # lista) = False" apply(auto simp: wprepare_loop_start_on_rightmost.simps wprepare_loop_goon_in_middle.simps) done lemma wprepare_loop_goon_on_rightbmost_Bk_False[simp]: "\lm \ []; wprepare_loop_start_on_rightmost m lm (b, Bk # a # lista)\ \ wprepare_loop_goon_on_rightmost m lm (Bk # b, a # lista)" apply(simp only: wprepare_loop_start_on_rightmost.simps wprepare_loop_goon_on_rightmost.simps, auto simp: tape_of_nl_rev) apply(simp add: replicate_Suc[THEN sym] exp_ind tape_of_nl_rev del: replicate_Suc) by (meson Cons_replicate_eq) lemma wprepare_loop_goon_in_middle_Bk_False3[simp]: assumes "lm \ []" "wprepare_loop_start_in_middle m lm (b, Bk # a # lista)" shows "wprepare_loop_goon_in_middle m lm (Bk # b, a # lista)" (is "?t1") and "wprepare_loop_goon_on_rightmost m lm (Bk # b, a # lista) = False" (is ?t2) proof - from 
assms obtain rn mr lm1 where *:"rev b @ Oc \ mr @ Bk # = Oc # Oc \ m @ Bk # Bk # " "b \ []" "Bk # a # lista = Oc \ mr @ Bk # @ Bk \ rn" "lm1 \ []" by(auto simp add: wprepare_loop_start_in_middle.simps) thus ?t1 apply(simp add: wprepare_loop_start_in_middle.simps wprepare_loop_goon_in_middle.simps, auto) apply(rule_tac x = rn in exI, simp) apply(case_tac mr, simp_all add: ) apply(case_tac lm1, simp) apply(rule_tac x = "Suc (hd lm1)" in exI, simp) apply(rule_tac x = "tl lm1" in exI) apply(case_tac "tl lm1", simp_all add: tape_of_list_def tape_of_nat_def) done from * show ?t2 apply(simp add: wprepare_loop_start_in_middle.simps wprepare_loop_goon_on_rightmost.simps del:split_head_repeat, auto simp del:split_head_repeat) apply(case_tac mr) apply(case_tac "lm1::nat list", simp_all, case_tac "tl lm1", simp_all) apply(auto simp add: tape_of_list_def ) apply(case_tac [!] rna, simp_all add: ) apply(case_tac mr, simp_all add: ) apply(case_tac lm1, simp, case_tac list, simp) apply(simp_all add: tape_of_nat_def) by (metis Bk_not_tape_start tape_of_list_def tape_of_nat_list.elims) qed lemma wprepare_loop_goon_Bk2[simp]: "\lm \ []; wprepare_loop_start m lm (b, Bk # a # lista)\ \ wprepare_loop_goon m lm (Bk # b, a # lista)" apply(simp add: wprepare_loop_start.simps wprepare_loop_goon.simps) apply(erule_tac disjE, simp, auto) done lemma start_2_goon: "\lm \ []; wprepare_loop_start m lm (b, Bk # list)\ \ (list = [] \ wprepare_loop_goon m lm (Bk # b, [])) \ (list \ [] \ wprepare_loop_goon m lm (Bk # b, list))" apply(case_tac list, auto) done lemma add_one_2_add_one: "wprepare_add_one m lm (b, Oc # list) \ (hd b = Oc \ (b = [] \ wprepare_add_one m lm ([], Bk # Oc # list)) \ (b \ [] \ wprepare_add_one m lm (tl b, Oc # Oc # list))) \ (hd b \ Oc \ (b = [] \ wprepare_add_one m lm ([], Bk # Oc # list)) \ (b \ [] \ wprepare_add_one m lm (tl b, hd b # Oc # list)))" unfolding wprepare_add_one.simps by auto lemma wprepare_loop_start_on_rightmost_Oc[simp]: "wprepare_loop_start_on_rightmost m lm (b, Oc # list) \ wprepare_loop_start_on_rightmost m lm (Oc # b, list)" apply(simp add: wprepare_loop_start_on_rightmost.simps) by (metis Cons_replicate_eq cell.distinct(1) list.sel(3) self_append_conv2 tl_append2 tl_replicate) lemma wprepare_loop_start_in_middle_Oc[simp]: assumes "wprepare_loop_start_in_middle m lm (b, Oc # list)" shows "wprepare_loop_start_in_middle m lm (Oc # b, list)" proof - from assms obtain mr lm1 rn where "rev b @ Oc \ mr @ Bk # = Oc # Oc \ m @ Bk # Bk # " "Oc # list = Oc \ mr @ Bk # @ Bk \ rn" "lm1 \ []" by(auto simp add: wprepare_loop_start_in_middle.simps) thus ?thesis apply(auto simp add: wprepare_loop_start_in_middle.simps) apply(rule_tac x = rn in exI, auto) apply(case_tac mr, simp, simp add: ) apply(rule_tac x = "mr - 1" in exI, simp) apply(rule_tac x = lm1 in exI, simp) done qed lemma start_2_start: "wprepare_loop_start m lm (b, Oc # list) \ wprepare_loop_start m lm (Oc # b, list)" apply(simp add: wprepare_loop_start.simps) apply(erule_tac disjE, simp_all ) done lemma wprepare_loop_goon_Oc_nonempty[simp]: "wprepare_loop_goon m lm (b, Oc # list) \ b \ []" apply(simp add: wprepare_loop_goon.simps wprepare_loop_goon_in_middle.simps wprepare_loop_goon_on_rightmost.simps) apply(auto) done lemma wprepare_goto_start_pos_Oc_nonempty[simp]: "wprepare_goto_start_pos m lm (b, Oc # list) \ b \ []" apply(simp add: wprepare_goto_start_pos.simps) done lemma wprepare_loop_goon_on_rightmost_Oc_False[simp]: "wprepare_loop_goon_on_rightmost m lm (b, Oc # list) = False" apply(simp add: 
wprepare_loop_goon_on_rightmost.simps) done lemma wprepare_loop1: "\rev b @ Oc\(mr) = Oc\(Suc m) @ Bk # Bk # ; b \ []; 0 < mr; Oc # list = Oc\(mr) @ Bk\(rn)\ \ wprepare_loop_start_on_rightmost m lm (Oc # b, list)" apply(simp add: wprepare_loop_start_on_rightmost.simps) apply(rule_tac x = rn in exI, simp) apply(case_tac mr, simp, simp) done lemma wprepare_loop2: "\rev b @ Oc\(mr) @ Bk # = Oc\(Suc m) @ Bk # Bk # ; b \ []; Oc # list = Oc\(mr) @ Bk # <(a::nat) # lista> @ Bk\(rn)\ \ wprepare_loop_start_in_middle m lm (Oc # b, list)" apply(simp add: wprepare_loop_start_in_middle.simps) apply(rule_tac x = rn in exI, simp) apply(case_tac mr, simp_all add: ) apply(rename_tac nat) apply(rule_tac x = nat in exI, simp) apply(rule_tac x = "a#lista" in exI, simp) done lemma wprepare_loop_goon_in_middle_cases[simp]: "wprepare_loop_goon_in_middle m lm (b, Oc # list) \ wprepare_loop_start_on_rightmost m lm (Oc # b, list) \ wprepare_loop_start_in_middle m lm (Oc # b, list)" apply(simp add: wprepare_loop_goon_in_middle.simps split: if_splits) apply(rename_tac lm1) apply(case_tac lm1, simp_all add: wprepare_loop1 wprepare_loop2) done lemma wprepare_add_one_b[simp]: "wprepare_add_one m lm (b, Oc # list) \ b = [] \ wprepare_add_one m lm ([], Bk # Oc # list)" "wprepare_loop_goon m lm (b, Oc # list) \ wprepare_loop_start m lm (Oc # b, list)" apply(auto simp add: wprepare_add_one.simps wprepare_loop_goon.simps wprepare_loop_start.simps) done lemma wprepare_loop_start_on_rightmost_Oc2[simp]: "wprepare_goto_start_pos m [a] (b, Oc # list) \ wprepare_loop_start_on_rightmost m [a] (Oc # b, list) " apply(auto simp: wprepare_goto_start_pos.simps wprepare_loop_start_on_rightmost.simps) apply(rename_tac rn) apply(rule_tac x = rn in exI, simp) apply(simp add: replicate_Suc[THEN sym] exp_ind del: replicate_Suc) done lemma wprepare_loop_start_in_middle_2_Oc[simp]: "wprepare_goto_start_pos m (a # aa # listaa) (b, Oc # list) \wprepare_loop_start_in_middle m (a # aa # listaa) (Oc # b, list)" apply(auto simp: wprepare_goto_start_pos.simps wprepare_loop_start_in_middle.simps) apply(rename_tac rn) apply(rule_tac x = rn in exI, simp) apply(simp add: exp_ind[THEN sym]) apply(rule_tac x = a in exI, rule_tac x = "aa#listaa" in exI, simp) apply(simp add: tape_of_nl_cons) done lemma wprepare_loop_start_Oc2[simp]: "\lm \ []; wprepare_goto_start_pos m lm (b, Oc # list)\ \ wprepare_loop_start m lm (Oc # b, list)" by(cases lm;cases "tl lm", auto simp add: wprepare_loop_start.simps) lemma wprepare_add_one2_Oc_nonempty[simp]: "wprepare_add_one2 m lm (b, Oc # list) \ b \ []" apply(auto simp: wprepare_add_one2.simps) done lemma add_one_2_stop: "wprepare_add_one2 m lm (b, Oc # list) \ wprepare_stop m lm (tl b, hd b # Oc # list)" apply(simp add: wprepare_add_one2.simps) done declare wprepare_stop.simps[simp del] lemma wprepare_correctness: assumes h: "lm \ []" shows "let P = (\ (st, l, r). st = 0) in let Q = (\ (st, l, r). wprepare_inv st m lm (l, r)) in let f = (\ stp. steps0 (Suc 0, [], ()) t_wcode_prepare stp) in \ n .P (f n) \ Q (f n)" proof - let ?P = "(\ (st, l, r). st = 0)" let ?Q = "(\ (st, l, r). wprepare_inv st m lm (l, r))" let ?f = "(\ stp. steps0 (Suc 0, [], ()) t_wcode_prepare stp)" have "\ n. ?P (?f n) \ ?Q (?f n)" proof(rule_tac halt_lemma2) show "\ n. 
\ ?P (?f n) \ ?Q (?f n) \ ?Q (?f (Suc n)) \ (?f (Suc n), ?f n) \ wcode_prepare_le" using h apply(rule_tac allI, rule_tac impI) apply(rename_tac n) apply(case_tac "?f n", simp add: step.simps) apply(rename_tac c) apply(case_tac c, simp, case_tac [2] aa) apply(simp_all add: wprepare_inv.simps wcode_prepare_le_def lex_triple_def lex_pair_def split: if_splits) (* slow *) apply(simp_all add: start_2_goon start_2_start add_one_2_add_one add_one_2_stop) apply(auto simp: wprepare_add_one2.simps) done qed (auto simp add: steps.simps wprepare_inv.simps wprepare_invs) thus "?thesis" apply(auto) done qed lemma tm_wf_t_wcode_prepare[intro]: "tm_wf (t_wcode_prepare, 0)" apply(simp add:tm_wf.simps t_wcode_prepare_def) done lemma is_28_even[intro]: "(28 + (length t_twice_compile + length t_fourtimes_compile)) mod 2 = 0" by(auto simp: t_twice_compile_def t_fourtimes_compile_def) lemma b_le_28[elim]: "(a, b) \ set t_wcode_main_first_part \ b \ (28 + (length t_twice_compile + length t_fourtimes_compile)) div 2" apply(auto simp: t_wcode_main_first_part_def t_twice_def) done lemma tm_wf_change_termi: assumes "tm_wf (tp, 0)" shows "list_all (\(acn, st). (st \ Suc (length tp div 2))) (adjust0 tp)" proof - { fix acn st n assume "tp ! n = (acn, st)" "n < length tp" "0 < st" hence "(acn, st)\set tp" by (metis nth_mem) with assms tm_wf.simps have "st \ length tp div 2 + 0" by auto hence "st \ Suc (length tp div 2)" by auto } thus ?thesis by(auto simp: tm_wf.simps List.list_all_length adjust.simps split: if_splits prod.split) qed lemma tm_wf_shift: assumes "list_all (\(acn, st). (st \ y)) tp" shows "list_all (\(acn, st). (st \ y + off)) (shift tp off)" proof - have [dest!]:"\ P Q n. \n. Q n \ P n \ Q n \ P n" by metis from assms show ?thesis by(auto simp: tm_wf.simps List.list_all_length shift.simps) qed declare length_tp'[simp del] lemma length_mopup_1[simp]: "length (mopup (Suc 0)) = 16" apply(auto simp: mopup.simps) done lemma twice_plus_28_elim[elim]: "(a, b) \ set (shift (adjust0 t_twice_compile) 12) \ b \ (28 + (length t_twice_compile + length t_fourtimes_compile)) div 2" apply(simp add: t_twice_compile_def t_fourtimes_compile_def) proof - assume g: "(a, b) \ set (shift (adjust (tm_of abc_twice @ shift (mopup (Suc 0)) (length (tm_of abc_twice) div 2)) (Suc ((length (tm_of abc_twice) + 16) div 2))) 12)" moreover have "length (tm_of abc_twice) mod 2 = 0" by auto moreover have "length (tm_of abc_fourtimes) mod 2 = 0" by auto ultimately have "list_all (\(acn, st). (st \ (60 + (length (tm_of abc_twice) + length (tm_of abc_fourtimes))) div 2)) (shift (adjust0 t_twice_compile) 12)" proof(auto simp add: mod_ex1 del: adjust.simps) assume "even (length (tm_of abc_twice))" then obtain q where q:"length (tm_of abc_twice) = 2 * q" by auto assume "even (length (tm_of abc_fourtimes))" then obtain qa where qa:"length (tm_of abc_fourtimes) = 2 * qa" by auto note h = q qa hence "list_all (\(acn, st). st \ (18 + (q + qa)) + 12) (shift (adjust0 t_twice_compile) 12)" proof(rule_tac tm_wf_shift t_twice_compile_def) have "list_all (\(acn, st). st \ Suc (length t_twice_compile div 2)) (adjust0 t_twice_compile)" by(rule_tac tm_wf_change_termi, auto) thus "list_all (\(acn, st). st \ 18 + (q + qa)) (adjust0 t_twice_compile)" using h apply(simp add: t_twice_compile_def, auto simp: List.list_all_length) done qed thus "list_all (\(acn, st). 
st \ 30 + (length (tm_of abc_twice) div 2 + length (tm_of abc_fourtimes) div 2)) (shift (adjust0 t_twice_compile) 12)" using h by simp qed thus "b \ (60 + (length (tm_of abc_twice) + length (tm_of abc_fourtimes))) div 2" using g apply(auto simp:t_twice_compile_def) apply(simp add: Ball_set[THEN sym]) apply(erule_tac x = "(a, b)" in ballE, simp, simp) done qed lemma length_plus_28_elim2[elim]: "(a, b) \ set (shift (adjust0 t_fourtimes_compile) (t_twice_len + 13)) \ b \ (28 + (length t_twice_compile + length t_fourtimes_compile)) div 2" apply(simp add: t_twice_compile_def t_fourtimes_compile_def t_twice_len_def) proof - assume g: "(a, b) \ set (shift (adjust (tm_of abc_fourtimes @ shift (mopup (Suc 0)) (length (tm_of abc_fourtimes) div 2)) (Suc ((length (tm_of abc_fourtimes) + 16) div 2))) (length t_twice div 2 + 13))" moreover have "length (tm_of abc_twice) mod 2 = 0" by auto moreover have "length (tm_of abc_fourtimes) mod 2 = 0" by auto ultimately have "list_all (\(acn, st). (st \ (60 + (length (tm_of abc_twice) + length (tm_of abc_fourtimes))) div 2)) (shift (adjust0 (tm_of abc_fourtimes @ shift (mopup (Suc 0)) (length (tm_of abc_fourtimes) div 2))) (length t_twice div 2 + 13))" proof(auto simp: mod_ex1 t_twice_def t_twice_compile_def) assume "even (length (tm_of abc_twice))" then obtain q where q:"length (tm_of abc_twice) = 2 * q" by auto assume "even (length (tm_of abc_fourtimes))" then obtain qa where qa:"length (tm_of abc_fourtimes) = 2 * qa" by auto note h = q qa hence "list_all (\(acn, st). st \ (9 + qa + (21 + q))) (shift (adjust0 (tm_of abc_fourtimes @ shift (mopup (Suc 0)) qa)) (21 + q))" proof(rule_tac tm_wf_shift t_twice_compile_def) have "list_all (\(acn, st). st \ Suc (length (tm_of abc_fourtimes @ shift (mopup (Suc 0)) qa) div 2)) (adjust0 (tm_of abc_fourtimes @ shift (mopup (Suc 0)) qa))" apply(rule_tac tm_wf_change_termi) using wf_fourtimes h apply(simp add: t_fourtimes_compile_def) done thus "list_all (\(acn, st). st \ 9 + qa) (adjust (tm_of abc_fourtimes @ shift (mopup (Suc 0)) qa) (Suc (length (tm_of abc_fourtimes @ shift (mopup (Suc 0)) qa) div 2)))" using h apply(simp) done qed thus "list_all (\(acn, st). st \ 30 + (length (tm_of abc_twice) div 2 + length (tm_of abc_fourtimes) div 2)) (shift (adjust (tm_of abc_fourtimes @ shift (mopup (Suc 0)) (length (tm_of abc_fourtimes) div 2)) (9 + length (tm_of abc_fourtimes) div 2)) (21 + length (tm_of abc_twice) div 2))" apply(subgoal_tac "qa + q = q + qa") apply(simp add: h) apply(simp) done qed thus "b \ (60 + (length (tm_of abc_twice) + length (tm_of abc_fourtimes))) div 2" using g apply(simp add: Ball_set[THEN sym]) apply(erule_tac x = "(a, b)" in ballE, simp, simp) done qed lemma tm_wf_t_wcode_main[intro]: "tm_wf (t_wcode_main, 0)" by(auto simp: t_wcode_main_def tm_wf.simps t_twice_def t_fourtimes_def del: List.list_all_iff) declare tm_comp.simps[simp del] lemma prepare_mainpart_lemma: "args \ [] \ \ stp ln rn. steps0 (Suc 0, [], ) (t_wcode_prepare |+| t_wcode_main) stp = (0, Bk # Oc\(Suc m), Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin ()) @ Bk\(rn))" proof - let ?P1 = "(\ (l, r). (l::cell list) = [] \ r = )" let ?Q1 = "(\ (l, r). wprepare_stop m args (l, r))" let ?P2 = ?Q1 let ?Q2 = "(\ (l, r). (\ ln rn. l = Bk # Oc\(Suc m) \ r = Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin ()) @ Bk\(rn)))" let ?P3 = "\ tp. False" assume h: "args \ []" have "{?P1} t_wcode_prepare |+| t_wcode_main {?Q2}" proof(rule_tac Hoare_plus_halt) show "{?P1} t_wcode_prepare {?Q1}" proof(rule_tac Hoare_haltI, auto) show "\n. 
is_final (steps0 (Suc 0, [], ) t_wcode_prepare n) \ wprepare_stop m args holds_for steps0 (Suc 0, [], ) t_wcode_prepare n" using wprepare_correctness[of args m,OF h] apply(auto simp add: wprepare_inv.simps) by (metis holds_for.simps is_finalI) qed next show "{?P2} t_wcode_main {?Q2}" proof(rule_tac Hoare_haltI, auto) fix l r assume "wprepare_stop m args (l, r)" thus "\n. is_final (steps0 (Suc 0, l, r) t_wcode_main n) \ (\(l, r). l = Bk # Oc # Oc \ m \ (\ln rn. r = Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ bl_bin () @ Bk \ rn)) holds_for steps0 (Suc 0, l, r) t_wcode_main n" proof(auto simp: wprepare_stop.simps) fix rn show " \n. is_final (steps0 (Suc 0, Bk # @ Bk # Bk # Oc # Oc \ m, Bk # Oc # Bk \ rn) t_wcode_main n) \ (\(l, r). l = Bk # Oc # Oc \ m \ (\ln rn. r = Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ bl_bin () @ Bk \ rn)) holds_for steps0 (Suc 0, Bk # @ Bk # Bk # Oc # Oc \ m, Bk # Oc # Bk \ rn) t_wcode_main n" using t_wcode_main_lemma_pre[of "args" "" 0 "Oc\(Suc m)" 0 rn,OF h refl] apply(auto simp: tape_of_nl_rev) apply(rename_tac stp ln rna) apply(rule_tac x = stp in exI, auto) done qed qed next show "tm_wf0 t_wcode_prepare" by auto qed then obtain n where "\ tp. (case tp of (l, r) \ l = [] \ r = ) \ (is_final (steps0 (1, tp) (t_wcode_prepare |+| t_wcode_main) n) \ (\(l, r). \ln rn. l = Bk # Oc \ Suc m \ r = Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ bl_bin () @ Bk \ rn) holds_for steps0 (1, tp) (t_wcode_prepare |+| t_wcode_main) n)" unfolding Hoare_halt_def by auto thus "?thesis" apply(rule_tac x = n in exI) apply(case_tac "(steps0 (Suc 0, [], ) (adjust0 t_wcode_prepare @ shift t_wcode_main (length t_wcode_prepare div 2)) n)") apply(auto simp: tm_comp.simps) done qed definition tinres :: "cell list \ cell list \ bool" where "tinres xs ys = (\n. xs = ys @ Bk \ n \ ys = xs @ Bk \ n)" lemma tinres_fetch_congr[simp]: "tinres r r' \ fetch t ss (read r) = fetch t ss (read r')" apply(simp add: fetch.simps, auto split: if_splits simp: tinres_def) using hd_replicate apply fastforce using hd_replicate apply fastforce done lemma nonempty_hd_tinres[simp]: "\tinres r r'; r \ []; r' \ []\ \ hd r = hd r'" apply(auto simp: tinres_def) done lemma tinres_nonempty[simp]: "\tinres r []; r \ []\ \ hd r = Bk" "\tinres [] r'; r' \ []\ \ hd r' = Bk" "\tinres r []; r \ []\ \ tinres (tl r) []" "tinres r r' \ tinres (b # r) (b # r')" by(auto simp: tinres_def) lemma ex_move_tl[intro]: "\na. 
tl r = tl (r @ Bk\(n)) @ Bk\(na) \ tl (r @ Bk\(n)) = tl r @ Bk\(na)" apply(case_tac r, simp) by(case_tac n, auto) lemma tinres_tails[simp]: "tinres r r' \ tinres (tl r) (tl r')" apply(auto simp: tinres_def) by(case_tac r', auto) lemma tinres_empty[simp]: "\tinres [] r'\ \ tinres [] (tl r')" "tinres r [] \ tinres (Bk # tl r) [Bk]" "tinres r [] \ tinres (Oc # tl r) [Oc]" by(auto simp: tinres_def) lemma tinres_step2: assumes "tinres r r'" "step0 (ss, l, r) t = (sa, la, ra)" "step0 (ss, l, r') t = (sb, lb, rb)" shows "la = lb \ tinres ra rb \ sa = sb" proof (cases "fetch t ss (read r')") case (Pair a b) have sa:"sa = sb" using assms Pair by(force simp: step.simps) have "la = lb \ tinres ra rb" using assms Pair by(cases a, auto simp: step.simps split: if_splits) thus ?thesis using sa by auto qed lemma tinres_steps2: "\tinres r r'; steps0 (ss, l, r) t stp = (sa, la, ra); steps0 (ss, l, r') t stp = (sb, lb, rb)\ \ la = lb \ tinres ra rb \ sa = sb" proof(induct stp arbitrary: sa la ra sb lb rb) case (Suc stp sa la ra sb lb rb) then show ?case apply(simp) apply(case_tac "(steps0 (ss, l, r) t stp)") apply(case_tac "(steps0 (ss, l, r') t stp)") proof - fix stp a b c aa ba ca assume ind: "\sa la ra sb lb rb. \steps0 (ss, l, r) t stp = (sa, la, ra); steps0 (ss, l, r') t stp = (sb, lb, rb)\ \ la = lb \ tinres ra rb \ sa = sb" and h: " tinres r r'" "step0 (steps0 (ss, l, r) t stp) t = (sa, la, ra)" "step0 (steps0 (ss, l, r') t stp) t = (sb, lb, rb)" "steps0 (ss, l, r) t stp = (a, b, c)" "steps0 (ss, l, r') t stp = (aa, ba, ca)" have "b = ba \ tinres c ca \ a = aa" apply(rule_tac ind, simp_all add: h) done thus "la = lb \ tinres ra rb \ sa = sb" apply(rule_tac l = b and r = c and ss = a and r' = ca and t = t in tinres_step2) using h apply(simp, simp, simp) done qed qed (simp add: steps.simps) definition t_wcode_adjust :: "instr list" where "t_wcode_adjust = [(W1, 1), (R, 2), (Nop, 2), (R, 3), (R, 3), (R, 4), (L, 8), (L, 5), (L, 6), (W0, 5), (L, 6), (R, 7), (W1, 2), (Nop, 7), (L, 9), (W0, 8), (L, 9), (L, 10), (L, 11), (L, 10), (R, 0), (L, 11)]" lemma fetch_t_wcode_adjust[simp]: "fetch t_wcode_adjust (Suc 0) Bk = (W1, 1)" "fetch t_wcode_adjust (Suc 0) Oc = (R, 2)" "fetch t_wcode_adjust (Suc (Suc 0)) Oc = (R, 3)" "fetch t_wcode_adjust (Suc (Suc (Suc 0))) Oc = (R, 4)" "fetch t_wcode_adjust (Suc (Suc (Suc 0))) Bk = (R, 3)" "fetch t_wcode_adjust 4 Bk = (L, 8)" "fetch t_wcode_adjust 4 Oc = (L, 5)" "fetch t_wcode_adjust 5 Oc = (W0, 5)" "fetch t_wcode_adjust 5 Bk = (L, 6)" "fetch t_wcode_adjust 6 Oc = (R, 7)" "fetch t_wcode_adjust 6 Bk = (L, 6)" "fetch t_wcode_adjust 7 Bk = (W1, 2)" "fetch t_wcode_adjust 8 Bk = (L, 9)" "fetch t_wcode_adjust 8 Oc = (W0, 8)" "fetch t_wcode_adjust 9 Oc = (L, 10)" "fetch t_wcode_adjust 9 Bk = (L, 9)" "fetch t_wcode_adjust 10 Bk = (L, 11)" "fetch t_wcode_adjust 10 Oc = (L, 10)" "fetch t_wcode_adjust 11 Oc = (L, 11)" "fetch t_wcode_adjust 11 Bk = (R, 0)" by(auto simp: fetch.simps t_wcode_adjust_def nth_of.simps numeral) fun wadjust_start :: "nat \ nat \ tape \ bool" where "wadjust_start m rs (l, r) = (\ ln rn. l = Bk # Oc\(Suc m) \ tl r = Oc # Bk\(ln) @ Bk # Oc\(Suc rs) @ Bk\(rn))" fun wadjust_loop_start :: "nat \ nat \ tape \ bool" where "wadjust_loop_start m rs (l, r) = (\ ln rn ml mr. l = Oc\(ml) @ Bk # Oc\(Suc m) \ r = Oc # Bk\(ln) @ Bk # Oc\(mr) @ Bk\(rn) \ ml + mr = Suc (Suc rs) \ mr > 0)" fun wadjust_loop_right_move :: "nat \ nat \ tape \ bool" where "wadjust_loop_right_move m rs (l, r) = (\ ml mr nl nr rn. 
l = Bk\(nl) @ Oc # Oc\(ml) @ Bk # Oc\(Suc m) \ r = Bk\(nr) @ Oc\(mr) @ Bk\(rn) \ ml + mr = Suc (Suc rs) \ mr > 0 \ nl + nr > 0)" fun wadjust_loop_check :: "nat \ nat \ tape \ bool" where "wadjust_loop_check m rs (l, r) = (\ ml mr ln rn. l = Oc # Bk\(ln) @ Bk # Oc # Oc\(ml) @ Bk # Oc\(Suc m) \ r = Oc\(mr) @ Bk\(rn) \ ml + mr = (Suc rs))" fun wadjust_loop_erase :: "nat \ nat \ tape \ bool" where "wadjust_loop_erase m rs (l, r) = (\ ml mr ln rn. l = Bk\(ln) @ Bk # Oc # Oc\(ml) @ Bk # Oc\(Suc m) \ tl r = Oc\(mr) @ Bk\(rn) \ ml + mr = (Suc rs) \ mr > 0)" fun wadjust_loop_on_left_moving_O :: "nat \ nat \ tape \ bool" where "wadjust_loop_on_left_moving_O m rs (l, r) = (\ ml mr ln rn. l = Oc\(ml) @ Bk # Oc\(Suc m )\ r = Oc # Bk\(ln) @ Bk # Bk # Oc\(mr) @ Bk\(rn) \ ml + mr = Suc rs \ mr > 0)" fun wadjust_loop_on_left_moving_B :: "nat \ nat \ tape \ bool" where "wadjust_loop_on_left_moving_B m rs (l, r) = (\ ml mr nl nr rn. l = Bk\(nl) @ Oc # Oc\(ml) @ Bk # Oc\(Suc m) \ r = Bk\(nr) @ Bk # Bk # Oc\(mr) @ Bk\(rn) \ ml + mr = Suc rs \ mr > 0)" fun wadjust_loop_on_left_moving :: "nat \ nat \ tape \ bool" where "wadjust_loop_on_left_moving m rs (l, r) = (wadjust_loop_on_left_moving_O m rs (l, r) \ wadjust_loop_on_left_moving_B m rs (l, r))" fun wadjust_loop_right_move2 :: "nat \ nat \ tape \ bool" where "wadjust_loop_right_move2 m rs (l, r) = (\ ml mr ln rn. l = Oc # Oc\(ml) @ Bk # Oc\(Suc m) \ r = Bk\(ln) @ Bk # Bk # Oc\(mr) @ Bk\(rn) \ ml + mr = Suc rs \ mr > 0)" fun wadjust_erase2 :: "nat \ nat \ tape \ bool" where "wadjust_erase2 m rs (l, r) = (\ ln rn. l = Bk\(ln) @ Bk # Oc # Oc\(Suc rs) @ Bk # Oc\(Suc m) \ tl r = Bk\(rn))" fun wadjust_on_left_moving_O :: "nat \ nat \ tape \ bool" where "wadjust_on_left_moving_O m rs (l, r) = (\ rn. l = Oc\(Suc rs) @ Bk # Oc\(Suc m) \ r = Oc # Bk\(rn))" fun wadjust_on_left_moving_B :: "nat \ nat \ tape \ bool" where "wadjust_on_left_moving_B m rs (l, r) = (\ ln rn. l = Bk\(ln) @ Oc # Oc\(Suc rs) @ Bk # Oc\(Suc m) \ r = Bk\(rn))" fun wadjust_on_left_moving :: "nat \ nat \ tape \ bool" where "wadjust_on_left_moving m rs (l, r) = (wadjust_on_left_moving_O m rs (l, r) \ wadjust_on_left_moving_B m rs (l, r))" fun wadjust_goon_left_moving_B :: "nat \ nat \ tape \ bool" where "wadjust_goon_left_moving_B m rs (l, r) = (\ rn. l = Oc\(Suc m) \ r = Bk # Oc\(Suc (Suc rs)) @ Bk\(rn))" fun wadjust_goon_left_moving_O :: "nat \ nat \ tape \ bool" where "wadjust_goon_left_moving_O m rs (l, r) = (\ ml mr rn. l = Oc\(ml) @ Bk # Oc\(Suc m) \ r = Oc\(mr) @ Bk\(rn) \ ml + mr = Suc (Suc rs) \ mr > 0)" fun wadjust_goon_left_moving :: "nat \ nat \ tape \ bool" where "wadjust_goon_left_moving m rs (l, r) = (wadjust_goon_left_moving_B m rs (l, r) \ wadjust_goon_left_moving_O m rs (l, r))" fun wadjust_backto_standard_pos_B :: "nat \ nat \ tape \ bool" where "wadjust_backto_standard_pos_B m rs (l, r) = (\ rn. l = [] \ r = Bk # Oc\(Suc m )@ Bk # Oc\(Suc (Suc rs)) @ Bk\(rn))" fun wadjust_backto_standard_pos_O :: "nat \ nat \ tape \ bool" where "wadjust_backto_standard_pos_O m rs (l, r) = (\ ml mr rn. l = Oc\(ml) \ r = Oc\(mr) @ Bk # Oc\(Suc (Suc rs)) @ Bk\(rn) \ ml + mr = Suc m \ mr > 0)" fun wadjust_backto_standard_pos :: "nat \ nat \ tape \ bool" where "wadjust_backto_standard_pos m rs (l, r) = (wadjust_backto_standard_pos_B m rs (l, r) \ wadjust_backto_standard_pos_O m rs (l, r))" fun wadjust_stop :: "nat \ nat \ tape \ bool" where "wadjust_stop m rs (l, r) = (\ rn. 
l = [Bk] \ r = Oc\(Suc m )@ Bk # Oc\(Suc (Suc rs)) @ Bk\(rn))" declare wadjust_start.simps[simp del] wadjust_loop_start.simps[simp del] wadjust_loop_right_move.simps[simp del] wadjust_loop_check.simps[simp del] wadjust_loop_erase.simps[simp del] wadjust_loop_on_left_moving.simps[simp del] wadjust_loop_right_move2.simps[simp del] wadjust_erase2.simps[simp del] wadjust_on_left_moving_O.simps[simp del] wadjust_on_left_moving_B.simps[simp del] wadjust_on_left_moving.simps[simp del] wadjust_goon_left_moving_B.simps[simp del] wadjust_goon_left_moving_O.simps[simp del] wadjust_goon_left_moving.simps[simp del] wadjust_backto_standard_pos.simps[simp del] wadjust_backto_standard_pos_B.simps[simp del] wadjust_backto_standard_pos_O.simps[simp del] wadjust_stop.simps[simp del] fun wadjust_inv :: "nat \ nat \ nat \ tape \ bool" where "wadjust_inv st m rs (l, r) = (if st = Suc 0 then wadjust_start m rs (l, r) else if st = Suc (Suc 0) then wadjust_loop_start m rs (l, r) else if st = Suc (Suc (Suc 0)) then wadjust_loop_right_move m rs (l, r) else if st = 4 then wadjust_loop_check m rs (l, r) else if st = 5 then wadjust_loop_erase m rs (l, r) else if st = 6 then wadjust_loop_on_left_moving m rs (l, r) else if st = 7 then wadjust_loop_right_move2 m rs (l, r) else if st = 8 then wadjust_erase2 m rs (l, r) else if st = 9 then wadjust_on_left_moving m rs (l, r) else if st = 10 then wadjust_goon_left_moving m rs (l, r) else if st = 11 then wadjust_backto_standard_pos m rs (l, r) else if st = 0 then wadjust_stop m rs (l, r) else False )" declare wadjust_inv.simps[simp del] fun wadjust_phase :: "nat \ config \ nat" where "wadjust_phase rs (st, l, r) = (if st = 1 then 3 else if st \ 2 \ st \ 7 then 2 else if st \ 8 \ st \ 11 then 1 else 0)" fun wadjust_stage :: "nat \ config \ nat" where "wadjust_stage rs (st, l, r) = (if st \ 2 \ st \ 7 then rs - length (takeWhile (\ a. a = Oc) (tl (dropWhile (\ a. 
a = Oc) (rev l @ r)))) else 0)" fun wadjust_state :: "nat \ config \ nat" where "wadjust_state rs (st, l, r) = (if st \ 2 \ st \ 7 then 8 - st else if st \ 8 \ st \ 11 then 12 - st else 0)" fun wadjust_step :: "nat \ config \ nat" where "wadjust_step rs (st, l, r) = (if st = 1 then (if hd r = Bk then 1 else 0) else if st = 3 then length r else if st = 5 then (if hd r = Oc then 1 else 0) else if st = 6 then length l else if st = 8 then (if hd r = Oc then 1 else 0) else if st = 9 then length l else if st = 10 then length l else if st = 11 then (if hd r = Bk then 0 else Suc (length l)) else 0)" fun wadjust_measure :: "(nat \ config) \ nat \ nat \ nat \ nat" where "wadjust_measure (rs, (st, l, r)) = (wadjust_phase rs (st, l, r), wadjust_stage rs (st, l, r), wadjust_state rs (st, l, r), wadjust_step rs (st, l, r))" definition wadjust_le :: "((nat \ config) \ nat \ config) set" where "wadjust_le \ (inv_image lex_square wadjust_measure)" lemma wf_lex_square[intro]: "wf lex_square" by(auto intro:wf_lex_prod simp: Abacus.lex_pair_def lex_square_def Abacus.lex_triple_def) lemma wf_wadjust_le[intro]: "wf wadjust_le" by(auto intro:wf_inv_image simp: wadjust_le_def Abacus.lex_triple_def Abacus.lex_pair_def) lemma wadjust_start_snd_nonempty[simp]: "wadjust_start m rs (c, []) = False" apply(auto simp: wadjust_start.simps) done lemma wadjust_loop_right_move_fst_nonempty[simp]: "wadjust_loop_right_move m rs (c, []) \ c \ []" apply(auto simp: wadjust_loop_right_move.simps) done lemma wadjust_loop_check_fst_nonempty[simp]: "wadjust_loop_check m rs (c, []) \ c \ []" apply(simp only: wadjust_loop_check.simps, auto) done lemma wadjust_loop_start_snd_nonempty[simp]: "wadjust_loop_start m rs (c, []) = False" apply(simp add: wadjust_loop_start.simps) done lemma wadjust_erase2_singleton[simp]: "wadjust_loop_check m rs (c, []) \ wadjust_erase2 m rs (tl c, [hd c])" apply(simp only: wadjust_loop_check.simps wadjust_erase2.simps, auto) done lemma wadjust_loop_on_left_moving_snd_nonempty[simp]: "wadjust_loop_on_left_moving m rs (c, []) = False" "wadjust_loop_right_move2 m rs (c, []) = False" "wadjust_erase2 m rs ([], []) = False" by(auto simp: wadjust_loop_on_left_moving.simps wadjust_loop_right_move2.simps wadjust_erase2.simps) lemma wadjust_on_left_moving_B_Bk1[simp]: "wadjust_on_left_moving_B m rs (Oc # Oc # Oc\(rs) @ Bk # Oc # Oc\(m), [Bk])" apply(simp add: wadjust_on_left_moving_B.simps, auto) done lemma wadjust_on_left_moving_B_Bk2[simp]: "wadjust_on_left_moving_B m rs (Bk\(n) @ Bk # Oc # Oc # Oc\(rs) @ Bk # Oc # Oc\(m), [Bk])" apply(simp add: wadjust_on_left_moving_B.simps , auto) apply(rule_tac x = "Suc n" in exI, simp add: exp_ind del: replicate_Suc) done lemma wadjust_on_left_moving_singleton[simp]: "\wadjust_erase2 m rs (c, []); c \ []\ \ wadjust_on_left_moving m rs (tl c, [hd c])" unfolding wadjust_erase2.simps apply(auto simp add: wadjust_on_left_moving.simps) apply (metis (no_types, lifting) empty_replicate hd_append hd_replicate list.sel(1) list.sel(3) self_append_conv2 tl_append2 tl_replicate wadjust_on_left_moving_B_Bk1 wadjust_on_left_moving_B_Bk2)+ done lemma wadjust_erase2_cases[simp]: "wadjust_erase2 m rs (c, []) \ (c = [] \ wadjust_on_left_moving m rs ([], [Bk])) \ (c \ [] \ wadjust_on_left_moving m rs (tl c, [hd c]))" apply(auto) done lemma wadjust_on_left_moving_nonempty[simp]: "wadjust_on_left_moving m rs ([], []) = False" "wadjust_on_left_moving_O m rs (c, []) = False" apply(auto simp: wadjust_on_left_moving.simps wadjust_on_left_moving_O.simps wadjust_on_left_moving_B.simps) done lemma 
wadjust_on_left_moving_B_singleton_Bk[simp]: " \wadjust_on_left_moving_B m rs (c, []); c \ []; hd c = Bk\ \ wadjust_on_left_moving_B m rs (tl c, [Bk])" apply(auto simp add: wadjust_on_left_moving_B.simps hd_append) by (metis cell.distinct(1) empty_replicate list.sel(1) tl_append2 tl_replicate) lemma wadjust_on_left_moving_B_singleton_Oc[simp]: "\wadjust_on_left_moving_B m rs (c, []); c \ []; hd c = Oc\ \ wadjust_on_left_moving_O m rs (tl c, [Oc])" apply(auto simp add: wadjust_on_left_moving_B.simps wadjust_on_left_moving_O.simps hd_append) apply (metis cell.distinct(1) empty_replicate hd_replicate list.sel(3) self_append_conv2)+ done lemma wadjust_on_left_moving_singleton2[simp]: "\wadjust_on_left_moving m rs (c, []); c \ []\ \ wadjust_on_left_moving m rs (tl c, [hd c])" apply(simp add: wadjust_on_left_moving.simps) apply(case_tac "hd c", simp_all) done lemma wadjust_nonempty[simp]: "wadjust_goon_left_moving m rs (c, []) = False" "wadjust_backto_standard_pos m rs (c, []) = False" by(auto simp: wadjust_goon_left_moving.simps wadjust_goon_left_moving_B.simps wadjust_goon_left_moving_O.simps wadjust_backto_standard_pos.simps wadjust_backto_standard_pos_B.simps wadjust_backto_standard_pos_O.simps) lemma wadjust_loop_start_no_Bk[simp]: "wadjust_loop_start m rs (c, Bk # list) = False" apply(auto simp: wadjust_loop_start.simps) done lemma wadjust_loop_check_nonempty[simp]: "wadjust_loop_check m rs (c, b) \ c \ []" apply(simp only: wadjust_loop_check.simps, auto) done lemma wadjust_erase2_via_loop_check_Bk[simp]: "wadjust_loop_check m rs (c, Bk # list) \ wadjust_erase2 m rs (tl c, hd c # Bk # list)" by (auto simp: wadjust_loop_check.simps wadjust_erase2.simps) declare wadjust_loop_on_left_moving_O.simps[simp del] wadjust_loop_on_left_moving_B.simps[simp del] lemma wadjust_loop_on_left_moving_B_via_erase[simp]: "\wadjust_loop_erase m rs (c, Bk # list); hd c = Bk\ \ wadjust_loop_on_left_moving_B m rs (tl c, Bk # Bk # list)" unfolding wadjust_loop_erase.simps wadjust_loop_on_left_moving_B.simps apply(erule_tac exE)+ apply(rename_tac ml mr ln rn) apply(rule_tac x = ml in exI, rule_tac x = mr in exI, rule_tac x = ln in exI, rule_tac x = 0 in exI) apply(case_tac ln, auto) apply(simp add: exp_ind [THEN sym]) done lemma wadjust_loop_on_left_moving_O_Bk_via_erase[simp]: "\wadjust_loop_erase m rs (c, Bk # list); c \ []; hd c = Oc\ \ wadjust_loop_on_left_moving_O m rs (tl c, Oc # Bk # list)" apply(auto simp: wadjust_loop_erase.simps wadjust_loop_on_left_moving_O.simps) by (metis cell.distinct(1) empty_replicate hd_append hd_replicate list.sel(1)) lemma wadjust_loop_on_left_moving_Bk_via_erase[simp]: "\wadjust_loop_erase m rs (c, Bk # list); c \ []\ \ wadjust_loop_on_left_moving m rs (tl c, hd c # Bk # list)" apply(case_tac "hd c", simp_all add:wadjust_loop_on_left_moving.simps) done lemma wadjust_loop_on_left_moving_B_Bk_move[simp]: "\wadjust_loop_on_left_moving_B m rs (c, Bk # list); hd c = Bk\ \ wadjust_loop_on_left_moving_B m rs (tl c, Bk # Bk # list)" apply(simp only: wadjust_loop_on_left_moving_B.simps) apply(erule_tac exE)+ by (metis (no_types, lifting) cell.distinct(1) list.sel(1) replicate_Suc_iff_anywhere self_append_conv2 tl_append2 tl_replicate) lemma wadjust_loop_on_left_moving_O_Oc_move[simp]: "\wadjust_loop_on_left_moving_B m rs (c, Bk # list); hd c = Oc\ \ wadjust_loop_on_left_moving_O m rs (tl c, Oc # Bk # list)" apply(simp only: wadjust_loop_on_left_moving_O.simps wadjust_loop_on_left_moving_B.simps) by (metis cell.distinct(1) empty_replicate hd_append hd_replicate list.sel(3) 
self_append_conv2) lemma wadjust_loop_erase_nonempty[simp]: "wadjust_loop_erase m rs (c, b) \ c \ []" "wadjust_loop_on_left_moving m rs (c, b) \ c \ []" "wadjust_loop_right_move2 m rs (c, b) \ c \ []" "wadjust_erase2 m rs (c, Bk # list) \ c \ []" "wadjust_on_left_moving m rs (c,b) \ c \ []" "wadjust_on_left_moving_O m rs (c, Bk # list) = False" "wadjust_goon_left_moving m rs (c, b) \ c \ []" "wadjust_loop_on_left_moving_O m rs (c, Bk # list) = False" by(auto simp: wadjust_loop_erase.simps wadjust_loop_on_left_moving.simps wadjust_loop_on_left_moving_O.simps wadjust_loop_on_left_moving_B.simps wadjust_loop_right_move2.simps wadjust_erase2.simps wadjust_on_left_moving.simps wadjust_on_left_moving_O.simps wadjust_on_left_moving_B.simps wadjust_goon_left_moving.simps wadjust_goon_left_moving_B.simps wadjust_goon_left_moving_O.simps) lemma wadjust_loop_on_left_moving_Bk_move[simp]: "wadjust_loop_on_left_moving m rs (c, Bk # list) \ wadjust_loop_on_left_moving m rs (tl c, hd c # Bk # list)" apply(simp add: wadjust_loop_on_left_moving.simps) apply(case_tac "hd c", simp_all) done lemma wadjust_loop_start_Oc_via_Bk_move[simp]: "wadjust_loop_right_move2 m rs (c, Bk # list) \ wadjust_loop_start m rs (c, Oc # list)" apply(auto simp: wadjust_loop_right_move2.simps wadjust_loop_start.simps replicate_app_Cons_same) by (metis add_Suc replicate_Suc) lemma wadjust_on_left_moving_Bk_via_erase[simp]: "wadjust_erase2 m rs (c, Bk # list) \ wadjust_on_left_moving m rs (tl c, hd c # Bk # list)" apply(auto simp: wadjust_erase2.simps wadjust_on_left_moving.simps replicate_app_Cons_same wadjust_on_left_moving_O.simps wadjust_on_left_moving_B.simps) apply (metis exp_ind replicate_append_same)+ done lemma wadjust_on_left_moving_B_Bk_drop_one: "\wadjust_on_left_moving_B m rs (c, Bk # list); hd c = Bk\ \ wadjust_on_left_moving_B m rs (tl c, Bk # Bk # list)" apply(auto simp: wadjust_on_left_moving_B.simps) by (metis cell.distinct(1) hd_append list.sel(1) tl_append2 tl_replicate) lemma wadjust_on_left_moving_B_Bk_drop_Oc: "\wadjust_on_left_moving_B m rs (c, Bk # list); hd c = Oc\ \ wadjust_on_left_moving_O m rs (tl c, Oc # Bk # list)" apply(auto simp: wadjust_on_left_moving_O.simps wadjust_on_left_moving_B.simps) by (metis cell.distinct(1) empty_replicate hd_append hd_replicate list.sel(3) self_append_conv2) lemma wadjust_on_left_moving_B_drop[simp]: "wadjust_on_left_moving m rs (c, Bk # list) \ wadjust_on_left_moving m rs (tl c, hd c # Bk # list)" by(cases "hd c", auto simp:wadjust_on_left_moving.simps wadjust_on_left_moving_B_Bk_drop_one wadjust_on_left_moving_B_Bk_drop_Oc) lemma wadjust_goon_left_moving_O_no_Bk[simp]: "wadjust_goon_left_moving_O m rs (c, Bk # list) = False" by (auto simp add: wadjust_goon_left_moving_O.simps) lemma wadjust_backto_standard_pos_via_left_Bk[simp]: "wadjust_goon_left_moving m rs (c, Bk # list) \ wadjust_backto_standard_pos m rs (tl c, hd c # Bk # list)" by(case_tac "hd c", simp_all add: wadjust_backto_standard_pos.simps wadjust_goon_left_moving.simps wadjust_goon_left_moving_B.simps wadjust_backto_standard_pos_O.simps) lemma wadjust_loop_right_move_Oc[simp]: "wadjust_loop_start m rs (c, Oc # list) \ wadjust_loop_right_move m rs (Oc # c, list)" apply(auto simp add: wadjust_loop_start.simps wadjust_loop_right_move.simps simp del:split_head_repeat) apply(rename_tac ln rn ml mr) apply(rule_tac x = ml in exI, rule_tac x = mr in exI, rule_tac x = 0 in exI, simp) apply(rule_tac x = "Suc ln" in exI, simp add: exp_ind del: replicate_Suc) done lemma wadjust_loop_check_Oc[simp]: assumes 
"wadjust_loop_right_move m rs (c, Oc # list)" shows "wadjust_loop_check m rs (Oc # c, list)" proof - from assms obtain ml mr nl nr rn where "c = Bk \ nl @ Oc # Oc \ ml @ Bk # Oc \ m @ [Oc]" "Oc # list = Bk \ nr @ Oc \ mr @ Bk \ rn" "ml + mr = Suc (Suc rs)" "0 < mr" "0 < nl + nr" unfolding wadjust_loop_right_move.simps exp_ind wadjust_loop_check.simps by auto hence "\ln. Oc # c = Oc # Bk \ ln @ Bk # Oc # Oc \ ml @ Bk # Oc \ Suc m" "\rn. list = Oc \ (mr - 1) @ Bk \ rn" "ml + (mr - 1) = Suc rs" by(cases nl;cases nr;cases mr;force simp add: wadjust_loop_right_move.simps exp_ind wadjust_loop_check.simps replicate_append_same)+ thus ?thesis unfolding wadjust_loop_check.simps by auto qed lemma wadjust_loop_erase_move_Oc[simp]: "wadjust_loop_check m rs (c, Oc # list) \ wadjust_loop_erase m rs (tl c, hd c # Oc # list)" apply(simp only: wadjust_loop_check.simps wadjust_loop_erase.simps) apply(erule_tac exE)+ using Cons_replicate_eq by fastforce lemma wadjust_loop_on_move_no_Oc[simp]: "wadjust_loop_on_left_moving_B m rs (c, Oc # list) = False" "wadjust_loop_right_move2 m rs (c, Oc # list) = False" "wadjust_loop_on_left_moving m rs (c, Oc # list) \ wadjust_loop_right_move2 m rs (Oc # c, list)" "wadjust_on_left_moving_B m rs (c, Oc # list) = False" "wadjust_loop_erase m rs (c, Oc # list) \ wadjust_loop_erase m rs (c, Bk # list)" by(auto simp: wadjust_loop_on_left_moving_B.simps wadjust_loop_on_left_moving_O.simps wadjust_loop_right_move2.simps replicate_app_Cons_same wadjust_loop_on_left_moving.simps wadjust_on_left_moving_B.simps wadjust_loop_erase.simps) lemma wadjust_goon_left_moving_B_Bk_Oc: "\wadjust_on_left_moving_O m rs (c, Oc # list); hd c = Bk\ \ wadjust_goon_left_moving_B m rs (tl c, Bk # Oc # list)" apply(auto simp: wadjust_on_left_moving_O.simps wadjust_goon_left_moving_B.simps ) done lemma wadjust_goon_left_moving_O_Oc_Oc: "\wadjust_on_left_moving_O m rs (c, Oc # list); hd c = Oc\ \ wadjust_goon_left_moving_O m rs (tl c, Oc # Oc # list)" apply(auto simp: wadjust_on_left_moving_O.simps wadjust_goon_left_moving_O.simps ) apply(auto simp: numeral_2_eq_2) done lemma wadjust_goon_left_moving_Oc[simp]: "wadjust_on_left_moving m rs (c, Oc # list) \ wadjust_goon_left_moving m rs (tl c, hd c # Oc # list)" by(cases "hd c"; force simp: wadjust_on_left_moving.simps wadjust_goon_left_moving.simps wadjust_goon_left_moving_B_Bk_Oc wadjust_goon_left_moving_O_Oc_Oc)+ lemma left_moving_Bk_Oc[simp]: "\wadjust_goon_left_moving_O m rs (c, Oc # list); hd c = Bk\ \ wadjust_goon_left_moving_B m rs (tl c, Bk # Oc # list)" apply(auto simp: wadjust_goon_left_moving_O.simps wadjust_goon_left_moving_B.simps hd_append dest!: gr0_implies_Suc) apply (metis cell.distinct(1) empty_replicate hd_replicate list.sel(3) self_append_conv2) by (metis add_cancel_right_left cell.distinct(1) hd_replicate replicate_Suc_iff_anywhere) lemma left_moving_Oc_Oc[simp]: "\wadjust_goon_left_moving_O m rs (c, Oc # list); hd c = Oc\ \ wadjust_goon_left_moving_O m rs (tl c, Oc # Oc # list)" apply(auto simp: wadjust_goon_left_moving_O.simps wadjust_goon_left_moving_B.simps) apply(rename_tac mlx mrx rnx) apply(rule_tac x = "mlx - 1" in exI, simp) apply(case_tac mlx, simp_all add: ) apply(rule_tac x = "Suc mrx" in exI, auto simp: ) done lemma wadjust_goon_left_moving_B_no_Oc[simp]: "wadjust_goon_left_moving_B m rs (c, Oc # list) = False" apply(auto simp: wadjust_goon_left_moving_B.simps) done lemma wadjust_goon_left_moving_Oc_move[simp]: "wadjust_goon_left_moving m rs (c, Oc # list) \ wadjust_goon_left_moving m rs (tl c, hd c # Oc # list)" 
by(cases "hd c",auto simp: wadjust_goon_left_moving.simps) lemma wadjust_backto_standard_pos_B_no_Oc[simp]: "wadjust_backto_standard_pos_B m rs (c, Oc # list) = False" apply(simp add: wadjust_backto_standard_pos_B.simps) done lemma wadjust_backto_standard_pos_O_no_Bk[simp]: "wadjust_backto_standard_pos_O m rs (c, Bk # xs) = False" by(simp add: wadjust_backto_standard_pos_O.simps) lemma wadjust_backto_standard_pos_B_Bk_Oc[simp]: "wadjust_backto_standard_pos_O m rs ([], Oc # list) \ wadjust_backto_standard_pos_B m rs ([], Bk # Oc # list)" apply(auto simp: wadjust_backto_standard_pos_O.simps wadjust_backto_standard_pos_B.simps) done lemma wadjust_backto_standard_pos_B_Bk_Oc_via_O[simp]: "\wadjust_backto_standard_pos_O m rs (c, Oc # list); c \ []; hd c = Bk\ \ wadjust_backto_standard_pos_B m rs (tl c, Bk # Oc # list)" apply(simp add:wadjust_backto_standard_pos_O.simps wadjust_backto_standard_pos_B.simps, auto) done lemma wadjust_backto_standard_pos_B_Oc_Oc_via_O[simp]: "\wadjust_backto_standard_pos_O m rs (c, Oc # list); c \ []; hd c = Oc\ \ wadjust_backto_standard_pos_O m rs (tl c, Oc # Oc # list)" apply(simp add: wadjust_backto_standard_pos_O.simps, auto) by force lemma wadjust_backto_standard_pos_cases[simp]: "wadjust_backto_standard_pos m rs (c, Oc # list) \ (c = [] \ wadjust_backto_standard_pos m rs ([], Bk # Oc # list)) \ (c \ [] \ wadjust_backto_standard_pos m rs (tl c, hd c # Oc # list))" apply(auto simp: wadjust_backto_standard_pos.simps) apply(case_tac "hd c", simp_all) done lemma wadjust_loop_right_move_nonempty_snd[simp]: "wadjust_loop_right_move m rs (c, []) = False" proof - {fix nl ml mr rn nr have "(c = Bk \ nl @ Oc # Oc \ ml @ Bk # Oc \ Suc m \ [] = Bk \ nr @ Oc \ mr @ Bk \ rn \ ml + mr = Suc (Suc rs) \ 0 < mr \ 0 < nl + nr) = False" by auto } note t=this thus ?thesis unfolding wadjust_loop_right_move.simps t by blast qed lemma wadjust_loop_erase_nonempty_snd[simp]: "wadjust_loop_erase m rs (c, []) = False" apply(simp only: wadjust_loop_erase.simps, auto) done lemma wadjust_loop_erase_cases2[simp]: "\Suc (Suc rs) = a; wadjust_loop_erase m rs (c, Bk # list)\ \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev (tl c) @ hd c # Bk # list)))) < a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list)))) \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev (tl c) @ hd c # Bk # list)))) = a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list))))" apply(simp only: wadjust_loop_erase.simps) apply(rule_tac disjI2) apply(case_tac c, simp, simp) done lemma dropWhile_exp1: "dropWhile (\a. a = Oc) (Oc\(n) @ xs) = dropWhile (\a. a = Oc) xs" apply(induct n, simp_all add: ) done lemma takeWhile_exp1: "takeWhile (\a. a = Oc) (Oc\(n) @ xs) = Oc\(n) @ takeWhile (\a. a = Oc) xs" apply(induct n, simp_all add: ) done lemma wadjust_correctness_helper_1: assumes "Suc (Suc rs) = a" " wadjust_loop_right_move2 m rs (c, Bk # list)" shows "a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Oc # list)))) < a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list))))" proof - have "ml + mr = Suc rs \ 0 < mr \ rs - (ml + length (takeWhile (\a. a = Oc) list)) < Suc rs - (ml + length (takeWhile (\a. 
a = Oc) (Bk \ ln @ Bk # Bk # Oc \ mr @ Bk \ rn))) " for ml mr ln rn by(cases ln, auto) thus ?thesis using assms by (auto simp: wadjust_loop_right_move2.simps dropWhile_exp1 takeWhile_exp1) qed lemma wadjust_correctness_helper_2: "\Suc (Suc rs) = a; wadjust_loop_on_left_moving m rs (c, Bk # list)\ \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev (tl c) @ hd c # Bk # list)))) < a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list)))) \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev (tl c) @ hd c # Bk # list)))) = a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list))))" apply(subgoal_tac "c \ []") apply(case_tac c, simp_all) done lemma wadjust_loop_check_empty_false[simp]: "wadjust_loop_check m rs ([], b) = False" apply(simp add: wadjust_loop_check.simps) done lemma wadjust_loop_check_cases: "\Suc (Suc rs) = a; wadjust_loop_check m rs (c, Oc # list)\ \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev (tl c) @ hd c # Oc # list)))) < a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Oc # list)))) \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev (tl c) @ hd c # Oc # list)))) = a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Oc # list))))" apply(case_tac "c", simp_all) done lemma wadjust_loop_erase_cases_or: "\Suc (Suc rs) = a; wadjust_loop_erase m rs (c, Oc # list)\ \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list)))) < a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Oc # list)))) \ a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. a = Oc) (rev c @ Bk # list)))) = a - length (takeWhile (\a. a = Oc) (tl (dropWhile (\a. 
a = Oc) (rev c @ Oc # list))))" apply(simp add: wadjust_loop_erase.simps) apply(rule_tac disjI2) apply(auto) apply(simp add: dropWhile_exp1 takeWhile_exp1) done lemmas wadjust_correctness_helpers = wadjust_correctness_helper_2 wadjust_correctness_helper_1 wadjust_loop_erase_cases_or wadjust_loop_check_cases declare numeral_2_eq_2[simp del] lemma wadjust_start_Oc[simp]: "wadjust_start m rs (c, Bk # list) \ wadjust_start m rs (c, Oc # list)" apply(auto simp: wadjust_start.simps) done lemma wadjust_stop_Bk[simp]: "wadjust_backto_standard_pos m rs (c, Bk # list) \ wadjust_stop m rs (Bk # c, list)" apply(auto simp: wadjust_backto_standard_pos.simps wadjust_stop.simps wadjust_backto_standard_pos_B.simps) done lemma wadjust_loop_start_Oc[simp]: assumes "wadjust_start m rs (c, Oc # list)" shows "wadjust_loop_start m rs (Oc # c, list)" proof - from assms[unfolded wadjust_start.simps] obtain ln rn where "c = Bk # Oc # Oc \ m" "list = Oc # Bk \ ln @ Bk # Oc # Oc \ rs @ Bk \ rn" by(auto) hence "Oc # c = Oc \ 1 @ Bk # Oc \ Suc m \ list = Oc # Bk \ ln @ Bk # Oc \Suc rs @ Bk \ rn \ 1 + (Suc rs) = Suc (Suc rs) \ 0 < Suc rs" by auto thus ?thesis unfolding wadjust_loop_start.simps by blast qed lemma erase2_Bk_if_Oc[simp]:" wadjust_erase2 m rs (c, Oc # list) \ wadjust_erase2 m rs (c, Bk # list)" apply(auto simp: wadjust_erase2.simps) done lemma wadjust_loop_right_move_Bk[simp]: "wadjust_loop_right_move m rs (c, Bk # list) \ wadjust_loop_right_move m rs (Bk # c, list)" apply(simp only: wadjust_loop_right_move.simps) apply(erule_tac exE)+ apply auto apply (metis cell.distinct(1) empty_replicate hd_append hd_replicate less_SucI list.sel(1) list.sel(3) neq0_conv replicate_Suc_iff_anywhere tl_append2 tl_replicate)+ done lemma wadjust_correctness: shows "let P = (\ (len, st, l, r). st = 0) in let Q = (\ (len, st, l, r). wadjust_inv st m rs (l, r)) in let f = (\ stp. (Suc (Suc rs), steps0 (Suc 0, Bk # Oc\(Suc m), Bk # Oc # Bk\(ln) @ Bk # Oc\(Suc rs) @ Bk\(rn)) t_wcode_adjust stp)) in \ n .P (f n) \ Q (f n)" proof - let ?P = "(\ (len, st, l, r). st = 0)" let ?Q = "\ (len, st, l, r). wadjust_inv st m rs (l, r)" let ?f = "\ stp. (Suc (Suc rs), steps0 (Suc 0, Bk # Oc\(Suc m), Bk # Oc # Bk\(ln) @ Bk # Oc\(Suc rs) @ Bk\(rn)) t_wcode_adjust stp)" have "\ n. ?P (?f n) \ ?Q (?f n)" proof(rule_tac halt_lemma2) show "wf wadjust_le" by auto next { fix n assume a:"\ ?P (?f n) \ ?Q (?f n)" have "?Q (?f (Suc n)) \ (?f (Suc n), ?f n) \ wadjust_le" proof(cases "?f n") case (fields a b c d) then show ?thesis proof(cases d) case Nil then show ?thesis using a fields apply(simp add: step.simps) apply(simp_all only: wadjust_inv.simps split: if_splits) apply(simp_all add: wadjust_inv.simps wadjust_le_def wadjust_correctness_helpers Abacus.lex_triple_def Abacus.lex_pair_def lex_square_def split: if_splits). next case (Cons aa list) then show ?thesis using a fields Nil Cons apply((case_tac aa); simp add: step.simps) apply(simp_all only: wadjust_inv.simps split: if_splits) apply(simp_all) apply(simp_all add: wadjust_inv.simps wadjust_le_def wadjust_correctness_helpers Abacus.lex_triple_def Abacus.lex_pair_def lex_square_def split: if_splits). qed qed } thus "\ n. 
\ ?P (?f n) \ ?Q (?f n) \ ?Q (?f (Suc n)) \ (?f (Suc n), ?f n) \ wadjust_le" by auto next show "?Q (?f 0)" by(auto simp add: steps.simps wadjust_inv.simps wadjust_start.simps) next show "\ ?P (?f 0)" by (simp add: steps.simps) qed thus"?thesis" by simp qed lemma tm_wf_t_wcode_adjust[intro]: "tm_wf (t_wcode_adjust, 0)" by(auto simp: t_wcode_adjust_def tm_wf.simps) lemma bl_bin_nonzero[simp]: "args \ [] \ bl_bin () > 0" by(cases args) (auto simp: tape_of_nl_cons bl_bin.simps) lemma wcode_lemma_pre': "args \ [] \ \ stp rn. steps0 (Suc 0, [], ) ((t_wcode_prepare |+| t_wcode_main) |+| t_wcode_adjust) stp = (0, [Bk], Oc\(Suc m) @ Bk # Oc\(Suc (bl_bin ())) @ Bk\(rn))" proof - let ?P1 = "\ (l, r). l = [] \ r = " let ?Q1 = "\(l, r). l = Bk # Oc\(Suc m) \ (\ln rn. r = Bk # Oc # Bk\(ln) @ Bk # Bk # Oc\(bl_bin ()) @ Bk\(rn))" let ?P2 = ?Q1 let ?Q2 = "\ (l, r). (wadjust_stop m (bl_bin () - 1) (l, r))" let ?P3 = "\ tp. False" assume h: "args \ []" hence a: "bl_bin () > 0" using h by simp hence "{?P1} (t_wcode_prepare |+| t_wcode_main) |+| t_wcode_adjust {?Q2}" proof(rule_tac Hoare_plus_halt) next show "tm_wf (t_wcode_prepare |+| t_wcode_main, 0)" by(rule_tac tm_comp_wf, auto) next show "{?P1} t_wcode_prepare |+| t_wcode_main {?Q1}" proof(rule_tac Hoare_haltI, auto) show "\n. is_final (steps0 (Suc 0, [], ) (t_wcode_prepare |+| t_wcode_main) n) \ (\(l, r). l = Bk # Oc # Oc \ m \ (\ln rn. r = Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ bl_bin () @ Bk \ rn)) holds_for steps0 (Suc 0, [], ) (t_wcode_prepare |+| t_wcode_main) n" using h prepare_mainpart_lemma[of args m] apply(auto) apply(rename_tac stp ln rn) apply(rule_tac x = stp in exI, simp) apply(rule_tac x = ln in exI, auto) done qed next show "{?P2} t_wcode_adjust {?Q2}" proof(rule_tac Hoare_haltI, auto del: replicate_Suc) fix ln rn obtain n a b where "steps0 (Suc 0, Bk # Oc \ m @ [Oc], Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ (bl_bin () - Suc 0) @ Oc # Bk \ rn) t_wcode_adjust n = (0, a, b)" "wadjust_inv 0 m (bl_bin () - Suc 0) (a, b)" using wadjust_correctness[of m "bl_bin () - 1" "Suc ln" rn,unfolded Let_def] by(simp del: replicate_Suc add: replicate_Suc[THEN sym] exp_ind, auto) thus "\n. is_final (steps0 (Suc 0, Bk # Oc # Oc \ m, Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ bl_bin () @ Bk \ rn) t_wcode_adjust n) \ wadjust_stop m (bl_bin () - Suc 0) holds_for steps0 (Suc 0, Bk # Oc # Oc \ m, Bk # Oc # Bk \ ln @ Bk # Bk # Oc \ bl_bin () @ Bk \ rn) t_wcode_adjust n" apply(rule_tac x = n in exI) using a apply(case_tac "bl_bin ()", simp, simp del: replicate_Suc add: exp_ind wadjust_inv.simps) by (simp add: replicate_append_same) qed qed thus "?thesis" apply(simp add: Hoare_halt_def, auto) apply(rename_tac n) apply(case_tac "(steps0 (Suc 0, [], <(m::nat) # args>) ((t_wcode_prepare |+| t_wcode_main) |+| t_wcode_adjust) n)") apply(rule_tac x = n in exI, auto simp: wadjust_stop.simps) using a apply(case_tac "bl_bin ()", simp_all) done qed text \ The initialization TM \t_wcode\. \ definition t_wcode :: "instr list" where "t_wcode = (t_wcode_prepare |+| t_wcode_main) |+| t_wcode_adjust " text \ The correctness of \t_wcode\. \ lemma wcode_lemma_1: "args \ [] \ \ stp ln rn. steps0 (Suc 0, [], ) (t_wcode) stp = (0, [Bk], Oc\(Suc m) @ Bk # Oc\(Suc (bl_bin ())) @ Bk\(rn))" apply(simp add: wcode_lemma_pre' t_wcode_def del: replicate_Suc) done lemma wcode_lemma: "args \ [] \ \ stp ln rn. 
steps0 (Suc 0, [], ) (t_wcode) stp = (0, [Bk], <[m ,bl_bin ()]> @ Bk\(rn))" using wcode_lemma_1[of args m] apply(simp add: t_wcode_def tape_of_list_def tape_of_nat_def) done section \The universal TM\ text \ This section gives the explicit construction of the {\em Universal Turing Machine}, defined as \UTM\, and proves its correctness. The construction is straightforward: it composes the partial results established so far. \ definition UTM :: "instr list" where "UTM = (let (aprog, rs_pos, a_md) = rec_ci rec_F in let abc_F = aprog [+] dummy_abc (Suc (Suc 0)) in (t_wcode |+| (tm_of abc_F @ shift (mopup (Suc (Suc 0))) (length (tm_of abc_F) div 2))))" definition F_aprog :: "abc_prog" where "F_aprog \ (let (aprog, rs_pos, a_md) = rec_ci rec_F in aprog [+] dummy_abc (Suc (Suc 0)))" definition F_tprog :: "instr list" where "F_tprog = tm_of (F_aprog)" definition t_utm :: "instr list" where "t_utm \ F_tprog @ shift (mopup (Suc (Suc 0))) (length F_tprog div 2)" definition UTM_pre :: "instr list" where "UTM_pre = t_wcode |+| t_utm" lemma tinres_step1: assumes "tinres l l'" "step (ss, l, r) (t, 0) = (sa, la, ra)" "step (ss, l', r) (t, 0) = (sb, lb, rb)" shows "tinres la lb \ ra = rb \ sa = sb" proof(cases "r") case Nil then show ?thesis using assms by (cases "(fetch t ss Bk)";cases "fst (fetch t ss Bk)";auto simp:step.simps split:if_splits) next case (Cons a list) then show ?thesis using assms by (cases "(fetch t ss a)";cases "fst (fetch t ss a)";auto simp:step.simps split:if_splits) qed lemma tinres_steps1: "\tinres l l'; steps (ss, l, r) (t, 0) stp = (sa, la, ra); steps (ss, l', r) (t, 0) stp = (sb, lb, rb)\ \ tinres la lb \ ra = rb \ sa = sb" proof (induct stp arbitrary: sa la ra sb lb rb) case (Suc stp) then show ?case apply simp apply(case_tac "(steps (ss, l, r) (t, 0) stp)") apply(case_tac "(steps (ss, l', r) (t, 0) stp)") proof - fix stp sa la ra sb lb rb a b c aa ba ca assume ind: "\sa la ra sb lb rb. \steps (ss, l, r) (t, 0) stp = (sa, (la::cell list), ra); steps (ss, l', r) (t, 0) stp = (sb, lb, rb)\ \ tinres la lb \ ra = rb \ sa = sb" and h: " tinres l l'" "step (steps (ss, l, r) (t, 0) stp) (t, 0) = (sa, la, ra)" "step (steps (ss, l', r) (t, 0) stp) (t, 0) = (sb, lb, rb)" "steps (ss, l, r) (t, 0) stp = (a, b, c)" "steps (ss, l', r) (t, 0) stp = (aa, ba, ca)" have "tinres b ba \ c = ca \ a = aa" using ind h by metis thus "tinres la lb \ ra = rb \ sa = sb" using tinres_step1 h by metis qed qed (simp add: steps.simps) lemma tinres_some_exp[simp]: "tinres (Bk \ m @ [Bk, Bk]) la \ \m. la = Bk \ m" unfolding tinres_def proof - let ?c1 = "\ n. Bk \ m @ [Bk, Bk] = la @ Bk \ n" let ?c2 = "\ n. la = (Bk \ m @ [Bk, Bk]) @ Bk \ n" assume "\n. ?c1 n \ ?c2 n" then obtain n where "?c1 n \ ?c2 n" by auto then consider "?c1 n" | "?c2 n" by blast thus ?thesis proof(cases) case 1 hence "Bk \ Suc (Suc m) = la @ Bk \ n" by (metis exp_ind append_Cons append_eq_append_conv2 self_append_conv2) hence "la = Bk \ (Suc (Suc m) - n)" by (metis replicate_add append_eq_append_conv diff_add_inverse2 length_append length_replicate) then show ?thesis by auto next case 2 hence "la = Bk \ (m + Suc (Suc n))" by (metis append_Cons append_eq_append_conv2 replicate_Suc replicate_add self_append_conv2) then show ?thesis by blast qed qed lemma t_utm_halt_eq: assumes tm_wf: "tm_wf (tp, 0)" and exec: "steps0 (Suc 0, Bk\(l), ) tp stp = (0, Bk\(m), Oc\(rs)@Bk\(n))" and result: "0 < rs" shows "\stp m n. 
steps0 (Suc 0, [Bk], <[code tp, bl2wc ()]> @ Bk\(i)) t_utm stp = (0, Bk\(m), Oc\(rs) @ Bk\(n))" proof - obtain ap arity fp where a: "rec_ci rec_F = (ap, arity, fp)" by (metis prod_cases3) moreover have b: "rec_exec rec_F [code tp, (bl2wc ())] = (rs - Suc 0)" using assms apply(rule_tac F_correct, simp_all) done have "\ stp m l. steps0 (Suc 0, Bk # Bk # [], <[code tp, bl2wc ()]> @ Bk\i) (F_tprog @ shift (mopup (length [code tp, bl2wc ()])) (length F_tprog div 2)) stp = (0, Bk\m @ Bk # Bk # [], Oc\Suc (rec_exec rec_F [code tp, (bl2wc ())]) @ Bk\l)" proof(rule_tac recursive_compile_to_tm_correct1) show "rec_ci rec_F = (ap, arity, fp)" using a by simp next show "terminate rec_F [code tp, bl2wc ()]" using assms by(rule_tac terminate_F, simp_all) next show "F_tprog = tm_of (ap [+] dummy_abc (length [code tp, bl2wc ()]))" using a apply(simp add: F_tprog_def F_aprog_def numeral_2_eq_2) done qed then obtain stp m l where "steps0 (Suc 0, Bk # Bk # [], <[code tp, bl2wc ()]> @ Bk\i) (F_tprog @ shift (mopup (length [code tp, (bl2wc ())])) (length F_tprog div 2)) stp = (0, Bk\m @ Bk # Bk # [], Oc\Suc (rec_exec rec_F [code tp, (bl2wc ())]) @ Bk\l)" by blast hence "\ m. steps0 (Suc 0, [Bk], <[code tp, bl2wc ()]> @ Bk\i) (F_tprog @ shift (mopup 2) (length F_tprog div 2)) stp = (0, Bk\m, Oc\Suc (rs - 1) @ Bk\l)" proof - assume g: "steps0 (Suc 0, [Bk, Bk], <[code tp, bl2wc ()]> @ Bk \ i) (F_tprog @ shift (mopup (length [code tp, bl2wc ()])) (length F_tprog div 2)) stp = (0, Bk \ m @ [Bk, Bk], Oc \ Suc ((rec_exec rec_F [code tp, bl2wc ()])) @ Bk \ l)" moreover have "tinres [Bk, Bk] [Bk]" apply(auto simp: tinres_def) done moreover obtain sa la ra where "steps0 (Suc 0, [Bk], <[code tp, bl2wc ()]> @ Bk\i) (F_tprog @ shift (mopup 2) (length F_tprog div 2)) stp = (sa, la, ra)" apply(case_tac "steps0 (Suc 0, [Bk], <[code tp, bl2wc ()]> @ Bk\i) (F_tprog @ shift (mopup 2) (length F_tprog div 2)) stp", auto) done ultimately show "?thesis" using b apply(drule_tac la = "Bk\m @ [Bk, Bk]" in tinres_steps1, auto simp: numeral_2_eq_2) done qed thus "?thesis" apply(auto) apply(rule_tac x = stp in exI, simp add: t_utm_def) using assms apply(case_tac rs, simp_all add: numeral_2_eq_2) done qed lemma tm_wf_t_wcode[intro]: "tm_wf (t_wcode, 0)" apply(simp add: t_wcode_def) apply(rule_tac tm_comp_wf) apply(rule_tac tm_comp_wf, auto) done lemma UTM_halt_lemma_pre: assumes wf_tm: "tm_wf (tp, 0)" and result: "0 < rs" and args: "args \ []" and exec: "steps0 (Suc 0, Bk\(i), ) tp stp = (0, Bk\(m), Oc\(rs)@Bk\(k))" shows "\stp m n. steps0 (Suc 0, [], ) UTM_pre stp = (0, Bk\(m), Oc\(rs) @ Bk\(n))" proof - let ?Q2 = "\ (l, r). (\ ln rn. l = Bk\(ln) \ r = Oc\(rs) @ Bk\(rn))" let ?P1 = "\ (l, r). l = [] \ r = " let ?Q1 = "\ (l, r). (l = [Bk] \ (\ rn. r = Oc\(Suc (code tp)) @ Bk # Oc\(Suc (bl_bin ())) @ Bk\(rn)))" let ?P2 = ?Q1 let ?P3 = "\ (l, r). False" have "{?P1} (t_wcode |+| t_utm) {?Q2}" proof(rule_tac Hoare_plus_halt) show "tm_wf (t_wcode, 0)" by auto next show "{?P1} t_wcode {?Q1}" apply(rule_tac Hoare_haltI, auto) using wcode_lemma_1[of args "code tp"] args apply(auto) by (metis (mono_tags, lifting) holds_for.simps is_finalI old.prod.case) next show "{?P2} t_utm {?Q2}" proof(rule_tac Hoare_haltI, auto) fix rn show "\n. is_final (steps0 (Suc 0, [Bk], Oc # Oc \ code tp @ Bk # Oc # Oc \ bl_bin () @ Bk \ rn) t_utm n) \ (\(l, r). (\ln. l = Bk \ ln) \ (\rn. 
r = Oc \ rs @ Bk \ rn)) holds_for steps0 (Suc 0, [Bk], Oc # Oc \ code tp @ Bk # Oc # Oc \ bl_bin () @ Bk \ rn) t_utm n" using t_utm_halt_eq[of tp i "args" stp m rs k rn] assms apply(auto simp: bin_wc_eq tape_of_list_def tape_of_nat_def) apply(rename_tac stpa) apply(rule_tac x = stpa in exI, simp) done qed qed thus "?thesis" apply(auto simp: Hoare_halt_def UTM_pre_def) apply(case_tac "steps0 (Suc 0, [], ) (t_wcode |+| t_utm) n",simp) by auto qed text \ The correctness of \UTM\, the halt case. \ lemma UTM_halt_lemma': assumes tm_wf: "tm_wf (tp, 0)" and result: "0 < rs" and args: "args \ []" and exec: "steps0 (Suc 0, Bk\(i), ) tp stp = (0, Bk\(m), Oc\(rs)@Bk\(k))" shows "\stp m n. steps0 (Suc 0, [], ) UTM stp = (0, Bk\(m), Oc\(rs) @ Bk\(n))" using UTM_halt_lemma_pre[of tp rs args i stp m k] assms apply(simp add: UTM_pre_def t_utm_def UTM_def F_aprog_def F_tprog_def) apply(case_tac "rec_ci rec_F", simp) done definition TSTD:: "config \ bool" where "TSTD c = (let (st, l, r) = c in st = 0 \ (\ m. l = Bk\(m)) \ (\ rs n. r = Oc\(Suc rs) @ Bk\(n)))" lemma nstd_case1: "0 < a \ NSTD (trpl_code (a, b, c))" by(simp add: NSTD.simps trpl_code.simps) lemma nonzero_bl2wc[simp]: "\m. b \ Bk\(m) \ 0 < bl2wc b" proof - have "\m. b \ Bk \ m \ bl2wc b = 0 \ False" proof(induct b) case (Cons a b) then show ?case apply(simp add: bl2wc.simps, case_tac a, simp_all add: bl2nat.simps bl2nat_double) apply(case_tac "\ m. b = Bk\(m)", erule exE) apply(metis append_Nil2 replicate_Suc_iff_anywhere) by simp qed auto thus "\m. b \ Bk\(m) \ 0 < bl2wc b" by auto qed lemma nstd_case2: "\m. b \ Bk\(m) \ NSTD (trpl_code (a, b, c))" apply(simp add: NSTD.simps trpl_code.simps) done lemma even_not_odd[elim]: "Suc (2 * x) = 2 * y \ RR" proof(induct x arbitrary: y) case (Suc x) thus ?case by(cases y;auto) qed auto declare replicate_Suc[simp del] lemma bl2nat_zero_eq[simp]: "(bl2nat c 0 = 0) = (\n. c = Bk\(n))" proof(induct c) case (Cons a c) then show ?case by (cases a;auto simp: bl2nat.simps bl2nat_double Cons_replicate_eq) qed (auto simp: bl2nat.simps) lemma bl2wc_exp_ex: "\Suc (bl2wc c) = 2 ^ m\ \ \ rs n. c = Oc\(rs) @ Bk\(n)" proof(induct c arbitrary: m) case (Cons a c m) { fix n have "Bk # Bk \ n = Oc \ 0 @ Bk \ Suc n" by (auto simp:replicate_Suc) hence "\rs na. Bk # Bk \ n = Oc \ rs @ Bk \ na" by blast } with Cons show ?case apply(cases a, auto) apply(case_tac m, simp_all add: bl2wc.simps, auto) apply(simp add: bl2wc.simps bl2nat.simps bl2nat_double Cons) apply(case_tac m, simp, simp add: bin_wc_eq bl2wc.simps twice_power ) by (metis Cons.hyps Suc_pred bl2wc.simps neq0_conv power_not_zero replicate_Suc_iff_anywhere zero_neq_numeral) qed (simp add: bl2wc.simps bl2nat.simps) lemma lg_bin: assumes "\rs n. c \ Oc\(Suc rs) @ Bk\(n)" "bl2wc c = 2 ^ lg (Suc (bl2wc c)) 2 - Suc 0" shows "bl2wc c = 0" proof - from assms obtain rs nat n where *:"2 ^ rs - Suc 0 = nat" "c = Oc \ rs @ Bk \ n" using bl2wc_exp_ex[of c "lg (Suc (bl2wc c)) 2"] by(case_tac "(2::nat) ^ lg (Suc (bl2wc c)) 2", simp, simp, erule_tac exE, erule_tac exE, simp) have r:"bl2wc (Oc \ rs) = nat" by (metis "*"(1) bl2nat_exp_zero bl2wc.elims) hence "Suc (bl2wc c) = 2^rs" using * by(case_tac "(2::nat)^rs", auto) thus ?thesis using * assms(1) apply(drule_tac bl2wc_exp_ex, simp, erule_tac exE, erule_tac exE) by(case_tac rs, simp, simp) qed lemma nstd_case3: "\rs n. 
c \ Oc\(Suc rs) @ Bk\(n) \ NSTD (trpl_code (a, b, c))" apply(simp add: NSTD.simps trpl_code.simps) apply(auto) apply(drule_tac lg_bin, simp_all) done lemma NSTD_1: "\ TSTD (a, b, c) \ rec_exec rec_NSTD [trpl_code (a, b, c)] = Suc 0" using NSTD_lemma1[of "trpl_code (a, b, c)"] NSTD_lemma2[of "trpl_code (a, b, c)"] apply(simp add: TSTD_def) apply(erule_tac disjE, erule_tac nstd_case1) apply(erule_tac disjE, erule_tac nstd_case2) apply(erule_tac nstd_case3) done lemma nonstop_t_uhalt_eq: "\tm_wf (tp, 0); steps0 (Suc 0, Bk\(l), ) tp stp = (a, b, c); \ TSTD (a, b, c)\ \ rec_exec rec_nonstop [code tp, bl2wc (), stp] = Suc 0" apply(simp add: rec_nonstop_def rec_exec.simps) apply(subgoal_tac "rec_exec rec_conf [code tp, bl2wc (), stp] = trpl_code (a, b, c)", simp) apply(erule_tac NSTD_1) using rec_t_eq_steps[of tp l lm stp] apply(simp) done lemma nonstop_true: "\tm_wf (tp, 0); \ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))\ \ \y. rec_exec rec_nonstop ([code tp, bl2wc (), y]) = (Suc 0)" proof fix y assume a:"tm_wf0 tp" "\stp. \ TSTD (steps0 (Suc 0, Bk \ l, ) tp stp)" hence "\ TSTD (steps0 (Suc 0, Bk \ l, ) tp y)" by auto thus "rec_exec rec_nonstop [code tp, bl2wc (), y] = Suc 0" by (cases "steps0 (Suc 0, Bk\(l), ) tp y") (auto intro: nonstop_t_uhalt_eq[OF a(1)]) qed lemma cn_arity: "rec_ci (Cn n f gs) = (a, b, c) \ b = n" by(case_tac "rec_ci f", simp add: rec_ci.simps) lemma mn_arity: "rec_ci (Mn n f) = (a, b, c) \ b = n" by(case_tac "rec_ci f", simp add: rec_ci.simps) lemma F_aprog_uhalt: assumes wf_tm: "tm_wf (tp,0)" and unhalt: "\ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))" and compile: "rec_ci rec_F = (F_ap, rs_pos, a_md)" shows "{\ nl. nl = [code tp, bl2wc ()] @ 0\(a_md - rs_pos ) @ suflm} (F_ap) \" using compile proof(simp only: rec_F_def) assume h: "rec_ci (Cn (Suc (Suc 0)) rec_valu [Cn (Suc (Suc 0)) rec_right [Cn (Suc (Suc 0)) rec_conf [recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), rec_halt]]]) = (F_ap, rs_pos, a_md)" moreover hence "rs_pos = Suc (Suc 0)" using cn_arity by simp moreover obtain ap1 ar1 ft1 where a: "rec_ci (Cn (Suc (Suc 0)) rec_right [Cn (Suc (Suc 0)) rec_conf [recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), rec_halt]]) = (ap1, ar1, ft1)" by(case_tac "rec_ci (Cn (Suc (Suc 0)) rec_right [Cn (Suc (Suc 0)) rec_conf [recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), rec_halt]])", auto) moreover hence b: "ar1 = Suc (Suc 0)" using cn_arity by simp ultimately show "?thesis" proof(rule_tac i = 0 in cn_unhalt_case, auto) fix anything obtain ap2 ar2 ft2 where c: "rec_ci (Cn (Suc (Suc 0)) rec_conf [recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), rec_halt]) = (ap2, ar2, ft2)" by(case_tac "rec_ci (Cn (Suc (Suc 0)) rec_conf [recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), rec_halt])", auto) moreover hence d:"ar2 = Suc (Suc 0)" using cn_arity by simp ultimately have "{\nl. nl = [code tp, bl2wc ()] @ 0 \ (ft1 - Suc (Suc 0)) @ anything} ap1 \" using a b c d proof(rule_tac i = 0 in cn_unhalt_case, auto) fix anything obtain ap3 ar3 ft3 where e: "rec_ci rec_halt = (ap3, ar3, ft3)" by(case_tac "rec_ci rec_halt", auto) hence f: "ar3 = Suc (Suc 0)" using mn_arity by(simp add: rec_halt_def) have "{\nl. nl = [code tp, bl2wc ()] @ 0 \ (ft2 - Suc (Suc 0)) @ anything} ap2 \" using c d e f proof(rule_tac i = 2 in cn_unhalt_case, auto simp: rec_halt_def) fix anything have "{\nl. 
nl = [code tp, bl2wc ()] @ 0 \ (ft3 - Suc (Suc 0)) @ anything} ap3 \" using e f proof(rule_tac mn_unhalt_case, auto simp: rec_halt_def) fix i show "terminate rec_nonstop [code tp, bl2wc (), i]" by(rule_tac primerec_terminate, auto) next fix i show "0 < rec_exec rec_nonstop [code tp, bl2wc (), i]" using assms by(drule_tac nonstop_true, auto) qed thus "{\nl. nl = code tp # bl2wc () # 0 \ (ft3 - Suc (Suc 0)) @ anything} ap3 \" by simp next fix apj arj ftj j anything assume "j<2" "rec_ci ([recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), Mn (Suc (Suc 0)) rec_nonstop] ! j) = (apj, arj, ftj)" hence "{\nl. nl = [code tp, bl2wc ()] @ 0 \ (ftj - arj) @ anything} apj {\nl. nl = [code tp, bl2wc ()] @ rec_exec ([recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), Mn (Suc (Suc 0)) rec_nonstop] ! j) [code tp, bl2wc ()] # 0 \ (ftj - Suc arj) @ anything}" apply(rule_tac recursive_compile_correct) apply(case_tac j, auto) apply(rule_tac [!] primerec_terminate) by(auto) thus "{\nl. nl = code tp # bl2wc () # 0 \ (ftj - arj) @ anything} apj {\nl. nl = code tp # bl2wc () # rec_exec ([recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), Mn (Suc (Suc 0)) rec_nonstop] ! j) [code tp, bl2wc ()] # 0 \ (ftj - Suc arj) @ anything}" by simp next fix j assume "(j::nat) < 2" thus "terminate ([recf.id (Suc (Suc 0)) 0, recf.id (Suc (Suc 0)) (Suc 0), Mn (Suc (Suc 0)) rec_nonstop] ! j) [code tp, bl2wc ()]" by(case_tac j, auto intro!: primerec_terminate) qed thus "{\nl. nl = code tp # bl2wc () # 0 \ (ft2 - Suc (Suc 0)) @ anything} ap2 \" by simp qed thus "{\nl. nl = code tp # bl2wc () # 0 \ (ft1 - Suc (Suc 0)) @ anything} ap1 \" by simp qed qed lemma uabc_uhalt': "\tm_wf (tp, 0); \ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp)); rec_ci rec_F = (ap, pos, md)\ \ {\ nl. nl = [code tp, bl2wc ()]} ap \" proof(frule_tac F_ap = ap and rs_pos = pos and a_md = md and suflm = "[]" in F_aprog_uhalt, auto simp: abc_Hoare_unhalt_def, case_tac "abc_steps_l (0, [code tp, bl2wc ()]) ap n", simp) fix n a b assume h: "\n. abc_notfinal (abc_steps_l (0, code tp # bl2wc () # 0 \ (md - pos)) ap n) ap" "abc_steps_l (0, [code tp, bl2wc ()]) ap n = (a, b)" "tm_wf (tp, 0)" "rec_ci rec_F = (ap, pos, md)" moreover have a: "ap \ []" using h rec_ci_not_null[of "rec_F" pos md] by auto ultimately show "a < length ap" proof(erule_tac x = n in allE) assume g: "abc_notfinal (abc_steps_l (0, code tp # bl2wc () # 0 \ (md - pos)) ap n) ap" obtain ss nl where b : "abc_steps_l (0, code tp # bl2wc () # 0 \ (md - pos)) ap n = (ss, nl)" by (metis prod.exhaust) then have c: "ss < length ap" using g by simp thus "?thesis" using a b c using abc_list_crsp_steps[of "[code tp, bl2wc ()]" "md - pos" ap n ss nl] h by(simp) qed qed lemma uabc_uhalt: "\tm_wf (tp, 0); \ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))\ \ {\ nl. nl = [code tp, bl2wc ()]} F_aprog \ " proof - obtain a b c where abc:"rec_ci rec_F = (a,b,c)" by (cases "rec_ci rec_F") force assume a:"tm_wf (tp, 0)" "\ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))" from uabc_uhalt'[OF a abc] abc_Hoare_plus_unhalt1 show "{\ nl. nl = [code tp, bl2wc ()]} F_aprog \" by(simp add: F_aprog_def abc) qed lemma tutm_uhalt': assumes tm_wf: "tm_wf (tp,0)" and unhalt: "\ stp. (\ TSTD (steps0 (1, Bk\(l), ) tp stp))" shows "\ stp. 
\ is_final (steps0 (1, [Bk, Bk], <[code tp, bl2wc ()]>) t_utm stp)" unfolding t_utm_def proof(rule_tac compile_correct_unhalt, auto) show "F_tprog = tm_of F_aprog" by(simp add: F_tprog_def) next show "crsp (layout_of F_aprog) (0, [code tp, bl2wc ()]) (Suc 0, [Bk, Bk], <[code tp, bl2wc ()]>) []" by(auto simp: crsp.simps start_of.simps) next fix stp a b show "abc_steps_l (0, [code tp, bl2wc ()]) F_aprog stp = (a, b) \ a < length F_aprog" using assms apply(drule_tac uabc_uhalt, auto simp: abc_Hoare_unhalt_def) by(erule_tac x = stp in allE, erule_tac x = stp in allE, simp) qed lemma tinres_commute: "tinres r r' \ tinres r' r" apply(auto simp: tinres_def) done lemma inres_tape: "\steps0 (st, l, r) tp stp = (a, b, c); steps0 (st, l', r') tp stp = (a', b', c'); tinres l l'; tinres r r'\ \ a = a' \ tinres b b' \ tinres c c'" proof(case_tac "steps0 (st, l', r) tp stp") fix aa ba ca assume h: "steps0 (st, l, r) tp stp = (a, b, c)" "steps0 (st, l', r') tp stp = (a', b', c')" "tinres l l'" "tinres r r'" "steps0 (st, l', r) tp stp = (aa, ba, ca)" have "tinres b ba \ c = ca \ a = aa" using h apply(rule_tac tinres_steps1, auto) done moreover have "b' = ba \ tinres c' ca \ a' = aa" using h apply(rule_tac tinres_steps2, auto intro: tinres_commute) done ultimately show "?thesis" apply(auto intro: tinres_commute) done qed lemma tape_normalize: assumes "\ stp. \ is_final(steps0 (Suc 0, [Bk,Bk], <[code tp, bl2wc ()]>) t_utm stp)" shows "\ stp. \ is_final (steps0 (Suc 0, Bk\(m), <[code tp, bl2wc ()]> @ Bk\(n)) t_utm stp)" (is "\ stp. ?P stp") proof fix stp from assms[rule_format,of stp] show "?P stp" apply(case_tac "steps0 (Suc 0, Bk\(m), <[code tp, bl2wc ()]> @ Bk\(n)) t_utm stp", simp) apply(case_tac "steps0 (Suc 0, [Bk, Bk], <[code tp, bl2wc ()]>) t_utm stp", simp) apply(drule_tac inres_tape, auto) apply(auto simp: tinres_def) apply(case_tac "m > Suc (Suc 0)") apply(rule_tac x = "m - Suc (Suc 0)" in exI) apply(case_tac m, simp_all) apply(metis Suc_lessD Suc_pred replicate_Suc) apply(rule_tac x = "2 - m" in exI, simp add: replicate_add[THEN sym]) apply(simp only: numeral_2_eq_2, simp add: replicate_Suc) done qed lemma tutm_uhalt: "\tm_wf (tp,0); \ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))\ \ \ stp. \ is_final (steps0 (Suc 0, Bk\(m), <[code tp, bl2wc ()]> @ Bk\(n)) t_utm stp)" apply(rule_tac tape_normalize) apply(rule_tac tutm_uhalt'[simplified], simp_all) done lemma UTM_uhalt_lemma_pre: assumes tm_wf: "tm_wf (tp, 0)" and exec: "\ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))" and args: "args \ []" shows "\ stp. \ is_final (steps0 (Suc 0, [], ) UTM_pre stp)" proof - let ?P1 = "\ (l, r). l = [] \ r = " let ?Q1 = "\ (l, r). (l = [Bk] \ (\ rn. r = Oc\(Suc (code tp)) @ Bk # Oc\(Suc (bl_bin ())) @ Bk\(rn)))" let ?P2 = ?Q1 have "{?P1} (t_wcode |+| t_utm) \" proof(rule_tac Hoare_plus_unhalt) show "tm_wf (t_wcode, 0)" by auto next show "{?P1} t_wcode {?Q1}" apply(rule_tac Hoare_haltI, auto) using wcode_lemma_1[of args "code tp"] args apply(auto) by (metis (mono_tags, lifting) holds_for.simps is_finalI old.prod.case) next show "{?P2} t_utm \" proof(rule_tac Hoare_unhaltI, auto) fix n rn assume h: "is_final (steps0 (Suc 0, [Bk], Oc \ Suc (code tp) @ Bk # Oc \ Suc (bl_bin ()) @ Bk \ rn) t_utm n)" have "\ stp. 
\ is_final (steps0 (Suc 0, Bk\(Suc 0), <[code tp, bl2wc ()]> @ Bk\(rn)) t_utm stp)" using assms apply(rule_tac tutm_uhalt, simp_all) done thus "False" using h apply(erule_tac x = n in allE) apply(simp add: tape_of_list_def bin_wc_eq tape_of_nat_def) done qed qed thus "?thesis" apply(simp add: Hoare_unhalt_def UTM_pre_def) done qed text \ The correctness of \UTM\, the unhalt case. \ lemma UTM_uhalt_lemma': assumes tm_wf: "tm_wf (tp, 0)" and unhalt: "\ stp. (\ TSTD (steps0 (Suc 0, Bk\(l), ) tp stp))" and args: "args \ []" shows " \ stp. \ is_final (steps0 (Suc 0, [], ) UTM stp)" using UTM_uhalt_lemma_pre[of tp l args] assms apply(simp add: UTM_pre_def t_utm_def UTM_def F_aprog_def F_tprog_def) apply(case_tac "rec_ci rec_F", simp) done lemma UTM_halt_lemma: assumes tm_wf: "tm_wf (p, 0)" and resut: "rs > 0" and args: "(args::nat list) \ []" and exec: "{(\tp. tp = (Bk\i, ))} p {(\tp. tp = (Bk\m, Oc\rs @ Bk\k))}" shows "{(\tp. tp = ([], ))} UTM {(\tp. (\ m n. tp = (Bk\m, Oc\rs @ Bk\n)))}" proof - let ?steps0 = "steps0 (Suc 0, [], )" let ?stepsBk = "steps0 (Suc 0, Bk\i, ) p" from wcode_lemma_1[OF args,of "code p"] obtain stp ln rn where wcl1:"?steps0 t_wcode stp = (0, [Bk], Oc \ Suc (code p) @ Bk # Oc \ Suc (bl_bin ()) @ Bk \ rn)" by fast from exec Hoare_halt_def obtain n where n:"{\tp. tp = (Bk \ i, )} p {\tp. tp = (Bk \ m, Oc \ rs @ Bk \ k)}" "is_final (?stepsBk n)" "(\tp. tp = (Bk \ m, Oc \ rs @ Bk \ k)) holds_for steps0 (Suc 0, Bk \ i, ) p n" by auto obtain a where a:"a = fst (rec_ci rec_F)" by blast have "{(\ (l, r). l = [] \ r = )} (t_wcode |+| t_utm) {(\ (l, r). (\ m. l = Bk\m) \ (\ n. r = Oc\rs @ Bk\n))}" proof(rule_tac Hoare_plus_halt) show "{\(l, r). l = [] \ r = } t_wcode {\ (l, r). (l = [Bk] \ (\ rn. r = Oc\(Suc (code p)) @ Bk # Oc\(Suc (bl_bin ())) @ Bk\(rn)))}" using wcl1 by (auto intro!:Hoare_haltI exI[of _ stp]) next have "\ stp. (?stepsBk stp = (0, Bk\m, Oc\rs @ Bk\k))" using n by (case_tac "?stepsBk n", auto) then obtain stp where k: "steps0 (Suc 0, Bk\i, ) p stp = (0, Bk\m, Oc\rs @ Bk\k)" .. thus "{\(l, r). l = [Bk] \ (\rn. r = Oc \ Suc (code p) @ Bk # Oc \ Suc (bl_bin ()) @ Bk \ rn)} t_utm {\(l, r). (\m. l = Bk \ m) \ (\n. r = Oc \ rs @ Bk \ n)}" proof(rule_tac Hoare_haltI, auto) fix rn from t_utm_halt_eq[OF assms(1) k assms(2),of rn] assms k have "\ ma n stp. steps0 (Suc 0, [Bk], <[code p, bl2wc ()]> @ Bk \ rn) t_utm stp = (0, Bk \ ma, Oc \ rs @ Bk \ n)" by (auto simp add: bin_wc_eq) then obtain stpx m' n' where t:"steps0 (Suc 0, [Bk], <[code p, bl2wc ()]> @ Bk \ rn) t_utm stpx = (0, Bk \ m', Oc \ rs @ Bk \ n')" by auto show "\n. is_final (steps0 (Suc 0, [Bk], Oc \ Suc (code p) @ Bk # Oc \ Suc (bl_bin ()) @ Bk \ rn) t_utm n) \ (\(l, r). (\m. l = Bk \ m) \ (\n. r = Oc \ rs @ Bk \ n)) holds_for steps0 (Suc 0, [Bk], Oc \ Suc (code p) @ Bk # Oc \ Suc (bl_bin ()) @ Bk \ rn) t_utm n" using t by(auto simp: bin_wc_eq tape_of_list_def tape_of_nat_def intro:exI[of _ stpx]) qed next show "tm_wf0 t_wcode" by auto qed then obtain n where "is_final (?steps0 (t_wcode |+| t_utm) n)" "(\(l, r). (\m. l = Bk \ m) \ (\n. r = Oc \ rs @ Bk \ n)) holds_for ?steps0 (t_wcode |+| t_utm) n" by(auto simp add: Hoare_halt_def a) thus "?thesis" apply(case_tac "rec_ci rec_F") apply(auto simp add: UTM_def Hoare_halt_def) apply(case_tac "(?steps0 (t_wcode |+| t_utm) n)") apply(rule_tac x="n" in exI) apply(auto simp add:a t_utm_def F_aprog_def F_tprog_def) done qed lemma UTM_halt_lemma2: assumes tm_wf: "tm_wf (p, 0)" and args: "(args::nat list) \ []" and exec: "{(\tp. tp = ([], ))} p {(\tp. 
tp = (Bk\m, <(n::nat)> @ Bk\k))}" shows "{(\tp. tp = ([], ))} UTM {(\tp. (\ m k. tp = (Bk\m, @ Bk\k)))}" using UTM_halt_lemma[OF assms(1) _ assms(2), where i="0"] using assms(3) by(simp add: tape_of_nat_def) lemma UTM_unhalt_lemma: assumes tm_wf: "tm_wf (p, 0)" and unhalt: "{(\tp. tp = (Bk\i, ))} p \" and args: "args \ []" shows "{(\tp. tp = ([], ))} UTM \" proof - have "(\ TSTD (steps0 (Suc 0, Bk\(i), ) p stp))" for stp (* in unhalt, we substitute inner 'forall' n\stp *) using unhalt[unfolded Hoare_unhalt_def,rule_format,OF refl,of stp] by(cases "steps0 (Suc 0, Bk \ i, ) p stp",auto simp: Hoare_unhalt_def TSTD_def) then have "\ stp. \ is_final (steps0 (Suc 0, [], ) UTM stp)" using assms by(intro UTM_uhalt_lemma', auto) thus "?thesis" by(simp add: Hoare_unhalt_def) qed lemma UTM_unhalt_lemma2: assumes tm_wf: "tm_wf (p, 0)" and unhalt: "{(\tp. tp = ([], ))} p \" and args: "args \ []" shows "{(\tp. tp = ([], ))} UTM \" using UTM_unhalt_lemma[OF assms(1), where i="0"] using assms(2-3) by(simp add: tape_of_nat_def) end \ No newline at end of file diff --git a/thys/WebAssembly/Wasm_Checker_Types.thy b/thys/WebAssembly/Wasm_Checker_Types.thy --- a/thys/WebAssembly/Wasm_Checker_Types.thy +++ b/thys/WebAssembly/Wasm_Checker_Types.thy @@ -1,1081 +1,1081 @@ section \Augmented Type Syntax for Concrete Checker\ theory Wasm_Checker_Types imports Wasm "HOL-Library.Sublist" begin datatype ct = TAny | TSome t datatype checker_type = TopType "ct list" | Type "t list" | Bot definition to_ct_list :: "t list \ ct list" where "to_ct_list ts = map TSome ts" fun ct_eq :: "ct \ ct \ bool" where "ct_eq (TSome t) (TSome t') = (t = t')" | "ct_eq TAny _ = True" | "ct_eq _ TAny = True" definition ct_list_eq :: "ct list \ ct list \ bool" where "ct_list_eq ct1s ct2s = list_all2 ct_eq ct1s ct2s" definition ct_prefix :: "ct list \ ct list \ bool" where "ct_prefix xs ys = (\as bs. ys = as@bs \ ct_list_eq as xs)" definition ct_suffix :: "ct list \ ct list \ bool" where "ct_suffix xs ys = (\as bs. ys = as@bs \ ct_list_eq bs xs)" lemma ct_eq_commute: assumes "ct_eq x y" shows "ct_eq y x" using assms by (metis ct_eq.elims(3) ct_eq.simps(1)) lemma ct_eq_flip: "ct_eq\\ = ct_eq" using ct_eq_commute by fastforce lemma ct_eq_common_tsome: "ct_eq x y = (\t. ct_eq x (TSome t) \ ct_eq (TSome t) y)" by (metis ct_eq.elims(3) ct_eq.simps(1)) lemma ct_list_eq_commute: assumes "ct_list_eq xs ys" shows "ct_list_eq ys xs" using assms ct_eq_commute List.List.list.rel_flip ct_eq_flip unfolding ct_list_eq_def by fastforce lemma ct_list_eq_refl: "ct_list_eq xs xs" unfolding ct_list_eq_def by (metis ct_eq.elims(3) ct_eq.simps(1) list_all2_refl) lemma ct_list_eq_length: assumes "ct_list_eq xs ys" shows "length xs = length ys" using assms list_all2_lengthD unfolding ct_list_eq_def by blast lemma ct_list_eq_concat: assumes "ct_list_eq xs ys" "ct_list_eq xs' ys'" shows "ct_list_eq (xs@xs') (ys@ys')" using assms unfolding ct_list_eq_def by (simp add: list_all2_appendI) lemma ct_list_eq_ts_conv_eq: "ct_list_eq (to_ct_list ts) (to_ct_list ts') = (ts = ts')" unfolding ct_list_eq_def to_ct_list_def list_all2_map1 list_all2_map2 ct_eq.simps(1) by (simp add: list_all2_eq) lemma ct_list_eq_exists: "\ys. 
ct_list_eq xs (to_ct_list ys)" proof (induction xs) case Nil thus ?case unfolding ct_list_eq_def to_ct_list_def by (simp) next case (Cons a xs) thus ?case unfolding ct_list_eq_def to_ct_list_def apply (cases a) apply (metis ct_eq.simps(3) ct_eq_commute list.rel_intros(2) list.simps(9)) apply (metis ct_eq.simps(1) list.rel_intros(2) list.simps(9)) done qed lemma ct_list_eq_common_tsome_list: "ct_list_eq xs ys = (\zs. ct_list_eq xs (to_ct_list zs) \ ct_list_eq (to_ct_list zs) ys)" proof (induction ys arbitrary: xs) case Nil thus ?case unfolding ct_list_eq_def to_ct_list_def by simp next case (Cons a ys) show ?case proof (safe) assume assms:"ct_list_eq xs (a # ys)" then obtain x' xs' where xs_def:"xs = x'#xs'" by (meson ct_list_eq_def list_all2_Cons2) then obtain zs where zs_def:"ct_eq x' a" "ct_list_eq xs' (to_ct_list zs) \ ct_list_eq (to_ct_list zs) ys" using Cons[of xs'] assms list_all2_Cons unfolding ct_list_eq_def by fastforce obtain z where "ct_eq x' (TSome z)" "ct_eq (TSome z) a" using ct_eq_common_tsome[of x' a] zs_def(1) by fastforce hence "ct_list_eq (x'#xs') (to_ct_list (z#zs)) \ ct_list_eq (to_ct_list (z#zs)) (a # ys)" using zs_def(2) list_all2_Cons unfolding ct_list_eq_def to_ct_list_def by simp thus "\zs. ct_list_eq xs (to_ct_list zs) \ ct_list_eq (to_ct_list zs) (a # ys)" using xs_def by fastforce next fix zs assume assms:"ct_list_eq xs (to_ct_list zs)" "ct_list_eq (to_ct_list zs) (a # ys)" then obtain x' xs' z' zs' where "xs = x'#xs'" "zs = z'#zs'" "ct_list_eq xs' (to_ct_list zs')" "ct_list_eq (to_ct_list zs') (ys)" using list_all2_Cons2 unfolding ct_list_eq_def to_ct_list_def list_all2_map1 list_all2_map2 by (metis (no_types, lifting)) thus "ct_list_eq xs (a # ys)" using assms Cons ct_list_eq_def to_ct_list_def ct_eq_common_tsome by (metis list.simps(9) list_all2_Cons) qed qed lemma ct_list_eq_cons_ct_list: assumes "ct_list_eq (to_ct_list as) (xs @ ys)" shows "\bs bs'. as = bs @ bs' \ ct_list_eq (to_ct_list bs) xs \ ct_list_eq (to_ct_list bs') ys" using assms proof (induction xs arbitrary: as) case Nil thus ?case by (metis append_Nil ct_list_eq_ts_conv_eq list.simps(8) to_ct_list_def) next case (Cons a xs) thus ?case unfolding ct_list_eq_def to_ct_list_def list_all2_map1 by (meson list_all2_append2) qed lemma ct_list_eq_cons_ct_list1: assumes "ct_list_eq (to_ct_list as) (xs @ (to_ct_list ys))" shows "\bs. 
as = bs @ ys \ ct_list_eq (to_ct_list bs) xs" using ct_list_eq_cons_ct_list[OF assms] ct_list_eq_ts_conv_eq by fastforce lemma ct_list_eq_shared: assumes "ct_list_eq xs (to_ct_list as)" "ct_list_eq ys (to_ct_list as)" shows "ct_list_eq xs ys" using assms ct_list_eq_def by (meson ct_list_eq_common_tsome_list ct_list_eq_commute) lemma ct_list_eq_take: assumes "ct_list_eq xs ys" shows "ct_list_eq (take n xs) (take n ys)" using assms list_all2_takeI unfolding ct_list_eq_def by blast lemma ct_prefixI [intro?]: assumes "ys = as @ zs" "ct_list_eq as xs" shows "ct_prefix xs ys" using assms unfolding ct_prefix_def by blast lemma ct_prefixE [elim?]: assumes "ct_prefix xs ys" obtains as zs where "ys = as @ zs" "ct_list_eq as xs" using assms unfolding ct_prefix_def by blast lemma ct_prefix_snoc [simp]: "ct_prefix xs (ys @ [y]) = (ct_list_eq xs (ys@[y]) \ ct_prefix xs ys)" proof (safe) assume "ct_prefix xs (ys @ [y])" "\ ct_prefix xs ys" thus "ct_list_eq xs (ys @ [y])" unfolding ct_prefix_def ct_list_eq_def by (metis butlast_append butlast_snoc ct_eq_flip list.rel_flip) next assume "ct_list_eq xs (ys @ [y])" thus "ct_prefix xs (ys @ [y])" using ct_list_eq_commute ct_prefixI by fastforce next assume "ct_prefix xs ys" thus "ct_prefix xs (ys @ [y])" using append_assoc unfolding ct_prefix_def by blast qed lemma ct_prefix_nil:"ct_prefix [] xs" "\ct_prefix (x # xs) []" by (simp_all add: ct_prefix_def ct_list_eq_def) lemma Cons_ct_prefix_Cons[simp]: "ct_prefix (x # xs) (y # ys) = ((ct_eq x y) \ ct_prefix xs ys)" proof (safe) assume "ct_prefix (x # xs) (y # ys)" thus "ct_eq x y" unfolding ct_prefix_def ct_list_eq_def by (metis ct_eq_commute hd_append2 list.sel(1) list.simps(3) list_all2_Cons2) next assume "ct_prefix (x # xs) (y # ys)" thus "ct_prefix xs ys" unfolding ct_prefix_def ct_list_eq_def by (metis list.rel_distinct(1) list.sel(3) list_all2_Cons2 tl_append2) next assume "ct_eq x y" "ct_prefix xs ys" thus "ct_prefix (x # xs) (y # ys)" unfolding ct_prefix_def ct_list_eq_def by (metis (full_types) append_Cons ct_list_eq_commute ct_list_eq_def list.rel_inject(2)) qed lemma ct_prefix_code [code]: "ct_prefix [] xs = True" "ct_prefix (x # xs) [] = False" "ct_prefix (x # xs) (y # ys) = ((ct_eq x y) \ ct_prefix xs ys)" by (simp_all add: ct_prefix_nil) lemma ct_suffix_to_ct_prefix [code]: "ct_suffix xs ys = ct_prefix (rev xs) (rev ys)" unfolding ct_suffix_def ct_prefix_def ct_list_eq_def by (metis list_all2_rev1 rev_append rev_rev_ident) lemma inj_TSome: "inj TSome" by (meson ct.inject injI) lemma to_ct_list_append: assumes "to_ct_list ts = as@bs" shows "\as'. to_ct_list as' = as" "\bs'. to_ct_list bs' = bs" using assms proof (induct as arbitrary: ts) fix ts assume "to_ct_list ts = [] @ bs" thus "\as'. to_ct_list as' = []" "\bs'. to_ct_list bs' = bs" unfolding to_ct_list_def by auto next case (Cons a as) fix ts assume local_assms:"to_ct_list ts = (a # as) @ bs" then obtain t' ts' where "ts = t'#ts'" unfolding to_ct_list_def by auto thus "\as'. to_ct_list as' = a # as" "\as'. 
to_ct_list as' = bs" using Cons local_assms unfolding to_ct_list_def apply simp_all apply (metis list.simps(9)) apply blast done qed lemma ct_suffixI [intro?]: assumes "ys = as @ zs" "ct_list_eq zs xs" shows "ct_suffix xs ys" using assms unfolding ct_suffix_def by blast lemma ct_suffixE [elim?]: assumes "ct_suffix xs ys" obtains as zs where "ys = as @ zs" "ct_list_eq zs xs" using assms unfolding ct_suffix_def by blast lemma ct_suffix_nil: "ct_suffix [] ts" unfolding ct_suffix_def using ct_list_eq_refl by auto lemma ct_suffix_refl: "ct_suffix ts ts" unfolding ct_suffix_def using ct_list_eq_refl by auto lemma ct_suffix_length: assumes "ct_suffix ts ts'" shows "length ts \ length ts'" using assms list_all2_lengthD unfolding ct_suffix_def ct_list_eq_def by fastforce lemma ct_suffix_take: assumes "ct_suffix ts ts'" shows "ct_suffix ((take (length ts - n) ts)) ((take (length ts' - n) ts'))" using assms ct_list_eq_take append_eq_conv_conj unfolding ct_suffix_def proof - assume "\as bs. ts' = as @ bs \ ct_list_eq bs ts" then obtain ccs :: "ct list" and ccsa :: "ct list" where f1: "ts' = ccs @ ccsa \ ct_list_eq ccsa ts" by moura then have f2: "length ccsa = length ts" by (meson ct_list_eq_length) have "\n. ct_list_eq (take n ccsa) (take n ts)" using f1 by (meson ct_list_eq_take) then show "\cs csa. take (length ts' - n) ts' = cs @ csa \ ct_list_eq csa (take (length ts - n) ts)" using f2 f1 by auto qed lemma ct_suffix_ts_conv_suffix: "ct_suffix (to_ct_list ts) (to_ct_list ts') = suffix ts ts'" proof safe assume "ct_suffix (to_ct_list ts) (to_ct_list ts')" then obtain as bs where "to_ct_list ts' = (to_ct_list as) @ (to_ct_list bs)" "ct_list_eq (to_ct_list bs) (to_ct_list ts)" using to_ct_list_append unfolding ct_suffix_def by metis thus "suffix ts ts'" using ct_list_eq_ts_conv_eq unfolding ct_suffix_def to_ct_list_def suffix_def by (metis map_append) next assume "suffix ts ts'" thus "ct_suffix (to_ct_list ts) (to_ct_list ts')" using ct_list_eq_ts_conv_eq unfolding ct_suffix_def to_ct_list_def suffix_def by (metis map_append) qed lemma ct_suffix_exists: "\ts_c. ct_suffix x1 (to_ct_list ts_c)" using ct_list_eq_commute ct_list_eq_exists ct_suffix_def by fastforce lemma ct_suffix_ct_list_eq_exists: assumes "ct_suffix x1 x2" shows "\ts_c. ct_suffix x1 (to_ct_list ts_c) \ ct_list_eq (to_ct_list ts_c) x2" proof - obtain as bs where x2_def:"x2 = as @ bs" "ct_list_eq x1 bs" using assms ct_list_eq_commute unfolding ct_suffix_def by blast then obtain ts_as ts_bs where "ct_list_eq as (to_ct_list ts_as)" "ct_list_eq x1 (to_ct_list ts_bs)" "ct_list_eq (to_ct_list ts_bs) bs" using ct_list_eq_common_tsome_list[of x1 bs] ct_list_eq_exists by fastforce thus ?thesis using x2_def ct_list_eq_commute unfolding ct_suffix_def to_ct_list_def by (metis ct_list_eq_def list_all2_appendI map_append) qed lemma ct_suffix_cons_ct_list: assumes "ct_suffix (xs@ys) (to_ct_list zs)" shows "\as bs. zs = as@bs \ ct_list_eq ys (to_ct_list bs) \ ct_suffix xs (to_ct_list as)" proof - obtain as bs where "to_ct_list zs = (to_ct_list as) @ (to_ct_list bs)" "ct_list_eq (to_ct_list bs) (xs @ ys)" using assms to_ct_list_append[of zs] unfolding ct_suffix_def by blast thus ?thesis using assms ct_list_eq_cons_ct_list[of bs xs ys] unfolding ct_suffix_def by (metis append.assoc ct_list_eq_commute ct_list_eq_ts_conv_eq map_append to_ct_list_def) qed lemma ct_suffix_cons_ct_list1: assumes "ct_suffix (xs@(to_ct_list ys)) (to_ct_list zs)" shows "\as. 
zs = as@ys \ ct_suffix xs (to_ct_list as)" using ct_suffix_cons_ct_list[OF assms] ct_list_eq_ts_conv_eq by fastforce lemma ct_suffix_cons2: assumes "ct_suffix (xs) (ys@zs)" "length xs = length zs" shows "ct_list_eq xs zs" using assms by (metis append_eq_append_conv ct_list_eq_commute ct_list_eq_def ct_suffix_def list_all2_lengthD) lemma ct_suffix_imp_ct_list_eq: assumes "ct_suffix xs ys" shows "ct_list_eq (drop (length ys - length xs) ys) xs" using assms ct_list_eq_def list_all2_lengthD unfolding ct_suffix_def by fastforce lemma ct_suffix_extend_ct_list_eq: assumes "ct_suffix xs ys" "ct_list_eq xs' ys'" shows "ct_suffix (xs@xs') (ys@ys')" using assms unfolding ct_suffix_def ct_list_eq_def by (meson append.assoc ct_list_eq_commute ct_list_eq_def list_all2_appendI) lemma ct_suffix_extend_any1: assumes "ct_suffix xs ys" "length xs < length ys" shows "ct_suffix (TAny#xs) ys" proof - obtain as bs where ys_def:"ys = as@bs" "ct_list_eq bs xs" using assms(1) ct_suffix_def by fastforce hence "length as > 0" using list_all2_lengthD assms(2) unfolding ct_list_eq_def by fastforce then obtain as' a where as_def:"as = as'@[a]" by (metis append_butlast_last_id length_greater_0_conv) hence "ct_list_eq (a#bs) (TAny#xs)" using ys_def by (meson ct_eq.simps(2) ct_list_eq_commute ct_list_eq_def list.rel_intros(2)) thus ?thesis using as_def ys_def ct_suffix_def by fastforce qed lemma ct_suffix_singleton_any: "ct_suffix [TAny] [t]" using ct_suffix_extend_ct_list_eq[of "[]" "[]" "[TAny]" "[t]"] ct_suffix_nil by (simp add: ct_list_eq_def) lemma ct_suffix_cons_it: "ct_suffix xs (xs'@xs)" using ct_list_eq_refl ct_suffix_def by blast lemma ct_suffix_singleton: assumes "length cts > 0" shows "ct_suffix [TAny] cts" proof - have "\c. ct_prefix [TAny] [c]" using ct_suffix_singleton_any ct_suffix_to_ct_prefix by force then show ?thesis by (metis (no_types) Suc_leI append_butlast_last_id assms butlast.simps(2) ct_list_eq_commute ct_prefix_nil(2) ct_prefix_snoc ct_suffix_def impossible_Cons length_Cons list.size(3)) qed lemma ct_suffix_less: assumes "ct_suffix (xs@xs') ys" shows "ct_suffix xs' ys" using assms unfolding ct_suffix_def by (metis append_eq_appendI ct_list_eq_def list_all2_append2) lemma ct_suffix_unfold_one: "ct_suffix (xs@[x]) (ys@[y]) = ((ct_eq x y) \ ct_suffix xs ys)" using ct_prefix_code(3) by (simp add: ct_suffix_to_ct_prefix) lemma ct_suffix_shared: assumes "ct_suffix cts (to_ct_list ts)" "ct_suffix cts' (to_ct_list ts)" shows "ct_suffix cts cts' \ ct_suffix cts' cts" proof (cases "length cts > length cts'") case True obtain as bs where cts_def:"ts = as@bs" "ct_list_eq cts (to_ct_list bs)" using assms(1) ct_suffix_def to_ct_list_def by (metis append_Nil ct_suffix_cons_ct_list) obtain as' bs' where cts'_def:"ts = as'@bs'" "ct_list_eq cts' (to_ct_list bs')" using assms(2) ct_suffix_def to_ct_list_def by (metis append_Nil ct_suffix_cons_ct_list) obtain ct1s ct2s where "cts = ct1s@ct2s" "length ct2s = length cts'" using True (* List.append_eq_conv_conj[of _ _ cts] *) by (metis add_diff_cancel_right' append_take_drop_id length_drop less_imp_le_nat nat_le_iff_add) show ?thesis proof - obtain tts :: "t list \ ct list \ ct list \ t list" and ttsa :: "t list \ ct list \ ct list \ t list" where "\x0 x1 x2. (\v3 v4. 
x0 = v3 @ v4 \ ct_list_eq x1 (to_ct_list v4) \ ct_suffix x2 (to_ct_list v3)) = (x0 = tts x0 x1 x2 @ ttsa x0 x1 x2 \ ct_list_eq x1 (to_ct_list (ttsa x0 x1 x2)) \ ct_suffix x2 (to_ct_list (tts x0 x1 x2)))" by moura then have f1: "as' @ bs' = tts (as' @ bs') ct2s ct1s @ ttsa (as' @ bs') ct2s ct1s \ ct_list_eq ct2s (to_ct_list (ttsa (as' @ bs') ct2s ct1s)) \ ct_suffix ct1s (to_ct_list (tts (as' @ bs') ct2s ct1s))" using assms(1) \cts = ct1s @ ct2s\ cts'_def(1) ct_suffix_cons_ct_list by force then have "ct_list_eq cts' (to_ct_list (ttsa (as' @ bs') ct2s ct1s))" by (metis \ct_suffix cts' (to_ct_list ts)\ \length ct2s = length cts'\ cts'_def(1) ct_list_eq_length ct_suffix_cons2 map_append to_ct_list_def) then show ?thesis using f1 by (metis \cts = ct1s @ ct2s\ ct_list_eq_shared ct_suffix_def) qed next case False hence len:"length cts' \ length cts" by linarith obtain as bs where cts_def:"ts = as@bs" "ct_list_eq cts (to_ct_list bs)" using assms(1) ct_suffix_def to_ct_list_def by (metis append_Nil ct_suffix_cons_ct_list) obtain as' bs' where cts'_def:"ts = as'@bs'" "ct_list_eq cts' (to_ct_list bs')" using assms(2) ct_suffix_def to_ct_list_def by (metis append_Nil ct_suffix_cons_ct_list) obtain ct1s ct2s where "cts' = ct1s@ct2s" "length ct2s = length cts" using len (* List.append_eq_conv_conj[of _ _ cts] *) by (metis add_diff_cancel_right' append_take_drop_id length_drop nat_le_iff_add) show ?thesis proof - obtain tts :: "t list \ ct list \ ct list \ t list" and ttsa :: "t list \ ct list \ ct list \ t list" where "\x0 x1 x2. (\v3 v4. x0 = v3 @ v4 \ ct_list_eq x1 (to_ct_list v4) \ ct_suffix x2 (to_ct_list v3)) = (x0 = tts x0 x1 x2 @ ttsa x0 x1 x2 \ ct_list_eq x1 (to_ct_list (ttsa x0 x1 x2)) \ ct_suffix x2 (to_ct_list (tts x0 x1 x2)))" by moura then have f1: "as @ bs = tts (as @ bs) ct2s ct1s @ ttsa (as @ bs) ct2s ct1s \ ct_list_eq ct2s (to_ct_list (ttsa (as @ bs) ct2s ct1s)) \ ct_suffix ct1s (to_ct_list (tts (as @ bs) ct2s ct1s))" using assms(2) \cts' = ct1s @ ct2s\ cts_def(1) ct_suffix_cons_ct_list by force then have "ct_list_eq cts (to_ct_list (ttsa (as @ bs) ct2s ct1s))" by (metis \ct_suffix cts (to_ct_list ts)\ \length ct2s = length cts\ cts_def(1) ct_list_eq_length ct_suffix_cons2 map_append to_ct_list_def) then show ?thesis using f1 by (metis \cts' = ct1s @ ct2s\ ct_list_eq_shared ct_suffix_def) qed qed fun checker_type_suffix::"checker_type \ checker_type \ bool" where "checker_type_suffix (Type ts) (Type ts') = suffix ts ts'" | "checker_type_suffix (Type ts) (TopType cts) = ct_suffix (to_ct_list ts) cts" | "checker_type_suffix (TopType cts) (Type ts) = ct_suffix cts (to_ct_list ts)" | "checker_type_suffix _ _ = False" fun consume :: "checker_type \ ct list \ checker_type" where "consume (Type ts) cons = (if ct_suffix cons (to_ct_list ts) then Type (take (length ts - length cons) ts) else Bot)" | "consume (TopType cts) cons = (if ct_suffix cons cts then TopType (take (length cts - length cons) cts) else (if ct_suffix cts cons then TopType [] else Bot))" | "consume _ _ = Bot" fun produce :: "checker_type \ checker_type \ checker_type" where "produce (TopType ts) (Type ts') = TopType (ts@(to_ct_list ts'))" | "produce (Type ts) (Type ts') = Type (ts@ts')" | "produce (Type ts') (TopType ts) = TopType ts" | "produce (TopType ts') (TopType ts) = TopType ts" | "produce _ _ = Bot" fun type_update :: "checker_type \ ct list \ checker_type \ checker_type" where "type_update curr_type cons prods = produce (consume curr_type cons) prods" fun select_return_top :: "[ct list] \ ct \ ct \ checker_type" where 
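(* select_return_top merges the two candidate operand types of a select instruction against the
   current top-typed stack ts: it keeps all but the top three entries of ts and appends the other
   candidate when one of them is TAny (hence TAny when both are), the shared type when both are
   TSome and equal, and collapses to Bot when two distinct TSome types meet. *)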
"select_return_top ts ct1 TAny = TopType ((take (length ts - 3) ts) @ [ct1])" | "select_return_top ts TAny ct2 = TopType ((take (length ts - 3) ts) @ [ct2])" | "select_return_top ts (TSome t1) (TSome t2) = (if (t1 = t2) then (TopType ((take (length ts - 3) ts) @ [TSome t1])) else Bot)" fun type_update_select :: "checker_type \ checker_type" where "type_update_select (Type ts) = (if (length ts \ 3 \ (ts!(length ts-2)) = (ts!(length ts-3))) then consume (Type ts) [TAny, TSome T_i32] else Bot)" | "type_update_select (TopType ts) = (case length ts of 0 \ TopType [TAny] | Suc 0 \ type_update (TopType ts) [TSome T_i32] (TopType [TAny]) | Suc (Suc 0) \ consume (TopType ts) [TSome T_i32] | _ \ type_update (TopType ts) [TAny, TAny, TSome T_i32] (select_return_top ts (ts!(length ts-2)) (ts!(length ts-3))))" | "type_update_select _ = Bot" fun c_types_agree :: "checker_type \ t list \ bool" where "c_types_agree (Type ts) ts' = (ts = ts')" | "c_types_agree (TopType ts) ts' = ct_suffix ts (to_ct_list ts')" | "c_types_agree Bot _ = False" lemma consume_type: assumes "consume (Type ts) ts' = c_t" "c_t \ Bot" shows "\ts''. ct_list_eq (to_ct_list ts) ((to_ct_list ts'')@ts') \ c_t = Type ts''" proof - { assume a1: "(if ct_suffix ts' (map TSome ts) then Type (take (length ts - length ts') ts) else Bot) = c_t" assume a2: "c_t \ Bot" obtain ccs :: "ct list \ ct list \ ct list" and ccsa :: "ct list \ ct list \ ct list" where f3: "\cs csa. \ ct_suffix cs csa \ csa = ccs cs csa @ ccsa cs csa \ ct_list_eq (ccsa cs csa) cs" using ct_suffixE by moura have f4: "ct_suffix ts' (map TSome ts)" using a2 a1 by metis then have f5: "ct_list_eq (ccsa ts' (map TSome ts)) ts'" using f3 by blast have f6: "take (length (map TSome ts) - length (ccsa ts' (map TSome ts))) (map TSome ts) @ ccsa ts' (map TSome ts) = map TSome ts" using f4 f3 by (metis (full_types) suffixI suffix_take) have "\cs. ct_list_eq (cs @ ccsa ts' (map TSome ts)) (cs @ ts')" using f5 ct_list_eq_concat ct_list_eq_refl by blast then have "\tsa. ct_list_eq (map TSome ts) (map TSome tsa @ ts') \ c_t = Type tsa" using f6 f5 f4 a1 by (metis (no_types) ct_list_eq_length length_map take_map) } thus ?thesis using assms to_ct_list_def by simp qed lemma consume_top_geq: assumes "consume (TopType ts) ts' = c_t" "length ts \ length ts'" "c_t \ Bot" shows "(\as bs. ts = as@bs \ ct_list_eq bs ts' \ c_t = TopType as)" proof - consider (1) "ct_suffix ts' ts" | (2) "\ct_suffix ts' ts" "ct_suffix ts ts'" | (3) "\ct_suffix ts' ts" "\ct_suffix ts ts'" by blast thus ?thesis proof (cases) case 1 hence "TopType (take (length ts - length ts') ts) = c_t" using assms by simp thus ?thesis using assms(2) 1 ct_list_eq_def unfolding ct_suffix_def by (metis (no_types, lifting) append_eq_append_conv append_take_drop_id diff_diff_cancel length_drop list_all2_lengthD) next case 2 thus ?thesis using assms append_eq_append_conv ct_list_eq_commute unfolding ct_suffix_def by (metis append.left_neutral ct_suffix_def ct_suffix_length le_antisym) next case 3 thus ?thesis using assms by auto qed qed lemma consume_top_leq: assumes "consume (TopType ts) ts' = c_t" "length ts \ length ts'" "c_t \ Bot" shows "c_t = TopType []" using assms append_eq_conv_conj by fastforce lemma consume_type_type: assumes "consume xs cons = (Type t_int)" shows "\tn. xs = Type tn" using assms apply (cases xs) apply simp_all apply (metis checker_type.distinct(1) checker_type.distinct(5)) done lemma produce_type_type: assumes "produce xs cons = (Type tm)" shows "\tn. 
xs = Type tn" apply (cases xs; cases cons) using assms apply simp_all done lemma consume_weaken_type: assumes "consume (Type tn) cons = (Type t_int)" shows "consume (Type (ts@tn)) cons = (Type (ts@t_int))" proof - obtain ts' where "ct_list_eq (to_ct_list tn) (to_ct_list ts' @ cons) \ Type t_int = Type ts'" using consume_type[OF assms] by blast have cond:"ct_suffix cons (to_ct_list tn)" using assms by (simp, metis checker_type.distinct(5)) hence res:"t_int = take (length tn - length cons) tn" using assms by simp have "ct_suffix cons (to_ct_list (ts@tn))" using cond unfolding to_ct_list_def by (metis append_assoc ct_suffix_def map_append) moreover have "ts@t_int = take (length (ts@tn) - length cons) (ts@tn)" using res take_append cond ct_suffix_length to_ct_list_def by fastforce ultimately show ?thesis by simp qed lemma produce_weaken_type: assumes "produce (Type tn) cons = (Type tm)" shows "produce (Type (ts@tn)) cons = (Type (ts@tm))" using assms by (cases cons, simp_all) lemma produce_nil: "produce ts (Type []) = ts" using to_ct_list_def by (cases ts, simp_all) lemma c_types_agree_id: "c_types_agree (Type ts) ts" by simp lemma c_types_agree_top1: "c_types_agree (TopType []) ts" using ct_suffix_ts_conv_suffix to_ct_list_def by (simp add: ct_suffix_nil) lemma c_types_agree_top2: assumes "ct_list_eq ts (to_ct_list ts'')" shows "c_types_agree (TopType ts) (ts'@ts'')" using assms ct_list_eq_commute ct_suffix_def to_ct_list_def by auto lemma c_types_agree_imp_ct_list_eq: assumes "c_types_agree (TopType cts) ts" shows "\ts' ts''. (ts = ts'@ts'') \ ct_list_eq cts (to_ct_list ts'')" using assms ct_suffix_def to_ct_list_def by (simp, metis ct_list_eq_commute ct_list_eq_ts_conv_eq ct_suffix_ts_conv_suffix suffixE to_ct_list_append(2)) lemma c_types_agree_not_bot_exists: assumes "ts \ Bot" shows "\ts_c. c_types_agree ts ts_c" using assms ct_suffix_exists by (cases ts, simp_all) lemma consume_c_types_agree: assumes "consume (Type ts) cts = (Type ts')" "c_types_agree ctn ts" shows "\c_t'. consume ctn cts = c_t' \ c_types_agree c_t' ts'" using assms proof (cases ctn) case (TopType x1) have 1:"ct_suffix cts (to_ct_list ts)" using assms by (simp, metis checker_type.distinct(5)) hence "ct_suffix cts x1 \ ct_suffix x1 cts" using TopType 1 assms(2) ct_suffix_shared by simp thus ?thesis proof (rule disjE) assume local_assms:"ct_suffix cts x1" hence 2:"consume (TopType x1) cts = TopType (take (length x1 - length cts) x1)" by simp have "(take (length ts - length cts) ts) = ts'" using assms 1 by simp hence "c_types_agree (TopType (take (length x1 - length cts) x1)) ts'" using 2 assms local_assms TopType ct_suffix_take by (simp, metis length_map take_map to_ct_list_def) thus ?thesis using 2 TopType by simp next assume local_assms:"ct_suffix x1 cts" hence 3:"consume (TopType x1) cts = TopType []" by (simp add: ct_suffix_length) thus ?thesis using TopType c_types_agree_top1 by blast qed qed simp_all lemma type_update_type: assumes "type_update (Type ts) (to_ct_list cons) prods = ts'" "ts' \ Bot" shows "(ts' = prods \ (\ts_c. prods = (TopType ts_c))) \ (\ts_a ts_b. 
prods = Type ts_a \ ts = ts_b@cons \ ts' = Type (ts_b@ts_a))" using assms apply (cases prods) apply simp_all apply (metis (full_types) produce.simps(3) produce.simps(7)) using ct_suffix_ts_conv_suffix suffix_take to_ct_list_def apply fastforce done lemma type_update_empty: "type_update ts cons (Type []) = consume ts cons" using produce_nil by simp lemma type_update_top_top: assumes "type_update (TopType ts) (to_ct_list cons) (Type prods) = (TopType ts')" "c_types_agree (TopType ts') t_ag" shows "ct_suffix (to_ct_list prods) ts'" "\t_ag'. t_ag = t_ag'@prods \ c_types_agree (TopType ts) (t_ag'@cons)" proof - consider (1) "ct_suffix (to_ct_list cons) ts" | (2) "\ct_suffix (to_ct_list cons) ts" "ct_suffix ts (to_ct_list cons)" | (3) "\ct_suffix (to_ct_list cons) ts" "\ct_suffix ts (to_ct_list cons)" by blast hence "ct_suffix (to_ct_list prods) ts' \ (\t_ag'. t_ag = t_ag'@prods \ c_types_agree (TopType ts) (t_ag'@cons))" proof (cases) case 1 hence "ts' = (take (length ts - length cons) ts) @ to_ct_list prods" using assms(1) to_ct_list_def by simp moreover then obtain t_ag' where "t_ag = t_ag' @ prods" "ct_suffix (take (length ts - length cons) ts) (to_ct_list t_ag')" using assms(2) ct_suffix_cons_ct_list1 unfolding c_types_agree.simps by blast moreover hence "ct_suffix ts (to_ct_list (t_ag'@cons))" using 1 ct_suffix_imp_ct_list_eq ct_suffix_extend_ct_list_eq to_ct_list_def by fastforce ultimately show ?thesis using c_types_agree.simps(2) ct_list_eq_ts_conv_eq ct_suffix_def by auto next case 2 thus ?thesis using assms by (metis append.assoc c_types_agree.simps(2) checker_type.inject(1) consume.simps(2) ct_list_eq_ts_conv_eq ct_suffix_cons_ct_list ct_suffix_def map_append produce.simps(1) to_ct_list_def type_update.simps) next case 3 thus ?thesis using assms by simp qed thus "ct_suffix (to_ct_list prods) ts'" "\t_ag'. t_ag = t_ag'@prods \ c_types_agree (TopType ts) (t_ag'@cons)" by simp_all qed lemma type_update_select_length0: assumes "type_update_select (TopType cts) = tm" "length cts = 0" "tm \ Bot" shows "tm = TopType [TAny]" using assms by simp lemma type_update_select_length1: assumes "type_update_select (TopType cts) = tm" "length cts = 1" "tm \ Bot" shows "ct_list_eq cts [TSome T_i32]" "tm = TopType [TAny]" proof - have 1:"type_update (TopType cts) [TSome T_i32] (TopType [TAny]) = tm" using assms(1,2) by simp hence "ct_suffix cts [TSome T_i32] \ ct_suffix [TSome T_i32] cts" using assms(3) by (metis consume.simps(2) produce.simps(7) type_update.simps) thus "ct_list_eq cts [TSome T_i32]" using assms(2,3) ct_suffix_imp_ct_list_eq by (metis One_nat_def Suc_length_conv ct_list_eq_commute diff_Suc_1 drop_0 list.size(3)) show "tm = TopType [TAny]" using 1 assms(3) consume_top_leq by (metis One_nat_def assms(2) diff_Suc_1 diff_is_0_eq length_Cons list.size(3) produce.simps(4,7) type_update.simps) qed lemma type_update_select_length2: assumes "type_update_select (TopType cts) = tm" "length cts = 2" "tm \ Bot" shows "\t1 t2. 
cts = [t1, t2] \ ct_eq t2 (TSome T_i32) \ tm = TopType [t1]" proof - obtain x y where cts_def:"cts = [x,y]" using assms(2) List.length_Suc_conv[of cts "Suc 0"] by (metis length_0_conv length_Suc_conv numeral_2_eq_2) moreover hence "consume (TopType [x,y]) [TSome T_i32] = tm" using assms(1,2) by simp moreover hence "ct_suffix [x,y] [TSome T_i32] \ ct_suffix [TSome T_i32] [x,y]" using assms(3) by (metis consume.simps(2)) hence "ct_suffix [TSome T_i32] ([x]@[y])" using assms(2) ct_suffix_length by fastforce moreover hence "ct_eq y (TSome T_i32)" by (metis ct_eq_commute ct_list_eq_def ct_suffix_cons2 list.rel_sel list.sel(1) list.simps(3) list.size(4)) ultimately show ?thesis by auto qed lemma type_update_select_length3: assumes "type_update_select (TopType cts) = (TopType ctm)" "length cts \ 3" shows "\cts' ct1 ct2 ct3. cts = cts'@[ct1, ct2, ct3] \ ct_eq ct3 (TSome T_i32)" proof - obtain cts' cts'' where cts_def:"cts = cts'@ cts''" "length cts'' = 3" using assms(2) by (metis append_take_drop_id diff_diff_cancel length_drop) then obtain ct1 cts''2 where "cts'' = ct1#cts''2" "length cts''2 = Suc (Suc 0)" using List.length_Suc_conv[of cts' "Suc (Suc 0)"] by (metis length_Suc_conv numeral_3_eq_3) then obtain ct2 ct3 where "cts'' = [ct1,ct2,ct3]" using List.length_Suc_conv[of cts''2 "Suc 0"] by (metis length_0_conv length_Suc_conv) hence cts_def2:"cts = cts'@ [ct1,ct2,ct3]" using cts_def by simp obtain nat where "length cts = Suc (Suc (Suc nat))" using assms(2) by (simp add: cts_def) hence "(type_update (TopType cts) [TAny, TAny, TSome T_i32] (select_return_top cts (cts ! (length cts - 2)) (cts ! (length cts - 3)))) = TopType ctm" using assms by simp then obtain c_mid where "consume (TopType cts) [TAny, TAny, TSome T_i32] = TopType c_mid" by (metis consume.simps(2) produce.simps(6) type_update.simps) hence "ct_suffix [TAny, TAny, TSome T_i32] (cts'@ [ct1,ct2,ct3])" using assms(2) consume_top_geq cts_def2 by (metis checker_type.distinct(3) ct_suffix_def length_Cons list.size(3) numeral_3_eq_3) hence "ct_eq ct3 (TSome T_i32)" using ct_suffix_def ct_list_eq_def by (simp, metis append_eq_append_conv length_Cons list_all2_Cons list_all2_lengthD) thus ?thesis using cts_def2 by simp qed lemma type_update_select_type_length3: assumes "type_update_select (Type tn) = (Type tm)" shows "\t ts'. 
tn = ts'@[t, t, T_i32]" proof - have tn_cond:"(length tn \ 3 \ (tn!(length tn-2)) = (tn!(length tn-3)))" using assms by (simp, metis checker_type.distinct(5)) hence tm_def:"consume (Type tn) [TAny, TSome T_i32] = Type tm" using assms by simp obtain tn' tn'' where tn_split:"tn = tn'@tn''" "length tn'' = 3" using assms tn_cond by (metis append_take_drop_id diff_diff_cancel length_drop) then obtain t1 tn''2 where "tn'' = t1#tn''2" "length tn''2 = Suc (Suc 0)" by (metis length_Suc_conv numeral_3_eq_3) then obtain t2 t3 where tn''_def:"tn'' = [t1,t2,t3]" using List.length_Suc_conv[of tn''2 "Suc 0"] by (metis length_0_conv length_Suc_conv) hence tn_def:"tn = tn'@ [t1,t2,t3]" using tn_split by simp hence t1_t2_eq:"t1 = t2" using tn_cond by (metis (no_types, lifting) Suc_diff_Suc Suc_eq_plus1_left Suc_lessD tn''_def add_diff_cancel_right' diff_is_0_eq length_append neq0_conv not_less_eq_eq nth_Cons_0 nth_Cons_numeral nth_append numeral_2_eq_2 numeral_3_eq_3 numeral_One tn_split(2) zero_less_diff) have "ct_suffix [TAny, TSome T_i32] (to_ct_list (tn' @ [t1, t2, t3]))" using tn_def tm_def by (simp, metis checker_type.distinct(5)) hence "t3 = T_i32" using ct_suffix_unfold_one[of "[TAny]" "TSome T_i32" "to_ct_list (tn' @ [t1, t2])" "TSome t3"] ct_eq.simps(1) unfolding to_ct_list_def by simp thus ?thesis using t1_t2_eq tn_def by simp qed lemma select_return_top_exists: assumes "select_return_top cts c1 c2 = ctm" "ctm \ Bot" shows "\xs. ctm = TopType xs" using assms by (meson select_return_top.elims) lemma type_update_select_top_exists: assumes "type_update_select xs = (TopType tm)" shows "\tn. xs = TopType tn" using assms proof (cases xs) case (Type x2) thus ?thesis using assms by (simp, metis checker_type.distinct(1) checker_type.distinct(3) consume.simps(1)) qed simp_all lemma type_update_select_conv_select_return_top: assumes "ct_suffix [TSome T_i32] cts" "length cts \ 3" shows "type_update_select (TopType cts) = (select_return_top cts (cts!(length cts-2)) (cts!(length cts-3)))" proof - obtain nat where nat_def:"length cts = Suc (Suc (Suc nat))" using assms(2) by (metis add_eq_if diff_Suc_1 le_Suc_ex numeral_3_eq_3 nat.distinct(2)) have "ct_suffix [TAny, TAny, TSome T_i32] cts" using assms ct_suffix_extend_any1 by simp then obtain cts' where "consume (TopType cts) [TAny, TAny, TSome T_i32] = TopType cts'" by simp thus ?thesis using nat_def select_return_top_exists apply (cases "select_return_top cts (cts ! Suc nat) (cts ! nat)") apply simp_all - apply blast + apply (metis checker_type.distinct(1) checker_type.distinct(5)) done qed lemma select_return_top_ct_eq: assumes "select_return_top cts c1 c2 = TopType ctm" "length cts \ 3" "c_types_agree (TopType ctm) cm" shows "\c' cm'. 
cm = cm'@[c'] \ ct_suffix (take (length cts - 3) cts) (to_ct_list cm') \ ct_eq c1 (TSome c') \ ct_eq c2 (TSome c')" proof (cases c1) case TAny note outer_TAny = TAny show ?thesis proof (cases c2) case TAny hence "take (length cts - 3) cts @ [TAny] = ctm" using outer_TAny assms ct_suffix_cons_ct_list by simp then obtain cm' c where "cm = cm' @ [c]" "ct_suffix (take (length cts - 3) cts) (to_ct_list cm')" using ct_suffix_cons_ct_list[of "take (length cts - 3) cts" "[TAny]"] assms(3) c_types_agree.simps(2)[of ctm cm] unfolding ct_list_eq_def to_ct_list_def by (metis Suc_leI impossible_Cons length_Cons length_map list.exhaust list.size(3) list_all2_conv_all_nth zero_less_Suc) thus ?thesis using TAny outer_TAny ct_eq.simps(2) by blast next case (TSome x2) hence "take (length cts - 3) cts @ [TSome x2] = ctm" using outer_TAny assms ct_suffix_cons_ct_list by simp then obtain cm' where "cm = cm' @ [x2]" "ct_suffix (take (length cts - 3) cts) (to_ct_list cm')" using ct_suffix_cons_ct_list[of "take (length cts - 3) cts" "[TSome x2]"] assms(3) c_types_agree.simps(2)[of ctm cm] ct_list_eq_ts_conv_eq unfolding ct_list_eq_def to_ct_list_def by (metis (no_types, hide_lams) list.simps(8,9)) thus ?thesis using TSome outer_TAny by simp qed next case (TSome x2) note outer_TSome = TSome show ?thesis proof (cases c2) case TAny hence "take (length cts - 3) cts @ [TSome x2] = ctm" using TSome assms ct_suffix_cons_ct_list by simp then obtain cm' where "cm = cm' @ [x2]" "ct_suffix (take (length cts - 3) cts) (to_ct_list cm')" using ct_suffix_cons_ct_list[of "take (length cts - 3) cts" "[TSome x2]"] assms(3) c_types_agree.simps(2)[of ctm cm] ct_list_eq_ts_conv_eq unfolding ct_list_eq_def to_ct_list_def by (metis (no_types, hide_lams) list.simps(8,9)) thus ?thesis using TSome TAny by simp next case (TSome x3) hence x_eq:"x2 = x3" using outer_TSome assms ct_suffix_cons_ct_list by (metis checker_type.distinct(3) select_return_top.simps(3)) hence "take (length cts - 3) cts @ [TSome x2] = ctm" using TSome outer_TSome assms ct_suffix_cons_ct_list by (metis checker_type.inject(1) select_return_top.simps(3)) then obtain cm' where "cm = cm' @ [x2]" "ct_suffix (take (length cts - 3) cts) (to_ct_list cm')" using ct_suffix_cons_ct_list[of "take (length cts - 3) cts" "[TSome x2]"] assms(3) c_types_agree.simps(2)[of ctm cm] ct_list_eq_ts_conv_eq unfolding ct_list_eq_def to_ct_list_def by (metis (no_types, hide_lams) list.simps(8,9)) thus ?thesis using TSome outer_TSome x_eq by simp qed qed end \ No newline at end of file diff --git a/thys/WebAssembly/Wasm_Properties.thy b/thys/WebAssembly/Wasm_Properties.thy --- a/thys/WebAssembly/Wasm_Properties.thy +++ b/thys/WebAssembly/Wasm_Properties.thy @@ -1,3428 +1,3428 @@ section \Lemmas for Soundness Proof\ theory Wasm_Properties imports Wasm_Properties_Aux begin subsection \Preservation\ lemma t_cvt: assumes "cvt t sx v = Some v'" shows "t = typeof v'" using assms unfolding cvt_def typeof_def apply (cases t) apply (simp add: option.case_eq_if, metis option.discI option.inject v.simps(17)) apply (simp add: option.case_eq_if, metis option.discI option.inject v.simps(18)) apply (simp add: option.case_eq_if, metis option.discI option.inject v.simps(19)) apply (simp add: option.case_eq_if, metis option.discI option.inject v.simps(20)) done lemma store_preserved1: assumes "\s;vs;es\ \_i \s';vs';es'\" "store_typing s \" "\\\ \ es : (ts _> ts')" "\ = ((s_inst \)!i)\local := local ((s_inst \)!i) @ (map typeof vs), label := arb_label, return := arb_return\" "i < length (s_inst \)" shows 
"store_typing s' \" using assms proof (induction arbitrary: \ arb_label arb_return ts ts' rule: reduce.induct) case (callcl_host_Some cl t1s t2s f ves vcs n m s hs s' vcs' vs i) obtain ts'' where ts''_def:"\\\ \ ves : (ts _> ts'')" "\\\ \ [Callcl cl] : (ts'' _> ts')" using callcl_host_Some(8) e_type_comp by fastforce have ves_c:"const_list ves" using is_const_list[OF callcl_host_Some(2)] by simp then obtain tvs where tvs_def:"ts'' = ts @ tvs" "length t1s = length tvs" "\\\ \ ves : ([] _> tvs)" using ts''_def(1) e_type_const_list[of ves \ \ ts ts''] callcl_host_Some by fastforce hence "ts'' = ts @ t1s" "ts' = ts @ t2s" using e_type_callcl_host[OF ts''_def(2) callcl_host_Some(1)] by auto moreover hence "list_all2 types_agree t1s vcs" using e_typing_imp_list_types_agree[where ?ts' = "[]"] callcl_host_Some(2) tvs_def(1,3) by fastforce thus ?case using store_extension_imp_store_typing host_apply_preserve_store[OF _ callcl_host_Some(6)] callcl_host_Some(7) by fastforce next case (set_global s i j v s' vs) obtain insts fs clss bss gs where "s = \inst = insts, funcs = fs, tab = clss, mem = bss, globs = gs\" using s.cases by blast moreover obtain insts' fs' clss' bss' gs' where "s' = \inst = insts', funcs = fs', tab = clss', mem = bss', globs = gs'\" using s.cases by blast moreover obtain \s tfs ns ms tgs where "\ = \s_inst = \s, s_funcs = tfs, s_tab = ns, s_mem = ms, s_globs = tgs\" using s_context.cases by blast moreover note s_S_defs = calculation (* Prove that inst, tab and mem are not altered. *) have "insts = insts'" "fs = fs'" "clss = clss'" "bss = bss'" using set_global(1) s_S_defs(1,2) unfolding supdate_glob_def supdate_glob_s_def by (metis s.ext_inject s.update_convs(5))+ hence "list_all2 (inst_typing \) insts' \s" "list_all2 (cl_typing \) fs' tfs" "list_all (tab_agree \) (concat clss')" "list_all2 (\cls n. n \ length cls) clss' ns" "list_all2 mem_agree bss' ms" using set_global(2) s_S_defs unfolding store_typing.simps by auto moreover have "list_all2 glob_agree gs' tgs" proof - have gs_agree:"list_all2 glob_agree gs tgs" using set_global(2) s_S_defs unfolding store_typing.simps by auto have "length gs = length gs'" using s_S_defs(1,2) set_global(1) unfolding supdate_glob_def supdate_glob_s_def by (metis length_list_update s.select_convs(5) s.update_convs(5)) moreover obtain k where k_def:"(sglob_ind s i j) = k" by blast hence "\j'. \j' \ k; j' < length gs\ \ gs!j' = gs'!j'" using s_S_defs(1,2) set_global(1) unfolding supdate_glob_def supdate_glob_s_def by auto hence "\j'. \j' \ k; j' < length gs\ \ glob_agree (gs'!j') (tgs!j')" using gs_agree by (metis list_all2_conv_all_nth) moreover have "glob_agree (gs'!k) (tgs!k)" proof - obtain ts'' where ts''_def:"\ \ [C v] : (ts _> ts'')" "\ \ [Set_global j] : (ts'' _> ts')" by (metis b_e_type_comp2_unlift set_global.prems(2)) have b_es:"ts'' = ts@[typeof v]" "ts = ts'" "global \ ! 
j = \tg_mut = T_mut, tg_t = typeof v\" "j < length (global \)" using b_e_type_value[OF ts''_def(1)] b_e_type_set_global[OF ts''_def(2)] by auto hence "j < length (global ((s_inst \)!i))" using set_global(4) by fastforce hence globs_agree:"k < length (s_globs \)" "glob_agree (gs!k) (tgs!k)" "(tgs!k) = (global \)!j" using store_typing_imp_glob_agree[OF set_global(2,5)] b_es(4) s_S_defs(1,3) k_def set_global(4) unfolding sglob_def by auto hence "g_mut (gs!k) = T_mut" "typeof (g_val (gs!k)) = typeof v" using b_es(3) unfolding glob_agree_def by auto hence "g_mut (gs'!k) = T_mut" "typeof (g_val (gs'!k)) = typeof v" using set_global(1) k_def globs_agree(1) store_typing_imp_glob_length_eq[OF set_global(2)] s_S_defs(1,2) unfolding supdate_glob_def supdate_glob_s_def by auto thus ?thesis using globs_agree(3) b_es(3) unfolding glob_agree_def by fastforce qed ultimately show ?thesis using gs_agree unfolding list_all2_conv_all_nth by fastforce qed ultimately show ?case using store_typing.intros by simp next (* The following are all special cases of store_preserved_mem. *) case (store_Some t v s i j m k off mem' vs a) show ?case using store_preserved_mem[OF store_Some(5) _ _ store_Some(3)] store_size[OF store_Some(4)] by fastforce next case (store_packed_Some t v s i j m k off tp mem' vs a) thus ?case using store_preserved_mem[OF store_packed_Some(5) _ _ store_packed_Some(3)] store_packed_size[OF store_packed_Some(4)] by simp next case (grow_memory s i j n mem c mem' vs) show ?case using store_preserved_mem[OF grow_memory(5)_ _ grow_memory(2)] mem_grow_size[OF grow_memory(4)] by simp next case (label s vs es i s' vs' es' k lholed les les') obtain \' t1s t2s arb_label' arb_return' where es_def:"\' = \\label := arb_label', return := arb_return'\" "\\\' \ es : (t1s _> t2s)" using types_exist_lfilled_weak[OF label(2,6)] by fastforce thus ?case using label(4)[OF label(5) es_def(2) _ label(8)] label(7) by fastforce next case (local s vs es i s' vs' es' v0s n j) obtain tls where t_local:"(\\((s_inst \)!i)\local := (local ((s_inst \)!i)) @ (map typeof vs), return := Some tls\ \ es : ([] _> tls))" "ts' = ts @ tls" "i < length (s_inst \)" using e_type_local[OF local(4)] by blast+ show ?case using local(2)[OF local(3) t_local(1) _ t_local(3), of "(Some tls)" "label ((s_inst \)!i)" ] by fastforce qed (simp_all) lemma store_preserved: assumes "\s;vs;es\ \_i \s';vs';es'\" "store_typing s \" "\\None \_i vs;es : ts" shows "store_typing s' \" proof - show ?thesis using store_preserved1[OF assms(1,2), of _ "[]" "ts" None "label (s_inst \!i)"] s_type_unfold[OF assms(3)] by fastforce qed lemma typeof_unop_testop: assumes "\\\ \ [$C v, $e] : (ts _> ts')" "(e = (Unop_i t iop)) \ (e = (Unop_f t fop)) \ (e = (Testop t testop))" shows "(typeof v) = t" "e = (Unop_f t fop) \ is_float_t t" proof - have "\ \ [C v, e] : (ts _> ts')" using unlift_b_e assms(1) by simp then obtain ts'' where ts''_def:"\ \ [C v] : (ts _> ts'')" "\ \ [e] : (ts'' _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v]"] by fastforce show "(typeof v) = t" "e = (Unop_f t fop) \ is_float_t t" using b_e_type_value[OF ts''_def(1)] assms(2) b_e_type_unop_testop[OF ts''_def(2)] by simp_all qed lemma typeof_cvtop: assumes "\\\ \ [$C v, $e] : (ts _> ts')" "e = Cvtop t1 cvtop t sx" shows "(typeof v) = t" "cvtop = Convert \ (t1 \ t) \ ((sx = None) = ((is_float_t t1 \ is_float_t t) \ (is_int_t t1 \ is_int_t t \ (t_length t1 < t_length t))))" "cvtop = Reinterpret \ (t1 \ t) \ t_length t1 = t_length t" proof - have "\ \ [C v, e] : (ts _> ts')" using unlift_b_e 
assms(1) by simp then obtain ts'' where ts''_def:"\ \ [C v] : (ts _> ts'')" "\ \ [e] : (ts'' _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v]"] by fastforce show "(typeof v) = t" "cvtop = Convert \ (t1 \ t) \ (sx = None) = ((is_float_t t1 \ is_float_t t) \ (is_int_t t1 \ is_int_t t \ (t_length t1 < t_length t)))" "cvtop = Reinterpret \ (t1 \ t) \ t_length t1 = t_length t" using b_e_type_value[OF ts''_def(1)] b_e_type_cvtop[OF ts''_def(2) assms(2)] by simp_all qed lemma types_preserved_unop_testop_cvtop: assumes "\[$C v, $e]\ \ \[$C v']\" "\\\ \ [$C v, $e] : (ts _> ts')" "(e = (Unop_i t iop)) \ (e = (Unop_f t fop)) \ (e = (Testop t testop)) \ (e = (Cvtop t2 cvtop t sx))" shows "\\\ \ [$C v'] : (ts _> ts')" proof - have "\ \ [C v, e] : (ts _> ts')" using unlift_b_e assms(2) by simp then obtain ts'' where ts''_def:"\ \ [C v] : (ts _> ts'')" "\ \ [e] : (ts'' _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v]"] by fastforce have "ts@[arity_1_result e] = ts'" "(typeof v) = t" using b_e_type_value[OF ts''_def(1)] assms(3) b_e_type_unop_testop(1)[OF ts''_def(2)] b_e_type_cvtop(1)[OF ts''_def(2)] by (metis butlast_snoc, metis last_snoc) moreover have "arity_1_result e = typeof (v')" using assms(1,3) apply (cases rule: reduce_simple.cases) apply (simp_all add: arity_1_result_def wasm_deserialise_type t_cvt) apply (auto simp add: typeof_def) done hence "\ \ [C v'] : ([] _> [arity_1_result e])" using b_e_typing.const by metis ultimately show "\\\ \ [$C v'] : (ts _> ts')" using e_typing_s_typing.intros(1) b_e_typing.weakening[of \ "[C v']" "[]" "[arity_1_result e]" ts] by fastforce qed lemma typeof_binop_relop: assumes "\\\ \ [$C v1, $C v2, $e] : (ts _> ts')" "e = Binop_i t iop \ e = Binop_f t fop \ e = Relop_i t irop \ e = Relop_f t frop" shows "typeof v1 = t" "typeof v2 = t" "e = Binop_f t fop \ is_float_t t" "e = Relop_f t frop \ is_float_t t" proof - have "\ \ [C v1, C v2, e] : (ts _> ts')" using unlift_b_e assms(1) by simp then obtain ts'' where ts''_def:"\ \ [C v1, C v2] : (ts _> ts'')" "\ \ [e] : (ts'' _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v1, C v2]"] by fastforce then obtain ts_id where ts_id_def:"ts_id@[t,t] = ts''" "ts' = ts_id @ [arity_2_result e]" "e = Binop_f t fop \ is_float_t t" "e = Relop_f t frop \ is_float_t t" using assms(2) b_e_type_binop_relop[of \ e ts'' ts' t] by blast thus "typeof v1 = t" "typeof v2 = t" "e = Binop_f t fop \ is_float_t t" "e = Relop_f t frop \ is_float_t t" using ts''_def b_e_type_comp[of \ "[C v1]" "C v2" ts ts''] b_e_type_value2 by fastforce+ qed lemma types_preserved_binop_relop: assumes "\[$C v1, $C v2, $e]\ \ \[$C v']\" "\\\ \ [$C v1, $C v2, $e] : (ts _> ts')" "e = Binop_i t iop \ e = Binop_f t fop \ e = Relop_i t irop \ e = Relop_f t frop" shows "\\\ \ [$C v'] : (ts _> ts')" proof - have "\ \ [C v1, C v2, e] : (ts _> ts')" using unlift_b_e assms(2) by simp then obtain ts'' where ts''_def:"\ \ [C v1, C v2] : (ts _> ts'')" "\ \ [e] : (ts'' _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v1, C v2]"] by fastforce then obtain ts_id where ts_id_def:"ts_id@[t,t] = ts''" "ts' = ts_id @ [arity_2_result e]" using assms(3) b_e_type_binop_relop[of \ e ts'' ts' t] by blast hence "\ \ [C v1] : (ts _> ts_id@[t])" using ts''_def b_e_type_comp[of \ "[C v1]" "C v2" ts ts''] b_e_type_value by fastforce hence "ts@[arity_2_result e] = ts'" using b_e_type_value ts_id_def(2) by fastforce moreover have "arity_2_result e = typeof (v')" using assms(1,3) by (cases rule: reduce_simple.cases) (auto simp add: arity_2_result_def typeof_def) 
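(* As in the unop/testop/cvtop case above: the reduct is a single constant whose type is the
   declared result type arity_2_result e, so the constant typing rule followed by weakening and
   lifting via e_typing_s_typing.intros(1) closes the goal. *)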
hence "\ \ [C v'] : ([] _> [arity_2_result e])" using b_e_typing.const by metis ultimately show ?thesis using e_typing_s_typing.intros(1) b_e_typing.weakening[of \ "[C v']" "[]" "[arity_2_result e]" ts] by fastforce qed lemma types_preserved_drop: assumes "\[$C v, $e]\ \ \[]\" "\\\ \ [$C v, $e] : (ts _> ts')" "(e = (Drop))" shows "\\\ \ [] : (ts _> ts')" proof - have "\ \ [C v, e] : (ts _> ts')" using unlift_b_e assms(2) by simp then obtain ts'' where ts''_def:"\ \ [C v] : (ts _> ts'')" "\ \ [e] : (ts'' _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v]"] by fastforce hence "ts'' = ts@[typeof v]" using b_e_type_value by blast hence "ts = ts'" using ts''_def assms(3) b_e_type_drop by blast hence "\ \ [] : (ts _> ts')" using b_e_type_empty by simp thus ?thesis using e_typing_s_typing.intros(1) by fastforce qed lemma types_preserved_select: assumes "\[$C v1, $C v2, $C vn, $e]\ \ \[$C v3]\" "\\\ \ [$C v1, $C v2, $C vn, $e] : (ts _> ts')" "(e = Select)" shows "\\\ \ [$C v3] : (ts _> ts')" proof - have "\ \ [C v1, C v2, C vn, e] : (ts _> ts')" using unlift_b_e assms(2) by simp then obtain t1s where t1s_def:"\ \ [C v1, C v2, C vn] : (ts _> t1s)" "\ \ [e] : (t1s _> ts')" using b_e_type_comp[where ?e = e and ?es = "[C v1, C v2, C vn]"] by fastforce then obtain t2s t where t2s_def:"t1s = t2s @ [t, t, (T_i32)]" "ts' = t2s@[t]" using b_e_type_select[of \ e t1s] assms by fastforce hence "\ \ [C v1, C v2] : (ts _> t2s@[t,t])" using t1s_def t2s_def b_e_type_value_list[of \ "[C v1, C v2]" "vn" ts "t2s@[t,t]"] by fastforce hence v2_t_def:"\ \ [C v1] : (ts _> t2s@[t])" "typeof v2 = t" using t1s_def t2s_def b_e_type_value_list[of \ "[C v1]" "v2" ts "t2s@[t]"] by fastforce+ hence v1_t_def:"ts = t2s" "typeof v1 = t" using b_e_type_value by fastforce+ have "typeof v3 = t" using assms(1) v2_t_def(2) v1_t_def(2) by (cases rule: reduce_simple.cases, simp_all) hence "\ \ [C v3] : (ts _> ts')" using b_e_typing.const b_e_typing.weakening t2s_def(2) v1_t_def(1) by fastforce thus ?thesis using e_typing_s_typing.intros(1) by fastforce qed lemma types_preserved_block: assumes "\vs @ [$Block (tn _> tm) es]\ \ \[Label m [] (vs @ ($* es))]\" "\\\ \ vs @ [$Block (tn _> tm) es] : (ts _> ts')" "const_list vs" "length vs = n" "length tn = n" "length tm = m" shows "\\\ \ [Label m [] (vs @ ($* es))] : (ts _> ts')" proof - obtain \' where c_def:"\' = \\label := [tm] @ label \\" by blast obtain ts'' where ts''_def:"\\\ \ vs : (ts _> ts'')" "\\\ \ [$Block (tn _> tm) es] : (ts'' _> ts')" using assms(2) e_type_comp[of \ \ vs "$Block (tn _> tm) es" ts ts'] by fastforce hence "\ \ [Block (tn _> tm) es] : (ts'' _> ts')" using unlift_b_e by auto then obtain ts_c tfn tfm where ts_c_def:"(tn _> tm) = (tfn _> tfm)" "ts'' = ts_c@tfn" "ts' = ts_c@tfm" " (\\label := [tfm] @ label \\ \ es : (tn _> tm))" using b_e_type_block[of \ "Block (tn _> tm) es" ts'' ts' "(tn _> tm)" es] by fastforce hence tfn_l:"length tfn = n" using assms(5) by simp obtain tvs' where tvs'_def:"ts'' = ts@tvs'" "length tvs' = n" "\\\' \ vs : ([] _> tvs')" using e_type_const_list assms(3,4) ts''_def(1) by fastforce hence "\\\' \ vs : ([] _> tn)" "\\\' \ $*es : (tn _> tm)" using ts_c_def tvs'_def tfn_l ts''_def c_def e_typing_s_typing.intros(1) by simp_all hence "\\\' \ (vs @ ($* es)) : ([] _> tm)" using e_type_comp_conc by simp moreover have "\\\ \ [] : (tm _> tm)" using b_e_type_empty[of \ "[]" "[]"] e_typing_s_typing.intros(1)[where ?b_es = "[]"] e_typing_s_typing.intros(3)[of \ \ "[]" "[]" "[]" "tm"] by fastforce ultimately show ?thesis using 
e_typing_s_typing.intros(7)[of \ \ "[]" tm _ "vs @ ($* es)" m] ts_c_def tvs'_def assms(5,6) e_typing_s_typing.intros(3) c_def by fastforce qed lemma types_preserved_if: assumes "\[$C ConstInt32 n, $If tf e1s e2s]\ \ \[$Block tf es']\" "\\\ \ [$C ConstInt32 n, $If tf e1s e2s] : (ts _> ts')" shows "\\\ \ [$Block tf es'] : (ts _> ts')" proof - have "\ \ [C ConstInt32 n, If tf e1s e2s] : (ts _> ts')" using unlift_b_e assms(2) by fastforce then obtain ts_i where ts_i_def:"\ \ [C ConstInt32 n] : (ts _> ts_i)" "\ \ [If tf e1s e2s] : (ts_i _> ts')" using b_e_type_comp by (metis append_Cons append_Nil) then obtain ts'' tfn tfm where ts_def:"tf = (tfn _> tfm)" "ts_i = ts''@tfn @ [T_i32]" "ts' = ts''@tfm" "(\\label := [tfm] @ label \\ \ e1s : tf)" "(\\label := [tfm] @ label \\ \ e2s : tf)" using b_e_type_if[of \ "If tf e1s e2s"] by fastforce have "ts_i = ts @ [(T_i32)]" using ts_i_def(1) b_e_type_value unfolding typeof_def by fastforce moreover have "(\\label := [tfm] @ label \\ \ es' : (tfn _> tfm))" using assms(1) ts_def(4,5) ts_def(1) by (cases rule: reduce_simple.cases, simp_all) hence "\ \ [Block tf es'] : (tfn _> tfm)" using ts_def(1) b_e_typing.block[of tf tfn tfm \ es'] by simp ultimately show ?thesis using ts_def(2,3) e_typing_s_typing.intros(1,3) by fastforce qed lemma types_preserved_tee_local: assumes "\[v, $Tee_local i]\ \ \[v, v, $Set_local i]\" "\\\ \ [v, $Tee_local i] : (ts _> ts')" "is_const v" shows "\\\ \ [v, v, $Set_local i] : (ts _> ts')" proof - obtain bv where bv_def:"v = $C bv" using e_type_const_unwrap assms(3) by fastforce hence "\ \ [C bv, Tee_local i] : (ts _> ts')" using unlift_b_e assms(2) by fastforce then obtain ts'' where ts''_def:"\ \ [C bv] : (ts _> ts'')" "\ \ [Tee_local i] : (ts'' _> ts')" using b_e_type_comp[of _ "[C bv]" "Tee_local i"] by fastforce then obtain ts_c t where ts_c_def:"ts'' = ts_c@[t]" "ts' = ts_c@[t]" "(local \)!i = t" "i < length(local \)" using b_e_type_tee_local[of \ "Tee_local i" ts'' ts' i] by fastforce hence t_bv:"t = typeof bv" "ts = ts_c" using b_e_type_value ts''_def by fastforce+ have "\ \ [Set_local i] : ([t,t] _> [t])" using ts_c_def(3,4) b_e_typing.set_local[of i \ t] b_e_typing.weakening[of \ "[Set_local i]" "[t]" "[]" "[t]"] by fastforce moreover have "\ \ [C bv] : ([t] _> [t,t])" using t_bv b_e_typing.const[of \ bv] b_e_typing.weakening[of \ "[C bv]" "[]" "[t]" "[t]"] by fastforce hence "\ \ [C bv, C bv] : ([] _> [t,t])" using t_bv b_e_typing.const[of \ bv] b_e_typing.composition[of \ "[C bv]" "[]" "[t]"] by fastforce ultimately have "\ \ [C bv, C bv, Set_local i] : (ts _> ts@[t])" using b_e_typing.composition b_e_typing.weakening[of \ "[C bv, C bv, Set_local i]"] by fastforce thus ?thesis using t_bv(2) ts_c_def(2) bv_def e_typing_s_typing.intros(1) by fastforce qed lemma types_preserved_loop: assumes "\vs @ [$Loop (t1s _> t2s) es]\ \ \[Label n [$Loop (t1s _> t2s) es] (vs @ ($* es))]\" "\\\ \ vs @ [$Loop (t1s _> t2s) es] : (ts _> ts')" "const_list vs" "length vs = n" "length t1s = n" "length t2s = m" shows "\\\ \ [Label n [$Loop (t1s _> t2s) es] (vs @ ($* es))] : (ts _> ts')" proof - obtain ts'' where ts''_def:"\\\ \ vs : (ts _> ts'')" "\\\ \ [$Loop (t1s _> t2s) es] : (ts'' _> ts')" using assms(2) e_type_comp by fastforce then have "\ \ [Loop (t1s _> t2s) es] : (ts'' _> ts')" using unlift_b_e assms(2) by fastforce then obtain ts_c tfn tfm \' where t_loop:"(t1s _> t2s) = (tfn _> tfm)" "(ts'' = ts_c@tfn)" "(ts' = ts_c@tfm)" "\' = \\label := [t1s] @ label \\" "(\' \ es : (tfn _> tfm))" using b_e_type_loop[of \ "Loop (t1s _> t2s) es" 
ts'' ts'] by fastforce obtain tvs where tvs_def:"ts'' = ts @ tvs" "length vs = length tvs" "\\\' \ vs : ([] _> tvs)" using e_type_const_list assms(3) ts''_def(1) by fastforce then have tvs_eq:"tvs = t1s" "tfn = t1s" using assms(4,5) t_loop(1,2) by simp_all have "\\\ \ [$Loop (t1s _> t2s) es] : (t1s _> t2s)" using t_loop b_e_typing.loop e_typing_s_typing.intros(1) by fastforce moreover have "\\\' \ $*es : (t1s _> t2s)" using t_loop e_typing_s_typing.intros(1) by fastforce then have "\\\' \ vs@($*es) : ([] _> t2s)" using tvs_eq tvs_def(3) e_type_comp_conc by blast ultimately have "\\\ \ [Label n [$Loop (t1s _> t2s) es] (vs @ ($* es))] : ([] _> t2s)" using e_typing_s_typing.intros(7)[of \ \ "[$Loop (t1s _> t2s) es]" t1s t2s "vs @ ($* es)"] t_loop(4) assms(5) by fastforce then show ?thesis using t_loop e_typing_s_typing.intros(3) tvs_def(1) tvs_eq(1) by fastforce qed lemma types_preserved_label_value: assumes "\[Label n es0 vs]\ \ \vs\" "\\\ \ [Label n es0 vs] : (ts _> ts')" "const_list vs" shows "\\\ \ vs : (ts _> ts')" proof - obtain tls t2s where t2s_def:"(ts' = (ts@t2s))" "(\\\ \ es0 : (tls _> t2s))" "(\\\\label := [tls] @ (label \)\ \ vs : ([] _> t2s))" using assms e_type_label by fastforce thus ?thesis using e_type_const_list[of vs \ "\\label := [tls] @ (label \)\" "[]" t2s] assms(3) e_typing_s_typing.intros(3) by fastforce qed lemma types_preserved_br_if: assumes "\[$C ConstInt32 n, $Br_if i]\ \ \e\" "\\\ \ [$C ConstInt32 n, $Br_if i] : (ts _> ts')" "e = [$Br i] \ e = []" shows "\\\ \ e : (ts _> ts')" proof - have "\ \ [C ConstInt32 n, Br_if i] : (ts _> ts')" using unlift_b_e assms(2) by fastforce then obtain ts'' where ts''_def:"\ \ [C ConstInt32 n] : (ts _> ts'')" "\ \ [Br_if i] : (ts'' _> ts')" using b_e_type_comp[of _ "[C ConstInt32 n]" "Br_if i"] by fastforce then obtain ts_c ts_b where ts_bc_def:"i < length(label \)" "ts'' = ts_c @ ts_b @ [T_i32]" "ts' = ts_c @ ts_b" "(label \)!i = ts_b" using b_e_type_br_if[of \ "Br_if i" ts'' ts' i] by fastforce hence ts_def:"ts = ts_c @ ts_b" using ts''_def(1) b_e_type_value by fastforce show ?thesis using assms(3) proof (rule disjE) assume "e = [$Br i]" thus ?thesis using ts_def e_typing_s_typing.intros(1) b_e_typing.br ts_bc_def by fastforce next assume "e = []" thus ?thesis using ts_def b_e_type_empty ts_bc_def(3) e_typing_s_typing.intros(1)[of _ "[]" "(ts _> ts')"] by fastforce qed qed lemma types_preserved_br_table: assumes "\[$C ConstInt32 c, $Br_table is i]\ \ \[$Br i']\" "\\\ \ [$C ConstInt32 c, $Br_table is i] : (ts _> ts')" "(i' = (is ! nat_of_int c) \ length is > nat_of_int c) \ i' = i" shows "\\\ \ [$Br i'] : (ts _> ts')" proof - have "\ \ [C ConstInt32 c, Br_table is i] : (ts _> ts')" using unlift_b_e assms(2) by fastforce then obtain ts'' where ts''_def:"\ \ [C ConstInt32 c] : (ts _> ts'')" "\ \ [Br_table is i] : (ts'' _> ts')" using b_e_type_comp[of _ "[C ConstInt32 c]" "Br_table is i"] by fastforce then obtain ts_l ts_c where ts_c_def:"list_all (\i. 
i < length(label \) \ (label \)!i = ts_l) (is@[i])" "ts'' = ts_c @ ts_l@[T_i32]" using b_e_type_br_table[of \ "Br_table is i" ts'' ts'] by fastforce hence ts_def:"ts = ts_c @ ts_l" using ts''_def(1) b_e_type_value by fastforce have "\ \ [Br i'] : (ts _> ts')" using assms(3) ts_c_def(1,2) b_e_typing.br[of i' \ ts_l ts_c ts'] ts_def unfolding list_all_length by (fastforce simp add: less_Suc_eq nth_append) thus ?thesis using e_typing_s_typing.intros(1) by fastforce qed lemma types_preserved_local_const: assumes "\[Local n i vs es]\ \ \es\" "\\\ \ [Local n i vs es] : (ts _> ts')" "const_list es" shows "\\\ \ es: (ts _> ts')" proof - obtain tls where "(\\((s_inst \)!i)\local := (local ((s_inst \)!i)) @ (map typeof vs), return := Some tls\ \ es : ([] _> tls))" "ts' = ts @ tls" using e_type_local[OF assms(2)] by blast+ moreover then have "\\\ \ es : ([] _> tls)" using assms(3) e_type_const_list by fastforce ultimately show ?thesis using e_typing_s_typing.intros(3) by fastforce qed lemma typing_map_typeof: assumes "ves = $$* vs" "\\\ \ ves : ([] _> tvs)" shows "tvs = map typeof vs" using assms proof (induction ves arbitrary: vs tvs rule: List.rev_induct) case Nil hence "\ \ [] : ([] _> tvs)" using unlift_b_e by auto thus ?case using Nil by auto next case (snoc a ves) obtain vs' v' where vs'_def:"ves @ [a] = $$* (vs'@[v'])" "vs = vs'@[v']" using snoc(2) by (metis Nil_is_map_conv append_is_Nil_conv list.distinct(1) rev_exhaust) obtain tvs' where tvs'_def:"\\\ \ ves: ([] _> tvs')" "\\\ \ [a] : (tvs' _> tvs)" using snoc(3) e_type_comp by fastforce hence "tvs' = map typeof vs'" using snoc(1) vs'_def by fastforce moreover have "is_const a" using vs'_def unfolding is_const_def by auto then obtain t where t_def:"tvs = tvs' @ [t]" "\\\ \ [a] : ([] _> [t])" using tvs'_def(2) e_type_const[of a \ \ tvs' tvs] by fastforce have "a = $ C v'" using vs'_def(1) by auto hence "t = typeof v'" using t_def unlift_b_e[of \ \ "[C v']" "([] _> [t])"] b_e_type_value[of \ "C v'" "[]" "[t]" v'] by fastforce ultimately show ?case using vs'_def t_def by simp qed lemma types_preserved_call_indirect_Some: assumes "\\\ \ [$C ConstInt32 c, $Call_indirect j] : (ts _> ts')" "stab s i' (nat_of_int c) = Some cl" "stypes s i' j = tf" "cl_type cl = tf" "store_typing s \" "i' < length (inst s)" "\ = (s_inst \ ! i') \local := local (s_inst \ ! i') @ tvs, label := arb_labs, return := arb_return\" shows "\\\ \ [Callcl cl] : (ts _> ts')" proof - obtain t1s t2s where tf_def:"tf = (t1s _> t2s)" using tf.exhaust by blast obtain ts'' where ts''_def:"\ \ [C ConstInt32 c] : (ts _> ts'')" "\ \ [Call_indirect j] : (ts'' _> ts')" using e_type_comp[of \ \ "[$C ConstInt32 c]" "$Call_indirect j" ts ts'] assms(1) unlift_b_e[of \ \ "[C ConstInt32 c]"] unlift_b_e[of \ \ "[Call_indirect j]"] by fastforce hence "ts'' = ts@[(T_i32)]" using b_e_type_value unfolding typeof_def by fastforce moreover have "i' < length (s_inst \)" using assms(5,6) store_typing_imp_inst_length_eq by fastforce hence stypes_eq:"types_t (s_inst \ ! i') = types (inst s ! i')" using store_typing_imp_inst_typing[OF assms(5)] store_typing_imp_inst_length_eq[OF assms(5)] unfolding inst_typing.simps by fastforce obtain ts''a where ts''a_def:"j < length (types_t \)" "ts'' = ts''a @ t1s @ [T_i32]" "ts' = ts''a @ t2s" "types_t \ ! 
j = (t1s _> t2s)" using b_e_type_call_indirect[OF ts''_def(2), of j] tf_def assms(3,7) stypes_eq unfolding stypes_def by fastforce moreover obtain tf' where tf'_def:"cl_typing \ cl tf'" using assms(2,5,6) stab_typed_some_imp_cl_typed by blast hence "cl_typing \ cl tf" using assms(4) unfolding cl_typing.simps cl_type_def by auto hence "\\\ \ [Callcl cl] : tf" using e_typing_s_typing.intros(6) assms(6,7) ts''a_def(1) by fastforce ultimately show "\\\ \ [Callcl cl] : (ts _> ts')" using tf_def e_typing_s_typing.intros(3) by auto qed lemma types_preserved_call_indirect_None: assumes "\\\ \ [$C ConstInt32 c, $Call_indirect j] : (ts _> ts')" shows "\\\ \ [Trap] : (ts _> ts')" using e_typing_s_typing.intros(4) by blast lemma types_preserved_callcl_native: assumes "\\\ \ ves @ [Callcl cl] : (ts _> ts')" "cl = Func_native i (t1s _> t2s) tfs es" "ves = $$* vs" "length vs = n" "length tfs = k" "length t1s = n" "length t2s = m" "n_zeros tfs = zs" "store_typing s \" shows "\\\ \ [Local m i (vs @ zs) [$Block ([] _> t2s) es]] : (ts _> ts')" proof - obtain ts'' where ts''_def:"\\\ \ ves : (ts _> ts'')" "\\\ \ [Callcl cl] : (ts'' _> ts')" using assms(1) e_type_comp by fastforce have ves_c:"const_list ves" using is_const_list[OF assms(3)] by simp then obtain tvs where tvs_def:"ts'' = ts @ tvs" "length t1s = length tvs" "\\\ \ ves : ([] _> tvs)" using ts''_def(1) e_type_const_list[of ves \ \ ts ts''] assms by fastforce obtain ts_c \' where ts_c_def:"(ts'' = ts_c @ t1s)" "(ts' = ts_c @ t2s)" "i < length (s_inst \)" "\' = ((s_inst \)!i)" "(\'\local := (local \') @ t1s @ tfs, label := ([t2s] @ (label \')), return := Some t2s\ \ es : ([] _> t2s))" using e_type_callcl_native[OF ts''_def(2) assms(2)] by fastforce have "inst_typing \ (inst s ! i) (s_inst \ ! i)" using store_typing_imp_inst_length_eq[OF assms(9)] store_typing_imp_inst_typing[OF assms(9)] ts_c_def(3) by simp obtain \'' where c''_def:"\'' = \'\local := (local \') @ t1s @ tfs, return := Some t2s\" by blast hence "\''\label := ([t2s] @ (label \''))\ = \'\local := (local \') @ t1s @ tfs, label := ([t2s] @ (label \')), return := Some t2s\" by fastforce hence "\\\'' \ [$Block ([] _> t2s) es] : ([] _> t2s)" using ts_c_def b_e_typing.block[of "([] _> t2s)" "[]" "t2s" _ es] e_typing_s_typing.intros(1)[of _ "[Block ([] _> t2s) es]"] by fastforce moreover have t_eqs:"ts = ts_c" "t1s = tvs" using tvs_def(1,2) ts_c_def(1) by simp_all have 1:"tfs = map typeof zs" using n_zeros_typeof assms(8) - by simp + by auto have "t1s = map typeof vs" using typing_map_typeof assms(3) tvs_def t_eqs by fastforce hence "(t1s @ tfs) = map typeof (vs @ zs)" using 1 by simp ultimately have "\\Some t2s \_i (vs @ zs);([$Block ([] _> t2s) es]) : t2s" using e_typing_s_typing.intros(8) ts_c_def c''_def by fastforce thus ?thesis using e_typing_s_typing.intros(3,5) ts_c_def t_eqs(1) assms(2,7) by fastforce qed lemma types_preserved_callcl_host_some: assumes "\\\ \ ves @ [Callcl cl] : (ts _> ts')" "cl = Func_host (t1s _> t2s) f" "ves = $$* vcs" "length vcs = n" "length t1s = n" "length t2s = m" "host_apply s (t1s _> t2s) f vcs hs = Some (s', vcs')" "store_typing s \" shows "\\\ \ $$* vcs' : (ts _> ts')" proof - obtain ts'' where ts''_def:"\\\ \ ves : (ts _> ts'')" "\\\ \ [Callcl cl] : (ts'' _> ts')" using assms(1) e_type_comp by fastforce have ves_c:"const_list ves" using is_const_list[OF assms(3)] by simp then obtain tvs where tvs_def:"ts'' = ts @ tvs" "length t1s = length tvs" "\\\ \ ves : ([] _> tvs)" using ts''_def(1) e_type_const_list[of ves \ \ ts ts''] assms by fastforce hence "ts'' = 
ts @ t1s" "ts' = ts @ t2s" using e_type_callcl_host[OF ts''_def(2) assms(2)] by auto moreover hence "list_all2 types_agree t1s vcs" using e_typing_imp_list_types_agree[where ?ts' = "[]"] assms(3) tvs_def(1,3) by fastforce hence "\\\ \ $$* vcs' : ([] _> t2s)" using list_types_agree_imp_e_typing host_apply_respect_type[OF _ assms(7)] by fastforce ultimately show ?thesis using e_typing_s_typing.intros(3) by fastforce qed lemma types_imp_concat: assumes "\\\ \ es @ [e] @ es' : (ts _> ts')" "\tes tes'. ((\\\ \ [e] : (tes _> tes')) \ (\\\ \ [e'] : (tes _> tes')))" shows "\\\ \ es @ [e'] @ es' : (ts _> ts')" proof - obtain ts'' where "\\\ \ es : (ts _> ts'')" "\\\ \ [e] @ es' : (ts'' _> ts')" using e_type_comp_conc1[of _ _ es "[e] @ es'"] assms(1) by fastforce moreover then obtain ts''' where "\\\ \ [e] : (ts'' _> ts''')" "\\\ \ es' : (ts''' _> ts')" using e_type_comp_conc1[of _ _ "[e]" es' ts'' ts'] assms by fastforce ultimately show ?thesis using assms(2) e_type_comp_conc[of _ _ es ts ts'' "[e']" ts'''] e_type_comp_conc[of _ _ "es @ [e']" ts ts'''] by fastforce qed lemma type_const_return: assumes "Lfilled i lholed (vs @ [$Return]) LI" "(return \) = Some tcs" "length tcs = length vs" "\\\ \ LI : (ts _> ts')" "const_list vs" shows "\\\' \ vs : ([] _> tcs)" using assms proof (induction i arbitrary: ts ts' lholed \ LI \') case 0 obtain vs' es' where "LI = (vs' @ (vs @ [$Return]) @ es')" using Lfilled.simps[of 0 lholed "(vs @ [$Return])" LI] 0(1) by fastforce then obtain ts'' ts''' where "\\\ \ vs' : (ts _> ts'')" "\\\ \ (vs @ [$Return]) : (ts'' _> ts''')" "\\\ \ es' : (ts''' _> ts')" using e_type_comp_conc2[of \ \ vs' "(vs @ [$Return])" es'] 0(4) by fastforce then obtain ts_b where ts_b_def:"\\\ \ vs : (ts'' _> ts_b)" "\\\ \ [$Return] : (ts_b _> ts''')" using e_type_comp_conc1 by fastforce then obtain ts_c where ts_c_def:"ts_b = ts_c @ tcs" "(return \) = Some tcs" using 0(2) b_e_type_return[of \] unlift_b_e[of \ \ "[Return]" "ts_b _> ts'''"] by fastforce obtain tcs' where "ts_b = ts'' @ tcs'" "length vs = length tcs'" "\\\' \ vs : ([] _> tcs')" using ts_b_def(1) e_type_const_list 0(5) by fastforce thus ?case using 0(3) ts_c_def by simp next case (Suc i) obtain vs' n l les les' LK where es_def:"lholed = (LRec vs' n les l les')" "Lfilled i l (vs @ [$Return]) LK" "LI = (vs' @ [Label n les LK] @ les')" using Lfilled.simps[of "(Suc i)" lholed "(vs @ [$Return])" LI] Suc(2) by fastforce then obtain ts'' ts''' where "\\\ \ [Label n les LK] : (ts'' _> ts''')" using e_type_comp_conc2[of \ \ vs' "[Label n les LK]" les'] Suc(5) by fastforce then obtain tls t2s where "ts''' = ts'' @ t2s" "length tls = n" "\\\ \ les : (tls _> t2s)" "\\\\label := [tls] @ label \\ \ LK : ([] _> t2s)" "return (\\label := [tls] @ label \\) = Some tcs" using e_type_label[of \ \ n les LK ts'' ts'''] Suc(3) by fastforce then show ?case using Suc(1)[OF es_def(2) _ assms(3) _ assms(5)] by fastforce qed lemma types_preserved_return: assumes "\[Local n i vls LI]\ \ \ves\" "\\\ \ [Local n i vls LI] : (ts _> ts')" "const_list ves" "length ves = n" "Lfilled j lholed (ves @ [$Return]) LI" shows "\\\ \ ves : (ts _> ts')" proof - obtain tls \' where l_def:"i < length (s_inst \)" "\' = ((s_inst \)!i)\local := (local ((s_inst \)!i)) @ (map typeof vls), return := Some tls\" "\\\' \ LI : ([] _> tls)" "ts' = ts @ tls" "length tls = n" using e_type_local[OF assms(2)] by blast hence "\\\ \ ves : ([] _> tls)" using type_const_return[OF assms(5) _ _ l_def(3)] assms(3-5) by fastforce thus ?thesis using e_typing_s_typing.intros(3) l_def(4) by fastforce 
qed lemma type_const_br: assumes "Lfilled i lholed (vs @ [$Br (i+k)]) LI" "length (label \) > k" "(label \)!k = tcs" "length tcs = length vs" "\\\ \ LI : (ts _> ts')" "const_list vs" shows "\\\' \ vs : ([] _> tcs)" using assms proof (induction i arbitrary: k ts ts' lholed \ LI \') case 0 obtain vs' es' where "LI = (vs' @ (vs @ [$Br (0+k)]) @ es')" using Lfilled.simps[of 0 lholed "(vs @ [$Br (0 + k)])" LI] 0(1) by fastforce then obtain ts'' ts''' where "\\\ \ vs' : (ts _> ts'')" "\\\ \ (vs @ [$Br (0+k)]) : (ts'' _> ts''')" "\\\ \ es' : (ts''' _> ts')" using e_type_comp_conc2[of \ \ vs' "(vs @ [$Br (0+k)])" es'] 0(5) by fastforce then obtain ts_b where ts_b_def:"\\\ \ vs : (ts'' _> ts_b)" "\\\ \ [$Br (0+k)] : (ts_b _> ts''')" using e_type_comp_conc1 by fastforce then obtain ts_c where ts_c_def:"ts_b = ts_c @ tcs" "(label \)!k = tcs" using 0(3) b_e_type_br[of \ "Br (0 + k)"] unlift_b_e[of \ \ "[Br (0 + k)]" "ts_b _> ts'''"] by fastforce obtain tcs' where "ts_b = ts'' @ tcs'" "length vs = length tcs'" "\\\' \ vs : ([] _> tcs')" using ts_b_def(1) e_type_const_list 0(6) by fastforce thus ?case using 0(4) ts_c_def by simp next case (Suc i k ts ts' lholed \ LI) obtain vs' n l les les' LK where es_def:"lholed = (LRec vs' n les l les')" "Lfilled i l (vs @ [$Br (i + (Suc k))]) LK" "LI = (vs' @ [Label n les LK] @ les')" using Lfilled.simps[of "(Suc i)" lholed "(vs @ [$Br ((Suc i) + k)])" LI] Suc(2) by fastforce then obtain ts'' ts''' where "\\\ \ [Label n les LK] : (ts'' _> ts''')" using e_type_comp_conc2[of \ \ vs' "[Label n les LK]" les'] Suc(6) by fastforce moreover then obtain lts \'' ts'''' where "\\\'' \ LK : ([] _> ts'''')" "\'' = \\label := [lts] @ (label \)\" "length (label \'') > (Suc k)" "(label \'')!(Suc k) = tcs" using e_type_label[of \ \ n les LK ts'' ts'''] Suc(3,4) by fastforce then show ?case using Suc(1) es_def(2) assms(4,6) by fastforce qed lemma types_preserved_br: assumes "\[Label n es0 LI]\ \ \vs @ es0\" "\\\ \ [Label n es0 LI] : (ts _> ts')" "const_list vs" "length vs = n" "Lfilled i lholed (vs @ [$Br i]) LI" shows "\\\ \ (vs @ es0) : (ts _> ts')" proof - obtain tls t2s \' where l_def:"(ts' = (ts@t2s))" "(\\\ \ es0 : (tls _> t2s))" "\' = \\label := [tls] @ (label \)\" "length (label \') > 0" "(label \')!0 = tls" "length tls = n" "(\\\\label := [tls] @ (label \)\ \ LI : ([] _> t2s))" using e_type_label[of \ \ n es0 LI ts ts'] assms(2) by fastforce hence "\\\ \ vs : ([] _> tls)" using assms(3-5) type_const_br[of i lholed vs 0 LI \' tls] by fastforce thus ?thesis using l_def(1,2) e_type_comp_conc e_typing_s_typing.intros(3) by fastforce qed lemma store_local_label_empty: assumes "i < length (s_inst \)" "store_typing s \" shows "label ((s_inst \)!i) = []" "local ((s_inst \)!i) = []" proof - obtain insts where inst_typ:"list_all2 (inst_typing \) insts (s_inst \)" using assms(2) unfolding store_typing.simps by auto thus "label ((s_inst \)!i) = []" using assms(1) unfolding inst_typing.simps List.list_all2_conv_all_nth by fastforce show "local ((s_inst \)!i) = []" using assms(1) inst_typ unfolding inst_typing.simps List.list_all2_conv_all_nth by fastforce qed lemma types_preserved_b_e1: assumes "\es\ \ \es'\" "store_typing s \" "\\\ \ es : (ts _> ts')" shows "\\\ \ es' : (ts _> ts')" using assms(1) proof (cases rule: reduce_simple.cases) case (unop_i32 c iop) thus ?thesis using assms(1,3) types_preserved_unop_testop_cvtop by simp next case (unop_i64 c iop) thus ?thesis using assms(1, 3) types_preserved_unop_testop_cvtop by simp next case (unop_f32 c fop) thus ?thesis using assms(1, 3) 
types_preserved_unop_testop_cvtop by simp next case (unop_f64 c fop) thus ?thesis using assms(1, 3) types_preserved_unop_testop_cvtop by simp next case (binop_i32_Some iop c1 c2 c) thus ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (binop_i32_None iop c1 c2) thus ?thesis by (simp add: e_typing_s_typing.intros(4)) next case (binop_i64_Some iop c1 c2 c) thus ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (binop_i64_None iop c1 c2) thus ?thesis by (simp add: e_typing_s_typing.intros(4)) next case (binop_f32_Some fop c1 c2 c) thus ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (binop_f32_None fop c1 c2) thus ?thesis by (simp add: e_typing_s_typing.intros(4)) next case (binop_f64_Some fop c1 c2 c) then show ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (binop_f64_None fop c1 c2) then show ?thesis by (simp add: e_typing_s_typing.intros(4)) next case (testop_i32 c testop) then show ?thesis using assms(1, 3) types_preserved_unop_testop_cvtop by simp next case (testop_i64 c testop) then show ?thesis using assms(1, 3) types_preserved_unop_testop_cvtop by simp next case (relop_i32 c1 c2 iop) then show ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (relop_i64 c1 c2 iop) then show ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (relop_f32 c1 c2 fop) then show ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (relop_f64 c1 c2 fop) then show ?thesis using assms(1, 3) types_preserved_binop_relop by simp next case (convert_Some t1 v t2 sx v') then show ?thesis using assms(1, 3) types_preserved_unop_testop_cvtop by simp next case (convert_None t1 v t2 sx) then show ?thesis using e_typing_s_typing.intros(4) by simp next case (reinterpret t1 v t2) then show ?thesis using assms(1, 3) types_preserved_unop_testop_cvtop by simp next case unreachable then show ?thesis using e_typing_s_typing.intros(4) by simp next case nop then have "\ \ [Nop] : (ts _> ts')" using assms(3) unlift_b_e by simp then show ?thesis using nop b_e_typing.empty e_typing_s_typing.intros(1,3) apply (induction "[Nop]" "ts _> ts'" arbitrary: ts ts') apply simp_all apply (metis list.simps(8)) apply blast done next case (drop v) then show ?thesis using assms(1, 3) types_preserved_drop by simp next case (select_false v1 v2) then show ?thesis using assms(1, 3) types_preserved_select by simp next case (select_true n v1 v2) then show ?thesis using assms(1, 3) types_preserved_select by simp next case (block vs n t1s t2s m es) then show ?thesis using assms(1, 3) types_preserved_block by simp next case (loop vs n t1s t2s m es) then show ?thesis using assms(1, 3) types_preserved_loop by simp next case (if_false tf e1s e2s) then show ?thesis using assms(1, 3) types_preserved_if by simp next case (if_true n tf e1s e2s) then show ?thesis using assms(1, 3) types_preserved_if by simp next case (label_const ts es) then show ?thesis using assms(1, 3) types_preserved_label_value by simp next case (label_trap ts es) then show ?thesis by (simp add: e_typing_s_typing.intros(4)) next case (br vs n i lholed LI es) then show ?thesis using assms(1, 3) types_preserved_br by fastforce next case (br_if_false n i) then show ?thesis using assms(1, 3) types_preserved_br_if by fastforce next case (br_if_true n i) then show ?thesis using assms(1, 3) types_preserved_br_if by fastforce next case (br_table is' c i') then show ?thesis using assms(1, 3) types_preserved_br_table by fastforce next case 
(br_table_length is' c i') then show ?thesis using assms(1, 3) types_preserved_br_table by fastforce next case (local_const i vs) then show ?thesis using assms(1, 3) types_preserved_local_const by fastforce next case (local_trap i vs) then show ?thesis by (simp add: e_typing_s_typing.intros(4)) next case (return n j lholed es i vls) then show ?thesis using assms(1, 3) types_preserved_return by fastforce next case (tee_local v i) then show ?thesis using assms(1, 3) types_preserved_tee_local by simp next case (trap lholed) then show ?thesis by (simp add: e_typing_s_typing.intros(4)) qed lemma types_preserved_b_e: assumes "\es\ \ \es'\" "store_typing s \" "\\None \_i vs;es : ts" shows "\\None \_i vs;es' : ts" proof - have "i < (length (s_inst \))" using assms(3) s_typing.cases by blast moreover obtain tvs \ where defs: "tvs = map typeof vs" "\ = ((s_inst \)!i)\local := (local ((s_inst \)!i) @ tvs), return := None\" "\\\ \ es : ([] _> ts)" using assms(3) unfolding s_typing.simps by blast have "\\\ \ es' : ([] _> ts)" using assms(1,2) defs(3) types_preserved_b_e1 by simp ultimately show ?thesis using defs unfolding s_typing.simps by auto qed lemma types_preserved_store: assumes "\\\ \ [$C ConstInt32 k, $C v, $Store t tp a off] : (ts _> ts')" shows "\\\ \ [] : (ts _> ts')" "types_agree t v" proof - obtain ts'' ts''' where ts_def:"\\\ \ [$C ConstInt32 k] : (ts _> ts'')" "\\\ \ [$C v] : (ts'' _> ts''')" "\\\ \ [$Store t tp a off] : (ts''' _> ts')" using assms e_type_comp_conc2[of \ \ "[$C ConstInt32 k]" "[$C v]" "[$Store t tp a off]"] by fastforce then have "ts'' = ts@[(T_i32)]" using b_e_type_value[of \ "C ConstInt32 k" "ts" ts''] unlift_b_e[of \ \ "[C (ConstInt32 k)]" "(ts _> ts'')"] unfolding typeof_def by fastforce hence "ts''' = ts@[(T_i32), (typeof v)]" using ts_def(2) b_e_type_value[of \ "C v" ts'' ts'''] unlift_b_e[of \ \ "[C v]" "(ts'' _> ts''')"] by fastforce hence "ts = ts'" "types_agree t v" using ts_def(3) b_e_type_store[of \ "Store t tp a off" ts''' ts'] unlift_b_e[of \ \ "[Store t tp a off]" "(ts''' _> ts')"] unfolding types_agree_def by fastforce+ thus "\\\ \ [] : (ts _> ts')" "types_agree t v" using b_e_type_empty[of \ "ts" "ts'"] e_typing_s_typing.intros(1) by fastforce+ qed lemma types_preserved_current_memory: assumes "\\\ \ [$Current_memory] : (ts _> ts')" shows "\\\ \ [$C ConstInt32 c] : (ts _> ts')" proof - have "ts' = ts@[T_i32]" using assms b_e_type_current_memory unlift_b_e[of \ \ "[Current_memory]"] by fastforce thus ?thesis using b_e_typing.const[of \ "ConstInt32 c"] e_typing_s_typing.intros(1,3) unfolding typeof_def by fastforce qed lemma types_preserved_grow_memory: assumes "\\\ \ [$C ConstInt32 c, $Grow_memory] : (ts _> ts')" shows "\\\ \ [$C ConstInt32 c'] : (ts _> ts')" proof - obtain ts'' where ts''_def:"\\\ \ [$C ConstInt32 c] : (ts _> ts'')" "\\\ \ [$Grow_memory] : (ts'' _> ts')" using e_type_comp assms by (metis append_butlast_last_id butlast.simps(2) last.simps list.distinct(1)) have "ts'' = ts@[(T_i32)]" using b_e_type_value[of \ "C ConstInt32 c" ts ts''] unlift_b_e[of \ \ "[C ConstInt32 c]"] ts''_def(1) unfolding typeof_def by fastforce moreover hence "ts'' = ts'" using ts''_def b_e_type_grow_memory[of _ _ ts'' ts'] unlift_b_e[of \ \ "[Grow_memory]"] by fastforce ultimately show "\\\ \ [$C ConstInt32 c'] : (ts _> ts')" using e_typing_s_typing.intros(1,3) b_e_typing.const[of \ "ConstInt32 c'"] unfolding typeof_def by fastforce qed lemma types_preserved_set_global: assumes "\\\ \ [$C v, $Set_global j] : (ts _> ts')" shows "\\\ \ [] : (ts _> ts')" "tg_t (global 
\ ! j) = typeof v" proof - obtain ts'' where ts''_def:"\\\ \ [$C v] : (ts _> ts'')" "\\\ \ [$Set_global j] : (ts'' _> ts')" using e_type_comp assms by (metis append_butlast_last_id butlast.simps(2) last.simps list.distinct(1)) hence "ts'' = ts@[typeof v]" using b_e_type_value unlift_b_e[of \ \ "[C v]"] by fastforce hence "ts = ts'" "tg_t (global \ ! j) = typeof v" using b_e_type_set_global ts''_def(2) unlift_b_e[of \ \ "[Set_global j]"] by fastforce+ thus "\\\ \ [] : (ts _> ts')" "tg_t (global \ ! j) = typeof v" using b_e_type_empty[of \ "ts" "ts'"] e_typing_s_typing.intros(1) by fastforce+ qed lemma types_preserved_load: assumes "\\\ \ [$C ConstInt32 k, $Load t tp a off] : (ts _> ts')" "typeof v = t" shows "\\\ \ [$C v] : (ts _> ts')" proof - obtain ts'' where ts''_def:"\\\ \ [$C ConstInt32 k] : (ts _> ts'')" "\\\ \ [$Load t tp a off] : (ts'' _> ts')" using e_type_comp assms by (metis append_butlast_last_id butlast.simps(2) last.simps list.distinct(1)) hence "ts'' = ts@[(T_i32)]" using b_e_type_value unlift_b_e[of \ \ "[C ConstInt32 k]"] unfolding typeof_def by fastforce hence ts_def:"ts' = ts@[t]" "load_store_t_bounds a (option_projl tp) t" using ts''_def(2) b_e_type_load unlift_b_e[of \ \ "[Load t tp a off]"] by fastforce+ moreover hence "\ \ [C v] : (ts _> ts@[t])" using assms(2) b_e_typing.const b_e_typing.weakening by fastforce ultimately show "\\\ \ [$C v] : (ts _> ts')" using e_typing_s_typing.intros(1) by fastforce qed lemma types_preserved_get_local: assumes "\\\ \ [$Get_local i] : (ts _> ts')" "length vi = i" "(local \) = map typeof (vi @ [v] @ vs)" shows "\\\ \ [$C v] : (ts _> ts')" proof - have "(local \)!i = typeof v" using assms(2,3) by (metis (no_types, hide_lams) append_Cons length_map list.simps(9) map_append nth_append_length) hence "ts' = ts@[typeof v]" using assms(1) unlift_b_e[of \ \ "[Get_local i]"] b_e_type_get_local by fastforce thus ?thesis using b_e_typing.const e_typing_s_typing.intros(1,3) by fastforce qed lemma types_preserved_set_local: assumes "\\\ \ [$C v', $Set_local i] : (ts _> ts')" "length vi = i" "(local \) = map typeof (vi @ [v] @ vs)" shows "(\\\ \ [] : (ts _> ts')) \ map typeof (vi @ [v] @ vs) = map typeof (vi @ [v'] @ vs)" proof - have v_type:"(local \)!i = typeof v" using assms(2,3) by (metis (no_types, hide_lams) append_Cons length_map list.simps(9) map_append nth_append_length) obtain ts'' where ts''_def:"\\\ \ [$C v'] : (ts _> ts'')" "\\\ \ [$Set_local i] : (ts'' _> ts')" using e_type_comp assms by (metis append_butlast_last_id butlast.simps(2) last.simps list.distinct(1)) hence "ts'' = ts@[typeof v']" using b_e_type_value unlift_b_e[of \ \ "[C v']"] by fastforce hence "typeof v = typeof v'" "ts' = ts" using v_type b_e_type_set_local[of \ "Set_local i" ts'' ts'] ts''_def(2) unlift_b_e[of \ \ "[Set_local i]"] by fastforce+ thus ?thesis using b_e_type_empty[of \ "ts" "ts'"] e_typing_s_typing.intros(1) by fastforce qed lemma types_preserved_get_global: assumes "typeof (sglob_val s i j) = tg_t (global \ ! j)" "\\\ \ [$Get_global j] : (ts _> ts')" shows "\\\ \ [$C sglob_val s i j] : (ts _> ts')" proof - have "ts' = ts@[tg_t (global \ ! j)]" using b_e_type_get_global assms(2) unlift_b_e[of _ _ "[Get_global j]"] by fastforce thus ?thesis using b_e_typing.const[of \ "sglob_val s i j"] assms(1) e_typing_s_typing.intros(1,3) by fastforce qed lemma lholed_same_type: assumes "Lfilled k lholed es les" "Lfilled k lholed es' les'" "\\\ \ les : (ts _> ts')" "\arb_labs ts ts'. 
\\(\\label := arb_labs@(label \)\) \ es : (ts _> ts') \ \\(\\label := arb_labs@(label \)\) \ es' : (ts _> ts')" shows "(\\\ \ les' : (ts _> ts'))" using assms proof (induction arbitrary: ts ts' es' \ les' rule: Lfilled.induct) case (L0 vs lholed es' es ts ts' es'') obtain ts'' ts''' where "\\\ \ vs : (ts _> ts'')" "\\\ \ es : (ts'' _> ts''')" "\\\ \ es' : (ts''' _> ts')" using e_type_comp_conc2 L0(4) by blast moreover hence "(\\\ \ es'' : (ts'' _> ts'''))" using L0(5)[of "[]" ts'' ts'''] by fastforce ultimately have "(\\\ \ vs @ es'' @ es' : (ts _> ts'))" using e_type_comp_conc by fastforce thus ?case using L0(2,3) Lfilled.simps[of 0 lholed es'' les'] by fastforce next case (LN vs lholed n es' l es'' k es lfilledk t1s t2s es''' \ les') obtain lfilledk' where l'_def:"Lfilled k l es''' lfilledk'" "les' = vs @ [Label n es' lfilledk'] @ es''" using LN Lfilled.simps[of "k+1" lholed es''' les'] by fastforce obtain ts' ts'' where lab_def:"\\\ \ vs : (t1s _> ts')" "\\\ \ [Label n es' lfilledk] : (ts' _> ts'')" "\\\ \ es'' : (ts'' _> t2s)" using e_type_comp_conc2[OF LN(6)] by blast obtain tls ts_c \_int where int_def:" ts'' = ts' @ ts_c" "length tls = n" "\\\ \ es' : (tls _> ts_c)" "\_int = \\label := [tls] @ label \\" "\\\_int \ lfilledk : ([] _> ts_c)" using e_type_label[OF lab_def(2)] by blast have "(\\' arb_labs' ts ts'. \' = \_int\label := arb_labs' @ label \_int\ \ \\\' \ es : (ts _> ts') \ (\\\' \ es''' : (ts _> ts')))" proof - fix \'' arb_labs'' tts tts' assume "\'' = \_int\label := arb_labs'' @ label \_int\" "\\\'' \ es : (tts _> tts')" thus "(\\\'' \ es''' : (tts _> tts'))" using LN(7)[of "arb_labs'' @ [tls]" tts tts'] int_def(4) by fastforce qed hence "(\\\_int \ lfilledk' : ([] _> ts_c))" using LN(4)[OF l'_def(1) int_def(5)] by fastforce hence "(\\\ \ [Label n es' lfilledk'] : (ts' _> ts''))" using int_def e_typing_s_typing.intros(3,7) by (metis append.right_neutral) thus ?case using lab_def e_type_comp_conc l'_def(2) by blast qed lemma types_preserved_e1: assumes "\s;vs;es\ \_i \s';vs';es'\" "store_typing s \" "tvs = map typeof vs" "i < length (inst s)" "\ = ((s_inst \)!i)\local := (local ((s_inst \)!i) @ tvs), label := arb_labs, return := arb_return\" "\\\ \ es : (ts _> ts')" shows "(\\\ \ es' : (ts _> ts')) \ (map typeof vs = map typeof vs')" using assms proof (induction arbitrary: tvs \ ts ts' arb_labs arb_return rule: reduce.induct) case (basic e e' s vs i) then show ?case using types_preserved_b_e1[OF basic(1,2)] by fastforce next case (call s vs j i) obtain ts'' tf1 tf2 where l_func_t: "length (func_t \) > j" "ts = ts''@tf1" "ts' = ts''@tf2" "((func_t \)!j) = (tf1 _> tf2)" using b_e_type_call[of \ "Call j" ts ts' j] call(5) unlift_b_e[of _ _ "[Call j]" "(ts _> ts')"] by fastforce have "i < length (s_inst \)" using call(3) store_typing_imp_inst_length_eq[OF call(1)] by simp moreover have "j < length (func_t (s_inst \ ! 
i))" using l_func_t(1) call(4) by simp ultimately have "cl_typing \ (sfunc s i j) (tf1 _> tf2)" using store_typing_imp_func_agree[OF call(1)] l_func_t(4) call(4) by fastforce thus ?case using e_typing_s_typing.intros(3,6) l_func_t by fastforce next case (call_indirect_Some s i' c cl j tf vs) show ?case using types_preserved_call_indirect_Some[OF call_indirect_Some(8,1)] call_indirect_Some(2,3,4,6,7) by fastforce next case (call_indirect_None s i c cl j vs) thus ?case using e_typing_s_typing.intros(4) by blast next case (callcl_native cl j t1s t2s ts es ves vcs n k m zs s vs i) thus ?case using types_preserved_callcl_native by fastforce next case (callcl_host_Some cl t1s t2s f ves vcs n m s hs s' vcs' vs i) thus ?case using types_preserved_callcl_host_some by fastforce next case (callcl_host_None cl t1s t2s f ves vcs n m s hs vs i) thus ?case using e_typing_s_typing.intros(4) by blast next case (get_local vi j s v vs i) hence "i < length (s_inst \)" unfolding list_all2_conv_all_nth store_typing.simps by fastforce then have "local \ = tvs" using store_local_label_empty assms(2) get_local by fastforce then show ?case using types_preserved_get_local get_local by fastforce next case (set_local vi j s v vs v' i) hence "i < length (s_inst \)" unfolding list_all2_conv_all_nth store_typing.simps by fastforce hence "local \ = tvs" using store_local_label_empty assms(2) set_local by fastforce thus ?case using set_local types_preserved_set_local by simp next case (get_global s vs j i) have "length (global \) > j" using b_e_type_get_global get_global(5) unlift_b_e[of _ _ "[Get_global j]" "(ts _> ts')"] by fastforce hence "glob_agree (sglob s i j) ((global \)!j)" using get_global(3,4) store_typing_imp_glob_agree[OF get_global(1)] store_typing_imp_inst_length_eq[OF get_global(1)] by fastforce hence "typeof (g_val (sglob s i j)) = tg_t (global \ ! 
j)" unfolding glob_agree_def by simp thus ?case using get_global(5) types_preserved_get_global unfolding glob_agree_def sglob_val_def by fastforce next case (set_global s i j v s' vs) then show ?case using types_preserved_set_global by fastforce next case (load_Some s i j m k off t bs vs a) then show ?case using types_preserved_load(1) wasm_deserialise_type by blast next case (load_None s i j m k off t vs a) then show ?case using e_typing_s_typing.intros(4) by blast next case (load_packed_Some s i j m sx k off tp bs vs t a) then show ?case using types_preserved_load(1) wasm_deserialise_type by blast next case (load_packed_None s i j m sx k off tp vs t a) then show ?case using e_typing_s_typing.intros(4) by blast next case (store_Some t v s i j m k off mem' vs a) then show ?case using types_preserved_store by blast next case (store_None t v s i j m k off vs a) then show ?case using e_typing_s_typing.intros(4) by blast next case (store_packed_Some t v s i j m k off tp mem' vs a) then show ?case using types_preserved_store by blast next case (store_packed_None t v s i j m k off tp vs a) then show ?case using e_typing_s_typing.intros(4) by blast next case (current_memory s i j m n vs) then show ?case using types_preserved_current_memory by fastforce next case (grow_memory s i j m n c mem' vs) then show ?case using types_preserved_grow_memory by fastforce next case (grow_memory_fail s i j m n vs c) thus ?case using types_preserved_grow_memory by blast next case (label s vs es i s' vs' es' k lholed les les') { fix \' arb_labs' ts ts' assume local_assms:"\' = \\label := arb_labs'@(label \), return := (return \)\" hence "(\\\' \ es : (ts _> ts')) \ (\\\' \ es' : (ts _> ts')) \ map typeof vs = map typeof vs'" using label(4)[OF label(5,6,7)] label(8) by fastforce hence "(\\\\label := arb_labs'@(label \)\ \ es : (ts _> ts')) \ (\\\\label := arb_labs'@(label \)\ \ es' : (ts _> ts')) \ map typeof vs = map typeof vs'" using local_assms by simp } hence "\arb_labs' ts ts'. \\\\label := arb_labs'@(label \)\ \ es : (ts _> ts') \ (\\\\label := arb_labs'@(label \)\ \ es' : (ts _> ts'))" "map typeof vs = map typeof vs'" using types_exist_lfilled[OF label(2,9)] by auto thus ?case using lholed_same_type[OF label(2,3,9)] by fastforce next case (local s vls es i s' vs' es' vs n j) obtain \' tls where es_def:"i < length (s_inst \)" "length tls = n" "\' = (s_inst \ ! i) \local := local(s_inst \ ! i) @ map typeof vls, label := label (s_inst \ ! i), return := Some tls\" "\\\' \ es : ([] _> tls)" "ts' = ts @ tls" using e_type_local[OF local(7)] by fastforce moreover obtain ts'' where "ts' = ts@ts''" "(\\(Some ts'') \_i vls;es : ts'')" using e_type_local_shallow local(7) by fastforce moreover have "inst_typing \ ((inst s)!i) ((s_inst \)!i)" "i < length (inst s)" using local es_def(1) unfolding store_typing.simps list_all2_conv_all_nth by fastforce+ ultimately have "\\\' \ es' : ([] _> tls)" "map typeof vls = map typeof vs'" using local(2)[OF local(3) _ _ _ es_def(4), of "map typeof vls" "Some tls" "label (s_inst \ ! 
i)"] by fastforce+ hence "\\(Some tls) \_ i vs';es' : tls" using e_typing_s_typing.intros(8) es_def(1,3) by fastforce thus ?case using e_typing_s_typing.intros(3,5) es_def(2,5) by fastforce qed lemma types_preserved_e: assumes "\s;vs;es\ \_i \s';vs';es'\" "store_typing s \" "\\None \_i vs;es : ts" shows "\\None \_i vs';es' : ts" using assms proof - have "i < (length (s_inst \))" using assms(3) s_typing.cases by blast moreover hence i_bound:"i < length (inst s)" using assms(2) unfolding list_all2_conv_all_nth store_typing.simps by fastforce obtain tvs \ where defs: "tvs = map typeof vs" "\ = ((s_inst \)!i)\local := (local ((s_inst \)!i) @ tvs), label := (label ((s_inst \)!i)), return := None\" "\\\ \ es : ([] _> ts)" using assms(3) unfolding s_typing.simps by fastforce have "(\\\ \ es' : ([] _> ts)) \ (map typeof vs = map typeof vs')" using types_preserved_e1[OF assms(1,2) defs(1) i_bound defs(2,3)] by simp ultimately show ?thesis using defs unfolding s_typing.simps by auto qed subsection \Progress\ lemma const_list_no_progress: assumes "const_list es" shows "\\s;vs;es\ \_ i \s';vs';es'\" proof - { assume "\s;vs;es\ \_ i \s';vs';es'\" hence "False" using assms proof (induction rule: reduce.induct) case (basic e e' s vs i) thus ?thesis proof (induction rule: reduce_simple.induct) case (trap es lholed) show ?case using trap(2) proof (cases rule: Lfilled.cases) case (L0 vs es') thus ?thesis using trap(3) list_all_append const_list_cons_last(2)[of vs Trap] unfolding const_list_def by (simp add: is_const_def) next case (LN vs n es' l es'' k lfilledk) thus ?thesis by (simp add: is_const_def) qed qed (fastforce simp add: const_list_cons_last(2) is_const_def const_list_def)+ next case (label s vs es i s' vs' es' k lholed les les') show ?case using label(2) proof (cases rule: Lfilled.cases) case (L0 vs es') thus ?thesis using label(4,5) list_all_append unfolding const_list_def by fastforce next case (LN vs n es' l es'' k lfilledk) thus ?thesis using label(4,5) unfolding const_list_def by (simp add: is_const_def) qed qed (fastforce simp add: const_list_cons_last(2) is_const_def const_list_def)+ } thus ?thesis by blast qed lemma empty_no_progress: assumes "es = []" shows "\\s;vs;es\ \_ i \s';vs';es'\" proof - { assume "\s;vs;es\ \_ i \s';vs';es'\" hence False using assms proof (induction rule: reduce.induct) case (basic e e' s vs i) thus ?thesis proof (induction rule: reduce_simple.induct) case (trap es lholed) thus ?case using Lfilled.simps[of 0 lholed "[Trap]" es] by auto qed auto next case (label s vs es i s' vs' es' k lholed les les') thus ?case using Lfilled.simps[of k lholed es "[]"] by auto qed auto } thus ?thesis by blast qed lemma trap_no_progress: assumes "es = [Trap]" shows "\\s;vs;es\ \_ i \s';vs';es'\" proof - { assume "\s;vs;es\ \_ i \s';vs';es'\" hence False using assms proof (induction rule: reduce.induct) case (basic e e' s vs i) thus ?case by (induction rule: reduce_simple.induct) auto next case (label s vs es i s' vs' es' k lholed les les') show ?case using label(2) proof (cases rule: Lfilled.cases) case (L0 vs es') show ?thesis using L0(2) label(1,4,5) empty_no_progress by (auto simp add: Cons_eq_append_conv) next case (LN vs n es' l es'' k' lfilledk) show ?thesis using LN(2) label(5) by (simp add: Cons_eq_append_conv) qed qed auto } thus ?thesis by blast qed lemma terminal_no_progress: assumes "const_list es \ es = [Trap]" shows "\\s;vs;es\ \_ i \s';vs';es'\" using const_list_no_progress trap_no_progress assms by blast lemma progress_L0: assumes "\s;vs;es\ \_ i \s';vs';es'\" 
"const_list cs" shows "\s;vs;cs@es@es_c\ \_ i \s';vs';cs@es'@es_c\" proof - have "\es. Lfilled 0 (LBase cs es_c) es (cs@es@es_c)" using Lfilled.intros(1)[of cs "(LBase cs es_c)" es_c] assms(2) unfolding const_list_def by fastforce thus ?thesis using reduce.intros(23) assms(1) by blast qed lemma progress_L0_left: assumes "\s;vs;es\ \_ i \s';vs';es'\" "const_list cs" shows "\s;vs;cs@es\ \_ i \s';vs';cs@es'\" using assms progress_L0[where ?es_c = "[]"] by fastforce lemma progress_L0_trap: assumes "const_list cs" "cs \ [] \ es \ []" shows "\a. \s;vs;cs@[Trap]@es\ \_ i \s;vs;[Trap]\" proof - have "cs @ [Trap] @ es \ [Trap]" using assms(2) by (cases "cs = []") (auto simp add: append_eq_Cons_conv) thus ?thesis using reduce.intros(1) assms(2) reduce_simple.trap Lfilled.intros(1)[OF assms(1), of _ es "[Trap]"] by blast qed lemma progress_LN: assumes "(Lfilled j lholed [$Br (j+k)] es)" "\\\ \ es : ([] _> ts)" "(label \)!k = tvs" shows "\lholed' vs \'. (Lfilled j lholed' (vs@[$Br (j+k)]) es) \ (\\\' \ vs : ([] _> tvs)) \ const_list vs" using assms proof (induction "[$Br (j+k)]" es arbitrary: k \ ts rule: Lfilled.induct) case (L0 vs lholed es') obtain ts' ts'' where ts_def:"\\\ \ vs : ([] _> ts')" "\\\ \ [$Br k] : (ts' _> ts'')" "\\\ \ es' : (ts'' _> ts)" using e_type_comp_conc2[OF L0(3)] by fastforce obtain ts_c where "ts' = ts_c @ tvs" using b_e_type_br[of \ "Br k" ts' ts''] L0(3,4) ts_def(2) unlift_b_e by fastforce then obtain vs1 vs2 where vs_def:"\\\ \ vs1 : ([] _> ts_c)" "\\\ \ vs2 : (ts_c _> (ts_c@tvs))" "vs = vs1@vs2" "const_list vs1" "const_list vs2" using e_type_const_list_cons[OF L0(1)] ts_def(1) by fastforce hence "\\\ \ vs2 : ([] _> tvs)" using e_type_const_list by blast thus ?case using Lfilled.intros(1)[OF vs_def(4), of _ es' "vs2@[$Br k]"] vs_def(3,5) by fastforce next case (LN vs lholed n es' l es'' j lfilledk) obtain t1s t2s where ts_def:"\\\ \ vs : ([] _> t1s)" "\\\ \ [Label n es' lfilledk] : (t1s _> t2s)" "\\\ \ es'' : (t2s _> ts)" using e_type_comp_conc2[OF LN(5)] by fastforce obtain ts' ts_l where ts_l_def:"\\\\label := [ts'] @ label \\ \ lfilledk : ([] _> ts_l)" using e_type_label[OF ts_def(2)] by fastforce obtain lholed' vs' \' where lfilledk_def:"Lfilled j lholed' (vs' @ [$Br (j + (1 + k))]) lfilledk" "\\\' \ vs' : ([] _> tvs)" "const_list vs'" using LN(4)[OF _ ts_l_def, of "1 + k"] LN(5,6) by fastforce thus ?case using Lfilled.intros(2)[OF LN(1) _ lfilledk_def(1)] by fastforce qed lemma progress_LN_return: assumes "(Lfilled j lholed [$Return] es)" "\\\ \ es : ([] _> ts)" "(return \) = Some tvs" shows "\lholed' vs \'. 
(Lfilled j lholed' (vs@[$Return]) es) \ (\\\' \ vs : ([] _> tvs)) \ const_list vs" using assms proof (induction "[$Return]" es arbitrary: k \ ts rule: Lfilled.induct) case (L0 vs lholed es') obtain ts' ts'' where ts_def:"\\\ \ vs : ([] _> ts')" "\\\ \ [$Return] : (ts' _> ts'')" "\\\ \ es' : (ts'' _> ts)" using e_type_comp_conc2[OF L0(3)] by fastforce obtain ts_c where "ts' = ts_c @ tvs" using b_e_type_return[of \ "Return" ts' ts''] L0(3,4) ts_def(2) unlift_b_e by fastforce then obtain vs1 vs2 where vs_def:"\\\ \ vs1 : ([] _> ts_c)" "\\\ \ vs2 : (ts_c _> (ts_c@tvs))" "vs = vs1@vs2" "const_list vs1" "const_list vs2" using e_type_const_list_cons[OF L0(1)] ts_def(1) by fastforce hence "\\\ \ vs2 : ([] _> tvs)" using e_type_const_list by blast thus ?case using Lfilled.intros(1)[OF vs_def(4), of _ es' "vs2@[$Return]"] vs_def(3,5) by fastforce next case (LN vs lholed n es' l es'' j lfilledk) obtain t1s t2s where ts_def:"\\\ \ vs : ([] _> t1s)" "\\\ \ [Label n es' lfilledk] : (t1s _> t2s)" "\\\ \ es'' : (t2s _> ts)" using e_type_comp_conc2[OF LN(5)] by fastforce obtain ts' ts_l where ts_l_def:"\\\\label := [ts'] @ label \\ \ lfilledk : ([] _> ts_l)" using e_type_label[OF ts_def(2)] by fastforce obtain lholed' vs' \' where lfilledk_def:"Lfilled j lholed' (vs' @ [$Return]) lfilledk" "\\\' \ vs' : ([] _> tvs)" "const_list vs'" using LN(4)[OF ts_l_def] LN(6) by fastforce thus ?case using Lfilled.intros(2)[OF LN(1) _ lfilledk_def(1)] by fastforce qed lemma progress_LN1: assumes "(Lfilled j lholed [$Br (j+k)] es)" "\\\ \ es : (ts _> ts')" shows "length (label \) > k" using assms proof (induction "[$Br (j+k)]" es arbitrary: k \ ts ts' rule: Lfilled.induct) case (L0 vs lholed es') obtain ts'' ts''' where ts_def:"\\\ \ vs : (ts _> ts'')" "\\\ \ [$Br k] : (ts'' _> ts''')" "\\\ \ es' : (ts''' _> ts')" using e_type_comp_conc2[OF L0(3)] by fastforce thus ?case using b_e_type_br(1)[of _ "Br k" ts'' ts'''] unlift_b_e by fastforce next case (LN vs lholed n es' l es'' k' lfilledk) obtain t1s t2s where ts_def:"\\\ \ vs : (ts _> t1s)" "\\\ \ [Label n es' lfilledk] : (t1s _> t2s)" "\\\ \ es'' : (t2s _> ts')" using e_type_comp_conc2[OF LN(5)] by fastforce obtain ts'' ts_l where ts_l_def:"\\\\label := [ts''] @ label \\ \ lfilledk : ([] _> ts_l)" using e_type_label[OF ts_def(2)] by fastforce thus ?case using LN(4)[of "1+k"] by fastforce qed lemma progress_LN2: assumes "(Lfilled j lholed e1s lfilled)" shows "\lfilled'. (Lfilled j lholed e2s lfilled')" using assms proof (induction rule: Lfilled.induct) case (L0 vs lholed es' es) thus ?case using Lfilled.intros(1) by fastforce next case (LN vs lholed n es' l es'' k es lfilledk) thus ?case using Lfilled.intros(2) by fastforce qed lemma const_of_const_list: assumes "length cs = 1" "const_list cs" shows "\v. cs = [$C v]" using e_type_const_unwrap assms unfolding const_list_def list_all_length by (metis append_butlast_last_id append_self_conv2 gr_zeroI last_conv_nth length_butlast length_greater_0_conv less_numeral_extra(1,4) zero_less_diff) lemma const_of_i32: assumes "const_list cs" "\\\ \ cs : ([] _> [(T_i32)])" shows "\c. cs = [$C ConstInt32 c]" proof - obtain v where "cs = [$C v]" using const_of_const_list assms(1) e_type_const_list[OF assms] by fastforce moreover hence "\ \ [C v] : ([] _> [(T_i32)])" using assms(2) unlift_b_e by fastforce hence "\c. 
v = ConstInt32 c" proof (induction "[C v]" "([] _> [(T_i32)])" rule: b_e_typing.induct) case (const \) then show ?case unfolding typeof_def by (cases v, auto) qed auto ultimately show ?thesis by fastforce qed lemma const_of_i64: assumes "const_list cs" "\\\ \ cs : ([] _> [(T_i64)])" shows "\c. cs = [$C ConstInt64 c]" proof - obtain v where "cs = [$C v]" using const_of_const_list assms(1) e_type_const_list[OF assms] by fastforce moreover hence "\ \ [C v] : ([] _> [(T_i64)])" using assms(2) unlift_b_e by fastforce hence "\c. v = ConstInt64 c" proof (induction "[C v]" "([] _> [(T_i64)])" rule: b_e_typing.induct) case (const \) then show ?case unfolding typeof_def by (cases v, auto) qed auto ultimately show ?thesis by fastforce qed lemma const_of_f32: assumes "const_list cs" "\\\ \ cs : ([] _> [T_f32])" shows "\c. cs = [$C ConstFloat32 c]" proof - obtain v where "cs = [$C v]" using const_of_const_list assms(1) e_type_const_list[OF assms] by fastforce moreover hence "\ \ [C v] : ([] _> [T_f32])" using assms(2) unlift_b_e by fastforce hence "\c. v = ConstFloat32 c" proof (induction "[C v]" "([] _> [T_f32])" rule: b_e_typing.induct) case (const \) then show ?case unfolding typeof_def by (cases v, auto) qed auto ultimately show ?thesis by fastforce qed lemma const_of_f64: assumes "const_list cs" "\\\ \ cs : ([] _> [T_f64])" shows "\c. cs = [$C ConstFloat64 c]" proof - obtain v where "cs = [$C v]" using const_of_const_list assms(1) e_type_const_list[OF assms] by fastforce moreover hence "\ \ [C v] : ([] _> [T_f64])" using assms(2) unlift_b_e by fastforce hence "\c. v = ConstFloat64 c" proof (induction "[C v]" "([] _> [T_f64])" rule: b_e_typing.induct) case (const \) then show ?case unfolding typeof_def by (cases v, auto) qed auto ultimately show ?thesis by fastforce qed lemma progress_unop_testop_i: assumes "\\\ \ cs : ([] _> [t])" "is_int_t t" "const_list cs" "e = Unop_i t iop \ e = Testop t testop" shows "\a s' vs' es'. \s;vs;cs@([$e])\ \_i \s';vs';es'\" using assms(2) proof (cases t) case T_i32 thus ?thesis using const_of_i32[OF assms(3)] assms(1,4) reduce.intros(1)[OF reduce_simple.intros(1)] reduce.intros(1)[OF reduce_simple.intros(13)] by fastforce next case T_i64 thus ?thesis using const_of_i64[OF assms(3)] assms(1,4) reduce.intros(1)[OF reduce_simple.intros(2)] reduce.intros(1)[OF reduce_simple.intros(14)] by fastforce qed (simp_all add: is_int_t_def) lemma progress_unop_f: assumes "\\\ \ cs : ([] _> [t])" "is_float_t t" "const_list cs" "e = Unop_f t iop" shows "\a s' vs' es'. \s;vs;cs@([$e])\ \_i \s';vs';es'\" using assms(2) proof (cases t) case T_f32 thus ?thesis using const_of_f32[OF assms(3)] assms(1,4) reduce.intros(1)[OF reduce_simple.intros(3)] reduce.intros(1)[OF reduce_simple.intros(13)] by fastforce next case T_f64 thus ?thesis using const_of_f64[OF assms(3)] assms(1,4) reduce.intros(1)[OF reduce_simple.intros(4)] reduce.intros(1)[OF reduce_simple.intros(14)] by fastforce qed (simp_all add: is_float_t_def) lemma const_list_split_2: assumes "const_list cs" "\\\ \ cs : ([] _> [t1, t2])" shows "\c1 c2. 
(\\\ \ [c1] : ([] _> [t1])) \ (\\\ \ [c2] : ([] _> [t2])) \ cs = [c1, c2] \ const_list [c1] \ const_list [c2]" proof - have l_cs:"length cs = 2" using assms e_type_const_list[OF assms] by simp then obtain c1 c2 where "cs!0 = c1" "cs!1 = c2" by fastforce hence "cs = [c1] @ [c2]" using assms e_type_const_conv_vs typing_map_typeof by fastforce thus ?thesis using assms e_type_comp[of \ \ "[c1]" c2] e_type_const[of c2 \ \ _ "[t1,t2]"] unfolding const_list_def by fastforce qed lemma const_list_split_3: assumes "const_list cs" "\\\ \ cs : ([] _> [t1, t2, t3])" shows "\c1 c2 c3. (\\\ \ [c1] : ([] _> [t1])) \ (\\\ \ [c2] : ([] _> [t2])) \ (\\\ \ [c3] : ([] _> [t3])) \ cs = [c1, c2, c3]" proof - have l_cs:"length cs = 3" using assms e_type_const_list[OF assms] by simp then obtain c1 c2 c3 where "cs!0 = c1" "cs!1 = c2" "cs!2 = c3" by fastforce hence "cs = [c1] @ [c2] @ [c3]" using assms e_type_const_conv_vs typing_map_typeof by fastforce thus ?thesis using assms e_type_comp_conc2[of \ \ "[c1]" "[c2]" "[c3]" "[]" "[t1,t2,t3]"] e_type_const[of c1] e_type_const[of c2] e_type_const[of c3] unfolding const_list_def by fastforce qed lemma progress_binop_relop_i: assumes "\\\ \ cs : ([] _> [t, t])" "is_int_t t" "const_list cs" "e = Binop_i t iop \ e = Relop_i t irop" shows "\a s' vs' es'. \s;vs;cs@([$e])\ \_i \s';vs';es'\" using assms(2) proof (cases t) case (T_i32) hence cs_def:"\c1 c2. cs = [$C ConstInt32 c1,$C ConstInt32 c2]" using const_list_split_2[OF assms(3,1)] assms(3) const_of_i32 unfolding const_list_def by blast show ?thesis proof (cases "e = Binop_i t iop") case True obtain c1 c2 where "cs = [$C ConstInt32 c1,$C ConstInt32 c2]" using cs_def by blast thus ?thesis apply (cases "app_binop_i iop c1 c2") apply (metis reduce_simple.intros(6) reduce.intros(1) T_i32 True append_Cons append_Nil) apply (metis reduce_simple.intros(5) reduce.intros(1) T_i32 True append_Cons append_Nil) done next case False thus ?thesis using reduce_simple.intros(15) assms(4) reduce.intros(1) cs_def T_i32 by fastforce qed next case (T_i64) hence cs_def:"\c1 c2. cs = [$C ConstInt64 c1,$C ConstInt64 c2]" using const_list_split_2[OF assms(3,1)] assms(3) const_of_i64 unfolding const_list_def by blast show ?thesis proof (cases "e = Binop_i t iop") case True obtain c1 c2 where "cs = [$C ConstInt64 c1,$C ConstInt64 c2]" using cs_def by blast thus ?thesis apply (cases "app_binop_i iop c1 c2") apply (metis reduce_simple.intros(8) reduce.intros(1) T_i64 True append_Cons append_Nil) apply (metis reduce_simple.intros(7) reduce.intros(1) T_i64 True append_Cons append_Nil) done next case False thus ?thesis using reduce_simple.intros(16) assms(4) reduce.intros(1) cs_def T_i64 by fastforce qed qed (simp_all add: is_int_t_def) lemma progress_binop_relop_f: assumes "\\\ \ cs : ([] _> [t, t])" "is_float_t t" "const_list cs" "e = Binop_f t fop \ e = Relop_f t frop" shows "\a s' vs' es'. \s;vs;cs@([$e])\ \_i \s';vs';es'\" using assms(2) proof (cases t) case T_f32 hence cs_def:"\c1 c2. 
cs = [$C ConstFloat32 c1,$C ConstFloat32 c2]" using const_list_split_2[OF assms(3,1)] assms(3) const_of_f32 unfolding const_list_def by blast show ?thesis proof (cases "e = Binop_f t fop") case True obtain c1 c2 where cs_def:"cs = [$C ConstFloat32 c1,$C ConstFloat32 c2]" using cs_def by blast thus ?thesis apply (cases "app_binop_f fop c1 c2") apply (metis reduce_simple.intros(10) reduce.intros(1) T_f32 True append_Cons append_Nil) apply (metis reduce_simple.intros(9) reduce.intros(1) T_f32 True append_Cons append_Nil) done next case False thus ?thesis using reduce_simple.intros(17) assms(4) reduce.intros(1) cs_def T_f32 by fastforce qed next case T_f64 hence cs_def:"\c1 c2. cs = [$C ConstFloat64 c1,$C ConstFloat64 c2]" using const_list_split_2[OF assms(3,1)] assms(3) const_of_f64 unfolding const_list_def by blast show ?thesis proof (cases "e = Binop_f t fop") case True obtain c1 c2 where "cs = [$C ConstFloat64 c1,$C ConstFloat64 c2]" using cs_def by blast thus ?thesis apply (cases "app_binop_f fop c1 c2") apply (metis reduce_simple.intros(12) reduce.intros(1) T_f64 True append_Cons append_Nil) apply (metis reduce_simple.intros(11) reduce.intros(1) T_f64 True append_Cons append_Nil) done next case False thus ?thesis using reduce_simple.intros(18) assms(4) reduce.intros(1) cs_def T_f64 by fastforce qed qed (simp_all add: is_float_t_def) lemma progress_b_e: assumes "\ \ b_es : (ts _> ts')" "\\\ \ cs : ([] _> ts)" "(\lholed. \(Lfilled 0 lholed [$Return] (cs@($*b_es))))" "\ i lholed. \(Lfilled 0 lholed [$Br (i)] (cs@($*b_es)))" "const_list cs" "\ const_list ($* b_es)" "i < length (s_inst \)" "length (local \) = length (vs)" "Option.is_none (memory \) = Option.is_none (inst.mem ((inst s)!i))" shows "\a s' vs' es'. \s;vs;cs@($*b_es)\ \_i \s';vs';es'\" using assms proof (induction b_es "(ts _> ts')" arbitrary: ts ts' cs rule: b_e_typing.induct) case (const \ v) then show ?case unfolding const_list_def is_const_def by simp next case (unop_i t \ uu) thus ?case using progress_unop_testop_i[OF unop_i(2,1)] by fastforce next case (unop_f t \ uv) thus ?case using progress_unop_f[OF unop_f(2,1,5)] by fastforce next case (binop_i t \ uw) thus ?case using progress_binop_relop_i[OF binop_i(2,1)] by fastforce next case (binop_f t \ ux) thus ?case using progress_binop_relop_f[OF binop_f(2,1,5)] by fastforce next case (testop t \ uy) thus ?case using progress_unop_testop_i[OF testop(2,1)] by fastforce next case (relop_i t \ uz) thus ?case using progress_binop_relop_i[OF relop_i(2,1)] by fastforce next case (relop_f t \ va) thus ?case using progress_binop_relop_f[OF relop_f(2,1,5)] by fastforce next case (convert t1 t2 sx \) obtain v where cs_def:"cs = [$ C v]" "typeof v = t2" using const_typeof const_of_const_list[OF _ convert(6)] e_type_const_list[OF convert(6,3)] by fastforce thus ?case proof (cases "cvt t1 sx v") case None thus ?thesis using reduce.intros(1)[OF reduce_simple.convert_None[OF _ None]] cs_def unfolding types_agree_def by fastforce next case (Some a) thus ?thesis using reduce.intros(1)[OF reduce_simple.convert_Some[OF _ Some]] cs_def unfolding types_agree_def by fastforce qed next case (reinterpret t1 t2 \) obtain v where cs_def:"cs = [$ C v]" "typeof v = t2" using const_typeof const_of_const_list[OF _ reinterpret(6)] e_type_const_list[OF reinterpret(6,3)] by fastforce thus ?case using reduce.intros(1)[OF reduce_simple.reinterpret] unfolding types_agree_def by fastforce next case (unreachable \ ts ts') thus ?case using reduce.intros(1)[OF reduce_simple.unreachable] progress_L0[OF _ 
unreachable(4)] by fastforce next case (nop \) thus ?case using reduce.intros(1)[OF reduce_simple.nop] progress_L0[OF _ nop(4)] by fastforce next case (drop \ t) obtain v where "cs = [$C v]" using const_of_const_list drop(4) e_type_const_list[OF drop(4,1)] by fastforce thus ?case using reduce.intros(1)[OF reduce_simple.drop] progress_L0[OF _ drop(4)] by fastforce next case (select \ t) obtain v1 v2 v3 where cs_def:"\\\ \ [$ C v3] : ([] _> [T_i32])" "cs = [$C v1, $C v2, $ C v3]" using const_list_split_3[OF select(4,1)] select(4) unfolding const_list_def by (metis list_all_simps(1) e_type_const_unwrap) obtain c3 where c_def:"v3 = ConstInt32 c3" using cs_def select(4) const_of_i32[OF _ cs_def(1)] unfolding const_list_def by fastforce have "\a s' vs' es'. \s;vs;[$C v1, $C v2, $ C ConstInt32 c3, $Select]\ \_i \s';vs';es'\" proof (cases "int_eq c3 0") case True thus ?thesis using reduce.intros(1)[OF reduce_simple.select_false] by fastforce next case False thus ?thesis using reduce.intros(1)[OF reduce_simple.select_true] by fastforce qed thus ?case using c_def cs_def by fastforce next case (block tf tn tm \ es) show ?case using reduce_simple.block[OF block(7), of _ tn tm _ es] e_type_const_list[OF block(7,4)] reduce.intros(1) block(1) by fastforce next case (loop tf tn tm \ es) show ?case using reduce_simple.loop[OF loop(7), of _ tn tm _ es] e_type_const_list[OF loop(7,4)] reduce.intros(1) loop(1) by fastforce next case (if_wasm tf tn tm \ es1 es2) obtain c1s c2s where cs_def:"\\\ \ c1s : ([] _> tn)" "\\\ \ c2s : ([] _> [T_i32])" "const_list c1s" "const_list c2s" "cs = c1s @ c2s" using e_type_const_list_cons[OF if_wasm(9,6)] e_type_const_list by fastforce obtain c where c_def: "c2s = [$ C (ConstInt32 c)]" using const_of_i32 cs_def by fastforce have "\a s' vs' es'. \s;vs;[$ C (ConstInt32 c), $ If tf es1 es2]\ \_i \s';vs';es'\" proof (cases "int_eq c 0") case True thus ?thesis using reduce.intros(1)[OF reduce_simple.if_false] by fastforce next case False thus ?thesis using reduce.intros(1)[OF reduce_simple.if_true] by fastforce qed thus ?case using c_def cs_def progress_L0 by fastforce next case (br i \ ts t1s t2s) thus ?case using Lfilled.intros(1)[OF br(6), of _ "[]" "[$Br i]"] by fastforce next case (br_if j \ ts) obtain cs1 cs2 where cs_def:"\\\ \ cs1 : ([] _> ts)" "\\\ \ cs2 : ([] _> [T_i32])" "const_list cs1" "const_list cs2" "cs = cs1 @ cs2" using e_type_const_list_cons[OF br_if(6,3)] e_type_const_list by fastforce obtain c where c_def:"cs2 = [$C ConstInt32 c]" using const_of_i32[OF cs_def(4,2)] by blast have "\a s' vs' es'. \s;vs;cs2@($* [Br_if j])\ \_i \s';vs';es'\" proof (cases "int_eq c 0") case True thus ?thesis using c_def reduce.intros(1)[OF reduce_simple.br_if_false] by fastforce next case False thus ?thesis using c_def reduce.intros(1)[OF reduce_simple.br_if_true] by fastforce qed thus ?case using cs_def(5) progress_L0[OF _ cs_def(3), of s vs "cs2 @ ($* [Br_if j])" _ _ _ _ "[]"] by fastforce next case (br_table \ ts "is" i' t1s t2s) obtain cs1 cs2 where cs_def:"\\\ \ cs1 : ([]_> (t1s @ ts))" "\\\ \ cs2 : ([] _> [T_i32])" "const_list cs1" "const_list cs2" "cs = cs1 @ cs2" using e_type_const_list_cons[OF br_table(5), of \ \ "(t1s @ ts)" "[T_i32]"] e_type_const_list[of _ \ \ "t1s @ ts" "(t1s @ ts) @ [T_i32]"] br_table(2,5) unfolding const_list_def by fastforce obtain c where c_def:"cs2 = [$C ConstInt32 c]" using const_of_i32[OF cs_def(4,2)] by blast have "\a s' vs' es'. 
\s;vs;[$C ConstInt32 c, $Br_table is i']\ \_i \s';vs';es'\" proof (cases "(nat_of_int c) < length is") case True show ?thesis using reduce.intros(1)[OF reduce_simple.br_table[OF True]] by fastforce next case False hence "length is \ nat_of_int c" by fastforce thus ?thesis using reduce.intros(1)[OF reduce_simple.br_table_length] by fastforce qed thus ?case using c_def cs_def progress_L0 by fastforce next case (return \ ts t1s t2s) thus ?case using Lfilled.intros(1)[OF return(5), of _ "[]" "[$Return]"] by fastforce next case (call j \) show ?case using progress_L0[OF reduce.intros(2) call(6)] by fastforce next case (call_indirect j \ t1s t2s) obtain cs1 cs2 where cs_def:"\\\ \ cs1 : ([]_> t1s)" "\\\ \ cs2 : ([] _> [T_i32])" "const_list cs1" "const_list cs2" "cs = cs1 @ cs2" using e_type_const_list_cons[OF call_indirect(7), of \ \ t1s "[T_i32]"] e_type_const_list[of _ \ \ t1s "t1s @ [T_i32]"] call_indirect(4) by fastforce obtain c where c_def:"cs2 = [$C ConstInt32 c]" using cs_def(2,4) const_of_i32 by fastforce consider (1) "\cl tf. stab s i (nat_of_int c) = Some cl \ stypes s i j = tf \ cl_type cl = tf" | (2) "\cl. stab s i (nat_of_int c) = Some cl \ stypes s i j \ cl_type cl" | (3) "stab s i (nat_of_int c) = None" by (metis option.collapse) hence "\a s' vs' es'. \s;vs;[$C ConstInt32 c, $Call_indirect j]\ \_i \s';vs';es'\" proof (cases) case 1 thus ?thesis using reduce.intros(3) by blast next case 2 thus ?thesis using reduce.intros(4) by blast next case 3 thus ?thesis using reduce.intros(4) by blast qed then show ?case using c_def cs_def progress_L0 by fastforce next case (get_local j \ t) obtain v vj vj' where v_def:"v = vs ! j" "vj = (take j vs)" "vj' = (drop (j+1) vs)" by blast have j_def:"j < length vs" using get_local(1,9) by simp hence vj_len:"length vj = j" using v_def(2) by fastforce hence "vs = vj @ [v] @ vj'" using v_def id_take_nth_drop j_def by fastforce thus ?case using progress_L0[OF reduce.intros(8)[OF vj_len, of s v vj'] get_local(6)] by fastforce next case (set_local j \ t) obtain v vj vj' where v_def:"v = vs ! j" "vj = (take j vs)" "vj' = (drop (j+1) vs)" by blast obtain v' where cs_def: "cs = [$C v']" using const_of_const_list set_local(3,6) e_type_const_list by fastforce have j_def:"j < length vs" using set_local(1,9) by simp hence vj_len:"length vj = j" using v_def(2) by fastforce hence "vs = vj @ [v] @ vj'" using v_def id_take_nth_drop j_def by fastforce thus ?case using reduce.intros(9)[OF vj_len, of s v vj' v' i] cs_def by fastforce next case (tee_local i \ t) obtain v where "cs = [$C v]" using const_of_const_list tee_local(3,6) e_type_const_list by fastforce thus ?case using reduce.intros(1)[OF reduce_simple.tee_local] tee_local(6) unfolding const_list_def by fastforce next case (get_global j \ t) thus ?case using reduce.intros(10)[of s vs j i] progress_L0 by fastforce next case (set_global j \ t) obtain v where "cs = [$C v]" using const_of_const_list set_global(4,7) e_type_const_list by fastforce thus ?case using reduce.intros(11)[of s i j v _ vs] by fastforce next case (load \ n a tp_sx t off) obtain c where c_def: "cs = [$C ConstInt32 c]" using const_of_i32 load(3,6) e_type_const_unwrap unfolding const_list_def by fastforce obtain j where mem_some:"smem_ind s i = Some j" using load(1,10) unfolding smem_ind_def by fastforce have "\a' s' vs' es'. 
\s;vs;[$C ConstInt32 c, $Load t tp_sx a off]\ \_i \s';vs';es'\" proof (cases tp_sx) case None note tp_none = None show ?thesis proof (cases "load ((mem s)!j) (nat_of_int c) off (t_length t)") case None show ?thesis using reduce.intros(13)[OF mem_some _ None, of vs] tp_none load(2) by fastforce next case (Some a) show ?thesis using reduce.intros(12)[OF mem_some _ Some, of vs] tp_none load(2) by fastforce qed next case (Some a) obtain tp sx where tp_some:"tp_sx = Some (tp, sx)" using Some by fastforce show ?thesis proof (cases "load_packed sx ((mem s)!j) (nat_of_int c) off (tp_length tp) (t_length t)") case None show ?thesis using reduce.intros(15)[OF mem_some _ None, of vs] tp_some load(2) by fastforce next case (Some a) show ?thesis using reduce.intros(14)[OF mem_some _ Some, of vs] tp_some load(2) by fastforce qed qed then show ?case using c_def progress_L0 by fastforce next case (store \ n a tp t off) obtain cs' v where cs_def:"\\\ \ [cs'] : ([] _> [T_i32])" "\\\ \ [$ C v] : ([] _> [t])" "cs = [cs',$ C v]" using const_list_split_2[OF store(6,3)] e_type_const_unwrap unfolding const_list_def by fastforce have t_def:"typeof v = t" using cs_def(2) b_e_type_value[OF unlift_b_e[of \ \ "[C v]" "([] _> [t])"]] by fastforce obtain j where mem_some:"smem_ind s i = Some j" using store(1,10) unfolding smem_ind_def by fastforce obtain c where c_def:"cs' = $C ConstInt32 c" using const_of_i32[OF _ cs_def(1)] cs_def(3) store(6) unfolding const_list_def by fastforce have "\a' s' vs' es'. \s;vs;[$C ConstInt32 c, $C v, $Store t tp a off]\ \_i \s';vs';es'\" proof (cases tp) case None note tp_none = None show ?thesis proof (cases "store (s.mem s ! j) (nat_of_int c) off (bits v) (t_length t)") case None show ?thesis using reduce.intros(17)[OF _ mem_some _ None, of vs] t_def tp_none store(2) unfolding types_agree_def by fastforce next case (Some a) show ?thesis using reduce.intros(16)[OF _ mem_some _ Some, of vs] t_def tp_none store(2) unfolding types_agree_def by fastforce qed next case (Some a) note tp_some = Some show ?thesis proof (cases "store_packed (s.mem s ! j) (nat_of_int c) off (bits v) (tp_length a)") case None show ?thesis using reduce.intros(19)[OF _ mem_some _ None, of t vs] t_def tp_some store(2) unfolding types_agree_def by fastforce next case (Some a) show ?thesis using reduce.intros(18)[OF _ mem_some _ Some, of t vs] t_def tp_some store(2) unfolding types_agree_def by fastforce qed qed then show ?case using c_def cs_def progress_L0 by fastforce next case (current_memory \ n) obtain j where mem_some:"smem_ind s i = Some j" using current_memory(1,9) unfolding smem_ind_def by fastforce thus ?case using progress_L0[OF reduce.intros(20)[OF mem_some] current_memory(5), of _ _ vs "[]"] by fastforce next case (grow_memory \ n) obtain c where c_def:"cs = [$C ConstInt32 c]" using const_of_i32 grow_memory(2,5) by fastforce obtain j where mem_some:"smem_ind s i = Some j" using grow_memory(1,9) unfolding smem_ind_def by fastforce show ?case using reduce.intros(22)[OF mem_some, of _] c_def by fastforce next case (empty \) thus ?case unfolding const_list_def by simp next case (composition \ es t1s t2s e t3s) consider (1) "\ const_list ($* es)" | (2) "const_list ($* es)" "\ const_list ($*[e])" using composition(9) unfolding const_list_def by fastforce thus ?case proof (cases) case 1 have "(\lholed. \ Lfilled 0 lholed [$Return] (cs @ ($* es)))" "(\i lholed. \ Lfilled 0 lholed [$Br i] (cs @ ($* es)))" proof safe fix lholed assume "Lfilled 0 lholed [$Return] (cs @ ($* es))" hence "\lholed'. 
Lfilled 0 lholed' [$Return] (cs @ ($* es @ [e]))" proof (cases rule: Lfilled.cases) case (L0 vs es') thus ?thesis using Lfilled.intros(1)[of "vs" _ "es'@ ($*[e])" "[$Return]"] by (metis append.assoc map_append) qed simp thus False using composition(6) by simp next fix i lholed assume "Lfilled 0 lholed [$Br i] (cs @ ($* es))" hence "\lholed'. Lfilled 0 lholed' [$Br i] (cs @ ($* es @ [e]))" proof (cases rule: Lfilled.cases) case (L0 vs es') thus ?thesis using Lfilled.intros(1)[of "vs" _ "es'@ ($*[e])" "[$Br i]"] by (metis append.assoc map_append) qed simp thus False using composition(7) by simp qed thus ?thesis using composition(2)[OF composition(5) _ _ composition(8) 1 composition(10,11,12)] progress_L0[of s vs "(cs @ ($* es))" i _ _ _ "[]" "$*[e]"] unfolding const_list_def by fastforce next case 2 hence "const_list (cs@($* es))" using composition(8) unfolding const_list_def by simp moreover have "\\\ \ (cs@($* es)) : ([] _> t2s)" using composition(5) e_typing_s_typing.intros(1)[OF composition(1)] e_type_comp_conc by fastforce ultimately show ?thesis using composition(4)[of "(cs@($* es))"] 2(2) composition(6,7) composition(10-) by fastforce qed next case (weakening \ es t1s t2s ts) obtain cs1 cs2 where cs_def:"\\\ \ cs1 : ([] _> ts)" "\\\ \ cs2 : ([] _> t1s)" "cs = cs1 @ cs2" "const_list cs1" "const_list cs2" using e_type_const_list_cons[OF weakening(6,3)] e_type_const_list[of _ \ \ "ts" "ts @ t1s"] by fastforce have "(\lholed. \ Lfilled 0 lholed [$Return] (cs2 @ ($* es)))" "(\i lholed. \ Lfilled 0 lholed [$Br i] (cs2 @ ($* es)))" proof safe fix lholed assume "Lfilled 0 lholed [$Return] (cs2 @ ($* es))" hence "\lholed'. Lfilled 0 lholed' [$Return] (cs1 @ cs2 @ ($* es))" proof (cases rule: Lfilled.cases) case (L0 vs es') thus ?thesis using Lfilled.intros(1)[of "cs1 @ vs" _ "es'" "[$Return]"] cs_def(4) unfolding const_list_def by fastforce qed simp thus False using weakening(4) cs_def(3) by simp next fix i lholed assume "Lfilled 0 lholed [$Br i] (cs2 @ ($* es))" hence "\lholed'. Lfilled 0 lholed' [$Br i] (cs1 @ cs2 @ ($* es))" proof (cases rule: Lfilled.cases) case (L0 vs es') thus ?thesis using Lfilled.intros(1)[of "cs1 @ vs" _ "es'" "[$Br i]"] cs_def(4) unfolding const_list_def by fastforce qed simp thus False using weakening(5) cs_def(3) by simp qed hence "\a s' vs' es'. \s;vs;cs2@($*es)\ \_i \s';vs';es'\" using weakening(2)[OF cs_def(2) _ _ cs_def(5) weakening(7)] weakening(8-) by fastforce thus ?case using progress_L0[OF _ cs_def(4), of s vs "cs2 @ ($* es)" i _ _ _ "[]"] cs_def(3) by fastforce qed lemma progress_e: assumes "\\None \_i vs;cs_es : ts'" "\ k lholed. \(Lfilled k lholed [$Return] cs_es)" "\ i k lholed. (Lfilled k lholed [$Br (i)] cs_es) \ i < k" "cs_es \ [Trap]" "\ const_list (cs_es)" "store_typing s \" shows "\a s' vs' es'. \s;vs;cs_es\ \_i \s';vs';es'\" proof - fix \ cs es ts_c have prems1: "\\\ \ es : (ts_c _> ts') \ \\\ \ cs_es : ([] _> ts') \ cs_es = cs@es \ const_list cs \ \\\ \ cs : ([] _> ts_c) \ (\ k lholed. \(Lfilled k lholed [$Return] cs_es)) \ (\ i k lholed. (Lfilled k lholed [$Br (i)] cs_es) \ i < k) \ cs_es \ [Trap] \ \ const_list (cs_es) \ store_typing s \ \ i < length (s_inst \) \ length (local \) = length (vs) \ Option.is_none (memory \) = Option.is_none (inst.mem ((inst s)!i)) \ \a s' vs' cs_es'. \s;vs;cs_es\ \_i \s';vs';cs_es'\" and prems2: "\\None \_i vs;cs_es : ts' \ (\ k lholed. \(Lfilled k lholed [$Return] cs_es)) \ (\ i k lholed. (Lfilled k lholed [$Br (i)] cs_es) \ i < k) \ cs_es \ [Trap] \ \ const_list (cs_es) \ store_typing s \ \ \a s' vs' cs_es'. 
\s;vs;cs_es\ \_i \s';vs';cs_es'\" proof (induction arbitrary: vs ts_c ts' i cs_es cs rule: e_typing_s_typing.inducts) case (1 \ b_es tf \) hence "\ \ b_es : (ts_c _> ts')" using e_type_comp_conc1[of \ \ cs "($* b_es)" "[]" "ts'"] unlift_b_e by (metis e_type_const_conv_vs typing_map_typeof) then show ?case using progress_b_e[OF _ 1(5) _ _ 1(4)] 1(3,4,9) list_all_append 1 unfolding const_list_def by fastforce next case (2 \ \ es t1s t2s e t3s) show ?case proof (cases "const_list es") case True hence "const_list (cs@es)" using 2(7) unfolding const_list_def by simp moreover have "\ts''. (\\\ \ (cs @ es) : ([] _> ts''))" using 2(5,6) by (metis append.assoc e_type_comp_conc1) ultimately show ?thesis using 2(4)[OF 2(5) _ _ _ 2(9,10,11,12,13,14,15), of "(cs@es)"] 2(6,16) by fastforce next case False hence "\const_list (cs@es)" unfolding const_list_def by simp moreover have "\ts''. (\\\ \ (cs @ es) : ([] _> ts''))" using 2(5,6) by (metis append.assoc e_type_comp_conc1) moreover have "\k lholed. \ Lfilled k lholed [$Return] (cs @ es)" proof - { assume "\k lholed. Lfilled k lholed [$Return] (cs @ es)" then obtain k lholed where local_assms:"Lfilled k lholed [$Return] (cs @ es)" by blast hence "\lholed'. Lfilled k lholed' [$Return] (cs @ es @ [e])" proof (cases rule: Lfilled.cases) case (L0 vs es') obtain lholed' where "lholed' = LBase vs (es'@[e])" by blast thus ?thesis using L0 by (metis Lfilled.intros(1) append.assoc) next case (LN vs ts es' l es'' k lfilledk) obtain lholed' where "lholed' = LRec vs ts es' l (es''@[e])" by blast thus ?thesis using LN by (metis Lfilled.intros(2) append.assoc) qed hence False using 2(6,9) by blast } thus "\k lholed. \ Lfilled k lholed [$Return] (cs @ es)" by blast qed moreover have "\i k lholed. Lfilled k lholed [$Br i] (cs @ es) \ i < k" proof - { assume "\i k lholed. Lfilled k lholed [$Br i] (cs @ es) \ \(i < k)" then obtain i k lholed where local_assms:"Lfilled k lholed [$Br i] (cs @ es)" "\(i < k)" by blast hence "\lholed'. Lfilled k lholed' [$Br i] (cs @ es @ [e]) \ \(i < k)" proof (cases rule: Lfilled.cases) case (L0 vs es') obtain lholed' where "lholed' = LBase vs (es'@[e])" by blast thus ?thesis using L0 local_assms(2) by (metis Lfilled.intros(1) append.assoc) next case (LN vs ts es' l es'' k lfilledk) obtain lholed' where "lholed' = LRec vs ts es' l (es''@[e])" by blast thus ?thesis using LN local_assms(2) by (metis Lfilled.intros(2) append.assoc) qed hence False using 2(6,10) by blast } thus "\i k lholed. Lfilled k lholed [$Br i] (cs @ es) \ i < k" by blast qed moreover note preds = calculation show ?thesis proof (cases "cs @ es = [Trap]") case True thus ?thesis using reduce_simple.trap[of _ "(LBase [] [e])"] Lfilled.intros(1)[of "[]" "LBase [] [e]" "[e]" "cs @ es"] reduce.intros(1) 2(6,11) unfolding const_list_def by (metis append.assoc append_Nil list.pred_inject(1)) next case False thus ?thesis using 2(3)[OF _ _ 2(7,8) _ _ _ _ 2(13,14,15)] preds 2(6,16) progress_L0[of s vs "(cs @ es)" _ _ _ _ "[]" "[e]"] unfolding const_list_def by (metis append.assoc append_Nil list.pred_inject(1)) qed qed next case (3 \ \ es t1s t2s ts) thus ?case by fastforce next case (4 \ \) have cs_es_def:"Lfilled 0 (LBase cs []) [Trap] cs_es" using Lfilled.intros(1)[OF 4(3), of _ "[]" "[Trap]"] 4(2) by fastforce thus ?case using reduce_simple.trap[OF 4(7) cs_es_def] reduce.intros(1) by blast next case (5 \ ts j vls es n \) consider (1) "(\k lholed. \ Lfilled k lholed [$Return] es)" "(\k lholed i. 
(Lfilled k lholed [$Br i] es) \ i < k)" "es \ [Trap]" "\ const_list es" | (2) "\k lholed. Lfilled k lholed [$Return] es" | (3) "const_list es \ (es = [Trap])" | (4) "\k lholed i. (Lfilled k lholed [$Br i] es) \ i \ k" using not_le_imp_less by blast thus ?case proof (cases) case 1 obtain s' vs'' a where temp1:"\s;vls;es\ \_ j \s';vs'';a\" using 5(3)[OF 1(1) _ 1(3,4) 5(12)] 1(2) by fastforce show ?thesis using reduce.intros(24)[OF temp1, of vs] progress_L0[where ?cs = cs, OF _ 5(6)] 5(5) by fastforce next case 2 then obtain k lholed where local_assms:"(Lfilled k lholed [$Return] es)" by blast then obtain lholed' vs' \' where lholed'_def:"(Lfilled k lholed' (vs'@[$Return]) es)" "\\\' \ vs' : ([] _> ts)" "const_list vs'" using progress_LN_return[OF local_assms, of \ _ ts ts] s_type_unfold[OF 5(1)] by fastforce hence temp1:"\a. \[Local n j vls es]\ \ \vs'\" using reduce_simple.return[OF lholed'_def(3)] e_type_const_list[OF lholed'_def(3,2)] 5(2) by fastforce show ?thesis using temp1 progress_L0[OF reduce.intros(1) 5(6)] 5(5) by fastforce next case 3 then consider (1) "const_list es" | (2) "es = [Trap]" by blast hence temp1:"\a. \s;vs;[Local n j vls es]\ \_ i \s;vs;es\" proof (cases) case 1 have "length es = length ts" using s_type_unfold[OF 5(1)] e_type_const_list[OF 1] by fastforce thus ?thesis using reduce_simple.local_const[OF 1] reduce.intros(1) 5(2) by fastforce next case 2 thus ?thesis using reduce_simple.local_trap reduce.intros(1) by fastforce qed thus ?thesis using progress_L0[where ?cs = cs, OF _ 5(6)] 5(5) by fastforce next case 4 then obtain k' lholed' i' where temp1:"Lfilled k' lholed' [$Br (k'+i')] es" using le_Suc_ex by blast obtain \' where c_def:"\' = ((s_inst \)!j)\local := (local ((s_inst \)!j)) @ (map typeof vls), return := Some ts\" by blast hence es_def:"\\\' \ es : ([] _> ts)" "j < length (s_inst \)" using 5(1) s_type_unfold by fastforce+ hence "length (label \') = 0" using c_def store_local_label_empty 5(12) by fastforce thus ?thesis using progress_LN1[OF temp1 es_def(1)] by linarith qed next case (6 \ cl tf \) obtain ts'' where ts''_def:"\\\ \ cs : ([] _> ts'')" "\\\ \ [Callcl cl] : (ts'' _> ts')" using 6(2,3) e_type_comp_conc1 by fastforce obtain ts_c t1s t2s where cl_def:"(ts'' = ts_c @ t1s)" "(ts' = ts_c @ t2s)" "cl_type cl = (t1s _> t2s)" using e_type_callcl[OF ts''_def(2)] by fastforce obtain vs1 vs2 where vs_def:"\\\ \ vs1 : ([] _> ts_c)" "\\\ \ vs2 : (ts_c _> ts_c @ t1s)" "cs = vs1 @ vs2" "const_list vs1" "const_list vs2" using e_type_const_list_cons[OF 6(4)] ts''_def(1) cl_def(1) by fastforce have l:"(length vs2) = (length t1s)" using e_type_const_list vs_def(2,5) by fastforce show ?case proof (cases cl) case (Func_native x11 x12 x13 x14) hence func_native_def:"cl = Func_native x11 (t1s _> t2s) x13 x14" using cl_def(3) unfolding cl_type_def by simp have "\a a'. \s;vs;vs2 @ [Callcl cl]\ \_ i \s;vs;a\" using reduce.intros(5)[OF func_native_def] e_type_const_conv_vs[OF vs_def(5)] l unfolding n_zeros_def by fastforce thus ?thesis using progress_L0 vs_def(3,4) 6(3) by fastforce next case (Func_host x21 x22) hence func_host_def:"cl = Func_host (t1s _> t2s) x22" using cl_def(3) unfolding cl_type_def by simp obtain vcs where vcs_def:"vs2 = $$* vcs" using e_type_const_conv_vs[OF vs_def(5)] by blast fix hs have "\s' a a'. 
\s;vs;vs2 @ [Callcl cl]\ \_ i \s';vs;a\" proof (cases "host_apply s (t1s _> t2s) x22 vcs hs") case None thus ?thesis using reduce.intros(7)[OF func_host_def] l vcs_def by fastforce next case (Some a) then obtain s' vcs' where ha_def:"host_apply s (t1s _> t2s) x22 vcs hs = Some (s', vcs')" by (metis surj_pair) have "list_all2 types_agree t1s vcs" using e_typing_imp_list_types_agree vs_def(2,4) vcs_def by simp thus ?thesis using reduce.intros(6)[OF func_host_def _ _ _ _ ha_def] l vcs_def host_apply_respect_type[OF _ ha_def] by fastforce qed thus ?thesis using vs_def(3,4) 6(3) progress_L0 by fastforce qed next case (7 \ \ e0s ts t2s es n) consider (1) "(\k lholed. \ Lfilled k lholed [$Return] es)" "(\k lholed i. (Lfilled k lholed [$Br i] es) \ i < k)" "es \ [Trap]" "\ const_list es" | (2) "\k lholed. Lfilled k lholed [$Return] es" | (3) "const_list es \ (es = [Trap])" | (4) "\k lholed i. (Lfilled k lholed [$Br i] es) \ i = k" | (5) "\k lholed i. (Lfilled k lholed [$Br i] es) \ i > k" using linorder_neqE_nat by blast thus ?case proof (cases) case 1 have temp1:"es = [] @ es" "const_list []" unfolding const_list_def by auto have temp2:"\\\\label := [ts] @ label \\ \ [] : ([] _> [])" using b_e_typing.empty e_typing_s_typing.intros(1) by fastforce have "\s' vs' a. \s;vs;es\ \_ i \s';vs';a\" using 7(5)[OF 7(2), of "[]" "[]", OF temp1 temp2 1(1) _ 1(3,4) 7(14,15)] 1(2) 7(16,17) unfolding const_list_def by fastforce then obtain s' vs' a where red_def:"\s;vs;es\ \_ i \s';vs';a\" by blast have temp4:"\es. Lfilled 0 (LBase [] []) es es" using Lfilled.intros(1)[of "[]" "(LBase [] [])" "[]"] unfolding const_list_def by fastforce hence temp5:"Lfilled 1 (LRec cs n e0s (LBase [] []) []) es (cs@[Label n e0s es])" using Lfilled.intros(2)[of cs "(LRec cs n e0s (LBase [] []) [])" n e0s "(LBase [] [])" "[]" 0 es es] 7(8) unfolding const_list_def by fastforce have temp6:"Lfilled 1 (LRec cs n e0s (LBase [] []) []) a (cs@[Label n e0s a])" using temp4 Lfilled.intros(2)[of cs "(LRec cs n e0s (LBase [] []) [])" n e0s "(LBase [] [])" "[]" 0 a a] 7(8) unfolding const_list_def by fastforce show ?thesis using reduce.intros(23)[OF _ temp5 temp6] 7(7) red_def by fastforce next case 2 then obtain k lholed where "(Lfilled k lholed [$Return] es)" by blast hence "(Lfilled (k+1) (LRec cs n e0s lholed []) [$Return] (cs@[Label n e0s es]))" using Lfilled.intros(2) 7(8) by fastforce thus ?thesis using 7(10)[of "k+1"] 7(7) by fastforce next case 3 hence temp1:"\a. \s;vs;[Label n e0s es]\ \_ i \s;vs;es\" using reduce_simple.label_const reduce_simple.label_trap reduce.intros(1) by fastforce show ?thesis using progress_L0[OF _ 7(8)] 7(7) temp1 by fastforce next case 4 then obtain k lholed where lholed_def:"(Lfilled k lholed [$Br (k+0)] es)" by fastforce then obtain lholed' vs' \' where lholed'_def:"(Lfilled k lholed' (vs'@[$Br (k)]) es)" "\\\' \ vs' : ([] _> ts)" "const_list vs'" using progress_LN[OF lholed_def 7(2), of ts] by fastforce have "\es' a. \[Label n e0s es]\ \ \vs'@e0s\" using reduce_simple.br[OF lholed'_def(3) _ lholed'_def(1)] 7(3) e_type_const_list[OF lholed'_def(3,2)] by fastforce hence "\es' a. 
\s;vs;[Label n e0s es]\ \_ i \s;vs;es'\" using reduce.intros(1) by fastforce thus ?thesis using progress_L0 7(7,8) by fastforce next case 5 then obtain i k lholed where lholed_def:"(Lfilled k lholed [$Br i] es)" "i > k" using less_imp_add_positive by blast have k1_def:"Lfilled (k+1) (LRec cs n e0s lholed []) [$Br i] cs_es" using 7(7) Lfilled.intros(2)[OF 7(8) _ lholed_def(1), of _ n e0s "[]"] by fastforce thus ?thesis using 7(11)[OF k1_def] lholed_def(2) by simp qed next case (8 i \ tvs vs \ rs es ts) have "length (local \) = length vs" using 8(2,3) store_local_label_empty[OF 8(1,11)] by fastforce moreover have "Option.is_none (memory \) = Option.is_none (inst.mem ((inst s)!i))" using store_mem_exists[OF 8(1,11)] 8(3) by simp ultimately show ?case using 8(6)[OF 8(4) _ _ _ 8(7,8,9,10,11,1)] e_typing_s_typing.intros(1)[OF b_e_typing.empty[of \]] unfolding const_list_def by fastforce qed show ?thesis using prems2[OF assms] by fastforce qed lemma progress_e1: assumes "\\None \_i vs;es : ts" shows "\(Lfilled k lholed [$Return] es)" proof - { assume "\k lholed. (Lfilled k lholed [$Return] es)" then obtain k lholed where local_assms:"(Lfilled k lholed [$Return] es)" by blast obtain \ where c_def:"i < length (s_inst \)" "\ = ((s_inst \)!i)\local := (local ((s_inst \)!i)) @ (map typeof vs), return := None\" "(\\\ \ es : ([] _> ts))" using assms s_type_unfold by fastforce have "\rs. return \ = Some rs" using local_assms c_def(3) proof (induction "[$Return]" es arbitrary: \ ts rule: Lfilled.induct) case (L0 vs lholed es') thus ?case using e_type_comp_conc2[OF L0(3)] unlift_b_e[of \ \ "[Return]"] b_e_type_return by fastforce next case (LN vs lholed tls es' l es'' k lfilledk) thus ?case using e_type_comp_conc2[OF LN(5)] e_type_label[of \ \ tls es' lfilledk] by fastforce qed hence False using c_def(2) by fastforce } thus "\ k lholed. \(Lfilled k lholed [$Return] es)" by blast qed lemma progress_e2: assumes "\\None \_i vs;es : ts" "store_typing s \" shows "(Lfilled k lholed [$Br (j)] es) \ j < k" proof - { assume "(\i k lholed. (Lfilled k lholed [$Br (i)] es) \ i \ k)" then obtain j k lholed where local_assms:"(Lfilled k lholed [$Br (k+j)] es)" by (metis le_iff_add) obtain \ where c_def:"i < length (s_inst \)" "\ = ((s_inst \)!i)\local := (local ((s_inst \)!i)) @ (map typeof vs), return := None\" "(\\\ \ es : ([] _> ts))" using assms s_type_unfold by fastforce have "j < length (label \)" using progress_LN1[OF local_assms c_def(3)] by - hence False using store_local_label_empty(1)[OF c_def(1) assms(2)] c_def(2) by fastforce } thus "(\ j k lholed. (Lfilled k lholed [$Br (j)] es) \ j < k)" by fastforce qed lemma progress_e3: assumes "\\None \_i vs;cs_es : ts'" "cs_es \ [Trap]" "\ const_list (cs_es)" "store_typing s \" shows "\a s' vs' es'. \s;vs;cs_es\ \_i \s';vs';es'\" using assms progress_e progress_e1 progress_e2 by fastforce end \ No newline at end of file
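The final lemma, progress_e3, is the progress half of type soundness for this semantics: a well-typed configuration that is neither the single trap instruction nor a fully reduced list of constants can always take a reduction step. The following is an informal restatement only; the inference-rule notation is schematic and does not follow the theory's concrete syntax, with the store-typing assumption store_typing s S written as a turnstile:

\[
\frac{\;\mathcal{S};\,\mathrm{None} \;\vdash_i\; vs;\,es : ts
      \qquad es \neq [\mathsf{Trap}]
      \qquad \neg\,\mathit{const\_list}\ es
      \qquad \vdash s : \mathcal{S}\;}
     {\exists\, s'\ vs'\ es'.\ \ (s;\,vs;\,es)\ \hookrightarrow_i\ (s';\,vs';\,es')}
\]

Read together with a corresponding preservation result for the same typing judgement, a statement of this shape is what the standard syntactic type-soundness argument consumes.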