diff --git a/thys/Affine_Arithmetic/Affine_Form.thy b/thys/Affine_Arithmetic/Affine_Form.thy --- a/thys/Affine_Arithmetic/Affine_Form.thy +++ b/thys/Affine_Arithmetic/Affine_Form.thy @@ -1,1989 +1,1989 @@ section \Affine Form\ theory Affine_Form imports "HOL-Analysis.Multivariate_Analysis" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" Affine_Arithmetic_Auxiliarities Executable_Euclidean_Space begin subsection \Auxiliary developments\ lemma sum_list_mono: fixes xs ys::"'a::ordered_ab_group_add list" shows "length xs = length ys \ (\x y. (x, y) \ set (zip xs ys) \ x \ y) \ sum_list xs \ sum_list ys" by (induct xs ys rule: list_induct2) (auto simp: algebra_simps intro: add_mono) lemma fixes xs::"'a::ordered_comm_monoid_add list" shows sum_list_nonneg: "(\x. x \ set xs \ x \ 0) \ sum_list xs \ 0" by (induct xs) (auto intro!: add_nonneg_nonneg) lemma map_filter: "map f (filter (\x. P (f x)) xs) = filter P (map f xs)" by (induct xs) simp_all lemma map_of_zip_upto2_length_eq_nth: assumes "distinct B" assumes "i < length B" shows "(map_of (zip B [0.. (i, a) \ set xs \ (i, b) \ set xs \ a = b" by (metis (lifting) map_of_is_SomeI option.inject) lemma length_filter_snd_zip: "length ys = length xs \ length (filter (p \ snd) (zip ys xs)) = length (filter p xs)" by (induct ys xs rule: list_induct2) (auto ) lemma filter_snd_nth: "length ys = length xs \ n < length (filter p xs) \ snd (filter (p \ snd) (zip ys xs) ! n) = filter p xs ! n" by (induct ys xs arbitrary: n rule: list_induct2) (auto simp: o_def nth_Cons split: nat.split) lemma distinct_map_snd_fst_eqD: "distinct (map snd xs) \ (i, a) \ set xs \ (j, a) \ set xs \ i = j" by (metis Pair_inject inj_on_contraD snd_conv distinct_map) lemma map_of_mapk_inj_on_SomeI: "inj_on f (fst ` (set t)) \ map_of t k = Some x \ map_of (map (case_prod (\k. Pair (f k))) t) (f k) = Some x" by (induct t) (auto simp add: inj_on_def dest!: map_of_SomeD split: if_split_asm) lemma map_abs_nonneg[simp]: fixes xs::"'a::ordered_ab_group_add_abs list" shows "list_all (\x. x \ 0) xs \ map abs xs = xs" by (induct xs) auto lemma the_inv_into_image_eq: "inj_on f A \ Y \ f ` A \ the_inv_into A f ` Y = f -` Y \ A" using f_the_inv_into_f the_inv_into_f_f[where f = f and A = A] by force lemma image_fst_zip: "length ys = length xs \ fst ` set (zip ys xs) = set ys" by (metis dom_map_of_conv_image_fst dom_map_of_zip) lemma inj_on_fst_set_zip_distinct[simp]: "distinct xs \ length xs = length ys \ inj_on fst (set (zip xs ys))" by (force simp add: in_set_zip distinct_conv_nth intro!: inj_onI) lemma mem_greaterThanLessThan_absI: fixes x::real assumes "abs x < 1" shows "x \ {-1 <..< 1}" using assms by (auto simp: abs_real_def split: if_split_asm) lemma minus_one_less_divideI: "b > 0 \ -b < a \ -1 < a / (b::real)" by (auto simp: field_simps) lemma divide_less_oneI: "b > 0 \ b > a \ a / (b::real) < 1" by (auto simp: field_simps) lemma closed_segment_real: fixes a b::real shows "closed_segment a b = (if a \ b then {a .. b} else {b .. a})" (is "_ = ?if") proof safe fix x assume "x \ closed_segment a b" from segment_bound[OF this] show "x \ ?if" by (auto simp: abs_real_def split: if_split_asm) next fix x assume "x \ ?if" thus "x \ closed_segment a b" by (auto simp: closed_segment_def intro!: exI[where x="(x - a)/(b - a)"] simp: divide_simps algebra_simps) qed subsection \Partial Deviations\ typedef (overloaded) 'a pdevs = "{x::nat \ 'a::zero. finite {i. x i \ 0}}" \ \TODO: unify with polynomials\ morphisms pdevs_apply Abs_pdev by (auto intro!: exI[where x="\x. 
0"]) setup_lifting type_definition_pdevs lemma pdevs_eqI: "(\i. pdevs_apply x i = pdevs_apply y i) \ x = y" by transfer auto definition pdevs_val :: "(nat \ real) \ 'a::real_normed_vector pdevs \ 'a" where "pdevs_val e x = (\i. e i *\<^sub>R pdevs_apply x i)" definition valuate:: "((nat \ real) \ 'a) \ 'a set" where "valuate x = x ` (UNIV \ {-1 .. 1})" lemma valuate_ex: "x \ valuate f \ (\e. (\i. e i \ {-1 .. 1}) \ x = f e)" unfolding valuate_def by (auto simp add: valuate_def Pi_iff) blast instantiation pdevs :: (equal) equal begin definition equal_pdevs::"'a pdevs \ 'a pdevs \ bool" where "equal_pdevs a b \ a = b" instance proof qed (simp add: equal_pdevs_def) end subsection \Affine Forms\ text \The data structure of affine forms represents particular sets, zonotopes\ type_synonym 'a aform = "'a \ 'a pdevs" subsection \Evaluation, Range, Joint Range\ definition aform_val :: "(nat \ real) \ 'a::real_normed_vector aform \ 'a" where "aform_val e X = fst X + pdevs_val e (snd X)" definition Affine :: "'a::real_normed_vector aform \ 'a set" where "Affine X = valuate (\e. aform_val e X)" definition Joints :: "'a::real_normed_vector aform list \ 'a list set" where "Joints XS = valuate (\e. map (aform_val e) XS)" lemma Joints_nthE: assumes "zs \ Joints ZS" obtains e where "\i. i < length zs \ zs ! i = aform_val e (ZS ! i)" "\i. e i \ {-1..1}" using assms by atomize_elim (auto simp: Joints_def Pi_iff valuate_ex) lemma Joints_mapE: assumes "ys \ Joints YS" obtains e where "ys = map (\x. aform_val e x) YS" "\i. e i \ {-1 .. 1}" using assms by (force simp: Joints_def valuate_def) lemma zipped_subset_mapped_Elem: assumes "xs = map (aform_val e) XS" assumes e: "\i. e i \ {-1 .. 1}" assumes [simp]: "length xs = length XS" assumes [simp]: "length ys = length YS" assumes "set (zip ys YS) \ set (zip xs XS)" shows "ys = map (aform_val e) YS" proof - from assms have ys: "\i. i < length xs \ xs ! i = aform_val e (XS ! i)" by auto from assms have set_eq: "{(ys ! i, YS ! i) |i. i < length ys \ i < length YS} \ {(xs ! i, XS ! i) |i. i < length xs \ i < length XS}" using assms(2) by (auto simp: set_zip) hence "\ij YS ! i = XS ! j" by auto then obtain j where j: "\i. i < length YS \ ys ! i = xs ! (j i)" "\i. i < length YS \ YS ! i = XS ! (j i)" "\i. i < length YS \ j i < length XS" by metis show ?thesis using assms by (auto simp: Joints_def j ys intro!: exI[where x=e] nth_equalityI) qed lemma Joints_set_zip_subset: assumes "xs \ Joints XS" assumes "length xs = length XS" assumes "length ys = length YS" assumes "set (zip ys YS) \ set (zip xs XS)" shows "ys \ Joints YS" proof - from Joints_mapE assms obtain e where ys: "xs = map (\x. aform_val e x) XS" and e: "\i. e i \ {-1 .. 1}" by blast show "ys \ Joints YS" using e zipped_subset_mapped_Elem[OF ys e assms(2-4)] by (auto simp: Joints_def valuate_def intro!: exI[where x=e]) qed lemma Joints_set_zip: assumes "ys \ Joints YS" assumes "length xs = length XS" assumes "length YS = length XS" assumes sets_eq: "set (zip xs XS) = set (zip ys YS)" shows "xs \ Joints XS" proof - from assms have "length ys = length YS" by (auto simp: Joints_def valuate_def) from assms(1) this assms(2) show ?thesis by (rule Joints_set_zip_subset) (simp add: assms) qed definition Joints2 :: "'a::real_normed_vector aform list \'b::real_normed_vector aform \ ('a list \ 'b) set" where "Joints2 XS Y = valuate (\e. (map (aform_val e) XS, aform_val e Y))" lemma Joints2E: assumes "zs_y \ Joints2 ZS Y" obtains e where "\i. i < length (fst zs_y) \ (fst zs_y) ! i = aform_val e (ZS ! 
i)" "snd (zs_y) = aform_val e Y" "\i. e i \ {-1..1}" using assms by atomize_elim (auto simp: Joints2_def Pi_iff valuate_ex) lemma nth_in_AffineI: assumes "xs \ Joints XS" assumes "i < length XS" shows "xs ! i \ Affine (XS ! i)" using assms by (force simp: Affine_def Joints_def valuate_def) lemma Cons_nth_in_Joints1: assumes "xs \ Joints XS" assumes "i < length XS" shows "((xs ! i) # xs) \ Joints ((XS ! i) # XS)" using assms by (force simp: Joints_def valuate_def) lemma Cons_nth_in_Joints2: assumes "xs \ Joints XS" assumes "i < length XS" assumes "j < length XS" shows "((xs ! i) #(xs ! j) # xs) \ Joints ((XS ! i)#(XS ! j) # XS)" using assms by (force simp: Joints_def valuate_def) lemma Joints_swap: "x#y#xs\Joints (X#Y#XS) \ y#x#xs \ Joints (Y#X#XS)" by (force simp: Joints_def valuate_def) lemma Joints_swap_Cons_append: "length xs = length XS \ x#ys@xs\Joints (X#YS@XS) \ ys@x#xs \ Joints (YS@X#XS)" by (auto simp: Joints_def valuate_def) lemma Joints_ConsD: "x#xs\Joints (X#XS) \ xs \ Joints XS" by (force simp: Joints_def valuate_def) lemma Joints_appendD1: "ys@xs\Joints (YS@XS) \ length xs = length XS \ xs \ Joints XS" by (force simp: Joints_def valuate_def) lemma Joints_appendD2: "ys@xs\Joints (YS@XS) \ length ys = length YS \ ys \ Joints YS" by (force simp: Joints_def valuate_def) lemma Joints_imp_length_eq: "xs \ Joints XS \ length xs = length XS" by (auto simp: Joints_def valuate_def) lemma Joints_rotate[simp]: "xs@[x] \ Joints (XS @[X]) \ x#xs \ Joints (X#XS)" by (auto simp: Joints_def valuate_def) subsection \Domain\ definition "pdevs_domain x = {i. pdevs_apply x i \ 0}" lemma finite_pdevs_domain[intro, simp]: "finite (pdevs_domain x)" unfolding pdevs_domain_def by transfer lemma in_pdevs_domain[simp]: "i \ pdevs_domain x \ pdevs_apply x i \ 0" by (auto simp: pdevs_domain_def) subsection \Least Fresh Index\ definition degree::"'a::real_vector pdevs \ nat" where "degree x = (LEAST i. \j\i. pdevs_apply x j = 0)" lemma degree[rule_format, intro, simp]: shows "\j\degree x. pdevs_apply x j = 0" unfolding degree_def proof (rule LeastI_ex) have "\j. j > Max (pdevs_domain x) \ j \ (pdevs_domain x)" by (metis Max_less_iff all_not_in_conv less_irrefl_nat finite_pdevs_domain) then show "\xa. \j\xa. pdevs_apply x j = 0" by (auto intro!: exI[where x="Max (pdevs_domain x) + 1"]) qed lemma degree_le: assumes d: "\j \ d. pdevs_apply x j = 0" shows "degree x \ d" unfolding degree_def by (rule Least_le) (rule d) lemma degree_gt: "pdevs_apply x j \ 0 \ degree x > j" by auto lemma pdevs_val_pdevs_domain: "pdevs_val e X = (\i\pdevs_domain X. e i *\<^sub>R pdevs_apply X i)" by (auto simp: pdevs_val_def intro!: suminf_finite) lemma pdevs_val_sum_le: "degree X \ d \ pdevs_val e X = (\i < d. e i *\<^sub>R pdevs_apply X i)" by (force intro!: degree_gt sum.mono_neutral_cong_left simp: pdevs_val_pdevs_domain) lemmas pdevs_val_sum = pdevs_val_sum_le[OF order_refl] lemma pdevs_val_zero[simp]: "pdevs_val (\_. 0) x = 0" by (auto simp: pdevs_val_sum) lemma degree_eqI: assumes "pdevs_apply x d \ 0" assumes "\j. j > d \ pdevs_apply x j = 0" shows "degree x = Suc d" unfolding eq_iff by (auto intro!: degree_gt degree_le assms simp: Suc_le_eq) lemma finite_degree_nonzero[intro, simp]: "finite {i. pdevs_apply x i \ 0}" by transfer (auto simp: vimage_def Collect_neg_eq) lemma degree_eq_Suc_max: "degree x = (if (\i. pdevs_apply x i = 0) then 0 else Suc (Max {i. pdevs_apply x i \ 0}))" proof - { assume "\i. 
pdevs_apply x i = 0" hence ?thesis by auto (metis degree_le le_0_eq) } moreover { fix i assume "pdevs_apply x i \ 0" hence ?thesis using Max_in[OF finite_degree_nonzero, of x] by (auto intro!: degree_eqI) (metis Max.coboundedI[OF finite_degree_nonzero] in_pdevs_domain le_eq_less_or_eq less_asym pdevs_domain_def) } ultimately show ?thesis by blast qed lemma pdevs_val_degree_cong: assumes "b = d" assumes "\i. i < degree b \ a i = c i" shows "pdevs_val a b = pdevs_val c d" using assms by (auto simp: pdevs_val_sum) abbreviation degree_aform::"'a::real_vector aform \ nat" where "degree_aform X \ degree (snd X)" lemma degree_cong: "(\i. (pdevs_apply x i = 0) = (pdevs_apply y i = 0)) \ degree x = degree y" unfolding degree_def by auto lemma Least_True_nat[intro, simp]: "(LEAST i::nat. True) = 0" by (metis (lifting) One_nat_def less_one not_less_Least not_less_eq) lemma sorted_list_of_pdevs_domain_eq: "sorted_list_of_set (pdevs_domain X) = filter ((\) 0 o pdevs_apply X) [0..x. x", simplified]) subsection \Total Deviation\ definition tdev::"'a::ordered_euclidean_space pdevs \ 'a" where "tdev x = (\ipdevs_apply x i\)" lemma abs_pdevs_val_le_tdev: "e \ UNIV \ {-1 .. 1} \ \pdevs_val e x\ \ tdev x" by (force simp: pdevs_val_sum tdev_def abs_scaleR Pi_iff intro!: order_trans[OF sum_abs] sum_mono scaleR_left_le_one_le intro: abs_leI) subsection \Binary Pointwise Operations\ definition binop_pdevs_raw::"('a::zero \ 'b::zero \ 'c::zero) \ (nat \ 'a) \ (nat \ 'b) \ nat \ 'c" where "binop_pdevs_raw f x y i = (if x i = 0 \ y i = 0 then 0 else f (x i) (y i))" lemma nonzeros_binop_pdevs_subset: "{i. binop_pdevs_raw f x y i \ 0} \ {i. x i \ 0} \ {i. y i \ 0}" by (auto simp: binop_pdevs_raw_def) lift_definition binop_pdevs:: "('a \ 'b \ 'c) \ 'a::zero pdevs \ 'b::zero pdevs \ 'c::zero pdevs" is binop_pdevs_raw using nonzeros_binop_pdevs_subset by (rule finite_subset) auto lemma pdevs_apply_binop_pdevs[simp]: "pdevs_apply (binop_pdevs f x y) i = (if pdevs_apply x i = 0 \ pdevs_apply y i = 0 then 0 else f (pdevs_apply x i) (pdevs_apply y i))" by transfer (auto simp: binop_pdevs_raw_def) subsection \Addition\ definition add_pdevs::"'a::real_vector pdevs \ 'a pdevs \ 'a pdevs" where "add_pdevs = binop_pdevs (+)" lemma pdevs_apply_add_pdevs[simp]: "pdevs_apply (add_pdevs X Y) n = pdevs_apply X n + pdevs_apply Y n" by (auto simp: add_pdevs_def) lemma pdevs_val_add_pdevs[simp]: fixes x y::"'a::euclidean_space" shows "pdevs_val e (add_pdevs X Y) = pdevs_val e X + pdevs_val e Y" proof - let ?sum = "\m X. \i < m. e i *\<^sub>R pdevs_apply X i" let ?m = "max (degree X) (degree Y)" have "pdevs_val e X + pdevs_val e Y = ?sum (degree X) X + ?sum (degree Y) Y" by (simp add: pdevs_val_sum) also have "?sum (degree X) X = ?sum ?m X" by (rule sum.mono_neutral_cong_left) auto also have "?sum (degree Y) Y = ?sum ?m Y" by (rule sum.mono_neutral_cong_left) auto also have "?sum ?m X + ?sum ?m Y = (\i < ?m. e i *\<^sub>R (pdevs_apply X i + pdevs_apply Y i))" by (simp add: scaleR_right_distrib sum.distrib) also have "\ = (\i. e i *\<^sub>R (pdevs_apply X i + pdevs_apply Y i))" by (rule suminf_finite[symmetric]) auto also have "\ = pdevs_val e (add_pdevs X Y)" by (simp add: pdevs_val_def) finally show "pdevs_val e (add_pdevs X Y) = pdevs_val e X + pdevs_val e Y" by simp qed subsection \Total Deviation\ lemma tdev_eq_zero_iff: fixes X::"real pdevs" shows "tdev X = 0 \ (\e. pdevs_val e X = 0)" by (force simp add: pdevs_val_sum tdev_def sum_nonneg_eq_0_iff dest!: spec[where x="\i. 
if pdevs_apply X i \ 0 then 1 else -1"] split: if_split_asm) lemma tdev_nonneg[intro, simp]: "tdev X \ 0" by (auto simp: tdev_def) lemma tdev_nonpos_iff[simp]: "tdev X \ 0 \ tdev X = 0" by (auto simp: order.antisym) subsection \Unary Operations\ definition unop_pdevs_raw:: "('a::zero \ 'b::zero) \ (nat \ 'a) \ nat \ 'b" where "unop_pdevs_raw f x i = (if x i = 0 then 0 else f (x i))" lemma nonzeros_unop_pdevs_subset: "{i. unop_pdevs_raw f x i \ 0} \ {i. x i \ 0}" by (auto simp: unop_pdevs_raw_def) lift_definition unop_pdevs:: "('a \ 'b) \ 'a::zero pdevs \ 'b::zero pdevs" is unop_pdevs_raw using nonzeros_unop_pdevs_subset by (rule finite_subset) auto lemma pdevs_apply_unop_pdevs[simp]: "pdevs_apply (unop_pdevs f x) i = (if pdevs_apply x i = 0 then 0 else f (pdevs_apply x i))" by transfer (auto simp: unop_pdevs_raw_def) lemma pdevs_domain_unop_linear: assumes "linear f" shows "pdevs_domain (unop_pdevs f x) \ pdevs_domain x" proof - interpret f: linear f by fact show ?thesis by (auto simp: f.zero) qed lemma pdevs_val_unop_linear: assumes "linear f" shows "pdevs_val e (unop_pdevs f x) = f (pdevs_val e x)" proof - interpret f: linear f by fact have *: "\i. (if pdevs_apply x i = 0 then 0 else f (pdevs_apply x i)) = f (pdevs_apply x i)" by (auto simp: f.zero) have "pdevs_val e (unop_pdevs f x) = (\i\pdevs_domain (unop_pdevs f x). e i *\<^sub>R f (pdevs_apply x i))" by (auto simp add: pdevs_val_pdevs_domain *) also have "\ = (\xa\pdevs_domain x. e xa *\<^sub>R f (pdevs_apply x xa))" by (auto intro!: sum.mono_neutral_cong_left) also have "\ = f (pdevs_val e x)" by (auto simp add: pdevs_val_pdevs_domain f.sum f.scaleR) finally show ?thesis . qed subsection \Pointwise Scaling of Partial Deviations\ definition scaleR_pdevs::"real \ 'a::real_vector pdevs \ 'a pdevs" where "scaleR_pdevs r x = unop_pdevs ((*\<^sub>R) r) x" lemma pdevs_apply_scaleR_pdevs[simp]: "pdevs_apply (scaleR_pdevs x Y) n = x *\<^sub>R pdevs_apply Y n" by (auto simp: scaleR_pdevs_def) lemma degree_scaleR_pdevs[simp]: "degree (scaleR_pdevs r x) = (if r = 0 then 0 else degree x)" unfolding degree_def by auto lemma pdevs_val_scaleR_pdevs[simp]: fixes x::real and Y::"'a::real_normed_vector pdevs" shows "pdevs_val e (scaleR_pdevs x Y) = x *\<^sub>R pdevs_val e Y" by (auto simp: pdevs_val_sum scaleR_sum_right ac_simps) subsection \Partial Deviations Scale Pointwise\ definition pdevs_scaleR::"real pdevs \ 'a::real_vector \ 'a pdevs" where "pdevs_scaleR r x = unop_pdevs (\r. 
r *\<^sub>R x) r" lemma pdevs_apply_pdevs_scaleR[simp]: "pdevs_apply (pdevs_scaleR X y) n = pdevs_apply X n *\<^sub>R y" by (auto simp: pdevs_scaleR_def) lemma degree_pdevs_scaleR[simp]: "degree (pdevs_scaleR r x) = (if x = 0 then 0 else degree r)" unfolding degree_def by auto lemma pdevs_val_pdevs_scaleR[simp]: fixes X::"real pdevs" and y::"'a::real_normed_vector" shows "pdevs_val e (pdevs_scaleR X y) = pdevs_val e X *\<^sub>R y" by (auto simp: pdevs_val_sum scaleR_sum_left) subsection \Pointwise Unary Minus\ definition uminus_pdevs::"'a::real_vector pdevs \ 'a pdevs" where "uminus_pdevs = unop_pdevs uminus" lemma pdevs_apply_uminus_pdevs[simp]: "pdevs_apply (uminus_pdevs x) = - pdevs_apply x" by (auto simp: uminus_pdevs_def) lemma degree_uminus_pdevs[simp]: "degree (uminus_pdevs x) = degree x" by (rule degree_cong) simp lemma pdevs_val_uminus_pdevs[simp]: "pdevs_val e (uminus_pdevs x) = - pdevs_val e x" unfolding pdevs_val_sum by (auto simp: sum_negf) definition "uminus_aform X = (- fst X, uminus_pdevs (snd X))" lemma fst_uminus_aform[simp]: "fst (uminus_aform Y) = - fst Y" by (simp add: uminus_aform_def) lemma aform_val_uminus_aform[simp]: "aform_val e (uminus_aform X) = - aform_val e X" by (auto simp: uminus_aform_def aform_val_def) subsection \Constant\ lift_definition zero_pdevs::"'a::zero pdevs" is "\_. 0" by simp lemma pdevs_apply_zero_pdevs[simp]: "pdevs_apply zero_pdevs i = 0" by transfer simp lemma pdevs_val_zero_pdevs[simp]: "pdevs_val e zero_pdevs = 0" by (auto simp: pdevs_val_def) definition "num_aform f = (f, zero_pdevs)" subsection \Inner Product\ definition pdevs_inner::"'a::euclidean_space pdevs \ 'a \ real pdevs" where "pdevs_inner x b = unop_pdevs (\x. x \ b) x" lemma pdevs_apply_pdevs_inner[simp]: "pdevs_apply (pdevs_inner p a) i = pdevs_apply p i \ a" by (simp add: pdevs_inner_def) lemma pdevs_val_pdevs_inner[simp]: "pdevs_val e (pdevs_inner p a) = pdevs_val e p \ a" by (auto simp add: inner_sum_left pdevs_val_pdevs_domain intro!: sum.mono_neutral_cong_left) definition inner_aform::"'a::euclidean_space aform \ 'a \ real aform" where "inner_aform X b = (fst X \ b, pdevs_inner (snd X) b)" subsection \Inner Product Pair\ definition inner2::"'a::euclidean_space \ 'a \ 'a \ real*real" where "inner2 x n l = (x \ n, x \ l)" definition pdevs_inner2::"'a::euclidean_space pdevs \ 'a \ 'a \ (real*real) pdevs" where "pdevs_inner2 X n l = unop_pdevs (\x. inner2 x n l) X" lemma pdevs_apply_pdevs_inner2[simp]: "pdevs_apply (pdevs_inner2 p a b) i = (pdevs_apply p i \ a, pdevs_apply p i \ b)" by (simp add: pdevs_inner2_def inner2_def zero_prod_def) definition inner2_aform::"'a::euclidean_space aform \ 'a \ 'a \ (real*real) aform" where "inner2_aform X a b = (inner2 (fst X) a b, pdevs_inner2 (snd X) a b)" lemma linear_inner2[intro, simp]: "linear (\x. inner2 x n i)" by unfold_locales (auto simp: inner2_def algebra_simps) lemma aform_val_inner2_aform[simp]: "aform_val e (inner2_aform Z n i) = inner2 (aform_val e Z) n i" proof - have "aform_val e (inner2_aform Z n i) = inner2 (fst Z) n i + inner2 (pdevs_val e (snd Z)) n i" by (auto simp: aform_val_def inner2_aform_def pdevs_inner2_def pdevs_val_unop_linear) also have "\ = inner2 (aform_val e Z) n i" by (simp add: inner2_def algebra_simps aform_val_def) finally show ?thesis . 
qed subsection \Update\ lemma pdevs_val_upd[simp]: "pdevs_val (e(n := e')) X = pdevs_val e X - e n * pdevs_apply X n + e' * pdevs_apply X n" unfolding pdevs_val_def by (subst suminf_finite[OF finite.insertI[OF finite_degree_nonzero], of n X], auto simp: pdevs_val_def sum.insert_remove)+ lemma nonzeros_fun_upd: "{i. (f(n := a)) i \ 0} \ {i. f i \ 0} \ {n}" by (auto split: if_split_asm) lift_definition pdev_upd::"'a::real_vector pdevs \ nat \ 'a \ 'a pdevs" is "\x n a. x(n:=a)" by (rule finite_subset[OF nonzeros_fun_upd]) simp lemma pdevs_apply_pdev_upd[simp]: "pdevs_apply (pdev_upd X n x) = (pdevs_apply X)(n:=x)" by transfer simp lemma pdevs_val_pdev_upd[simp]: "pdevs_val e (pdev_upd X n x) = pdevs_val e X + e n *\<^sub>R x - e n *\<^sub>R pdevs_apply X n" unfolding pdevs_val_def by (subst suminf_finite[OF finite.insertI[OF finite_degree_nonzero], of n X], auto simp: pdevs_val_def sum.insert_remove)+ lemma degree_pdev_upd: assumes "x = 0 \ pdevs_apply X n = 0" shows "degree (pdev_upd X n x) = degree X" using assms by (auto intro!: degree_cong split: if_split_asm) lemma degree_pdev_upd_le: assumes "degree X \ n" shows "degree (pdev_upd X n x) \ Suc n" using assms by (auto intro!: degree_le) subsection \Inf/Sup\ definition "Inf_aform X = fst X - tdev (snd X)" definition "Sup_aform X = fst X + tdev (snd X)" lemma Inf_aform: assumes "e \ UNIV \ {-1 .. 1}" shows "Inf_aform X \ aform_val e X" using order_trans[OF abs_ge_minus_self abs_pdevs_val_le_tdev[OF assms]] by (auto simp: Inf_aform_def aform_val_def minus_le_iff) lemma Sup_aform: assumes "e \ UNIV \ {-1 .. 1}" shows "aform_val e X \ Sup_aform X" using order_trans[OF abs_ge_self abs_pdevs_val_le_tdev[OF assms]] by (auto simp: Sup_aform_def aform_val_def) subsection \Minkowski Sum\ definition msum_pdevs_raw::"nat\(nat \ 'a::real_vector)\(nat \ 'a)\(nat\'a)" where "msum_pdevs_raw n x y i = (if i < n then x i else y (i - n))" lemma nonzeros_msum_pdevs_raw: "{i. msum_pdevs_raw n f g i \ 0} = ({0.. {i. f i \ 0}) \ (+) n ` ({i. g i \ 0})" by (force simp: msum_pdevs_raw_def not_less split: if_split_asm) lift_definition msum_pdevs::"nat\'a::real_vector pdevs\'a pdevs\'a pdevs" is msum_pdevs_raw unfolding nonzeros_msum_pdevs_raw by simp lemma pdevs_apply_msum_pdevs: "pdevs_apply (msum_pdevs n f g) i = (if i < n then pdevs_apply f i else pdevs_apply g (i - n))" by transfer (auto simp: msum_pdevs_raw_def) lemma degree_least_nonzero: assumes "degree f \ 0" shows "pdevs_apply f (degree f - 1) \ 0" proof assume H: "pdevs_apply f (degree f - 1) = 0" { fix j assume "j\degree f - 1" with H have "pdevs_apply f j = 0" by (cases "degree f - 1 = j") auto } from degree_le[rule_format, OF this] have "degree f \ degree f - 1" by blast with assms show False by simp qed lemma degree_leI: assumes "(\i. 
pdevs_apply y i = 0 \ pdevs_apply x i = 0)" shows "degree x \ degree y" proof cases assume "degree x \ 0" from degree_least_nonzero[OF this] have "pdevs_apply y (degree x - 1) \ 0" by (auto simp: assms split: if_split_asm) from degree_gt[OF this] show ?thesis by simp qed simp lemma degree_msum_pdevs_ge1: shows "degree f \ n \ degree f \ degree (msum_pdevs n f g)" by (rule degree_leI) (auto simp: pdevs_apply_msum_pdevs split: if_split_asm) lemma degree_msum_pdevs_ge2: assumes "degree f \ n" shows "degree g \ degree (msum_pdevs n f g) - n" proof cases assume "degree g \ 0" hence "pdevs_apply g (degree g - 1) \ 0" by (rule degree_least_nonzero) hence "pdevs_apply (msum_pdevs n f g) (n + degree g - 1) \ 0" using assms by (auto simp: pdevs_apply_msum_pdevs) from degree_gt[OF this] show ?thesis by simp qed simp lemma degree_msum_pdevs_le: shows "degree (msum_pdevs n f g) \ n + degree g" by (auto intro!: degree_le simp: pdevs_apply_msum_pdevs) lemma sum_msum_pdevs_cases: assumes "degree f \ n" assumes [simp]: "\i. e i 0 = 0" shows "(\i i i i\{.. {i. i < n}. e i (pdevs_apply f i)) + (\i\{.. - {i. i < n}. e i (pdevs_apply g (i - n)))" (is "_ = ?sum_f + ?sum_g") by (simp add: sum.If_cases if_distrib) also have "?sum_f = (\i = 0..i\{0 + n.. = (\i = 0.. = (\i = 0.. n \ tdev (msum_pdevs n f g) = tdev f + tdev g" by (auto simp: tdev_def pdevs_apply_msum_pdevs intro!: sum_msum_pdevs_cases) lemma pdevs_val_msum_pdevs: "degree f \ n \ pdevs_val e (msum_pdevs n f g) = pdevs_val e f + pdevs_val (\i. e (i + n)) g" by (auto simp: pdevs_val_sum pdevs_apply_msum_pdevs intro!: sum_msum_pdevs_cases) definition msum_aform::"nat \ 'a::real_vector aform \ 'a aform \ 'a aform" where "msum_aform n f g = (fst f + fst g, msum_pdevs n (snd f) (snd g))" lemma fst_msum_aform[simp]: "fst (msum_aform n f g) = fst f + fst g" by (simp add: msum_aform_def) lemma snd_msum_aform[simp]: "snd (msum_aform n f g) = msum_pdevs n (snd f) (snd g)" by (simp add: msum_aform_def) lemma finite_nonzero_summable: "finite {i. f i \ 0} \ summable f" by (auto intro!: sums_summable sums_finite) lemma aform_val_msum_aform: assumes "degree_aform f \ n" shows "aform_val e (msum_aform n f g) = aform_val e f + aform_val (\i. e (i + n)) g" using assms by (auto simp: pdevs_val_msum_pdevs aform_val_def) lemma Inf_aform_msum_aform: "degree_aform X \ n \ Inf_aform (msum_aform n X Y) = Inf_aform X + Inf_aform Y" by (simp add: Inf_aform_def tdev_msum_pdevs) lemma Sup_aform_msum_aform: "degree_aform X \ n \ Sup_aform (msum_aform n X Y) = Sup_aform X + Sup_aform Y" by (simp add: Sup_aform_def tdev_msum_pdevs) definition "independent_from d Y = msum_aform d (0, zero_pdevs) Y" definition "independent_aform X Y = independent_from (degree_aform X) Y" lemma degree_zero_pdevs[simp]: "degree zero_pdevs = 0" by (metis degree_least_nonzero pdevs_apply_zero_pdevs) lemma independent_aform_Joints: assumes "x \ Affine X" assumes "y \ Affine Y" shows "[x, y] \ Joints [X, independent_aform X Y]" using assms unfolding Affine_def valuate_def Joints_def apply safe subgoal premises prems for e ea using prems by (intro image_eqI[where x="\i. if i < degree_aform X then e i else ea (i - degree_aform X)"]) (auto simp: aform_val_def pdevs_val_msum_pdevs Pi_iff independent_aform_def independent_from_def intro!: pdevs_val_degree_cong) done lemma msum_aform_Joints: assumes "d \ degree_aform X" assumes "\X. 
X \ set XS \ d \ degree_aform X" assumes "(x#xs) \ Joints (X#XS)" assumes "y \ Affine Y" shows "((x + y)#x#xs) \ Joints (msum_aform d X Y#X#XS)" using assms unfolding Joints_def valuate_def Affine_def proof (safe, goal_cases) case (1 e ea a b zs) then show ?case by (intro image_eqI[where x = "\i. if i < d then e i else ea (i - d)"]) (force simp: aform_val_def pdevs_val_msum_pdevs intro!: intro!: pdevs_val_degree_cong)+ qed lemma Joints_msum_aform: assumes "d \ degree_aform X" assumes "\Y. Y \ set YS \ d \ degree_aform Y" shows "Joints (msum_aform d X Y#YS) = {((x + y)#ys) |x y ys. y \ Affine Y \ x#ys \ Joints (X#YS)}" unfolding Affine_def valuate_def Joints_def proof (safe, goal_cases) case (1 x e) thus ?case using assms by (intro exI[where x = "aform_val e X"] exI[where x = "aform_val ((\i. e (i + d))) Y"]) (auto simp add: aform_val_def pdevs_val_msum_pdevs) next case (2 x xa y ys e ea) thus ?case using assms by (intro image_eqI[where x="\i. if i < d then ea i else e (i - d)"]) (force simp: aform_val_def pdevs_val_msum_pdevs Pi_iff intro!: pdevs_val_degree_cong)+ qed lemma Joints_singleton_image: "Joints [x] = (\x. [x]) ` Affine x" by (auto simp: Joints_def Affine_def valuate_def) lemma Collect_extract_image: "{g (f x y) |x y. P x y} = g ` {f x y |x y. P x y}" by auto lemma inj_Cons: "inj (\x. x#xs)" by (auto intro!: injI) lemma Joints_Nil[simp]: "Joints [] = {[]}" by (force simp: Joints_def valuate_def) lemma msum_pdevs_zero_ident[simp]: "msum_pdevs 0 zero_pdevs x = x" by transfer (auto simp: msum_pdevs_raw_def) lemma msum_aform_zero_ident[simp]: "msum_aform 0 (0, zero_pdevs) x = x" by (simp add: msum_aform_def) lemma mem_Joints_singleton: "(x \ Joints [X]) = (\y. x = [y] \ y \ Affine X)" by (auto simp: Affine_def valuate_def Joints_def) lemma singleton_mem_Joints[simp]: "[x] \ Joints [X] \ x \ Affine X" by (auto simp: mem_Joints_singleton) lemma msum_aform_Joints_without_first: assumes "d \ degree_aform X" assumes "\X. X \ set XS \ d \ degree_aform X" assumes "(x#xs) \ Joints (X#XS)" assumes "y \ Affine Y" assumes "z = x + y" shows "z#xs \ Joints (msum_aform d X Y#XS)" unfolding \z = x + y\ using msum_aform_Joints[OF assms(1-4)] by (force simp: Joints_def valuate_def) lemma Affine_msum_aform: assumes "d \ degree_aform X" shows "Affine (msum_aform d X Y) = {x + y |x y. x \ Affine X \ y \ Affine Y}" using Joints_msum_aform[OF assms, of Nil Y, simplified, unfolded mem_Joints_singleton] by (auto simp add: Joints_singleton_image Collect_extract_image[where g="\x. [x]"] inj_image_eq_iff[OF inj_Cons] ) lemma Affine_zero_pdevs[simp]: "Affine (0, zero_pdevs) = {0}" by (force simp: Affine_def valuate_def aform_val_def) lemma Affine_independent_aform: "Affine (independent_aform X Y) = Affine Y" by (auto simp: independent_aform_def independent_from_def Affine_msum_aform) lemma abs_diff_eq1: fixes l u::"'a::ordered_euclidean_space" shows "l \ u \ \u - l\ = u - l" by (metis abs_of_nonneg diff_add_cancel le_add_same_cancel2) lemma compact_sum: fixes f :: "'a \ 'b::topological_space \ 'c::real_normed_vector" assumes "finite I" assumes "\i. i \ I \ compact (S i)" assumes "\i. i \ I \ continuous_on (S i) (f i)" assumes "I \ J" shows "compact {\i\I. f i (x i) | x. x \ Pi J S}" using assms proof (induct I) case empty thus ?case proof (cases "\x. x \ Pi J S") case False hence *: "{\i\{}. f i (x i) |x. x \ Pi J S} = {}" by (auto simp: Pi_iff) show ?thesis unfolding * by simp qed auto next case (insert a I) hence "{\i\insert a I. f i (xa i) |xa. xa \ Pi J S} = {x + y |x y. x \ f a ` S a \ y \ {\i\I. 
f i (x i) |x. x \ Pi J S}}" proof safe fix s x assume "s \ S a" "x \ Pi J S" thus "\xa. f a s + (\i\I. f i (x i)) = (\i\insert a I. f i (xa i)) \ xa \ Pi J S" using insert by (auto intro!: exI[where x="x(a:=s)"] sum.cong) qed force also have "compact \" using insert by (intro compact_sums) (auto intro!: compact_continuous_image) finally show ?case . qed lemma compact_Affine: fixes X::"'a::ordered_euclidean_space aform" shows "compact (Affine X)" proof - have "Affine X = {x + y|x y. x \ {fst X} \ y \ {(\i \ {0..R pdevs_apply (snd X) i) | e. e \ UNIV \ {-1 .. 1}}}" by (auto simp: Affine_def valuate_def aform_val_def pdevs_val_sum atLeast0LessThan) also have "compact \" by (rule compact_sums) (auto intro!: compact_sum continuous_intros) finally show ?thesis . qed lemma Joints2_JointsI: "(xs, x) \ Joints2 XS X \ x#xs \ Joints (X#XS)" by (auto simp: Joints_def Joints2_def valuate_def) subsection \Splitting\ definition "split_aform X i = (let xi = pdevs_apply (snd X) i /\<^sub>R 2 in ((fst X - xi, pdev_upd (snd X) i xi), (fst X + xi, pdev_upd (snd X) i xi)))" lemma split_aformE: assumes "e \ UNIV \ {-1 .. 1}" assumes "x = aform_val e X" obtains err where "x = aform_val (e(i:=err)) (fst (split_aform X i))" "err \ {-1 .. 1}" | err where "x = aform_val (e(i:=err)) (snd (split_aform X i))" "err \ {-1 .. 1}" proof (atomize_elim) let ?thesis = "(\err. x = aform_val (e(i := err)) (fst (split_aform X i)) \ err \ {- 1..1}) \ (\err. x = aform_val (e(i := err)) (snd (split_aform X i)) \ err \ {- 1..1})" { assume "pdevs_apply (snd X) i = 0" hence "X = fst (split_aform X i)" by (auto simp: split_aform_def intro!: prod_eqI pdevs_eqI) with assms have ?thesis by (auto intro!: exI[where x="e i"]) } moreover { assume "pdevs_apply (snd X) i \ 0" hence [simp]: "degree_aform X > i" by (rule degree_gt) note assms(2) also have "aform_val e X = fst X + (\iR pdevs_apply (snd X) i)" by (simp add: aform_val_def pdevs_val_sum) also have rewr: "{.. {i}" by auto have "(\iR pdevs_apply (snd X) i) = (\i \ {0..R pdevs_apply (snd X) i) + e i *\<^sub>R pdevs_apply (snd X) i" by (subst rewr, subst sum.union_disjoint) auto finally have "x = fst X + \" . hence "x = aform_val (e(i:=2 * e i - 1)) (snd (split_aform X i))" "x = aform_val (e(i:=2 * e i + 1)) (fst (split_aform X i))" by (auto simp: aform_val_def split_aform_def Let_def pdevs_val_sum atLeast0LessThan Diff_eq degree_pdev_upd if_distrib sum.If_cases field_simps scaleR_left_distrib[symmetric]) moreover have "2 * e i - 1 \ {-1 .. 1} \ 2 * e i + 1 \ {-1 .. 1}" using assms by (auto simp: not_le Pi_iff dest!: spec[where x=i]) ultimately have ?thesis by blast } ultimately show ?thesis by blast qed lemma pdevs_val_add: "pdevs_val (\i. e i + f i) xs = pdevs_val e xs + pdevs_val f xs" by (auto simp: pdevs_val_pdevs_domain algebra_simps sum.distrib) lemma pdevs_val_minus: "pdevs_val (\i. e i - f i) xs = pdevs_val e xs - pdevs_val f xs" by (auto simp: pdevs_val_pdevs_domain algebra_simps sum_subtractf) lemma pdevs_val_cmul: "pdevs_val (\i. u * e i) xs = u *\<^sub>R pdevs_val e xs" by (auto simp: pdevs_val_pdevs_domain scaleR_sum_right) lemma atLeastAtMost_absI: "- a \ a \ \x::real\ \ \a\ \ x \ atLeastAtMost (- a) a" by auto lemma divide_atLeastAtMost_1_absI: "\x::real\ \ \a\ \ x/a \ {-1 .. 
1}" by (intro atLeastAtMost_absI) (auto simp: divide_le_eq_1) lemma convex_scaleR_aux: "u + v = 1 \ u *\<^sub>R x + v *\<^sub>R x = (x::'a::real_vector)" by (metis scaleR_add_left scaleR_one) lemma convex_mult_aux: "u + v = 1 \ u * x + v * x = (x::real)" using convex_scaleR_aux[of u v x] by simp lemma convex_Affine: "convex (Affine X)" proof (rule convexI) fix x y::'a and u v::real assume "x \ Affine X" "y \ Affine X" and convex: "0 \ u" "0 \ v" "u + v = 1" then obtain e f where x: "x = aform_val e X" "e \ UNIV \ {-1 .. 1}" and y: "y = aform_val f X" "f \ UNIV \ {-1 .. 1}" by (auto simp: Affine_def valuate_def) let ?conv = "\i. u * e i + v * f i" { fix i have "\?conv i\ \ u * \e i\ + v * \f i\" using convex by (intro order_trans[OF abs_triangle_ineq]) (simp add: abs_mult) also have "\ \ 1" using convex x y by (intro convex_bound_le) (auto simp: Pi_iff abs_real_def) finally have "?conv i \ 1" "-1 \ ?conv i" by (auto simp: abs_real_def split: if_split_asm) } thus "u *\<^sub>R x + v *\<^sub>R y \ Affine X" using convex x y by (auto simp: Affine_def valuate_def aform_val_def pdevs_val_add pdevs_val_cmul algebra_simps convex_scaleR_aux intro!: image_eqI[where x="?conv"]) qed lemma segment_in_aform_val: assumes "e \ UNIV \ {-1 .. 1}" assumes "f \ UNIV \ {-1 .. 1}" shows "closed_segment (aform_val e X) (aform_val f X) \ Affine X" proof - have "aform_val e X \ Affine X" "aform_val f X \ Affine X" using assms by (auto simp: Affine_def valuate_def) with convex_Affine[of X, simplified convex_contains_segment] show ?thesis by simp qed subsection \From List of Generators\ lift_definition pdevs_of_list::"'a::zero list \ 'a pdevs" is "\xs i. if i < length xs then xs ! i else 0" by auto lemma pdevs_apply_pdevs_of_list: "pdevs_apply (pdevs_of_list xs) i = (if i < length xs then xs ! i else 0)" by transfer simp lemma pdevs_apply_pdevs_of_list_Nil[simp]: "pdevs_apply (pdevs_of_list []) i = 0" by transfer auto lemma pdevs_apply_pdevs_of_list_Cons: "pdevs_apply (pdevs_of_list (x # xs)) i = (if i = 0 then x else pdevs_apply (pdevs_of_list xs) (i - 1))" by transfer auto lemma pdevs_domain_pdevs_of_list_Cons[simp]: "pdevs_domain (pdevs_of_list (x # xs)) = (if x = 0 then {} else {0}) \ (+) 1 ` pdevs_domain (pdevs_of_list xs)" by (force simp: pdevs_apply_pdevs_of_list_Cons split: if_split_asm) lemma pdevs_val_pdevs_of_list_eq[simp]: "pdevs_val e (pdevs_of_list (x # xs)) = e 0 *\<^sub>R x + pdevs_val (e o (+) 1) (pdevs_of_list xs)" proof - have "pdevs_val e (pdevs_of_list (x # xs)) = (\i\pdevs_domain (pdevs_of_list (x # xs)) \ {0}. e i *\<^sub>R x) + (\i\pdevs_domain (pdevs_of_list (x # xs)) \ - {0}. e i *\<^sub>R pdevs_apply (pdevs_of_list xs) (i - Suc 0))" (is "_ = ?l + ?r") by (simp add: pdevs_val_pdevs_domain if_distrib sum.If_cases pdevs_apply_pdevs_of_list_Cons) also have "?r = (\i\pdevs_domain (pdevs_of_list xs). e (Suc i) *\<^sub>R pdevs_apply (pdevs_of_list xs) i)" by (rule sum.reindex_cong[of "\i. i + 1"]) auto also have "\ = pdevs_val (e o (+) 1) (pdevs_of_list xs)" by (simp add: pdevs_val_pdevs_domain ) also have "?l = (\i\{0}. e i *\<^sub>R x)" by (rule sum.mono_neutral_cong_left) auto also have "\ = e 0 *\<^sub>R x" by simp finally show ?thesis . 
qed lemma less_degree_pdevs_of_list_imp_less_length: assumes "i < degree (pdevs_of_list xs)" shows "i < length xs" proof - from assms have "pdevs_apply (pdevs_of_list xs) (degree (pdevs_of_list xs) - 1) \ 0" by (metis degree_least_nonzero less_nat_zero_code) hence "degree (pdevs_of_list xs) - 1 < length xs" by (simp add: pdevs_apply_pdevs_of_list split: if_split_asm) with assms show ?thesis by simp qed lemma tdev_pdevs_of_list[simp]: "tdev (pdevs_of_list xs) = sum_list (map abs xs)" by (auto simp: tdev_def pdevs_apply_pdevs_of_list sum_list_sum_nth less_degree_pdevs_of_list_imp_less_length intro!: sum.mono_neutral_cong_left degree_gt) lemma pdevs_of_list_Nil[simp]: "pdevs_of_list [] = zero_pdevs" by (auto intro!: pdevs_eqI) lemma pdevs_val_inj_sumI: fixes K::"'a set" and g::"'a \ nat" assumes "finite K" assumes "inj_on g K" assumes "pdevs_domain x \ g ` K" assumes "\i. i \ K \ g i \ pdevs_domain x \ f i = 0" assumes "\i. i \ K \ g i \ pdevs_domain x \ f i = e (g i) *\<^sub>R pdevs_apply x (g i)" shows "pdevs_val e x = (\i\K. f i)" proof - have [simp]: "inj_on (the_inv_into K g) (pdevs_domain x)" using assms by (auto simp: intro!: subset_inj_on[OF inj_on_the_inv_into]) { fix y assume y: "y \ pdevs_domain x" have g_inv: "g (the_inv_into K g y) = y" by (meson assms(2) assms(3) y f_the_inv_into_f subset_eq) have inv_in: "the_inv_into K g y \ K" by (meson assms(2) assms(3) y subset_iff in_pdevs_domain the_inv_into_into) have inv3: "the_inv_into (pdevs_domain x) (the_inv_into K g) (the_inv_into K g y) = g (the_inv_into K g y)" using assms y by (subst the_inv_into_f_f) (auto simp: f_the_inv_into_f[OF assms(2)]) note g_inv inv_in inv3 } note this[simp] have "pdevs_val e x = (\i\pdevs_domain x. e i *\<^sub>R pdevs_apply x i)" by (simp add: pdevs_val_pdevs_domain) also have "\ = (\i \ the_inv_into K g ` pdevs_domain x. e (g i) *\<^sub>R pdevs_apply x (g i))" by (rule sum.reindex_cong[OF inj_on_the_inv_into]) auto also have "\ = (\i\K. f i)" using assms by (intro sum.mono_neutral_cong_left) (auto simp: the_inv_into_image_eq) finally show ?thesis . qed lemma pdevs_domain_pdevs_of_list_le: "pdevs_domain (pdevs_of_list xs) \ {0..(i,x)\zip [0..R x)" by (auto simp: sum_list_distinct_conv_sum_set in_set_zip image_fst_zip pdevs_apply_pdevs_of_list distinct_zipI1 intro!: pdevs_val_inj_sumI[of _ fst] split: if_split_asm) lemma scaleR_sum_list: fixes xs::"'a::real_vector list" shows "a *\<^sub>R sum_list xs = sum_list (map (scaleR a) xs)" by (induct xs) (auto simp: algebra_simps) lemma pdevs_val_const_pdevs_of_list: "pdevs_val (\_. c) (pdevs_of_list xs) = c *\<^sub>R sum_list xs" unfolding pdevs_val_zip split_beta' scaleR_sum_list by (rule arg_cong) (auto intro!: nth_equalityI) lemma pdevs_val_partition: assumes "e \ UNIV \ I" obtains f g where "pdevs_val e (pdevs_of_list xs) = pdevs_val f (pdevs_of_list (filter p xs)) + pdevs_val g (pdevs_of_list (filter (Not o p) xs))" "f \ UNIV \ I" "g \ UNIV \ I" proof - obtain i where i: "i \ I" by (metis assms funcset_mem iso_tuple_UNIV_I) let ?zip = "zip [0.. snd) ?zip" let ?f = "(\n. if n < degree (pdevs_of_list (filter p xs)) then e (map fst (fst part) ! n) else i)" let ?g = "(\n. if n < degree (pdevs_of_list (filter (Not \ p) xs)) then e (map fst (snd part) ! n) else i)" show ?thesis proof have "pdevs_val e (pdevs_of_list xs) = (\(i,x)\?zip. e i *\<^sub>R x)" by (rule pdevs_val_zip) also have "\ = (\(i, x)\set ?zip. 
e i *\<^sub>R x)" by (simp add: sum_list_distinct_conv_sum_set distinct_zipI1) also have [simp]: "set (fst part) \ set (snd part) = {}" by (auto simp: part_def) from partition_set[of "p o snd" ?zip "fst part" "snd part"] have "set ?zip = set (fst part) \ set (snd part)" by (auto simp: part_def) also have "(\a\set (fst part) \ set (snd part). case a of (i, x) \ e i *\<^sub>R x) = (\(i, x)\set (fst part). e i *\<^sub>R x) + (\(i, x)\set (snd part). e i *\<^sub>R x)" by (auto simp: split_beta sum_Un) also have "(\(i, x)\set (fst part). e i *\<^sub>R x) = (\(i, x)\(fst part). e i *\<^sub>R x)" by (simp add: sum_list_distinct_conv_sum_set distinct_zipI1 part_def) also have "\ = (\i e i *\<^sub>R x)" by (subst sum_list_sum_nth) (simp add: split_beta' atLeast0LessThan) also have "\ = pdevs_val (\n. e (map fst (fst part) ! n)) (pdevs_of_list (map snd (fst part)))" by (force simp: pdevs_val_zip sum_list_distinct_conv_sum_set distinct_zipI1 split_beta' in_set_zip intro!: sum.reindex_cong[where l=fst] image_eqI[where x = "(x, map snd (fst part) ! x)" for x]) also have "(\(i, x)\set (snd part). e i *\<^sub>R x) = (\(i, x)\(snd part). e i *\<^sub>R x)" by (simp add: sum_list_distinct_conv_sum_set distinct_zipI1 part_def) also have "\ = (\i e i *\<^sub>R x)" by (subst sum_list_sum_nth) (simp add: split_beta' atLeast0LessThan) also have "\ = pdevs_val (\n. e (map fst (snd part) ! n)) (pdevs_of_list (map snd (snd part)))" by (force simp: pdevs_val_zip sum_list_distinct_conv_sum_set distinct_zipI1 split_beta' in_set_zip intro!: sum.reindex_cong[where l=fst] image_eqI[where x = "(x, map snd (snd part) ! x)" for x]) also have "pdevs_val (\n. e (map fst (fst part) ! n)) (pdevs_of_list (map snd (fst part))) = pdevs_val (\n. if n < degree (pdevs_of_list (map snd (fst part))) then e (map fst (fst part) ! n) else i) (pdevs_of_list (map snd (fst part)))" by (rule pdevs_val_degree_cong) simp_all also have "pdevs_val (\n. e (map fst (snd part) ! n)) (pdevs_of_list (map snd (snd part))) = pdevs_val (\n. if n < degree (pdevs_of_list (map snd (snd part))) then e (map fst (snd part) ! n) else i) (pdevs_of_list (map snd (snd part)))" by (rule pdevs_val_degree_cong) simp_all also have "map snd (snd part) = filter (Not o p) xs" by (simp add: part_def filter_map[symmetric] o_assoc) also have "map snd (fst part) = filter p xs" by (simp add: part_def filter_map[symmetric]) finally show "pdevs_val e (pdevs_of_list xs) = pdevs_val ?f (pdevs_of_list (filter p xs)) + pdevs_val ?g (pdevs_of_list (filter (Not \ p) xs))" . show "?f \ UNIV \ I" "?g \ UNIV \ I" using assms \i\I\ by (auto simp: Pi_iff) qed qed lemma pdevs_apply_pdevs_of_list_append: "pdevs_apply (pdevs_of_list (xs @ zs)) i = (if i < length xs then pdevs_apply (pdevs_of_list xs) i else pdevs_apply (pdevs_of_list zs) (i - length xs))" by (auto simp: pdevs_apply_pdevs_of_list nth_append) lemma degree_pdevs_of_list_le_length[intro, simp]: "degree (pdevs_of_list xs) \ length xs" by (metis less_irrefl_nat le_less_linear less_degree_pdevs_of_list_imp_less_length) lemma degree_pdevs_of_list_append: "degree (pdevs_of_list (xs @ ys)) \ length xs + degree (pdevs_of_list ys)" by (rule degree_le) (auto simp: pdevs_apply_pdevs_of_list_append) lemma pdevs_val_pdevs_of_list_append: assumes "f \ UNIV \ I" assumes "g \ UNIV \ I" obtains e where "pdevs_val f (pdevs_of_list xs) + pdevs_val g (pdevs_of_list ys) = pdevs_val e (pdevs_of_list (xs @ ys))" "e \ UNIV \ I" proof let ?e = "(\i. 
if i < length xs then f i else g (i - length xs))" have f: "pdevs_val f (pdevs_of_list xs) = (\i\{..R pdevs_apply (pdevs_of_list (xs @ ys)) i)" by (auto simp: pdevs_val_sum degree_gt pdevs_apply_pdevs_of_list_append intro: sum.mono_neutral_cong_left) have g: "pdevs_val g (pdevs_of_list ys) = (\i=length xs ..R pdevs_apply (pdevs_of_list (xs @ ys)) i)" (is "_ = ?sg") by (auto simp: pdevs_val_sum pdevs_apply_pdevs_of_list_append intro!: inj_onI image_eqI[where x="length xs + x" for x] sum.reindex_cong[where l="\i. i - length xs"]) show "pdevs_val f (pdevs_of_list xs) + pdevs_val g (pdevs_of_list ys) = pdevs_val ?e (pdevs_of_list (xs @ ys))" unfolding f g by (subst sum.union_disjoint[symmetric]) (force simp: pdevs_val_sum ivl_disj_un degree_pdevs_of_list_append intro!: sum.mono_neutral_cong_right split: if_split_asm)+ show "?e \ UNIV \ I" using assms by (auto simp: Pi_iff) qed lemma sum_general_mono: fixes f::"'a\('b::ordered_ab_group_add)" assumes [simp,intro]: "finite s" "finite t" assumes f: "\x. x \ s - t \ f x \ 0" assumes g: "\x. x \ t - s \ g x \ 0" assumes fg: "\x. x \ s \ t \ f x \ g x" shows "(\x \ s. f x) \ (\x \ t. g x)" proof - have "s = (s - t) \ (s \ t)" and [intro, simp]: "(s - t) \ (s \ t) = {}" by auto hence "(\x \ s. f x) = (\x \ s - t \ s \ t. f x)" using assms by simp also have "\ = (\x \ s - t. f x) + (\x \ s \ t. f x)" by (simp add: sum_Un) also have "(\x \ s - t. f x) \ 0" by (auto intro!: sum_nonpos f) also have "0 \ (\x \ t - s. g x)" by (auto intro!: sum_nonneg g) also have "(\x \ s \ t. f x) \ (\x \ s \ t. g x)" by (auto intro!: sum_mono fg) also have [intro, simp]: "(t - s) \ (s \ t) = {}" by auto hence "sum g (t - s) + sum g (s \ t) = sum g ((t - s) \ (s \ t))" by (simp add: sum_Un) also have "\ = sum g t" by (auto intro!: sum.cong) finally show ?thesis by simp qed lemma pdevs_val_perm_ex: assumes "xs <~~> ys" assumes mem: "e \ UNIV \ I" shows "\e'. e' \ UNIV \ I \ pdevs_val e (pdevs_of_list xs) = pdevs_val e' (pdevs_of_list ys)" using assms proof (induct arbitrary: e) case Nil thus ?case by auto next case (Cons xs ys z) hence "(e \ (+) (Suc 0)) \ UNIV \ I" by auto from Cons(2)[OF this] obtain e' where "e' \ UNIV \ I" "pdevs_val (e \ (+) (Suc 0)) (pdevs_of_list xs) = pdevs_val e' (pdevs_of_list ys)" by metis thus ?case using Cons by (auto intro!: exI[where x="\x. if x = 0 then e 0 else e' (x - 1)"] simp: o_def Pi_iff) next case (trans xs ys zs) thus ?case by metis next case (swap y x l) thus ?case by (auto intro!: exI[where x="\i. if i = 0 then e 1 else if i = 1 then e 0 else e i"] simp: o_def Pi_iff) qed lemma pdevs_val_perm: assumes "xs <~~> ys" assumes mem: "e \ UNIV \ I" obtains e' where "e' \ UNIV \ I" "pdevs_val e (pdevs_of_list xs) = pdevs_val e' (pdevs_of_list ys)" using assms by (metis pdevs_val_perm_ex) lemma set_distinct_permI: "set xs = set ys \ distinct xs \ distinct ys \ xs <~~> ys" by (metis eq_set_perm_remdups remdups_id_iff_distinct) lemmas pdevs_val_permute = pdevs_val_perm[OF set_distinct_permI] lemma partition_permI: "filter p xs @ filter (Not o p) xs <~~> xs" proof (induct xs) case (Cons x xs) have swap_app_Cons: "filter p xs @ x # [a\xs . \ p a] <~~> x # filter p xs @ [a\xs . \ p a]" by (metis perm_sym perm_append_Cons) also have "\ <~~> x#xs" using Cons by auto finally (trans) show ?case using Cons by simp qed simp lemma pdevs_val_eqI: assumes "\i. i \ pdevs_domain y \ i \ pdevs_domain x \ e i *\<^sub>R pdevs_apply x i = f i *\<^sub>R pdevs_apply y i" assumes "\i. 
i \ pdevs_domain y \ i \ pdevs_domain x \ f i *\<^sub>R pdevs_apply y i = 0" assumes "\i. i \ pdevs_domain x \ i \ pdevs_domain y \ e i *\<^sub>R pdevs_apply x i = 0" shows "pdevs_val e x = pdevs_val f y" using assms by (force simp: pdevs_val_pdevs_domain intro!: sum.reindex_bij_witness_not_neutral[where i=id and j = id and S'="pdevs_domain x - pdevs_domain y" and T'="pdevs_domain y - pdevs_domain x"]) definition filter_pdevs_raw::"(nat \ 'a \ bool) \ (nat \ 'a::real_vector) \ (nat \ 'a)" where "filter_pdevs_raw I X = (\i. if I i (X i) then X i else 0)" lemma filter_pdevs_raw_nonzeros: "{i. filter_pdevs_raw s f i \ 0} = {i. f i \ 0} \ {x. s x (f x)}" by (auto simp: filter_pdevs_raw_def) lift_definition filter_pdevs::"(nat \ 'a \ bool) \ 'a::real_vector pdevs \ 'a pdevs" is filter_pdevs_raw by (simp add: filter_pdevs_raw_nonzeros) lemma pdevs_apply_filter_pdevs[simp]: "pdevs_apply (filter_pdevs I x) i = (if I i (pdevs_apply x i) then pdevs_apply x i else 0)" by transfer (auto simp: filter_pdevs_raw_def) lemma degree_filter_pdevs_le: "degree (filter_pdevs I x) \ degree x" by (rule degree_leI) (simp split: if_split_asm) lemma pdevs_val_filter_pdevs: "pdevs_val e (filter_pdevs I x) = (\i \ {.. {i. I i (pdevs_apply x i)}. e i *\<^sub>R pdevs_apply x i)" by (auto simp: pdevs_val_sum if_distrib sum.inter_restrict degree_filter_pdevs_le degree_gt intro!: sum.mono_neutral_cong_left split: if_split_asm) lemma pdevs_val_filter_pdevs_dom: "pdevs_val e (filter_pdevs I x) = (\i \ pdevs_domain x \ {i. I i (pdevs_apply x i)}. e i *\<^sub>R pdevs_apply x i)" by (auto simp: pdevs_val_pdevs_domain if_distrib sum.inter_restrict degree_filter_pdevs_le degree_gt intro!: sum.mono_neutral_cong_left split: if_split_asm) lemma pdevs_val_filter_pdevs_eval: "pdevs_val e (filter_pdevs p x) = pdevs_val (\i. if p i (pdevs_apply x i) then e i else 0) x" by (auto split: if_split_asm intro!: pdevs_val_eqI) definition "pdevs_applys X i = map (\x. pdevs_apply x i) X" definition "pdevs_vals e X = map (pdevs_val e) X" definition "aform_vals e X = map (aform_val e) X" definition "filter_pdevs_list I X = map (filter_pdevs (\i _. I i (pdevs_applys X i))) X" lemma pdevs_applys_filter_pdevs_list[simp]: "pdevs_applys (filter_pdevs_list I X) i = (if I i (pdevs_applys X i) then pdevs_applys X i else map (\_. 0) X)" by (auto simp: filter_pdevs_list_def o_def pdevs_applys_def) definition "degrees X = Max (insert 0 (degree ` set X))" abbreviation "degree_aforms X \ degrees (map snd X)" lemma degrees_leI: assumes "\x. x \ set X \ degree x \ K" shows "degrees X \ K" using assms by (auto simp: degrees_def intro!: Max.boundedI) lemma degrees_leD: assumes "degrees X \ K" shows "\x. x \ set X \ degree x \ K" using assms by (auto simp: degrees_def intro!: Max.boundedI) lemma degree_filter_pdevs_list_le: "degrees (filter_pdevs_list I x) \ degrees x" by (rule degrees_leI) (auto simp: filter_pdevs_list_def intro!: degree_le dest!: degrees_leD) definition "dense_list_of_pdevs x = map (\i. pdevs_apply x i) [0..(reverse) ordered coefficients as list\ definition "list_of_pdevs x = map (\i. 
(i, pdevs_apply x i)) (rev (sorted_list_of_set (pdevs_domain x)))" lemma list_of_pdevs_zero_pdevs[simp]: "list_of_pdevs zero_pdevs = []" by (auto simp: list_of_pdevs_def) lemma sum_list_list_of_pdevs: "sum_list (map snd (list_of_pdevs x)) = sum_list (dense_list_of_pdevs x)" by (auto intro!: sum.mono_neutral_cong_left simp add: degree_gt sum_list_distinct_conv_sum_set dense_list_of_pdevs_def list_of_pdevs_def) lemma sum_list_filter_dense_list_of_pdevs[symmetric]: "sum_list (map snd (filter (p o snd) (list_of_pdevs x))) = sum_list (filter p (dense_list_of_pdevs x))" by (auto intro!: sum.mono_neutral_cong_left simp add: degree_gt sum_list_distinct_conv_sum_set dense_list_of_pdevs_def list_of_pdevs_def o_def filter_map) lemma pdevs_of_list_dense_list_of_pdevs: "pdevs_of_list (dense_list_of_pdevs x) = x" by (auto simp: pdevs_apply_pdevs_of_list dense_list_of_pdevs_def pdevs_eqI) lemma pdevs_val_sum_list: "pdevs_val (\_. c) X = c *\<^sub>R sum_list (map snd (list_of_pdevs X))" by (auto simp: pdevs_val_sum sum_list_list_of_pdevs pdevs_val_const_pdevs_of_list[symmetric] pdevs_of_list_dense_list_of_pdevs) lemma list_of_pdevs_all_nonzero: "list_all (\x. x \ 0) (map snd (list_of_pdevs xs))" by (auto simp: list_of_pdevs_def list_all_iff) lemma list_of_pdevs_nonzero: "x \ set (map snd (list_of_pdevs xs)) \ x \ 0" by (auto simp: list_of_pdevs_def) lemma pdevs_of_list_scaleR_0[simp]: fixes xs::"'a::real_vector list" shows "pdevs_of_list (map ((*\<^sub>R) 0) xs) = zero_pdevs" by (auto simp: pdevs_apply_pdevs_of_list intro!: pdevs_eqI) lemma degree_pdevs_of_list_scaleR: "degree (pdevs_of_list (map ((*\<^sub>R) c) xs)) = (if c \ 0 then degree (pdevs_of_list xs) else 0)" by (auto simp: pdevs_apply_pdevs_of_list intro!: degree_cong) lemma list_of_pdevs_eq: "rev (list_of_pdevs X) = (filter ((\) 0 o snd) (map (\i. (i, pdevs_apply X i)) [0..i. if i < d then 1 else 0) (pdevs_of_list xs)" proof - have "sum_list (take d xs) = 1 *\<^sub>R sum_list (take d xs)" by simp also note pdevs_val_const_pdevs_of_list[symmetric] also have "pdevs_val (\_. 1) (pdevs_of_list (take d xs)) = pdevs_val (\i. if i < d then 1 else 0) (pdevs_of_list xs)" by (auto simp: pdevs_apply_pdevs_of_list split: if_split_asm intro!: pdevs_val_eqI) finally show ?thesis . qed lemma zero_in_range_pdevs_apply[intro, simp]: fixes X::"'a::real_vector pdevs" shows "0 \ range (pdevs_apply X)" by (metis degree_gt less_irrefl rangeI) lemma dense_list_in_range: "x \ set (dense_list_of_pdevs X) \ x \ range (pdevs_apply X)" by (auto simp: dense_list_of_pdevs_def) lemma not_in_dense_list_zeroD: assumes "pdevs_apply X i \ set (dense_list_of_pdevs X)" shows "pdevs_apply X i = 0" proof (rule ccontr) assume "pdevs_apply X i \ 0" hence "i < degree X" by (rule degree_gt) thus False using assms by (auto simp: dense_list_of_pdevs_def) qed lemma list_all_list_of_pdevsI: assumes "\i. i \ pdevs_domain X \ P (pdevs_apply X i)" shows "list_all (\x. P x) (map snd (list_of_pdevs X))" using assms by (auto simp: list_all_iff list_of_pdevs_def) lemma pdevs_of_list_map_scaleR: "pdevs_of_list (map (scaleR r) xs) = scaleR_pdevs r (pdevs_of_list xs)" by (auto intro!: pdevs_eqI simp: pdevs_apply_pdevs_of_list) lemma map_permI: assumes "xs <~~> ys" shows "map f xs <~~> map f ys" using assms by induct auto lemma rev_perm: "rev xs <~~> ys \ xs <~~> ys" by (metis perm.trans perm_rev rev_rev_ident) lemma list_of_pdevs_perm_filter_nonzero: "map snd (list_of_pdevs X) <~~> (filter ((\) 0) (dense_list_of_pdevs X))" proof - have zip_map: "zip [0..i. (i, pdevs_apply X i)) [0.. 
filter ((\) 0 o snd) (zip [0.. map snd (filter ((\) 0 \ snd) (zip [0..) 0 \ snd) (zip [0..) 0) (dense_list_of_pdevs X)" using map_filter[of snd "(\) 0" "(zip [0.. UNIV \ I" assumes "0 \ I" obtains e' where "pdevs_val e (pdevs_of_list (filter p xs)) = pdevs_val e' (pdevs_of_list xs)" "e' \ UNIV \ I" unfolding pdevs_val_filter_pdevs_eval proof - have "(\_::nat. 0) \ UNIV \ I" using assms by simp have "pdevs_val e (pdevs_of_list (filter p xs)) = pdevs_val e (pdevs_of_list (filter p xs)) + pdevs_val (\_. 0) (pdevs_of_list (filter (Not o p) xs))" by (simp add: pdevs_val_sum) also from pdevs_val_pdevs_of_list_append[OF \e \ _\ \(\_. 0) \ _\] obtain e' where "e' \ UNIV \ I" "\ = pdevs_val e' (pdevs_of_list (filter p xs @ filter (Not o p) xs))" by metis note this(2) also from pdevs_val_perm[OF partition_permI \e' \ _\] obtain e'' where "\ = pdevs_val e'' (pdevs_of_list xs)" "e'' \ UNIV \ I" by metis note this(1) finally show ?thesis using \e'' \ _\ .. qed lemma pdevs_val_of_list_of_pdevs: assumes "e \ UNIV \ I" assumes "0 \ I" obtains e' where "pdevs_val e (pdevs_of_list (map snd (list_of_pdevs X))) = pdevs_val e' X" "e' \ UNIV \ I" proof - obtain e' where "e' \ UNIV \ I" and "pdevs_val e (pdevs_of_list (map snd (list_of_pdevs X))) = pdevs_val e' (pdevs_of_list (filter ((\) 0) (dense_list_of_pdevs X)))" by (rule pdevs_val_perm[OF list_of_pdevs_perm_filter_nonzero assms(1)]) note this(2) also from pdevs_val_filter[OF \e' \ _\ \0 \ I\, of "(\) 0" "dense_list_of_pdevs X"] obtain e'' where "e'' \ UNIV \ I" and "\ = pdevs_val e'' (pdevs_of_list (dense_list_of_pdevs X))" by metis note this(2) also have "\ = pdevs_val e'' X" by (simp add: pdevs_of_list_dense_list_of_pdevs) finally show ?thesis using \e'' \ UNIV \ I\ .. qed lemma pdevs_val_of_list_of_pdevs2: assumes "e \ UNIV \ I" obtains e' where "pdevs_val e X = pdevs_val e' (pdevs_of_list (map snd (list_of_pdevs X)))" "e' \ UNIV \ I" proof - from list_of_pdevs_perm_filter_nonzero[of X] have perm: "(filter ((\) 0) (dense_list_of_pdevs X)) <~~> map snd (list_of_pdevs X)" by (simp add: perm_sym) have "pdevs_val e X = pdevs_val e (pdevs_of_list (dense_list_of_pdevs X))" by (simp add: pdevs_of_list_dense_list_of_pdevs) also from pdevs_val_partition[OF \e \ _\, of "dense_list_of_pdevs X" "(\) 0"] obtain f g where "f \ UNIV \ I" "g \ UNIV \ I" "\ = pdevs_val f (pdevs_of_list (filter ((\) 0) (dense_list_of_pdevs X))) + pdevs_val g (pdevs_of_list (filter (Not \ (\) 0) (dense_list_of_pdevs X)))" (is "_ = ?f + ?g") by metis note this(3) also have "pdevs_of_list [x\dense_list_of_pdevs X . x = 0] = zero_pdevs" by (auto intro!: pdevs_eqI simp: pdevs_apply_pdevs_of_list dest!: nth_mem) hence "?g = 0" by (auto simp: o_def ) also obtain e' where "e' \ UNIV \ I" and "?f = pdevs_val e' (pdevs_of_list (map snd (list_of_pdevs X)))" by (rule pdevs_val_perm[OF perm \f \ _\]) note this(2) finally show ?thesis using \e' \ UNIV \ I\ by (auto intro!: that) qed lemma dense_list_of_pdevs_scaleR: "r \ 0 \ map ((*\<^sub>R) r) (dense_list_of_pdevs x) = dense_list_of_pdevs (scaleR_pdevs r x)" by (auto simp: dense_list_of_pdevs_def) lemma degree_pdevs_of_list_eq: "(\x. x \ set xs \ x \ 0) \ degree (pdevs_of_list xs) = length xs" by (cases xs) (auto simp add: pdevs_apply_pdevs_of_list nth_Cons intro!: degree_eqI split: nat.split) lemma dense_list_of_pdevs_pdevs_of_list: "(\x. 
x \ set xs \ x \ 0) \ dense_list_of_pdevs (pdevs_of_list xs) = xs" by (auto simp: dense_list_of_pdevs_def degree_pdevs_of_list_eq pdevs_apply_pdevs_of_list intro!: nth_equalityI) lemma pdevs_of_list_sum: assumes "distinct xs" assumes "e \ UNIV \ I" obtains f where "f \ UNIV \ I" "pdevs_val e (pdevs_of_list xs) = (\P\set xs. f P *\<^sub>R P)" proof - define f where "f X = e (the (map_of (zip xs [0.. UNIV \ I" by (auto simp: f_def) moreover have "pdevs_val e (pdevs_of_list xs) = (\P\set xs. f P *\<^sub>R P)" by (auto simp add: pdevs_val_zip f_def assms sum_list_distinct_conv_sum_set[symmetric] in_set_zip map_of_zip_upto2_length_eq_nth intro!: sum_list_nth_eqI) ultimately show ?thesis .. qed lemma pdevs_domain_eq_pdevs_of_list: assumes nz: "\x. x \ set (xs) \ x \ 0" shows "pdevs_domain (pdevs_of_list xs) = {0..x. x \ set xs \ x \ 0" shows "length (list_of_pdevs (pdevs_of_list xs)) = length xs" using nz by (auto simp: list_of_pdevs_def pdevs_domain_eq_pdevs_of_list) lemma nth_list_of_pdevs_pdevs_of_list: assumes nz: "\x. x \ set xs \ x \ 0" assumes l: "n < length xs" shows "list_of_pdevs (pdevs_of_list xs) ! n = ((length xs - Suc n), xs ! (length xs - Suc n))" using nz l by (auto simp: list_of_pdevs_def pdevs_domain_eq_pdevs_of_list rev_nth pdevs_apply_pdevs_of_list) lemma list_of_pdevs_pdevs_of_list_eq: "(\x. x \ set xs \ x \ 0) \ list_of_pdevs (pdevs_of_list xs) = zip (rev [0..x. x \ set xs \ x \ 0" shows "sum_list (filter p (map snd (list_of_pdevs (pdevs_of_list xs)))) = sum_list (filter p xs)" using assms by (auto simp: list_of_pdevs_pdevs_of_list_eq rev_filter[symmetric]) lemma sum_list_partition: fixes xs::"'a::comm_monoid_add list" shows "sum_list (filter p xs) + sum_list (filter (Not o p) xs) = sum_list xs" by (induct xs) (auto simp: ac_simps) subsection \2d zonotopes\ definition "prod_of_pdevs x y = binop_pdevs Pair x y" lemma apply_pdevs_prod_of_pdevs[simp]: "pdevs_apply (prod_of_pdevs x y) i = (pdevs_apply x i, pdevs_apply y i)" unfolding prod_of_pdevs_def by (simp add: zero_prod_def) lemma pdevs_domain_prod_of_pdevs[simp]: "pdevs_domain (prod_of_pdevs x y) = pdevs_domain x \ pdevs_domain y" by (auto simp: zero_prod_def) lemma pdevs_val_prod_of_pdevs[simp]: "pdevs_val e (prod_of_pdevs x y) = (pdevs_val e x, pdevs_val e y)" proof - have "pdevs_val e x = (\i\pdevs_domain x \ pdevs_domain y. e i *\<^sub>R pdevs_apply x i)" (is "_ = ?x") unfolding pdevs_val_pdevs_domain by (rule sum.mono_neutral_cong_left) auto moreover have "pdevs_val e y = (\i\pdevs_domain x \ pdevs_domain y. e i *\<^sub>R pdevs_apply y i)" (is "_ = ?y") unfolding pdevs_val_pdevs_domain by (rule sum.mono_neutral_cong_left) auto ultimately have "(pdevs_val e x, pdevs_val e y) = (?x, ?y)" by auto also have "\ = pdevs_val e (prod_of_pdevs x y)" by (simp add: sum_prod pdevs_val_pdevs_domain) finally show ?thesis by simp qed definition prod_of_aforms (infixr "\\<^sub>a" 80) where "prod_of_aforms x y = ((fst x, fst y), prod_of_pdevs (snd x) (snd y))" subsection \Intervals\ definition One_pdevs_raw::"nat \ 'a::executable_euclidean_space" where "One_pdevs_raw i = (if i < length (Basis_list::'a list) then Basis_list ! i else 0)" lemma zeros_One_pdevs_raw: "One_pdevs_raw -` {0::'a::executable_euclidean_space} = {length (Basis_list::'a list)..}" by (auto simp: One_pdevs_raw_def nonzero_Basis split: if_split_asm dest!: nth_mem) lemma nonzeros_One_pdevs_raw: "{i. 
One_pdevs_raw i \ (0::'a::executable_euclidean_space)} = - {length (Basis_list::'a list)..}" using zeros_One_pdevs_raw by blast lift_definition One_pdevs::"'a::executable_euclidean_space pdevs" is One_pdevs_raw by (auto simp: nonzeros_One_pdevs_raw) lemma pdevs_apply_One_pdevs[simp]: "pdevs_apply One_pdevs i = (if i < length (Basis_list::'a::executable_euclidean_space list) then Basis_list ! i else 0::'a)" by transfer (simp add: One_pdevs_raw_def) lemma Max_Collect_less_nat: "Max {i::nat. i < k} = (if k = 0 then Max {} else k - 1)" by (auto intro!: Max_eqI) lemma degree_One_pdevs[simp]: "degree (One_pdevs::'a pdevs) = length (Basis_list::'a::executable_euclidean_space list)" by (auto simp: degree_eq_Suc_max Basis_list_nth_nonzero Max_Collect_less_nat intro!: Max_eqI DIM_positive) definition inner_scaleR_pdevs::"'a::euclidean_space \ 'a pdevs \ 'a pdevs" where "inner_scaleR_pdevs b x = unop_pdevs (\x. (b \ x) *\<^sub>R x) x" lemma pdevs_apply_inner_scaleR_pdevs[simp]: "pdevs_apply (inner_scaleR_pdevs a x) i = (a \ (pdevs_apply x i)) *\<^sub>R (pdevs_apply x i)" by (simp add: inner_scaleR_pdevs_def) lemma degree_inner_scaleR_pdevs_le: "degree (inner_scaleR_pdevs (l::'a::executable_euclidean_space) One_pdevs) \ degree (One_pdevs::'a pdevs)" by (rule degree_leI) (auto simp: inner_scaleR_pdevs_def One_pdevs_raw_def) definition "pdevs_of_ivl l u = scaleR_pdevs (1/2) (inner_scaleR_pdevs (u - l) One_pdevs)" lemma degree_pdevs_of_ivl_le: "degree (pdevs_of_ivl l u::'a::executable_euclidean_space pdevs) \ DIM('a)" using degree_inner_scaleR_pdevs_le by (simp add: pdevs_of_ivl_def) lemma pdevs_apply_pdevs_of_ivl: defines "B \ Basis_list::'a::executable_euclidean_space list" shows "pdevs_apply (pdevs_of_ivl l u) i = (if i < length B then ((u - l)\(B!i)/2)*\<^sub>R(B!i) else 0)" by (auto simp: pdevs_of_ivl_def B_def) lemma deg_length_less_imp[simp]: "k < degree (pdevs_of_ivl l u::'a::executable_euclidean_space pdevs) \ k < length (Basis_list::'a list)" by (metis (no_types, hide_lams) degree_One_pdevs degree_inner_scaleR_pdevs_le degree_scaleR_pdevs dual_order.strict_trans length_Basis_list_pos nat_neq_iff not_le pdevs_of_ivl_def) lemma tdev_pdevs_of_ivl: "tdev (pdevs_of_ivl l u) = \u - l\ /\<^sub>R 2" proof - have "tdev (pdevs_of_ivl l u) = (\i pdevs_apply (pdevs_of_ivl l u) i\)" by (auto simp: tdev_def) also have "\ = (\i = 0..pdevs_apply (pdevs_of_ivl l u) i\)" using degree_pdevs_of_ivl_le[of l u] by (intro sum.mono_neutral_cong_left) auto also have "\ = (\i = 0..((u - l) \ Basis_list ! i / 2) *\<^sub>R Basis_list ! i\)" by (auto simp: pdevs_apply_pdevs_of_ivl) also have "\ = (\b \ Basis_list. \((u - l) \ b / 2) *\<^sub>R b\)" by (auto simp: sum_list_sum_nth) also have "\ = (\b\Basis. \((u - l) \ b / 2) *\<^sub>R b\)" by (auto simp: sum_list_distinct_conv_sum_set) also have "\ = \u - l\ /\<^sub>R 2" by (subst euclidean_representation[symmetric, of "\u - l\ /\<^sub>R 2"]) (simp add: abs_inner abs_scaleR) finally show ?thesis . qed definition "aform_of_ivl l u = ((l + u)/\<^sub>R2, pdevs_of_ivl l u)" definition "aform_of_point x = aform_of_ivl x x" lemma Elem_affine_of_ivl_le: assumes "e \ UNIV \ {-1 .. 
1}" assumes "l \ u" shows "l \ aform_val e (aform_of_ivl l u)" proof - have "l = (1 / 2) *\<^sub>R l + (1 / 2) *\<^sub>R l" by (simp add: scaleR_left_distrib[symmetric]) also have "\ = (l + u)/\<^sub>R2 - tdev (pdevs_of_ivl l u)" by (auto simp: assms tdev_pdevs_of_ivl algebra_simps) also have "\ \ aform_val e (aform_of_ivl l u)" using abs_pdevs_val_le_tdev[OF assms(1), of "pdevs_of_ivl l u"] by (auto simp: aform_val_def aform_of_ivl_def minus_le_iff dest!: abs_le_D2) finally show ?thesis . qed lemma Elem_affine_of_ivl_ge: assumes "e \ UNIV \ {-1 .. 1}" assumes "l \ u" shows "aform_val e (aform_of_ivl l u) \ u" proof - have "aform_val e (aform_of_ivl l u) \ (l + u)/\<^sub>R2 + tdev (pdevs_of_ivl l u)" using abs_pdevs_val_le_tdev[OF assms(1), of "pdevs_of_ivl l u"] by (auto simp: aform_val_def aform_of_ivl_def minus_le_iff dest!: abs_le_D1) also have "\ = (1 / 2) *\<^sub>R u + (1 / 2) *\<^sub>R u" by (auto simp: assms tdev_pdevs_of_ivl algebra_simps) also have "\ = u" by (simp add: scaleR_left_distrib[symmetric]) finally show ?thesis . qed lemma map_of_zip_upto_length_eq_nth: assumes "i < length B" assumes "d = length B" shows "(map_of (zip [0.. {l .. u}" obtains e where "e \ UNIV \ {-1 .. 1}" "k = aform_val e (aform_of_ivl l u)" proof atomize_elim define e where [abs_def]: "e i = (let b = if i R 2) \ b) / (((u - l) /\<^sub>R 2) \ b))" for i let ?B = "Basis_list::'a list" have "k = (1 / 2) *\<^sub>R (l + u) + (\b \ Basis. (if (u - l) \ b = 0 then 0 else ((k - (1 / 2) *\<^sub>R (l + u)) \ b)) *\<^sub>R b)" (is "_ = _ + ?dots") using assms by (force simp add: algebra_simps eucl_le[where 'a='a] intro!: euclidean_eqI[where 'a='a]) also have "?dots = (\b \ Basis. (if (u - l) \ b = 0 then 0 else ((k - (1 / 2) *\<^sub>R (l + u)) \ b) *\<^sub>R b))" by (auto intro!: sum.cong) also have "\ = (\b \ ?B. (if (u - l) \ b = 0 then 0 else ((k - (1 / 2) *\<^sub>R (l + u)) \ b) *\<^sub>R b))" by (auto simp: sum_list_distinct_conv_sum_set) also have "\ = (\i = 0.. ?B ! i = 0 then 0 else ((k - (1 / 2) *\<^sub>R (l + u)) \ ?B ! i) *\<^sub>R ?B ! i))" by (auto simp: sum_list_sum_nth) also have "\ = (\i = 0.. Basis_list ! i = 0 then 0 else ((k - (1 / 2) *\<^sub>R (l + u)) \ Basis_list ! i) *\<^sub>R Basis_list ! i))" using degree_inner_scaleR_pdevs_le[of "u - l"] by (intro sum.mono_neutral_cong_right) (auto dest!: degree) also have "(1 / 2) *\<^sub>R (l + u) + (\i = 0.. Basis_list ! i = 0 then 0 else ((k - (1 / 2) *\<^sub>R (l + u)) \ Basis_list ! i) *\<^sub>R Basis_list ! i)) = aform_val e (aform_of_ivl l u)" using degree_inner_scaleR_pdevs_le[of "u - l"] by (auto simp: aform_val_def aform_of_ivl_def pdevs_of_ivl_def map_of_zip_upto_length_eq_nth e_def Let_def pdevs_val_sum intro!: sum.cong) finally have "k = aform_val e (aform_of_ivl l u)" . moreover { fix k l u::real assume *: "l \ k" "k \ u" let ?m = "l / 2 + u / 2" have "\k - ?m\ \ \if k \ ?m then ?m - l else u - ?m\" using * by auto also have "\ \ \u / 2 - l / 2\" by (auto simp: abs_real_def) finally have "\k - (l / 2 + u / 2)\ \ \u / 2 - l/2\" . } note midpoint_abs = this have "e \ UNIV \ {- 1..1}" using assms unfolding e_def Let_def by (intro Pi_I divide_atLeastAtMost_1_absI) (auto simp: map_of_zip_upto_length_eq_nth eucl_le[where 'a='a] divide_le_eq_1 not_less inner_Basis algebra_simps intro!: midpoint_abs dest!: nth_mem) ultimately show "\e. 
e \ UNIV \ {- 1..1} \ k = aform_val e (aform_of_ivl l u)" by blast qed lemma Inf_aform_aform_of_ivl: assumes "l \ u" shows "Inf_aform (aform_of_ivl l u) = l" using assms by (auto simp: Inf_aform_def aform_of_ivl_def tdev_pdevs_of_ivl abs_diff_eq1 algebra_simps) (metis field_sum_of_halves scaleR_add_left scaleR_one) lemma Sup_aform_aform_of_ivl: assumes "l \ u" shows "Sup_aform (aform_of_ivl l u) = u" using assms by (auto simp: Sup_aform_def aform_of_ivl_def tdev_pdevs_of_ivl abs_diff_eq1 algebra_simps) (metis field_sum_of_halves scaleR_add_left scaleR_one) lemma Affine_aform_of_ivl: "a \ b \ Affine (aform_of_ivl a b) = {a .. b}" by (force simp: Affine_def valuate_def intro!: Elem_affine_of_ivl_ge Elem_affine_of_ivl_le elim!: in_ivl_affine_of_ivlE) end diff --git a/thys/Berlekamp_Zassenhaus/Missing_Multiset2.thy b/thys/Berlekamp_Zassenhaus/Missing_Multiset2.thy --- a/thys/Berlekamp_Zassenhaus/Missing_Multiset2.thy +++ b/thys/Berlekamp_Zassenhaus/Missing_Multiset2.thy @@ -1,270 +1,270 @@ theory Missing_Multiset2 - imports "HOL-Library.Multiset" "HOL-Library.Permutation" "HOL-Library.Permutations" + imports "HOL-Library.Multiset" "HOL-Library.List_Permutation" "HOL-Library.Permutations" Containers.Containers_Auxiliary (* only for a lemma *) begin subsubsection \Missing muiltiset\ lemma id_imp_bij: assumes id: "\x. f (f x) = x" shows "bij f" proof (intro bijI injI surjI[of f, OF id]) fix x y assume "f x = f y" then have "f (f x) = f (f y)" by auto with id show "x = y" by auto qed lemma rel_mset_Zero_iff[simp]: shows "rel_mset rel {#} Y \ Y = {#}" and "rel_mset rel X {#} \ X = {#}" using rel_mset_Zero rel_mset_size by (fastforce, fastforce) definition "is_mset_set X \ \x \# X. count X x = 1" lemma is_mset_setD[dest]: "is_mset_set X \ x \# X \ count X x = 1" unfolding is_mset_set_def by auto lemma is_mset_setI[intro]: assumes "\x. x \# X \ count X x = 1" shows "is_mset_set X" using assms unfolding is_mset_set_def by auto lemma is_mset_set[simp]: "is_mset_set (mset_set X)" unfolding is_mset_set_def by (meson count_mset_set(1) count_mset_set(2) count_mset_set(3) not_in_iff) lemma is_mset_set_add[simp]: "is_mset_set (X + {#x#}) \ is_mset_set X \ x \# X" (is "?L \ ?R") proof(intro iffI conjI) assume L: ?L with count_eq_zero_iff count_single show "is_mset_set X" unfolding is_mset_set_def by (metis (no_types, hide_lams) add_mset_add_single count_add_mset nat.inject set_mset_add_mset_insert union_single_eq_member) show "x \# X" proof assume "x \# X" then have "count (X + {#x#}) x > 1" by auto with L show False by (auto simp: is_mset_set_def) qed next assume R: ?R show ?L proof fix x' assume x': "x' \# X + {#x#}" show "count (X + {#x#}) x' = 1" proof(cases "x' \# X") case True with R have "count X x' = 1" by auto moreover from True R have "count {#x#} x' = 0" by auto ultimately show ?thesis by auto next case False then have "count X x' = 0" by (simp add: not_in_iff) with R x' show ?thesis by auto qed qed qed lemma mset_set_id[simp]: assumes "is_mset_set X" shows "mset_set (set_mset X) = X" using assms unfolding is_mset_set_def by (metis count_eq_zero_iff count_mset_set(1) count_mset_set(3) finite_set_mset multiset_eqI) lemma count_image_mset: shows "count (image_mset f X) y = (\x | x \# X \ y = f x. count X x)" proof(induct X) case empty show ?case by auto next case (add x X) define X' where "X' \ X + {#x#}" have "(\z | z \# X' \ y = f z. count (X + {#x#}) z) = (\z | z \# X' \ y = f z. count X z) + (\z | z \# X' \ y = f z. count {#x#} z)" unfolding plus_multiset.rep_eq sum.distrib.. 
also have split: "{z. z \# X' \ y = f z} = {z. z \# X' \ y = f z \ z \ x} \ {z. z \# X' \ y = f z \ z = x}" by blast then have "(\z | z \# X' \ y = f z. count {#x#} z) = (\z | z \# X' \ y = f z \ z = x. count {#x#} z)" unfolding split by (subst sum.union_disjoint, auto) also have "... = (if y = f x then 1 else 0)" using card_eq_Suc_0_ex1 by (auto simp: X'_def) also have "(\z | z \# X' \ y = f z. count X z) = (\z | z \# X \ y = f z. count X z)" proof(cases "x \# X") case True then have "z \# X' \ z \# X" for z by (auto simp: X'_def) then show ?thesis by auto next case False have split: "{z. z \# X' \ y = f z} = {z. z \# X \ y = f z} \ {z. z = x \ y = f z}" by (auto simp: X'_def) also have "sum (count X) ... = (\z | z \# X \ y = f z. count X z) + (\z | z = x \ y = f z. count X z)" by (subst sum.union_disjoint, auto simp: False) also with False have "\z. z = x \ y = f z \ count X z = 0" by (meson count_inI) with sum.neutral_const have "(\z | z = x \ y = f z. count X z) = 0" by auto finally show ?thesis by auto qed also have "... = count (image_mset f X) y" using add by auto finally show ?case by (simp add: X'_def) qed lemma is_mset_set_image: assumes "inj_on f (set_mset X)" and "is_mset_set X" shows "is_mset_set (image_mset f X)" proof (cases X) case empty then show ?thesis by auto next case (add x X) define X' where "X' \ add_mset x X" with assms add have inj:"inj_on f (set_mset X')" and X': "is_mset_set X'" by auto show ?thesis proof(unfold add, intro is_mset_setI, fold X'_def) fix y assume "y \# image_mset f X'" then have "y \ f ` set_mset X'" by auto with inj have "\!x'. x' \# X' \ y = f x'" by (meson imageE inj_onD) then obtain x' where x': "{x'. x' \# X' \ y = f x'} = {x'}" by auto then have "count (image_mset f X') y = count X' x'" unfolding count_image_mset by auto also from X' x' have "... = 1" by auto finally show "count (image_mset f X') y = 1". qed qed (* a variant for "right" *) lemma ex_mset_zip_right: assumes "length xs = length ys" "mset ys' = mset ys" shows "\xs'. length ys' = length xs' \ mset (zip xs' ys') = mset (zip xs ys)" using assms proof (induct xs ys arbitrary: ys' rule: list_induct2) case Nil thus ?case by auto next case (Cons x xs y ys ys') obtain j where j_len: "j < length ys'" and nth_j: "ys' ! 
j = y" by (metis Cons.prems in_set_conv_nth list.set_intros(1) mset_eq_setD) define ysa where "ysa = take j ys' @ drop (Suc j) ys'" have "mset ys' = {#y#} + mset ysa" unfolding ysa_def using j_len nth_j by (metis Cons_nth_drop_Suc union_mset_add_mset_right add_mset_remove_trivial add_diff_cancel_left' append_take_drop_id mset.simps(2) mset_append) hence ms_y: "mset ysa = mset ys" by (simp add: Cons.prems) then obtain xsa where len_a: "length ysa = length xsa" and ms_a: "mset (zip xsa ysa) = mset (zip xs ys)" using Cons.hyps(2) by blast define xs' where "xs' = take j xsa @ x # drop j xsa" have ys': "ys' = take j ysa @ y # drop j ysa" using ms_y j_len nth_j Cons.prems ysa_def by (metis append_eq_append_conv append_take_drop_id diff_Suc_Suc Cons_nth_drop_Suc length_Cons length_drop size_mset) have j_len': "j \ length ysa" using j_len ys' ysa_def by (metis add_Suc_right append_take_drop_id length_Cons length_append less_eq_Suc_le not_less) have "length ys' = length xs'" unfolding xs'_def using Cons.prems len_a ms_y by (metis add_Suc_right append_take_drop_id length_Cons length_append mset_eq_length) moreover have "mset (zip xs' ys') = mset (zip (x # xs) (y # ys))" unfolding ys' xs'_def apply (rule HOL.trans[OF mset_zip_take_Cons_drop_twice]) using j_len' by (auto simp: len_a ms_a) ultimately show ?case by blast qed lemma list_all2_reorder_right_invariance: assumes rel: "list_all2 R xs ys" and ms_y: "mset ys' = mset ys" shows "\xs'. list_all2 R xs' ys' \ mset xs' = mset xs" proof - have len: "length xs = length ys" using rel list_all2_conv_all_nth by auto obtain xs' where len': "length xs' = length ys'" and ms_xy: "mset (zip xs' ys') = mset (zip xs ys)" using len ms_y by (metis ex_mset_zip_right) have "list_all2 R xs' ys'" using assms(1) len' ms_xy unfolding list_all2_iff by (blast dest: mset_eq_setD) moreover have "mset xs' = mset xs" using len len' ms_xy map_fst_zip mset_map by metis ultimately show ?thesis by blast qed lemma rel_mset_via_perm: "rel_mset rel (mset xs) (mset ys) \ (\zs. perm xs zs \ list_all2 rel zs ys)" proof (unfold rel_mset_def, intro iffI, goal_cases) case 1 then obtain zs ws where zs: "mset zs = mset xs" and ws: "mset ws = mset ys" and zsws: "list_all2 rel zs ws" by auto note list_all2_reorder_right_invariance[OF zsws ws[symmetric], unfolded zs mset_eq_perm] then show ?case using perm_sym by auto next case 2 from this[folded mset_eq_perm] show ?case by force qed lemma rel_mset_free: assumes rel: "rel_mset rel X Y" and xs: "mset xs = X" shows "\ys. mset ys = Y \ list_all2 rel xs ys" proof- from rel[unfolded rel_mset_def] obtain xs' ys' where xs': "mset xs' = X" and ys': "mset ys' = Y" and xsys': "list_all2 rel xs' ys'" by auto from xs' xs have "mset xs = mset xs'" by auto from mset_eq_permutation[OF this] obtain f where perm: "f permutes {..i. i < length xs \ xs ! i = xs' ! f i" by auto note [simp] = list_all2_lengthD[OF xsys',symmetric] note [simp] = atLeast0LessThan[symmetric] note bij = permutes_bij[OF perm] define ys where "ys \ map (nth ys' \ f) [0..Y1 Y2. 
Y = Y1 + Y2 \ rel_mset rel X1 Y1 \ rel_mset rel X2 Y2" proof- obtain xs1 where xs1: "mset xs1 = X1" using ex_mset by auto obtain xs2 where xs2: "mset xs2 = X2" using ex_mset by auto from xs1 xs2 have "mset (xs1 @ xs2) = X1 + X2" by auto from rel_mset_free[OF rel this] obtain ys where ys: "mset ys = Y" "list_all2 rel (xs1 @ xs2) ys" by auto then obtain ys1 ys2 where ys12: "ys = ys1 @ ys2" and xs1ys1: "list_all2 rel xs1 ys1" and xs2ys2: "list_all2 rel xs2 ys2" using list_all2_append1 by blast from ys12 ys have "Y = mset ys1 + mset ys2" by auto moreover from xs1 xs1ys1 have "rel_mset rel X1 (mset ys1)" unfolding rel_mset_def by auto moreover from xs2 xs2ys2 have "rel_mset rel X2 (mset ys2)" unfolding rel_mset_def by auto ultimately show ?thesis by (subst exI[of _ "mset ys1"], subst exI[of _ "mset ys2"],auto) qed lemma rel_mset_OO: assumes AB: "rel_mset R A B" and BC: "rel_mset S B C" shows "rel_mset (R OO S) A C" proof- from AB obtain as bs where A_as: "A = mset as" and B_bs: "B = mset bs" and as_bs: "list_all2 R as bs" by (auto simp: rel_mset_def) from rel_mset_free[OF BC] B_bs obtain cs where C_cs: "C = mset cs" and bs_cs: "list_all2 S bs cs" by auto from list_all2_trans[OF _ as_bs bs_cs, of "R OO S"] A_as C_cs show ?thesis by (auto simp: rel_mset_def) qed end diff --git a/thys/Buchi_Complementation/Complementation_Final.thy b/thys/Buchi_Complementation/Complementation_Final.thy --- a/thys/Buchi_Complementation/Complementation_Final.thy +++ b/thys/Buchi_Complementation/Complementation_Final.thy @@ -1,183 +1,183 @@ section \Final Instantiation of Algorithms Related to Complementation\ theory Complementation_Final imports "Complementation_Implement" "Formula" "Transition_Systems_and_Automata.NBA_Translate" "Transition_Systems_and_Automata.NGBA_Algorithms" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" begin subsection \Syntax\ (* TODO: this syntax has unnecessarily high inner binding strength, requiring extra parentheses the regular let syntax correctly uses inner binding strength 0: ("(2_ =/ _)" 10) *) no_syntax "_do_let" :: "[pttrn, 'a] \ do_bind" ("(2let _ =/ _)" [1000, 13] 13) syntax "_do_let" :: "[pttrn, 'a] \ do_bind" ("(2let _ =/ _)" 13) subsection \Hashcodes on Complement States\ definition "hci k \ uint32_of_nat k * 1103515245 + 12345" definition "hc \ \ (p, q, b). 
hci p + hci q * 31 + (if b then 1 else 0)" definition "list_hash xs \ fold ((XOR) \ hc) xs 0" lemma list_hash_eq: assumes "distinct xs" "distinct ys" "set xs = set ys" shows "list_hash xs = list_hash ys" proof - have "remdups xs <~~> remdups ys" using eq_set_perm_remdups assms(3) by this then have "xs <~~> ys" using assms(1, 2) by (simp add: distinct_remdups_id) then have "fold ((XOR) \ hc) xs a = fold ((XOR) \ hc) ys a" for a proof (induct arbitrary: a) case (swap y x l) have "x XOR y XOR a = y XOR x XOR a" for x y by (transfer) (simp add: word_bw_lcs(3)) then show ?case by simp qed simp+ then show ?thesis unfolding list_hash_def by this qed definition state_hash :: "nat \ Complementation_Implement.state \ nat" where "state_hash n p \ nat_of_hashcode (list_hash p) mod n" lemma state_hash_bounded_hashcode[autoref_ga_rules]: "is_bounded_hashcode state_rel (gen_equals (Gen_Map.gen_ball (foldli \ list_map_to_list)) (list_map_lookup (=)) (prod_eq (=) (\))) state_hash" proof show [param]: "(gen_equals (Gen_Map.gen_ball (foldli \ list_map_to_list)) (list_map_lookup (=)) (prod_eq (=) (\)), (=)) \ state_rel \ state_rel \ bool_rel" by autoref show "state_hash n xs = state_hash n ys" if "xs \ Domain state_rel" "ys \ Domain state_rel" "gen_equals (Gen_Map.gen_ball (foldli \ list_map_to_list)) (list_map_lookup (=)) (prod_eq (=) (=)) xs ys" for xs ys n proof - have 1: "distinct (map fst xs)" "distinct (map fst ys)" using that(1, 2) unfolding list_map_rel_def list_map_invar_def by (auto simp: in_br_conv) have 2: "distinct xs" "distinct ys" using 1 by (auto intro: distinct_mapI) have 3: "(xs, map_of xs) \ state_rel" "(ys, map_of ys) \ state_rel" using 1 unfolding list_map_rel_def list_map_invar_def by (auto simp: in_br_conv) have 4: "(gen_equals (Gen_Map.gen_ball (foldli \ list_map_to_list)) (list_map_lookup (=)) (prod_eq (=) (\)) xs ys, map_of xs = map_of ys) \ bool_rel" using 3 by parametricity have 5: "map_to_set (map_of xs) = map_to_set (map_of ys)" using that(3) 4 by simp have 6: "set xs = set ys" using map_to_set_map_of 1 5 by blast show "state_hash n xs = state_hash n ys" unfolding state_hash_def using list_hash_eq 2 6 by metis qed show "state_hash n x < n" if "1 < n" for n x using that unfolding state_hash_def by simp qed subsection \Complementation\ schematic_goal complement_impl: assumes [simp]: "finite (NBA.nodes A)" assumes [autoref_rules]: "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" shows "(?f :: ?'c, op_translate (complement_4 A)) \ ?R" by (autoref_monadic (plain)) concrete_definition complement_impl uses complement_impl theorem complement_impl_correct: assumes "finite (NBA.nodes A)" assumes "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" shows "NBA.language (nbae_nba (nbaei_nbae (complement_impl Ai))) = streams (nba.alphabet A) - NBA.language A" using op_translate_language[OF complement_impl.refine[OF assms]] using complement_4_correct[OF assms(1)] by simp subsection \Language Subset\ definition [simp]: "op_language_subset A B \ NBA.language A \ NBA.language B" lemmas [autoref_op_pat] = op_language_subset_def[symmetric] schematic_goal language_subset_impl: assumes [simp]: "finite (NBA.nodes B)" assumes [autoref_rules]: "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" assumes [autoref_rules]: "(Bi, B) \ \Id, nat_rel\ nbai_nba_rel" shows "(?f :: ?'c, do { let AB' = intersect' A (complement_4 B); ASSERT (finite (NGBA.nodes AB')); RETURN (NGBA.language AB' = {}) }) \ ?R" by (autoref_monadic (plain)) concrete_definition language_subset_impl uses language_subset_impl lemma language_subset_impl_refine[autoref_rules]: 
assumes "SIDE_PRECOND (finite (NBA.nodes A))" assumes "SIDE_PRECOND (finite (NBA.nodes B))" assumes "SIDE_PRECOND (nba.alphabet A \ nba.alphabet B)" assumes "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" assumes "(Bi, B) \ \Id, nat_rel\ nbai_nba_rel" shows "(language_subset_impl Ai Bi, (OP op_language_subset ::: \Id, nat_rel\ nbai_nba_rel \ \Id, nat_rel\ nbai_nba_rel \ bool_rel) $ A $ B) \ bool_rel" proof - have "(RETURN (language_subset_impl Ai Bi), do { let AB' = intersect' A (complement_4 B); ASSERT (finite (NGBA.nodes AB')); RETURN (NGBA.language AB' = {}) }) \ \bool_rel\ nres_rel" using language_subset_impl.refine assms(2, 4, 5) unfolding autoref_tag_defs by this also have "(do { let AB' = intersect' A (complement_4 B); ASSERT (finite (NGBA.nodes AB')); RETURN (NGBA.language AB' = {}) }, RETURN (NBA.language A \ NBA.language B)) \ \bool_rel\ nres_rel" proof refine_vcg show "finite (NGBA.nodes (intersect' A (complement_4 B)))" using assms(1, 2) by auto have 1: "NBA.language A \ streams (nba.alphabet B)" using nba.language_alphabet streams_mono2 assms(3) unfolding autoref_tag_defs by blast have 2: "NBA.language (complement_4 B) = streams (nba.alphabet B) - NBA.language B" using complement_4_correct assms(2) by auto show "(NGBA.language (intersect' A (complement_4 B)) = {}, NBA.language A \ NBA.language B) \ bool_rel" using 1 2 by auto qed finally show ?thesis using RETURN_nres_relD unfolding nres_rel_comp by force qed subsection \Language Equality\ definition [simp]: "op_language_equal A B \ NBA.language A = NBA.language B" lemmas [autoref_op_pat] = op_language_equal_def[symmetric] schematic_goal language_equal_impl: assumes [simp]: "finite (NBA.nodes A)" assumes [simp]: "finite (NBA.nodes B)" assumes [simp]: "nba.alphabet A = nba.alphabet B" assumes [autoref_rules]: "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" assumes [autoref_rules]: "(Bi, B) \ \Id, nat_rel\ nbai_nba_rel" shows "(?f :: ?'c, NBA.language A \ NBA.language B \ NBA.language B \ NBA.language A) \ ?R" by autoref concrete_definition language_equal_impl uses language_equal_impl lemma language_equal_impl_refine[autoref_rules]: assumes "SIDE_PRECOND (finite (NBA.nodes A))" assumes "SIDE_PRECOND (finite (NBA.nodes B))" assumes "SIDE_PRECOND (nba.alphabet A = nba.alphabet B)" assumes "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" assumes "(Bi, B) \ \Id, nat_rel\ nbai_nba_rel" shows "(language_equal_impl Ai Bi, (OP op_language_equal ::: \Id, nat_rel\ nbai_nba_rel \ \Id, nat_rel\ nbai_nba_rel \ bool_rel) $ A $ B) \ bool_rel" using language_equal_impl.refine[OF assms[unfolded autoref_tag_defs]] by auto schematic_goal product_impl: assumes [simp]: "finite (NBA.nodes B)" assumes [autoref_rules]: "(Ai, A) \ \Id, nat_rel\ nbai_nba_rel" assumes [autoref_rules]: "(Bi, B) \ \Id, nat_rel\ nbai_nba_rel" shows "(?f :: ?'c, do { let AB' = intersect A (complement_4 B); ASSERT (finite (NBA.nodes AB')); op_translate AB' }) \ ?R" by (autoref_monadic (plain)) concrete_definition product_impl uses product_impl (* TODO: possible optimizations: - introduce op_map_map operation for maps instead of manually iterating via FOREACH - consolidate various binds and maps in expand_map_get_7 *) export_code Set.empty Set.insert Set.member "Inf :: 'a set set \ 'a set" "Sup :: 'a set set \ 'a set" image Pow set nat_of_integer integer_of_nat Variable Negation Conjunction Disjunction satisfies map_formula nbaei alphabetei initialei transitionei acceptingei nbae_nba_impl complement_impl language_equal_impl product_impl in SML module_name Complementation file_prefix Complementation end \ 
No newline at end of file diff --git a/thys/CakeML/generated/LemExtraDefs.thy b/thys/CakeML/generated/LemExtraDefs.thy --- a/thys/CakeML/generated/LemExtraDefs.thy +++ b/thys/CakeML/generated/LemExtraDefs.thy @@ -1,1255 +1,1255 @@ (*========================================================================*) (* Lem *) (* *) (* Dominic Mulligan, University of Cambridge *) (* Francesco Zappa Nardelli, INRIA Paris-Rocquencourt *) (* Gabriel Kerneis, University of Cambridge *) (* Kathy Gray, University of Cambridge *) (* Peter Boehm, University of Cambridge (while working on Lem) *) (* Peter Sewell, University of Cambridge *) (* Scott Owens, University of Kent *) (* Thomas Tuerk, University of Cambridge *) (* Brian Campbell, University of Edinburgh *) (* Shaked Flur, University of Cambridge *) (* Thomas Bauereiss, University of Cambridge *) (* Stephen Kell, University of Cambridge *) (* Thomas Williams *) (* Lars Hupel *) (* Basile Clement *) (* *) (* The Lem sources are copyright 2010-2018 *) (* by the authors above and Institut National de Recherche en *) (* Informatique et en Automatique (INRIA). *) (* *) (* All files except ocaml-lib/pmap.{ml,mli} and ocaml-libpset.{ml,mli} *) (* are distributed under the license below. The former are distributed *) (* under the LGPLv2, as in the LICENSE file. *) (* *) (* *) (* Redistribution and use in source and binary forms, with or without *) (* modification, are permitted provided that the following conditions *) (* are met: *) (* 1. Redistributions of source code must retain the above copyright *) (* notice, this list of conditions and the following disclaimer. *) (* 2. Redistributions in binary form must reproduce the above copyright *) (* notice, this list of conditions and the following disclaimer in the *) (* documentation and/or other materials provided with the distribution. *) (* 3. The names of the authors may not be used to endorse or promote *) (* products derived from this software without specific prior written *) (* permission. *) (* *) (* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS *) (* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED *) (* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE *) (* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY *) (* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *) (* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE *) (* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *) (* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER *) (* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR *) (* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN *) (* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *) (*========================================================================*) chapter \Auxiliary Definitions needed by Lem\ theory "LemExtraDefs" imports Main - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" "HOL-Library.While_Combinator" begin subsection \General\ consts failwith :: " 'a \ 'b" subsection \Lists\ fun index :: " 'a list \ nat \ 'a option " where "index [] n = None" | "index (x # xs) 0 = Some x" | "index (x # xs) (Suc n) = index xs n" lemma index_eq_some [simp]: "index l n = Some x \ (n < length l \ (x = l ! 
n))" proof (induct l arbitrary: n x) case Nil thus ?case by simp next case (Cons e es n x) note ind_hyp = this show ?case proof (cases n) case 0 thus ?thesis by auto next case (Suc n') with ind_hyp show ?thesis by simp qed qed lemma index_eq_none [simp]: "index l n = None \ length l \ n" by (rule iffD1[OF Not_eq_iff]) auto lemma index_simps [simp]: "length l \ n \ index l n = None" "n < length l \ index l n = Some (l ! n)" by (simp_all) fun find_indices :: "('a \ bool) \ 'a list \ nat list" where "find_indices P [] = []" | "find_indices P (x # xs) = (if P x then 0 # (map Suc (find_indices P xs)) else (map Suc (find_indices P xs)))" lemma length_find_indices : "length (find_indices P l) \ length l" by (induct l) auto lemma sorted_map_suc : "sorted l \ sorted (map Suc l)" by (induct l) (simp_all) lemma sorted_find_indices : "sorted (find_indices P xs)" proof (induct xs) case Nil thus ?case by simp next case (Cons x xs) from sorted_map_suc[OF this] show ?case by (simp) qed lemma find_indices_set [simp] : "set (find_indices P l) = {i. i < length l \ P (l ! i)}" proof (intro set_eqI) fix i show "i \ set (find_indices P l) \ (i \ {i. i < length l \ P (l ! i)})" proof (induct l arbitrary: i) case Nil thus ?case by simp next case (Cons x l' i) note ind_hyp = this show ?case proof (cases i) case 0 thus ?thesis by auto next case (Suc i') with ind_hyp[of i'] show ?thesis by auto qed qed qed definition find_index where "find_index P xs = (case find_indices P xs of [] \ None | i # _ \ Some i)" lemma find_index_eq_some [simp] : "(find_index P xs = Some ii) \ (ii < length xs \ P (xs ! ii) \ (\i' < ii. \(P (xs ! i'))))" (is "?lhs = ?rhs") proof (cases "find_indices P xs") case Nil with find_indices_set[of P xs] show ?thesis unfolding find_index_def by auto next case (Cons i il) note find_indices_eq = this from sorted_find_indices[of P xs] find_indices_eq have "sorted (i # il)" by simp hence i_leq: "\i'. i' \ set (i # il) \ i \ i'" by auto from find_indices_set[of P xs, unfolded find_indices_eq] have set_i_il_eq:"\i'. i' \ set (i # il) = (i' < length xs \ P (xs ! i'))" by simp have lhs_eq: "find_index P xs = Some i" unfolding find_index_def find_indices_eq by simp show ?thesis proof (intro iffI) assume ?lhs with lhs_eq have ii_eq[simp]: "ii = i" by simp from set_i_il_eq[of i] i_leq[unfolded set_i_il_eq] show ?rhs by auto (metis leD less_trans) next assume ?rhs with set_i_il_eq[of ii] have "ii \ set (i # il) \ (ii \ i)" by (metis leI length_pos_if_in_set nth_Cons_0 nth_mem set_i_il_eq) with i_leq [of ii] have "i = ii" by simp thus ?lhs unfolding lhs_eq by simp qed qed lemma find_index_eq_none [simp] : "(find_index P xs = None) \ (\x \ set xs. \(P x))" (is "?lhs = ?rhs") proof (rule iffD1[OF Not_eq_iff], intro iffI) assume "\ ?lhs" then obtain i where "find_index P xs = Some i" by auto hence "i < length xs \ P (xs ! i)" by simp thus "\ ?rhs" by auto next let ?p = "(\i. i < length xs \ P(xs ! i))" assume "\ ?rhs" then obtain i where "?p i" by (metis in_set_conv_nth) from LeastI [of ?p, OF \?p i\] have "?p (Least ?p)" . 
hence "find_index P xs = Some (Least ?p)" by (subst find_index_eq_some) (metis (mono_tags) less_trans not_less_Least) thus "\ ?lhs" by blast qed definition genlist where "genlist f n = map f (upt 0 n)" lemma genlist_length [simp] : "length (genlist f n) = n" unfolding genlist_def by simp lemma genlist_simps [simp]: "genlist f 0 = []" "genlist f (Suc n) = genlist f n @ [f n]" unfolding genlist_def by auto definition split_at where "split_at n l = (take n l, drop n l)" fun delete_first :: "('a \ bool) \ 'a list \ ('a list) option " where "delete_first P [] = None" | "delete_first P (x # xs) = (if (P x) then Some xs else map_option (\xs'. x # xs') (delete_first P xs))" declare delete_first.simps [simp del] lemma delete_first_simps [simp] : "delete_first P [] = None" "P x \ delete_first P (x # xs) = Some xs" "\(P x) \ delete_first P (x # xs) = map_option (\xs'. x # xs') (delete_first P xs)" unfolding delete_first.simps by auto lemmas delete_first_unroll = delete_first.simps(2) lemma delete_first_eq_none [simp] : "delete_first P l = None \ (\x \ set l. \ (P x))" by (induct l) (auto simp add: delete_first_unroll) lemma delete_first_eq_some : "delete_first P l = (Some l') \ (\l1 x l2. P x \ (\x \ set l1. \(P x)) \ (l = l1 @ (x # l2)) \ (l' = l1 @ l2))" (is "?lhs l l' = (\l1 x l2. ?rhs_body l1 x l2 l l')") proof (induct l arbitrary: l') case Nil thus ?case by simp next case (Cons e l l') note ind_hyp = this show ?case proof (cases "P e") case True show ?thesis proof (rule iffI) assume "?lhs (e # l) l'" with \P e\ have "l = l'" by simp with \P e\ have "?rhs_body [] e l' (e # l) l'" by simp thus "\l1 x l2. ?rhs_body l1 x l2 (e # l) l'" by blast next assume "\l1 x l2. ?rhs_body l1 x l2 (e # l) l'" then obtain l1 x l2 where body_ok: "?rhs_body l1 x l2 (e # l) l'" by blast from body_ok \P e\ have l1_eq[simp]: "l = l'" by (cases l1) (simp_all) with \P e\ show "?lhs (e # l) l'" by simp qed next case False define rhs_pred where "rhs_pred \ \l1 x l2 l l'. ?rhs_body l1 x l2 l l'" have rhs_fold: "\l1 x l2 l l'. ?rhs_body l1 x l2 l l' = rhs_pred l1 x l2 l l'" unfolding rhs_pred_def by simp have "(\z l1 x l2. rhs_pred l1 x l2 l z \ e # z = l') = (\l1 x l2. rhs_pred l1 x l2 (e # l) l')" proof (intro iffI) assume "\z l1 x l2. rhs_pred l1 x l2 l z \ e # z = l'" then obtain z l1 x l2 where "rhs_pred l1 x l2 l z" and l'_eq: "l' = e # z" by auto with \\(P e)\ have "rhs_pred (e # l1) x l2 (e # l) l'" unfolding rhs_pred_def by simp thus "\l1 x l2. rhs_pred l1 x l2 (e # l) l'" by blast next assume "\l1 x l2. rhs_pred l1 x l2 (e # l) l'" then obtain l1 x l2 where "rhs_pred l1 x l2 (e # l) l'" by blast with \\ (P e)\ obtain l1' where l1_eq[simp]: "l1 = e # l1'" unfolding rhs_pred_def by (cases l1) (auto) with \rhs_pred l1 x l2 (e # l) l'\ have "rhs_pred l1' x l2 l (l1' @ l2) \ e # (l1' @ l2) = l'" unfolding rhs_pred_def by (simp) thus "\z l1 x l2. rhs_pred l1 x l2 l z \ e # z = l'" by blast qed with \\ P e\ show ?thesis unfolding rhs_fold by (simp add: ind_hyp[unfolded rhs_fold]) qed qed lemma perm_eval [code] : "perm [] l \ l = []" (is ?g1) "perm (x # xs) l \ (case delete_first (\e. e = x) l of None => False | Some l' => perm xs l')" (is ?g2) proof - show ?g1 by auto next show ?g2 proof (cases "delete_first (\e. 
e = x) l") case None note del_eq = this hence "x \ set l" by auto with perm_set_eq [of "x # xs" l] have "\ perm (x # xs) l" by auto thus ?thesis unfolding del_eq by simp next case (Some l') note del_eq = this from del_eq[unfolded delete_first_eq_some] obtain l1 l2 where l_eq: "l = l1 @ [x] @ l2" and l'_eq: "l' = l1 @ l2" by auto have "(x # xs <~~> l1 @ x # l2) = (xs <~~> l1 @ l2)" proof - from perm_append_swap [of l1 "[x]"] perm_append2 [of "l1 @ [x]" "x # l1" l2] have "l1 @ x # l2 <~~> x # (l1 @ l2)" by simp hence "x # xs <~~> l1 @ x # l2 \ x # xs <~~> x # (l1 @ l2)" by (metis perm.trans perm_sym) thus ?thesis by simp qed with del_eq l_eq l'_eq show ?thesis by simp qed qed fun sorted_by :: "('a \ 'a \ bool)\ 'a list \ bool " where "sorted_by cmp [] = True" | "sorted_by cmp [_] = True" | "sorted_by cmp (x1 # x2 # xs) = ((cmp x1 x2) \ sorted_by cmp (x2 # xs))" lemma sorted_by_lesseq [simp] : "sorted_by ((\) :: ('a::{linorder}) => 'a => bool) = sorted" proof (rule ext) fix l :: "'a list" show "sorted_by (\) l = sorted l" proof (induct l) case Nil thus ?case by simp next case (Cons x xs) thus ?case by (cases xs) (simp_all del: sorted.simps(2) add: sorted2_simps) qed qed lemma sorted_by_cons_imp : "sorted_by cmp (x # xs) \ sorted_by cmp xs" by (cases xs) simp_all lemma sorted_by_cons_trans : assumes trans_cmp: "transp cmp" shows "sorted_by cmp (x # xs) = ((\x' \ set xs . cmp x x') \ sorted_by cmp xs)" proof (induct xs arbitrary: x) case Nil thus ?case by simp next case (Cons x2 xs x1) note ind_hyp = this from trans_cmp show ?case by (auto simp add: ind_hyp transp_def) qed fun insert_sort_insert_by :: "('a \ 'a \ bool)\ 'a \ 'a list \ 'a list " where "insert_sort_insert_by cmp e ([]) = ( [e])" | "insert_sort_insert_by cmp e (x # xs) = ( if cmp e x then (e # (x # xs)) else x # (insert_sort_insert_by cmp e xs))" lemma insert_sort_insert_by_length [simp] : "length (insert_sort_insert_by cmp e l) = Suc (length l)" by (induct l) auto lemma insert_sort_insert_by_set [simp] : "set (insert_sort_insert_by cmp e l) = insert e (set l)" by (induct l) auto lemma insert_sort_insert_by_perm : "(insert_sort_insert_by cmp e l) <~~> (e # l)" proof (induct l) case Nil thus ?case by simp next case (Cons e2 l') note ind_hyp = this have "e2 # e # l' <~~> e # e2 # l'" by (rule perm.swap) hence "e2 # insert_sort_insert_by cmp e l' <~~> e # e2 # l'" using ind_hyp by (metis cons_perm_eq perm.trans) thus ?case by simp qed lemma insert_sort_insert_by_sorted_by : assumes cmp_cases: "\y. y \ set l \ \ (cmp e y) \ cmp y e" assumes cmp_trans: "transp cmp" shows "sorted_by cmp l \ sorted_by cmp (insert_sort_insert_by cmp e l)" using cmp_cases proof (induct l) case Nil thus ?case by simp next case (Cons x1 l') note ind_hyp = Cons(1) note sorted_x1_l' = Cons(2) note cmp_cases = Cons(3) show ?case proof (cases l') case Nil with cmp_cases show ?thesis by simp next case (Cons x2 l'') note l'_eq = this from l'_eq sorted_x1_l' have "cmp x1 x2" "sorted_by cmp l'" by simp_all show ?thesis proof (cases "cmp e x1") case True with \cmp x1 x2\ \sorted_by cmp l'\ have "sorted_by cmp (x1 # l')" unfolding l'_eq by (simp) with \cmp e x1\ show ?thesis by simp next case False with cmp_cases have "cmp x1 e" by simp have "\x'. 
x' \ set l' \ cmp x1 x'" proof - fix x' assume "x' \ set l'" hence "x' = x2 \ cmp x2 x'" using \sorted_by cmp l'\ l'_eq sorted_by_cons_trans [OF cmp_trans, of x2 l''] by auto with transpD[OF cmp_trans, of x1 x2 x'] \cmp x1 x2\ show "cmp x1 x'" by blast qed hence "sorted_by cmp (x1 # insert_sort_insert_by cmp e l')" using ind_hyp [OF \sorted_by cmp l'\] \cmp x1 e\ cmp_cases unfolding sorted_by_cons_trans[OF cmp_trans] by simp with \\(cmp e x1)\ show ?thesis by simp qed qed qed fun insert_sort_by :: "('a \ 'a \ bool) \ 'a list \ 'a list " where "insert_sort_by cmp [] = []" | "insert_sort_by cmp (x # xs) = insert_sort_insert_by cmp x (insert_sort_by cmp xs)" lemma insert_sort_by_perm : "(insert_sort_by cmp l) <~~> l" proof (induct l) case Nil thus ?case by simp next case (Cons x l) thus ?case by simp (metis cons_perm_eq insert_sort_insert_by_perm perm.trans) qed lemma insert_sort_by_length [simp]: "length (insert_sort_by cmp l) = length l" by (induct l) auto lemma insert_sort_by_set [simp]: "set (insert_sort_by cmp l) = set l" by (induct l) auto definition sort_by where "sort_by = insert_sort_by" lemma sort_by_simps [simp]: "length (sort_by cmp l) = length l" "set (sort_by cmp l) = set l" unfolding sort_by_def by simp_all lemma sort_by_perm : "sort_by cmp l <~~> l" unfolding sort_by_def by (simp add: insert_sort_by_perm) subsection \Maps\ definition map_image :: "('v \ 'w) \ ('k, 'v) map \ ('k, 'w) map" where "map_image f m = (\k. map_option f (m k))" definition map_domain_image :: "('k \ 'v \ 'w) \ ('k, 'v) map \ ('k, 'w) map" where "map_domain_image f m = (\k. map_option (f k) (m k))" lemma map_image_simps [simp]: "(map_image f m) k = None \ m k = None" "(map_image f m) k = Some x \ (\x'. (m k = Some x') \ (x = f x'))" "(map_image f Map.empty) = Map.empty" "(map_image f (m (k \ v)) = (map_image f m) (k \ f v))" unfolding map_image_def by auto lemma map_image_dom_ran [simp]: "dom (map_image f m) = dom m" "ran (map_image f m) = f ` (ran m)" unfolding dom_def ran_def by auto definition map_to_set :: "('k, 'v) map \ ('k * 'v) set" where "map_to_set m = { (k, v) . m k = Some v }" lemma map_to_set_simps [simp] : "map_to_set Map.empty = {}" (is ?g1) "map_to_set (m ((k::'k) \ (v::'v))) = (insert (k, v) (map_to_set (m |` (- {k}))))" (is ?g2) proof - show ?g1 unfolding map_to_set_def by simp next show ?g2 proof (rule set_eqI) fix kv :: "('k * 'v)" obtain k' v' where kv_eq[simp]: "kv = (k', v')" by (rule prod.exhaust) show "(kv \ map_to_set (m(k \ v))) = (kv \ insert (k, v) (map_to_set (m |` (- {k}))))" by (auto simp add: map_to_set_def) qed qed subsection \Sets\ definition "set_choose s \ (SOME x. (x \ s))" definition without_trans_edges :: "('a \ 'a) set \ ('a \ 'a) set" where "without_trans_edges S \ let ts = trancl S in { (x, y) \ S. \z \ snd ` S. x \ z \ y \ z \ \ ((x, z) \ ts \ (z, y) \ ts)}" definition unbounded_lfp :: "'a set \ ('a set \ 'a set) \ 'a set" where "unbounded_lfp S f \ while (\x. x \ S) f S" definition unbounded_gfp :: "'a set \ ('a set \ 'a set) \ 'a set" where "unbounded_gfp S f \ while (\x. 
S \ x) f S" lemma set_choose_thm[simp]: "s \ {} \ (set_choose s) \ s" unfolding set_choose_def by (rule someI_ex) auto lemma set_choose_sing [simp]: "set_choose {x} = x" unfolding set_choose_def by auto lemma set_choose_code [code]: "set_choose (set [x]) = x" by auto lemma set_choose_in [intro] : assumes "s \ {}" shows "set_choose s \ s" proof - from \s \ {}\ obtain x where "x \ s" by auto thus ?thesis unfolding set_choose_def by (rule someI) qed definition set_case where "set_case s c_empty c_sing c_else = (if (s = {}) then c_empty else (if (card s = 1) then c_sing (set_choose s) else c_else))" lemma set_case_simps [simp] : "set_case {} c_empty c_sing c_else = c_empty" "set_case {x} c_empty c_sing c_else = c_sing x" "card s > 1 \ set_case s c_empty c_sing c_else = c_else" "\(finite s) \ set_case s c_empty c_sing c_else = c_else" unfolding set_case_def by auto lemma set_case_simp_insert2 [simp] : assumes x12_neq: "x1 \ x2" shows "set_case (insert x1 (insert x2 xs)) c_empty c_sing c_else = c_else" proof (cases "finite xs") case False thus ?thesis by (simp) next case True note fin_xs = this have "card {x1,x2} \ card (insert x1 (insert x2 xs))" by (rule card_mono) (auto simp add: fin_xs) with x12_neq have "1 < card (insert x1 (insert x2 xs))" by simp thus ?thesis by auto qed lemma set_case_code [code] : "set_case (set []) c_empty c_sing c_else = c_empty" "set_case (set [x]) c_empty c_sing c_else = c_sing x" "set_case (set (x1 # x2 # xs)) c_empty c_sing c_else = (if (x1 = x2) then set_case (set (x2 # xs)) c_empty c_sing c_else else c_else)" by auto definition set_lfp:: "'a set \ ('a set \ 'a set) \ 'a set" where "set_lfp s f = lfp (\s'. f s' \ s)" lemma set_lfp_tail_rec_def : assumes mono_f: "mono f" shows "set_lfp s f = (if (f s) \ s then s else (set_lfp (s \ f s) f))" (is "?ls = ?rs") proof (cases "f s \ s") case True note fs_sub_s = this from fs_sub_s have "\{u. f u \ s \ u} = s" by auto hence "?ls = s" unfolding set_lfp_def lfp_def . with fs_sub_s show "?ls = ?rs" by simp next case False note not_fs_sub_s = this from mono_f have mono_f': "mono (\s'. f s' \ s)" unfolding mono_def by auto have "\{u. f u \ s \ u} = \{u. f u \ (s \ f s) \ u}" (is "\?S1 = \?S2") proof have "?S2 \ ?S1" by auto thus "\?S1 \ \?S2" by (rule Inf_superset_mono) next { fix e assume "e \ \?S2" hence S2_prop: "\s'. f s' \ s' \ s \ s' \ f s \ s' \ e \ s'" by simp { fix s' assume "f s' \ s'" "s \ s'" from mono_f \s \ s'\ have "f s \ f s'" unfolding mono_def by simp with \f s' \ s'\ have "f s \ s'" by simp with \f s' \ s'\ \s \ s'\ S2_prop have "e \ s'" by simp } hence "e \ \?S1" by simp } thus "\?S2 \ \?S1" by auto qed hence "?ls = (set_lfp (s \ f s) f)" unfolding set_lfp_def lfp_def . with not_fs_sub_s show "?ls = ?rs" by simp qed lemma set_lfp_simps [simp] : "mono f \ f s \ s \ set_lfp s f = s" "mono f \ \(f s \ s) \ set_lfp s f = (set_lfp (s \ f s) f)" by (metis set_lfp_tail_rec_def)+ fun insert_in_list_at_arbitrary_pos where "insert_in_list_at_arbitrary_pos x [] = {[x]}" | "insert_in_list_at_arbitrary_pos x (y # ys) = insert (x # y # ys) ((\l. y # l) ` (insert_in_list_at_arbitrary_pos x ys))" lemma insert_in_list_at_arbitrary_pos_thm : "xl \ insert_in_list_at_arbitrary_pos x l \ (\l1 l2. l = l1 @ l2 \ xl = l1 @ [x] @ l2)" proof (induct l arbitrary: xl) case Nil thus ?case by simp next case (Cons y l xyl) note ind_hyp = this show ?case proof (rule iffI) assume xyl_in: "xyl \ insert_in_list_at_arbitrary_pos x (y # l)" show "\l1 l2. 
y # l = l1 @ l2 \ xyl = l1 @ [x] @ l2" proof (cases "xyl = x # y # l") case True hence "y # l = [] @ (y # l) \ xyl = [] @ [x] @ (y # l)" by simp thus ?thesis by blast next case False with xyl_in have "xyl \ (#) y ` insert_in_list_at_arbitrary_pos x l" by simp with ind_hyp obtain l1 l2 where "l = l1 @ l2 \ xyl = y # l1 @ x # l2" by (auto simp add: image_def Bex_def) hence "y # l = (y # l1) @ l2 \ xyl = (y # l1) @ [x] @ l2" by simp thus ?thesis by blast qed next assume "\l1 l2. y # l = l1 @ l2 \ xyl = l1 @ [x] @ l2" then obtain l1 l2 where yl_eq: "y # l = l1 @ l2" and xyl_eq: "xyl = l1 @ [x] @ l2" by blast show "xyl \ insert_in_list_at_arbitrary_pos x (y # l)" proof (cases l1) case Nil with yl_eq xyl_eq have "xyl = x # y # l" by simp thus ?thesis by simp next case (Cons y' l1') with yl_eq have l1_eq: "l1 = y # l1'" and l_eq: "l = l1' @ l2" by simp_all have "\l1'' l2''. l = l1'' @ l2'' \ l1' @ [x] @ l2 = l1'' @ [x] @ l2''" apply (rule_tac exI[where x = l1']) apply (rule_tac exI [where x = l2]) apply (simp add: l_eq) done hence "(l1' @ [x] @ l2) \ insert_in_list_at_arbitrary_pos x l" unfolding ind_hyp by blast hence "\l'. l' \ insert_in_list_at_arbitrary_pos x l \ l1 @ x # l2 = y # l'" by (rule_tac exI [where x = "l1' @ [x] @ l2"]) (simp add: l1_eq) thus ?thesis by (simp add: image_def Bex_def xyl_eq) qed qed qed definition list_of_set_set :: "'a set \ ('a list) set" where "list_of_set_set s = { l . (set l = s) \ distinct l }" lemma list_of_set_set_empty [simp]: "list_of_set_set {} = {[]}" unfolding list_of_set_set_def by auto lemma list_of_set_set_insert [simp] : "list_of_set_set (insert x s) = \ ((insert_in_list_at_arbitrary_pos x) ` (list_of_set_set (s - {x})))" (is "?lhs = ?rhs") proof (intro set_eqI) fix l have "(set l = insert x s \ distinct l) \ (\l1 l2. set (l1 @ l2) = s - {x} \ distinct (l1 @ l2) \ l = l1 @ x # l2)" proof (intro iffI) assume "set l = insert x s \ distinct l" hence set_l_eq: "set l = insert x s" and "distinct l" by simp_all from \set l = insert x s\ have "x \ set l" by simp then obtain l1 l2 where l_eq: "l = l1 @ x # l2" unfolding in_set_conv_decomp by blast from \distinct l\ l_eq have "distinct (l1 @ l2)" and x_nin: "x \ set (l1 @ l2)" by auto from x_nin set_l_eq[unfolded l_eq] have set_l12_eq: "set (l1 @ l2) = s - {x}" by auto from \distinct (l1 @ l2)\ l_eq set_l12_eq show "\l1 l2. set (l1 @ l2) = s - {x} \ distinct (l1 @ l2) \ l = l1 @ x # l2" by blast next assume "\l1 l2. set (l1 @ l2) = s - {x} \ distinct (l1 @ l2) \ l = l1 @ x # l2" then obtain l1 l2 where "set (l1 @ l2) = s - {x}" "distinct (l1 @ l2)" "l = l1 @ x # l2" by blast thus "set l = insert x s \ distinct l" by auto qed thus "l \ list_of_set_set (insert x s) \ l \ (\ ((insert_in_list_at_arbitrary_pos x) ` (list_of_set_set (s - {x}))))" unfolding list_of_set_set_def by (simp add: insert_in_list_at_arbitrary_pos_thm ex_simps[symmetric] del: ex_simps) qed lemma list_of_set_set_code [code]: "list_of_set_set (set []) = {[]}" "list_of_set_set (set (x # xs)) = \ ((insert_in_list_at_arbitrary_pos x) ` (list_of_set_set ((set xs) - {x})))" by simp_all lemma list_of_set_set_is_empty : "list_of_set_set s = {} \ \ (finite s)" proof - have "finite s \ (\l. set l = s \ distinct l)" proof (rule iffI) assume "\l. set l = s \ distinct l" then obtain l where "s = set l" by blast thus "finite s" by simp next assume "finite s" thus "\l. 
set l = s \ distinct l" proof (induct s) case empty show ?case by auto next case (insert e s) note e_nin_s = insert(2) from insert(3) obtain l where set_l: "set l = s" and dist_l: "distinct l" by blast from set_l have set_el: "set (e # l) = insert e s" by auto from dist_l set_l e_nin_s have dist_el: "distinct (e # l)" by simp from set_el dist_el show ?case by blast qed qed thus ?thesis unfolding list_of_set_set_def by simp qed definition list_of_set :: "'a set \ 'a list" where "list_of_set s = set_choose (list_of_set_set s)" lemma list_of_set [simp] : assumes fin_s: "finite s" shows "set (list_of_set s) = s" "distinct (list_of_set s)" proof - from fin_s list_of_set_set_is_empty[of s] have "\ (list_of_set_set s = {})" by simp hence "list_of_set s \ list_of_set_set s" unfolding list_of_set_def by (rule set_choose_thm) thus "set (list_of_set s) = s" "distinct (list_of_set s)" unfolding list_of_set_set_def by simp_all qed lemma list_of_set_in: "finite s \ list_of_set s \ list_of_set_set s" unfolding list_of_set_def by (metis list_of_set_set_is_empty set_choose_thm) definition ordered_list_of_set where "ordered_list_of_set cmp s = set_choose (sort_by cmp ` list_of_set_set s)" subsection \sum\ find_consts "'a list => ('a list * _)" fun sum_partition :: "('a + 'b) list \ 'a list * 'b list" where "sum_partition [] = ([], [])" | "sum_partition ((Inl l) # lrs) = (let (ll, rl) = sum_partition lrs in (l # ll, rl))" | "sum_partition ((Inr r) # lrs) = (let (ll, rl) = sum_partition lrs in (ll, r # rl))" lemma sum_partition_length : "List.length lrs = List.length (fst (sum_partition lrs)) + List.length (snd (sum_partition lrs))" proof (induct lrs) case Nil thus ?case by simp next case (Cons lr lrs) thus ?case by (cases lr) (auto split: prod.split) qed subsection \sorting\ subsection \Strings\ declare String.literal.explode_inverse [simp] subsection \num to string conversions\ definition nat_list_to_string :: "nat list \ string" where "nat_list_to_string nl = map char_of nl" definition is_digit where "is_digit (n::nat) = (n < 10)" lemma is_digit_simps[simp] : "n < 10 \ is_digit n" "\(n < 10) \ \(is_digit n)" unfolding is_digit_def by simp_all lemma is_digit_expand : "is_digit n \ (n = 0) \ (n = 1) \ (n = 2) \ (n = 3) \ (n = 4) \ (n = 5) \ (n = 6) \ (n = 7) \ (n = 8) \ (n = 9)" unfolding is_digit_def by auto lemmas is_digitE = is_digit_expand[THEN iffD1,elim_format] lemmas is_digitI = is_digit_expand[THEN iffD2,rule_format] definition is_digit_char where "is_digit_char c \ (c = CHR ''0'') \ (c = CHR ''5'') \ (c = CHR ''1'') \ (c = CHR ''6'') \ (c = CHR ''2'') \ (c = CHR ''7'') \ (c = CHR ''3'') \ (c = CHR ''8'') \ (c = CHR ''4'') \ (c = CHR ''9'')" lemma is_digit_char_simps[simp] : "is_digit_char (CHR ''0'')" "is_digit_char (CHR ''1'')" "is_digit_char (CHR ''2'')" "is_digit_char (CHR ''3'')" "is_digit_char (CHR ''4'')" "is_digit_char (CHR ''5'')" "is_digit_char (CHR ''6'')" "is_digit_char (CHR ''7'')" "is_digit_char (CHR ''8'')" "is_digit_char (CHR ''9'')" unfolding is_digit_char_def by simp_all lemmas is_digit_charE = is_digit_char_def[THEN iffD1,elim_format] lemmas is_digit_charI = is_digit_char_def[THEN iffD2,rule_format] definition digit_to_char :: "nat \ char" where "digit_to_char n = ( if n = 0 then CHR ''0'' else if n = 1 then CHR ''1'' else if n = 2 then CHR ''2'' else if n = 3 then CHR ''3'' else if n = 4 then CHR ''4'' else if n = 5 then CHR ''5'' else if n = 6 then CHR ''6'' else if n = 7 then CHR ''7'' else if n = 8 then CHR ''8'' else if n = 9 then CHR ''9'' else CHR ''X'')" lemma 
digit_to_char_simps [simp]: "digit_to_char 0 = CHR ''0''" "digit_to_char (Suc 0) = CHR ''1''" "digit_to_char 2 = CHR ''2''" "digit_to_char 3 = CHR ''3''" "digit_to_char 4 = CHR ''4''" "digit_to_char 5 = CHR ''5''" "digit_to_char 6 = CHR ''6''" "digit_to_char 7 = CHR ''7''" "digit_to_char 8 = CHR ''8''" "digit_to_char 9 = CHR ''9''" "n > 9 \ digit_to_char n = CHR ''X''" unfolding digit_to_char_def by simp_all definition char_to_digit :: "char \ nat" where "char_to_digit c = ( if c = CHR ''0'' then 0 else if c = CHR ''1'' then 1 else if c = CHR ''2'' then 2 else if c = CHR ''3'' then 3 else if c = CHR ''4'' then 4 else if c = CHR ''5'' then 5 else if c = CHR ''6'' then 6 else if c = CHR ''7'' then 7 else if c = CHR ''8'' then 8 else if c = CHR ''9'' then 9 else 10)" lemma char_to_digit_simps [simp]: "char_to_digit (CHR ''0'') = 0" "char_to_digit (CHR ''1'') = 1" "char_to_digit (CHR ''2'') = 2" "char_to_digit (CHR ''3'') = 3" "char_to_digit (CHR ''4'') = 4" "char_to_digit (CHR ''5'') = 5" "char_to_digit (CHR ''6'') = 6" "char_to_digit (CHR ''7'') = 7" "char_to_digit (CHR ''8'') = 8" "char_to_digit (CHR ''9'') = 9" unfolding char_to_digit_def by simp_all lemma diget_to_char_inv[simp]: assumes is_digit: "is_digit n" shows "char_to_digit (digit_to_char n) = n" using is_digit unfolding is_digit_expand by auto lemma char_to_diget_inv[simp]: assumes is_digit: "is_digit_char c" shows "digit_to_char (char_to_digit c) = c" using is_digit unfolding char_to_digit_def is_digit_char_def by auto lemma char_to_digit_div_mod [simp]: assumes is_digit: "is_digit_char c" shows "char_to_digit c < 10" using is_digit unfolding char_to_digit_def is_digit_char_def by auto lemma is_digit_char_intro[simp]: "is_digit (char_to_digit c) = is_digit_char c" unfolding char_to_digit_def is_digit_char_def is_digit_expand by auto lemma is_digit_intro[simp]: "is_digit_char (digit_to_char n) = is_digit n" unfolding digit_to_char_def is_digit_char_def is_digit_expand by auto lemma digit_to_char_11: "digit_to_char n1 = digit_to_char n2 \ (is_digit n1 = is_digit n2) \ (is_digit n1 \ (n1 = n2))" by (metis diget_to_char_inv is_digit_intro) lemma char_to_digit_11: "char_to_digit c1 = char_to_digit c2 \ (is_digit_char c1 = is_digit_char c2) \ (is_digit_char c1 \ (c1 = c2))" by (metis char_to_diget_inv is_digit_char_intro) function nat_to_string :: "nat \ string" where "nat_to_string n = (if (is_digit n) then [digit_to_char n] else nat_to_string (n div 10) @ [digit_to_char (n mod 10)])" by auto termination by (relation "measure id") (auto simp add: is_digit_def) definition int_to_string :: "int \ string" where "int_to_string i \ if i < 0 then ''-'' @ nat_to_string (nat (abs i)) else nat_to_string (nat i)" lemma nat_to_string_simps[simp]: "is_digit n \ nat_to_string n = [digit_to_char n]" "\(is_digit n) \ nat_to_string n = nat_to_string (n div 10) @ [digit_to_char (n mod 10)]" by simp_all declare nat_to_string.simps[simp del] lemma nat_to_string_neq_nil[simp]: "nat_to_string n \ []" by (cases "is_digit n") simp_all lemmas nat_to_string_neq_nil2[simp] = nat_to_string_neq_nil[symmetric] lemma nat_to_string_char_to_digit [simp]: "is_digit_char c \ nat_to_string (char_to_digit c) = [c]" by auto lemma nat_to_string_11[simp] : "(nat_to_string n1 = nat_to_string n2) \ n1 = n2" proof (rule iffI) assume "n1 = n2" thus "nat_to_string n1 = nat_to_string n2" by simp next assume "nat_to_string n1 = nat_to_string n2" thus "n1 = n2" proof (induct n2 arbitrary: n1 rule: less_induct) case (less n2') note ind_hyp = this(1) note n2s_eq = less(2) have 
is_dig_eq: "is_digit n2' = is_digit n1" using n2s_eq apply (cases "is_digit n2'") apply (case_tac [!] "is_digit n1") apply (simp_all) done show ?case proof (cases "is_digit n2'") case True with n2s_eq is_dig_eq show ?thesis by simp (metis digit_to_char_11) next case False with is_dig_eq have not_digs : "\ (is_digit n1)" "\ (is_digit n2')" by simp_all from not_digs(2) have "n2' div 10 < n2'" unfolding is_digit_def by auto note ind_hyp' = ind_hyp [OF this, of "n1 div 10"] from not_digs n2s_eq ind_hyp' digit_to_char_11[of "n1 mod 10" "n2' mod 10"] have "(n1 mod 10) = (n2' mod 10)" "n1 div 10 = n2' div 10" by simp_all thus "n1 = n2'" by (metis div_mult_mod_eq) qed qed qed definition "is_nat_string s \ (\c\set s. is_digit_char c)" definition "is_strong_nat_string s \ is_nat_string s \ (s \ []) \ (hd s = CHR ''0'' \ length s = 1)" lemma is_nat_string_simps[simp]: "is_nat_string []" "is_nat_string (c # s) \ is_digit_char c \ is_nat_string s" unfolding is_nat_string_def by simp_all lemma is_strong_nat_string_simps[simp]: "\(is_strong_nat_string [])" "is_strong_nat_string (c # s) \ is_digit_char c \ is_nat_string s \ (c = CHR ''0'' \ s = [])" unfolding is_strong_nat_string_def by simp_all fun string_to_nat_aux :: "nat \ string \ nat" where "string_to_nat_aux n [] = n" | "string_to_nat_aux n (d#ds) = string_to_nat_aux (n*10 + char_to_digit d) ds" definition string_to_nat :: "string \ nat option" where "string_to_nat s \ (if is_nat_string s then Some (string_to_nat_aux 0 s) else None)" definition string_to_nat' :: "string \ nat" where "string_to_nat' s \ the (string_to_nat s)" lemma string_to_nat_aux_inv : assumes "is_nat_string s" assumes "n > 0 \ is_strong_nat_string s" shows "nat_to_string (string_to_nat_aux n s) = (if n = 0 then '''' else nat_to_string n) @ s" using assms proof (induct s arbitrary: n) case Nil thus ?case by (simp add: is_strong_nat_string_def) next case (Cons c s n) from Cons(2) have "is_digit_char c" "is_nat_string s" by simp_all note cs_ok = Cons(3) let ?m = "n*10 + char_to_digit c" note ind_hyp = Cons(1)[OF \is_nat_string s\, of ?m] from \is_digit_char c\ have m_div: "?m div 10 = n" and m_mod: "?m mod 10 = char_to_digit c" unfolding is_digit_char_def by auto show ?case proof (cases "?m = 0") case True with \is_digit_char c\ have "n = 0" "c = CHR ''0''" unfolding is_digit_char_def by auto moreover with cs_ok have "s = []" by simp ultimately show ?thesis by simp next case False note m_neq_0 = this with ind_hyp have ind_hyp': "nat_to_string (string_to_nat_aux ?m s) = (nat_to_string ?m) @ s" by auto hence "nat_to_string (string_to_nat_aux n (c # s)) = (nat_to_string ?m) @ s" by simp with \is_digit_char c\ m_div show ?thesis by simp qed qed lemma string_to_nat_inv : assumes strong_nat_s: "is_strong_nat_string s" assumes s2n_s: "string_to_nat s = Some n" shows "nat_to_string n = s" proof - from strong_nat_s have nat_s: "is_nat_string s" unfolding is_strong_nat_string_def by simp with s2n_s have n_eq: "n = string_to_nat_aux 0 s" unfolding string_to_nat_def by simp from string_to_nat_aux_inv[of s 0, folded n_eq] nat_s strong_nat_s show ?thesis by simp qed lemma nat_to_string_induct [case_names "digit" "non_digit"]: assumes digit: "\d. is_digit d \ P d" assumes not_digit: "\n. 
\(is_digit n) \ P (n div 10) \ P (n mod 10) \ P n" shows "P n" proof (induct n rule: less_induct) case (less n) note ind_hyp = this show ?case proof (cases "is_digit n") case True with digit show ?thesis by simp next case False note not_dig = this hence "n div 10 < n" "n mod 10 < n" unfolding is_digit_def by auto with not_dig ind_hyp not_digit show ?thesis by simp qed qed lemma nat_to_string___is_nat_string [simp]: "is_nat_string (nat_to_string n)" unfolding is_nat_string_def proof (induct n rule: nat_to_string_induct) case (digit d) thus ?case by simp next case (non_digit n) thus ?case by simp qed lemma nat_to_string___eq_0 [simp]: "(nat_to_string n = (CHR ''0'')#s) \ (n = 0 \ s = [])" unfolding is_nat_string_def proof (induct n arbitrary: s rule: nat_to_string_induct) case (digit d) thus ?case unfolding is_digit_expand by auto next case (non_digit n) obtain c s' where ns_eq: "nat_to_string (n div 10) = c # s'" by (cases "nat_to_string (n div 10)") auto from non_digit(1) have "n div 10 \ 0" unfolding is_digit_def by auto with non_digit(2) ns_eq have c_neq: "c \ CHR ''0''" by auto from \\ (is_digit n)\ c_neq ns_eq show ?case by auto qed lemma nat_to_string___is_strong_nat_string: "is_strong_nat_string (nat_to_string n)" proof (cases "is_digit n") case True thus ?thesis by simp next case False note not_digit = this obtain c s' where ns_eq: "nat_to_string n = c # s'" by (cases "nat_to_string n") auto from not_digit have "0 < n" unfolding is_digit_def by simp with ns_eq have c_neq: "c \ CHR ''0''" by auto hence "hd (nat_to_string n) \ CHR ''0''" unfolding ns_eq by simp thus ?thesis unfolding is_strong_nat_string_def by simp qed lemma nat_to_string_inv : "string_to_nat (nat_to_string n) = Some n" by (metis nat_to_string_11 nat_to_string___is_nat_string nat_to_string___is_strong_nat_string string_to_nat_def string_to_nat_inv) definition The_opt where "The_opt p = (if (\!x. p x) then Some (The p) else None)" lemma The_opt_eq_some [simp] : "((The_opt p) = (Some x)) \ ((p x) \ ((\ y. p y \ (x = y))))" (is "?lhs = ?rhs") proof (cases "\!x. p x") case True note exists_unique = this then obtain x where p_x: "p x" by auto from the1_equality[of p x] exists_unique p_x have the_opt_eq: "The_opt p = Some x" unfolding The_opt_def by simp from exists_unique the_opt_eq p_x show ?thesis by auto next case False note not_unique = this hence "The_opt p = None" unfolding The_opt_def by simp with not_unique show ?thesis by auto qed lemma The_opt_eq_none [simp] : "((The_opt p) = None) \ \(\!x. p x)" unfolding The_opt_def by auto end diff --git a/thys/CakeML/generated/Lem_sorting.thy b/thys/CakeML/generated/Lem_sorting.thy --- a/thys/CakeML/generated/Lem_sorting.thy +++ b/thys/CakeML/generated/Lem_sorting.thy @@ -1,110 +1,110 @@ chapter \Generated by Lem from \sorting.lem\.\ theory "Lem_sorting" imports Main "Lem_bool" "Lem_basic_classes" "Lem_maybe" "Lem_list" "Lem_num" "Lem" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" begin \ \\open import Bool Basic_classes Maybe List Num\\ \ \\open import {isabelle} `HOL-Library.Permutation`\\ \ \\open import {coq} `Coq.Lists.List`\\ \ \\open import {hol} `sortingTheory` `permLib`\\ \ \\open import {isabelle} `$LIB_DIR/Lem`\\ \ \\ ------------------------- \\ \ \\ permutations \\ \ \\ ------------------------- \\ \ \\val isPermutation : forall 'a. Eq 'a => list 'a -> list 'a -> bool\\ \ \\val isPermutationBy : forall 'a. 
('a -> 'a -> bool) -> list 'a -> list 'a -> bool\\ fun isPermutationBy :: "('a \ 'a \ bool)\ 'a list \ 'a list \ bool " where " isPermutationBy eq ([]) l2 = ( (l2 = []))" |" isPermutationBy eq (x # xs) l2 = ( ( (case delete_first (eq x) l2 of None => False | Some ys => isPermutationBy eq xs ys ) ))" \ \\ ------------------------- \\ \ \\ isSorted \\ \ \\ ------------------------- \\ \ \\ isSortedBy R l checks, whether the list l is sorted by ordering R. R should represent an order, i.e. it should be transitive. Different backends defined "isSorted" slightly differently. However, the definitions coincide for transitive R. Therefore there is the following restriction: WARNING: Use isSorted and isSortedBy only with transitive relations! \\ \ \\val isSorted : forall 'a. Ord 'a => list 'a -> bool\\ \ \\val isSortedBy : forall 'a. ('a -> 'a -> bool) -> list 'a -> bool\\ \ \\ DPM: rejigged the definition with a nested match to get past Coq's termination checker. \\ \ \\let rec isSortedBy cmp l= match l with | [] -> true | x1 :: xs -> match xs with | [] -> true | x2 :: _ -> (cmp x1 x2 && isSortedBy cmp xs) end end\\ \ \\ ----------------------- \\ \ \\ insertion sort \\ \ \\ ----------------------- \\ \ \\val insert : forall 'a. Ord 'a => 'a -> list 'a -> list 'a\\ \ \\val insertBy : forall 'a. ('a -> 'a -> bool) -> 'a -> list 'a -> list 'a\\ \ \\val insertSort: forall 'a. Ord 'a => list 'a -> list 'a\\ \ \\val insertSortBy: forall 'a. ('a -> 'a -> bool) -> list 'a -> list 'a\\ \ \\let rec insertBy cmp e l= match l with | [] -> [e] | x :: xs -> if cmp x e then x :: (insertBy cmp e xs) else (e :: x :: xs) end\\ \ \\let insertSortBy cmp l= List.foldl (fun l e -> insertBy cmp e l) [] l\\ \ \\ ----------------------- \\ \ \\ general sorting \\ \ \\ ----------------------- \\ \ \\val sort: forall 'a. Ord 'a => list 'a -> list 'a\\ \ \\val sortBy: forall 'a. ('a -> 'a -> bool) -> list 'a -> list 'a\\ \ \\val sortByOrd: forall 'a. ('a -> 'a -> ordering) -> list 'a -> list 'a\\ \ \\val predicate_of_ord : forall 'a. ('a -> 'a -> ordering) -> 'a -> 'a -> bool\\ definition predicate_of_ord :: "('a \ 'a \ ordering)\ 'a \ 'a \ bool " where " predicate_of_ord f x y = ( (case f x y of LT => True | EQ => True | GT => False ))" end diff --git a/thys/Chandy_Lamport/Snapshot.thy b/thys/Chandy_Lamport/Snapshot.thy --- a/thys/Chandy_Lamport/Snapshot.thy +++ b/thys/Chandy_Lamport/Snapshot.thy @@ -1,5272 +1,5272 @@ section \The Chandy--Lamport algorithm\ theory Snapshot imports "HOL-Library.Sublist" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" Distributed_System Trace Util Swap begin subsection \The computation locale\ text \We extend the distributed system locale presented earlier: Now we are given a trace t of the distributed system between two configurations, the initial and final configurations of t. Our objective is to show that the Chandy--Lamport algorithm terminates successfully and exhibits the same properties as claimed in~\cite{chandy}. In the initial state no snapshotting must have taken place yet; however, the computation itself may have progressed arbitrarily far already. We assume that there exists at least one process, that the total number of processes in the system is finite, and that there are only finitely many channels between the processes. The process graph is strongly connected. 
Finally there are Chandy and Lamport's core assumptions: every process snapshots at some time and no marker may remain in a channel forever.\ locale computation = distributed_system + fixes init final :: "('a, 'b, 'c) configuration" assumes finite_channels: "finite {i. \p q. channel i = Some (p, q)}" and strongly_connected_raw: "\p q. (p \ q) \ (tranclp (\p q. (\i. channel i = Some (p, q)))) p q" and at_least_two_processes: "card (UNIV :: 'a set) > 1" and finite_processes: "finite (UNIV :: 'a set)" and no_initial_Marker: "\i. (\p q. channel i = Some (p, q)) \ Marker \ set (msgs init i)" and no_msgs_if_no_channel: "\i. channel i = None \ msgs init i = []" and no_initial_process_snapshot: "\p. ~ has_snapshotted init p" and no_initial_channel_snapshot: "\i. channel_snapshot init i = ([], NotStarted)" and valid: "\t. trace init t final" and l1: "\t i cid. trace init t final \ Marker \ set (msgs (s init t i) cid) \ (\j. j \ i \ Marker \ set (msgs (s init t j) cid))" and l2: "\t p. trace init t final \ (\i. has_snapshotted (s init t i) p \ i \ length t)" begin definition has_channel where "has_channel p q \ (\i. channel i = Some (p, q))" lemmas strongly_connected = strongly_connected_raw[folded has_channel_def] lemma exists_some_channel: shows "\i p q. channel i = Some (p, q)" proof - obtain p q where "p : (UNIV :: 'a set) \ q : (UNIV :: 'a set) \ p \ q" by (metis (mono_tags) One_nat_def UNIV_eq_I all_not_in_conv at_least_two_processes card_Suc_Diff1 card.empty finite_processes insert_iff iso_tuple_UNIV_I less_numeral_extra(4) n_not_Suc_n) then have "(tranclp has_channel) p q" using strongly_connected by simp then obtain r s where "has_channel r s" by (meson tranclpD) then show ?thesis using has_channel_def by auto qed abbreviation S where "S \ s init" lemma no_messages_if_no_channel: assumes "trace init t final" shows "channel cid = None \ msgs (s init t i) cid = []" using no_messages_introduced_if_no_channel[OF assms no_msgs_if_no_channel] by blast lemma S_induct [consumes 3, case_names S_init S_step]: "\ trace init t final; i \ j; j \ length t; \i. P i i; \i j. i < j \ j \ length t \ (S t i) \ (t ! i) \ (S t (Suc i)) \ P (Suc i) j \ P i j \ \ P i j" proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc n) then have "(S t i) \ (t ! i) \ (S t (Suc i))" using Suc step_Suc by simp then show ?case using Suc by simp qed lemma exists_index: assumes "trace init t final" and "ev \ set (take (j - i) (drop i t))" shows "\k. i \ k \ k < j \ ev = t ! k" proof - have "trace (S t i) (take (j - i) (drop i t)) (S t j)" by (metis assms(1) assms(2) diff_is_0_eq' exists_trace_for_any_i_j list.distinct(1) list.set_cases nat_le_linear take_eq_Nil) obtain l where "ev = (take (j - i) (drop i t)) ! l" "l < length (take (j - i) (drop i t))" by (metis assms(2) in_set_conv_nth) let ?k = "l + i" have "(take (j - i) (drop i t)) ! l = drop i t ! l" using \l < length (take (j - i) (drop i t))\ by auto also have "... = t ! ?k" by (metis add.commute assms(2) drop_all empty_iff list.set(1) nat_le_linear nth_drop take_Nil) finally have "ev = t ! ?k" using \ev = take (j - i) (drop i t) ! 
l\ by blast moreover have "i \ ?k \ ?k < j" using \l < length (take (j - i) (drop i t))\ by auto ultimately show ?thesis by blast qed lemma no_change_if_ge_length_t: assumes "trace init t final" and "i \ length t" and "j \ i" shows "S t i = S t j" proof - have "trace (S t i) (take (j - i) (drop i t)) (S t j)" using assms(1) assms(3) exists_trace_for_any_i_j by blast moreover have "(take (j - i) (drop i t)) = Nil" by (simp add: assms(2)) ultimately show ?thesis by (metis tr_init trace_and_start_determines_end) qed lemma no_marker_if_no_snapshot: shows "\ trace init t final; channel cid = Some (p, q); ~ has_snapshotted (S t i) p \ \ Marker \ set (msgs (S t i) cid)" proof (induct i) case 0 then show ?case by (metis exists_trace_for_any_i no_initial_Marker take_eq_Nil tr_init trace_and_start_determines_end) next case (Suc n) then have IH: "Marker \ set (msgs (S t n) cid)" by (meson distributed_system.exists_trace_for_any_i_j distributed_system.snapshot_stable_2 distributed_system_axioms eq_iff le_Suc_eq) then obtain tr where decomp: "trace (S t n) tr (S t (Suc n))" "tr = take (Suc n - n) (drop n t)" using Suc exists_trace_for_any_i_j le_Suc_eq by blast have "Marker \ set (msgs (S t (Suc n)) cid)" proof (cases "tr = []") case True then show ?thesis by (metis IH decomp(1) tr_init trace_and_start_determines_end) next case False then obtain ev where step: "tr = [ev]" "(S t n) \ ev \ (S t (Suc n))" by (metis One_nat_def Suc_eq_plus1 Suc_leI \tr = take (Suc n - n) (drop n t)\ \trace (S t n) tr (S t (Suc n))\ add_diff_cancel_left' append.simps(1) butlast_take cancel_comm_monoid_add_class.diff_cancel length_greater_0_conv list.distinct(1) list.sel(3) snoc_eq_iff_butlast take0 take_Nil trace.cases) then show ?thesis proof (cases ev) case (Snapshot p') then show ?thesis by (metis IH Suc.prems(2) Suc.prems(3) local.step(2) new_Marker_in_set_implies_nonregular_occurence new_msg_in_set_implies_occurrence nonregular_event_induces_snapshot snapshot_state_unchanged) next case (RecvMarker cid' p' q') have "p' \ p" proof (rule ccontr) assume asm: "~ p' \ p" then have "has_snapshotted (S t (Suc n)) p" proof - have "~ regular_event ev" using RecvMarker by auto moreover have "occurs_on ev = p" using asm RecvMarker by auto ultimately show ?thesis using step(2) Suc.hyps Suc.prems by (metis nonregular_event_induces_snapshot snapshot_state_unchanged) qed then show False using Suc.prems by blast qed moreover have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "hd (msgs (S t n) cid) = Marker \ length (msgs (S t n) cid) > 0" using step RecvMarker can_occur_def by auto then have "Marker : set (msgs (S t n) cid)" using list.set_sel(1) by fastforce then show False using IH by simp qed ultimately have "msgs (S t (Suc n)) cid = msgs (S t n) cid" proof - have "\r. channel cid = Some (p', r)" using Suc.prems(2) \p' \ p\ by auto with \cid \ cid'\ RecvMarker step show ?thesis by (cases "has_snapshotted (S t n) p'", auto) qed then show ?thesis by (simp add: IH) next case (Trans p' s s') then show ?thesis using IH local.step(2) by force next case (Send cid' p' q' s s' m) with step IH show ?thesis by (cases "cid' = cid", auto) next case (Recv cid' p' q' s s' m) with step IH show ?thesis by (cases "cid' = cid", auto) qed qed then show ?case by blast qed subsection \Termination\ text \We prove that the snapshot algorithm terminates, as exhibited by lemma \texttt{snapshot\_algorithm\_must\_terminate}. 
In the final configuration all processes have snapshotted, and no markers remain in the channels.\ lemma must_exist_snapshot: assumes "trace init t final" shows "\p i. Snapshot p = t ! i" proof (rule ccontr) assume "\p i. Snapshot p = t ! i" have "\i p. ~ has_snapshotted (S t i) p" proof (rule allI) fix i show "\p. ~ has_snapshotted (S t i) p" proof (induct i) case 0 then show ?case by (metis assms distributed_system.trace_and_start_determines_end distributed_system_axioms exists_trace_for_any_i computation.no_initial_process_snapshot computation_axioms take0 tr_init) next case (Suc n) then have IH: "\p. ~ has_snapshotted (S t n) p" by auto then obtain tr where "trace (S t n) tr (S t (Suc n))" "tr = take (Suc n - n) (drop n t)" using assms exists_trace_for_any_i_j le_Suc_eq by blast show "\p. ~ has_snapshotted (S t (Suc n)) p" proof (cases "tr = []") case True then show ?thesis by (metis IH \trace (S t n) tr (S t (Suc n))\ tr_init trace_and_start_determines_end) next case False then obtain ev where step: "tr = [ev]" "(S t n) \ ev \ (S t (Suc n))" by (metis One_nat_def Suc_eq_plus1 Suc_leI \tr = take (Suc n - n) (drop n t)\ \trace (S t n) tr (S t (Suc n))\ add_diff_cancel_left' append.simps(1) butlast_take cancel_comm_monoid_add_class.diff_cancel length_greater_0_conv list.distinct(1) list.sel(3) snoc_eq_iff_butlast take0 take_Nil trace.cases) then show ?thesis using step Suc.hyps proof (cases ev) case (Snapshot q) then show ?thesis by (metis \\p i. Snapshot p = t ! i\ \tr = [ev]\ \tr = take (Suc n - n) (drop n t)\ append_Cons append_take_drop_id nth_append_length) next case (RecvMarker cid' q r) then have m: "Marker \ set (msgs (S t n) cid')" using RecvMarker_implies_Marker_in_set step by blast have "~ has_snapshotted (S t n) q" using Suc by auto then have "Marker \ set (msgs (S t n) cid')" proof - have "channel cid' = Some (r, q)" using step can_occur_def RecvMarker by auto then show ?thesis using IH assms no_marker_if_no_snapshot by blast qed then show ?thesis using m by auto qed auto qed qed qed obtain j p where "has_snapshotted (S t j) p" using l2 assms by blast then show False using \\i p. \ has_snapshotted (S t i) p\ by blast qed lemma recv_marker_means_snapshotted: assumes "trace init t final" and "ev = RecvMarker cid p q" and "(S t i) \ ev \ (S t (Suc i))" shows "has_snapshotted (S t i) q" proof - have "Marker = hd (msgs (S t i) cid) \ length (msgs (S t i) cid) > 0" proof - have "Marker # msgs (S t (Suc i)) cid = msgs (S t i) cid" using assms(2) assms(3) next_recv_marker by blast then show ?thesis by (metis length_greater_0_conv list.discI list.sel(1)) qed then have "Marker \ set (msgs (S t i) cid)" using hd_in_set by fastforce then show "has_snapshotted (S t i) q" proof - have "channel cid = Some (q, p)" using assms can_occur_def by auto then show ?thesis using \Marker \ set (msgs (S t i) cid)\ assms(1) no_marker_if_no_snapshot by blast qed qed lemma recv_marker_means_cs_Done: assumes "trace init t final" and "t ! i = RecvMarker cid p q" and "i < length t" shows "snd (cs (S t (i+1)) cid) = Done" proof - have "(S t i) \ (t ! 
i) \ (S t (i+1))" using assms(1) assms(3) step_Suc by auto then show ?thesis by (simp add: assms(2)) qed lemma snapshot_produces_marker: assumes "trace init t final" and "~ has_snapshotted (S t i) p" and "has_snapshotted (S t (Suc i)) p" and "channel cid = Some (p, q)" shows "Marker : set (msgs (S t (Suc i)) cid) \ has_snapshotted (S t i) q" proof - obtain ev where ex_ev: "(S t i) \ ev \ (S t (Suc i))" by (metis append_Nil2 append_take_drop_id assms(1) assms(2) assms(3) distributed_system.step_Suc distributed_system_axioms drop_eq_Nil less_Suc_eq_le nat_le_linear not_less_eq s_def) then have "occurs_on ev = p" using assms(2) assms(3) no_state_change_if_no_event by force then show ?thesis using assms ex_ev proof (cases ev) case (Snapshot r) then have "Marker \ set (msgs (S t (Suc i)) cid)" using ex_ev assms(2) assms(3) assms(4) by fastforce then show ?thesis by simp next case (RecvMarker cid' r s) have "r = p" using \occurs_on ev = p\ by (simp add: RecvMarker) then show ?thesis proof (cases "cid = cid'") case True then have "has_snapshotted (S t i) q" using RecvMarker RecvMarker_implies_Marker_in_set assms(1) assms(2) assms(4) ex_ev no_marker_if_no_snapshot by blast then show ?thesis by simp next case False then have "\s. channel cid = Some (r, s)" using RecvMarker assms can_occur_def \r = p\ by simp then have "msgs (S t (Suc i)) cid = msgs (S t i) cid @ [Marker]" using RecvMarker assms ex_ev \r = p\ False by simp then show ?thesis by simp qed qed auto qed lemma exists_snapshot_for_all_p: assumes "trace init t final" shows "\i. ~ has_snapshotted (S t i) p \ has_snapshotted (S t (Suc i)) p" (is ?Q) proof - obtain i where "has_snapshotted (S t i) p" using l2 assms by blast let ?j = "LEAST j. has_snapshotted (S t j) p" have "?j \ 0" proof - have "~ has_snapshotted (S t 0) p" by (metis exists_trace_for_any_i list.discI no_initial_process_snapshot s_def take_eq_Nil trace.simps) then show ?thesis by (metis (mono_tags, lifting) \has_snapshotted (S t i) p\ wellorder_Least_lemma(1)) qed have "?j \ i" by (meson Least_le \has_snapshotted (S t i) p\) have "\ has_snapshotted (S t (?j - 1)) p" (is ?P) proof (rule ccontr) assume "\ ?P" then have "has_snapshotted (S t (?j - 1)) p" by simp then have "\j. j < ?j \ has_snapshotted (S t j) p" by (metis One_nat_def \(LEAST j. ps (S t j) p \ None) \ 0\ diff_less lessI not_gr_zero) then show False using not_less_Least by blast qed show ?thesis proof (rule ccontr) assume "\ ?Q" have "\i. \ has_snapshotted (S t i) p" proof (rule allI) fix i' show "\ has_snapshotted (S t i') p" proof (induct i') case 0 then show ?case using \(LEAST j. ps (S t j) p \ None) \ 0\ by force next case (Suc i'') then show ?case using \\i. \ ps (S t i) p \ None \ ps (S t (Suc i)) p \ None\ by blast qed qed then show False using \ps (S t i) p \ None\ by blast qed qed lemma all_processes_snapshotted_in_final_state: assumes "trace init t final" shows "has_snapshotted final p" proof - obtain i where "has_snapshotted (S t i) p \ i \ length t" using assms l2 by blast moreover have "final = (S t (length t))" by (metis (no_types, lifting) assms exists_trace_for_any_i le_Suc_eq length_Cons take_Nil take_all trace.simps trace_and_start_determines_end) ultimately show ?thesis using assms exists_trace_for_any_i_j snapshot_stable by blast qed definition next_marker_free_state where "next_marker_free_state t i cid = (LEAST j. j \ i \ Marker \ set (msgs (S t j) cid))" lemma exists_next_marker_free_state: assumes "channel cid = Some (p, q)" "trace init t final" shows "\!j. 
next_marker_free_state t i cid = j \ j \ i \ Marker \ set (msgs (S t j) cid)" proof (cases "Marker \ set (msgs (S t i) cid)") case False then have "next_marker_free_state t i cid = i" unfolding next_marker_free_state_def by (metis (no_types, lifting) Least_equality order_refl) then show ?thesis using False assms by blast next case True then obtain j where "j \ i" "Marker \ set (msgs (S t j) cid)" using l1 assms by blast then show ?thesis by (metis (no_types, lifting) LeastI_ex next_marker_free_state_def) qed theorem snapshot_algorithm_must_terminate: assumes "trace init t final" shows "\phi. ((\p. has_snapshotted (S t phi) p) \ (\cid. Marker \ set (msgs (S t phi) cid)))" proof - let ?i = "{i. i \ length t \ (\p. has_snapshotted (S t i) p)}" have fin_i: "finite ?i" by auto moreover have "?i \ empty" proof - have "\p. has_snapshotted (S t (length t)) p" by (meson assms exists_trace_for_any_i_j l2 snapshot_stable_2) then show ?thesis by blast qed then obtain i where asm: "\p. has_snapshotted (S t i) p" by blast have f: "\j. j \ i \ (\p. has_snapshotted (S t j) p)" using snapshot_stable asm exists_trace_for_any_i_j valid assms by blast let ?s = "(\cid. (next_marker_free_state t i cid)) ` { cid. channel cid \ None }" have "?s \ empty" using exists_some_channel by auto have fin_s: "finite ?s" using finite_channels by simp let ?phi = "Max ?s" have "?phi \ i" proof (rule ccontr) assume asm: "\ ?phi \ i" obtain cid p q where g: "channel cid = Some (p, q)" using exists_some_channel by auto then have "next_marker_free_state t i cid \ i" using exists_next_marker_free_state assms by blast then have "Max ?s \ i" using Max_ge_iff g fin_s by fast then show False using asm by simp qed then have "\cid. Marker \ set (msgs (S t ?phi) cid)" proof - fix cid show "Marker \ set (msgs (S t ?phi) cid)" proof (cases "Marker : set (msgs (S t i) cid)") case False then show ?thesis using \i \ Max ?s\ asm assms exists_trace_for_any_i_j no_markers_if_all_snapshotted by blast next case True then have cpq: "channel cid \ None" using no_messages_if_no_channel assms by fastforce then obtain p q where chan: "channel cid = Some (p, q)" by auto then obtain j where i: "j = next_marker_free_state t i cid" "Marker \ set (msgs (S t j) cid)" using exists_next_marker_free_state assms by fast have "j \ ?phi" using cpq fin_s i(1) pair_imageI by simp then show "Marker \ set (msgs (S t ?phi) cid)" proof - have "trace (S t j) (take (?phi - j) (drop j t)) (S t ?phi)" using \j \ ?phi\ assms exists_trace_for_any_i_j by blast moreover have "\p. has_snapshotted (S t j) p" by (metis assms chan f computation.exists_next_marker_free_state computation_axioms i(1)) ultimately show ?thesis using i(2) no_markers_if_all_snapshotted by blast qed qed qed thus ?thesis using f \?phi \ i\ by blast qed subsection \Correctness\ text \The greatest part of this work is spent on the correctness of the Chandy-Lamport algorithm. 
We prove that the snapshot is consistent, i.e.\ there exists a permutation $t'$ of the trace $t$ and an intermediate configuration $c'$ of $t'$ such that the configuration recorded in the snapshot corresponds to the snapshot taken during execution of $t$, which is given as Theorem 1 in~\cite{chandy}.\ lemma snapshot_stable_ver_2: shows "trace init t final \ has_snapshotted (S t i) p \ j \ i \ has_snapshotted (S t j) p" using exists_trace_for_any_i_j snapshot_stable by blast lemma snapshot_stable_ver_3: shows "trace init t final \ ~ has_snapshotted (S t i) p \ i \ j \ ~ has_snapshotted (S t j) p" using snapshot_stable_ver_2 by blast lemma marker_must_stay_if_no_snapshot: assumes "trace init t final" and "has_snapshotted (S t i) p" and "~ has_snapshotted (S t i) q" and "channel cid = Some (p, q)" shows "Marker : set (msgs (S t i) cid)" proof - obtain j where "~ has_snapshotted (S t j) p \ has_snapshotted (S t (Suc j)) p" using exists_snapshot_for_all_p assms by blast have "j \ i" proof (rule ccontr) assume asm: "~ j \ i" then have "~ has_snapshotted (S t i) p" using \\ has_snapshotted (S t j) p \ has_snapshotted (S t (Suc j)) p\ assms(1) less_imp_le_nat snapshot_stable_ver_3 by (meson nat_le_linear) then show False using assms(2) by simp qed have "i \ length t" proof (rule ccontr) assume "~ i \ length t" then have "i > length t" using not_less by blast obtain i' where a: "\p. has_snapshotted (S t i') p" using assms snapshot_algorithm_must_terminate by blast have "i' \ i" using \\p. has_snapshotted (S t i') p\ assms(1) assms(3) nat_le_linear snapshot_stable_ver_3 by blast have "(S t i') \ (S t i)" using assms a by force then have "i \ length t" using \i \ i'\ assms(1) computation.no_change_if_ge_length_t computation_axioms nat_le_linear by fastforce then show False using \~ i \ length t\ by simp qed have marker_in_set: "Marker : set (msgs (S t (Suc j)) cid)" using \\ has_snapshotted (S t j) p \ has_snapshotted (S t (Suc j)) p\ \j \ i\ assms(1) assms(3) assms(4) snapshot_produces_marker snapshot_stable_ver_3 by blast show ?thesis proof (rule ccontr) assume asm: "Marker \ set (msgs (S t i) cid)" then have range: "(Suc j) < i" by (metis Suc_lessI \\ ps (S t j) p \ None \ ps (S t (Suc j)) p \ None\ \j \ i\ assms(2) marker_in_set order.order_iff_strict) let ?k = "LEAST k. k \ (Suc j) \ Marker \ set (msgs (S t k) cid)" have range_k: "(Suc j) < ?k \ ?k \ i" proof - have "j < (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid))" by (metis (full_types) Suc_le_eq assms(1) assms(4) exists_next_marker_free_state next_marker_free_state_def) then show ?thesis proof - assume a1: "j < (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid))" have "j < i" using local.range by linarith (* 4 ms *) then have "(Suc j \ i \ Marker \ set (msgs (S t i) cid)) \ (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) \ Suc j" by (metis (lifting) Suc_leI asm marker_in_set wellorder_Least_lemma(1)) (* 64 ms *) then show ?thesis using a1 by (simp add: wellorder_Least_lemma(2)) (* 16 ms *) qed qed have a: "Marker : set (msgs (S t (?k-1)) cid)" proof - obtain nn :: "nat \ nat \ nat" where "\x0 x1. (\v2. x0 = Suc (x1 + v2)) = (x0 = Suc (x1 + nn x0 x1))" by moura then have f1: "(LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) = Suc (Suc j + nn (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) (Suc j))" using \Suc j < (LEAST k. Suc j \ k \ Marker \ set (msgs (S t k) cid)) \ (LEAST k. Suc j \ k \ Marker \ set (msgs (S t k) cid)) \ i\ less_iff_Suc_add by fastforce have f2: "Suc j \ Suc j + nn (LEAST n. 
Suc j \ n \ Marker \ set (msgs (S t n) cid)) (Suc j)" by simp have f3: "\p n. \ p (n::nat) \ Least p \ n" by (meson wellorder_Least_lemma(2)) have "\ (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) \ Suc j + nn (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) (Suc j)" using f1 by linarith then have f4: "\ (Suc j \ Suc j + nn (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) (Suc j) \ Marker \ set (msgs (S t (Suc j + nn (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) (Suc j))) cid))" using f3 by force have "Suc j + nn (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) (Suc j) = (LEAST n. Suc j \ n \ Marker \ set (msgs (S t n) cid)) - 1" using f1 by linarith then show ?thesis using f4 f2 by presburger qed have b: "Marker \ set (msgs (S t ?k) cid)" using assms(1) assms(4) exists_next_marker_free_state next_marker_free_state_def by fastforce have "?k - 1 < i" using range_k by auto then obtain ev where step: "(S t (?k-1)) \ ev \ (S t (Suc (?k-1)))" by (meson Suc_le_eq \i \ length t\ assms(1) le_trans step_Suc) then show False using a assms(1) assms(3) assms(4) b computation.snapshot_stable_ver_3 computation_axioms less_iff_Suc_add range_k recv_marker_means_snapshotted_2 by fastforce qed qed subsubsection \Pre- and postrecording events\ definition prerecording_event: "prerecording_event t i \ i < length t \ regular_event (t ! i) \ ~ has_snapshotted (S t i) (occurs_on (t ! i))" definition postrecording_event: "postrecording_event t i \ i < length t \ regular_event (t ! i) \ has_snapshotted (S t i) (occurs_on (t ! i))" abbreviation neighboring where "neighboring t i j \ i < j \ j < length t \ regular_event (t ! i) \ regular_event (t ! j) \ (\k. i < k \ k < j \ ~ regular_event (t ! k))" lemma pre_if_regular_and_not_post: assumes "regular_event (t ! i)" and "~ postrecording_event t i" and "i < length t" shows "prerecording_event t i" using assms computation.postrecording_event computation_axioms prerecording_event by metis lemma post_if_regular_and_not_pre: assumes "regular_event (t ! i)" and "~ prerecording_event t i" and "i < length t" shows "postrecording_event t i" using assms computation.postrecording_event computation_axioms prerecording_event by metis lemma post_before_pre_different_processes: assumes "i < j" and "j < length t" and neighboring: "\k. (i < k \ k < j) \ ~ regular_event (t ! k)" and post_ei: "postrecording_event t i" and pre_ej: "prerecording_event t j" and valid: "trace init t final" shows "occurs_on (t ! i) \ occurs_on (t ! j)" proof - let ?p = "occurs_on (t ! i)" let ?q = "occurs_on (t ! j)" have sp: "has_snapshotted (S t i) ?p" using assms postrecording_event prerecording_event by blast have nsq: "~ has_snapshotted (S t j) ?q" using assms postrecording_event prerecording_event by blast show "?p \ ?q" proof - have "~ has_snapshotted (S t i) ?q" proof (rule ccontr) assume sq: "~ ~ has_snapshotted (S t i) ?q" from \i < j\ have "i \ j" using less_imp_le by blast then obtain tr where ex_trace: "trace (S t i) tr (S t j)" using exists_trace_for_any_i_j valid by blast then have "has_snapshotted (S t j) ?q" using ex_trace snapshot_stable sq by blast then show False using nsq by simp qed then show ?thesis using sp by auto qed qed lemma post_before_pre_neighbors: assumes "i < j" and "j < length t" and neighboring: "\k. (i < k \ k < j) \ ~ regular_event (t ! k)" and post_ei: "postrecording_event t i" and pre_ej: "prerecording_event t j" and valid: "trace init t final" shows "Ball (set (take (j - (i+1)) (drop (i+1) t))) (%ev. 
~ regular_event ev \ ~ occurs_on ev = occurs_on (t ! j))" proof - let ?p = "occurs_on (t ! i)" let ?q = "occurs_on (t ! j)" let ?between = "take (j - (i+1)) (drop (i+1) t)" show ?thesis proof (unfold Ball_def, rule allI, rule impI) fix ev assume "ev : set ?between" have len_nr: "length ?between = (j - (i+1))" using assms(2) by auto then obtain l where "?between ! l = ev" and range_l: "0 \ l \ l < (j - (i+1))" by (metis \ev \ set (take (j - (i + 1)) (drop (i + 1) t))\ gr_zeroI in_set_conv_nth le_numeral_extra(3) less_le) let ?k = "l + (i+1)" have "?between ! l = (t ! ?k)" proof - have "j < length t" by (metis assms(2)) then show ?thesis by (metis (no_types) Suc_eq_plus1 Suc_leI add.commute assms(1) drop_take length_take less_diff_conv less_imp_le_nat min.absorb2 nth_drop nth_take range_l) qed have "~ regular_event ev" by (metis (no_types, lifting) assms(3) range_l One_nat_def Suc_eq_plus1 \take (j - (i + 1)) (drop (i + 1) t) ! l = ev\ \take (j - (i + 1)) (drop (i + 1) t) ! l = t ! (l + (i + 1))\ add.left_commute add_lessD1 lessI less_add_same_cancel2 less_diff_conv order_le_less) have step_ev: "(S t ?k) \ ev \ (S t (?k+1))" proof - have "j \ length t" by (metis assms(2) less_or_eq_imp_le) then have "l + (i + 1) < length t" by (meson less_diff_conv less_le_trans range_l) then show ?thesis by (metis (no_types) Suc_eq_plus1 \take (j - (i + 1)) (drop (i + 1) t) ! l = ev\ \take (j - (i + 1)) (drop (i + 1) t) ! l = t ! (l + (i + 1))\ distributed_system.step_Suc distributed_system_axioms valid) qed obtain cid s r where f: "ev = RecvMarker cid s r \ ev = Snapshot r" using \~ regular_event ev\ by (meson isRecvMarker_def isSnapshot_def nonregular_event) from f have "occurs_on ev \ ?q" proof (elim disjE) assume snapshot: "ev = Snapshot r" show ?thesis proof (rule ccontr) assume occurs_on_q: "~ occurs_on ev \ ?q" then have "?q = r" using snapshot by auto then have q_snapshotted: "has_snapshotted (S t (?k+1)) ?q" using snapshot step_ev by auto then show False proof - have "l + (i + 1) < j" by (meson less_diff_conv range_l) then show ?thesis by (metis (no_types) Suc_eq_plus1 Suc_le_eq computation.snapshot_stable_ver_2 computation_axioms pre_ej prerecording_event q_snapshotted valid) qed qed next assume RecvMarker: "ev = RecvMarker cid s r" show ?thesis proof (rule ccontr) assume occurs_on_q: "~ occurs_on ev \ ?q" then have "s = ?q" using RecvMarker by auto then have q_snapshotted: "has_snapshotted (S t (?k+1)) ?q" proof (cases "has_snapshotted (S t ?k) ?q") case True then show ?thesis using snapshot_stable_ver_2 step_Suc step_ev valid by auto next case False then show "has_snapshotted (S t (?k+1)) ?q" using \s = ?q\ next_recv_marker RecvMarker step_ev by auto qed then show False proof - have "l + (i + 1) < j" using less_diff_conv range_l by blast then show ?thesis by (metis (no_types) Suc_eq_plus1 Suc_le_eq computation.snapshot_stable_ver_2 computation_axioms pre_ej prerecording_event q_snapshotted valid) qed qed qed then show "\ regular_event ev \ occurs_on ev \ ?q" using \~ regular_event ev\ by simp qed qed lemma can_swap_neighboring_pre_and_postrecording_events: assumes "i < j" and "j < length t" and "occurs_on (t ! i) = p" and "occurs_on (t ! j) = q" and neighboring: "\k. (i < k \ k < j) \ ~ regular_event (t ! k)" and post_ei: "postrecording_event t i" and pre_ej: "prerecording_event t j" and valid: "trace init t final" shows "can_occur (t ! 
j) (S t i)" proof - have "p \ q" using post_before_pre_different_processes assms by auto have sp: "has_snapshotted (S t i) p" using assms(3) post_ei postrecording_event prerecording_event by blast have nsq: "~ has_snapshotted (S t j) q" using assms(4) pre_ej prerecording_event by auto let ?nr = "take (j - (Suc i)) (drop (Suc i) t)" have valid_subtrace: "trace (S t (Suc i)) ?nr (S t j)" using assms(1) exists_trace_for_any_i_j valid by fastforce have "Ball (set ?nr) (%ev. ~ occurs_on ev = q \ ~ regular_event ev)" proof - have "?nr = take (j - (i+1)) (drop (i+1) t)" by auto then show ?thesis by (metis assms(1) assms(2) assms(4) neighboring post_ei pre_ej valid post_before_pre_neighbors) qed then have la: "list_all (%ev. ~ occurs_on ev = q) ?nr" by (meson list_all_length nth_mem) have tj_to_tSi: "can_occur (t ! j) (S t (Suc i))" proof - have "list_all (%ev. ~ isSend ev) ?nr" proof - have "list_all (%ev. ~ regular_event ev) ?nr" using \\ev\set (take (j - (Suc i)) (drop (Suc i) t)). occurs_on ev \ q \ \ regular_event ev\ \list_all (\ev. occurs_on ev \ q) (take (j - (Suc i)) (drop (Suc i) t))\ list.pred_mono_strong by fastforce then show ?thesis by (simp add: list.pred_mono_strong) qed moreover have "~ isRecvMarker (t ! j)" using prerecording_event assms by auto moreover have "can_occur (t ! j) (S t j)" proof - have "(S t j) \ (t ! j) \ (S t (Suc j))" using assms(2) step_Suc valid by auto then show ?thesis using happen_implies_can_occur by blast qed ultimately show "can_occur (t ! j) (S t (Suc i))" using assms(4) event_can_go_back_if_no_sender_trace valid_subtrace la by blast qed show "can_occur (t ! j) (S t i)" proof (cases "isSend (t ! i)") case False have "~ isRecvMarker (t ! j)" using assms prerecording_event by auto moreover have "~ isSend (t ! i)" using False by simp ultimately show ?thesis by (metis \p \ q\ assms(3) assms(4) event_can_go_back_if_no_sender post_ei postrecording_event step_Suc tj_to_tSi valid) next case True obtain cid s u u' m where Send: "t ! i = Send cid p s u u' m" by (metis True isSend_def assms(3) event.sel(2)) have chan: "channel cid = Some (p, s)" proof - have "can_occur (t ! i) (S t i)" by (meson computation.postrecording_event computation_axioms happen_implies_can_occur post_ei step_Suc valid) then show ?thesis using can_occur_def Send by simp qed have n: "(S t i) \ (t ! i) \ (S t (Suc i))" using assms(1) assms(2) step_Suc valid True by auto have st: "states (S t i) q = states (S t (Suc i)) q" using Send \p \ q\ n by auto have "isTrans (t ! j) \ isSend (t ! j) \ isRecv (t ! j)" using assms(7) computation.prerecording_event computation_axioms regular_event by blast then show ?thesis proof (elim disjE) assume "isTrans (t ! j)" then show ?thesis by (metis (no_types, lifting) tj_to_tSi st can_occur_def assms(4) event.case(1) event.collapse(1)) next assume "isSend (t ! j)" then obtain cid' s' u'' u''' m' where Send: "t ! j = Send cid' q s' u'' u''' m'" by (metis (no_types, lifting) assms(4) event.sel(2) isSend_def) have co_tSi: "can_occur (Send cid' q s' u'' u''' m') (S t (Suc i))" using Send tj_to_tSi by auto then have "channel cid' = Some (q, s') \ send cid' q s' u'' u''' m'" using Send can_occur_def by simp then show ?thesis using can_occur_def st Send assms co_tSi by auto next assume "isRecv (t ! j)" then obtain cid' s' u'' u''' m' where Recv: "t ! 
j = Recv cid' q s' u'' u''' m'" by (metis assms(4) event.sel(3) isRecv_def) have co_tSi: "can_occur (Recv cid' q s' u'' u''' m') (S t (Suc i))" using Recv tj_to_tSi by auto then have a: "channel cid' = Some (s', q) \ length (msgs (S t (Suc i)) cid') > 0 \ hd (msgs (S t (Suc i)) cid') = Msg m'" using can_occur_def co_tSi by fastforce show "can_occur (t ! j) (S t i)" proof (cases "cid = cid'") case False with Send n have "msgs (S t (Suc i)) cid' = msgs (S t i) cid'" by auto then have b: "length (msgs (S t i) cid') > 0 \ hd (msgs (S t i) cid') = Msg m'" using a by simp with can_occur_Recv co_tSi st a Recv show ?thesis unfolding can_occur_def by auto next case True (* This is the interesting case *) have stu: "states (S t i) q = u''" using can_occur_Recv co_tSi st by blast show ?thesis proof (rule ccontr) have marker_in_set: "Marker \ set (msgs (S t i) cid)" proof - have "(s', q) = (p, q)" using True a chan by auto then show ?thesis by (metis (no_types, lifting) True \p \ q\ a assms(3) marker_must_stay_if_no_snapshot n no_state_change_if_no_event nsq snapshot_stable_2 sp valid valid_subtrace) qed assume asm: "~ can_occur (t ! j) (S t i)" then show False proof (unfold can_occur_def, (auto simp add: marker_in_set True Recv stu)) assume "msgs (S t i) cid' = []" then show False using marker_in_set by (simp add: True) next assume "hd (msgs (S t i) cid') \ Msg m'" have "msgs (S t i) cid \ []" using marker_in_set by auto then have "msgs (S t (Suc i)) cid = msgs (S t i) cid @ [Msg m]" using Send True n chan by auto then have "hd (msgs (S t (Suc i)) cid) \ Msg m'" using True \hd (msgs (S t i) cid') \ Msg m'\ \msgs (S t i) cid \ []\ by auto then have "~ can_occur (t ! j) (S t (Suc i))" using True a by blast then show False using tj_to_tSi by blast next assume "~ recv cid' q s' u'' u''' m'" then show False using can_occur_Recv co_tSi by blast next assume "channel cid' \ Some (s', q)" then show False using can_occur_def tj_to_tSi Recv by simp qed qed qed qed qed qed subsubsection \Event swapping\ lemma swap_events: shows "\ i < j; j < length t; \k. (i < k \ k < j) \ ~ regular_event (t ! k); postrecording_event t i; prerecording_event t j; trace init t final \ \ trace init (swap_events i j t) final \ (\k. k \ j + 1 \ S (swap_events i j t) k = S t k) \ (\k. k \ i \ S (swap_events i j t) k = S t k) \ prerecording_event (swap_events i j t) i \ postrecording_event (swap_events i j t) (i+1) \ (\k. k > i+1 \ k < j+1 \ ~ regular_event ((swap_events i j t) ! k))" proof (induct "j - (i+1)" arbitrary: j t) case 0 let ?p = "occurs_on (t ! i)" let ?q = "occurs_on (t ! j)" have "j = (i+1)" using "0.prems" "0.hyps" by linarith let ?subt = "take (j - (i+1)) (drop (i+1) t)" have "t = take i t @ [t ! i] @ ?subt @ [t ! j] @ drop (j+1) t" proof - have "take (Suc i) t = take i t @ [t ! i]" using "0.prems"(2) \j = i + 1\ add_lessD1 take_Suc_conv_app_nth by blast then show ?thesis by (metis (no_types) "0.hyps" "0.prems"(2) Suc_eq_plus1 \j = i + 1\ append_assoc append_take_drop_id self_append_conv2 take_Suc_conv_app_nth take_eq_Nil) qed have sp: "has_snapshotted (S t i) ?p" using "0.prems" postrecording_event prerecording_event by blast have nsq: "~ has_snapshotted (S t j) ?q" using "0.prems" postrecording_event prerecording_event by blast have "?p \ ?q" using "0.prems" computation.post_before_pre_different_processes computation_axioms by blast have "?subt = Nil" by (simp add: \j = i + 1\) have reg_step_1: "(S t i) \ (t ! 
i) \ (S t j)" by (metis "0.prems"(2) "0.prems"(6) Suc_eq_plus1 \j = i + 1\ add_lessD1 step_Suc) have reg_step_2: "(S t j) \ (t ! j) \ (S t (j+1))" using "0.prems"(2) "0.prems"(6) step_Suc by auto have "can_occur (t ! j) (S t i)" using "0.prems" can_swap_neighboring_pre_and_postrecording_events by blast then obtain d' where new_step1: "(S t i) \ (t ! j) \ d'" using exists_next_if_can_occur by blast have st: "states d' ?p = states (S t i) ?p" using \(S t i) \ t ! j \ d'\ \occurs_on (t ! i) \ occurs_on (t ! j)\ no_state_change_if_no_event by auto then have "can_occur (t ! i) d'" using \occurs_on (t ! i) \ occurs_on (t ! j)\ event_stays_valid_if_no_occurrence happen_implies_can_occur new_step1 reg_step_1 by auto then obtain e where new_step2: "d' \ (t ! i) \ e" using exists_next_if_can_occur by blast have "states e = states (S t (j+1))" proof (rule ext) fix p show "states e p = states (S t (j+1)) p" proof (cases "p = ?p \ p = ?q") case True then show ?thesis proof (elim disjE) assume "p = ?p" then have "states d' p = states (S t i) p" by (simp add: st) thm same_state_implies_same_result_state then have "states e p = states (S t j) p" using "0.prems"(2) "0.prems"(6) new_step2 reg_step_1 by (blast intro:same_state_implies_same_result_state[symmetric]) moreover have "states (S t j) p = states (S t (j+1)) p" using \occurs_on (t ! i) \ occurs_on (t ! j)\ \p = occurs_on (t ! i)\ no_state_change_if_no_event reg_step_2 by auto ultimately show ?thesis by simp next assume "p = ?q" then have "states (S t j) p = states (S t i) p" using reg_step_1 \occurs_on (t ! i) \ occurs_on (t ! j)\ no_state_change_if_no_event by auto then have "states d' p = states (S t (j+1)) p" using "0.prems"(5) prerecording_event computation_axioms new_step1 reg_step_2 same_state_implies_same_result_state by blast moreover have "states e p = states (S t (j+1)) p" using \occurs_on (t ! i) \ occurs_on (t ! j)\ \p = occurs_on (t ! j)\ calculation new_step2 no_state_change_if_no_event by auto ultimately show ?thesis by simp qed next case False then have "states (S t i) p = states (S t j) p" using no_state_change_if_no_event reg_step_1 by auto moreover have "... = states (S t (j+1)) p" using False no_state_change_if_no_event reg_step_2 by auto moreover have "... = states d' p" using False calculation new_step1 no_state_change_if_no_event by auto moreover have "... = states e p" using False new_step2 no_state_change_if_no_event by auto ultimately show ?thesis by simp qed qed moreover have "msgs e = msgs (S t (j+1))" proof (rule ext) fix cid have "isTrans (t ! i) \ isSend (t ! i) \ isRecv (t ! i)" using "0.prems"(4) computation.postrecording_event computation_axioms regular_event by blast moreover have "isTrans (t ! j) \ isSend (t ! j) \ isRecv (t ! j)" using "0.prems"(5) computation.prerecording_event computation_axioms regular_event by blast ultimately show "msgs e cid = msgs (S t (j+1)) cid" proof (elim disjE, goal_cases) case 1 then have "msgs d' cid = msgs (S t j) cid" by (metis Trans_msg new_step1 reg_step_1) then show ?thesis using Trans_msg \isTrans (t ! i)\ \isTrans (t ! j)\ new_step2 reg_step_2 by auto next case 2 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 swap_msgs_Trans_Send by auto next case 3 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 swap_msgs_Trans_Recv by auto next case 4 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! 
j)\ new_step1 new_step2 reg_step_1 reg_step_2 swap_msgs_Send_Trans by auto next case 5 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 swap_msgs_Recv_Trans by auto next case 6 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 by (blast intro:swap_msgs_Send_Send[symmetric]) next case 7 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 swap_msgs_Send_Recv by auto next case 8 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 swap_msgs_Send_Recv by simp next case 9 then show ?thesis using \occurs_on (t ! i) \ occurs_on (t ! j)\ new_step1 new_step2 reg_step_1 reg_step_2 by (blast intro:swap_msgs_Recv_Recv[symmetric]) qed qed moreover have "process_snapshot e = process_snapshot (S t (j+1))" proof (rule ext) fix p have "process_snapshot d' p = process_snapshot (S t j) p" by (metis "0.prems"(4) "0.prems"(5) computation.postrecording_event computation.prerecording_event computation_axioms new_step1 reg_step_1 regular_event_preserves_process_snapshots) then show "process_snapshot e p = process_snapshot (S t (j+1)) p" by (metis "0.prems"(4) "0.prems"(5) computation.postrecording_event computation.prerecording_event computation_axioms new_step2 reg_step_2 regular_event_preserves_process_snapshots) qed moreover have "channel_snapshot e = channel_snapshot (S t (j+1))" proof (rule ext) fix cid show "cs e cid = cs (S t (j+1)) cid" proof (cases "isRecv (t ! i)"; cases "isRecv (t ! j)", goal_cases) case 1 then show ?thesis using \?p \ ?q\ new_step1 new_step2 reg_step_1 reg_step_2 by (blast intro:regular_event_implies_same_channel_snapshot_Recv_Recv[symmetric]) next case 2 moreover have "regular_event (t ! j)" using prerecording_event 0 by simp ultimately show ?thesis using \?p \ ?q\ new_step1 new_step2 reg_step_1 reg_step_2 regular_event_implies_same_channel_snapshot_Recv by auto next assume 3: "~ isRecv (t ! i)" "isRecv (t ! j)" moreover have "regular_event (t ! i)" using postrecording_event 0 by simp ultimately show ?thesis using \?p \ ?q\ new_step1 new_step2 reg_step_1 reg_step_2 regular_event_implies_same_channel_snapshot_Recv by auto next assume 4: "~ isRecv (t ! i)" "~ isRecv (t ! j)" moreover have "regular_event (t ! j)" using prerecording_event 0 by simp moreover have "regular_event (t ! i)" using postrecording_event 0 by simp ultimately show ?thesis using \?p \ ?q\ new_step1 new_step2 reg_step_1 reg_step_2 by (metis no_cs_change_if_no_event) qed qed ultimately have "e = S t (j+1)" by simp then have "(S t i) \ (t ! j) \ d' \ d' \ (t ! i) \ (S t (j+1))" using new_step1 new_step2 by blast then have swap: "trace (S t i) [t ! j, t ! i] (S t (j+1))" by (meson trace.simps) have "take (j-1) t @ [t ! j, t ! i] = ((take (j+1) t)[i := t ! j])[j := t ! i]" proof - have "i = j - 1" by (simp add: \j = i + 1\) show ?thesis proof (subst (1 2 3) \i = j - 1\) have "j < length t" using "0.prems" by auto then have "take (j - 1) t @ [t ! j, t ! (j - 1)] @ drop (j + 1) t = t[j - 1 := t ! j, j := t ! (j - 1)]" by (metis Suc_eq_plus1 \i = j - 1\ \j = i + 1\ add_Suc_right arith_special(3) swap_neighbors) then show "take (j - 1) t @ [t ! j, t ! (j - 1)] = (take (j+1) t)[j - 1 := t ! j, j := t ! (j - 1)]" proof - assume a1: "take (j - 1) t @ [t ! j, t ! (j - 1)] @ drop (j + 1) t = t [j - 1 := t ! j, j := t ! (j - 1)]" have f2: "t[j - 1 := t ! j, j := t ! (j - 1)] = take j (t[j - 1 := t ! j]) @ t ! 
(j - 1) # drop (Suc j) (t[j - 1 := t ! j])" by (metis (no_types) "0.prems"(2) length_list_update upd_conv_take_nth_drop) (* 32 ms *) have f3: "\n na. \ n < na \ Suc n \ na" using Suc_leI by blast (* 0.0 ms *) then have "min (length t) (j + 1) = j + 1" by (metis (no_types) "0.prems"(2) Suc_eq_plus1 min.absorb2) (* 16 ms *) then have f4: "length ((take (j + 1) t)[j - 1 := t ! j]) = j + 1" by simp (* 4 ms *) have f5: "j + 1 \ length (t[j - 1 := t ! j])" using f3 by (metis (no_types) "0.prems"(2) Suc_eq_plus1 length_list_update) (* 8 ms *) have "Suc j \ j + 1" by linarith (* 0.0 ms *) then have "(take (j + 1) (t[j - 1 := t ! j]))[j := t ! (j - 1)] = take j (t[j - 1 := t ! j]) @ t ! (j - 1) # [] @ []" using f5 f4 by (metis (no_types) Suc_eq_plus1 add_diff_cancel_right' butlast_conv_take butlast_take drop_eq_Nil lessI self_append_conv2 take_update_swap upd_conv_take_nth_drop) (* 180 ms *) then show ?thesis using f2 a1 by (simp add: take_update_swap) (* 120 ms *) qed qed qed have s: "trace init (take i t) (S t i)" using "0.prems"(6) exists_trace_for_any_i by blast have e: "trace (S t (j+1)) (take (length t - (j+1)) (drop (j+1) t)) final" proof - have "trace init (take (length t) t) final" by (simp add: "0.prems"(6)) then show ?thesis by (metis "0.prems"(2) Suc_eq_plus1 Suc_leI exists_trace_for_any_i exists_trace_for_any_i_j nat_le_linear take_all trace_and_start_determines_end) qed have "trace init (take i t @ [t ! j] @ [t ! i] @ drop (j+1) t) final" proof - from s swap have "trace init (take i t @ [t ! j,t ! i]) (S t (j+1))" using trace_trans by blast then have "trace init (take i t @ [t ! j, t ! i] @ (take (length t - (j+1)) (drop (j+1) t))) final" using e trace_trans by fastforce moreover have "take (length t - (j+1)) (drop (j+1) t) = drop (j+1) t" by simp ultimately show ?thesis by simp qed moreover have "take i t @ [t ! j] @ [t ! i] @ drop (j+1) t = (t[i := t ! j])[j := t ! i]" proof - have "length (take i t @ [t ! j] @ [t ! i] @ drop (j+1) t) = length ((t[i := t ! j])[j := t ! i])" by (metis (mono_tags, lifting) \t = take i t @ [t ! i] @ take (j - (i + 1)) (drop (i + 1) t) @ [t ! j] @ drop (j + 1) t\ \take (j - (i + 1)) (drop (i + 1) t) = []\ length_append length_list_update list.size(4) self_append_conv2) moreover have "\k. k < length ((t[i := t ! j])[j := t ! i]) \ (take i t @ [t ! j] @ [t ! i] @ drop (j+1) t) ! k = ((t[i := t ! j])[j := t ! i]) ! k" proof - fix k assume "k < length ((t[i := t ! j])[j := t ! i])" show "(take i t @ [t ! j] @ [t ! i] @ drop (j+1) t) ! k = ((t[i := t ! j])[j := t ! i]) ! k" proof (cases "k = i \ k = j") case True then show ?thesis proof (elim disjE) assume "k = i" then show ?thesis by (metis (no_types, lifting) \k < length (t[i := t ! j, j := t ! i])\ append_Cons le_eq_less_or_eq length_list_update length_take min.absorb2 nth_append_length nth_list_update_eq nth_list_update_neq) next assume "k = j" then show ?thesis by (metis (no_types, lifting) "0.prems"(4) Suc_eq_plus1 \j = i + 1\ \k < length (t[i := t ! j, j := t ! i])\ append.assoc append_Cons le_eq_less_or_eq length_append_singleton length_list_update length_take min.absorb2 nth_append_length nth_list_update postrecording_event) qed next case knij: False then show ?thesis proof (cases "k < i") case True then show ?thesis by (metis (no_types, lifting) "0.prems"(2) \j = i + 1\ add_lessD1 length_take less_imp_le_nat min.absorb2 not_less nth_append nth_list_update_neq nth_take) next case False then have "k > j" using \j = i + 1\ knij by linarith then have "(take i t @ [t ! j] @ [t ! i] @ drop (j+1) t) ! 
k = drop (j+1) t ! (k-(j+1))" proof - assume a1: "j < k" have f2: "\n na. ((n::nat) < na) = (n \ na \ n \ na)" using nat_less_le by blast (* 0.0 ms *) have f3: "i + 0 = min (length t) i + (0 + 0)" using "0.prems"(2) \j = i + 1\ by linarith (* 8 ms *) have f4: "min (length t) i + Suc (0 + 0) = length (take i t) + length [t ! j]" by force (* 4 ms *) have f5: "take i t @ [t ! j] @ [] = take i t @ [t ! j]" by auto (* 0.0 ms *) have "j = length (take i t @ [t ! j] @ [])" using f3 by (simp add: \j = i + 1\) (* 4 ms *) then have "j + 1 = length (take i t @ [t ! j] @ [t ! i])" by fastforce (* 4 ms *) then show ?thesis using f5 f4 f3 f2 a1 by (metis (no_types) One_nat_def \j = i + 1\ add_Suc_right append.assoc length_append less_antisym list.size(3) not_less nth_append) (* 284 ms *) qed moreover have "(t[i := t ! j])[j := t ! i] ! k = drop (j+1) ((t[i := t ! j])[j := t ! i]) ! (k-(j+1))" using "0.prems"(2) \j < k\ by auto moreover have "drop (j+1) ((t[i := t ! j])[j := t ! i]) = drop (j+1) t" using "0.prems"(1) by auto ultimately show ?thesis by simp qed qed qed ultimately show ?thesis by (simp add: list_eq_iff_nth_eq) qed moreover have "\k. k \ j + 1 \ S t k = S ((t[i := t ! j])[j := t ! i]) k" proof (rule allI, rule impI) fix k assume "k \ j + 1" let ?newt = "((t[i := t ! j])[j := t ! i])" have "trace init (take k ?newt) (S ?newt k)" using calculation(1) calculation(2) exists_trace_for_any_i by auto have "take k ?newt = take (j+1) ?newt @ take (k - (j+1)) (drop (j+1) ?newt)" by (metis \j + 1 \ k\ le_add_diff_inverse take_add) have same_traces: "drop (j+1) t = drop (j+1) ?newt" by (metis "0.prems"(1) Suc_eq_plus1 \j = i + 1\ drop_update_cancel less_SucI less_add_same_cancel1) have "trace init (take (j+1) ((t[i := t ! j])[j := t ! i])) (S t (j+1))" by (metis (no_types, lifting) \j = i + 1\ \take (j - 1) t @ [t ! j, t ! i] = (take (j + 1) t)[i := t ! j, j := t ! i]\ add_diff_cancel_right' local.swap s take_update_swap trace_trans) moreover have "trace init (take (j+1) ?newt) (S ?newt (j+1))" using \take i t @ [t ! j] @ [t ! i] @ drop (j + 1) t = t[i := t ! j, j := t ! i]\ \trace init (take i t @ [t ! j] @ [t ! i] @ drop (j + 1) t) final\ exists_trace_for_any_i by auto ultimately have "S ?newt (j+1) = S t (j+1)" using trace_and_start_determines_end by blast have "trace (S t (j+1)) (take (k - (j+1)) (drop (j+1) t)) (S t k)" using "0.prems"(6) \j + 1 \ k\ exists_trace_for_any_i_j by blast moreover have "trace (S ?newt (j+1)) (take (k - (j+1)) (drop (j+1) ?newt)) (S ?newt k)" using \j + 1 \ k\ \take i t @ [t ! j] @ [t ! i] @ drop (j + 1) t = t[i := t ! j, j := t ! i]\ \trace init (take i t @ [t ! j] @ [t ! i] @ drop (j + 1) t) final\ exists_trace_for_any_i_j by fastforce ultimately show "S t k = S ?newt k" using \S (t[i := t ! j, j := t ! i]) (j + 1) = S t (j + 1)\ same_traces trace_and_start_determines_end by auto qed moreover have "\k. k \ i \ S t k = S ((t[i := t ! j])[j := t ! i]) k" proof (rule allI, rule impI) fix k assume "k \ i" let ?newt = "((t[i := t ! j])[j := t ! i])" have "trace init (take k t) (S t k)" using "0.prems"(6) exists_trace_for_any_i by blast moreover have "trace init (take k ?newt) (S ?newt k)" using \take i t @ [t ! j] @ [t ! i] @ drop (j + 1) t = t[i := t ! j, j := t ! i]\ \trace init (take i t @ [t ! j] @ [t ! 
i] @ drop (j + 1) t) final\ exists_trace_for_any_i by auto moreover have "take k t = take k ?newt" using "0.prems"(1) \k \ i\ by auto ultimately show "S t k = S ?newt k" by (simp add: trace_and_start_determines_end) qed moreover have "prerecording_event (swap_events i j t) i" proof - have "~ has_snapshotted (S ((t[i := t ! j])[j := t ! i]) i) ?q" by (metis "0.prems"(6) \j = i + 1\ add.right_neutral calculation(4) le_add1 nsq snapshot_stable_ver_3) moreover have "regular_event ((t[i := t ! j])[j := t ! i] ! i)" by (metis "0.prems"(4) "0.prems"(5) \occurs_on (t ! i) \ occurs_on (t ! j)\ nth_list_update_eq nth_list_update_neq postrecording_event prerecording_event) moreover have "i < length ((t[i := t ! j])[j := t ! i])" using "0.prems"(1) "0.prems"(2) by auto ultimately show ?thesis unfolding prerecording_event by (metis (no_types, hide_lams) "0.prems"(1) \take (j - (i + 1)) (drop (i + 1) t) = []\ \take i t @ [t ! j] @ [t ! i] @ drop (j + 1) t = t[i := t ! j, j := t ! i]\ append_Cons length_list_update nat_less_le nth_list_update_eq nth_list_update_neq self_append_conv2) qed moreover have "postrecording_event (swap_events i j t) (i+1)" proof - have "has_snapshotted (S ((t[i := t ! j])[j := t ! i]) (i+1)) ?p" by (metis "0.prems"(4) add.right_neutral calculation(1) calculation(2) calculation(4) le_add1 postrecording_event snapshot_stable_ver_3) moreover have "regular_event ((t[i := t ! j])[j := t ! i] ! j)" using "0.prems"(2) "0.prems"(4) length_list_update postrecording_event by auto moreover have "j < length t" using "0.prems" by auto ultimately show ?thesis unfolding postrecording_event by (metis \j = i + 1\ length_list_update nth_list_update_eq swap_neighbors_2) qed moreover have "\k. k > i+1 \ k < j+1 \ ~ regular_event ((swap_events i j t) ! k)" using "0" by force ultimately show ?case using \j = i + 1\ by force next case (Suc n) let ?p = "occurs_on (t ! i)" let ?q = "occurs_on (t ! j)" let ?t = "take ((j+1) - i) (drop i t)" let ?subt = "take (j - (i+1)) (drop (i+1) t)" let ?subt' = "take ((j-1) - (i+1)) (drop (i+1) t)" have sp: "has_snapshotted (S t i) ?p" using Suc.prems postrecording_event prerecording_event by blast have nsq: "~ has_snapshotted (S t j) ?q" using Suc.prems postrecording_event prerecording_event by blast have "?p \ ?q" using Suc.prems computation.post_before_pre_different_processes computation_axioms by blast have "?subt \ Nil" using Suc.hyps(2) Suc.prems(1) Suc.prems(2) by auto have "?subt' = butlast ?subt" by (metis Suc.prems(2) Suc_eq_plus1 butlast_drop butlast_take drop_take less_imp_le_nat) have "?t = t ! i # ?subt @ [t ! j]" proof - have f1: "Suc j - i = Suc (j - i)" using Suc.prems(1) Suc_diff_le le_simps(1) by presburger have f2: "t ! i # drop (Suc i) t = drop i t" by (meson Cons_nth_drop_Suc Suc.prems(1) Suc.prems(2) less_trans) have f3: "t ! j # drop (Suc j) t = drop j t" using Cons_nth_drop_Suc Suc.prems(2) by blast have f4: "j - (i + 1) + (i + 1) = j" using Suc.prems(1) by force have "j - (i + 1) + Suc 0 = j - i" using Suc.prems(1) Suc_diff_Suc by presburger then show ?thesis using f4 f3 f2 f1 by (metis One_nat_def Suc.hyps(2) Suc_eq_plus1 drop_drop take_Suc_Cons take_add take_eq_Nil) qed then have "trace (S t i) ?t (S t (j+1))" by (metis Suc.prems(1) Suc.prems(6) Suc_eq_plus1 exists_trace_for_any_i_j less_SucI nat_less_le) then have reg_tr_1: "trace (S t i) (t ! 
i # ?subt) (S t j)" by (metis (no_types, hide_lams) Suc.hyps(2) Suc.prems(1) Suc.prems(4) Suc.prems(6) Suc_eq_plus1 discrete exists_trace_for_any_i_j postrecording_event step_Suc tr_step) have reg_st_2: "(S t j) \ (t ! j) \ (S t (j+1))" using Suc.prems(2) Suc.prems(6) step_Suc by auto have "?subt = ?subt' @ [t ! (j-1)]" proof - have f1: "\n es. \ n < length es \ take n es @ [hd (drop n es)::('a, 'b, 'c) event] = take (Suc n) es" by (meson take_hd_drop) (* 0.0 ms *) have f2: "j - 1 - (i + 1) = n" by (metis (no_types) Suc.hyps(2) Suc_eq_plus1 diff_Suc_1 diff_diff_left plus_1_eq_Suc) (* 28 ms *) have f3: "\n na. \ n < na \ Suc n \ na" using Suc_leI by blast (* 0.0 ms *) then have f4: "Suc i \ j - 1" by (metis (no_types) Suc.hyps(2) Suc_eq_plus1 diff_diff_left plus_1_eq_Suc zero_less_Suc zero_less_diff) (* 12 ms *) have f5: "i + 1 < j" by (metis Suc.hyps(2) zero_less_Suc zero_less_diff) (* 4 ms *) then have f6: "t ! (j - 1) = hd (drop n (drop (i + 1) t))" using f4 f3 by (metis (no_types) Suc.hyps(2) Suc.prems(2) Suc_eq_plus1 Suc_lessD add_Suc_right diff_Suc_1 drop_drop hd_drop_conv_nth le_add_diff_inverse2 plus_1_eq_Suc) (* 140 ms *) have "n < length (drop (i + 1) t)" using f5 f3 by (metis (no_types) Suc.hyps(2) Suc.prems(2) Suc_eq_plus1 Suc_lessD drop_drop le_add_diff_inverse2 length_drop zero_less_diff) (* 144 ms *) then show ?thesis using f6 f2 f1 Suc.hyps(2) by presburger (* 4 ms *) qed then have reg_tr: "trace (S t i) (t ! i # ?subt') (S t (j-1))" proof - have f1: "j - Suc i = Suc n" using Suc.hyps(2) by presburger have f2: "length (take j t) = j" by (metis (no_types) Suc.prems(2) length_take min.absorb2 nat_le_linear not_less) have f3: "(t ! i # drop (Suc i) (take j t)) @ [t ! j] = drop i (take (Suc j) t)" by (metis (no_types) Suc_eq_plus1 \take (j + 1 - i) (drop i t) = t ! i # take (j - (i + 1)) (drop (i + 1) t) @ [t ! j]\ append_Cons drop_take) have f4: "Suc (i + n) = j - 1" using f1 by (metis (no_types) Suc.prems(1) Suc_diff_Suc add_Suc_right diff_Suc_1 le_add_diff_inverse nat_le_linear not_less) have "Suc (j - 1) = j" using f1 by simp then have f5: "butlast (take (Suc j) t) = take j t" using f4 f3 f2 f1 by (metis (no_types) Groups.add_ac(2) One_nat_def append_eq_conv_conj append_take_drop_id butlast_take diff_Suc_1 drop_drop length_append length_drop list.size(3) list.size(4) order_refl plus_1_eq_Suc plus_nat.simps(2) take_add take_all) have f6: "butlast (take j t) = take (j - 1) t" by (meson Suc.prems(2) butlast_take nat_le_linear not_less) have "drop (Suc i) (take j t) \ []" by (metis (no_types) Nil_is_append_conv Suc_eq_plus1 \take (j - (i + 1)) (drop (i + 1) t) = take (j - 1 - (i + 1)) (drop (i + 1) t) @ [t ! (j - 1)]\ drop_take list.distinct(1)) then show ?thesis using f6 f5 f4 f3 by (metis (no_types) Suc.prems(6) Suc_eq_plus1 butlast.simps(2) butlast_drop butlast_snoc drop_take exists_trace_for_any_i_j less_add_Suc1 nat_le_linear not_less) qed have reg_st_1: "(S t (j-1)) \ (t ! (j-1)) \ (S t j)" by (metis Suc.prems(1) Suc.prems(2) Suc.prems(6) Suc_lessD diff_Suc_1 less_imp_Suc_add step_Suc) have "~ regular_event (t ! (j-1))" using Suc.prems(3) \take (j - (i + 1)) (drop (i + 1) t) \ []\ less_diff_conv by auto moreover have "regular_event (t ! j)" using Suc.prems(5) computation.prerecording_event computation_axioms by blast moreover have "can_occur (t ! j) (S t j)" using happen_implies_can_occur reg_tr_1 reg_st_2 by blast moreover have njmiq: "occurs_on (t ! (j-1)) \ ?q" proof (rule ccontr) assume "~ occurs_on (t ! (j-1)) \ ?q" then have "occurs_on (t ! 
(j-1)) = ?q" by simp then have "has_snapshotted (S t j) ?q" using Suc.prems(6) calculation(1) diff_le_self nonregular_event_induces_snapshot reg_st_1 snapshot_stable_ver_2 by blast then show False using nsq by simp qed ultimately have "can_occur (t ! j) (S t (j-1))" using reg_tr reg_st_1 event_can_go_back_if_no_sender by auto then obtain d where new_st_1: "(S t (j-1)) \ (t ! j) \ d" using exists_next_if_can_occur by blast then have "trace (S t i) (t ! i # ?subt' @ [t ! j]) d" using reg_tr trace_snoc by fastforce moreover have "can_occur (t ! (j-1)) d" using \(S t (j-1)) \ t ! j \ d\ \occurs_on (t ! (j - 1)) \ occurs_on (t ! j)\ event_stays_valid_if_no_occurrence happen_implies_can_occur reg_st_1 by auto moreover obtain e where new_st_2: "d \ (t ! (j-1)) \ e" using calculation(2) exists_next_if_can_occur by blast have pre_swap: "e = (S t (j+1))" proof - have "states e = states (S t (j+1))" proof (rule ext) fix p have "states (S t (j-1)) p = states (S t j) p" using no_state_change_if_nonregular_event\~ regular_event (t ! (j-1))\ reg_st_1 by auto moreover have "states d p = states e p" using no_state_change_if_nonregular_event\~ regular_event (t ! (j-1))\ new_st_2 by auto moreover have "states d p = states (S t (j+1)) p" proof - have "\a. states (S t (j + 1)) a = states d a" by (meson \\ regular_event (t ! (j - 1))\ new_st_1 no_state_change_if_nonregular_event reg_st_1 reg_st_2 same_state_implies_same_result_state) then show ?thesis by presburger qed ultimately show "states e p = states (S t (j+1)) p" by simp qed moreover have "msgs e = msgs (S t (j+1))" proof (rule ext) fix cid have "isTrans (t ! j) \ isSend (t ! j) \ isRecv (t ! j)" using \regular_event (t ! j)\ by auto moreover have "isSnapshot (t ! (j-1)) \ isRecvMarker (t ! (j-1))" using nonregular_event \~ regular_event (t ! (j-1))\ by auto ultimately show "msgs e cid = msgs (S t (j+1)) cid" proof (elim disjE, goal_cases) case 1 then show ?case using new_st_1 new_st_2 njmiq reg_st_1 reg_st_2 swap_Trans_Snapshot by auto next case 2 then show ?case using new_st_1 new_st_2 njmiq reg_st_1 reg_st_2 swap_msgs_Trans_RecvMarker by auto next case 3 then show ?case using new_st_1 new_st_2 njmiq reg_st_1 reg_st_2 swap_Send_Snapshot by auto next case 4 then show ?case using new_st_1 new_st_2 njmiq reg_st_1 reg_st_2 swap_Recv_Snapshot by auto next case 5 then show ?case using new_st_1 new_st_2 njmiq reg_st_1 reg_st_2 swap_msgs_Send_RecvMarker by auto next case 6 then show ?case using new_st_1 new_st_2 njmiq reg_st_1 reg_st_2 swap_msgs_Recv_RecvMarker by auto qed qed moreover have "process_snapshot e = process_snapshot (S t (j+1))" proof (rule ext) fix p have "process_snapshot (S t j) p = process_snapshot (S t (j+1)) p" using \regular_event (t ! j)\ reg_st_2 regular_event_preserves_process_snapshots by blast moreover have "process_snapshot (S t (j-1)) p = process_snapshot d p" using \regular_event (t ! j)\ new_st_1 regular_event_preserves_process_snapshots by blast moreover have "process_snapshot e p = process_snapshot (S t j) p" proof - have "occurs_on (t ! j) = p \ ps e p = ps (S t j) p" using calculation(2) new_st_2 njmiq no_state_change_if_no_event reg_st_1 by force then show ?thesis by (meson new_st_1 new_st_2 no_state_change_if_no_event reg_st_1 same_snapshot_state_implies_same_result_snapshot_state) qed ultimately show "process_snapshot e p = process_snapshot (S t (j+1)) p" by simp qed moreover have "cs e = cs (S t (j+1))" proof (rule ext) fix cid have "isTrans (t ! j) \ isSend (t ! j) \ isRecv (t ! j)" using \regular_event (t ! 
j)\ by auto moreover have "isSnapshot (t ! (j-1)) \ isRecvMarker (t ! (j-1))" using nonregular_event \~ regular_event (t ! (j-1))\ by auto ultimately show "cs e cid = cs (S t (j+1)) cid" proof (elim disjE, goal_cases) case 1 then show ?case using new_st_1 new_st_2 reg_st_1 reg_st_2 swap_cs_Trans_Snapshot by auto next case 2 then show ?case using new_st_1 new_st_2 reg_st_1 reg_st_2 swap_cs_Trans_RecvMarker by auto next case 3 then show ?case using new_st_1 new_st_2 reg_st_1 reg_st_2 swap_cs_Send_Snapshot by auto next case 4 then show ?case using new_st_1 new_st_2 reg_st_1 reg_st_2 swap_cs_Recv_Snapshot njmiq by auto next case 5 then show ?case using new_st_1 new_st_2 reg_st_1 reg_st_2 swap_cs_Send_RecvMarker by auto next case 6 then show ?case using new_st_1 new_st_2 reg_st_1 reg_st_2 swap_cs_Recv_RecvMarker njmiq by auto qed qed ultimately show ?thesis by auto qed let ?it = "(t[j-1 := t ! j])[j := t ! (j-1)]" have same_prefix: "take (j-1) ?it = take (j-1) t" by simp have same_suffix: "drop (j+1) ?it = drop (j+1) t" by simp have trace_prefix: "trace init (take (j-1) ?it) (S t (j-1))" using Suc.prems(6) exists_trace_for_any_i by auto have "?it = take (j-1) t @ [t ! j, t ! (j-1)] @ drop (j+1) t" proof - have "1 < j" by (metis (no_types) Suc.hyps(2) Suc_eq_plus1 add_lessD1 plus_1_eq_Suc zero_less_Suc zero_less_diff) (* 12 ms *) then have "j - 1 + 1 = j" by (metis (no_types) le_add_diff_inverse2 nat_less_le) (* 4 ms *) then show ?thesis by (metis (no_types) Suc.prems(2) Suc_eq_plus1 add_Suc_right one_add_one swap_neighbors) (* 76 ms *) qed have "trace (S t (j-1)) [t ! j, t ! (j-1)] (S t (j+1))" by (metis new_st_1 new_st_2 pre_swap trace.simps) have "trace init (take (j+1) t @ drop (j+1) t) final" by (simp add: Suc.prems(6)) then have "trace init (take (j+1) t) (S t (j+1)) \ trace (S t (j+1)) (drop (j+1) t) final" using Suc.prems(6) exists_trace_for_any_i split_trace trace_and_start_determines_end by blast then have trace_suffix: "trace (S t (j+1)) (drop (j+1) ?it) final" using same_suffix by simp have "trace init ?it final" by (metis (no_types, lifting) \t[j - 1 := t ! j, j := t ! (j - 1)] = take (j - 1) t @ [t ! j, t ! (j - 1)] @ drop (j + 1) t\ \trace (S t (j + 1)) (drop (j + 1) (t[j - 1 := t ! j, j := t ! (j - 1)])) final\ \trace (S t (j - 1)) [t ! j, t ! (j - 1)] (S t (j + 1))\ \trace init (take (j - 1) (t[j - 1 := t ! j, j := t ! (j - 1)])) (S t (j - 1))\ same_prefix same_suffix trace_trans) have suffix_same_states: "\k. k > j \ S t k = S ?it k" proof (rule allI, rule impI) fix k assume "k > j" have eq_trace: "drop (j+1) t = drop (j+1) ?it" by simp have "trace init (take (j+1) ?it) (S ?it (j+1))" using \trace init (t[j - 1 := t ! j, j := t ! (j - 1)]) final\ exists_trace_for_any_i by blast moreover have "trace init (take (j+1) ?it) (S t (j+1))" proof - have f1: "\es esa esb esc. (esb::('a, 'b, 'c) event list) @ es \ esa @ esc @ es \ esa @ esc = esb" by auto have f2: "take (j + 1) (t[j - 1 := t ! j, j := t ! (j - 1)]) @ drop (j + 1) t = t [j - 1 := t ! j, j := t ! (j - 1)]" by (metis append_take_drop_id same_suffix) have "trace init (take (j - 1) t @ [t ! j, t ! (j - 1)]) (S t (j + 1))" using \trace (S t (j - 1)) [t ! j, t ! (j - 1)] (S t (j + 1))\ same_prefix trace_prefix trace_trans by presburger then show ?thesis using f2 f1 by (metis (no_types) \t[j - 1 := t ! j, j := t ! (j - 1)] = take (j - 1) t @ [t ! j, t ! 
(j - 1)] @ drop (j + 1) t\) qed ultimately have eq_start: "S ?it (j+1) = S t (j+1)" using trace_and_start_determines_end by blast then have "take k ?it = take (j+1) ?it @ take (k - (j+1)) (drop (j+1) ?it)" by (metis Suc_eq_plus1 Suc_leI \j < k\ le_add_diff_inverse take_add) have "trace (S ?it (j+1)) (take (k - (j+1)) (drop (j+1) ?it)) (S ?it k)" by (metis Suc_eq_plus1 Suc_leI \j < k\ \trace init (t[j - 1 := t ! j, j := t ! (j - 1)]) final\ exists_trace_for_any_i_j) moreover have "trace (S t (j+1)) (take (k - (j+1)) (drop (j+1) t)) (S t k)" using Suc.prems(6) \j < k\ exists_trace_for_any_i_j by fastforce ultimately show "S t k = S ?it k" using eq_start trace_and_start_determines_end by auto qed have prefix_same_states: "\k. k < j \ S t k = S ?it k" proof (rule allI, rule impI) fix k assume "k < j" have "trace init (take k t) (S t k)" using Suc.prems(6) exists_trace_for_any_i by blast moreover have "trace init (take k ?it) (S ?it k)" by (meson \trace init (t[j - 1 := t ! j, j := t ! (j - 1)]) final\ exists_trace_for_any_i) ultimately show "S t k = S ?it k" using \k < j\ s_def by auto qed moreover have "j - 1 < length ?it" using Suc.prems(2) by auto moreover have "prerecording_event ?it (j-1)" proof - have f1: "t[j - 1 := t ! j, j := t ! (j - 1)] ! (j - 1) = t[j - 1 := t ! j] ! (j - 1)" by (metis (no_types) njmiq nth_list_update_neq) (* 28 ms *) have "j \ 0" by (metis (no_types) Suc.prems(1) not_less_zero) (* 0.0 ms *) then have "\ j < 1" by blast (* 0.0 ms *) then have "S t (j - 1) = S (t[j - 1 := t ! j, j := t ! (j - 1)]) (j - 1)" by (simp add: prefix_same_states) (* 8 ms *) then show ?thesis using f1 by (metis \regular_event (t ! j)\ calculation(4) computation.prerecording_event computation_axioms length_list_update njmiq no_state_change_if_no_event nsq nth_list_update_eq reg_st_1) (* 456 ms *) qed moreover have "postrecording_event ?it i" proof - have "i < length ?it" using Suc.prems(4) postrecording_event by auto then show ?thesis proof - assume "i < length (t[j - 1 := t ! j, j := t ! (j - 1)])" have "i < j - 1" by (metis (no_types) Suc.hyps(2) cancel_ab_semigroup_add_class.diff_right_commute diff_diff_left zero_less_Suc zero_less_diff) then show ?thesis using Suc.prems(1) Suc.prems(4) postrecording_event prefix_same_states by auto qed qed moreover have "i < j - 1" using Suc.hyps(2) by auto moreover have "\k. i < k \ k < (j-1) \ ~ regular_event (?it ! k)" proof (rule allI, rule impI) fix k assume "i < k \ k < (j-1)" show "~ regular_event (?it ! k)" using Suc.prems(3) \i < k \ k < j - 1\ by force qed moreover have "(j-1) - (i+1) = n" using Suc.prems Suc.hyps by auto ultimately have ind: "trace init (swap_events i (j-1) ?it) final \ (\k. k \ (j-1)+1 \ S (swap_events i (j-1) ?it) k = S ?it k) \ (\k. k \ i \ S (swap_events i (j-1) ?it) k = S ?it k) \ prerecording_event (swap_events i (j-1) ?it) i \ postrecording_event (swap_events i (j-1) ?it) (i+1) \ (\k. k > i+1 \ k < (j-1)+1 \ ~ regular_event ((swap_events i (j-1) ?it) ! k))" using Suc.hyps \trace init ?it final\ by blast then have new_trace: "trace init (swap_events i (j-1) ?it) final" by blast have equal_suffix_states: "\k. k \ j \ S (swap_events i (j-1) ?it) k = S ?it k" using Suc.prems(1) ind by simp have equal_prefix_states: "\k. k \ i \ S (swap_events i (j-1) ?it) k = S ?it k" using ind by blast have neighboring_events_shifted: "\k. k > i+1 \ k < j \ ~ regular_event ((swap_events i (j-1) ?it) ! 
k)" using ind by force let ?itn = "swap_events i (j-1) ?it" have "?itn = swap_events i j t" proof - have f1: "i \ j - 1" using \i < j - 1\ less_imp_le_nat by blast have "t ! j # [t ! (j - 1)] @ drop (j + 1) t = drop (j - 1) (take (j - 1) t @ [t ! j, t ! (j - 1)] @ drop (j + 1) t)" using \t[j - 1 := t ! j, j := t ! (j - 1)] = take (j - 1) t @ [t ! j, t ! (j - 1)] @ drop (j + 1) t\ same_prefix by force then have f2: "t[j - 1 := t ! j, j := t ! (j - 1)] ! (j - 1) = t ! j \ drop (j - 1 + 1) (t[j - 1 := t ! j, j := t ! (j - 1)]) = t ! (j - 1) # [] @ drop (j + 1) t" by (metis (no_types) Cons_nth_drop_Suc Suc_eq_plus1 \j - 1 < length (t[j - 1 := t ! j, j := t ! (j - 1)])\ \t[j - 1 := t ! j, j := t ! (j - 1)] = take (j - 1) t @ [t ! j, t ! (j - 1)] @ drop (j + 1) t\ append_Cons list.inject) have "t ! i = t[j - 1 := t ! j, j := t ! (j - 1)] ! i" by (metis (no_types) Suc.prems(1) \i < j - 1\ nat_neq_iff nth_list_update_neq) then show ?thesis using f2 f1 by (metis (no_types) Suc.prems(1) \take (j - (i + 1)) (drop (i + 1) t) = take (j - 1 - (i + 1)) (drop (i + 1) t) @ [t ! (j - 1)]\ append.assoc append_Cons drop_take less_imp_le_nat same_prefix take_update_cancel) qed moreover have "\k. k \ i \ S t k = S ?itn k" using Suc.prems(1) equal_prefix_states prefix_same_states by auto moreover have "\k. k \ j + 1 \ S t k = S ?itn k" by (metis (no_types, lifting) Suc_eq_plus1 add_lessD1 equal_suffix_states lessI nat_less_le suffix_same_states) moreover have "\k. k > i+1 \ k < j+1 \ ~ regular_event (?itn ! k)" proof - have "~ regular_event (?itn ! j)" proof - have f1: "j - 1 < length t" using \j - 1 < length (t[j - 1 := t ! j, j := t ! (j - 1)])\ by force have f2: "\n na es. \ n < na \ \ na < length es \ drop (Suc na) (take n es @ [hd (drop na es), es ! n::('a, 'b, 'c) event] @ take (na - Suc n) (drop (Suc n) es) @ drop (Suc na) es) = drop (Suc na) es" by (metis Suc_eq_plus1 hd_drop_conv_nth swap_identical_tails) have f3: "t ! j = hd (drop j t)" by (simp add: Suc.prems(2) hd_drop_conv_nth) have "\ j < 1" using Suc.prems(1) by blast then have "\ regular_event (hd (drop j (take i (t[j - 1 := hd (drop j t), j := hd (drop (j - 1) t)]) @ [hd (drop (j - 1) (t[j - 1 := hd (drop j t), j := hd (drop (j - 1) t)])), t[j - 1 := hd (drop j t), j := hd (drop (j - 1) t)] ! i] @ take (j - 1 - Suc i) (drop (Suc i) (t[j - 1 := hd (drop j t), j := hd (drop (j - 1) t)])) @ drop (Suc (j - 1)) (t[j - 1 := hd (drop j t), j := hd (drop (j - 1) t)]))))" using f2 f1 by (metis (no_types) Suc.prems(2) \\ regular_event (t ! (j - 1))\ \i < j - 1\ add_diff_inverse_nat hd_drop_conv_nth length_list_update nth_list_update_eq plus_1_eq_Suc) then show ?thesis using f3 f1 by (metis Suc.prems(2) Suc_eq_plus1 \i < j - 1\ hd_drop_conv_nth length_list_update swap_identical_length) qed then show ?thesis by (metis Suc_eq_plus1 less_Suc_eq neighboring_events_shifted) qed ultimately show ?case using ind by presburger qed subsubsection \Relating configurations and the computed snapshot\ definition ps_equal_to_snapshot where "ps_equal_to_snapshot c c' \ \p. Some (states c p) = process_snapshot c' p" definition cs_equal_to_snapshot where "cs_equal_to_snapshot c c' \ \cid. 
channel cid \ None \ filter ((\) Marker) (msgs c cid) = map Msg (fst (channel_snapshot c' cid))" definition state_equal_to_snapshot where "state_equal_to_snapshot c c' \ ps_equal_to_snapshot c c' \ cs_equal_to_snapshot c c'" lemma init_is_s_t_0: assumes "trace init t final" shows "init = (S t 0)" by (metis assms exists_trace_for_any_i take_eq_Nil tr_init trace_and_start_determines_end) lemma final_is_s_t_len_t: assumes "trace init t final" shows "final = S t (length t)" by (metis assms exists_trace_for_any_i order_refl take_all trace_and_start_determines_end) lemma snapshot_event: assumes "trace init t final" and "~ has_snapshotted (S t i) p" and "has_snapshotted (S t (i+1)) p" shows "isSnapshot (t ! i) \ isRecvMarker (t ! i)" proof - have "(S t i) \ (t ! i) \ (S t (i+1))" by (metis Suc_eq_plus1 assms(1) assms(2) assms(3) distributed_system.step_Suc computation_axioms computation_def nat_less_le not_less not_less_eq s_def take_all) then show ?thesis using assms(2) assms(3) nonregular_event regular_event_cannot_induce_snapshot by blast qed lemma snapshot_state: assumes "trace init t final" and "states (S t i) p = u" and "~ has_snapshotted (S t i) p" and "has_snapshotted (S t (i+1)) p" shows "ps (S t (i+1)) p = Some u" proof - have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis add.commute assms(1) assms(3) assms(4) le_SucI le_eq_less_or_eq le_refl nat_neq_iff no_change_if_ge_length_t plus_1_eq_Suc step_Suc) let ?q = "occurs_on (t ! i)" have qp: "?q = p" proof (rule ccontr) assume "?q \ p" then have "has_snapshotted (S t (i+1)) p = has_snapshotted (S t i) p" using local.step no_state_change_if_no_event by auto then show False using assms by simp qed have "isSnapshot (t ! i) \ isRecvMarker (t ! i)" using assms snapshot_event by auto then show ?thesis proof (elim disjE, goal_cases) case 1 then have "t ! i = Snapshot p" by (metis event.collapse(4) qp) then show ?thesis using assms(2) local.step by auto next case 2 then obtain cid' q where "t ! i = RecvMarker cid' p q" by (metis event.collapse(5) qp) then show ?thesis using assms step by auto qed qed lemma snapshot_state_unchanged_trace_2: shows "\ trace init t final; i \ j; j \ length t; ps (S t i) p = Some u \ \ ps (S t j) p = Some u" proof (induct i j rule:S_induct) case S_init then show ?case by simp next case S_step then show ?case using snapshot_state_unchanged by auto qed lemma no_recording_cs_if_not_snapshotted: shows "\ trace init t final; ~ has_snapshotted (S t i) p; channel cid = Some (q, p) \ \ cs (S t i) cid = cs init cid" proof (induct i) case 0 then show ?case by (metis exists_trace_for_any_i list.discI take_eq_Nil trace.simps) next case (Suc i) have "Suc i < length t" proof - have "has_snapshotted final p" using all_processes_snapshotted_in_final_state valid by blast show ?thesis proof (rule ccontr) assume "~ Suc i < length t" then have "Suc i \ length t" by simp then have "has_snapshotted (S t (Suc i)) p" using Suc.prems(1) \ps final p \ None\ final_is_s_t_len_t snapshot_stable_ver_3 by blast then show False using Suc by simp qed qed then have t_dec: "trace init (take i t) (S t i) \ (S t i) \ (t ! i) \ (S t (Suc i))" using Suc.prems(1) exists_trace_for_any_i step_Suc by auto moreover have step: "(S t i) \ (t ! i) \ (S t (Suc i))" using calculation by simp ultimately have IH: "cs (S t i) cid = cs init cid" using Suc.hyps Suc.prems(1) Suc.prems(2) Suc.prems(3) snapshot_state_unchanged by fastforce then show ?case proof (cases "t ! 
i") case (Snapshot r) have "r \ p" proof (rule ccontr) assume "~ r \ p" then have "r = p" by simp then have "has_snapshotted (S t (Suc i)) p" using Snapshot step by auto then show False using Suc by simp qed then have "cs (S t i) cid = cs (S t (Suc i)) cid" using Snapshot Suc.prems(3) local.step by auto then show ?thesis using IH by simp next case (RecvMarker cid' r s) have "r \ p" proof (rule ccontr) assume "~ r \ p" then have "r = p" by simp then have "has_snapshotted (S t (Suc i)) p" using RecvMarker t_dec recv_marker_means_snapshotted_1 by blast then show False using Suc by simp qed have "cid' \ cid" proof (rule ccontr) assume "~ cid' \ cid" then have "channel cid' = Some (s, r)" using t_dec can_occur_def RecvMarker by simp then show False using Suc.prems(3) \\ cid' \ cid\ \r \ p\ by auto qed then have "cs (S t i) cid = cs (S t (Suc i)) cid" proof - have "\s. channel cid = Some (s, r)" using \r \ p\ Suc by simp with RecvMarker t_dec \cid' \ cid\ \r \ p\ Suc.prems(3) show ?thesis by (cases "has_snapshotted (S t i) r", auto) qed then show ?thesis using IH by simp next case (Trans r u u') then show ?thesis using IH t_dec by auto next case (Send cid' r s u u' m) then show ?thesis using IH local.step by auto next case (Recv cid' r s u u' m) then have "snd (cs (S t i) cid) = NotStarted" by (simp add: IH no_initial_channel_snapshot) with Recv step Suc show ?thesis by (cases "cid' = cid", auto) qed qed lemma cs_done_implies_has_snapshotted: assumes "trace init t final" and "snd (cs (S t i) cid) = Done" and "channel cid = Some (p, q)" shows "has_snapshotted (S t i) q" proof - show ?thesis using assms no_initial_channel_snapshot no_recording_cs_if_not_snapshotted by fastforce qed lemma exactly_one_snapshot: assumes "trace init t final" shows "\!i. ~ has_snapshotted (S t i) p \ has_snapshotted (S t (i+1)) p" (is ?P) proof - have "~ has_snapshotted init p" using no_initial_process_snapshot by auto moreover have "has_snapshotted final p" using all_processes_snapshotted_in_final_state valid by blast moreover have "trace (S t 0) t (S t (length t))" using assms final_is_s_t_len_t init_is_s_t_0 by auto ultimately have ex_snap: "\i. ~ has_snapshotted (S t i) p \ has_snapshotted (S t (i+1)) p" using assms exists_snapshot_for_all_p by auto show ?thesis proof (rule ccontr) assume "~ ?P" then have "\i j. (i \ j) \ ~ has_snapshotted (S t i) p \ has_snapshotted (S t (i+1)) p \ ~ has_snapshotted (S t j) p \ has_snapshotted (S t (j+1)) p" using ex_snap by blast then have "\i j. (i < j) \ ~ has_snapshotted (S t i) p \ has_snapshotted (S t (i+1)) p \ ~ has_snapshotted (S t j) p \ has_snapshotted (S t (j+1)) p" by (meson linorder_neqE_nat) then obtain i j where "i < j" "~ has_snapshotted (S t i) p" "has_snapshotted (S t (i+1)) p" "~ has_snapshotted (S t j) p" "has_snapshotted (S t (j+1)) p" by blast have "trace (S t (i+1)) (take (j - (i+1)) (drop (i+1) t)) (S t j)" using \i < j\ assms exists_trace_for_any_i_j by fastforce then have "has_snapshotted (S t j) p" using \ps (S t (i + 1)) p \ None\ snapshot_stable by blast then show False using \~ has_snapshotted (S t j) p\ by simp qed qed lemma initial_cs_changes_implies_nonregular_event: assumes "trace init t final" and "snd (cs (S t i) cid) = NotStarted" and "snd (cs (S t (i+1)) cid) \ NotStarted" and "channel cid = Some (p, q)" shows "~ regular_event (t ! 
i)" proof - have "i < length t" proof (rule ccontr) assume "~ i < length t" then have "S t i = S t (i+1)" using assms(1) no_change_if_ge_length_t by auto then show False using assms by presburger qed then have step: "(S t i) \ (t ! i) \ (S t (i+1))" using assms(1) step_Suc by auto show ?thesis proof (rule ccontr) assume "~ ~ regular_event (t ! i)" then have "regular_event (t ! i)" by simp then have "cs (S t i) cid = cs (S t (i+1)) cid" proof (cases "isRecv (t ! i)") case False then show ?thesis using \regular_event (t ! i)\ local.step no_cs_change_if_no_event by blast next case True then obtain cid' r s u u' m where Recv: "t ! i = Recv cid' r s u u' m" by (meson isRecv_def) with assms step show ?thesis proof (cases "cid = cid'") case True then show ?thesis using assms step Recv by simp next case False then show ?thesis using assms step Recv by simp qed qed then show False using assms by simp qed qed lemma cs_in_initial_state_implies_not_snapshotted: assumes "trace init t final" and "snd (cs (S t i) cid) = NotStarted" and "channel cid = Some (p, q)" shows "~ has_snapshotted (S t i) q" proof (rule ccontr) assume "~ ~ has_snapshotted (S t i) q" then obtain j where "j < i" "~ has_snapshotted (S t j) q" "has_snapshotted (S t (j+1)) q" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p computation.snapshot_stable_ver_3 computation_axioms nat_le_linear order_le_less) have step_j: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis \\ \ ps (S t i) q \ None\ \\ ps (S t j) q \ None\ \j < i\ add.commute assms(1) linorder_neqE_nat no_change_if_ge_length_t order_le_less order_refl plus_1_eq_Suc step_Suc) have tr_j_i: "trace (S t (j+1)) (take (i - (j+1)) (drop (j+1) t)) (S t i)" using \j < i\ assms(1) exists_trace_for_any_i_j by fastforce have "~ regular_event (t ! j)" using step_j \\ ps (S t j) q \ None\ \ps (S t (j + 1)) q \ None\ regular_event_cannot_induce_snapshot by blast then have "isSnapshot (t ! j) \ isRecvMarker (t ! j)" using nonregular_event by auto then have "snd (cs (S t (j+1)) cid) \ NotStarted" proof (elim disjE, goal_cases) case 1 have "occurs_on (t ! j) = q" using \\ ps (S t j) q \ None\ \ps (S t (j + 1)) q \ None\ distributed_system.no_state_change_if_no_event distributed_system_axioms step_j by fastforce with 1 have "t ! j = Snapshot q" using isSnapshot_def by auto then show ?thesis using step_j assms by simp next case 2 have "occurs_on (t ! j) = q" using \\ ps (S t j) q \ None\ \ps (S t (j + 1)) q \ None\ distributed_system.no_state_change_if_no_event distributed_system_axioms step_j by fastforce with 2 obtain cid' s where RecvMarker: "t ! j = RecvMarker cid' q s" by (metis event.collapse(5)) then show ?thesis proof (cases "cid' = cid") case True then show ?thesis using RecvMarker step_j assms by simp next case False have "~ has_snapshotted (S t j) q" using \\ ps (S t j) q \ None\ by auto moreover have "\r. channel cid = Some (r, q)" by (simp add: assms(3)) ultimately show ?thesis using RecvMarker step_j assms False by simp qed qed then have "snd (cs (S t i) cid) \ NotStarted" using tr_j_i cs_not_not_started_stable_trace assms by blast then show False using assms by simp qed lemma nonregular_event_in_initial_state_implies_cs_changed: assumes "trace init t final" and "snd (cs (S t i) cid) = NotStarted" and "~ regular_event (t ! i)" and "occurs_on (t ! i) = q" and "channel cid = Some (p, q)" and "i < length t" shows "snd (cs (S t (i+1)) cid) \ NotStarted" proof - have step: "(S t i) \ (t ! i) \ (S t (i+1))" using step_Suc assms by auto have "isSnapshot (t ! i) \ isRecvMarker (t ! 
i)" using assms(3) nonregular_event by blast then show ?thesis proof (elim disjE, goal_cases) case 1 then show ?thesis using assms cs_in_initial_state_implies_not_snapshotted local.step nonregular_event_induces_snapshot by blast next case 2 then show ?thesis by (metis assms(1) assms(2) assms(3) assms(4) assms(5) cs_in_initial_state_implies_not_snapshotted local.step nonregular_event_induces_snapshot) qed qed lemma cs_recording_implies_snapshot: assumes "trace init t final" and "snd (cs (S t i) cid) = Recording" and "channel cid = Some (p, q)" shows "has_snapshotted (S t i) q" proof (rule ccontr) assume "~ has_snapshotted (S t i) q" have "\ trace init t final; ~ has_snapshotted (S t i) p; channel cid = Some (p, q) \ \ snd (cs (S t i) cid) = NotStarted" proof (induct i) case 0 then show ?case using init_is_s_t_0 no_initial_channel_snapshot by auto next case (Suc n) have step: "(S t n) \ (t ! n) \ (S t (n+1))" by (metis Suc.prems(2) Suc_eq_plus1 all_processes_snapshotted_in_final_state assms(1) distributed_system.step_Suc distributed_system_axioms final_is_s_t_len_t le_add1 not_less snapshot_stable_ver_3) have "snd (cs (S t n) cid) = NotStarted" using Suc.hyps Suc.prems(2) assms snapshot_state_unchanged computation_axioms local.step by fastforce then show ?case by (metis Suc.prems(1) \\ ps (S t i) q \ None\ assms(2) assms(3) cs_not_not_started_stable_trace exists_trace_for_any_i no_recording_cs_if_not_snapshotted recording_state.simps(2)) qed then show False using \\ ps (S t i) q \ None\ assms computation.no_initial_channel_snapshot computation_axioms no_recording_cs_if_not_snapshotted by fastforce qed lemma cs_done_implies_both_snapshotted: assumes "trace init t final" and "snd (cs (S t i) cid) = Done" and "i < length t" and "channel cid = Some (p, q)" shows "has_snapshotted (S t i) p" "has_snapshotted (S t i) q" proof - have "trace init (take i t) (S t i)" using assms(1) exists_trace_for_any_i by blast then have "RecvMarker cid q p : set (take i t)" by (metis assms(1,2,4) cs_done_implies_has_snapshotted done_only_from_recv_marker_trace computation.no_initial_process_snapshot computation_axioms init_is_s_t_0 list.discI trace.simps) then obtain k where "t ! k = RecvMarker cid q p" "0 \ k" "k < i" by (metis add.right_neutral add_diff_cancel_right' append_Nil append_take_drop_id assms(1) exists_index take0) then have "has_snapshotted (S t (k+1)) q" by (metis (no_types, lifting) Suc_eq_plus1 Suc_leI assms(1,2,4) computation.cs_done_implies_has_snapshotted computation.no_change_if_ge_length_t computation_axioms less_le not_less_eq recv_marker_means_cs_Done) then show "has_snapshotted (S t i) q" using assms cs_done_implies_has_snapshotted by blast have step_k: "(S t k) \ (t ! k) \ (S t (k+1))" by (metis Suc_eq_plus1 \k < i\ add_lessD1 assms(1) assms(3) distributed_system.step_Suc distributed_system_axioms less_imp_add_positive) then have "Marker : set (msgs (S t k) cid)" proof - have "can_occur (t ! k) (S t k)" using happen_implies_can_occur step_k by blast then show ?thesis unfolding can_occur_def \t ! 
k = RecvMarker cid q p\ using hd_in_set by fastforce qed then have "has_snapshotted (S t k) p" using assms(1,4) no_marker_if_no_snapshot by blast then show "has_snapshotted (S t i) p" using \k < i\ assms(1) less_imp_le_nat snapshot_stable_ver_3 by blast qed lemma cs_done_implies_same_snapshots: assumes "trace init t final" "i \ j" "j \ length t" shows "snd (cs (S t i) cid) = Done \ channel cid = Some (p, q) \ cs (S t i) cid = cs (S t j) cid" using assms proof (induct i j rule: S_induct) case (S_init i) then show ?case by auto next case (S_step i j) have snap_p: "has_snapshotted (S t i) p" using S_step.hyps(1) S_step.hyps(2) S_step.prems(1,2) assms(1) cs_done_implies_both_snapshotted(1) by auto have snap_q: "has_snapshotted (S t i) q" using S_step.prems(1,2) assms(1) cs_done_implies_has_snapshotted by blast from S_step have "cs (S t i) cid = cs (S t (Suc i)) cid" proof (cases "t ! i") case (Snapshot r) from Snapshot S_step.hyps(3) snap_p have False if "r = p" using that by (auto simp: can_occur_def) moreover from Snapshot S_step.hyps(3) snap_q have False if "r = q" using that by (auto simp: can_occur_def) ultimately show ?thesis using Snapshot S_step by force next case (RecvMarker cid' r s) then show ?thesis proof (cases "has_snapshotted (S t i) r") case True with RecvMarker S_step show ?thesis proof (cases "cid = cid'") case True then have "cs (S t (Suc i)) cid = (fst (cs (S t i) cid), Done)" using RecvMarker S_step by simp then show ?thesis by (metis S_step.prems(1) prod.collapse) qed auto next case no_snap: False then show ?thesis proof (cases "cid = cid'") case True then have "cs (S t (Suc i)) cid = (fst (cs (S t i) cid), Done)" using RecvMarker S_step by simp then show ?thesis by (metis S_step.prems(1) prod.collapse) next case False then have "r \ p" using no_snap snap_p by auto moreover have "\s. channel cid = Some (s, r)" using S_step(5) assms(1) cs_done_implies_has_snapshotted no_snap by blast ultimately show ?thesis using RecvMarker S_step False no_snap by simp qed qed next case (Recv cid' r s u u' m) with S_step show ?thesis by (cases "cid = cid'", auto) qed auto with S_step show ?case by auto qed lemma snapshotted_and_not_done_implies_marker_in_channel: assumes "trace init t final" and "has_snapshotted (S t i) p" and "snd (cs (S t i) cid) \ Done" and "i \ length t" and "channel cid = Some (p, q)" shows "Marker : set (msgs (S t i) cid)" proof - obtain j where jj: "j < i" "~ has_snapshotted (S t j) p" "has_snapshotted (S t (j+1)) p" by (metis Suc_eq_plus1 assms(1) assms(2) exists_snapshot_for_all_p computation.snapshot_stable_ver_2 computation_axioms le_eq_less_or_eq nat_neq_iff) have step: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis \\ ps (S t j) p \ None\ \j < i\ add.commute assms(1) assms(2) linorder_neqE_nat no_change_if_ge_length_t order_le_less order_refl plus_1_eq_Suc step_Suc) then have "Marker : set (msgs (S t (j+1)) cid)" proof - have "~ regular_event (t ! j)" by (meson \\ ps (S t j) p \ None\ \ps (S t (j + 1)) p \ None\ distributed_system.regular_event_cannot_induce_snapshot distributed_system_axioms local.step) then have "isSnapshot (t ! j) \ isRecvMarker (t ! j)" using nonregular_event by blast then show ?thesis proof (elim disjE, goal_cases) case 1 then obtain r where Snapshot: "t ! j = Snapshot r" by (meson isSnapshot_def) then have "r = p" using jj(2) jj(3) local.step by auto then show ?thesis using Snapshot assms step by simp next case 2 then obtain cid' s where RecvMarker: "t ! 
j = RecvMarker cid' p s" by (metis jj(2,3) distributed_system.no_state_change_if_no_event distributed_system_axioms event.sel(5) isRecvMarker_def local.step) moreover have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "snd (cs (S t (j+1)) cid) = Done" using RecvMarker step by simp then have "snd (cs (S t i) cid) = Done" proof - assume a1: "snd (cs (S t (j + 1)) cid) = Done" have f2: "ps (S t j) p = None" using jj(2) by blast have "j < length t" using assms(4) jj(1) by linarith then have "t ! j = RecvMarker cid q p" using f2 a1 assms(1) assms(5) cs_done_implies_both_snapshotted(1) done_only_from_recv_marker local.step by blast then show ?thesis using f2 by (metis (no_types) Suc_eq_plus1 assms(1) local.step recv_marker_means_snapshotted) qed then show False using assms by simp qed ultimately show ?thesis using jj assms step by auto qed qed show ?thesis proof (rule ccontr) let ?t = "take (i - (j+1)) (drop (j+1) t)" have tr_j: "trace (S t (j+1)) ?t (S t i)" by (metis \j < i\ assms(1) discrete exists_trace_for_any_i_j) assume "~ Marker : set (msgs (S t i) cid)" then obtain ev where "ev \ set ?t" "\p q. ev = RecvMarker cid p q" using \Marker \ set (msgs (S t (j + 1)) cid)\ marker_must_be_delivered_2_trace tr_j assms by blast obtain k where "t ! k = ev" "j < k" "k < i" using \ev \ set (take (i - (j + 1)) (drop (j + 1) t))\ assms(1) exists_index by fastforce have step_k: "(S t k) \ (t ! k) \ (S t (k+1))" proof - have "k < length t" using \k < i\ assms(4) by auto then show ?thesis using step_Suc assms by simp qed have "ev = RecvMarker cid q p" using assms step_k can_occur_def using \\p q. ev = RecvMarker cid p q\ \t ! k = ev\ by auto then have "snd (cs (S t (k+1)) cid) = Done" using \k < i\ \t ! k = ev\ assms(1) assms(4) recv_marker_means_cs_Done by auto moreover have "trace (S t (k+1)) (take (i - (k+1)) (drop (k+1) t)) (S t i)" by (meson \k < i\ assms(1) discrete exists_trace_for_any_i_j) ultimately have "snd (cs (S t i) cid) = Done" by (metis \k < i\ assms(1) assms(4) assms(5) cs_done_implies_same_snapshots discrete) then show False using assms by simp qed qed lemma no_marker_left_in_final_state: assumes "trace init t final" shows "Marker \ set (msgs final cid)" (is ?P) proof (rule ccontr) assume "~ ?P" then obtain i where "i > length t" "Marker \ set (msgs (S t i) cid)" using assms l1 by (metis final_is_s_t_len_t le_neq_implies_less) then have "S t (length t) \ S t i" proof - have "msgs (S t i) cid \ msgs final cid" using \Marker \ set (msgs (S t i) cid)\ \~ ?P\ by auto then show ?thesis using final_is_s_t_len_t assms by auto qed moreover have "S t (length t) = S t i" using assms \i > length t\ less_imp_le no_change_if_ge_length_t by simp ultimately show False by simp qed lemma all_channels_done_in_final_state: assumes "trace init t final" and "channel cid = Some (p, q)" shows "snd (cs final cid) = Done" proof (rule ccontr) assume cs_not_done: "~ snd (cs final cid) = Done" obtain i where snap_p: "~ has_snapshotted (S t i) p" "has_snapshotted (S t (i+1)) p" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p) have "i < length t" proof - have "S t i \ S t (i+1)" using snap_p by auto then show ?thesis by (meson assms(1) computation.no_change_if_ge_length_t computation_axioms le_add1 not_less) qed let ?t = "take (length t - (i+1)) (drop (i+1) t)" have tr: "trace (S t (i+1)) ?t (S t (length t))" by (meson \i < length t\ assms(1) discrete exists_trace_for_any_i_j) have "Marker \ set (msgs (S t (i+1)) cid)" proof - have n_done: "snd (cs (S t (i+1)) cid) \ Done" proof (rule 
ccontr) assume "~ snd (cs (S t (i+1)) cid) \ Done" then have "snd (cs final cid) = Done" by (metis Suc_eq_plus1 Suc_leI \i < length t\ assms final_is_s_t_len_t computation.cs_done_implies_same_snapshots computation_axioms order_refl) then show False using cs_not_done by simp qed then show ?thesis using snapshotted_and_not_done_implies_marker_in_channel snap_p assms proof - have "i+1 \ length t" using \i < length t\ by auto then show ?thesis using snapshotted_and_not_done_implies_marker_in_channel snap_p assms n_done by simp qed qed moreover have "Marker \ set (msgs (S t (length t)) cid)" using final_is_s_t_len_t no_marker_left_in_final_state assms by blast ultimately have rm_prov: "\ev \ set ?t. (\q p. ev = RecvMarker cid q p)" using tr message_must_be_delivered_2_trace assms by (simp add: marker_must_be_delivered_2_trace) then obtain k where "\q p. t ! k = RecvMarker cid q p" "i+1 \ k" "k < length t" by (metis assms(1) exists_index) then have step: "(S t k) \ (t ! k) \ (S t (k+1))" by (metis Suc_eq_plus1_left add.commute assms(1) step_Suc) then have RecvMarker: "t ! k = RecvMarker cid q p" by (metis RecvMarker_given_channel \\q p. t ! k = RecvMarker cid q p\ assms(2) event.disc(25) event.sel(10) happen_implies_can_occur) then have "snd (cs (S t (k+1)) cid) = Done" using step \k < length t\ assms(1) recv_marker_means_cs_Done by blast then have "snd (cs final cid) = Done" using \Marker \ set (msgs (S t (length t)) cid)\ all_processes_snapshotted_in_final_state assms(1) assms(2) final_is_s_t_len_t snapshotted_and_not_done_implies_marker_in_channel by fastforce then show False using cs_not_done by simp qed lemma cs_NotStarted_implies_empty_cs: shows "\ trace init t final; channel cid = Some (p, q); i < length t; ~ has_snapshotted (S t i) q \ \ cs (S t i) cid = ([], NotStarted)" by (simp add: no_initial_channel_snapshot no_recording_cs_if_not_snapshotted) lemma fst_changed_by_recv_recording_trace: assumes "i < j" and "j \ length t" and "trace init t final" and "fst (cs (S t i) cid) \ fst (cs (S t j) cid)" and "channel cid = Some (p, q)" shows "\k. i \ k \ k < j \ (\p q u u' m. t ! k = Recv cid q p u u' m) \ (snd (cs (S t k) cid) = Recording)" (is ?P) proof (rule ccontr) assume "~ ?P" have "\ i < j; j \ length t; ~ ?P; trace init t final; channel cid = Some (p, q) \ \ fst (cs (S t i) cid) = fst (cs (S t j) cid)" proof (induct "j - i" arbitrary: i) case 0 then show ?case by linarith next case (Suc n) then have step: "(S t i) \ t ! i \ (S t (Suc i))" using step_Suc by auto then have "fst (cs (S t (Suc i)) cid) = fst (cs (S t i) cid)" by (metis Suc.prems(1) Suc.prems(3) assms(5) fst_cs_changed_by_recv_recording le_eq_less_or_eq) also have "fst (cs (S t (Suc i)) cid) = fst (cs (S t j) cid)" proof - have "j - Suc i = n" using Suc by simp moreover have "~ (\k. (Suc i) \ k \ k < j \ (\p q u u' m. t ! k = Recv cid q p u u' m) \ (snd (cs (S t k) cid) = Recording))" using \~ ?P\ Suc.prems(3) Suc_leD by blast ultimately show ?thesis using Suc by (metis Suc_lessI) qed finally show ?case by simp qed then show False using assms \~ ?P\ by blast qed lemma cs_not_nil_implies_postrecording_event: assumes "trace init t final" and "fst (cs (S t i) cid) \ []" and "i \ length t" and "channel cid = Some (p, q)" shows "\j. 
j < i \ postrecording_event t j" proof - have "fst (cs init cid) = []" using no_initial_channel_snapshot by auto then have diff_cs: "fst (cs (S t 0) cid) \ fst (cs (S t i) cid)" using assms(1) assms(2) init_is_s_t_0 by auto moreover have "0 < i" proof (rule ccontr) assume "~ 0 < i" then have "0 = i" by auto then have "fst (cs (S t 0) cid) = fst (cs (S t i) cid)" by blast then show False using diff_cs by simp qed ultimately obtain j where "j < i" and Recv: "\p q u u' m. t ! j = Recv cid q p u u' m" "snd (cs (S t j) cid) = Recording" using assms(1) assms(3) assms(4) fst_changed_by_recv_recording_trace by blast then have "has_snapshotted (S t j) q" using assms(1) assms(4) cs_recording_implies_snapshot by blast moreover have "regular_event (t ! j)" using Recv by auto moreover have "occurs_on (t ! j) = q" proof - have "can_occur (t ! j) (S t j)" by (meson Suc_le_eq \j < i\ assms(1) assms(3) happen_implies_can_occur le_trans step_Suc) then show ?thesis using Recv Recv_given_channel assms(4) by force qed ultimately have "postrecording_event t j" unfolding postrecording_event using \j < i\ assms(3) by simp then show ?thesis using \j < i\ by auto qed subsubsection \Relating process states\ lemma snapshot_state_must_have_been_reached: assumes "trace init t final" and "ps final p = Some u" and "~ has_snapshotted (S t i) p" and "has_snapshotted (S t (i+1)) p" and "i < length t" shows "states (S t i) p = u" proof (rule ccontr) assume "states (S t i) p \ u" then have "ps (S t (i+1)) p \ Some u" using assms(1) assms(3) snapshot_state by force then have "ps final p \ Some u" by (metis One_nat_def Suc_leI add.right_neutral add_Suc_right assms(1) assms(3) assms(4) assms(5) final_is_s_t_len_t order_refl snapshot_state snapshot_state_unchanged_trace_2) then show False using assms by simp qed lemma ps_after_all_prerecording_events: assumes "trace init t final" and "\i'. i' \ i \ ~ prerecording_event t i'" and "\j'. j' < i \ ~ postrecording_event t j'" shows "ps_equal_to_snapshot (S t i) final" proof (unfold ps_equal_to_snapshot_def, rule allI) fix p show "Some (states (S t i) p) = ps final p" proof (rule ccontr) obtain s where "ps final p = Some s \ ps final p = None" by auto moreover assume "Some (states (S t i) p) \ ps final p" ultimately have "ps final p = None \ states (S t i) p \ s" by auto then show False proof (elim disjE) assume "ps final p = None" then show False using assms all_processes_snapshotted_in_final_state by blast next assume st: "states (S t i) p \ s" then obtain j where "~ has_snapshotted (S t j) p \ has_snapshotted (S t (j+1)) p" using Suc_eq_plus1 assms(1) exists_snapshot_for_all_p by presburger then show False proof (cases "has_snapshotted (S t i) p") case False then have "j \ i" by (metis Suc_eq_plus1 \\ ps (S t j) p \ None \ ps (S t (j + 1)) p \ None\ assms(1) not_less_eq_eq snapshot_stable_ver_3) let ?t = "take (j-i) (drop i t)" have "\ev. ev \ set ?t \ regular_event ev \ occurs_on ev = p" proof (rule ccontr) assume "~ (\ev. 
ev \ set ?t \ regular_event ev \ occurs_on ev = p)" moreover have "trace (S t i) ?t (S t j)" using \i \ j\ assms(1) exists_trace_for_any_i_j by blast ultimately have "states (S t j) p = states (S t i) p" using no_state_change_if_only_nonregular_events st by blast then show False by (metis \\ ps (S t j) p \ None \ ps (S t (j + 1)) p \ None\ \ps final p = Some s \ ps final p = None\ assms(1) final_is_s_t_len_t computation.all_processes_snapshotted_in_final_state computation.snapshot_stable_ver_3 computation_axioms linorder_not_le snapshot_state_must_have_been_reached st) qed then obtain ev where "ev \ set ?t \ regular_event ev \ occurs_on ev = p" by blast then obtain k where t_ind: "0 \ k \ k < length ?t \ ev = ?t ! k" by (metis in_set_conv_nth not_le not_less_zero) moreover have "length ?t \ j - i" by simp ultimately have "?t ! k = (drop i t) ! k" using less_le_trans nth_take by blast also have "... = t ! (k+i)" by (metis \ev \ set (take (j - i) (drop i t)) \ regular_event ev \ occurs_on ev = p\ add.commute drop_eq_Nil length_greater_0_conv length_pos_if_in_set nat_le_linear nth_drop take_eq_Nil) finally have "?t ! k = t ! (k+i)" by simp have "prerecording_event t (k+i)" proof - have "regular_event (?t ! k)" using \ev \ set (take (j - i) (drop i t)) \ regular_event ev \ occurs_on ev = p\ t_ind by blast moreover have "occurs_on (?t ! k) = p" using \ev \ set (take (j - i) (drop i t)) \ regular_event ev \ occurs_on ev = p\ t_ind by blast moreover have "~ has_snapshotted (S t (k+i)) p" proof - have "k+i \ j" using \length (take (j - i) (drop i t)) \ j - i\ t_ind by linarith show ?thesis using \\ ps (S t j) p \ None \ ps (S t (j + 1)) p \ None\ \k+i \ j\ assms(1) snapshot_stable_ver_3 by blast qed ultimately show ?thesis using \take (j - i) (drop i t) ! k = t ! (k + i)\ prerecording_event t_ind by auto qed then show False using assms by auto next case True have "j < i" proof (rule ccontr) assume "~ j < i" then have "j \ i" by simp moreover have "~ has_snapshotted (S t j) p" using \\ ps (S t j) p \ None \ ps (S t (j + 1)) p \ None\ by blast moreover have "trace (S t i) (take (j - i) (drop i t)) (S t j)" using assms(1) calculation(1) exists_trace_for_any_i_j by blast ultimately have "~ has_snapshotted (S t i) p" using snapshot_stable by blast then show False using True by simp qed let ?t = "take (i-j) (drop j t)" have "\ev. ev \ set ?t \ regular_event ev \ occurs_on ev = p" proof (rule ccontr) assume "~ (\ev. ev \ set ?t \ regular_event ev \ occurs_on ev = p)" moreover have "trace (S t j) ?t (S t i)" using \j < i\ assms(1) exists_trace_for_any_i_j less_imp_le by blast ultimately have "states (S t j) p = states (S t i) p" using no_state_change_if_only_nonregular_events by auto moreover have "states (S t j) p = s" by (metis \\ ps (S t j) p \ None \ ps (S t (j + 1)) p \ None\ \ps final p = Some s \ ps final p = None\ assms(1) final_is_s_t_len_t computation.all_processes_snapshotted_in_final_state computation.snapshot_stable_ver_3 computation_axioms linorder_not_le snapshot_state_must_have_been_reached) ultimately show False using \states (S t i) p \ s\ by simp qed then obtain ev where ev: "ev \ set ?t \ regular_event ev \ occurs_on ev = p" by blast then obtain k where t_ind: "0 \ k \ k < length ?t \ ev = ?t ! k" by (metis in_set_conv_nth le0) have "length ?t \ i - j" by simp have "?t ! k = (drop j t) ! k" using t_ind by auto also have "... = t ! 
(k + j)" by (metis \ev \ set (take (i - j) (drop j t)) \ regular_event ev \ occurs_on ev = p\ add.commute drop_eq_Nil length_greater_0_conv length_pos_if_in_set nat_le_linear nth_drop take_eq_Nil) finally have "?t ! k = t ! (k+j)" by simp have "postrecording_event t (k+j)" proof - have "trace (S t j) (take k (drop j t)) (S t (k+j))" by (metis add_diff_cancel_right' assms(1) exists_trace_for_any_i_j le_add_same_cancel2 t_ind) then have "has_snapshotted (S t (k+j)) p" by (metis Suc_eq_plus1 Suc_leI \\ ps (S t j) p \ None \ ps (S t (j + 1)) p \ None\ \take (i - j) (drop j t) ! k = t ! (k + j)\ assms(1) drop_eq_Nil ev computation.snapshot_stable_ver_3 computation_axioms le_add_same_cancel2 length_greater_0_conv length_pos_if_in_set linorder_not_le order_le_less regular_event_preserves_process_snapshots step_Suc t_ind take_eq_Nil) then show ?thesis using \take (i - j) (drop j t) ! k = t ! (k + j)\ ev postrecording_event t_ind by auto qed moreover have "k + j < i" using \length (take (i - j) (drop j t)) \ i - j\ t_ind by linarith ultimately show False using assms(3) by simp qed qed qed qed subsubsection \Relating channel states\ lemma cs_when_recording: shows "\ i < j; j \ length t; trace init t final; has_snapshotted (S t i) p; snd (cs (S t i) cid) = Recording; snd (cs (S t j) cid) = Done; channel cid = Some (p, q) \ \ map Msg (fst (cs (S t j) cid)) = map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid)" proof (induct "j - (i+1)" arbitrary: i) case 0 then have "j = i+1" by simp then have step: "(S t i) \ (t ! i) \ (S t j)" using "0.prems" step_Suc by simp then have rm: "\q p. t ! i = RecvMarker cid q p" using done_only_from_recv_marker "0.prems" by force then have RecvMarker: "t ! i = RecvMarker cid q p" by (metis "0.prems"(7) RecvMarker_given_channel event.collapse(5) event.disc(25) event.inject(5) happen_implies_can_occur local.step) then have "takeWhile ((\) Marker) (msgs (S t i) cid) = []" proof - have "can_occur (t ! i) (S t i)" using happen_implies_can_occur step by simp then show ?thesis proof - have "\p ms. takeWhile p ms = [] \ p (hd ms::'c message)" by (metis (no_types) hd_append2 hd_in_set set_takeWhileD takeWhile_dropWhile_id) then show ?thesis using \can_occur (t ! i) (S t i)\ can_occur_def rm by fastforce qed qed then show ?case using local.step rm by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis Suc_eq_plus1 less_SucI nat_induct_at_least step_Suc) have ib: "i+1 < j \ j \ length t \ has_snapshotted (S t (i+1)) p \ snd (cs (S t j) cid) = Done" using Suc.hyps(2) Suc.prems(2) Suc.prems(4) Suc.prems(6) local.step snapshot_state_unchanged by auto have snap_q: "has_snapshotted (S t i) q" using Suc(7) Suc.prems(3) Suc cs_recording_implies_snapshot by blast then show ?case proof (cases "t ! i") case (Snapshot r) then have "r \ p" using Suc.prems(4) can_occur_def local.step by auto then have "msgs (S t (i+1)) cid = msgs (S t i) cid" using Snapshot local.step Suc.prems(7) by auto moreover have "cs (S t (i+1)) cid = cs (S t i) cid" proof - have "r \ q" using Snapshot can_occur_def snap_q step by auto then show ?thesis using Snapshot local.step Suc.prems(7) by auto qed ultimately show ?thesis using Suc ib by force next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then have "takeWhile ((\) Marker) (msgs (S t i) cid) = []" proof - have "can_occur (t ! i) (S t i)" using happen_implies_can_occur step by simp then show ?thesis proof - have "\p ms. 
takeWhile p ms = [] \ p (hd ms::'c message)" by (metis (no_types) hd_append2 hd_in_set set_takeWhileD takeWhile_dropWhile_id) then show ?thesis using RecvMarker True \can_occur (t ! i) (S t i)\ can_occur_def by fastforce qed qed moreover have "snd (cs (S t (i+1)) cid) = Done" using RecvMarker Suc.prems(1) Suc.prems(2) Suc.prems(3) True recv_marker_means_cs_Done by auto moreover have "fst (cs (S t i) cid) = fst (cs (S t (i+1)) cid)" using RecvMarker True local.step by auto ultimately show ?thesis by (metis Suc.prems(1) Suc.prems(2) Suc.prems(3) Suc.prems(7) Suc_eq_plus1 Suc_leI append_Nil2 cs_done_implies_same_snapshots) next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using RecvMarker step Suc False by auto next case False with RecvMarker step Suc \cid \ cid'\ show ?thesis by (cases "s = p", auto) qed moreover have "cs (S t i) cid = cs (S t (i+1)) cid" proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using RecvMarker step Suc False by auto next case no_snap: False then show ?thesis proof (cases "r = q") case True then show ?thesis using snap_q no_snap \r = q\ by simp next case False then show ?thesis using RecvMarker step Suc no_snap False \cid \ cid'\ by simp qed qed ultimately show ?thesis using Suc ib by simp qed next case (Trans r u u') then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step by auto moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Trans by auto ultimately show ?thesis using Suc ib by simp next case (Send cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True have marker_in_msgs: "Marker \ set (msgs (S t i) cid)" proof - have "has_snapshotted (S t i) p" using Suc by simp moreover have "i < length t" using Suc.prems(1) Suc.prems(2) less_le_trans by blast moreover have "snd (cs (S t i) cid) \ Done" using Suc by simp ultimately show ?thesis using snapshotted_and_not_done_implies_marker_in_channel less_imp_le using Suc by blast qed then have "takeWhile ((\) Marker) (msgs (S t i) cid) = takeWhile ((\) Marker) (msgs (S t (i+1)) cid)" proof - have "butlast (msgs (S t (i+1)) cid) = msgs (S t i) cid" using step True Send by auto then show ?thesis proof - have "takeWhile ((\) Marker) (msgs (S t i) cid @ [last (msgs (S t (i + 1)) cid)]) = takeWhile ((\) Marker) (msgs (S t i) cid)" using marker_in_msgs by force then show ?thesis by (metis (no_types) \butlast (msgs (S t (i + 1)) cid) = msgs (S t i) cid\ append_butlast_last_id in_set_butlastD length_greater_0_conv length_pos_if_in_set marker_in_msgs) qed qed moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Send by auto ultimately show ?thesis using ib Suc by simp next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Send by auto moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Send by auto ultimately show ?thesis using Suc ib by simp qed next case (Recv cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have msgs_ip1: "Msg m # msgs (S t (i+1)) cid = msgs (S t i) cid" using Suc Recv step by auto moreover have cs_ip1: "cs (S t (i+1)) cid = (fst (cs (S t i) cid) @ [m], Recording)" using True Suc Recv step by auto ultimately show ?thesis proof - have "map Msg (fst (cs (S t j) cid)) = map Msg (fst (cs (S t (i+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (i+1)) cid)" using Suc ib cs_ip1 by force moreover have "map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid) = map Msg (fst (cs (S t (i+1)) 
cid)) @ takeWhile ((\) Marker) (msgs (S t (i+1)) cid)" proof - have "takeWhile ((\) Marker) (Msg m # msgs (S t (i+1)) cid) = Msg m # takeWhile ((\) Marker) (msgs (S t (i + 1)) cid)" by auto then have "takeWhile ((\) Marker) (msgs (S t i) cid) = Msg m # takeWhile ((\) Marker) (msgs (S t (i + 1)) cid)" by (metis msgs_ip1) then show ?thesis using cs_ip1 by auto qed ultimately show ?thesis by simp qed next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Recv by auto moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Recv False by auto ultimately show ?thesis using Suc ib by simp qed qed qed lemma cs_when_recording_2: shows "\ i \ j; trace init t final; ~ has_snapshotted (S t i) p; \k. i \ k \ k < j \ ~ occurs_on (t ! k) = p; snd (cs (S t i) cid) = Recording; channel cid = Some (p, q) \ \ map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid) \ snd (cs (S t j) cid) = Recording" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis Suc_eq_plus1 all_processes_snapshotted_in_final_state distributed_system.step_Suc distributed_system_axioms computation.final_is_s_t_len_t computation_axioms linorder_not_le snapshot_stable_ver_3) have ib: "i+1 \ j \ ~ has_snapshotted (S t (i+1)) p \ (\k. (i+1) \ k \ k < j \ ~ occurs_on (t ! k) = p) \ j - (i+1) = n" by (metis Suc.hyps(2) Suc.prems(1) Suc.prems(3) Suc.prems(4) diff_Suc_1 diff_diff_left Suc_eq_plus1 Suc_leD Suc_le_eq Suc_neq_Zero cancel_comm_monoid_add_class.diff_cancel le_neq_implies_less le_refl local.step no_state_change_if_no_event) have snap_q: "has_snapshotted (S t i) q" using Suc.prems(5,6) Suc.prems(2) cs_recording_implies_snapshot by blast then show ?case proof (cases "t ! 
i") case (Snapshot r) then have "r \ p" using Suc by auto then have "msgs (S t (i+1)) cid = msgs (S t i) cid" using Snapshot local.step Suc.prems(6) by auto moreover have "cs (S t (i+1)) cid = cs (S t i) cid" proof - have "r \ q" using step can_occur_def Snapshot snap_q by auto then show ?thesis using Snapshot step Suc by simp qed ultimately show ?thesis using Suc ib by auto next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then have "Marker \ set (msgs (S t i) cid)" using RecvMarker RecvMarker_implies_Marker_in_set local.step by blast then have "has_snapshotted (S t i) p" using Suc.prems(2) no_marker_if_no_snapshot Suc by blast then show ?thesis using Suc.prems by simp next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using RecvMarker step Suc False by auto next case False with RecvMarker step Suc \cid \ cid'\ show ?thesis by (cases "s = p", auto) qed moreover have "cs (S t i) cid = cs (S t (i+1)) cid" proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using RecvMarker step Suc False by auto next case no_snap: False then show ?thesis proof (cases "r = q") case True then show ?thesis using snap_q no_snap \r = q\ by simp next case False then show ?thesis using RecvMarker step Suc no_snap False \cid \ cid'\ by simp qed qed ultimately show ?thesis using Suc ib by auto qed next case (Trans r u u') then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step by auto moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Trans by auto ultimately show ?thesis using Suc ib by auto next case (Send cid' r s u u' m) then have "r \ p" using Suc.hyps(2) Suc.prems(4) Suc by auto have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "channel cid = channel cid'" by auto then have "(p, q) = (r, s)" using can_occur_def step Send Suc by auto then show False using \r \ p\ by simp qed then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Send by simp moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Send by auto ultimately show ?thesis using Suc ib by auto next case (Recv cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have msgs_ip1: "Msg m # msgs (S t (i+1)) cid = msgs (S t i) cid" using Suc Recv step by auto moreover have cs_ip1: "cs (S t (i+1)) cid = (fst (cs (S t i) cid) @ [m], Recording)" using True Suc Recv step by auto ultimately show ?thesis proof - have "map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t (i+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (i+1)) cid) \ snd (cs (S t j) cid) = Recording" using Suc ib cs_ip1 by auto moreover have "map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid) = map Msg (fst (cs (S t (i+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (i+1)) cid)" proof - have "takeWhile ((\) Marker) (Msg m # msgs (S t (i + 1)) cid) = Msg m # takeWhile ((\) Marker) (msgs (S t (i + 1)) cid)" by fastforce then have "takeWhile ((\) Marker) (msgs (S t i) cid) = Msg m # takeWhile ((\) Marker) (msgs (S t (i + 1)) cid)" by (metis msgs_ip1) then show ?thesis using cs_ip1 by force qed ultimately show ?thesis using cs_ip1 by simp qed next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Recv by auto moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using step Recv False by auto ultimately show ?thesis using Suc ib by auto qed qed qed lemma cs_when_recording_3: shows "\ i \ j; trace 
init t final; ~ has_snapshotted (S t i) q; \k. i \ k \ k < j \ ~ occurs_on (t ! k) = q; snd (cs (S t i) cid) = NotStarted; has_snapshotted (S t i) p; Marker : set (msgs (S t i) cid); channel cid = Some (p, q) \ \ map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid) \ snd (cs (S t j) cid) = NotStarted" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis Suc_eq_plus1 all_processes_snapshotted_in_final_state distributed_system.step_Suc distributed_system_axioms computation.final_is_s_t_len_t computation_axioms linorder_not_le snapshot_stable_ver_3) have ib: "i+1 \ j \ ~ has_snapshotted (S t (i+1)) q \ has_snapshotted (S t (i+1)) p \ (\k. (i+1) \ k \ k < j \ ~ occurs_on (t ! k) = q) \ j - (i+1) = n \ Marker : set (msgs (S t (i+1)) cid) \ cs (S t i) cid = cs (S t (i+1)) cid" proof - have "i+1 \ j \ ~ has_snapshotted (S t (i+1)) q \ (\k. (i+1) \ k \ k < j \ ~ occurs_on (t ! k) = q) \ j - (i+1) = n" by (metis Suc.hyps(2) Suc.prems(1) Suc.prems(3) Suc.prems(4) diff_Suc_1 diff_diff_left Suc_eq_plus1 Suc_leD Suc_le_eq Suc_neq_Zero cancel_comm_monoid_add_class.diff_cancel le_neq_implies_less le_refl local.step no_state_change_if_no_event) moreover have "has_snapshotted (S t (i+1)) p" using Suc.prems(6) local.step snapshot_state_unchanged by auto moreover have "Marker : set (msgs (S t (i+1)) cid)" using Suc calculation(1) local.step recv_marker_means_snapshotted_2 by blast moreover have "cs (S t i) cid = cs (S t (i+1)) cid" using Suc calculation(1) no_recording_cs_if_not_snapshotted by auto ultimately show ?thesis by simp qed then show ?case proof (cases "t ! i") case (Snapshot r) then have "r \ q" using Suc by auto then have "takeWhile ((\) Marker) (msgs (S t (i+1)) cid) = takeWhile ((\) Marker) (msgs (S t i) cid)" proof (cases "occurs_on (t ! 
i) = p") case True then show ?thesis by (metis (mono_tags, lifting) Snapshot Suc.prems(6) distributed_system.can_occur_def event.sel(4) event.simps(29) computation_axioms computation_def happen_implies_can_occur local.step) next case False then have "msgs (S t (i+1)) cid = msgs (S t i) cid" using Snapshot local.step Suc by auto then show ?thesis by simp qed then show ?thesis using Suc ib by metis next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then have "snd (cs (S t i) cid) = Done" by (metis RecvMarker Suc.prems(2) Suc_eq_plus1 Suc_le_eq exactly_one_snapshot computation.no_change_if_ge_length_t computation.recv_marker_means_cs_Done computation.snapshot_stable_ver_2 computation_axioms ib nat_le_linear) then show ?thesis using Suc.prems by simp next case False then have "takeWhile ((\) Marker) (msgs (S t i) cid) = takeWhile ((\) Marker) (msgs (S t (i+1)) cid)" proof (cases "has_snapshotted (S t i) r") case True with RecvMarker False step show ?thesis by auto next case no_snap: False then have "r \ p" using Suc.prems(6) by auto then show ?thesis using no_snap RecvMarker step Suc.prems False by auto qed then show ?thesis using Suc ib by metis qed next case (Trans r u u') then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step by auto then show ?thesis using Suc ib by auto next case (Send cid' r s u u' m) then have "r \ q" using Suc.hyps(2) Suc.prems(4) by auto have marker: "Marker \ set (msgs (S t i) cid)" using Suc by simp with step Send marker have "takeWhile ((\) Marker) (msgs (S t i) cid) = takeWhile ((\) Marker) (msgs (S t (i+1)) cid)" by (cases "cid = cid'", auto) then show ?thesis using Suc ib by auto next case (Recv cid' r s u u' m) then have "cid' \ cid" by (metis Suc.hyps(2) Suc.prems(4) Suc.prems(8) distributed_system.can_occur_Recv distributed_system_axioms event.sel(3) happen_implies_can_occur local.step option.inject order_refl prod.inject zero_less_Suc zero_less_diff) then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Recv Suc by simp then show ?thesis using Suc ib by auto qed qed lemma at_most_one_marker: shows "\ trace init t final; channel cid = Some (p, q) \ \ Marker \ set (msgs (S t i) cid) \ (\!j. j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j = Marker)" proof (induct i) case 0 then show ?case using no_initial_Marker init_is_s_t_0 by auto next case (Suc i) then show ?case proof (cases "i < length t") case False then show ?thesis by (metis Suc.prems(1) final_is_s_t_len_t computation.no_change_if_ge_length_t computation_axioms le_refl less_imp_le_nat no_marker_left_in_final_state not_less_eq) next case True then have step: "(S t i) \ (t ! i) \ (S t (Suc i))" using step_Suc Suc.prems by blast moreover have "Marker \ set (msgs (S t i) cid) \ (\!j. j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j = Marker)" using Suc.hyps Suc.prems(1) Suc.prems(2) by linarith moreover have "Marker \ set (msgs (S t (Suc i)) cid) \ (\!j. j < length (msgs (S t (Suc i)) cid) \ msgs (S t (Suc i)) cid ! j = Marker)" proof (cases "Marker \ set (msgs (S t i) cid)") case no_marker: True then show ?thesis proof (cases "t ! 
i") case (Snapshot r) then show ?thesis proof (cases "r = p") case True then have new_msgs: "msgs (S t (Suc i)) cid = msgs (S t i) cid @ [Marker]" using step Snapshot Suc by auto then show ?thesis using util_exactly_one_element no_marker by fastforce next case False then show ?thesis using Snapshot local.step no_marker Suc by auto qed next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then show ?thesis using RecvMarker RecvMarker_implies_Marker_in_set local.step no_marker by blast next case False then show ?thesis proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using RecvMarker step Suc False by simp next case no_snap: False then show ?thesis proof (cases "r = p") case True then have "msgs (S t (i+1)) cid = msgs (S t i) cid @ [Marker]" using RecvMarker step Suc.prems no_snap \cid \ cid'\ by simp then show ?thesis proof - assume a1: "msgs (S t (i + 1)) cid = msgs (S t i) cid @ [Marker]" { fix nn :: "nat \ nat" have "\ms m. \n. \na. ((m::'c message) \ set ms \ n < length (ms @ [m])) \ (m \ set ms \ (ms @ [m]) ! n = m) \ (\ na < length (ms @ [m]) \ (ms @ [m]) ! na \ m \ m \ set ms \ na = n)" by (metis (no_types) util_exactly_one_element) then have "\n. n < length (msgs (S t (Suc i)) cid) \ nn n = n \ msgs (S t (Suc i)) cid ! n = Marker \ n < length (msgs (S t (Suc i)) cid) \ msgs (S t (Suc i)) cid ! n = Marker \ \ nn n < length (msgs (S t (Suc i)) cid) \ n < length (msgs (S t (Suc i)) cid) \ msgs (S t (Suc i)) cid ! n = Marker \ msgs (S t (Suc i)) cid ! nn n \ Marker" using a1 by (metis Suc_eq_plus1 no_marker) } then show ?thesis by (metis (no_types)) qed next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using RecvMarker step Suc.prems \cid \ cid'\ no_snap by simp then show ?thesis using Suc by simp qed qed qed next case (Trans r u u') then show ?thesis using no_marker step by auto next case (Send cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have "Marker \ set (msgs (S t (Suc i)) cid)" using step no_marker Send by auto then show ?thesis by simp next case False then have "Marker \ set (msgs (S t (Suc i)) cid)" using step no_marker Send by auto then show ?thesis by simp qed next case (Recv cid' r s u u' m) with step no_marker Recv show ?thesis by (cases "cid = cid'", auto) qed next case False then have asm: "\!j. j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j = Marker" using Suc by simp have len_filter: "length (filter ((=) Marker) (msgs (S t i) cid)) = 1" by (metis False \Marker \ set (msgs (S t i) cid) \ (\!j. j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j = Marker)\ exists_one_iff_filter_one) have snap_p: "has_snapshotted (S t i) p" using False Suc.prems no_marker_if_no_snapshot by blast show ?thesis proof (cases "t ! i") case (Snapshot r) have "r \ p" proof (rule ccontr) assume "~ r \ p" moreover have "can_occur (t ! i) (S t i)" using happen_implies_can_occur step by blast ultimately show False using snap_p can_occur_def Snapshot by auto qed then have "msgs (S t (Suc i)) cid = msgs (S t i) cid" using step Snapshot Suc by auto then show ?thesis using asm by simp next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then have "Marker # msgs (S t (Suc i)) cid = msgs (S t i) cid" using RecvMarker step by auto then have "Marker \ set (msgs (S t (Suc i)) cid)" proof - have "\j. j \ 0 \ j < length (msgs (S t i) cid) \ msgs (S t i) cid ! 
j \ Marker" by (metis False \Marker # msgs (S t (Suc i)) cid = msgs (S t i) cid\ asm length_pos_if_in_set nth_Cons_0) then show ?thesis proof - assume a1: "\j. j \ 0 \ j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j \ Marker" have "\ms n. ms \ msgs (S t i) cid \ length (msgs (S t (Suc i)) cid) \ n \ length ms = Suc n" by (metis \Marker # msgs (S t (Suc i)) cid = msgs (S t i) cid\ length_Suc_conv) then show ?thesis using a1 by (metis (no_types) Suc_mono Zero_not_Suc \Marker # msgs (S t (Suc i)) cid = msgs (S t i) cid\ in_set_conv_nth nth_Cons_Suc) qed qed then show ?thesis by simp next case cid_neq_cid': False then show ?thesis proof (cases "has_snapshotted (S t i) r") case True then have "msgs (S t (Suc i)) cid = msgs (S t i) cid" using cid_neq_cid' RecvMarker local.step snap_p by auto then show ?thesis using asm by simp next case False then have "r \ p" using snap_p by blast then have "msgs (S t (Suc i)) cid = msgs (S t i) cid" using cid_neq_cid' RecvMarker step False Suc by auto then show ?thesis using asm by simp qed qed next case (Trans r u u') then show ?thesis using step asm by auto next case (Send cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have new_messages: "msgs (S t (Suc i)) cid = msgs (S t i) cid @ [Msg m]" using step Send by auto then have "\!j. j < length (msgs (S t (Suc i)) cid) \ msgs (S t (Suc i)) cid ! j = Marker" proof - have "length (filter ((=) Marker) (msgs (S t (Suc i)) cid)) = length (filter ((=) Marker) (msgs (S t i) cid)) + length (filter ((=) Marker) [Msg m])" by (simp add: new_messages) then have "length (filter ((=) Marker) (msgs (S t (Suc i)) cid)) = 1" using len_filter by simp then show ?thesis using exists_one_iff_filter_one by metis qed then show ?thesis by simp next case False then show ?thesis using step Send asm by auto qed next case (Recv cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have new_msgs: "Msg m # msgs (S t (Suc i)) cid = msgs (S t i) cid" using step Recv by auto then show ?thesis proof - have "length (filter ((=) Marker) (msgs (S t i) cid)) = length (filter ((=) Marker) [Msg m]) + length (filter ((=) Marker) (msgs (S t (Suc i)) cid))" by (metis append_Cons append_Nil filter_append len_filter length_append new_msgs) then have "length (filter ((=) Marker) (msgs (S t (Suc i)) cid)) = 1" using len_filter by simp then show ?thesis using exists_one_iff_filter_one by metis qed next case False then show ?thesis using step Recv asm by auto qed qed qed then show ?thesis by simp qed qed lemma last_changes_implies_send_when_msgs_nonempty: assumes "trace init t final" and "msgs (S t i) cid \ []" and "msgs (S t (i+1)) cid \ []" and "last (msgs (S t i) cid) = Marker" and "last (msgs (S t (i+1)) cid) \ Marker" and "channel cid = Some (p, q)" shows "(\u u' m. t ! i = Send cid p q u u' m)" proof - have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis Suc_eq_plus1_left add.commute assms(1) assms(4) assms(5) le_Suc_eq nat_le_linear nat_less_le no_change_if_ge_length_t step_Suc) then show ?thesis proof (cases "t ! 
i") case (Snapshot r) then show ?thesis by (metis assms(4) assms(5) last_snoc local.step next_snapshot) next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then have "last (msgs (S t i) cid) = last (msgs (S t (i+1)) cid)" proof - have "Marker # msgs (S t (i + 1)) cid = msgs (S t i) cid" using RecvMarker local.step True by auto then show ?thesis by (metis assms(3) last_ConsR) qed then show ?thesis using assms by simp next case no_snap: False then have "last (msgs (S t i) cid) = last (msgs (S t (i+1)) cid)" proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using RecvMarker step no_snap by simp next case False with RecvMarker step no_snap \cid \ cid'\ assms show ?thesis by (cases "p = r", auto) qed then show ?thesis using assms by simp qed next case (Trans r u u') then show ?thesis using assms(4) assms(5) local.step by auto next case (Send cid' r s u u' m) then have "cid = cid'" by (metis (no_types, hide_lams) assms(4) assms(5) local.step next_send) moreover have "(p, q) = (r, s)" proof - have "channel cid = channel cid'" using \cid = cid'\ by simp moreover have "channel cid = Some (p, q)" using assms by simp moreover have "channel cid' = Some (r, s)" using Send step can_occur_def by auto ultimately show ?thesis by simp qed ultimately show ?thesis using Send by auto next case (Recv cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have "last (msgs (S t i) cid) = last (msgs (S t (i+1)) cid)" by (metis (no_types, lifting) Recv assms(3) assms(4) last_ConsR local.step next_recv) then show ?thesis using assms by simp next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using Recv step by auto then show ?thesis using assms by simp qed qed qed lemma no_marker_after_RecvMarker: assumes "trace init t final" and "(S t i) \ RecvMarker cid p q \ (S t (i+1))" and "channel cid = Some (q, p)" shows "Marker \ set (msgs (S t (i+1)) cid)" proof - have new_msgs: "msgs (S t i) cid = Marker # msgs (S t (i+1)) cid" using assms(2) by auto have one_marker: "\!j. j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j = Marker" by (metis assms(1,3) at_most_one_marker list.set_intros(1) new_msgs) then obtain j where "j < length (msgs (S t i) cid)" "msgs (S t i) cid ! j = Marker" by blast then have "j = 0" using one_marker new_msgs by auto then have "\j. j \ 0 \ j < length (msgs (S t i) cid) \ msgs (S t i) cid ! j \ Marker" using one_marker using \j < length (msgs (S t i) cid)\ \msgs (S t i) cid ! j = Marker\ by blast then have "\j. j < length (msgs (S t (i+1)) cid) \ msgs (S t (i+1)) cid ! j \ Marker" by (metis One_nat_def Suc_eq_plus1 Suc_le_eq Suc_mono le_zero_eq list.size(4) new_msgs not_less0 nth_Cons_Suc) then show ?thesis by (simp add: in_set_conv_nth) qed lemma no_marker_and_snapshotted_implies_no_more_markers_trace: shows "\ trace init t final; i \ j; j \ length t; has_snapshotted (S t i) p; Marker \ set (msgs (S t i) cid); channel cid = Some (p, q) \ \ Marker \ set (msgs (S t j) cid)" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! 
i) \ (S t (i+1))" by (metis (no_types, hide_lams) Suc_eq_plus1 cancel_comm_monoid_add_class.diff_cancel distributed_system.step_Suc distributed_system_axioms less_le_trans linorder_not_less old.nat.distinct(2) order_eq_iff) then have "Marker \ set (msgs (S t (i+1)) cid)" using no_marker_and_snapshotted_implies_no_more_markers Suc step by blast moreover have "has_snapshotted (S t (i+1)) p" using Suc.prems(4) local.step snapshot_state_unchanged by auto ultimately show ?case proof - assume a1: "ps (S t (i + 1)) p \ None" assume a2: "Marker \ set (msgs (S t (i + 1)) cid)" have f3: "j \ i" using Suc.hyps(2) by force have "j - Suc i = n" by (metis (no_types) Suc.hyps(2) Suc.prems(2) add.commute add_Suc_right add_diff_cancel_left' le_add_diff_inverse) then show ?thesis using f3 a2 a1 by (metis Suc.hyps(1) Suc.prems(1) Suc.prems(2) Suc.prems(3) Suc.prems(6) Suc_eq_plus1_left add.commute less_Suc_eq linorder_not_less) qed qed lemma marker_not_vanishing_means_always_present: shows "\ trace init t final; i \ j; j \ length t; Marker : set (msgs (S t i) cid); Marker : set (msgs (S t j) cid); channel cid = Some (p, q) \ \ \k. i \ k \ k \ j \ Marker : set (msgs (S t k) cid)" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis (no_types, lifting) Suc_eq_plus1 add_lessD1 distributed_system.step_Suc distributed_system_axioms le_add_diff_inverse order_le_less zero_less_Suc zero_less_diff) have "Marker : set (msgs (S t (i+1)) cid)" proof (rule ccontr) assume asm: "~ Marker : set (msgs (S t (i+1)) cid)" have snap_p: "has_snapshotted (S t i) p" using Suc.prems(1) Suc.prems(4,6) no_marker_if_no_snapshot by blast then have "has_snapshotted (S t (i+1)) p" using local.step snapshot_state_unchanged by auto then have "Marker \ set (msgs (S t j) cid)" by (metis Suc.hyps(2) Suc.prems(1) Suc.prems(3) Suc.prems(6) asm discrete no_marker_and_snapshotted_implies_no_more_markers_trace zero_less_Suc zero_less_diff) then show False using Suc.prems by simp qed then show ?case by (meson Suc.prems(1) Suc.prems(3) Suc.prems(4) Suc.prems(5) Suc.prems(6) computation.snapshot_stable_ver_3 computation_axioms no_marker_and_snapshotted_implies_no_more_markers_trace no_marker_if_no_snapshot) qed lemma last_stays_if_no_recv_marker_and_no_send: shows "\ trace init t final; i < j; j \ length t; last (msgs (S t i) cid) = Marker; Marker : set (msgs (S t i) cid); Marker : set (msgs (S t j) cid); \k. i \ k \ k < j \ ~ (\u u' m. t ! k = Send cid p q u u' m); channel cid = Some (p, q) \ \ last (msgs (S t j) cid) = Marker" proof (induct "j - (i+1)" arbitrary: i) case 0 then have "j = i+1" by simp then have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis "0"(2) "0.prems"(2) "0.prems"(3) Suc_eq_plus1 distributed_system.step_Suc distributed_system_axioms less_le_trans) have "Marker = last (msgs (S t (i+1)) cid)" proof (rule ccontr) assume "~ Marker = last (msgs (S t (i+1)) cid)" then have "\u u' m. t ! i = Send cid p q u u' m" proof - have "msgs (S t (i+1)) cid \ []" using "0" \j = i+1\ by auto moreover have "msgs (S t i) cid \ []" using "0" by auto ultimately show ?thesis using "0.prems"(1) "0.prems"(4) "0.prems"(8) \Marker \ last (msgs (S t (i + 1)) cid)\ last_changes_implies_send_when_msgs_nonempty by auto qed then show False using 0 by auto qed then show ?case using \j = i+1\ by simp next case (Suc n) then have step: "(S t i) \ (t ! 
i) \ (S t (i+1))" by (metis (no_types, hide_lams) Suc_eq_plus1 distributed_system.step_Suc distributed_system_axioms less_le_trans) have marker_present: "Marker : set (msgs (S t (i+1)) cid)" by (meson Suc.prems(1) Suc.prems(2) Suc.prems(3) Suc.prems(5) Suc.prems(6) Suc.prems(8) discrete le_add1 less_imp_le_nat marker_not_vanishing_means_always_present) moreover have "Marker = last (msgs (S t (i+1)) cid)" proof (rule ccontr) assume asm: "~ Marker = last (msgs (S t (i+1)) cid)" then have "\u u' m. t ! i = Send cid p q u u' m" proof - have "msgs (S t (i+1)) cid \ []" using marker_present by auto moreover have "msgs (S t i) cid \ []" using Suc by auto ultimately show ?thesis using Suc.prems(1) Suc.prems(4) Suc.prems(8) asm last_changes_implies_send_when_msgs_nonempty by auto qed then show False using Suc by auto qed moreover have "\k. i+1 \ k \ k < j \ ~ (\u u' m. t ! k = Send cid p q u u' m)" using Suc.prems by force moreover have "i+1 < j" using Suc by auto moreover have "j \ length t" using Suc by auto moreover have "trace init t final" using Suc by auto moreover have "Marker : set (msgs (S t j) cid)" using Suc by auto ultimately show ?case using Suc by (metis diff_Suc_1 diff_diff_left) qed lemma last_changes_implies_send_when_msgs_nonempty_trace: assumes "trace init t final" "i < j" "j \ length t" "Marker : set (msgs (S t i) cid)" "Marker : set (msgs (S t j) cid)" "last (msgs (S t i) cid) = Marker" "last (msgs (S t j) cid) \ Marker" "channel cid = Some (p, q)" shows "\k u u' m. i \ k \ k < j \ t ! k = Send cid p q u u' m" proof (rule ccontr) assume "~ (\k u u' m. i \ k \ k < j \ t ! k = Send cid p q u u' m)" then have "\k. i \ k \ k < j \ ~ (\u u' m. t ! k = Send cid p q u u' m)" by blast then have "last (msgs (S t j) cid) = Marker" using assms last_stays_if_no_recv_marker_and_no_send by blast then show False using assms by simp qed lemma msg_after_marker_and_nonempty_implies_postrecording_event: assumes "trace init t final" and "Marker : set (msgs (S t i) cid)" and "Marker \ last (msgs (S t i) cid)" and "i \ length t" and "channel cid = Some (p, q)" shows "\j. j < i \ postrecording_event t j" (is ?P) proof - let ?len = "length (msgs (S t i) cid)" have "?len \ 0" using assms(2) by auto have snap_p_i: "has_snapshotted (S t i) p" using assms no_marker_if_no_snapshot by blast obtain j where snap_p: "~ has_snapshotted (S t j) p" "has_snapshotted (S t (j+1)) p" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p) have "j < i" by (meson assms(1) computation.snapshot_stable_ver_2 computation_axioms not_less snap_p(1) snap_p_i) have step_snap: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis Suc_eq_plus1 assms(1) l2 nat_le_linear nat_less_le snap_p(1) snapshot_stable_ver_2 step_Suc) have re: "~ regular_event (t ! j)" by (meson distributed_system.regular_event_cannot_induce_snapshot distributed_system_axioms snap_p(1) snap_p(2) step_snap) have op: "occurs_on (t ! j) = p" using no_state_change_if_no_event snap_p(1) snap_p(2) step_snap by force have marker_last: "Marker = last (msgs (S t (j+1)) cid) \ msgs (S t (j+1)) cid \ []" proof - have "isSnapshot (t ! j) \ isRecvMarker (t ! j)" using re nonregular_event by auto then show ?thesis proof (elim disjE, goal_cases) case 1 then have "t ! j = Snapshot p" using op by auto then show ?thesis using step_snap assms by auto next case 2 then obtain cid' r where RecvMarker: "t ! 
j = RecvMarker cid' p r" by (metis event.collapse(5) op) then have "cid \ cid'" using RecvMarker_implies_Marker_in_set assms(1) assms(5) no_marker_if_no_snapshot snap_p(1) step_snap by blast then show ?thesis using assms snap_p(1) step_snap RecvMarker by auto qed qed then have "\k u u' m. j+1 \ k \ k < i \ t ! k = Send cid p q u u' m" proof - have "j+1 < i" proof - have "(S t (j+1)) \ (S t i)" using assms(3) marker_last by auto then have "j+1 \ i" by auto moreover have "j+1 \ i" using \j < i\ by simp ultimately show ?thesis by simp qed moreover have "trace init t final" using assms by simp moreover have "Marker = last (msgs (S t (j+1)) cid)" using marker_last by simp moreover have "Marker : set (msgs (S t (j+1)) cid)" using marker_last by (simp add: marker_last) ultimately show ?thesis using assms last_changes_implies_send_when_msgs_nonempty_trace by simp qed then obtain k where Send: "\u u' m. j+1 \ k \ k < i \ t ! k = Send cid p q u u' m" by blast then have "postrecording_event t k" proof - have "k < length t" using Send assms by simp moreover have "isSend (t ! k)" using Send by auto moreover have "has_snapshotted (S t k) p" using Send snap_p using assms(1) snapshot_stable_ver_3 by blast moreover have "occurs_on (t ! k) = p" using Send by auto ultimately show ?thesis unfolding postrecording_event by simp qed then show ?thesis using Send by auto qed lemma same_messages_if_no_occurrence_trace: shows "\ trace init t final; i \ j; j \ length t; (\k. i \ k \ k < j \ occurs_on (t ! k) \ p \ occurs_on (t ! k) \ q); channel cid = Some (p, q) \ \ msgs (S t i) cid = msgs (S t j) cid \ cs (S t i) cid = cs (S t j) cid" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" by (metis (no_types, hide_lams) Suc_eq_plus1 Suc_n_not_le_n diff_self_eq_0 distributed_system.step_Suc distributed_system_axioms le0 le_eq_less_or_eq less_le_trans) then have "msgs (S t i) cid = msgs (S t (i+1)) cid \ cs (S t i) cid = cs (S t (i+1)) cid" proof - have "~ occurs_on (t ! i) = p" using Suc by simp moreover have "~ occurs_on (t ! i) = q" using Suc by simp ultimately show ?thesis using step Suc same_messages_if_no_occurrence by blast qed moreover have "msgs (S t (i+1)) cid = msgs (S t j) cid \ cs (S t (i+1)) cid = cs (S t j) cid" proof - have "i+1 \ j" using Suc by linarith moreover have "\k. i+1 \ k \ k < j \ occurs_on (t ! k) \ p \ occurs_on (t ! 
k) \ q" using Suc by force ultimately show ?thesis using Suc by auto qed ultimately show ?case by simp qed lemma snapshot_step_cs_preservation_p: assumes "c \ ev \ c'" and "~ regular_event ev" and "occurs_on ev = p" and "channel cid = Some (p, q)" shows "map Msg (fst (cs c cid)) @ takeWhile ((\) Marker) (msgs c cid) = map Msg (fst (cs c' cid)) @ takeWhile ((\) Marker) (msgs c' cid) \ snd (cs c cid) = snd (cs c' cid)" proof - have "isSnapshot ev \ isRecvMarker ev" using assms nonregular_event by blast then show ?thesis proof (elim disjE, goal_cases) case 1 then have Snap: "ev = Snapshot p" by (metis event.collapse(4) assms(3)) then have "fst (cs c cid) = fst (cs c' cid)" using assms(1) assms(2) regular_event same_cs_if_not_recv by blast moreover have "takeWhile ((\) Marker) (msgs c cid) = takeWhile ((\) Marker) (msgs c' cid)" proof - have "msgs c' cid = msgs c cid @ [Marker]" using assms Snap by auto then show ?thesis by (simp add: takeWhile_tail) qed moreover have "snd (cs c cid) = snd (cs c' cid)" using Snap assms no_self_channel by fastforce ultimately show ?thesis by simp next case 2 then obtain cid' r where RecvMarker: "ev = RecvMarker cid' p r" by (metis event.collapse(5) assms(3)) have "cid \ cid'" by (metis "2" RecvMarker assms(1) assms(4) distributed_system.RecvMarker_given_channel distributed_system.happen_implies_can_occur distributed_system_axioms event.sel(5,10) no_self_channel) then have "fst (cs c cid) = fst (cs c' cid)" using RecvMarker assms(1) assms(2) regular_event same_cs_if_not_recv by blast moreover have "takeWhile ((\) Marker) (msgs c cid) = takeWhile ((\) Marker) (msgs c' cid)" proof (cases "has_snapshotted c p") case True then have "msgs c cid = msgs c' cid" using RecvMarker \cid \ cid'\ assms by auto then show ?thesis by simp next case False then have "msgs c' cid = msgs c cid @ [Marker]" using RecvMarker \cid \ cid'\ assms by auto then show ?thesis by (simp add: takeWhile_tail) qed moreover have "snd (cs c cid) = snd (cs c' cid)" proof (cases "has_snapshotted c p") case True then have "cs c cid = cs c' cid" using RecvMarker \cid \ cid'\ assms by simp then show ?thesis by simp next case False then show ?thesis using RecvMarker \cid \ cid'\ assms(1) assms(4) no_self_channel by auto qed ultimately show ?thesis by simp qed qed lemma snapshot_step_cs_preservation_q: assumes "c \ ev \ c'" and "~ regular_event ev" and "occurs_on ev = q" and "channel cid = Some (p, q)" and "Marker \ set (msgs c cid)" and "~ has_snapshotted c q" shows "map Msg (fst (cs c cid)) @ takeWhile ((\) Marker) (msgs c cid) = map Msg (fst (cs c' cid)) @ takeWhile ((\) Marker) (msgs c' cid) \ snd (cs c' cid) = Recording" proof - have "isSnapshot ev \ isRecvMarker ev" using assms nonregular_event by blast then show ?thesis proof (elim disjE, goal_cases) case 1 then have Snapshot: "ev = Snapshot q" by (metis event.collapse(4) assms(3)) then have "fst (cs c cid) = fst (cs c' cid)" using assms(1) assms(2) regular_event same_cs_if_not_recv by blast moreover have "takeWhile ((\) Marker) (msgs c cid) = takeWhile ((\) Marker) (msgs c' cid)" proof - have "msgs c' cid = msgs c cid" using assms Snapshot by (metis distributed_system.next_snapshot distributed_system_axioms eq_fst_iff no_self_channel option.inject) then show ?thesis by simp qed moreover have "snd (cs c' cid) = Recording" using assms Snapshot by auto ultimately show ?thesis by simp next case 2 then obtain cid' r where RecvMarker: "ev = RecvMarker cid' q r" by (metis event.collapse(5) assms(3)) have "cid \ cid'" using RecvMarker 
RecvMarker_implies_Marker_in_set assms(1) assms(5) by blast have "fst (cs c cid) = fst (cs c' cid)" using assms(1) assms(2) regular_event same_cs_if_not_recv by blast moreover have "takeWhile ((\) Marker) (msgs c cid) = takeWhile ((\) Marker) (msgs c' cid)" proof - have "\r. channel cid = Some (q, r)" using assms(4) no_self_channel by auto with RecvMarker assms \cid \ cid'\ have "msgs c cid = msgs c' cid" by (cases "has_snapshotted c r", auto) then show ?thesis by simp qed moreover have "snd (cs c' cid) = Recording" using assms RecvMarker \cid \ cid'\ by simp ultimately show ?thesis by simp qed qed lemma Marker_in_channel_implies_not_done: assumes "trace init t final" and "Marker : set (msgs (S t i) cid)" and "channel cid = Some (p, q)" and "i \ length t" shows "snd (cs (S t i) cid) \ Done" proof (rule ccontr) assume is_done: "~ snd (cs (S t i) cid) \ Done" let ?t = "take i t" have tr: "trace init ?t (S t i)" using assms(1) exists_trace_for_any_i by blast have "\q p. RecvMarker cid q p \ set ?t" by (metis (mono_tags, lifting) assms(3) distributed_system.trace.simps distributed_system_axioms done_only_from_recv_marker_trace computation.no_initial_channel_snapshot computation_axioms is_done list.discI recording_state.simps(4) snd_conv tr) then obtain j where RecvMarker: "\q p. t ! j = RecvMarker cid q p" "j < i" by (metis (no_types, lifting) assms(4) in_set_conv_nth length_take min.absorb2 nth_take) then have step_j: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis Suc_eq_plus1 assms(1) distributed_system.step_Suc distributed_system_axioms assms(4) less_le_trans) then have "t ! j = RecvMarker cid q p" by (metis RecvMarker(1) RecvMarker_given_channel assms(3) event.disc(25) event.sel(10) happen_implies_can_occur) then have "Marker \ set (msgs (S t (j+1)) cid)" using assms(1) assms(3) no_marker_after_RecvMarker step_j by presburger moreover have "has_snapshotted (S t (j+1)) p" using Suc_eq_plus1 \t ! j = RecvMarker cid q p\ assms(1) recv_marker_means_snapshotted snapshot_state_unchanged step_j by presburger ultimately have "Marker \ set (msgs (S t i) cid)" by (metis RecvMarker(2) Suc_eq_plus1 Suc_leI assms(1,3) assms(4) no_marker_and_snapshotted_implies_no_more_markers_trace) then show False using assms by simp qed lemma keep_empty_if_no_events: shows "\ trace init t final; i \ j; j \ length t; msgs (S t i) cid = []; has_snapshotted (S t i) p; channel cid = Some (p, q); \k. i \ k \ k < j \ regular_event (t ! k) \ ~ occurs_on (t ! k) = p \ \ msgs (S t j) cid = []" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" proof - have "i < length t" using Suc.hyps(2) Suc.prems(3) by linarith then show ?thesis by (metis (full_types) Suc.prems(1) Suc_eq_plus1 step_Suc) qed have "msgs (S t (i+1)) cid = []" proof (cases "t ! i") case (Snapshot r) have "r \ p" proof (rule ccontr) assume "~ r \ p" moreover have "can_occur (t ! 
i) (S t i)" using happen_implies_can_occur local.step by blast ultimately show False using can_occur_def Snapshot Suc by simp qed then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using Snapshot local.step Suc by auto then show ?thesis using Suc by simp next case (RecvMarker cid' r s) have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "msgs (S t i) cid \ []" by (metis RecvMarker length_greater_0_conv linorder_not_less list.size(3) local.step nat_less_le recv_marker_other_channels_not_shrinking) then show False using Suc by simp qed then show ?thesis proof (cases "has_snapshotted (S t i) r") case True then have "msgs (S t (i+1)) cid = msgs (S t i) cid" using RecvMarker Suc step \cid \ cid'\ by auto then show ?thesis using Suc by simp next case False have "r \ p" using False Suc.prems(5) by blast then show ?thesis using RecvMarker Suc step \cid \ cid'\ False by simp qed next case (Trans r u u') then show ?thesis using Suc step by simp next case (Send cid' r s u u' m) have "r \ p" proof (rule ccontr) assume "~ r \ p" then have "occurs_on (t ! i) = p \ regular_event (t ! i)" using Send by simp moreover have "i \ i \ i < j" using Suc by simp ultimately show False using Suc.prems by blast qed have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "channel cid = channel cid'" by auto then have "channel cid' = Some (r, s)" using Send step can_occur_def by simp then show False using Suc \r \ p\ \~ cid \ cid'\ by auto qed then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Send Suc by simp then show ?thesis using Suc by simp next case (Recv cid' r s u u' m) have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "msgs (S t i) cid \ []" using Recv local.step by auto then show False using Suc by simp qed then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using Recv step by auto then show ?thesis using Suc by simp qed moreover have "\k. i+1 \ k \ k < j \ regular_event (t ! k) \ ~ occurs_on (t ! k) = p" using Suc by simp moreover have "has_snapshotted (S t (i+1)) p" by (meson Suc.prems(1) Suc.prems(5) discrete less_not_refl nat_le_linear snapshot_stable_ver_3) moreover have "i+1 \ j" using Suc by simp moreover have "j \ length t" using Suc by simp moreover have "j - (i+1) = n" using Suc by linarith ultimately show ?case using Suc by blast qed lemma last_unchanged_or_empty_if_no_events: shows "\ trace init t final; i \ j; j \ length t; msgs (S t i) cid \ []; last (msgs (S t i) cid) = Marker; has_snapshotted (S t i) p; channel cid = Some (p, q); \k. i \ k \ k < j \ regular_event (t ! k) \ ~ occurs_on (t ! k) = p \ \ msgs (S t j) cid = [] \ (msgs (S t j) cid \ [] \ last (msgs (S t j) cid) = Marker)" proof (induct "j - i" arbitrary: i) case 0 then show ?case by auto next case (Suc n) then have step: "(S t i) \ (t ! i) \ (S t (i+1))" proof - have "i < length t" using Suc.hyps(2) Suc.prems(3) by linarith then show ?thesis by (metis (full_types) Suc.prems(1) Suc_eq_plus1 step_Suc) qed have msgs_s_ip1: "msgs (S t (i+1)) cid = [] \ (msgs (S t (i+1)) cid \ [] \ last (msgs (S t (i+1)) cid) = Marker)" proof (cases "t ! i") case (Snapshot r) have "r \ p" proof (rule ccontr) assume "~ r \ p" moreover have "can_occur (t ! 
i) (S t i)" using happen_implies_can_occur local.step by blast ultimately show False using can_occur_def Snapshot Suc by simp qed then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using Snapshot local.step Suc by auto then show ?thesis using Suc by simp next case (RecvMarker cid' r s) then show ?thesis proof (cases "cid = cid'") case True then have "msgs (S t (i+1)) cid = []" proof - have "Marker # msgs (S t (i+1)) cid = msgs (S t i) cid" using RecvMarker True local.step by auto then show ?thesis proof - assume a1: "Marker # msgs (S t (i + 1)) cid = msgs (S t i) cid" have "i < j" by (metis (no_types) Suc.hyps(2) Suc.prems(2) Suc_neq_Zero diff_is_0_eq le_neq_implies_less) then have "i < length t" using Suc.prems(3) less_le_trans by blast then show ?thesis using a1 by (metis (no_types) Marker_in_channel_implies_not_done RecvMarker Suc.prems(1) Suc.prems(5) Suc.prems(7) Suc_eq_plus1 Suc_le_eq True last_ConsR last_in_set recv_marker_means_cs_Done) qed qed then show ?thesis by simp next case False then show ?thesis proof (cases "has_snapshotted (S t i) r") case True then show ?thesis using False RecvMarker Suc.prems(5) local.step by auto next case False then have "r \ p" using Suc.prems(6) by blast with RecvMarker False Suc.prems step \cid \ cid'\ show ?thesis by auto qed qed next case (Trans r u u') then show ?thesis using Suc step by simp next case (Send cid' r s u u' m) have "r \ p" proof (rule ccontr) assume "~ r \ p" then have "occurs_on (t ! i) = p \ regular_event (t ! i)" using Send by simp moreover have "i \ i \ i < j" using Suc by simp ultimately show False using Suc.prems by blast qed have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "channel cid = channel cid'" by auto then have "channel cid' = Some (r, s)" using Send step can_occur_def by simp then show False using Suc \r \ p\ \~ cid \ cid'\ by auto qed then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using step Send by simp then show ?thesis using Suc by simp next case (Recv cid' r s u u' m) then show ?thesis proof (cases "cid = cid'") case True then have "msgs (S t i) cid = Msg m # msgs (S t (i+1)) cid" using Recv local.step by auto then have "last (msgs (S t (i+1)) cid) = Marker" by (metis Suc.prems(5) last.simps message.simps(3)) then show ?thesis by blast next case False then have "msgs (S t i) cid = msgs (S t (i+1)) cid" using Recv step by auto then show ?thesis using Suc by simp qed qed then show ?case proof (elim disjE, goal_cases) case 1 moreover have "trace init t final" using Suc by simp moreover have "i+1 \ j" using Suc by simp moreover have "j \ length t" using Suc by simp moreover have "has_snapshotted (S t (i+1)) p" using Suc.prems(6) local.step snapshot_state_unchanged by auto moreover have "j - (i+1) = n" using Suc by linarith moreover have "\k. i+1 \ k \ k < j \ regular_event (t ! k) \ ~ occurs_on (t ! k) = p" using Suc by auto ultimately have "msgs (S t j) cid = []" using keep_empty_if_no_events Suc.prems(7) by blast then show ?thesis by simp next case 2 moreover have "trace init t final" using Suc by simp moreover have "i+1 \ j" using Suc by simp moreover have "j \ length t" using Suc by simp moreover have "has_snapshotted (S t (i+1)) p" using Suc.prems(6) local.step snapshot_state_unchanged by auto moreover have "j - (i+1) = n" using Suc by linarith moreover have "\k. i+1 \ k \ k < j \ regular_event (t ! k) \ ~ occurs_on (t ! 
k) = p" using Suc by auto ultimately show ?thesis using Suc.prems(7) Suc.hyps by blast qed qed lemma cs_after_all_prerecording_events: assumes "trace init t final" and "\i'. i' \ i \ ~ prerecording_event t i'" and "\j'. j' < i \ ~ postrecording_event t j'" and "i \ length t" shows "cs_equal_to_snapshot (S t i) final" proof (unfold cs_equal_to_snapshot_def, rule allI, rule impI) fix cid assume "channel cid \ None" then obtain p q where chan: "channel cid = Some (p, q)" by auto have cs_done: "snd (cs (S t (length t)) cid) = Done" using chan all_channels_done_in_final_state assms(1) final_is_s_t_len_t by blast have "filter ((\) Marker) (msgs (S t i) cid) = takeWhile ((\) Marker) (msgs (S t i) cid)" (is ?B) proof (rule ccontr) let ?m = "msgs (S t i) cid" assume "~ ?B" then obtain j k where range: "j < k" "k < length ?m" and "?m ! j = Marker" "?m ! k \ Marker" using filter_neq_takeWhile by metis then have "Marker \ set ?m" by (metis less_trans nth_mem) moreover have "last ?m \ Marker" proof - have "\l. l < length ?m \ l \ j \ ?m ! l \ Marker" using chan \j < k\ \k < length (msgs (S t i) cid)\ \msgs (S t i) cid ! j = Marker\ assms(1) at_most_one_marker calculation less_trans by blast moreover have "last ?m = ?m ! (length ?m - 1)" by (metis \Marker \ set (msgs (S t i) cid)\ empty_iff last_conv_nth list.set(1)) moreover have "length ?m - 1 \ j" using range by auto ultimately show ?thesis using range by auto qed moreover have "i \ length t" using chan assms(1) calculation(1) computation.exists_next_marker_free_state computation.no_change_if_ge_length_t computation_axioms nat_le_linear by fastforce ultimately have "\j. j < i \ postrecording_event t j" using chan assms(1) msg_after_marker_and_nonempty_implies_postrecording_event by auto then show False using assms by auto qed moreover have "takeWhile ((\) Marker) (msgs (S t i) cid) = map Msg (fst (cs final cid))" proof (cases "snd (cs (S t i) cid)") case NotStarted text \show that q and p have to snapshot, and then reduce it to the case below depending on the order they snapshotted in\ have nsq: "~ has_snapshotted (S t i) q" using NotStarted chan assms(1) cs_in_initial_state_implies_not_snapshotted by auto obtain j where snap_q: "~ has_snapshotted (S t j) q" "has_snapshotted (S t (j+1)) q" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p) have step_q: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis \\ ps (S t j) q \ None\ add.commute assms(1) le_SucI le_eq_less_or_eq le_refl linorder_neqE_nat no_change_if_ge_length_t plus_1_eq_Suc snap_q step_Suc) obtain k where snap_p: "~ has_snapshotted (S t k) p" "has_snapshotted (S t (k+1)) p" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p) have bound: "i \ j" proof (rule ccontr) assume "~ i \ j" then have "i \ j+1" by simp then have "has_snapshotted (S t i) q" by (meson assms(1) computation.snapshot_stable_ver_3 computation_axioms snap_q(2)) then show False using nsq by simp qed have step_p: "(S t k) \ (t ! k) \ (S t (k+1))" by (metis \\ ps (S t k) p \ None\ add.commute assms(1) le_SucI le_eq_less_or_eq le_refl linorder_neqE_nat no_change_if_ge_length_t plus_1_eq_Suc snap_p step_Suc) have oq: "occurs_on (t ! j) = q" proof (rule ccontr) assume "~ occurs_on (t ! j) = q" then have "has_snapshotted (S t j) q = has_snapshotted (S t (j+1)) q" using no_state_change_if_no_event step_q by auto then show False using snap_q by blast qed have op: "occurs_on (t ! k) = p" proof (rule ccontr) assume "~ occurs_on (t ! 
k) = p" then have "has_snapshotted (S t k) p = has_snapshotted (S t (k+1)) p" using no_state_change_if_no_event step_p by auto then show False using snap_p by blast qed have "p \ q" using chan no_self_channel by blast then have "j \ k" using oq op event_occurs_on_unique by blast show ?thesis proof (cases "j < k") case True then have "msgs (S t i) cid = msgs (S t j) cid \ cs (S t i) cid = cs (S t j) cid" proof - have "\k. i \ k \ k < j \ occurs_on (t ! k) \ p \ occurs_on (t ! k) \ q" (is ?Q) proof (rule ccontr) assume "~ ?Q" then obtain l where range: "i \ l" "l < j" and "occurs_on (t ! l) = p \ occurs_on (t ! l) = q" by blast then show False proof (elim disjE, goal_cases) case 1 then show ?thesis proof (cases "regular_event (t ! l)") case True have "l < k" using range \j < k\ by simp have "~ has_snapshotted (S t l) p" using snap_p(1) range \j < k\ snapshot_stable_ver_3 assms(1) by simp then have "prerecording_event t l" using True "1" prerecording_event using s_def snap_q(1) snap_q(2) by fastforce then show False using assms range by blast next case False then have step_l: "(S t l) \ t ! l \ (S t (l+1))" by (metis (no_types, lifting) Suc_eq_plus1 Suc_lessD True assms(1) distributed_system.step_Suc distributed_system_axioms less_trans_Suc linorder_not_le local.range(2) s_def snap_p(1) snap_p(2) take_all) then have "has_snapshotted (S t (l+1)) p" using False nonregular_event_induces_snapshot by (metis "1"(3) snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI True assms(1) less_imp_le_nat local.range(2) snap_p(1) snapshot_stable_ver_3) qed next case 2 then show ?thesis proof (cases "regular_event (t ! l)") case True have "~ has_snapshotted (S t l) q" using snap_q(1) range \j < k\ snapshot_stable_ver_3 assms(1) by simp then have "prerecording_event t l" using True "2" prerecording_event using s_def snap_q(2) by fastforce then show False using assms range by blast next case False then have step_l: "(S t l) \ t ! l \ (S t (l+1))" by (metis (no_types, lifting) Suc_eq_plus1 Suc_lessD True assms(1) distributed_system.step_Suc distributed_system_axioms less_trans_Suc linorder_not_le local.range(2) s_def snap_p(1) snap_p(2) take_all) then have "has_snapshotted (S t (l+1)) q" using False nonregular_event_induces_snapshot by (metis "2"(3) snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI assms(1) range(2) snap_q(1) snapshot_stable_ver_3) qed qed qed moreover have "j \ length t" proof (rule ccontr) assume "~ j \ length t" then have "S t j = S t (j+1)" using no_change_if_ge_length_t assms by simp then show False using snap_q by auto qed ultimately show ?thesis using chan same_messages_if_no_occurrence_trace assms less_imp_le bound by blast qed moreover have "map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t (j+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (j+1)) cid) \ snd (cs (S t (j+1)) cid) = Recording" proof - have "~ regular_event (t ! 
j)" using snap_q using regular_event_cannot_induce_snapshot step_q by blast moreover have "Marker \ set (msgs (S t j) cid)" by (meson chan True assms(1) computation.no_marker_if_no_snapshot computation.snapshot_stable_ver_2 computation_axioms less_imp_le_nat snap_p(1)) ultimately show ?thesis using oq snapshot_step_cs_preservation_q step_q chan snap_q(1) by blast qed moreover have "map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (j+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (j+1)) cid)" proof - have "snd (cs (S t (j+1)) cid) = Recording" using calculation by simp moreover have "\a. j+1 \ a \ a < k \ ~ occurs_on (t ! a) = p" (is ?R) proof (rule ccontr) assume "~ ?R" then obtain a where "j+1 \ a" "a < k" and ocp: "occurs_on (t ! a) = p" by blast have "a < length t" proof - have "k < length t" proof (rule ccontr) assume "~ k < length t" then have "S t k = S t (k+1)" using assms(1) no_change_if_ge_length_t by auto then show False using snap_p by auto qed then show ?thesis using \a < k\ by simp qed then show False proof (cases "regular_event (t ! a)") case True have "~ has_snapshotted (S t a) p" by (meson \a < k\ assms(1) computation.snapshot_stable_ver_2 computation_axioms less_imp_le_nat snap_p(1)) then have "prerecording_event t a" using \a < length t\ ocp True prerecording_event by simp then show False using \j+1 \ a\ \j \ i\ assms by auto next case False then have "(S t a) \ (t ! a) \ (S t (a+1))" using \a < length t\ assms(1) step_Suc by auto then have "has_snapshotted (S t (a+1)) p" by (metis False ocp nonregular_event_induces_snapshot snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI \a < k\ assms(1) snap_p(1) snapshot_stable_ver_3) qed qed moreover have "~ has_snapshotted (S t (j+1)) p" by (metis Suc_eq_plus1 Suc_le_eq True assms(1) computation.snapshot_stable_ver_2 computation_axioms snap_p(1)) ultimately show ?thesis using chan cs_when_recording_2 True assms(1) by auto qed moreover have "map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (k+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k+1)) cid)" proof - have "\ regular_event (t ! k)" using regular_event_preserves_process_snapshots snap_p(1) snap_p(2) step_p by force then show ?thesis using chan computation.snapshot_step_cs_preservation_p computation_axioms op step_p by fastforce qed moreover have "map Msg (fst (cs (S t (k+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k+1)) cid) = map Msg (fst (cs final cid))" proof - have f1: "\f p pa pb c ca es n a na. \ computation f p pa pb (c::('a, 'b, 'c) configuration) ca \ \ distributed_system.trace f p pa pb c es ca \ ps (distributed_system.s f p pa pb c es n) a = None \ \ n \ na \ ps (distributed_system.s f p pa pb c es na) a \ None" by (meson computation.snapshot_stable_ver_2) have f2: "computation channel trans send recv init (S t (length t))" using assms(1) final_is_s_t_len_t computation_axioms by blast have f3: "trace init t (S t (length t))" using assms(1) final_is_s_t_len_t by blast have f4: "ps (S t k) p = None" by (meson snap_p(1)) then have f5: "k < length t" using f3 f2 f1 by (metis le_eq_less_or_eq not_le s_def snap_p(2) take_all) have "\ regular_event (t ! 
k)" using f4 by (meson distributed_system.regular_event_cannot_induce_snapshot distributed_system_axioms snap_p(2) step_p) then have f6: "map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (k + 1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k + 1)) cid) \ snd (cs (S t k) cid) = snd (cs (S t (k + 1)) cid)" using chan computation.snapshot_step_cs_preservation_p computation_axioms op step_p by fastforce then have f7: "snd (cs (S t (k + 1)) cid) \ Done" using f5 f4 by (metis (no_types) assms(1) chan cs_done_implies_both_snapshotted(1)) have "j + 1 \ k + 1" using True by linarith then have "snd (cs (S t (k + 1)) cid) = Recording" using f7 f3 f2 f1 by (meson chan computation.cs_in_initial_state_implies_not_snapshotted recording_state.exhaust snap_q(2)) then show ?thesis using f6 f5 by (metis (no_types) Suc_eq_plus1 Suc_leI assms(1) chan cs_done cs_done_implies_both_snapshotted(1) cs_when_recording final_is_s_t_len_t le_eq_less_or_eq snap_p(2)) qed ultimately show ?thesis by (metis (no_types, lifting) chan Nil_is_map_conv assms(1) computation.no_initial_channel_snapshot computation_axioms fst_conv no_recording_cs_if_not_snapshotted self_append_conv2 snap_q(1)) next case False then have "k < j" using \j \ k\ False by simp then have "map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid) = map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid)" proof (cases "i \ k") case True then have "msgs (S t i) cid = msgs (S t k) cid \ cs (S t i) cid = cs (S t k) cid" proof - have "\j. i \ j \ j < k \ occurs_on (t ! j) \ p \ occurs_on (t ! j) \ q" (is ?Q) proof (rule ccontr) assume "~ ?Q" then obtain l where range: "i \ l" "l < k" and "occurs_on (t ! l) = p \ occurs_on (t ! l) = q" by blast then show False proof (elim disjE, goal_cases) case 1 then show ?thesis proof (cases "regular_event (t ! l)") case True have "l < k" using range \k < j\ by simp have "~ has_snapshotted (S t l) p" using snap_p(1) range \k < j\ snapshot_stable_ver_3 assms(1) by simp then have "prerecording_event t l" using True "1" prerecording_event using s_def snap_p by fastforce then show False using assms range by blast next case False then have step_l: "(S t l) \ t ! l \ (S t (l+1))" by (metis (no_types, lifting) Suc_eq_plus1 Suc_lessD assms(1) distributed_system.step_Suc distributed_system_axioms less_trans_Suc linorder_not_le local.range(2) s_def snap_p(1) snap_p(2) take_all) then have "has_snapshotted (S t (l+1)) p" using False nonregular_event_induces_snapshot by (metis "1"(3) snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI assms(1) local.range(2) snap_p(1) snapshot_stable_ver_3) qed next case 2 then show ?thesis proof (cases "regular_event (t ! l)") case True have "~ has_snapshotted (S t l) p" using snap_p(1) range \k < j\ snapshot_stable_ver_3 assms(1) by simp moreover have "l < length t" using \k < j\ local.range(2) s_def snap_q(1) snap_q(2) by force ultimately have "prerecording_event t l" using True "2" prerecording_event proof - have "l \ j" by (meson False \l < k\ less_trans not_less) then show ?thesis by (metis (no_types) True \l < length t\ \occurs_on (t ! l) = q\ assms(1) computation.prerecording_event computation.snapshot_stable_ver_2 computation_axioms snap_q(1)) qed then show False using assms range by blast next case False then have step_l: "(S t l) \ t ! 
l \ (S t (l+1))" by (metis (no_types, lifting) Suc_eq_plus1 Suc_lessD assms(1) distributed_system.step_Suc distributed_system_axioms less_trans_Suc linorder_not_le local.range(2) s_def snap_p(1) snap_p(2) take_all) then have "has_snapshotted (S t (l+1)) q" using False nonregular_event_induces_snapshot by (metis "2"(3) snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI \k < j\ assms(1) less_imp_le_nat local.range(2) snap_q(1) snapshot_stable_ver_3) qed qed qed moreover have "k \ length t" proof (rule ccontr) assume "~ k \ length t" then have "S t k = S t (k+1)" using no_change_if_ge_length_t assms by simp then show False using snap_p by auto qed ultimately show ?thesis using chan same_messages_if_no_occurrence_trace assms True less_imp_le by auto qed moreover have "map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (k+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k+1)) cid) \ snd (cs (S t (k+1)) cid) = NotStarted" proof - have "~ regular_event (t ! k)" using snap_p using regular_event_cannot_induce_snapshot step_p by blast then show ?thesis using calculation op snapshot_step_cs_preservation_p step_p chan NotStarted by auto qed moreover have "map Msg (fst (cs (S t (k+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k+1)) cid) = map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid)" proof - have "\a. k+1 \ a \ a < j \ ~ occurs_on (t ! a) = q" (is ?R) proof (rule ccontr) assume "~ ?R" then obtain a where "k+1 \ a" "a < j" and ocp: "occurs_on (t ! a) = q" by blast have "a < length t" proof - have "j < length t" proof (rule ccontr) assume "~ j < length t" then have "S t j = S t (j+1)" using assms(1) no_change_if_ge_length_t by auto then show False using snap_q by auto qed then show ?thesis using \a < j\ by simp qed then show False proof (cases "regular_event (t ! a)") case True have "~ has_snapshotted (S t a) q" by (meson \a < j\ assms(1) computation.snapshot_stable_ver_2 computation_axioms less_imp_le_nat snap_q(1)) then have "prerecording_event t a" using \a < length t\ ocp True prerecording_event by simp then show False using \k+1 \ a\ \k \ i\ assms by auto next case False then have "(S t a) \ (t ! 
a) \ (S t (a+1))" using \a < length t\ assms(1) step_Suc by auto then have "has_snapshotted (S t (a+1)) q" by (metis False ocp nonregular_event_induces_snapshot snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI \a < j\ assms(1) snap_q(1) snapshot_stable_ver_3) qed qed moreover have "Marker : set (msgs (S t (k+1)) cid)" using chan \map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (k + 1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k + 1)) cid) \ snd (cs (S t (k + 1)) cid) = NotStarted\ assms(1) cs_in_initial_state_implies_not_snapshotted marker_must_stay_if_no_snapshot snap_p(2) by blast moreover have "has_snapshotted (S t (k+1)) p" using snap_p(2) by blast moreover have "~ has_snapshotted (S t (k+1)) q" using chan \map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (k + 1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k + 1)) cid) \ snd (cs (S t (k + 1)) cid) = NotStarted\ assms(1) cs_in_initial_state_implies_not_snapshotted by blast moreover have "k+1 \ j" using \k < j\ by auto moreover have "trace init t final" using assms by simp moreover have "snd (cs (S t (k+1)) cid) = NotStarted" using \map Msg (fst (cs (S t k) cid)) @ takeWhile ((\) Marker) (msgs (S t k) cid) = map Msg (fst (cs (S t (k + 1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (k + 1)) cid) \ snd (cs (S t (k + 1)) cid) = NotStarted\ by blast ultimately show ?thesis using cs_when_recording_3 chan by simp qed ultimately show ?thesis by simp next case False show ?thesis proof - have "has_snapshotted (S t i) p" by (metis False Suc_eq_plus1 assms(1) not_less_eq_eq snap_p(2) snapshot_stable_ver_3) moreover have "~ has_snapshotted (S t i) q" using nsq by auto moreover have "Marker : set (msgs (S t i) cid)" using chan assms(1) calculation(1) marker_must_stay_if_no_snapshot nsq by blast moreover have "\k. i \ k \ k < j \ ~ occurs_on (t ! k) = q" (is ?R) proof (rule ccontr) assume "~ ?R" then obtain k where "i \ k" "k < j" and ocp: "occurs_on (t ! k) = q" by blast have "k < length t" proof - have "j < length t" proof (rule ccontr) assume "~ j < length t" then have "S t j = S t (j+1)" using assms(1) no_change_if_ge_length_t by auto then show False using snap_q by auto qed then show ?thesis using \k < j\ by simp qed then show False proof (cases "regular_event (t ! k)") case True have "~ has_snapshotted (S t k) q" by (meson \k < j\ assms(1) computation.snapshot_stable_ver_2 computation_axioms less_imp_le_nat snap_q(1)) then have "prerecording_event t k" using \k < length t\ ocp True prerecording_event by simp then show False using \i \ j\ \k \ i\ assms by auto next case False then have "(S t k) \ (t ! k) \ (S t (k+1))" using \k < length t\ assms(1) step_Suc by auto then have "has_snapshotted (S t (k+1)) q" by (metis False ocp nonregular_event_induces_snapshot snapshot_state_unchanged) then show False by (metis Suc_eq_plus1 Suc_leI \k < j\ assms(1) snap_q(1) snapshot_stable_ver_3) qed qed ultimately show ?thesis using cs_when_recording_3 using NotStarted assms(1) bound chan by auto qed qed moreover have "map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs final cid))" proof (cases "\q p. t ! j = RecvMarker cid q p") case True then have "fst (cs (S t j) cid) = fst (cs (S t (j+1)) cid)" using step_q by auto moreover have RecvMarker: "t ! j = RecvMarker cid q p" proof - have "can_occur (t ! 
j) (S t j)" using happen_implies_can_occur step_q by simp then show ?thesis using RecvMarker_given_channel True chan by force qed moreover have "takeWhile ((\) Marker) (msgs (S t j) cid) = []" proof - have "can_occur (t ! j) (S t j)" using happen_implies_can_occur step_q by simp then have "length (msgs (S t j) cid) > 0 \ hd (msgs (S t j) cid) = Marker" using RecvMarker can_occur_def by auto then show ?thesis by (metis (mono_tags, lifting) hd_conv_nth length_greater_0_conv nth_mem set_takeWhileD takeWhile_nth) qed moreover have "snd (cs (S t (j+1)) cid) = Done" using step_q True by auto moreover have "cs (S t (j+1)) cid = cs final cid" using chan calculation cs_done_implies_same_snapshots assms(1) by (metis final_is_s_t_len_t nat_le_linear no_change_if_ge_length_t) ultimately show ?thesis by simp next case False have "~ regular_event (t ! j)" using regular_event_preserves_process_snapshots snap_q(1) snap_q(2) step_q by auto then have "isSnapshot (t ! j) \ isRecvMarker (t ! j)" using nonregular_event by auto then have "map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t (j+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (j+1)) cid) \ snd (cs (S t (j+1)) cid) = Recording" proof (elim disjE, goal_cases) case 1 have Snapshot: "t ! j = Snapshot q" using "1" oq by auto then have "msgs (S t j) cid = msgs (S t (j+1)) cid" using \p \ q\ step_q chan by auto moreover have "cs (S t (j+1)) cid = (fst (cs (S t j) cid), Recording)" using step_q Snapshot chan by simp ultimately show ?thesis by simp next case 2 obtain cid' r where RecvMarker: "t ! j = RecvMarker cid' q r" by (metis "2" event.collapse(5) oq) have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "channel cid = channel cid'" by simp then have "channel cid' = Some (r, q)" using False RecvMarker \\ cid \ cid'\ by blast then show False using False RecvMarker \\ cid \ cid'\ by blast qed then have "msgs (S t j) cid = msgs (S t (j+1)) cid" using \cid \ cid'\ step_q snap_q RecvMarker chan \p \ q\ by simp moreover have "cs (S t (j+1)) cid = (fst (cs (S t j) cid), Recording)" using \p \ q\ \cid \ cid'\step_q snap_q RecvMarker chan by auto ultimately show ?thesis by simp qed moreover have "map Msg (fst (cs (S t (j+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (j+1)) cid) = map Msg (fst (cs final cid))" proof - have "snd (cs (S t (j+1)) cid) = Recording" using calculation by blast moreover have "has_snapshotted (S t (j+1)) p" by (metis Suc_eq_plus1 Suc_leI \k < j\ assms(1) le_add1 snap_p(2) snapshot_stable_ver_3) moreover have "has_snapshotted (S t (j+1)) q" using snap_q by auto moreover have "j < length t" by (metis (no_types, lifting) chan Suc_eq_plus1 assms(1) cs_done cs_done_implies_both_snapshotted(2) computation.no_change_if_ge_length_t computation.snapshot_stable_ver_3 computation_axioms leI le_Suc_eq snap_q(1) snap_q(2)) ultimately show ?thesis using cs_when_recording assms(1) cs_done final_is_s_t_len_t proof - assume a1: "j < length t" assume a2: "trace init t final" assume a3: "snd (cs (S t (length t)) cid) = Done" assume a4: "snd (cs (S t (j + 1)) cid) = Recording" assume a5: "ps (S t (j + 1)) p \ None" assume a6: "\t. trace init t final \ final = S t (length t)" assume a7: "\i j t p cid q. 
\i < j; j \ length t; trace init t final; ps (S t i) p \ None; snd (cs (S t i) cid) = Recording; snd (cs (S t j) cid) = Done; channel cid = Some (p, q)\ \ map Msg (fst (cs (S t j) cid)) = map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid)" have "Suc j < length t" using a3 a2 a1 by (metis (no_types) False Suc_eq_plus1 Suc_lessI chan cs_done_implies_has_snapshotted done_only_from_recv_marker snap_q(1) step_q) then show ?thesis using a7 a6 a5 a4 a3 a2 by (metis (no_types) Suc_eq_plus1 chan nat_le_linear) qed qed ultimately show ?thesis by simp qed ultimately show ?thesis by (metis (no_types, lifting) Nil_is_map_conv assms(1) assms(3) chan cs_done cs_done_implies_has_snapshotted cs_not_nil_implies_postrecording_event nat_le_linear nsq self_append_conv2 snapshot_stable_ver_3) qed next case Recording then obtain j where snap_p: "~ has_snapshotted (S t j) p" "has_snapshotted (S t (j+1)) p" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p) have snap_q: "has_snapshotted (S t i) q" using Recording assms(1) chan cs_recording_implies_snapshot by blast have fst_cs_empty: "cs (S t i) cid = ([], Recording)" (is ?P) proof (rule ccontr) assume "~ ?P" have "snd (cs (S t i) cid) = Recording" using Recording by simp moreover have "fst (cs (S t i) cid) \ []" using \~ ?P\ prod.collapse calculation by metis ultimately have "\j. j < i \ postrecording_event t j" using assms(1) assms(4) chan cs_not_nil_implies_postrecording_event by blast then show False using assms by auto qed then show ?thesis proof - have i_less_len_t: "i < length t" proof (rule ccontr) assume "~ i < length t" then have "snd (cs (S t i) cid) = Done" by (metis assms(1) cs_done le_eq_less_or_eq nat_le_linear no_change_if_ge_length_t) then show False using Recording by simp qed then have "map Msg (fst (cs final cid)) = map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid)" proof (cases "j < i") case True then have "has_snapshotted (S t i) p" by (metis Suc_eq_plus1 Suc_leI assms(1) snap_p(2) snapshot_stable_ver_3) moreover have "length t \ length t" by simp ultimately show ?thesis using Recording chan assms(1) cs_done cs_when_recording final_is_s_t_len_t i_less_len_t by blast next case False text \need to show that next message that comes into the channel must be marker\ have "\k. i \ k \ k < j \ ~ occurs_on (t ! k) = p" (is ?P) proof (rule ccontr) assume "~ ?P" then obtain k where "i \ k" "k < j" "occurs_on (t ! k) = p" by blast then show False proof (cases "regular_event (t ! k)") case True then have "prerecording_event t k" by (metis (no_types, hide_lams) \k < j\ \occurs_on (t ! k) = p\ all_processes_snapshotted_in_final_state assms(1) final_is_s_t_len_t computation.prerecording_event computation_axioms less_trans nat_le_linear not_less snap_p(1) snapshot_stable_ver_2) then show ?thesis using assms \i \ k\ by auto next case False then have step_k: "(S t k) \ (t ! k) \ (S t (Suc k))" by (metis (no_types, lifting) Suc_leI \k < j\ all_processes_snapshotted_in_final_state assms(1) final_is_s_t_len_t le_Suc_eq less_imp_Suc_add linorder_not_less no_change_if_ge_length_t snap_p(1) step_Suc) then have "has_snapshotted (S t (Suc k)) p" by (metis False \occurs_on (t ! 
k) = p\ nonregular_event_induces_snapshot snapshot_state_unchanged) then have "k \ j" by (metis Suc_leI \k < j\ assms(1) snap_p(1) snapshot_stable_ver_3) then show False using \k < j\ by simp qed qed moreover have "~ has_snapshotted (S t i) p" using False assms(1) snap_p(1) snapshot_stable_ver_3 by auto ultimately have to_snapshot: "map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t i) cid)) @ takeWhile ((\) Marker) (msgs (S t i) cid)" using False chan Recording assms(1) cs_when_recording_2 by auto have step_j: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis Suc_eq_plus1 Suc_le_eq assms(1) distributed_system.step_Suc distributed_system_axioms computation.no_change_if_ge_length_t computation_axioms le_add1 not_less_eq_eq snap_p(1) snap_p(2)) then have "map Msg (fst (cs (S t j) cid)) @ takeWhile ((\) Marker) (msgs (S t j) cid) = map Msg (fst (cs (S t (j+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (j+1)) cid)" proof - have o: "~ regular_event (t ! j) \ occurs_on (t ! j) = p" by (metis (no_types, hide_lams) distributed_system.no_state_change_if_no_event distributed_system.regular_event_cannot_induce_snapshot distributed_system_axioms snap_p(1) snap_p(2) step_j) then show ?thesis using chan snapshot_step_cs_preservation_p step_j by blast qed moreover have "map Msg (fst (cs final cid)) = map Msg (fst (cs (S t (j+1)) cid)) @ takeWhile ((\) Marker) (msgs (S t (j+1)) cid)" proof - have "snd (cs (S t (j+1)) cid) = Recording" proof - have f1: "ps (S t j) p = None" by (meson snap_p(1)) then have f2: "j < length t" by (metis (no_types) all_processes_snapshotted_in_final_state assms(1) final_is_s_t_len_t linorder_not_le snapshot_stable_ver_3) have "t ! j \ RecvMarker cid q p" using f1 by (metis (no_types) Suc_eq_plus1 assms(1) recv_marker_means_snapshotted step_j) then show ?thesis using f2 f1 by (meson False assms(1) chan cs_done_implies_both_snapshotted(1) cs_in_initial_state_implies_not_snapshotted cs_not_not_started_stable done_only_from_recv_marker linorder_not_le recording_state.exhaust snap_q snapshot_stable_ver_3 step_j) qed moreover have "j+1 < length t" proof (rule ccontr) assume "~ j+1 < length t" then have "snd (cs (S t (j+1)) cid) = Done" by (metis assms(1) cs_done le_Suc_eq less_imp_Suc_add linorder_not_le no_change_if_ge_length_t) then show False using calculation by auto qed ultimately show ?thesis using chan snap_p(2) assms final_is_s_t_len_t cs_when_recording cs_done by blast qed ultimately show ?thesis using to_snapshot by simp qed then show ?thesis using fst_cs_empty by simp qed next case Done text \msgs must be empty, and cs must also be empty\ have fst_cs_empty: "fst (cs (S t i) cid) = []" proof (rule ccontr) assume "~ fst (cs (S t i) cid) = []" then have "fst (cs (S t 0) cid) \ fst (cs (S t i) cid)" by (metis chan assms(1) cs_not_nil_implies_postrecording_event gr_implies_not0 le0) then have "\j. 
j < i \ postrecording_event t j" using chan \fst (cs (S t i) cid) \ []\ assms(1) assms(4) cs_not_nil_implies_postrecording_event by blast then show False using assms by auto qed moreover have "msgs (S t i) cid = []" proof - have no_marker: "Marker \ set (msgs (S t i) cid)" (is ?P) proof (rule ccontr) assume "~ ?P" then have "Marker : set (msgs (S t i) cid)" by simp then have "snd (cs (S t i) cid) \ Done" by (metis Marker_in_channel_implies_not_done chan assms(1) nat_le_linear s_def take_all) then show False using Done by simp qed have snap_both: "has_snapshotted (S t i) p \ has_snapshotted (S t i) q" by (metis chan Done assms(1) cs_done_implies_both_snapshotted(1) cs_done_implies_has_snapshotted final_is_s_t_len_t computation.all_processes_snapshotted_in_final_state computation_axioms le_refl not_less s_def take_all) obtain j where snap_p: "~ has_snapshotted (S t j) p" "has_snapshotted (S t (j+1)) p" by (metis Suc_eq_plus1 assms(1) exists_snapshot_for_all_p) have "j < i" by (meson assms(1) not_le_imp_less snap_both snap_p(1) snapshot_stable_ver_2) have step_j: "(S t j) \ (t ! j) \ (S t (j+1))" by (metis Suc_eq_plus1 assms(1) distributed_system.step_Suc distributed_system_axioms computation.no_change_if_ge_length_t computation_axioms le_add1 linorder_not_less snap_p(1) snap_p(2)) have nonreg_j: "~ regular_event (t ! j)" by (meson distributed_system.regular_event_cannot_induce_snapshot distributed_system_axioms snap_p(1) snap_p(2) step_j) have oc_j: "occurs_on (t ! j) = p" using no_state_change_if_no_event snap_p(1) snap_p(2) step_j by force have "msgs (S t i) cid = [] \ (msgs (S t i) cid \ [] \ last (msgs (S t i) cid) = Marker)" proof - have "msgs (S t (j+1)) cid \ [] \ last (msgs (S t (j+1)) cid) = Marker" proof - have "msgs (S t (j+1)) cid = msgs (S t j) cid @ [Marker]" proof - have "isSnapshot (t ! j) \ isRecvMarker (t ! j)" using nonregular_event nonreg_j by blast then show ?thesis proof (elim disjE, goal_cases) case 1 then have "t ! j = Snapshot p" using oc_j by auto then show ?thesis using step_j chan by auto next case 2 then obtain cid' r where RecvMarker: "t ! j = RecvMarker cid' p r" by (metis event.collapse(5) oc_j) have "cid \ cid'" proof (rule ccontr) assume "~ cid \ cid'" then have "channel cid = channel cid'" by auto then have "Some (p, q) = Some (r, p)" by (metis RecvMarker RecvMarker_implies_Marker_in_set assms(1) chan computation.no_marker_if_no_snapshot computation_axioms snap_p(1) step_j) then show False using no_self_channel chan by simp qed then show ?thesis using oc_j snap_p step_j chan RecvMarker by auto qed qed then show ?thesis by auto qed moreover have "i \ length t" using assms by simp moreover have "j+1 \ i" using \j < i\ by simp moreover have "\k. j+1 \ k \ k < i \ regular_event (t ! k) \ ~ occurs_on (t ! k) = p" (is ?R) proof (rule ccontr) assume "~ ?R" then obtain k where range: "j+1 \ k" "k < i" and "regular_event (t ! k)" "occurs_on (t ! 
k) = p" by blast then have "postrecording_event t k" using snap_p by (meson assms(1) calculation(2) le_trans linorder_not_less pre_if_regular_and_not_post prerecording_event snapshot_stable_ver_2) then show False using assms range by auto qed ultimately show ?thesis using assms(1) chan last_unchanged_or_empty_if_no_events snap_p(2) by auto qed then show ?thesis using no_marker last_in_set by fastforce qed ultimately show ?thesis using chan Done assms(1) assms(4) final_is_s_t_len_t computation.cs_done_implies_same_snapshots computation_axioms by fastforce qed ultimately show "filter ((\) Marker) (msgs (S t i) cid) = map Msg (fst (cs final cid))" by simp qed lemma snapshot_after_all_prerecording_events: assumes "trace init t final" and "\i'. i' \ i \ ~ prerecording_event t i'" and "\j'. j' < i \ ~ postrecording_event t j'" and "i \ length t" shows "state_equal_to_snapshot (S t i) final" proof (unfold state_equal_to_snapshot_def, rule conjI) show "ps_equal_to_snapshot (S t i) final" using assms ps_after_all_prerecording_events by auto show "cs_equal_to_snapshot (S t i) final" using assms cs_after_all_prerecording_events by auto qed subsection \Obtaining the desired traces\ abbreviation all_prerecording_before_postrecording where "all_prerecording_before_postrecording t \ \i. (\j. j < i \ ~ postrecording_event t j) \ (\j. j \ i \ ~ prerecording_event t j) \ i \ length t \ trace init t final" definition count_violations :: "('a, 'b, 'c) trace \ nat" where "count_violations t = sum (\i. if postrecording_event t i then card {j \ {i+1.. {i+1.. 0" by simp lemma count_violations_ge_0: shows "count_violations t \ 0" by simp lemma violations_0_implies_all_subterms_0: assumes "count_violations t = 0" shows "\i \ {0.. {i+1..i. if postrecording_event t i then card {j \ {i+1..i \ {0.. {i+1.. 0" shows "\i. postrecording_event t i \ card {j \ {i+1.. 0" (is ?P) proof (rule ccontr) assume "~ ?P" then have "\i. ~ postrecording_event t i \ card {j \ {i+1..i. (if postrecording_event t i then card {j \ {i+1..\i. \ postrecording_event t i \ card {j \ {i + 1.. by auto then show "sum (\i. if postrecording_event t i then card {j \ {i+1.. {i+1.. 0" shows "\j \ {i+1.. {i+1.. 0" proof - have "j < length t" using prerecording_event assms by auto have "{j \ {i+1.. empty" using Suc_eq_plus1 \j < length t\ assms(1) assms(3) less_imp_triv by auto then show ?thesis by fastforce qed lemma exists_neighboring_violation_pair: assumes "trace init t final" and "count_violations t > 0" shows "\i j. i < j \ postrecording_event t i \ prerecording_event t j \ (\k. (i < k \ k < j) \ ~ regular_event (t ! k)) \ j < length t" proof - let ?I = "{i. postrecording_event t i \ card {j \ {i+1.. 0}" have nonempty_I: "?I \ empty" using assms exists_postrecording_violation_if_count_greater_0 by blast have fin_I: "finite ?I" proof (rule ccontr) assume "~ finite ?I" then obtain i where "i > length t" "postrecording_event t i" by (simp add: postrecording_event) then show False using postrecording_event by simp qed let ?i = "Max ?I" have no_greater_postrec_violation: "\i. i > ?i \ ~ (postrecording_event t i \ card {j \ {i+1.. 0)" using Max_gr_iff fin_I by blast have post_i: "postrecording_event t ?i" using Max_in fin_I nonempty_I by blast have "card {j \ {?i+1.. 0" proof - have "?i \ ?I" using Max_in fin_I nonempty_I by blast then show ?thesis by simp qed let ?J = "{j \ {?i+1.. empty" using \card {j \ {?i+1.. 
0\ exists_prerecording_violation_when_card_greater_0 by blast have fin_J: "finite ?J" by auto let ?j = "Min ?J" have pre_j: "prerecording_event t ?j" using Min_in fin_J nonempty_J by blast have no_smaller_prerec_violation: "\j \ {?i+1.. ~ prerecording_event t j" using Min_less_iff fin_J by blast have j_less_len_t: "?j < length t" using pre_j prerecording_event by blast have "\k. (?i < k \ k < ?j) \ ~ regular_event (t ! k)" proof (rule allI, rule impI) fix k assume asm: "?i < k \ k < ?j" then show "~ regular_event (t ! k)" proof - have "0_le_k": "0 \ k" by simp have k_less_len_t: "k < length t" using j_less_len_t asm by auto show ?thesis proof (rule ccontr) assume reg_event: "~ ~ regular_event (t ! k)" then show False proof (cases "has_snapshotted (S t k) (occurs_on (t ! k))") case True then have post_k: "postrecording_event t k" using reg_event k_less_len_t postrecording_event by simp moreover have "card {j \ {k+1.. 0" using post_k pre_j card_greater_0_if_post_after_pre asm pre_j by blast ultimately show False using no_greater_postrec_violation asm by blast next case False then have pre_k: "prerecording_event t k" using reg_event k_less_len_t prerecording_event by simp moreover have "k \ {?i+1..k. (i < k \ k < j) \ ~ regular_event (t ! k)" and "trace init t final" shows "{k \ {0.. {0..k. k < i \ t ! k = ?t ! k" by (metis nth_take same_begin) then have "\k. k < i \ prerecording_event t k = prerecording_event ?t k" proof - have "\k. k < i \ S t k = S ?t k" using assms swap_events by simp then show ?thesis unfolding prerecording_event using a same_length by presburger qed then show ?thesis by auto qed lemma same_cardinality_post_swap_2: assumes "prerecording_event t j" and "postrecording_event t i" and "i < j" and "j < length t" and "count_violations t = Suc n" and "\k. (i < k \ k < j) \ ~ regular_event (t ! k)" and "trace init t final" shows "card {k \ {i.. {i.. {i..k. i \ k \ k < j \ ~ prerecording_event t k" proof (rule allI, rule impI) fix k assume asm: "i \ k \ k < j" then show "~ prerecording_event t k" proof (cases "k = i") case True then have "postrecording_event t k" using assms by simp then show ?thesis by (meson computation.postrecording_event computation.prerecording_event computation_axioms) next case False then have "i < k \ k < j" using asm by force then have "~ regular_event (t ! k)" using assms by simp then show ?thesis unfolding prerecording_event by simp qed qed then have "{k \ {i.. {j.. {i.. {i.. postrecording_event ?t (i+1) \ (\k. k > i+1 \ k < j+1 \ ~ regular_event (?t ! k))" using assms swap_events by blast have "\k. i+1 \ k \ k < j+1 \ ~ prerecording_event ?t k" proof (rule allI, rule impI) fix k assume asm: "i+1 \ k \ k < j+1" then show "~ prerecording_event ?t k" proof (cases "k = i+1") case True then have "postrecording_event ?t k" using swap_ind by blast then show ?thesis by (meson computation.postrecording_event computation.prerecording_event computation_axioms) next case False then have "i+1 < k \ k < j+1" using asm by linarith then have "~ regular_event (?t ! k)" using asm assms swap_ind by blast then show ?thesis unfolding prerecording_event by simp qed qed then have "{k \ {i+1.. {i.. {i..k. (i < k \ k < j) \ ~ regular_event (t ! k)" and "trace init t final" shows "{k \ {j+1.. {j+1..k. j+1 \ k \ k < length t \ ?t ! k = t ! k" proof (rule allI, rule impI) fix k assume "j+1 \ k \ k < length t" then have "?t ! k = drop (j+1) (swap_events i j t) ! 
(k-(j+1))" by (metis (no_types, lifting) Suc_eq_plus1 Suc_leI assms(4) le_add_diff_inverse nth_drop same_length) moreover have "t ! k = drop (j+1) t ! (k-(j+1))" using \j + 1 \ k \ k < length t\ by auto ultimately have "drop (j+1) ?t ! (k-(j+1)) = drop (j+1) t ! (k-(j+1))" using assms swap_identical_tails by metis then show "?t ! k = t ! k" using \?t ! k = drop (j + 1) ?t ! (k - (j + 1))\ \t ! k = drop (j + 1) t ! (k - (j + 1))\ by auto qed then have "\k. j+1 \ k \ k < length t \ prerecording_event t k = prerecording_event ?t k" proof - have "\k. k \ (j+1) \ S t k = S ?t k" using assms swap_events by simp then show ?thesis unfolding prerecording_event using a by auto qed then have "{k \ {j+1.. {j+1..k. (i < k \ k < j) \ ~ regular_event (t ! k)" and "count_violations t = Suc n" and "trace init t final" shows "card {k \ {i+1..k. i < k \ k < j \ ~ prerecording_event t k" proof (rule allI, rule impI) fix k assume asm: "i < k \ k < j" then show "~ prerecording_event t k" proof - have "~ regular_event (t ! k)" using asm assms by blast then show ?thesis unfolding prerecording_event by simp qed qed then have "{k \ {i+1.. {j.. {i+1..k. (i < k \ k < j) \ ~ regular_event (t ! k)" and "count_violations t = Suc n" and "trace init t final" shows "card {k \ {i+1..k. i+1 < k \ k < j+1 \ ~ regular_event (?t ! k)" using assms swap_events by blast have "\k. i+1 \ k \ k < j+1 \ ~ prerecording_event ?t k" proof (rule allI, rule impI) fix k assume asm: "i+1 \ k \ k < j+1" then show "~ prerecording_event ?t k" proof (cases "k = i+1") case True then show ?thesis using postrec_ip1 by (meson computation.postrecording_event computation.prerecording_event computation_axioms) next case False then have "i+1 < k \ k < j+1" using asm by simp then have "~ regular_event (?t ! k)" using neigh_shift by blast then show ?thesis unfolding prerecording_event by simp qed qed then have "{k \ {i+1..k. (i < k \ k < j) \ ~ regular_event (t ! k)" and "count_violations t = Suc n" and "trace init t final" shows "count_violations (swap_events i j t) = n" proof - let ?t = "swap_events i j t" let ?f = "(\i. if postrecording_event t i then card {j \ {i+1..k. k < i \ postrecording_event t k = postrecording_event ?t k" proof - have "\k. k < i \ S t k = S ?t k" using assms swap_events by auto then show ?thesis unfolding postrecording_event proof - assume a1: "\kn na es nb. \ n < na \ \ na < length es \ \ nb < n \ swap_events n na es ! nb = (es ! nb::('a, 'b, 'c) event)" by (metis (no_types) nth_take swap_identical_heads) then have "\ nn < i \ \ nn < length t \ \ nn < length (swap_events i j t) \ \ regular_event (t ! nn) \ \ regular_event (swap_events i j t ! nn) \ ps (S t nn) (occurs_on (t ! nn)) = None \ ps (S (swap_events i j t) nn) (occurs_on (swap_events i j t ! nn)) = None \ regular_event (t ! nn) \ regular_event (swap_events i j t ! nn) \ nn < length t \ nn < length (swap_events i j t) \ ps (S t nn) (occurs_on (t ! nn)) \ None \ ps (S (swap_events i j t) nn) (occurs_on (swap_events i j t ! nn)) \ None" using a1 by (metis (no_types) assms(3) assms(4) swap_identical_length) } then show "\n regular_event (t ! n) \ ps (S t n) (occurs_on (t ! n)) \ None) = (n < length (swap_events i j t) \ regular_event (swap_events i j t ! n) \ ps (S (swap_events i j t) n) (occurs_on (swap_events i j t ! n)) \ None)" by (metis (no_types)) qed qed have same_postrec_suffix: "\k. k \ j+1 \ postrecording_event t k = postrecording_event ?t k" proof - have post_equal_states: "\k. 
k \ j+1 \ S t k = S ?t k" using assms swap_events by presburger show ?thesis proof (rule allI, rule impI) fix k assume "j+1 \ k" then show "postrecording_event t k = postrecording_event ?t k" proof (cases "k < length t") case False then have "~ postrecording_event t k" using postrecording_event by simp moreover have "~ postrecording_event ?t k" using postrecording_event swap_identical_length False assms by metis ultimately show ?thesis by simp next case True then show "postrecording_event t k = postrecording_event ?t k" using post_equal_states proof - assume a1: "\k\j + 1. S t k = S (swap_events i j t) k" assume a2: "k < length t" have f3: "length t = length (swap_events i j t)" using assms(3) assms(4) swap_identical_length by blast have f4: "k - (j + 1) + (j + 1) = k" using \j + 1 \ k\ le_add_diff_inverse2 by blast have "drop (j + 1) t = drop (j + 1) (swap_events i j t)" using assms(3) assms(4) swap_identical_tails by blast then have "swap_events i j t ! k = t ! k" using f4 f3 a2 by (metis (no_types) drop_drop hd_drop_conv_nth) then show ?thesis using f3 a1 \j + 1 \ k\ postrecording_event by presburger qed qed qed qed have sum_decomp_f: "sum ?f {0..l. 0 \ l \ l < i \ ?f l = ?f' l" proof (rule allI, rule impI) fix l assume "0 \ l \ l < i" then have "l < i" by simp show "?f l = ?f' l" proof (cases "postrecording_event t l") case True let ?G = "{k \ {l+1.. (?B \ ?C)" using assms \l < i\ by auto then have "card ?G = card (?A \ (?B \ ?C))" by simp also have "card (?A \ (?B \ ?C)) = card ?A + card (?B \ ?C)" proof - have "?A \ (?B \ ?C) = {}" using \l < i\ assms by auto then show ?thesis by (simp add: card_Un_disjoint disjoint_iff_not_equal) qed also have "card ?A + card (?B \ ?C) = card ?A + card ?B + card ?C" proof - have "?B \ ?C = {}" by auto then show ?thesis by (simp add: card_Un_disjoint disjoint_iff_not_equal) qed finally show ?thesis by simp qed have card_G': "card ?G' = card ?A' + card ?B' + card ?C'" proof - have "?G' = ?A' \ (?B' \ ?C')" using assms \l < i\ by auto then have "card ?G' = card (?A' \ (?B' \ ?C'))" by simp also have "card (?A' \ (?B' \ ?C')) = card ?A' + card (?B' \ ?C')" proof - have "?A' \ (?B' \ ?C') = {}" using \l < i\ assms by auto then show ?thesis by (simp add: card_Un_disjoint disjoint_iff_not_equal) qed also have "card ?A' + card (?B' \ ?C') = card ?A' + card ?B' + card ?C'" proof - have "?B' \ ?C' = {}" by auto then show ?thesis by (simp add: card_Un_disjoint disjoint_iff_not_equal) qed finally show ?thesis by simp qed have "card ?G = card ?G'" proof - have "card ?A = card ?A'" proof - have "{k \ {0.. {0..l < i\ by blast moreover have "length ?t = length t" using assms(3) assms(4) by fastforce ultimately show ?thesis using True by presburger next case False then have "~ postrecording_event ?t l" using same_postrec_prefix \l < i\ by blast then show ?thesis using False by simp qed qed then show ?thesis using sum_eq_if_same_subterms by auto qed have infix_sum: "sum ?f {i..l. i+2 \ l \ l < j+1 \ ?f l = ?f' l" proof (rule allI, rule impI) fix l assume asm: "i+2 \ l \ l < j+1" have "?f l = 0" proof (cases "l = j") case True then have "~ postrecording_event t l" using assms(1) postrecording_event prerecording_event by auto then show ?thesis by simp next case False then have "i < l \ l < j" using assms asm by simp then have "~ regular_event (t ! l)" using assms by blast then have "~ postrecording_event t l" unfolding postrecording_event by simp then show ?thesis by simp qed moreover have "?f' l = 0" proof - have "\k. i+1 < k \ k < j+1 \ ~ regular_event (?t ! 
k)" using assms swap_events by blast then have "~ regular_event (?t ! l)" using asm by simp then have "~ postrecording_event ?t l" unfolding postrecording_event by simp then show ?thesis by simp qed ultimately show "?f l = ?f' l" by simp qed then show ?thesis using sum_eq_if_same_subterms by simp qed moreover have "sum ?f {i.. ?B" using assms by auto moreover have "?A \ ?B = {}" by auto ultimately show ?thesis by (simp add: card_Un_disjoint disjoint_iff_not_equal) qed have card_G': "card ?G' = card ?A' + card ?B'" proof - have "?G' = ?A' \ ?B'" using assms by auto moreover have "?A' \ ?B' = {}" by auto ultimately show ?thesis by (simp add: card_Un_disjoint disjoint_iff_not_equal) qed have "card ?G = card ?G' + 1" proof - have "card ?A = card ?A' + 1" proof - have "card ?A = 1" using assms card_ip1_to_j_is_1_in_normal_events by blast moreover have "card ?A' = 0" using assms card_ip1_to_j_is_0_in_swapped_events by force ultimately show ?thesis by algebra qed moreover have "card ?B = card ?B'" using assms same_cardinality_post_swap_3 by force ultimately show ?thesis using card_G card_G' by presburger qed moreover have "card ?G = ?f i" using pi by simp moreover have "card ?G' = ?f' (i+1)" using pip1 by simp ultimately show ?thesis by argo qed ultimately show ?thesis by auto qed ultimately show ?thesis using sum_decomp_f sum_decomp_f' by linarith qed have suffix_sum: "sum ?f {j+1..l. l > j \ ?f l = ?f' l" proof (rule allI, rule impI) fix l assume "l > j" then show "?f l = ?f' l" proof (cases "postrecording_event t l") case True let ?G = "{k \ {l+1..l > j\ by fastforce then show ?thesis by simp qed moreover have "postrecording_event ?t l" using True same_postrec_suffix \l > j\ by simp moreover have "length ?t = length t" using assms(3) assms(4) by fastforce ultimately show ?thesis using True by presburger next case False then have "~ postrecording_event ?t l" using same_postrec_suffix \l > j\ by simp then show ?thesis using False by simp qed qed then have "\k. j+1 \ k \ k < length t \ ?f k = ?f' k" using discrete by blast moreover have "length t = length ?t" using assms(3) assms(4) swap_identical_length by blast ultimately show ?thesis by (blast intro:sum_eq_if_same_subterms) qed have "sum ?f {0..t'. perm t' t \ all_prerecording_before_postrecording t'" using assms proof (induct "count_violations t" arbitrary: t) case 0 then show ?case proof (cases "\i. prerecording_event t i") case False then have "\j. ~ prerecording_event t j" by auto then have "\j. j \ 0 \ ~ postrecording_event t j" using "0.prems" init_is_s_t_0 no_initial_process_snapshot postrecording_event by auto moreover have "\j. j > 0 \ ~ prerecording_event t j" using False by auto moreover have "length t > 0" by (metis "0.prems" all_processes_snapshotted_in_final_state length_greater_0_conv no_initial_process_snapshot tr_init trace_and_start_determines_end) ultimately show ?thesis using "0.prems" False by auto next case True let ?Is = "{i. prerecording_event t i}" have "?Is \ empty" by (simp add: True) moreover have fin_Is: "finite ?Is" proof (rule ccontr) assume "~ finite ?Is" then obtain i where "i > length t" "prerecording_event t i" by (simp add: prerecording_event) then show False using prerecording_event by auto qed let ?i = "Max ?Is" have pi: "prerecording_event t ?i" using Max_in calculation fin_Is by blast have "?i < length t" proof (rule ccontr) assume "~ ?i < length t" then show False using calculation fin_Is computation.prerecording_event computation_axioms by force qed moreover have "\j. 
j \ ?i+1 \ ~ prerecording_event t j" proof - have "\j. j > ?i \ ~ prerecording_event t j" using Max_less_iff fin_Is by auto then show ?thesis by auto qed moreover have "\j. j < ?i+1 \ ~ postrecording_event t j" proof - have "\j. j \ ?i \ ~ postrecording_event t j" proof (rule allI, rule impI, rule ccontr) fix j assume "j \ ?i" "~ ~ postrecording_event t j" then have "j < ?i" by (metis add_diff_inverse_nat dual_order.antisym le_add1 pi postrecording_event prerecording_event) then have "count_violations t > 0" proof - have "(if postrecording_event t j then card {l \ {j+1.. {j+1..~ ~ postrecording_event t j\ by simp moreover have "card {l \ {j+1.. 0" proof - have "j + 1 \ ?i \ ?i < length t" using \Max {i. prerecording_event t i} < length t\ \j < Max {i. prerecording_event t i}\ discrete by blast moreover have "prerecording_event t ?i" using pi by simp ultimately have "{l \ {j+1.. empty" by fastforce then show ?thesis by fastforce qed ultimately show ?thesis by (metis (no_types, lifting) violations_0_implies_all_subterms_0 \Max {i. prerecording_event t i} < length t\ \j < Max {i. prerecording_event t i}\ atLeastLessThan_iff less_trans linorder_not_le neq0_conv) qed then show False using "0" by simp qed then show ?thesis by auto qed moreover have "?i+1 \ length t" using calculation(2) discrete by blast ultimately show ?thesis using "0.prems" by blast qed next case (Suc n) then obtain i j where ind: "postrecording_event t i" "prerecording_event t j" "\k. (i < k \ k < j) \ ~ regular_event (t ! k)" "i < j" "j < length t" using exists_neighboring_violation_pair Suc by force then have "trace init (swap_events i j t) final \ (\k. k \ j + 1 \ S (swap_events i j t) k = S t k) \ (\k. k \ i \ S (swap_events i j t) k = S t k)" using Suc swap_events by presburger moreover have "perm (swap_events i j t) t" using swap_events_perm ind by blast moreover have "count_violations (swap_events i j t) = n" using count_violations_swap Suc ind by simp ultimately show ?case using Suc.hyps by blast qed theorem snapshot_algorithm_is_correct: assumes "trace init t final" shows "\t' i. trace init t' final \ perm t' t \ state_equal_to_snapshot (S t' i) final \ i \ length t'" proof - obtain t' where "perm t' t" and "all_prerecording_before_postrecording t'" using assms desired_trace_always_exists by blast then show ?thesis using snapshot_after_all_prerecording_events by blast qed subsection \Stable property detection\ text \Finally, we show that the computed snapshot is indeed suitable for stable property detection, as claimed in~\cite{chandy}.\ definition stable where "stable p \ (\c. p c \ (\t c'. trace c t c' \ p c'))" lemma has_snapshot_stable: assumes "trace init t final" shows "stable (\c. has_snapshotted c p)" using snapshot_stable stable_def by auto definition some_snapshot_state where "some_snapshot_state t \ SOME (t', i). trace init t final \ trace init t' final \ perm t' t \ state_equal_to_snapshot (S t' i) final" lemma split_S: assumes "trace init t final" shows "trace (S t i) (drop i t) final" proof - have "t = take i t @ drop i t" by simp then show ?thesis by (metis split_trace assms exists_trace_for_any_i trace_and_start_determines_end) qed theorem Stable_Property_Detection: assumes "stable p" and "trace init t final" and "(t', i) = some_snapshot_state t" and "p (S t' i)" shows "p final" proof - have "\t' i. 
trace init t final \ trace init t' final \ perm t' t \ state_equal_to_snapshot (S t' i) final" using snapshot_algorithm_is_correct assms(2) by blast then have "trace init t' final" using assms unfolding some_snapshot_state_def by (metis (lifting) case_prodD case_prodI someI_ex) then show ?thesis using assms stable_def split_S by metis qed end (* locale computation *) end (* theory Snapshot *) diff --git a/thys/Chandy_Lamport/Util.thy b/thys/Chandy_Lamport/Util.thy --- a/thys/Chandy_Lamport/Util.thy +++ b/thys/Chandy_Lamport/Util.thy @@ -1,363 +1,363 @@ section \Utilties\ theory Util imports Main "HOL-Library.Sublist" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" begin abbreviation swap_events where "swap_events i j t \ take i t @ [t ! j, t ! i] @ take (j - (i+1)) (drop (i+1) t) @ drop (j+1) t" lemma swap_neighbors_2: shows "i+1 < length t \ swap_events i (i+1) t = (t[i := t ! (i+1)])[i+1 := t ! i]" proof (induct i arbitrary: t) case 0 then show ?case by (metis One_nat_def Suc_eq_plus1 add_lessD1 append.left_neutral append_Cons cancel_comm_monoid_add_class.diff_cancel drop_update_cancel length_list_update numeral_One take_0 take_Cons_numeral upd_conv_take_nth_drop zero_less_Suc) next case (Suc n) let ?t = "tl t" have "t = hd t # ?t" by (metis Suc.prems hd_Cons_tl list.size(3) not_less_zero) moreover have "swap_events n (n+1) ?t = (?t[n := ?t ! (n+1)])[n+1 := ?t ! n]" by (metis Suc.hyps Suc.prems Suc_eq_plus1 length_tl less_diff_conv) ultimately show ?case by (metis Suc_eq_plus1 append_Cons diff_self_eq_0 drop_Suc_Cons list_update_code(3) nth_Cons_Suc take_Suc_Cons) qed lemma swap_identical_length: assumes "i < j" and "j < length t" shows "length t = length (swap_events i j t)" proof - have "length (take i t @ [t ! j, t ! i] @ take (j - (i+1)) (drop (i+1) t)) = length (take i t) + length [t ! j, t ! i] + length (take (j - (i+1)) (drop (i+1) t))" by simp then have "... = j+1" using assms(1) assms(2) by auto then show ?thesis using assms(2) by auto qed lemma swap_identical_heads: assumes "i < j" and "j < length t" shows "take i t = take i (swap_events i j t)" using assms by auto lemma swap_identical_tails: assumes "i < j" and "j < length t" shows "drop (j+1) t = drop (j+1) (swap_events i j t)" proof - have "length (take i t @ [t ! j, t ! i] @ take (j - (i+1)) (drop (i+1) t)) = length (take i t) + length [t ! j, t ! i] + length (take (j - (i+1)) (drop (i+1) t))" by simp then have "... = j+1" using assms(1) assms(2) by auto then show ?thesis by (metis \length (take i t @ [t ! j, t ! i] @ take (j - (i + 1)) (drop (i + 1) t)) = length (take i t) + length [t ! j, t ! i] + length (take (j - (i + 1)) (drop (i + 1) t))\ append.assoc append_eq_conv_conj) qed lemma swap_neighbors: shows "i+1 < length l \ (l[i := l ! (i+1)])[i+1 := l ! i] = take i l @ [l ! (i+1), l ! i] @ drop (i+2) l" proof (induct i arbitrary: l) case 0 then show ?case by (metis One_nat_def add.left_neutral add_lessD1 append_Cons append_Nil drop_update_cancel length_list_update one_add_one plus_1_eq_Suc take0 take_Suc_Cons upd_conv_take_nth_drop zero_less_two) next case (Suc n) let ?l = "tl l" have "(l[Suc n := l ! (Suc n + 1)])[Suc n + 1 := l ! Suc n] = hd l # (?l[n := ?l ! (n+1)])[n+1 := ?l ! n]" by (metis Suc.prems add.commute add_less_same_cancel2 list.collapse list.size(3) list_update_code(3) not_add_less2 nth_Cons_Suc plus_1_eq_Suc) have "n + 1 < length ?l" using Suc.prems by auto then have "(?l[n := ?l ! (n+1)])[n+1 := ?l ! n] = take n ?l @ [?l ! (n+1), ?l ! 
n] @ drop (n+2) ?l" using Suc.hyps by simp then show ?case by (cases l) auto qed lemma swap_events_perm: assumes "i < j" and "j < length t" shows "perm (swap_events i j t) t" proof - have swap: "swap_events i j t = take i t @ [t ! j, t ! i] @ (take (j - (i+1)) (drop (i+1) t)) @ (drop (j+1) t)" by auto have reg: "t = take i t @ (take ((j+1) - i) (drop i t)) @ drop (j+1) t" by (metis add_diff_inverse_nat add_lessD1 append.assoc append_take_drop_id assms(1) less_imp_add_positive less_not_refl take_add) have "perm (take i t) (take i t)" by simp moreover have "perm (drop (j+1) t) (drop (j+1) t)" by simp moreover have "perm ([t ! j, t ! i] @ (take (j - (i+1)) (drop (i+1) t))) (take ((j+1) - i) (drop i t))" proof - let ?l = "take (j - (i+1)) (drop (i+1) t)" have "take ((j+1) - i) (drop i t) = t ! i # ?l @ [t ! j]" proof - have f1: "Suc (j - Suc i) = j - i" by (meson Suc_diff_Suc assms(1)) have f2: "i < length t" using assms(1) assms(2) by linarith have f3: "j - (i + 1) + (i + 1) = j" by (meson assms(1) discrete le_add_diff_inverse2) then have "drop (j - (i + 1)) (drop (i + 1) t) = drop j t" by (metis drop_drop) then show ?thesis using f3 f2 f1 by (metis (no_types) Cons_nth_drop_Suc Suc_diff_le Suc_eq_plus1 assms(1) assms(2) hd_drop_conv_nth length_drop less_diff_conv nat_less_le take_Suc_Cons take_hd_drop) qed then show ?thesis using mset_eq_perm by fastforce qed ultimately show ?thesis using swap reg by (metis append.assoc perm_append1 perm_append2) qed lemma sum_eq_if_same_subterms: fixes i :: nat shows "\k. i \ k \ k < j \ f k = f' k \ sum f {i..) a) l \ takeWhile ((\) a) l" shows "\i j. i < j \ j < length l \ l ! i = a \ l ! j \ a" (is ?P) proof (rule ccontr) assume "~ ?P" then have asm: "\i j. i < j \ j < length l \ l ! i \ a \ l ! j = a" (is ?Q) by simp then have "filter ((\) a) l = takeWhile ((\) a) l" proof (cases "a : set l") case False then have "\i. i < length l \ l ! i \ a" by auto then show ?thesis by (metis (mono_tags, lifting) False filter_True takeWhile_eq_all_conv) next case True then have ex_j: "\j. j < length l \ l ! j = a" by (simp add: in_set_conv_nth) let ?j = "Min {j. j < length l \ l ! j = a}" have fin_j: "finite {j. j < length l \ l ! j = a}" using finite_nat_set_iff_bounded by blast moreover have "{j. j < length l \ l ! j = a} \ empty" using ex_j by blast ultimately have "?j < length l" using Min_less_iff by blast have tail_all_a: "\j. j < length l \ j \ ?j \ l ! j = a" proof (rule allI, rule impI) fix j assume "j < length l \ j \ ?j" moreover have "\ ?Q; j < length l \ j \ ?j \ \ \k. k \ ?j \ k \ j \ l ! j = a" proof (induct "j - ?j") case 0 then have "j = ?j" using 0 by simp then show ?case using Min_in \{j. j < length l \ l ! j = a} \ {}\ fin_j by blast next case (Suc n) then have "\k. k \ ?j \ k < j \ l ! j = a" by (metis (mono_tags, lifting) Min_in \{j. j < length l \ l ! j = a} \ {}\ fin_j le_eq_less_or_eq mem_Collect_eq) then show ?case using Suc.hyps(2) by auto qed ultimately show "l ! j = a" using asm by blast qed moreover have head_all_not_a: "\i. i < ?j \ l ! i \ a" using asm calculation by (metis (mono_tags, lifting) Min_le \Min {j. j < length l \ l ! j = a} < length l\ fin_j leD less_trans mem_Collect_eq) ultimately have "takeWhile ((\) a) l = take ?j l" proof - have "length (takeWhile ((\) a) l) = ?j" proof - have "length (takeWhile ((\) a) l) \ ?j" (is ?S) proof (rule ccontr) assume "\ ?S" then have "l ! ?j \ a" by (metis (mono_tags, lifting) not_le_imp_less nth_mem set_takeWhileD takeWhile_nth) then show False using \Min {j. j < length l \ l ! 
j = a} < length l\ tail_all_a by blast qed moreover have "length (takeWhile ((\) a) l) \ ?j" (is ?T) proof (rule ccontr) assume "\ ?T" then have "\j. j < ?j \ l ! j = a" by (metis (mono_tags, lifting) \Min {j. j < length l \ l ! j = a} < length l\ calculation le_less_trans not_le_imp_less nth_length_takeWhile) then show False using head_all_not_a by blast qed ultimately show ?thesis by simp qed moreover have "length (take ?j l) = ?j" by (metis calculation takeWhile_eq_take) ultimately show ?thesis by (metis takeWhile_eq_take) qed moreover have "filter ((\) a) l = take ?j l" proof - have "filter ((\) a) l = filter ((\) a) (take ?j l) @ filter ((\) a) (drop ?j l)" by (metis append_take_drop_id filter_append) moreover have "filter ((\) a) (take ?j l) = take ?j l" using head_all_not_a by (metis \takeWhile ((\) a) l = take (Min {j. j < length l \ l ! j = a}) l\ filter_id_conv set_takeWhileD) moreover have "filter ((\) a) (drop ?j l) = []" proof - have "\j. j \ ?j \ j < length l \ l ! j = drop ?j l ! (j - ?j)" by simp then have "\j. j < length l - ?j \ drop ?j l ! j = a" using tail_all_a by (metis (no_types, lifting) Groups.add_ac(2) \Min {j. j < length l \ l ! j = a} < length l\ less_diff_conv less_imp_le_nat not_add_less2 not_le nth_drop) then show ?thesis proof - obtain aa :: "('a \ bool) \ 'a list \ 'a" where "\x0 x1. (\v2. v2 \ set x1 \ x0 v2) = (aa x0 x1 \ set x1 \ x0 (aa x0 x1))" by moura then have f1: "\as p. aa p as \ set as \ p (aa p as) \ filter p as = []" by (metis (full_types) filter_False) obtain nn :: "'a list \ 'a \ nat" where f2: "\x0 x1. (\v2 x0 ! nn x0 x1 = x1)" by moura { assume "drop (Min {n. n < length l \ l ! n = a}) l ! nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) = a" then have "filter ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l) = [] \ \ nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) < length (drop (Min {n. n < length l \ l ! n = a}) l) \ drop (Min {n. n < length l \ l ! n = a}) l ! nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) \ aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)" using f1 by (metis (full_types)) } moreover { assume "\ nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) < length l - Min {n. n < length l \ l ! n = a}" then have "\ nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) < length (drop (Min {n. n < length l \ l ! n = a}) l) \ drop (Min {n. n < length l \ l ! n = a}) l ! nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) \ aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)" by simp } ultimately have "filter ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l) = [] \ \ nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) < length (drop (Min {n. n < length l \ l ! n = a}) l) \ drop (Min {n. n < length l \ l ! n = a}) l ! nn (drop (Min {n. n < length l \ l ! n = a}) l) (aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)) \ aa ((\) a) (drop (Min {n. n < length l \ l ! n = a}) l)" using \\j l ! j = a}. drop (Min {j. j < length l \ l ! j = a}) l ! 
j = a\ by blast then show ?thesis using f2 f1 by (meson in_set_conv_nth) qed qed ultimately show ?thesis by simp qed ultimately show ?thesis by simp qed then show False using assms by simp qed lemma util_exactly_one_element: assumes "m \ set l" and "l' = l @ [m]" shows "\!j. j < length l' \ l' ! j = m" (is ?P) proof - have "\j. j < length l' - 1 \ l' ! j \ m" by (metis assms(1) assms(2) butlast_snoc length_butlast nth_append nth_mem) then have one_j: "\j. j < length l' \ l' ! j = m \ j = (length l' - 1)" by (metis (no_types, hide_lams) diff_Suc_1 lessE) show ?thesis proof (rule ccontr) assume "~ ?P" then obtain i j where "i \ j" "i < length l'" "j < length l'" "l' ! i = m" "l' ! j = m" using assms by auto then show False using one_j by blast qed qed lemma exists_one_iff_filter_one: shows "(\!j. j < length l \ l ! j = a) \ length (filter ((=) a) l) = 1" proof (rule iffI) assume "\!j. j < length l \ l ! j = a" then obtain j where "j < length l" "l ! j = a" by blast moreover have "\k. k \ j \ k < length l \ l ! k \ a" using \\!j. j < length l \ l ! j = a\ \j < length l\ \l ! j = a\ by blast moreover have "l = take j l @ [l ! j] @ drop (j+1) l" by (metis Cons_eq_appendI Cons_nth_drop_Suc Suc_eq_plus1 append_self_conv2 append_take_drop_id calculation(1) calculation(2)) moreover have "filter ((=) a) (take j l) = []" proof - have "\k. k < length (take j l) \ (take j l) ! k \ a" using calculation(3) by auto then show ?thesis by (metis (full_types) filter_False in_set_conv_nth) qed moreover have "filter ((=) a) (drop (j+1) l) = []" proof - have "\k. k < length (drop (j+1) l) \ (drop (j+1) l) ! k \ a" using calculation(3) by auto then show ?thesis by (metis (full_types) filter_False in_set_conv_nth) qed ultimately show "length (filter ((=) a) l) = 1" by (metis (mono_tags, lifting) One_nat_def Suc_eq_plus1 append_Cons append_self_conv2 filter.simps(2) filter_append list.size(3) list.size(4)) next assume asm: "length (filter ((=) a) l) = 1" then have "filter ((=) a) l = [a]" proof - let ?xs = "filter ((=) a) l" have "length ?xs = 1" using asm by blast then show ?thesis by (metis (full_types) Cons_eq_filterD One_nat_def length_0_conv length_Suc_conv) qed then have "\j. j < length l \ l ! j = a" by (metis (full_types) filter_False in_set_conv_nth list.discI) then obtain j where j: "j < length l" "l ! j = a" by blast moreover have "\k. k < length l \ k \ j \ l ! k \ a" proof (rule allI, rule impI) fix k assume assm: "k < length l \ k \ j" show "l ! k \ a" proof (rule ccontr) assume lka: "\ l ! k \ a" show False proof (cases "k < j") let ?xs = "take (k+1) l" let ?ys = "drop (k+1) l" case True then have "length (filter ((=) a) ?xs) > 0" by (metis (full_types, hide_lams) add.commute assm discrete filter_empty_conv length_greater_0_conv length_take less_add_one lka min.absorb2 nth_mem nth_take) moreover have "length (filter ((=) a) ?ys) > 0" proof - have "?ys ! (j - (k+1)) = l ! j" using True assm by auto moreover have "j - (k+1) < length ?ys" using True \j < length l\ by auto ultimately show ?thesis by (metis (full_types) \l ! 
j = a\ filter_empty_conv length_greater_0_conv nth_mem) qed moreover have "?xs @ ?ys = l" using append_take_drop_id by blast ultimately have "length (filter ((=) a) l) > 1" by (metis (no_types, lifting) One_nat_def Suc_eq_plus1 asm filter_append length_append less_add_eq_less less_one nat_neq_iff) then show False using asm by simp next let ?xs = "take (j+1) l" let ?ys = "drop (j+1) l" case False then have "length (filter ((=) a) ?xs) > 0" by (metis (full_types, hide_lams) add.commute j discrete filter_empty_conv length_greater_0_conv length_take less_add_one min.absorb2 nth_mem nth_take) moreover have "length (filter ((=) a) ?ys) > 0" proof - have "?ys ! (k - (j+1)) = l ! k" using False assm by auto moreover have "k - (j+1) < length ?ys" using False assm by auto ultimately show ?thesis by (metis (full_types) filter_empty_conv length_greater_0_conv lka nth_mem) qed moreover have "?xs @ ?ys = l" using append_take_drop_id by blast ultimately have "length (filter ((=) a) l) > 1" by (metis (no_types, lifting) One_nat_def Suc_eq_plus1 asm filter_append length_append less_add_eq_less less_one nat_neq_iff) then show False using asm by simp qed qed qed ultimately show "\!j. j < length l \ l ! j = a" by blast qed end diff --git a/thys/Completeness/PermutationLemmas.thy b/thys/Completeness/PermutationLemmas.thy --- a/thys/Completeness/PermutationLemmas.thy +++ b/thys/Completeness/PermutationLemmas.thy @@ -1,184 +1,184 @@ section "Permutation Lemmas" theory PermutationLemmas -imports "HOL-Library.Permutation" "HOL-Library.Multiset" +imports "HOL-Library.List_Permutation" "HOL-Library.Multiset" begin \ \following function is very close to that in multisets- now we can make the connection that x <~~> y iff the multiset of x is the same as that of y\ subsection "perm, count equivalence" primrec count :: "'a \ 'a list \ nat" where "count x [] = 0" | "count x (y#ys) = (if x=y then 1 else 0) + count x ys" lemma perm_count: "A <~~> B \ (\ x. count x A = count x B)" by(induct set: perm) auto lemma count_0: "(\x. count x B = 0) = (B = [])" by(induct B) auto lemma count_Suc: "count a B = Suc m \ a : set B" apply(induct B) apply auto apply(case_tac "a = aa") apply auto done lemma count_append: "count a (xs@ys) = count a xs + count a ys" by(induct xs) auto lemma count_perm: "!! B. (\ x. count x A = count x B) \ A <~~> B" apply(induct A) apply(simp add: count_0) proof - fix a list B assume a: "\B. \x. count x list = count x B \ list <~~> B" and b: "\x. count x (a # list) = count x B" from b have "a : set B" apply auto apply (drule_tac x=a in spec, simp) apply(metis count_Suc) done from split_list[OF this] obtain xs ys where B: "B = xs@a#ys" by blast let ?B' = "xs@ys" from b have "\x. count x list = count x ?B'" by(simp add: count_append B) from a[OF this] have c: "list <~~> xs@ys" . hence "a#list <~~> a#(xs@ys)" by rule also have "a#(xs@ys) <~~> xs@a#ys" by(rule perm_append_Cons) also (perm.trans) note B[symmetric] finally show "a # list <~~> B" . qed lemma perm_count_conv: "A <~~> B = (\ x. count x A = count x B)" apply(blast intro!: perm_count count_perm) done subsection "Properties closed under Perm and Contr hold for x iff hold for remdups x" lemma remdups_append: "y : set ys --> remdups (ws@y#ys) = remdups (ws@ys)" apply (induct ws, simp) apply (case_tac "y = a", simp, simp) done lemma perm_contr': assumes perm[rule_format]: "! xs ys. xs <~~> ys --> (P xs = P ys)" and contr'[rule_format]: "! x xs. P(x#x#xs) = P (x#xs)" shows "! xs. 
length xs = n --> (P xs = P (remdups xs))" apply(induct n rule: nat_less_induct) proof (safe) fix xs :: "'a list" assume a[rule_format]: "\mys. length ys = m \ P ys = P (remdups ys)" show "P xs = P (remdups xs)" proof (cases "distinct xs") case True thus ?thesis by(simp add:distinct_remdups_id) next case False from not_distinct_decomp[OF this] obtain ws ys zs y where xs: "xs = ws@[y]@ys@[y]@zs" by force have "P xs = P (ws@[y]@ys@[y]@zs)" by (simp add: xs) also have "... = P ([y,y]@ws@ys@zs)" apply(rule perm) apply(rule iffD2[OF perm_count_conv]) apply rule apply(simp add: count_append) done also have "... = P ([y]@ws@ys@zs)" apply simp apply(rule contr') done also have "... = P (ws@ys@[y]@zs)" apply(rule perm) apply(rule iffD2[OF perm_count_conv]) apply rule apply(simp add: count_append) done also have "... = P (remdups (ws@ys@[y]@zs))" apply(rule a) by(auto simp: xs) also have "(remdups (ws@ys@[y]@zs)) = (remdups xs)" apply(simp add: xs remdups_append) done finally show "P xs = P (remdups xs)" . qed qed lemma perm_contr: assumes perm: "! xs ys. xs <~~> ys --> (P xs = P ys)" and contr': "! x xs. P(x#x#xs) = P (x#xs)" shows "(P xs = P (remdups xs))" apply(rule perm_contr'[OF perm contr', rule_format]) by force subsection "List properties closed under Perm, Weak and Contr are monotonic in the set of the list" definition rem :: "'a => 'a list => 'a list" where "rem x xs = filter (%y. y ~= x) xs" lemma rem: "x ~: set (rem x xs)" by(simp add: rem_def) lemma length_rem: "length (rem x xs) <= length xs" by(simp add: rem_def) lemma rem_notin: "x ~: set xs ==> rem x xs = xs" apply(simp add: rem_def) apply(rule filter_True) apply force done lemma perm_weak_filter': assumes perm[rule_format]: "! xs ys. xs <~~> ys --> (P xs = P ys)" and weak[rule_format]: "! x xs. P xs --> P (x#xs)" shows "! ys. P (ys@filter Q xs) --> P (ys@xs)" apply (induct xs, simp, rule) apply rule apply simp apply (case_tac "Q a", simp) apply(drule_tac x="ys@[a]" in spec) apply simp apply simp apply(drule_tac x="ys@[a]" in spec) apply simp apply(erule impE) apply(subgoal_tac "(ys @ a # filter Q xs) <~~> a#ys@filter Q xs") apply(simp add: perm) apply(rule weak) apply simp apply(rule perm_sym) apply(rule perm_append_Cons) . lemma perm_weak_filter: assumes perm: "! xs ys. xs <~~> ys --> (P xs = P ys)" and weak: "! x xs. P xs --> P (x#xs)" shows "P (filter Q xs) ==> P xs" using perm_weak_filter'[OF perm weak, rule_format, of "[]", simplified] by blast \ \right, now in a position to prove that in presence of perm, contr and weak, set x leq set y and x : ded implies y : ded\ lemma perm_weak_contr_mono: assumes perm: "! xs ys. xs <~~> ys --> (P xs = P ys)" and contr: "! x xs. P (x#x#xs) --> P (x#xs)" and weak: "! x xs. P xs --> P (x#xs)" and xy: "set x <= set y" and Px : "P x" shows "P y" proof - from contr weak have contr': "! x xs. P(x#x#xs) = P (x#xs)" by blast define y' where "y' = filter (% z. z : set x) y" from xy have "set x = set y'" apply(simp add: y'_def) apply blast done hence rxry': "remdups x <~~> remdups y'" by(simp add: perm_remdups_iff_eq_set) from Px perm_contr[OF perm contr'] have Prx: "P (remdups x)" by simp with rxry' have "P (remdups y')" by(simp add: perm) with perm_contr[OF perm contr'] have "P y'" by simp thus "P y" apply(simp add: y'_def) apply(rule perm_weak_filter[OF perm weak]) . 
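(* A small worked illustration of the count-based view of permutation established
   above (perm_count_conv); it is not needed by any proof in this theory.  The lists
   [1, 2, 2, 3] and [2, 3, 1, 2] agree on every count (1 occurs once, 2 twice, 3 once),
   so they are related by <~~>, whereas [1, 2, 3] and [1, 2, 2] disagree on the count
   of 2 and hence are not.  Ground instances of count are decided by simp, e.g. the
   hypothetical test lemma

     lemma "count (2::nat) [1, 2, 2, 3] = count (2::nat) [2, 3, 1, 2]"
       by simp

   and the corresponding permutation statement "[1::nat, 2, 2, 3] <~~> [2, 3, 1, 2]"
   would then follow from perm_count_conv together with a case analysis on the
   element being counted. *)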
qed (* No, not used subsection "Following used in Soundness" primrec multiset_of_list :: "'a list \ 'a multiset" where "multiset_of_list [] = {#}" | "multiset_of_list (x#xs) = {#x#} + multiset_of_list xs" lemma count_count[symmetric]: "count x A = Multiset.count (multiset_of_list A) x" by (induct A) simp_all lemma perm_multiset: "A <~~> B = (multiset_of_list A = multiset_of_list B)" apply(simp add: perm_count_conv) apply(simp add: multiset_eq_iff) apply(simp add: count_count) done lemma set_of_multiset_of_list: "set_of (multiset_of_list A) = set A" by (induct A) auto *) end diff --git a/thys/Groebner_Macaulay/Hilbert_Function.thy b/thys/Groebner_Macaulay/Hilbert_Function.thy --- a/thys/Groebner_Macaulay/Hilbert_Function.thy +++ b/thys/Groebner_Macaulay/Hilbert_Function.thy @@ -1,1404 +1,1404 @@ (* Author: Alexander Maletzky *) section \Direct Decompositions and Hilbert Functions\ theory Hilbert_Function - imports Dube_Prelims Degree_Section "HOL-Library.Permutation" + imports Dube_Prelims Degree_Section "HOL-Library.List_Permutation" begin subsection \Direct Decompositions\ text \The main reason for defining \direct_decomp\ in terms of lists rather than sets is that lemma \direct_decomp_direct_decomp\ can be proved easier. At some point one could invest the time to re-define \direct_decomp\ in terms of sets (possibly adding a couple of further assumptions to \direct_decomp_direct_decomp\).\ definition direct_decomp :: "'a set \ 'a::comm_monoid_add set list \ bool" where "direct_decomp A ss \ bij_betw sum_list (listset ss) A" lemma direct_decompI: "inj_on sum_list (listset ss) \ sum_list ` listset ss = A \ direct_decomp A ss" by (simp add: direct_decomp_def bij_betw_def) lemma direct_decompI_alt: "(\qs. qs \ listset ss \ sum_list qs \ A) \ (\a. a \ A \ \!qs\listset ss. a = sum_list qs) \ direct_decomp A ss" by (auto simp: direct_decomp_def intro!: bij_betwI') blast lemma direct_decompD: assumes "direct_decomp A ss" shows "qs \ listset ss \ sum_list qs \ A" and "inj_on sum_list (listset ss)" and "sum_list ` listset ss = A" using assms by (auto simp: direct_decomp_def bij_betw_def) lemma direct_decompE: assumes "direct_decomp A ss" and "a \ A" obtains qs where "qs \ listset ss" and "a = sum_list qs" using assms by (auto simp: direct_decomp_def bij_betw_def) lemma direct_decomp_unique: "direct_decomp A ss \ qs \ listset ss \ qs' \ listset ss \ sum_list qs = sum_list qs' \ qs = qs'" by (auto dest: direct_decompD simp: inj_on_def) lemma direct_decomp_singleton: "direct_decomp A [A]" proof (rule direct_decompI_alt) fix qs assume "qs \ listset [A]" then obtain q where "q \ A" and "qs = [q]" by (rule listset_singletonE) thus "sum_list qs \ A" by simp next fix a assume "a \ A" show "\!qs\listset [A]. a = sum_list qs" proof (intro ex1I conjI allI impI) from \a \ A\ refl show "[a] \ listset [A]" by (rule listset_singletonI) next fix qs assume "qs \ listset [A] \ a = sum_list qs" hence a: "a = sum_list qs" and "qs \ listset [A]" by simp_all from this(2) obtain b where qs: "qs = [b]" by (rule listset_singletonE) with a show "qs = [a]" by simp qed simp_all qed (* TODO: Move. *) lemma mset_bij: assumes "bij_betw f {..i. i < length xs \ xs ! i = ys ! f i" shows "mset xs = mset ys" proof - from assms(1) have 1: "inj_on f {0..i < length xs\ have "\ = map ((!) ys \ f) [0.. f) [0.. = image_mset ((!) ys) (image_mset f (mset_set {0.. = image_mset ((!) ys) (mset_set {0.. = mset (map ((!) ys) [0..f. bij_betw f {.. (\ii. i < length ss2 \ ss1 ! i = ss2 ! 
f i" unfolding len_ss1 by blast define g where "g = inv_into {.. f ` {.. {.. {.. g ` {.. = {.. listset ss2" then obtain qs1 where qs1_in: "qs1 \ listset ss1" and len_qs1: "length qs1 = length qs2" and *: "\i. i < length qs2 \ qs1 ! i = qs2 ! f i" using f_bij f by (rule listset_permE) blast+ from \qs2 \ listset ss2\ have "length qs2 = length ss2" by (rule listsetD) with f_bij have "bij_betw f {.. \ A" by (rule direct_decompD) finally show "sum_list qs2 \ A" . next fix a assume "a \ A" with assms(1) obtain qs where a: "a = sum_list qs" and qs_in: "qs \ listset ss1" by (rule direct_decompE) from qs_in obtain qs2 where qs2_in: "qs2 \ listset ss2" and len_qs2: "length qs2 = length qs" and 1: "\i. i < length qs \ qs2 ! i = qs ! g i" using g_bij g by (rule listset_permE) blast+ show "\!qs\listset ss2. a = sum_list qs" proof (intro ex1I conjI allI impI) from qs_in have len_qs: "length qs = length ss1" by (rule listsetD) with g_bij have g_bij2: "bij_betw g {.. listset ss2 \ a = sum_list qs'" hence qs'_in: "qs' \ listset ss2" and a': "a = sum_list qs'" by simp_all from this(1) obtain qs1 where qs1_in: "qs1 \ listset ss1" and len_qs1: "length qs1 = length qs'" and 2: "\i. i < length qs' \ qs1 ! i = qs' ! f i" using f_bij f by (rule listset_permE) blast+ from \qs' \ listset ss2\ have "length qs' = length ss2" by (rule listsetD) with f_bij have "bij_betw f {.. {.. g ` {.. = {..i < length qs\ have "qs2 ! i = qs ! g i" by (rule 1) also have "\ = qs1 ! g i" by (simp only: \qs1 = qs\) also from \g i < length qs'\ have "\ = qs' ! f (g i)" by (rule 2) also from \i < length ss1\ have "\ = qs' ! i" by (simp only: f_g) finally show "qs' ! i = qs2 ! i" by (rule sym) qed qed fact qed qed lemma direct_decomp_split_map: "direct_decomp A (map f ss) \ direct_decomp A (map f (filter P ss) @ map f (filter (- P) ss))" proof (rule direct_decomp_perm) show "perm (map f ss) (map f (filter P ss) @ map f (filter (- P) ss))" proof (induct ss) case Nil show ?case by simp next case (Cons s ss) show ?case proof (cases "P s") case True with Cons show ?thesis by simp next case False have "map f (s # ss) = f s # map f ss" by simp also from Cons have "perm (f s # map f ss) (f s # map f (filter P ss) @ map f (filter (- P) ss))" by (rule perm.intros) also have "perm \ (map f (filter P ss) @ map f (s # filter (- P) ss))" by (simp add: perm_append_Cons) also(trans) from False have "\ = map f (filter P (s # ss)) @ map f (filter (- P) (s # ss))" by simp finally show ?thesis . qed qed qed lemmas direct_decomp_split = direct_decomp_split_map[where f=id, simplified] lemma direct_decomp_direct_decomp: assumes "direct_decomp A (s # ss)" and "direct_decomp s rs" shows "direct_decomp A (ss @ rs)" (is "direct_decomp A ?ss") proof (rule direct_decompI_alt) fix qs assume "qs \ listset ?ss" then obtain qs1 qs2 where qs1: "qs1 \ listset ss" and qs2: "qs2 \ listset rs" and qs: "qs = qs1 @ qs2" by (rule listset_appendE) have "sum_list qs = sum_list ((sum_list qs2) # qs1)" by (simp add: qs add.commute) also from assms(1) have "\ \ A" proof (rule direct_decompD) from assms(2) qs2 have "sum_list qs2 \ s" by (rule direct_decompD) thus "sum_list qs2 # qs1 \ listset (s # ss)" using qs1 refl by (rule listset_ConsI) qed finally show "sum_list qs \ A" . 
next fix a assume "a \ A" with assms(1) obtain qs1 where qs1_in: "qs1 \ listset (s # ss)" and a: "a = sum_list qs1" by (rule direct_decompE) from qs1_in obtain qs11 qs12 where "qs11 \ s" and qs12_in: "qs12 \ listset ss" and qs1: "qs1 = qs11 # qs12" by (rule listset_ConsE) from assms(2) this(1) obtain qs2 where qs2_in: "qs2 \ listset rs" and qs11: "qs11 = sum_list qs2" by (rule direct_decompE) let ?qs = "qs12 @ qs2" show "\!qs\listset ?ss. a = sum_list qs" proof (intro ex1I conjI allI impI) from qs12_in qs2_in refl show "?qs \ listset ?ss" by (rule listset_appendI) show "a = sum_list ?qs" by (simp add: a qs1 qs11 add.commute) fix qs0 assume "qs0 \ listset ?ss \ a = sum_list qs0" hence qs0_in: "qs0 \ listset ?ss" and a2: "a = sum_list qs0" by simp_all from this(1) obtain qs01 qs02 where qs01_in: "qs01 \ listset ss" and qs02_in: "qs02 \ listset rs" and qs0: "qs0 = qs01 @ qs02" by (rule listset_appendE) note assms(1) moreover from _ qs01_in refl have "(sum_list qs02) # qs01 \ listset (s # ss)" (is "?qs' \ _") proof (rule listset_ConsI) from assms(2) qs02_in show "sum_list qs02 \ s" by (rule direct_decompD) qed moreover note qs1_in moreover from a2 have "sum_list ?qs' = sum_list qs1" by (simp add: qs0 a add.commute) ultimately have "?qs' = qs11 # qs12" unfolding qs1 by (rule direct_decomp_unique) hence "qs11 = sum_list qs02" and 1: "qs01 = qs12" by simp_all from this(1) have "sum_list qs02 = sum_list qs2" by (simp only: qs11) with assms(2) qs02_in qs2_in have "qs02 = qs2" by (rule direct_decomp_unique) thus "qs0 = qs12 @ qs2" by (simp only: 1 qs0) qed qed lemma sum_list_map_times: "sum_list (map ((*) x) xs) = (x::'a::semiring_0) * sum_list xs" by (induct xs) (simp_all add: algebra_simps) lemma direct_decomp_image_times: assumes "direct_decomp (A::'a::semiring_0 set) ss" and "\a b. x * a = x * b \ x \ 0 \ a = b" shows "direct_decomp ((*) x ` A) (map ((`) ((*) x)) ss)" (is "direct_decomp ?A ?ss") proof (rule direct_decompI_alt) fix qs assume "qs \ listset ?ss" then obtain qs0 where qs0_in: "qs0 \ listset ss" and qs: "qs = map ((*) x) qs0" by (rule listset_map_imageE) have "sum_list qs = x * sum_list qs0" by (simp only: qs sum_list_map_times) moreover from assms(1) qs0_in have "sum_list qs0 \ A" by (rule direct_decompD) ultimately show "sum_list qs \ (*) x ` A" by (rule image_eqI) next fix a assume "a \ ?A" then obtain a' where "a' \ A" and a: "a = x * a'" .. from assms(1) this(1) obtain qs' where qs'_in: "qs' \ listset ss" and a': "a' = sum_list qs'" by (rule direct_decompE) define qs where "qs = map ((*) x) qs'" show "\!qs\listset ?ss. 
a = sum_list qs" proof (intro ex1I conjI allI impI) from qs'_in qs_def show "qs \ listset ?ss" by (rule listset_map_imageI) fix qs0 assume "qs0 \ listset ?ss \ a = sum_list qs0" hence "qs0 \ listset ?ss" and a0: "a = sum_list qs0" by simp_all from this(1) obtain qs1 where qs1_in: "qs1 \ listset ss" and qs0: "qs0 = map ((*) x) qs1" by (rule listset_map_imageE) show "qs0 = qs" proof (cases "x = 0") case True from qs1_in have "length qs1 = length ss" by (rule listsetD) moreover from qs'_in have "length qs' = length ss" by (rule listsetD) ultimately show ?thesis by (simp add: qs_def qs0 list_eq_iff_nth_eq True) next case False have "x * sum_list qs1 = a" by (simp only: a0 qs0 sum_list_map_times) also have "\ = x * sum_list qs'" by (simp only: a' a) finally have "sum_list qs1 = sum_list qs'" using False by (rule assms(2)) with assms(1) qs1_in qs'_in have "qs1 = qs'" by (rule direct_decomp_unique) thus ?thesis by (simp only: qs0 qs_def) qed qed (simp only: a a' qs_def sum_list_map_times) qed lemma direct_decomp_appendD: assumes "direct_decomp A (ss1 @ ss2)" shows "{} \ set ss2 \ direct_decomp (sum_list ` listset ss1) ss1" (is "_ \ ?thesis1") and "{} \ set ss1 \ direct_decomp (sum_list ` listset ss2) ss2" (is "_ \ ?thesis2") and "direct_decomp A [sum_list ` listset ss1, sum_list ` listset ss2]" (is "direct_decomp _ ?ss") proof - have rl: "direct_decomp (sum_list ` listset ts1) ts1" if "direct_decomp A (ts1 @ ts2)" and "{} \ set ts2" for ts1 ts2 proof (intro direct_decompI inj_onI refl) fix qs1 qs2 assume qs1: "qs1 \ listset ts1" and qs2: "qs2 \ listset ts1" assume eq: "sum_list qs1 = sum_list qs2" from that(2) have "listset ts2 \ {}" by (simp add: listset_empty_iff) then obtain qs3 where qs3: "qs3 \ listset ts2" by blast note that(1) moreover from qs1 qs3 refl have "qs1 @ qs3 \ listset (ts1 @ ts2)" by (rule listset_appendI) moreover from qs2 qs3 refl have "qs2 @ qs3 \ listset (ts1 @ ts2)" by (rule listset_appendI) moreover have "sum_list (qs1 @ qs3) = sum_list (qs2 @ qs3)" by (simp add: eq) ultimately have "qs1 @ qs3 = qs2 @ qs3" by (rule direct_decomp_unique) thus "qs1 = qs2" by simp qed { assume "{} \ set ss2" with assms show ?thesis1 by (rule rl) } { from assms perm_append_swap have "direct_decomp A (ss2 @ ss1)" by (rule direct_decomp_perm) moreover assume "{} \ set ss1" ultimately show ?thesis2 by (rule rl) } show "direct_decomp A ?ss" proof (rule direct_decompI_alt) fix qs assume "qs \ listset ?ss" then obtain q1 q2 where q1: "q1 \ sum_list ` listset ss1" and q2: "q2 \ sum_list ` listset ss2" and qs: "qs = [q1, q2]" by (rule listset_doubletonE) from q1 obtain qs1 where qs1: "qs1 \ listset ss1" and q1: "q1 = sum_list qs1" .. from q2 obtain qs2 where qs2: "qs2 \ listset ss2" and q2: "q2 = sum_list qs2" .. from qs1 qs2 refl have "qs1 @ qs2 \ listset (ss1 @ ss2)" by (rule listset_appendI) with assms have "sum_list (qs1 @ qs2) \ A" by (rule direct_decompD) thus "sum_list qs \ A" by (simp add: qs q1 q2) next fix a assume "a \ A" with assms obtain qs0 where qs0_in: "qs0 \ listset (ss1 @ ss2)" and a: "a = sum_list qs0" by (rule direct_decompE) from this(1) obtain qs1 qs2 where qs1: "qs1 \ listset ss1" and qs2: "qs2 \ listset ss2" and qs0: "qs0 = qs1 @ qs2" by (rule listset_appendE) from qs1 have len_qs1: "length qs1 = length ss1" by (rule listsetD) define qs where "qs = [sum_list qs1, sum_list qs2]" show "\!qs\listset ?ss. 
a = sum_list qs" proof (intro ex1I conjI) from qs1 have "sum_list qs1 \ sum_list ` listset ss1" by (rule imageI) moreover from qs2 have "sum_list qs2 \ sum_list ` listset ss2" by (rule imageI) ultimately show "qs \ listset ?ss" using qs_def by (rule listset_doubletonI) fix qs' assume "qs' \ listset ?ss \ a = sum_list qs'" hence "qs' \ listset ?ss" and a': "a = sum_list qs'" by simp_all from this(1) obtain q1 q2 where q1: "q1 \ sum_list ` listset ss1" and q2: "q2 \ sum_list ` listset ss2" and qs': "qs' = [q1, q2]" by (rule listset_doubletonE) from q1 obtain qs1' where qs1': "qs1' \ listset ss1" and q1: "q1 = sum_list qs1'" .. from q2 obtain qs2' where qs2': "qs2' \ listset ss2" and q2: "q2 = sum_list qs2'" .. from qs1' have len_qs1': "length qs1' = length ss1" by (rule listsetD) note assms moreover from qs1' qs2' refl have "qs1' @ qs2' \ listset (ss1 @ ss2)" by (rule listset_appendI) moreover note qs0_in moreover have "sum_list (qs1' @ qs2') = sum_list qs0" by (simp add: a' qs' flip: a q1 q2) ultimately have "qs1' @ qs2' = qs0" by (rule direct_decomp_unique) also have "\ = qs1 @ qs2" by fact finally show "qs' = qs" by (simp add: qs_def qs' q1 q2 len_qs1 len_qs1') qed (simp add: qs_def a qs0) qed qed lemma direct_decomp_Cons_zeroI: assumes "direct_decomp A ss" shows "direct_decomp A ({0} # ss)" proof (rule direct_decompI_alt) fix qs assume "qs \ listset ({0} # ss)" then obtain q qs' where "q \ {0}" and "qs' \ listset ss" and "qs = q # qs'" by (rule listset_ConsE) from this(1, 3) have "sum_list qs = sum_list qs'" by simp also from assms \qs' \ listset ss\ have "\ \ A" by (rule direct_decompD) finally show "sum_list qs \ A" . next fix a assume "a \ A" with assms obtain qs' where qs': "qs' \ listset ss" and a: "a = sum_list qs'" by (rule direct_decompE) define qs where "qs = 0 # qs'" show "\!qs. qs \ listset ({0} # ss) \ a = sum_list qs" proof (intro ex1I conjI) from _ qs' qs_def show "qs \ listset ({0} # ss)" by (rule listset_ConsI) simp next fix qs0 assume "qs0 \ listset ({0} # ss) \ a = sum_list qs0" hence "qs0 \ listset ({0} # ss)" and a0: "a = sum_list qs0" by simp_all from this(1) obtain q0 qs0' where "q0 \ {0}" and qs0': "qs0' \ listset ss" and qs0: "qs0 = q0 # qs0'" by (rule listset_ConsE) from this(1, 3) have "sum_list qs0' = sum_list qs'" by (simp add: a0 flip: a) with assms qs0' qs' have "qs0' = qs'" by (rule direct_decomp_unique) with \q0 \ {0}\ show "qs0 = qs" by (simp add: qs_def qs0) qed (simp add: qs_def a) qed lemma direct_decomp_Cons_zeroD: assumes "direct_decomp A ({0} # ss)" shows "direct_decomp A ss" proof - have "direct_decomp {0} []" by (simp add: direct_decomp_def bij_betw_def) with assms have "direct_decomp A (ss @ [])" by (rule direct_decomp_direct_decomp) thus ?thesis by simp qed lemma direct_decomp_Cons_subsetI: assumes "direct_decomp A (s # ss)" and "\s0. s0 \ set ss \ 0 \ s0" shows "s \ A" proof fix x assume "x \ s" moreover from assms(2) have "map (\_. 0) ss \ listset ss" by (induct ss, auto simp del: listset.simps(2) intro: listset_ConsI) ultimately have "x # (map (\_. 0) ss) \ listset (s # ss)" using refl by (rule listset_ConsI) with assms(1) have "sum_list (x # (map (\_. 0) ss)) \ A" by (rule direct_decompD) thus "x \ A" by simp qed lemma direct_decomp_Int_zero: assumes "direct_decomp A ss" and "i < j" and "j < length ss" and "\s. s \ set ss \ 0 \ s" shows "ss ! i \ ss ! j = {0}" proof - from assms(2, 3) have "i < length ss" by (rule less_trans) hence i_in: "ss ! i \ set ss" by simp from assms(3) have j_in: "ss ! 
j \ set ss" by simp show ?thesis proof show "ss ! i \ ss ! j \ {0}" proof fix x assume "x \ ss ! i \ ss ! j" hence x_i: "x \ ss ! i" and x_j: "x \ ss ! j" by simp_all have 1: "(map (\_. 0) ss)[k := y] \ listset ss" if "k < length ss" and "y \ ss ! k" for k y using assms(4) that proof (induct ss arbitrary: k) case Nil from Nil(2) show ?case by simp next case (Cons s ss) have *: "\s'. s' \ set ss \ 0 \ s'" by (rule Cons.prems) simp show ?case proof (cases k) case k: 0 with Cons.prems(3) have "y \ s" by simp moreover from * have "map (\_. 0) ss \ listset ss" by (induct ss) (auto simp del: listset.simps(2) intro: listset_ConsI) moreover have "(map (\_. 0) (s # ss))[k := y] = y # map (\_. 0) ss" by (simp add: k) ultimately show ?thesis by (rule listset_ConsI) next case k: (Suc k') have "0 \ s" by (rule Cons.prems) simp moreover from * have "(map (\_. 0) ss)[k' := y] \ listset ss" proof (rule Cons.hyps) from Cons.prems(2) show "k' < length ss" by (simp add: k) next from Cons.prems(3) show "y \ ss ! k'" by (simp add: k) qed moreover have "(map (\_. 0) (s # ss))[k := y] = 0 # (map (\_. 0) ss)[k' := y]" by (simp add: k) ultimately show ?thesis by (rule listset_ConsI) qed qed have 2: "sum_list ((map (\_. 0) ss)[k := y]) = y" if "k < length ss" for k and y::'a using that by (induct ss arbitrary: k) (auto simp: add_ac split: nat.split) define qs1 where "qs1 = (map (\_. 0) ss)[i := x]" define qs2 where "qs2 = (map (\_. 0) ss)[j := x]" note assms(1) moreover from \i < length ss\ x_i have "qs1 \ listset ss" unfolding qs1_def by (rule 1) moreover from assms(3) x_j have "qs2 \ listset ss" unfolding qs2_def by (rule 1) thm sum_list_update moreover from \i < length ss\ assms(3) have "sum_list qs1 = sum_list qs2" by (simp add: qs1_def qs2_def 2) ultimately have "qs1 = qs2" by (rule direct_decomp_unique) hence "qs1 ! i = qs2 ! i" by simp with \i < length ss\ assms(2, 3) show "x \ {0}" by (simp add: qs1_def qs2_def) qed next from i_in have "0 \ ss ! i" by (rule assms(4)) moreover from j_in have "0 \ ss ! j" by (rule assms(4)) ultimately show "{0} \ ss ! i \ ss ! j" by simp qed qed corollary direct_decomp_pairwise_zero: assumes "direct_decomp A ss" and "\s. s \ set ss \ 0 \ s" shows "pairwise (\s1 s2. s1 \ s2 = {0}) (set ss)" proof (rule pairwiseI) fix s1 s2 assume "s1 \ set ss" then obtain i where "i < length ss" and s1: "s1 = ss ! i" by (metis in_set_conv_nth) assume "s2 \ set ss" then obtain j where "j < length ss" and s2: "s2 = ss ! j" by (metis in_set_conv_nth) assume "s1 \ s2" hence "i < j \ j < i" by (auto simp: s1 s2) thus "s1 \ s2 = {0}" proof assume "i < j" with assms(1) show ?thesis unfolding s1 s2 using \j < length ss\ assms(2) by (rule direct_decomp_Int_zero) next assume "j < i" with assms(1) have "s2 \ s1 = {0}" unfolding s1 s2 using \i < length ss\ assms(2) by (rule direct_decomp_Int_zero) thus ?thesis by (simp only: Int_commute) qed qed corollary direct_decomp_repeated_eq_zero: assumes "direct_decomp A ss" and "1 < count_list ss X" and "\s. s \ set ss \ 0 \ s" shows "X = {0}" proof - from assms(2) obtain i j where "i < j" and "j < length ss" and 1: "ss ! i = X" and 2: "ss ! j = X" by (rule count_list_gr_1_E) from assms(1) this(1, 2) assms(3) have "ss ! i \ ss ! j = {0}" by (rule direct_decomp_Int_zero) thus ?thesis by (simp add: 1 2) qed corollary direct_decomp_map_Int_zero: assumes "direct_decomp A (map f ss)" and "s1 \ set ss" and "s2 \ set ss" and "s1 \ s2" and "\s. s \ set ss \ 0 \ f s" shows "f s1 \ f s2 = {0}" proof - from assms(2) obtain i where "i < length ss" and s1: "s1 = ss ! 
i" by (metis in_set_conv_nth) from this(1) have i: "i < length (map f ss)" by simp from assms(3) obtain j where "j < length ss" and s2: "s2 = ss ! j" by (metis in_set_conv_nth) from this(1) have j: "j < length (map f ss)" by simp have *: "0 \ s" if "s \ set (map f ss)" for s proof - from that obtain s' where "s' \ set ss" and s: "s = f s'" unfolding set_map .. from this(1) show "0 \ s" unfolding s by (rule assms(5)) qed show ?thesis proof (rule linorder_cases) assume "i < j" with assms(1) have "(map f ss) ! i \ (map f ss) ! j = {0}" using j * by (rule direct_decomp_Int_zero) with i j show ?thesis by (simp add: s1 s2) next assume "j < i" with assms(1) have "(map f ss) ! j \ (map f ss) ! i = {0}" using i * by (rule direct_decomp_Int_zero) with i j show ?thesis by (simp add: s1 s2 Int_commute) next assume "i = j" with assms(4) show ?thesis by (simp add: s1 s2) qed qed subsection \Direct Decompositions and Vector Spaces\ definition (in vector_space) is_basis :: "'b set \ 'b set \ bool" where "is_basis V B \ (B \ V \ independent B \ V \ span B \ card B = dim V)" definition (in vector_space) some_basis :: "'b set \ 'b set" where "some_basis V = Eps (local.is_basis V)" hide_const (open) real_vector.is_basis real_vector.some_basis context vector_space begin lemma dim_empty [simp]: "dim {} = 0" using dim_span_eq_card_independent independent_empty by fastforce lemma dim_zero [simp]: "dim {0} = 0" using dim_span_eq_card_independent independent_empty by fastforce lemma independent_UnI: assumes "independent A" and "independent B" and "span A \ span B = {0}" shows "independent (A \ B)" proof from span_superset have "A \ B \ span A \ span B" by blast hence "A \ B = {}" unfolding assms(3) using assms(1, 2) dependent_zero by blast assume "dependent (A \ B)" then obtain T u v where "finite T" and "T \ A \ B" and eq: "(\v\T. u v *s v) = 0" and "v \ T" and "u v \ 0" unfolding dependent_explicit by blast define TA where "TA = T \ A" define TB where "TB = T \ B" from \T \ A \ B\ have T: "T = TA \ TB" by (auto simp: TA_def TB_def) from \finite T\ have "finite TA" and "TA \ A" by (simp_all add: TA_def) from \finite T\ have "finite TB" and "TB \ B" by (simp_all add: TB_def) from \A \ B = {}\ \TA \ A\ this(2) have "TA \ TB = {}" by blast have "0 = (\v\TA \ TB. u v *s v)" by (simp only: eq flip: T) also have "\ = (\v\TA. u v *s v) + (\v\TB. u v *s v)" by (rule sum.union_disjoint) fact+ finally have "(\v\TA. u v *s v) = (\v\TB. (- u) v *s v)" (is "?x = ?y") by (simp add: sum_negf eq_neg_iff_add_eq_0) from \finite TB\ \TB \ B\ have "?y \ span B" by (auto simp: span_explicit simp del: uminus_apply) moreover from \finite TA\ \TA \ A\ have "?x \ span A" by (auto simp: span_explicit) ultimately have "?y \ span A \ span B" by (simp add: \?x = ?y\) hence "?x = 0" and "?y = 0" by (simp_all add: \?x = ?y\ assms(3)) from \v \ T\ have "v \ TA \ TB" by (simp only: T) hence "u v = 0" proof assume "v \ TA" with assms(1) \finite TA\ \TA \ A\ \?x = 0\ show "u v = 0" by (rule independentD) next assume "v \ TB" with assms(2) \finite TB\ \TB \ B\ \?y = 0\ have "(- u) v = 0" by (rule independentD) thus "u v = 0" by simp qed with \u v \ 0\ show False .. qed lemma subspace_direct_decomp: assumes "direct_decomp A ss" and "\s. s \ set ss \ subspace s" shows "subspace A" proof (rule subspaceI) let ?qs = "map (\_. 
0) ss" from assms(2) have "?qs \ listset ss" by (induct ss) (auto simp del: listset.simps(2) dest: subspace_0 intro: listset_ConsI) with assms(1) have "sum_list ?qs \ A" by (rule direct_decompD) thus "0 \ A" by simp next fix p q assume "p \ A" with assms(1) obtain ps where ps: "ps \ listset ss" and p: "p = sum_list ps" by (rule direct_decompE) assume "q \ A" with assms(1) obtain qs where qs: "qs \ listset ss" and q: "q = sum_list qs" by (rule direct_decompE) from ps qs have l: "length ps = length qs" by (simp only: listsetD) from ps qs have "map2 (+) ps qs \ listset ss" (is "?qs \ _") by (rule listset_closed_map2) (auto dest: assms(2) subspace_add) with assms(1) have "sum_list ?qs \ A" by (rule direct_decompD) thus "p + q \ A" using l by (simp only: p q sum_list_map2_plus) next fix c p assume "p \ A" with assms(1) obtain ps where "ps \ listset ss" and p: "p = sum_list ps" by (rule direct_decompE) from this(1) have "map ((*s) c) ps \ listset ss" (is "?qs \ _") by (rule listset_closed_map) (auto dest: assms(2) subspace_scale) with assms(1) have "sum_list ?qs \ A" by (rule direct_decompD) also have "sum_list ?qs = c *s sum_list ps" by (induct ps) (simp_all add: scale_right_distrib) finally show "c *s p \ A" by (simp only: p) qed lemma is_basis_alt: "subspace V \ is_basis V B \ (independent B \ span B = V)" by (metis (full_types) is_basis_def dim_eq_card span_eq span_eq_iff) lemma is_basis_finite: "is_basis V A \ is_basis V B \ finite A \ finite B" unfolding is_basis_def using independent_span_bound by auto lemma some_basis_is_basis: "is_basis V (some_basis V)" proof - obtain B where "B \ V" and "independent B" and "V \ span B" and "card B = dim V" by (rule basis_exists) hence "is_basis V B" by (simp add: is_basis_def) thus ?thesis unfolding some_basis_def by (rule someI) qed corollary shows some_basis_subset: "some_basis V \ V" and independent_some_basis: "independent (some_basis V)" and span_some_basis_supset: "V \ span (some_basis V)" and card_some_basis: "card (some_basis V) = dim V" using some_basis_is_basis[of V] by (simp_all add: is_basis_def) lemma some_basis_not_zero: "0 \ some_basis V" using independent_some_basis dependent_zero by blast lemma span_some_basis: "subspace V \ span (some_basis V) = V" by (simp add: span_subspace some_basis_subset span_some_basis_supset) lemma direct_decomp_some_basis_pairwise_disjnt: assumes "direct_decomp A ss" and "\s. s \ set ss \ subspace s" shows "pairwise (\s1 s2. disjnt (some_basis s1) (some_basis s2)) (set ss)" proof (rule pairwiseI) fix s1 s2 assume "s1 \ set ss" and "s2 \ set ss" and "s1 \ s2" have "some_basis s1 \ some_basis s2 \ s1 \ s2" using some_basis_subset by blast also from direct_decomp_pairwise_zero have "\ = {0}" proof (rule pairwiseD) fix s assume "s \ set ss" hence "subspace s" by (rule assms(2)) thus "0 \ s" by (rule subspace_0) qed fact+ finally have "some_basis s1 \ some_basis s2 \ {0}" . with some_basis_not_zero show "disjnt (some_basis s1) (some_basis s2)" unfolding disjnt_def by blast qed lemma direct_decomp_span_some_basis: assumes "direct_decomp A ss" and "\s. s \ set ss \ subspace s" shows "span (\(some_basis ` set ss)) = A" proof - from assms(1) have eq0[symmetric]: "sum_list ` listset ss = A" by (rule direct_decompD) show ?thesis unfolding eq0 using assms(2) proof (induct ss) case Nil show ?case by simp next case (Cons s ss) have "subspace s" by (rule Cons.prems) simp hence eq1: "span (some_basis s) = s" by (rule span_some_basis) have "\s'. 
s' \ set ss \ subspace s'" by (rule Cons.prems) simp hence eq2: "span (\ (some_basis ` set ss)) = sum_list ` listset ss" by (rule Cons.hyps) have "span (\ (some_basis ` set (s # ss))) = {x + y |x y. x \ s \ y \ sum_list ` listset ss}" by (simp add: span_Un eq1 eq2) also have "\ = sum_list ` listset (s # ss)" (is "?A = ?B") proof show "?A \ ?B" proof fix a assume "a \ ?A" then obtain x y where "x \ s" and "y \ sum_list ` listset ss" and a: "a = x + y" by blast from this(2) obtain qs where "qs \ listset ss" and y: "y = sum_list qs" .. from \x \ s\ this(1) refl have "x # qs \ listset (s # ss)" by (rule listset_ConsI) hence "sum_list (x # qs) \ ?B" by (rule imageI) also have "sum_list (x # qs) = a" by (simp add: a y) finally show "a \ ?B" . qed next show "?B \ ?A" proof fix a assume "a \ ?B" then obtain qs' where "qs' \ listset (s # ss)" and a: "a = sum_list qs'" .. from this(1) obtain x qs where "x \ s" and "qs \ listset ss" and qs': "qs' = x # qs" by (rule listset_ConsE) from this(2) have "sum_list qs \ sum_list ` listset ss" by (rule imageI) moreover have "a = x + sum_list qs" by (simp add: a qs') ultimately show "a \ ?A" using \x \ s\ by blast qed qed finally show ?case . qed qed lemma direct_decomp_independent_some_basis: assumes "direct_decomp A ss" and "\s. s \ set ss \ subspace s" shows "independent (\(some_basis ` set ss))" using assms proof (induct ss arbitrary: A) case Nil from independent_empty show ?case by simp next case (Cons s ss) have 1: "\s'. s' \ set ss \ subspace s'" by (rule Cons.prems) simp have "subspace s" by (rule Cons.prems) simp hence "0 \ s" and eq1: "span (some_basis s) = s" by (rule subspace_0, rule span_some_basis) from Cons.prems(1) have *: "direct_decomp A ([s] @ ss)" by simp moreover from \0 \ s\ have "{} \ set [s]" by auto ultimately have 2: "direct_decomp (sum_list ` listset ss) ss" by (rule direct_decomp_appendD) hence eq2: "span (\ (some_basis ` set ss)) = sum_list ` listset ss" using 1 by (rule direct_decomp_span_some_basis) note independent_some_basis[of s] moreover from 2 1 have "independent (\ (some_basis ` set ss))" by (rule Cons.hyps) moreover have "span (some_basis s) \ span (\ (some_basis ` set ss)) = {0}" proof - from * have "direct_decomp A [sum_list ` listset [s], sum_list ` listset ss]" by (rule direct_decomp_appendD) hence "direct_decomp A [s, sum_list ` listset ss]" by (simp add: image_image) moreover have "0 < (1::nat)" by simp moreover have "1 < length [s, sum_list ` listset ss]" by simp ultimately have "[s, sum_list ` listset ss] ! 0 \ [s, sum_list ` listset ss] ! 1 = {0}" by (rule direct_decomp_Int_zero) (auto simp: \0 \ s\ eq2[symmetric] span_zero) thus ?thesis by (simp add: eq1 eq2) qed ultimately have "independent (some_basis s \ (\ (some_basis ` set ss)))" by (rule independent_UnI) thus ?case by simp qed corollary direct_decomp_is_basis: assumes "direct_decomp A ss" and "\s. s \ set ss \ subspace s" shows "is_basis A (\(some_basis ` set ss))" proof - from assms have "subspace A" by (rule subspace_direct_decomp) moreover from assms have "span (\(some_basis ` set ss)) = A" by (rule direct_decomp_span_some_basis) moreover from assms have "independent (\(some_basis ` set ss))" by (rule direct_decomp_independent_some_basis) ultimately show ?thesis by (simp add: is_basis_alt) qed lemma dim_direct_decomp: assumes "direct_decomp A ss" and "finite B" and "A \ span B" and "\s. s \ set ss \ subspace s" shows "dim A = (\s\set ss. 
dim s)" proof - from assms(1, 4) have "is_basis A (\(some_basis ` set ss))" (is "is_basis A ?B") by (rule direct_decomp_is_basis) hence "dim A = card ?B" and "independent ?B" and "?B \ A" by (simp_all add: is_basis_def) from this(3) assms(3) have "?B \ span B" by (rule subset_trans) with assms(2) \independent ?B\ have "finite ?B" using independent_span_bound by blast note \dim A = card ?B\ also from finite_set have "card ?B = (\s\set ss. card (some_basis s))" proof (intro card_UN_disjoint ballI impI) fix s assume "s \ set ss" with \finite ?B\ show "finite (some_basis s)" by auto next fix s1 s2 have "pairwise (\s t. disjnt (some_basis s) (some_basis t)) (set ss)" using assms(1, 4) by (rule direct_decomp_some_basis_pairwise_disjnt) moreover assume "s1 \ set ss" and "s2 \ set ss" and "s1 \ s2" thm pairwiseD ultimately have "disjnt (some_basis s1) (some_basis s2)" by (rule pairwiseD) thus "some_basis s1 \ some_basis s2 = {}" by (simp only: disjnt_def) qed also from refl card_some_basis have "\ = (\s\set ss. dim s)" by (rule sum.cong) finally show ?thesis . qed end (* vector_space *) subsection \Homogeneous Sets of Polynomials with Fixed Degree\ lemma homogeneous_set_direct_decomp: assumes "direct_decomp A ss" and "\s. s \ set ss \ homogeneous_set s" shows "homogeneous_set A" proof (rule homogeneous_setI) fix a n assume "a \ A" with assms(1) obtain qs where "qs \ listset ss" and a: "a = sum_list qs" by (rule direct_decompE) have "hom_component a n = hom_component (sum_list qs) n" by (simp only: a) also have "\ = sum_list (map (\q. hom_component q n) qs)" by (induct qs) (simp_all add: hom_component_plus) also from assms(1) have "\ \ A" proof (rule direct_decompD) show "map (\q. hom_component q n) qs \ listset ss" proof (rule listset_closed_map) fix s q assume "s \ set ss" hence "homogeneous_set s" by (rule assms(2)) moreover assume "q \ s" ultimately show "hom_component q n \ s" by (rule homogeneous_setD) qed fact qed finally show "hom_component a n \ A" . qed definition hom_deg_set :: "nat \ (('x \\<^sub>0 nat) \\<^sub>0 'a) set \ (('x \\<^sub>0 nat) \\<^sub>0 'a::zero) set" where "hom_deg_set z A = (\a. hom_component a z) ` A" lemma hom_deg_setD: assumes "p \ hom_deg_set z A" shows "homogeneous p" and "p \ 0 \ poly_deg p = z" proof - from assms obtain a where "a \ A" and p: "p = hom_component a z" unfolding hom_deg_set_def .. show *: "homogeneous p" by (simp only: p homogeneous_hom_component) assume "p \ 0" hence "keys p \ {}" by simp then obtain t where "t \ keys p" by blast with * have "deg_pm t = poly_deg p" by (rule homogeneousD_poly_deg) moreover from \t \ keys p\ have "deg_pm t = z" unfolding p by (rule keys_hom_componentD) ultimately show "poly_deg p = z" by simp qed lemma zero_in_hom_deg_set: assumes "0 \ A" shows "0 \ hom_deg_set z A" proof - have "0 = hom_component 0 z" by simp also from assms have "\ \ hom_deg_set z A" unfolding hom_deg_set_def by (rule imageI) finally show ?thesis . qed lemma hom_deg_set_closed_uminus: assumes "\a. a \ A \ - a \ A" and "p \ hom_deg_set z A" shows "- p \ hom_deg_set z A" proof - from assms(2) obtain a where "a \ A" and p: "p = hom_component a z" unfolding hom_deg_set_def .. from this(1) have "- a \ A" by (rule assms(1)) moreover have "- p = hom_component (- a) z" by (simp add: p) ultimately show ?thesis unfolding hom_deg_set_def by (rule rev_image_eqI) qed lemma hom_deg_set_closed_plus: assumes "\a1 a2. 
a1 \ A \ a2 \ A \ a1 + a2 \ A" and "p \ hom_deg_set z A" and "q \ hom_deg_set z A" shows "p + q \ hom_deg_set z A" proof - from assms(2) obtain a1 where "a1 \ A" and p: "p = hom_component a1 z" unfolding hom_deg_set_def .. from assms(3) obtain a2 where "a2 \ A" and q: "q = hom_component a2 z" unfolding hom_deg_set_def .. from \a1 \ A\ this(1) have "a1 + a2 \ A" by (rule assms(1)) moreover have "p + q = hom_component (a1 + a2) z" by (simp only: p q hom_component_plus) ultimately show ?thesis unfolding hom_deg_set_def by (rule rev_image_eqI) qed lemma hom_deg_set_closed_minus: assumes "\a1 a2. a1 \ A \ a2 \ A \ a1 - a2 \ A" and "p \ hom_deg_set z A" and "q \ hom_deg_set z A" shows "p - q \ hom_deg_set z A" proof - from assms(2) obtain a1 where "a1 \ A" and p: "p = hom_component a1 z" unfolding hom_deg_set_def .. from assms(3) obtain a2 where "a2 \ A" and q: "q = hom_component a2 z" unfolding hom_deg_set_def .. from \a1 \ A\ this(1) have "a1 - a2 \ A" by (rule assms(1)) moreover have "p - q = hom_component (a1 - a2) z" by (simp only: p q hom_component_minus) ultimately show ?thesis unfolding hom_deg_set_def by (rule rev_image_eqI) qed lemma hom_deg_set_closed_scalar: assumes "\a. a \ A \ c \ a \ A" and "p \ hom_deg_set z A" shows "(c::'a::semiring_0) \ p \ hom_deg_set z A" proof - from assms(2) obtain a where "a \ A" and p: "p = hom_component a z" unfolding hom_deg_set_def .. from this(1) have "c \ a \ A" by (rule assms(1)) moreover have "c \ p = hom_component (c \ a) z" by (simp add: p punit.map_scale_eq_monom_mult hom_component_monom_mult) ultimately show ?thesis unfolding hom_deg_set_def by (rule rev_image_eqI) qed lemma hom_deg_set_closed_sum: assumes "0 \ A" and "\a1 a2. a1 \ A \ a2 \ A \ a1 + a2 \ A" and "\i. i \ I \ f i \ hom_deg_set z A" shows "sum f I \ hom_deg_set z A" using assms(3) proof (induct I rule: infinite_finite_induct) case (infinite I) with assms(1) show ?case by (simp add: zero_in_hom_deg_set) next case empty with assms(1) show ?case by (simp add: zero_in_hom_deg_set) next case (insert j I) from insert.hyps(1, 2) have "sum f (insert j I) = f j + sum f I" by simp also from assms(2) have "\ \ hom_deg_set z A" proof (intro hom_deg_set_closed_plus insert.hyps) show "f j \ hom_deg_set z A" by (rule insert.prems) simp next fix i assume "i \ I" hence "i \ insert j I" by simp thus "f i \ hom_deg_set z A" by (rule insert.prems) qed finally show ?case . qed lemma hom_deg_set_subset: "homogeneous_set A \ hom_deg_set z A \ A" by (auto dest: homogeneous_setD simp: hom_deg_set_def) lemma Polys_closed_hom_deg_set: assumes "A \ P[X]" shows "hom_deg_set z A \ P[X]" proof fix p assume "p \ hom_deg_set z A" then obtain p' where "p' \ A" and p: "p = hom_component p' z" unfolding hom_deg_set_def .. from this(1) assms have "p' \ P[X]" .. have "keys p \ keys p'" by (simp add: p keys_hom_component) also from \p' \ P[X]\ have "\ \ .[X]" by (rule PolysD) finally show "p \ P[X]" by (rule PolysI) qed lemma hom_deg_set_alt_homogeneous_set: assumes "homogeneous_set A" shows "hom_deg_set z A = {p \ A. 
homogeneous p \ (p = 0 \ poly_deg p = z)}" (is "?A = ?B") proof show "?A \ ?B" proof fix h assume "h \ ?A" also from assms have "\ \ A" by (rule hom_deg_set_subset) finally show "h \ ?B" using \h \ ?A\ by (auto dest: hom_deg_setD) qed next show "?B \ ?A" proof fix h assume "h \ ?B" hence "h \ A" and "homogeneous h" and "h = 0 \ poly_deg h = z" by simp_all from this(3) show "h \ ?A" proof assume "h = 0" with \h \ A\ have "0 \ A" by simp thus ?thesis unfolding \h = 0\ by (rule zero_in_hom_deg_set) next assume "poly_deg h = z" with \homogeneous h\ have "h = hom_component h z" by (simp add: hom_component_of_homogeneous) with \h \ A\ show ?thesis unfolding hom_deg_set_def by (rule rev_image_eqI) qed qed qed lemma hom_deg_set_sum_list_listset: assumes "A = sum_list ` listset ss" shows "hom_deg_set z A = sum_list ` listset (map (hom_deg_set z) ss)" (is "?A = ?B") proof show "?A \ ?B" proof fix h assume "h \ ?A" then obtain a where "a \ A" and h: "h = hom_component a z" unfolding hom_deg_set_def .. from this(1) obtain qs where "qs \ listset ss" and a: "a = sum_list qs" unfolding assms .. have "h = hom_component (sum_list qs) z" by (simp only: a h) also have "\ = sum_list (map (\q. hom_component q z) qs)" by (induct qs) (simp_all add: hom_component_plus) also have "\ \ ?B" proof (rule imageI) show "map (\q. hom_component q z) qs \ listset (map (hom_deg_set z) ss)" unfolding hom_deg_set_def using \qs \ listset ss\ refl by (rule listset_map_imageI) qed finally show "h \ ?B" . qed next show "?B \ ?A" proof fix h assume "h \ ?B" then obtain qs where "qs \ listset (map (hom_deg_set z) ss)" and h: "h = sum_list qs" .. from this(1) obtain qs' where "qs' \ listset ss" and qs: "qs = map (\q. hom_component q z) qs'" unfolding hom_deg_set_def by (rule listset_map_imageE) have "h = sum_list (map (\q. hom_component q z) qs')" by (simp only: h qs) also have "\ = hom_component (sum_list qs') z" by (induct qs') (simp_all add: hom_component_plus) finally have "h = hom_component (sum_list qs') z" . moreover have "sum_list qs' \ A" unfolding assms using \qs' \ listset ss\ by (rule imageI) ultimately show "h \ ?A" unfolding hom_deg_set_def by (rule image_eqI) qed qed lemma direct_decomp_hom_deg_set: assumes "direct_decomp A ss" and "\s. s \ set ss \ homogeneous_set s" shows "direct_decomp (hom_deg_set z A) (map (hom_deg_set z) ss)" proof (rule direct_decompI) from assms(1) have "sum_list ` listset ss = A" by (rule direct_decompD) from this[symmetric] show "sum_list ` listset (map (hom_deg_set z) ss) = hom_deg_set z A" by (simp only: hom_deg_set_sum_list_listset) next from assms(1) have "inj_on sum_list (listset ss)" by (rule direct_decompD) moreover have "listset (map (hom_deg_set z) ss) \ listset ss" proof (rule listset_mono) fix i assume "i < length ss" hence "map (hom_deg_set z) ss ! i = hom_deg_set z (ss ! i)" by simp also from \i < length ss\ have "\ \ ss ! i" by (intro hom_deg_set_subset assms(2) nth_mem) finally show "map (hom_deg_set z) ss ! i \ ss ! i" . 
qed simp ultimately show "inj_on sum_list (listset (map (hom_deg_set z) ss))" by (rule inj_on_subset) qed subsection \Interpreting Polynomial Rings as Vector Spaces over the Coefficient Field\ text \There is no need to set up any further interpretation, since interpretation \phull\ is exactly what we need.\ lemma subspace_ideal: "phull.subspace (ideal (F::('b::comm_powerprod \\<^sub>0 'a::field) set))" using ideal.span_zero ideal.span_add proof (rule phull.subspaceI) fix c p assume "p \ ideal F" thus "c \ p \ ideal F" unfolding map_scale_eq_times by (rule ideal.span_scale) qed lemma subspace_Polys: "phull.subspace (P[X]::(('x \\<^sub>0 nat) \\<^sub>0 'a::field) set)" using zero_in_Polys Polys_closed_plus Polys_closed_map_scale by (rule phull.subspaceI) lemma subspace_hom_deg_set: assumes "phull.subspace A" shows "phull.subspace (hom_deg_set z A)" (is "phull.subspace ?A") proof (rule phull.subspaceI) from assms have "0 \ A" by (rule phull.subspace_0) thus "0 \ ?A" by (rule zero_in_hom_deg_set) next fix p q assume "p \ ?A" and "q \ ?A" with phull.subspace_add show "p + q \ ?A" by (rule hom_deg_set_closed_plus) (rule assms) next fix c p assume "p \ ?A" with phull.subspace_scale show "c \ p \ ?A" by (rule hom_deg_set_closed_scalar) (rule assms) qed lemma hom_deg_set_Polys_eq_span: "hom_deg_set z P[X] = phull.span (monomial (1::'a::field) ` deg_sect X z)" (is "?A = ?B") proof show "?A \ ?B" proof fix p assume "p \ ?A" also from this have "\ = {p \ P[X]. homogeneous p \ (p = 0 \ poly_deg p = z)}" by (simp only: hom_deg_set_alt_homogeneous_set[OF homogeneous_set_Polys]) finally have "p \ P[X]" and "homogeneous p" and "p \ 0 \ poly_deg p = z" by simp_all thus "p \ ?B" proof (induct p rule: poly_mapping_plus_induct) case 1 from phull.span_zero show ?case . next case (2 p c t) let ?m = "monomial c t" from 2(1) have "t \ keys ?m" by simp hence "t \ keys (?m + p)" using 2(2) by (rule in_keys_plusI1) hence "?m + p \ 0" by auto hence "poly_deg (monomial c t + p) = z" by (rule 2) from 2(4) have "keys (?m + p) \ .[X]" by (rule PolysD) with \t \ keys (?m + p)\ have "t \ .[X]" .. hence "?m \ P[X]" by (rule Polys_closed_monomial) have "t \ deg_sect X z" proof (rule deg_sectI) from 2(5) \t \ keys (?m + p)\ have "deg_pm t = poly_deg (?m + p)" by (rule homogeneousD_poly_deg) also have "\ = z" by fact finally show "deg_pm t = z" . qed fact hence "monomial 1 t \ monomial 1 ` deg_sect X z" by (rule imageI) hence "monomial 1 t \ ?B" by (rule phull.span_base) hence "c \ monomial 1 t \ ?B" by (rule phull.span_scale) hence "?m \ ?B" by simp moreover have "p \ ?B" proof (rule 2) from 2(4) \?m \ P[X]\ have "(?m + p) - ?m \ P[X]" by (rule Polys_closed_minus) thus "p \ P[X]" by simp next have 1: "deg_pm s = z" if "s \ keys p" for s proof - from that 2(2) have "s \ t" by blast hence "s \ keys ?m" by simp with that have "s \ keys (?m + p)" by (rule in_keys_plusI2) with 2(5) have "deg_pm s = poly_deg (?m + p)" by (rule homogeneousD_poly_deg) also have "\ = z" by fact finally show ?thesis . qed show "homogeneous p" by (rule homogeneousI) (simp add: 1) assume "p \ 0" show "poly_deg p = z" proof (rule antisym) show "poly_deg p \ z" by (rule poly_deg_leI) (simp add: 1) next from \p \ 0\ have "keys p \ {}" by simp then obtain s where "s \ keys p" by blast hence "z = deg_pm s" by (simp only: 1) also from \s \ keys p\ have "\ \ poly_deg p" by (rule poly_deg_max_keys) finally show "z \ poly_deg p" . 
qed qed ultimately show ?case by (rule phull.span_add) qed qed next show "?B \ ?A" proof fix p assume "p \ ?B" then obtain M u where "M \ monomial 1 ` deg_sect X z" and "finite M" and p: "p = (\m\M. u m \ m)" by (auto simp: phull.span_explicit) from this(1) obtain T where "T \ deg_sect X z" and M: "M = monomial 1 ` T" and inj: "inj_on (monomial (1::'a)) T" by (rule subset_imageE_inj) define c where "c = (\t. u (monomial 1 t))" from inj have "p = (\t\T. monomial (c t) t)" by (simp add: p M sum.reindex c_def) also have "\ \ ?A" proof (intro hom_deg_set_closed_sum zero_in_Polys Polys_closed_plus) fix t assume "t \ T" hence "t \ deg_sect X z" using \T \ deg_sect X z\ .. hence "t \ .[X]" and eq: "deg_pm t = z" by (rule deg_sectD)+ from this(1) have "monomial (c t) t \ P[X]" (is "?m \ _") by (rule Polys_closed_monomial) thus "?m \ ?A" by (simp add: hom_deg_set_alt_homogeneous_set[OF homogeneous_set_Polys] poly_deg_monomial monomial_0_iff eq) qed finally show "p \ ?A" . qed qed subsection \(Projective) Hilbert Function\ interpretation phull: vector_space map_scale apply standard subgoal by (fact map_scale_distrib_left) subgoal by (fact map_scale_distrib_right) subgoal by (fact map_scale_assoc) subgoal by (fact map_scale_one_left) done definition Hilbert_fun :: "(('x \\<^sub>0 nat) \\<^sub>0 'a::field) set \ nat \ nat" where "Hilbert_fun A z = phull.dim (hom_deg_set z A)" lemma Hilbert_fun_empty [simp]: "Hilbert_fun {} = 0" by (rule ext) (simp add: Hilbert_fun_def hom_deg_set_def) lemma Hilbert_fun_zero [simp]: "Hilbert_fun {0} = 0" by (rule ext) (simp add: Hilbert_fun_def hom_deg_set_def) lemma Hilbert_fun_direct_decomp: assumes "finite X" and "A \ P[X]" and "direct_decomp (A::(('x::countable \\<^sub>0 nat) \\<^sub>0 'a::field) set) ps" and "\s. s \ set ps \ homogeneous_set s" and "\s. s \ set ps \ phull.subspace s" shows "Hilbert_fun A z = (\p\set ps. Hilbert_fun p z)" proof - from assms(3, 4) have dd: "direct_decomp (hom_deg_set z A) (map (hom_deg_set z) ps)" by (rule direct_decomp_hom_deg_set) have "Hilbert_fun A z = phull.dim (hom_deg_set z A)" by (fact Hilbert_fun_def) also from dd have "\ = sum phull.dim (set (map (hom_deg_set z) ps))" proof (rule phull.dim_direct_decomp) from assms(1) have "finite (deg_sect X z)" by (rule finite_deg_sect) thus "finite (monomial (1::'a) ` deg_sect X z)" by (rule finite_imageI) next from assms(2) have "hom_deg_set z A \ hom_deg_set z P[X]" unfolding hom_deg_set_def by (rule image_mono) thus "hom_deg_set z A \ phull.span (monomial 1 ` deg_sect X z)" by (simp only: hom_deg_set_Polys_eq_span) next fix s assume "s \ set (map (hom_deg_set z) ps)" then obtain s' where "s' \ set ps" and s: "s = hom_deg_set z s'" unfolding set_map .. from this(1) have "phull.subspace s'" by (rule assms(5)) thus "phull.subspace s" unfolding s by (rule subspace_hom_deg_set) qed also have "\ = sum (phull.dim \ hom_deg_set z) (set ps)" unfolding set_map using finite_set proof (rule sum.reindex_nontrivial) fix s1 s2 note dd moreover assume "s1 \ set ps" and "s2 \ set ps" and "s1 \ s2" moreover have "0 \ hom_deg_set z s" if "s \ set ps" for s proof (rule zero_in_hom_deg_set) from that have "phull.subspace s" by (rule assms(5)) thus "0 \ s" by (rule phull.subspace_0) qed ultimately have "hom_deg_set z s1 \ hom_deg_set z s2 = {0}" by (rule direct_decomp_map_Int_zero) moreover assume "hom_deg_set z s1 = hom_deg_set z s2" ultimately show "phull.dim (hom_deg_set z s1) = 0" by simp qed also have "\ = (\p\set ps. Hilbert_fun p z)" by (simp only: o_def Hilbert_fun_def) finally show ?thesis . 
qed context pm_powerprod begin lemma image_lt_hom_deg_set: assumes "homogeneous_set A" shows "lpp ` (hom_deg_set z A - {0}) = {t \ lpp ` (A - {0}). deg_pm t = z}" (is "?B = ?A") proof (intro set_eqI iffI) fix t assume "t \ ?A" hence "t \ lpp ` (A - {0})" and deg_t[symmetric]: "deg_pm t = z" by simp_all from this(1) obtain p where "p \ A - {0}" and t: "t = lpp p" .. from this(1) have "p \ A" and "p \ 0" by simp_all from this(1) have 1: "hom_component p z \ hom_deg_set z A" (is "?p \ _") unfolding hom_deg_set_def by (rule imageI) from \p \ 0\ have "?p \ 0" and "lpp ?p = t" unfolding t deg_t by (rule hom_component_lpp)+ note this(2)[symmetric] moreover from 1 \?p \ 0\ have "?p \ hom_deg_set z A - {0}" by simp ultimately show "t \ ?B" by (rule image_eqI) next fix t assume "t \ ?B" then obtain p where "p \ hom_deg_set z A - {0}" and t: "t = lpp p" .. from this(1) have "p \ hom_deg_set z A" and "p \ 0" by simp_all with assms have "p \ A" and "homogeneous p" and "poly_deg p = z" by (simp_all add: hom_deg_set_alt_homogeneous_set) from this(1) \p \ 0\ have "p \ A - {0}" by simp hence 1: "t \ lpp ` (A - {0})" using t by (rule rev_image_eqI) from \p \ 0\ have "t \ keys p" unfolding t by (rule punit.lt_in_keys) with \homogeneous p\ have "deg_pm t = poly_deg p" by (rule homogeneousD_poly_deg) with 1 show "t \ ?A" by (simp add: \poly_deg p = z\) qed lemma Hilbert_fun_alt: assumes "finite X" and "A \ P[X]" and "phull.subspace A" shows "Hilbert_fun A z = card (lpp ` (hom_deg_set z A - {0}))" (is "_ = card ?A") proof - have "?A \ lpp ` (hom_deg_set z A - {0})" by simp then obtain B where sub: "B \ hom_deg_set z A - {0}" and eq1: "?A = lpp ` B" and inj: "inj_on lpp B" by (rule subset_imageE_inj) have "Hilbert_fun A z = phull.dim (hom_deg_set z A)" by (fact Hilbert_fun_def) also have "\ = card B" proof (rule phull.dim_eq_card) show "phull.span B = phull.span (hom_deg_set z A)" proof from sub have "B \ hom_deg_set z A" by blast thus "phull.span B \ phull.span (hom_deg_set z A)" by (rule phull.span_mono) next from assms(3) have "phull.subspace (hom_deg_set z A)" by (rule subspace_hom_deg_set) hence "phull.span (hom_deg_set z A) = hom_deg_set z A" by (simp only: phull.span_eq_iff) also have "\ \ phull.span B" proof (rule ccontr) assume "\ hom_deg_set z A \ phull.span B" then obtain p0 where "p0 \ hom_deg_set z A - phull.span B" (is "_ \ ?B") by blast note assms(1) this moreover have "?B \ P[X]" proof (rule subset_trans) from assms(2) show "hom_deg_set z A \ P[X]" by (rule Polys_closed_hom_deg_set) qed blast ultimately obtain p where "p \ ?B" and p_min: "\q. punit.ord_strict_p q p \ q \ ?B" by (rule punit.ord_p_minimum_dgrad_p_set[OF dickson_grading_varnum, where m=0, simplified dgrad_p_set_varnum]) blast from this(1) have "p \ hom_deg_set z A" and "p \ phull.span B" by simp_all from phull.span_zero this(2) have "p \ 0" by blast with \p \ hom_deg_set z A\ have "p \ hom_deg_set z A - {0}" by simp hence "lpp p \ lpp ` (hom_deg_set z A - {0})" by (rule imageI) also have "\ = lpp ` B" by (simp only: eq1) finally obtain b where "b \ B" and eq2: "lpp p = lpp b" .. from this(1) sub have "b \ hom_deg_set z A - {0}" .. 
hence "b \ hom_deg_set z A" and "b \ 0" by simp_all from this(2) have lcb: "punit.lc b \ 0" by (rule punit.lc_not_0) from \p \ 0\ have lcp: "punit.lc p \ 0" by (rule punit.lc_not_0) from \b \ B\ have "b \ phull.span B" by (rule phull.span_base) hence "(punit.lc p / punit.lc b) \ b \ phull.span B" (is "?b \ _") by (rule phull.span_scale) with \p \ phull.span B\ have "p - ?b \ 0" by auto moreover from lcb lcp \b \ 0\ have "lpp ?b = lpp p" by (simp add: punit.map_scale_eq_monom_mult punit.lt_monom_mult eq2) moreover from lcb have "punit.lc ?b = punit.lc p" by (simp add: punit.map_scale_eq_monom_mult) ultimately have "lpp (p - ?b) \ lpp p" by (rule punit.lt_minus_lessI) hence "punit.ord_strict_p (p - ?b) p" by (rule punit.lt_ord_p) hence "p - ?b \ ?B" by (rule p_min) hence "p - ?b \ hom_deg_set z A \ p - ?b \ phull.span B" by simp thus False proof assume *: "p - ?b \ hom_deg_set z A" from phull.subspace_scale have "?b \ hom_deg_set z A" proof (rule hom_deg_set_closed_scalar) show "phull.subspace A" by fact next show "b \ hom_deg_set z A" by fact qed with phull.subspace_diff \p \ hom_deg_set z A\ have "p - ?b \ hom_deg_set z A" by (rule hom_deg_set_closed_minus) (rule assms(3)) with * show ?thesis .. next assume "p - ?b \ phull.span B" hence "p - ?b + ?b \ phull.span B" using \?b \ phull.span B\ by (rule phull.span_add) hence "p \ phull.span B" by simp with \p \ phull.span B\ show ?thesis .. qed qed finally show "phull.span (hom_deg_set z A) \ phull.span B" . qed next show "phull.independent B" proof assume "phull.dependent B" then obtain B' u b' where "finite B'" and "B' \ B" and "(\b\B'. u b \ b) = 0" and "b' \ B'" and "u b' \ 0" unfolding phull.dependent_explicit by blast define B0 where "B0 = {b \ B'. u b \ 0}" have "B0 \ B'" by (simp add: B0_def) with \finite B'\ have "(\b\B0. u b \ b) = (\b\B'. u b \ b)" by (rule sum.mono_neutral_left) (simp add: B0_def) also have "\ = 0" by fact finally have eq: "(\b\B0. u b \ b) = 0" . define t where "t = ordered_powerprod_lin.Max (lpp ` B0)" from \b' \ B'\ \u b' \ 0\ have "b' \ B0" by (simp add: B0_def) hence "lpp b' \ lpp ` B0" by (rule imageI) hence "lpp ` B0 \ {}" by blast from \B0 \ B'\ \finite B'\ have "finite B0" by (rule finite_subset) hence "finite (lpp ` B0)" by (rule finite_imageI) hence "t \ lpp ` B0" unfolding t_def using \lpp ` B0 \ {}\ by (rule ordered_powerprod_lin.Max_in) then obtain b0 where "b0 \ B0" and t: "t = lpp b0" .. note this(1) moreover from \B0 \ B'\ \B' \ B\ have "B0 \ B" by (rule subset_trans) also have "\ \ hom_deg_set z A - {0}" by fact finally have "b0 \ hom_deg_set z A - {0}" . hence "b0 \ 0" by simp hence "t \ keys b0" unfolding t by (rule punit.lt_in_keys) have "lookup (\b\B0. u b \ b) t = (\b\B0. u b * lookup b t)" by (simp add: lookup_sum) also from \finite B0\ have "\ = (\b\{b0}. u b * lookup b t)" proof (rule sum.mono_neutral_right) from \b0 \ B0\ show "{b0} \ B0" by simp next show "\b\B0 - {b0}. u b * lookup b t = 0" proof fix b assume "b \ B0 - {b0}" hence "b \ B0" and "b \ b0" by simp_all from this(1) have "lpp b \ lpp ` B0" by (rule imageI) with \finite (lpp ` B0)\ have "lpp b \ t" unfolding t_def by (rule ordered_powerprod_lin.Max_ge) have "t \ keys b" proof assume "t \ keys b" hence "t \ lpp b" by (rule punit.lt_max_keys) with \lpp b \ t\ have "lpp b = lpp b0" unfolding t by (rule ordered_powerprod_lin.antisym) from inj \B0 \ B\ have "inj_on lpp B0" by (rule inj_on_subset) hence "b = b0" using \lpp b = lpp b0\ \b \ B0\ \b0 \ B0\ by (rule inj_onD) with \b \ b0\ show False .. 
qed thus "u b * lookup b t = 0" by (simp add: in_keys_iff) qed qed also from \t \ keys b0\ \b0 \ B0\ have "\ \ 0" by (simp add: B0_def in_keys_iff) finally show False by (simp add: eq) qed qed also have "\ = card ?A" unfolding eq1 using inj by (rule card_image[symmetric]) finally show ?thesis . qed end (* pm_powerprod *) end (* theory *) diff --git a/thys/MuchAdoAboutTwo/MuchAdoAboutTwo.thy b/thys/MuchAdoAboutTwo/MuchAdoAboutTwo.thy --- a/thys/MuchAdoAboutTwo/MuchAdoAboutTwo.thy +++ b/thys/MuchAdoAboutTwo/MuchAdoAboutTwo.thy @@ -1,2059 +1,2059 @@ (* Title: Much Ado about Two Author: Sascha Böhme , 2007 Maintainer: Sascha Böhme *) section \Much Ado about Two\ (*<*) theory MuchAdoAboutTwo -imports "HOL-Library.Permutation" +imports "HOL-Library.List_Permutation" begin (*>*) text \ Due to Donald E. Knuth, it has been known for some time that certain sorting functions for lists of arbitrary types can be proved correct by only showing that they are correct for boolean lists (\cite{KnuthSortingAndSearching}, see also \cite{LogicalAbstractionsInHaskell}). This reduction idea, i.e. reducing a proof for an arbitrary type to a proof for a fixed type with a fixed number of values, also has instances in other fields. Recently, in \cite{MuchAdoAboutTwo}, a result similar to Knuth's 0-1-principle is explained for the problem of parallel prefix computation \cite{PrefixSumsAndTheirApplications}. That is the task to compute, for given $x_1, \ldots, x_n$ and an associative operation $\oplus$, the values $x_1$, $x_1 \oplus x_2$, $\ldots$, $x_1 \oplus x_2 \oplus \cdots \oplus x_n$. There are several solutions which optimise this computation, and an obvious question is to ask whether these solutions are correct. One way to answer this question is given in \cite{MuchAdoAboutTwo}. There, a ``0-1-2-principle'' is proved which relates an unspecified solution of the parallel prefix computation, expressed as a function \candidate\, with \scanl1\, a functional representation of the parallel prefix computation. The essence proved in the mentioned paper is as follows: If \candidate\ and \scanl1\ behave identically on all lists over a type which has three elements, then \candidate\ is semantically equivalent to \scanl1\, that is, \candidate\ is a correct solution of the parallel prefix computation. Although it seems that nearly nothing is known about the function \candidate\, it turns out that the type of \candidate\ already suffices for the proof of the paper's result. The key is relational parametricity \cite{TypesAbstractionsAndParametricPolymorphism} in the form of a free theorem \cite{TheoremsForFree}. This, some rewriting and a few properties about list-processing functions thrown in allow one to prove the ``0-1-2-principle''. The paper first shows some simple properties and derives a specialisation of the free theorem. The proof of the main theorem itself is split up into two parts. The first, and considerably more complicated, part relates lists over a type with three values to lists of integer lists. Here, the paper uses several figures to demonstrate and shorten several proofs. The second part then relates lists of integer lists with lists over arbitrary types, and consists of applying the free theorem and some rewriting. The combination of these two parts then yields the theorem. The article at hand formalises the proofs given in \cite{MuchAdoAboutTwo}, which is called here ``the original paper''. Compared to that paper, there are several differences in this article. The major differences are listed below.
A more detailed collection follows thereafter. \begin{itemize} \item The original paper requires lists to be non-empty. Even though lists in Isabelle may also be empty, we stick to Isabelle's list datatype instead of declaring a new datatype, due to the huge, already existing theory about lists in Isabelle. As a consequence, however, several modifications become necessary. \item The figure-based proofs of the original paper are replaced by formal proofs. This forms a major part of this article (see Section 6). \item Instead of integers, we restrict ourselves to natural numbers. Thus, several conditions can be simplified since every natural number is greater than or equal to \0\. This decision has no further influence on the proofs because they never consider negative integers. \item Mainly due to differences between Haskell and Isabelle, certain notations are different here compared to the original paper. List concatenation is denoted by \@\ instead of $++$, and in writing down intervals, we use \[0.. instead of \[0..k]\. Moreover, we write \f\ instead of $\oplus$ and \g\ instead of $\otimes$. Functions mapping an element of the three-valued type to an arbitrary type are denoted by \h\. \end{itemize} Whenever we use lemmas from already existing Isabelle theories, we qualify them by their theory name. For example, instead of \map_map\, we write \List.map_map\ to point out that this lemma is taken from Isabelle's list theory. The following comparison shows all differences of this article compared to the original paper. The items below follow the structure of the original paper (and also this article's structure). They also highlight the challenges which needed to be solved in formalising the original paper. \begin{itemize} \item Introductions of several list functions (e.g. \length\, \map\, \take\) are dropped. They exist already in Isabelle's list theory and are considered familiar to the reader. \item The free theorem given in Lemma 1 of the original paper is not sufficient for later proofs, because the assumption is not appropriate in the context of Isabelle's lists, which may also be empty. Thus, here, Lemma 1 is a derived version of the free theorem given as Lemma 1 in the original paper, and some additional proof-work is done. \item Before proceeding in the original paper's way, we state and prove additional lemmas, which are not part of Isabelle's libraries. These lemmas are not specific to this article and may also be used in other theories. \item Laws 1 to 8 and Lemma 2 of the original paper are explicitly proved. Most of the proofs follow directly from existing results of Isabelle's list theory. To prove Law 7, Law 8 and Lemma 2, more work was necessary, especially for Law 8. \item Lemma 3 and its proof are nearly the same here as in the original paper. Only the additional assumptions of Lemma 1, due to Isabelle's list datatype, have to be shown. \item Lemma 4 is split up into several smaller lemmas, and the order of them tries to follow the structure of the original paper's Lemma 4. For every figure of the original paper, there is now one separate proof. These proofs constitute the major difference in the structure of this article compared to the original paper. The proof of Lemma 4 in the original paper concludes by combining the results of the figure-based proofs into a non-trivial permutation property.
These three sentences given in the original paper are split up into five separate lemmas and corresponding proofs, and therefore, they likewise form a major difference to the original paper. \item Lemma 5 is mostly identical to the version in the original paper. It has one additional assumption required by Lemma 4. Moreover, the proof is slightly more structured, and some steps needed a bit more argumentation than in the original paper. \item In principle, Proposition 1 is identical to the corresponding proposition in the original paper. However, to fulfill the additional requirement of Lemma 5, an additional lemma was proved. This, however, is only necessary because we use Isabelle's list type which allows lists to be empty. \item Proposition 2 contains one non-trivial step, which is proved as a separate lemma. Note that this is not due to any decisions of using special datatypes, but inherent in the proof itself. Apart from that, the proof is identical to the original paper's proof of Proposition 2. \item The final theorem is, as in the original paper, just a combination of Proposition 1 and Proposition 2. Only the assumptions are extended due to Isabelle's list datatype. \end{itemize} \ section \Basic definitions\ fun foldl1 :: "('a \ 'a \ 'a) \ 'a list \ 'a" where "foldl1 f (x # xs) = foldl f x xs" fun scanl1 :: "('a \ 'a \ 'a) \ 'a list \ 'a list" where "scanl1 f xs = map (\k. foldl1 f (take k xs)) [1.. The original paper further relies on associative functions. Thus, we define another predicate to be able to express this condition: \ definition "associative f \ (\x y z. f x (f y z) = f (f x y) z)" text \ The following constant symbol represents our unspecified function. We want to show that this function is semantically equivalent to \scanl1\, provided that the first argument is an associative function. \ consts candidate :: "('a \ 'a \ 'a) \ 'a list \ 'a list" text \ With the final theorem, it suffices to show that \candidate\ behaves like \scanl1\ on all lists of the following type, to conclude that \candidate\ is semantically equivalent to \scanl1\. \ datatype three = Zero | One | Two text \ Although most of the functions mentioned in the original paper already exist in Isabelle's list theory, we still need to define two more functions: \ fun wrap :: "'a \ 'a list" where "wrap x = [x]" fun ups :: "nat \ nat list list" where "ups n = map (\k. [0..A Free Theorem\ text \ The key to proving the final theorem is the following free theorem \cite{TypesAbstractionsAndParametricPolymorphism,TheoremsForFree} of \candidate\. Since there is no proof possible without specifying the underlying (functional) language (which would be beyond the scope of this work), this lemma is expected to hold. As a consequence, all following lemmas and also the final theorem only hold under this provision. \ axiomatization where candidate_free_theorem: "\x y. h (f x y) = g (h x) (h y) \ map h (candidate f zs) = candidate g (map h zs)" text \ In what follows in this section, the previous lemma is specialised to a lemma for non-empty lists. More precisely, we want to restrict the above assumption to be applicable for non-empty lists. This is already possible without modifications when having a list datatype which does not allow for empty lists. However, before being able to also use Isabelle's list datatype, further conditions on \f\ and \zs\ are necessary.
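As an aside that is not part of the formalisation, the intended behaviour of the parallel prefix computation and the shape of the free theorem axiomatised above can be illustrated with a minimal Haskell sketch (Haskell being the language in which the original paper is phrased). The names prefixSums, freeTheoremInstance and xss below are ad-hoc illustration names, and the Prelude's scanl1 merely stands in for one possible candidate; the sketch is an example, not the formal development.

-- Illustration only: the parallel prefix computation, using the Prelude's scanl1.
-- scanl1 (+) [1,2,3,4] yields the prefix sums [1,3,6,10].
prefixSums :: [Int] -> [Int]
prefixSums = scanl1 (+)

-- One instance of the free theorem: whenever h (f x y) = g (h x) (h y) for all
-- x and y, then map h (candidate f zs) = candidate g (map h zs).  Here h = length
-- is such a homomorphism from list concatenation (++) to addition (+), and
-- scanl1 plays the role of candidate.
freeTheoremInstance :: Bool
freeTheoremInstance =
    map length (scanl1 (++) xss) == scanl1 (+) (map length xss)
  where
    xss = [[1], [2, 3], [4, 5, 6 :: Int]]

main :: IO ()
main = print (prefixSums [1, 2, 3, 4], freeTheoremInstance)  -- ([1,3,6,10],True)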
To prove the derived lemma, we first introduce a datatype for non-empty lists, and we furthermore define conversion functions to map the new datatype on Isabelle lists and back. \ datatype 'a nel = NE_One 'a | NE_Cons 'a "'a nel" fun n2l :: "'a nel \ 'a list" where "n2l (NE_One x) = [x]" | "n2l (NE_Cons x xs) = x # n2l xs" fun l2n :: "'a list \ 'a nel" where "l2n [x] = NE_One x" | "l2n (x # xs) = (case xs of [] \ NE_One x | (_ # _) \ NE_Cons x (l2n xs))" text \ The following results relate Isabelle lists and non-empty lists: \ lemma non_empty_n2l: "n2l xs \ []" by (cases xs, auto) lemma n2l_l2n_id: "x \ [] \ n2l (l2n x) = x" proof (induct x) case Nil thus ?case by simp next case (Cons x xs) thus ?case by (cases xs, auto) qed lemma n2l_l2n_map_id: assumes "\x. x \ set zs \ x \ []" shows "map (n2l \ l2n) zs = zs" using assms proof (induct zs) case Nil thus ?case by simp next case (Cons z zs) hence "\x. x \ set zs \ x \ []" using List.set_subset_Cons by auto with Cons have IH: "map (n2l \ l2n) zs = zs" by blast have "map (n2l \ l2n) (z # zs) = (n2l \ l2n) z # map (n2l \ l2n) zs" by simp also have "\ = z # map (n2l \ l2n) zs" using Cons and n2l_l2n_id by auto also have "\ = z # zs" using IH by simp finally show ?case . qed text \ Based on the previous lemmas, we can state and proof a specialised version of \candidate\'s free theorem, suitable for our setting as explained before. \ lemma Lemma_1: assumes A1: "\(x::'a list) (y::'a list). x \ [] \ y \ [] \ h (f x y) = g (h x) (h y)" and A2: "\x y. x \ [] \ y \ [] \ f x y \ []" and A3: "\x. x \ set zs \ x \ []" shows "map h (candidate f zs) = candidate g (map h zs)" proof - \ \We define two functions, @{text "fn :: 'a nel \ 'a nel \ 'a nel"} and\ \ \@{text "hn :: 'a nel \ b"}, which wrap @{text f} and @{text h} in the\ \ \setting of non-empty lists.\ let ?fn = "\x y. l2n (f (n2l x) (n2l y))" let ?hn = "h \ n2l" \ \Our new functions fulfill the preconditions of @{text candidate}'s\ \ \free theorem:\ have "\(x::'a nel) (y::'a nel). ?hn (?fn x y) = g (?hn x) (?hn y)" proof - fix x y let ?xl = "n2l (x :: 'a nel)" let ?yl = "n2l (y :: 'a nel)" have "?hn (?fn x y) = h (n2l (l2n (f (n2l x) (n2l y))))" by simp also have "\ = h (f ?xl ?yl)" using A2 [where x="?xl" and y="?yl"] and n2l_l2n_id [where x="f (n2l x) (n2l y)"] and non_empty_n2l [where xs=x] and non_empty_n2l [where xs=y] by simp also have "\ = g (h ?xl) (h ?yl)" using A1 and non_empty_n2l and non_empty_n2l by auto also have "\ = g (?hn x) (?hn y)" by simp finally show "?hn (?fn x y) = g (?hn x) (?hn y)" . qed with candidate_free_theorem [where f="?fn" and h="?hn" and g = g] have ne_free_theorem: "map ?hn (candidate ?fn (map l2n zs)) = candidate g (map ?hn (map l2n zs))" by auto \ \We use @{text candidate}'s free theorem again to show the following\ \ \property:\ have n2l_candidate: "\zs. map n2l (candidate ?fn zs) = candidate f (map n2l zs)" proof - fix zs have "\x y. 
n2l (?fn x y) = f (n2l x) (n2l y)" proof - fix x y show "n2l (?fn x y) = f (n2l x) (n2l y)" using n2l_l2n_id [where x="f (n2l x) (n2l y)"] and A2 [where x="n2l x" and y="n2l y"] and non_empty_n2l [where xs=x] and non_empty_n2l [where xs=y] by simp qed with candidate_free_theorem [where h=n2l and f="?fn" and g=f] show "map n2l (candidate ?fn zs) = candidate f (map n2l zs)" by simp qed \ \Now, with the previous preparations, we conclude the thesis by the\ \ \following rewriting:\ have "map h (candidate f zs) = map h (candidate f (map (n2l \ l2n) zs))" using n2l_l2n_map_id [where zs=zs] and A3 by simp also have "\ = map h (candidate f (map n2l (map l2n zs)))" using List.map_map [where f=n2l and g=l2n and xs=zs] by simp also have "\= map h (map n2l (candidate ?fn (map l2n zs)))" using n2l_candidate by auto also have "\ = map ?hn (candidate ?fn (map l2n zs))" using List.map_map by auto also have "\ = candidate g (map ?hn (map l2n zs))" using ne_free_theorem by simp also have "\ = candidate g (map ((h \ n2l) \ l2n) zs)" using List.map_map [where f="h \ n2l" and g=l2n] by simp also have "\ = candidate g (map (h \ (n2l \ l2n)) zs)" using Fun.o_assoc [symmetric, where f=h and g=n2l and h=l2n] by simp also have "\ = candidate g (map h (map (n2l \ l2n) zs))" using List.map_map [where f=h and g="n2l \ l2n"] by simp also have "\ = candidate g (map h zs)" using n2l_l2n_map_id [where zs=zs] and A3 by auto finally show ?thesis . qed section \Useful lemmas\ text \ In this section, we state and prove several lemmas, which occur neither in the original paper nor in Isabelle's libraries. \ lemma upt_map_Suc: "k > 0 \ [0..x. P [x]" and A3: "\xs ys. \ xs \ [] ; ys \ [] ; P xs ; P ys \ \ P (xs @ ys)" shows "P zs" proof (induct zs) case Nil with A1 show ?case by simp next case (Cons z zs) hence IH: "P zs" by simp show ?case proof (cases zs) case Nil with A2 show ?thesis by simp next case Cons with IH and A2 and A3 [where xs="[z]" and ys=zs] show ?thesis by auto qed qed lemmas divide_and_conquer = divide_and_conquer_induct [case_names Nil One Partition] lemma all_set_inter_empty_distinct: assumes "\xs ys. js = xs @ ys \ set xs \ set ys = {}" shows "distinct js" using assms proof (induct js rule: divide_and_conquer) case Nil thus ?case by simp next case One thus ?case by simp next case (Partition xs ys) hence P: "\as bs. xs @ ys = as @ bs \ set as \ set bs = {}" by simp have "\xs1 xs2. xs = xs1 @ xs2 \ set xs1 \ set xs2 = {}" proof - fix xs1 xs2 assume "xs = xs1 @ xs2" hence "set xs1 \ set (xs2 @ ys) = {}" using P [where as=xs1 and bs="xs2 @ ys"] by simp thus "set xs1 \ set xs2 = {}" using List.set_append and Set.Int_Un_distrib by auto qed with Partition have distinct_xs: "distinct xs" by simp have "\ys1 ys2. ys = ys1 @ ys2 \ set ys1 \ set ys2 = {}" proof - fix ys1 ys2 assume "ys = ys1 @ ys2" hence "set (xs @ ys1) \ set ys2 = {}" using P [where as="xs @ ys1" and bs=ys2] by simp thus "set ys1 \ set ys2 = {}" using List.set_append and Set.Int_Un_distrib by auto qed with Partition have distinct_ys: "distinct ys" by simp from Partition and distinct_xs and distinct_ys show ?case by simp qed lemma partitions_sorted: assumes "\xs ys x y. \ js = xs @ ys ; x \ set xs ; y \ set ys \ \ x \ y" shows "sorted js" using assms proof (induct js rule: divide_and_conquer) case Nil thus ?case by simp next case One thus ?case by simp next case (Partition xs ys) hence P: "\as bs x y. \ xs @ ys = as @ bs ; x \ set as ; y \ set bs\ \ x \ y" by simp have "\xs1 xs2 x y. 
\ xs = xs1 @ xs2 ; x \ set xs1 ; y \ set xs2 \ \ x \ y" proof - fix xs1 xs2 assume "xs = xs1 @ xs2" hence "\x y. \ x \ set xs1 ; y \ set (xs2 @ ys) \ \ x \ y" using P [where as=xs1 and bs="xs2 @ ys"] by simp thus "\x y. \ x \ set xs1 ; y \ set xs2 \ \ x \ y" using List.set_append by auto qed with Partition have sorted_xs: "sorted xs" by simp have "\ys1 ys2 x y. \ ys = ys1 @ ys2 ; x \ set ys1 ; y \ set ys2 \ \ x \ y" proof - fix ys1 ys2 assume "ys = ys1 @ ys2" hence "\x y. \ x \ set (xs @ ys1) ; y \ set ys2 \ \ x \ y" using P [where as="xs @ ys1" and bs=ys2] by simp thus "\x y. \ x \ set ys1 ; y \ set ys2 \ \ x \ y" using List.set_append by auto qed with Partition have sorted_ys: "sorted ys" by simp have "\x \ set xs. \y \ set ys. x \ y" using P [where as=xs and bs=ys] by simp with sorted_xs and sorted_ys show ?case using List.sorted_append by auto qed section \Preparatory Material\ text \ In the original paper, the following lemmas L1 to L8 are given without a proof, although it is hinted there that most of them follow from parametricity properties \cite{TypesAbstractionsAndParametricPolymorphism,TheoremsForFree}. Alternatively, most of them can be shown by induction over lists. However, since we are using Isabelle's list datatype, we rely on already existing results. \ lemma L1: "map g (map f xs) = map (g \ f) xs" using List.map_map by auto lemma L2: "length (map f xs) = length xs" using List.length_map by simp lemma L3: "take k (map f xs) = map f (take k xs)" using List.take_map by auto lemma L4: "map f \ wrap = wrap \ f" by (simp add: fun_eq_iff) lemma L5: "map f (xs @ ys) = (map f xs) @ (map f ys)" using List.map_append by simp lemma L6: "k < length xs \ (map f xs) ! k = f (xs ! k)" using List.nth_map by simp lemma L7: "\k. k < length xs \ map (nth xs) [0.. 0" by simp hence "map (nth (x # xs)) [0.. = ((x # xs) ! 0) # (map (nth (x # xs) \ Suc) [0.. = x # map (nth xs) [0.. = x # map (nth xs) [0.. = x # take (k' + 1) xs" using Cons and Suc by simp also have "\ = take (k + 1) (x # xs)" using Suc by simp finally show ?thesis . qed qed text \ In Isabelle's list theory, a similar result for \foldl\ already exists. Therefore, it is easy to prove the following lemma for \foldl1\. Note that this lemma does not occur in the original paper. \ lemma foldl1_append: assumes "xs \ []" shows "foldl1 f (xs @ ys) = foldl1 f (foldl1 f xs # ys)" proof - have non_empty_list: "xs \ [] \ \y ys. xs = y # ys" by (cases xs, auto) with assms obtain x xs' where x_xs_def: "xs = x # xs'" by auto have "foldl1 f (xs @ ys) = foldl f x (xs' @ ys)" using x_xs_def by simp also have "\ = foldl f (foldl f x xs') ys" using List.foldl_append by simp also have "\ = foldl f (foldl1 f (x # xs')) ys" by simp also have "\ = foldl1 f (foldl1 f xs # ys)" using x_xs_def by simp finally show ?thesis . qed text \ This is a special induction scheme suitable for proving L8. It is not mentioned in the original paper. \ lemma foldl1_induct': assumes "\f x. P f [x]" and "\f x y. P f [x, y]" and "\f x y z zs. P f (f x y # z # zs) \ P f (x # y # z # zs)" and "\f. P f []" shows "P f xs" proof (induct xs rule: List.length_induct) fix xs assume A: "\ys::'a list. 
length ys < length (xs::'a list) \ P f ys" thus "P f xs" proof (cases xs) case Nil with assms show ?thesis by simp next case (Cons x1 xs1) hence xs1: "xs = x1 # xs1" by simp thus ?thesis proof (cases xs1) case Nil with assms and xs1 show ?thesis by simp next case (Cons x2 xs2) hence xs2: "xs1 = x2 # xs2" by simp thus ?thesis proof (cases xs2) case Nil with assms and xs1 and xs2 show ?thesis by simp next case (Cons x3 xs3) hence "xs2 = x3 # xs3" by simp with assms and xs1 xs2 and A show ?thesis by simp qed qed qed qed lemmas foldl1_induct = foldl1_induct' [case_names One Two More Nil] lemma L8: assumes "associative f" and "xs \ []" and "ys \ []" shows "foldl1 f (xs @ ys) = f (foldl1 f xs) (foldl1 f ys)" using assms proof (induct f ys rule: foldl1_induct) case (One f y) have "foldl1 f (xs @ [y]) = foldl1 f (foldl1 f xs # [y])" using foldl1_append [where xs=xs] and One by simp also have "\ = f (foldl1 f xs) y" by simp also have "\ = f (foldl1 f xs) (foldl1 f [y])" by simp finally show ?case . next case (Two f x y) have "foldl1 f (xs @ [x, y]) = foldl1 f (foldl1 f xs # [x, y])" using foldl1_append [where xs=xs] and Two by simp also have "\ = foldl1 f (f (foldl1 f xs) x # [y])" by simp also have "\ = f (f (foldl1 f xs) x) y" by simp also have "\ = f (foldl1 f xs) (f x y)" using Two unfolding associative_def by simp also have "\ = f (foldl1 f xs) (foldl1 f [x, y])" by simp finally show ?case . next case (More f x y z zs) hence IH: "foldl1 f (xs @ f x y # z # zs) = f (foldl1 f xs) (foldl1 f (f x y # z # zs))" by simp have "foldl1 f (xs @ x # y # z # zs) = foldl1 f (foldl1 f xs # x # y # z # zs)" using foldl1_append [where xs=xs] and More by simp also have "\ = foldl1 f (f (foldl1 f xs) x # y # z # zs)" by simp also have "\ = foldl1 f (f (f (foldl1 f xs) x) y # z # zs)" by simp also have "\ = foldl1 f (f (foldl1 f xs) (f x y) # z # zs)" using More unfolding associative_def by simp also have "\ = foldl1 f (foldl1 f xs # f x y # z # zs)" by simp also have "\ = foldl1 f (xs @ f x y # z # zs)" using foldl1_append [where xs=xs] and More by simp also have "\ = f (foldl1 f xs) (foldl1 f (x # y # z # zs))" using IH by simp finally show ?case . next case Nil thus ?case by simp qed text \ The next lemma is applied in several following proofs whenever the equivalence of two lists is shown. \ lemma Lemma_2: assumes "length xs = length ys" and "\k. k < length xs \ xs ! k = ys ! k" shows "xs = ys" using assms by (auto simp: List.list_eq_iff_nth_eq) text \ In the original paper, this lemma and its proof appear inside of Lemma 3. However, this property will be useful also in later proofs and is thus separated. \ lemma foldl1_map: assumes "associative f" and "xs \ []" and "ys \ []" shows "foldl1 f (map h (xs @ ys)) = f (foldl1 f (map h xs)) (foldl1 f (map h ys))" proof - have "foldl1 f (map h (xs @ ys)) = foldl1 f (map h xs @ map h ys)" using L5 by simp also have "\ = f (foldl1 f (map h xs)) (foldl1 f (map h ys))" using assms and L8 [where f=f] by auto finally show ?thesis . qed lemma Lemma_3: fixes f :: "'a \ 'a \ 'a" and h :: "nat \ 'a" assumes "associative f" shows "map (foldl1 f \ map h) (candidate (@) (map wrap [0.. \The following three properties @{text P1}, @{text P2} and @{text P3}\ \ \are preconditions of Lemma 1.\ have P1: "\x y. \ x \ [] ; y \ [] \ \ foldl1 f (map h (x @ y)) = f (foldl1 f (map h x)) (foldl1 f (map h y))" using assms and foldl1_map by blast have P2: "\x y. x \ [] \ y \ [] \ x @ y \ []" by auto have P3: "\x. x \ set (map wrap [0.. 
x \ []" by auto \ \The proof for the thesis is now equal to the one of the original paper:\ from Lemma_1 [where h="foldl1 f \ map h" and zs="map wrap [0.. map h) (candidate (@) (map wrap [0.. map h) (map wrap [0.. = candidate f (map (foldl1 f \ map h \ wrap) [0.. = candidate f (map (foldl1 f \ wrap \ h) [0.. = candidate f (map h [0..Proving Proposition 1\ subsection \Definitions of Lemma 4\ text \ In the same way as in the original paper, the following two functions are defined: \ fun f1 :: "three \ three \ three" where "f1 x Zero = x" | "f1 Zero One = One" | "f1 x y = Two" fun f2 :: "three \ three \ three" where "f2 x Zero = x" | "f2 x One = One" | "f2 x Two = Two" text \ Both functions are associative as is proved by case analysis: \ lemma f1_assoc: "associative f1" unfolding associative_def proof auto fix x y z show "f1 x (f1 y z) = f1 (f1 x y) z" proof (cases z) case Zero thus ?thesis by simp next case One hence z_One: "z = One" by simp thus ?thesis by (cases y, simp_all, cases x, simp_all) next case Two thus ?thesis by simp qed qed lemma f2_assoc: "associative f2" unfolding associative_def proof auto fix x y z show "f2 x (f2 y z) = f2 (f2 x y) z" by (cases z, auto) qed text \ Next, we define two other functions, again according to the original paper. Note that \h1\ has an extra parameter \k\ which is only implicit in the original paper. \ fun h1 :: "nat \ nat \ nat \ three" where "h1 k i j = (if i = j then One else if j \ k then Zero else Two)" fun h2 :: "nat \ nat \ three" where "h2 i j = (if i = j then One else if i + 1 = j then Two else Zero)" subsection \Figures and Proofs\ text \ In the original paper, this lemma is depicted in (and proved by) Figure~2. Therefore, it carries this unusual name here. \ lemma Figure_2: assumes "i \ k" shows "foldl1 f1 (map (h1 k i) [0..j. j < length (map (h1 k i) [0.. (map (h1 k i) [0.. i \ (map (h1 k i) [0.. ?mr ! j = Zero" using List.nth_append [where xs="replicate i Zero"] by simp have R3: "j > i \ ?mr ! j = Zero" using List.nth_append [where xs="replicate i Zero @ [One]"] and j_k by simp show "(map (h1 k i) [0.. j" thus ?thesis proof (cases "i < j") assume "i < j" with M2 and R3 show ?thesis by simp next assume "\(i < j)" with i_ne_j have "i > j" by simp with M2 and R2 show ?thesis by simp qed qed qed from Q1 Q2 and Lemma_2 show ?thesis by blast qed have P2: "\j. j > 0 \ foldl1 f1 (replicate j Zero) = Zero" proof - fix j assume "(j::nat) > 0" thus "foldl1 f1 (replicate j Zero) = Zero" proof (induct j) case 0 thus ?case by simp next case (Suc j) thus ?case by (cases j, auto) qed qed have P3: "\j. foldl1 f1 ([One] @ replicate j Zero) = One" proof - fix j show "foldl1 f1 ([One] @ replicate j Zero) = One" using L8 [where f=f1 and xs="[One]" and ys="replicate (Suc j) Zero"] and f1_assoc and P2 [where j="Suc j"] by simp qed have "foldl1 f1 ?mr = One" proof (cases i) case 0 thus ?thesis using P3 by simp next case (Suc i) hence "foldl1 f1 (replicate (Suc i) Zero @ [One] @ replicate (k - Suc i) Zero) = f1 (foldl1 f1 (replicate (Suc i) Zero)) (foldl1 f1 ([One] @ replicate (k - Suc i) Zero))" using L8 [where xs="replicate (Suc i) Zero" and ys="[One] @ replicate (k - Suc i) Zero"] and f1_assoc by simp also have "\ = One" using P2 [where j="Suc i"] and P3 [where j="k - Suc i"] by simp finally show ?thesis using Suc by simp qed with P1 show ?thesis by simp qed text \ In the original paper, this lemma is depicted in (and proved by) Figure~3. Therefore, it carries this unusual name here. \ lemma Figure_3: assumes "i < k" shows "foldl1 f2 (map (h2 i) [0..j. 
j < length (map (h2 i) [0.. (map (h2 i) [0.. j > i + 1 \ (map (h2 i) [0.. ?mr ! j = Zero" using List.nth_append [where xs="replicate i Zero"] by simp have R2: "?mr ! i = One" using List.nth_append [where xs="replicate i Zero"] by simp have R3: "?mr ! (i + 1) = Two" using List.nth_append [where xs="replicate i Zero @ [One]"] by simp have R4: "j > i + 1 \ ?mr ! j = Zero" using List.nth_append [where xs="replicate i Zero @ [One,Two]"] and j_k by simp show "(map (h2 i) [0..(j < i)" hence j_ge_i: "j \ i" by simp thus ?thesis proof (cases "j = i") assume "j = i" with M1 and R2 show ?thesis by simp next assume "\(j = i)" with j_ge_i have j_gt_i: "j > i" by simp thus ?thesis proof (cases "j = i + 1") assume "j = i + 1" with M2 and R3 show ?thesis by simp next assume "\(j = i + 1)" with j_gt_i have "j > i + 1" by simp with M3 and R4 show ?thesis by simp qed qed qed qed from Q1 Q2 and Lemma_2 show ?thesis by blast qed have P2: "\j. j > 0 \ foldl1 f2 (replicate j Zero) = Zero" proof - fix j assume j_0: "(j::nat) > 0" show "foldl1 f2 (replicate j Zero) = Zero" using j_0 proof (induct j) case 0 thus ?case by simp next case (Suc j) thus ?case by (cases j, auto) qed qed have P3: "\j. foldl1 f2 ([One, Two] @ replicate j Zero) = Two" proof - fix j show "foldl1 f2 ([One, Two] @ replicate j Zero) = Two" using L8 [where f=f2 and xs="[One,Two]" and ys="replicate (Suc j) Zero"] and f2_assoc and P2 [where j="Suc j"] by simp qed have "foldl1 f2 ?mr = Two" proof (cases i) case 0 thus ?thesis using P3 by simp next case (Suc i) hence "foldl1 f2 (replicate (Suc i) Zero @ [One, Two] @ replicate (k - Suc i - 1) Zero) = f2 (foldl1 f2 (replicate (Suc i) Zero)) (foldl1 f2 ([One, Two] @ replicate (k - Suc i - 1) Zero))" using L8 [where f=f2 and xs="replicate (Suc i) Zero" and ys="[One, Two] @ replicate (k - Suc i - 1) Zero"] and f2_assoc by simp also have "\ = Two" using P2 [where j="Suc i"] and P3 [where j="k - Suc i - 1"] by simp finally show ?thesis using Suc by simp qed with P1 show ?thesis by simp qed text \ Counterparts of the following two lemmas are shown in the proof of Lemma 4 in the original paper. Since the proof of Lemma 4 is separated here into several smaller lemmas, these two properties are also given separately. \ lemma L9: assumes "\ (f :: three \ three \ three) h. associative f \ foldl1 f (map h js) = foldl1 f (map h [0.. k" shows "foldl1 f1 (map (h1 k i) js) = One" using assms and f1_assoc and Figure_2 by auto lemma L10: assumes "\ (f :: three \ three \ three) h. associative f \ foldl1 f (map h js) = foldl1 f (map h [0.. In the original paper, this lemma is depicted in (and proved by) Figure~4. Therefore, it carries this unusual name here. This lemma expresses that every \i \ k\ is contained in \js\ at least once. \ lemma Figure_4: assumes "foldl1 f1 (map (h1 k i) js) = One" and "js \ []" shows "i \ set js" proof (rule ccontr) assume i_not_in_js: "i \ set js" have One_not_in_map_js: "One \ set (map (h1 k i) js)" proof assume "One \ set (map (h1 k i) js)" hence "One \ (h1 k i) ` (set js)" by simp then obtain j where j_def: "j \ set js \ One = h1 k i j" using Set.image_iff [where f="h1 k i"] by auto hence "i = j" by (simp split: if_splits) with i_not_in_js and j_def show False by simp qed have f1_One: "\x y. x \ One \ y \ One \ f1 x y \ One" proof - fix x y assume "x \ One \ y \ One" thus "f1 x y \ One" by (cases y, cases x, auto) qed have "\xs. 
\ xs \ [] ; One \ set xs \ \ foldl1 f1 xs \ One" proof - fix xs assume A: "(xs :: three list) \ []" thus "One \ set xs \ foldl1 f1 xs \ One" proof (induct xs rule: divide_and_conquer) case Nil thus ?case by simp next case (One x) thus "foldl1 f1 [x] \ One" by simp next case (Partition xs ys) hence "One \ set xs \ One \ set ys" by simp with Partition have "foldl1 f1 xs \ One \ foldl1 f1 ys \ One" by simp with f1_One have "f1 (foldl1 f1 xs) (foldl1 f1 ys) \ One" by simp with L8 [symmetric, where f=f1] and f1_assoc and Partition show "foldl1 f1 (xs @ ys) \ One" by auto qed qed with One_not_in_map_js and assms show False by auto qed text \ In the original paper, this lemma is depicted in (and proved by) Figure~5. Therefore, it carries this unusual name here. This lemma expresses that every \i \ k\ is contained in \js\ at most once. \ lemma Figure_5: assumes "foldl1 f1 (map (h1 k i) js) = One" and "js = xs @ ys" shows "\(i \ set xs \ i \ set ys)" proof (rule ccontr) assume "\\(i \ set xs \ i \ set ys)" hence i_xs_ys: "i \ set xs \ i \ set ys" by simp from i_xs_ys have xs_not_empty: "xs \ []" by auto from i_xs_ys have ys_not_empty: "ys \ []" by auto have f1_Zero: "\x y. x \ Zero \ y \ Zero \ f1 x y \ Zero" proof - fix x y show "x \ Zero \ y \ Zero \ f1 x y \ Zero" by (cases y, simp_all, cases x, simp_all) qed have One_foldl1: "\xs. One \ set xs \ foldl1 f1 xs \ Zero" proof - fix xs assume One_xs: "One \ set xs" thus "foldl1 f1 xs \ Zero" proof (induct xs rule: divide_and_conquer) case Nil thus ?case by simp next case One thus ?case by simp next case (Partition xs ys) hence "One \ set xs \ One \ set ys" by simp with Partition have "foldl1 f1 xs \ Zero \ foldl1 f1 ys \ Zero" by auto with f1_Zero have "f1 (foldl1 f1 xs) (foldl1 f1 ys) \ Zero" by simp thus ?case using L8 [symmetric, where f=f1] and f1_assoc and Partition by auto qed qed have f1_Two: "\x y. x \ Zero \ y \ Zero \ f1 x y = Two" proof - fix x y show "x \ Zero \ y \ Zero \ f1 x y = Two" by (cases y, simp_all, cases x, simp_all) qed from i_xs_ys have "One \ set (map (h1 k i) xs) \ One \ set (map (h1 k i) ys)" by simp hence "foldl1 f1 (map (h1 k i) xs) \ Zero \ foldl1 f1 (map (h1 k i) ys) \ Zero" using One_foldl1 by simp hence "f1 (foldl1 f1 (map (h1 k i) xs)) (foldl1 f1 (map (h1 k i) ys)) = Two" using f1_Two by simp hence "foldl1 f1 (map (h1 k i) (xs @ ys)) = Two" using foldl1_map [symmetric, where h="h1 k i"] and f1_assoc and xs_not_empty and ys_not_empty by auto with assms show False by simp qed text \ In the original paper, this lemma is depicted in (and proved by) Figure~6. Therefore, it carries this unusual name here. This lemma expresses that \js\ contains only elements of \[0... \ lemma Figure_6: assumes "\i. i \ k \ foldl1 f1 (map (h1 k i) js) = One" and "i > k" shows "i \ set js" proof assume i_in_js: "i \ set js" have Two_map: "Two \ set (map (h1 k 0) js)" proof - have "Two = h1 k 0 i" using assms by simp with i_in_js show ?thesis using IntI by (auto split: if_splits) qed have f1_Two: "\x y. x = Two \ y = Two \ f1 x y = Two" proof - fix x y show "x = Two \ y = Two \ f1 x y = Two" by (cases y, auto) qed have "\xs. 
Two \ set xs \ foldl1 f1 xs = Two" proof - fix xs assume Two_xs: "Two \ set xs" thus "foldl1 f1 xs = Two" using Two_xs proof (induct xs rule: divide_and_conquer) case Nil thus ?case by simp next case One thus ?case by simp next case (Partition xs ys) hence "Two \ set xs \ Two \ set ys" by simp hence "foldl1 f1 xs = Two \ foldl1 f1 ys = Two" using Partition by auto with f1_Two have "f1 (foldl1 f1 xs) (foldl1 f1 ys) = Two" by simp thus "foldl1 f1 (xs @ ys) = Two" using L8 [symmetric, where f=f1] and f1_assoc and Partition by auto qed qed with Two_map have "foldl1 f1 (map (h1 k 0) js) = Two" by simp with assms show False by auto qed text \ In the original paper, this lemma is depicted in (and proved by) Figure~7. Therefore, it carries this unusual name here. This lemma expresses that every \i \ k\ in \js\ is eventually followed by \i + 1\. \ lemma Figure_7: assumes "foldl1 f2 (map (h2 i) js) = Two" and "js = xs @ ys" and "xs \ []" and "i = last xs" shows "(i + 1) \ set ys" proof (rule ccontr) assume Suc_i_not_in_ys: "(i + 1) \ set ys" have last_map_One: "last (map (h2 i) xs) = One" proof - have "last (map (h2 i) xs) = (map (h2 i) xs) ! (length (map (h2 i) xs) - 1)" using List.last_conv_nth [where xs="map (h2 i) xs"] and assms by simp also have "\ = (map (h2 i) xs) ! (length xs - 1)" using L2 by simp also have "\ = h2 i (xs ! (length xs - 1))" using L6 and assms by simp also have "\ = h2 i (last xs)" using List.last_conv_nth [symmetric,where xs=xs] and assms by simp also have "\ = One" using assms by simp finally show ?thesis . qed have "\xs. \ xs \ [] ; last xs = One \ \ foldl1 f2 xs = One" proof - fix xs assume last_xs_One: "last xs = One" assume xs_not_empty: "xs \ []" hence xs_partition: "xs = butlast xs @ [last xs]" by simp show "foldl1 f2 xs = One" proof (cases "butlast xs") case Nil with xs_partition and last_xs_One show ?thesis by simp next case Cons hence butlast_not_empty: "butlast xs \ []" by simp have "foldl1 f2 xs = foldl1 f2 (butlast xs @ [last xs])" using xs_partition by simp also have "\ = f2 (foldl1 f2 (butlast xs)) (foldl1 f2 [last xs])" using L8 [where f=f2] and f2_assoc and butlast_not_empty by simp also have "\ = One" using last_xs_One by simp finally show ?thesis . qed qed with last_map_One have foldl1_map_xs: "foldl1 f2 (map (h2 i) xs) = One" using assms by simp have ys_not_empty: "ys \ []" using foldl1_map_xs and assms by auto have Two_map_ys: "Two \ set (map (h2 i) ys)" proof assume "Two \ set (map (h2 i) ys)" hence "Two \ (h2 i) ` (set ys)" by simp then obtain j where j_def: "j \ set ys \ Two = h2 i j" using Set.image_iff [where f="h2 i"] by auto hence "i + 1 = j" by (simp split: if_splits) with Suc_i_not_in_ys and j_def show False by simp qed have f2_One: "\x y. x \ Two \ y \ Two \ f2 x y \ Two" proof - fix x y show "x \ Two \ y \ Two \ f2 x y \ Two" by (cases y, auto) qed have "\xs. 
\ xs \ [] ; Two \ set xs \ \ foldl1 f2 xs \ Two" proof - fix xs assume xs_not_empty: "(xs :: three list) \ []" thus "Two \ set xs \ foldl1 f2 xs \ Two" proof (induct xs rule: divide_and_conquer) case Nil thus ?case by simp next case One thus ?case by simp next case (Partition xs ys) hence "Two \ set xs \ Two \ set ys" by simp hence "foldl1 f2 xs \ Two \ foldl1 f2 ys \ Two" using Partition by simp hence "f2 (foldl1 f2 xs) (foldl1 f2 ys) \ Two" using f2_One by simp thus "foldl1 f2 (xs @ ys) \ Two" using L8 [symmetric, where f=f2] and f2_assoc and Partition by simp qed qed with Two_map_ys have foldl1_map_ys: "foldl1 f2 (map (h2 i) ys) \ Two" using ys_not_empty by simp from f2_One have "f2 (foldl1 f2 (map (h2 i) xs)) (foldl1 f2 (map (h2 i) ys)) \ Two" using foldl1_map_xs and foldl1_map_ys by simp hence "foldl1 f2 (map (h2 i) (xs @ ys)) \ Two" using foldl1_map [symmetric, where h="h2 i" and f=f2] and f2_assoc and assms and ys_not_empty by simp with assms show False by simp qed subsection \Permutations and Lemma 4\ text \ In the original paper, the argumentation goes as follows: From \Figure_4\ and \Figure_5\ we can show that \js\ contains every \i \ k\ exactly once, and from \Figure_6\ we can furthermore show that \js\ contains no other elements. Thus, \js\ must be a permutation of \[0... Here, however, the argumentation is different, because we want to use already existing results. Therefore, we first show that the sets of \js\ and \[0.. are equal, using the results of \Figure_4\ and \Figure_6\. Second, we show that \js\ is a distinct list, i.e. no element occurs twice in \js\. Since also \[0.. is distinct, the multisets of \js\ and \[0.. are equal, and therefore, both lists are permutations. \ lemma js_is_a_permutation: assumes A1: "\ (f :: three \ three \ three) h. associative f \ foldl1 f (map h js) = foldl1 f (map h [0.. []" shows "js <~~> [0..i. i \ k \ foldl1 f1 (map (h1 k i) js) = One" by auto from L9' and Figure_4 and A2 have P1: "\i. i \ k \ i \ set js" by auto from L9' and Figure_5 have P2: "\i xs ys. \ i \ k ; js = xs @ ys \ \ \(i \ set xs \ i \ set ys)" by blast from L9' and Figure_6 have P3: "\i. i > k \ i \ set js" by auto have set_eq: "set [0.. set js" by auto next show "set js \ set [0.. set js" hence "\(j \ set js)" by simp with P3 have "\(j > k)" using HOL.contrapos_nn by auto hence "j \ k" by simp thus "j \ set [0..xs ys. js = xs @ ys \ set xs \ set ys = {}" proof - fix xs ys assume js_xs_ys: "js = xs @ ys" with set_eq have i_xs_ys: "\i. i \ set xs \ i \ set ys \ i \ k" by auto have "\i. i \ k \ (i \ set xs) = (i \ set ys)" proof fix i assume "i \ set xs" moreover assume "i \ k" ultimately show "i \ set ys" using js_xs_ys and P2 by simp next fix i assume "i \ set ys" moreover assume "i \ k" ultimately show "i \ set xs" using js_xs_ys and P2 and P1 by auto qed thus "set xs \ set ys = {}" using i_xs_ys by auto qed with all_set_inter_empty_distinct have "distinct js" using A2 by auto with set_eq have "mset js = mset [0.. [0.. The result of \Figure_7\ is too specific. Instead of only having that every \i\ is eventually followed by \i + 1\, it is more useful to know that every \i\ is followed by all \i + r\, where \r > 0\. This result follows easily by induction from \Figure_7\. \ lemma Figure_7_trans: assumes A1: "\i xs ys. 
\ i < k ; js = xs @ ys ; xs \ [] ; i = last xs \ \ (i + 1) \ set ys" and A2: "(r::nat) > 0" and A3: "i + r \ k" and A4: "js = xs @ ys" and A5: "xs \ []" and A6: "i = last xs" shows "(i + r) \ set ys" using A2 A3 proof (induct r) case 0 thus ?case by simp next case (Suc r) hence IH: "0 < r \ (i + r) \ set ys" by simp from Suc have i_r_k: "i + Suc r \ k" by simp show ?case proof (cases r) case 0 thus ?thesis using A1 and i_r_k and A4 and A5 and A6 by auto next case Suc with IH have "(i + r) \ set ys" by simp then obtain p where p_def: "p < length ys \ ys ! p = i + r" using List.in_set_conv_nth [where x="i + r"] by auto let ?xs = "xs @ take (p + 1) ys" let ?ys = "drop (p + 1) ys" have "i + r < k" using i_r_k by simp moreover have "js = ?xs @ ?ys" using A4 by simp moreover have "?xs \ []" using A5 by simp moreover have "i + r = last ?xs" using p_def and List.take_Suc_conv_app_nth [where i=p and xs=ys] by simp ultimately have "(i + Suc r) \ set ?ys" using A1 [where i="i + r"] by auto thus "(i + Suc r) \ set ys" using List.set_drop_subset [where xs=ys] by auto qed qed text \ Since we want to use Lemma \partitions_sorted\ to show that \js\ is sorted, we need yet another result which can be obtained using the previous lemma and some further argumentation: \ lemma js_partition_order: assumes A1: "js <~~> [0..i xs ys. \ i < k ; js = xs @ ys ; xs \ [] ; i = last xs \ \ (i + 1) \ set ys" and A3: "js = xs @ ys" and A4: "i \ set xs" and A5: "j \ set ys" shows "i \ j" proof (rule ccontr) assume "\(i \ j)" hence i_j: "i > j" by simp from A5 obtain pj where pj_def: "pj < length ys \ ys ! pj = j" using List.in_set_conv_nth [where x=j] by auto let ?xs = "xs @ take (pj + 1) ys" let ?ys = "drop (pj + 1) ys" let ?r = "i - j" from A1 and A3 have "distinct (xs @ ys)" - using Permutation.perm_distinct_iff [where xs="xs @ ys"] by auto + using perm_distinct_iff [where xs="xs @ ys"] by auto hence xs_ys_inter_empty: "set xs \ set ys = {}" by simp from A2 and Figure_7_trans have "\i r xs ys. \ r > 0 ; i + r \ k ; js = xs @ ys ; xs \ [] ; i = last xs \ \ (i + r) \ set ys" by blast moreover from i_j have "?r > 0" by simp moreover have "j + ?r \ k" proof - have "i \ set js" using A4 and A3 by simp hence "i \ set [0.. k" by auto thus ?thesis using i_j by simp qed moreover have "js = ?xs @ ?ys" using A3 by simp moreover have "?xs \ []" using A4 by auto moreover have "j = last (?xs)" using pj_def and List.take_Suc_conv_app_nth [where i=pj and xs=ys] by simp ultimately have "(j + ?r) \ set ?ys" by blast hence "i \ set ys" using i_j and List.set_drop_subset [where xs=ys] by auto with A4 and xs_ys_inter_empty show False by auto qed text \ With the help of the previous lemma, we show now that \js\ equals \[0.., if both lists are permutations and every \i\ is eventually followed by \i + 1\ in \js\. \ lemma js_equals_upt_k: assumes A1: "js <~~> [0..i xs ys. \ i < k ; js = xs @ ys ; xs \ [] ; i = last xs \ \ (i + 1) \ set ys" shows "js = [0..xs ys x y. \ js = xs @ ys ; x \ set xs ; y \ set ys \ \ x \ y" by blast hence "sorted js" using partitions_sorted by blast moreover have "distinct js" - using A1 and Permutation.perm_distinct_iff and List.distinct_upt by blast + using A1 and perm_distinct_iff and List.distinct_upt by blast moreover have "sorted [0.. From all the work done before, we conclude now Lemma 4: \ lemma Lemma_4: assumes "\(f :: three \ three \ three) h. associative f \ foldl1 f (map h js) = foldl1 f (map h [0.. []" shows "js = [0.. [0..i xs ys. 
\ i < k ; js = xs @ ys ; xs \ [] ; i = last xs \ \ (i + 1) \ set ys" by blast ultimately show ?thesis using js_equals_upt_k by auto qed subsection \Lemma 5\ text \ This lemma is a lifting of Lemma 4 to the overall computation of \scanl1\. Its proof closely follows the one given in the original paper. \ lemma Lemma_5: assumes "\(f :: three \ three \ three) h. associative f \ map (foldl1 f \ map h) jss = scanl1 f (map h [0..js. js \ set jss \ js \ []" shows "jss = ups n" proof - have P1: "length jss = length (ups n)" proof - obtain f :: "three \ three \ three" where f_assoc: "associative f" using f1_assoc by auto fix h have "length jss = length (map (foldl1 f \ map h) jss)" by (simp add: L2) also have "\ = length (scanl1 f (map h [0.. = length (map (\k. foldl1 f (take (k + 1) (map h [0.. = length [0.. = length [0.. = length [0.. = length (map (\k. [0.. = length (ups n)" by simp finally show ?thesis . qed have P2: "\k. k < length jss \ jss ! k = (ups n) ! k" proof - fix k assume k_length_jss: "k < length jss" hence non_empty_jss_k: "jss ! k \ []" using assms by simp from k_length_jss have k_length_length: "k < length [1..(f :: three \ three \ three) h. associative f \ foldl1 f (map h (jss ! k)) = foldl1 f (map h [0.. three \ three)" have "foldl1 f (map h (jss ! k)) = (map (foldl1 f \ map h) jss) ! k" using L6 and k_length_jss by auto also have "\ = (scanl1 f (map h [0.. = (map (\k. foldl1 f (take k (map h [0.. = (map (\k. foldl1 f (take k (map h [0.. = (\k. foldl1 f (take k (map h [0.. = foldl1 f (take (k + 1) (map h [0.. = foldl1 f (map h (take (k + 1) [0.. = foldl1 f (map h [0..k. [0.. = (\k. [0.. = [0..Proposition 1\ text \ In the original paper, only non-empty lists were considered, whereas here, the list datatype used also allows for empty lists. Therefore, we need to exclude empty lists to obtain a setting similar to the one in the original paper. In the case of Proposition 1, we need to show that every list contained in the result of \candidate (@) (map wrap [0.. is non-empty. The idea is to interpret empty lists by the value \Zero\ and non-empty lists by the value \One\, and to apply the assumptions. \ lemma non_empty_candidate_results: assumes "\ (f :: three \ three \ three) (xs :: three list). \ associative f ; xs \ [] \ \ candidate f xs = scanl1 f xs" and "js \ set (candidate (@) (map wrap [0.. []" proof - \ \We define a mapping of lists to values of @{text three} as explained\ \ \above, and a function which behaves like @{text @} on values of\ \ \@{text three}.\ let ?h = "\xs. case xs of [] \ Zero | (_#_) \ One" let ?g = "\x y. if (x = One \ y = One) then One else Zero" have g_assoc: "associative ?g" unfolding associative_def by auto \ \Our defined functions fulfill the requirements of the free theorem of\ \ \@{text candidate}, that is:\ have req_free_theorem: "\xs ys. ?h (xs @ ys) = ?g (?h xs) (?h ys)" proof - fix xs ys show "?h (xs @ ys) = ?g (?h xs) (?h ys)" by (cases xs, simp_all, cases ys, simp_all) qed \ \Before applying the assumptions, we show that @{text candidate}'s\ \ \counterpart @{text scanl1}, applied to a non-empty list, returns only\ \ \a repetition of the value @{text One}.\ have set_scanl1_is_One: "set (scanl1 ?g (map ?h (map wrap [0..x. One) [0..x. One) [0..x. One) ([0.. = map (\x. One) [0..x. One) [Suc n]" by simp also have "\ = replicate (Suc n) One @ replicate 1 One" using Suc by simp also have "\ = replicate (Suc n + 1) One" using List.replicate_add [symmetric, where x=One and n="Suc n" and m=1] by simp finally show ?case . qed have foldl1_One: "\xs. 
foldl1 ?g (One # xs) = One" proof - fix xs show "foldl1 ?g (One # xs) = One" proof (induct xs rule: measure_induct [where f=length]) fix x assume "\y. length y < length (x::three list) \ foldl1 ?g (One # y) = One" thus "foldl1 ?g (One # x) = One" by (cases x, auto) qed qed have "scanl1 ?g (map ?h (map wrap [0.. wrap) [0.. = scanl1 ?g (map (\x. One) [0.. = scanl1 ?g (replicate (n + 1) One)" using const_One by auto also have "\ = map (\k. foldl1 ?g (take k (replicate (n + 1) One))) [1.. = map (\k. foldl1 ?g (take k (replicate (n + 1) One))) (map Suc [0.. = map ((\k. foldl1 ?g (take k (replicate (n + 1) One))) \ Suc) [0.. = map (\k. foldl1 ?g (replicate (min (k + 1) (n + 1)) One)) [0.. = map (\k. foldl1 ?g (One # replicate (min k n) One)) [0.. = map (\k. One) [0.. = replicate (n + 1) One" using const_One by simp finally show ?thesis using List.set_replicate [where n="n + 1"] by simp qed \ \Thus, with the assumptions and the free theorem of candidate, we show\ \ \that results of @{text candidate}, after applying @{text h}, can only\ \ \have the value @{text One}.\ have "scanl1 ?g (map ?h (map wrap [0.. = map ?h (candidate (@) (map wrap [0..x. x \ set (map ?h (candidate (@) (map wrap [0.. x = One" using set_scanl1_is_One by auto \ \Now, it is easy to conclude the thesis.\ from assms have "?h js \ ?h ` set (candidate (@) (map wrap [0.. []" by auto qed text \ Proposition 1 is very similar to the corresponding one shown in the original paper except for a slight modification due to the choice of using Isabelle's list datatype. Strictly speaking, the requirement that \xs\ must be non-empty in the assumptions of Proposition 1 is not necessary, because only non-empty lists are applied in the proof. However, the additional requirement eases the proof obligations of the final theorem, i.e. this addition allows more (or easier) applications of the final theorem. \ lemma Proposition_1: assumes "\ (f :: three \ three \ three) (xs :: three list). \ associative f ; xs \ [] \ \ candidate f xs = scanl1 f xs" shows "candidate (@) (map wrap [0.. \This addition is necessary because we are using Isabelle's list datatype\ \ \which allows for empty lists.\ from assms and non_empty_candidate_results have non_empty_candidate: "\js. js \ set (candidate (@) (map wrap [0.. js \ []" by auto have "\(f:: three \ three \ three) h. associative f \ map (foldl1 f \ map h) (candidate (@) (map wrap [0.. three \ three)" hence "map (foldl1 f \ map h) (candidate (@) (map wrap [0.. = scanl1 f (map h [0.. map h) (candidate (@) (map wrap [0..Proving Proposition 2\ text \ Before proving Proposition 2, a non-trivial step of that proof is shown first. In the original paper, the argumentation simply applies L7 and the definition of \map\ and \[0... However, since L7 requires that \k\ must be less than \length [0.. and this does not simply follow for the bound occurrence of \k\, a more complicated proof is necessary. Here, it is shown based on Lemma 2. \ lemma Prop_2_step_L7: "map (\k. foldl1 g (map (nth xs) [0..k. foldl1 g (take (k + 1) xs)) [0..k. foldl1 g (map (nth xs) [0..k. foldl1 g (take (k + 1) xs)) [0..k. k < length (map (\k. foldl1 g (map (nth xs) [0.. (map (\k. foldl1 g (map (nth xs) [0..k. foldl1 g (take (k + 1) xs)) [0..k. foldl1 g (map (nth xs) [0..k. foldl1 g (map (nth xs) [0..k. foldl1 g (map (nth xs) [0.. = foldl1 g (map (nth xs) [0.. = foldl1 g (take (k + 1) xs)" using L7 [where k=k and xs=xs] and k_length' by simp also have "\ = (\k. foldl1 g (take (k + 1) xs)) ([0.. = (map (\k. foldl1 g (take (k + 1) xs)) [0..k. 
foldl1 g (map (nth xs) [0..k. foldl1 g (take (k + 1) xs)) [0.. Compared to the original paper, here, Proposition 2 has the additional assumption that \xs\ is non-empty. The proof, however, is identical to the one given in the original paper, except for the non-trivial step shown before. \ lemma Proposition_2: assumes A1: "\ n. candidate (@) (map wrap [0.. []" shows "candidate g xs = scanl1 g xs" proof - \ \First, based on Lemma 2, we show that\ \ \@{term "xs = map (nth xs) [0.. \ \by the following facts P1 and P2.\ have P1: "length xs = length (map (nth xs) [0.. = length (map (nth xs) [0..k. k < length xs \ xs ! k = (map (nth xs) [0.. = (map (nth xs) [0.. \Thus, with some rewriting, we show the thesis.\ hence "candidate g xs = candidate g (map (nth xs) [0.. = map (foldl1 g \ map (nth xs)) (candidate (@) (map wrap [0.. = map (foldl1 g \ map (nth xs)) (ups (length xs - 1))" using A1 [where n="length xs - 1"] and A3 by simp also have "\ = map (foldl1 g \ map (nth xs)) (map (\k. [0.. = map (\k. foldl1 g (map (nth xs) [0.. = map (\k. foldl1 g (take (k + 1) xs)) [0.. = map (\k. foldl1 g (take k xs)) (map (\k. k + 1) [0.. = map (\k. foldl1 g (take k xs)) [1.. = scanl1 g xs" by simp finally show ?thesis . qed section \The Final Result\ text \ Finally, the main result follows directly from Proposition 1 and Proposition 2. \ theorem The_0_1_2_Principle: assumes "\ (f :: three \ three \ three) (xs :: three list). \ associative f ; xs \ [] \ \ candidate f xs = scanl1 f xs" and "associative g" and "ys \ []" shows "candidate g ys = scanl1 g ys" using Proposition_1 Proposition_2 and assms by blast text \ \section*{Acknowledgments} I thank Janis Voigtl\"ander for sharing a draft of his paper with me before its publication. \ (*<*) end (*>*) diff --git a/thys/Polynomial_Factorization/ROOT b/thys/Polynomial_Factorization/ROOT --- a/thys/Polynomial_Factorization/ROOT +++ b/thys/Polynomial_Factorization/ROOT @@ -1,74 +1,74 @@ chapter AFP session "JNF-AFP-Lib" (AFP) in "Lib" = "HOL-Algebra" + description "Theories from HOL-Library and the Archive of Formal Proofs that are used by this entry" options [timeout = 600] sessions Containers "Abstract-Rewriting" Gauss_Jordan Matrix Polynomial_Interpolation Show VectorSpace "HOL-Cardinals" theories Containers.Set_Impl Matrix.Utility Matrix.Ordered_Semiring "Abstract-Rewriting.SN_Order_Carrier" "Abstract-Rewriting.Relative_Rewriting" Show.Show_Instances VectorSpace.VectorSpace Polynomial_Interpolation.Missing_Polynomial Polynomial_Interpolation.Ring_Hom_Poly "HOL-Library.AList" "HOL-Library.Cardinality" "HOL-Library.Char_ord" "HOL-Library.Code_Binary_Nat" "HOL-Library.Code_Target_Numeral" "HOL-Library.DAList" "HOL-Library.DAList_Multiset" "HOL-Library.Infinite_Set" "HOL-Library.Lattice_Syntax" "HOL-Library.Mapping" "HOL-Library.Monad_Syntax" "HOL-Library.More_List" "HOL-Library.Multiset" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" "HOL-Library.Permutations" "HOL-Library.IArray" "HOL-Library.Phantom_Type" "HOL-Library.Ramsey" "HOL-Library.RBT_Impl" "HOL-Library.Simps_Case_Conv" "HOL-Library.While_Combinator" "HOL-Computational_Algebra.Fundamental_Theorem_Algebra" "HOL-Computational_Algebra.Fraction_Field" "HOL-Computational_Algebra.Polynomial" "HOL-Computational_Algebra.Primes" "HOL-Cardinals.Order_Union" "HOL-Cardinals.Wellorder_Extension" session Polynomial_Factorization (AFP) = "JNF-AFP-Lib" + description "Algebraic Numbers" options [timeout = 600] sessions Partial_Function_MR Polynomial_Interpolation Show Sqrt_Babylonian theories 
Missing_Multiset Missing_List Precomputation Order_Polynomial Explicit_Roots Dvd_Int_Poly Rational_Root_Test Kronecker_Factorization Square_Free_Factorization Rational_Factorization document_files "root.bib" "root.tex" diff --git a/thys/Program-Conflict-Analysis/Interleave.thy b/thys/Program-Conflict-Analysis/Interleave.thy --- a/thys/Program-Conflict-Analysis/Interleave.thy +++ b/thys/Program-Conflict-Analysis/Interleave.thy @@ -1,417 +1,417 @@ (* Title: Conflict analysis/List Interleaving Operator Author: Peter Lammich Maintainer: Peter Lammich *) section "List Interleaving Operator" theory Interleave -imports Main "HOL-Library.Permutation" Misc +imports Main "HOL-Library.List_Permutation" Misc begin text_raw \\label{thy:Interleave}\ text \ This theory defines an operator to return the set of all possible interleavings of two lists. \ (* Is \-operator needed ? Should we better do a reformulation of \ on sets of lists, to get an associative, commutative operator with identity (ACI ?) ? *) subsection "Definitions" subsubsection "Prepend and concatenate lifted to sets" definition list_set_cons :: "'a \ 'a list set \ 'a list set" (infixr "\" 65) where [simp]: "a\S = (#) a ` S" definition list_set_append :: "'a list \ 'a list set \ 'a list set" (infixr "\" 65) where [simp]: "a\S = (@) a ` S" subsubsection "The interleaving operator" function interleave :: "'a list \ 'a list \ 'a list set" (infixr "\" 64) where "[]\b = {b}" | "a\[] = {a}" | "a#l \ b#r = ((a\(l \ b#r)) \ (b\(a#l \ r)))" by pat_completeness auto termination by lexicographic_order subsection "Properties" subsubsection "Lifted prepend and concatenate operators" lemma cons_set_cons_eq: "a#l \ b\S = (a=b & l\S)" by auto lemma append_set_append_eq: "\length a = length b\ \ a@l \ b\S = (a=b & l\S)" by auto lemma list_set_cons_cases[cases set]: "\w\a\S; !!w'. \ w=a#w'; w'\S \ \ P\ \ P" by auto lemma list_set_append_cases[cases set]: "\w\a\S; !!w'. \ w=a@w'; w'\S \ \ P\ \ P" by auto subsubsection "Standard simplification-, introduction-, elimination-, and induction rules" lemma interleave_cons1: "l \ a\b \ x#l \ x#a \ b" apply(case_tac b) apply(auto) done lemma interleave_cons2: "l \ a\b \ x#l \ a \ x#b" apply(case_tac a) apply(auto) done lemmas interleave_cons = interleave_cons1 interleave_cons2 lemma interleave_empty[simp]: "[]\a\b = (a=[] & b=[])" apply(case_tac a) apply(case_tac b) apply(simp) apply(simp) apply(case_tac b) apply(auto) done (* TODO: Is this wise as default simp ?*) lemma interleave_length[rule_format, simp]: "ALL x : a\b . length x = length a + length b" apply(induct rule: interleave.induct) apply(auto) done lemma interleave_same[simp]: "y\l\y = (l=[])" apply (rule iffI) apply (subgoal_tac "length y = length l + length y") apply (simp del: interleave_length) apply (erule interleave_length) apply simp done lemma interleave_comm[simp]: "a\b = b\a" (is "?P f a b") apply(induct rule: interleave.induct) apply(auto) done lemma interleave_cont_conc[rule_format, simp]: "ALL b . a@b \ a\b" apply(induct_tac a) apply(auto simp add: interleave_cons) done lemma interleave_cont_rev_conc[simp]: "b@a \ a\b" apply (subgoal_tac "b@a \ b\a") apply(simp) apply(simp only: interleave_cont_conc) done lemma interleave_not_empty: "a\b ~= {}" apply(induct rule: interleave.induct) apply(auto) done lemma cons_interleave_split: "\a#l \ l1\l2\ \ (EX lh . 
l1=a#lh & l \ lh\l2 | l2=a#lh & l \ l1\lh )" apply(case_tac l1) apply(auto) apply(case_tac l2) apply(auto) done lemma cons_interleave_cases[cases set, case_names left right]: "\a#l \ l1\l2; !!lh . \l1=a#lh; l \ lh\l2\ \ P; !!lh. \l2=a#lh; l \ l1\lh\ \ P\ \ P" by (blast dest: cons_interleave_split) lemma interleave_cases[cases set, case_names empty left right]: "\l\l1\l2; \ l=[]; l1=[]; l2=[] \ \ P; !!a l' l1'. \l=a#l'; l1=a#l1'; l'\l1'\l2\ \ P; !!a l' l2'. \l=a#l'; l2=a#l2'; l'\l1\l2'\ \ P \ \ P" apply (cases l) apply (simp) apply simp apply (erule cons_interleave_cases) apply simp_all done lemma interleave_elem_induct[induct set, case_names empty left right]: "!!w1 w2. \ w\w1\w2; P [] [] []; !!e w w1 w2. \ P w w1 w2; w\w1\w2 \ \ P (e#w) (e#w1) w2; !!e w w1 w2. \ P w w1 w2; w\w1\w2 \ \ P (e#w) w1 (e#w2) \ \ P w w1 w2" by (induct w) (auto elim!: cons_interleave_cases intro!: interleave_cons) lemma interleave_rev_cons1[rule_format]: "ALL a b . l \ a\b \ l@[x] \ a@[x] \ b" (is "?P l") proof (induct l) case Nil show ?case by (simp) case (Cons e l) assume IH[rule_format]: "?P l" show "?P (e#l)" proof (intro allI impI) fix a b assume "e#l \ a\b" then obtain c where SPLIT: "a=e#c & l\c\b | b=e#c & l\a\c" by (blast dest: cons_interleave_split) with IH have "a=e#c & l@[x]\c@[x]\b | b=e#c & l@[x]\a@[x]\c" by auto hence "a=e#c & e#l@[x]\e#c@[x]\b | b=e#c & e#l@[x]\a@[x]\e#c" by (auto simp add: interleave_cons) thus "(e#l)@[x]\a@[x]\b" by auto qed qed corollary interleave_rev_cons2: "l \ a\b \ l@[x] \ a \ b@[x]" proof - assume "l \ a\b" hence "l\b\a" by auto hence "l@[x] \ b@[x] \ a" by (blast dest: interleave_rev_cons1) thus ?thesis by auto qed lemmas interleave_rev_cons = interleave_rev_cons1 interleave_rev_cons2 subsubsection "Interleaving and list concatenation" lemma interleave_append1: "\l \ a\b\ \ x@l \ x@a \ b" proof - have "ALL l a b . l \ a\b \ x@l \ x@a \ b" (is "?P x") proof (induct x) show "?P []" by simp next fix e fix x::"'a list" assume IH: "?P x" show "?P (e#x)" proof (intro impI allI) fix l::"'a list" fix a b assume "l \ a \ b" with IH have "x@l \ x@a \ b" by auto with interleave_cons1 show "(e # x) @ l \ (e # x) @ a \ b" by force qed qed moreover assume "l \ a \ b" ultimately show ?thesis by blast qed lemma interleave_append2: "\l \ a\b\ \ x@l \ a \ x@b" proof - have "ALL l a b . l \ a\b \ x@l \ a \ x@b" (is "?P x") proof (induct x) show "?P []" by simp next fix a fix x::"'a list" assume IH: "\l a b. l \ a \ b \ x @ l \ a \ x @ b" show "\l aa b. l \ aa \ b \ (a # x) @ l \ aa \ (a # x) @ b" proof (intro impI allI) fix l::"'a list" fix aa b assume "l \ aa \ b" with IH have "x@l \ aa \ x@b" by auto with interleave_cons2 show "(a # x) @ l \ aa \ (a # x) @ b" by force qed qed moreover assume "l \ a \ b" ultimately show ?thesis by blast qed lemmas interleave_append = interleave_append1 interleave_append2 lemma interleave_rev_append1: "!!a b w. w\a\b \ w@w' \ a@w' \ b" proof (induct w' rule: rev_induct) case Nil thus ?case by simp next case (snoc e w') note IHP=this hence "w@w'\a@w'\b" by auto thus ?case using interleave_rev_cons1[of "w@w'" "a@w'" "b"] by (simp) qed lemma interleave_rev_append2: "w\a\b \ w@w' \ a \ b@w'" by (simp only: interleave_comm[of a b] interleave_comm[of a "b@w'"]) (blast dest: interleave_rev_append1) lemmas interleave_rev_append = interleave_rev_append1 interleave_rev_append2 lemma interleave_rev1[rule_format]: "ALL w1 w2 . 
(w\w1\w2) \ (rev w \ rev w1 \ rev w2)" (is "?P w") proof (induct w) case Nil show ?case by (simp) case (Cons e w) assume IH[rule_format]: "?P w" show ?case proof (clarsimp) fix w1 w2 assume "e # w \ w1 \ w2" then obtain w' where "w1=e#w' & w\w'\w2 | w2=e#w' & w\w1\w'" by (blast dest: cons_interleave_split) with IH have "w1=e#w' & rev w\rev w' \ rev w2 | w2=e#w' & rev w \ rev w1 \ rev w'" by auto thus "rev w @ [e]\rev w1 \ rev w2" by (auto simp add: interleave_rev_cons) qed qed corollary interleave_rev2: "(rev w \ rev w1 \ rev w2) \ (w\w1\w2)" apply (subgoal_tac "(rev w\rev w1\rev w2) = (rev (rev w) \ rev (rev w1) \ rev (rev w2))") apply(simp) apply (blast dest: interleave_rev1) done lemmas interleave_rev = interleave_rev1 interleave_rev2 lemma rev_cons_interleave_split: "\l@[a] \ l1\l2\ \ (EX lh . l1=lh@[a] & l \ lh\l2 | l2=lh@[a] & l \ l1\lh )" proof - assume "l@[a] \ l1\l2" hence "a#rev l \ rev l1 \ rev l2" by (auto dest: interleave_rev) then obtain lh where "rev l1 = a#lh & rev l \ lh\rev l2 | rev l2 = a#lh & rev l \ rev l1 \ lh" by (blast dest: cons_interleave_split) hence "rev l1 = a#lh & l \ rev lh \ l2 | rev l2 = a#lh & l \ l1 \ rev lh" by (auto dest: interleave_rev) hence "l1 = rev lh @ [a] & l \ rev lh \ l2 | l2 = rev lh @ [a] & l \ l1 \ rev lh" by (simp add: rev_swap) thus ?thesis by blast qed subsubsection "Associativity" lemma interleave_s_assoc1[rule_format]: "ALL w1 ws w2 w3 . (w\w1\ws & ws\w2\w3 \ (EX ws' : w1\w2 . w \ ws'\w3))" proof (induct w) case Nil show ?case by (auto) case (Cons e w) assume IH: "ALL w1 ws w2 w3 . w\w1\ws & ws\w2\w3 \ (EX ws' : w1\w2 . w \ ws'\w3)" show ?case proof (intro impI allI) fix w1 ws w2 w3 assume A: "e#w \ w1 \ ws \ ws \ w2 \ w3" then obtain wh where CASES: "w1=e#wh & w\wh\ws | ws=e#wh & w\w1\wh" by (blast dest!: cons_interleave_split) moreover { assume CASE: "w1=e#wh & w\wh\ws" with A IH obtain ws' where "ws'\wh\w2 & w\ws'\w3" by (blast) hence "e#ws'\ (e#wh)\w2 & e#w \ (e#ws')\w3" by (auto simp add: interleave_cons) with CASE have "\ws'\w1 \ w2. e # w \ ws' \ w3" by blast } moreover { assume CASE: "ws=e#wh & w\w1\wh" with A obtain whh where "w2=e#whh & wh\whh\w3 | w3=e#whh & wh\w2\whh" by (blast dest!: cons_interleave_split) moreover { assume SCASE: "w2=e#whh & wh\whh\w3" with CASE IH obtain ws' where "ws'\w1\whh & w\ws'\w3" by blast with SCASE have "e#ws'\w1\w2 & e#w \ (e#ws')\w3" by (auto simp add: interleave_cons) hence "\ws'\w1 \ w2. e # w \ ws' \ w3" by blast } moreover { assume SCASE: "w3=e#whh & wh\w2\whh" with CASE IH obtain ws' where "ws'\w1\w2 & w\ws'\whh" by blast with SCASE have "ws'\w1\w2 & e#w \ ws'\w3" by (auto simp add: interleave_cons) hence "\ws'\w1 \ w2. e # w \ ws' \ w3" by blast } ultimately have "\ws'\w1 \ w2. e # w \ ws' \ w3" by blast } ultimately show "\ws'\w1 \ w2. e # w \ ws' \ w3" by blast qed qed lemma interleave_s_assoc2: "\w\ws\w3; ws\w1\w2\ \ EX ws' : w2\w3 . w \ w1\ws'" proof - assume "w \ ws \ w3" "ws \ w1 \ w2" hence "w \ w3 \ ws & ws \ w2 \ w1" by simp hence "EX ws':w3\w2 . 
w\ws'\w1" by (simp only: interleave_s_assoc1) thus ?thesis by simp qed lemmas interleave_s_assoc = interleave_s_assoc1 interleave_s_assoc2 subsubsection "Relation to other standard list operations" lemma interleave_set: "w\w1\w2 \ set w = set w1 \ set w2" by (induct rule: interleave_elem_induct) auto lemma interleave_map: "w\w1\w2 \ map f w \ map f w1 \ map f w2" by (induct rule: interleave_elem_induct) (auto intro!: interleave_cons) lemma interleave_filter: "w\w1\w2 \ filter f w \ filter f w1 \ filter f w2" by (induct rule: interleave_elem_induct) (auto intro!: interleave_cons) subsubsection "Relation to sublist order" lemma ileq_interleave_alt: "l'\l == \lb. l\lb \ l'" proof (rule eq_reflection) {fix l' l have "l'\l \ \lb. l\lb \ l'" by (induct rule: less_eq_list_induct) (simp, (blast intro: interleave_cons)+)} moreover {fix l have "!!lb l'. l\lb\l' \ l'\l" by (induct l) (auto intro: less_eq_list_drop elim!: cons_interleave_cases)} ultimately show "(l'\l) = (\lb. l\lb \ l')" by blast qed lemma ileq_interleave: "w\w1\w2 \ w1\w & w2\w" by (unfold ileq_interleave_alt, auto) lemma ilt_ex_notempty: "x < y \ (\xs. xs \ [] \ y \ xs \ x)" apply (auto simp add: order_less_le ileq_interleave_alt) apply (case_tac lb) apply auto done lemma ilt_interleave1: "\w\w1\w2; w1~=[]\ \ w2w\w1\w2; w2~=[]\ \ w1 \Recover structure of @{text w} wrt. to structure of @{text "w1"}\ lemma interleave_recover1[rule_format]: "ALL w1a w1b w2 . w\(w1a@w1b)\w2 \ (EX wa wb w2a w2b . w=wa@wb & w2=w2a@w2b & wa\w1a\w2a & wb\w1b\w2b)" (is "?P w" is "ALL w1a w1b w2 . ?PRE w w1a w1b w2 \ ?CONS w w1a w1b w2") proof (induct w) case Nil show ?case by (auto) case (Cons e w) assume IH[rule_format]: "?P w" show "?P (e#w)" proof (intro allI impI) fix w1a w1b w2 assume PRE: "e # w \ w1a @ w1b \ w2" { assume CASE: "w1a=[]" with PRE have "e#w=[]@(e#w) & w2=[]@w2 & []\w1a\[] & e#w\w1b\w2" by (auto) hence "?CONS (e#w) w1a w1b w2" by blast } moreover { assume CASE: "w1a~=[]" with PRE obtain wh where SCASES: "w1a@w1b=e#wh & w\wh\w2 | w2=e#wh & w\w1a@w1b\wh" by (blast dest: cons_interleave_split) moreover { assume SCASE: "w1a@w1b=e#wh & w\wh\w2" with CASE obtain w1ah where W1AFMT: "w1a=e#w1ah & w1ah@w1b=wh" by (cases w1a, auto) with SCASE have "w\w1ah@w1b\w2" by auto with IH[of w1ah w1b w2] obtain wa wb w2a w2b where "w=wa@wb & w2=w2a@w2b & wa\w1ah\w2a & wb\w1b\w2b" by (blast) with W1AFMT have "e#w=(e#wa)@wb & e#w\e#wa\wb & w2=w2a@w2b & e#wa\w1a\w2a & wb\w1b\w2b" by (auto simp add: interleave_cons) hence "?CONS (e#w) w1a w1b w2" by blast } moreover { assume SCASE: "w2=e#wh & w\w1a@w1b\wh" with IH[of w1a w1b wh] obtain wa wb w2a w2b where "w=wa@wb & wh=w2a@w2b & wa\w1a\w2a & wb\w1b\w2b" by blast with SCASE have "e#w=(e#wa)@wb & w2=(e#w2a)@w2b & e#wa\w1a\e#w2a & wb\w1b\w2b" by (auto simp add: interleave_cons) hence "?CONS (e#w) w1a w1b w2" by blast } ultimately have "?CONS (e#w) w1a w1b w2" by blast } ultimately show "?CONS (e#w) w1a w1b w2" by blast qed qed lemma interleave_recover2: "w\w1\(w2a@w2b) \ EX wa wb w1a w1b . w=wa@wb & w1=w1a@w1b & wa\w1a\w2a & wb\w1b\w2b" proof - assume "w\w1\(w2a@w2b)" hence "w\(w2a@w2b)\w1" by auto hence "EX wa wb w1a w1b . w=wa@wb & w1=w1a@w1b & wa\w2a\w1a & wb\w2b\w1b" by (blast dest: interleave_recover1) thus ?thesis by auto qed lemmas interleave_recover = interleave_recover1 interleave_recover2 \ \Split operands according to element of result\ lemma interleave_unconc: "!! l2 w1 w2 . l1@l2 \ w1\w2 \ \ w11 w12 w21 w22 . 
w1=w11@w12 \ w2=w21@w22 \ l1\w11\w21 \ l2\w12\w22" proof (induct l1) case Nil hence "w1=[]@w1 & w2=[]@w2 & []\[]\[] & l2\w1\w2" by auto thus ?case by fast next case (Cons e l1') note IHP=this hence "e#(l1'@l2)\w1\w2" by auto with cons_interleave_split obtain w' where "(w1=e#w' & l1'@l2\w'\w2) | (w2=e#w' & l1'@l2\w1\w')" (is "?C1 | ?C2") by (fast) moreover { assume CASE: ?C1 moreover then obtain w11' w12' w21 w22 where "w'=w11'@w12' \ w2=w21@w22 \ l1'\w11'\w21 \ l2\w12'\w22" by (fast dest: IHP(1)) moreover hence "e#w'=(e#w11')@w12' & e#l1'\(e#w11')\w21" by (auto dest: interleave_cons) ultimately have ?case by fast } moreover { assume CASE: ?C2 moreover then obtain w11 w12 w21' w22' where "w1=w11@w12 \ w'=w21'@w22' \ l1'\w11\w21' \ l2\w12\w22'" by (fast dest: IHP(1)) moreover hence "e#w'=(e#w21')@w22' & e#l1'\w11\(e#w21')" by (auto dest: interleave_cons) ultimately have ?case by fast } ultimately show ?case by fast qed \ \Reverse direction of @{thm [source] "interleave_unconc"}\ lemma interleave_reconc: "!!w11 w21 l2 w12 w22 . \l1\w11\w21;l2\w12\w22\ \ l1@l2\(w11@w12)\(w21@w22)" proof (induct l1) case Nil thus ?case by (auto) next case (Cons e l1') note IHP=this then obtain w' where "(w11=e#w' & l1'\w'\w21) | (w21=e#w' & l1'\w11\w')" (is "?C1 | ?C2") by (fast dest: cons_interleave_split) moreover { assume CASE: ?C1 moreover with IHP have "l1'@l2\(w'@w12)\(w21@w22)" by auto ultimately have ?case by (auto dest: interleave_cons) } moreover { assume CASE: ?C2 moreover with IHP have "l1'@l2\(w11@w12)\(w'@w22)" by auto ultimately have ?case by (auto dest: interleave_cons) } ultimately show ?case by fast qed (* interleave_unconc and interleave_reconc as equivalence *) lemma interleave_unconc_eq: "!! l2 w1 w2 . l1@l2 \ w1\w2 = (\ w11 w12 w21 w22 . w1=w11@w12 \ w2=w21@w22 \ l1\w11\w21 \ l2\w12\w22)" by (fast dest: interleave_reconc interleave_unconc) end diff --git a/thys/Selection_Heap_Sort/Sort.thy b/thys/Selection_Heap_Sort/Sort.thy --- a/thys/Selection_Heap_Sort/Sort.thy +++ b/thys/Selection_Heap_Sort/Sort.thy @@ -1,35 +1,35 @@ (* Title: Sort.thy Author: Danijela Petrovi\'c, Faculty of Mathematics, University of Belgrade *) section \Locale Sort\ theory Sort imports Main - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" begin text \ First, we start from the definition of a sorting algorithm. {\em What are the basic properties that any sorting algorithm must satisfy? } There are two basic features any sorting algorithm must satisfy: \begin{itemize} \item The elements of the sorted array must be in some order, e.g. ascending or descending order. In this paper we are sorting in ascending order. $$sorted\ (sort\ \ l)$$ \item The algorithm does not change or delete elements of the given array, i.e. the sorted array is a permutation of the input array. 
$$sort\ l\ <\sim \sim>\ l$$ \end{itemize} \ locale Sort = fixes sort :: "'a::linorder list \ 'a list" assumes sorted: "sorted (sort l)" assumes permutation: "sort l <~~> l" end diff --git a/thys/Simpl/ex/Quicksort.thy b/thys/Simpl/ex/Quicksort.thy --- a/thys/Simpl/ex/Quicksort.thy +++ b/thys/Simpl/ex/Quicksort.thy @@ -1,281 +1,281 @@ (* Author: Norbert Schirmer Maintainer: Norbert Schirmer, norbert.schirmer at web de License: LGPL *) (* Title: Quicksort.thy Author: Norbert Schirmer, TU Muenchen Copyright (C) 2004-2008 Norbert Schirmer Some rights reserved, TU Muenchen This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA *) section "Example: Quicksort on Heap Lists" theory Quicksort -imports "../Vcg" "../HeapList" "HOL-Library.Permutation" +imports "../Vcg" "../HeapList" "HOL-Library.List_Permutation" begin record globals_heap = next_' :: "ref \ ref" cont_' :: "ref \ nat" record 'g vars = "'g state" + p_' :: "ref" q_' :: "ref" le_' :: "ref" gt_' :: "ref" hd_' :: "ref" tl_' :: "ref" procedures append(p,q|p) = "IF \p=Null THEN \p :== \q ELSE \p\\next :== CALL append(\p\\next,\q) FI" append_spec: "\\ Ps Qs. \\ \\. List \p \next Ps \ List \q \next Qs \ set Ps \ set Qs = {}\ \p :== PROC append(\p,\q) \List \p \next (Ps@Qs) \ (\x. x\set Ps \ \next x = \<^bsup>\\<^esup>next x)\" append_modifies: "\\. \\ {\} \p :== PROC append(\p,\q){t. t may_only_modify_globals \ in [next]}" lemma (in append_impl) append_modifies: shows "\\. \\ {\} \p :== PROC append(\p,\q){t. t may_only_modify_globals \ in [next]}" apply (hoare_rule HoarePartial.ProcRec1) apply (vcg spec=modifies) done lemma (in append_impl) append_spec: shows "\\ Ps Qs. \\ \\. List \p \next Ps \ List \q \next Qs \ set Ps \ set Qs = {}\ \p :== PROC append(\p,\q) \List \p \next (Ps@Qs) \ (\x. x\set Ps \ \next x = \<^bsup>\\<^esup>next x)\" apply (hoare_rule HoarePartial.ProcRec1) apply vcg apply fastforce done primrec sorted:: "('a \ 'a \ bool) \ 'a list \ bool" where "sorted le [] = True" | "sorted le (x#xs) = ((\y\set xs. 
le x y) \ sorted le xs)" lemma perm_set_eq: assumes perm: "xs <~~> ys" shows "set xs = set ys" using perm by induct auto lemma perm_Cons_eq [iff]: "x#xs <~~> x#ys = (xs <~~> ys)" by auto lemma perm_app_Cons_eq1 : "xs@y#ys <~~> zs = (y#xs@ys <~~> zs)" proof - have app_Cons: "xs@y#ys <~~> y#xs@ys" by (rule perm_sym, rule perm_append_Cons) show ?thesis proof assume "xs@y#ys <~~> zs" with app_Cons [THEN perm_sym] show "y#xs@ys <~~> zs" by (rule perm.trans) next assume " y#xs@ys <~~> zs" with app_Cons show "xs@y#ys <~~> zs" by (rule perm.trans) qed qed lemma perm_app_Cons_eq2 : "zs <~~> xs@y#ys = (zs <~~> y#xs@ys)" proof - have "xs@y#ys <~~> zs = (y#xs@ys <~~> zs)" by (rule perm_app_Cons_eq1) thus ?thesis by (iprover intro: perm_sym) qed lemmas perm_app_Cons_simps = perm_app_Cons_eq1 [THEN sym] perm_app_Cons_eq2 [THEN sym] lemma sorted_append[simp]: "sorted le (xs@ys) = (sorted le xs \ sorted le ys \ (\x \ set xs. \y \ set ys. le x y))" by (induct xs, auto) lemma perm_append_blocks: assumes ws_ys: "ws <~~> ys" assumes xs_zs: "xs <~~> zs" shows "ws@xs <~~> ys@zs" using ws_ys proof (induct) case (swap l x y) from xs_zs show "(l # x # y) @ xs <~~> (x # l # y) @ zs" by (induct) auto qed (insert xs_zs , auto) procedures quickSort(p|p) = "IF \p=Null THEN SKIP ELSE \tl :== \p\\next;; \le :== Null;; \gt :== Null;; WHILE \tl\Null DO \hd :== \tl;; \tl :== \tl\\next;; IF \hd\\cont \ \p\\cont THEN \hd\\next :== \le;; \le :== \hd ELSE \hd\\next :== \gt;; \gt :== \hd FI OD;; \le :== CALL quickSort(\le);; \gt :== CALL quickSort(\gt);; \p\\next :== \gt;; \le :== CALL append(\le,\p);; \p :== \le FI" quickSort_spec: "\\ Ps. \\ \\. List \p \next Ps\ \p :== PROC quickSort(\p) \(\sortedPs. List \p \next sortedPs \ sorted (\) (map \<^bsup>\\<^esup>cont sortedPs) \ Ps <~~> sortedPs) \ (\x. x\set Ps \ \next x = \<^bsup>\\<^esup>next x)\" quickSort_modifies: "\\. \\ {\} \p :== PROC quickSort(\p) {t. t may_only_modify_globals \ in [next]}" lemma (in quickSort_impl) quickSort_modifies: shows "\\. \\ {\} \p :== PROC quickSort(\p) {t. t may_only_modify_globals \ in [next]}" apply (hoare_rule HoarePartial.ProcRec1) apply (vcg spec=modifies) done lemma (in quickSort_impl) quickSort_spec: shows "\\ Ps. \\ \\. List \p \next Ps\ \p :== PROC quickSort(\p) \(\sortedPs. List \p \next sortedPs \ sorted (\) (map \<^bsup>\\<^esup>cont sortedPs) \ Ps <~~> sortedPs) \ (\x. x\set Ps \ \next x = \<^bsup>\\<^esup>next x)\" apply (hoare_rule HoarePartial.ProcRec1) apply (hoare_rule anno = "IF \p=Null THEN SKIP ELSE \tl :== \p\\next;; \le :== Null;; \gt :== Null;; WHILE \tl\Null INV \ (\les grs tls. List \le \next les \ List \gt \next grs \ List \tl \next tls \ Ps <~~> \p#tls@les@grs \ distinct(\p#tls@les@grs) \ (\x\set les. x\\cont \ \p\\cont) \ (\x\set grs. \p\\cont < x\\cont)) \ \p=\<^bsup>\\<^esup>p \ \cont=\<^bsup>\\<^esup>cont \ List \<^bsup>\\<^esup>p \<^bsup>\\<^esup>next Ps \ (\x. 
x\set Ps \ \next x = \<^bsup>\\<^esup>next x)\ DO \hd :== \tl;; \tl :== \tl\\next;; IF \hd\\cont \ \p\\cont THEN \hd\\next :== \le;; \le :== \hd ELSE \hd\\next :== \gt;; \gt :== \hd FI OD;; \le :== CALL quickSort(\le);; \gt :== CALL quickSort(\gt);; \p\\next :== \gt;; \le :== CALL append(\le,\p);; \p :== \le FI" in HoarePartial.annotateI) apply vcg apply fastforce apply clarsimp apply (rule conjI) apply clarify apply (rule conjI) apply (rule_tac x="tl#les" in exI) apply simp apply (rule_tac x="grs" in exI) apply simp apply (rule_tac x="ps" in exI) apply simp apply (erule perm.trans) apply simp apply (simp add: perm_app_Cons_simps) apply (simp add: perm_set_eq) apply clarify apply (rule conjI) apply (rule_tac x="les" in exI) apply simp apply (rule_tac x="tl#grs" in exI) apply simp apply (rule_tac x="ps" in exI) apply simp apply (erule perm.trans) apply simp apply (simp add: perm_app_Cons_simps) apply (simp add: perm_set_eq) apply clarsimp apply (rule_tac ?x=grs in exI) apply (rule conjI) apply (erule heap_eq_ListI1) apply clarify apply (erule_tac x=x in allE)back apply blast apply clarsimp apply (rule_tac x="sortedPs" in exI) apply (rule conjI) apply (erule heap_eq_ListI1) apply (clarsimp) apply (erule_tac x=x in allE) back back apply (fast dest!: perm_set_eq) apply (rule_tac x="p#sortedPsa" in exI) apply (rule conjI) apply (fastforce dest!: perm_set_eq) apply (rule conjI) apply (force dest!: perm_set_eq) apply clarsimp apply (rule conjI) apply (fastforce dest!: perm_set_eq) apply (rule conjI) apply (fastforce dest!: perm_set_eq) apply (rule conjI) apply (erule perm.trans) apply (simp add: perm_app_Cons_simps list_all_iff) apply (fastforce intro!: perm_append_blocks) apply clarsimp apply (erule_tac x=x in allE)+ apply (force dest!: perm_set_eq) done end diff --git a/thys/Simplex/Simplex.thy b/thys/Simplex/Simplex.thy --- a/thys/Simplex/Simplex.thy +++ b/thys/Simplex/Simplex.thy @@ -1,8324 +1,8324 @@ (* Authors: F. Maric, M. Spasic, R. Thiemann *) section \The Simplex Algorithm\ theory Simplex imports Linear_Poly_Maps QDelta Rel_Chain Simplex_Algebra "HOL-Library.RBT_Mapping" - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" "HOL-Library.Code_Target_Numeral" begin text\Linear constraints are of the form \p \ c\ or \p\<^sub>1 \ p\<^sub>2\, where \p\, \p\<^sub>1\, and \p\<^sub>2\, are linear polynomials, \c\ is a rational constant and \\ \ {<, >, \, \, =}\. Their abstract syntax is given by the \constraint\ type, and semantics is given by the relation \\\<^sub>c\, defined straightforwardly by primitive recursion over the \constraint\ type. A set of constraints is satisfied, denoted by \\\<^sub>c\<^sub>s\, if all constraints are. There is also an indexed version \\\<^sub>i\<^sub>c\<^sub>s\ which takes an explicit set of indices and then only demands that these constraints are satisfied.\ datatype constraint = LT linear_poly rat | GT linear_poly rat | LEQ linear_poly rat | GEQ linear_poly rat | EQ linear_poly rat | LTPP linear_poly linear_poly | GTPP linear_poly linear_poly | LEQPP linear_poly linear_poly | GEQPP linear_poly linear_poly | EQPP linear_poly linear_poly text \Indexed constraints are just pairs of indices and constraints. 
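For illustration (a purely hypothetical instance, with natural numbers as indices), the constraint $x \leq 2$ tagged with index $1$ is represented by the pair $(1,\ x \leq 2)$, and an input problem is just a list of such pairs, e.g.\ $[(1,\ x \leq 2),\ (2,\ x \geq 3)]$.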
Indices will be used to identify constraints, e.g., to easily specify an unsatisfiable core by a list of indices.\ type_synonym 'i i_constraint = "'i \ constraint" abbreviation (input) restrict_to :: "'i set \ ('i \ 'a) set \ 'a set" where "restrict_to I xs \ snd ` (xs \ (I \ UNIV))" text \The operation @{const restrict_to} is used to select constraints for a given index set.\ abbreviation (input) flat :: "('i \ 'a) set \ 'a set" where "flat xs \ snd ` xs" text \The operation @{const flat} is used to drop indices from a set of indexed constraints.\ abbreviation (input) flat_list :: "('i \ 'a) list \ 'a list" where "flat_list xs \ map snd xs" primrec satisfies_constraint :: "'a :: lrv valuation \ constraint \ bool" (infixl "\\<^sub>c" 100) where "v \\<^sub>c (LT l r) \ (l\v\) < r *R 1" | "v \\<^sub>c GT l r \ (l\v\) > r *R 1" | "v \\<^sub>c LEQ l r \ (l\v\) \ r *R 1" | "v \\<^sub>c GEQ l r \ (l\v\) \ r *R 1" | "v \\<^sub>c EQ l r \ (l\v\) = r *R 1" | "v \\<^sub>c LTPP l1 l2 \ (l1\v\) < (l2\v\)" | "v \\<^sub>c GTPP l1 l2 \ (l1\v\) > (l2\v\)" | "v \\<^sub>c LEQPP l1 l2 \ (l1\v\) \ (l2\v\)" | "v \\<^sub>c GEQPP l1 l2 \ (l1\v\) \ (l2\v\)" | "v \\<^sub>c EQPP l1 l2 \ (l1\v\) = (l2\v\)" abbreviation satisfies_constraints :: "rat valuation \ constraint set \ bool" (infixl "\\<^sub>c\<^sub>s" 100) where "v \\<^sub>c\<^sub>s cs \ \ c \ cs. v \\<^sub>c c" lemma unsat_mono: assumes "\ (\ v. v \\<^sub>c\<^sub>s cs)" and "cs \ ds" shows "\ (\ v. v \\<^sub>c\<^sub>s ds)" using assms by auto fun i_satisfies_cs (infixl "\\<^sub>i\<^sub>c\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>c\<^sub>s cs \ v \\<^sub>c\<^sub>s restrict_to I cs" definition distinct_indices :: "('i \ 'c) list \ bool" where "distinct_indices as = (distinct (map fst as))" lemma distinct_indicesD: "distinct_indices as \ (i,x) \ set as \ (i,y) \ set as \ x = y" unfolding distinct_indices_def by (rule eq_key_imp_eq_value) text \For the unsat-core predicate we only demand minimality in case that the indices are distinct. Otherwise, minimality does in general not hold. For instance, consider the input constraints $c_1: x < 0$, $c_2: x > 2$ and $c_2: x < 1$ where the index $c_2$ occurs twice. If the simplex-method first encounters constraint $c_1$, then it will detect that there is a conflict between $c_1$ and the first $c_2$-constraint. Consequently, the index-set $\{c_1,c_2\}$ will be returned, but this set is not minimal since $\{c_2\}$ is already unsatisfiable.\ definition minimal_unsat_core :: "'i set \ 'i i_constraint list \ bool" where "minimal_unsat_core I ics = ((I \ fst ` set ics) \ (\ (\ v. (I,v) \\<^sub>i\<^sub>c\<^sub>s set ics)) \ (distinct_indices ics \ (\ J. J \ I \ (\ v. (J,v) \\<^sub>i\<^sub>c\<^sub>s set ics))))" subsection \Procedure Specification\ abbreviation (input) Unsat where "Unsat \ Inl" abbreviation (input) Sat where "Sat \ Inr" text\The specification for the satisfiability check procedure is given by:\ locale Solve = \ \Decide if the given list of constraints is satisfiable. 
Return either an unsat core, or a satisfying valuation.\ fixes solve :: "'i i_constraint list \ 'i list + rat valuation" \ \If the status @{const Sat} is returned, then returned valuation satisfies all constraints.\ assumes simplex_sat: "solve cs = Sat v \ v \\<^sub>c\<^sub>s flat (set cs)" \ \If the status @{const Unsat} is returned, then constraints are unsatisfiable, i.e., an unsatisfiable core is returned.\ assumes simplex_unsat: "solve cs = Unsat I \ minimal_unsat_core (set I) cs" abbreviation (input) look where "look \ Mapping.lookup" abbreviation (input) upd where "upd \ Mapping.update" lemma look_upd: "look (upd k v m) = (look m)(k \ v)" by (transfer, auto) lemmas look_upd_simps[simp] = look_upd Mapping.lookup_empty definition map2fun:: "(var, 'a :: zero) mapping \ var \ 'a" where "map2fun v \ \x. case look v x of None \ 0 | Some y \ y" syntax "_map2fun" :: "(var, 'a) mapping \ var \ 'a" ("\_\") translations "\v\" == "CONST map2fun v" lemma map2fun_def': "\v\ x \ case Mapping.lookup v x of None \ 0 | Some y \ y" by (auto simp add: map2fun_def) text\Note that the above specification requires returning a valuation (defined as a HOL function), which is not efficiently executable. In order to enable more efficient data structures for representing valuations, a refinement of this specification is needed and the function \solve\ is replaced by the function \solve_exec\ returning optional \(var, rat) mapping\ instead of \var \ rat\ function. This way, efficient data structures for representing mappings can be easily plugged-in during code generation \cite{florian-refinement}. A conversion from the \mapping\ datatype to HOL function is denoted by \\_\\ and given by: @{thm map2fun_def'[no_vars]}.\ locale SolveExec = fixes solve_exec :: "'i i_constraint list \ 'i list + (var, rat) mapping" assumes simplex_sat0: "solve_exec cs = Sat v \ \v\ \\<^sub>c\<^sub>s flat (set cs)" assumes simplex_unsat0: "solve_exec cs = Unsat I \ minimal_unsat_core (set I) cs" begin definition solve where "solve cs \ case solve_exec cs of Sat v \ Sat \v\ | Unsat c \ Unsat c" end sublocale SolveExec < Solve solve by (unfold_locales, insert simplex_sat0 simplex_unsat0, auto simp: solve_def split: sum.splits) subsection \Handling Strict Inequalities\ text\The first step of the procedure is removing all equalities and strict inequalities. Equalities can be easily rewritten to non-strict inequalities. Removing strict inequalities can be done by replacing the list of constraints by a new one, formulated over an extension \\'\ of the space of rationals \\\. \\'\ must have a structure of a linearly ordered vector space over \\\ (represented by the type class \lrv\) and must guarantee that if some non-strict constraints are satisfied in \\'\, then there is a satisfying valuation for the original constraints in \\\. Our final implementation uses the \\\<^sub>\\ space, defined in \cite{simplex-rad} (basic idea is to replace \p < c\ by \p \ c - \\ and \p > c\ by \p \ c + \\ for a symbolic parameter \\\). So, all constraints are reduced to the form \p \ b\, where \p\ is a linear polynomial (still over \\\), \b\ is constant from \\'\ and \\ \ {\, \}\. The non-strict constraints are represented by the type \'a ns_constraint\, and their semantics is denoted by \\\<^sub>n\<^sub>s\ and \\\<^sub>n\<^sub>s\<^sub>s\. 
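As an informal worked example (not part of the formal development): the equality $x = 1$ is first rewritten into the two non-strict constraints $x \leq 1$ and $x \geq 1$, while the strict constraint $x < 1$ becomes $x \leq 1 - \delta$. The system $\{x < 1,\ x \geq 1\}$ is thus translated to $\{x \leq 1 - \delta,\ x \geq 1\}$, which is unsatisfiable for every positive value of $\delta$, mirroring the unsatisfiability of the original system; conversely, a satisfying valuation over $\mathbb{Q}_\delta$ can be turned into a rational one by instantiating $\delta$ with a sufficiently small positive rational.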
The indexed variant is \\\<^sub>i\<^sub>n\<^sub>s\<^sub>s\.\ datatype 'a ns_constraint = LEQ_ns linear_poly 'a | GEQ_ns linear_poly 'a type_synonym ('i,'a) i_ns_constraint = "'i \ 'a ns_constraint" primrec satisfiable_ns_constraint :: "'a::lrv valuation \ 'a ns_constraint \ bool" (infixl "\\<^sub>n\<^sub>s" 100) where "v \\<^sub>n\<^sub>s LEQ_ns l r \ l\v\ \ r" | "v \\<^sub>n\<^sub>s GEQ_ns l r \ l\v\ \ r" abbreviation satisfies_ns_constraints :: "'a::lrv valuation \ 'a ns_constraint set \ bool" (infixl "\\<^sub>n\<^sub>s\<^sub>s " 100) where "v \\<^sub>n\<^sub>s\<^sub>s cs \ \ c \ cs. v \\<^sub>n\<^sub>s c" fun i_satisfies_ns_constraints :: "'i set \ 'a::lrv valuation \ ('i,'a) i_ns_constraint set \ bool" (infixl "\\<^sub>i\<^sub>n\<^sub>s\<^sub>s " 100) where "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs \ v \\<^sub>n\<^sub>s\<^sub>s restrict_to I cs" lemma i_satisfies_ns_constraints_mono: "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs \ J \ I \ (J,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs" by auto primrec poly :: "'a ns_constraint \ linear_poly" where "poly (LEQ_ns p a) = p" | "poly (GEQ_ns p a) = p" primrec ns_constraint_const :: "'a ns_constraint \ 'a" where "ns_constraint_const (LEQ_ns p a) = a" | "ns_constraint_const (GEQ_ns p a) = a" definition distinct_indices_ns :: "('i,'a :: lrv) i_ns_constraint set \ bool" where "distinct_indices_ns ns = ((\ n1 n2 i. (i,n1) \ ns \ (i,n2) \ ns \ poly n1 = poly n2 \ ns_constraint_const n1 = ns_constraint_const n2))" definition minimal_unsat_core_ns :: "'i set \ ('i,'a :: lrv) i_ns_constraint set \ bool" where "minimal_unsat_core_ns I cs = ((I \ fst ` cs) \ (\ (\ v. (I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs)) \ (distinct_indices_ns cs \ (\ J \ I. \ v. (J,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs)))" text\Specification of reduction of constraints to non-strict form is given by:\ locale To_ns = \ \Convert a constraint to an equisatisfiable non-strict constraint list. The conversion must work for arbitrary subsets of constraints -- selected by some index set I -- in order to carry over unsat-cores and in order to support incremental simplex solving.\ fixes to_ns :: "'i i_constraint list \ ('i,'a::lrv) i_ns_constraint list" \ \Convert the valuation that satisfies all non-strict constraints to the valuation that satisfies all initial constraints.\ fixes from_ns :: "(var, 'a) mapping \ 'a ns_constraint list \ (var, rat) mapping" assumes to_ns_unsat: "minimal_unsat_core_ns I (set (to_ns cs)) \ minimal_unsat_core I cs" assumes i_to_ns_sat: "(I,\v'\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs) \ (I,\from_ns v' (flat_list (to_ns cs))\) \\<^sub>i\<^sub>c\<^sub>s set cs" assumes to_ns_indices: "fst ` set (to_ns cs) = fst ` set cs" assumes distinct_cond: "distinct_indices cs \ distinct_indices_ns (set (to_ns cs))" begin lemma to_ns_sat: "\v'\ \\<^sub>n\<^sub>s\<^sub>s flat (set (to_ns cs)) \ \from_ns v' (flat_list (to_ns cs))\ \\<^sub>c\<^sub>s flat (set cs)" using i_to_ns_sat[of UNIV v' cs] by auto end locale Solve_exec_ns = fixes solve_exec_ns :: "('i,'a::lrv) i_ns_constraint list \ 'i list + (var, 'a) mapping" assumes simplex_ns_sat: "solve_exec_ns cs = Sat v \ \v\ \\<^sub>n\<^sub>s\<^sub>s flat (set cs)" assumes simplex_ns_unsat: "solve_exec_ns cs = Unsat I \ minimal_unsat_core_ns (set I) (set cs)" text\After the transformation, the procedure is reduced to solving only the non-strict constraints, implemented in the \solve_exec_ns\ function having an analogous specification to the \solve\ function. 
If \to_ns\, \from_ns\ and \solve_exec_ns\ are available, the \solve_exec\ function can be easily defined and it can be easily shown that this definition satisfies its specification (also analogous to \solve\). \ locale SolveExec' = To_ns to_ns from_ns + Solve_exec_ns solve_exec_ns for to_ns:: "'i i_constraint list \ ('i,'a::lrv) i_ns_constraint list" and from_ns :: "(var, 'a) mapping \ 'a ns_constraint list \ (var, rat) mapping" and solve_exec_ns :: "('i,'a) i_ns_constraint list \ 'i list + (var, 'a) mapping" begin definition solve_exec where "solve_exec cs \ let cs' = to_ns cs in case solve_exec_ns cs' of Sat v \ Sat (from_ns v (flat_list cs')) | Unsat is \ Unsat is" end sublocale SolveExec' < SolveExec solve_exec by (unfold_locales, insert simplex_ns_sat simplex_ns_unsat to_ns_sat to_ns_unsat, (force simp: solve_exec_def Let_def split: sum.splits)+) subsection \Preprocessing\ text\The next step in the procedure rewrites a list of non-strict constraints into an equisatisfiable form consisting of a list of linear equations (called the \emph{tableau}) and of a list of \emph{atoms} of the form \x\<^sub>i \ b\<^sub>i\ where \x\<^sub>i\ is a variable and \b\<^sub>i\ is a constant (from the extension field). The transformation is straightforward and introduces auxiliary variables for linear polynomials occurring in the initial formula. For example, \[x\<^sub>1 + x\<^sub>2 \ b\<^sub>1, x\<^sub>1 + x\<^sub>2 \ b\<^sub>2, x\<^sub>2 \ b\<^sub>3]\ can be transformed to the tableau \[x\<^sub>3 = x\<^sub>1 + x\<^sub>2]\ and atoms \[x\<^sub>3 \ b\<^sub>1, x\<^sub>3 \ b\<^sub>2, x\<^sub>2 \ b\<^sub>3]\.\ type_synonym eq = "var \ linear_poly" primrec lhs :: "eq \ var" where "lhs (l, r) = l" primrec rhs :: "eq \ linear_poly" where "rhs (l, r) = r" abbreviation rvars_eq :: "eq \ var set" where "rvars_eq eq \ vars (rhs eq)" definition satisfies_eq :: "'a::rational_vector valuation \ eq \ bool" (infixl "\\<^sub>e" 100) where "v \\<^sub>e eq \ v (lhs eq) = (rhs eq)\v\" lemma satisfies_eq_iff: "v \\<^sub>e (x, p) \ v x = p\v\" by (simp add: satisfies_eq_def) type_synonym tableau ="eq list" definition satisfies_tableau ::"'a::rational_vector valuation \ tableau \ bool" (infixl "\\<^sub>t" 100) where "v \\<^sub>t t \ \ e \ set t. v \\<^sub>e e" definition lvars :: "tableau \ var set" where "lvars t = set (map lhs t)" definition rvars :: "tableau \ var set" where "rvars t = \ (set (map rvars_eq t))" abbreviation tvars where "tvars t \ lvars t \ rvars t" text \The condition that the rhss are non-zero is required to obtain minimal unsatisfiable cores. To observe the problem with 0 as rhs, consider the tableau $x = 0$ in combination with atom $(A: x \leq 0)$ where then $(B: x \geq 1)$ is asserted. In this case, the unsat core would be computed as $\{A,B\}$, although already $\{B\}$ is unsatisfiable.\ definition normalized_tableau :: "tableau \ bool" ("\") where "normalized_tableau t \ distinct (map lhs t) \ lvars t \ rvars t = {} \ 0 \ rhs ` set t" text\Equations are of the form \x = p\, where \x\ is a variable and \p\ is a polynomial, and are represented by the type \eq = var \ linear_poly\. Semantics of equations is given by @{thm satisfies_eq_iff[no_vars]}. Tableau is represented as a list of equations, by the type \tableau = eq list\. Semantics for a tableau is given by @{thm satisfies_tableau_def[no_vars]}. Functions \lvars\ and \rvars\ return sets of variables appearing on the left hand side (lhs) and the right hand side (rhs) of a tableau. 
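As a small illustration using the example from above: for the tableau $[x_3 = x_1 + x_2]$ we have $lvars = \{x_3\}$ and $rvars = \{x_1, x_2\}$, so $x_3$ is determined by the equation while $x_1$ and $x_2$ may be chosen freely.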
Lhs variables are called \emph{basic} while rhs variables are called \emph{non-basic} variables. A tableau \t\ is \emph{normalized} (denoted by @{term "\ t"}) iff no variable occurs on the lhs of two equations in a tableau and if sets of lhs and rhs variables are distinct.\ lemma normalized_tableau_unique_eq_for_lvar: assumes "\ t" shows "\ x \ lvars t. \! p. (x, p) \ set t" proof (safe) fix x assume "x \ lvars t" then show "\p. (x, p) \ set t" unfolding lvars_def by auto next fix x p1 p2 assume *: "(x, p1) \ set t" "(x, p2) \ set t" then show "p1 = p2" using \\ t\ unfolding normalized_tableau_def by (force simp add: distinct_map inj_on_def) qed lemma recalc_tableau_lvars: assumes "\ t" shows "\ v. \ v'. (\ x \ rvars t. v x = v' x) \ v' \\<^sub>t t" proof fix v let ?v' = "\ x. if x \ lvars t then (THE p. (x, p) \ set t) \ v \ else v x" show "\ v'. (\ x \ rvars t. v x = v' x) \ v' \\<^sub>t t" proof (rule_tac x="?v'" in exI, rule conjI) show "\x\rvars t. v x = ?v' x" using \\ t\ unfolding normalized_tableau_def by auto show "?v' \\<^sub>t t" unfolding satisfies_tableau_def satisfies_eq_def proof fix e assume "e \ set t" obtain l r where e: "e = (l,r)" by force show "?v' (lhs e) = rhs e \ ?v' \" proof- have "(lhs e, rhs e) \ set t" using \e \ set t\ e by auto have "\!p. (lhs e, p) \ set t" using \\ t\ normalized_tableau_unique_eq_for_lvar[of t] using \e \ set t\ unfolding lvars_def by auto let ?p = "THE p. (lhs e, p) \ set t" have "(lhs e, ?p) \ set t" apply (rule theI') using \\!p. (lhs e, p) \ set t\ by auto then have "?p = rhs e" using \(lhs e, rhs e) \ set t\ using \\!p. (lhs e, p) \ set t\ by auto moreover have "?v' (lhs e) = ?p \ v \" using \e \ set t\ unfolding lvars_def by simp moreover have "rhs e \ ?v' \ = rhs e \ v \" apply (rule valuate_depend) using \\ t\ \e \ set t\ unfolding normalized_tableau_def by (auto simp add: lvars_def rvars_def) ultimately show ?thesis by auto qed qed qed qed lemma tableau_perm: assumes "lvars t1 = lvars t2" "rvars t1 = rvars t2" "\ t1" "\ t2" "\ v::'a::lrv valuation. v \\<^sub>t t1 \ v \\<^sub>t t2" shows "t1 <~~> t2" proof- { fix t1 t2 assume "lvars t1 = lvars t2" "rvars t1 = rvars t2" "\ t1" "\ v::'a::lrv valuation. v \\<^sub>t t1 \ v \\<^sub>t t2" have "set t1 \ set t2" proof (safe) fix a b assume "(a, b) \ set t1" then have "a \ lvars t1" unfolding lvars_def by force then have "a \ lvars t2" using \lvars t1 = lvars t2\ by simp then obtain b' where "(a, b') \ set t2" unfolding lvars_def by force have "\v::'a valuation. \v'. (\x\vars (b - b'). v' x = v x) \ (b - b') \ v' \ = 0" proof fix v::"'a valuation" obtain v' where "v' \\<^sub>t t1" "\ x \ rvars t1. v x = v' x" using recalc_tableau_lvars[of t1] \\ t1\ by auto have "v' \\<^sub>t t2" using \v' \\<^sub>t t1\ \\ v. v \\<^sub>t t1 \ v \\<^sub>t t2\ by simp have "b \v'\ = b' \v'\" using \(a, b) \ set t1\ \v' \\<^sub>t t1\ using \(a, b') \ set t2\ \v' \\<^sub>t t2\ unfolding satisfies_tableau_def satisfies_eq_def by force then have "(b - b') \v'\ = 0" using valuate_minus[of b b' v'] by auto moreover have "vars b \ rvars t1" "vars b' \ rvars t1" using \(a, b) \ set t1\ \(a, b') \ set t2\ \rvars t1 = rvars t2\ unfolding rvars_def by force+ then have "vars (b - b') \ rvars t1" using vars_minus[of b b'] by blast then have "\x\vars (b - b'). v' x = v x" using \\ x \ rvars t1. v x = v' x\ by auto ultimately show "\v'. (\x\vars (b - b'). 
v' x = v x) \ (b - b') \ v' \ = 0" by auto qed then have "b = b'" using all_val[of "b - b'"] by simp then show "(a, b) \ set t2" using \(a, b') \ set t2\ by simp qed } note * = this have "set t1 = set t2" using *[of t1 t2] *[of t2 t1] using assms by auto moreover have "distinct t1" "distinct t2" using \\ t1\ \\ t2\ unfolding normalized_tableau_def by (auto simp add: distinct_map) ultimately show ?thesis by (auto simp add: set_eq_iff_mset_eq_distinct mset_eq_perm) qed text\Elementary atoms are represented by the type \'a atom\ and semantics for atoms and sets of atoms is denoted by \\\<^sub>a\ and \\\<^sub>a\<^sub>s\ and given by: \ datatype 'a atom = Leq var 'a | Geq var 'a primrec atom_var::"'a atom \ var" where "atom_var (Leq var a) = var" | "atom_var (Geq var a) = var" primrec atom_const::"'a atom \ 'a" where "atom_const (Leq var a) = a" | "atom_const (Geq var a) = a" primrec satisfies_atom :: "'a::linorder valuation \ 'a atom \ bool" (infixl "\\<^sub>a" 100) where "v \\<^sub>a Leq x c \ v x \ c" | "v \\<^sub>a Geq x c \ v x \ c" definition satisfies_atom_set :: "'a::linorder valuation \ 'a atom set \ bool" (infixl "\\<^sub>a\<^sub>s" 100) where "v \\<^sub>a\<^sub>s as \ \ a \ as. v \\<^sub>a a" definition satisfies_atom' :: "'a::linorder valuation \ 'a atom \ bool" (infixl "\\<^sub>a\<^sub>e" 100) where "v \\<^sub>a\<^sub>e a \ v (atom_var a) = atom_const a" lemma satisfies_atom'_stronger: "v \\<^sub>a\<^sub>e a \ v \\<^sub>a a" by (cases a, auto simp: satisfies_atom'_def) abbreviation satisfies_atom_set' :: "'a::linorder valuation \ 'a atom set \ bool" (infixl "\\<^sub>a\<^sub>e\<^sub>s" 100) where "v \\<^sub>a\<^sub>e\<^sub>s as \ \ a \ as. v \\<^sub>a\<^sub>e a" lemma satisfies_atom_set'_stronger: "v \\<^sub>a\<^sub>e\<^sub>s as \ v \\<^sub>a\<^sub>s as" using satisfies_atom'_stronger unfolding satisfies_atom_set_def by auto text \There is also the indexed variant of an atom\ type_synonym ('i,'a) i_atom = "'i \ 'a atom" fun i_satisfies_atom_set :: "'i set \ 'a::linorder valuation \ ('i,'a) i_atom set \ bool" (infixl "\\<^sub>i\<^sub>a\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>a\<^sub>s as \ v \\<^sub>a\<^sub>s restrict_to I as" fun i_satisfies_atom_set' :: "'i set \ 'a::linorder valuation \ ('i,'a) i_atom set \ bool" (infixl "\\<^sub>i\<^sub>a\<^sub>e\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as \ v \\<^sub>a\<^sub>e\<^sub>s restrict_to I as" lemma i_satisfies_atom_set'_stronger: "Iv \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as \ Iv \\<^sub>i\<^sub>a\<^sub>s as" using satisfies_atom_set'_stronger[of _ "snd Iv"] by (cases Iv, auto) lemma satisifies_atom_restrict_to_Cons: "v \\<^sub>a\<^sub>s restrict_to I (set as) \ (i \ I \ v \\<^sub>a a) \ v \\<^sub>a\<^sub>s restrict_to I (set ((i,a) # as))" unfolding satisfies_atom_set_def by auto lemma satisfies_tableau_Cons: "v \\<^sub>t t \ v \\<^sub>e e \ v \\<^sub>t (e # t)" unfolding satisfies_tableau_def by auto definition distinct_indices_atoms :: "('i,'a) i_atom set \ bool" where "distinct_indices_atoms as = (\ i a b. 
(i,a) \ as \ (i,b) \ as \ atom_var a = atom_var b \ atom_const a = atom_const b)" text\ The specification of the preprocessing function is given by:\ locale Preprocess = fixes preprocess::"('i,'a::lrv) i_ns_constraint list \ tableau\ ('i,'a) i_atom list \ ((var,'a) mapping \ (var,'a) mapping) \ 'i list" assumes \ \The returned tableau is always normalized.\ preprocess_tableau_normalized: "preprocess cs = (t,as,trans_v,U) \ \ t" and \ \Tableau and atoms are equisatisfiable with starting non-strict constraints.\ i_preprocess_sat: "\ v. preprocess cs = (t,as,trans_v,U) \ I \ set U = {} \ (I,\v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t \ (I,\trans_v v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" and preprocess_unsat: "preprocess cs = (t, as,trans_v,U) \ (I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs \ \ v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t" and \ \distinct indices on ns-constraints ensures distinct indices in atoms\ preprocess_distinct: "preprocess cs = (t, as,trans_v, U) \ distinct_indices_ns (set cs) \ distinct_indices_atoms (set as)" and \ \unsat indices\ preprocess_unsat_indices: "preprocess cs = (t, as,trans_v, U) \ i \ set U \ \ (\ v. ({i},v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs)" and \ \preprocessing cannot introduce new indices\ preprocess_index: "preprocess cs = (t,as,trans_v, U) \ fst ` set as \ set U \ fst ` set cs" begin lemma preprocess_sat: "preprocess cs = (t,as,trans_v,U) \ U = [] \ \v\ \\<^sub>a\<^sub>s flat (set as) \ \v\ \\<^sub>t t \ \trans_v v\ \\<^sub>n\<^sub>s\<^sub>s flat (set cs)" using i_preprocess_sat[of cs t as trans_v U UNIV v] by auto end definition minimal_unsat_core_tabl_atoms :: "'i set \ tableau \ ('i,'a::lrv) i_atom set \ bool" where "minimal_unsat_core_tabl_atoms I t as = ( I \ fst ` as \ (\ (\ v. v \\<^sub>t t \ (I,v) \\<^sub>i\<^sub>a\<^sub>s as)) \ (distinct_indices_atoms as \ (\ J \ I. \ v. v \\<^sub>t t \ (J,v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as)))" lemma minimal_unsat_core_tabl_atomsD: assumes "minimal_unsat_core_tabl_atoms I t as" shows "I \ fst ` as" "\ (\ v. v \\<^sub>t t \ (I,v) \\<^sub>i\<^sub>a\<^sub>s as)" "distinct_indices_atoms as \ J \ I \ \ v. v \\<^sub>t t \ (J,v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as" using assms unfolding minimal_unsat_core_tabl_atoms_def by auto locale AssertAll = fixes assert_all :: "tableau \ ('i,'a::lrv) i_atom list \ 'i list + (var, 'a)mapping" assumes assert_all_sat: "\ t \ assert_all t as = Sat v \ \v\ \\<^sub>t t \ \v\ \\<^sub>a\<^sub>s flat (set as)" assumes assert_all_unsat: "\ t \ assert_all t as = Unsat I \ minimal_unsat_core_tabl_atoms (set I) t (set as)" text\Once the preprocessing is done and tableau and atoms are obtained, their satisfiability is checked by the \assert_all\ function. Its precondition is that the starting tableau is normalized, and its specification is analogue to the one for the \solve\ function. 
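To make this concrete (a hypothetical instance): for the tableau $[x_3 = x_1 + x_2]$ together with the indexed atoms $[(A,\ x_3 \leq 1),\ (B,\ x_3 \geq 2)]$, the function would have to report an unsatisfiable core; $\{A, B\}$ is such a core, and it is also minimal, since the tableau together with either atom alone is satisfiable (e.g.\ by $x_1 = x_2 = 1,\ x_3 = 2$ for the second atom).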
If \preprocess\ and \assert_all\ are available, the \solve_exec_ns\ can be defined, and it can easily be shown that this definition satisfies the specification.\ locale Solve_exec_ns' = Preprocess preprocess + AssertAll assert_all for preprocess:: "('i,'a::lrv) i_ns_constraint list \ tableau \ ('i,'a) i_atom list \ ((var,'a)mapping \ (var,'a)mapping) \ 'i list" and assert_all :: "tableau \ ('i,'a::lrv) i_atom list \ 'i list + (var, 'a) mapping" begin definition solve_exec_ns where "solve_exec_ns s \ case preprocess s of (t,as,trans_v,ui) \ (case ui of i # _ \ Inl [i] | _ \ (case assert_all t as of Inl I \ Inl I | Inr v \ Inr (trans_v v))) " end context Preprocess begin lemma preprocess_unsat_index: assumes prep: "preprocess cs = (t,as,trans_v,ui)" and i: "i \ set ui" shows "minimal_unsat_core_ns {i} (set cs)" proof - from preprocess_index[OF prep] have "set ui \ fst ` set cs" by auto with i have i': "i \ fst ` set cs" by auto from preprocess_unsat_indices[OF prep i] show ?thesis unfolding minimal_unsat_core_ns_def using i' by auto qed lemma preprocess_minimal_unsat_core: assumes prep: "preprocess cs = (t,as,trans_v,ui)" and unsat: "minimal_unsat_core_tabl_atoms I t (set as)" and inter: "I \ set ui = {}" shows "minimal_unsat_core_ns I (set cs)" proof - from preprocess_tableau_normalized[OF prep] have t: "\ t" . from preprocess_index[OF prep] have index: "fst ` set as \ set ui \ fst ` set cs" by auto from minimal_unsat_core_tabl_atomsD(1,2)[OF unsat] preprocess_unsat[OF prep, of I] have 1: "I \ fst ` set as" "\ (\ v. (I, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs)" by force+ show "minimal_unsat_core_ns I (set cs)" unfolding minimal_unsat_core_ns_def proof (intro conjI impI allI 1(2)) show "I \ fst ` set cs" using 1 index by auto fix J assume "distinct_indices_ns (set cs)" "J \ I" with preprocess_distinct[OF prep] have "distinct_indices_atoms (set as)" "J \ I" by auto from minimal_unsat_core_tabl_atomsD(3)[OF unsat this] obtain v where model: "v \\<^sub>t t" "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s set as" by auto from i_satisfies_atom_set'_stronger[OF model(2)] have model': "(J, v) \\<^sub>i\<^sub>a\<^sub>s set as" . define w where "w = Mapping.Mapping (\ x. Some (v x))" have "v = \w\" unfolding w_def map2fun_def by (intro ext, transfer, auto) with model model' have "\w\ \\<^sub>t t" "(J, \w\) \\<^sub>i\<^sub>a\<^sub>s set as" by auto from i_preprocess_sat[OF prep _ this(2,1)] \J \ I\ inter have "(J, \trans_v w\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by auto then show "\ w. (J, w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by auto qed qed end sublocale Solve_exec_ns' < Solve_exec_ns solve_exec_ns proof fix cs obtain t as trans_v ui where prep: "preprocess cs = (t,as,trans_v,ui)" by (cases "preprocess cs") from preprocess_tableau_normalized[OF prep] have t: "\ t" . 
from preprocess_index[OF prep] have index: "fst ` set as \ set ui \ fst ` set cs" by auto note solve = solve_exec_ns_def[of cs, unfolded prep split] { fix v assume "solve_exec_ns cs = Sat v" from this[unfolded solve] t assert_all_sat[OF t] preprocess_sat[OF prep] show " \v\ \\<^sub>n\<^sub>s\<^sub>s flat (set cs)" by (auto split: sum.splits list.splits) } { fix I assume res: "solve_exec_ns cs = Unsat I" show "minimal_unsat_core_ns (set I) (set cs)" proof (cases ui) case (Cons i uis) hence I: "I = [i]" using res[unfolded solve] by auto from preprocess_unsat_index[OF prep, of i] I Cons index show ?thesis by auto next case Nil from res[unfolded solve Nil] have assert: "assert_all t as = Unsat I" by (auto split: sum.splits) from assert_all_unsat[OF t assert] have "minimal_unsat_core_tabl_atoms (set I) t (set as)" . from preprocess_minimal_unsat_core[OF prep this] Nil show "minimal_unsat_core_ns (set I) (set cs)" by simp qed } qed subsection\Incrementally Asserting Atoms\ text\The function @{term assert_all} can be implemented by iteratively asserting one by one atom from the given list of atoms. \ type_synonym 'a bounds = "var \ 'a" text\Asserted atoms will be stored in a form of \emph{bounds} for a given variable. Bounds are of the form \l\<^sub>i \ x\<^sub>i \ u\<^sub>i\, where \l\<^sub>i\ and \u\<^sub>i\ and are either scalars or $\pm \infty$. Each time a new atom is asserted, a bound for the corresponding variable is updated (checking for conflict with the previous bounds). Since bounds for a variable can be either finite or $\pm \infty$, they are represented by (partial) maps from variables to values (\'a bounds = var \ 'a\). Upper and lower bounds are represented separately. Infinite bounds map to @{term None} and this is reflected in the semantics: \begin{small} \c \\<^sub>u\<^sub>b b \ case b of None \ False | Some b' \ c \ b'\ \c \\<^sub>u\<^sub>b b \ case b of None \ True | Some b' \ c \ b'\ \end{small} \noindent Strict comparisons, and comparisons with lower bounds are performed similarly. 
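To make these conventions concrete (using informal notation for the comparison operators): $3 \leq_{ub} Some\ 5$ holds because $3 \leq 5$, and $3 \leq_{ub} None$ holds as well, since a missing upper bound acts as $+\infty$; on the other hand, $7 \leq_{ub} Some\ 5$ fails, and $3 \geq_{ub} None$ fails, since there is no finite upper bound that could be reached or exceeded.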
\ abbreviation (input) le where "le lt x y \ lt x y \ x = y" definition geub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ False | Some b' \ le lt b' c" definition gtub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ False | Some b' \ lt b' c" definition leub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ True | Some b' \ le lt c b'" definition ltub ("\\<^sub>u\<^sub>b") where "\\<^sub>u\<^sub>b lt c b \ case b of None \ True | Some b' \ lt c b'" definition lelb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ False | Some b' \ le lt c b'" definition ltlb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ False | Some b' \ lt c b'" definition gelb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ True | Some b' \ le lt b' c" definition gtlb ("\\<^sub>l\<^sub>b") where "\\<^sub>l\<^sub>b lt c b \ case b of None \ True | Some b' \ lt b' c" definition ge_ubound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>u\<^sub>b" 100) where "c \\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition gt_ubound :: "'a::linorder \ 'a option \ bool" (infixl ">\<^sub>u\<^sub>b" 100) where "c >\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition le_ubound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>u\<^sub>b" 100) where "c \\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition lt_ubound :: "'a::linorder \ 'a option \ bool" (infixl "<\<^sub>u\<^sub>b" 100) where "c <\<^sub>u\<^sub>b b = \\<^sub>u\<^sub>b (<) c b" definition le_lbound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>l\<^sub>b" 100) where "c \\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" definition lt_lbound :: "'a::linorder \ 'a option \ bool" (infixl "<\<^sub>l\<^sub>b" 100) where "c <\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" definition ge_lbound :: "'a::linorder \ 'a option \ bool" (infixl "\\<^sub>l\<^sub>b" 100) where "c \\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" definition gt_lbound :: "'a::linorder \ 'a option \ bool" (infixl ">\<^sub>l\<^sub>b" 100) where "c >\<^sub>l\<^sub>b b = \\<^sub>l\<^sub>b (<) c b" lemmas bound_compare'_defs = geub_def gtub_def leub_def ltub_def gelb_def gtlb_def lelb_def ltlb_def lemmas bound_compare''_defs = ge_ubound_def gt_ubound_def le_ubound_def lt_ubound_def le_lbound_def lt_lbound_def ge_lbound_def gt_lbound_def lemmas bound_compare_defs = bound_compare'_defs bound_compare''_defs lemma opposite_dir [simp]: "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" "\\<^sub>l\<^sub>b (>) a b = \\<^sub>u\<^sub>b (<) a b" "\\<^sub>u\<^sub>b (>) a b = \\<^sub>l\<^sub>b (<) a b" by (case_tac[!] 
b) (auto simp add: bound_compare'_defs) (* Auxiliary lemmas about bound comparison *) lemma [simp]: "\ c \\<^sub>u\<^sub>b None " "\ c \\<^sub>l\<^sub>b None" by (auto simp add: bound_compare_defs) lemma neg_bounds_compare: "(\ (c \\<^sub>u\<^sub>b b)) \ c <\<^sub>u\<^sub>b b" "(\ (c \\<^sub>u\<^sub>b b)) \ c >\<^sub>u\<^sub>b b" "(\ (c >\<^sub>u\<^sub>b b)) \ c \\<^sub>u\<^sub>b b" "(\ (c <\<^sub>u\<^sub>b b)) \ c \\<^sub>u\<^sub>b b" "(\ (c \\<^sub>l\<^sub>b b)) \ c >\<^sub>l\<^sub>b b" "(\ (c \\<^sub>l\<^sub>b b)) \ c <\<^sub>l\<^sub>b b" "(\ (c <\<^sub>l\<^sub>b b)) \ c \\<^sub>l\<^sub>b b" "(\ (c >\<^sub>l\<^sub>b b)) \ c \\<^sub>l\<^sub>b b" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma bounds_compare_contradictory [simp]: "\c \\<^sub>u\<^sub>b b; c <\<^sub>u\<^sub>b b\ \ False" "\c \\<^sub>u\<^sub>b b; c >\<^sub>u\<^sub>b b\ \ False" "\c >\<^sub>u\<^sub>b b; c \\<^sub>u\<^sub>b b\ \ False" "\c <\<^sub>u\<^sub>b b; c \\<^sub>u\<^sub>b b\ \ False" "\c \\<^sub>l\<^sub>b b; c >\<^sub>l\<^sub>b b\ \ False" "\c \\<^sub>l\<^sub>b b; c <\<^sub>l\<^sub>b b\ \ False" "\c <\<^sub>l\<^sub>b b; c \\<^sub>l\<^sub>b b\ \ False" "\c >\<^sub>l\<^sub>b b; c \\<^sub>l\<^sub>b b\ \ False" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma compare_strict_nonstrict: "x <\<^sub>u\<^sub>b b \ x \\<^sub>u\<^sub>b b" "x >\<^sub>u\<^sub>b b \ x \\<^sub>u\<^sub>b b" "x <\<^sub>l\<^sub>b b \ x \\<^sub>l\<^sub>b b" "x >\<^sub>l\<^sub>b b \ x \\<^sub>l\<^sub>b b" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma [simp]: "\ x \ c; c <\<^sub>u\<^sub>b b \ \ x <\<^sub>u\<^sub>b b" "\ x < c; c \\<^sub>u\<^sub>b b \ \ x <\<^sub>u\<^sub>b b" "\ x \ c; c \\<^sub>u\<^sub>b b \ \ x \\<^sub>u\<^sub>b b" "\ x \ c; c >\<^sub>l\<^sub>b b \ \ x >\<^sub>l\<^sub>b b" "\ x > c; c \\<^sub>l\<^sub>b b \ \ x >\<^sub>l\<^sub>b b" "\ x \ c; c \\<^sub>l\<^sub>b b \ \ x \\<^sub>l\<^sub>b b" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma bounds_lg [simp]: "\ c >\<^sub>u\<^sub>b b; x \\<^sub>u\<^sub>b b\ \ x < c" "\ c \\<^sub>u\<^sub>b b; x <\<^sub>u\<^sub>b b\ \ x < c" "\ c \\<^sub>u\<^sub>b b; x \\<^sub>u\<^sub>b b\ \ x \ c" "\ c <\<^sub>l\<^sub>b b; x \\<^sub>l\<^sub>b b\ \ x > c" "\ c \\<^sub>l\<^sub>b b; x >\<^sub>l\<^sub>b b\ \ x > c" "\ c \\<^sub>l\<^sub>b b; x \\<^sub>l\<^sub>b b\ \ x \ c" by (case_tac[!] b) (auto simp add: bound_compare_defs) lemma bounds_compare_Some [simp]: "x \\<^sub>u\<^sub>b Some c \ x \ c" "x \\<^sub>u\<^sub>b Some c \ x \ c" "x <\<^sub>u\<^sub>b Some c \ x < c" "x >\<^sub>u\<^sub>b Some c \ x > c" "x \\<^sub>l\<^sub>b Some c \ x \ c" "x \\<^sub>l\<^sub>b Some c \ x \ c" "x >\<^sub>l\<^sub>b Some c \ x > c" "x <\<^sub>l\<^sub>b Some c \ x < c" by (auto simp add: bound_compare_defs) fun in_bounds where "in_bounds x v (lb, ub) = (v x \\<^sub>l\<^sub>b lb x \ v x \\<^sub>u\<^sub>b ub x)" fun satisfies_bounds :: "'a::linorder valuation \ 'a bounds \ 'a bounds \ bool" (infixl "\\<^sub>b" 100) where "v \\<^sub>b b \ (\ x. in_bounds x v b)" declare satisfies_bounds.simps [simp del] lemma satisfies_bounds_iff: "v \\<^sub>b (lb, ub) \ (\ x. 
v x \\<^sub>l\<^sub>b lb x \ v x \\<^sub>u\<^sub>b ub x)" by (auto simp add: satisfies_bounds.simps) lemma not_in_bounds: "\ (in_bounds x v (lb, ub)) = (v x <\<^sub>l\<^sub>b lb x \ v x >\<^sub>u\<^sub>b ub x)" using bounds_compare_contradictory(7) using bounds_compare_contradictory(2) using neg_bounds_compare(7)[of "v x" "lb x"] using neg_bounds_compare(2)[of "v x" "ub x"] by auto fun atoms_equiv_bounds :: "'a::linorder atom set \ 'a bounds \ 'a bounds \ bool" (infixl "\" 100) where "as \ (lb, ub) \ (\ v. v \\<^sub>a\<^sub>s as \ v \\<^sub>b (lb, ub))" declare atoms_equiv_bounds.simps [simp del] lemma atoms_equiv_bounds_simps: "as \ (lb, ub) \ \ v. v \\<^sub>a\<^sub>s as \ v \\<^sub>b (lb, ub)" by (simp add: atoms_equiv_bounds.simps) text\A valuation satisfies bounds iff the value of each variable respects both its lower and upper bound, i.e, @{thm satisfies_bounds_iff[no_vars]}. Asserted atoms are precisely encoded by the current bounds in a state (denoted by \\\) if every valuation satisfies them iff it satisfies the bounds, i.e., @{thm atoms_equiv_bounds_simps[no_vars, iff]}.\ text\The procedure also keeps track of a valuation that is a candidate solution. Whenever a new atom is asserted, it is checked whether the valuation is still satisfying. If not, the procedure tries to fix that by changing it and changing the tableau if necessary (but so that it remains equivalent to the initial tableau).\ text\Therefore, the state of the procedure stores the tableau (denoted by \\\), lower and upper bounds (denoted by \\\<^sub>l\ and \\\<^sub>u\, and ordered pair of lower and upper bounds denoted by \\\), candidate solution (denoted by \\\) and a flag (denoted by \\\) indicating if unsatisfiability has been detected so far:\ text\Since we also need to now about the indices of atoms, actually, the bounds are also indexed, and in addition to the flag for unsatisfiability, we also store an optional unsat core.\ type_synonym 'i bound_index = "var \ 'i" type_synonym ('i,'a) bounds_index = "(var, ('i \ 'a))mapping" datatype ('i,'a) state = State (\: "tableau") (\\<^sub>i\<^sub>l: "('i,'a) bounds_index") (\\<^sub>i\<^sub>u: "('i,'a) bounds_index") (\: "(var, 'a) mapping") (\: bool) (\\<^sub>c: "'i list option") definition indexl :: "('i,'a) state \ 'i bound_index" ("\\<^sub>l") where "\\<^sub>l s = (fst o the) o look (\\<^sub>i\<^sub>l s)" definition boundsl :: "('i,'a) state \ 'a bounds" ("\\<^sub>l") where "\\<^sub>l s = map_option snd o look (\\<^sub>i\<^sub>l s)" definition indexu :: "('i,'a) state \ 'i bound_index" ("\\<^sub>u") where "\\<^sub>u s = (fst o the) o look (\\<^sub>i\<^sub>u s)" definition boundsu :: "('i,'a) state \ 'a bounds" ("\\<^sub>u") where "\\<^sub>u s = map_option snd o look (\\<^sub>i\<^sub>u s)" abbreviation BoundsIndicesMap ("\\<^sub>i") where "\\<^sub>i s \ (\\<^sub>i\<^sub>l s, \\<^sub>i\<^sub>u s)" abbreviation Bounds :: "('i,'a) state \ 'a bounds \ 'a bounds" ("\") where "\ s \ (\\<^sub>l s, \\<^sub>u s)" abbreviation Indices :: "('i,'a) state \ 'i bound_index \ 'i bound_index" ("\") where "\ s \ (\\<^sub>l s, \\<^sub>u s)" abbreviation BoundsIndices :: "('i,'a) state \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index)" ("\\") where "\\ s \ (\ s, \ s)" fun satisfies_bounds_index :: "'i set \ 'a::lrv valuation \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index) \ bool" (infixl "\\<^sub>i\<^sub>b" 100) where "(I,v) \\<^sub>i\<^sub>b ((BL,BU),(IL,IU)) \ ( (\ x c. BL x = Some c \ IL x \ I \ v x \ c) \ (\ x c. 
BU x = Some c \ IU x \ I \ v x \ c))" declare satisfies_bounds_index.simps[simp del] fun satisfies_bounds_index' :: "'i set \ 'a::lrv valuation \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index) \ bool" (infixl "\\<^sub>i\<^sub>b\<^sub>e" 100) where "(I,v) \\<^sub>i\<^sub>b\<^sub>e ((BL,BU),(IL,IU)) \ ( (\ x c. BL x = Some c \ IL x \ I \ v x = c) \ (\ x c. BU x = Some c \ IU x \ I \ v x = c))" declare satisfies_bounds_index'.simps[simp del] fun atoms_imply_bounds_index :: "('i,'a::lrv) i_atom set \ ('a bounds \ 'a bounds) \ ('i bound_index \ 'i bound_index) \ bool" (infixl "\\<^sub>i" 100) where "as \\<^sub>i bi \ (\ I v. (I,v) \\<^sub>i\<^sub>a\<^sub>s as \ (I,v) \\<^sub>i\<^sub>b bi)" declare atoms_imply_bounds_index.simps[simp del] lemma i_satisfies_atom_set_mono: "as \ as' \ v \\<^sub>i\<^sub>a\<^sub>s as' \ v \\<^sub>i\<^sub>a\<^sub>s as" by (cases v, auto simp: satisfies_atom_set_def) lemma atoms_imply_bounds_index_mono: "as \ as' \ as \\<^sub>i bi \ as' \\<^sub>i bi" unfolding atoms_imply_bounds_index.simps using i_satisfies_atom_set_mono by blast definition satisfies_state :: "'a::lrv valuation \ ('i,'a) state \ bool" (infixl "\\<^sub>s" 100) where "v \\<^sub>s s \ v \\<^sub>b \ s \ v \\<^sub>t \ s" definition curr_val_satisfies_state :: "('i,'a::lrv) state \ bool" ("\") where "\ s \ \\ s\ \\<^sub>s s" fun satisfies_state_index :: "'i set \ 'a::lrv valuation \ ('i,'a) state \ bool" (infixl "\\<^sub>i\<^sub>s" 100) where "(I,v) \\<^sub>i\<^sub>s s \ (v \\<^sub>t \ s \ (I,v) \\<^sub>i\<^sub>b \\ s)" declare satisfies_state_index.simps[simp del] fun satisfies_state_index' :: "'i set \ 'a::lrv valuation \ ('i,'a) state \ bool" (infixl "\\<^sub>i\<^sub>s\<^sub>e" 100) where "(I,v) \\<^sub>i\<^sub>s\<^sub>e s \ (v \\<^sub>t \ s \ (I,v) \\<^sub>i\<^sub>b\<^sub>e \\ s)" declare satisfies_state_index'.simps[simp del] definition indices_state :: "('i,'a)state \ 'i set" where "indices_state s = { i. \ x b. look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ look (\\<^sub>i\<^sub>u s) x = Some (i,b)}" text \distinctness requires that for each index $i$, there is at most one variable $x$ and bound $b$ such that $x \leq b$ or $x \geq b$ or both are enforced.\ definition distinct_indices_state :: "('i,'a)state \ bool" where "distinct_indices_state s = (\ i x b x' b'. ((look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ look (\\<^sub>i\<^sub>u s) x = Some (i,b)) \ (look (\\<^sub>i\<^sub>l s) x' = Some (i,b') \ look (\\<^sub>i\<^sub>u s) x' = Some (i,b')) \ (x = x' \ b = b')))" lemma distinct_indices_stateD: assumes "distinct_indices_state s" shows "look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ look (\\<^sub>i\<^sub>u s) x = Some (i,b) \ look (\\<^sub>i\<^sub>l s) x' = Some (i,b') \ look (\\<^sub>i\<^sub>u s) x' = Some (i,b') \ x = x' \ b = b'" using assms unfolding distinct_indices_state_def by blast+ definition unsat_state_core :: "('i,'a::lrv) state \ bool" where "unsat_state_core s = (set (the (\\<^sub>c s)) \ indices_state s \ (\ (\ v. (set (the (\\<^sub>c s)),v) \\<^sub>i\<^sub>s s)))" definition subsets_sat_core :: "('i,'a::lrv) state \ bool" where "subsets_sat_core s = ((\ I. I \ set (the (\\<^sub>c s)) \ (\ v. 
(I,v) \\<^sub>i\<^sub>s\<^sub>e s)))" definition minimal_unsat_state_core :: "('i,'a::lrv) state \ bool" where "minimal_unsat_state_core s = (unsat_state_core s \ (distinct_indices_state s \ subsets_sat_core s))" lemma minimal_unsat_core_tabl_atoms_mono: assumes sub: "as \ bs" and unsat: "minimal_unsat_core_tabl_atoms I t as" shows "minimal_unsat_core_tabl_atoms I t bs" unfolding minimal_unsat_core_tabl_atoms_def proof (intro conjI impI allI) note min = unsat[unfolded minimal_unsat_core_tabl_atoms_def] from min have I: "I \ fst ` as" by blast with sub show "I \ fst ` bs" by blast from min have "(\v. v \\<^sub>t t \ (I, v) \\<^sub>i\<^sub>a\<^sub>s as)" by blast with i_satisfies_atom_set_mono[OF sub] show "(\v. v \\<^sub>t t \ (I, v) \\<^sub>i\<^sub>a\<^sub>s bs)" by blast fix J assume J: "J \ I" and dist_bs: "distinct_indices_atoms bs" hence dist: "distinct_indices_atoms as" using sub unfolding distinct_indices_atoms_def by blast from min dist J obtain v where v: "v \\<^sub>t t" "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as" by blast have "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s bs" unfolding i_satisfies_atom_set'.simps proof (intro ballI) fix a assume "a \ snd ` (bs \ J \ UNIV)" then obtain i where ia: "(i,a) \ bs" and i: "i \ J" by force with J have "i \ I" by auto with I obtain b where ib: "(i,b) \ as" by force with sub have ib': "(i,b) \ bs" by auto from dist_bs[unfolded distinct_indices_atoms_def, rule_format, OF ia ib'] have id: "atom_var a = atom_var b" "atom_const a = atom_const b" by auto from v(2)[unfolded i_satisfies_atom_set'.simps] i ib have "v \\<^sub>a\<^sub>e b" by force thus "v \\<^sub>a\<^sub>e a" using id unfolding satisfies_atom'_def by auto qed with v show "\v. v \\<^sub>t t \ (J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s bs" by blast qed lemma state_satisfies_index: assumes "v \\<^sub>s s" shows "(I,v) \\<^sub>i\<^sub>s s" unfolding satisfies_state_index.simps satisfies_bounds_index.simps proof (intro conjI impI allI) fix x c from assms[unfolded satisfies_state_def satisfies_bounds.simps, simplified] have "v \\<^sub>t \ s" and bnd: "v x \\<^sub>l\<^sub>b \\<^sub>l s x" "v x \\<^sub>u\<^sub>b \\<^sub>u s x" by auto show "v \\<^sub>t \ s" by fact show "\\<^sub>l s x = Some c \ \\<^sub>l s x \ I \ c \ v x" using bnd(1) by auto show "\\<^sub>u s x = Some c \ \\<^sub>u s x \ I \ v x \ c" using bnd(2) by auto qed lemma unsat_state_core_unsat: "unsat_state_core s \ \ (\ v. v \\<^sub>s s)" unfolding unsat_state_core_def using state_satisfies_index by blast definition tableau_valuated ("\") where "\ s \ \ x \ tvars (\ s). Mapping.lookup (\ s) x \ None" definition index_valid where "index_valid as (s :: ('i,'a) state) = (\ x b i. 
(look (\\<^sub>i\<^sub>l s) x = Some (i,b) \ ((i, Geq x b) \ as)) \ (look (\\<^sub>i\<^sub>u s) x = Some (i,b) \ ((i, Leq x b) \ as)))" lemma index_valid_indices_state: "index_valid as s \ indices_state s \ fst ` as" unfolding index_valid_def indices_state_def by force lemma index_valid_mono: "as \ bs \ index_valid as s \ index_valid bs s" unfolding index_valid_def by blast lemma index_valid_distinct_indices: assumes "index_valid as s" and "distinct_indices_atoms as" shows "distinct_indices_state s" unfolding distinct_indices_state_def proof (intro allI impI, goal_cases) case (1 i x b y c) note valid = assms(1)[unfolded index_valid_def, rule_format] from 1(1) valid[of x i b] have "(i, Geq x b) \ as \ (i, Leq x b) \ as" by auto then obtain a where a: "(i,a) \ as" "atom_var a = x" "atom_const a = b" by auto from 1(2) valid[of y i c] have y: "(i, Geq y c) \ as \ (i, Leq y c) \ as" by auto then obtain a' where a': "(i,a') \ as" "atom_var a' = y" "atom_const a' = c" by auto from assms(2)[unfolded distinct_indices_atoms_def, rule_format, OF a(1) a'(1)] show ?case using a a' by auto qed text\To be a solution of the initial problem, a valuation should satisfy the initial tableau and list of atoms. Since tableau is changed only by equivalency preserving transformations and asserted atoms are encoded in the bounds, a valuation is a solution if it satisfies both the tableau and the bounds in the final state (when all atoms have been asserted). So, a valuation \v\ satisfies a state \s\ (denoted by \\\<^sub>s\) if it satisfies the tableau and the bounds, i.e., @{thm satisfies_state_def [no_vars]}. Since \\\ should be a candidate solution, it should satisfy the state (unless the \\\ flag is raised). This is denoted by \\ s\ and defined by @{thm curr_val_satisfies_state_def[no_vars]}. 
\\ s\ will denote that all variables of \\ s\ are explicitly valuated in \\ s\.\ definition update\\ where [simp]: "update\\ field_update i x c s = field_update (upd x (i,c)) s" fun \\<^sub>i\<^sub>u_update where "\\<^sub>i\<^sub>u_update up (State T BIL BIU V U UC) = State T BIL (up BIU) V U UC" fun \\<^sub>i\<^sub>l_update where "\\<^sub>i\<^sub>l_update up (State T BIL BIU V U UC) = State T (up BIL) BIU V U UC" fun \_update where "\_update V (State T BIL BIU V_old U UC) = State T BIL BIU V U UC" fun \_update where "\_update T (State T_old BIL BIU V U UC) = State T BIL BIU V U UC" lemma update_simps[simp]: "\\<^sub>i\<^sub>u (\\<^sub>i\<^sub>u_update up s) = up (\\<^sub>i\<^sub>u s)" "\\<^sub>i\<^sub>l (\\<^sub>i\<^sub>u_update up s) = \\<^sub>i\<^sub>l s" "\ (\\<^sub>i\<^sub>u_update up s) = \ s" "\ (\\<^sub>i\<^sub>u_update up s) = \ s" "\ (\\<^sub>i\<^sub>u_update up s) = \ s" "\\<^sub>c (\\<^sub>i\<^sub>u_update up s) = \\<^sub>c s" "\\<^sub>i\<^sub>l (\\<^sub>i\<^sub>l_update up s) = up (\\<^sub>i\<^sub>l s)" "\\<^sub>i\<^sub>u (\\<^sub>i\<^sub>l_update up s) = \\<^sub>i\<^sub>u s" "\ (\\<^sub>i\<^sub>l_update up s) = \ s" "\ (\\<^sub>i\<^sub>l_update up s) = \ s" "\ (\\<^sub>i\<^sub>l_update up s) = \ s" "\\<^sub>c (\\<^sub>i\<^sub>l_update up s) = \\<^sub>c s" "\ (\_update V s) = V" "\\<^sub>i\<^sub>l (\_update V s) = \\<^sub>i\<^sub>l s" "\\<^sub>i\<^sub>u (\_update V s) = \\<^sub>i\<^sub>u s" "\ (\_update V s) = \ s" "\ (\_update V s) = \ s" "\\<^sub>c (\_update V s) = \\<^sub>c s" "\ (\_update T s) = T" "\\<^sub>i\<^sub>l (\_update T s) = \\<^sub>i\<^sub>l s" "\\<^sub>i\<^sub>u (\_update T s) = \\<^sub>i\<^sub>u s" "\ (\_update T s) = \ s" "\ (\_update T s) = \ s" "\\<^sub>c (\_update T s) = \\<^sub>c s" by (atomize(full), cases s, auto) declare \\<^sub>i\<^sub>u_update.simps[simp del] \\<^sub>i\<^sub>l_update.simps[simp del] fun set_unsat :: "'i list \ ('i,'a) state \ ('i,'a) state" where "set_unsat I (State T BIL BIU V U UC) = State T BIL BIU V True (Some (remdups I))" lemma set_unsat_simps[simp]: "\\<^sub>i\<^sub>l (set_unsat I s) = \\<^sub>i\<^sub>l s" "\\<^sub>i\<^sub>u (set_unsat I s) = \\<^sub>i\<^sub>u s" "\ (set_unsat I s) = \ s" "\ (set_unsat I s) = \ s" "\ (set_unsat I s) = True" "\\<^sub>c (set_unsat I s) = Some (remdups I)" by (atomize(full), cases s, auto) datatype ('i,'a) Direction = Direction (lt: "'a::linorder \ 'a \ bool") (LBI: "('i,'a) state \ ('i,'a) bounds_index") (UBI: "('i,'a) state \ ('i,'a) bounds_index") (LB: "('i,'a) state \ 'a bounds") (UB: "('i,'a) state \ 'a bounds") (LI: "('i,'a) state \ 'i bound_index") (UI: "('i,'a) state \ 'i bound_index") (UBI_upd: "(('i,'a) bounds_index \ ('i,'a) bounds_index) \ ('i,'a) state \ ('i,'a) state") (LE: "var \ 'a \ 'a atom") (GE: "var \ 'a \ 'a atom") (le_rat: "rat \ rat \ bool") definition Positive where [simp]: "Positive \ Direction (<) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>u_update Leq Geq (\)" definition Negative where [simp]: "Negative \ Direction (>) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>l_update Geq Leq (\)" text\Assuming that the \\\ flag and the current valuation \\\ in the final state determine the solution of a problem, the \assert_all\ function can be reduced to the \assert_all_state\ function that operates on the states: @{text[display] "assert_all t as \ let s = assert_all_state t as in if (\ s) then (False, None) else (True, Some (\ s))" } \ text\Specification for the \assert_all_state\ can 
be directly obtained from the specification of \assert_all\, and it describes the connection between the valuation in the final state and the initial tableau and atoms. However, we will make an additional refinement step and give stronger assumptions about the \assert_all_state\ function that describes the connection between the initial tableau and atoms with the tableau and bounds in the final state.\ locale AssertAllState = fixes assert_all_state::"tableau \ ('i,'a::lrv) i_atom list \ ('i,'a) state" assumes \ \The final and the initial tableau are equivalent.\ assert_all_state_tableau_equiv: "\ t \ assert_all_state t as = s' \ (v::'a valuation) \\<^sub>t t \ v \\<^sub>t \ s'" and \ \If @{term \} is not raised, then the valuation in the final state satisfies its tableau and its bounds (that are, in this case, equivalent to the set of all asserted bounds).\ assert_all_state_sat: "\ t \ assert_all_state t as = s' \ \ \ s' \ \ s'" and assert_all_state_sat_atoms_equiv_bounds: "\ t \ assert_all_state t as = s' \ \ \ s' \ flat (set as) \ \ s'" and \ \If @{term \} is raised, then there is no valuation satisfying the tableau and the bounds in the final state (that are, in this case, equivalent to a subset of asserted atoms).\ assert_all_state_unsat: "\ t \ assert_all_state t as = s' \ \ s' \ minimal_unsat_state_core s'" and assert_all_state_unsat_atoms_equiv_bounds: "\ t \ assert_all_state t as = s' \ \ s' \ set as \\<^sub>i \\ s'" and \ \The set of indices is taken from the constraints\ assert_all_state_indices: "\ t \ assert_all_state t as = s \ indices_state s \ fst ` set as" and assert_all_index_valid: "\ t \ assert_all_state t as = s \ index_valid (set as) s" begin definition assert_all where "assert_all t as \ let s = assert_all_state t as in if (\ s) then Unsat (the (\\<^sub>c s)) else Sat (\ s)" end text\The \assert_all_state\ function can be implemented by first applying the \init\ function that creates an initial state based on the starting tableau, and then by iteratively applying the \assert\ function for each atom in the starting atoms list.\ text\ \begin{small} \assert_loop as s \ foldl (\ s' a. if (\ s') then s' else assert a s') s as\ \assert_all_state t as \ assert_loop ats (init t)\ \end{small} \ locale Init' = fixes init :: "tableau \ ('i,'a::lrv) state" assumes init'_tableau_normalized: "\ t \ \ (\ (init t))" assumes init'_tableau_equiv: "\ t \ (v::'a valuation) \\<^sub>t t = v \\<^sub>t \ (init t)" assumes init'_sat: "\ t \ \ \ (init t) \ \ (init t)" assumes init'_unsat: "\ t \ \ (init t) \ minimal_unsat_state_core (init t)" assumes init'_atoms_equiv_bounds: "\ t \ {} \ \ (init t)" assumes init'_atoms_imply_bounds_index: "\ t \ {} \\<^sub>i \\ (init t)" text\Specification for \init\ can be obtained from the specification of \asser_all_state\ since all its assumptions must also hold for \init\ (when the list of atoms is empty). Also, since \init\ is the first step in the \assert_all_state\ implementation, the precondition for \init\ the same as for the \assert_all_state\. However, unsatisfiability is never going to be detected during initialization and @{term \} flag is never going to be raised. Also, the tableau in the initial state can just be initialized with the starting tableau. The condition @{term "{} \ \ (init t)"} is equivalent to asking that initial bounds are empty. 
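As an informal side remark on why initialization never fails (relying on the fact that linear polynomials in this development carry no constant part): the all-zero valuation satisfies every equation $x = p$ of the starting tableau, so a satisfying candidate valuation always exists and the unsatisfiability flag never needs to be raised during initialization.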
Therefore, specification for \init\ can be refined to:\ locale Init = fixes init::"tableau \ ('i,'a::lrv) state" assumes \ \Tableau in the initial state for @{text t} is @{text t}:\ init_tableau_id: "\ (init t) = t" and \ \Since unsatisfiability is not detected, @{text \} flag must not be set:\ init_unsat_flag: "\ \ (init t)" and \ \The current valuation must satisfy the tableau:\ init_satisfies_tableau: "\\ (init t)\ \\<^sub>t t" and \ \In an initial state no atoms are yet asserted so the bounds must be empty:\ init_bounds: "\\<^sub>i\<^sub>l (init t) = Mapping.empty" "\\<^sub>i\<^sub>u (init t) = Mapping.empty" and \ \All tableau vars are valuated:\ init_tableau_valuated: "\ (init t)" begin lemma init_satisfies_bounds: "\\ (init t)\ \\<^sub>b \ (init t)" using init_bounds unfolding satisfies_bounds.simps in_bounds.simps bound_compare_defs by (auto simp: boundsl_def boundsu_def) lemma init_satisfies: "\ (init t)" using init_satisfies_tableau init_satisfies_bounds init_tableau_id unfolding curr_val_satisfies_state_def satisfies_state_def by simp lemma init_atoms_equiv_bounds: "{} \ \ (init t)" using init_bounds unfolding atoms_equiv_bounds.simps satisfies_bounds.simps in_bounds.simps satisfies_atom_set_def unfolding bound_compare_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma init_atoms_imply_bounds_index: "{} \\<^sub>i \\ (init t)" using init_bounds unfolding atoms_imply_bounds_index.simps satisfies_bounds_index.simps in_bounds.simps i_satisfies_atom_set.simps satisfies_atom_set_def unfolding bound_compare_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma init_tableau_normalized: "\ t \ \ (\ (init t))" using init_tableau_id by simp lemma init_index_valid: "index_valid as (init t)" using init_bounds unfolding index_valid_def by auto lemma init_indices: "indices_state (init t) = {}" unfolding indices_state_def init_bounds by auto end sublocale Init < Init' init using init_tableau_id init_satisfies init_unsat_flag init_atoms_equiv_bounds init_atoms_imply_bounds_index by unfold_locales auto abbreviation vars_list where "vars_list t \ remdups (map lhs t @ (concat (map (Abstract_Linear_Poly.vars_list \ rhs) t)))" lemma "tvars t = set (vars_list t)" by (auto simp add: set_vars_list lvars_def rvars_def) text\\smallskip The \assert\ function asserts a single atom. Since the \init\ function does not raise the \\\ flag, from the definition of \assert_loop\, it is clear that the flag is not raised when the \assert\ function is called. Moreover, the assumptions about the \assert_all_state\ imply that the loop invariant must be that if the \\\ flag is not raised, then the current valuation must satisfy the state (i.e., \\ s\). The \assert\ function will be more easily implemented if it is always applied to a state with a normalized and valuated tableau, so we make this another loop invariant. Therefore, the precondition for the \assert a s\ function call is that \\ \ s\, \\ s\, \\ (\ s)\ and \\ s\ hold.
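How the loop preserves the first of these preconditions can be seen in the following small runnable sketch (Haskell, outside the formalization; \St\, \assertLoop\ and the toy \demo\ assertion are hypothetical stand-ins): the fold skips the assertion step as soon as the unsat flag is raised, so the assertion function is only ever applied to states whose flag is still off.

```haskell
import Data.List (foldl')

-- A stand-in state: we only need to observe its unsat flag here.
data St a = St { unsatFlag :: Bool, payload :: a } deriving Show

-- The type of a single-atom assertion (the locale parameter `assert`).
type Assert atom a = atom -> St a -> St a

-- The loop never applies `assert` to a state whose unsat flag is raised,
-- so the precondition "flag not raised" holds at every call by construction.
assertLoop :: Assert atom a -> [atom] -> St a -> St a
assertLoop assert atoms s0 = foldl' step s0 atoms
  where step s a = if unsatFlag s then s else assert a s

-- Toy instance: collect integer "atoms", flagging unsat on a negative one.
main :: IO ()
main = print (assertLoop demo [3, 5, -1, 7] (St False []))
  where demo a s
          | a < (0 :: Int) = s { unsatFlag = True }
          | otherwise      = s { payload = a : payload s }
```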
The specification for \assert\ directly follows from the specification of \assert_all_state\ (except that it is additionally required that bounds reflect asserted atoms also when unsatisfiability is detected, and that it is required that \assert\ keeps the tableau normalized and valuated).\ locale Assert = fixes assert::"('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" assumes \ \Tableau remains equivalent to the previous one and normalized and valuated.\ assert_tableau: "\\ \ s; \ s; \ (\ s); \ s\ \ let s' = assert a s in ((v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ s') \ \ (\ s') \ \ s'" and \ \If the @{term \} flag is not raised, then the current valuation is updated so that it satisfies the current tableau and the current bounds.\ assert_sat: "\\ \ s; \ s; \ (\ s); \ s\ \ \ \ (assert a s) \ \ (assert a s)" and \ \The set of asserted atoms remains equivalent to the bounds in the state.\ assert_atoms_equiv_bounds: "\\ \ s; \ s; \ (\ s); \ s\ \ flat ats \ \ s \ flat (ats \ {a}) \ \ (assert a s)" and \ \There is a subset of asserted atoms which remains index-equivalent to the bounds in the state.\ assert_atoms_imply_bounds_index: "\\ \ s; \ s; \ (\ s); \ s\ \ ats \\<^sub>i \\ s \ insert a ats \\<^sub>i \\ (assert a s)" and \ \If the @{term \} flag is raised, then there is no valuation that satisfies both the current tableau and the current bounds.\ assert_unsat: "\\ \ s; \ s; \ (\ s); \ s; index_valid ats s\ \ \ (assert a s) \ minimal_unsat_state_core (assert a s)" and assert_index_valid: "\\ \ s; \ s; \ (\ s); \ s\ \ index_valid ats s \ index_valid (insert a ats) (assert a s)" begin lemma assert_tableau_equiv: "\\ \ s; \ s; \ (\ s); \ s\ \ (v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ (assert a s)" using assert_tableau by (simp add: Let_def) lemma assert_tableau_normalized: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (\ (assert a s))" using assert_tableau by (simp add: Let_def) lemma assert_tableau_valuated: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (assert a s)" using assert_tableau by (simp add: Let_def) end locale AssertAllState' = Init init + Assert assert for init :: "tableau \ ('i,'a::lrv) state" and assert :: "('i,'a) i_atom \ ('i,'a) state \ ('i,'a) state" begin definition assert_loop where "assert_loop as s \ foldl (\ s' a. if (\ s') then s' else assert a s') s as" definition assert_all_state where [simp]: "assert_all_state t as \ assert_loop as (init t)" lemma AssertAllState'_precond: "\ t \ \ (\ (assert_all_state t as)) \ (\ (assert_all_state t as)) \ (\ \ (assert_all_state t as) \ \ (assert_all_state t as))" unfolding assert_all_state_def assert_loop_def using init_satisfies init_tableau_normalized init_index_valid using assert_sat assert_tableau_normalized init_tableau_valuated assert_tableau_valuated by (induct as rule: rev_induct) auto lemma AssertAllState'Induct: assumes "\ t" "P {} (init t)" "\ as bs t. as \ bs \ P as t \ P bs t" "\ s a as. \\ \ s; \ s; \ (\ s); \ s; P as s; index_valid as s\ \ P (insert a as) (assert a s)" shows "P (set as) (assert_all_state t as)" proof - have "P (set as) (assert_all_state t as) \ index_valid (set as) (assert_all_state t as)" proof (induct as rule: rev_induct) case Nil then show ?case unfolding assert_all_state_def assert_loop_def using assms(2) init_index_valid by auto next case (snoc a as') let ?f = "\s' a. 
if \ s' then s' else assert a s'" let ?s = "foldl ?f (init t) as'" show ?case proof (cases "\ ?s") case True from snoc index_valid_mono[of _ "set (a # as')" "(assert_all_state t as')"] have index: "index_valid (set (a # as')) (assert_all_state t as')" by auto from snoc assms(3)[of "set as'" "set (a # as')"] have P: "P (set (a # as')) (assert_all_state t as')" by auto show ?thesis using True P index unfolding assert_all_state_def assert_loop_def by simp next case False then show ?thesis using snoc using assms(1) assms(4) using AssertAllState'_precond assert_index_valid unfolding assert_all_state_def assert_loop_def by auto qed qed then show ?thesis .. qed lemma AssertAllState_index_valid: "\ t \ index_valid (set as) (assert_all_state t as)" by (rule AssertAllState'Induct, auto intro: assert_index_valid init_index_valid index_valid_mono) lemma AssertAllState'_sat_atoms_equiv_bounds: "\ t \ \ \ (assert_all_state t as) \ flat (set as) \ \ (assert_all_state t as)" using AssertAllState'_precond using init_atoms_equiv_bounds assert_atoms_equiv_bounds unfolding assert_all_state_def assert_loop_def by (induct as rule: rev_induct) auto lemma AssertAllState'_unsat_atoms_implies_bounds: assumes "\ t" shows "set as \\<^sub>i \\ (assert_all_state t as)" proof (induct as rule: rev_induct) case Nil then show ?case using assms init_atoms_imply_bounds_index unfolding assert_all_state_def assert_loop_def by simp next case (snoc a as') let ?s = "assert_all_state t as'" show ?case proof (cases "\ ?s") case True then show ?thesis using snoc atoms_imply_bounds_index_mono[of "set as'" "set (as' @ [a])"] unfolding assert_all_state_def assert_loop_def by auto next case False then have id: "assert_all_state t (as' @ [a]) = assert a ?s" unfolding assert_all_state_def assert_loop_def by simp from snoc have as': "set as' \\<^sub>i \\ ?s" by auto from AssertAllState'_precond[of t as'] assms False have "\ ?s" "\ (\ ?s)" "\ ?s" by auto from assert_atoms_imply_bounds_index[OF False this as', of a] show ?thesis unfolding id by auto qed qed end text\Under these assumptions, it can easily be shown (mainly by induction) that the previously shown implementation of \assert_all_state\ satisfies its specification.\ sublocale AssertAllState' < AssertAllState assert_all_state proof fix v::"'a valuation" and t as s' assume *: "\ t" and id: "assert_all_state t as = s'" note idsym = id[symmetric] show "v \\<^sub>t t = v \\<^sub>t \ s'" unfolding idsym using init_tableau_id[of t] assert_tableau_equiv[of _ v] by (induct rule: AssertAllState'Induct) (auto simp add: * ) show "\ \ s' \ \ s'" unfolding idsym using AssertAllState'_precond by (simp add: * ) show "\ \ s' \ flat (set as) \ \ s'" unfolding idsym using * by (rule AssertAllState'_sat_atoms_equiv_bounds) show "\ s' \ set as \\<^sub>i \\ s'" using * unfolding idsym by (rule AssertAllState'_unsat_atoms_implies_bounds) show "\ s' \ minimal_unsat_state_core s'" using init_unsat_flag assert_unsat assert_index_valid unfolding idsym by (induct rule: AssertAllState'Induct) (auto simp add: * ) show "indices_state s' \ fst ` set as" unfolding idsym using * by (intro index_valid_indices_state, induct rule: AssertAllState'Induct, auto simp: init_index_valid index_valid_mono assert_index_valid) show "index_valid (set as) s'" using "*" AssertAllState_index_valid idsym by blast qed subsection\Asserting Single Atoms\ text\The @{term assert} function is split in two phases. First, @{term assert_bound} updates the bounds and checks only for conflicts cheap to detect. 
Next, \check\ performs the full simplex algorithm. The \assert\ function can be implemented as \assert a s = check (assert_bound a s)\. Note that it is also possible to do the first phase for several asserted atoms, and only then to let the expensive second phase work. \medskip Asserting an atom \x \ b\ begins with the function \assert_bound\. If the atom is subsumed by the current bounds, then no changes are performed. Otherwise, bounds for \x\ are changed to incorporate the atom. If the atom is inconsistent with the previous bounds for \x\, the @{term \} flag is raised. If \x\ is not a lhs variable in the current tableau and if the value for \x\ in the current valuation violates the new bound \b\, the value for \x\ can be updated and set to \b\, meanwhile updating the values for lhs variables of the tableau so that it remains satisfied. Otherwise, no changes to the current valuation are performed.\ fun satisfies_bounds_set :: "'a::linorder valuation \ 'a bounds \ 'a bounds \ var set \ bool" where "satisfies_bounds_set v (lb, ub) S \ (\ x \ S. in_bounds x v (lb, ub))" declare satisfies_bounds_set.simps [simp del] syntax "_satisfies_bounds_set" :: "(var \ 'a::linorder) \ 'a bounds \ 'a bounds \ var set \ bool" ("_ \\<^sub>b _ \/ _") translations "v \\<^sub>b b \ S" == "CONST satisfies_bounds_set v b S" lemma satisfies_bounds_set_iff: "v \\<^sub>b (lb, ub) \ S \ (\ x \ S. v x \\<^sub>l\<^sub>b lb x \ v x \\<^sub>u\<^sub>b ub x)" by (simp add: satisfies_bounds_set.simps) definition curr_val_satisfies_no_lhs ("\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s") where "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s \ \\ s\ \\<^sub>t (\ s) \ (\\ s\ \\<^sub>b (\ s) \ (- lvars (\ s)))" lemma satisfies_satisfies_no_lhs: "\ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" by (simp add: curr_val_satisfies_state_def satisfies_state_def curr_val_satisfies_no_lhs_def satisfies_bounds.simps satisfies_bounds_set.simps) definition bounds_consistent :: "('i,'a::linorder) state \ bool" ("\") where "\ s \ \ x. if \\<^sub>l s x = None \ \\<^sub>u s x = None then True else the (\\<^sub>l s x) \ the (\\<^sub>u s x)" text\So, the \assert_bound\ function must ensure that the given atom is included in the bounds, that the tableau remains satisfied by the valuation and that all variables except the lhs variables in the tableau are within their bounds. To formalize this, we introduce the notation \v \\<^sub>b (lb, ub) \ S\, and define @{thm satisfies_bounds_set_iff[no_vars]}, and @{thm curr_val_satisfies_no_lhs_def[no_vars]}. The \assert_bound\ function raises the \\\ flag if and only if lower and upper bounds overlap. 
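The two notions just introduced can be tried out on concrete data with the following runnable sketch (Haskell; it is only an illustration, and all names are ours rather than the formal constants): bound satisfaction restricted to a set of variables, and consistency of the bounds in the sense that a present lower bound never exceeds the corresponding present upper bound.

```haskell
import qualified Data.Map as M
import qualified Data.Set as S

type Var    = String
type Bounds = (M.Map Var Rational, M.Map Var Rational)   -- (lower, upper)

-- v satisfies the bounds restricted to the variable set vs
-- (in the invariant, vs is the set of non-lhs variables of the tableau).
satisfiesBoundsOn :: M.Map Var Rational -> Bounds -> S.Set Var -> Bool
satisfiesBoundsOn v (lb, ub) vs = all ok (S.toList vs)
  where val x = M.findWithDefault 0 x v
        ok x  = maybe True (<= val x) (M.lookup x lb)
             && maybe True (>= val x) (M.lookup x ub)

-- Bounds are consistent iff, whenever both bounds of a variable are present,
-- the lower one does not exceed the upper one.
boundsConsistent :: Bounds -> Bool
boundsConsistent (lb, ub) =
  and [ l <= u | (x, l) <- M.toList lb, Just u <- [M.lookup x ub] ]

main :: IO ()
main = do
  let bnds = (M.fromList [("x", 0)], M.fromList [("x", 3), ("y", 1)])
  print (boundsConsistent bnds)                                  -- True
  print (satisfiesBoundsOn (M.fromList [("x", 2), ("y", 5)]) bnds
                           (S.fromList ["x"]))                   -- True: "y" is not checked
```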
This is formalized as @{thm bounds_consistent_def[no_vars]}.\ lemma satisfies_bounds_consistent: "(v::'a::linorder valuation) \\<^sub>b \ s \ \ s" unfolding satisfies_bounds.simps in_bounds.simps bounds_consistent_def bound_compare_defs by (auto split: option.split) force lemma satisfies_consistent: "\ s \ \ s" by (auto simp add: curr_val_satisfies_state_def satisfies_state_def satisfies_bounds_consistent) lemma bounds_consistent_geq_lb: "\\ s; \\<^sub>u s x\<^sub>i = Some c\ \ c \\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i" unfolding bounds_consistent_def by (cases "\\<^sub>l s x\<^sub>i", auto simp add: bound_compare_defs split: if_splits) (erule_tac x="x\<^sub>i" in allE, auto) lemma bounds_consistent_leq_ub: "\\ s; \\<^sub>l s x\<^sub>i = Some c\ \ c \\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i" unfolding bounds_consistent_def by (cases "\\<^sub>u s x\<^sub>i", auto simp add: bound_compare_defs split: if_splits) (erule_tac x="x\<^sub>i" in allE, auto) lemma bounds_consistent_gt_ub: "\\ s; c <\<^sub>l\<^sub>b \\<^sub>l s x \ \ \ c >\<^sub>u\<^sub>b \\<^sub>u s x" unfolding bounds_consistent_def by (case_tac[!] "\\<^sub>l s x", case_tac[!] "\\<^sub>u s x") (auto simp add: bound_compare_defs, erule_tac x="x" in allE, simp) lemma bounds_consistent_lt_lb: "\\ s; c >\<^sub>u\<^sub>b \\<^sub>u s x \ \ \ c <\<^sub>l\<^sub>b \\<^sub>l s x" unfolding bounds_consistent_def by (case_tac[!] "\\<^sub>l s x", case_tac[!] "\\<^sub>u s x") (auto simp add: bound_compare_defs, erule_tac x="x" in allE, simp) text\Since the \assert_bound\ is the first step in the \assert\ function implementation, the preconditions for \assert_bound\ are the same as the preconditions for the \assert\ function. The specification for the \assert_bound\ is:\ locale AssertBound = fixes assert_bound::"('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" assumes \ \The tableau remains unchanged and valuated.\ assert_bound_tableau: "\\ \ s; \ s; \ (\ s); \ s\ \ assert_bound a s = s' \ \ s' = \ s \ \ s'" and \ \If the @{term \} flag is not set, all but the lhs variables in the tableau remain within their bounds, the new valuation satisfies the tableau, and bounds do not overlap.\ assert_bound_sat: "\\ \ s; \ s; \ (\ s); \ s\ \ assert_bound a s = s' \ \ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ s'" and \ \The set of asserted atoms remains equivalent to the bounds in the state.\ assert_bound_atoms_equiv_bounds: "\\ \ s; \ s; \ (\ s); \ s\ \ flat ats \ \ s \ flat (ats \ {a}) \ \ (assert_bound a s)" and assert_bound_atoms_imply_bounds_index: "\\ \ s; \ s; \ (\ s); \ s\ \ ats \\<^sub>i \\ s \ insert a ats \\<^sub>i \\ (assert_bound a s)" and \ \The @{term \} flag is raised only if the bounds became inconsistent:\ assert_bound_unsat: "\\ \ s; \ s; \ (\ s); \ s\ \ index_valid as s \ assert_bound a s = s' \ \ s' \ minimal_unsat_state_core s'" and assert_bound_index_valid: "\\ \ s; \ s; \ (\ s); \ s\ \ index_valid as s \ index_valid (insert a as) (assert_bound a s)" begin lemma assert_bound_tableau_id: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (assert_bound a s) = \ s" using assert_bound_tableau by blast lemma assert_bound_tableau_valuated: "\\ \ s; \ s; \ (\ s); \ s\ \ \ (assert_bound a s)" using assert_bound_tableau by blast end locale AssertBoundNoLhs = fixes assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" assumes assert_bound_nolhs_tableau_id: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ \ (assert_bound a s) = \ s" assumes assert_bound_nolhs_sat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s;
\ (\ s); \ s; \ s\ \ \ \ (assert_bound a s) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound a s) \ \ (assert_bound a s)" assumes assert_bound_nolhs_atoms_equiv_bounds: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ flat ats \ \ s \ flat (ats \ {a}) \ \ (assert_bound a s)" assumes assert_bound_nolhs_atoms_imply_bounds_index: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ ats \\<^sub>i \\ s \ insert a ats \\<^sub>i \\ (assert_bound a s)" assumes assert_bound_nolhs_unsat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ index_valid as s \ \ (assert_bound a s) \ minimal_unsat_state_core (assert_bound a s)" assumes assert_bound_nolhs_tableau_valuated: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ \ (assert_bound a s)" assumes assert_bound_nolhs_index_valid: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s\ \ index_valid as s \ index_valid (insert a as) (assert_bound a s)" sublocale AssertBoundNoLhs < AssertBound by unfold_locales ((metis satisfies_satisfies_no_lhs satisfies_consistent assert_bound_nolhs_tableau_id assert_bound_nolhs_sat assert_bound_nolhs_atoms_equiv_bounds assert_bound_nolhs_index_valid assert_bound_nolhs_atoms_imply_bounds_index assert_bound_nolhs_unsat assert_bound_nolhs_tableau_valuated)+) text\The second phase of \assert\, the \check\ function, is the heart of the Simplex algorithm. It is always called after \assert_bound\, but in two different situations. In the first case \assert_bound\ raised the \\\ flag and then \check\ should retain the flag and should not perform any changes. In the second case \assert_bound\ did not raise the \\\ flag, so \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\, \\ s\, \\ (\ s)\, and \\ s\ hold.\ locale Check = fixes check::"('i,'a::lrv) state \ ('i,'a) state" assumes \ \If @{text check} is called from an inconsistent state, the state is unchanged.\ check_unsat_id: "\ s \ check s = s" and \ \The tableau remains equivalent to the previous one, normalized and valuated.\ check_tableau: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ let s' = check s in ((v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ s') \ \ (\ s') \ \ s'" and \ \The bounds remain unchanged.\ check_bounds_id: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \\<^sub>i (check s) = \\<^sub>i s" and \ \If @{term \} flag is not raised, the current valuation @{text "\"} satisfies both the tableau and the bounds and if it is raised, there is no valuation that satisfies them.\ check_sat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ \ (check s) \ \ (check s)" and check_unsat: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ (check s) \ minimal_unsat_state_core (check s)" begin lemma check_tableau_equiv: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ (v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ (check s)" using check_tableau by (simp add: Let_def) lemma check_tableau_index_valid: assumes "\ \ s" " \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" shows "index_valid as (check s) = index_valid as s" unfolding index_valid_def using check_bounds_id[OF assms] by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma check_tableau_normalized: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ (\ (check s))" using check_tableau by (simp add: Let_def) lemma 
check_tableau_valuated: "\\ \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ (\ s); \ s\ \ \ (check s)" using check_tableau by (simp add: Let_def) lemma check_indices_state: assumes "\ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ \ s \ \ s" "\ \ s \ \ (\ s)" "\ \ s \ \ s" shows "indices_state (check s) = indices_state s" using check_bounds_id[OF _ assms] check_unsat_id[of s] unfolding indices_state_def by (cases "\ s", auto) lemma check_distinct_indices_state: assumes "\ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ \ s \ \ s" "\ \ s \ \ (\ s)" "\ \ s \ \ s" shows "distinct_indices_state (check s) = distinct_indices_state s" using check_bounds_id[OF _ assms] check_unsat_id[of s] unfolding distinct_indices_state_def by (cases "\ s", auto) end locale Assert' = AssertBound assert_bound + Check check for assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" and check :: "('i,'a::lrv) state \ ('i,'a) state" begin definition assert :: "('i,'a) i_atom \ ('i,'a) state \ ('i,'a) state" where "assert a s \ check (assert_bound a s)" lemma Assert'Precond: assumes "\ \ s" "\ s" "\ (\ s)" "\ s" shows "\ (\ (assert_bound a s))" "\ \ (assert_bound a s) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound a s) \ \ (assert_bound a s)" "\ (assert_bound a s)" using assms using assert_bound_tableau_id assert_bound_sat assert_bound_tableau_valuated by (auto simp add: satisfies_bounds_consistent Let_def) end sublocale Assert' < Assert assert proof fix s::"('i,'a) state" and v::"'a valuation" and a::"('i,'a) i_atom" assume *: "\ \ s" "\ s" "\ (\ s)" "\ s" have "\ (\ (assert a s))" using check_tableau_normalized[of "assert_bound a s"] check_unsat_id[of "assert_bound a s"] * using assert_bound_sat[of s a] Assert'Precond[of s a] by (force simp add: assert_def) moreover have "v \\<^sub>t \ s = v \\<^sub>t \ (assert a s)" using check_tableau_equiv[of "assert_bound a s" v] * using check_unsat_id[of "assert_bound a s"] by (force simp add: assert_def Assert'Precond assert_bound_sat assert_bound_tableau_id) moreover have "\ (assert a s)" using assert_bound_tableau_valuated[of s a] * using check_tableau_valuated[of "assert_bound a s"] using check_unsat_id[of "assert_bound a s"] by (cases "\ (assert_bound a s)") (auto simp add: Assert'Precond assert_def) ultimately show "let s' = assert a s in (v \\<^sub>t \ s = v \\<^sub>t \ s') \ \ (\ s') \ \ s'" by (simp add: Let_def) next fix s::"('i,'a) state" and a::"('i,'a) i_atom" assume "\ \ s" "\ s" "\ (\ s)" "\ s" then show "\ \ (assert a s) \ \ (assert a s)" unfolding assert_def using check_unsat_id[of "assert_bound a s"] using check_sat[of "assert_bound a s"] by (force simp add: Assert'Precond) next fix s::"('i,'a) state" and a::"('i,'a) i_atom" and ats::"('i,'a) i_atom set" assume "\ \ s" "\ s" "\ (\ s)" "\ s" then show "flat ats \ \ s \ flat (ats \ {a}) \ \ (assert a s)" using assert_bound_atoms_equiv_bounds using check_unsat_id[of "assert_bound a s"] check_bounds_id by (cases "\ (assert_bound a s)") (auto simp add: Assert'Precond assert_def simp: indexl_def indexu_def boundsl_def boundsu_def) next fix s::"('i,'a) state" and a::"('i,'a) i_atom" and ats assume *: "\ \ s" "\ s" "\ (\ s)" "\ s" "\ (assert a s)" "index_valid ats s" show "minimal_unsat_state_core (assert a s)" proof (cases "\ (assert_bound a s)") case True then show ?thesis unfolding assert_def using * assert_bound_unsat check_tableau_equiv[of "assert_bound a s"] check_bounds_id using check_unsat_id[of "assert_bound a s"] by (auto simp add: Assert'Precond 
satisfies_state_def Let_def) next case False then show ?thesis unfolding assert_def using * assert_bound_sat[of s a] check_unsat Assert'Precond by (metis assert_def) qed next fix ats fix s::"('i,'a) state" and a::"('i,'a) i_atom" assume *: "index_valid ats s" and **: "\ \ s" "\ s" "\ (\ s)" "\ s" have *: "index_valid (insert a ats) (assert_bound a s)" using assert_bound_index_valid[OF ** *] . show "index_valid (insert a ats) (assert a s)" proof (cases "\ (assert_bound a s)") case True show ?thesis unfolding assert_def check_unsat_id[OF True] using * . next case False show ?thesis unfolding assert_def using Assert'Precond[OF **, of a] False * by (subst check_tableau_index_valid[OF False], auto) qed next fix s ats a let ?s = "assert_bound a s" assume *: "\ \ s" "\ s" "\ (\ s)" "\ s" "ats \\<^sub>i \\ s" from assert_bound_atoms_imply_bounds_index[OF this, of a] have as: "insert a ats \\<^sub>i \\ (assert_bound a s)" by auto show "insert a ats \\<^sub>i \\ (assert a s)" proof (cases "\ ?s") case True from check_unsat_id[OF True] as show ?thesis unfolding assert_def by auto next case False from assert_bound_tableau_id[OF *(1-4)] * have t: "\ (\ ?s)" by simp from assert_bound_tableau_valuated[OF *(1-4)] have v: "\ ?s" . from assert_bound_sat[OF *(1-4) refl False] have "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s" "\ ?s" by auto from check_bounds_id[OF False this t v] as show ?thesis unfolding assert_def by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) qed qed text\Under these assumptions for \assert_bound\ and \check\, it can be easily shown that the implementation of \assert\ (previously given) satisfies its specification.\ locale AssertAllState'' = Init init + AssertBoundNoLhs assert_bound + Check check for init :: "tableau \ ('i,'a::lrv) state" and assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" and check :: "('i,'a::lrv) state \ ('i,'a) state" begin definition assert_bound_loop where "assert_bound_loop ats s \ foldl (\ s' a. if (\ s') then s' else assert_bound a s') s ats" definition assert_all_state where [simp]: "assert_all_state t ats \ check (assert_bound_loop ats (init t))" text\However, for efficiency reasons, we want to allow implementations that delay the \check\ function call and call it after several \assert_bound\ calls. For example: \smallskip \begin{small} @{thm assert_bound_loop_def[no_vars]} @{thm assert_all_state_def[no_vars]} \end{small} \smallskip Then, the loop consists only of \assert_bound\ calls, so \assert_bound\ postcondition must imply its precondition. This is not the case, since variables on the lhs may be out of their bounds. 
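The shape of this delayed-check organisation is illustrated by the following runnable sketch (Haskell; \assertBoundStub\ and \checkStub\ are hypothetical placeholders for the two phases, not the functions of the development): all cheap bound assertions are folded over first, and the single expensive check runs once at the very end.

```haskell
import Data.List (foldl')

data St = St { pending :: [(String, Rational)], unsatSt :: Bool } deriving Show

-- Cheap phase (placeholder): record the asserted bound.
assertBoundStub :: (String, Rational) -> St -> St
assertBoundStub b s = s { pending = b : pending s }

-- Expensive phase (placeholder): the real check runs the simplex algorithm
-- and must leave an already-unsat state unchanged.
checkStub :: St -> St
checkStub s = s

-- The loop consists of assert_bound steps only; check is not interleaved.
assertBoundLoop :: [(String, Rational)] -> St -> St
assertBoundLoop atoms s0 = foldl' step s0 atoms
  where step s a = if unsatSt s then s else assertBoundStub a s

-- assert_all_state t as = check (assert_bound_loop as (init t)):
-- one check call, no matter how many atoms were asserted.
assertAllState :: [(String, Rational)] -> St -> St
assertAllState atoms = checkStub . assertBoundLoop atoms

main :: IO ()
main = print (assertAllState [("x", 1), ("y", 2)] (St [] False))
```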
Therefore, we make a refinement and specify weaker preconditions (replace \\ s\ by \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ and \\ s\) for \assert_bound\, and show that these preconditions are still good enough to prove the correctness of this alternative \assert_all_state\ definition.\ lemma AssertAllState''_precond': assumes "\ (\ s)" "\ s" "\ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s \ \ s" shows "let s' = assert_bound_loop ats s in \ (\ s') \ \ s' \ (\ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ s')" using assms using assert_bound_nolhs_tableau_id assert_bound_nolhs_sat assert_bound_nolhs_tableau_valuated by (induct ats rule: rev_induct) (auto simp add: assert_bound_loop_def Let_def) lemma AssertAllState''_precond: assumes "\ t" shows "let s' = assert_bound_loop ats (init t) in \ (\ s') \ \ s' \ (\ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ s')" using assms using AssertAllState''_precond'[of "init t" ats] by (simp add: Let_def init_tableau_id init_unsat_flag init_satisfies satisfies_consistent satisfies_satisfies_no_lhs init_tableau_valuated) lemma AssertAllState''Induct: assumes "\ t" "P {} (init t)" "\ as bs t. as \ bs \ P as t \ P bs t" "\ s a ats. \\ \ s; \\ s\ \\<^sub>t \ s; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ (\ s); \ s; \ s; P (set ats) s; index_valid (set ats) s\ \ P (insert a (set ats)) (assert_bound a s)" shows "P (set ats) (assert_bound_loop ats (init t))" proof - have "P (set ats) (assert_bound_loop ats (init t)) \ index_valid (set ats) (assert_bound_loop ats (init t))" proof (induct ats rule: rev_induct) case Nil then show ?case unfolding assert_bound_loop_def using assms(2) init_index_valid by simp next case (snoc a as') let ?s = "assert_bound_loop as' (init t)" from snoc index_valid_mono[of _ "set (a # as')" "assert_bound_loop as' (init t)"] have index: "index_valid (set (a # as')) (assert_bound_loop as' (init t))" by auto from snoc assms(3)[of "set as'" "set (a # as')"] have P: "P (set (a # as')) (assert_bound_loop as' (init t))" by auto show ?case proof (cases "\ ?s") case True then show ?thesis using P index unfolding assert_bound_loop_def by simp next case False have id': "set (as' @ [a]) = insert a (set as')" by simp have id: "assert_bound_loop (as' @ [a]) (init t) = assert_bound a (assert_bound_loop as' (init t))" using False unfolding assert_bound_loop_def by auto from snoc have index: "index_valid (set as') ?s" by simp show ?thesis unfolding id unfolding id' using False snoc AssertAllState''_precond[OF assms(1)] by (intro conjI assert_bound_nolhs_index_valid index assms(4); (force simp: Let_def curr_val_satisfies_no_lhs_def)?) qed qed then show ?thesis ..
qed lemma AssertAllState''_tableau_id: "\ t \ \ (assert_bound_loop ats (init t)) = \ (init t)" by (rule AssertAllState''Induct) (auto simp add: init_tableau_id assert_bound_nolhs_tableau_id) lemma AssertAllState''_sat: "\ t \ \ \ (assert_bound_loop ats (init t)) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound_loop ats (init t)) \ \ (assert_bound_loop ats (init t))" by (rule AssertAllState''Induct) (auto simp add: init_unsat_flag init_satisfies satisfies_consistent satisfies_satisfies_no_lhs assert_bound_nolhs_sat) lemma AssertAllState''_unsat: "\ t \ \ (assert_bound_loop ats (init t)) \ minimal_unsat_state_core (assert_bound_loop ats (init t))" by (rule AssertAllState''Induct) (auto simp add: init_tableau_id assert_bound_nolhs_unsat init_unsat_flag) lemma AssertAllState''_sat_atoms_equiv_bounds: "\ t \ \ \ (assert_bound_loop ats (init t)) \ flat (set ats) \ \ (assert_bound_loop ats (init t))" using AssertAllState''_precond using assert_bound_nolhs_atoms_equiv_bounds init_atoms_equiv_bounds by (induct ats rule: rev_induct) (auto simp add: Let_def assert_bound_loop_def) lemma AssertAllState''_atoms_imply_bounds_index: assumes "\ t" shows "set ats \\<^sub>i \\ (assert_bound_loop ats (init t))" proof (induct ats rule: rev_induct) case Nil then show ?case unfolding assert_bound_loop_def using init_atoms_imply_bounds_index assms by simp next case (snoc a ats') let ?s = "assert_bound_loop ats' (init t)" show ?case proof (cases "\ ?s") case True then show ?thesis using snoc atoms_imply_bounds_index_mono[of "set ats'" "set (ats' @ [a])"] unfolding assert_bound_loop_def by auto next case False then have id: "assert_bound_loop (ats' @ [a]) (init t) = assert_bound a ?s" unfolding assert_bound_loop_def by auto from snoc have ats: "set ats' \\<^sub>i \\ ?s" by auto from AssertAllState''_precond[of t ats', OF assms, unfolded Let_def] False have *: "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s" "\ (\ ?s)" "\ ?s" "\ ?s" by auto show ?thesis unfolding id using assert_bound_nolhs_atoms_imply_bounds_index[OF False * ats, of a] by auto qed qed lemma AssertAllState''_index_valid: "\ t \ index_valid (set ats) (assert_bound_loop ats (init t))" by (rule AssertAllState''Induct, auto simp: init_index_valid index_valid_mono assert_bound_nolhs_index_valid) end sublocale AssertAllState'' < AssertAllState assert_all_state proof fix v::"'a valuation" and t ats s' assume *: "\ t" and "assert_all_state t ats = s'" then have s': "s' = assert_all_state t ats" by simp let ?s' = "assert_bound_loop ats (init t)" show "v \\<^sub>t t = v \\<^sub>t \ s'" unfolding assert_all_state_def s' using * check_tableau_equiv[of ?s' v] AssertAllState''_tableau_id[of t ats] init_tableau_id[of t] using AssertAllState''_sat[of t ats] check_unsat_id[of ?s'] using AssertAllState''_precond[of t ats] by force show "\ \ s' \ \ s'" unfolding assert_all_state_def s' using * AssertAllState''_precond[of t ats] using check_sat check_unsat_id by (force simp add: Let_def) show "\ s' \ minimal_unsat_state_core s'" using * check_unsat check_unsat_id[of ?s'] check_bounds_id using AssertAllState''_unsat[of t ats] AssertAllState''_precond[of t ats] s' by (force simp add: Let_def satisfies_state_def) show "\ \ s' \ flat (set ats) \ \ s'" unfolding assert_all_state_def s' using * AssertAllState''_precond[of t ats] using check_bounds_id[of ?s'] check_unsat_id[of ?s'] using AssertAllState''_sat_atoms_equiv_bounds[of t ats] by (force simp add: Let_def simp: indexl_def indexu_def boundsl_def boundsu_def) show "\ s' \ set ats \\<^sub>i \\ s'" unfolding 
assert_all_state_def s' using * AssertAllState''_precond[of t ats] unfolding Let_def using check_bounds_id[of ?s'] using AssertAllState''_atoms_imply_bounds_index[of t ats] using check_unsat_id[of ?s'] by (cases "\ ?s'") (auto simp add: Let_def simp: indexl_def indexu_def boundsl_def boundsu_def) show "index_valid (set ats) s'" unfolding assert_all_state_def s' using * AssertAllState''_precond[of t ats] AssertAllState''_index_valid[OF *, of ats] unfolding Let_def using check_tableau_index_valid[of ?s'] using check_unsat_id[of ?s'] by (cases "\ ?s'", auto) show "indices_state s' \ fst ` set ats" by (intro index_valid_indices_state, fact) qed subsection\Update and Pivot\ text\Both \assert_bound\ and \check\ need to update the valuation so that the tableau remains satisfied. If the value for a variable not on the lhs of the tableau is changed, this can be done rather easily (once the value of that variable is changed, one should recalculate and change the values for all lhs variables of the tableau). The \update\ function does this, and it is specified by:\ locale Update = fixes update::"var \ 'a::lrv \ ('i,'a) state \ ('i,'a) state" assumes \ \Tableau, bounds, and the unsatisfiability flag are preserved.\ update_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ let s' = update x c s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" and \ \Tableau remains valuated.\ update_tableau_valuated: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \ (update x v s)" and \ \The given variable @{text "x"} in the updated valuation is set to the given value @{text "v"} while all other variables (except those on the lhs of the tableau) are unchanged.\ update_valuation_nonlhs: "\\ (\ s); \ s; x \ lvars (\ s)\ \ x' \ lvars (\ s) \ look (\ (update x v s)) x' = (if x = x' then Some v else look (\ s) x')" and \ \Updated valuation continues to satisfy the tableau.\ update_satisfies_tableau: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \\ s\ \\<^sub>t \ s \ \\ (update x c s)\ \\<^sub>t \ s" begin lemma update_bounds_id: assumes "\ (\ s)" "\ s" "x \ lvars (\ s)" shows "\\<^sub>i (update x c s) = \\<^sub>i s" "\\ (update x c s) = \\ s" "\\<^sub>l (update x c s) = \\<^sub>l s" "\\<^sub>u (update x c s) = \\<^sub>u s" using update_id assms by (auto simp add: Let_def simp: indexl_def indexu_def boundsl_def boundsu_def) lemma update_indices_state_id: assumes "\ (\ s)" "\ s" "x \ lvars (\ s)" shows "indices_state (update x c s) = indices_state s" using update_bounds_id[OF assms] unfolding indices_state_def by auto lemma update_tableau_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \ (update x c s) = \ s" using update_id by (auto simp add: Let_def) lemma update_unsat_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \ (update x c s) = \ s" using update_id by (auto simp add: Let_def) lemma update_unsat_core_id: "\\ (\ s); \ s; x \ lvars (\ s)\ \ \\<^sub>c (update x c s) = \\<^sub>c s" using update_id by (auto simp add: Let_def) definition assert_bound' where [simp]: "assert_bound' dir i x c s \ (if (\\<^sub>u\<^sub>b (lt dir)) c (UB dir s x) then s else let s' = update\\ (UBI_upd dir) i x c s in if (\\<^sub>l\<^sub>b (lt dir)) c ((LB dir) s x) then set_unsat [i, ((LI dir) s x)] s' else if x \ lvars (\ s') \ (lt dir) c (\\ s\ x) then update x c s' else s')" fun assert_bound :: "('i,'a::lrv) i_atom \ ('i,'a) state \ ('i,'a) state" where "assert_bound (i,Leq x c) s = assert_bound' Positive i x c s" | "assert_bound (i,Geq x c) s = assert_bound' Negative i x c s" lemma assert_bound'_cases: assumes "\\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \ 
P s" assumes "\\ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ \ P (set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s))" assumes "\x \ lvars (\ s); (lt dir) c (\\ s\ x); \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x))\ \ P (update x c (update\\ (UBI_upd dir) i x c s))" assumes "\\ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)); x \ lvars (\ s)\ \ P (update\\ (UBI_upd dir) i x c s)" assumes "\\ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)); \ ((lt dir) c (\\ s\ x))\ \ P (update\\ (UBI_upd dir) i x c s)" assumes "dir = Positive \ dir = Negative" shows "P (assert_bound' dir i x c s)" proof (cases "\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)") case True then show ?thesis using assms(1) by simp next case False show ?thesis proof (cases "\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)") case True then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ using assms(2) by simp next case False let ?s = "update\\ (UBI_upd dir) i x c s" show ?thesis proof (cases "x \ lvars (\ ?s) \ (lt dir) c (\\ s\ x)") case True then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \\ \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ using assms(3) assms(6) by auto next case False then have "x \ lvars (\ ?s) \ \ ((lt dir) c (\\ s\ x))" by simp then show ?thesis proof assume "x \ lvars (\ ?s)" then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \\ \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ using assms(4) assms(6) by auto next assume "\ (lt dir) c (\\ s\ x)" then show ?thesis using \\ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)\ \\ \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)\ using assms(5) assms(6) by simp qed qed qed qed lemma assert_bound_cases: assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) s" assumes "\ c x dir. \dir = Positive \ dir = Negative; a = LE dir x c; \ \\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x); \\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s))" assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; x \ lvars (\ s); (lt dir) c (\\ s\ x); \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (update x c ((update\\ (UBI_upd dir) i x c s)))" assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; x \ lvars (\ s); \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) ((update\\ (UBI_upd dir) i x c s))" assumes "\ c x dir. \ dir = Positive \ dir = Negative; a = LE dir x c; \ (\\<^sub>u\<^sub>b (lt dir) c ((UB dir) s x)); \ (\\<^sub>l\<^sub>b (lt dir) c ((LB dir) s x)); \ ((lt dir) c (\\ s\ x)) \ \ P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) ((update\\ (UBI_upd dir) i x c s))" assumes "\ s. 
P s = P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s" assumes "\ s. P s = P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s" shows "P (assert_bound (i,a) s)" proof (cases a) case (Leq x c) then show ?thesis apply (simp del: assert_bound'_def) apply (rule assert_bound'_cases, simp_all) using assms(1)[of Positive x c] using assms(2)[of Positive x c] using assms(3)[of Positive x c] using assms(4)[of Positive x c] using assms(5)[of Positive x c] using assms(7) by auto next case (Geq x c) then show ?thesis apply (simp del: assert_bound'_def) apply (rule assert_bound'_cases) using assms(1)[of Negative x c] using assms(2)[of Negative x c] using assms(3)[of Negative x c] using assms(4)[of Negative x c] using assms(5)[of Negative x c] using assms(6) by auto qed end lemma set_unsat_bounds_id: "\ (set_unsat I s) = \ s" unfolding boundsl_def boundsu_def by auto lemma decrease_ub_satisfied_inverse: assumes lt: "\\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" and dir: "dir = Positive \ dir = Negative" assumes v: "v \\<^sub>b \ (update\\ (UBI_upd dir) i x c s)" shows "v \\<^sub>b \ s" unfolding satisfies_bounds.simps proof fix x' show "in_bounds x' v (\ s)" proof (cases "x = x'") case False then show ?thesis using v dir unfolding satisfies_bounds.simps by (auto split: if_splits simp: boundsl_def boundsu_def) next case True then show ?thesis using v dir unfolding satisfies_bounds.simps using lt by (erule_tac x="x'" in allE) (auto simp add: lt_ubound_def[THEN sym] gt_lbound_def[THEN sym] compare_strict_nonstrict boundsl_def boundsu_def) qed qed lemma atoms_equiv_bounds_extend: fixes x c dir assumes "dir = Positive \ dir = Negative" "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "ats \ \ s" shows "(ats \ {LE dir x c}) \ \ (update\\ (UBI_upd dir) i x c s)" unfolding atoms_equiv_bounds.simps proof fix v let ?s = "update\\ (UBI_upd dir) i x c s" show "v \\<^sub>a\<^sub>s (ats \ {LE dir x c}) = v \\<^sub>b \ ?s" proof assume "v \\<^sub>a\<^sub>s (ats \ {LE dir x c})" then have "v \\<^sub>a\<^sub>s ats" "le (lt dir) (v x) c" using \dir = Positive \ dir = Negative\ unfolding satisfies_atom_set_def by auto show "v \\<^sub>b \ ?s" unfolding satisfies_bounds.simps proof fix x' show "in_bounds x' v (\ ?s)" using \v \\<^sub>a\<^sub>s ats\ \le (lt dir) (v x) c\ \ats \ \ s\ using \dir = Positive \ dir = Negative\ unfolding atoms_equiv_bounds.simps satisfies_bounds.simps by (cases "x = x'") (auto simp: boundsl_def boundsu_def) qed next assume "v \\<^sub>b \ ?s" then have "v \\<^sub>b \ s" using \\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)\ using \dir = Positive \ dir = Negative\ using decrease_ub_satisfied_inverse[of dir c s x v] using neg_bounds_compare(1)[of c "\\<^sub>u s x"] using neg_bounds_compare(5)[of c "\\<^sub>l s x"] by (auto simp add: lt_ubound_def[THEN sym] ge_ubound_def[THEN sym] le_lbound_def[THEN sym] gt_lbound_def[THEN sym]) show "v \\<^sub>a\<^sub>s (ats \ {LE dir x c})" unfolding satisfies_atom_set_def proof fix a assume "a \ ats \ {LE dir x c}" then show "v \\<^sub>a a" proof assume "a \ {LE dir x c}" then show ?thesis using \v \\<^sub>b \ ?s\ using \dir = Positive \ dir = Negative\ unfolding satisfies_bounds.simps by (auto split: if_splits simp: boundsl_def boundsu_def) next assume "a \ ats" then show ?thesis using \ats \ \ s\ using \v \\<^sub>b \ s\ unfolding atoms_equiv_bounds.simps satisfies_atom_set_def by auto qed qed qed qed lemma bounds_updates: "\\<^sub>l 
(\\<^sub>i\<^sub>u_update u s) = \\<^sub>l s" "\\<^sub>u (\\<^sub>i\<^sub>l_update u s) = \\<^sub>u s" "\\<^sub>u (\\<^sub>i\<^sub>u_update (upd x (i,c)) s) = (\\<^sub>u s) (x \ c)" "\\<^sub>l (\\<^sub>i\<^sub>l_update (upd x (i,c)) s) = (\\<^sub>l s) (x \ c)" by (auto simp: boundsl_def boundsu_def) locale EqForLVar = fixes eq_idx_for_lvar :: "tableau \ var \ nat" assumes eq_idx_for_lvar: "\x \ lvars t\ \ eq_idx_for_lvar t x < length t \ lhs (t ! eq_idx_for_lvar t x) = x" begin definition eq_for_lvar :: "tableau \ var \ eq" where "eq_for_lvar t v \ t ! (eq_idx_for_lvar t v)" lemma eq_for_lvar: "\x \ lvars t\ \ eq_for_lvar t x \ set t \ lhs (eq_for_lvar t x) = x" unfolding eq_for_lvar_def using eq_idx_for_lvar by auto abbreviation rvars_of_lvar where "rvars_of_lvar t x \ rvars_eq (eq_for_lvar t x)" lemma rvars_of_lvar_rvars: assumes "x \ lvars t" shows "rvars_of_lvar t x \ rvars t" using assms eq_for_lvar[of x t] unfolding rvars_def by auto end text \Updating changes the value of \x\ and then updates values of all lhs variables so that the tableau remains satisfied. This can be based on a function that recalculates rhs polynomial values in the changed valuation:\ locale RhsEqVal = fixes rhs_eq_val::"(var, 'a::lrv) mapping \ var \ 'a \ eq \ 'a" \ \@{text rhs_eq_val} computes the value of the rhs of @{text e} in @{text "\v\(x := c)"}.\ assumes rhs_eq_val: "\v\ \\<^sub>e e \ rhs_eq_val v x c e = rhs e \ \v\ (x := c) \" begin text\\noindent Then, the next implementation of \update\ satisfies its specification:\ abbreviation update_eq where "update_eq v x c v' e \ upd (lhs e) (rhs_eq_val v x c e) v'" definition update :: "var \ 'a \ ('i,'a) state \ ('i,'a) state" where "update x c s \ \_update (upd x c (foldl (update_eq (\ s) x c) (\ s) (\ s))) s" lemma update_no_set_none: shows "look (\ s) y \ None \ look (foldl (update_eq (\ s) x v) (\ s) t) y \ None" by (induct t rule: rev_induct, auto simp: lookup_update') lemma update_no_left: assumes "y \ lvars t" shows "look (\ s) y = look (foldl (update_eq (\ s) x v) (\ s) t) y" using assms by (induct t rule: rev_induct) (auto simp add: lvars_def lookup_update') lemma update_left: assumes "y \ lvars t" shows "\ eq \ set t. 
lhs eq = y \ look (foldl (update_eq (\ s) x v) (\ s) t) y = Some (rhs_eq_val (\ s) x v eq)" using assms by (induct t rule: rev_induct) (auto simp add: lvars_def lookup_update') lemma update_valuate_rhs: assumes "e \ set (\ s)" "\ (\ s)" shows "rhs e \ \\ (update x c s)\ \ = rhs e \ \\ s\ (x := c) \" proof (rule valuate_depend, safe) fix y assume "y \ rvars_eq e" then have "y \ lvars (\ s)" using \\ (\ s)\ \e \ set (\ s)\ by (auto simp add: normalized_tableau_def rvars_def) then show "\\ (update x c s)\ y = (\\ s\(x := c)) y" using update_no_left[of y "\ s" s x c] by (auto simp add: update_def map2fun_def lookup_update') qed end sublocale RhsEqVal < Update update proof fix s::"('i,'a) state" and x c show "let s' = update x c s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" by (simp add: Let_def update_def add: boundsl_def boundsu_def indexl_def indexu_def) next fix s::"('i,'a) state" and x c assume "\ (\ s)" "\ s" "x \ lvars (\ s)" then show "\ (update x c s)" using update_no_set_none[of s] by (simp add: Let_def update_def tableau_valuated_def lookup_update') next fix s::"('i,'a) state" and x x' c assume "\ (\ s)" "\ s" "x \ lvars (\ s)" show "x' \ lvars (\ s) \ look (\ (update x c s)) x' = (if x = x' then Some c else look (\ s) x')" using update_no_left[of x' "\ s" s x c] unfolding update_def lvars_def Let_def by (auto simp: lookup_update') next fix s::"('i,'a) state" and x c assume "\ (\ s)" "\ s" "x \ lvars (\ s)" have "\\ s\ \\<^sub>t \ s \ \e \ set (\ s). \\ (update x c s)\ \\<^sub>e e" proof fix e assume "e \ set (\ s)" "\\ s\ \\<^sub>t \ s" then have "\\ s\ \\<^sub>e e" by (simp add: satisfies_tableau_def) have "x \ lhs e" using \x \ lvars (\ s)\ \e \ set (\ s)\ by (auto simp add: lvars_def) then have "\\ (update x c s)\ (lhs e) = rhs_eq_val (\ s) x c e" using update_left[of "lhs e" "\ s" s x c] \e \ set (\ s)\ \\ (\ s)\ by (auto simp add: lvars_def lookup_update' update_def Let_def map2fun_def normalized_tableau_def distinct_map inj_on_def) then show "\\ (update x c s)\ \\<^sub>e e" using \\\ s\ \\<^sub>e e\ \e \ set (\ s)\ \x \ lvars (\ s)\ \\ (\ s)\ using rhs_eq_val by (simp add: satisfies_eq_def update_valuate_rhs) qed then show "\\ s\ \\<^sub>t \ s \ \\ (update x c s)\ \\<^sub>t \ s" by(simp add: satisfies_tableau_def update_def) qed text\To update the valuation for a variable that is on the lhs of the tableau it should first be swapped with some rhs variable of its equation, in an operation called \emph{pivoting}. Pivoting has the precondition that the tableau is normalized and that it is always called for a lhs variable of the tableau, and a rhs variable in the equation with that lhs variable. 
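The algebra behind a single pivot can be sketched as follows (a runnable Haskell illustration with our own names, not the formal pivot function): solving x_i = a*x_j + p for x_j yields x_j = (1/a)*x_i - (1/a)*p; a full pivot of the tableau additionally substitutes this new right-hand side for x_j in every other equation.

```haskell
import qualified Data.Map as M
import Data.Maybe (fromMaybe)

-- A linear polynomial as a map from variables to coefficients (no constant
-- term, as in tableau equations), and an equation  lhs = polynomial.
type Var  = String
type Poly = M.Map Var Rational
type Eqn  = (Var, Poly)

-- Solve  xi = a*xj + p  for xj (precondition: xj occurs in the polynomial):
--   xj = (1/a)*xi - (1/a)*p
pivotEq :: Var -> Eqn -> Eqn
pivotEq xj (xi, p) =
  (xj, M.insert xi (1 / a) (M.map (\c -> negate c / a) rest))
  where a    = fromMaybe (error "pivot variable not in equation") (M.lookup xj p)
        rest = M.delete xj p

main :: IO ()
main = print (pivotEq "y" ("x", M.fromList [("y", 2), ("z", 3)]))
  -- x = 2*y + 3*z   pivots to   y = (1/2)*x + (-3/2)*z
```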
The set of rhs variables for the given lhs variable is found using the \rvars_of_lvar\ function (specified in a very simple locale \EqForLVar\, that we do not print).\ locale Pivot = EqForLVar + fixes pivot::"var \ var \ ('i,'a::lrv) state \ ('i,'a) state" assumes \ \Valuation, bounds, and the unsatisfiability flag are not changed.\ pivot_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ let s' = pivot x\<^sub>i x\<^sub>j s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" and \ \The tableau remains equivalent to the previous one and normalized.\ pivot_tableau: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ let s' = pivot x\<^sub>i x\<^sub>j s in ((v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ s') \ \ (\ s') " and \ \@{text "x\<^sub>i"} and @{text "x\<^sub>j"} are swapped, while the other variables do not change sides.\ pivot_vars': "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ let s' = pivot x\<^sub>i x\<^sub>j s in rvars(\ s') = rvars(\ s)-{x\<^sub>j}\{x\<^sub>i} \ lvars(\ s') = lvars(\ s)-{x\<^sub>i}\{x\<^sub>j}" begin lemma pivot_bounds_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>i (pivot x\<^sub>i x\<^sub>j s) = \\<^sub>i s" using pivot_id by (simp add: Let_def) lemma pivot_bounds_id': assumes "\ (\ s)" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\\ (pivot x\<^sub>i x\<^sub>j s) = \\ s" "\ (pivot x\<^sub>i x\<^sub>j s) = \ s" "\ (pivot x\<^sub>i x\<^sub>j s) = \ s" using pivot_bounds_id[OF assms] by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma pivot_valuation_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot x\<^sub>i x\<^sub>j s) = \ s" using pivot_id by (simp add: Let_def) lemma pivot_unsat_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot x\<^sub>i x\<^sub>j s) = \ s" using pivot_id by (simp add: Let_def) lemma pivot_unsat_core_id: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>c (pivot x\<^sub>i x\<^sub>j s) = \\<^sub>c s" using pivot_id by (simp add: Let_def) lemma pivot_tableau_equiv: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ (v::'a valuation) \\<^sub>t \ s = v \\<^sub>t \ (pivot x\<^sub>i x\<^sub>j s)" using pivot_tableau by (simp add: Let_def) lemma pivot_tableau_normalized: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (\ (pivot x\<^sub>i x\<^sub>j s))" using pivot_tableau by (simp add: Let_def) lemma pivot_rvars: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ rvars (\ (pivot x\<^sub>i x\<^sub>j s)) = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" using pivot_vars' by (simp add: Let_def) lemma pivot_lvars: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ lvars (\ (pivot x\<^sub>i x\<^sub>j s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" using pivot_vars' by (simp add: Let_def) lemma pivot_vars: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ tvars (\ (pivot x\<^sub>i x\<^sub>j s)) = tvars (\ s) " using pivot_lvars[of s x\<^sub>i x\<^sub>j] pivot_rvars[of s x\<^sub>i x\<^sub>j] using rvars_of_lvar_rvars[of x\<^sub>i "\ s"] by auto lemma pivot_tableau_valuated: "\\ (\ s); x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i; \ s\ \ \ 
(pivot x\<^sub>i x\<^sub>j s)" using pivot_valuation_id pivot_vars by (auto simp add: tableau_valuated_def) end text\Functions \pivot\ and \update\ can be used to implement the \check\ function. In its context, \pivot\ and \update\ functions are always called together, so the following definition can be used: @{prop "pivot_and_update x\<^sub>i x\<^sub>j c s = update x\<^sub>i c (pivot x\<^sub>i x\<^sub>j s)"}. It is possible to make a more efficient implementation of \pivot_and_update\ that does not use separate implementations of \pivot\ and \update\. To allow this, a separate specification for \pivot_and_update\ can be given. It can be easily shown that the \pivot_and_update\ definition above satisfies this specification.\ locale PivotAndUpdate = EqForLVar + fixes pivot_and_update :: "var \ var \ 'a::lrv \ ('i,'a) state \ ('i,'a) state" assumes pivotandupdate_unsat_id: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" assumes pivotandupdate_unsat_core_id: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>c (pivot_and_update x\<^sub>i x\<^sub>j c s) = \\<^sub>c s" assumes pivotandupdate_bounds_id: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\<^sub>i (pivot_and_update x\<^sub>i x\<^sub>j c s) = \\<^sub>i s" assumes pivotandupdate_tableau_normalized: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (\ (pivot_and_update x\<^sub>i x\<^sub>j c s))" assumes pivotandupdate_tableau_equiv: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ (v::'a valuation) \\<^sub>t \ s \ v \\<^sub>t \ (pivot_and_update x\<^sub>i x\<^sub>j c s)" assumes pivotandupdate_satisfies_tableau: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \\ s\ \\<^sub>t \ s \ \\ (pivot_and_update x\<^sub>i x\<^sub>j c s)\ \\<^sub>t \ s" assumes pivotandupdate_rvars: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ rvars (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" assumes pivotandupdate_lvars: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ lvars (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" assumes pivotandupdate_valuation_nonlhs: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ x \ lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j} \ look (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) x = (if x = x\<^sub>i then Some c else look (\ s) x)" assumes pivotandupdate_tableau_valuated: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ \ (pivot_and_update x\<^sub>i x\<^sub>j c s)" begin lemma pivotandupdate_bounds_id': assumes "\ (\ s)" "\ s" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \\ s" "\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" "\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" using pivotandupdate_bounds_id[OF assms] by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) lemma pivotandupdate_valuation_xi: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i\ \ look (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) x\<^sub>i = Some c" using pivotandupdate_valuation_nonlhs[of s x\<^sub>i x\<^sub>j x\<^sub>i 
c] using rvars_of_lvar_rvars by (auto simp add: normalized_tableau_def) lemma pivotandupdate_valuation_other_nolhs: "\\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i; x \ lvars (\ s); x \ x\<^sub>j\ \ look (\ (pivot_and_update x\<^sub>i x\<^sub>j c s)) x = look (\ s) x" using pivotandupdate_valuation_nonlhs[of s x\<^sub>i x\<^sub>j x c] by auto lemma pivotandupdate_nolhs: "\ \ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \\<^sub>l s x\<^sub>i = Some c \ \\<^sub>u s x\<^sub>i = Some c\ \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (pivot_and_update x\<^sub>i x\<^sub>j c s)" using pivotandupdate_satisfies_tableau[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_tableau_equiv[of s x\<^sub>i x\<^sub>j _ c] using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j _ c] using pivotandupdate_lvars[of s x\<^sub>i x\<^sub>j c] by (auto simp add: curr_val_satisfies_no_lhs_def satisfies_bounds.simps satisfies_bounds_set.simps bounds_consistent_geq_lb bounds_consistent_leq_ub map2fun_def pivotandupdate_bounds_id') lemma pivotandupdate_bounds_consistent: assumes "\ (\ s)" "\ s" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\ (pivot_and_update x\<^sub>i x\<^sub>j c s) = \ s" using assms pivotandupdate_bounds_id'[of s x\<^sub>i x\<^sub>j c] by (simp add: bounds_consistent_def) end locale PivotUpdate = Pivot eq_idx_for_lvar pivot + Update update for eq_idx_for_lvar :: "tableau \ var \ nat" and pivot :: "var \ var \ ('i,'a::lrv) state \ ('i,'a) state" and update :: "var \ 'a \ ('i,'a) state \ ('i,'a) state" begin definition pivot_and_update :: "var \ var \ 'a \ ('i,'a) state \ ('i,'a) state" where [simp]: "pivot_and_update x\<^sub>i x\<^sub>j c s \ update x\<^sub>i c (pivot x\<^sub>i x\<^sub>j s)" lemma pivot_update_precond: assumes "\ (\ s)" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" shows "\ (\ (pivot x\<^sub>i x\<^sub>j s))" "x\<^sub>i \ lvars (\ (pivot x\<^sub>i x\<^sub>j s))" proof- from assms have "x\<^sub>i \ x\<^sub>j" using rvars_of_lvar_rvars[of x\<^sub>i "\ s"] by (auto simp add: normalized_tableau_def) then show "\ (\ (pivot x\<^sub>i x\<^sub>j s))" "x\<^sub>i \ lvars (\ (pivot x\<^sub>i x\<^sub>j s))" using assms using pivot_tableau_normalized[of s x\<^sub>i x\<^sub>j] using pivot_lvars[of s x\<^sub>i x\<^sub>j] by auto qed end sublocale PivotUpdate < PivotAndUpdate eq_idx_for_lvar pivot_and_update using pivot_update_precond using update_unsat_id pivot_unsat_id pivot_unsat_core_id update_bounds_id pivot_bounds_id update_tableau_id pivot_tableau_normalized pivot_tableau_equiv update_satisfies_tableau pivot_valuation_id pivot_lvars pivot_rvars update_valuation_nonlhs update_valuation_nonlhs pivot_tableau_valuated update_tableau_valuated update_unsat_core_id by (unfold_locales, auto) text\Given the @{term update} function, \assert_bound\ can be implemented as follows. \vspace{-2mm} @{text[display] "assert_bound (Leq x c) s \ if c \\<^sub>u\<^sub>b \\<^sub>u s x then s else let s' = s \ \\<^sub>u := (\\<^sub>u s) (x := Some c) \ in if c <\<^sub>l\<^sub>b \\<^sub>l s x then s' \ \ := True \ else if x \ lvars (\ s') \ c < \\ s\ x then update x c s' else s'" } \vspace{-2mm} \noindent The case of \Geq x c\ atoms is analogous (a systematic way to avoid symmetries is discussed in Section \ref{sec:symmetries}). 
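One such systematic treatment parameterizes the definition by a record of "direction" components, in the spirit of the @{term Direction} data type introduced above. The following runnable sketch (Haskell; the simplified state and all names are ours, and the valuation-update branch of the implementation displayed above is omitted) shows how a single definition then covers both the \Leq\ and the \Geq\ case.

```haskell
import qualified Data.Map as M

type Var = String
data St  = St { lb, ub :: M.Map Var Rational, unsatSt :: Bool } deriving Show

-- Everything that differs between asserting x <= c and x >= c: the strict
-- order and which of the two bound maps is read and which is tightened.
data Dir = Dir
  { lt    :: Rational -> Rational -> Bool
  , getB  :: St -> M.Map Var Rational     -- the bound being tightened
  , getOB :: St -> M.Map Var Rational     -- the opposite bound
  , setB  :: (M.Map Var Rational -> M.Map Var Rational) -> St -> St
  }

positive, negative :: Dir
positive = Dir (<) ub lb (\f s -> s { ub = f (ub s) })
negative = Dir (>) lb ub (\f s -> s { lb = f (lb s) })

-- One definition covers both atom kinds (the valuation update is omitted).
assertBound' :: Dir -> Var -> Rational -> St -> St
assertBound' d x c s
  | maybe False (\b -> lt d b c || b == c) (M.lookup x (getB d s)) = s     -- subsumed
  | maybe False (lt d c) (M.lookup x (getOB d s)) = s' { unsatSt = True }  -- conflict
  | otherwise = s'
  where s' = setB d (M.insert x c) s

assertLeq, assertGeq :: Var -> Rational -> St -> St
assertLeq = assertBound' positive
assertGeq = assertBound' negative

main :: IO ()
main = print (assertGeq "x" 3 (assertLeq "x" 2 (St M.empty M.empty False)))
```

In the development itself this role is played by the \Positive\ and \Negative\ directions defined earlier.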
This implementation satisfies both its specifications. \ lemma indices_state_set_unsat: "indices_state (set_unsat I s) = indices_state s" by (cases s, auto simp: indices_state_def) lemma \\_set_unsat: "\\ (set_unsat I s) = \\ s" by (cases s, auto simp: boundsl_def boundsu_def indexl_def indexu_def) lemma satisfies_tableau_cong: assumes "\ x. x \ tvars t \ v x = w x" shows "(v \\<^sub>t t) = (w \\<^sub>t t)" unfolding satisfies_tableau_def satisfies_eq_def by (intro ball_cong[OF refl] arg_cong2[of _ _ _ _ "(=)"] valuate_depend, insert assms, auto simp: lvars_def rvars_def) lemma satisfying_state_valuation_to_atom_tabl: assumes J: "J \ indices_state s" and model: "(J, v) \\<^sub>i\<^sub>s\<^sub>e s" and ivalid: "index_valid as s" and dist: "distinct_indices_atoms as" shows "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s as" "v \\<^sub>t \ s" unfolding i_satisfies_atom_set'.simps proof (intro ballI) from model[unfolded satisfies_state_index'.simps] have model: "v \\<^sub>t \ s" "(J, v) \\<^sub>i\<^sub>b\<^sub>e \\ s" by auto show "v \\<^sub>t \ s" by fact fix a assume "a \ restrict_to J as" then obtain i where iJ: "i \ J" and mem: "(i,a) \ as" by auto with J have "i \ indices_state s" by auto from this[unfolded indices_state_def] obtain x c where look: "look (\\<^sub>i\<^sub>l s) x = Some (i, c) \ look (\\<^sub>i\<^sub>u s) x = Some (i, c)" by auto with ivalid[unfolded index_valid_def] obtain b where "(i,b) \ as" "atom_var b = x" "atom_const b = c" by force with dist[unfolded distinct_indices_atoms_def, rule_format, OF this(1) mem] have a: "atom_var a = x" "atom_const a = c" by auto from model(2)[unfolded satisfies_bounds_index'.simps] look iJ have "v x = c" by (auto simp: boundsu_def boundsl_def indexu_def indexl_def) thus "v \\<^sub>a\<^sub>e a" unfolding satisfies_atom'_def a . qed text \Note that in order to ensure minimality of the unsat cores, pivoting is required.\ sublocale AssertAllState < AssertAll assert_all proof fix t as v I assume D: "\ t" from D show "assert_all t as = Sat v \ \v\ \\<^sub>t t \ \v\ \\<^sub>a\<^sub>s flat (set as)" unfolding Let_def assert_all_def using assert_all_state_tableau_equiv[OF D refl] using assert_all_state_sat[OF D refl] using assert_all_state_sat_atoms_equiv_bounds[OF D refl, of as] unfolding atoms_equiv_bounds.simps curr_val_satisfies_state_def satisfies_state_def satisfies_atom_set_def by (auto simp: Let_def split: if_splits) let ?s = "assert_all_state t as" assume "assert_all t as = Unsat I" then have i: "I = the (\\<^sub>c ?s)" and U: "\ ?s" unfolding assert_all_def Let_def by (auto split: if_splits) from assert_all_index_valid[OF D refl, of as] have ivalid: "index_valid (set as) ?s" . note unsat = assert_all_state_unsat[OF D refl U, unfolded minimal_unsat_state_core_def unsat_state_core_def i[symmetric]] from unsat have "set I \ indices_state ?s" by auto also have "\ \ fst ` set as" using assert_all_state_indices[OF D refl] . finally have indices: "set I \ fst ` set as" . 
show "minimal_unsat_core_tabl_atoms (set I) t (set as)" unfolding minimal_unsat_core_tabl_atoms_def proof (intro conjI impI allI indices, clarify) fix v assume model: "v \\<^sub>t t" "(set I, v) \\<^sub>i\<^sub>a\<^sub>s set as" from unsat have no_model: "\ ((set I, v) \\<^sub>i\<^sub>s ?s)" by auto from assert_all_state_unsat_atoms_equiv_bounds[OF D refl U] have equiv: "set as \\<^sub>i \\ ?s" by auto from assert_all_state_tableau_equiv[OF D refl, of v] model have model_t: "v \\<^sub>t \ ?s" by auto have model_as': "(set I, v) \\<^sub>i\<^sub>a\<^sub>s set as" using model(2) by (auto simp: satisfies_atom_set_def) with equiv model_t have "(set I, v) \\<^sub>i\<^sub>s ?s" unfolding satisfies_state_index.simps atoms_imply_bounds_index.simps by simp with no_model show False by simp next fix J assume dist: "distinct_indices_atoms (set as)" and J: "J \ set I" from J unsat[unfolded subsets_sat_core_def, folded i] have J': "J \ indices_state ?s" by auto from index_valid_distinct_indices[OF ivalid dist] J unsat[unfolded subsets_sat_core_def, folded i] obtain v where model: "(J, v) \\<^sub>i\<^sub>s\<^sub>e ?s" by blast have "(J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s set as" "v \\<^sub>t t" using satisfying_state_valuation_to_atom_tabl[OF J' model ivalid dist] assert_all_state_tableau_equiv[OF D refl] by auto then show "\ v. v \\<^sub>t t \ (J, v) \\<^sub>i\<^sub>a\<^sub>e\<^sub>s set as" by blast qed qed lemma (in Update) update_to_assert_bound_no_lhs: assumes pivot: "Pivot eqlvar (pivot :: var \ var \ ('i,'a) state \ ('i,'a) state)" shows "AssertBoundNoLhs assert_bound" proof fix s::"('i,'a) state" and a assume "\ \ s" "\ (\ s)" "\ s" then show "\ (assert_bound a s) = \ s" by (cases a, cases "snd a") (auto simp add: Let_def update_tableau_id tableau_valuated_def) next fix s::"('i,'a) state" and ia and as assume *: "\ \ s" "\ (\ s)" "\ s" and **: "\ (assert_bound ia s)" and index: "index_valid as s" and consistent: "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" obtain i a where ia: "ia = (i,a)" by force let ?modelU = "\ lt UB UI s v x c i. UB s x = Some c \ UI s x = i \ i \ set (the (\\<^sub>c s)) \ (lt (v x) c \ v x = c)" let ?modelL = "\ lt LB LI s v x c i. LB s x = Some c \ LI s x = i \ i \ set (the (\\<^sub>c s)) \ (lt c (v x) \ c = v x)" let ?modelIU = "\ I lt UB UI s v x c i. UB s x = Some c \ UI s x = i \ i \ I \ (v x = c)" let ?modelIL = "\ I lt LB LI s v x c i. LB s x = Some c \ LI s x = i \ i \ I \ (v x = c)" let ?P' = "\ lt UBI LBI UB LB UBI_upd UI LI LE GE s. \ s \ (set (the (\\<^sub>c s)) \ indices_state s \ \ (\v. (v \\<^sub>t \ s \ (\ x c i. ?modelU lt UB UI s v x c i) \ (\ x c i. ?modelL lt LB LI s v x c i)))) \ (distinct_indices_state s \ (\ I. I \ set (the (\\<^sub>c s)) \ (\ v. v \\<^sub>t \ s \ (\ x c i. ?modelIU I lt UB UI s v x c i) \ (\ x c i. ?modelIL I lt LB LI s v x c i))))" have "\ (assert_bound ia s) \ (unsat_state_core (assert_bound ia s) \ (distinct_indices_state (assert_bound ia s) \ subsets_sat_core (assert_bound ia s)))" (is "?P (assert_bound ia s)") unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix s' :: "('i,'a) state" have id: "((x :: 'a) < y \ x = y) \ x \ y" "((x :: 'a) > y \ x = y) \ x \ y" for x y by auto have id': "?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u undefined \\<^sub>l \\<^sub>u Geq Leq s' = ?P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l undefined \\<^sub>u \\<^sub>l Leq Geq s'" by (intro arg_cong[of _ _ "\ y. _ \ y"] arg_cong[of _ _ "\ x. 
_ \ x"], intro arg_cong2[of _ _ _ _ "(\)"] arg_cong[of _ _ "\ y. _ \ y"] arg_cong[of _ _ "\ y. \ x \ set (the (\\<^sub>c s')). y x"] ext arg_cong[of _ _ Not], unfold id, auto) show "?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u undefined \\<^sub>l \\<^sub>u Geq Leq s'" unfolding satisfies_state_def satisfies_bounds_index.simps satisfies_bounds.simps in_bounds.simps unsat_state_core_def satisfies_state_index.simps subsets_sat_core_def satisfies_state_index'.simps satisfies_bounds_index'.simps unfolding bound_compare''_defs id by ((intro arg_cong[of _ _ "\ x. _ \ x"] arg_cong[of _ _ "\ x. _ \ x"], intro arg_cong2[of _ _ _ _ "(\)"] refl arg_cong[of _ _ "\ x. _ \ x"] arg_cong[of _ _ Not] arg_cong[of _ _ "\ y. \ x \ set (the (\\<^sub>c s')). y x"] ext; intro arg_cong[of _ _ Ex] ext), auto) then show "?P s' = ?P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l undefined \\<^sub>u \\<^sub>l Leq Geq s'" unfolding id' . next fix c::'a and x::nat and dir assume "\\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" and dir: "dir = Positive \ dir = Negative" then obtain d where some: "LB dir s x = Some d" and lt: "lt dir c d" by (auto simp: bound_compare'_defs split: option.splits) from index[unfolded index_valid_def, rule_format, of x _ d] some dir obtain j where ind: "LI dir s x = j" "look (LBI dir s) x = Some (j,d)" and ge: "(j, GE dir x d) \ as" by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) let ?s = "set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s)" let ?ss = "update\\ (UBI_upd dir) i x c s" show "?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) ?s" proof (intro conjI impI allI, goal_cases) case 1 thus ?case using dir ind ge lt some by (force simp: indices_state_def split: if_splits) next case 2 { fix v assume vU: "\ x c i. ?modelU (lt dir) (UB dir) (UI dir) ?s v x c i" assume vL: "\ x c i. ?modelL (lt dir) (LB dir) (LI dir) ?s v x c i" from dir have "UB dir ?s x = Some c" "UI dir ?s x = i" by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) from vU[rule_format, OF this] have vx_le_c: "lt dir (v x) c \ v x = c" by auto from dir ind some have *: "LB dir ?s x = Some d" "LI dir ?s x = j" by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) have d_le_vx: "lt dir d (v x) \ d = v x" by (intro vL[rule_format, OF *], insert some ind, auto) from dir d_le_vx vx_le_c lt have False by auto } thus ?case by blast next case (3 I) then obtain j where I: "I \ {j}" by (auto split: if_splits) from 3 have dist: "distinct_indices_state ?ss" unfolding distinct_indices_state_def by auto have id1: "UB dir ?s y = UB dir ?ss y" "LB dir ?s y = LB dir ?ss y" "UI dir ?s y = UI dir ?ss y" "LI dir ?s y = LI dir ?ss y" "\ ?s = \ s" "set (the (\\<^sub>c ?s)) = {i,LI dir s x}" for y using dir by (auto simp: boundsu_def boundsl_def indexu_def indexl_def) from I have id: "(\ k. P1 k \ P2 k \ k \ I \ Q k) \ (I = {} \ (P1 j \ P2 j \ Q j))" for P1 P2 Q by auto have id2: "(UB dir s xa = Some ca \ UI dir s xa = j \ P) = (look (UBI dir s) xa = Some (j,ca) \ P)" "(LB dir s xa = Some ca \ LI dir s xa = j \ P) = (look (LBI dir s) xa = Some (j,ca) \ P)" for xa ca P s using dir by (auto simp: boundsu_def indexu_def boundsl_def indexl_def) have "\v. v \\<^sub>t \ s \ (\xa ca ia. UB dir ?ss xa = Some ca \ UI dir ?ss xa = ia \ ia \ I \ v xa = ca) \ (\xa ca ia. LB dir ?ss xa = Some ca \ LI dir ?ss xa = ia \ ia \ I \ v xa = ca)" proof (cases "\ xa ca. 
look (UBI dir ?ss) xa = Some (j,ca) \ look (LBI dir ?ss) xa = Some (j,ca)") case False thus ?thesis unfolding id id2 using consistent unfolding curr_val_satisfies_no_lhs_def by (intro exI[of _ "\\ s\"], auto) next case True from consistent have val: " \\ s\ \\<^sub>t \ s" unfolding curr_val_satisfies_no_lhs_def by auto define ss where ss: "ss = ?ss" from True obtain y b where "look (UBI dir ?ss) y = Some (j,b) \ look (LBI dir ?ss) y = Some (j,b)" by force then have id3: "(look (LBI dir ss) yy = Some (j,bb) \ look (UBI dir ss) yy = Some (j,bb)) \ (yy = y \ bb = b)" for yy bb using distinct_indices_stateD(1)[OF dist, of y j b yy bb] using dir unfolding ss[symmetric] by (auto simp: boundsu_def boundsl_def indexu_def indexl_def) have "\v. v \\<^sub>t \ s \ v y = b" proof (cases "y \ lvars (\ s)") case False let ?v = "\\ (update y b s)\" show ?thesis proof (intro exI[of _ ?v] conjI) from update_satisfies_tableau[OF *(2,3) False] val show "?v \\<^sub>t \ s" by simp from update_valuation_nonlhs[OF *(2,3) False, of y b] False show "?v y = b" by (simp add: map2fun_def') qed next case True from *(2)[unfolded normalized_tableau_def] have zero: "0 \ rhs ` set (\ s)" by auto interpret Pivot eqlvar pivot by fact interpret PivotUpdate eqlvar pivot update .. let ?eq = "eq_for_lvar (\ s) y" from eq_for_lvar[OF True] have "?eq \ set (\ s)" "lhs ?eq = y" by auto with zero have rhs: "rhs ?eq \ 0" by force hence "rvars_eq ?eq \ {}" by (simp add: vars_empty_zero) then obtain z where z: "z \ rvars_eq ?eq" by auto let ?v = "\ (pivot_and_update y z b s)" let ?vv = "\?v\" from pivotandupdate_valuation_xi[OF *(2,3) True z] have "look ?v y = Some b" . hence vv: "?vv y = b" unfolding map2fun_def' by auto show ?thesis proof (intro exI[of _ ?vv] conjI vv) show "?vv \\<^sub>t \ s" using pivotandupdate_satisfies_tableau[OF *(2,3) True z] val by auto qed qed thus ?thesis unfolding id id2 ss[symmetric] using id3 by metis qed thus ?case unfolding id1 . qed next fix c::'a and x::nat and dir assume **: "dir = Positive \ dir = Negative" "a = LE dir x c" "x \ lvars (\ s)" "lt dir c (\\ s\ x)" "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" let ?s = "update\\ (UBI_upd dir) i x c s" show "?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (update x c ?s)" using * ** by (auto simp add: update_unsat_id tableau_valuated_def) qed (auto simp add: * update_unsat_id tableau_valuated_def) with ** show "minimal_unsat_state_core (assert_bound ia s)" by (auto simp: minimal_unsat_state_core_def) next fix s::"('i,'a) state" and ia assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" and **: "\ \ (assert_bound ia s)" (is ?lhs) obtain i a where ia: "ia = (i,a)" by force have "\\ (assert_bound ia s)\ \\<^sub>t \ (assert_bound ia s)" proof- let ?P = "\ lt UBI LBI UB LB UBI_upd UI LI LE GE s. 
\\ s\ \\<^sub>t \ s" show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P]) fix c x and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "x \ lvars (\ s)" "(lt dir) c (\\ s\ x)" "dir = Positive \ dir = Negative" then show "\\ (update x c ?s')\ \\<^sub>t \ (update x c ?s')" using * using update_satisfies_tableau[of ?s' x c] update_tableau_id by (auto simp add: curr_val_satisfies_no_lhs_def tableau_valuated_def) qed (insert *, auto simp add: curr_val_satisfies_no_lhs_def) qed moreover have "\ \ (assert_bound ia s) \ \\ (assert_bound ia s)\ \\<^sub>b \ (assert_bound ia s) \ - lvars (\ (assert_bound ia s))" (is "?P (assert_bound ia s)") proof- let ?P' = "\ lt UBI LBI UB LB UB_upd UI LI LE GE s. \ \ s \ (\x\- lvars (\ s). \\<^sub>l\<^sub>b lt (\\ s\ x) (LB s x) \ \\<^sub>u\<^sub>b lt (\\ s\ x) (UB s x))" let ?P'' = "\ dir. ?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have x: "\ s'. ?P s' = ?P' (<) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" unfolding satisfies_bounds_set.simps in_bounds.simps bound_compare_defs by (auto split: option.split) show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix dir :: "('i,'a) Direction" assume "dir = Positive \ dir = Negative" then show "?P'' dir s" using x[of s] xx[of s] \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: curr_val_satisfies_no_lhs_def) next fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "x \ lvars (\ s)" "dir = Positive \ dir = Negative" then have "?P ?s'" using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: satisfies_bounds_set.simps curr_val_satisfies_no_lhs_def boundsl_def boundsu_def indexl_def indexu_def) then show "?P'' dir ?s'" using x[of ?s'] xx[of ?s'] \dir = Positive \ dir = Negative\ by auto next fix c x and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "\ lt dir c (\\ s\ x)" "dir = Positive \ dir = Negative" then show "?P'' dir ?s'" using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: satisfies_bounds_set.simps curr_val_satisfies_no_lhs_def simp: boundsl_def boundsu_def indexl_def indexu_def) (auto simp add: bound_compare_defs) next fix c x and dir :: "('i,'a) Direction" let ?s' = "update x c (update\\ (UBI_upd dir) i x c s)" assume "x \ lvars (\ s)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" "dir = Positive \ dir = Negative" show "?P'' dir ?s'" proof (rule impI, rule ballI) fix y assume "\ \ ?s'" "y \ - lvars (\ ?s')" show "\\<^sub>l\<^sub>b (lt dir) (\\ ?s'\ y) (LB dir ?s' y) \ \\<^sub>u\<^sub>b (lt dir) (\\ ?s'\ y) (UB dir ?s' y)" proof (cases "x = y") case True then show ?thesis using \x \ lvars (\ s)\ using \y \ - lvars (\ ?s')\ using \\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)\ using \dir = Positive \ dir = Negative\ using neg_bounds_compare(7) neg_bounds_compare(3) using * by (auto simp add: update_valuation_nonlhs update_tableau_id update_bounds_id bound_compare''_defs map2fun_def tableau_valuated_def bounds_updates) (force simp add: bound_compare'_defs)+ next case False then show ?thesis using \x \ lvars (\ s)\ \y \ - lvars (\ ?s')\ using \dir = Positive \ dir = Negative\ * by (auto simp add: update_valuation_nonlhs update_tableau_id update_bounds_id bound_compare''_defs 
satisfies_bounds_set.simps curr_val_satisfies_no_lhs_def map2fun_def tableau_valuated_def bounds_updates) qed qed qed (auto simp add: x xx) qed moreover have "\ \ (assert_bound ia s) \ \ (assert_bound ia s)" (is "?P (assert_bound ia s)") proof- let ?P' = "\ lt UBI LBI UB LB UBI_upd UI LI LE GE s. \ \ s \ (\x. if LB s x = None \ UB s x = None then True else lt (the (LB s x)) (the (UB s x)) \ (the (LB s x) = the (UB s x)))" let ?P'' = "\ dir. ?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have x: "\ s'. ?P s' = ?P' (<) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" unfolding bounds_consistent_def by auto show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix dir :: "('i,'a) Direction" assume "dir = Positive \ dir = Negative" then show "?P'' dir s" using \\ s\ by (auto simp add: bounds_consistent_def) (erule_tac x=x in allE, auto)+ next fix x c and dir :: "('i,'a) Direction" let ?s' = "update x c (update\\ (UBI_upd dir) i x c s)" assume "dir = Positive \ dir = Negative" "x \ lvars (\ s)" "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" then show "?P'' dir ?s'" using \\ s\ * unfolding bounds_consistent_def by (auto simp add: update_bounds_id tableau_valuated_def bounds_updates split: if_splits) (force simp add: bound_compare'_defs, erule_tac x=xa in allE, simp)+ next fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "\ \\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" "\ \\<^sub>l\<^sub>b (lt dir) c (LB dir s x)" "dir = Positive \ dir = Negative" then have "?P'' dir ?s'" using \\ s\ unfolding bounds_consistent_def by (auto split: if_splits simp: bounds_updates) (force simp add: bound_compare'_defs, erule_tac x=xa in allE, simp)+ then show "?P'' dir ?s'" "?P'' dir ?s'" by simp_all qed (auto simp add: x xx) qed ultimately show "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (assert_bound ia s) \ \ (assert_bound ia s)" using \?lhs\ unfolding curr_val_satisfies_no_lhs_def by simp next fix s :: "('i,'a) state" and ats and ia :: "('i,'a) i_atom" assume "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" obtain i a where ia: "ia = (i,a)" by force { fix ats let ?P' = "\ lt UBI LBI UB LB UB_upd UI LI LE GE s'. ats \ \ s \ (ats \ {a}) \ \ s'" let ?P'' = "\ dir. 
?P' (lt dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have "ats \ \ s \ (ats \ {a}) \ \ (assert_bound ia s)" (is "?P (assert_bound ia s)") unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix x c and dir :: "('i,'a) Direction" assume "dir = Positive \ dir = Negative" "a = LE dir x c" "\\<^sub>u\<^sub>b (lt dir) c (UB dir s x)" then show "?P s" unfolding atoms_equiv_bounds.simps satisfies_atom_set_def satisfies_bounds.simps by auto (erule_tac x=x in allE, force simp add: bound_compare_defs)+ next fix x c and dir :: "('i,'a) Direction" let ?s' = "set_unsat [i, ((LI dir) s x)] (update\\ (UBI_upd dir) i x c s)" assume "dir = Positive \ dir = Negative" "a = LE dir x c" "\ (\\<^sub>u\<^sub>b (lt dir) c (UB dir s x))" then show "?P ?s'" unfolding set_unsat_bounds_id using atoms_equiv_bounds_extend[of dir c s x ats i] by auto next fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "dir = Positive \ dir = Negative" "a = LE dir x c" "\ (\\<^sub>u\<^sub>b (lt dir) c (UB dir s x))" then have "?P ?s'" using atoms_equiv_bounds_extend[of dir c s x ats i] by auto then show "?P ?s'" "?P ?s'" by simp_all next fix x c and dir :: "('i,'a) Direction" let ?s = "update\\ (UBI_upd dir) i x c s" let ?s' = "update x c ?s" assume *: "dir = Positive \ dir = Negative" "a = LE dir x c" "\ (\\<^sub>u\<^sub>b (lt dir) c (UB dir s x))" "x \ lvars (\ s)" then have "\ (\ ?s)" "\ ?s" "x \ lvars (\ ?s)" using \\ (\ s)\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ \\ s\ by (auto simp: tableau_valuated_def) from update_bounds_id[OF this, of c] have "\\<^sub>i ?s' = \\<^sub>i ?s" by blast then have id: "\ ?s' = \ ?s" unfolding boundsl_def boundsu_def by auto show "?P ?s'" unfolding id \a = LE dir x c\ by (intro impI atoms_equiv_bounds_extend[rule_format] *(1,3)) qed simp_all } then show "flat ats \ \ s \ flat (ats \ {ia}) \ \ (assert_bound ia s)" unfolding ia by auto next fix s :: "('i,'a) state" and ats and ia :: "('i,'a) i_atom" obtain i a where ia: "ia = (i,a)" by force assume "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" have *: "\ dir x c s. dir = Positive \ dir = Negative \ \ (update\\ (UBI_upd dir) i x c s) = \ s" "\ s y I . \ (set_unsat I s) = \ s" by (auto simp add: tableau_valuated_def) show "\ (assert_bound ia s)" (is "?P (assert_bound ia s)") proof- let ?P' = "\ lt UBI LBI UB LB UB_upd UI LI LE GE s'. \ s'" let ?P'' = "\ dir. ?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" show ?thesis unfolding ia proof (rule assert_bound_cases[of _ _ ?P']) fix x c and dir :: "('i,'a) Direction" let ?s' = "update\\ (UBI_upd dir) i x c s" assume "dir = Positive \ dir = Negative" then have "\ ?s'" using *(1)[of dir x c s] \\ s\ by simp then show "\ (set_unsat [i, ((LI dir) s x)] ?s')" using *(2) by auto next fix x c and dir :: "('i,'a) Direction" assume *: "x \ lvars (\ s)" "dir = Positive \ dir = Negative" let ?s = "update\\ (UBI_upd dir) i x c s" let ?s' = "update x c ?s" from * show "\ ?s'" using \\ (\ s)\ \\ s\ using update_tableau_valuated[of ?s x c] by (auto simp add: tableau_valuated_def) qed (insert \\ s\ *(1), auto) qed next fix s :: "('i,'a) state" and as and ia :: "('i,'a) i_atom" obtain i a where ia: "ia = (i,a)" by force assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" and valid: "index_valid as s" have id: "\ dir x c s. dir = Positive \ dir = Negative \ \ (update\\ (UBI_upd dir) i x c s) = \ s" "\ s y I. 
\ (set_unsat I s) = \ s" by (auto simp add: tableau_valuated_def) let ?I = "insert (i,a) as" define I where "I = ?I" from index_valid_mono[OF _ valid] have valid: "index_valid I s" unfolding I_def by auto have I: "(i,a) \ I" unfolding I_def by auto let ?P = "\ s. index_valid I s" let ?P' = "\ (lt :: 'a \ 'a \ bool) (UBI :: ('i,'a) state \ ('i,'a) bounds_index) (LBI :: ('i,'a) state \ ('i,'a) bounds_index) (UB :: ('i,'a) state \ 'a bounds) (LB :: ('i,'a) state \ 'a bounds) (UBI_upd :: (('i,'a) bounds_index \ ('i,'a) bounds_index) \ ('i,'a) state \ ('i,'a) state) (UI :: ('i,'a) state \ 'i bound_index) (LI :: ('i,'a) state \ 'i bound_index) LE GE s'. (\ x c i. look (UBI s') x = Some (i,c) \ (i,LE (x :: var) c) \ I) \ (\ x c i. look (LBI s') x = Some (i,c) \ (i,GE (x :: nat) c) \ I)" define P where "P = ?P'" let ?P'' = "\ (dir :: ('i,'a) Direction). P (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have x: "\ s'. ?P s' = P (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = P (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" unfolding satisfies_bounds_set.simps in_bounds.simps bound_compare_defs index_valid_def P_def by (auto split: option.split simp: indexl_def indexu_def boundsl_def boundsu_def) from valid have P'': "dir = Positive \ dir = Negative \ ?P'' dir s" for dir using x[of s] xx[of s] by auto have UTrue: "dir = Positive \ dir = Negative \ ?P'' dir s \ ?P'' dir (set_unsat I s)" for dir s I unfolding P_def by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) have updateIB: "a = LE dir x c \ dir = Positive \ dir = Negative \ ?P'' dir s \ ?P'' dir (update\\ (UBI_upd dir) i x c s)" for dir x c s unfolding P_def using I by (auto split: if_splits simp: simp: boundsl_def boundsu_def indexl_def indexu_def) show "index_valid (insert ia as) (assert_bound ia s)" unfolding ia I_def[symmetric] proof ((rule assert_bound_cases[of _ _ P]; (intro UTrue x xx updateIB P'')?)) fix x c and dir :: "('i,'a) Direction" assume **: "dir = Positive \ dir = Negative" "a = LE dir x c" "x \ lvars (\ s)" let ?s = "(update\\ (UBI_upd dir) i x c s)" define s' where "s' = ?s" have 1: "\ (\ ?s)" using * ** by auto have 2: "\ ?s" using id(1) ** * \\ s\ by auto have 3: "x \ lvars (\ ?s)" using id(1) ** * \\ s\ by auto have "?P'' dir ?s" using ** by (intro updateIB P'') auto with update_id[of ?s x c, OF 1 2 3, unfolded Let_def] **(1) show "P (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) (update x c (update\\ (UBI_upd dir) i x c s))" unfolding P_def s'_def[symmetric] by auto qed auto next fix s and ia :: "('i,'a) i_atom" and ats :: "('i,'a) i_atom set" assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ (\ s)" "\ s" "\ s" and ats: "ats \\<^sub>i \\ s" obtain i a where ia: "ia = (i,a)" by force have id: "\ dir x c s. dir = Positive \ dir = Negative \ \ (update\\ (UBI_upd dir) i x c s) = \ s" "\ s I. \ (set_unsat I s) = \ s" by (auto simp add: tableau_valuated_def) have idlt: "(c < (a :: 'a) \ c = a) = (c \ a)" "(a < c \ c = a) = (c \ a)" for a c by auto define A where "A = insert (i,a) ats" let ?P = "\ (s :: ('i,'a) state). 
A \\<^sub>i \\ s" let ?Q = "\ bs (lt :: 'a \ 'a \ bool) (UBI :: ('i,'a) state \ ('i,'a) bounds_index) (LBI :: ('i,'a) state \ ('i,'a) bounds_index) (UB :: ('i,'a) state \ 'a bounds) (LB :: ('i,'a) state \ 'a bounds) (UBI_upd :: (('i,'a) bounds_index \ ('i,'a) bounds_index) \ ('i,'a) state \ ('i,'a) state) UI LI (LE :: nat \ 'a \ 'a atom) (GE :: nat \ 'a \ 'a atom) s'. (\ I v. (I :: 'i set,v) \\<^sub>i\<^sub>a\<^sub>s bs \ ((\ x c. LB s' x = Some c \ LI s' x \ I \ lt c (v x) \ c = v x) \ (\ x c. UB s' x = Some c \ UI s' x \ I \ lt (v x) c \ v x = c)))" define Q where "Q = ?Q" let ?P' = "Q A" have equiv: "bs \\<^sub>i \\ s' \ Q bs (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" "bs \\<^sub>i \\ s' \ Q bs (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" for bs s' unfolding satisfies_bounds_set.simps in_bounds.simps bound_compare_defs index_valid_def Q_def atoms_imply_bounds_index.simps by (atomize(full), (intro conjI iff_exI allI arg_cong2[of _ _ _ _ "(\)"] refl iff_allI arg_cong2[of _ _ _ _ "(=)"]; unfold satisfies_bounds_index.simps idlt), auto) have x: "\ s'. ?P s' = ?P' (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s'" and xx: "\ s'. ?P s' = ?P' (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s'" using equiv by blast+ from ats equiv[of ats s] have Q_ats: "Q ats (<) \\<^sub>i\<^sub>u \\<^sub>i\<^sub>l \\<^sub>u \\<^sub>l \\<^sub>i\<^sub>u_update \\<^sub>u \\<^sub>l Leq Geq s" "Q ats (>) \\<^sub>i\<^sub>l \\<^sub>i\<^sub>u \\<^sub>l \\<^sub>u \\<^sub>i\<^sub>l_update \\<^sub>l \\<^sub>u Geq Leq s" by auto let ?P'' = "\ (dir :: ('i,'a) Direction). 
?P' (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir)" have P_upd: "dir = Positive \ dir = Negative \ ?P'' dir (set_unsat I s) = ?P'' dir s" for s I dir unfolding Q_def by (intro iff_exI arg_cong2[of _ _ _ _ "(\)"] refl iff_allI arg_cong2[of _ _ _ _ "(=)"] arg_cong2[of _ _ _ _ "(\)"], auto simp: boundsl_def boundsu_def indexl_def indexu_def) have P_upd: "dir = Positive \ dir = Negative \ ?P'' dir s \ ?P'' dir (set_unsat I s)" for s I dir using P_upd[of dir] by blast have ats_sub: "ats \ A" unfolding A_def by auto { fix x c and dir :: "('i,'a) Direction" assume dir: "dir = Positive \ dir = Negative" and a: "a = LE dir x c" from Q_ats dir have Q_ats: "Q ats (lt dir) (UBI dir) (LBI dir) (UB dir) (LB dir) (UBI_upd dir) (UI dir) (LI dir) (LE dir) (GE dir) s" by auto have "?P'' dir (update\\ (UBI_upd dir) i x c s)" unfolding Q_def proof (intro allI impI conjI) fix I v y d assume IvA: "(I, v) \\<^sub>i\<^sub>a\<^sub>s A" from i_satisfies_atom_set_mono[OF ats_sub this] have "(I, v) \\<^sub>i\<^sub>a\<^sub>s ats" by auto from Q_ats[unfolded Q_def, rule_format, OF this] have s_bnds: "LB dir s x = Some c \ LI dir s x \ I \ lt dir c (v x) \ c = v x" "UB dir s x = Some c \ UI dir s x \ I \ lt dir (v x) c \ v x = c" for x c by auto from IvA[unfolded A_def, unfolded i_satisfies_atom_set.simps satisfies_atom_set_def, simplified] have va: "i \ I \ v \\<^sub>a a" by auto with a dir have vc: "i \ I \ lt dir (v x) c \ v x = c" by auto let ?s = "(update\\ (UBI_upd dir) i x c s)" show "LB dir ?s y = Some d \ LI dir ?s y \ I \ lt dir d (v y) \ d = v y" "UB dir ?s y = Some d \ UI dir ?s y \ I \ lt dir (v y) d \ v y = d" proof (atomize(full), goal_cases) case 1 consider (main) "y = x" "UI dir ?s x = i" | (easy1) "x \ y" | (easy2) "x = y" "UI dir ?s y \ i" by blast then show ?case proof cases case easy1 then show ?thesis using s_bnds[of y d] dir by (fastforce simp: boundsl_def boundsu_def indexl_def indexu_def) next case easy2 then show ?thesis using s_bnds[of y d] dir by (fastforce simp: boundsl_def boundsu_def indexl_def indexu_def) next case main note s_bnds = s_bnds[of x] show ?thesis unfolding main using s_bnds dir vc by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) qed qed qed } note main = this have Ps: "dir = Positive \ dir = Negative \ ?P'' dir s" for dir using Q_ats unfolding Q_def using i_satisfies_atom_set_mono[OF ats_sub] by fastforce have "?P (assert_bound (i,a) s)" proof ((rule assert_bound_cases[of _ _ ?P']; (intro x xx P_upd main Ps)?)) fix c x and dir :: "('i,'a) Direction" assume **: "dir = Positive \ dir = Negative" "a = LE dir x c" "x \ lvars (\ s)" let ?s = "update\\ (UBI_upd dir) i x c s" define s' where "s' = ?s" from main[OF **(1-2)] have P: "?P'' dir s'" unfolding s'_def . have 1: "\ (\ ?s)" using * ** by auto have 2: "\ ?s" using id(1) ** * \\ s\ by auto have 3: "x \ lvars (\ ?s)" using id(1) ** * \\ s\ by auto have "\ (\ s')" "\ s'" "x \ lvars (\ s')" using 1 2 3 unfolding s'_def by auto from update_bounds_id[OF this, of c] **(1) have "?P'' dir (update x c s') = ?P'' dir s'" unfolding Q_def by (intro iff_allI arg_cong2[of _ _ _ _ "(\)"] arg_cong2[of _ _ _ _ "(\)"] refl, auto) with P show "?P'' dir (update x c ?s)" unfolding s'_def by blast qed auto then show "insert ia ats \\<^sub>i \\ (assert_bound ia s)" unfolding ia A_def by blast qed text \Pivoting the tableau can be reduced to pivoting single equations, and substituting variable by polynomials. 
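For intuition, the two operations admit a purely illustrative Haskell sketch on a naive representation of linear polynomials as finite maps from variables to their (non-zero) rational coefficients; this representation and all names below are assumptions of the sketch and are unrelated to the executable code generated from the formalization.

@{text[display] "
import qualified Data.Map as M

type Var  = Int
type Poly = M.Map Var Rational   -- variable to non-zero coefficient
type Eqn  = (Var, Poly)          -- lhs variable and rhs polynomial

-- Solve  x_i = a * x_j + rest  for x_j, assuming x_j occurs in the rhs (a /= 0).
pivotEq :: Var -> Eqn -> Eqn
pivotEq xj (xi, p) =
  let a    = p M.! xj
      rest = M.delete xj p
  in (xj, M.insert xi (1 / a) (M.map (negate . (/ a)) rest))

-- Replace x_j inside lp by the polynomial lp', dropping coefficients that cancel.
substVar :: Var -> Poly -> Poly -> Poly
substVar xj lp' lp = case M.lookup xj lp of
  Nothing -> lp
  Just a  -> M.filter (/= 0) (M.unionWith (+) (M.delete xj lp) (M.map (a *) lp'))
"}

Pivoting the whole tableau then amounts to applying the first operation to the equation of the chosen lhs variable and the second one to the right-hand sides of all remaining equations.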
These operations are specified by:\ locale PivotEq = fixes pivot_eq::"eq \ var \ eq" assumes \ \Lhs var of @{text eq} and @{text x\<^sub>j} are swapped, while the other variables do not change sides.\ vars_pivot_eq:" \x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ let eq' = pivot_eq eq x\<^sub>j in lhs eq' = x\<^sub>j \ rvars_eq eq' = {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" and \ \Pivoting keeps the equation equisatisfiable.\ equiv_pivot_eq: "\x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ (v::'a::lrv valuation) \\<^sub>e pivot_eq eq x\<^sub>j \ v \\<^sub>e eq" begin lemma lhs_pivot_eq: "\x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ lhs (pivot_eq eq x\<^sub>j) = x\<^sub>j" using vars_pivot_eq by (simp add: Let_def) lemma rvars_pivot_eq: "\x\<^sub>j \ rvars_eq eq; lhs eq \ rvars_eq eq \ \ rvars_eq (pivot_eq eq x\<^sub>j) = {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" using vars_pivot_eq by (simp add: Let_def) end abbreviation doublesub (" _ \s _ \s _" [50,51,51] 50) where "doublesub a b c \ a \ b \ b \ c" locale SubstVar = fixes subst_var::"var \ linear_poly \ linear_poly \ linear_poly" assumes \ \Effect of @{text "subst_var x\<^sub>j lp' lp"} on @{text lp} variables.\ vars_subst_var': "(vars lp - {x\<^sub>j}) - vars lp' \s vars (subst_var x\<^sub>j lp' lp) \s (vars lp - {x\<^sub>j}) \ vars lp'"and subst_no_effect: "x\<^sub>j \ vars lp \ subst_var x\<^sub>j lp' lp = lp" and subst_with_effect: "x\<^sub>j \ vars lp \ x \ vars lp' - vars lp \ x \ vars (subst_var x\<^sub>j lp' lp)" and \ \Effect of @{text "subst_var x\<^sub>j lp' lp"} on @{text lp} value.\ equiv_subst_var: "(v::'a :: lrv valuation) x\<^sub>j = lp' \v\ \ lp \v\ = (subst_var x\<^sub>j lp' lp) \v\" begin lemma vars_subst_var: "vars (subst_var x\<^sub>j lp' lp) \ (vars lp - {x\<^sub>j}) \ vars lp'" using vars_subst_var' by simp lemma vars_subst_var_supset: "vars (subst_var x\<^sub>j lp' lp) \ (vars lp - {x\<^sub>j}) - vars lp'" using vars_subst_var' by simp definition subst_var_eq :: "var \ linear_poly \ eq \ eq" where "subst_var_eq v lp' eq \ (lhs eq, subst_var v lp' (rhs eq))" lemma rvars_eq_subst_var_eq: shows "rvars_eq (subst_var_eq x\<^sub>j lp eq) \ (rvars_eq eq - {x\<^sub>j}) \ vars lp" unfolding subst_var_eq_def by (auto simp add: vars_subst_var) lemma rvars_eq_subst_var_eq_supset: "rvars_eq (subst_var_eq x\<^sub>j lp eq) \ (rvars_eq eq) - {x\<^sub>j} - (vars lp)" unfolding subst_var_eq_def by (simp add: vars_subst_var_supset) lemma equiv_subst_var_eq: assumes "(v::'a valuation) \\<^sub>e (x\<^sub>j, lp')" shows "v \\<^sub>e eq \ v \\<^sub>e subst_var_eq x\<^sub>j lp' eq" using assms unfolding subst_var_eq_def unfolding satisfies_eq_def using equiv_subst_var[of v x\<^sub>j lp' "rhs eq"] by auto end locale Pivot' = EqForLVar + PivotEq + SubstVar begin definition pivot_tableau' :: "var \ var \ tableau \ tableau" where "pivot_tableau' x\<^sub>i x\<^sub>j t \ let x\<^sub>i_idx = eq_idx_for_lvar t x\<^sub>i; eq = t ! x\<^sub>i_idx; eq' = pivot_eq eq x\<^sub>j in map (\ idx. if idx = x\<^sub>i_idx then eq' else subst_var_eq x\<^sub>j (rhs eq') (t ! idx) ) [0.. var \ ('i,'a::lrv) state \ ('i,'a) state" where "pivot' x\<^sub>i x\<^sub>j s \ \_update (pivot_tableau' x\<^sub>i x\<^sub>j (\ s)) s" text\\noindent Then, the next implementation of \pivot\ satisfies its specification:\ definition pivot_tableau :: "var \ var \ tableau \ tableau" where "pivot_tableau x\<^sub>i x\<^sub>j t \ let eq = eq_for_lvar t x\<^sub>i; eq' = pivot_eq eq x\<^sub>j in map (\ e. 
if lhs e = lhs eq then eq' else subst_var_eq x\<^sub>j (rhs eq') e) t" definition pivot :: "var \ var \ ('i,'a::lrv) state \ ('i,'a) state" where "pivot x\<^sub>i x\<^sub>j s \ \_update (pivot_tableau x\<^sub>i x\<^sub>j (\ s)) s" lemma pivot_tableau'pivot_tableau: assumes "\ t" "x\<^sub>i \ lvars t" shows "pivot_tableau' x\<^sub>i x\<^sub>j t = pivot_tableau x\<^sub>i x\<^sub>j t" proof- let ?f = "\idx. if idx = eq_idx_for_lvar t x\<^sub>i then pivot_eq (t ! eq_idx_for_lvar t x\<^sub>i) x\<^sub>j else subst_var_eq x\<^sub>j (rhs (pivot_eq (t ! eq_idx_for_lvar t x\<^sub>i) x\<^sub>j)) (t ! idx)" let ?f' = "\e. if lhs e = lhs (eq_for_lvar t x\<^sub>i) then pivot_eq (eq_for_lvar t x\<^sub>i) x\<^sub>j else subst_var_eq x\<^sub>j (rhs (pivot_eq (eq_for_lvar t x\<^sub>i) x\<^sub>j)) e" have "\ i < length t. ?f' (t ! i) = ?f i" proof(safe) fix i assume "i < length t" then have "t ! i \ set t" "i < length t" by auto moreover have "t ! eq_idx_for_lvar t x\<^sub>i \ set t" "eq_idx_for_lvar t x\<^sub>i < length t" using eq_for_lvar[of x\<^sub>i t] \x\<^sub>i \ lvars t\ eq_idx_for_lvar[of x\<^sub>i t] by (auto simp add: eq_for_lvar_def) ultimately have "lhs (t ! i) = lhs (t ! eq_idx_for_lvar t x\<^sub>i) \ t ! i = t ! (eq_idx_for_lvar t x\<^sub>i)" "distinct t" using \\ t\ unfolding normalized_tableau_def by (auto simp add: distinct_map inj_on_def) then have "lhs (t ! i) = lhs (t ! eq_idx_for_lvar t x\<^sub>i) \ i = eq_idx_for_lvar t x\<^sub>i" using \i < length t\ \eq_idx_for_lvar t x\<^sub>i < length t\ by (auto simp add: distinct_conv_nth) then show "?f' (t ! i) = ?f i" by (auto simp add: eq_for_lvar_def) qed then show "pivot_tableau' x\<^sub>i x\<^sub>j t = pivot_tableau x\<^sub>i x\<^sub>j t" unfolding pivot_tableau'_def pivot_tableau_def unfolding Let_def by (auto simp add: map_reindex) qed lemma pivot'pivot: fixes s :: "('i,'a::lrv)state" assumes "\ (\ s)" "x\<^sub>i \ lvars (\ s)" shows "pivot' x\<^sub>i x\<^sub>j s = pivot x\<^sub>i x\<^sub>j s" using pivot_tableau'pivot_tableau[OF assms] unfolding pivot_def pivot'_def by auto end sublocale Pivot' < Pivot eq_idx_for_lvar pivot proof fix s::"('i,'a) state" and x\<^sub>i x\<^sub>j and v::"'a valuation" assume "\ (\ s)" "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" show "let s' = pivot x\<^sub>i x\<^sub>j s in \ s' = \ s \ \\<^sub>i s' = \\<^sub>i s \ \ s' = \ s \ \\<^sub>c s' = \\<^sub>c s" unfolding pivot_def by (auto simp add: Let_def simp: boundsl_def boundsu_def indexl_def indexu_def) let ?t = "\ s" let ?idx = "eq_idx_for_lvar ?t x\<^sub>i" let ?eq = "?t ! ?idx" let ?eq' = "pivot_eq ?eq x\<^sub>j" have "?idx < length ?t" "lhs (?t ! 
?idx) = x\<^sub>i" using \x\<^sub>i \ lvars ?t\ using eq_idx_for_lvar by auto have "distinct (map lhs ?t)" using \\ ?t\ unfolding normalized_tableau_def by simp have "x\<^sub>j \ rvars_eq ?eq" using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ unfolding eq_for_lvar_def by simp then have "x\<^sub>j \ rvars ?t" using \?idx < length ?t\ using in_set_conv_nth[of ?eq ?t] by (auto simp add: rvars_def) then have "x\<^sub>j \ lvars ?t" using \\ ?t\ unfolding normalized_tableau_def by auto have "x\<^sub>i \ rvars ?t" using \x\<^sub>i \ lvars ?t\ \\ ?t\ unfolding normalized_tableau_def rvars_def by auto then have "x\<^sub>i \ rvars_eq ?eq" unfolding rvars_def using \?idx < length ?t\ using in_set_conv_nth[of ?eq ?t] by auto have "x\<^sub>i \ x\<^sub>j" using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ rvars_eq ?eq\ by auto have "?eq' = (x\<^sub>j, rhs ?eq')" using lhs_pivot_eq[of x\<^sub>j ?eq] using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ \lhs (?t ! ?idx) = x\<^sub>i\ \x\<^sub>i \ rvars_eq ?eq\ by (auto simp add: eq_for_lvar_def) (cases "?eq'", simp)+ let ?I1 = "[0..?idx < length ?t\ by (rule interval_3split) then have map_lhs_pivot: "map lhs (\ (pivot' x\<^sub>i x\<^sub>j s)) = map (\idx. lhs (?t ! idx)) ?I1 @ [x\<^sub>j] @ map (\idx. lhs (?t ! idx)) ?I2" using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ \lhs (?t ! ?idx) = x\<^sub>i\ \x\<^sub>i \ rvars_eq ?eq\ by (auto simp add: Let_def subst_var_eq_def eq_for_lvar_def lhs_pivot_eq pivot'_def pivot_tableau'_def) have lvars_pivot: "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" proof- have "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = {x\<^sub>j} \ (\idx. lhs (?t ! idx)) ` ({0..?idx < length ?t\ \?eq' = (x\<^sub>j, rhs ?eq')\ by (cases ?eq', auto simp add: Let_def pivot'_def pivot_tableau'_def lvars_def subst_var_eq_def)+ also have "... = {x\<^sub>j} \ (((\idx. lhs (?t ! idx)) ` {0..?idx < length ?t\ \distinct (map lhs ?t)\ by (auto simp add: distinct_conv_nth) also have "... = {x\<^sub>j} \ (set (map lhs ?t) - {x\<^sub>i})" using \lhs (?t ! ?idx) = x\<^sub>i\ by (auto simp add: in_set_conv_nth rev_image_eqI) (auto simp add: image_def) finally show "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" by (simp add: lvars_def) qed moreover have rvars_pivot: "rvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" proof- have "rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})" using rvars_pivot_eq[of x\<^sub>j ?eq] using \lhs (?t ! ?idx) = x\<^sub>i\ using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ rvars_eq ?eq\ by simp let ?S1 = "rvars_eq ?eq'" let ?S2 = "\idx\({0..j (rhs ?eq') (?t ! idx))" have "rvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = ?S1 \ ?S2" unfolding pivot'_def pivot_tableau'_def rvars_def using \?idx < length ?t\ by (auto simp add: Let_def split: if_splits) also have "... = {x\<^sub>i} \ (rvars ?t - {x\<^sub>j})" (is "?S1 \ ?S2 = ?rhs") proof show "?S1 \ ?S2 \ ?rhs" proof- have "?S1 \ ?rhs" using \?idx < length ?t\ unfolding rvars_def using \rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})\ by (force simp add: in_set_conv_nth) moreover have "?S2 \ ?rhs" proof- have "?S2 \ (\idx\{0..j}) \ rvars_eq ?eq')" apply (rule UN_mono) using rvars_eq_subst_var_eq by auto also have "... \ rvars_eq ?eq' \ (\idx\{0..j})" by auto also have "... 
= rvars_eq ?eq' \ (rvars ?t - {x\<^sub>j})" unfolding rvars_def by (force simp add: in_set_conv_nth) finally show ?thesis using \rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})\ unfolding rvars_def using \?idx < length ?t\ by auto qed ultimately show ?thesis by simp qed next show "?rhs \ ?S1 \ ?S2" proof fix x assume "x \ ?rhs" show "x \ ?S1 \ ?S2" proof (cases "x \ rvars_eq ?eq'") case True then show ?thesis by auto next case False let ?S2' = "\idx\({0..j}) - rvars_eq ?eq'" have "x \ ?S2'" using False \x \ ?rhs\ using \rvars_eq ?eq' = {x\<^sub>i} \ (rvars_eq ?eq - {x\<^sub>j})\ unfolding rvars_def by (force simp add: in_set_conv_nth) moreover have "?S2 \ ?S2'" apply (rule UN_mono) using rvars_eq_subst_var_eq_supset[of _ x\<^sub>j "rhs ?eq'" ] by auto ultimately show ?thesis by auto qed qed qed ultimately show ?thesis by simp qed ultimately show "let s' = pivot x\<^sub>i x\<^sub>j s in rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i} \ lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" using pivot'pivot[where ?'i = 'i] using \\ (\ s)\ \x\<^sub>i \ lvars (\ s)\ by (simp add: Let_def) have "\ (\ (pivot' x\<^sub>i x\<^sub>j s))" unfolding normalized_tableau_def proof have "lvars (\ (pivot' x\<^sub>i x\<^sub>j s)) \ rvars (\ (pivot' x\<^sub>i x\<^sub>j s)) = {}" (is ?g1) using \\ (\ s)\ unfolding normalized_tableau_def using lvars_pivot rvars_pivot using \x\<^sub>i \ x\<^sub>j\ by auto moreover have "0 \ rhs ` set (\ (pivot' x\<^sub>i x\<^sub>j s))" (is ?g2) proof let ?eq = "eq_for_lvar (\ s) x\<^sub>i" from eq_for_lvar[OF \x\<^sub>i \ lvars (\ s)\] have "?eq \ set (\ s)" and var: "lhs ?eq = x\<^sub>i" by auto have "lhs ?eq \ rvars_eq ?eq" using \\ (\ s)\ \?eq \ set (\ s)\ using \x\<^sub>i \ rvars_eq (\ s ! eq_idx_for_lvar (\ s) x\<^sub>i)\ eq_for_lvar_def var by auto from vars_pivot_eq[OF \x\<^sub>j \ rvars_eq ?eq\ this] have vars_pivot: "lhs (pivot_eq ?eq x\<^sub>j) = x\<^sub>j" "rvars_eq (pivot_eq ?eq x\<^sub>j) = {lhs (eq_for_lvar (\ s) x\<^sub>i)} \ (rvars_eq (eq_for_lvar (\ s) x\<^sub>i) - {x\<^sub>j})" unfolding Let_def by auto from vars_pivot(2) have rhs_pivot0: "rhs (pivot_eq ?eq x\<^sub>j) \ 0" using vars_zero by auto assume "0 \ rhs ` set (\ (pivot' x\<^sub>i x\<^sub>j s))" from this[unfolded pivot'pivot[OF \\ (\ s)\ \x\<^sub>i \ lvars (\ s)\] pivot_def] have "0 \ rhs ` set (pivot_tableau x\<^sub>i x\<^sub>j (\ s))" by simp from this[unfolded pivot_tableau_def Let_def var, unfolded var] rhs_pivot0 obtain e where "e \ set (\ s)" "lhs e \ x\<^sub>i" and rvars_eq: "rvars_eq (subst_var_eq x\<^sub>j (rhs (pivot_eq ?eq x\<^sub>j)) e) = {}" by (auto simp: vars_zero) from rvars_eq[unfolded subst_var_eq_def] have empty: "vars (subst_var x\<^sub>j (rhs (pivot_eq ?eq x\<^sub>j)) (rhs e)) = {}" by auto show False proof (cases "x\<^sub>j \ vars (rhs e)") case False from empty[unfolded subst_no_effect[OF False]] have "rvars_eq e = {}" by auto hence "rhs e = 0" using zero_coeff_zero coeff_zero by auto with \e \ set (\ s)\ \\ (\ s)\ show False unfolding normalized_tableau_def by auto next case True from \e \ set (\ s)\ have "rvars_eq e \ rvars (\ s)" unfolding rvars_def by auto hence "x\<^sub>i \ vars (rhs (pivot_eq ?eq x\<^sub>j)) - rvars_eq e" unfolding vars_pivot(2) var using \\ (\ s)\[unfolded normalized_tableau_def] \x\<^sub>i \ lvars (\ s)\ by auto from subst_with_effect[OF True this] rvars_eq show ?thesis by (simp add: subst_var_eq_def) qed qed ultimately show "?g1 \ ?g2" .. 
show "distinct (map lhs (\ (pivot' x\<^sub>i x\<^sub>j s)))" using map_parametrize_idx[of lhs ?t] using map_lhs_pivot using \distinct (map lhs ?t)\ using interval_3split[of ?idx "length ?t"] \?idx < length ?t\ using \x\<^sub>j \ lvars ?t\ unfolding lvars_def by auto qed moreover have "v \\<^sub>t ?t = v \\<^sub>t \ (pivot' x\<^sub>i x\<^sub>j s)" unfolding satisfies_tableau_def proof assume "\e\set (?t). v \\<^sub>e e" show "\e\set (\ (pivot' x\<^sub>i x\<^sub>j s)). v \\<^sub>e e" proof- have "v \\<^sub>e ?eq'" using \x\<^sub>i \ rvars_eq ?eq\ using \?idx < length ?t\ \\e\set (?t). v \\<^sub>e e\ using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ lvars ?t\ by (simp add: equiv_pivot_eq eq_idx_for_lvar) moreover { fix idx assume "idx < length ?t" "idx \ ?idx" have "v \\<^sub>e subst_var_eq x\<^sub>j (rhs ?eq') (?t ! idx)" using \?eq' = (x\<^sub>j, rhs ?eq')\ using \v \\<^sub>e ?eq'\ \idx < length ?t\ \\e\set (?t). v \\<^sub>e e\ using equiv_subst_var_eq[of v x\<^sub>j "rhs ?eq'" "?t ! idx"] by auto } ultimately show ?thesis by (auto simp add: pivot'_def pivot_tableau'_def Let_def) qed next assume "\e\set (\ (pivot' x\<^sub>i x\<^sub>j s)). v \\<^sub>e e" then have "v \\<^sub>e ?eq'" "\ idx. \idx < length ?t; idx \ ?idx \ \ v \\<^sub>e subst_var_eq x\<^sub>j (rhs ?eq') (?t ! idx)" using \?idx < length ?t\ unfolding pivot'_def pivot_tableau'_def by (auto simp add: Let_def) show "\e\set (\ s). v \\<^sub>e e" proof- { fix idx assume "idx < length ?t" have "v \\<^sub>e (?t ! idx)" proof (cases "idx = ?idx") case True then show ?thesis using \v \\<^sub>e ?eq'\ using \x\<^sub>j \ rvars_eq ?eq\ \x\<^sub>i \ lvars ?t\ \x\<^sub>i \ rvars_eq ?eq\ by (simp add: eq_idx_for_lvar equiv_pivot_eq) next case False then show ?thesis using \idx < length ?t\ using \\idx < length ?t; idx \ ?idx \ \ v \\<^sub>e subst_var_eq x\<^sub>j (rhs ?eq') (?t ! idx)\ using \v \\<^sub>e ?eq'\ \?eq' = (x\<^sub>j, rhs ?eq')\ using equiv_subst_var_eq[of v x\<^sub>j "rhs ?eq'" "?t ! idx"] by auto qed } then show ?thesis by (force simp add: in_set_conv_nth) qed qed ultimately show "let s' = pivot x\<^sub>i x\<^sub>j s in v \\<^sub>t \ s = v \\<^sub>t \ s' \ \ (\ s')" using pivot'pivot[where ?'i = 'i] using \\ (\ s)\ \x\<^sub>i \ lvars (\ s)\ by (simp add: Let_def) qed subsection\Check implementation\ text\The \check\ function is called when all rhs variables are in bounds, and it checks if there is a lhs variable that is not. If there is no such variable, then satisfiability is detected and \check\ succeeds. If there is a lhs variable \x\<^sub>i\ out of its bounds, a rhs variable \x\<^sub>j\ is sought which allows pivoting with \x\<^sub>i\ and updating \x\<^sub>i\ to its violated bound. If \x\<^sub>i\ is under its lower bound it must be increased, and if \x\<^sub>j\ has a positive coefficient it must be increased so it must be under its upper bound and if it has a negative coefficient it must be decreased so it must be above its lower bound. The case when \x\<^sub>i\ is above its upper bound is symmetric (avoiding symmetries is discussed in Section \ref{sec:symmetries}). If there is no such \x\<^sub>j\, unsatisfiability is detected and \check\ fails. The procedure is recursively repeated, until it either succeeds or fails. To ensure termination, variables \x\<^sub>i\ and \x\<^sub>j\ must be chosen with respect to a fixed variable ordering. For choosing these variables auxiliary functions \min_lvar_not_in_bounds\, \min_rvar_inc\ and \min_rvar_dec\ are specified (each in its own locale). 
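Before turning to these specifications, the underlying recursion can be summarised by a purely illustrative Haskell sketch that is parameterised by the operations just described; every name and type below is a placeholder chosen for the sketch, and termination is not visible at this level: it rests on the minimality requirements imposed on the selection functions.

@{text[display] "
-- Illustrative driver only; all parameters are placeholders of this sketch.
checkLoop
  :: (s -> Bool)                      -- unsatisfiability flag
  -> (s -> Maybe Int)                 -- min_lvar_not_in_bounds
  -> (s -> Int -> Bool)               -- is x_i below its lower bound? (direction)
  -> (Bool -> s -> Int -> Maybe Int)  -- min_rvar_inc / min_rvar_dec
  -> (s -> s)                         -- set_unsat (unsat core omitted)
  -> (Bool -> Int -> Int -> s -> s)   -- pivot_and_update at the violated bound
  -> s -> s
checkLoop isUnsat pickLhs belowLb pickRhs markUnsat pivUpd = go
  where
    go s
      | isUnsat s = s
      | otherwise = case pickLhs s of
          Nothing -> s                              -- every lhs variable in bounds
          Just xi ->
            let inc = belowLb s xi                  -- increase or decrease x_i
            in case pickRhs inc s xi of
                 Nothing -> markUnsat s             -- no suitable rhs variable
                 Just xj -> go (pivUpd inc xi xj s) -- pivot, update, recurse
"}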
For, example: \ locale MinLVarNotInBounds = fixes min_lvar_not_in_bounds::"('i,'a::lrv) state \ var option" assumes min_lvar_not_in_bounds_None: "min_lvar_not_in_bounds s = None \ (\x\lvars (\ s). in_bounds x \\ s\ (\ s))" and min_lvar_not_in_bounds_Some': "min_lvar_not_in_bounds s = Some x\<^sub>i \ x\<^sub>i\lvars (\ s) \ \in_bounds x\<^sub>i \\ s\ (\ s) \ (\x\lvars (\ s). x < x\<^sub>i \ in_bounds x \\ s\ (\ s))" begin lemma min_lvar_not_in_bounds_None': "min_lvar_not_in_bounds s = None \ (\\ s\ \\<^sub>b \ s \ lvars (\ s))" unfolding satisfies_bounds_set.simps by (rule min_lvar_not_in_bounds_None) lemma min_lvar_not_in_bounds_lvars:"min_lvar_not_in_bounds s = Some x\<^sub>i \ x\<^sub>i \ lvars (\ s)" using min_lvar_not_in_bounds_Some' by simp lemma min_lvar_not_in_bounds_Some: "min_lvar_not_in_bounds s = Some x\<^sub>i \ \ in_bounds x\<^sub>i \\ s\ (\ s)" using min_lvar_not_in_bounds_Some' by simp lemma min_lvar_not_in_bounds_Some_min: "min_lvar_not_in_bounds s = Some x\<^sub>i \ (\ x \ lvars (\ s). x < x\<^sub>i \ in_bounds x \\ s\ (\ s))" using min_lvar_not_in_bounds_Some' by simp end abbreviation reasable_var where "reasable_var dir x eq s \ (coeff (rhs eq) x > 0 \ \\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x)) \ (coeff (rhs eq) x < 0 \ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x))" locale MinRVarsEq = fixes min_rvar_incdec_eq :: "('i,'a) Direction \ ('i,'a::lrv) state \ eq \ 'i list + var" assumes min_rvar_incdec_eq_None: "min_rvar_incdec_eq dir s eq = Inl is \ (\ x \ rvars_eq eq. \ reasable_var dir x eq s) \ (set is = {LI dir s (lhs eq)} \ {LI dir s x | x. x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ {UI dir s x | x. x \ rvars_eq eq \ coeff (rhs eq) x > 0}) \ ((dir = Positive \ dir = Negative) \ LI dir s (lhs eq) \ indices_state s \ set is \ indices_state s)" assumes min_rvar_incdec_eq_Some_rvars: "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j \ x\<^sub>j \ rvars_eq eq" assumes min_rvar_incdec_eq_Some_incdec: "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j \ reasable_var dir x\<^sub>j eq s" assumes min_rvar_incdec_eq_Some_min: "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j \ (\ x \ rvars_eq eq. x < x\<^sub>j \ \ reasable_var dir x eq s)" begin lemma min_rvar_incdec_eq_None': assumes *: "dir = Positive \ dir = Negative" and min: "min_rvar_incdec_eq dir s eq = Inl is" and sub: "I = set is" and Iv: "(I,v) \\<^sub>i\<^sub>b \\ s" shows "le (lt dir) ((rhs eq) \v\) ((rhs eq) \\\ s\\)" proof - have "\ x \ rvars_eq eq. \ reasable_var dir x eq s" using min using min_rvar_incdec_eq_None by simp have "\ x \ rvars_eq eq. (0 < coeff (rhs eq) x \ le (lt dir) 0 (\\ s\ x - v x)) \ (coeff (rhs eq) x < 0 \ le (lt dir) (\\ s\ x - v x) 0)" proof (safe) fix x assume x: "x \ rvars_eq eq" "0 < coeff (rhs eq) x" "0 \ \\ s\ x - v x" then have "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x))" using \\ x \ rvars_eq eq. 
\ reasable_var dir x eq s\ by auto then have "\\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x)" using * by (cases "UB dir s x") (auto simp add: bound_compare_defs) moreover from min_rvar_incdec_eq_None[OF min] x sub have "UI dir s x \ I" by auto from Iv * this have "\\<^sub>u\<^sub>b (lt dir) (v x) (UB dir s x)" unfolding satisfies_bounds_index.simps by (cases "UB dir s x", auto simp: indexl_def indexu_def boundsl_def boundsu_def bound_compare'_defs) (fastforce)+ ultimately have "le (lt dir) (v x) (\\ s\ x)" using * by (cases "UB dir s x") (auto simp add: bound_compare_defs) then show "lt dir 0 (\\ s\ x - v x)" using \0 \ \\ s\ x - v x\ * using minus_gt[of "v x" "\\ s\ x"] minus_lt[of "\\ s\ x" "v x"] by auto next fix x assume x: "x \ rvars_eq eq" "0 > coeff (rhs eq) x" "\\ s\ x - v x \ 0" then have "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x))" using \\ x \ rvars_eq eq. \ reasable_var dir x eq s\ by auto then have "\\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x)" using * by (cases "LB dir s x") (auto simp add: bound_compare_defs) moreover from min_rvar_incdec_eq_None[OF min] x sub have "LI dir s x \ I" by auto from Iv * this have "\\<^sub>l\<^sub>b (lt dir) (v x) (LB dir s x)" unfolding satisfies_bounds_index.simps by (cases "LB dir s x", auto simp: indexl_def indexu_def boundsl_def boundsu_def bound_compare'_defs) (fastforce)+ ultimately have "le (lt dir) (\\ s\ x) (v x)" using * by (cases "LB dir s x") (auto simp add: bound_compare_defs) then show "lt dir (\\ s\ x - v x) 0" using \\\ s\ x - v x \ 0\ * using minus_lt[of "\\ s\ x" "v x"] minus_gt[of "v x" "\\ s\ x"] by auto qed then have "le (lt dir) 0 (rhs eq \ \ x. \\ s\ x - v x\)" using * apply auto using valuate_nonneg[of "rhs eq" "\x. \\ s\ x - v x"] apply force using valuate_nonpos[of "rhs eq" "\x. \\ s\ x - v x"] apply force done then show "le (lt dir) rhs eq \ v \ rhs eq \ \\ s\ \" using \dir = Positive \ dir = Negative\ using minus_gt[of "rhs eq \ v \" "rhs eq \ \\ s\ \"] by (auto simp add: valuate_diff[THEN sym]) qed end locale MinRVars = EqForLVar + MinRVarsEq min_rvar_incdec_eq for min_rvar_incdec_eq :: "('i, 'a :: lrv) Direction \ _" begin abbreviation min_rvar_incdec :: "('i,'a) Direction \ ('i,'a) state \ var \ 'i list + var" where "min_rvar_incdec dir s x\<^sub>i \ min_rvar_incdec_eq dir s (eq_for_lvar (\ s) x\<^sub>i)" end locale MinVars = MinLVarNotInBounds min_lvar_not_in_bounds + MinRVars eq_idx_for_lvar min_rvar_incdec_eq for min_lvar_not_in_bounds :: "('i,'a::lrv) state \ _" and eq_idx_for_lvar and min_rvar_incdec_eq :: "('i, 'a :: lrv) Direction \ _" locale PivotUpdateMinVars = PivotAndUpdate eq_idx_for_lvar pivot_and_update + MinVars min_lvar_not_in_bounds eq_idx_for_lvar min_rvar_incdec_eq for eq_idx_for_lvar :: "tableau \ var \ nat" and min_lvar_not_in_bounds :: "('i,'a::lrv) state \ var option" and min_rvar_incdec_eq :: "('i,'a) Direction \ ('i,'a) state \ eq \ 'i list + var" and pivot_and_update :: "var \ var \ 'a \ ('i,'a) state \ ('i,'a) state" begin definition check' where "check' dir x\<^sub>i s \ let l\<^sub>i = the (LB dir s x\<^sub>i); x\<^sub>j' = min_rvar_incdec dir s x\<^sub>i in case x\<^sub>j' of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s" lemma check'_cases: assumes "\ I. \min_rvar_incdec dir s x\<^sub>i = Inl I; check' dir x\<^sub>i s = set_unsat I s\ \ P (set_unsat I s)" assumes "\ x\<^sub>j l\<^sub>i. 
\min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j; l\<^sub>i = the (LB dir s x\<^sub>i); check' dir x\<^sub>i s = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s\ \ P (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" shows "P (check' dir x\<^sub>i s)" using assms unfolding check'_def by (cases "min_rvar_incdec dir s x\<^sub>i", auto) partial_function (tailrec) check where "check s = (if \ s then s else let x\<^sub>i' = min_lvar_not_in_bounds s in case x\<^sub>i' of None \ s | Some x\<^sub>i \ let dir = if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative in check (check' dir x\<^sub>i s))" declare check.simps[code] inductive check_dom where step: "\\x\<^sub>i. \\ \ s; Some x\<^sub>i = min_lvar_not_in_bounds s; \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ \ check_dom (check' Positive x\<^sub>i s); \x\<^sub>i. \\ \ s; Some x\<^sub>i = min_lvar_not_in_bounds s; \ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ \ check_dom (check' Negative x\<^sub>i s)\ \ check_dom s" text\ The definition of \check\ can be given by: @{text[display] "check s \ if \ s then s else let x\<^sub>i' = min_lvar_not_in_bounds s in case x\<^sub>i' of None \ s | Some x\<^sub>i \ if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then check (check_inc x\<^sub>i s) else check (check_dec x\<^sub>i s)" } @{text[display] "check_inc x\<^sub>i s \ let l\<^sub>i = the (\\<^sub>l s x\<^sub>i); x\<^sub>j' = min_rvar_inc s x\<^sub>i in case x\<^sub>j' of None \ s \ \ := True \ | Some x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s" } The definition of \check_dec\ is analogous. It is shown (mainly by induction) that this definition satisfies the \check\ specification. Note that this definition uses general recursion, so its termination is non-trivial. It has been shown that it terminates for all states satisfying the check preconditions. The proof is based on the proof outline given in \cite{simplex-rad}. It is very technically involved, but conceptually uninteresting so we do not discuss it in more details.\ lemma pivotandupdate_check_precond: assumes "dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative)" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j" "l\<^sub>i = the (LB dir s x\<^sub>i)" "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" " \ s" shows "\ (\ (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)) \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s) \ \ (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s) \ \ (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" proof- have "\\<^sub>l s x\<^sub>i = Some l\<^sub>i \ \\<^sub>u s x\<^sub>i = Some l\<^sub>i" using \l\<^sub>i = the (LB dir s x\<^sub>i)\ \dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative)\ using \min_lvar_not_in_bounds s = Some x\<^sub>i\ min_lvar_not_in_bounds_Some[of s x\<^sub>i] using \\ s\ by (case_tac[!] "\\<^sub>l s x\<^sub>i", case_tac[!] 
"\\<^sub>u s x\<^sub>i") (auto simp add: bounds_consistent_def bound_compare_defs) then show ?thesis using assms using pivotandupdate_tableau_normalized[of s x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_nolhs[of s x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_bounds_consistent[of s x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_tableau_valuated[of s x\<^sub>i x\<^sub>j l\<^sub>i] by (auto simp add: min_lvar_not_in_bounds_lvars min_rvar_incdec_eq_Some_rvars) qed (* -------------------------------------------------------------------------- *) (* Termination *) (* -------------------------------------------------------------------------- *) abbreviation gt_state' where "gt_state' dir s s' x\<^sub>i x\<^sub>j l\<^sub>i \ min_lvar_not_in_bounds s = Some x\<^sub>i \ l\<^sub>i = the (LB dir s x\<^sub>i) \ min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j \ s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s" definition gt_state :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\\<^sub>x" 100) where "s \\<^sub>x s' \ \ x\<^sub>i x\<^sub>j l\<^sub>i. let dir = if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative in gt_state' dir s s' x\<^sub>i x\<^sub>j l\<^sub>i" abbreviation succ :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\" 100) where "s \ s' \ \ (\ s) \ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s \ \ s \ s \\<^sub>x s' \ \\<^sub>i s' = \\<^sub>i s \ \\<^sub>c s' = \\<^sub>c s" abbreviation succ_rel :: "('i,'a) state rel" where "succ_rel \ {(s, s'). s \ s'}" abbreviation succ_rel_trancl :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\\<^sup>+" 100) where "s \\<^sup>+ s' \ (s, s') \ succ_rel\<^sup>+" abbreviation succ_rel_rtrancl :: "('i,'a) state \ ('i,'a) state \ bool" (infixl "\\<^sup>*" 100) where "s \\<^sup>* s' \ (s, s') \ succ_rel\<^sup>*" lemma succ_vars: assumes "s \ s'" obtains x\<^sub>i x\<^sub>j where "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" "x\<^sub>j \ rvars (\ s)" "lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" "lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j] using pivotandupdate_lvars[of s x\<^sub>i x\<^sub>j] by auto moreover have "x\<^sub>j \ rvars (\ s)" using \x\<^sub>i \ lvars (\ s)\ using \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ using eq_for_lvar[of x\<^sub>i "\ s"] unfolding rvars_def by auto ultimately have "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" "x\<^sub>j \ rvars (\ s)" "lvars (\ s') = lvars (\ s) - {x\<^sub>i} \ {x\<^sub>j}" "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" by auto then show thesis .. 
qed lemma succ_vars_id: assumes "s \ s'" shows "lvars (\ s) \ rvars (\ s) = lvars (\ s') \ rvars (\ s')" using assms by (rule succ_vars) auto lemma succ_inv: assumes "s \ s'" shows "\ (\ s')" "\ s'" "\ s'" "\\<^sub>i s = \\<^sub>i s'" "(v::'a valuation) \\<^sub>t (\ s) \ v \\<^sub>t (\ s')" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then show "\ (\ s')" "\ s'" "\ s'" "\\<^sub>i s = \\<^sub>i s'" "(v::'a valuation) \\<^sub>t (\ s) \ v \\<^sub>t (\ s')" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using pivotandupdate_tableau_normalized[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_bounds_consistent[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_bounds_id[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_tableau_equiv using pivotandupdate_tableau_valuated by auto qed lemma succ_rvar_valuation_id: assumes "s \ s'" "x \ rvars (\ s)" "x \ rvars (\ s')" shows "\\ s\ x = \\ s'\ x" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then show ?thesis using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using \x \ rvars (\ s)\ \x \ rvars (\ s')\ using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j x c] by (force simp add: normalized_tableau_def map2fun_def) qed lemma succ_min_lvar_not_in_bounds: assumes "s \ s'" "xr \ lvars (\ s)" "xr \ rvars (\ s')" shows "\ in_bounds xr (\\ s\) (\ s)" "\ x \ lvars (\ s). x < xr \ in_bounds x (\\ s\) (\ s)" proof- from assms obtain x\<^sub>i x\<^sub>j c where *: "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" unfolding gt_state_def by (auto split: if_splits) then have "x\<^sub>i = xr" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using \xr \ lvars (\ s)\ \xr \ rvars (\ s')\ using pivotandupdate_rvars by (auto simp add: normalized_tableau_def) then show "\ in_bounds xr (\\ s\) (\ s)" "\ x \ lvars (\ s). 
x < xr \ in_bounds x (\\ s\) (\ s)" using \min_lvar_not_in_bounds s = Some x\<^sub>i\ using min_lvar_not_in_bounds_Some min_lvar_not_in_bounds_Some_min by simp_all qed lemma succ_min_rvar: assumes "s \ s'" "xs \ lvars (\ s)" "xs \ rvars (\ s')" "xr \ rvars (\ s)" "xr \ lvars (\ s')" "eq = eq_for_lvar (\ s) xs" and dir: "dir = Positive \ dir = Negative" shows "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs) \ reasable_var dir xr eq s \ (\ x \ rvars_eq eq. x < xr \ \ reasable_var dir x eq s)" proof- from assms(1) obtain x\<^sub>i x\<^sub>j c where"\ (\ s) \ \ s \ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "gt_state' (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative) s s' x\<^sub>i x\<^sub>j c" by (auto simp add: gt_state_def Let_def) then have "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "s' = pivot_and_update x\<^sub>i x\<^sub>j c s" and *: "(\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i \ min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j) \ (\ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j)" by (auto split: if_splits) then have "xr = x\<^sub>j" "xs = x\<^sub>i" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using \xr \ rvars (\ s)\ \xr \ lvars (\ s')\ using \xs \ lvars (\ s)\ \xs \ rvars (\ s')\ using pivotandupdate_lvars pivotandupdate_rvars by (auto simp add: normalized_tableau_def) show "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs)) \ reasable_var dir xr eq s \ (\ x \ rvars_eq eq. x < xr \ \ reasable_var dir x eq s)" proof assume "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs)" then have "\\<^sub>l\<^sub>b (lt dir) (\\ s\ xs) (LB dir s xs)" using dir by (cases "LB dir s xs") (auto simp add: bound_compare_defs) moreover then have "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s\ xs) (UB dir s xs))" using \\ s\ dir using bounds_consistent_gt_ub bounds_consistent_lt_lb by (force simp add: bound_compare''_defs) ultimately have "min_rvar_incdec dir s xs = Inr xr" using * \xr = x\<^sub>j\ \xs = x\<^sub>i\ dir by (auto simp add: bound_compare''_defs) then show "reasable_var dir xr eq s \ (\ x \ rvars_eq eq. 
x < xr \ \ reasable_var dir x eq s)" using \eq = eq_for_lvar (\ s) xs\ using min_rvar_incdec_eq_Some_min[of dir s eq xr] using min_rvar_incdec_eq_Some_incdec[of dir s eq xr] by simp qed qed lemma succ_set_on_bound: assumes "s \ s'" "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ s')" and dir: "dir = Positive \ dir = Negative" shows "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (LB dir s x\<^sub>i)" "\\ s'\ x\<^sub>i = the (\\<^sub>l s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (\\<^sub>u s x\<^sub>i)" proof- from assms(1) obtain x\<^sub>i' x\<^sub>j c where"\ (\ s) \ \ s \ \ s \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "gt_state' (if \\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' then Positive else Negative) s s' x\<^sub>i' x\<^sub>j c" by (auto simp add: gt_state_def Let_def) then have "\ (\ s)" "\ s" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i'" "s' = pivot_and_update x\<^sub>i' x\<^sub>j c s" and *: "(\\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' \ c = the (\\<^sub>l s x\<^sub>i') \ min_rvar_incdec Positive s x\<^sub>i' = Inr x\<^sub>j) \ (\ \\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' \ c = the (\\<^sub>u s x\<^sub>i') \ min_rvar_incdec Negative s x\<^sub>i' = Inr x\<^sub>j)" by (auto split: if_splits) then have "x\<^sub>i = x\<^sub>i'" "x\<^sub>i' \ lvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i')" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i'] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i'" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i'" x\<^sub>j] using \x\<^sub>i \ lvars (\ s)\ \x\<^sub>i \ rvars (\ s')\ using pivotandupdate_rvars by (auto simp add: normalized_tableau_def) show "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (LB dir s x\<^sub>i)" proof assume "\ \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)" then have "\\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)" using dir by (cases "LB dir s x\<^sub>i") (auto simp add: bound_compare_defs) moreover then have "\ \\<^sub>u\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (UB dir s x\<^sub>i)" using \\ s\ dir using bounds_consistent_gt_ub bounds_consistent_lt_lb by (force simp add: bound_compare''_defs) ultimately show "\\ s'\ x\<^sub>i = the (LB dir s x\<^sub>i)" using * \x\<^sub>i = x\<^sub>i'\ \s' = pivot_and_update x\<^sub>i' x\<^sub>j c s\ using \\ (\ s)\ \\ s\ \x\<^sub>i' \ lvars (\ s)\ \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i')\ using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] dir by (case_tac[!] "\\<^sub>l s x\<^sub>i'", case_tac[!] "\\<^sub>u s x\<^sub>i'") (auto simp add: bound_compare_defs map2fun_def) qed have "\ \\ s\ x\<^sub>i' <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i' \ \\ s\ x\<^sub>i' >\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i'" using \min_lvar_not_in_bounds s = Some x\<^sub>i'\ using min_lvar_not_in_bounds_Some[of s x\<^sub>i'] using not_in_bounds[of x\<^sub>i' "\\ s\" "\\<^sub>l s" "\\<^sub>u s"] by auto then show "\\ s'\ x\<^sub>i = the (\\<^sub>l s x\<^sub>i) \ \\ s'\ x\<^sub>i = the (\\<^sub>u s x\<^sub>i)" using \\ (\ s)\ \\ s\ \x\<^sub>i' \ lvars (\ s)\ \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i')\ using \s' = pivot_and_update x\<^sub>i' x\<^sub>j c s\ \x\<^sub>i = x\<^sub>i'\ using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j c] using * by (case_tac[!] "\\<^sub>l s x\<^sub>i'", case_tac[!] 
"\\<^sub>u s x\<^sub>i'") (auto simp add: map2fun_def bound_compare_defs) qed lemma succ_rvar_valuation: assumes "s \ s'" "x \ rvars (\ s')" shows "\\ s'\ x = \\ s\ x \ \\ s'\ x = the (\\<^sub>l s x) \ \\ s'\ x = the (\\<^sub>u s x)" proof- from assms obtain x\<^sub>i x\<^sub>j b where "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)" "s' = pivot_and_update x\<^sub>i x\<^sub>j b s" unfolding gt_state_def by (auto simp add: Let_def split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" "x\<^sub>j \ rvars (\ s)" "x\<^sub>j \ lvars (\ s)" "x\<^sub>i \ x\<^sub>j" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using rvars_of_lvar_rvars \\ (\ s)\ by (auto simp add: normalized_tableau_def) then have "rvars (\ s') = rvars (\ s) - {x\<^sub>j} \ {x\<^sub>i}" "x \ rvars (\ s) \ x = x\<^sub>i" "x \ x\<^sub>j" "x \ x\<^sub>i \ x \ lvars (\ s)" using \x \ rvars (\ s')\ using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j] using \\ (\ s)\ \\ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ by (auto simp add: normalized_tableau_def) then show ?thesis using pivotandupdate_valuation_xi[of s x\<^sub>i x\<^sub>j b] using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j x b] using \x\<^sub>i \ lvars (\ s)\ \x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ using \\ (\ s)\ \\ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ \b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)\ by (auto simp add: map2fun_def) qed lemma succ_no_vars_valuation: assumes "s \ s'" "x \ tvars (\ s')" shows "look (\ s') x = look (\ s) x" proof- from assms obtain x\<^sub>i x\<^sub>j b where "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr x\<^sub>j" "b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)" "s' = pivot_and_update x\<^sub>i x\<^sub>j b s" unfolding gt_state_def by (auto simp add: Let_def split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ s)" "x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)" "x\<^sub>j \ rvars (\ s)" "x\<^sub>j \ lvars (\ s)" "x\<^sub>i \ x\<^sub>j" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using rvars_of_lvar_rvars \\ (\ s)\ by (auto simp add: normalized_tableau_def) then show ?thesis using pivotandupdate_valuation_other_nolhs[of s x\<^sub>i x\<^sub>j x b] using \\ (\ s)\ \\ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ using \x \ tvars (\ s')\ using pivotandupdate_rvars[of s x\<^sub>i x\<^sub>j] using pivotandupdate_lvars[of s x\<^sub>i x\<^sub>j] by (auto simp add: map2fun_def) qed lemma succ_valuation_satisfies: assumes "s \ s'" "\\ s\ \\<^sub>t \ s" shows "\\ s'\ \\<^sub>t \ s'" proof- from \s \ s'\ obtain x\<^sub>i x\<^sub>j b where "\ (\ s)" "\ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "min_rvar_incdec Positive s x\<^sub>i = Inr x\<^sub>j \ min_rvar_incdec Negative s x\<^sub>i = Inr 
x\<^sub>j" "b = the (\\<^sub>l s x\<^sub>i) \ b = the (\\<^sub>u s x\<^sub>i)" "s' = pivot_and_update x\<^sub>i x\<^sub>j b s" unfolding gt_state_def by (auto simp add: Let_def split: if_splits) then have "x\<^sub>i \ lvars (\ s)" "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" using min_lvar_not_in_bounds_lvars[of s x\<^sub>i] using min_rvar_incdec_eq_Some_rvars[of Positive s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using min_rvar_incdec_eq_Some_rvars[of Negative s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] \\ (\ s)\ by (auto simp add: normalized_tableau_def) then show ?thesis using pivotandupdate_satisfies_tableau[of s x\<^sub>i x\<^sub>j b] using pivotandupdate_tableau_equiv[of s x\<^sub>i x\<^sub>j ] using \\ (\ s)\ \\ s\ \\\ s\ \\<^sub>t \ s\ \s' = pivot_and_update x\<^sub>i x\<^sub>j b s\ by auto qed lemma succ_tableau_valuated: assumes "s \ s'" "\ s" shows "\ s'" using succ_inv(2) assms by blast (* -------------------------------------------------------------------------- *) abbreviation succ_chain where "succ_chain l \ rel_chain l succ_rel" lemma succ_chain_induct: assumes *: "succ_chain l" "i \ j" "j < length l" assumes base: "\ i. P i i" assumes step: "\ i. l ! i \ (l ! (i + 1)) \ P i (i + 1)" assumes trans: "\ i j k. \P i j; P j k; i < j; j \ k\ \ P i k" shows "P i j" using * proof (induct "j - i" arbitrary: i) case 0 then show ?case by (simp add: base) next case (Suc k) have "P (i + 1) j" using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) by auto moreover have "P i (i + 1)" proof (rule step) show "l ! i \ (l ! (i + 1))" using Suc(2) Suc(3) Suc(5) unfolding rel_chain_def by auto qed ultimately show ?case using trans[of i "i + 1" j] Suc(2) by simp qed lemma succ_chain_bounds_id: assumes "succ_chain l" "i \ j" "j < length l" shows "\\<^sub>i (l ! i) = \\<^sub>i (l ! j)" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "\\<^sub>i (l ! i) = \\<^sub>i (l ! (i + 1))" by (rule succ_inv(4)) qed simp_all lemma succ_chain_vars_id': assumes "succ_chain l" "i \ j" "j < length l" shows "lvars (\ (l ! i)) \ rvars (\ (l ! i)) = lvars (\ (l ! j)) \ rvars (\ (l ! j))" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "tvars (\ (l ! i)) = tvars (\ (l ! (i + 1)))" by (rule succ_vars_id) qed simp_all lemma succ_chain_vars_id: assumes "succ_chain l" "i < length l" "j < length l" shows "lvars (\ (l ! i)) \ rvars (\ (l ! i)) = lvars (\ (l ! j)) \ rvars (\ (l ! j))" proof (cases "i \ j") case True then show ?thesis using assms succ_chain_vars_id'[of l i j] by simp next case False then have "j \ i" by simp then show ?thesis using assms succ_chain_vars_id'[of l j i] by simp qed lemma succ_chain_tableau_equiv': assumes "succ_chain l" "i \ j" "j < length l" shows "(v::'a valuation) \\<^sub>t \ (l ! i) \ v \\<^sub>t \ (l ! j)" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "v \\<^sub>t \ (l ! i) = v \\<^sub>t \ (l ! (i + 1))" by (rule succ_inv(5)) qed simp_all lemma succ_chain_tableau_equiv: assumes "succ_chain l" "i < length l" "j < length l" shows "(v::'a valuation) \\<^sub>t \ (l ! i) \ v \\<^sub>t \ (l ! j)" proof (cases "i \ j") case True then show ?thesis using assms succ_chain_tableau_equiv'[of l i j v] by simp next case False then have "j \ i" by auto then show ?thesis using assms succ_chain_tableau_equiv'[of l j i v] by simp qed lemma succ_chain_no_vars_valuation: assumes "succ_chain l" "i \ j" "j < length l" shows "\ x. x \ tvars (\ (l ! i)) \ look (\ (l ! i)) x = look (\ (l ! 
j)) x" (is "?P i j") using assms proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc k) have "?P (i + 1) j" using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) by auto moreover have "?P (i + 1) i" proof (rule+, rule succ_no_vars_valuation) show "l ! i \ (l ! (i + 1))" using Suc(2) Suc(3) Suc(5) unfolding rel_chain_def by auto qed moreover have "tvars (\ (l ! i)) = tvars (\ (l ! (i + 1)))" proof (rule succ_vars_id) show "l ! i \ (l ! (i + 1))" using Suc(2) Suc(3) Suc(5) unfolding rel_chain_def by simp qed ultimately show ?case by simp qed lemma succ_chain_rvar_valuation: assumes "succ_chain l" "i \ j" "j < length l" shows "\x\rvars (\ (l ! j)). \\ (l ! j)\ x = \\ (l ! i)\ x \ \\ (l ! j)\ x = the (\\<^sub>l (l ! i) x) \ \\ (l ! j)\ x = the (\\<^sub>u (l ! i) x)" (is "?P i j") using assms proof (induct "j - i" arbitrary: j) case 0 then show ?case by simp next case (Suc k) have "k = j - 1 - i" "succ_chain l" "i \ j - 1" "j - 1 < length l" "j > 0" using Suc(2) Suc(3) Suc(4) Suc(5) by auto then have ji: "?P i (j - 1)" using Suc(1) by simp have "l ! (j - 1) \ (l ! j)" using Suc(3) \j < length l\ \j > 0\ unfolding rel_chain_def by (erule_tac x="j - 1" in allE) simp then have jj: "?P (j - 1) j" using succ_rvar_valuation by auto obtain x\<^sub>i x\<^sub>j where vars: "x\<^sub>i \ lvars (\ (l ! (j - 1)))" "x\<^sub>j \ rvars (\ (l ! (j - 1)))" "rvars (\ (l ! j)) = rvars (\ (l ! (j - 1))) - {x\<^sub>j} \ {x\<^sub>i}" using \l ! (j - 1) \ (l ! j)\ by (rule succ_vars) simp then have bounds: "\\<^sub>l (l ! (j - 1)) = \\<^sub>l (l ! i)" "\\<^sub>l (l ! j) = \\<^sub>l (l ! i)" "\\<^sub>u (l ! (j - 1)) = \\<^sub>u (l ! i)" "\\<^sub>u (l ! j) = \\<^sub>u (l ! i)" using \succ_chain l\ using succ_chain_bounds_id[of l i "j - 1", THEN sym] \j - 1 < length l\ \i \ j - 1\ using succ_chain_bounds_id[of l "j - 1" j, THEN sym] \j < length l\ by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) show ?case proof fix x assume "x \ rvars (\ (l ! j))" then have "x \ x\<^sub>j \ x \ rvars (\ (l ! (j - 1))) \ x = x\<^sub>i" using vars by auto then show "\\ (l ! j)\ x = \\ (l ! i)\ x \ \\ (l ! j)\ x = the (\\<^sub>l (l ! i) x) \ \\ (l ! j)\ x = the (\\<^sub>u (l ! i) x)" proof assume "x \ x\<^sub>j \ x \ rvars (\ (l ! (j - 1)))" then show ?thesis using jj \x \ rvars (\ (l ! j))\ ji using bounds by force next assume "x = x\<^sub>i" then show ?thesis using succ_set_on_bound(2)[of "l ! (j - 1)" "l ! j" x\<^sub>i] \l ! (j - 1) \ (l ! j)\ using vars bounds by auto qed qed qed lemma succ_chain_valuation_satisfies: assumes "succ_chain l" "i \ j" "j < length l" shows "\\ (l ! i)\ \\<^sub>t \ (l ! i) \ \\ (l ! j)\ \\<^sub>t \ (l ! j)" using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "\\ (l ! i)\ \\<^sub>t \ (l ! i) \ \\ (l ! (i + 1))\ \\<^sub>t \ (l ! (i + 1))" using succ_valuation_satisfies by auto qed simp_all lemma succ_chain_tableau_valuated: assumes "succ_chain l" "i \ j" "j < length l" shows "\ (l ! i) \ \ (l ! j)" using assms proof(rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "\ (l ! i) \ \ (l ! (i + 1))" using succ_tableau_valuated by auto qed simp_all abbreviation swap_lr where "swap_lr l i x \ i + 1 < length l \ x \ lvars (\ (l ! i)) \ x \ rvars (\ (l ! (i + 1)))" abbreviation swap_rl where "swap_rl l i x \ i + 1 < length l \ x \ rvars (\ (l ! i)) \ x \ lvars (\ (l ! (i + 1)))" abbreviation always_r where "always_r l i j x \ \ k. i \ k \ k \ j \ x \ rvars (\ (l ! 
k))" lemma succ_chain_always_r_valuation_id: assumes "succ_chain l" "i \ j" "j < length l" shows "always_r l i j x \ \\ (l ! i)\ x = \\ (l ! j)\ x" (is "?P i j") using assms proof (rule succ_chain_induct) fix i assume "l ! i \ (l ! (i + 1))" then show "?P i (i + 1)" using succ_rvar_valuation_id by simp qed simp_all lemma succ_chain_swap_rl_exists: assumes "succ_chain l" "i < j" "j < length l" "x \ rvars (\ (l ! i))" "x \ lvars (\ (l ! j))" shows "\ k. i \ k \ k < j \ swap_rl l k x" using assms proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc k) have "l ! i \ (l ! (i + 1))" using Suc(3) Suc(4) Suc(5) unfolding rel_chain_def by auto then have "\ (\ (l ! (i + 1)))" by (rule succ_inv) show ?case proof (cases "x \ rvars (\ (l ! (i + 1)))") case True then have "j \ i + 1" using Suc(7) \\ (\ (l ! (i + 1)))\ by (auto simp add: normalized_tableau_def) have "k = j - Suc i" using Suc(2) by simp then obtain k where "k \ i + 1" "k < j" "swap_rl l k x" using \x \ rvars (\ (l ! (i + 1)))\ \j \ i + 1\ using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) Suc(6) Suc(7) by auto then show ?thesis by (rule_tac x="k" in exI) simp next case False then have "x \ lvars (\ (l ! (i + 1)))" using Suc(6) using \l ! i \ (l ! (i + 1))\ succ_vars_id by auto then show ?thesis using Suc(4) Suc(5) Suc(6) by force qed qed lemma succ_chain_swap_lr_exists: assumes "succ_chain l" "i < j" "j < length l" "x \ lvars (\ (l ! i))" "x \ rvars (\ (l ! j))" shows "\ k. i \ k \ k < j \ swap_lr l k x" using assms proof (induct "j - i" arbitrary: i) case 0 then show ?case by simp next case (Suc k) have "l ! i \ (l ! (i + 1))" using Suc(3) Suc(4) Suc(5) unfolding rel_chain_def by auto then have "\ (\ (l ! (i + 1)))" by (rule succ_inv) show ?case proof (cases "x \ lvars (\ (l ! (i + 1)))") case True then have "j \ i + 1" using Suc(7) \\ (\ (l ! (i + 1)))\ by (auto simp add: normalized_tableau_def) have "k = j - Suc i" using Suc(2) by simp then obtain k where "k \ i + 1" "k < j" "swap_lr l k x" using \x \ lvars (\ (l ! (i + 1)))\ \j \ i + 1\ using Suc(1)[of "i + 1"] Suc(2) Suc(3) Suc(4) Suc(5) Suc(6) Suc(7) by auto then show ?thesis by (rule_tac x="k" in exI) simp next case False then have "x \ rvars (\ (l ! (i + 1)))" using Suc(6) using \l ! i \ (l ! (i + 1))\ succ_vars_id by auto then show ?thesis using Suc(4) Suc(5) Suc(6) by force qed qed (* -------------------------------------------------------------------------- *) lemma finite_tableaus_aux: shows "finite {t. lvars t = L \ rvars t = V - L \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" (is "finite (?Al L)") proof (cases "?Al L = {}") case True show ?thesis by (subst True) simp next case False then have "\ t. t \ ?Al L" by auto let ?t = "SOME t. t \ ?Al L" have "?t \ ?Al L" using \\ t. t \ ?Al L\ by (rule someI_ex) have "?Al L \ {t. t <~~> ?t}" proof fix x assume "x \ ?Al L" have "x <~~> ?t" apply (rule tableau_perm) using \?t \ ?Al L\ \x \ ?Al L\ by auto then show "x \ {t. t <~~> ?t}" by simp qed moreover have "finite {t. t <~~> ?t}" by (rule perm_finite) ultimately show ?thesis by (rule finite_subset) qed lemma finite_tableaus: assumes "finite V" shows "finite {t. tvars t = V \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" (is "finite ?A") proof- let ?Al = "\ L. {t. lvars t = L \ rvars t = V - L \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" have "?A = \ (?Al ` {L. 
L \ V})" by (auto simp add: normalized_tableau_def) then show ?thesis using \finite V\ using finite_tableaus_aux by auto qed lemma finite_accessible_tableaus: shows "finite (\ ` {s'. s \\<^sup>* s'})" proof- have "{s'. s \\<^sup>* s'} = {s'. s \\<^sup>+ s'} \ {s}" by (auto simp add: rtrancl_eq_or_trancl) moreover have "finite (\ ` {s'. s \\<^sup>+ s'})" (is "finite ?A") proof- let ?T = "{t. tvars t = tvars (\ s) \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t(\ s))}" have "?A \ ?T" proof fix t assume "t \ ?A" then obtain s' where "s \\<^sup>+ s'" "t = \ s'" by auto then obtain l where *: "l \ []" "1 < length l" "hd l = s" "last l = s'" "succ_chain l" using trancl_rel_chain[of s s' succ_rel] by auto show "t \ ?T" proof- have "tvars (\ s') = tvars (\ s)" using succ_chain_vars_id[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by simp moreover have "\ (\ s')" using \s \\<^sup>+ s'\ using succ_inv(1)[of _ s'] by (auto dest: tranclD2) moreover have "\v::'a valuation. v \\<^sub>t \ s' = v \\<^sub>t \ s" using succ_chain_tableau_equiv[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto ultimately show ?thesis using \t = \ s'\ by simp qed qed moreover have "finite (tvars (\ s))" by (auto simp add: lvars_def rvars_def finite_vars) ultimately show ?thesis using finite_tableaus[of "tvars (\ s)" "\ s"] by (auto simp add: finite_subset) qed ultimately show ?thesis by simp qed abbreviation check_valuation where "check_valuation (v::'a valuation) v0 bl0 bu0 t0 V \ \ t. tvars t = V \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0) \ v \\<^sub>t t \ (\ x \ rvars t. v x = v0 x \ v x = bl0 x \ v x = bu0 x) \ (\ x. x \ V \ v x = v0 x)" lemma finite_valuations: assumes "finite V" shows "finite {v::'a valuation. check_valuation v v0 bl0 bu0 t0 V}" (is "finite ?A") proof- let ?Al = "\ L. {t. lvars t = L \ rvars t = V - L \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t t0)}" let ?Vt = "\ t. {v::'a valuation. v \\<^sub>t t \ (\ x \ rvars t. v x = v0 x \ v x = bl0 x \ v x = bu0 x) \ (\ x. x \ V \ v x = v0 x)}" have "finite {L. L \ V}" using \finite V\ by auto have "\ L. L \ V \ finite (?Al L)" using finite_tableaus_aux by auto have "\ L t. L \ V \ t \ ?Al L \ finite (?Vt t)" proof (safe) fix L t assume "lvars t \ V" "rvars t = V - lvars t" "\ t" "\v. v \\<^sub>t t = v \\<^sub>t t0" then have "rvars t \ lvars t = V" by auto let ?f = "\ v x. if x \ rvars t then v x else 0" have "inj_on ?f (?Vt t)" unfolding inj_on_def proof (safe, rule ext) fix v1 v2 x assume "(\x. if x \ rvars t then v1 x else (0 :: 'a)) = (\x. if x \ rvars t then v2 x else (0 :: 'a))" (is "?f1 = ?f2") have "\x\rvars t. v1 x = v2 x" proof fix x assume "x \ rvars t" then show "v1 x = v2 x" using \?f1 = ?f2\ fun_cong[of ?f1 ?f2 x] by auto qed assume *: "v1 \\<^sub>t t" "v2 \\<^sub>t t" "\x. x \ V \ v1 x = v0 x" "\x. x \ V \ v2 x = v0 x" show "v1 x = v2 x" proof (cases "x \ lvars t") case False then show ?thesis using * \\x\rvars t. v1 x = v2 x\ \rvars t \ lvars t = V\ by auto next case True let ?eq = "eq_for_lvar t x" have "?eq \ set t \ lhs ?eq = x" using eq_for_lvar \x \ lvars t\ by simp then have "v1 x = rhs ?eq \ v1 \" "v2 x = rhs ?eq \ v2 \" using \v1 \\<^sub>t t\ \v2 \\<^sub>t t\ unfolding satisfies_tableau_def satisfies_eq_def by auto moreover have "rhs ?eq \ v1 \ = rhs ?eq \ v2 \" apply (rule valuate_depend) using \\x\rvars t. v1 x = v2 x\ \?eq \ set t \ lhs ?eq = x\ unfolding rvars_def by auto ultimately show ?thesis by simp qed qed let ?R = "{v. \ x. 
if x \ rvars t then v x = v0 x \ v x = bl0 x \ v x = bu0 x else v x = 0 }" have "?f ` (?Vt t) \ ?R" by auto moreover have "finite ?R" proof- have "finite (rvars t)" using \finite V\ \rvars t \ lvars t = V\ using finite_subset[of "rvars t" V] by auto moreover let ?R' = "{v. \ x. if x \ rvars t then v x \ {v0 x, bl0 x, bu0 x} else v x = 0}" have "?R = ?R'" by auto ultimately show ?thesis using finite_fun_args[of "rvars t" "\ x. {v0 x, bl0 x, bu0 x}" "\ x. 0"] by auto qed ultimately have "finite (?f ` (?Vt t))" by (simp add: finite_subset) then show "finite (?Vt t)" using \inj_on ?f (?Vt t)\ by (auto dest: finite_imageD) qed have "?A = \ (\ (((`) ?Vt) ` (?Al ` {L. L \ V})))" (is "?A = ?A'") by (auto simp add: normalized_tableau_def cong del: image_cong_simp) moreover have "finite ?A'" proof (rule finite_Union) show "finite (\ (((`) ?Vt) ` (?Al ` {L. L \ V})))" using \finite {L. L \ V}\ \\ L. L \ V \ finite (?Al L)\ by auto next fix M assume "M \ \ (((`) ?Vt) ` (?Al ` {L. L \ V}))" then obtain L t where "L \ V" "t \ ?Al L" "M = ?Vt t" by blast then show "finite M" using \\ L t. L \ V \ t \ ?Al L \ finite (?Vt t)\ by blast qed ultimately show ?thesis by simp qed lemma finite_accessible_valuations: shows "finite (\ ` {s'. s \\<^sup>* s'})" proof- have "{s'. s \\<^sup>* s'} = {s'. s \\<^sup>+ s'} \ {s}" by (auto simp add: rtrancl_eq_or_trancl) moreover have "finite (\ ` {s'. s \\<^sup>+ s'})" (is "finite ?A") proof- let ?P = "\ v. check_valuation v (\\ s\) (\ x. the (\\<^sub>l s x)) (\ x. the (\\<^sub>u s x)) (\ s) (tvars (\ s))" let ?P' = "\ v::(var, 'a) mapping. \ t. tvars t = tvars (\ s) \ \ t \ (\ v::'a valuation. v \\<^sub>t t = v \\<^sub>t \ s) \ \v\ \\<^sub>t t \ (\ x \ rvars t. \v\ x = \\ s\ x \ \v\ x = the (\\<^sub>l s x) \ \v\ x = the (\\<^sub>u s x)) \ (\ x. x \ tvars (\ s) \ look v x = look (\ s) x) \ (\ x. x \ tvars (\ s) \ look v x \ None)" have "finite (tvars (\ s))" by (auto simp add: lvars_def rvars_def finite_vars) then have "finite {v. ?P v}" using finite_valuations[of "tvars (\ s)" "\ s" "\\ s\" "\ x. the (\\<^sub>l s x)" "\ x. the (\\<^sub>u s x)"] by auto moreover have "map2fun ` {v. ?P' v} \ {v. ?P v}" by (auto simp add: map2fun_def) ultimately have "finite (map2fun ` {v. ?P' v})" by (auto simp add: finite_subset) moreover have "inj_on map2fun {v. ?P' v}" unfolding inj_on_def proof (safe) fix x y assume "\x\ = \y\" and *: "\x. x \ Simplex.tvars (\ s) \ look y x = look (\ s) x" "\xa. xa \ Simplex.tvars (\ s) \ look x xa = look (\ s) xa" "\x. x \ Simplex.tvars (\ s) \ look y x \ None" "\xa. xa \ Simplex.tvars (\ s) \ look x xa \ None" show "x = y" proof (rule mapping_eqI) fix k have "\x\ k = \y\ k" using \\x\ = \y\\ by simp then show "look x k = look y k" using * by (cases "k \ tvars (\ s)") (auto simp add: map2fun_def split: option.split) qed qed ultimately have "finite {v. ?P' v}" by (rule finite_imageD) moreover have "?A \ {v. ?P' v}" proof (safe) fix s' assume "s \\<^sup>+ s'" then obtain l where *: "l \ []" "1 < length l" "hd l = s" "last l = s'" "succ_chain l" using trancl_rel_chain[of s s' succ_rel] by auto show "?P' (\ s')" proof- have "\ s" "\ (\ s)" "\\ s\ \\<^sub>t \ s" using \s \\<^sup>+ s'\ using tranclD[of s s' succ_rel] by (auto simp add: curr_val_satisfies_no_lhs_def) have "tvars (\ s') = tvars (\ s)" using succ_chain_vars_id[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by simp moreover have "\(\ s')" using \s \\<^sup>+ s'\ using succ_inv(1)[of _ s'] by (auto dest: tranclD2) moreover have "\v::'a valuation. 
v \\<^sub>t \ s' = v \\<^sub>t \ s" using succ_chain_tableau_equiv[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto moreover have "\\ s'\ \\<^sub>t \ s'" using succ_chain_valuation_satisfies[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] \\\ s\ \\<^sub>t \ s\ by simp moreover have "\x\rvars (\ s'). \\ s'\ x = \\ s\ x \ \\ s'\ x = the (\\<^sub>l s x) \ \\ s'\ x = the (\\<^sub>u s x)" using succ_chain_rvar_valuation[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto moreover have "\x. x \ tvars (\ s) \ look (\ s') x = look (\ s) x" using succ_chain_no_vars_valuation[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] by auto moreover have "\x. x \ Simplex.tvars (\ s') \ look (\ s') x \ None" using succ_chain_tableau_valuated[of l 0 "length l - 1"] using * hd_conv_nth[of l] last_conv_nth[of l] using \tvars (\ s') = tvars (\ s)\ \\ s\ by (auto simp add: tableau_valuated_def) ultimately show ?thesis by (rule_tac x="\ s'" in exI) auto qed qed ultimately show ?thesis by (auto simp add: finite_subset) qed ultimately show ?thesis by simp qed lemma accessible_bounds: shows "\\<^sub>i ` {s'. s \\<^sup>* s'} = {\\<^sub>i s}" proof - have "s \\<^sup>* s' \ \\<^sub>i s' = \\<^sub>i s" for s' by (induct s s' rule: rtrancl.induct, auto) then show ?thesis by blast qed lemma accessible_unsat_core: shows "\\<^sub>c ` {s'. s \\<^sup>* s'} = {\\<^sub>c s}" proof - have "s \\<^sup>* s' \ \\<^sub>c s' = \\<^sub>c s" for s' by (induct s s' rule: rtrancl.induct, auto) then show ?thesis by blast qed lemma state_eqI: "\\<^sub>i\<^sub>l s = \\<^sub>i\<^sub>l s' \ \\<^sub>i\<^sub>u s = \\<^sub>i\<^sub>u s' \ \ s = \ s' \ \ s = \ s' \ \ s = \ s' \ \\<^sub>c s = \\<^sub>c s' \ s = s'" by (cases s, cases s', auto) lemma finite_accessible_states: shows "finite {s'. s \\<^sup>* s'}" (is "finite ?A") proof- let ?V = "\ ` ?A" let ?T = "\ ` ?A" let ?P = "?V \ ?T \ {\\<^sub>i s} \ {True, False} \ {\\<^sub>c s}" have "finite ?P" using finite_accessible_valuations finite_accessible_tableaus by auto moreover let ?f = "\ s. (\ s, \ s, \\<^sub>i s, \ s, \\<^sub>c s)" have "?f ` ?A \ ?P" using accessible_bounds[of s] accessible_unsat_core[of s] by auto moreover have "inj_on ?f ?A" unfolding inj_on_def by (auto intro: state_eqI) ultimately show ?thesis using finite_imageD [of ?f ?A] using finite_subset by auto qed (* -------------------------------------------------------------------------- *) lemma acyclic_suc_rel: "acyclic succ_rel" proof (rule acyclicI, rule allI) fix s show "(s, s) \ succ_rel\<^sup>+" proof assume "s \\<^sup>+ s" then obtain l where "l \ []" "length l > 1" "hd l = s" "last l = s" "succ_chain l" using trancl_rel_chain[of s s succ_rel] by auto have "l ! 0 = s" using \l \ []\ \hd l = s\ by (simp add: hd_conv_nth) then have "s \ (l ! 1)" using \succ_chain l\ unfolding rel_chain_def using \length l > 1\ by auto then have "\ (\ s)" by simp let ?enter_rvars = "{x. \ sl. swap_lr l sl x}" have "finite ?enter_rvars" proof- let ?all_vars = "\ (set (map (\ t. lvars t \ rvars t) (map \ l)))" have "finite ?all_vars" by (auto simp add: lvars_def rvars_def finite_vars) moreover have "?enter_rvars \ ?all_vars" by force ultimately show ?thesis by (simp add: finite_subset) qed let ?xr = "Max ?enter_rvars" have "?xr \ ?enter_rvars" proof (rule Max_in) show "?enter_rvars \ {}" proof- from \s \ (l ! 1)\ obtain x\<^sub>i x\<^sub>j :: var where "x\<^sub>i \ lvars (\ s)" "x\<^sub>i \ rvars (\ (l ! 
1))" by (rule succ_vars) auto then have "x\<^sub>i \ ?enter_rvars" using \hd l = s\ \l \ []\ \length l > 1\ by (auto simp add: hd_conv_nth) then show ?thesis by auto qed next show "finite ?enter_rvars" using \finite ?enter_rvars\ . qed then obtain xr sl where "xr = ?xr" "swap_lr l sl xr" by auto then have "sl + 1 < length l" by simp have "(l ! sl) \ (l ! (sl + 1))" using \sl + 1 < length l\ \succ_chain l\ unfolding rel_chain_def by auto have "length l > 2" proof (rule ccontr) assume "\ ?thesis" with \length l > 1\ have "length l = 2" by auto then have "last l = l ! 1" by (cases l) (auto simp add: last_conv_nth nth_Cons split: nat.split) then have "xr \ lvars (\ s)" "xr \ rvars (\ s)" using \length l = 2\ using \swap_lr l sl xr\ using \hd l = s\ \last l = s\ \l \ []\ by (auto simp add: hd_conv_nth) then show False using \\ (\ s)\ unfolding normalized_tableau_def by auto qed obtain l' where "hd l' = l ! (sl + 1)" "last l' = l ! sl" "length l' = length l - 1" "succ_chain l'" and l'_l: "\ i. i + 1 < length l' \ (\ j. j + 1 < length l \ l' ! i = l ! j \ l' ! (i + 1) = l ! (j + 1))" using \length l > 2\ \sl + 1 < length l\ \hd l = s\ \last l = s\ \succ_chain l\ using reorder_cyclic_list[of l s sl] by blast then have "xr \ rvars (\ (hd l'))" "xr \ lvars (\ (last l'))" "length l' > 1" "l' \ []" using \swap_lr l sl xr\ \length l > 2\ by auto then have "\ sp. swap_rl l' sp xr" using \succ_chain l'\ using succ_chain_swap_rl_exists[of l' 0 "length l' - 1" xr] by (auto simp add: hd_conv_nth last_conv_nth) then have "\ sp. swap_rl l' sp xr \ (\ sp'. sp' < sp \ \ swap_rl l' sp' xr)" by (rule min_element) then obtain sp where "swap_rl l' sp xr" "\ sp'. sp' < sp \ \ swap_rl l' sp' xr" by blast then have "sp + 1 < length l'" by simp have "\\ (l' ! 0)\ xr = \\ (l' ! sp)\ xr" proof- have "always_r l' 0 sp xr" using \xr \ rvars (\ (hd l'))\ \sp + 1 < length l'\ \\ sp'. sp' < sp \ \ swap_rl l' sp' xr\ proof (induct sp) case 0 then have "l' \ []" by auto then show ?case using 0(1) by (auto simp add: hd_conv_nth) next case (Suc sp') show ?case proof (safe) fix k assume "k \ Suc sp'" show "xr \ rvars (\ (l' ! k))" proof (cases "k = sp' + 1") case False then show ?thesis using Suc \k \ Suc sp'\ by auto next case True then have "xr \ rvars (\ (l' ! (k - 1)))" using Suc by auto moreover then have "xr \ lvars (\ (l' ! k))" using True Suc(3) Suc(4) by auto moreover have "(l' ! (k - 1)) \ (l' ! k)" using \succ_chain l'\ using Suc(3) True by (simp add: rel_chain_def) ultimately show ?thesis using succ_vars_id[of "l' ! (k - 1)" "l' ! k"] by auto qed qed qed then show ?thesis using \sp + 1 < length l'\ using \succ_chain l'\ using succ_chain_always_r_valuation_id by simp qed have "(l' ! sp) \ (l' ! (sp+1))" using \sp + 1 < length l'\ \succ_chain l'\ unfolding rel_chain_def by simp then obtain xs xr' :: var where "xs \ lvars (\ (l' ! sp))" "xr \ rvars (\ (l' ! sp))" "swap_lr l' sp xs" apply (rule succ_vars) using \swap_rl l' sp xr\ \sp + 1 < length l'\ by auto then have "xs \ xr" using \(l' ! sp) \ (l' ! (sp+1))\ by (auto simp add: normalized_tableau_def) obtain sp' where "l' ! sp = l ! sp'" "l' ! (sp + 1) = l ! (sp' + 1)" "sp' + 1 < length l" using \sp + 1 < length l'\ l'_l by auto have "xs \ ?enter_rvars" using \swap_lr l' sp xs\ l'_l by force have "xs < xr" proof- have "xs \ ?xr" using \finite ?enter_rvars\ \xs \ ?enter_rvars\ by (rule Max_ge) then show ?thesis using \xr = ?xr\ \xs \ xr\ by simp qed let ?sl = "l ! sl" let ?sp = "l' ! 
sp" let ?eq = "eq_for_lvar (\ ?sp) xs" let ?bl = "\ ?sl" let ?bp = "\ ?sp" have "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sl" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sp" using \l ! sl \ (l ! (sl + 1))\ using \l' ! sp \ (l' ! (sp+ 1))\ by simp_all have "\\<^sub>i ?sp = \\<^sub>i ?sl" proof- have "\\<^sub>i (l' ! sp) = \\<^sub>i (l' ! (length l' - 1))" using \sp + 1 < length l'\ \succ_chain l'\ using succ_chain_bounds_id by auto then have "\\<^sub>i (last l') = \\<^sub>i (l' ! sp)" using \l' \ []\ by (simp add: last_conv_nth) then show ?thesis using \last l' = l ! sl\ by simp qed have diff_satified: "\?bl\ xs - \?bp\ xs = ((rhs ?eq) \ \?bl\ \) - ((rhs ?eq) \ \?bp\ \)" proof- have "\?bp\ \\<^sub>e ?eq" using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sp\ using eq_for_lvar[of xs "\ ?sp"] using \xs \ lvars (\ (l' ! sp))\ unfolding curr_val_satisfies_no_lhs_def satisfies_tableau_def by auto moreover have "\?bl\ \\<^sub>e ?eq" proof- have "\\ (l ! sl)\ \\<^sub>t \ (l' ! sp)" using \l' ! sp = l ! sp'\ \sp' + 1 < length l\ \sl + 1 < length l\ using \succ_chain l\ using succ_chain_tableau_equiv[of l sl sp'] using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sl\ unfolding curr_val_satisfies_no_lhs_def by simp then show ?thesis unfolding satisfies_tableau_def using eq_for_lvar using \xs \ lvars (\ (l' ! sp))\ by simp qed moreover have "lhs ?eq = xs" using \xs \ lvars (\ (l' ! sp))\ using eq_for_lvar by simp ultimately show ?thesis unfolding satisfies_eq_def by auto qed have "\ in_bounds xr \?bl\ (\ ?sl)" using \l ! sl \ (l ! (sl + 1))\ \swap_lr l sl xr\ using succ_min_lvar_not_in_bounds(1)[of ?sl "l ! (sl + 1)" xr] by simp have "\ x. x < xr \ in_bounds x \?bl\ (\ ?sl)" proof (safe) fix x assume "x < xr" show "in_bounds x \?bl\ (\ ?sl)" proof (cases "x \ lvars (\ ?sl)") case True then show ?thesis using succ_min_lvar_not_in_bounds(2)[of ?sl "l ! (sl + 1)" xr] using \l ! sl \ (l ! (sl + 1))\ \swap_lr l sl xr\ \x < xr\ by simp next case False then show ?thesis using \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?sl\ unfolding curr_val_satisfies_no_lhs_def by (simp add: satisfies_bounds_set.simps) qed qed then have "in_bounds xs \?bl\ (\ ?sl)" using \xs < xr\ by simp have "\ in_bounds xs \?bp\ (\ ?sp)" using \l' ! sp \ (l' ! (sp + 1))\ \swap_lr l' sp xs\ using succ_min_lvar_not_in_bounds(1)[of ?sp "l' ! (sp + 1)" xs] by simp have "\ x \ rvars_eq ?eq. x > xr \ \?bp\ x = \?bl\ x" proof (safe) fix x assume "x \ rvars_eq ?eq" "x > xr" then have "always_r l' 0 (length l' - 1) x" proof (safe) fix k assume "x \ rvars_eq ?eq" "x > xr" "0 \ k" "k \ length l' - 1" obtain k' where "l ! k' = l' ! k" "k' < length l" using l'_l \k \ length l' - 1\ \length l' > 1\ apply (cases "k > 0") apply (erule_tac x="k - 1" in allE) apply (drule mp) by auto let ?eq' = "eq_for_lvar (\ (l ! sp')) xs" have "\ x \ rvars_eq ?eq'. x > xr \ always_r l 0 (length l - 1) x" proof (safe) fix x k assume "x \ rvars_eq ?eq'" "xr < x" "0 \ k" "k \ length l - 1" then have "x \ rvars (\ (l ! sp'))" using eq_for_lvar[of xs "\ (l ! sp')"] using \swap_lr l' sp xs\ \l' ! sp = l ! sp'\ by (auto simp add: rvars_def) have *: "\ i. i < sp' \ x \ rvars (\ (l ! i))" proof (safe, rule ccontr) fix i assume "i < sp'" "x \ rvars (\ (l ! i))" then have "x \ lvars (\ (l ! i))" using \x \ rvars (\ (l ! sp'))\ using \sp' + 1 < length l\ using \succ_chain l\ using succ_chain_vars_id[of l i sp'] by auto obtain i' where "swap_lr l i' x" using \x \ lvars (\ (l ! i))\ using \x \ rvars (\ (l ! 
sp'))\ using \i < sp'\ \sp' + 1 < length l\ using \succ_chain l\ using succ_chain_swap_lr_exists[of l i sp' x] by auto then have "x \ ?enter_rvars" by auto then have "x \ ?xr" using \finite ?enter_rvars\ using Max_ge[of ?enter_rvars x] by simp then show False using \x > xr\ using \xr = ?xr\ by simp qed then have "x \ rvars (\ (last l))" using \hd l = s\ \last l = s\ \l \ []\ using \x \ rvars (\ (l ! sp'))\ by (auto simp add: hd_conv_nth) show "x \ rvars (\ (l ! k))" proof (cases "k = length l - 1") case True then show ?thesis using \x \ rvars (\ (last l))\ using \l \ []\ by (simp add: last_conv_nth) next case False then have "k < length l - 1" using \k \ length l - 1\ by simp then have "k < length l" using \length l > 1\ by auto show ?thesis proof (rule ccontr) assume "\ ?thesis" then have "x \ lvars (\ (l ! k))" using \x \ rvars (\ (l ! sp'))\ using \sp' + 1 < length l\ \k < length l\ using succ_chain_vars_id[of l k sp'] using \succ_chain l\ \l \ []\ by auto obtain i' where "swap_lr l i' x" using \succ_chain l\ using \x \ lvars (\ (l ! k))\ using \x \ rvars (\ (last l))\ using \k < length l - 1\ \l \ []\ using succ_chain_swap_lr_exists[of l k "length l - 1" x] by (auto simp add: last_conv_nth) then have "x \ ?enter_rvars" by auto then have "x \ ?xr" using \finite ?enter_rvars\ using Max_ge[of ?enter_rvars x] by simp then show False using \x > xr\ using \xr = ?xr\ by simp qed qed qed then have "x \ rvars (\ (l ! k'))" using \x \ rvars_eq ?eq\ \x > xr\ \k' < length l\ using \l' ! sp = l ! sp'\ by simp then show "x \ rvars (\ (l' ! k))" using \l ! k' = l' ! k\ by simp qed then have "\?bp\ x = \\ (l' ! (length l' - 1))\ x" using \succ_chain l'\ \sp + 1 < length l'\ by (auto intro!: succ_chain_always_r_valuation_id[rule_format]) then have "\?bp\ x = \\ (last l')\ x" using \l' \ []\ by (simp add: last_conv_nth) then show "\?bp\ x = \?bl\ x" using \last l' = l ! sl\ by simp qed have "\?bp\ xr = \\ (l ! (sl + 1))\ xr" using \\\ (l' ! 0)\ xr = \\ (l' ! sp)\ xr\ using \hd l' = l ! (sl + 1)\ \l' \ []\ by (simp add: hd_conv_nth) { fix dir1 dir2 :: "('i,'a) Direction" assume dir1: "dir1 = (if \?bl\ xr <\<^sub>l\<^sub>b \\<^sub>l ?sl xr then Positive else Negative)" then have "\\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)" using \\ in_bounds xr \?bl\ (\ ?sl)\ using neg_bounds_compare(7) neg_bounds_compare(3) by (auto simp add: bound_compare''_defs) then have "\ \\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)" using bounds_compare_contradictory(7) bounds_compare_contradictory(3) neg_bounds_compare(6) dir1 unfolding bound_compare''_defs by auto force have "LB dir1 ?sl xr \ None" using \\\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)\ by (cases "LB dir1 ?sl xr") (auto simp add: bound_compare_defs) assume dir2: "dir2 = (if \?bp\ xs <\<^sub>l\<^sub>b \\<^sub>l ?sp xs then Positive else Negative)" then have "\\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)" using \\ in_bounds xs \?bp\ (\ ?sp)\ using neg_bounds_compare(2) neg_bounds_compare(6) by (auto simp add: bound_compare''_defs) then have "\ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)" using bounds_compare_contradictory(3) bounds_compare_contradictory(7) neg_bounds_compare(6) dir2 unfolding bound_compare''_defs by auto force then have "\ x \ rvars_eq ?eq. x < xr \ \ reasable_var dir2 x ?eq ?sp" using succ_min_rvar[of ?sp "l' ! (sp + 1)" xs xr ?eq] using \l' ! sp \ (l' ! 
(sp + 1))\ using \swap_lr l' sp xs\ \swap_rl l' sp xr\ dir2 unfolding bound_compare''_defs by auto have "LB dir2 ?sp xs \ None" using \\\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)\ by (cases "LB dir2 ?sp xs") (auto simp add: bound_compare_defs) have *: "\ x \ rvars_eq ?eq. x < xr \ ((coeff (rhs ?eq) x > 0 \ \\<^sub>u\<^sub>b (lt dir2) (\?bp\ x) (UB dir2 ?sp x)) \ (coeff (rhs ?eq) x < 0 \ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ x) (LB dir2 ?sp x)))" proof (safe) fix x assume "x \ rvars_eq ?eq" "x < xr" "coeff (rhs ?eq) x > 0" then have "\ \\<^sub>u\<^sub>b (lt dir2) (\?bp\ x) (UB dir2 ?sp x)" using \\ x \ rvars_eq ?eq. x < xr \ \ reasable_var dir2 x ?eq ?sp\ by simp then show "\\<^sub>u\<^sub>b (lt dir2) (\?bp\ x) (UB dir2 ?sp x)" using dir2 neg_bounds_compare(4) neg_bounds_compare(8) unfolding bound_compare''_defs by force next fix x assume "x \ rvars_eq ?eq" "x < xr" "coeff (rhs ?eq) x < 0" then have "\ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ x) (LB dir2 ?sp x)" using \\ x \ rvars_eq ?eq. x < xr \ \ reasable_var dir2 x ?eq ?sp\ by simp then show "\\<^sub>l\<^sub>b (lt dir2) (\?bp\ x) (LB dir2 ?sp x)" using dir2 neg_bounds_compare(4) neg_bounds_compare(8) dir2 unfolding bound_compare''_defs by force qed have "(lt dir2) (\?bp\ xs) (\?bl\ xs)" using \\\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)\ using \\\<^sub>i ?sp = \\<^sub>i ?sl\ dir2 using \in_bounds xs \?bl\ (\ ?sl)\ by (auto simp add: bound_compare''_defs simp: indexl_def indexu_def boundsl_def boundsu_def) then have "(lt dir2) 0 (\?bl\ xs - \?bp\ xs)" using dir2 by (auto simp add: minus_gt[THEN sym] minus_lt[THEN sym]) moreover have "le (lt dir2) ((rhs ?eq) \ \?bl\ \ - (rhs ?eq) \ \?bp\ \) 0" proof- have "\ x \ rvars_eq ?eq. (0 < coeff (rhs ?eq) x \ le (lt dir2) 0 (\?bp\ x - \?bl\ x)) \ (coeff (rhs ?eq) x < 0 \ le (lt dir2) (\?bp\ x - \?bl\ x) 0)" proof fix x assume "x \ rvars_eq ?eq" show "(0 < coeff (rhs ?eq) x \ le (lt dir2) 0 (\?bp\ x - \?bl\ x)) \ (coeff (rhs ?eq) x < 0 \ le (lt dir2) (\?bp\ x - \?bl\ x) 0)" proof (cases "x < xr") case True then have "in_bounds x \?bl\ (\ ?sl)" using \\ x. x < xr \ in_bounds x \?bl\ (\ ?sl)\ by simp show ?thesis proof (safe) assume "coeff (rhs ?eq) x > 0" "0 \ \?bp\ x - \?bl\ x" then have "\\<^sub>u\<^sub>b (lt dir2) (\\ (l' ! sp)\ x) (UB dir2 (l' ! sp) x)" using * \x < xr\ \x \ rvars_eq ?eq\ by simp then have "le (lt dir2) (\?bl\ x) (\?bp\ x)" using \in_bounds x \?bl\ (\ ?sl)\ \\\<^sub>i ?sp = \\<^sub>i ?sl\ dir2 apply (auto simp add: bound_compare''_defs) using bounds_lg(3)[of "\?bp\ x" "\\<^sub>u (l ! sl) x" "\?bl\ x"] using bounds_lg(6)[of "\?bp\ x" "\\<^sub>l (l ! sl) x" "\?bl\ x"] unfolding bound_compare''_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) then show "lt dir2 0 (\?bp\ x - \?bl\ x)" using \0 \ \?bp\ x - \?bl\ x\ using minus_gt[of "\?bl\ x" "\?bp\ x"] minus_lt[of "\?bp\ x" "\?bl\ x"] dir2 by auto next assume "coeff (rhs ?eq) x < 0" "\?bp\ x - \?bl\ x \ 0" then have "\\<^sub>l\<^sub>b (lt dir2) (\\ (l' ! sp)\ x) (LB dir2 (l' ! sp) x)" using * \x < xr\ \x \ rvars_eq ?eq\ by simp then have "le (lt dir2) (\?bp\ x) (\?bl\ x)" using \in_bounds x \?bl\ (\ ?sl)\ \\\<^sub>i ?sp = \\<^sub>i ?sl\ dir2 apply (auto simp add: bound_compare''_defs) using bounds_lg(3)[of "\?bp\ x" "\\<^sub>u (l ! sl) x" "\?bl\ x"] using bounds_lg(6)[of "\?bp\ x" "\\<^sub>l (l ! 
sl) x" "\?bl\ x"] unfolding bound_compare''_defs by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) then show "lt dir2 (\?bp\ x - \?bl\ x) 0" using \\?bp\ x - \?bl\ x \ 0\ using minus_gt[of "\?bl\ x" "\?bp\ x"] minus_lt[of "\?bp\ x" "\?bl\ x"] dir2 by auto qed next case False show ?thesis proof (cases "x = xr") case True have "\\ (l ! (sl + 1))\ xr = the (LB dir1 ?sl xr)" using \l ! sl \ (l ! (sl + 1))\ using \swap_lr l sl xr\ using succ_set_on_bound(1)[of "l ! sl" "l ! (sl + 1)" xr] using \\ \\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)\ dir1 unfolding bound_compare''_defs by auto then have "\?bp\ xr = the (LB dir1 ?sl xr)" using \\?bp\ xr = \\ (l ! (sl + 1))\ xr\ by simp then have "lt dir1 (\?bl\ xr) (\?bp\ xr)" using \LB dir1 ?sl xr \ None\ using \\\<^sub>l\<^sub>b (lt dir1) (\?bl\ xr) (LB dir1 ?sl xr)\ dir1 by (auto simp add: bound_compare_defs) moreover have "reasable_var dir2 xr ?eq ?sp" using \\ \\<^sub>l\<^sub>b (lt dir2) (\?bp\ xs) (LB dir2 ?sp xs)\ using \l' ! sp \ (l' ! (sp + 1))\ using \swap_lr l' sp xs\ \swap_rl l' sp xr\ using succ_min_rvar[of "l' ! sp" "l' ! (sp + 1)"xs xr ?eq] dir2 unfolding bound_compare''_defs by auto then have "if dir1 = dir2 then coeff (rhs ?eq) xr > 0 else coeff (rhs ?eq) xr < 0" using \\?bp\ xr = the (LB dir1 ?sl xr)\ using \\\<^sub>i ?sp = \\<^sub>i ?sl\[THEN sym] dir1 using \LB dir1 ?sl xr \ None\ dir1 dir2 by (auto split: if_splits simp add: bound_compare_defs indexl_def indexu_def boundsl_def boundsu_def) moreover have "dir1 = Positive \ dir1 = Negative" "dir2 = Positive \ dir2 = Negative" using dir1 dir2 by auto ultimately show ?thesis using \x = xr\ using minus_lt[of "\?bp\ xr" "\?bl\ xr"] minus_gt[of "\?bl\ xr" "\?bp\ xr"] by (auto split: if_splits) next case False then have "x > xr" using \\ x < xr\ by simp then have "\?bp\ x = \?bl\ x" using \\ x \ rvars_eq ?eq. x > xr \ \?bp\ x = \?bl\ x\ using \x \ rvars_eq ?eq\ by simp then show ?thesis by simp qed qed qed then have "le (lt dir2) 0 (rhs ?eq \ \ x. \?bp\ x - \?bl\ x \)" using dir2 apply auto using valuate_nonneg[of "rhs ?eq" "\ x. \?bp\ x - \?bl\ x"] apply force using valuate_nonpos[of "rhs ?eq" "\ x. \?bp\ x - \?bl\ x"] apply force done then have "le (lt dir2) 0 ((rhs ?eq) \ \?bp\ \ - (rhs ?eq) \ \?bl\ \)" by (subst valuate_diff)+ simp then have "le (lt dir2) ((rhs ?eq) \ \?bl\ \) ((rhs ?eq) \ \?bp\ \)" using minus_lt[of "(rhs ?eq) \ \?bp\ \" "(rhs ?eq) \ \?bl\ \"] dir2 by auto then show ?thesis using dir2 using minus_lt[of "(rhs ?eq) \ \?bl\ \" "(rhs ?eq) \ \?bp\ \"] using minus_gt[of "(rhs ?eq) \ \?bp\ \" "(rhs ?eq) \ \?bl\ \"] by auto qed ultimately have False using diff_satified dir2 by (auto split: if_splits) } then show False by auto qed qed (* -------------------------------------------------------------------------- *) lemma check_unsat_terminates: assumes "\ s" shows "check_dom s" by (rule check_dom.intros) (auto simp add: assms) lemma check_sat_terminates'_aux: assumes dir: "dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative)" and *: "\ s'. 
\s \ s'; \ s'; \ (\ s'); \ s'; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ check_dom s'" and "\ s" "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ \ s" "min_lvar_not_in_bounds s = Some x\<^sub>i" "\\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)" shows "check_dom (case min_rvar_incdec dir s x\<^sub>i of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j (the (LB dir s x\<^sub>i)) s)" proof (cases "min_rvar_incdec dir s x\<^sub>i") case Inl then show ?thesis using check_unsat_terminates by simp next case (Inr x\<^sub>j) then have xj: "x\<^sub>j \ rvars_of_lvar (\ s) x\<^sub>i" using min_rvar_incdec_eq_Some_rvars[of _ s "eq_for_lvar (\ s) x\<^sub>i" x\<^sub>j] using dir by simp let ?s' = "pivot_and_update x\<^sub>i x\<^sub>j (the (LB dir s x\<^sub>i)) s" have "check_dom ?s'" proof (rule * ) show **: "\ ?s'" "\ (\ ?s')" "\ ?s'" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s'" using \min_lvar_not_in_bounds s = Some x\<^sub>i\ Inr using \\ s\ \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ dir using pivotandupdate_check_precond by auto have xi: "x\<^sub>i \ lvars (\ s)" using assms(8) min_lvar_not_in_bounds_lvars by blast show "s \ ?s'" unfolding gt_state_def using \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ \\ s\ using \min_lvar_not_in_bounds s = Some x\<^sub>i\ \\\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i)\ Inr dir by (intro conjI pivotandupdate_bounds_id pivotandupdate_unsat_core_id, auto intro!: xj xi) qed then show ?thesis using Inr by simp qed lemma check_sat_terminates': assumes "\ s" "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "s\<^sub>0 \\<^sup>* s" shows "check_dom s" using assms proof (induct s rule: wf_induct[of "{(y, x). s\<^sub>0 \\<^sup>* x \ x \ y}"]) show "wf {(y, x). s\<^sub>0 \\<^sup>* x \ x \ y}" proof (rule finite_acyclic_wf) let ?A = "{(s', s). s\<^sub>0 \\<^sup>* s \ s \ s'}" let ?B = "{s. s\<^sub>0 \\<^sup>* s}" have "?A \ ?B \ ?B" proof fix p assume "p \ ?A" then have "fst p \ ?B" "snd p \ ?B" using rtrancl_into_trancl1[of s\<^sub>0 "snd p" succ_rel "fst p"] by auto then show "p \ ?B \ ?B" using mem_Sigma_iff[of "fst p" "snd p"] by auto qed then show "finite ?A" using finite_accessible_states[of s\<^sub>0] using finite_subset[of ?A "?B \ ?B"] by simp show "acyclic ?A" proof- have "?A \ succ_rel\" by auto then show ?thesis using acyclic_converse acyclic_subset using acyclic_suc_rel by auto qed qed next fix s assume "\ s'. (s', s) \ {(y, x). s\<^sub>0 \\<^sup>* x \ x \ y} \ \ s' \ \ (\ s') \ \ s' \ \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ s\<^sub>0 \\<^sup>* s' \ check_dom s'" "\ s" "\ (\ s)" "\ s" " \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "s\<^sub>0 \\<^sup>* s" then have *: "\ s'. 
\s \ s'; \ s'; \ (\ s'); \ s'; \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s' \ \ check_dom s'" using rtrancl_into_trancl1[of s\<^sub>0 s succ_rel] using trancl_into_rtrancl[of s\<^sub>0 _ succ_rel] by auto show "check_dom s" proof (rule check_dom.intros, simp_all add: check'_def, unfold Positive_def[symmetric], unfold Negative_def[symmetric]) fix x\<^sub>i assume "\ \ s" "Some x\<^sub>i = min_lvar_not_in_bounds s" "\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i" have "\\<^sub>l s x\<^sub>i = LB Positive s x\<^sub>i" by simp show "check_dom (case min_rvar_incdec Positive s x\<^sub>i of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j (the (\\<^sub>l s x\<^sub>i)) s)" apply (subst \\\<^sub>l s x\<^sub>i = LB Positive s x\<^sub>i\) apply (rule check_sat_terminates'_aux[of Positive s x\<^sub>i]) using \\ s\ \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ * using \\ \ s\ \Some x\<^sub>i = min_lvar_not_in_bounds s\ \\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ by (simp_all add: bound_compare''_defs) next fix x\<^sub>i assume "\ \ s" "Some x\<^sub>i = min_lvar_not_in_bounds s" "\ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i" then have "\\ s\ x\<^sub>i >\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i" using min_lvar_not_in_bounds_Some[of s x\<^sub>i] using neg_bounds_compare(7) neg_bounds_compare(2) by auto have "\\<^sub>u s x\<^sub>i = LB Negative s x\<^sub>i" by simp show "check_dom (case min_rvar_incdec Negative s x\<^sub>i of Inl I \ set_unsat I s | Inr x\<^sub>j \ pivot_and_update x\<^sub>i x\<^sub>j (the (\\<^sub>u s x\<^sub>i)) s)" apply (subst \\\<^sub>u s x\<^sub>i = LB Negative s x\<^sub>i\) apply (rule check_sat_terminates'_aux) using \\ s\ \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ * using \\ \ s\ \Some x\<^sub>i = min_lvar_not_in_bounds s\ \\\ s\ x\<^sub>i >\<^sub>u\<^sub>b \\<^sub>u s x\<^sub>i\ \\ \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i\ by (simp_all add: bound_compare''_defs) qed qed lemma check_sat_terminates: assumes "\ s" "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" shows "check_dom s" using assms using check_sat_terminates'[of s s] by simp lemma check_cases: assumes "\ s \ P s" assumes "\\ \ s; min_lvar_not_in_bounds s = None\ \ P s" assumes "\ x\<^sub>i dir I. \dir = Positive \ dir = Negative; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I\ \ P (set_unsat I s)" assumes "\ x\<^sub>i x\<^sub>j l\<^sub>i dir. 
\dir = (if \\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i then Positive else Negative); \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inr x\<^sub>j; l\<^sub>i = the (LB dir s x\<^sub>i); check' dir x\<^sub>i s = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s\ \ P (check (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s))" assumes "\ (\ s)" "\ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" shows "P (check s)" proof (cases "\ s") case True then show ?thesis using assms(1) using check.simps[of s] by simp next case False show ?thesis proof (cases "min_lvar_not_in_bounds s") case None then show ?thesis using \\ \ s\ using assms(2) \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ using check.simps[of s] by simp next case (Some x\<^sub>i) let ?dir = "if (\\ s\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s x\<^sub>i) then (Positive :: ('i,'a)Direction) else Negative" let ?s' = "check' ?dir x\<^sub>i s" have "\\<^sub>l\<^sub>b (lt ?dir) (\\ s\ x\<^sub>i) (LB ?dir s x\<^sub>i)" using \min_lvar_not_in_bounds s = Some x\<^sub>i\ using min_lvar_not_in_bounds_Some[of s x\<^sub>i] using not_in_bounds[of x\<^sub>i "\\ s\" "\\<^sub>l s" "\\<^sub>u s"] by (auto split: if_splits simp add: bound_compare''_defs) have "P (check ?s')" apply (rule check'_cases) using \\ \ s\ \min_lvar_not_in_bounds s = Some x\<^sub>i\ \\\<^sub>l\<^sub>b (lt ?dir) (\\ s\ x\<^sub>i) (LB ?dir s x\<^sub>i)\ using assms(3)[of ?dir x\<^sub>i] using assms(4)[of ?dir x\<^sub>i] using check.simps[of "set_unsat (_ :: 'i list) s"] using \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by (auto simp add: bounds_consistent_def curr_val_satisfies_no_lhs_def) then show ?thesis using \\ \ s\ \min_lvar_not_in_bounds s = Some x\<^sub>i\ using check.simps[of s] using \\ (\ s)\ \\ s\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s\ by auto qed qed lemma check_induct: fixes s :: "('i,'a) state" assumes *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" assumes **: "\ s. \ s \ P s s" "\ s. \\ \ s; min_lvar_not_in_bounds s = None\ \ P s s" "\ s x\<^sub>i dir I. \dir = Positive \ dir = Negative; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I\ \ P s (set_unsat I s)" assumes step': "\ s x\<^sub>i x\<^sub>j l\<^sub>i. \\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i)\ \ P s (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" assumes trans': "\ si sj sk. 
\P si sj; P sj sk\ \ P si sk" shows "P s (check s)" proof- have "check_dom s" using * by (simp add: check_sat_terminates) then show ?thesis using * proof (induct s rule: check_dom.induct) case (step s') show ?case proof (rule check_cases) fix x\<^sub>i x\<^sub>j l\<^sub>i dir let ?dir = "if \\ s'\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s' x\<^sub>i then Positive else Negative" let ?s' = "check' dir x\<^sub>i s'" assume "\ \ s'" "min_lvar_not_in_bounds s' = Some x\<^sub>i" "min_rvar_incdec dir s' x\<^sub>i = Inr x\<^sub>j" "l\<^sub>i = the (LB dir s' x\<^sub>i)" "?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'" "dir = ?dir" moreover then have "\ ?s'" "\ (\ ?s')" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s'" "\ ?s'" using \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ using pivotandupdate_check_precond[of dir s' x\<^sub>i x\<^sub>j l\<^sub>i] by auto ultimately have "P (check' dir x\<^sub>i s') (check (check' dir x\<^sub>i s'))" using step(2)[of x\<^sub>i] step(4)[of x\<^sub>i] \\ (\ s')\ \\ s'\ by auto then show "P s' (check (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'))" using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ \\ (\ s')\ \\ s'\ using \min_lvar_not_in_bounds s' = Some x\<^sub>i\ \min_rvar_incdec dir s' x\<^sub>i = Inr x\<^sub>j\ using step'[of s' x\<^sub>i x\<^sub>j l\<^sub>i] using trans'[of s' ?s' "check ?s'"] by (auto simp add: min_lvar_not_in_bounds_lvars min_rvar_incdec_eq_Some_rvars) qed (simp_all add: \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ **) qed qed lemma check_induct': fixes s :: "('i,'a) state" assumes "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" assumes "\ s x\<^sub>i dir I. \dir = Positive \ dir = Negative; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I; P s\ \ P (set_unsat I s)" assumes "\ s x\<^sub>i x\<^sub>j l\<^sub>i. \\ (\ s); \ s; x\<^sub>i \ lvars (\ s); x\<^sub>j \ rvars_eq (eq_for_lvar (\ s) x\<^sub>i); P s\ \ P (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s)" assumes "P s" shows "P (check s)" proof- have "P s \ P (check s)" by (rule check_induct) (simp_all add: assms) then show ?thesis using \P s\ by simp qed lemma check_induct'': fixes s :: "('i,'a) state" assumes *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" assumes **: "\ s \ P s" "\ s. \\ s; \ (\ s); \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ \ s; min_lvar_not_in_bounds s = None\ \ P s" "\ s x\<^sub>i dir I. 
\dir = Positive \ dir = Negative; \ s; \ (\ s); \\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s; \ s; \ \ s; min_lvar_not_in_bounds s = Some x\<^sub>i; \\<^sub>l\<^sub>b (lt dir) (\\ s\ x\<^sub>i) (LB dir s x\<^sub>i); min_rvar_incdec dir s x\<^sub>i = Inl I\ \ P (set_unsat I s)" shows "P (check s)" proof (cases "\ s") case True then show ?thesis using \\ s \ P s\ by (simp add: check.simps) next case False have "check_dom s" using * by (simp add: check_sat_terminates) then show ?thesis using * False proof (induct s rule: check_dom.induct) case (step s') show ?case proof (rule check_cases) fix x\<^sub>i x\<^sub>j l\<^sub>i dir let ?dir = "if \\ s'\ x\<^sub>i <\<^sub>l\<^sub>b \\<^sub>l s' x\<^sub>i then Positive else Negative" let ?s' = "check' dir x\<^sub>i s'" assume "\ \ s'" "min_lvar_not_in_bounds s' = Some x\<^sub>i" "min_rvar_incdec dir s' x\<^sub>i = Inr x\<^sub>j" "l\<^sub>i = the (LB dir s' x\<^sub>i)" "?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'" "dir = ?dir" moreover then have "\ ?s'" "\ (\ ?s')" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s ?s'" "\ ?s'" "\ \ ?s'" using \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ using pivotandupdate_check_precond[of dir s' x\<^sub>i x\<^sub>j l\<^sub>i] using pivotandupdate_unsat_id[of s' x\<^sub>i x\<^sub>j l\<^sub>i] by (auto simp add: min_lvar_not_in_bounds_lvars min_rvar_incdec_eq_Some_rvars) ultimately have "P (check (check' dir x\<^sub>i s'))" using step(2)[of x\<^sub>i] step(4)[of x\<^sub>i] \\ (\ s')\ \\ s'\ by auto then show "P (check (pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'))" using \?s' = pivot_and_update x\<^sub>i x\<^sub>j l\<^sub>i s'\ by simp qed (simp_all add: \\ s'\ \\ (\ s')\ \\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'\ \\ s'\ \\ \ s'\ ** ) qed qed end lemma poly_eval_update: "(p \ v ( x := c :: 'a :: lrv) \) = (p \ v \) + coeff p x *R (c - v x)" proof (transfer, simp, goal_cases) case (1 p v x c) hence fin: "finite {v. p v \ 0}" by simp have "(\y\{v. p v \ 0}. p y *R (if y = x then c else v y)) = (\y\{v. p v \ 0} \ {x}. p y *R (if y = x then c else v y)) + (\y\{v. p v \ 0} \ (UNIV - {x}). p y *R (if y = x then c else v y))" (is "?l = ?a + ?b") by (subst sum.union_disjoint[symmetric], auto intro: sum.cong fin) also have "?a = (if p x = 0 then 0 else p x *R c)" by auto also have "\ = p x *R c" by auto also have "?b = (\y\{v. p v \ 0} \ (UNIV - {x}). p y *R v y)" (is "_ = ?c") by (rule sum.cong, auto) finally have l: "?l = p x *R c + ?c" . define r where "r = (\y\{v. p v \ 0}. p y *R v y) + p x *R (c - v x)" have "r = (\y\{v. p v \ 0}. p y *R v y) + p x *R (c - v x)" by (simp add: r_def) also have "(\y\{v. p v \ 0}. p y *R v y) = (\y\{v. p v \ 0} \ {x}. p y *R v y) + ?c" (is "_ = ?d + _") by (subst sum.union_disjoint[symmetric], auto intro: sum.cong fin) also have "?d = (if p x = 0 then 0 else p x *R v x)" by auto also have "\ = p x *R v x" by auto finally have "(p x *R (c - v x) + p x *R v x) + ?c = r" by simp also have "(p x *R (c - v x) + p x *R v x) = p x *R c" unfolding scaleRat_right_distrib[symmetric] by simp finally have r: "p x *R c + ?c = r" . show ?case unfolding l r r_def .. 
qed context PivotUpdateMinVars begin context fixes rhs_eq_val :: "(var, 'a::lrv) mapping \ var \ 'a \ eq \ 'a" assumes "RhsEqVal rhs_eq_val" begin lemma check_minimal_unsat_state_core: assumes *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" shows "\ (check s) \ minimal_unsat_state_core (check s)" (is "?P (check s)") proof (rule check_induct'') fix s' :: "('i,'a) state" and x\<^sub>i dir I assume nolhs: "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s'" and min_rvar: "min_rvar_incdec dir s' x\<^sub>i = Inl I" and sat: "\ \ s'" and min_lvar: "min_lvar_not_in_bounds s' = Some x\<^sub>i" and dir: "dir = Positive \ dir = Negative" and lt: "\\<^sub>l\<^sub>b (lt dir) (\\ s'\ x\<^sub>i) (LB dir s' x\<^sub>i)" and norm: "\ (\ s')" and valuated: "\ s'" let ?eq = "eq_for_lvar (\ s') x\<^sub>i" have unsat_core: "set (the (\\<^sub>c (set_unsat I s'))) = set I" by auto obtain l\<^sub>i where LB_Some: "LB dir s' x\<^sub>i = Some l\<^sub>i" and lt: "lt dir (\\ s'\ x\<^sub>i) l\<^sub>i" using lt by (cases "LB dir s' x\<^sub>i") (auto simp add: bound_compare_defs) from LB_Some dir obtain i where LBI: "look (LBI dir s') x\<^sub>i = Some (i,l\<^sub>i)" and LI: "LI dir s' x\<^sub>i = i" by (auto simp: simp: indexl_def indexu_def boundsl_def boundsu_def) from min_rvar_incdec_eq_None[OF min_rvar] dir have Is': "LI dir s' (lhs (eq_for_lvar (\ s') x\<^sub>i)) \ indices_state s' \ set I \ indices_state s'" and reasable: "\ x. x \ rvars_eq ?eq \ \ reasable_var dir x ?eq s'" and setI: "set I = {LI dir s' (lhs ?eq)} \ {LI dir s' x |x. x \ rvars_eq ?eq \ coeff (rhs ?eq) x < 0} \ {UI dir s' x |x. x \ rvars_eq ?eq \ 0 < coeff (rhs ?eq) x}" (is "_ = ?L \ ?R1 \ ?R2") by auto note setI also have id: "lhs ?eq = x\<^sub>i" by (simp add: EqForLVar.eq_for_lvar EqForLVar_axioms min_lvar min_lvar_not_in_bounds_lvars) finally have iI: "i \ set I" unfolding LI by auto note setI = setI[unfolded id] have "LI dir s' x\<^sub>i \ indices_state s'" using LBI LI unfolding indices_state_def using dir by force from Is'[unfolded id, OF this] have Is': "set I \ indices_state s'" . 
have "x\<^sub>i \ lvars (\ s')" using min_lvar by (simp add: min_lvar_not_in_bounds_lvars) then have **: "?eq \ set (\ s')" "lhs ?eq = x\<^sub>i" by (auto simp add: eq_for_lvar) have Is': "set I \ indices_state (set_unsat I s')" using Is' * unfolding indices_state_def by auto have "\\ s'\ \\<^sub>t \ s'" and b: "\\ s'\ \\<^sub>b \ s' \ - lvars (\ s')" using nolhs[unfolded curr_val_satisfies_no_lhs_def] by auto from norm[unfolded normalized_tableau_def] have lvars_rvars: "lvars (\ s') \ rvars (\ s') = {}" by auto hence in_bnds: "x \ rvars (\ s') \ in_bounds x \\ s'\ (\ s')" for x by (intro b[unfolded satisfies_bounds_set.simps, rule_format, of x], auto) { assume dist: "distinct_indices_state (set_unsat I s')" hence "distinct_indices_state s'" unfolding distinct_indices_state_def by auto note dist = this[unfolded distinct_indices_state_def, rule_format] { fix x c i y assume c: "look (\\<^sub>i\<^sub>l s') x = Some (i,c) \ look (\\<^sub>i\<^sub>u s') x = Some (i,c)" and y: "y \ rvars_eq ?eq" and coeff: "coeff (rhs ?eq) y < 0 \ i = LI dir s' y \ coeff (rhs ?eq) y > 0 \ i = UI dir s' y" { assume coeff: "coeff (rhs ?eq) y < 0" and i: "i = LI dir s' y" from reasable[OF y] coeff have not_gt: "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s'\ y) (LB dir s' y))" by auto then obtain d where LB: "LB dir s' y = Some d" using dir by (cases "LB dir s' y", auto simp: bound_compare_defs) with not_gt have le: "le (lt dir) (\\ s'\ y) d" using dir by (auto simp: bound_compare_defs) from LB have "look (LBI dir s') y = Some (i, d)" unfolding i using dir by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) with c dist[of x i c y d] dir have yx: "y = x" "d = c" by auto from y[unfolded yx] have "x \ rvars (\ s')" using **(1) unfolding rvars_def by force from in_bnds[OF this] le LB not_gt i have "\\ s'\ x = c" unfolding yx using dir by auto note yx(1) this } moreover { assume coeff: "coeff (rhs ?eq) y > 0" and i: "i = UI dir s' y" from reasable[OF y] coeff have not_gt: "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s'\ y) (UB dir s' y))" by auto then obtain d where UB: "UB dir s' y = Some d" using dir by (cases "UB dir s' y", auto simp: bound_compare_defs) with not_gt have le: "le (lt dir) d (\\ s'\ y)" using dir by (auto simp: bound_compare_defs) from UB have "look (UBI dir s') y = Some (i, d)" unfolding i using dir by (auto simp: boundsl_def boundsu_def indexl_def indexu_def) with c dist[of x i c y d] dir have yx: "y = x" "d = c" by auto from y[unfolded yx] have "x \ rvars (\ s')" using **(1) unfolding rvars_def by force from in_bnds[OF this] le UB not_gt i have "\\ s'\ x = c" unfolding yx using dir by auto note yx(1) this } ultimately have "y = x" "\\ s'\ x = c" using coeff by blast+ } note x_vars_main = this { fix x c i assume c: "look (\\<^sub>i\<^sub>l s') x = Some (i,c) \ look (\\<^sub>i\<^sub>u s') x = Some (i,c)" and i: "i \ ?R1 \ ?R2" from i obtain y where y: "y \ rvars_eq ?eq" and coeff: "coeff (rhs ?eq) y < 0 \ i = LI dir s' y \ coeff (rhs ?eq) y > 0 \ i = UI dir s' y" by auto from x_vars_main[OF c y coeff] have "y = x" "\\ s'\ x = c" using coeff by blast+ with y have "x \ rvars_eq ?eq" "x \ rvars (\ s')" "\\ s'\ x = c" using **(1) unfolding rvars_def by force+ } note x_rvars = this have R1R2: "(?R1 \ ?R2, \\ s'\) \\<^sub>i\<^sub>s\<^sub>e s'" unfolding satisfies_state_index'.simps proof (intro conjI) show "\\ s'\ \\<^sub>t \ s'" by fact show "(?R1 \ ?R2, \\ s'\) \\<^sub>i\<^sub>b\<^sub>e \\ s'" unfolding satisfies_bounds_index'.simps proof (intro conjI impI allI) fix x c assume c: "\\<^sub>l s' x = Some c" and i: 
"\\<^sub>l s' x \ ?R1 \ ?R2" from c have ci: "look (\\<^sub>i\<^sub>l s') x = Some (\\<^sub>l s' x, c)" unfolding boundsl_def indexl_def by auto from x_rvars[OF _ i] ci show "\\ s'\ x = c" by auto next fix x c assume c: "\\<^sub>u s' x = Some c" and i: "\\<^sub>u s' x \ ?R1 \ ?R2" from c have ci: "look (\\<^sub>i\<^sub>u s') x = Some (\\<^sub>u s' x, c)" unfolding boundsu_def indexu_def by auto from x_rvars[OF _ i] ci show "\\ s'\ x = c" by auto qed qed have id1: "set (the (\\<^sub>c (set_unsat I s'))) = set I" "\ x. x \\<^sub>i\<^sub>s\<^sub>e set_unsat I s' \ x \\<^sub>i\<^sub>s\<^sub>e s'" by (auto simp: satisfies_state_index'.simps boundsl_def boundsu_def indexl_def indexu_def) have "subsets_sat_core (set_unsat I s')" unfolding subsets_sat_core_def id1 proof (intro allI impI) fix J assume sub: "J \ set I" show "\v. (J, v) \\<^sub>i\<^sub>s\<^sub>e s'" proof (cases "J \ ?R1 \ ?R2") case True with R1R2 have "(J, \\ s'\) \\<^sub>i\<^sub>s\<^sub>e s'" unfolding satisfies_state_index'.simps satisfies_bounds_index'.simps by blast thus ?thesis by blast next case False with sub obtain k where k: "k \ ?R1 \ ?R2" "k \ J" "k \ set I" unfolding setI by auto from k(1) obtain y where y: "y \ rvars_eq ?eq" and coeff: "coeff (rhs ?eq) y < 0 \ k = LI dir s' y \ coeff (rhs ?eq) y > 0 \ k = UI dir s' y" by auto hence cy0: "coeff (rhs ?eq) y \ 0" by auto from y **(1) have ry: "y \ rvars (\ s')" unfolding rvars_def by force hence yl: "y \ lvars (\ s')" using lvars_rvars by blast interpret rev: RhsEqVal rhs_eq_val by fact note update = rev.update_valuation_nonlhs[THEN mp, OF norm valuated yl] define diff where "diff = l\<^sub>i - \\ s'\ x\<^sub>i" have "\\ s'\ x\<^sub>i < l\<^sub>i \ 0 < l\<^sub>i - \\ s'\ x\<^sub>i" "l\<^sub>i < \\ s'\ x\<^sub>i \ l\<^sub>i - \\ s'\ x\<^sub>i < 0" using minus_gt by (blast, insert minus_lt, blast) with lt dir have diff: "lt dir 0 diff" by (auto simp: diff_def) define up where "up = inverse (coeff (rhs ?eq) y) *R diff" define v where "v = \\ (rev.update y (\\ s'\ y + up) s')\" show ?thesis unfolding satisfies_state_index'.simps proof (intro exI[of _ v] conjI) show "v \\<^sub>t \ s'" unfolding v_def using rev.update_satisfies_tableau[OF norm valuated yl] \\\ s'\ \\<^sub>t \ s'\ by auto with **(1) have "v \\<^sub>e ?eq" unfolding satisfies_tableau_def by auto from this[unfolded satisfies_eq_def id] have v_xi: "v x\<^sub>i = (rhs ?eq \ v \)" . from \\\ s'\ \\<^sub>t \ s'\ **(1) have "\\ s'\ \\<^sub>e ?eq" unfolding satisfies_tableau_def by auto hence V_xi: "\\ s'\ x\<^sub>i = (rhs ?eq \ \\ s'\ \)" unfolding satisfies_eq_def id . have "v x\<^sub>i = \\ s'\ x\<^sub>i + coeff (rhs ?eq) y *R up" unfolding v_xi unfolding v_def rev.update_valuate_rhs[OF **(1) norm] poly_eval_update V_xi by simp also have "\ = l\<^sub>i" unfolding up_def diff_def scaleRat_scaleRat using cy0 by simp finally have v_xi_l: "v x\<^sub>i = l\<^sub>i" . 
{ assume both: "\\<^sub>u s' y \ ?R1 \ ?R2" "\\<^sub>u s' y \ None" "\\<^sub>l s' y \ ?R1 \ ?R2" "\\<^sub>l s' y \ None" and diff: "\\<^sub>l s' y \ \\<^sub>u s' y" from both(1) dir obtain xu cu where looku: "look (\\<^sub>i\<^sub>l s') xu = Some (\\<^sub>u s' y, cu) \ look (\\<^sub>i\<^sub>u s') xu = Some (\\<^sub>u s' y,cu)" by (smt Is' indices_state_def le_sup_iff mem_Collect_eq setI set_unsat_simps subsetCE) from both(1) obtain xu' where "xu' \ rvars_eq ?eq" "coeff (rhs ?eq) xu' < 0 \ \\<^sub>u s' y = LI dir s' xu' \ coeff (rhs ?eq) xu' > 0 \ \\<^sub>u s' y = UI dir s' xu'" by blast with x_vars_main(1)[OF looku this] have xu: "xu \ rvars_eq ?eq" "coeff (rhs ?eq) xu < 0 \ \\<^sub>u s' y = LI dir s' xu \ coeff (rhs ?eq) xu > 0 \ \\<^sub>u s' y = UI dir s' xu" by auto { assume "xu \ y" with dist[OF looku, of y] have "look (\\<^sub>i\<^sub>u s') y = None" by (cases "look (\\<^sub>i\<^sub>u s') y", auto simp: boundsu_def indexu_def, blast) with both(2) have False by (simp add: boundsu_def) } hence xu_y: "xu = y" by blast from both(3) dir obtain xl cl where lookl: "look (\\<^sub>i\<^sub>l s') xl = Some (\\<^sub>l s' y, cl) \ look (\\<^sub>i\<^sub>u s') xl = Some (\\<^sub>l s' y,cl)" by (smt Is' indices_state_def le_sup_iff mem_Collect_eq setI set_unsat_simps subsetCE) from both(3) obtain xl' where "xl' \ rvars_eq ?eq" "coeff (rhs ?eq) xl' < 0 \ \\<^sub>l s' y = LI dir s' xl' \ coeff (rhs ?eq) xl' > 0 \ \\<^sub>l s' y = UI dir s' xl'" by blast with x_vars_main(1)[OF lookl this] have xl: "xl \ rvars_eq ?eq" "coeff (rhs ?eq) xl < 0 \ \\<^sub>l s' y = LI dir s' xl \ coeff (rhs ?eq) xl > 0 \ \\<^sub>l s' y = UI dir s' xl" by auto { assume "xl \ y" with dist[OF lookl, of y] have "look (\\<^sub>i\<^sub>l s') y = None" by (cases "look (\\<^sub>i\<^sub>l s') y", auto simp: boundsl_def indexl_def, blast) with both(4) have False by (simp add: boundsl_def) } hence xl_y: "xl = y" by blast from xu(2) xl(2) diff have diff: "xu \ xl" by auto with xu_y xl_y have False by simp } note both_y_False = this show "(J, v) \\<^sub>i\<^sub>b\<^sub>e \\ s'" unfolding satisfies_bounds_index'.simps proof (intro conjI allI impI) fix x c assume x: "\\<^sub>l s' x = Some c" "\\<^sub>l s' x \ J" with k have not_k: "\\<^sub>l s' x \ k" by auto from x have ci: "look (\\<^sub>i\<^sub>l s') x = Some (\\<^sub>l s' x, c)" unfolding boundsl_def indexl_def by auto show "v x = c" proof (cases "\\<^sub>l s' x = i") case False hence iR12: "\\<^sub>l s' x \ ?R1 \ ?R2" using sub x unfolding setI LI by blast from x_rvars(2-3)[OF _ iR12] ci have xr: "x \ rvars (\ s')" and val: "\\ s'\ x = c" by auto with lvars_rvars have xl: "x \ lvars (\ s')" by auto show ?thesis proof (cases "x = y") case False thus ?thesis using val unfolding v_def map2fun_def' update[OF xl] using val by auto next case True note coeff = coeff[folded True] from coeff not_k dir ci have Iu: "\\<^sub>u s' x = k" by auto with ci Iu x(2) k sub False True have both: "\\<^sub>u s' y \ ?R1 \ ?R2" "\\<^sub>l s' y \ ?R1 \ ?R2" and diff: "\\<^sub>l s' y \ \\<^sub>u s' y" unfolding setI LI by auto have "\\<^sub>l s' y \ None" using x True by simp from both_y_False[OF both(1) _ both(2) this diff] have "\\<^sub>u s' y = None" by metis with reasable[OF y] dir coeff True have "dir = Negative \ 0 < coeff (rhs ?eq) y" "dir = Positive \ 0 > coeff (rhs ?eq) y" by (auto simp: bound_compare_defs) with dir coeff[unfolded True] have "k = \\<^sub>l s' y" by auto with diff Iu False True have False by auto thus ?thesis .. 
qed next case True from LBI ci[unfolded True] dir dist[unfolded distinct_indices_state_def, rule_format, of x i c x\<^sub>i l\<^sub>i] have xxi: "x = x\<^sub>i" and c: "c = l\<^sub>i" by auto have vxi: "v x = l\<^sub>i" unfolding xxi v_xi_l .. thus ?thesis unfolding c by simp qed next fix x c assume x: "\\<^sub>u s' x = Some c" "\\<^sub>u s' x \ J" with k have not_k: "\\<^sub>u s' x \ k" by auto from x have ci: "look (\\<^sub>i\<^sub>u s') x = Some (\\<^sub>u s' x, c)" unfolding boundsu_def indexu_def by auto show "v x = c" proof (cases "\\<^sub>u s' x = i") case False hence iR12: "\\<^sub>u s' x \ ?R1 \ ?R2" using sub x unfolding setI LI by blast from x_rvars(2-3)[OF _ iR12] ci have xr: "x \ rvars (\ s')" and val: "\\ s'\ x = c" by auto with lvars_rvars have xl: "x \ lvars (\ s')" by auto show ?thesis proof (cases "x = y") case False thus ?thesis using val unfolding v_def map2fun_def' update[OF xl] using val by auto next case True note coeff = coeff[folded True] from coeff not_k dir ci have Iu: "\\<^sub>l s' x = k" by auto with ci Iu x(2) k sub False True have both: "\\<^sub>u s' y \ ?R1 \ ?R2" "\\<^sub>l s' y \ ?R1 \ ?R2" and diff: "\\<^sub>l s' y \ \\<^sub>u s' y" unfolding setI LI by auto have "\\<^sub>u s' y \ None" using x True by simp from both_y_False[OF both(1) this both(2) _ diff] have "\\<^sub>l s' y = None" by metis with reasable[OF y] dir coeff True have "dir = Negative \ 0 > coeff (rhs ?eq) y" "dir = Positive \ 0 < coeff (rhs ?eq) y" by (auto simp: bound_compare_defs) with dir coeff[unfolded True] have "k = \\<^sub>u s' y" by auto with diff Iu False True have False by auto thus ?thesis .. qed next case True from LBI ci[unfolded True] dir dist[unfolded distinct_indices_state_def, rule_format, of x i c x\<^sub>i l\<^sub>i] have xxi: "x = x\<^sub>i" and c: "c = l\<^sub>i" by auto have vxi: "v x = l\<^sub>i" unfolding xxi v_xi_l .. thus ?thesis unfolding c by simp qed qed qed qed qed } note minimal_core = this have unsat_core: "unsat_state_core (set_unsat I s')" unfolding unsat_state_core_def unsat_core proof (intro impI conjI Is', clarify) fix v assume "(set I, v) \\<^sub>i\<^sub>s set_unsat I s'" then have Iv: "(set I, v) \\<^sub>i\<^sub>s s'" unfolding satisfies_state_index.simps by (auto simp: indexl_def indexu_def boundsl_def boundsu_def) from Iv have vt: "v \\<^sub>t \ s'" and Iv: "(set I, v) \\<^sub>i\<^sub>b \\ s'" unfolding satisfies_state_index.simps by auto have lt_le_eq: "\ x y :: 'a. (x < y) \ (x \ y \ x \ y)" by auto from Iv dir have lb: "\ x i c l. look (LBI dir s') x = Some (i,l) \ i \ set I \ le (lt dir) l (v x)" unfolding satisfies_bounds_index.simps by (auto simp: lt_le_eq indexl_def indexu_def boundsl_def boundsu_def) from lb[OF LBI iI] have li_x: "le (lt dir) l\<^sub>i (v x\<^sub>i)" . have "\\ s'\ \\<^sub>e ?eq" using nolhs \?eq \ set (\ s')\ unfolding curr_val_satisfies_no_lhs_def by (simp add: satisfies_tableau_def) then have "\\ s'\ x\<^sub>i = (rhs ?eq) \ \\ s'\ \" using \lhs ?eq = x\<^sub>i\ by (simp add: satisfies_eq_def) moreover have "v \\<^sub>e ?eq" using vt \?eq \ set (\ s')\ by (simp add: satisfies_state_def satisfies_tableau_def) then have "v x\<^sub>i = (rhs ?eq) \ v \" using \lhs ?eq = x\<^sub>i\ by (simp add: satisfies_eq_def) moreover have "\\<^sub>l\<^sub>b (lt dir) (v x\<^sub>i) (LB dir s' x\<^sub>i)" using li_x dir unfolding LB_Some by (auto simp: bound_compare'_defs) moreover from min_rvar_incdec_eq_None'[rule_format, OF dir min_rvar refl Iv] have "le (lt dir) (rhs (?eq) \v\) (rhs (?eq) \ \\ s'\ \)" . 
ultimately show False using dir lt LB_Some by (auto simp add: bound_compare_defs) qed thus "\ (set_unsat I s') \ minimal_unsat_state_core (set_unsat I s')" using minimal_core by (auto simp: minimal_unsat_state_core_def) qed (simp_all add: *) lemma Check_check: "Check check" proof fix s :: "('i,'a) state" assume "\ s" then show "check s = s" by (simp add: check.simps) next fix s :: "('i,'a) state" and v :: "'a valuation" assume *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" then have "v \\<^sub>t \ s = v \\<^sub>t \ (check s)" by (rule check_induct, simp_all add: pivotandupdate_tableau_equiv) moreover have "\ (\ (check s))" by (rule check_induct', simp_all add: * pivotandupdate_tableau_normalized) moreover have "\ (check s)" proof (rule check_induct', simp_all add: * pivotandupdate_tableau_valuated) fix s I show "\ s \ \ (set_unsat I s)" by (simp add: tableau_valuated_def) qed ultimately show "let s' = check s in v \\<^sub>t \ s = v \\<^sub>t \ s' \ \ (\ s') \ \ s'" by (simp add: Let_def) next fix s :: "('i,'a) state" assume *: "\ s" "\ (\ s)" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" from * show "\\<^sub>i (check s) = \\<^sub>i s" by (rule check_induct, simp_all add: pivotandupdate_bounds_id) next fix s :: "('i,'a) state" assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" have "\ \ (check s) \ \ (check s)" proof (rule check_induct'', simp_all add: *) fix s assume "min_lvar_not_in_bounds s = None" "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" then show " \ s" using min_lvar_not_in_bounds_None[of s] unfolding curr_val_satisfies_state_def satisfies_state_def unfolding curr_val_satisfies_no_lhs_def by (auto simp add: satisfies_bounds_set.simps satisfies_bounds.simps) qed then show "\ \ (check s) \ \ (check s)" by blast next fix s :: "('i,'a) state" assume *: "\ \ s" "\\<^sub>n\<^sub>o\<^sub>l\<^sub>h\<^sub>s s" "\ s" "\ (\ s)" "\ s" have "\ (check s) \ minimal_unsat_state_core (check s)" by (rule check_minimal_unsat_state_core[OF *]) then show "\ (check s) \ minimal_unsat_state_core (check s)" by blast qed end end subsection\Symmetries\ text\\label{sec:symmetries} Simplex algorithm exhibits many symmetric cases. For example, \assert_bound\ treats atoms \Leq x c\ and \Geq x c\ in a symmetric manner, \check_inc\ and \check_dec\ are symmetric, etc. These symmetric cases differ only in several aspects: order relations between numbers (\<\ vs \>\ and \\\ vs \\\), the role of lower and upper bounds (\\\<^sub>l\ vs \\\<^sub>u\) and their updating functions, comparisons with bounds (e.g., \\\<^sub>u\<^sub>b\ vs \\\<^sub>l\<^sub>b\ or \<\<^sub>l\<^sub>b\ vs \>\<^sub>u\<^sub>b\), and atom constructors (\Leq\ and \Geq\). These can be attributed to two different orientations (positive and negative) of rational axis. To avoid duplicating definitions and proofs, \assert_bound\ definition cases for \Leq\ and \Geq\ are replaced by a call to a newly introduced function parametrized by a \Direction\ --- a record containing minimal set of aspects listed above that differ in two definition cases such that other aspects can be derived from them (e.g., only \<\ need to be stored while \\\ can be derived from it). Two constants of the type \Direction\ are defined: \Positive\ (with \<\, \\\ orders, \\\<^sub>l\ for lower and \\\<^sub>u\ for upper bounds and their corresponding updating functions, and \Leq\ constructor) and \Negative\ (completely opposite from the previous one). 
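As an informal illustration (stated in plain mathematical notation rather than via the actual record fields): the single parametrized condition "the current value of a basic variable $x_i$ violates its bound in direction $dir$" instantiates to $v(x_i) < l_i$ for \Positive\ and to $v(x_i) > u_i$ for \Negative\, and the subsequent search for a non-basic variable suitable for pivoting is likewise written once, relative to $dir$, instead of once per orientation of the inequality.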
Similarly, \check_inc\ and \check_dec\ are replaced by a new function \check_incdec\ parametrized by a \Direction\. All lemmas, previously repeated for each symmetric instance, were replaced by a more abstract one, again parametrized by a \Direction\ parameter. \vspace{-3mm} \ (*-------------------------------------------------------------------------- *) subsection\Concrete implementation\ (*-------------------------------------------------------------------------- *) (* -------------------------------------------------------------------------- *) (* Init init_state *) (* -------------------------------------------------------------------------- *) text\It is easy to give a concrete implementation of the initial state constructor, which satisfies the specification of the @{term Init} locale. For example:\ definition init_state :: "_ \ ('i,'a :: zero)state" where "init_state t = State t Mapping.empty Mapping.empty (Mapping.tabulate (vars_list t) (\ v. 0)) False None" interpretation Init "init_state :: _ \ ('i,'a :: lrv)state" proof fix t let ?init = "init_state t :: ('i,'a)state" show "\\ ?init\ \\<^sub>t t" unfolding satisfies_tableau_def satisfies_eq_def proof (safe) fix l r assume "(l, r) \ set t" then have "l \ set (vars_list t)" "vars r \ set (vars_list t)" by (auto simp: set_vars_list) (transfer, force) then have *: "vars r \ lhs ` set t \ (\x\set t. rvars_eq x)" by (auto simp: set_vars_list) have "\\ ?init\ l = (0::'a)" using \l \ set (vars_list t)\ unfolding init_state_def by (auto simp: map2fun_def lookup_tabulate) moreover have "r \ \\ ?init\ \ = (0::'a)" using * proof (transfer fixing: t, goal_cases) case (1 r) { fix x assume "x\{v. r v \ 0}" then have "r x *R \\ ?init\ x = (0::'a)" using 1 unfolding init_state_def by (auto simp add: map2fun_def lookup_tabulate comp_def restrict_map_def set_vars_list Abstract_Linear_Poly.vars_def) } then show ?case by auto qed ultimately show "\\ ?init\ (lhs (l, r)) = rhs (l, r) \ \\ ?init\ \" by auto qed next fix t show "\ (init_state t)" unfolding init_state_def by (auto simp add: lookup_tabulate tableau_valuated_def comp_def restrict_map_def set_vars_list lvars_def rvars_def) qed (simp_all add: init_state_def add: boundsl_def boundsu_def indexl_def indexu_def) (* -------------------------------------------------------------------------- *) (* MinVars min_lvar_not_in_bounds min_rvar_incdec_eq *) (* -------------------------------------------------------------------------- *) definition min_lvar_not_in_bounds :: "('i,'a::{linorder,zero}) state \ var option" where "min_lvar_not_in_bounds s \ min_satisfying (\ x. \ in_bounds x (\\ s\) (\ s)) (map lhs (\ s))" interpretation MinLVarNotInBounds "min_lvar_not_in_bounds :: ('i,'a::lrv) state \ _" proof fix s::"('i,'a) state" show "min_lvar_not_in_bounds s = None \ (\x\lvars (\ s). in_bounds x (\\ s\) (\ s))" unfolding min_lvar_not_in_bounds_def lvars_def using min_satisfying_None by blast next fix s x\<^sub>i show "min_lvar_not_in_bounds s = Some x\<^sub>i \ x\<^sub>i \ lvars (\ s) \ \ in_bounds x\<^sub>i \\ s\ (\ s) \ (\x\lvars (\ s). x < x\<^sub>i \ in_bounds x \\ s\ (\ s))" unfolding min_lvar_not_in_bounds_def lvars_def using min_satisfying_Some by blast+ qed \ \all variables in vs have either a positive or a negative coefficient, so no equal-zero test required.\ definition unsat_indices :: "('i,'a :: linorder) Direction \ ('i,'a) state \ var list \ eq \ 'i list" where "unsat_indices dir s vs eq = (let r = rhs eq; li = LI dir s; ui = UI dir s in remdups (li (lhs eq) # map (\ x. 
if coeff r x < 0 then li x else ui x) vs))" definition min_rvar_incdec_eq :: "('i,'a) Direction \ ('i,'a::lrv) state \ eq \ 'i list + var" where "min_rvar_incdec_eq dir s eq = (let rvars = Abstract_Linear_Poly.vars_list (rhs eq) in case min_satisfying (\ x. reasable_var dir x eq s) rvars of None \ Inl (unsat_indices dir s rvars eq) | Some x\<^sub>j \ Inr x\<^sub>j)" interpretation MinRVarsEq "min_rvar_incdec_eq :: ('i,'a :: lrv) Direction \ _" proof fix s eq "is" and dir :: "('i,'a) Direction" let ?min = "min_satisfying (\ x. reasable_var dir x eq s) (Abstract_Linear_Poly.vars_list (rhs eq))" let ?vars = "Abstract_Linear_Poly.vars_list (rhs eq)" { assume "min_rvar_incdec_eq dir s eq = Inl is" from this[unfolded min_rvar_incdec_eq_def Let_def, simplified] have "?min = None" and I: "set is = set (unsat_indices dir s ?vars eq)" by (cases ?min, auto)+ from this min_satisfying_None set_vars_list have 1: "\ x. x \ rvars_eq eq \ \ reasable_var dir x eq s" by blast { fix i assume "i \ set is" and dir: "dir = Positive \ dir = Negative" and lhs_eq: "LI dir s (lhs eq) \ indices_state s" from this[unfolded I unsat_indices_def Let_def] consider (lhs) "i = LI dir s (lhs eq)" | (LI_rhs) x where "i = LI dir s x" "x \ rvars_eq eq" "coeff (rhs eq) x < 0" | (UI_rhs) x where "i = UI dir s x" "x \ rvars_eq eq" "coeff (rhs eq) x \ 0" by (auto split: if_splits simp: set_vars_list) then have "i \ indices_state s" proof cases case lhs show ?thesis unfolding lhs using lhs_eq by auto next case LI_rhs from 1[OF LI_rhs(2)] LI_rhs(3) have "\ (\\<^sub>l\<^sub>b (lt dir) (\\ s\ x) (LB dir s x))" by auto then show ?thesis unfolding LI_rhs(1) unfolding indices_state_def using dir by (auto simp: bound_compare'_defs boundsl_def boundsu_def indexl_def indexu_def split: option.splits intro!: exI[of _ x]) auto next case UI_rhs from UI_rhs(2) have "coeff (rhs eq) x \ 0" by (simp add: coeff_zero) with UI_rhs(3) have "0 < coeff (rhs eq) x" by auto from 1[OF UI_rhs(2)] this have "\ (\\<^sub>u\<^sub>b (lt dir) (\\ s\ x) (UB dir s x))" by auto then show ?thesis unfolding UI_rhs(1) unfolding indices_state_def using dir by (auto simp: bound_compare'_defs boundsl_def boundsu_def indexl_def indexu_def split: option.splits intro!: exI[of _ x]) auto qed } then have 2: "dir = Positive \ dir = Negative \ LI dir s (lhs eq) \ indices_state s \ set is \ indices_state s" by auto show " (\ x \ rvars_eq eq. \ reasable_var dir x eq s) \ set is = {LI dir s (lhs eq)} \ {LI dir s x |x. x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ {UI dir s x |x. x \ rvars_eq eq \ 0 < coeff (rhs eq) x} \ (dir = Positive \ dir = Negative \ LI dir s (lhs eq) \ indices_state s \ set is \ indices_state s)" proof (intro conjI impI 2, goal_cases) case 2 have "set is = {LI dir s (lhs eq)} \ LI dir s ` (rvars_eq eq \ {x. coeff (rhs eq) x < 0}) \ UI dir s ` (rvars_eq eq \ {x. \ coeff (rhs eq) x < 0})" unfolding I unsat_indices_def Let_def by (auto simp add: set_vars_list) also have "\ = {LI dir s (lhs eq)} \ LI dir s ` {x. x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ UI dir s ` { x. x \ rvars_eq eq \ 0 < coeff (rhs eq) x}" proof (intro arg_cong2[of _ _ _ _ "(\)"] arg_cong[of _ _ "\ x. _ ` x"] refl, goal_cases) case 2 { fix x assume "x \ rvars_eq eq" hence "coeff (rhs eq) x \ 0" by (simp add: coeff_zero) hence or: "coeff (rhs eq) x < 0 \ coeff (rhs eq) x > 0" by auto assume "\ coeff (rhs eq) x < 0" hence "coeff (rhs eq) x > 0" using or by simp } note [dest] = this show ?case by auto qed auto finally show "set is = {LI dir s (lhs eq)} \ {LI dir s x |x. 
x \ rvars_eq eq \ coeff (rhs eq) x < 0} \ {UI dir s x |x. x \ rvars_eq eq \ 0 < coeff (rhs eq) x}" by auto qed (insert 1, auto) } fix x\<^sub>j assume "min_rvar_incdec_eq dir s eq = Inr x\<^sub>j" from this[unfolded min_rvar_incdec_eq_def Let_def] have "?min = Some x\<^sub>j" by (cases ?min, auto) then show "x\<^sub>j \ rvars_eq eq" "reasable_var dir x\<^sub>j eq s" "(\ x' \ rvars_eq eq. x' < x\<^sub>j \ \ reasable_var dir x' eq s)" using min_satisfying_Some set_vars_list by blast+ qed (* -------------------------------------------------------------------------- *) (* EqForLVar eq_idx_for_lvar *) (* -------------------------------------------------------------------------- *) primrec eq_idx_for_lvar_aux :: "tableau \ var \ nat \ nat" where "eq_idx_for_lvar_aux [] x i = i" | "eq_idx_for_lvar_aux (eq # t) x i = (if lhs eq = x then i else eq_idx_for_lvar_aux t x (i+1))" definition eq_idx_for_lvar where "eq_idx_for_lvar t x \ eq_idx_for_lvar_aux t x 0" lemma eq_idx_for_lvar_aux: assumes "x \ lvars t" shows "let idx = eq_idx_for_lvar_aux t x i in i \ idx \ idx < i + length t \ lhs (t ! (idx - i)) = x" using assms proof (induct t arbitrary: i) case Nil then show ?case by (simp add: lvars_def) next case (Cons eq t) show ?case using Cons(1)[of "i+1"] Cons(2) by (cases "x = lhs eq") (auto simp add: Let_def lvars_def nth_Cons') qed global_interpretation EqForLVarDefault: EqForLVar eq_idx_for_lvar defines eq_for_lvar_code = EqForLVarDefault.eq_for_lvar proof (unfold_locales) fix x t assume "x \ lvars t" then show "eq_idx_for_lvar t x < length t \ lhs (t ! eq_idx_for_lvar t x) = x" using eq_idx_for_lvar_aux[of x t 0] by (simp add: Let_def eq_idx_for_lvar_def) qed (* -------------------------------------------------------------------------- *) (* PivotEq pivot_eq *) (* -------------------------------------------------------------------------- *) definition pivot_eq :: "eq \ var \ eq" where "pivot_eq e y \ let cy = coeff (rhs e) y in (y, (-1/cy) *R ((rhs e) - cy *R (Var y)) + (1/cy) *R (Var (lhs e)))" lemma pivot_eq_satisfies_eq: assumes "y \ rvars_eq e" shows "v \\<^sub>e e = v \\<^sub>e pivot_eq e y" using assms using scaleRat_right_distrib[of "1 / Rep_linear_poly (rhs e) y" "- (rhs e \ v \)" "v (lhs e)"] using Groups.group_add_class.minus_unique[of "- ((rhs e) \ v \)" "v (lhs e)"] unfolding coeff_def vars_def by (simp add: coeff_def vars_def Let_def pivot_eq_def satisfies_eq_def) (auto simp add: rational_vector.scale_right_diff_distrib valuate_add valuate_minus valuate_uminus valuate_scaleRat valuate_Var) lemma pivot_eq_rvars: assumes "x \ vars (rhs (pivot_eq e v))" "x \ lhs e" "coeff (rhs e) v \ 0" "v \ lhs e" shows "x \ vars (rhs e)" proof- have "v \ vars ((1 / coeff (rhs e) v) *R (rhs e - coeff (rhs e) v *R Var v))" using coeff_zero by force then have "x \ v" using assms(1) assms(3) assms(4) using vars_plus[of "(-1 / coeff (rhs e) v) *R (rhs e - coeff (rhs e) v *R Var v)" "(1 / coeff (rhs e) v) *R Var (lhs e)"] by (auto simp add: Let_def vars_scaleRat pivot_eq_def) then show ?thesis using assms using vars_plus[of "(-1 / coeff (rhs e) v) *R (rhs e - coeff (rhs e) v *R Var v)" "(1 / coeff (rhs e) v) *R Var (lhs e)"] using vars_minus[of "rhs e" "coeff (rhs e) v *R Var v"] by (auto simp add: vars_scaleRat Let_def pivot_eq_def) qed interpretation PivotEq pivot_eq proof fix eq x\<^sub>j assume "x\<^sub>j \ rvars_eq eq" "lhs eq \ rvars_eq eq" have "lhs (pivot_eq eq x\<^sub>j) = x\<^sub>j" unfolding pivot_eq_def by (simp add: Let_def) moreover have "rvars_eq (pivot_eq eq x\<^sub>j) = {lhs eq} \ (rvars_eq 
eq - {x\<^sub>j})" proof show "rvars_eq (pivot_eq eq x\<^sub>j) \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" proof fix x assume "x \ rvars_eq (pivot_eq eq x\<^sub>j)" have *: "coeff (rhs (pivot_eq eq x\<^sub>j)) x\<^sub>j = 0" using \x\<^sub>j \ rvars_eq eq\ \lhs eq \ rvars_eq eq\ using coeff_Var2[of "lhs eq" x\<^sub>j] by (auto simp add: Let_def pivot_eq_def) have "coeff (rhs eq) x\<^sub>j \ 0" using \x\<^sub>j \ rvars_eq eq\ using coeff_zero by (cases eq) (auto simp add:) then show "x \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" using pivot_eq_rvars[of x eq x\<^sub>j] using \x \ rvars_eq (pivot_eq eq x\<^sub>j)\ \x\<^sub>j \ rvars_eq eq\ \lhs eq \ rvars_eq eq\ using coeff_zero * by auto qed show "{lhs eq} \ (rvars_eq eq - {x\<^sub>j}) \ rvars_eq (pivot_eq eq x\<^sub>j)" proof fix x assume "x \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" have *: "coeff (rhs eq) (lhs eq) = 0" using coeff_zero using \lhs eq \ rvars_eq eq\ by auto have **: "coeff (rhs eq) x\<^sub>j \ 0" using \x\<^sub>j \ rvars_eq eq\ by (simp add: coeff_zero) have ***: "x \ rvars_eq eq \ coeff (Var (lhs eq)) x = 0" using \lhs eq \ rvars_eq eq\ using coeff_Var2[of "lhs eq" x] by auto have "coeff (Var x\<^sub>j) (lhs eq) = 0" using \x\<^sub>j \ rvars_eq eq\ \lhs eq \ rvars_eq eq\ using coeff_Var2[of x\<^sub>j "lhs eq"] by auto then have "coeff (rhs (pivot_eq eq x\<^sub>j)) x \ 0" using \x \ {lhs eq} \ (rvars_eq eq - {x\<^sub>j})\ * ** *** using coeff_zero[of "rhs eq" x] by (auto simp add: Let_def coeff_Var2 pivot_eq_def) then show "x \ rvars_eq (pivot_eq eq x\<^sub>j)" by (simp add: coeff_zero) qed qed ultimately show "let eq' = pivot_eq eq x\<^sub>j in lhs eq' = x\<^sub>j \ rvars_eq eq' = {lhs eq} \ (rvars_eq eq - {x\<^sub>j})" by (simp add: Let_def) next fix v eq x\<^sub>j assume "x\<^sub>j \ rvars_eq eq" then show "v \\<^sub>e pivot_eq eq x\<^sub>j = v \\<^sub>e eq" using pivot_eq_satisfies_eq by blast qed (* -------------------------------------------------------------------------- *) (* SubstVar subst_var *) (* -------------------------------------------------------------------------- *) definition subst_var:: "var \ linear_poly \ linear_poly \ linear_poly" where "subst_var v lp' lp \ lp + (coeff lp v) *R lp' - (coeff lp v) *R (Var v)" definition "subst_var_eq_code = SubstVar.subst_var_eq subst_var" global_interpretation SubstVar subst_var rewrites "SubstVar.subst_var_eq subst_var = subst_var_eq_code" proof (unfold_locales) fix x\<^sub>j lp' lp have *: "\x. \x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j); x \ vars lp'\ \ x \ vars lp" proof- fix x assume "x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j)" then have "coeff (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j) x \ 0" using coeff_zero by force assume "x \ vars lp'" then have "coeff lp' x = 0" using coeff_zero by auto show "x \ vars lp" proof(rule ccontr) assume "x \ vars lp" then have "coeff lp x = 0" using coeff_zero by auto then show False using \coeff (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j) x \ 0\ using \coeff lp' x = 0\ by (cases "x = x\<^sub>j") (auto simp add: coeff_Var2) qed qed have "vars (subst_var x\<^sub>j lp' lp) \ (vars lp - {x\<^sub>j}) \ vars lp'" unfolding subst_var_def using coeff_zero[of "lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j" x\<^sub>j] using coeff_zero[of lp' x\<^sub>j] using * by auto moreover have "\x. 
\x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j); x \ vars lp; x \ vars lp'\ \ x = x\<^sub>j" proof- fix x assume "x \ vars lp" "x \ vars lp'" then have "coeff lp x \ 0" "coeff lp' x = 0" using coeff_zero by auto assume "x \ vars (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j)" then have "coeff (lp + coeff lp x\<^sub>j *R lp' - coeff lp x\<^sub>j *R Var x\<^sub>j) x = 0" using coeff_zero by force then show "x = x\<^sub>j" using \coeff lp x \ 0\ \coeff lp' x = 0\ by (cases "x = x\<^sub>j") (auto simp add: coeff_Var2) qed then have "vars lp - {x\<^sub>j} - vars lp' \ vars (subst_var x\<^sub>j lp' lp)" by (auto simp add: subst_var_def) ultimately show "vars lp - {x\<^sub>j} - vars lp' \s vars (subst_var x\<^sub>j lp' lp) \s vars lp - {x\<^sub>j} \ vars lp'" by simp next fix v x\<^sub>j lp' lp show "v x\<^sub>j = lp' \ v \ \ lp \ v \ = (subst_var x\<^sub>j lp' lp) \ v \" unfolding subst_var_def using valuate_minus[of "lp + coeff lp x\<^sub>j *R lp'" "coeff lp x\<^sub>j *R Var x\<^sub>j" v] using valuate_add[of lp "coeff lp x\<^sub>j *R lp'" v] using valuate_scaleRat[of "coeff lp x\<^sub>j" lp' v] valuate_scaleRat[of "coeff lp x\<^sub>j" "Var x\<^sub>j" v] using valuate_Var[of x\<^sub>j v] by auto next fix x\<^sub>j lp lp' assume "x\<^sub>j \ vars lp" hence 0: "coeff lp x\<^sub>j = 0" using coeff_zero by blast show "subst_var x\<^sub>j lp' lp = lp" unfolding subst_var_def 0 by simp next fix x\<^sub>j lp x lp' assume "x\<^sub>j \ vars lp" "x \ vars lp' - vars lp" hence x: "x \ x\<^sub>j" and 0: "coeff lp x = 0" and no0: "coeff lp x\<^sub>j \ 0" "coeff lp' x \ 0" using coeff_zero by blast+ from x have 00: "coeff (Var x\<^sub>j) x = 0" using coeff_Var2 by auto show "x \ vars (subst_var x\<^sub>j lp' lp)" unfolding subst_var_def coeff_zero[symmetric] by (simp add: 0 00 no0) qed (simp_all add: subst_var_eq_code_def) (* -------------------------------------------------------------------------- *) (* Update update *) (* -------------------------------------------------------------------------- *) definition rhs_eq_val where "rhs_eq_val v x\<^sub>i c e \ let x\<^sub>j = lhs e; a\<^sub>i\<^sub>j = coeff (rhs e) x\<^sub>i in \v\ x\<^sub>j + a\<^sub>i\<^sub>j *R (c - \v\ x\<^sub>i)" definition "update_code = RhsEqVal.update rhs_eq_val" definition "assert_bound'_code = Update.assert_bound' update_code" definition "assert_bound_code = Update.assert_bound update_code" global_interpretation RhsEqValDefault': RhsEqVal rhs_eq_val rewrites "RhsEqVal.update rhs_eq_val = update_code" and "Update.assert_bound update_code = assert_bound_code" and "Update.assert_bound' update_code = assert_bound'_code" proof unfold_locales fix v x c e assume "\v\ \\<^sub>e e" then show "rhs_eq_val v x c e = rhs e \ \v\(x := c) \" unfolding rhs_eq_val_def Let_def using valuate_update_x[of "rhs e" x "\v\" "\v\(x := c)"] by (auto simp add: satisfies_eq_def) qed (auto simp: update_code_def assert_bound'_code_def assert_bound_code_def) sublocale PivotUpdateMinVars < Check check proof (rule Check_check) show "RhsEqVal rhs_eq_val" .. 
qed definition "pivot_code = Pivot'.pivot eq_idx_for_lvar pivot_eq subst_var" definition "pivot_tableau_code = Pivot'.pivot_tableau eq_idx_for_lvar pivot_eq subst_var" global_interpretation Pivot'Default: Pivot' eq_idx_for_lvar pivot_eq subst_var rewrites "Pivot'.pivot eq_idx_for_lvar pivot_eq subst_var = pivot_code" and "Pivot'.pivot_tableau eq_idx_for_lvar pivot_eq subst_var = pivot_tableau_code" and "SubstVar.subst_var_eq subst_var = subst_var_eq_code" by (unfold_locales, auto simp: pivot_tableau_code_def pivot_code_def subst_var_eq_code_def) definition "pivot_and_update_code = PivotUpdate.pivot_and_update pivot_code update_code" global_interpretation PivotUpdateDefault: PivotUpdate eq_idx_for_lvar pivot_code update_code rewrites "PivotUpdate.pivot_and_update pivot_code update_code = pivot_and_update_code" by (unfold_locales, auto simp: pivot_and_update_code_def) sublocale Update < AssertBoundNoLhs assert_bound proof (rule update_to_assert_bound_no_lhs) show "Pivot eq_idx_for_lvar pivot_code" .. qed definition "check_code = PivotUpdateMinVars.check eq_idx_for_lvar min_lvar_not_in_bounds min_rvar_incdec_eq pivot_and_update_code" definition "check'_code = PivotUpdateMinVars.check' eq_idx_for_lvar min_rvar_incdec_eq pivot_and_update_code" global_interpretation PivotUpdateMinVarsDefault: PivotUpdateMinVars eq_idx_for_lvar min_lvar_not_in_bounds min_rvar_incdec_eq pivot_and_update_code rewrites "PivotUpdateMinVars.check eq_idx_for_lvar min_lvar_not_in_bounds min_rvar_incdec_eq pivot_and_update_code = check_code" and "PivotUpdateMinVars.check' eq_idx_for_lvar min_rvar_incdec_eq pivot_and_update_code = check'_code" by (unfold_locales) (simp_all add: check_code_def check'_code_def) definition "assert_code = Assert'.assert assert_bound_code check_code" global_interpretation Assert'Default: Assert' assert_bound_code check_code rewrites "Assert'.assert assert_bound_code check_code = assert_code" by (unfold_locales, auto simp: assert_code_def) definition "assert_bound_loop_code = AssertAllState''.assert_bound_loop assert_bound_code" definition "assert_all_state_code = AssertAllState''.assert_all_state init_state assert_bound_code check_code" definition "assert_all_code = AssertAllState.assert_all assert_all_state_code" global_interpretation AssertAllStateDefault: AssertAllState'' init_state assert_bound_code check_code rewrites "AssertAllState''.assert_bound_loop assert_bound_code = assert_bound_loop_code" and "AssertAllState''.assert_all_state init_state assert_bound_code check_code = assert_all_state_code" and "AssertAllState.assert_all assert_all_state_code = assert_all_code" by unfold_locales (simp_all add: assert_bound_loop_code_def assert_all_state_code_def assert_all_code_def) (* -------------------------------------------------------------------------- *) (* Preprocess preprocess *) (* -------------------------------------------------------------------------- *) primrec monom_to_atom:: "QDelta ns_constraint \ QDelta atom" where "monom_to_atom (LEQ_ns l r) = (if (monom_coeff l < 0) then (Geq (monom_var l) (r /R monom_coeff l)) else (Leq (monom_var l) (r /R monom_coeff l)))" | "monom_to_atom (GEQ_ns l r) = (if (monom_coeff l < 0) then (Leq (monom_var l) (r /R monom_coeff l)) else (Geq (monom_var l) (r /R monom_coeff l)))" primrec qdelta_constraint_to_atom:: "QDelta ns_constraint \ var \ QDelta atom" where "qdelta_constraint_to_atom (LEQ_ns l r) v = (if (is_monom l) then (monom_to_atom (LEQ_ns l r)) else (Leq v r))" | "qdelta_constraint_to_atom (GEQ_ns l r) v = (if (is_monom l) then 
(monom_to_atom (GEQ_ns l r)) else (Geq v r))" primrec qdelta_constraint_to_atom':: "QDelta ns_constraint \ var \ QDelta atom" where "qdelta_constraint_to_atom' (LEQ_ns l r) v = (Leq v r)" | "qdelta_constraint_to_atom' (GEQ_ns l r) v = (Geq v r)" fun linear_poly_to_eq:: "linear_poly \ var \ eq" where "linear_poly_to_eq p v = (v, p)" datatype 'i istate = IState (FirstFreshVariable: var) (Tableau: tableau) (Atoms: "('i,QDelta) i_atom list") (Poly_Mapping: "linear_poly \ var") (UnsatIndices: "'i list") primrec zero_satisfies :: "'a :: lrv ns_constraint \ bool" where "zero_satisfies (LEQ_ns l r) \ 0 \ r" | "zero_satisfies (GEQ_ns l r) \ 0 \ r" lemma zero_satisfies: "poly c = 0 \ zero_satisfies c \ v \\<^sub>n\<^sub>s c" by (cases c, auto simp: valuate_zero) lemma not_zero_satisfies: "poly c = 0 \ \ zero_satisfies c \ \ v \\<^sub>n\<^sub>s c" by (cases c, auto simp: valuate_zero) fun preprocess' :: "('i,QDelta) i_ns_constraint list \ var \ 'i istate" where "preprocess' [] v = IState v [] [] (\ p. None) []" | "preprocess' ((i,h) # t) v = (let s' = preprocess' t v; p = poly h; is_monom_h = is_monom p; v' = FirstFreshVariable s'; t' = Tableau s'; a' = Atoms s'; m' = Poly_Mapping s'; u' = UnsatIndices s' in if is_monom_h then IState v' t' ((i,qdelta_constraint_to_atom h v') # a') m' u' else if p = 0 then if zero_satisfies h then s' else IState v' t' a' m' (i # u') else (case m' p of Some v \ IState v' t' ((i,qdelta_constraint_to_atom h v) # a') m' u' | None \ IState (v' + 1) (linear_poly_to_eq p v' # t') ((i,qdelta_constraint_to_atom h v') # a') (m' (p \ v')) u') )" lemma preprocess'_simps: "preprocess' ((i,h) # t) v = (let s' = preprocess' t v; p = poly h; is_monom_h = is_monom p; v' = FirstFreshVariable s'; t' = Tableau s'; a' = Atoms s'; m' = Poly_Mapping s'; u' = UnsatIndices s' in if is_monom_h then IState v' t' ((i,monom_to_atom h) # a') m' u' else if p = 0 then if zero_satisfies h then s' else IState v' t' a' m' (i # u') else (case m' p of Some v \ IState v' t' ((i,qdelta_constraint_to_atom' h v) # a') m' u' | None \ IState (v' + 1) (linear_poly_to_eq p v' # t') ((i,qdelta_constraint_to_atom' h v') # a') (m' (p \ v')) u') )" by (cases h, auto simp add: Let_def split: option.splits) lemmas preprocess'_code = preprocess'.simps(1) preprocess'_simps declare preprocess'_code[code] text \Normalization of constraints helps to identify same polynomials, e.g., the constraints $x + y \leq 5$ and $-2x-2y \leq -12$ will be normalized to $x + y \leq 5$ and $x + y \geq 6$, so that only one slack-variable will be introduced for the polynomial $x+y$, and not another one for $-2x-2y$. 
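Concretely, in the second constraint the maximal variable of $-2x - 2y$ has coefficient $-2$; the normalization function defined below multiplies the polynomial and the bound by the inverse $-\frac{1}{2}$ and, because this coefficient is negative, flips the relation, yielding $x + y \geq 6$, whereas $x + y \leq 5$ already has leading coefficient $1$ and is left unchanged.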
Normalization will take care that the max-var of the polynomial in the constraint will have coefficient 1 (if the polynomial is non-zero)\ fun normalize_ns_constraint :: "'a :: lrv ns_constraint \ 'a ns_constraint" where "normalize_ns_constraint (LEQ_ns l r) = (let v = max_var l; c = coeff l v in if c = 0 then LEQ_ns l r else let ic = inverse c in if c < 0 then GEQ_ns (ic *R l) (scaleRat ic r) else LEQ_ns (ic *R l) (scaleRat ic r))" | "normalize_ns_constraint (GEQ_ns l r) = (let v = max_var l; c = coeff l v in if c = 0 then GEQ_ns l r else let ic = inverse c in if c < 0 then LEQ_ns (ic *R l) (scaleRat ic r) else GEQ_ns (ic *R l) (scaleRat ic r))" lemma normalize_ns_constraint[simp]: "v \\<^sub>n\<^sub>s (normalize_ns_constraint c) \ v \\<^sub>n\<^sub>s (c :: 'a :: lrv ns_constraint)" proof - let ?c = "coeff (poly c) (max_var (poly c))" consider (0) "?c = 0" | (pos) "?c > 0" | (neg) "?c < 0" by linarith thus ?thesis proof cases case 0 thus ?thesis by (cases c, auto) next case pos from pos have id: "a /R ?c \ b /R ?c \ (a :: 'a) \ b" for a b using scaleRat_leq1 by fastforce show ?thesis using pos id by (cases c, auto simp: Let_def valuate_scaleRat id) next case neg from neg have id: "a /R ?c \ b /R ?c \ (a :: 'a) \ b" for a b using scaleRat_leq2 by fastforce show ?thesis using neg id by (cases c, auto simp: Let_def valuate_scaleRat id) qed qed declare normalize_ns_constraint.simps[simp del] lemma i_satisfies_normalize_ns_constraint[simp]: "Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s (map_prod id normalize_ns_constraint ` cs) \ Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s cs" by (cases Iv, force) abbreviation max_var:: "QDelta ns_constraint \ var" where "max_var C \ Abstract_Linear_Poly.max_var (poly C)" fun start_fresh_variable :: "('i,QDelta) i_ns_constraint list \ var" where "start_fresh_variable [] = 0" | "start_fresh_variable ((i,h)#t) = max (max_var h + 1) (start_fresh_variable t)" definition preprocess_part_1 :: "('i,QDelta) i_ns_constraint list \ tableau \ (('i,QDelta) i_atom list) \ 'i list" where "preprocess_part_1 l \ let start = start_fresh_variable l; is = preprocess' l start in (Tableau is, Atoms is, UnsatIndices is)" lemma lhs_linear_poly_to_eq [simp]: "lhs (linear_poly_to_eq h v) = v" by (cases h) auto lemma rvars_eq_linear_poly_to_eq [simp]: "rvars_eq (linear_poly_to_eq h v) = vars h" by simp lemma fresh_var_monoinc: "FirstFreshVariable (preprocess' cs start) \ start" by (induct cs) (auto simp add: Let_def split: option.splits) abbreviation vars_constraints where "vars_constraints cs \ \ (set (map vars (map poly cs)))" lemma start_fresh_variable_fresh: "\ var \ vars_constraints (flat_list cs). var < start_fresh_variable cs" using max_var_max by (induct cs, auto simp add: max_def) force+ lemma vars_tableau_vars_constraints: "rvars (Tableau (preprocess' cs start)) \ vars_constraints (flat_list cs)" by (induct cs start rule: preprocess'.induct) (auto simp add: rvars_def Let_def split: option.splits) lemma lvars_tableau_ge_start: "\ var \ lvars (Tableau (preprocess' cs start)). var \ start" by (induct cs start rule: preprocess'.induct) (auto simp add: Let_def lvars_def fresh_var_monoinc split: option.splits) lemma rhs_no_zero_tableau_start: "0 \ rhs ` set (Tableau (preprocess' cs start))" by (induct cs start rule: preprocess'.induct, auto simp add: Let_def rvars_def fresh_var_monoinc split: option.splits) lemma first_fresh_variable_not_in_lvars: "\var \ lvars (Tableau (preprocess' cs start)). 
FirstFreshVariable (preprocess' cs start) > var" by (induct cs start rule: preprocess'.induct) (auto simp add: Let_def lvars_def split: option.splits) lemma sat_atom_sat_eq_sat_constraint_non_monom: assumes "v \\<^sub>a qdelta_constraint_to_atom h var" "v \\<^sub>e linear_poly_to_eq (poly h) var" "\ is_monom (poly h)" shows "v \\<^sub>n\<^sub>s h" using assms by (cases h) (auto simp add: satisfies_eq_def split: if_splits) lemma qdelta_constraint_to_atom_monom: assumes "is_monom (poly h)" shows "v \\<^sub>a qdelta_constraint_to_atom h var \ v \\<^sub>n\<^sub>s h" proof (cases h) case (LEQ_ns l a) then show ?thesis using assms using monom_valuate[of _ v] apply auto using scaleRat_leq2[of "a /R monom_coeff l" "v (monom_var l)" "monom_coeff l"] using divide_leq1[of "monom_coeff l" "v (monom_var l)" a] apply (force, simp add: divide_rat_def) using scaleRat_leq1[of "v (monom_var l)" "a /R monom_coeff l" "monom_coeff l"] using is_monom_monom_coeff_not_zero[of l] using divide_leq[of "monom_coeff l" "v (monom_var l)" a] using is_monom_monom_coeff_not_zero[of l] by (simp_all add: divide_rat_def) next case (GEQ_ns l a) then show ?thesis using assms using monom_valuate[of _ v] apply auto using scaleRat_leq2[of "v (monom_var l)" "a /R monom_coeff l" "monom_coeff l"] using divide_geq1[of a "monom_coeff l" "v (monom_var l)"] apply (force, simp add: divide_rat_def) using scaleRat_leq1[of "a /R monom_coeff l" "v (monom_var l)" "monom_coeff l"] using is_monom_monom_coeff_not_zero[of l] using divide_geq[of a "monom_coeff l" "v (monom_var l)"] using is_monom_monom_coeff_not_zero[of l] by (simp_all add: divide_rat_def) qed lemma preprocess'_Tableau_Poly_Mapping_None: "(Poly_Mapping (preprocess' cs start)) p = None \ linear_poly_to_eq p v \ set (Tableau (preprocess' cs start))" by (induct cs start rule: preprocess'.induct, auto simp: Let_def split: option.splits if_splits) lemma preprocess'_Tableau_Poly_Mapping_Some: "(Poly_Mapping (preprocess' cs start)) p = Some v \ linear_poly_to_eq p v \ set (Tableau (preprocess' cs start))" by (induct cs start rule: preprocess'.induct, auto simp: Let_def split: option.splits if_splits) lemma preprocess'_Tableau_Poly_Mapping_Some': "(Poly_Mapping (preprocess' cs start)) p = Some v \ \ h. poly h = p \ \ is_monom (poly h) \ qdelta_constraint_to_atom h v \ flat (set (Atoms (preprocess' cs start)))" by (induct cs start rule: preprocess'.induct, auto simp: Let_def split: option.splits if_splits) lemma not_one_le_zero_qdelta: "\ (1 \ (0 :: QDelta))" by code_simp lemma one_zero_contra[dest,consumes 2]: "1 \ x \ (x :: QDelta) \ 0 \ False" using order.trans[of 1 x 0] not_one_le_zero_qdelta by simp lemma i_preprocess'_sat: assumes "(I,v) \\<^sub>i\<^sub>a\<^sub>s set (Atoms (preprocess' s start))" "v \\<^sub>t Tableau (preprocess' s start)" "I \ set (UnsatIndices (preprocess' s start)) = {}" shows "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set s" using assms by (induct s start rule: preprocess'.induct) (auto simp add: Let_def satisfies_atom_set_def satisfies_tableau_def qdelta_constraint_to_atom_monom sat_atom_sat_eq_sat_constraint_non_monom split: if_splits option.splits dest!: preprocess'_Tableau_Poly_Mapping_Some zero_satisfies) lemma preprocess'_sat: assumes "v \\<^sub>a\<^sub>s flat (set (Atoms (preprocess' s start)))" "v \\<^sub>t Tableau (preprocess' s start)" "set (UnsatIndices (preprocess' s start)) = {}" shows "v \\<^sub>n\<^sub>s\<^sub>s flat (set s)" using i_preprocess'_sat[of UNIV v s start] assms by simp lemma sat_constraint_valuation: assumes "\ var \ vars (poly c). 
v1 var = v2 var" shows "v1 \\<^sub>n\<^sub>s c \ v2 \\<^sub>n\<^sub>s c" using assms using valuate_depend by (cases c) (force)+ lemma atom_var_first: assumes "a \ flat (set (Atoms (preprocess' cs start)))" "\ var \ vars_constraints (flat_list cs). var < start" shows "atom_var a < FirstFreshVariable (preprocess' cs start)" using assms proof(induct cs arbitrary: a) case (Cons hh t a) obtain i h where hh: "hh = (i,h)" by force let ?s = "preprocess' t start" show ?case proof(cases "a \ flat (set (Atoms ?s))") case True then show ?thesis using Cons(1)[of a] Cons(3) hh by (auto simp add: Let_def split: option.splits) next case False consider (monom) "is_monom (poly h)" | (normal) "\ is_monom (poly h)" "poly h \ 0" "(Poly_Mapping ?s) (poly h) = None" | (old) var where "\ is_monom (poly h)" "poly h \ 0" "(Poly_Mapping ?s) (poly h) = Some var" | (zero) "\ is_monom (poly h)" "poly h = 0" by auto then show ?thesis proof cases case monom from Cons(3) monom_var_in_vars hh monom have "monom_var (poly h) < start" by auto moreover from False have "a = qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" using Cons(2) hh monom by (auto simp: Let_def) ultimately show ?thesis using fresh_var_monoinc[of start t] hh monom by (cases a; cases h) (auto simp add: Let_def ) next case normal have "a = qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" using False normal Cons(2) hh by (auto simp: Let_def) then show ?thesis using hh normal by (cases a; cases h) (auto simp add: Let_def ) next case (old var) from preprocess'_Tableau_Poly_Mapping_Some'[OF old(3)] obtain h' where "poly h' = poly h" "qdelta_constraint_to_atom h' var \ flat (set (Atoms ?s))" by blast from Cons(1)[OF this(2)] Cons(3) this(1) old(1) have var: "var < FirstFreshVariable ?s" by (cases h', auto) have "a = qdelta_constraint_to_atom h var" using False old Cons(2) hh by (auto simp: Let_def) then have a: "atom_var a = var" using old by (cases a; cases h; auto simp: Let_def) show ?thesis unfolding a hh by (simp add: old Let_def var) next case zero from False show ?thesis using Cons(2) hh zero by (auto simp: Let_def split: if_splits) qed qed qed simp lemma satisfies_tableau_satisfies_tableau: assumes "v1 \\<^sub>t t" "\ var \ tvars t. v1 var = v2 var" shows "v2 \\<^sub>t t" using assms using valuate_depend[of _ v1 v2] by (force simp add: lvars_def rvars_def satisfies_eq_def satisfies_tableau_def) lemma preprocess'_unsat_indices: assumes "i \ set (UnsatIndices (preprocess' s start))" shows "\ ({i},v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set s" using assms proof (induct s start rule: preprocess'.induct) case (2 j h t v) then show ?case by (auto simp: Let_def not_zero_satisfies split: if_splits option.splits) qed simp lemma preprocess'_unsat: assumes "(I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set s" "vars_constraints (flat_list s) \ V" "\var \ V. var < start" shows "\v'. (\var \ V. v var = v' var) \ v' \\<^sub>a\<^sub>s restrict_to I (set (Atoms (preprocess' s start))) \ v' \\<^sub>t Tableau (preprocess' s start)" using assms proof(induct s) case Nil show ?case by (auto simp add: satisfies_atom_set_def satisfies_tableau_def) next case (Cons hh t) obtain i h where hh: "hh = (i,h)" by force from Cons hh obtain v' where var: "(\var\V. 
v var = v' var)" and v'_as: "v' \\<^sub>a\<^sub>s restrict_to I (set (Atoms (preprocess' t start)))" and v'_t: "v' \\<^sub>t Tableau (preprocess' t start)" and vars_h: "vars_constraints [h] \ V" by auto from Cons(2)[unfolded hh] have i: "i \ I \ v \\<^sub>n\<^sub>s h" by auto have "\ var \ vars (poly h). v var = v' var" using \(\var\V. v var = v' var)\ Cons(3) hh by auto then have vh_v'h: "v \\<^sub>n\<^sub>s h \ v' \\<^sub>n\<^sub>s h" by (rule sat_constraint_valuation) show ?case proof(cases "is_monom (poly h)") case True then have id: "is_monom (poly h) = True" by simp show ?thesis unfolding hh preprocess'.simps Let_def id if_True istate.simps istate.sel proof (intro exI[of _ v'] conjI v'_t var satisifies_atom_restrict_to_Cons[OF v'_as]) assume "i \ I" from i[OF this] var vh_v'h show "v' \\<^sub>a qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" unfolding qdelta_constraint_to_atom_monom[OF True] by auto qed next case False then have id: "is_monom (poly h) = False" by simp let ?s = "preprocess' t start" let ?x = "FirstFreshVariable ?s" show ?thesis proof (cases "poly h = 0") case zero: False hence id': "(poly h = 0) = False" by simp let ?look = "(Poly_Mapping ?s) (poly h)" show ?thesis proof (cases ?look) case None let ?y = "poly h \ v'\" let ?v' = "v'(?x:=?y)" show ?thesis unfolding preprocess'.simps hh Let_def id id' if_False istate.simps istate.sel None option.simps proof (rule exI[of _ ?v'], intro conjI satisifies_atom_restrict_to_Cons satisfies_tableau_Cons) show vars': "(\var\V. v var = ?v' var)" using \(\var\V. v var = v' var)\ using fresh_var_monoinc[of start t] using Cons(4) by auto { assume "i \ I" from vh_v'h i[OF this] False show "?v' \\<^sub>a qdelta_constraint_to_atom h (FirstFreshVariable (preprocess' t start))" by (cases h, auto) } let ?atoms = "restrict_to I (set (Atoms (preprocess' t start)))" show "?v' \\<^sub>a\<^sub>s ?atoms" unfolding satisfies_atom_set_def proof fix a assume "a \ ?atoms" then have "v' \\<^sub>a a" using \v' \\<^sub>a\<^sub>s ?atoms\ hh by (force simp add: satisfies_atom_set_def) then show "?v' \\<^sub>a a" using \a \ ?atoms\ atom_var_first[of a t start] using Cons(3) Cons(4) by (cases a) auto qed show "?v' \\<^sub>e linear_poly_to_eq (poly h) (FirstFreshVariable (preprocess' t start))" using Cons(3) Cons(4) using valuate_depend[of "poly h" v' "v'(FirstFreshVariable (preprocess' t start) := (poly h) \ v' \)"] using fresh_var_monoinc[of start t] hh by (cases h) (force simp add: satisfies_eq_def)+ have "FirstFreshVariable (preprocess' t start) \ tvars (Tableau (preprocess' t start))" using first_fresh_variable_not_in_lvars[of t start] using Cons(3) Cons(4) using vars_tableau_vars_constraints[of t start] using fresh_var_monoinc[of start t] by force then show "?v' \\<^sub>t Tableau (preprocess' t start)" using \v' \\<^sub>t Tableau (preprocess' t start)\ using satisfies_tableau_satisfies_tableau[of v' "Tableau (preprocess' t start)" ?v'] by auto qed next case (Some var) from preprocess'_Tableau_Poly_Mapping_Some[OF Some] have "linear_poly_to_eq (poly h) var \ set (Tableau ?s)" by auto with v'_t[unfolded satisfies_tableau_def] have v'_h_var: "v' \\<^sub>e linear_poly_to_eq (poly h) var" by auto show ?thesis unfolding preprocess'.simps hh Let_def id id' if_False istate.simps istate.sel Some option.simps proof (intro exI[of _ v'] conjI var v'_t satisifies_atom_restrict_to_Cons satisfies_tableau_Cons v'_as) assume "i \ I" from vh_v'h i[OF this] False v'_h_var show "v' \\<^sub>a qdelta_constraint_to_atom h var" by (cases h, auto simp: 
satisfies_eq_iff) qed qed next case zero: True hence id': "(poly h = 0) = True" by simp show ?thesis proof (cases "zero_satisfies h") case True hence id'': "zero_satisfies h = True" by simp show ?thesis unfolding hh preprocess'.simps Let_def id id' id'' if_True if_False istate.simps istate.sel by (intro exI[of _ v'] conjI v'_t var v'_as) next case False hence id'': "zero_satisfies h = False" by simp { assume "i \ I" from i[OF this] not_zero_satisfies[OF zero False] have False by simp } note no_I = this show ?thesis unfolding hh preprocess'.simps Let_def id id' id'' if_True if_False istate.simps istate.sel proof (rule Cons(1)[OF _ _ Cons(4)]) show "(I, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set t" using Cons(2) by auto show "vars_constraints (map snd t) \ V" using Cons(3) by force qed qed qed qed qed lemma lvars_distinct: "distinct (map lhs (Tableau (preprocess' cs start)))" using first_fresh_variable_not_in_lvars[where ?'a = 'a] by (induct cs, auto simp add: Let_def lvars_def) (force split: option.splits) lemma normalized_tableau_preprocess': "\ (Tableau (preprocess' cs (start_fresh_variable cs)))" proof - let ?s = "start_fresh_variable cs" show ?thesis using lvars_distinct[of cs ?s] using lvars_tableau_ge_start[of cs ?s] using vars_tableau_vars_constraints[of cs ?s] using start_fresh_variable_fresh[of cs] unfolding normalized_tableau_def Let_def by (smt disjoint_iff_not_equal inf.absorb_iff2 inf.strict_order_iff rhs_no_zero_tableau_start subsetD) qed text \Improved preprocessing: Deletion. An equation x = p can be deleted from the tableau, if x does not occur in the atoms.\ lemma delete_lhs_var: assumes norm: "\ t" and t: "t = t1 @ (x,p) # t2" and t': "t' = t1 @ t2" and tv: "tv = (\ v. upd x (p \ \v\ \) v)" and x_as: "x \ atom_var ` snd ` set as" shows "\ t'" \ \new tableau is normalized\ "\w\ \\<^sub>t t' \ \tv w\ \\<^sub>t t" \ \solution of new tableau is translated to solution of old tableau\ "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" \ \solution translation also works for bounds\ "v \\<^sub>t t \ v \\<^sub>t t'" \ \solution of old tableau is also solution for new tableau\ proof - have tv: "\tv w\ = \w\ (x := p \ \w\ \)" unfolding tv map2fun_def' by auto from norm show "\ t'" unfolding t t' normalized_tableau_def by (auto simp: lvars_def rvars_def) show "v \\<^sub>t t \ v \\<^sub>t t'" unfolding t t' satisfies_tableau_def by auto from norm have dist: "distinct (map lhs t)" "lvars t \ rvars t = {}" unfolding normalized_tableau_def by auto from x_as have x_as: "x \ atom_var ` snd ` (set as \ I \ UNIV)" by auto have "(I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as" unfolding i_satisfies_atom_set.simps satisfies_atom_set_def proof (intro ball_cong[OF refl]) fix a assume "a \ snd ` (set as \ I \ UNIV)" with x_as have "x \ atom_var a" by auto then show "\tv w\ \\<^sub>a a = \w\ \\<^sub>a a" unfolding tv by (cases a, auto) qed then show "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" by blast assume w: "\w\ \\<^sub>t t'" from dist(2)[unfolded t] have xp: "x \ vars p" unfolding lvars_def rvars_def by auto { fix eq assume mem: "eq \ set t1 \ set t2" then have "eq \ set t'" unfolding t' by auto with w have w: "\w\ \\<^sub>e eq" unfolding satisfies_tableau_def by auto obtain y q where eq: "eq = (y,q)" by force from mem[unfolded eq] dist(1)[unfolded t] have xy: "x \ y" by force from mem[unfolded eq] dist(2)[unfolded t] have xq: "x \ vars q" unfolding lvars_def rvars_def 
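(* x differs from the left-hand side y of eq (fact xy) and, as the auto call below
   establishes, does not occur among the variables of q (fact xq); therefore updating
   the valuation at x cannot change whether eq is satisfied, which is what the
   subsequent valuate_depend step uses. *)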
by auto from w have "\tv w\ \\<^sub>e eq" unfolding tv eq satisfies_eq_iff using xy xq by (auto intro!: valuate_depend) } moreover have "\tv w\ \\<^sub>e (x,p)" unfolding satisfies_eq_iff tv using xp by (auto intro!: valuate_depend) ultimately show "\tv w\ \\<^sub>t t" unfolding t satisfies_tableau_def by auto qed definition pivot_tableau_eq :: "tableau \ eq \ tableau \ var \ tableau \ eq \ tableau" where "pivot_tableau_eq t1 eq t2 x \ let eq' = pivot_eq eq x; m = map (\ e. subst_var_eq x (rhs eq') e) in (m t1, eq', m t2)" lemma pivot_tableau_eq: assumes t: "t = t1 @ eq # t2" "t' = t1' @ eq' # t2'" and x: "x \ rvars_eq eq" and norm: "\ t" and pte: "pivot_tableau_eq t1 eq t2 x = (t1',eq',t2')" shows "\ t'" "lhs eq' = x" "(v :: 'a :: lrv valuation) \\<^sub>t t' \ v \\<^sub>t t" proof - let ?s = "\ t. State t undefined undefined undefined undefined undefined" let ?y = "lhs eq" have yl: "?y \ lvars t" unfolding t lvars_def by auto from norm have eq_t12: "?y \ lhs ` (set t1 \ set t2)" unfolding normalized_tableau_def t lvars_def by auto have eq: "eq_for_lvar_code t ?y = eq" by (metis (mono_tags, lifting) EqForLVarDefault.eq_for_lvar Un_insert_right eq_t12 image_iff insert_iff list.set(2) set_append t(1) yl) have *: "(?y, b) \ set t1 \ ?y \ lhs ` (set t1)" for b t1 by (metis image_eqI lhs.simps) have pivot: "pivot_tableau_code ?y x t = t'" unfolding Pivot'Default.pivot_tableau_def Let_def eq using pte[symmetric] unfolding t pivot_tableau_eq_def Let_def using eq_t12 by (auto dest!: *) note thms = Pivot'Default.pivot_vars' Pivot'Default.pivot_tableau note thms = thms[unfolded Pivot'Default.pivot_def, of "?s t", simplified, OF norm yl, unfolded eq, OF x, unfolded pivot] from thms(1) thms(2)[of v] show "\ t'" "v \\<^sub>t t' \ v \\<^sub>t t" by auto show "lhs eq' = x" using pte[symmetric] unfolding t pivot_tableau_eq_def Let_def pivot_eq_def by auto qed function preprocess_opt :: "var set \ tableau \ tableau \ tableau \ ((var,'a :: lrv)mapping \ (var,'a)mapping)" where "preprocess_opt X t1 [] = (t1,id)" | "preprocess_opt X t1 ((x,p) # t2) = (if x \ X then case preprocess_opt X t1 t2 of (t,tv) \ (t, (\ v. upd x (p \ \v\ \) v) o tv) else case find (\ x. x \ X) (Abstract_Linear_Poly.vars_list p) of None \ preprocess_opt X ((x,p) # t1) t2 | Some y \ case pivot_tableau_eq t1 (x,p) t2 y of (tt1,(z,q),tt2) \ case preprocess_opt X tt1 tt2 of (t,tv) \ (t, (\ v. upd z (q \ \v\ \) v) o tv))" by pat_completeness auto termination by (relation "measure (\ (X,t1,t2). length t2)", auto simp: pivot_tableau_eq_def Let_def) lemma preprocess_opt: assumes "X = atom_var ` snd ` set as" "preprocess_opt X t1 t2 = (t',tv)" "\ t" "t = rev t1 @ t2" shows "\ t'" "(\w\ :: 'a :: lrv valuation) \\<^sub>t t' \ \tv w\ \\<^sub>t t" "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" "v \\<^sub>t t \ (v :: 'a valuation) \\<^sub>t t'" using assms proof (atomize(full), induct X t1 t2 arbitrary: t tv w rule: preprocess_opt.induct) case (1 X t1 t tv) then show ?case by (auto simp: normalized_tableau_def lvars_def rvars_def satisfies_tableau_def simp flip: rev_map) next case (2 X t1 x p t2 t tv w) note IH = 2(1-3) note X = 2(4) note res = 2(5) have norm: "\ t" by fact have t: "t = rev t1 @ (x, p) # t2" by fact show ?case proof (cases "x \ X") case False with res obtain tv' where res: "preprocess_opt X t1 t2 = (t', tv')" and tv: "tv = (\v. 
Mapping.update x (p \ \v\ \) v) o tv'" by (auto split: prod.splits) note delete = delete_lhs_var[OF norm t refl refl False[unfolded X]] note IH = IH(1)[OF False X res delete(1) refl] from delete(2)[of "tv' w"] delete(3)[of I "tv' w"] delete(4)[of v] IH[of w] show ?thesis unfolding tv o_def by auto next case True then have "\ x \ X" by simp note IH = IH(2-3)[OF this] show ?thesis proof (cases "find (\x. x \ X) (Abstract_Linear_Poly.vars_list p)") case None with res True have pre: "preprocess_opt X ((x, p) # t1) t2 = (t', tv)" by auto from t have t: "t = rev ((x, p) # t1) @ t2" by simp from IH(1)[OF None X pre norm t] show ?thesis . next case (Some z) from Some[unfolded find_Some_iff] have zX: "z \ X" and "z \ set (Abstract_Linear_Poly.vars_list p)" unfolding set_conv_nth by auto then have z: "z \ rvars_eq (x, p)" by (simp add: set_vars_list) obtain tt1 z' q tt2 where pte: "pivot_tableau_eq t1 (x, p) t2 z = (tt1,(z',q),tt2)" by (cases "pivot_tableau_eq t1 (x, p) t2 z", auto) then have pte_rev: "pivot_tableau_eq (rev t1) (x, p) t2 z = (rev tt1,(z',q),tt2)" unfolding pivot_tableau_eq_def Let_def by (auto simp: rev_map) note eq = pivot_tableau_eq[OF t refl z norm pte_rev] then have z': "z' = z" by auto note eq = eq(1,3)[unfolded z'] note pte = pte[unfolded z'] note pte_rev = pte_rev[unfolded z'] note delete = delete_lhs_var[OF eq(1) refl refl refl zX[unfolded X]] from res[unfolded preprocess_opt.simps Some option.simps pte] True obtain tv' where res: "preprocess_opt X tt1 tt2 = (t', tv')" and tv: "tv = (\v. Mapping.update z (q \ \v\ \) v) o tv'" by (auto split: prod.splits) note IH = IH(2)[OF Some, unfolded pte, OF refl refl refl X res delete(1) refl] from IH[of w] delete(2)[of "tv' w"] delete(3)[of I "tv' w"] delete(4)[of v] show ?thesis unfolding tv o_def eq(2) by auto qed qed qed definition "preprocess_part_2 as t = preprocess_opt (atom_var ` snd ` set as) [] t" lemma preprocess_part_2: assumes "preprocess_part_2 as t = (t',tv)" "\ t" shows "\ t'" "(\w\ :: 'a :: lrv valuation) \\<^sub>t t' \ \tv w\ \\<^sub>t t" "(I, \w\) \\<^sub>i\<^sub>a\<^sub>s set as \ (I, \tv w\) \\<^sub>i\<^sub>a\<^sub>s set as" "v \\<^sub>t t \ (v :: 'a valuation) \\<^sub>t t'" using preprocess_opt[OF refl assms(1)[unfolded preprocess_part_2_def] assms(2)] by auto definition preprocess :: "('i,QDelta) i_ns_constraint list \ _ \ _ \ (_ \ (var,QDelta)mapping) \ 'i list" where "preprocess l = (case preprocess_part_1 (map (map_prod id normalize_ns_constraint) l) of (t,as,ui) \ case preprocess_part_2 as t of (t,tv) \ (t,as,tv,ui))" lemma preprocess: assumes id: "preprocess cs = (t, as, trans_v, ui)" shows "\ t" "fst ` set as \ set ui \ fst ` set cs" "distinct_indices_ns (set cs) \ distinct_indices_atoms (set as)" "I \ set ui = {} \ (I, \v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t \ (I, \trans_v v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" "i \ set ui \ \v. ({i}, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" "\ v. (I,v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs \ \v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t" proof - define ncs where "ncs = map (map_prod id normalize_ns_constraint) cs" have ncs: "fst ` set ncs = fst ` set cs" "\ Iv. 
Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set ncs \ Iv \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" unfolding ncs_def by force auto from id obtain t1 where part1: "preprocess_part_1 ncs = (t1,as,ui)" unfolding preprocess_def by (auto simp: ncs_def split: prod.splits) from id[unfolded preprocess_def part1 split ncs_def[symmetric]] have part_2: "preprocess_part_2 as t1 = (t,trans_v)" by (auto split: prod.splits) have norm: "\ t1" using normalized_tableau_preprocess' part1 by (auto simp: preprocess_part_1_def Let_def) note part_2 = preprocess_part_2[OF part_2 norm] show "\ t" by fact have unsat: "(I,\v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t1 \ I \ set ui = {} \ (I,\v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set ncs" for v using part1[unfolded preprocess_part_1_def Let_def, simplified] i_preprocess'_sat[of I] by blast with part_2(2,3) show "I \ set ui = {} \ (I,\v\) \\<^sub>i\<^sub>a\<^sub>s set as \ \v\ \\<^sub>t t \ (I,\trans_v v\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by (auto simp: ncs) from part1[unfolded preprocess_part_1_def Let_def] obtain var where as: "as = Atoms (preprocess' ncs var)" and ui: "ui = UnsatIndices (preprocess' ncs var)" by auto note min_defs = distinct_indices_atoms_def distinct_indices_ns_def have min1: "(distinct_indices_ns (set ncs) \ (\ k a. (k,a) \ set as \ (\ v p. a = qdelta_constraint_to_atom p v \ (k,p) \ set ncs \ (\ is_monom (poly p) \ Poly_Mapping (preprocess' ncs var) (poly p) = Some v) ))) \ fst ` set as \ set ui \ fst ` set ncs" unfolding as ui proof (induct ncs var rule: preprocess'.induct) case (2 i h t v) hence sub: "fst ` set (Atoms (preprocess' t v)) \ set (UnsatIndices (preprocess' t v)) \ fst ` set t" by auto show ?case proof (intro conjI impI allI, goal_cases) show "fst ` set (Atoms (preprocess' ((i, h) # t) v)) \ set (UnsatIndices (preprocess' ((i,h) #t) v)) \ fst ` set ((i, h) # t)" using sub by (auto simp: Let_def split: option.splits) next case (1 k a) hence min': "distinct_indices_ns (set t)" unfolding min_defs list.simps by blast note IH = 2[THEN conjunct1, rule_format, OF min'] show ?case proof (cases "(k,a) \ set (Atoms (preprocess' t v))") case True from IH[OF this] show ?thesis by (force simp: Let_def split: option.splits if_split) next case new: False with 1(2) have ki: "k = i" by (auto simp: Let_def split: if_splits option.splits) show ?thesis proof (cases "is_monom (poly h)") case True thus ?thesis using new 1(2) by (auto simp: Let_def True intro!: exI) next case no_monom: False thus ?thesis using new 1(2) by (auto simp: Let_def no_monom split: option.splits if_splits intro!: exI) qed qed qed qed (auto simp: min_defs) then show "fst ` set as \ set ui \ fst ` set cs" by (auto simp: ncs) { assume mini: "distinct_indices_ns (set cs)" have mini: "distinct_indices_ns (set ncs)" unfolding distinct_indices_ns_def proof (intro impI allI, goal_cases) case (1 n1 n2 i) from 1(1) obtain c1 where c1: "(i,c1) \ set cs" and n1: "n1 = normalize_ns_constraint c1" unfolding ncs_def by auto from 1(2) obtain c2 where c2: "(i,c2) \ set cs" and n2: "n2 = normalize_ns_constraint c2" unfolding ncs_def by auto from mini[unfolded distinct_indices_ns_def, rule_format, OF c1 c2] show ?case unfolding n1 n2 by (cases c1; cases c2; auto simp: normalize_ns_constraint.simps Let_def) qed note min = min1[THEN conjunct1, rule_format, OF this] show "distinct_indices_atoms (set as)" unfolding distinct_indices_atoms_def proof (intro allI impI) fix i a b assume a: "(i,a) \ set as" and b: "(i,b) \ set as" from min[OF a] obtain v p where aa: "a = 
qdelta_constraint_to_atom p v" "(i, p) \ set ncs" "\ is_monom (poly p) \ Poly_Mapping (preprocess' ncs var) (poly p) = Some v" by auto from min[OF b] obtain w q where bb: "b = qdelta_constraint_to_atom q w" "(i, q) \ set ncs" "\ is_monom (poly q) \ Poly_Mapping (preprocess' ncs var) (poly q) = Some w" by auto from mini[unfolded distinct_indices_ns_def, rule_format, OF aa(2) bb(2)] have *: "poly p = poly q" "ns_constraint_const p = ns_constraint_const q" by auto show "atom_var a = atom_var b \ atom_const a = atom_const b" proof (cases "is_monom (poly q)") case True thus ?thesis unfolding aa(1) bb(1) using * by (cases p; cases q, auto) next case False thus ?thesis unfolding aa(1) bb(1) using * aa(3) bb(3) by (cases p; cases q, auto) qed qed } show "i \ set ui \ \v. ({i}, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" using preprocess'_unsat_indices[of i ncs] part1 unfolding preprocess_part_1_def Let_def by (auto simp: ncs) assume "\ w. (I,w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" then obtain w where "(I,w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set cs" by blast hence "(I,w) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set ncs" unfolding ncs . from preprocess'_unsat[OF this _ start_fresh_variable_fresh, of ncs] have "\v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t1" using part1 unfolding preprocess_part_1_def Let_def by auto then show "\v'. (I,v') \\<^sub>i\<^sub>a\<^sub>s set as \ v' \\<^sub>t t" using part_2(4) by auto qed interpretation PreprocessDefault: Preprocess preprocess by (unfold_locales; rule preprocess, auto) global_interpretation Solve_exec_ns'Default: Solve_exec_ns' preprocess assert_all_code defines solve_exec_ns_code = Solve_exec_ns'Default.solve_exec_ns by unfold_locales (* -------------------------------------------------------------------------- *) (* To_ns to_ns from_ns *) (* -------------------------------------------------------------------------- *) primrec constraint_to_qdelta_constraint:: "constraint \ QDelta ns_constraint list" where "constraint_to_qdelta_constraint (LT l r) = [LEQ_ns l (QDelta.QDelta r (-1))]" | "constraint_to_qdelta_constraint (GT l r) = [GEQ_ns l (QDelta.QDelta r 1)]" | "constraint_to_qdelta_constraint (LEQ l r) = [LEQ_ns l (QDelta.QDelta r 0)]" | "constraint_to_qdelta_constraint (GEQ l r) = [GEQ_ns l (QDelta.QDelta r 0)]" | "constraint_to_qdelta_constraint (EQ l r) = [LEQ_ns l (QDelta.QDelta r 0), GEQ_ns l (QDelta.QDelta r 0)]" | "constraint_to_qdelta_constraint (LTPP l1 l2) = [LEQ_ns (l1 - l2) (QDelta.QDelta 0 (-1))]" | "constraint_to_qdelta_constraint (GTPP l1 l2) = [GEQ_ns (l1 - l2) (QDelta.QDelta 0 1)]" | "constraint_to_qdelta_constraint (LEQPP l1 l2) = [LEQ_ns (l1 - l2) 0]" | "constraint_to_qdelta_constraint (GEQPP l1 l2) = [GEQ_ns (l1 - l2) 0]" | "constraint_to_qdelta_constraint (EQPP l1 l2) = [LEQ_ns (l1 - l2) 0, GEQ_ns (l1 - l2) 0]" primrec i_constraint_to_qdelta_constraint:: "'i i_constraint \ ('i,QDelta) i_ns_constraint list" where "i_constraint_to_qdelta_constraint (i,c) = map (Pair i) (constraint_to_qdelta_constraint c)" definition to_ns :: "'i i_constraint list \ ('i,QDelta) i_ns_constraint list" where "to_ns l \ concat (map i_constraint_to_qdelta_constraint l)" primrec \0_val :: "QDelta ns_constraint \ QDelta valuation \ rat" where "\0_val (LEQ_ns lll rrr) vl = \0 lll\vl\ rrr" | "\0_val (GEQ_ns lll rrr) vl = \0 rrr lll\vl\" primrec \0_val_min :: "QDelta ns_constraint list \ QDelta valuation \ rat" where "\0_val_min [] vl = 1" | "\0_val_min (h#t) vl = min (\0_val_min t vl) (\0_val h vl)" abbreviation vars_list_constraints where 
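(* A brief informal sketch of the delta-rational idea realised by from_ns below (no new
   definitions assumed): constraint_to_qdelta_constraint encodes a strict constraint such
   as x < 4 as the non-strict QDelta constraint x <= 4 + (-1)*delta, where delta is a
   symbolic positive infinitesimal.  Given a QDelta solution, the minimum computed just
   above yields a concrete positive rational epsilon for which substituting epsilon for
   delta preserves all constraints; e.g. a solution giving x the QDelta value 4 - delta
   is turned into the rational value 4 - epsilon, which still satisfies x < 4.
   from_ns tabulates this rational valuation on the variables of the constraints. *)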
"vars_list_constraints cs \ remdups (concat (map Abstract_Linear_Poly.vars_list (map poly cs)))" definition from_ns ::"(var, QDelta) mapping \ QDelta ns_constraint list \ (var, rat) mapping" where "from_ns vl cs \ let \ = \0_val_min cs \vl\ in Mapping.tabulate (vars_list_constraints cs) (\ var. val (\vl\ var) \)" global_interpretation SolveExec'Default: SolveExec' to_ns from_ns solve_exec_ns_code defines solve_exec_code = SolveExec'Default.solve_exec and solve_code = SolveExec'Default.solve proof unfold_locales { fix ics :: "'i i_constraint list" and v' and I let ?to_ns = "to_ns ics" let ?flat = "set ?to_ns" assume sat: "(I,\v'\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s ?flat" define cs where "cs = map snd (filter (\ ic. fst ic \ I) ics)" define to_ns' where to_ns: "to_ns' = (\ l. concat (map constraint_to_qdelta_constraint l))" show "(I,\from_ns v' (flat_list ?to_ns)\) \\<^sub>i\<^sub>c\<^sub>s set ics" unfolding i_satisfies_cs.simps proof let ?listf = "map (\C. case C of (LEQ_ns l r) \ (l\\v'\\, r) | (GEQ_ns l r) \ (r, l\\v'\\) )" let ?to_ns = "\ ics. to_ns' (map snd (filter (\ic. fst ic \ I) ics))" let ?list = "?listf (to_ns' cs)" (* index-filtered list *) let ?f_list = "flat_list (to_ns ics)" let ?flist = "?listf ?f_list" (* full list *) obtain i_list where i_list: "?list = i_list" by force obtain f_list where f_list: "?flist = f_list" by force have if_list: "set i_list \ set f_list" unfolding i_list[symmetric] f_list[symmetric] to_ns_def to_ns set_map set_concat cs_def by (intro image_mono, force) have "\ qd1 qd2. (qd1, qd2) \ set ?list \ qd1 \ qd2" proof- fix qd1 qd2 assume "(qd1, qd2) \ set ?list" then show "qd1 \ qd2" using sat unfolding cs_def proof(induct ics) case Nil then show ?case by (simp add: to_ns) next case (Cons h t) obtain i c where h: "h = (i,c)" by force from Cons(2) consider (ic) "(qd1,qd2) \ set (?listf (?to_ns [(i,c)]))" | (t) "(qd1,qd2) \ set (?listf (?to_ns t))" unfolding to_ns h set_map set_concat by fastforce then show ?case proof cases case t from Cons(1)[OF this] Cons(3) show ?thesis unfolding to_ns_def by auto next case ic note ic = ic[unfolded to_ns, simplified] from ic have i: "(i \ I) = True" by (cases "i \ I", auto) note ic = ic[unfolded i if_True, simplified] from Cons(3)[unfolded h] i have "\v'\ \\<^sub>n\<^sub>s\<^sub>s set (to_ns' [c])" unfolding i_satisfies_ns_constraints.simps unfolding to_ns to_ns_def by force with ic show ?thesis by (induct c) (auto simp add: to_ns) qed qed qed then have l1: "\ > 0 \ \ \ (\_min ?list) \ \qd1 qd2. (qd1, qd2) \ set ?list \ val qd1 \ \ val qd2 \" for \ unfolding i_list by (simp add: delta_gt_zero delta_min[of i_list]) have "\_min ?flist \ \_min ?list" unfolding f_list i_list by (rule delta_min_mono[OF if_list]) from l1[OF delta_gt_zero this] have l1: "\qd1 qd2. (qd1, qd2) \ set ?list \ val qd1 (\_min f_list) \ val qd2 (\_min f_list)" unfolding f_list . have "\0_val_min (flat_list (to_ns ics)) \v'\ = \_min f_list" unfolding f_list[symmetric] proof(induct ics) case Nil show ?case by (simp add: to_ns_def) next case (Cons h t) then show ?case by (cases h; cases "snd h") (auto simp add: to_ns_def) qed then have l2: "from_ns v' ?f_list = Mapping.tabulate (vars_list_constraints ?f_list) (\ var. 
val (\v'\ var) (\_min f_list))" by (auto simp add: from_ns_def) fix c assume "c \ restrict_to I (set ics)" then obtain i where i: "i \ I" and mem: "(i,c) \ set ics" by auto from mem show "\from_ns v' ?f_list\ \\<^sub>c c" proof (induct c) case (LT lll rrr) then have "(lll\\v'\\, (QDelta.QDelta rrr (-1))) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr (-1)) (\_min f_list)" using l1 by simp moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LT by (auto simp add: comp_def lookup_tabulate restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ (val (QDelta.QDelta rrr (-1)) (\_min f_list))" by (auto simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def) next case (GT lll rrr) then have "((QDelta.QDelta rrr 1), lll\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 1) (\_min f_list)" using l1 by simp moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GT by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 1) (\_min f_list)" using l2 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def) next case (LEQ lll rrr) then have "(lll\\v'\\, (QDelta.QDelta rrr 0) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by simp moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LEQ by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l2 by (simp add: valuate_rat_valuate) then show ?case by (simp add: val_def) next case (GEQ lll rrr) then have "((QDelta.QDelta rrr 0), lll\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by simp moreover have "lll\(\x. 
val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GEQ by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l2 by (simp add: valuate_rat_valuate) then show ?case by (simp add: val_def) next case (EQ lll rrr) then have "((QDelta.QDelta rrr 0), lll\\v'\\) \ set ?list" and "(lll\\v'\\, (QDelta.QDelta rrr 0) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns)+ then have "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" and "val (lll\\v'\\) (\_min f_list) \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by simp_all moreover have "lll\(\x. val (\v'\ x) (\_min f_list))\ = lll\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars lll" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using EQ by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" and "lll\\from_ns v' ?f_list\\ \ val (QDelta.QDelta rrr 0) (\_min f_list)" using l1 by (auto simp add: valuate_rat_valuate) then show ?case by (simp add: val_def) next case (LTPP ll1 ll2) then have "((ll1-ll2)\\v'\\, (QDelta.QDelta 0 (-1)) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 (-1)) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LTPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 (-1)) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (GTPP ll1 ll2) then have "((QDelta.QDelta 0 1), (ll1-ll2)\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 1) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GTPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 1) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (LEQPP ll1 ll2) then have "((ll1-ll2)\\v'\\, (QDelta.QDelta 0 0) ) \ set ?list" using i unfolding cs_def by (force simp add: to_ns zero_QDelta_def) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. 
val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using LEQPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (GEQPP ll1 ll2) then have "((QDelta.QDelta 0 0), (ll1-ll2)\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns zero_QDelta_def) then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by simp moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using GEQPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by (simp add: valuate_rat_valuate) then show ?case using delta_gt_zero[of f_list] by (simp add: val_def valuate_minus) next case (EQPP ll1 ll2) then have "((ll1-ll2)\\v'\\, (QDelta.QDelta 0 0) ) \ set ?list" and "((QDelta.QDelta 0 0), (ll1-ll2)\\v'\\) \ set ?list" using i unfolding cs_def by (force simp add: to_ns zero_QDelta_def)+ then have "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" and "val ((ll1-ll2)\\v'\\) (\_min f_list) \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by simp_all moreover have "(ll1-ll2)\(\x. val (\v'\ x) (\_min f_list))\ = (ll1-ll2)\\from_ns v' ?f_list\\" proof (rule valuate_depend, rule) fix x assume "x \ vars (ll1 - ll2)" then show "val (\v'\ x) (\_min f_list) = \from_ns v' ?f_list\ x" using l2 using EQPP by (auto simp add: lookup_tabulate comp_def restrict_map_def set_vars_list to_ns_def map2fun_def') qed ultimately have "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" and "(ll1-ll2)\\from_ns v' ?f_list\\ \ val (QDelta.QDelta 0 0) (\_min f_list)" using l1 by (auto simp add: valuate_rat_valuate) then show ?case by (simp add: val_def valuate_minus) qed qed } note sat = this fix cs :: "('i \ constraint) list" have set_to_ns: "set (to_ns cs) = { (i,n) | i n c. 
(i,c) \ set cs \ n \ set (constraint_to_qdelta_constraint c)}" unfolding to_ns_def by auto show indices: "fst ` set (to_ns cs) = fst ` set cs" proof show "fst ` set (to_ns cs) \ fst ` set cs" unfolding set_to_ns by force { fix i assume "i \ fst ` set cs" then obtain c where "(i,c) \ set cs" by force hence "i \ fst ` set (to_ns cs)" unfolding set_to_ns by (cases c; force) } thus "fst ` set cs \ fst ` set (to_ns cs)" by blast qed { assume dist: "distinct_indices cs" show "distinct_indices_ns (set (to_ns cs))" unfolding distinct_indices_ns_def proof (intro allI impI conjI notI) fix n1 n2 i assume "(i,n1) \ set (to_ns cs)" "(i,n2) \ set (to_ns cs)" then obtain c1 c2 where i: "(i,c1) \ set cs" "(i,c2) \ set cs" and n: "n1 \ set (constraint_to_qdelta_constraint c1)" "n2 \ set (constraint_to_qdelta_constraint c2)" unfolding set_to_ns by auto from dist have "distinct (map fst cs)" unfolding distinct_indices_def by auto with i have c12: "c1 = c2" by (metis eq_key_imp_eq_value) note n = n[unfolded c12] show "poly n1 = poly n2" using n by (cases c2, auto) show "ns_constraint_const n1 = ns_constraint_const n2" using n by (cases c2, auto) qed } note mini = this fix I mode assume unsat: "minimal_unsat_core_ns I (set (to_ns cs))" note unsat = unsat[unfolded minimal_unsat_core_ns_def indices] hence indices: "I \ fst ` set cs" by auto show "minimal_unsat_core I cs" unfolding minimal_unsat_core_def proof (intro conjI indices impI allI, clarify) fix v assume v: "(I,v) \\<^sub>i\<^sub>c\<^sub>s set cs" let ?v = "\var. QDelta.QDelta (v var) 0" have "(I,?v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s (set (to_ns cs))" using v proof(induct cs) case (Cons ic cs) obtain i c where ic: "ic = (i,c)" by force from Cons(2-) ic have rec: "(I,v) \\<^sub>i\<^sub>c\<^sub>s set cs" and c: "i \ I \ v \\<^sub>c c" by auto { fix jn assume i: "i \ I" and "jn \ set (to_ns [(i,c)])" then have "jn \ set (i_constraint_to_qdelta_constraint (i,c))" unfolding to_ns_def by auto then obtain n where n: "n \ set (constraint_to_qdelta_constraint c)" and jn: "jn = (i,n)" by force from c[OF i] have c: "v \\<^sub>c c" by force from c n jn have "?v \\<^sub>n\<^sub>s snd jn" by (cases c) (auto simp add: less_eq_QDelta_def to_ns_def valuate_valuate_rat valuate_minus zero_QDelta_def) } note main = this from Cons(1)[OF rec] have IH: "(I,?v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs)" . show ?case unfolding i_satisfies_ns_constraints.simps proof (intro ballI) fix x assume "x \ snd ` (set (to_ns (ic # cs)) \ I \ UNIV)" then consider (1) "x \ snd ` (set (to_ns cs) \ I \ UNIV)" | (2) "x \ snd ` (set (to_ns [(i,c)]) \ I \ UNIV)" unfolding ic to_ns_def by auto then show "?v \\<^sub>n\<^sub>s x" proof cases case 1 then show ?thesis using IH by auto next case 2 then obtain jn where x: "snd jn = x" and "jn \ set (to_ns [(i,c)]) \ I \ UNIV" by auto with main[of jn] show ?thesis unfolding to_ns_def by auto qed qed qed (simp add: to_ns_def) with unsat show False unfolding minimal_unsat_core_ns_def by simp blast next fix J assume *: "distinct_indices cs" "J \ I" hence "distinct_indices_ns (set (to_ns cs))" using mini by auto with * unsat obtain v where model: "(J, v) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs)" by blast define w where "w = Mapping.Mapping (\ x. Some (v x))" have "v = \w\" unfolding w_def map2fun_def by (intro ext, transfer, auto) with model have model: "(J, \w\) \\<^sub>i\<^sub>n\<^sub>s\<^sub>s set (to_ns cs)" by auto from sat[OF this] show " \v. 
(J, v) \\<^sub>i\<^sub>c\<^sub>s set cs" by blast qed qed (* cleanup *) hide_const (open) le lt LE GE LB UB LI UI LBI UBI UBI_upd le_rat inv zero Var add flat flat_list restrict_to look upd (* -------------------------------------------------------------------------- *) (* Main soundness lemma and executability *) (* -------------------------------------------------------------------------- *) text \Simplex version with indexed constraints as input\ definition simplex_index :: "'i i_constraint list \ 'i list + (var, rat) mapping" where "simplex_index = solve_exec_code" lemma simplex_index: "simplex_index cs = Unsat I \ set I \ fst ` set cs \ \ (\ v. (set I, v) \\<^sub>i\<^sub>c\<^sub>s set cs) \ (distinct_indices cs \ (\ J \ set I. (\ v. (J, v) \\<^sub>i\<^sub>c\<^sub>s set cs)))" \ \minimal unsat core\ "simplex_index cs = Sat v \ \v\ \\<^sub>c\<^sub>s (snd ` set cs)" \ \satisfying assingment\ proof (unfold simplex_index_def) assume "solve_exec_code cs = Unsat I" from SolveExec'Default.simplex_unsat0[OF this] have core: "minimal_unsat_core (set I) cs" by auto then show "set I \ fst ` set cs \ \ (\ v. (set I, v) \\<^sub>i\<^sub>c\<^sub>s set cs) \ (distinct_indices cs \ (\J\set I. \v. (J, v) \\<^sub>i\<^sub>c\<^sub>s set cs))" unfolding minimal_unsat_core_def by auto next assume "solve_exec_code cs = Sat v" from SolveExec'Default.simplex_sat0[OF this] show "\v\ \\<^sub>c\<^sub>s (snd ` set cs)" . qed text \Simplex version where indices will be created\ definition simplex where "simplex cs = simplex_index (zip [0.. \ (\ v. v \\<^sub>c\<^sub>s set cs)" \ \unsat of original constraints\ "simplex cs = Unsat I \ set I \ {0.. \ (\ v. v \\<^sub>c\<^sub>s {cs ! i | i. i \ set I}) \ (\J\set I. \v. v \\<^sub>c\<^sub>s {cs ! i |i. i \ J})" \ \minimal unsat core\ "simplex cs = Sat v \ \v\ \\<^sub>c\<^sub>s set cs" \ \satisfying assignment\ proof (unfold simplex_def) let ?cs = "zip [0.. {0 ..< length cs}" and core: "\v. v \\<^sub>c\<^sub>s (snd ` (set ?cs \ set I \ UNIV))" "(distinct_indices (zip [0.. (\ J \ set I. \v. v \\<^sub>c\<^sub>s (snd ` (set ?cs \ J \ UNIV))))" by (auto simp flip: set_map) note core(2) also have "distinct_indices (zip [0.. J \ set I. \v. v \\<^sub>c\<^sub>s (snd ` (set ?cs \ J \ UNIV))) = (\ J \ set I. \v. v \\<^sub>c\<^sub>s { cs ! i | i. i \ J})" using index by (intro all_cong1 imp_cong ex_cong1 arg_cong[of _ _ "\ x. _ \\<^sub>c\<^sub>s x"] refl, force simp: set_zip) finally have core': "(\J\set I. \v. v \\<^sub>c\<^sub>s {cs ! i |i. i \ J}) " . note unsat = unsat_mono[OF core(1)] show "\ (\ v. v \\<^sub>c\<^sub>s set cs)" by (rule unsat, auto simp: set_zip) show "set I \ {0.. \ (\ v. v \\<^sub>c\<^sub>s {cs ! i | i. i \ set I}) \ (\J\set I. \v. v \\<^sub>c\<^sub>s {cs ! i |i. 
i \ J})" by (intro conjI index core', rule unsat, auto simp: set_zip) next assume "simplex_index (zip [0..v\ \\<^sub>c\<^sub>s set cs" by (auto simp flip: set_map) qed text \check executability\ lemma "case simplex [LTPP (lp_monom 2 1) (lp_monom 3 2 - lp_monom 3 0), GT (lp_monom 1 1) 5] of Sat _ \ True | Unsat _ \ False" by eval text \check unsat core\ lemma "case simplex_index [ (1 :: int, LT (lp_monom 1 1) 4), (2, GTPP (lp_monom 2 1) (lp_monom 1 2)), (3, EQPP (lp_monom 1 1) (lp_monom 2 2)), (4, GT (lp_monom 2 2) 5), (5, GT (lp_monom 3 0) 7)] of Sat _ \ False | Unsat I \ set I = {1,3,4}" \ \Constraints 1,3,4 are unsat core\ by eval end \ No newline at end of file diff --git a/thys/Simplex/Simplex_Auxiliary.thy b/thys/Simplex/Simplex_Auxiliary.thy --- a/thys/Simplex/Simplex_Auxiliary.thy +++ b/thys/Simplex/Simplex_Auxiliary.thy @@ -1,236 +1,236 @@ (* Authors: F. Maric, M. Spasic *) section \Auxiliary Results\ theory Simplex_Auxiliary imports - "HOL-Library.Permutation" + "HOL-Library.List_Permutation" "HOL-Library.Mapping" begin (* -------------------------------------------------------------------------- *) (* MoreList *) (* -------------------------------------------------------------------------- *) lemma map_reindex: assumes "\ i < length l. g (l ! i) = f i" shows "map f [0..i. f (l ! i)) [0.. 1" shows "last (tl l) = last l" using assms by (induct l) auto lemma hd_tl: assumes "length l > 1" shows "hd (tl l) = l ! 1" using assms by (induct l) (auto simp add: hd_conv_nth) lemma butlast_empty_conv_length: shows "(butlast l = []) = (length l \ 1)" by (induct l) (auto split: if_splits) lemma butlast_nth: assumes "n + 1 < length l" shows "butlast l ! n = l ! n" using assms by (induct l rule: rev_induct) (auto simp add: nth_append) lemma last_take_conv_nth: assumes "0 < n" "n \ length l" shows "last (take n l) = l ! (n - 1)" using assms by (cases "l = []") (auto simp add: last_conv_nth min_def) lemma tl_nth: assumes "l \ []" shows "tl l ! n = l ! (n + 1)" using assms by (induct l) auto lemma interval_3split: assumes "i < n" shows "[0..i < n\ by (auto simp del: upt_Suc) then show ?thesis by simp qed abbreviation "list_min l \ foldl min (hd l) (tl l)" lemma list_min_Min[simp]: "l \ [] \ list_min l = Min (set l)" proof (induct l rule: rev_induct) case (snoc a l') then show ?case by (cases "l' = []") (auto simp add: ac_simps) qed simp (* Minimal element of a list satisfying the given property *) definition min_satisfying :: "(('a::linorder) \ bool) \ 'a list \ 'a option" where "min_satisfying P l \ let xs = filter P l in if xs = [] then None else Some (list_min xs)" lemma min_satisfying_None: "min_satisfying P l = None \ (\ x \ set l. \ P x)" unfolding min_satisfying_def Let_def by (simp add: filter_empty_conv) lemma min_satisfying_Some: "min_satisfying P l = Some x \ x \ set l \ P x \ (\ x' \ set l. 
x' < x \ \ P x')" proof (safe) let ?xs = "filter P l" assume "min_satisfying P l = Some x" then have "set ?xs \ {}" "x = Min (set ?xs)" unfolding min_satisfying_def Let_def by (auto split: if_splits simp add: filter_empty_conv) then show "x \ set l" "P x" using Min_in[of "set ?xs"] by simp_all fix x' assume "x' \ set l" "P x'" "x' < x" have "x' \ set ?xs" proof (rule ccontr) assume "\ ?thesis" then have "x' \ x" using \x = Min (set ?xs)\ by simp then show False using \x' < x\ by simp qed then show "False" using \x' \ set l\ \P x'\ by simp qed (* -------------------------------------------------------------------------- *) (* MoreNat *) (* -------------------------------------------------------------------------- *) lemma min_element: fixes k :: nat assumes "\ (m::nat). P m" shows "\ mm. P mm \ (\ m'. m' < mm \ \ P m')" proof- from assms obtain m where "P m" by auto show ?thesis proof (cases "\m' P m'") case True then show ?thesis using \P m\ by auto next case False then show ?thesis proof (induct m) case 0 then show ?case by auto next case (Suc m') then show ?case by (cases "\ (\m'a P m'a)") auto qed qed qed (* -------------------------------------------------------------------------- *) (* MoreFun *) (* -------------------------------------------------------------------------- *) lemma finite_fun_args: assumes "finite A" "\ a \ A. finite (B a)" shows "finite {f. (\ a. if a \ A then f a \ B a else f a = f0 a)}" (is "finite (?F A)") using assms proof (induct) case empty have "?F {} = {\ x. f0 x}" by auto then show ?case by auto next case (insert a A') then have "finite (?F A')" by auto let ?f = "\ f. {f'. (\ a'. if a = a' then f' a \ B a else f' a' = f a')}" have "\ f \ ?F A'. finite (?f f)" proof fix f assume "f \ ?F A'" then have "?f f = (\ b. f (a := b)) ` B a" by (force split: if_splits) then show "finite (?f f)" using \\a\insert a A'. finite (B a)\ by auto qed then have "finite (\ (?f ` (?F A')))" using \finite (?F A')\ by auto moreover have "?F (insert a A') = \ (?f ` (?F A'))" proof show "?F (insert a A') \ \ (?f ` (?F A'))" proof fix f assume "f \ ?F (insert a A')" then have "f \ ?f (f (a := f0 a))" "f (a := f0 a) \ ?F A'" using \a \ A'\ by auto then show "f \ \ (?f ` (?F A'))" by blast qed next show "\ (?f ` (?F A')) \ ?F (insert a A')" proof fix f assume "f \ \ (?f ` (?F A'))" then obtain f0 where "f0 \ ?F A'" "f \ ?f f0" by auto then show "f \ ?F (insert a A')" using \a \ A'\ by (force split: if_splits) qed qed ultimately show ?case by simp qed (* -------------------------------------------------------------------------- *) (* MoreMapping *) (* -------------------------------------------------------------------------- *) lemma foldl_mapping_update: assumes "X \ set l" "distinct (map f l)" shows "Mapping.lookup (foldl (\m a. Mapping.update (f a) (g a) m) i l) (f X) = Some (g X)" using assms proof(induct l rule:rev_induct) case Nil then show ?case by simp next case (snoc h t) show ?case proof (cases "f h = f X") case True then show ?thesis using snoc by (auto simp: lookup_update) next case False show ?thesis by (simp add: lookup_update' False, rule snoc, insert False snoc, auto) qed qed end
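(* Informal examples for min_satisfying, assuming natural-number literals:
   min_satisfying even [5, 2, 8, 3] = Some 2 (filtering leaves [2, 8], whose minimum is 2),
   while min_satisfying (%x. x > 10) [5, 2, 8, 3] = None (no element satisfies the predicate). *)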