diff --git a/thys/Banach_Steinhaus/Banach_Steinhaus.thy b/thys/Banach_Steinhaus/Banach_Steinhaus.thy --- a/thys/Banach_Steinhaus/Banach_Steinhaus.thy +++ b/thys/Banach_Steinhaus/Banach_Steinhaus.thy @@ -1,485 +1,485 @@ (* File: Banach_Steinhaus.thy Author: Dominique Unruh, University of Tartu Author: Jose Manuel Rodriguez Caballero, University of Tartu *) section \Banach-Steinhaus theorem\ theory Banach_Steinhaus imports Banach_Steinhaus_Missing begin text \ We formalize Banach-Steinhaus theorem as theorem @{text banach_steinhaus}. This theorem was originally proved in Banach-Steinhaus's paper~\cite{banach1927principe}. For the proof, we follow Sokal's approach~\cite{sokal2011really}. Furthermore, we prove as a corollary a result about pointwise convergent sequences of bounded operators whose domain is a Banach space. \ subsection \Preliminaries for Sokal's proof of Banach-Steinhaus theorem\ lemma linear_plus_norm: includes notation_norm assumes \linear f\ shows \\f \\ \ max \f (x + \)\ \f (x - \)\\ text \ Explanation: For arbitrary \<^term>\x\ and a linear operator \<^term>\f\, \<^term>\norm (f \)\ is upper bounded by the maximum of the norms of the shifts of \<^term>\f\ (i.e., \<^term>\f (x + \)\ and \<^term>\f (x - \)\). \ proof- have \norm (f \) = norm ( (inverse (of_nat 2)) *\<^sub>R (f (x + \) - f (x - \)) )\ by (smt add_diff_cancel_left' assms diff_add_cancel diff_diff_add linear_diff midpoint_def midpoint_plus_self of_nat_1 of_nat_add one_add_one scaleR_half_double) also have \\ = inverse (of_nat 2) * norm (f (x + \) - f (x - \))\ using Real_Vector_Spaces.real_normed_vector_class.norm_scaleR by simp also have \\ \ inverse (of_nat 2) * (norm (f (x + \)) + norm (f (x - \)))\ by (simp add: norm_triangle_ineq4) also have \\ \ max (norm (f (x + \))) (norm (f (x - \)))\ by auto finally show ?thesis by blast qed lemma onorm_Sup_on_ball: includes notation_norm assumes \r > 0\ shows "\f\ \ Sup ( (\x. \f *\<^sub>v x\) ` (ball x r) ) / r" text \ Explanation: Let \<^term>\f\ be a bounded operator and let \<^term>\x\ be a point. For any \<^term>\r > 0\, the operator norm of \<^term>\f\ is bounded above by the supremum of $f$ applied to the open ball of radius \<^term>\r\ around \<^term>\x\, divided by \<^term>\r\. \ proof- have bdd_above_3: \bdd_above ((\x. \f *\<^sub>v x\) ` (ball 0 r))\ proof - obtain M where \\ \. \f *\<^sub>v \\ \ M * norm \\ and \M \ 0\ using norm_blinfun norm_ge_zero by blast hence \\ \. \ \ ball 0 r \ \f *\<^sub>v \\ \ M * r\ using \r > 0\ by (smt mem_ball_0 mult_left_mono) thus ?thesis by (meson bdd_aboveI2) qed have bdd_above_2: \bdd_above ((\ \. \f *\<^sub>v (x + \)\) ` (ball 0 r))\ proof- have \bdd_above ((\ \. \f *\<^sub>v x\) ` (ball 0 r))\ by auto moreover have \bdd_above ((\ \. \f *\<^sub>v \\) ` (ball 0 r))\ using bdd_above_3 by blast ultimately have \bdd_above ((\ \. \f *\<^sub>v x\ + \f *\<^sub>v \\) ` (ball 0 r))\ by (rule bdd_above_plus) then obtain M where \\ \. \ \ ball 0 r \ \f *\<^sub>v x\ + \f *\<^sub>v \\ \ M\ unfolding bdd_above_def by (meson image_eqI) moreover have \\f *\<^sub>v (x + \)\ \ \f *\<^sub>v x\ + \f *\<^sub>v \\\ for \ by (simp add: blinfun.add_right norm_triangle_ineq) ultimately have \\ \. \ \ ball 0 r \ \f *\<^sub>v (x + \)\ \ M\ by (simp add: blinfun.add_right norm_triangle_le) thus ?thesis by (meson bdd_aboveI2) qed have bdd_above_4: \bdd_above ((\ \. \f *\<^sub>v (x - \)\) ` (ball 0 r))\ proof- obtain K where K_def: \\ \. \ \ ball 0 r \ \f *\<^sub>v (x + \)\ \ K\ using \bdd_above ((\ \. 
norm (f (x + \))) ` (ball 0 r))\ unfolding bdd_above_def by (meson image_eqI) have \\ \ ball (0::'a) r \ -\ \ ball 0 r\ for \ by auto thus ?thesis by (metis K_def ab_group_add_class.ab_diff_conv_add_uminus bdd_aboveI2) qed have bdd_above_1: \bdd_above ((\ \. max \f *\<^sub>v (x + \)\ \f *\<^sub>v (x - \)\) ` (ball 0 r))\ proof- have \bdd_above ((\ \. \f *\<^sub>v (x + \)\) ` (ball 0 r))\ using bdd_above_2 by blast moreover have \bdd_above ((\ \. \f *\<^sub>v (x - \)\) ` (ball 0 r))\ using bdd_above_4 by blast ultimately show ?thesis unfolding max_def apply auto apply (meson bdd_above_Int1 bdd_above_mono image_Int_subset) by (meson bdd_above_Int1 bdd_above_mono image_Int_subset) qed have bdd_above_6: \bdd_above ((\t. \f *\<^sub>v t\) ` ball x r)\ proof- have \bounded (ball x r)\ by simp hence \bounded ((\t. \f *\<^sub>v t\) ` ball x r)\ by (metis (no_types) add.left_neutral bdd_above_2 bdd_above_norm bounded_norm_comp image_add_ball image_image) thus ?thesis by (simp add: bounded_imp_bdd_above) qed have norm_1: \(\\. \f *\<^sub>v (x + \)\) ` ball 0 r = (\t. \f *\<^sub>v t\) ` ball x r\ by (metis add.right_neutral ball_translation image_image) have bdd_above_5: \bdd_above ((\\. norm (f (x + \))) ` ball 0 r)\ by (simp add: bdd_above_2) have norm_2: \\\\ < r \ \f *\<^sub>v (x - \)\ \ (\\. \f *\<^sub>v (x + \)\) ` ball 0 r\ for \ proof- assume \\\\ < r\ hence \\ \ ball (0::'a) r\ by auto hence \-\ \ ball (0::'a) r\ by auto thus ?thesis by (metis (no_types, lifting) ab_group_add_class.ab_diff_conv_add_uminus image_iff) qed have norm_2': \\\\ < r \ \f *\<^sub>v (x + \)\ \ (\\. \f *\<^sub>v (x - \)\) ` ball 0 r\ for \ proof- assume \norm \ < r\ hence \\ \ ball (0::'a) r\ by auto hence \-\ \ ball (0::'a) r\ by auto thus ?thesis by (metis (no_types, lifting) diff_minus_eq_add image_iff) qed have bdd_above_6: \bdd_above ((\\. \f *\<^sub>v (x - \)\) ` ball 0 r)\ by (simp add: bdd_above_4) have Sup_2: \(SUP \\ball 0 r. max \f *\<^sub>v (x + \)\ \f *\<^sub>v (x - \)\) = max (SUP \\ball 0 r. \f *\<^sub>v (x + \)\) (SUP \\ball 0 r. \f *\<^sub>v (x - \)\)\ proof- have \ball (0::'a) r \ {}\ using \r > 0\ by auto moreover have \bdd_above ((\\. \f *\<^sub>v (x + \)\) ` ball 0 r)\ using bdd_above_5 by blast moreover have \bdd_above ((\\. \f *\<^sub>v (x - \)\) ` ball 0 r)\ using bdd_above_6 by blast ultimately show ?thesis using max_Sup by (metis (mono_tags, lifting) Banach_Steinhaus_Missing.pointwise_max_def image_cong) qed have Sup_3': \\\\ < r \ \f *\<^sub>v (x + \)\ \ (\\. \f *\<^sub>v (x - \)\) ` ball 0 r\ for \::'a by (simp add: norm_2') have Sup_3'': \\\\ < r \ \f *\<^sub>v (x - \)\ \ (\\. \f *\<^sub>v (x + \)\) ` ball 0 r\ for \::'a by (simp add: norm_2) have Sup_3: \max (SUP \\ball 0 r. \f *\<^sub>v (x + \)\) (SUP \\ball 0 r. \f *\<^sub>v (x - \)\) = (SUP \\ball 0 r. \f *\<^sub>v (x + \)\)\ proof- have \(\\. \f *\<^sub>v (x + \)\) ` (ball 0 r) = (\\. \f *\<^sub>v (x - \)\) ` (ball 0 r)\ apply auto using Sup_3' apply auto using Sup_3'' by blast hence \Sup ((\\. \f *\<^sub>v (x + \)\) ` (ball 0 r))=Sup ((\\. \f *\<^sub>v (x - \)\) ` (ball 0 r))\ by simp thus ?thesis by simp qed have Sup_1: \Sup ((\t. \f *\<^sub>v t\) ` (ball 0 r)) \ Sup ( (\\. \f *\<^sub>v \\) ` (ball x r) )\ proof- have \(\t. \f *\<^sub>v t\) \ \ max \f *\<^sub>v (x + \)\ \f *\<^sub>v (x - \)\\ for \ apply(rule linear_plus_norm) apply (rule bounded_linear.linear) by (simp add: blinfun.bounded_linear_right) moreover have \bdd_above ((\ \. 
max \f *\<^sub>v (x + \)\ \f *\<^sub>v (x - \)\) ` (ball 0 r))\ using bdd_above_1 by blast moreover have \ball (0::'a) r \ {}\ using \r > 0\ by auto ultimately have \Sup ((\t. \f *\<^sub>v t\) ` (ball 0 r)) \ Sup ((\\. max \f *\<^sub>v (x + \)\ \f *\<^sub>v (x - \)\) ` (ball 0 r))\ using cSUP_mono by smt also have \\ = max (Sup ((\\. \f *\<^sub>v (x + \)\) ` (ball 0 r))) (Sup ((\\. \f *\<^sub>v (x - \)\) ` (ball 0 r)))\ using Sup_2 by blast also have \\ = Sup ((\\. \f *\<^sub>v (x + \)\) ` (ball 0 r))\ using Sup_3 by blast also have \\ = Sup ((\\. \f *\<^sub>v \\) ` (ball x r))\ by (metis add.right_neutral ball_translation image_image) finally show ?thesis by blast qed have \\f\ = (SUP x\ball 0 r. \f *\<^sub>v x\) / r\ using \0 < r\ onorm_r by blast moreover have \Sup ((\t. \f *\<^sub>v t\) ` (ball 0 r)) / r \ Sup ((\\. \f *\<^sub>v \\) ` (ball x r)) / r\ using Sup_1 \0 < r\ divide_right_mono by fastforce ultimately have \\f\ \ Sup ((\t. \f *\<^sub>v t\) ` ball x r) / r\ by simp thus ?thesis by simp qed lemma onorm_Sup_on_ball': includes notation_norm assumes \r > 0\ and \\ < 1\ shows \\\\ball x r. \ * r * \f\ \ \f *\<^sub>v \\\ text \ In the proof of Banach-Steinhaus theorem, we will use this variation of the lemma @{text onorm_Sup_on_ball}. Explanation: Let \<^term>\f\ be a bounded operator, let \<^term>\x\ be a point and let \<^term>\r\ be a positive real number. For any real number \<^term>\\ < 1\, there is a point \<^term>\\\ in the open ball of radius \<^term>\r\ around \<^term>\x\ such that \<^term>\\ * r * \f\ \ \f *\<^sub>v \\\. \ proof(cases \f = 0\) case True thus ?thesis by (metis assms(1) centre_in_ball mult_zero_right norm_zero order_refl zero_blinfun.rep_eq) next case False have bdd_above_1: \bdd_above ((\t. \(*\<^sub>v) f t\) ` ball x r)\ for f::\'a \\<^sub>L 'b\ using assms(1) bounded_linear_image by (simp add: bounded_linear_image blinfun.bounded_linear_right bounded_imp_bdd_above bounded_norm_comp) have \norm f > 0\ using \f \ 0\ by auto have \norm f \ Sup ( (\\. \(*\<^sub>v) f \\) ` (ball x r) ) / r\ using \r > 0\ by (simp add: onorm_Sup_on_ball) hence \r * norm f \ Sup ( (\\. \(*\<^sub>v) f \\) ` (ball x r) )\ using \0 < r\ by (smt divide_strict_right_mono nonzero_mult_div_cancel_left) moreover have \\ * r * norm f < r * norm f\ using \\ < 1\ using \0 < norm f\ \0 < r\ by auto ultimately have \\ * r * norm f < Sup ( (norm \ ((*\<^sub>v) f)) ` (ball x r) )\ by simp moreover have \(norm \ ( (*\<^sub>v) f)) ` (ball x r) \ {}\ using \0 < r\ by auto moreover have \bdd_above ((norm \ ( (*\<^sub>v) f)) ` (ball x r))\ using bdd_above_1 apply transfer by simp ultimately have \\t \ (norm \ ( (*\<^sub>v) f)) ` (ball x r). \ * r * norm f < t\ by (simp add: less_cSup_iff) thus ?thesis by (smt comp_def image_iff) qed subsection \Banach-Steinhaus theorem\ theorem banach_steinhaus: fixes f::\'c \ ('a::banach \\<^sub>L 'b::real_normed_vector)\ assumes \\x. bounded (range (\n. (f n) *\<^sub>v x))\ shows \bounded (range f)\ text\ This is Banach-Steinhaus Theorem. Explanation: If a family of bounded operators on a Banach space is pointwise bounded, then it is uniformly bounded. \ proof(rule classical) assume \\(bounded (range f))\ have sum_1: \\K. \n. sum (\k. inverse (real_of_nat 3^k)) {0..n} \ K\ proof- have \summable (\n. (inverse (real_of_nat 3))^n)\ using Series.summable_geometric_iff [where c = "inverse (real_of_nat 3)"] by auto moreover have \(inverse (real_of_nat 3))^n = inverse (real_of_nat 3^n)\ for n::nat using power_inverse by blast ultimately have \summable (\n. 
inverse (real_of_nat 3^n))\ by auto hence \bounded (range (\n. sum (\ k. inverse (real 3 ^ k)) {0.. using summable_imp_sums_bounded[where f = "(\n. inverse (real_of_nat 3^n))"] lessThan_atLeast0 by auto hence \\M. \h\(range (\n. sum (\ k. inverse (real 3 ^ k)) {0.. M\ using bounded_iff by blast then obtain M where \h\range (\n. sum (\ k. inverse (real 3 ^ k)) {0.. norm h \ M\ for h by blast have sum_2: \sum (\k. inverse (real_of_nat 3^k)) {0..n} \ M\ for n proof- have \norm (sum (\ k. inverse (real 3 ^ k)) {0..< Suc n}) \ M\ using \\h. h\(range (\n. sum (\ k. inverse (real 3 ^ k)) {0.. norm h \ M\ by blast hence \norm (sum (\ k. inverse (real 3 ^ k)) {0..n}) \ M\ by (simp add: atLeastLessThanSuc_atLeastAtMost) hence \(sum (\ k. inverse (real 3 ^ k)) {0..n}) \ M\ by auto thus ?thesis by blast qed have \sum (\k. inverse (real_of_nat 3^k)) {0..n} \ M\ for n using sum_2 by blast thus ?thesis by blast qed have \of_rat 2/3 < (1::real)\ by auto hence \\g::'a \\<^sub>L 'b. \x. \r. \\. g \ 0 \ r > 0 \ (\\ball x r \ (of_rat 2/3) * r * norm g \ norm ((*\<^sub>v) g \))\ using onorm_Sup_on_ball' by blast hence \\\. \g::'a \\<^sub>L 'b. \x. \r. g \ 0 \ r > 0 \ ((\ g x r)\ball x r \ (of_rat 2/3) * r * norm g \ norm ((*\<^sub>v) g (\ g x r)))\ by metis then obtain \ where f1: \\g \ 0; r > 0\ \ \ g x r \ ball x r \ (of_rat 2/3) * r * norm g \ norm ((*\<^sub>v) g (\ g x r))\ for g::\'a \\<^sub>L 'b\ and x and r by blast have \\n. \k. norm (f k) \ 4^n\ using \\(bounded (range f))\ by (metis (mono_tags, hide_lams) boundedI image_iff linear) hence \\k. \n. norm (f (k n)) \ 4^n\ by metis hence \\k. \n. norm ((f \ k) n) \ 4^n\ by simp then obtain k where \norm ((f \ k) n) \ 4^n\ for n by blast define T where \T = f \ k\ have \T n \ range f\ for n unfolding T_def by simp have \norm (T n) \ of_nat (4^n)\ for n unfolding T_def using \\ n. norm ((f \ k) n) \ 4^n\ by auto hence \T n \ 0\ for n by (smt T_def \\n. 4 ^ n \ norm ((f \ k) n)\ norm_zero power_not_zero zero_le_power) have \inverse (of_nat 3^n) > (0::real)\ for n by auto define y::\nat \ 'a\ where \y = rec_nat 0 (\n x. \ (T n) x (inverse (of_nat 3^n)))\ have \y (Suc n) \ ball (y n) (inverse (of_nat 3^n))\ for n using f1 \\ n. T n \ 0\ \\ n. inverse (of_nat 3^n) > 0\ unfolding y_def by auto hence \norm (y (Suc n) - y n) \ inverse (of_nat 3^n)\ for n unfolding ball_def apply auto using dist_norm by (smt norm_minus_commute) moreover have \\K. \n. sum (\k. inverse (real_of_nat 3^k)) {0..n} \ K\ using sum_1 by blast moreover have \Cauchy y\ using convergent_series_Cauchy[where a = "\n. inverse (of_nat 3^n)" and \ = y] dist_norm by (metis calculation(1) calculation(2)) hence \\ x. y \ x\ by (simp add: convergent_eq_Cauchy) then obtain x where \y \ x\ by blast have norm_2: \norm (x - y (Suc n)) \ (inverse (of_nat 2))*(inverse (of_nat 3^n))\ for n proof- have \inverse (real_of_nat 3) < 1\ by simp moreover have \y 0 = 0\ using y_def by auto ultimately have \norm (x - y (Suc n)) \ (inverse (of_nat 3)) * inverse (1 - (inverse (of_nat 3))) * ((inverse (of_nat 3)) ^ n)\ using bound_Cauchy_to_lim[where c = "inverse (of_nat 3)" and y = y and x = x] power_inverse semiring_norm(77) \y \ x\ \\ n. norm (y (Suc n) - y n) \ inverse (of_nat 3^n)\ by (metis divide_inverse) moreover have \inverse (real_of_nat 3) * inverse (1 - (inverse (of_nat 3))) = inverse (of_nat 2)\ by auto ultimately show ?thesis by (metis power_inverse) qed have \norm (x - y (Suc n)) \ (inverse (of_nat 2))*(inverse (of_nat 3^n))\ for n using norm_2 by blast have \\ M. \ n. 
norm ((*\<^sub>v) (T n) x) \ M\ unfolding T_def apply auto by (metis \\x. bounded (range (\n. (*\<^sub>v) (f n) x))\ bounded_iff rangeI) then obtain M where \norm ((*\<^sub>v) (T n) x) \ M\ for n by blast have norm_1: \norm (T n) * norm (y (Suc n) - x) + norm ((*\<^sub>v) (T n) x) \ inverse (real 2) * inverse (real 3 ^ n) * norm (T n) + norm ((*\<^sub>v) (T n) x)\ for n proof- have \norm (y (Suc n) - x) \ (inverse (of_nat 2))*(inverse (of_nat 3^n))\ using \norm (x - y (Suc n)) \ (inverse (of_nat 2))*(inverse (of_nat 3^n))\ by (simp add: norm_minus_commute) moreover have \norm (T n) \ 0\ by auto ultimately have \norm (T n) * norm (y (Suc n) - x) \ (inverse (of_nat 2))*(inverse (of_nat 3^n))*norm (T n)\ by (simp add: \\n. T n \ 0\) thus ?thesis by simp qed have inverse_2: \(inverse (of_nat 6)) * inverse (real 3 ^ n) * norm (T n) \ norm ((*\<^sub>v) (T n) x)\ for n proof- have \(of_rat 2/3)*(inverse (of_nat 3^n))*norm (T n) \ norm ((*\<^sub>v) (T n) (y (Suc n)))\ using f1 \\ n. T n \ 0\ \\ n. inverse (of_nat 3^n) > 0\ unfolding y_def by auto also have \\ = norm ((*\<^sub>v) (T n) ((y (Suc n) - x) + x))\ by auto also have \\ = norm ((*\<^sub>v) (T n) (y (Suc n) - x) + (*\<^sub>v) (T n) x)\ apply transfer apply auto by (metis diff_add_cancel linear_simps(1)) also have \\ \ norm ((*\<^sub>v) (T n) (y (Suc n) - x)) + norm ((*\<^sub>v) (T n) x)\ by (simp add: norm_triangle_ineq) also have \\ \ norm (T n) * norm (y (Suc n) - x) + norm ((*\<^sub>v) (T n) x)\ apply transfer apply auto using onorm by auto also have \\ \ (inverse (of_nat 2))*(inverse (of_nat 3^n))*norm (T n) + norm ((*\<^sub>v) (T n) x)\ using norm_1 by blast finally have \(of_rat 2/3) * inverse (real 3 ^ n) * norm (T n) \ inverse (real 2) * inverse (real 3 ^ n) * norm (T n) + norm ((*\<^sub>v) (T n) x)\ by blast hence \(of_rat 2/3) * inverse (real 3 ^ n) * norm (T n) - inverse (real 2) * inverse (real 3 ^ n) * norm (T n) \ norm ((*\<^sub>v) (T n) x)\ by linarith moreover have \(of_rat 2/3) * inverse (real 3 ^ n) * norm (T n) - inverse (real 2) * inverse (real 3 ^ n) * norm (T n) = (inverse (of_nat 6)) * inverse (real 3 ^ n) * norm (T n)\ by fastforce ultimately show \(inverse (of_nat 6)) * inverse (real 3 ^ n) * norm (T n) \ norm ((*\<^sub>v) (T n) x)\ by linarith qed have inverse_3: \(inverse (of_nat 6)) * (of_rat (4/3)^n) \ (inverse (of_nat 6)) * inverse (real 3 ^ n) * norm (T n)\ for n proof- have \of_rat (4/3)^n = inverse (real 3 ^ n) * (of_nat 4^n)\ apply auto by (metis divide_inverse_commute of_rat_divide power_divide of_rat_numeral_eq) also have \\ \ inverse (real 3 ^ n) * norm (T n)\ using \\n. norm (T n) \ of_nat (4^n)\ by simp finally have \of_rat (4/3)^n \ inverse (real 3 ^ n) * norm (T n)\ by blast moreover have \inverse (of_nat 6) > (0::real)\ by auto ultimately show ?thesis by auto qed have inverse_1: \(inverse (of_nat 6)) * (of_rat (4/3)^n) \ M\ for n proof- have \(inverse (of_nat 6)) * (of_rat (4/3)^n) \ (inverse (of_nat 6)) * inverse (real 3 ^ n) * norm (T n)\ using inverse_3 by blast also have \\ \ norm ((*\<^sub>v) (T n) x)\ using inverse_2 by blast finally have \(inverse (of_nat 6)) * (of_rat (4/3)^n) \ norm ((*\<^sub>v) (T n) x)\ by auto thus ?thesis using \\ n. norm ((*\<^sub>v) (T n) x) \ M\ by smt qed have \\n. 
M < (inverse (of_nat 6)) * (of_rat (4/3)^n)\ using Real.real_arch_pow by auto moreover have \(inverse (of_nat 6)) * (of_rat (4/3)^n) \ M\ for n using inverse_1 by blast ultimately show ?thesis by smt qed subsection \A consequence of Banach-Steinhaus theorem\ corollary bounded_linear_limit_bounded_linear: fixes f::\nat \ ('a::banach \\<^sub>L 'b::real_normed_vector)\ assumes \\x. convergent (\n. (f n) *\<^sub>v x)\ shows \\g. (\n. (*\<^sub>v) (f n)) \pointwise\ (*\<^sub>v) g\ text\ Explanation: If a sequence of bounded operators on a Banach space converges pointwise, then the limit is also a bounded operator. \ proof- have \\l. (\n. (*\<^sub>v) (f n) x) \ l\ for x by (simp add: \\x. convergent (\n. (*\<^sub>v) (f n) x)\ convergentD) hence \\F. (\n. (*\<^sub>v) (f n)) \pointwise\ F\ unfolding pointwise_convergent_to_def by metis obtain F where \(\n. (*\<^sub>v) (f n)) \pointwise\ F\ using \\F. (\n. (*\<^sub>v) (f n)) \pointwise\ F\ by auto have \\x. (\ n. (*\<^sub>v) (f n) x) \ F x\ using \(\n. (*\<^sub>v) (f n)) \pointwise\ F\ apply transfer by (simp add: pointwise_convergent_to_def) have \bounded (range f)\ using \\x. convergent (\n. (*\<^sub>v) (f n) x)\ banach_steinhaus \\x. \l. (\n. (*\<^sub>v) (f n) x) \ l\ convergent_imp_bounded by blast have norm_f_n: \\ M. \ n. norm (f n) \ M\ unfolding bounded_def by (meson UNIV_I \bounded (range f)\ bounded_iff image_eqI) have \isCont (\ t::'b. norm t) y\ for y::'b using Limits.isCont_norm by simp hence \(\ n. norm ((*\<^sub>v) (f n) x)) \ (norm (F x))\ for x using \\ x::'a. (\ n. (*\<^sub>v) (f n) x) \ F x\ by (simp add: tendsto_norm) hence norm_f_n_x: \\ M. \ n. norm ((*\<^sub>v) (f n) x) \ M\ for x using Elementary_Metric_Spaces.convergent_imp_bounded by (metis UNIV_I \\ x::'a. (\ n. (*\<^sub>v) (f n) x) \ F x\ bounded_iff image_eqI) have norm_f: \\K. \n. \x. norm ((*\<^sub>v) (f n) x) \ norm x * K\ proof- have \\ M. \ n. norm ((*\<^sub>v) (f n) x) \ M\ for x using norm_f_n_x \\x. (\n. (*\<^sub>v) (f n) x) \ F x\ by blast hence \\ M. \ n. norm (f n) \ M\ using norm_f_n by simp then obtain M::real where \\ M. \ n. norm (f n) \ M\ by blast have \\ n. \x. norm ((*\<^sub>v) (f n) x) \ norm x * norm (f n)\ apply transfer apply auto by (metis mult.commute onorm) thus ?thesis using \\ M. \ n. norm (f n) \ M\ by (metis (no_types, hide_lams) dual_order.trans norm_eq_zero order_refl - real_mult_le_cancel_iff2 vector_space_over_itself.scale_zero_left zero_less_norm_iff) + mult_le_cancel_iff2 vector_space_over_itself.scale_zero_left zero_less_norm_iff) qed have norm_F_x: \\K. \x. norm (F x) \ norm x * K\ proof- have "\K. \n. \x. norm ((*\<^sub>v) (f n) x) \ norm x * K" using norm_f \\x. (\n. (*\<^sub>v) (f n) x) \ F x\ by auto thus ?thesis using \\ x::'a. (\ n. (*\<^sub>v) (f n) x) \ F x\ apply transfer by (metis Lim_bounded tendsto_norm) qed have \linear F\ proof(rule linear_limit_linear) show \linear ((*\<^sub>v) (f n))\ for n apply transfer apply auto by (simp add: bounded_linear.linear) show \f \pointwise\ F\ using \(\n. (*\<^sub>v) (f n)) \pointwise\ F\ by auto qed moreover have \bounded_linear_axioms F\ using norm_F_x by (simp add: \\x. (\n. (*\<^sub>v) (f n) x) \ F x\ bounded_linear_axioms_def) ultimately have \bounded_linear F\ unfolding bounded_linear_def by blast hence \\g. (*\<^sub>v) g = F\ using bounded_linear_Blinfun_apply by auto thus ?thesis using \(\n. 
(*\<^sub>v) (f n)) \pointwise\ F\ apply transfer by auto qed end diff --git a/thys/Banach_Steinhaus/Banach_Steinhaus_Missing.thy b/thys/Banach_Steinhaus/Banach_Steinhaus_Missing.thy --- a/thys/Banach_Steinhaus/Banach_Steinhaus_Missing.thy +++ b/thys/Banach_Steinhaus/Banach_Steinhaus_Missing.thy @@ -1,898 +1,898 @@ (* File: Banach_Steinhaus_Missing.thy Author: Dominique Unruh, University of Tartu Author: Jose Manuel Rodriguez Caballero, University of Tartu *) section \Missing results for the proof of Banach-Steinhaus theorem\ theory Banach_Steinhaus_Missing imports "HOL-Analysis.Infinite_Set_Sum" begin subsection \Results missing for the proof of Banach-Steinhaus theorem\ text \ The results proved here are preliminaries for the proof of Banach-Steinhaus theorem using Sokal's approach, but they do not explicitly appear in Sokal's paper ~\cite{sokal2011reall}. \ text\Notation for the norm\ bundle notation_norm begin notation norm ("\_\") end bundle no_notation_norm begin no_notation norm ("\_\") end unbundle notation_norm text\Notation for apply bilinear function\ bundle notation_blinfun_apply begin notation blinfun_apply (infixr "*\<^sub>v" 70) end bundle no_notation_blinfun_apply begin no_notation blinfun_apply (infixr "*\<^sub>v" 70) end unbundle notation_blinfun_apply lemma bdd_above_plus: fixes f::\'a \ real\ assumes \bdd_above (f ` S)\ and \bdd_above (g ` S)\ shows \bdd_above ((\ x. f x + g x) ` S)\ text \ Explanation: If the images of two real-valued functions \<^term>\f\,\<^term>\g\ are bounded above on a set \<^term>\S\, then the image of their sum is bounded on \<^term>\S\. \ proof- obtain M where \\ x. x\S \ f x \ M\ using \bdd_above (f ` S)\ unfolding bdd_above_def by blast obtain N where \\ x. x\S \ g x \ N\ using \bdd_above (g ` S)\ unfolding bdd_above_def by blast have \\ x. x\S \ f x + g x \ M + N\ using \\x. x \ S \ f x \ M\ \\x. x \ S \ g x \ N\ by fastforce thus ?thesis unfolding bdd_above_def by blast qed text\The maximum of two functions\ definition pointwise_max:: "('a \ 'b::ord) \ ('a \ 'b) \ ('a \ 'b)" where \pointwise_max f g = (\x. max (f x) (g x))\ lemma max_Sup_absorb_left: fixes f g::\'a \ real\ assumes \X \ {}\ and \bdd_above (f ` X)\ and \bdd_above (g ` X)\ and \Sup (f ` X) \ Sup (g ` X)\ shows \Sup ((pointwise_max f g) ` X) = Sup (f ` X)\ text \Explanation: For real-valued functions \<^term>\f\ and \<^term>\g\, if the supremum of \<^term>\f\ is greater-equal the supremum of \<^term>\g\, then the supremum of \<^term>\max f g\ equals the supremum of \<^term>\f\. (Under some technical conditions.)\ proof- have y_Sup: \y \ ((\ x. max (f x) (g x)) ` X) \ y \ Sup (f ` X)\ for y proof- assume \y \ ((\ x. max (f x) (g x)) ` X)\ then obtain x where \y = max (f x) (g x)\ and \x \ X\ by blast have \f x \ Sup (f ` X)\ by (simp add: \x \ X\ \bdd_above (f ` X)\ cSUP_upper) moreover have \g x \ Sup (g ` X)\ by (simp add: \x \ X\ \bdd_above (g ` X)\ cSUP_upper) ultimately have \max (f x) (g x) \ Sup (f ` X)\ using \Sup (f ` X) \ Sup (g ` X)\ by auto thus ?thesis by (simp add: \y = max (f x) (g x)\) qed have y_f_X: \y \ f ` X \ y \ Sup ((\ x. max (f x) (g x)) ` X)\ for y proof- assume \y \ f ` X\ then obtain x where \x \ X\ and \y = f x\ by blast have \bdd_above ((\ \. max (f \) (g \)) ` X)\ by (metis (no_types) \bdd_above (f ` X)\ \bdd_above (g ` X)\ bdd_above_image_sup sup_max) moreover have \e > 0 \ \ k \ (\ \. max (f \) (g \)) ` X. 
y \ k + e\ for e::real using \Sup (f ` X) \ Sup (g ` X)\ by (smt \x \ X\ \y = f x\ image_eqI) ultimately show ?thesis using \x \ X\ \y = f x\ cSUP_upper by fastforce qed have \Sup ((\ x. max (f x) (g x)) ` X) \ Sup (f ` X)\ using y_Sup by (simp add: \X \ {}\ cSup_least) moreover have \Sup ((\ x. max (f x) (g x)) ` X) \ Sup (f ` X)\ using y_f_X by (metis (mono_tags) cSup_least calculation empty_is_image) ultimately show ?thesis unfolding pointwise_max_def by simp qed lemma max_Sup_absorb_right: fixes f g::\'a \ real\ assumes \X \ {}\ and \bdd_above (f ` X)\ and \bdd_above (g ` X)\ and \Sup (f ` X) \ Sup (g ` X)\ shows \Sup ((pointwise_max f g) ` X) = Sup (g ` X)\ text \ Explanation: For real-valued functions \<^term>\f\ and \<^term>\g\ and a nonempty set \<^term>\X\, such that the \<^term>\f\ and \<^term>\g\ are bounded above on \<^term>\X\, if the supremum of \<^term>\f\ on \<^term>\X\ is lower-equal the supremum of \<^term>\g\ on \<^term>\X\, then the supremum of \<^term>\pointwise_max f g\ on \<^term>\X\ equals the supremum of \<^term>\g\. This is the right analog of @{text max_Sup_absorb_left}. \ proof- have \Sup ((pointwise_max g f) ` X) = Sup (g ` X)\ using assms by (simp add: max_Sup_absorb_left) moreover have \pointwise_max g f = pointwise_max f g\ unfolding pointwise_max_def by auto ultimately show ?thesis by simp qed lemma max_Sup: fixes f g::\'a \ real\ assumes \X \ {}\ and \bdd_above (f ` X)\ and \bdd_above (g ` X)\ shows \Sup ((pointwise_max f g) ` X) = max (Sup (f ` X)) (Sup (g ` X))\ text \ Explanation: Let \<^term>\X\ be a nonempty set. Two supremum over \<^term>\X\ of the maximum of two real-value functions is equal to the maximum of their suprema over \<^term>\X\, provided that the functions are bounded above on \<^term>\X\. \ proof(cases \Sup (f ` X) \ Sup (g ` X)\) case True thus ?thesis by (simp add: assms(1) assms(2) assms(3) max_Sup_absorb_left) next case False have f1: "\ 0 \ Sup (f ` X) + - 1 * Sup (g ` X)" using False by linarith hence "Sup (Banach_Steinhaus_Missing.pointwise_max f g ` X) = Sup (g ` X)" by (simp add: assms(1) assms(2) assms(3) max_Sup_absorb_right) thus ?thesis using f1 by linarith qed lemma identity_telescopic: fixes x :: \_ \ 'a::real_normed_vector\ assumes \x \ l\ shows \(\ N. sum (\ k. x (Suc k) - x k) {n..N}) \ l - x n\ text\ Expression of a limit as a telescopic series. Explanation: If \<^term>\x\ converges to \<^term>\l\ then the sum \<^term>\sum (\ k. x (Suc k) - x k) {n..N}\ converges to \<^term>\l - x n\ as \<^term>\N\ goes to infinity. \ proof- have \(\ p. x (p + Suc n)) \ l\ using \x \ l\ by (rule LIMSEQ_ignore_initial_segment) hence \(\ p. x (Suc n + p)) \ l\ by (simp add: add.commute) hence \(\ p. x (Suc (n + p))) \ l\ by simp hence \(\ t. (- (x n)) + (\ p. x (Suc (n + p))) t ) \ (- (x n)) + l\ using tendsto_add_const_iff by metis hence f1: \(\ p. x (Suc (n + p)) - x n)\ l - x n\ by simp have \sum (\ k. x (Suc k) - x k) {n..n+p} = x (Suc (n+p)) - x n\ for p by (simp add: sum_Suc_diff) moreover have \(\ N. sum (\ k. x (Suc k) - x k) {n..N}) (n + t) = (\ p. sum (\ k. x (Suc k) - x k) {n..n+p}) t\ for t by blast ultimately have \(\ p. (\ N. sum (\ k. x (Suc k) - x k) {n..N}) (n + p)) \ l - x n\ using f1 by simp hence \(\ p. (\ N. sum (\ k. x (Suc k) - x k) {n..N}) (p + n)) \ l - x n\ by (simp add: add.commute) hence \(\ p. (\ N. sum (\ k. x (Suc k) - x k) {n..N}) p) \ l - x n\ using Topological_Spaces.LIMSEQ_offset[where f = "(\ N. sum (\ k. x (Suc k) - x k) {n..N})" and a = "l - x n" and k = n] by blast hence \(\ M. (\ N. sum (\ k. 
x (Suc k) - x k) {n..N}) M) \ l - x n\ by simp thus ?thesis by blast qed lemma bound_Cauchy_to_lim: assumes \y \ x\ and \\n. \y (Suc n) - y n\ \ c^n\ and \y 0 = 0\ and \c < 1\ shows \\x - y (Suc n)\ \ (c / (1 - c)) * c ^ n\ text\ Inequality about a sequence of approximations assuming that the sequence of differences is bounded by a geometric progression. Explanation: Let \<^term>\y\ be a sequence converging to \<^term>\x\. If \<^term>\y\ satisfies the inequality \\y (Suc n) - y n\ \ c ^ n\ for some \<^term>\c < 1\ and assuming \<^term>\y 0 = 0\ then the inequality \\x - y (Suc n)\ \ (c / (1 - c)) * c ^ n\ holds. \ proof- have \c \ 0\ using \\ n. \y (Suc n) - y n\ \ c^n\ by (smt norm_imp_pos_and_ge power_Suc0_right) have norm_1: \norm (\k = Suc n..N. y (Suc k) - y k) \ (c ^ Suc n)/(1 - c)\ for N proof(cases \N < Suc n\) case True hence \\sum (\k. y (Suc k) - y k) {Suc n .. N}\ = 0\ by auto thus ?thesis using \c \ 0\ \c < 1\ by auto next case False hence \N \ Suc n\ by auto have \c^(Suc N) \ 0\ using \c \ 0\ by auto have \1 - c > 0\ by (simp add: \c < 1\) hence \(1 - c)/(1 - c) = 1\ by auto have \\sum (\k. y (Suc k) - y k) {Suc n .. N}\ \ (sum (\k. \y (Suc k) - y k\) {Suc n .. N})\ by (simp add: sum_norm_le) hence \\sum (\k. y (Suc k) - y k) {Suc n .. N}\ \ (sum (power c) {Suc n .. N})\ by (simp add: assms(2) sum_norm_le) hence \(1 - c) * \sum (\k. y (Suc k) - y k) {Suc n .. N}\ \ (1 - c) * (sum (power c) {Suc n .. N})\ - using \0 < 1 - c\ real_mult_le_cancel_iff2 by blast + using \0 < 1 - c\ mult_le_cancel_iff2 by blast also have \\ = c^(Suc n) - c^(Suc N)\ using Set_Interval.sum_gp_multiplied \Suc n \ N\ by blast also have \\ \ c^(Suc n)\ using \c^(Suc N) \ 0\ by auto finally have \(1 - c) * \\k = Suc n..N. y (Suc k) - y k\ \ c ^ Suc n\ by blast hence \((1 - c) * \\k = Suc n..N. y (Suc k) - y k\)/(1 - c) \ (c ^ Suc n)/(1 - c)\ using \0 < 1 - c\ by (smt divide_right_mono) thus \\\k = Suc n..N. y (Suc k) - y k\ \ (c ^ Suc n)/(1 - c)\ using \0 < 1 - c\ by auto qed have \(\ N. (sum (\k. y (Suc k) - y k) {Suc n .. N})) \ x - y (Suc n)\ by (metis (no_types) \y \ x\ identity_telescopic) hence \(\ N. \sum (\k. y (Suc k) - y k) {Suc n .. N}\) \ \x - y (Suc n)\\ using tendsto_norm by blast hence \\x - y (Suc n)\ \ (c ^ Suc n)/(1 - c)\ using norm_1 Lim_bounded by blast hence \\x - y (Suc n)\ \ (c ^ Suc n)/(1 - c)\ by auto moreover have \(c ^ Suc n)/(1 - c) = (c / (1 - c)) * (c ^ n)\ by (simp add: divide_inverse_commute) ultimately show \\x - y (Suc n)\ \ (c / (1 - c)) * (c ^ n)\ by linarith qed lemma onorm_open_ball: includes notation_norm shows \\f\ = Sup { \f *\<^sub>v x\ | x. \x\ < 1 }\ text \ Explanation: Let \<^term>\f\ be a bounded linear operator. The operator norm of \<^term>\f\ is the supremum of \<^term>\norm (f x)\ for \<^term>\x\ such that \<^term>\norm x < 1\. \ proof(cases \(UNIV::'a set) = 0\) case True hence \x = 0\ for x::'a by auto hence \f *\<^sub>v x = 0\ for x by (metis (full_types) blinfun.zero_right) hence \\f\ = 0\ by (simp add: blinfun_eqI zero_blinfun.rep_eq) have \{ \f *\<^sub>v x\ | x. \x\ < 1} = {0}\ by (smt Collect_cong \\x. f *\<^sub>v x = 0\ norm_zero singleton_conv) hence \Sup { \f *\<^sub>v x\ | x. \x\ < 1} = 0\ by simp thus ?thesis using \\f\ = 0\ by auto next case False hence \(UNIV::'a set) \ 0\ by simp have nonnegative: \\f *\<^sub>v x\ \ 0\ for x by simp have \\ x::'a. 
x \ 0\ using \UNIV \ 0\ by auto then obtain x::'a where \x \ 0\ by blast hence \\x\ \ 0\ by auto define y where \y = x /\<^sub>R \x\\ have \norm y = \ x /\<^sub>R \x\ \\ unfolding y_def by auto also have \\ = \x\ /\<^sub>R \x\\ by auto also have \\ = 1\ using \\x\ \ 0\ by auto finally have \\y\ = 1\ by blast hence norm_1_non_empty: \{ \f *\<^sub>v x\ | x. \x\ = 1} \ {}\ by blast have norm_1_bounded: \bdd_above { \f *\<^sub>v x\ | x. \x\ = 1}\ unfolding bdd_above_def apply auto by (metis norm_blinfun) have norm_less_1_non_empty: \{\f *\<^sub>v x\ | x. \x\ < 1} \ {}\ by (metis (mono_tags, lifting) Collect_empty_eq_bot bot_empty_eq empty_iff norm_zero zero_less_one) have norm_less_1_bounded: \bdd_above {\f *\<^sub>v x\ | x. \x\ < 1}\ proof- have \\r. \a r\ < 1 \ \f *\<^sub>v (a r)\ \ r\ for a :: "real \ 'a" proof- obtain r :: "('a \\<^sub>L 'b) \ real" where "\f x. 0 \ r f \ (bounded_linear f \ \f *\<^sub>v x\ \ \x\ * r f)" using bounded_linear.nonneg_bounded by moura have \\ \f\ < 0\ by simp hence "(\r. \f\ * \a r\ \ r) \ (\r. \a r\ < 1 \ \f *\<^sub>v a r\ \ r)" by (meson less_eq_real_def mult_le_cancel_left2) thus ?thesis using dual_order.trans norm_blinfun by blast qed hence \\ M. \ x. \x\ < 1 \ \f *\<^sub>v x\ \ M\ by metis thus ?thesis by auto qed have Sup_non_neg: \Sup {\f *\<^sub>v x\ |x. \x\ = 1} \ 0\ by (smt Collect_empty_eq cSup_upper mem_Collect_eq nonnegative norm_1_bounded norm_1_non_empty) have \{0::real} \ {}\ by simp have \bdd_above {0::real}\ by simp show \\f\ = Sup {\f *\<^sub>v x\ | x. \x\ < 1}\ proof(cases \\x. f *\<^sub>v x = 0\) case True have \\f *\<^sub>v x\ = 0\ for x by (simp add: True) hence \{\f *\<^sub>v x\ | x. \x\ < 1 } \ {0}\ by blast moreover have \{\f *\<^sub>v x\ | x. \x\ < 1 } \ {0}\ using calculation norm_less_1_non_empty by fastforce ultimately have \{\f *\<^sub>v x\ | x. \x\ < 1 } = {0}\ by blast hence Sup1: \Sup {\f *\<^sub>v x\ | x. \x\ < 1 } = 0\ by simp have \\f\ = 0\ by (simp add: True blinfun_eqI) moreover have \Sup {\f *\<^sub>v x\ | x. \x\ < 1} = 0\ using Sup1 by blast ultimately show ?thesis by simp next case False have norm_f_eq_leq: \y \ {\f *\<^sub>v x\ | x. \x\ = 1} \ y \ Sup {\f *\<^sub>v x\ | x. \x\ < 1}\ for y proof- assume \y \ {\f *\<^sub>v x\ | x. \x\ = 1}\ hence \\ x. y = \f *\<^sub>v x\ \ \x\ = 1\ by blast then obtain x where \y = \f *\<^sub>v x\\ and \\x\ = 1\ by auto define y' where \y' n = (1 - (inverse (real (Suc n)))) *\<^sub>R y\ for n have \y' n \ {\f *\<^sub>v x\ | x. \x\ < 1}\ for n proof- have \y' n = (1 - (inverse (real (Suc n)))) *\<^sub>R \f *\<^sub>v x\\ using y'_def \y = \f *\<^sub>v x\\ by blast also have \... = \(1 - (inverse (real (Suc n))))\ *\<^sub>R \f *\<^sub>v x\\ by (metis (mono_tags, hide_lams) \y = \f *\<^sub>v x\\ abs_1 abs_le_self_iff abs_of_nat abs_of_nonneg add_diff_cancel_left' add_eq_if cancel_comm_monoid_add_class.diff_cancel diff_ge_0_iff_ge eq_iff_diff_eq_0 inverse_1 inverse_le_iff_le nat.distinct(1) of_nat_0 of_nat_Suc of_nat_le_0_iff zero_less_abs_iff zero_neq_one) also have \... = \f *\<^sub>v ((1 - (inverse (real (Suc n)))) *\<^sub>R x)\\ by (simp add: blinfun.scaleR_right) finally have y'_1: \y' n = \f *\<^sub>v ( (1 - (inverse (real (Suc n)))) *\<^sub>R x)\\ by blast have \\(1 - (inverse (Suc n))) *\<^sub>R x\ = (1 - (inverse (real (Suc n)))) * \x\\ by (simp add: linordered_field_class.inverse_le_1_iff) hence \\(1 - (inverse (Suc n))) *\<^sub>R x\ < 1\ by (simp add: \\x\ = 1\) thus ?thesis using y'_1 by blast qed have \(\n. 
(1 - (inverse (real (Suc n)))) ) \ 1\ using Limits.LIMSEQ_inverse_real_of_nat_add_minus by simp hence \(\n. (1 - (inverse (real (Suc n)))) *\<^sub>R y) \ 1 *\<^sub>R y\ using Limits.tendsto_scaleR by blast hence \(\n. (1 - (inverse (real (Suc n)))) *\<^sub>R y) \ y\ by simp hence \(\n. y' n) \ y\ using y'_def by simp hence \y' \ y\ by simp have \y' n \ Sup {\f *\<^sub>v x\ | x. \x\ < 1}\ for n using cSup_upper \\n. y' n \ {\f *\<^sub>v x\ |x. \x\ < 1}\ norm_less_1_bounded by blast hence \y \ Sup {\f *\<^sub>v x\ | x. \x\ < 1}\ using \y' \ y\ Topological_Spaces.Sup_lim by (meson LIMSEQ_le_const2) thus ?thesis by blast qed hence \Sup {\f *\<^sub>v x\ | x. \x\ = 1} \ Sup {\f *\<^sub>v x\ | x. \x\ < 1}\ by (metis (lifting) cSup_least norm_1_non_empty) have \y \ {\f *\<^sub>v x\ | x. \x\ < 1} \ y \ Sup {\f *\<^sub>v x\ | x. \x\ = 1}\ for y proof(cases \y = 0\) case True thus ?thesis by (simp add: Sup_non_neg) next case False hence \y \ 0\ by blast assume \y \ {\f *\<^sub>v x\ | x. \x\ < 1}\ hence \\ x. y = \f *\<^sub>v x\ \ \x\ < 1\ by blast then obtain x where \y = \f *\<^sub>v x\\ and \\x\ < 1\ by blast have \(1/\x\) * y = (1/\x\) * \f x\\ by (simp add: \y = \f *\<^sub>v x\\) also have \... = \1/\x\\ * \f *\<^sub>v x\\ by simp also have \... = \(1/\x\) *\<^sub>R (f *\<^sub>v x)\\ by simp also have \... = \f *\<^sub>v ((1/\x\) *\<^sub>R x)\\ by (simp add: blinfun.scaleR_right) finally have \(1/\x\) * y = \f *\<^sub>v ((1/\x\) *\<^sub>R x)\\ by blast have \x \ 0\ using \y \ 0\ \y = \f *\<^sub>v x\\ blinfun.zero_right by auto have \\ (1/\x\) *\<^sub>R x \ = \ (1/\x\) \ * \x\\ by simp also have \... = (1/\x\) * \x\\ by simp finally have \\(1/\x\) *\<^sub>R x\ = 1\ using \x \ 0\ by simp hence \(1/\x\) * y \ { \f *\<^sub>v x\ | x. \x\ = 1}\ using \1 / \x\ * y = \f *\<^sub>v (1 / \x\) *\<^sub>R x\\ by blast hence \(1/\x\) * y \ Sup { \f *\<^sub>v x\ | x. \x\ = 1}\ by (simp add: cSup_upper norm_1_bounded) moreover have \y \ (1/\x\) * y\ by (metis \\x\ < 1\ \y = \f *\<^sub>v x\\ mult_le_cancel_right1 norm_not_less_zero order.strict_implies_order \x \ 0\ less_divide_eq_1_pos zero_less_norm_iff) ultimately show ?thesis by linarith qed hence \Sup { \f *\<^sub>v x\ | x. \x\ < 1} \ Sup { \f *\<^sub>v x\ | x. \x\ = 1}\ by (smt cSup_least norm_less_1_non_empty) hence \Sup { \f *\<^sub>v x\ | x. \x\ = 1} = Sup { \f *\<^sub>v x\ | x. \x\ < 1}\ using \Sup {\f *\<^sub>v x\ |x. norm x = 1} \ Sup { \f *\<^sub>v x\ |x. \x\ < 1}\ by linarith have f1: \(SUP x. \f *\<^sub>v x\ / \x\) = Sup { \f *\<^sub>v x\ / \x\ | x. True}\ by (simp add: full_SetCompr_eq) have \y \ { \f *\<^sub>v x\ / \x\ |x. True} \ y \ { \f *\<^sub>v x\ |x. \x\ = 1} \ {0}\ for y proof- assume \y \ { \f *\<^sub>v x\ / \x\ |x. True}\ show ?thesis proof(cases \y = 0\) case True thus ?thesis by simp next case False have \\ x. y = \f *\<^sub>v x\ / \x\\ using \y \ { \f *\<^sub>v x\ / \x\ |x. True}\ by auto then obtain x where \y = \f *\<^sub>v x\ / \x\\ by blast hence \y = \(1/\x\)\ * \ f *\<^sub>v x \\ by simp hence \y = \(1/\x\) *\<^sub>R (f *\<^sub>v x)\\ by simp hence \y = \f ((1/\x\) *\<^sub>R x)\\ by (simp add: blinfun.scaleR_right) moreover have \\ (1/\x\) *\<^sub>R x \ = 1\ using False \y = \f *\<^sub>v x\ / \x\\ by auto ultimately have \y \ {\f *\<^sub>v x\ |x. \x\ = 1}\ by blast thus ?thesis by blast qed qed moreover have \y \ {\f x\ |x. \x\ = 1} \ {0} \ y \ {\f *\<^sub>v x\ / \x\ |x. True}\ for y proof(cases \y = 0\) case True thus ?thesis by auto next case False hence \y \ {0}\ by simp moreover assume \y \ {\f *\<^sub>v x\ |x. 
\x\ = 1} \ {0}\ ultimately have \y \ {\f *\<^sub>v x\ |x. \x\ = 1}\ by simp then obtain x where \\x\ = 1\ and \y = \f *\<^sub>v x\\ by auto have \y = \f *\<^sub>v x\ / \x\\ using \\x\ = 1\ \y = \f *\<^sub>v x\\ by simp thus ?thesis by auto qed ultimately have \{\f *\<^sub>v x\ / \x\ |x. True} = {\f *\<^sub>v x\ |x. \x\ = 1} \ {0}\ by blast hence \Sup {\f *\<^sub>v x\ / \x\ |x. True} = Sup ({\f *\<^sub>v x\ |x. \x\ = 1} \ {0})\ by simp have "\r s. \ (r::real) \ s \ sup r s = s" by (metis (lifting) sup.absorb_iff1 sup_commute) hence \Sup ({\f *\<^sub>v x\ |x. \x\ = 1} \ {(0::real)}) = max (Sup {\f *\<^sub>v x\ |x. \x\ = 1}) (Sup {0::real})\ using \0 \ Sup {\f *\<^sub>v x\ |x. \x\ = 1}\ \bdd_above {0}\ \{0} \ {}\ cSup_singleton cSup_union_distrib max.absorb_iff1 sup_commute norm_1_bounded norm_1_non_empty by (metis (no_types, lifting) ) moreover have \Sup {(0::real)} = (0::real)\ by simp ultimately have \Sup ({\f *\<^sub>v x\ |x. \x\ = 1} \ {0}) = Sup {\f *\<^sub>v x\ |x. \x\ = 1}\ using Sup_non_neg by linarith moreover have \Sup ( {\f *\<^sub>v x\ |x. \x\ = 1} \ {0}) = max (Sup {\f *\<^sub>v x\ |x. \x\ = 1}) (Sup {0}) \ using Sup_non_neg \Sup ({\f *\<^sub>v x\ |x. \x\ = 1} \ {0}) = max (Sup {\f *\<^sub>v x\ |x. \x\ = 1}) (Sup {0})\ by auto ultimately have f2: \Sup {\f *\<^sub>v x\ / \x\ | x. True} = Sup {\f *\<^sub>v x\ | x. \x\ = 1}\ using \Sup {\f *\<^sub>v x\ / \x\ |x. True} = Sup ({\f *\<^sub>v x\ |x. \x\ = 1} \ {0})\ by linarith have \(SUP x. \f *\<^sub>v x\ / \x\) = Sup {\f *\<^sub>v x\ | x. \x\ = 1}\ using f1 f2 by linarith hence \(SUP x. \f *\<^sub>v x\ / \x\) = Sup {\f *\<^sub>v x\ | x. \x\ < 1 }\ by (simp add: \Sup {\f *\<^sub>v x\ |x. \x\ = 1} = Sup {\f *\<^sub>v x\ |x. \x\ < 1}\) thus ?thesis apply transfer by (simp add: onorm_def) qed qed lemma onorm_r: includes notation_norm assumes \r > 0\ shows \\f\ = Sup ((\x. \f *\<^sub>v x\) ` (ball 0 r)) / r\ text \ Explanation: The norm of \<^term>\f\ is \<^term>\1/r\ of the supremum of the norm of \<^term>\f *\<^sub>v x\ for \<^term>\x\ in the ball of radius \<^term>\r\ centered at the origin. \ proof- have \\f\ = Sup {\f *\<^sub>v x\ |x. \x\ < 1}\ using onorm_open_ball by blast moreover have \{\f *\<^sub>v x\ |x. \x\ < 1} = (\x. \f *\<^sub>v x\) ` (ball 0 1)\ unfolding ball_def by auto ultimately have onorm_f: \\f\ = Sup ((\x. \f *\<^sub>v x\) ` (ball 0 1))\ by simp have s2: \x \ (\t. r *\<^sub>R \f *\<^sub>v t\) ` ball 0 1 \ x \ r * Sup ((\t. \f *\<^sub>v t\) ` ball 0 1)\ for x proof- assume \x \ (\t. r *\<^sub>R \f *\<^sub>v t\) ` ball 0 1\ hence \\ t. x = r *\<^sub>R \f *\<^sub>v t\ \ \t\ < 1\ by auto then obtain t where \x = r *\<^sub>R \f *\<^sub>v t\\ and \\t\ < 1\ by blast define y where \y = x /\<^sub>R r\ have \x = r * (inverse r * x)\ using \x = r *\<^sub>R norm (f t)\ by auto hence \x - (r * (inverse r * x)) \ 0\ by linarith hence \x \ r * (x /\<^sub>R r)\ by auto have \y \ (\k. \f *\<^sub>v k\) ` ball 0 1\ unfolding y_def by (smt \x \ (\t. r *\<^sub>R \f *\<^sub>v t\) ` ball 0 1\ assms image_iff inverse_inverse_eq pos_le_divideR_eq positive_imp_inverse_positive) moreover have \x \ r * y\ using \x \ r * (x /\<^sub>R r)\ y_def by blast ultimately have y_norm_f: \y \ (\t. \f *\<^sub>v t\) ` ball 0 1 \ x \ r * y\ by blast have \(\t. \f *\<^sub>v t\) ` ball 0 1 \ {}\ by simp moreover have \bdd_above ((\t. \f *\<^sub>v t\) ` ball 0 1)\ by (simp add: bounded_linear_image blinfun.bounded_linear_right bounded_imp_bdd_above bounded_norm_comp) moreover have \\ y. y \ (\t. 
\f *\<^sub>v t\) ` ball 0 1 \ x \ r * y\ using y_norm_f by blast ultimately show ?thesis by (smt \0 < r\ cSup_upper ordered_comm_semiring_class.comm_mult_left_mono) qed have s3: \(\x. x \ (\t. r * \f *\<^sub>v t\) ` ball 0 1 \ x \ y) \ r * Sup ((\t. \f *\<^sub>v t\) ` ball 0 1) \ y\ for y proof- assume \\x. x \ (\t. r * \f *\<^sub>v t\) ` ball 0 1 \ x \ y\ have x_leq: \x \ (\t. \f *\<^sub>v t\) ` ball 0 1 \ x \ y / r\ for x proof- assume \x \ (\t. \f *\<^sub>v t\) ` ball 0 1\ then obtain t where \t \ ball (0::'a) 1\ and \x = \f *\<^sub>v t\\ by auto define x' where \x' = r *\<^sub>R x\ have \x' = r * \f *\<^sub>v t\\ by (simp add: \x = \f *\<^sub>v t\\ x'_def) hence \x' \ (\t. r * \f *\<^sub>v t\) ` ball 0 1\ using \t \ ball (0::'a) 1\ by auto hence \x' \ y\ using \\x. x \ (\t. r * \f *\<^sub>v t\) ` ball 0 1 \ x \ y\ by blast thus \x \ y / r\ unfolding x'_def using \r > 0\ by (simp add: mult.commute pos_le_divide_eq) qed have \(\t. \f *\<^sub>v t\) ` ball 0 1 \ {}\ by simp moreover have \bdd_above ((\t. \f *\<^sub>v t\) ` ball 0 1)\ by (simp add: bounded_linear_image blinfun.bounded_linear_right bounded_imp_bdd_above bounded_norm_comp) ultimately have \Sup ((\t. \f *\<^sub>v t\) ` ball 0 1) \ y/r\ using x_leq by (simp add: \bdd_above ((\t. \f *\<^sub>v t\) ` ball 0 1)\ cSup_least) thus ?thesis using \r > 0\ by (smt divide_strict_right_mono nonzero_mult_div_cancel_left) qed have norm_scaleR: \norm \ ((*\<^sub>R) r) = ((*\<^sub>R) \r\) \ (norm::'a \ real)\ by auto have f_x1: \f (r *\<^sub>R x) = r *\<^sub>R f x\ for x by (simp add: blinfun.scaleR_right) have \ball (0::'a) r = ((*\<^sub>R) r) ` (ball 0 1)\ by (smt assms ball_scale nonzero_mult_div_cancel_left right_inverse_eq scale_zero_right) hence \Sup ((\t. \f *\<^sub>v t\) ` (ball 0 r)) = Sup ((\t. \f *\<^sub>v t\) ` (((*\<^sub>R) r) ` (ball 0 1)))\ by simp also have \\ = Sup (((\t. \f *\<^sub>v t\) \ ((*\<^sub>R) r)) ` (ball 0 1))\ using Sup.SUP_image by auto also have \\ = Sup ((\t. \f *\<^sub>v (r *\<^sub>R t)\) ` (ball 0 1))\ using f_x1 by (simp add: comp_assoc) also have \\ = Sup ((\t. \r\ *\<^sub>R \f *\<^sub>v t\) ` (ball 0 1))\ using norm_scaleR f_x1 by auto also have \\ = Sup ((\t. r *\<^sub>R \f *\<^sub>v t\) ` (ball 0 1))\ using \r > 0\ by auto also have \\ = r * Sup ((\t. \f *\<^sub>v t\) ` (ball 0 1))\ apply (rule cSup_eq_non_empty) apply simp using s2 apply auto using s3 by auto also have \\ = r * \f\\ using onorm_f by auto finally have \Sup ((\t. \f *\<^sub>v t\) ` ball 0 r) = r * \f\\ by blast thus \\f\ = Sup ((\x. \f *\<^sub>v x\) ` (ball 0 r)) / r\ using \r > 0\ by simp qed text\Pointwise convergence\ definition pointwise_convergent_to :: \( nat \ ('a \ 'b::topological_space) ) \ ('a \ 'b) \ bool\ (\((_)/ \pointwise\ (_))\ [60, 60] 60) where \pointwise_convergent_to x l = (\ t::'a. (\ n. (x n) t) \ l t)\ lemma linear_limit_linear: fixes f :: \_ \ ('a::real_vector \ 'b::real_normed_vector)\ assumes \\n. linear (f n)\ and \f \pointwise\ F\ shows \linear F\ text\ Explanation: If a family of linear operators converges pointwise, then the limit is also a linear operator. \ proof show "F (x + y) = F x + F y" for x y proof- have "\a. F a = lim (\n. f n a)" using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by (metis (full_types) limI) moreover have "\f b c g. (lim (\n. g n + f n) = (b::'b) + c \ \ f \ c) \ \ g \ b" by (metis (no_types) limI tendsto_add) moreover have "\a. (\n. f n a) \ F a" using assms(2) pointwise_convergent_to_def by force ultimately have lim_sum: \lim (\ n. (f n) x + (f n) y) = lim (\ n. 
(f n) x) + lim (\ n. (f n) y)\ by metis have \(f n) (x + y) = (f n) x + (f n) y\ for n using \\ n. linear (f n)\ unfolding linear_def using Real_Vector_Spaces.linear_iff assms(1) by auto hence \lim (\ n. (f n) (x + y)) = lim (\ n. (f n) x + (f n) y)\ by simp hence \lim (\ n. (f n) (x + y)) = lim (\ n. (f n) x) + lim (\ n. (f n) y)\ using lim_sum by simp moreover have \(\ n. (f n) (x + y)) \ F (x + y)\ using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by blast moreover have \(\ n. (f n) x) \ F x\ using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by blast moreover have \(\ n. (f n) y) \ F y\ using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by blast ultimately show ?thesis by (metis limI) qed show "F (r *\<^sub>R x) = r *\<^sub>R F x" for r and x proof- have \(f n) (r *\<^sub>R x) = r *\<^sub>R (f n) x\ for n using \\ n. linear (f n)\ by (simp add: Real_Vector_Spaces.linear_def real_vector.linear_scale) hence \lim (\ n. (f n) (r *\<^sub>R x)) = lim (\ n. r *\<^sub>R (f n) x)\ by simp have \convergent (\ n. (f n) x)\ by (metis assms(2) convergentI pointwise_convergent_to_def) moreover have \isCont (\ t::'b. r *\<^sub>R t) tt\ for tt by (simp add: bounded_linear_scaleR_right) ultimately have \lim (\ n. r *\<^sub>R ((f n) x)) = r *\<^sub>R lim (\ n. (f n) x)\ using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by (metis (mono_tags) isCont_tendsto_compose limI) hence \lim (\ n. (f n) (r *\<^sub>R x)) = r *\<^sub>R lim (\ n. (f n) x)\ using \lim (\ n. (f n) (r *\<^sub>R x)) = lim (\ n. r *\<^sub>R (f n) x)\ by simp moreover have \(\ n. (f n) x) \ F x\ using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by blast moreover have \(\ n. (f n) (r *\<^sub>R x)) \ F (r *\<^sub>R x)\ using \f \pointwise\ F\ unfolding pointwise_convergent_to_def by blast ultimately show ?thesis by (metis limI) qed qed lemma non_Cauchy_unbounded: fixes a ::\_ \ real\ assumes \\n. a n \ 0\ and \e > 0\ and \\M. \m. \n. m \ M \ n \ M \ m > n \ sum a {Suc n..m} \ e\ shows \(\n. (sum a {0..n})) \ \\ text\ Explanation: If the sequence of partial sums of nonnegative terms is not Cauchy, then it converges to infinite. \ proof- define S::"ereal set" where \S = range (\n. sum a {0..n})\ have \\s\S. k*e \ s\ for k::nat proof(induction k) case 0 from \\M. \m. \n. m \ M \ n \ M \ m > n \ sum a {Suc n..m} \ e\ obtain m n where \m \ 0\ and \n \ 0\ and \m > n\ and \sum a {Suc n..m} \ e\ by blast have \n < Suc n\ by simp hence \{0..n} \ {Suc n..m} = {0..m}\ using Set_Interval.ivl_disj_un(7) \n < m\ by auto moreover have \finite {0..n}\ by simp moreover have \finite {Suc n..m}\ by simp moreover have \{0..n} \ {Suc n..m} = {}\ by simp ultimately have \sum a {0..n} + sum a {Suc n..m} = sum a {0..m}\ by (metis sum.union_disjoint) moreover have \sum a {Suc n..m} > 0\ using \e > 0\ \sum a {Suc n..m} \ e\ by linarith moreover have \sum a {0..n} \ 0\ by (simp add: assms(1) sum_nonneg) ultimately have \sum a {0..m} > 0\ by linarith moreover have \sum a {0..m} \ S\ unfolding S_def by blast ultimately have \\s\S. 0 \ s\ using ereal_less_eq(5) by fastforce thus ?case by (simp add: zero_ereal_def) next case (Suc k) assume \\s\S. k*e \ s\ then obtain s where \s\S\ and \ereal (k * e) \ s\ by blast have \\N. s = sum a {0..N}\ using \s\S\ unfolding S_def by blast then obtain N where \s = sum a {0..N}\ by blast from \\M. \m. \n. 
m \ M \ n \ M \ m > n \ sum a {Suc n..m} \ e\ obtain m n where \m \ Suc N\ and \n \ Suc N\ and \m > n\ and \sum a {Suc n..m} \ e\ by blast have \finite {Suc N..n}\ by simp moreover have \finite {Suc n..m}\ by simp moreover have \{Suc N..n} \ {Suc n..m} = {Suc N..m}\ using Set_Interval.ivl_disj_un by (smt \Suc N \ n\ \n < m\ atLeastSucAtMost_greaterThanAtMost less_imp_le_nat) moreover have \{} = {Suc N..n} \ {Suc n..m}\ by simp ultimately have \sum a {Suc N..m} = sum a {Suc N..n} + sum a {Suc n..m}\ by (metis sum.union_disjoint) moreover have \sum a {Suc N..n} \ 0\ using \\n. a n \ 0\ by (simp add: sum_nonneg) ultimately have \sum a {Suc N..m} \ e\ using \e \ sum a {Suc n..m}\ by linarith have \finite {0..N}\ by simp have \finite {Suc N..m}\ by simp moreover have \{0..N} \ {Suc N..m} = {0..m}\ using Set_Interval.ivl_disj_un(7) \Suc N \ m\ by auto moreover have \{0..N} \ {Suc N..m} = {}\ by simp ultimately have \sum a {0..N} + sum a {Suc N..m} = sum a {0..m}\ by (metis \finite {0..N}\ sum.union_disjoint) hence \e + k * e \ sum a {0..m}\ using \ereal (real k * e) \ s\ \s = ereal (sum a {0..N})\ \e \ sum a {Suc N..m}\ by auto moreover have \e + k * e = (Suc k) * e\ by (simp add: semiring_normalization_rules(3)) ultimately have \(Suc k) * e \ sum a {0..m}\ by linarith hence \ereal ((Suc k) * e) \ sum a {0..m}\ by auto moreover have \sum a {0..m}\S\ unfolding S_def by blast ultimately show ?case by blast qed hence \\s\S. (real n) \ s\ for n by (meson assms(2) ereal_le_le ex_less_of_nat_mult less_le_not_le) hence \Sup S = \\ using Sup_le_iff Sup_subset_mono dual_order.strict_trans1 leD less_PInf_Ex_of_nat subsetI by metis hence Sup: \Sup ((range (\ n. (sum a {0..n})))::ereal set) = \\ using S_def by blast have \incseq (\n. (sum a {.. using \\n. a n \ 0\ using Extended_Real.incseq_sumI by auto hence \incseq (\n. (sum a {..< Suc n}))\ by (meson incseq_Suc_iff) hence \incseq (\n. (sum a {0..n})::ereal)\ using incseq_ereal by (simp add: atLeast0AtMost lessThan_Suc_atMost) hence \(\n. sum a {0..n}) \ Sup (range (\n. (sum a {0..n})::ereal))\ using LIMSEQ_SUP by auto thus ?thesis using Sup PInfty_neq_ereal by auto qed lemma sum_Cauchy_positive: fixes a ::\_ \ real\ assumes \\n. a n \ 0\ and \\K. \n. (sum a {0..n}) \ K\ shows \Cauchy (\n. sum a {0..n})\ text\ Explanation: If a series of nonnegative reals is bounded, then the series is Cauchy. \ proof (unfold Cauchy_altdef2, rule, rule) fix e::real assume \e>0\ have \\M. \m\M. \n\M. m > n \ sum a {Suc n..m} < e\ proof(rule classical) assume \\(\M. \m\M. \n\M. m > n \ sum a {Suc n..m} < e)\ hence \\M. \m. \n. m \ M \ n \ M \ m > n \ \(sum a {Suc n..m} < e)\ by blast hence \\M. \m. \n. m \ M \ n \ M \ m > n \ sum a {Suc n..m} \ e\ by fastforce hence \(\n. (sum a {0..n}) ) \ \\ using non_Cauchy_unbounded \0 < e\ assms(1) by blast from \\K. \n. sum a {0..n} \ K\ obtain K where \\n. sum a {0..n} \ K\ by blast from \(\n. sum a {0..n}) \ \\ have \\B. \N. \n\N. (\ n. (sum a {0..n}) ) n \ B\ using Lim_PInfty by simp hence \\n. (sum a {0..n}) \ K+1\ using ereal_less_eq(3) by blast thus ?thesis using \\n. (sum a {0..n}) \ K\ by smt qed have \sum a {Suc n..m} = sum a {0..m} - sum a {0..n}\ if "m > n" for m n apply (simp add: that atLeast0AtMost) using sum_up_index_split by (smt less_imp_add_positive that) hence \\M. \m\M. \n\M. m > n \ sum a {0..m} - sum a {0..n} < e\ using \\M. \m\M. \n\M. m > n \ sum a {Suc n..m} < e\ by smt from \\M. \m\M. \n\M. m > n \ sum a {0..m} - sum a {0..n} < e\ obtain M where \\m\M. \n\M. 
m > n \ sum a {0..m} - sum a {0..n} < e\ by blast moreover have \m > n \ sum a {0..m} \ sum a {0..n}\ for m n using \\ n. a n \ 0\ by (simp add: sum_mono2) ultimately have \\M. \m\M. \n\M. m > n \ \sum a {0..m} - sum a {0..n}\ < e\ by auto hence \\M. \m\M. \n\M. m \ n \ \sum a {0..m} - sum a {0..n}\ < e\ by (metis \0 < e\ abs_zero cancel_comm_monoid_add_class.diff_cancel diff_is_0_eq' less_irrefl_nat linorder_neqE_nat zero_less_diff) hence \\M. \m\M. \n\M. \sum a {0..m} - sum a {0..n}\ < e\ by (metis abs_minus_commute nat_le_linear) hence \\M. \m\M. \n\M. dist (sum a {0..m}) (sum a {0..n}) < e\ by (simp add: dist_real_def) hence \\M. \m\M. \n\M. dist (sum a {0..m}) (sum a {0..n}) < e\ by blast thus \\N. \n\N. dist (sum a {0..n}) (sum a {0..N}) < e\ by auto qed lemma convergent_series_Cauchy: fixes a::\nat \ real\ and \::\nat \ 'a::metric_space\ assumes \\M. \n. sum a {0..n} \ M\ and \\n. dist (\ (Suc n)) (\ n) \ a n\ shows \Cauchy \\ text\ Explanation: Let \<^term>\a\ be a real-valued sequence and let \<^term>\\\ be sequence in a metric space. If the partial sums of \<^term>\a\ are uniformly bounded and the distance between consecutive terms of \<^term>\\\ are bounded by the sequence \<^term>\a\, then \<^term>\\\ is Cauchy.\ proof (unfold Cauchy_altdef2, rule, rule) fix e::real assume \e > 0\ have \\k. a k \ 0\ using \\n. dist (\ (Suc n)) (\ n) \ a n\ dual_order.trans zero_le_dist by blast hence \Cauchy (\k. sum a {0..k})\ using \\M. \n. sum a {0..n} \ M\ sum_Cauchy_positive by blast hence \\M. \m\M. \n\M. dist (sum a {0..m}) (sum a {0..n}) < e\ unfolding Cauchy_def using \e > 0\ by blast hence \\M. \m\M. \n\M. m > n \ dist (sum a {0..m}) (sum a {0..n}) < e\ by blast have \dist (sum a {0..m}) (sum a {0..n}) = sum a {Suc n..m}\ if \n for m n proof - have \n < Suc n\ by simp have \finite {0..n}\ by simp moreover have \finite {Suc n..m}\ by simp moreover have \{0..n} \ {Suc n..m} = {0..m}\ using \n < Suc n\ \n < m\ by auto moreover have \{0..n} \ {Suc n..m} = {}\ by simp ultimately have sum_plus: \(sum a {0..n}) + sum a {Suc n..m} = (sum a {0..m})\ by (metis sum.union_disjoint) have \dist (sum a {0..m}) (sum a {0..n}) = \(sum a {0..m}) - (sum a {0..n})\\ using dist_real_def by blast moreover have \(sum a {0..m}) - (sum a {0..n}) = sum a {Suc n..m}\ using sum_plus by linarith ultimately show ?thesis by (simp add: \\k. 0 \ a k\ sum_nonneg) qed hence sum_a: \\M. \m\M. \n\M. m > n \ sum a {Suc n..m} < e\ by (metis \\M. \m\M. \n\M. dist (sum a {0..m}) (sum a {0..n}) < e\) obtain M where \\m\M. \n\M. m > n \ sum a {Suc n..m} < e\ using sum_a \e > 0\ by blast hence \\m. \n. Suc m \ Suc M \ Suc n \ Suc M \ Suc m > Suc n \ sum a {Suc n..Suc m - 1} < e\ by simp hence \\m\1. \n\1. m \ Suc M \ n \ Suc M \ m > n \ sum a {n..m - 1} < e\ by (metis Suc_le_D) hence sum_a2: \\M. \m\M. \n\M. m > n \ sum a {n..m-1} < e\ by (meson add_leE) have \dist (\ (n+p+1)) (\ n) \ sum a {n..n+p}\ for p n :: nat proof(induction p) case 0 thus ?case by (simp add: assms(2)) next case (Suc p) thus ?case by (smt Suc_eq_plus1 add_Suc_right add_less_same_cancel1 assms(2) dist_self dist_triangle2 gr_implies_not0 sum.cl_ivl_Suc) qed hence \m > n \ dist (\ m) (\ n) \ sum a {n..m-1}\ for m n :: nat by (metis Suc_eq_plus1 Suc_le_D diff_Suc_1 gr0_implies_Suc less_eq_Suc_le less_imp_Suc_add zero_less_Suc) hence \\M. \m\M. \n\M. m > n \ dist (\ m) (\ n) < e\ using sum_a2 \e > 0\ by smt thus "\N. \n\N. 
dist (\ n) (\ N) < e" using \0 < e\ by fastforce qed unbundle notation_blinfun_apply unbundle no_notation_norm end diff --git a/thys/Differential_Dynamic_Logic/Differential_Axioms.thy b/thys/Differential_Dynamic_Logic/Differential_Axioms.thy --- a/thys/Differential_Dynamic_Logic/Differential_Axioms.thy +++ b/thys/Differential_Dynamic_Logic/Differential_Axioms.thy @@ -1,2648 +1,2648 @@ theory "Differential_Axioms" imports Ordinary_Differential_Equations.ODE_Analysis "Ids" "Lib" "Syntax" "Denotational_Semantics" "Frechet_Correctness" "Axioms" "Coincidence" begin context ids begin section \Differential Axioms\ text \Differential axioms fall into two categories: Axioms for computing the derivatives of terms and axioms for proving properties of ODEs. The derivative axioms are all corollaries of the frechet correctness theorem. The ODE axioms are more involved, often requiring extensive use of the ODE libraries.\ subsection \Derivative Axioms\ definition diff_const_axiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_const_axiom \ Equals (Differential ($f fid1 empty)) (Const 0)" definition diff_var_axiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_var_axiom \ Equals (Differential (Var vid1)) (DiffVar vid1)" definition state_fun ::"'sf \ ('sf, 'sz) trm" where [axiom_defs]:"state_fun f = ($f f (\i. Var i))" definition diff_plus_axiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_plus_axiom \ Equals (Differential (Plus (state_fun fid1) (state_fun fid2))) (Plus (Differential (state_fun fid1)) (Differential (state_fun fid2)))" definition diff_times_axiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_times_axiom \ Equals (Differential (Times (state_fun fid1) (state_fun fid2))) (Plus (Times (Differential (state_fun fid1)) (state_fun fid2)) (Times (state_fun fid1) (Differential (state_fun fid2))))" \ \\[y=g(x)][y'=1](f(g(x))' = f(y)')\\ definition diff_chain_axiom::"('sf, 'sc, 'sz) formula" where [axiom_defs]:"diff_chain_axiom \ [[Assign vid2 (f1 fid2 vid1)]]([[DiffAssign vid2 (Const 1)]] (Equals (Differential ($f fid1 (singleton (f1 fid2 vid1)))) (Times (Differential (f1 fid1 vid2)) (Differential (f1 fid2 vid1)))))" subsection \ODE Axioms\ definition DWaxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DWaxiom = ([[EvolveODE (OVar vid1) (Predicational pid1)]](Predicational pid1))" definition DWaxiom' :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DWaxiom' = ([[EvolveODE (OSing vid1 (Function fid1 (singleton (Var vid1)))) (Prop vid2 (singleton (Var vid1)))]](Prop vid2 (singleton (Var vid1))))" definition DCaxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DCaxiom = ( ([[EvolveODE (OVar vid1) (Predicational pid1)]]Predicational pid3) \ (([[EvolveODE (OVar vid1) (Predicational pid1)]](Predicational pid2)) \ ([[EvolveODE (OVar vid1) (And (Predicational pid1) (Predicational pid3))]]Predicational pid2)))" definition DEaxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DEaxiom = (([[EvolveODE (OSing vid1 (f1 fid1 vid1)) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE (OSing vid1 (f1 fid1 vid1)) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))" definition DSaxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DSaxiom = (([[EvolveODE (OSing vid1 (f0 fid1)) (p1 vid2 vid1)]]p1 vid3 vid1) \ (Forall vid2 (Implies (Geq (Var vid2) (Const 0)) (Implies (Forall vid3 (Implies (And (Geq (Var vid3) (Const 0)) (Geq (Var vid2) (Var vid3))) (Prop vid2 (singleton (Plus (Var vid1) (Times (f0 fid1) (Var vid3))))))) ([[Assign vid1 (Plus (Var vid1) (Times (f0 fid1) (Var 
vid2)))]]p1 vid3 vid1)))))" \ \\(Q \ [c&Q](f(x)' \ g(x)'))\\ \ \\\\\ \ \\([c&Q](f(x) \ g(x))) --> (Q \ (f(x) \ g(x))\\ definition DIGeqaxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DIGeqaxiom = Implies (Implies (Prop vid1 empty) ([[EvolveODE (OVar vid1) (Prop vid1 empty)]](Geq (Differential (f1 fid1 vid1)) (Differential (f1 fid2 vid1))))) (Implies (Implies(Prop vid1 empty) (Geq (f1 fid1 vid1) (f1 fid2 vid1))) ([[EvolveODE (OVar vid1) (Prop vid1 empty)]](Geq (f1 fid1 vid1) (f1 fid2 vid1))))" \ \\g(x) > h(x) \ [x'=f(x), c & p(x)](g(x)' \ h(x)') \ [x'=f(x), c & p(x)]g(x) > h(x)\\ \ \\(Q \ [c&Q](f(x)' \ g(x)'))\\ \ \\\\\ \ \\([c&Q](f(x) > g(x))) <-> (Q \ (f(x) > g(x))\\ definition DIGraxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DIGraxiom = Implies (Implies (Prop vid1 empty) ([[EvolveODE (OVar vid1) (Prop vid1 empty)]](Geq (Differential (f1 fid1 vid1)) (Differential (f1 fid2 vid1))))) (Implies (Implies(Prop vid1 empty) (Greater (f1 fid1 vid1) (f1 fid2 vid1))) ([[EvolveODE (OVar vid1) (Prop vid1 empty)]](Greater (f1 fid1 vid1) (f1 fid2 vid1))))" \ \\[{1' = 1(1) & 1(1)}]2(1) <->\\ \ \\\2. [{1'=1(1), 2' = 2(1)*2 + 3(1) & 1(1)}]2(1)*)\\ definition DGaxiom :: "('sf, 'sc, 'sz) formula" where [axiom_defs]:"DGaxiom = (([[EvolveODE (OSing vid1 (f1 fid1 vid1)) (p1 vid1 vid1)]]p1 vid2 vid1) \ (Exists vid2 ([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1)) (OSing vid2 (Plus (Times (f1 fid2 vid1) (Var vid2)) (f1 fid3 vid1)))) (p1 vid1 vid1)]] p1 vid2 vid1)))" subsection \Proofs for Derivative Axioms\ lemma constant_deriv_inner: assumes interp:"\x i. (Functions I i has_derivative FunctionFrechet I i x) (at x)" shows "FunctionFrechet I id1 (vec_lambda (\i. sterm_sem I (empty i) (fst \))) (vec_lambda(\i. frechet I (empty i) (fst \) (snd \)))= 0" proof - have empty_zero:"(vec_lambda(\i. frechet I (empty i) (fst \) (snd \))) = 0" using local.empty_def Cart_lambda_cong frechet.simps(5) zero_vec_def apply auto apply(rule vec_extensionality) using local.empty_def Cart_lambda_cong frechet.simps(5) zero_vec_def by (simp add: local.empty_def) let ?x = "(vec_lambda (\i. 
sterm_sem I (empty i) (fst \)))" from interp have has_deriv:"(Functions I id1 has_derivative FunctionFrechet I id1 ?x) (at ?x)" by auto then have f_linear:"linear (FunctionFrechet I id1 ?x)" using Deriv.has_derivative_linear by auto then show ?thesis using empty_zero f_linear linear_0 by (auto) qed lemma constant_deriv_zero:"is_interp I \ directional_derivative I ($f id1 empty) \ = 0" apply(simp only: is_interp_def directional_derivative_def frechet.simps frechet_correctness) apply(rule constant_deriv_inner) apply(auto) done theorem diff_const_axiom_valid: "valid diff_const_axiom" apply(simp only: valid_def diff_const_axiom_def equals_sem) apply(rule allI | rule impI)+ apply(simp only: dterm_sem.simps constant_deriv_zero sterm_sem.simps) done theorem diff_var_axiom_valid: "valid diff_var_axiom" apply(auto simp add: diff_var_axiom_def valid_def directional_derivative_def) by (metis inner_prod_eq) theorem diff_plus_axiom_valid: "valid diff_plus_axiom" apply(auto simp add: diff_plus_axiom_def valid_def) subgoal for I a b using frechet_correctness[of I "(Plus (state_fun fid1) (state_fun fid2))" b] unfolding state_fun_def apply (auto intro: dfree.intros) unfolding directional_derivative_def by auto done theorem diff_times_axiom_valid: "valid diff_times_axiom" apply(auto simp add: diff_times_axiom_def valid_def) subgoal for I a b using frechet_correctness[of I "(Times (state_fun fid1) (state_fun fid2))" b] unfolding state_fun_def apply (auto intro: dfree.intros) unfolding directional_derivative_def by auto done subsection \Proofs for ODE Axioms\ lemma DW_valid:"valid DWaxiom" apply(unfold DWaxiom_def valid_def Let_def impl_sem ) apply(safe) apply(auto simp only: fml_sem.simps prog_sem.simps box_sem) subgoal for I aa ba ab bb sol t using mk_v_agree[of I "(OVar vid1)" "(ab,bb)" "sol t"] Vagree_univ[of "aa" "ba" "sol t" "ODEs I vid1 (sol t)"] solves_ode_domainD by (fastforce) done lemma DE_lemma: fixes ab bb::"'sz simple_state" and sol::"real \ 'sz simple_state" and I::"('sf, 'sc, 'sz) interp" shows "repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))) = mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)" proof have set_eq:" {Inl vid1, Inr vid1} = {Inr vid1, Inl vid1}" by auto have agree:"Vagree (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) (mk_xode I (OSing vid1 (f1 fid1 vid1)) (sol t)) {Inl vid1, Inr vid1}" using mk_v_agree[of I "(OSing vid1 (f1 fid1 vid1))" "(ab, bb)" "(sol t)"] unfolding semBV.simps using set_eq by auto have fact:"dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) = snd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) $ vid1" using agree unfolding Vagree_def dterm_sem.simps f1_def mk_xode.simps proof - assume alls:"(\i. Inl i \ {Inl vid1, Inr vid1} \ fst (mk_v I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (ab, bb) (sol t)) $ i = fst (sol t, ODE_sem I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (sol t)) $ i) \ (\i. Inr i \ {Inl vid1, Inr vid1} \ snd (mk_v I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (ab, bb) (sol t)) $ i = snd (sol t, ODE_sem I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (sol t)) $ i)" hence atVid'':"snd (mk_v I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (ab, bb) (sol t)) $ vid1 = sterm_sem I ($f fid1 (singleton (trm.Var vid1))) (sol t)" by auto have argsEq:"(\ i. dterm_sem I (singleton (trm.Var vid1) i) (mk_v I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (ab, bb) (sol t))) = (\ i. 
sterm_sem I (singleton (trm.Var vid1) i) (sol t))" using alls f1_def by auto thus "Functions I fid1 (\ i. dterm_sem I (singleton (trm.Var vid1) i) (mk_v I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (ab, bb) (sol t))) = snd (mk_v I (OSing vid1 ($f fid1 (singleton (trm.Var vid1)))) (ab, bb) (sol t)) $ vid1" by (simp only: atVid'' ODE_sem.simps sterm_sem.simps dterm_sem.simps) qed have eqSnd:"(\ y. if vid1 = y then snd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) $ vid1 else snd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) $ y) = snd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))" by (simp add: vec_extensionality) have truth:"repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))) = mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)" using fact by (auto simp only: eqSnd repd.simps fact prod.collapse split: if_split) thus "fst (repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)))) = fst (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))" "snd (repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)))) = snd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) " by auto qed lemma DE_valid:"valid DEaxiom" proof - have dsafe:"dsafe ($f fid1 (singleton (trm.Var vid1)))" unfolding singleton_def by(auto intro: dsafe.intros) have osafe:"osafe(OSing vid1 (f1 fid1 vid1))" unfolding f1_def empty_def singleton_def using dsafe osafe.intros dsafe.intros by (simp add: osafe_Sing dfree_Const) have fsafe:"fsafe (p1 vid2 vid1)" unfolding p1_def singleton_def using hpsafe_fsafe.intros(10) using dsafe dsafe_Fun_simps image_iff by (simp add: dfree_Const) show "valid DEaxiom" apply(auto simp only: DEaxiom_def valid_def Let_def iff_sem impl_sem) apply(auto simp only: fml_sem.simps prog_sem.simps mem_Collect_eq box_sem) proof - fix I::"('sf,'sc,'sz) interp" and aa ba ab bb sol and t::real and ac bc assume "is_interp I" assume allw:"\\. (\\ sol t. ((ab, bb), \) = (\, mk_v I (OSing vid1 (f1 fid1 vid1)) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OSing vid1 (f1 fid1 vid1)))) {0..t} {x. mk_v I (OSing vid1 (f1 fid1 vid1)) \ x \ fml_sem I (p1 vid2 vid1)} \ (sol 0) = (fst \) ) \ \ \ fml_sem I (P pid1)" assume t:"0 \ t" assume aaba:"(aa, ba) = mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)" assume solve:" (sol solves_ode (\_. ODE_sem I (OSing vid1 (f1 fid1 vid1)))) {0..t} {x. mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) x \ fml_sem I (p1 vid2 vid1)}" assume sol0:" (sol 0) = (fst (ab, bb)) " assume rep:" (ac, bc) = repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)))" have aaba_sem:"(aa,ba) \ fml_sem I (P pid1)" using allw t aaba solve sol0 rep by blast have truth:"repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))) = mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)" using DE_lemma by auto show " repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))) \ fml_sem I (P pid1)" using aaba aaba_sem truth by (auto) next fix I::"('sf,'sc,'sz) interp" and aa ba ab bb sol and t::real assume "is_interp I" assume all:"\\. (\\ sol t. 
((ab, bb), \) = (\, mk_v I (OSing vid1 (f1 fid1 vid1)) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OSing vid1 (f1 fid1 vid1)))) {0..t} {x. mk_v I (OSing vid1 (f1 fid1 vid1)) \ x \ fml_sem I (p1 vid2 vid1)} \ (sol 0) = (fst \) ) \ (\\'. \' = repd \ vid1 (dterm_sem I (f1 fid1 vid1) \) \ \' \ fml_sem I (P pid1))" hence justW:"(\\ sol t. ((ab, bb), (aa, ba)) = (\, mk_v I (OSing vid1 (f1 fid1 vid1)) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OSing vid1 (f1 fid1 vid1)))) {0..t} {x. mk_v I (OSing vid1 (f1 fid1 vid1)) \ x \ fml_sem I (p1 vid2 vid1)} \ (sol 0) = (fst \)) \ (\\'. \' = repd (aa, ba) vid1 (dterm_sem I (f1 fid1 vid1) (aa, ba)) \ \' \ fml_sem I (P pid1))" by (rule allE) assume t:"0 \ t" assume aaba:"(aa, ba) = mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)" assume sol:"(sol solves_ode (\_. ODE_sem I (OSing vid1 (f1 fid1 vid1)))) {0..t} {x. mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) x \ fml_sem I (p1 vid2 vid1)}" assume sol0:" (sol 0) = (fst (ab, bb))" have "repd (aa, ba) vid1 (dterm_sem I (f1 fid1 vid1) (aa, ba)) \ fml_sem I (P pid1)" using justW t aaba sol sol0 by auto hence foo:"repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))) \ fml_sem I (P pid1)" using aaba by auto hence "repd (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)) vid1 (dterm_sem I (f1 fid1 vid1) (mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t))) = mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t)" using DE_lemma by auto thus "mk_v I (OSing vid1 (f1 fid1 vid1)) (ab, bb) (sol t) \ fml_sem I (P pid1)" using foo by auto qed qed lemma ODE_zero:"\i. Inl i \ BVO ODE \ Inr i \ BVO ODE \ ODE_sem I ODE \ $ i= 0" by(induction ODE, auto) lemma DE_sys_valid: assumes disj:"{Inl vid1, Inr vid1} \ BVO ODE = {}" shows "valid (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1)) ODE) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE ((OProd (OSing vid1 (f1 fid1 vid1))ODE)) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))" proof - have dsafe:"dsafe ($f fid1 (singleton (trm.Var vid1)))" unfolding singleton_def by(auto intro: dsafe.intros) have osafe:"osafe(OSing vid1 (f1 fid1 vid1))" unfolding f1_def empty_def singleton_def using dsafe osafe.intros dsafe.intros by (simp add: osafe_Sing dfree_Const) have fsafe:"fsafe (p1 vid2 vid1)" unfolding p1_def singleton_def using hpsafe_fsafe.intros(10) using dsafe dsafe_Fun_simps image_iff by (simp add: dfree_Const) show "valid (([[EvolveODE (OProd (OSing vid1 (f1 fid1 vid1)) ODE) (p1 vid2 vid1)]] (P pid1)) \ ([[EvolveODE ((OProd (OSing vid1 (f1 fid1 vid1)) ODE)) (p1 vid2 vid1)]] [[DiffAssign vid1 (f1 fid1 vid1)]]P pid1))" apply(auto simp only: DEaxiom_def valid_def Let_def iff_sem impl_sem) apply(auto simp only: fml_sem.simps prog_sem.simps mem_Collect_eq box_sem f1_def p1_def P_def expand_singleton) proof - fix I ::"('sf,'sc,'sz) interp" and aa ba ab bb sol and t::real and ac bc assume good:"is_interp I" assume bigAll:" \\. (\\ sol t. ((ab, bb), \) = (\, mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) ODE) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OProd(OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) ODE ))) {0..t} {x. Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ x))} \ sol 0 = fst \) \ \ \ fml_sem I (Pc pid1)" let ?my\ = "mk_v I (OProd (OSing vid1 ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab,bb) (sol t)" assume t:"0 \ t" assume aaba:"(aa, ba) = mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)" assume sol:"(sol solves_ode (\_. ODE_sem I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE))) {0..t} {x. Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) x))}" assume sol0:"sol 0 = fst (ab, bb)" assume acbc:"(ac, bc) = repd (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)) vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)))" have bigEx:"(\\ sol t. ((ab, bb), ?my\) = (\, mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE))) {0..t} {x. Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ x))} \ sol 0 = fst \)" apply(rule exI[where x="(ab, bb)"]) apply(rule exI[where x="sol"]) apply(rule exI[where x="t"]) apply(rule conjI) apply(rule refl) apply(rule conjI) apply(rule t) apply(rule conjI) using sol apply blast by (rule sol0) have bigRes:"?my\ \ fml_sem I (Pc pid1)" using bigAll bigEx by blast have notin1:"Inl vid1 \ BVO ODE" using disj by auto have notin2:"Inr vid1 \ BVO ODE" using disj by auto have ODE_sem:"ODE_sem I ODE (sol t) $ vid1 = 0" using ODE_zero notin1 notin2 by blast have vec_eq:"(\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (sol t)) = (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)))" apply(rule vec_extensionality) apply simp using mk_v_agree[of I "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE)" "(ab, bb)" "(sol t)"] by(simp add: Vagree_def) have sem_eq:"(?my\ \ fml_sem I (Pc pid1)) = ((repd (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)) vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)))) \ fml_sem I (Pc pid1))" apply(rule coincidence_formula) subgoal by simp subgoal by (rule Iagree_refl) using mk_v_agree[of "I" "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE)" "(ab, bb)" "(sol t)"] unfolding Vagree_def apply simp apply(erule conjE)+ apply(erule allE[where x="vid1"])+ apply(simp add: ODE_sem) using vec_eq by simp show "repd (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)) vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t))) \ fml_sem I (Pc pid1)" using bigRes sem_eq by blast next fix I::"('sf,'sc,'sz)interp" and aa ba ab bb sol and t::real assume good_interp:"is_interp I" assume all:"\\. (\\ sol t. 
((ab, bb), \) = (\, mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE))) {0..t} {x. Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ x))} \ sol 0 = fst \) \ (\\'. \' = repd \ vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) \) \ \' \ fml_sem I (Pc pid1))" let ?my\ = "mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)" assume t:"0 \ t" assume aaba:"(aa, ba) = mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)" assume sol:" (sol solves_ode (\_. ODE_sem I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE))) {0..t} {x. Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) x))}" assume sol0:"sol 0 = fst (ab, bb)" have bigEx:"(\\ sol t. ((ab, bb), ?my\) = (\, mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE))) {0..t} {x. Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) \ x))} \ sol 0 = fst \)" apply(rule exI[where x="(ab, bb)"]) apply(rule exI[where x=sol]) apply(rule exI[where x=t]) apply(rule conjI) apply(rule refl) apply(rule conjI) apply(rule t) apply(rule conjI) using sol sol0 by(blast)+ have rep_sem_eq:"repd (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)) vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t))) \ fml_sem I (Pc pid1) = (repd ?my\ vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) ?my\) \ fml_sem I (Pc pid1))" apply(rule coincidence_formula) subgoal by simp subgoal by (rule Iagree_refl) by(simp add: Vagree_def) have notin1:"Inl vid1 \ BVO ODE" using disj by auto have notin2:"Inr vid1 \ BVO ODE" using disj by auto have ODE_sem:"ODE_sem I ODE (sol t) $ vid1 = 0" using ODE_zero notin1 notin2 by blast have vec_eq:" (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t))) = (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (sol t))" apply(rule vec_extensionality) using mk_v_agree[of I "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE)" "(ab, bb)" "(sol t)"] by (simp add: Vagree_def) have sem_eq: "(repd ?my\ vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) ?my\) \ fml_sem I (Pc pid1)) = (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t) \ fml_sem I (Pc pid1)) " apply(rule coincidence_formula) subgoal by simp subgoal by (rule Iagree_refl) using mk_v_agree[of I "(OProd (OSing vid1 ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0)))ODE)" "(ab, bb)" "(sol t)"] unfolding Vagree_def apply simp apply(erule conjE)+ apply(erule allE[where x=vid1])+ by (simp add: ODE_sem vec_eq) have some_sem:"repd (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t)) vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t))) \ fml_sem I (Pc pid1)" using rep_sem_eq using all bigEx by blast have bigImp:"(\\'. \' = repd ?my\ vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) ?my\) \ \' \ fml_sem I (Pc pid1))" apply(rule allI) apply(rule impI) apply auto using some_sem by auto have fml_sem:"repd ?my\ vid1 (dterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) ?my\) \ fml_sem I (Pc pid1)" using sem_eq bigImp by blast show "mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))ODE) (ab, bb) (sol t) \ fml_sem I (Pc pid1)" using fml_sem sem_eq by blast qed qed lemma DC_valid:"valid DCaxiom" proof (auto simp only: fml_sem.simps prog_sem.simps DCaxiom_def valid_def iff_sem impl_sem box_sem, auto) fix I::"('sf,'sc,'sz) interp" and aa ba bb sol t assume "is_interp I" and all3:"\a b. (\sola. sol 0 = sola 0 \ (\t. (a, b) = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV})) \ (a, b) \ Contexts I pid3 UNIV" and all2:"\a b. (\sola. sol 0 = sola 0 \ (\t. (a, b) = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV})) \ (a, b) \ Contexts I pid2 UNIV" and t:"0 \ t" and aaba:"(aa, ba) = mk_v I (OVar vid1) (sol 0, bb) (sol t)" and sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sol 0, bb) x \ Contexts I pid1 UNIV \ mk_v I (OVar vid1) (sol 0, bb) x \ Contexts I pid3 UNIV}" from sol have sol1:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sol 0, bb) x \ Contexts I pid1 UNIV}" by (metis (mono_tags, lifting) Collect_mono solves_ode_supset_range) from all2 have all2':"\v. (\sola. sol 0 = sola 0 \ (\t. v = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV})) \ v \ Contexts I pid2 UNIV" by auto show "mk_v I (OVar vid1) (sol 0, bb) (sol t) \ Contexts I pid2 UNIV" apply(rule all2'[of "mk_v I (OVar vid1) (sol 0, bb) (sol t)"]) apply(rule exI[where x=sol]) apply(rule conjI) subgoal by (rule refl) subgoal using t sol1 by auto done next fix I::"('sf,'sc,'sz) interp" and aa ba bb sol t assume "is_interp I" and all3:"\a b. (\sola. sol 0 = sola 0 \ (\t. (a, b) = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV})) \ (a, b) \ Contexts I pid3 UNIV" and all2:"\a b. (\sola. sol 0 = sola 0 \ (\t. (a, b) = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV \ mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid3 UNIV})) \ (a, b) \ Contexts I pid2 UNIV" and t:"0 \ t" and aaba:"(aa, ba) = mk_v I (OVar vid1) (sol 0, bb) (sol t)" and sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. 
mk_v I (OVar vid1) (sol 0, bb) x \ Contexts I pid1 UNIV}" from all2 have all2':"\v. (\sola. sol 0 = sola 0 \ (\t. v = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV \ mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid3 UNIV})) \ v \ Contexts I pid2 UNIV" by auto from all3 have all3':"\v. (\sola. sol 0 = sola 0 \ (\t. v = mk_v I (OVar vid1) (sola 0, bb) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sola 0, bb) x \ Contexts I pid1 UNIV})) \ v \ Contexts I pid3 UNIV" by auto have inp1:"\s. 0 \ s \ s \ t \ mk_v I (OVar vid1) (sol 0, bb) (sol s) \ Contexts I pid1 UNIV" using sol solves_odeD atLeastAtMost_iff by blast have inp3:"\s. 0 \ s \ s \ t \ mk_v I (OVar vid1) (sol 0, bb) (sol s) \ Contexts I pid3 UNIV" apply(rule all3') subgoal for s apply(rule exI [where x=sol]) apply(rule conjI) subgoal by (rule refl) apply(rule exI [where x=s]) apply(rule conjI) subgoal by (rule refl) apply(rule conjI) subgoal by assumption subgoal using sol by (meson atLeastatMost_subset_iff order_refl solves_ode_subset) done done have inp13:"\s. 0 \ s \ s \ t \ mk_v I (OVar vid1) (sol 0, bb) (sol s) \ Contexts I pid1 UNIV \ mk_v I (OVar vid1) (sol 0, bb) (sol s) \ Contexts I pid3 UNIV" using inp1 inp3 by auto have sol13:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. mk_v I (OVar vid1) (sol 0, bb) x \ Contexts I pid1 UNIV \ mk_v I (OVar vid1) (sol 0, bb) x \ Contexts I pid3 UNIV}" apply(rule solves_odeI) subgoal using sol by (rule solves_odeD) subgoal for s using inp13[of s] by auto done show "mk_v I (OVar vid1) (sol 0, bb) (sol t) \ Contexts I pid2 UNIV" using t sol13 all2'[of "mk_v I (OVar vid1) (sol 0, bb) (sol t)"] by auto qed lemma DS_valid:"valid DSaxiom" proof - have dsafe:"dsafe($f fid1 (\i. Const 0))" using dsafe_Const by auto have osafe:"osafe(OSing vid1 (f0 fid1))" unfolding f0_def empty_def using dsafe osafe.intros by (simp add: osafe_Sing dfree_Const) have fsafe:"fsafe(p1 vid2 vid1)" unfolding p1_def apply(rule fsafe_Prop) using singleton.simps dsafe_Const by (auto intro: dfree.intros) show "valid DSaxiom" apply(auto simp only: DSaxiom_def valid_def Let_def iff_sem impl_sem box_sem) apply(auto simp only: fml_sem.simps prog_sem.simps mem_Collect_eq iff_sem impl_sem box_sem forall_sem) proof - fix I::"('sf,'sc,'sz) interp" and a b r aa ba assume good_interp:"is_interp I" assume allW:"\\. (\\ sol t. ((a, b), \) = (\, mk_v I (OSing vid1 (f0 fid1)) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..t} {x. mk_v I (OSing vid1 (f0 fid1)) \ x \ fml_sem I (p1 vid2 vid1)} \ (sol 0) = (fst \)) \ \ \ fml_sem I (p1 vid3 vid1)" assume "dterm_sem I (Const 0) (repv (a, b) vid2 r) \ dterm_sem I (trm.Var vid2) (repv (a, b) vid2 r)" hence leq:"0 \ r" by (auto) assume "\ra. repv (repv (a, b) vid2 r) vid3 ra \ {v. dterm_sem I (Const 0) v \ dterm_sem I (trm.Var vid3) v} \ {v. dterm_sem I (trm.Var vid3) v \ dterm_sem I (trm.Var vid2) v} \ Predicates I vid2 (\ i. dterm_sem I (singleton (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3))) i) (repv (repv (a, b) vid2 r) vid3 ra))" hence constraint:"\ra. 
(0 \ ra \ ra \ r) \ (repv (repv (a, b) vid2 r) vid3 ra) \ fml_sem I (Prop vid2 (singleton (Plus (Var vid1) (Times (f0 fid1) (Var vid3)))))" using leq by auto assume aaba:" (aa, ba) = repv (repv (a, b) vid2 r) vid1 (dterm_sem I (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid2))) (repv (a, b) vid2 r))" let ?abba = "repv (repd (a, b) vid1 (Functions I fid1 (\ i. 0))) vid1 (dterm_sem I (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid2))) (repv (a, b) vid2 r))" from allW have thisW:"(\\ sol t. ((a, b), ?abba) = (\, mk_v I (OSing vid1 (f0 fid1)) \ (sol t)) \ 0 \ t \ (sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..t} {x. mk_v I (OSing vid1 (f0 fid1)) \ x \ fml_sem I (p1 vid2 vid1)} \ (sol 0) = (fst \)) \ ?abba \ fml_sem I (p1 vid3 vid1)" by blast let ?c = "Functions I fid1 (\ _. 0)" let ?sol = "(\t. \ i. if i = vid1 then (a $ i) + ?c * t else (a $ i))" have agrees:"Vagree (mk_v I (OSing vid1 (f0 fid1)) (a, b) (?sol r)) (a, b) (- semBV I (OSing vid1 (f0 fid1))) \ Vagree (mk_v I (OSing vid1 (f0 fid1)) (a, b) (?sol r)) (mk_xode I (OSing vid1 (f0 fid1)) (?sol r)) (semBV I (OSing vid1 (f0 fid1)))" using mk_v_agree[of "I" "(OSing vid1 (f0 fid1))" "(a,b)" "(?sol r)"] by auto have prereq1a:"fst ?abba = fst (mk_v I (OSing vid1 (f0 fid1)) (a,b) (?sol r))" using agrees aaba apply (auto simp add: aaba Vagree_def) apply (rule vec_extensionality) subgoal for i apply (cases "i = vid1") using vne12 agrees Vagree_def apply (auto simp add: aaba f0_def empty_def) done apply (rule vec_extensionality) subgoal for i apply (cases "i = vid1") apply(auto simp add: f0_def empty_def) done done have prereq1b:"snd (?abba) = snd (mk_v I (OSing vid1 (f0 fid1)) (a,b) (?sol r))" using agrees aaba apply (auto simp add: aaba Vagree_def) apply (rule vec_extensionality) subgoal for i apply (cases "i = vid1") using vne12 agrees Vagree_def apply (auto simp add: aaba f0_def empty_def ) done done have "?abba = mk_v I (OSing vid1 (f0 fid1)) (a,b) (?sol r)" using prod_eq_iff prereq1a prereq1b by blast hence req1:"((a, b), ?abba) = ((a, b), mk_v I (OSing vid1 (f0 fid1)) (a,b) (?sol r))" by auto have "sterm_sem I ($f fid1 (\i. Const 0)) b = Functions I fid1 (\ i. 0)" by auto hence vec_simp:"(\a b. \ i. if i = vid1 then sterm_sem I ($f fid1 (\i. Const 0)) b else 0) = (\a b. \ i. if i = vid1 then Functions I fid1 (\ i. 0) else 0)" by (auto simp add: vec_eq_iff cong: if_cong) have sub: "{0..r} \ UNIV" by auto have sub2:"{x. mk_v I (OSing vid1 (f0 fid1)) (a,b) x \ fml_sem I (p1 vid2 vid1)} \ UNIV" by auto have req3:"(?sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..r} {x. mk_v I (OSing vid1 (f0 fid1)) (a,b) x \ fml_sem I (p1 vid2 vid1)}" apply(auto simp add: f0_def empty_def vec_simp) apply(rule solves_odeI) apply(auto simp only: has_vderiv_on_def has_vector_derivative_def box_sem) apply (rule has_derivative_vec[THEN has_derivative_eq_rhs]) defer apply (rule ext) apply (subst scaleR_vec_def) apply (rule refl) apply (auto intro!: derivative_eq_intros) \ \Domain constraint satisfied\ using constraint apply (auto) subgoal for t apply(erule allE[where x="t"]) apply(auto simp add: p1_def) proof - have eq:"(\ i. dterm_sem I (if i = vid1 then Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3)) else Const 0) (\ y. if vid3 = y then t else fst (\ y. if vid2 = y then r else fst (a, b) $ y, b) $ y, b)) = (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. Const 0))) (a, b) (\ i. if i = vid1 then a $ i + Functions I fid1 (\ _. 
0) * t else a $ i)))" using vne12 vne13 mk_v_agree[of "I" "(OSing vid1 ($f fid1 (\i. Const 0)))" "(a, b)" "(\ i. if i = vid1 then a $ i + Functions I fid1 (\ _. 0) * t else a $ i)"] by (auto simp add: vec_eq_iff f0_def empty_def Vagree_def) show "0 \ t \ t \ r \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3)) else Const 0) (\ y. if vid3 = y then t else fst (\ y. if vid2 = y then r else fst (a, b) $ y, b) $ y, b)) \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. Const 0))) (a, b) (\ i. if i = vid1 then a $ i + Functions I fid1 (\ _. 0) * t else a $ i)))" using eq by auto qed done have req4':"?sol 0 = fst (a,b)" by (auto simp: vec_eq_iff) then have req4: " (?sol 0) = (fst (a,b))" using VSagree_refl[of a] req4' unfolding VSagree_def by auto have inPred:"?abba \ fml_sem I (p1 vid3 vid1)" using req1 leq req3 req4 thisW by fastforce have sem_eq:"?abba \ fml_sem I (p1 vid3 vid1) \ (aa,ba) \ fml_sem I (p1 vid3 vid1)" apply (rule coincidence_formula) apply (auto simp add: aaba Vagree_def p1_def f0_def empty_def) subgoal using Iagree_refl by auto done from inPred sem_eq have inPred':"(aa,ba) \ fml_sem I (p1 vid3 vid1)" by auto \ \thus by lemma 6 consequence for formulas\ show "repv (repv (a, b) vid2 r) vid1 (dterm_sem I (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid2))) (repv (a, b) vid2 r)) \ fml_sem I (p1 vid3 vid1)" using aaba inPred' by (auto) next fix I::"('sf,'sc,'sz) interp" and aa ba ab bb sol and t:: real assume good_interp:"is_interp I" assume all:" \r. dterm_sem I (Const 0) (repv (ab, bb) vid2 r) \ dterm_sem I (trm.Var vid2) (repv (ab, bb) vid2 r) \ (\ra. repv (repv (ab, bb) vid2 r) vid3 ra \ {v. dterm_sem I (Const 0) v \ dterm_sem I (trm.Var vid3) v} \ {v. dterm_sem I (trm.Var vid3) v \ dterm_sem I (trm.Var vid2) v} \ Predicates I vid2 (\ i. dterm_sem I (singleton (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3))) i) (repv (repv (ab, bb) vid2 r) vid3 ra))) \ (\\. \ = repv (repv (ab, bb) vid2 r) vid1 (dterm_sem I (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid2))) (repv (ab, bb) vid2 r)) \ \ \ fml_sem I (p1 vid3 vid1))" assume t:"0 \ t" assume aaba:"(aa, ba) = mk_v I (OSing vid1 (f0 fid1)) (ab, bb) (sol t)" assume sol:"(sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..t} {x. mk_v I (OSing vid1 (f0 fid1)) (ab, bb) x \ fml_sem I (p1 vid2 vid1)}" hence constraint:"\s. s \ {0 .. t} \ sol s \ {x. mk_v I (OSing vid1 (f0 fid1)) (ab, bb) x \ fml_sem I (p1 vid2 vid1)}" using solves_ode_domainD by fastforce \ \\sol 0 = fst (ab, bb)\\ assume sol0:" (sol 0) = (fst (ab, bb)) " have impl:"dterm_sem I (Const 0) (repv (ab, bb) vid2 t) \ dterm_sem I (trm.Var vid2) (repv (ab, bb) vid2 t) \ (\ra. repv (repv (ab, bb) vid2 t) vid3 ra \ {v. dterm_sem I (Const 0) v \ dterm_sem I (trm.Var vid3) v} \ {v. dterm_sem I (trm.Var vid3) v \ dterm_sem I (trm.Var vid2) v} \ Predicates I vid2 (\ i. dterm_sem I (singleton (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3))) i) (repv (repv (ab, bb) vid2 t) vid3 ra))) \ (\\. \ = repv (repv (ab, bb) vid2 t) vid1 (dterm_sem I (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid2))) (repv (ab, bb) vid2 t)) \ \ \ fml_sem I (p1 vid3 vid1))" using all by auto interpret ll:ll_on_open_it UNIV "(\_. 
ODE_sem I (OSing vid1 (f0 fid1)))" "UNIV" 0 apply(standard) apply(auto) unfolding local_lipschitz_def f0_def empty_def sterm_sem.simps using gt_ex lipschitz_on_constant by blast have eq_UNIV:"ll.existence_ivl 0 (sol 0) = UNIV" apply(rule ll.existence_ivl_eq_domain) apply(auto) subgoal for tm tM t apply(unfold f0_def empty_def sterm_sem.simps) by(metis add.right_neutral mult_zero_left order_refl) done \ \Combine with \flow_usolves_ode\ and \equals_flowI\ to get uniqueness of solution\ let ?f = "(\_. ODE_sem I (OSing vid1 (f0 fid1)))" have sol_UNIV: "\t x. (ll.flow 0 x usolves_ode ?f from 0) (ll.existence_ivl 0 x) UNIV" using ll.flow_usolves_ode by auto from sol have sol': "(sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..t} UNIV" apply (rule solves_ode_supset_range) by auto from sol' have sol'':"\s. s \ 0 \ s \ t \ (sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..s} UNIV" by (simp add: solves_ode_subset) have sol0_eq:"sol 0 = ll.flow 0 (sol 0) 0" using ll.general.flow_initial_time_if by auto have isFlow:"\s. s \ 0 \ s \ t \ sol s = ll.flow 0 (sol 0) s" apply(rule ll.equals_flowI) apply(auto) subgoal using eq_UNIV by auto subgoal using sol'' closed_segment_eq_real_ivl t by (auto simp add: solves_ode_singleton) subgoal using eq_UNIV sol sol0_eq by auto done let ?c = "Functions I fid1 (\ _. 0)" let ?sol = "(\t. \ i. if i = vid1 then (ab $ i) + ?c * t else (ab $ i))" have vec_simp:"(\a b. \ i. if i = vid1 then sterm_sem I ($f fid1 (\i. Const 0)) b else 0) = (\a b. \ i. if i = vid1 then Functions I fid1 (\ i. 0) else 0)" by (auto simp add: vec_eq_iff cong: if_cong) have exp_sol:"(?sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..t} UNIV" apply(auto simp add: f0_def empty_def vec_simp) apply(rule solves_odeI) apply(auto simp only: has_vderiv_on_def has_vector_derivative_def box_sem) apply (rule has_derivative_vec[THEN has_derivative_eq_rhs]) defer apply (rule ext) apply (subst scaleR_vec_def) apply (rule refl) apply (auto intro!: derivative_eq_intros) done from exp_sol have exp_sol':"\s. s \ 0 \ s \ t \ (?sol solves_ode (\_. ODE_sem I (OSing vid1 (f0 fid1)))) {0..s} UNIV" by (simp add: solves_ode_subset) have exp_sol0_eq:"?sol 0 = ll.flow 0 (?sol 0) 0" using ll.general.flow_initial_time_if by auto have more_eq:"(\ i. if i = vid1 then ab $ i + Functions I fid1 (\ _. 0) * 0 else ab $ i) = sol 0" using sol0 apply auto apply(rule vec_extensionality) by(auto) have exp_isFlow:"\s. s \ 0 \ s \ t \ ?sol s = ll.flow 0 (sol 0) s" apply(rule ll.equals_flowI) apply(auto) subgoal using eq_UNIV by auto defer subgoal for s using eq_UNIV apply auto subgoal using exp_sol exp_sol0_eq more_eq apply(auto) done done using exp_sol' closed_segment_eq_real_ivl t apply(auto) by (simp add: solves_ode_singleton) have sol_eq_exp:"\s. s \ 0 \ s \ t \ ?sol s = sol s" unfolding exp_isFlow isFlow by auto then have sol_eq_exp_t:"?sol t = sol t" using t by auto then have sol_eq_exp_t':"sol t $ vid1 = ?sol t $ vid1" by auto then have useful:"?sol t $ vid1 = ab $ vid1 + Functions I fid1 (\ i. 0) * t" by auto from sol_eq_exp_t' useful have useful':"sol t $ vid1 = ab $ vid1 + Functions I fid1 (\ i. 0) * t" by auto have sol_int:"((ll.flow 0 (sol 0)) usolves_ode ?f from 0) {0..t} {x. mk_v I (OSing vid1 (f0 fid1)) (ab, bb) x \ fml_sem I (p1 vid2 vid1)}" apply (rule usolves_ode_subset_range[of "(ll.flow 0 (sol 0))" "?f" "0" "{0..t}" "UNIV" "{x. 
mk_v I (OSing vid1 (f0 fid1)) (ab, bb) x \ fml_sem I (p1 vid2 vid1)}"]) subgoal using eq_UNIV sol_UNIV[of "(sol 0)"] apply (auto) apply (rule usolves_ode_subset) using t by(auto) apply(auto) using sol apply(auto dest!: solves_ode_domainD) subgoal for xa using isFlow[of xa] by(auto) done have thing:"\s. 0 \ s \ s \ t \ fst (mk_v I (OSing vid1 ($f fid1 (\i. Const 0))) (ab, bb) (?sol s)) $ vid1 = ab $ vid1 + Functions I fid1 (\ i. 0) * s" subgoal for s using mk_v_agree[of I "(OSing vid1 ($f fid1 (\i. Const 0)))" "(ab, bb)" "(?sol s)"] apply auto unfolding Vagree_def by auto done have thing':"\s. 0 \ s \ s \ t \ fst (mk_v I (OSing vid1 ($f fid1 (\i. Const 0))) (ab, bb) (sol s)) $ vid1 = ab $ vid1 + Functions I fid1 (\ i. 0) * s" subgoal for s using thing[of s] sol_eq_exp[of s] by auto done have another_eq:"\i s. 0 \ s \ s \ t \ dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 (f0 fid1)) (ab, bb) (sol s)) = dterm_sem I (if i = vid1 then Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3)) else Const 0) (\ y. if vid3 = y then s else fst (\ y. if vid2 = y then s else fst (ab, bb) $ y, bb) $ y, bb)" using mk_v_agree[of "I" "(OSing vid1 (f0 fid1))" "(ab, bb)" "(sol s)"] vne12 vne23 vne13 apply(auto simp add: f0_def p1_def empty_def) unfolding Vagree_def apply(simp add: f0_def empty_def) subgoal for s using thing' by auto done have allRa':"(\ra. repv (repv (ab, bb) vid2 t) vid3 ra \ {v. dterm_sem I (Const 0) v \ dterm_sem I (trm.Var vid3) v} \ {v. dterm_sem I (trm.Var vid3) v \ dterm_sem I (trm.Var vid2) v} \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 (f0 fid1)) (ab, bb) (sol ra))))" apply(rule allI) subgoal for ra using mk_v_agree[of "I" "(OSing vid1 (f0 fid1))" "(ab, bb)" "(sol ra)"] vne23 constraint[of ra] apply(auto simp add: Vagree_def p1_def) done done have anotherFact:"\ra. 0 \ ra \ ra \ t \ (\ i. if i = vid1 then ab $ i + Functions I fid1 (\ _. 0) * ra else ab $ i) $ vid1 = ab $ vid1 + dterm_sem I (f0 fid1) (\ y. if vid3 = y then ra else fst (\ y. if vid2 = y then t else fst (ab, bb) $ y, bb) $ y, bb) * ra " subgoal for ra apply simp apply(rule disjI2) by (auto simp add: f0_def empty_def) done have thing':"\ra i. 0 \ ra \ ra \ t \ dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. Const 0))) (ab, bb) (sol ra)) = dterm_sem I (if i = vid1 then Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3)) else Const 0) (\ y. if vid3 = y then ra else fst (\ y. if vid2 = y then t else fst (ab, bb) $ y, bb) $ y, bb) " subgoal for ra i using vne12 vne13 mk_v_agree[of I "OSing vid1 ($f fid1 (\i. Const 0))" "(ab,bb)" "(sol ra)"] apply (auto) unfolding Vagree_def apply(safe) apply(erule allE[where x="vid1"])+ using sol_eq_exp[of ra] anotherFact[of ra] by auto done have allRa:"(\ra. repv (repv (ab, bb) vid2 t) vid3 ra \ {v. dterm_sem I (Const 0) v \ dterm_sem I (trm.Var vid3) v} \ {v. dterm_sem I (trm.Var vid3) v \ dterm_sem I (trm.Var vid2) v} \ Predicates I vid2 (\ i. dterm_sem I (singleton (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid3))) i) (repv (repv (ab, bb) vid2 t) vid3 ra)))" apply(rule allI) subgoal for ra using mk_v_agree[of "I" "(OSing vid1 (f0 fid1))" "(ab, bb)" "(sol ra)"] vne23 constraint[of ra] apply(auto simp add: Vagree_def p1_def) using sol_eq_exp[of ra] apply (auto simp add: f0_def empty_def Vagree_def vec_eq_iff) using thing' by auto done have fml3:"\ra. 0 \ ra \ ra \ t \ (\\. 
\ = repv (repv (ab, bb) vid2 t) vid1 (dterm_sem I (Plus (trm.Var vid1) (Times (f0 fid1) (trm.Var vid2))) (repv (ab, bb) vid2 t)) \ \ \ fml_sem I (p1 vid3 vid1))" using impl allRa by auto have someEq:"(\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (\ y. if vid1 = y then (if vid2 = vid1 then t else fst (ab, bb) $ vid1) + Functions I fid1 (\ i. 0) * t else fst (\ y. if vid2 = y then t else fst (ab, bb) $ y, bb) $ y, bb)) = (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. Const 0))) (ab, bb) (sol t)))" apply(rule vec_extensionality) using vne12 sol_eq_exp t thing by auto show "mk_v I (OSing vid1 (f0 fid1)) (ab, bb) (sol t) \ fml_sem I (p1 vid3 vid1)" using mk_v_agree[of I "OSing vid1 (f0 fid1)" "(ab, bb)" "sol t"] fml3[of t] unfolding f0_def p1_def empty_def Vagree_def using someEq by(auto simp add: sol_eq_exp_t' t vec_extensionality vne12) qed qed lemma MVT0_within: fixes f ::"real \ real" and f'::"real \ real \ real" and s t :: real assumes f':"\x. x \ {0..t} \ (f has_derivative (f' x)) (at x within {0..t})" assumes geq':"\x. x \ {0..t} \ f' x s \ 0" assumes int_s:"s > 0 \ s \ t" assumes t: "0 < t" shows "f s \ f 0" proof - have "f 0 + 0 \ f s" apply (rule Lib.MVT_ivl'[OF f', of 0 s 0]) subgoal for x by assumption subgoal for x using geq' by auto using t int_s t apply auto subgoal for x - by (metis int_s mult.commute mult.right_neutral order.trans real_mult_le_cancel_iff2) + by (metis int_s mult.commute mult.right_neutral order.trans mult_le_cancel_iff2) done then show "?thesis" by auto qed lemma MVT': fixes f g ::"real \ real" fixes f' g'::"real \ real \ real" fixes s t ::real assumes f':"\s. s \ {0..t} \ (f has_derivative (f' s)) (at s within {0..t})" assumes g':"\s. s \ {0..t} \ (g has_derivative (g' s)) (at s within {0..t})" assumes geq':"\x. x \ {0..t} \ f' x s \ g' x s" assumes geq0:"f 0 \ g 0" assumes int_s:"s > 0 \ s \ t" assumes t:"t > 0" shows "f s \ g s" proof - let ?h = "(\x. f x - g x)" let ?h' = "(\s x. f' s x - g' s x)" have "?h s \ ?h 0" apply(rule MVT0_within[of t ?h "?h'" s]) subgoal for s using f'[of s] g'[of s] by auto subgoal for sa using geq'[of sa] by auto subgoal using int_s by auto subgoal using t by auto done then show "?thesis" using geq0 by auto qed lemma MVT'_gr: fixes f g ::"real \ real" fixes f' g'::"real \ real \ real" fixes s t ::real assumes f':"\s. s \ {0..t} \ (f has_derivative (f' s)) (at s within {0..t})" assumes g':"\s. s \ {0..t} \ (g has_derivative (g' s)) (at s within {0..t})" assumes geq':"\x. x \ {0..t} \ f' x s \ g' x s" assumes geq0:"f 0 > g 0" assumes int_s:"s > 0 \ s \ t" assumes t:"t > 0" shows "f s > g s" proof - let ?h = "(\x. f x - g x)" let ?h' = "(\s x. f' s x - g' s x)" have "?h s \ ?h 0" apply(rule MVT0_within[of t ?h "?h'" s]) subgoal for s using f'[of s] g'[of s] by auto subgoal for sa using geq'[of sa] by auto subgoal using int_s by auto subgoal using t by auto done then show "?thesis" using geq0 by auto qed lemma frech_linear: fixes x \ \ \' I assumes good_interp:"is_interp I" assumes free:"dfree \" shows "x * frechet I \ \ \' = frechet I \ \ (x *\<^sub>R \')" using frechet_linear[OF good_interp free] by (simp add: linear_simps) lemma rift_in_space_time: fixes sol I ODE \ \ t s b assumes good_interp:"is_interp I" assumes free:"dfree \" assumes osafe:"osafe ODE" assumes sol:"(sol solves_ode (\_ \'. ODE_sem I ODE \')) {0..t} {x. mk_v I ODE (sol 0, b) x \ fml_sem I \}" assumes FVT:"FVT \ \ semBV I ODE" assumes ivl:"s \ {0..t}" shows "((\t. 
sterm_sem I \ (fst (mk_v I ODE (sol 0, b) (sol t)))) \ \This is Frechet derivative, so equivalent to:\ \ \\has_real_derivative frechet I \ (fst((mk_v I ODE (sol 0, b) (sol s)))) (snd (mk_v I ODE (sol 0, b) (sol s))))) (at s within {0..t})\\ has_derivative (\t'. t' * frechet I \ (fst((mk_v I ODE (sol 0, b) (sol s)))) (snd (mk_v I ODE (sol 0, b) (sol s))))) (at s within {0..t})" proof - let ?\ = "(\t. (mk_v I ODE (sol 0, b) (sol t)))" let ?\s = "(\t. fst (?\ t))" have sol_deriv:"\s. s \ {0..t} \ (sol has_derivative (\xa. xa *\<^sub>R ODE_sem I ODE (sol s))) (at s within {0..t})" using sol apply simp apply (drule solves_odeD(1)) unfolding has_vderiv_on_def has_vector_derivative_def by auto have sol_dom:"\s. s\ {0..t} \ ?\ s \ fml_sem I \" using sol apply simp apply (drule solves_odeD(2)) by auto let ?h = "(\t. sterm_sem I \ (?\s t))" let ?g = "(\\. sterm_sem I \ \)" let ?f = "?\s" let ?f' = "(\t'. t' *\<^sub>R (\ i. if i \ ODE_vars I ODE then ODE_sem I ODE (sol s) $ i else 0))" let ?g' = "(frechet I \ (?\s s))" have heq:"?h = ?g \ ?f" by (auto) have fact1:"\i. i \ ODE_vars I ODE \ (\t. ?\s(t) $ i) = (\t. sol t $ i)" subgoal for i apply(rule ext) subgoal for t using mk_v_agree[of I ODE "(sol 0, b)" "sol t"] unfolding Vagree_def by auto done done have fact2:"\i. i \ ODE_vars I ODE \ (\t. if i \ ODE_vars I ODE then ODE_sem I ODE (sol t) $ i else 0) = (\t. ODE_sem I ODE (sol t) $ i)" subgoal for i apply(rule ext) subgoal for t using mk_v_agree[of I ODE "(sol 0, b)" "sol t"] unfolding Vagree_def by auto done done have fact3:"\i. i \ (-ODE_vars I ODE) \ (\t. ?\s(t) $ i) = (\t. sol 0 $ i)" subgoal for i apply(rule ext) subgoal for t using mk_v_agree[of I ODE "(sol 0, b)" "sol t"] unfolding Vagree_def by auto done done have fact4:"\i. i \ (-ODE_vars I ODE) \ (\t. if i \ ODE_vars I ODE then ODE_sem I ODE (sol t) $ i else 0) = (\t. 0)" subgoal for i apply(rule ext) subgoal for t using mk_v_agree[of I ODE "(sol 0, b)" "sol t"] unfolding Vagree_def by auto done done have some_eq:"(\v'. \ i. v' *\<^sub>R ODE_sem I ODE (sol s) $ i) = (\v'. v' *\<^sub>R ODE_sem I ODE (sol s))" apply(rule ext) apply(rule vec_extensionality) by auto have some_sol:"(sol has_derivative (\v'. v' *\<^sub>R ODE_sem I ODE (sol s))) (at s within {0..t})" using sol ivl unfolding solves_ode_def has_vderiv_on_def has_vector_derivative_def by auto have some_eta:"(\t. \ i. sol t $ i) = sol" by (rule ext, rule vec_extensionality, auto) have ode_deriv:"\i. i \ ODE_vars I ODE \ ((\t. sol t $ i) has_derivative (\ v'. v' *\<^sub>R ODE_sem I ODE (sol s) $ i)) (at s within {0..t})" subgoal for i apply(rule has_derivative_proj) using some_eq some_sol some_eta by auto done have eta:"(\t. (\ i. ?f t $ i)) = ?f" by(rule ext, rule vec_extensionality, auto) have eta_esque:"(\t'. \ i. t' * (if i \ ODE_vars I ODE then ODE_sem I ODE (sol s) $ i else 0)) = (\t'. t' *\<^sub>R (\ i. if i \ ODE_vars I ODE then ODE_sem I ODE (sol s) $ i else 0))" apply(rule ext | rule vec_extensionality)+ subgoal for t' i by auto done have "((\t. (\ i. ?f t $ i)) has_derivative (\t'. (\ i. 
?f' t' $ i))) (at s within {0..t})" apply (rule has_derivative_vec) subgoal for i apply(cases "i \ ODE_vars I ODE") subgoal using fact1[of i] fact2[of i] ode_deriv[of i] by auto subgoal using fact3[of i] fact4[of i] by auto done done then have fderiv:"(?f has_derivative ?f') (at s within {0..t})" using eta eta_esque by auto have gderiv:"(?g has_derivative ?g') (at (?f s) within ?f ` {0..t})" using has_derivative_at_withinI using frechet_correctness free good_interp by blast have chain:"((?g \ ?f) has_derivative (?g' \ ?f')) (at s within {0..t})" using fderiv gderiv diff_chain_within by blast let ?co\1 = "(fst (mk_v I ODE (sol 0, b) (sol s)), ODE_sem I ODE (fst (mk_v I ODE (sol 0, b) (sol s))))" let ?co\2 = "(fst (mk_v I ODE (sol 0, b) (sol s)), snd (mk_v I ODE (sol 0, b) (sol s)))" have sub_cont:"\a .a \ ODE_vars I ODE \ Inl a \ FVT \ \ False" using FVT by auto have sub_cont2:"\a .a \ ODE_vars I ODE \ Inr a \ FVT \ \ False" using FVT by auto have "Vagree (mk_v I ODE (sol 0, b) (sol s)) (sol s, b) (Inl ` ODE_vars I ODE)" using mk_v_agree[of I ODE "(sol 0, b)" "sol s"] unfolding Vagree_def by auto let ?co'\1 = "(\x. (fst (mk_v I ODE (sol 0, b) (sol s)), x *\<^sub>R (\ i. if i \ ODE_vars I ODE then ODE_sem I ODE (sol s) $ i else 0)))" let ?co'\2 = "(\x. (fst (mk_v I ODE (sol 0, b) (sol s)), x *\<^sub>R snd (mk_v I ODE (sol 0, b) (sol s))))" have co_agree_sem:"\s. Vagree (?co'\1 s) (?co'\2 s) (semBV I ODE)" subgoal for sa using mk_v_agree[of I ODE "(sol 0, b)" "sol s"] unfolding Vagree_def by auto done have co_agree_help:"\s. Vagree (?co'\1 s) (?co'\2 s) (FVT \)" using agree_sub[OF FVT co_agree_sem] by auto have co_agree':"\s. Vagree (?co'\1 s) (?co'\2 s) (FVDiff \)" subgoal for s using mk_v_agree[of I ODE "(sol 0, b)" "sol s"] unfolding Vagree_def apply auto subgoal for i x apply(cases x) subgoal for a apply(cases "a \ ODE_vars I ODE") by (simp | metis (no_types, lifting) FVT ODE_vars_lr Vagree_def mk_v_agree mk_xode.elims subsetD snd_conv)+ subgoal for a apply(cases "a \ ODE_vars I ODE") by (simp | metis (no_types, lifting) FVT Vagree_def mk_v_agree mk_xode.elims subsetD snd_conv)+ done subgoal for i x apply(cases x) subgoal for a apply(cases "a \ ODE_vars I ODE") using FVT ODE_vars_lr Vagree_def mk_v_agree mk_xode.elims subsetD snd_conv by auto subgoal for a apply(cases "a \ ODE_vars I ODE") apply(erule allE[where x=i])+ using FVT ODE_vars_lr Vagree_def mk_v_agree mk_xode.elims subsetD snd_conv by auto done done done have heq'':"(?g' \ ?f') = (\t'. t' *\<^sub>R frechet I \ (?\s s) (snd (?\ s)))" using mk_v_agree[of I ODE "(sol 0, b)" "sol s"] unfolding comp_def apply auto apply(rule ext | rule vec_extensionality)+ subgoal for x using frech_linear[of I \ x "(fst (mk_v I ODE (sol 0, b) (sol s)))" "(snd (mk_v I ODE (sol 0, b) (sol s)))", OF good_interp free] using coincidence_frechet[OF free, of "(?co'\1 x)" "(?co'\2 x)", OF co_agree'[of x], of I] by auto done have "((?g \ ?f) has_derivative (?g' \ ?f')) (at s within {0..t})" using chain by auto then have "((?g \ ?f) has_derivative (\t'. t' * frechet I \ (?\s s) (snd (?\ s)))) (at s within {0..t})" using heq'' by auto then have result:"((\t. sterm_sem I \ (?\s t)) has_derivative (\t. t * frechet I \ (?\s s) (snd (?\ s)))) (at s within {0..t})" using heq by auto then show "?thesis" by auto qed lemma dterm_sterm_dfree: "dfree \ \ (\\ \'. 
sterm_sem I \ \ = dterm_sem I \ (\, \'))" by(induction rule: dfree.induct, auto) \ \\g(x)\ h(x) \ [x'=f(x), c & p(x)](g(x)' \ h(x)') \ [x'=f(x), c]g(x) \ h(x)\\ lemma DIGeq_valid:"valid DIGeqaxiom" unfolding DIGeqaxiom_def apply(unfold DIGeqaxiom_def valid_def impl_sem iff_sem) apply(auto) (* 4 goals*) proof - fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume t:"0 \ t" and sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (empty i) (mk_v I ?ODE (sol 0, b) x))}" and notin:" \(Predicates I vid1 (\ i. dterm_sem I (empty i) (sol 0, b)))" have fsafe:"fsafe (Prop vid1 empty)" by (auto simp add: empty_def) from sol have "Predicates I vid1 (\ i. dterm_sem I (empty i) (?\ 0))" using t solves_ode_domainD[of sol "(\a. ODEs I vid1)" "{0..t}"] by auto then have incon:"Predicates I vid1 (\ i. dterm_sem I (empty i) ((sol 0, b)))" using coincidence_formula[OF fsafe Iagree_refl[of I], of "(sol 0, b)" "?\ 0"] unfolding Vagree_def by (auto simp add: empty_def) show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) \ dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using notin incon by auto next fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume t:"0 \ t" and sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (empty i) (mk_v I ?ODE (sol 0, b) x))}" and notin:" \(Predicates I vid1 (\ i. dterm_sem I (empty i) (sol 0, b)))" have fsafe:"fsafe (Prop vid1 empty)" by (auto simp add: empty_def) from sol have "Predicates I vid1 (\ i. dterm_sem I (empty i) (?\ 0))" using t solves_ode_domainD[of sol "(\a. ODEs I vid1)" "{0..t}"] by auto then have incon:"Predicates I vid1 (\ i. dterm_sem I (empty i) ((sol 0, b)))" using coincidence_formula[OF fsafe Iagree_refl[of I], of "(sol 0, b)" "?\ 0"] unfolding Vagree_def by (auto simp add: empty_def) show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) \ dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using notin incon by auto next fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume t:"0 \ t" assume sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sol 0, b) x))}" assume notin:"\ Predicates I vid1 (\ i. dterm_sem I (local.empty i) (sol 0, b))" have fsafe:"fsafe (Prop vid1 empty)" by (auto simp add: empty_def) from sol have "Predicates I vid1 (\ i. dterm_sem I (empty i) (?\ 0))" using t solves_ode_domainD[of sol "(\a. ODEs I vid1)" "{0..t}"] by auto then have incon:"Predicates I vid1 (\ i. dterm_sem I (empty i) ((sol 0, b)))" using coincidence_formula[OF fsafe Iagree_refl[of I], of "(sol 0, b)" "?\ 0"] unfolding Vagree_def by (auto simp add: empty_def) show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) \ dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using incon notin by auto next fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. 
mk_v I (?ODE) (sol 0, b) (sol t))" assume good_interp:"is_interp I" assume aaba:"(aa, ba) = mk_v I (OVar vid1) (sol 0, b) (sol t)" assume t:"0 \ t" assume sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sol 0, b) x))}" assume box:"\a ba. (\sola. sol 0 = sola 0 \ (\t. (a, ba) = mk_v I (OVar vid1) (sola 0, b) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sola 0, b) x))})) \ directional_derivative I (f1 fid2 vid1) (a, ba) \ directional_derivative I (f1 fid1 vid1) (a, ba)" assume geq0:"dterm_sem I (f1 fid2 vid1) (sol 0, b) \ dterm_sem I (f1 fid1 vid1) (sol 0, b)" have free1:"dfree ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0))" by (auto intro: dfree.intros) have free2:"dfree ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))" by (auto intro: dfree.intros) from geq0 have geq0':"sterm_sem I (f1 fid2 vid1) (sol 0) \ sterm_sem I (f1 fid1 vid1) (sol 0)" unfolding f1_def using dterm_sterm_dfree[OF free1, of I "sol 0" b] dterm_sterm_dfree[OF free2, of I "sol 0" b] by auto let ?\s = "\x. fst (?\ x)" let ?\t = "\x. snd (?\ x)" let ?df1 = "(\t. dterm_sem I (f1 fid2 vid1) (?\ t))" let ?f1 = "(\t. sterm_sem I (f1 fid2 vid1) (?\s t))" let ?f1' = "(\ s t'. t' * frechet I (f1 fid2 vid1) (?\s s) (?\t s))" have dfeq:"?df1 = ?f1" apply(rule ext) subgoal for t using dterm_sterm_dfree[OF free1, of I "?\s t" "snd (?\ t)"] unfolding f1_def expand_singleton by auto done have free3:"dfree (f1 fid2 vid1)" unfolding f1_def by (auto intro: dfree.intros) let ?df2 = "(\t. dterm_sem I (f1 fid1 vid1) (?\ t))" let ?f2 = "(\t. sterm_sem I (f1 fid1 vid1) (?\s t))" let ?f2' = "(\s t' . t' * frechet I (f1 fid1 vid1) (?\s s) (?\t s))" let ?int = "{0..t}" have bluh:"\x i. (Functions I i has_derivative (THE f'. \x. (Functions I i has_derivative f' x) (at x)) x) (at x)" using good_interp unfolding is_interp_def by auto have blah:"(Functions I fid2 has_derivative (THE f'. \x. (Functions I fid2 has_derivative f' x) (at x)) (\ i. if i = vid1 then sol t $ vid1 else 0)) (at (\ i. if i = vid1 then sol t $ vid1 else 0))" using bluh by auto have bigEx:"\s. s \ {0..t} \(\sola. sol 0 = sola 0 \ (\ta. (fst (?\ s), snd (?\ s)) = mk_v I (OVar vid1) (sola 0, b) (sola ta) \ 0 \ ta \ (sola solves_ode (\a. ODEs I vid1)) {0..ta} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sol 0, b) x))}))" subgoal for s apply(rule exI[where x=sol]) apply(rule conjI) subgoal by (rule refl) apply(rule exI[where x=s]) apply(rule conjI) subgoal by auto apply(rule conjI) subgoal by auto using sol using atLeastAtMost_iff atLeastatMost_subset_iff order_refl solves_ode_on_subset by (metis (no_types, lifting) subsetI) done have box':"\s. 
s \ {0..t} \ directional_derivative I (f1 fid2 vid1) (?\s s, ?\t s) \ directional_derivative I (f1 fid1 vid1) (?\s s, ?\t s)" subgoal for s using box apply simp apply (erule allE[where x="?\s s"]) apply (erule allE[where x="?\t s"]) using bigEx[of s] by auto done have dsafe1:"dsafe (f1 fid2 vid1)" unfolding f1_def by (auto intro: dsafe.intros) have dsafe2:"dsafe (f1 fid1 vid1)" unfolding f1_def by (auto intro: dsafe.intros) have agree1:"Vagree (sol 0, b) (?\ 0) (FVT (f1 fid2 vid1))" using mk_v_agree[of I "(OVar vid1)" "(sol 0, b)" "fst (?\ 0)"] unfolding f1_def Vagree_def expand_singleton apply auto by (metis (no_types, lifting) Compl_iff Vagree_def fst_conv mk_v_agree mk_xode.simps semBV.simps) have agree2:"Vagree (sol 0, b) (?\ 0) (FVT (f1 fid1 vid1))" using mk_v_agree[of I "(OVar vid1)" "(sol 0, b)" "fst (?\ 0)"] unfolding f1_def Vagree_def expand_singleton apply auto by (metis (no_types, lifting) Compl_iff Vagree_def fst_conv mk_v_agree mk_xode.simps semBV.simps) have sem_eq1:"dterm_sem I (f1 fid2 vid1) (sol 0, b) = dterm_sem I (f1 fid2 vid1) (?\ 0)" using coincidence_dterm[OF dsafe1 agree1] by auto then have sem_eq1':"sterm_sem I (f1 fid2 vid1) (sol 0) = sterm_sem I (f1 fid2 vid1) (?\s 0)" using dterm_sterm_dfree[OF free1, of I "sol 0" "b"] dterm_sterm_dfree[OF free1, of I "(?\s 0)" "snd (?\ 0)"] unfolding f1_def expand_singleton by auto have sem_eq2:"dterm_sem I (f1 fid1 vid1) (sol 0, b) = dterm_sem I (f1 fid1 vid1) (?\ 0)" using coincidence_dterm[OF dsafe2 agree2] by auto then have sem_eq2':"sterm_sem I (f1 fid1 vid1) (sol 0) = sterm_sem I (f1 fid1 vid1) (?\s 0)" using dterm_sterm_dfree[OF free2, of I "sol 0" "b"] dterm_sterm_dfree[OF free2, of I "(?\s 0)" "snd (?\ 0)"] unfolding f1_def expand_singleton by auto have good_interp':"\i x. (Functions I i has_derivative (THE f'. \x. (Functions I i has_derivative f' x) (at x)) x) (at x)" using good_interp unfolding is_interp_def by auto have chain : "\f f' g g' x s. (f has_derivative f') (at x within s) \ (g has_derivative g') (at (f x) within f ` s) \ (g \ f has_derivative g' \ f') (at x within s)" by(auto intro: derivative_intros) have sol1:"(sol solves_ode (\_. ODE_sem I (OVar vid1))) {0..t} {x. mk_v I (OVar vid1) (sol 0, b) x \ fml_sem I (Prop vid1 empty)}" using sol unfolding p1_def singleton_def empty_def by auto have FVTsub1:"vid1 \ ODE_vars I (OVar vid1) \ FVT ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) \ semBV I ((OVar vid1))" apply auto subgoal for x xa apply(cases "xa = vid1") by auto done have FVTsub2:"vid1 \ ODE_vars I (OVar vid1) \ FVT ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) \ semBV I ((OVar vid1))" apply auto subgoal for x xa apply(cases "xa = vid1") by auto done have osafe:"osafe (OVar vid1)" by auto have deriv1:"\s. vid1 \ ODE_vars I (OVar vid1) \ s \ ?int \ (?f1 has_derivative (?f1' s)) (at s within {0..t})" subgoal for s using rift_in_space_time[OF good_interp free1 osafe sol1 FVTsub1, of s] unfolding f1_def expand_singleton directional_derivative_def by blast done have deriv2:"\s. vid1 \ ODE_vars I (OVar vid1) \ s \ ?int \ (?f2 has_derivative (?f2' s)) (at s within {0..t})" subgoal for s using rift_in_space_time[OF good_interp free2 osafe sol1 FVTsub2, of s] unfolding f1_def expand_singleton directional_derivative_def by blast done have leq:"\s . 
s \ ?int \ ?f1' s 1 \ ?f2' s 1" subgoal for s using box'[of s] by (simp add: directional_derivative_def) done have preserve_agree1:"vid1 \ ODE_vars I (OVar vid1) \ VSagree (sol 0) (fst (mk_v I (OVar vid1) (sol 0, b) (sol t))) {vid1}" using mk_v_agree[of I "OVar vid1" "(sol 0, b)" "sol t"] unfolding Vagree_def VSagree_def by auto have preserve_coincide1: "vid1 \ ODE_vars I (OVar vid1) \ sterm_sem I (f1 fid2 vid1) (fst (mk_v I (OVar vid1) (sol 0, b) (sol t))) = sterm_sem I (f1 fid2 vid1) (sol 0)" using coincidence_sterm[of "(sol 0, b)" "(mk_v I (OVar vid1) (sol 0, b) (sol t))" "f1 fid2 vid1" I] preserve_agree1 unfolding VSagree_def Vagree_def f1_def by auto have preserve_coincide2: "vid1 \ ODE_vars I (OVar vid1) \ sterm_sem I (f1 fid1 vid1) (fst (mk_v I (OVar vid1) (sol 0, b) (sol t))) = sterm_sem I (f1 fid1 vid1) (sol 0)" using coincidence_sterm[of "(sol 0, b)" "(mk_v I (OVar vid1) (sol 0, b) (sol t))" "f1 fid1 vid1" I] preserve_agree1 unfolding VSagree_def Vagree_def f1_def by auto have "?f1 t \ ?f2 t" apply(cases "t = 0") subgoal using geq0' sem_eq1' sem_eq2' by auto subgoal apply(cases "vid1 \ ODE_vars I (OVar vid1)") subgoal apply (rule MVT'[OF deriv2 deriv1, of t]) (* 8 subgoals *) subgoal by auto subgoal by auto subgoal for s using deriv2[of s] using leq by auto using t leq geq0' sem_eq1' sem_eq2' by auto subgoal using geq0 using dterm_sterm_dfree[OF free1, of I "sol 0" "b"] using dterm_sterm_dfree[OF free2, of I "sol 0" "b"] using preserve_coincide1 preserve_coincide2 by(simp add: f1_def) done done then show " dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) \ dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) " using t dterm_sterm_dfree[OF free2, of I "?\s t" "snd (?\ t)"] dterm_sterm_dfree[OF free1, of I "?\s t" "snd (?\ t)"] by (simp add: f1_def) qed lemma DIGr_valid:"valid DIGraxiom" unfolding DIGraxiom_def apply(unfold DIGraxiom_def valid_def impl_sem iff_sem) apply(auto) (* 4 subgoals*) proof - fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume t:"0 \ t" and sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (empty i) (mk_v I ?ODE (sol 0, b) x))}" and notin:" \(Predicates I vid1 (\ i. dterm_sem I (empty i) (sol 0, b)))" have fsafe:"fsafe (Prop vid1 empty)" by (auto simp add: empty_def) from sol have "Predicates I vid1 (\ i. dterm_sem I (empty i) (?\ 0))" using t solves_ode_domainD[of sol "(\a. ODEs I vid1)" "{0..t}"] by auto then have incon:"Predicates I vid1 (\ i. dterm_sem I (empty i) ((sol 0, b)))" using coincidence_formula[OF fsafe Iagree_refl[of I], of "(sol 0, b)" "?\ 0"] unfolding Vagree_def by (auto simp add: empty_def) show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) < dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using notin incon by auto next fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume t:"0 \ t" and sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (empty i) (mk_v I ?ODE (sol 0, b) x))}" and notin:" \(Predicates I vid1 (\ i. dterm_sem I (empty i) (sol 0, b)))" have fsafe:"fsafe (Prop vid1 empty)" by (auto simp add: empty_def) from sol have "Predicates I vid1 (\ i. dterm_sem I (empty i) (?\ 0))" using t solves_ode_domainD[of sol "(\a. 
ODEs I vid1)" "{0..t}"] by auto then have incon:"Predicates I vid1 (\ i. dterm_sem I (empty i) ((sol 0, b)))" using coincidence_formula[OF fsafe Iagree_refl[of I], of "(sol 0, b)" "?\ 0"] unfolding Vagree_def by (auto simp add: empty_def) show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) < dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using notin incon by auto next fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume t:"0 \ t" assume sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sol 0, b) x))}" assume notin:"\ Predicates I vid1 (\ i. dterm_sem I (local.empty i) (sol 0, b))" have fsafe:"fsafe (Prop vid1 empty)" by (auto simp add: empty_def) from sol have "Predicates I vid1 (\ i. dterm_sem I (empty i) (?\ 0))" using t solves_ode_domainD[of sol "(\a. ODEs I vid1)" "{0..t}"] by auto then have incon:"Predicates I vid1 (\ i. dterm_sem I (empty i) ((sol 0, b)))" using coincidence_formula[OF fsafe Iagree_refl[of I], of "(sol 0, b)" "?\ 0"] unfolding Vagree_def by (auto simp add: empty_def) show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) < dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using incon notin by auto next fix I::"('sf,'sc,'sz) interp" and b aa ba and sol::"real \ 'sz simple_state" and t::real let ?ODE = "OVar vid1" let ?\ = "(\t. mk_v I (?ODE) (sol 0, b) (sol t))" assume good_interp:"is_interp I" assume aaba:"(aa, ba) = mk_v I (OVar vid1) (sol 0, b) (sol t)" assume t:"0 \ t" assume sol:"(sol solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sol 0, b) x))}" assume box:"\a ba. (\sola. sol 0 = sola 0 \ (\t. (a, ba) = mk_v I (OVar vid1) (sola 0, b) (sola t) \ 0 \ t \ (sola solves_ode (\a. ODEs I vid1)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sola 0, b) x))})) \ directional_derivative I (f1 fid2 vid1) (a, ba) \ directional_derivative I (f1 fid1 vid1) (a, ba)" assume geq0:"dterm_sem I (f1 fid2 vid1) (sol 0, b) < dterm_sem I (f1 fid1 vid1) (sol 0, b)" have free1:"dfree ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0))" by (auto intro: dfree.intros) have free2:"dfree ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))" by (auto intro: dfree.intros) from geq0 have geq0':"sterm_sem I (f1 fid2 vid1) (sol 0) < sterm_sem I (f1 fid1 vid1) (sol 0)" unfolding f1_def using dterm_sterm_dfree[OF free1, of I "sol 0" b] dterm_sterm_dfree[OF free2, of I "sol 0" b] by auto let ?\s = "\x. fst (?\ x)" let ?\t = "\x. snd (?\ x)" let ?df1 = "(\t. dterm_sem I (f1 fid2 vid1) (?\ t))" let ?f1 = "(\t. sterm_sem I (f1 fid2 vid1) (?\s t))" let ?f1' = "(\ s t'. t' * frechet I (f1 fid2 vid1) (?\s s) (?\t s))" have dfeq:"?df1 = ?f1" apply(rule ext) subgoal for t using dterm_sterm_dfree[OF free1, of I "?\s t" "snd (?\ t)"] unfolding f1_def expand_singleton by auto done have free3:"dfree (f1 fid2 vid1)" unfolding f1_def by (auto intro: dfree.intros) let ?df2 = "(\t. dterm_sem I (f1 fid1 vid1) (?\ t))" let ?f2 = "(\t. sterm_sem I (f1 fid1 vid1) (?\s t))" let ?f2' = "(\s t' . t' * frechet I (f1 fid1 vid1) (?\s s) (?\t s))" let ?int = "{0..t}" have bluh:"\x i. (Functions I i has_derivative (THE f'. \x. 
(Functions I i has_derivative f' x) (at x)) x) (at x)" using good_interp unfolding is_interp_def by auto have blah:"(Functions I fid2 has_derivative (THE f'. \x. (Functions I fid2 has_derivative f' x) (at x)) (\ i. if i = vid1 then sol t $ vid1 else 0)) (at (\ i. if i = vid1 then sol t $ vid1 else 0))" using bluh by auto have bigEx:"\s. s \ {0..t} \(\sola. sol 0 = sola 0 \ (\ta. (fst (?\ s), snd (?\ s)) = mk_v I (OVar vid1) (sola 0, b) (sola ta) \ 0 \ ta \ (sola solves_ode (\a. ODEs I vid1)) {0..ta} {x. Predicates I vid1 (\ i. dterm_sem I (local.empty i) (mk_v I (OVar vid1) (sol 0, b) x))}))" subgoal for s apply(rule exI[where x=sol]) apply(rule conjI) subgoal by (rule refl) apply(rule exI[where x=s]) apply(rule conjI) subgoal by auto apply(rule conjI) subgoal by auto using sol using atLeastAtMost_iff atLeastatMost_subset_iff order_refl solves_ode_on_subset by (metis (no_types, lifting) subsetI) done have box':"\s. s \ {0..t} \ directional_derivative I (f1 fid2 vid1) (?\s s, ?\t s) \ directional_derivative I (f1 fid1 vid1) (?\s s, ?\t s)" subgoal for s using box apply simp apply (erule allE[where x="?\s s"]) apply (erule allE[where x="?\t s"]) using bigEx[of s] by auto done have dsafe1:"dsafe (f1 fid2 vid1)" unfolding f1_def by (auto intro: dsafe.intros) have dsafe2:"dsafe (f1 fid1 vid1)" unfolding f1_def by (auto intro: dsafe.intros) have agree1:"Vagree (sol 0, b) (?\ 0) (FVT (f1 fid2 vid1))" using mk_v_agree[of I "(OVar vid1)" "(sol 0, b)" "fst (?\ 0)"] unfolding f1_def Vagree_def expand_singleton apply auto by (metis (no_types, lifting) Compl_iff Vagree_def fst_conv mk_v_agree mk_xode.simps semBV.simps) have agree2:"Vagree (sol 0, b) (?\ 0) (FVT (f1 fid1 vid1))" using mk_v_agree[of I "(OVar vid1)" "(sol 0, b)" "fst (?\ 0)"] unfolding f1_def Vagree_def expand_singleton apply auto by (metis (no_types, lifting) Compl_iff Vagree_def fst_conv mk_v_agree mk_xode.simps semBV.simps) have sem_eq1:"dterm_sem I (f1 fid2 vid1) (sol 0, b) = dterm_sem I (f1 fid2 vid1) (?\ 0)" using coincidence_dterm[OF dsafe1 agree1] by auto then have sem_eq1':"sterm_sem I (f1 fid2 vid1) (sol 0) = sterm_sem I (f1 fid2 vid1) (?\s 0)" using dterm_sterm_dfree[OF free1, of I "sol 0" "b"] dterm_sterm_dfree[OF free1, of I "(?\s 0)" "snd (?\ 0)"] unfolding f1_def expand_singleton by auto have sem_eq2:"dterm_sem I (f1 fid1 vid1) (sol 0, b) = dterm_sem I (f1 fid1 vid1) (?\ 0)" using coincidence_dterm[OF dsafe2 agree2] by auto then have sem_eq2':"sterm_sem I (f1 fid1 vid1) (sol 0) = sterm_sem I (f1 fid1 vid1) (?\s 0)" using dterm_sterm_dfree[OF free2, of I "sol 0" "b"] dterm_sterm_dfree[OF free2, of I "(?\s 0)" "snd (?\ 0)"] unfolding f1_def expand_singleton by auto have good_interp':"\i x. (Functions I i has_derivative (THE f'. \x. (Functions I i has_derivative f' x) (at x)) x) (at x)" using good_interp unfolding is_interp_def by auto have chain : "\f f' g g' x s. (f has_derivative f') (at x within s) \ (g has_derivative g') (at (f x) within f ` s) \ (g \ f has_derivative g' \ f') (at x within s)" by(auto intro: derivative_intros) have sol1:"(sol solves_ode (\_. ODE_sem I (OVar vid1))) {0..t} {x. mk_v I (OVar vid1) (sol 0, b) x \ fml_sem I (Prop vid1 empty)}" using sol unfolding p1_def singleton_def empty_def by auto have FVTsub1:"vid1 \ ODE_vars I (OVar vid1) \ FVT ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) \ semBV I ((OVar vid1))" apply auto subgoal for x xa apply(cases "xa = vid1") by auto done have FVTsub2:"vid1 \ ODE_vars I (OVar vid1) \ FVT ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0)) \ semBV I ((OVar vid1))" apply auto subgoal for x xa apply(cases "xa = vid1") by auto done have osafe:"osafe (OVar vid1)" by auto have deriv1:"\s. vid1 \ ODE_vars I (OVar vid1) \ s \ ?int \ (?f1 has_derivative (?f1' s)) (at s within {0..t})" subgoal for s using rift_in_space_time[OF good_interp free1 osafe sol1 FVTsub1, of s] unfolding f1_def expand_singleton directional_derivative_def by blast done have deriv2:"\s. vid1 \ ODE_vars I (OVar vid1) \ s \ ?int \ (?f2 has_derivative (?f2' s)) (at s within {0..t})" subgoal for s using rift_in_space_time[OF good_interp free2 osafe sol1 FVTsub2, of s] unfolding f1_def expand_singleton directional_derivative_def by blast done have leq:"\s . s \ ?int \ ?f1' s 1 \ ?f2' s 1" subgoal for s using box'[of s] by (simp add: directional_derivative_def) done have preserve_agree1:"vid1 \ ODE_vars I (OVar vid1) \ VSagree (sol 0) (fst (mk_v I (OVar vid1) (sol 0, b) (sol t))) {vid1}" using mk_v_agree[of I "OVar vid1" "(sol 0, b)" "sol t"] unfolding Vagree_def VSagree_def by auto have preserve_coincide1: "vid1 \ ODE_vars I (OVar vid1) \ sterm_sem I (f1 fid2 vid1) (fst (mk_v I (OVar vid1) (sol 0, b) (sol t))) = sterm_sem I (f1 fid2 vid1) (sol 0)" using coincidence_sterm[of "(sol 0, b)" "(mk_v I (OVar vid1) (sol 0, b) (sol t))" "f1 fid2 vid1" I] preserve_agree1 unfolding VSagree_def Vagree_def f1_def by auto have preserve_coincide2: "vid1 \ ODE_vars I (OVar vid1) \ sterm_sem I (f1 fid1 vid1) (fst (mk_v I (OVar vid1) (sol 0, b) (sol t))) = sterm_sem I (f1 fid1 vid1) (sol 0)" using coincidence_sterm[of "(sol 0, b)" "(mk_v I (OVar vid1) (sol 0, b) (sol t))" "f1 fid1 vid1" I] preserve_agree1 unfolding VSagree_def Vagree_def f1_def by auto have "?f1 t < ?f2 t" apply(cases "t = 0") subgoal using geq0' sem_eq1' sem_eq2' by auto subgoal apply(cases "vid1 \ ODE_vars I (OVar vid1)") subgoal apply (rule MVT'_gr[OF deriv2 deriv1, of t]) subgoal by auto subgoal by auto subgoal for s using deriv2[of s] using leq by auto using t leq geq0' sem_eq1' sem_eq2' by auto subgoal using geq0 using dterm_sterm_dfree[OF free1, of I "sol 0" "b"] using dterm_sterm_dfree[OF free2, of I "sol 0" "b"] using preserve_coincide1 preserve_coincide2 by(simp add: f1_def) done done then show "dterm_sem I (f1 fid2 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t)) < dterm_sem I (f1 fid1 vid1) (mk_v I (OVar vid1) (sol 0, b) (sol t))" using t dterm_sterm_dfree[OF free2, of I "?\s t" "snd (?\ t)"] dterm_sterm_dfree[OF free1, of I "?\s t" "snd (?\ t)"] using geq0 f1_def by (simp add: f1_def) qed lemma DG_valid:"valid DGaxiom" proof - have osafe:"osafe (OSing vid1 (f1 fid1 vid1))" by(auto simp add: osafe_Sing dfree_Fun dfree_Const f1_def expand_singleton) have fsafe:"fsafe (p1 vid1 vid1)" by(auto simp add: p1_def dfree_Const) have osafe2:"osafe (OProd (OSing vid1 (f1 fid1 vid1)) (OSing vid2 (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1))))" by(auto simp add: f1_def expand_singleton osafe.intros dfree.intros vne12) note sem = ode_alt_sem[OF osafe fsafe] note sem2 = ode_alt_sem[OF osafe2 fsafe] have p2safe:"fsafe (p1 vid2 vid1)" by(auto simp add: p1_def dfree_Const) show "valid DGaxiom" apply(auto simp del: prog_sem.simps(8) simp add: DGaxiom_def valid_def sem sem2) apply(rule exI[where x=0], auto simp add: f1_def p1_def expand_singleton) subgoal for I a b aa ba sol t proof - assume good_interp:"is_interp I" assume " \aa ba. (\sol t. (aa, ba) = mk_v I (OSing vid1 ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol t) \ 0 \ t \ (sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) x))} \ VSagree (sol 0) a {uu. uu = vid1 \ Inl uu \ Inl ` {x. \xa. Inl x \ FVT (if xa = vid1 then trm.Var vid1 else Const 0)} \ (\x. Inl uu \ FVT (if x = vid1 then trm.Var vid1 else Const 0))}) \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (aa, ba))" then have bigAll:" \aa ba. (\sol t. (aa, ba) = mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol t) \ 0 \ t \ (sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) x))} \ VSagree (sol 0) a {uu. uu = vid1 \ (\x. Inl uu \ FVT (if x = vid1 then trm.Var vid1 else Const 0))}) \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (aa, ba))" by (auto) assume aaba:"(aa, ba) = mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol t)" assume t:"0 \ t" assume sol:" (sol solves_ode (\a b. (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) b else 0))) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) x))}" assume VSag:"VSagree (sol 0) (\ y. if vid2 = y then 0 else fst (a, b) $ y) {x. x = vid2 \ x = vid1 \ x = vid2 \ x = vid1 \ Inl x \ Inl ` {x. x = vid2 \ x = vid1} \ x = vid1}" let ?sol = "(\t. \ i. if i = vid1 then sol t $ vid1 else 0)" let ?aaba' = "mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (?sol t)" from bigAll[of "fst ?aaba'" "snd ?aaba'"] have bigEx:"(\sol t. ?aaba' = mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol t) \ 0 \ t \ (sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) x))} \ VSagree (sol 0) a {uu. uu = vid1 \ (\x. Inl uu \ FVT (if x = vid1 then trm.Var vid1 else Const 0))}) \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (?aaba'))" by simp have pre1:"?aaba' = mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (?sol t)" by (rule refl) have agreeL:"\s. fst (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. 
if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol s)) $ vid1 = sol s $ vid1" subgoal for s using mk_v_agree[of I "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0)))))" "(\ y. if vid2 = y then 0 else fst (a, b) $ y, b)" "(sol s)"] unfolding Vagree_def by auto done have agreeR:"\s. fst (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (\ i. if i = vid1 then sol s $ vid1 else 0)) $ vid1 = sol s $ vid1" subgoal for s using mk_v_agree[of "I" "(OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))" "(a, b)" "(\ i. if i = vid1 then sol s $ vid1 else 0)"] unfolding Vagree_def by auto done have FV:"(FVF (p1 vid1 vid1)) = {Inl vid1}" unfolding p1_def expand_singleton apply auto subgoal for x xa apply(cases "xa = vid1") by auto done have agree:"\s. Vagree (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol s)) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (\ i. if i = vid1 then sol s $ vid1 else 0)) (FVF (p1 vid1 vid1))" using agreeR agreeL unfolding Vagree_def FV by auto note con_sem_eq = coincidence_formula[OF fsafe Iagree_refl agree] have constraint:"\s. 0 \ s \ s \ t \ Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (\ i. if i = vid1 then sol s $ vid1 else 0)))" using sol apply simp apply(drule solves_odeD(2)) apply auto[1] subgoal for s using con_sem_eq by (auto simp add: p1_def expand_singleton) done have eta:"sol = (\t. \ i. sol t $ i)" by (rule ext, rule vec_extensionality, simp) have yet_another_eq:"\x. (\xa. xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol x) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (sol x) else 0))) = (\xa. (\ i. (xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol x) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (sol x) else 0))) $ i))" subgoal for x by (rule ext, rule vec_extensionality, simp) done have sol_deriv:"\x. x \{0..t} \ (sol has_derivative (\xa. xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol x) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (sol x) else 0)))) (at x within {0..t})" using sol apply simp apply(drule solves_odeD(1)) unfolding has_vderiv_on_def has_vector_derivative_def by auto then have sol_deriv:"\x. x \ {0..t} \ ((\t. \ i. sol t $ i) has_derivative (\xa. (\ i. (xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol x) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (sol x) else 0))) $ i))) (at x within {0..t})" using yet_another_eq eta by auto have sol_deriv1: "\x. x \ {0..t} \ ((\t. sol t $ vid1) has_derivative (\xa. (xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol x) else 0) + (\ i. 
if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (sol x) else 0)) $ vid1))) (at x within {0..t})" subgoal for s (* I heard higher-order unification is hard.*) apply(rule has_derivative_proj[of "(\ i t. sol t $ i)" "(\j xa. (xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol s) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (sol s) else 0)) $ j))" "at s within {0..t}""vid1"]) using sol_deriv[of s] by auto done have hmm:"\s. (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (sol s)) = (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (\ i. if i = vid1 then sol s $ vid1 else 0))" by(rule vec_extensionality, auto) have aha:"\s. (\xa. xa * sterm_sem I (f1 fid1 vid1) (sol s)) = (\xa. xa * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0))" subgoal for s apply(rule ext) subgoal for xa using hmm by (auto simp add: f1_def) done done let ?sol' = "(\s. (\xa. \ i. if i = vid1 then xa * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0) else 0))" let ?project_me_plz = "(\t. (\ i. if i = vid1 then ?sol t $ vid1 else 0))" have sol_deriv_eq:"\s. s \{0..t} \ ((\t. (\ i. if i = vid1 then ?sol t $ vid1 else 0)) has_derivative ?sol' s) (at s within {0..t})" subgoal for s apply(rule has_derivative_vec) subgoal for i apply (cases "i = vid1", cases "i = vid2", auto) using vne12 apply simp using sol_deriv1[of s] using aha by auto done done have yup:"(\t. (\ i. if i = vid1 then ?sol t $ vid1 else 0) $ vid1) = (\t. sol t $ vid1)" by(rule ext, auto) have maybe:"\s. (\xa. xa * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0)) = (\xa. (\ i. if i = vid1 then xa * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0) else 0) $ vid1) " by(rule ext, auto) have almost:"(\x. if vid1 = vid1 then (\ i. if i = vid1 then sol x $ vid1 else 0) $ vid1 else 0) = (\x. (\ i. if i = vid1 then sol x $ vid1 else 0) $ vid1)" by(rule ext, auto) have almost':"\s. (\h. if vid1 = vid1 then h * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0) else 0) = (\h. h * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0))" by(rule ext, auto) have deriv':" \x. x \ {0..t} \ ((\t. \ i. if i = vid1 then sol t $ vid1 else 0) has_derivative (\xa. (\ i. xa *\<^sub>R (if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol x $ vid1 else 0) else 0)))) (at x within {0..t})" subgoal for s apply(rule has_derivative_vec) subgoal for i apply(cases "i = vid1") prefer 2 subgoal by auto apply auto using has_derivative_proj[OF sol_deriv_eq[of s], of vid1] using yup maybe[of s] almost almost'[of s] by fastforce done done have derEq:"\s. (\xa. (\ i. xa *\<^sub>R (if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0) else 0))) = (\xa. xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol s $ vid1 else 0) else 0))" subgoal for s apply (rule ext, rule vec_extensionality) by auto done have "\x. x \ {0..t} \ ((\t. \ i. if i = vid1 then sol t $ vid1 else 0) has_derivative (\xa. xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid1 then sol x $ vid1 else 0) else 0))) (at x within {0..t})" subgoal for s using deriv'[of s] derEq[of s] by auto done then have deriv:"((\t. \ i. if i = vid1 then sol t $ vid1 else 0) has_vderiv_on (\t. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. 
if i = vid1 then sol t $ vid1 else 0) else 0)) {0..t}" unfolding has_vderiv_on_def has_vector_derivative_def by auto have pre2:"(?sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) x))}" apply(rule solves_odeI) subgoal by (rule deriv) subgoal for s using constraint by auto done have pre3:"VSagree (?sol 0) a {u. u = vid1 \ (\x. Inl u \ FVT (if x = vid1 then trm.Var vid1 else Const 0))}" using vne12 VSag unfolding VSagree_def by simp have bigPre:"(\sol t. ?aaba' = mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then Var vid1 else Const 0))) (a, b) (sol t) \ 0 \ t \ (sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then Var vid1 else Const 0))) (a, b) x))} \ VSagree (sol 0) a {u. u = vid1 \ (\x. Inl u \ FVT (if x = vid1 then Var vid1 else Const 0))})" apply(rule exI[where x="?sol"]) apply(rule exI[where x=t]) apply(rule conjI) apply(rule pre1) apply(rule conjI) apply(rule t) apply(rule conjI) apply(rule pre2) by(rule pre3) have pred2:"Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) ?aaba')" using bigEx bigPre by auto then have pred2':"?aaba' \ fml_sem I (p1 vid2 vid1)" unfolding p1_def expand_singleton by auto let ?res_state = "(mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol t))" have aabaX:"(fst ?aaba') $ vid1 = sol t $ vid1" using aaba mk_v_agree[of "I" "(OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))" "(a, b)" "(?sol t)"] proof - assume " Vagree (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (\ i. if i = vid1 then sol t $ vid1 else 0)) (a, b) (- semBV I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))) \ Vagree (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (\ i. if i = vid1 then sol t $ vid1 else 0)) (mk_xode I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (\ i. if i = vid1 then sol t $ vid1 else 0)) (semBV I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))))" then have ag:" Vagree (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (?sol t)) (mk_xode I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (?sol t)) (semBV I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))))" by auto have sembv:"(semBV I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))) = {Inl vid1, Inr vid1}" by auto have sub:"{Inl vid1} \ {Inl vid1, Inr vid1}" by auto have ag':"Vagree (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (?sol t)) (mk_xode I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (?sol t)) {Inl vid1}" using ag agree_sub[OF sub] sembv by auto then have eq1:"fst (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (?sol t)) $ vid1 = fst (mk_xode I (OSing vid1 ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0))) (?sol t)) $ vid1" unfolding Vagree_def by auto moreover have "... = sol t $ vid1" by auto ultimately show ?thesis by auto qed have res_stateX:"(fst ?res_state) $ vid1 = sol t $ vid1" using mk_v_agree[of I "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0)))))" "(\ y. if vid2 = y then 0 else fst (a, b) $ y, b)" "(sol t)"] proof - assume "Vagree (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol t)) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (- semBV I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0)))))) \ Vagree (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol t)) (mk_xode I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (sol t)) (semBV I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))))" then have ag:" Vagree (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol t)) (mk_xode I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (sol t)) (semBV I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))))" by auto have sembv:"(semBV I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0)))))) = {Inl vid1, Inr vid1, Inl vid2, Inr vid2}" by auto have sub:"{Inl vid1} \ {Inl vid1, Inr vid1, Inl vid2, Inr vid2}" by auto have ag':"Vagree (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. 
if vid2 = y then 0 else fst (a, b) $ y, b) (sol t)) (mk_xode I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (sol t)) {Inl vid1}" using ag sembv agree_sub[OF sub] by auto then have "fst ?res_state $ vid1 = fst ((mk_xode I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (sol t))) $ vid1" unfolding Vagree_def by blast moreover have "... = sol t $ vid1" by auto ultimately show "?thesis" by linarith qed have agree:"Vagree ?aaba' (?res_state) (FVF (p1 vid2 vid1))" unfolding p1_def Vagree_def using aabaX res_stateX by auto have fml_sem_eq:"(?res_state \ fml_sem I (p1 vid2 vid1)) = (?aaba' \ fml_sem I (p1 vid2 vid1))" using coincidence_formula[OF p2safe Iagree_refl agree, of I] by auto then show "Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then 0 else fst (a, b) $ y, b) (sol t)))" using pred2 unfolding p1_def expand_singleton by auto qed subgoal for I a b r aa ba sol t proof - assume good_interp:"is_interp I" assume bigAll:" \aa ba. (\sol t. (aa, ba) = mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (sol t) \ 0 \ t \ (sol solves_ode (\a b. (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) b else 0))) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) x))} \ VSagree (sol 0) (\ y. if vid2 = y then r else fst (a, b) $ y) {uu. uu = vid2 \ uu = vid1 \ uu = vid2 \ uu = vid1 \ Inl uu \ Inl ` ({x. \xa. Inl x \ FVT (if xa = vid1 then trm.Var vid1 else Const 0)} \ {x. x = vid2 \ (\xa. Inl x \ FVT (if xa = vid1 then trm.Var vid1 else Const 0))}) \ (\x. Inl uu \ FVT (if x = vid1 then trm.Var vid1 else Const 0))}) \ Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (aa, ba))" assume aaba:"(aa, ba) = mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol t)" assume t:"0 \ t" assume sol:"(sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) x))}" assume VSA:"VSagree (sol 0) a {uu. uu = vid1 \ Inl uu \ Inl ` {x. \xa. Inl x \ FVT (if xa = vid1 then trm.Var vid1 else Const 0)} \ (\x. 
Inl uu \ FVT (if x = vid1 then trm.Var vid1 else Const 0))}" let ?xode = "(\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)" let ?xconstraint = UNIV let ?ivl = "ll_on_open.existence_ivl {0 .. t} ?xode ?xconstraint 0 (sol 0)" have freef1:"dfree ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))" by(auto simp add: dfree_Fun dfree_Const) have simple_term_inverse':"\\. dfree \ \ raw_term (simple_term \) = \" using simple_term_inverse by auto have old_lipschitz:"local_lipschitz (UNIV::real set) UNIV (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)" apply(rule c1_implies_local_lipschitz[where f'="(\ (t,b). blinfun_vec(\ i. if i = vid1 then blin_frechet (good_interp I) (simple_term (Function fid1 (\ i. if i = vid1 then Var vid1 else Const 0))) b else Blinfun(\ _. 0)))"]) apply auto subgoal for x apply(rule has_derivative_vec) subgoal for i apply(auto simp add: bounded_linear_Blinfun_apply good_interp_inverse good_interp) apply(auto simp add: simple_term_inverse'[OF freef1]) apply(cases "i = vid1") apply(auto simp add: f1_def expand_singleton) proof - let ?h = "(\b. Functions I fid1 (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) b))" let ?h' = "(\b'. FunctionFrechet I fid1 (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) x) (\ i. frechet I (if i = vid1 then trm.Var vid1 else Const 0) x b'))" let ?f = "(\ b. (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) b))" let ?f' = "(\ b'. (\ i. frechet I (if i = vid1 then trm.Var vid1 else Const 0) x b'))" let ?g = "Functions I fid1" let ?g'= "FunctionFrechet I fid1 (?f x)" have heq:"?h = ?g \ ?f" by(rule ext, auto) have heq':"?h' = ?g' \ ?f'" by(rule ext, auto) have fderiv:"(?f has_derivative ?f') (at x)" apply(rule has_derivative_vec) by (auto simp add: svar_deriv axis_def) have gderiv:"(?g has_derivative ?g') (at (?f x))" using good_interp unfolding is_interp_def by blast have gfderiv: "((?g \ ?f) has_derivative(?g' \ ?f')) (at x)" using fderiv gderiv diff_chain_at by blast have boring_eq:"(\b. Functions I fid1 (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) b)) = sterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))" by(rule ext, auto) have "(?h has_derivative ?h') (at x)" using gfderiv heq heq' by auto then show "(sterm_sem I ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)) has_derivative (\v'. (THE f'. \x. (Functions I fid1 has_derivative f' x) (at x)) (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) x) (\ i. frechet I (if i = vid1 then trm.Var vid1 else Const 0) x v'))) (at x)" using boring_eq by auto qed done proof - have the_thing:"continuous_on (UNIV::('sz Rvec set)) (\b. blinfun_vec (\i. if i = vid1 then blin_frechet (good_interp I) (simple_term ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) b else Blinfun (\_. 0)))" apply(rule continuous_blinfun_vec') subgoal for i apply(cases "i = vid1") apply(auto) using frechet_continuous[OF good_interp freef1] by (auto simp add: continuous_on_const) done have another_cont:"continuous_on (UNIV) (\x. blinfun_vec (\i. if i = vid1 then blin_frechet (good_interp I) (simple_term ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (snd x) else Blinfun (\_. 0)))" apply(rule continuous_on_compose2[of UNIV "(\b. blinfun_vec (\i. if i = vid1 then blin_frechet (good_interp I) (simple_term ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) b else Blinfun (\_. 0)))"]) apply(rule the_thing) by (auto intro!: continuous_intros) have ext:"(\x. 
case x of (t, b) \ blinfun_vec (\i. if i = vid1 then blin_frechet (good_interp I) (simple_term ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) b else Blinfun (\_. 0))) =(\x. blinfun_vec (\i. if i = vid1 then blin_frechet (good_interp I) (simple_term ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (snd x) else Blinfun (\_. 0))) " apply(rule ext, auto) by (metis snd_conv) then show "continuous_on (UNIV) (\x. case x of (t, b) \ blinfun_vec (\i. if i = vid1 then blin_frechet (good_interp I) (simple_term ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) b else Blinfun (\_. 0)))" using another_cont by (simp add: another_cont local.ext) qed have old_continuous:" \x. x \ UNIV \ continuous_on UNIV (\t. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) x else 0)" by(rule continuous_on_const) interpret ll_old: ll_on_open_it "UNIV" ?xode ?xconstraint 0 apply(standard) subgoal by auto prefer 3 subgoal by auto prefer 3 subgoal by auto apply(rule old_lipschitz) by (rule old_continuous) let ?ivl = "(ll_old.existence_ivl 0 (sol 0))" let ?flow = "ll_old.flow 0 (sol 0)" have tclosed:"{0..t} = {0--t}" using t real_Icc_closed_segment by auto have "(sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0..t} UNIV" apply(rule solves_ode_supset_range) apply(rule sol) by auto then have sol':"(sol solves_ode (\a b. \ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0)) {0--t} UNIV" using tclosed by auto have sub:"{0--t} \ ll_old.existence_ivl 0 (sol 0)" apply(rule ll_old.closed_segment_subset_existence_ivl) apply(rule ll_old.existence_ivl_maximal_segment) apply(rule sol') apply(rule refl) by auto have usol_old:"(?flow usolves_ode ?xode from 0) ?ivl UNIV" by(rule ll_old.flow_usolves_ode, auto) have sol_old:"(ll_old.flow 0 (sol 0) solves_ode ?xode) ?ivl UNIV" by(rule ll_old.flow_solves_ode, auto) have another_sub:"\s. s \ {0..t} \ {s--0} \ {0..t}" unfolding closed_segment_def apply auto by (metis diff_0_right diff_left_mono mult.commute mult_left_le order.trans) have sol_eq_flow:"\s. s \ {0..t} \ sol s = ?flow s" using usol_old apply simp apply(drule usolves_odeD(4)) (* 7 subgoals*) apply auto subgoal for s x proof - assume xs0:"x \ {s--0}" assume s0:"0 \ s" and st: "s \ t" have "{s--0} \ {0..t}" using another_sub[of s] s0 st by auto then have "x \ {0..t}" using xs0 by auto then have "x \ {0--t}" using tclosed by auto then show "x \ ll_old.existence_ivl 0 (sol 0)" using sub by auto qed apply(rule solves_ode_subset) using sol' apply auto[1] subgoal for s proof - assume s0:"0 \ s" and st:"s \ t" show "{s--0} \ {0--t}" using tclosed unfolding closed_segment using s0 st using another_sub intervalE by blast qed done have sol_deriv_orig:"\s. s\?ivl \ (?flow has_derivative (\xa. xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0))) (at s within ?ivl)" using sol_old apply simp apply(drule solves_odeD(1)) by (auto simp add: has_vderiv_on_def has_vector_derivative_def) have sol_eta:"(\t. \ i. ?flow t $ i) = ?flow" by(rule ext, rule vec_extensionality, auto) have sol_deriv_eq1:"\s i. (\xa. xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)) = (\xa. \ i. xa * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0))" by(rule ext, rule vec_extensionality, auto) have sol_deriv_proj:"\s i. s\?ivl \ ((\t. ?flow t $ i) has_derivative (\xa. (xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)) $ i)) (at s within ?ivl)" subgoal for s i apply(rule has_derivative_proj[of "(\ i t. 
?flow t $ i)" "(\ i t'. (t' *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)) $ i)" "(at s within ?ivl)" "i"]) using sol_deriv_orig[of s] sol_eta sol_deriv_eq1 by auto done have sol_deriv_eq2:"\s i. (\xa. xa * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)) = (\xa. (xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)) $ i)" by(rule ext, auto) have sol_deriv_proj':"\s i. s\?ivl \ ((\t. ?flow t $ i) has_derivative (\xa. xa * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0))) (at s within ?ivl)" subgoal for s i using sol_deriv_proj[of s i] sol_deriv_eq2[of i s] by metis done have sol_deriv_proj_vid1:"\s. s\?ivl \ ((\t. ?flow t $ vid1) has_derivative (\xa. xa * (sterm_sem I (f1 fid1 vid1) (?flow s)))) (at s within ?ivl)" subgoal for s using sol_deriv_proj'[of s vid1] by auto done have deriv1_args:"\s. s \ ?ivl \ ((\ t. (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (?flow t))) has_derivative ((\ t'. \ i . t' * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)))) (at s within ?ivl)" apply(rule has_derivative_vec) by (auto simp add: sol_deriv_proj_vid1) have con_fid:"\fid. continuous_on ?ivl (\x. sterm_sem I (f1 fid vid1) (?flow x))" subgoal for fid apply(rule has_derivative_continuous_on[of "?ivl" "(\x. sterm_sem I (f1 fid vid1) (?flow x))" "(\t t'. FunctionFrechet I fid (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (?flow t)) (\ i . t' * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow t) else 0)))"]) proof - fix s assume ivl:"s \ ?ivl" let ?h = "(\x. sterm_sem I (f1 fid vid1) (?flow x))" let ?g = "Functions I fid" let ?f = "(\x. (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (?flow x)))" let ?h' = "(\t'. FunctionFrechet I fid (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (?flow s)) (\ i. t' * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)))" let ?g' = "FunctionFrechet I fid (?f s)" let ?f' = "(\ t'. \ i . t' * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0))" have heq:"?h = ?g \ ?f" unfolding comp_def f1_def expand_singleton by auto have heq':"?h' = ?g' \ ?f'" unfolding comp_def by auto have fderiv:"(?f has_derivative ?f') (at s within ?ivl)" using deriv1_args[OF ivl] by auto have gderiv:"(?g has_derivative ?g') (at (?f s) within (?f ` ?ivl))" using good_interp unfolding is_interp_def using has_derivative_subset by blast have gfderiv:"((?g \ ?f) has_derivative (?g' \ ?f')) (at s within ?ivl)" using fderiv gderiv diff_chain_within by blast show "((\x. sterm_sem I (f1 fid vid1) (?flow x)) has_derivative (\t'. FunctionFrechet I fid (\ i. sterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (?flow s)) (\ i. t' * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (?flow s) else 0)))) (at s within ?ivl)" using heq heq' gfderiv by auto qed done have con:"\x. continuous_on (?ivl) (\t. x * sterm_sem I (f1 fid2 vid1) (?flow t) + sterm_sem I (f1 fid3 vid1) (?flow t))" apply(rule continuous_on_add) apply(rule continuous_on_mult_left) apply(rule con_fid[of fid2]) by(rule con_fid[of fid3]) let ?axis = "(\ i. Blinfun(axis i))" have bounded_linear_deriv:"\t. bounded_linear (\y' . y' *\<^sub>R sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t))" using bounded_linear_scaleR_left by blast have ll:"local_lipschitz (ll_old.existence_ivl 0 (sol 0)) UNIV (\t y. y * sterm_sem I (f1 fid2 vid1) (?flow t) + sterm_sem I (f1 fid3 vid1) (?flow t))" apply(rule c1_implies_local_lipschitz[where f'="(\ (t,y). Blinfun(\y' . 
y' *\<^sub>R sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t)))"]) apply auto subgoal for t x apply(rule has_derivative_add_const) proof - have deriv:"((\x. x * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t)) has_derivative (\x. x * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t))) (at x)" by(auto intro: derivative_eq_intros) have eq:"(\x. x * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t)) = blinfun_apply (Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t)))" apply(rule ext) using bounded_linear_deriv[of t] by (auto simp add: bounded_linear_Blinfun_apply) show "((\x. x * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t)) has_derivative blinfun_apply (Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t)))) (at x)" using deriv eq by auto qed apply(auto intro: continuous_intros simp add: split_beta') proof - have bounded_linear:"\x. bounded_linear (\y'. y' * sterm_sem I (f1 fid2 vid1) x)" by (simp add: bounded_linear_mult_left) have eq:"(\x. Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) x)) = (\x. (sterm_sem I (f1 fid2 vid1) x) *\<^sub>R id_blinfun)" apply(rule ext, rule blinfun_eqI) subgoal for x i using bounded_linear[of x] apply(auto simp add: bounded_linear_Blinfun_apply) by (simp add: blinfun.scaleR_left) done have conFlow:"continuous_on (ll_old.existence_ivl 0 (sol 0)) (ll_old.flow 0 (sol 0))" using ll_old.general.flow_continuous_on by blast have conF':"continuous_on (ll_old.flow 0 (sol 0) ` ll_old.existence_ivl 0 (sol 0)) (\x. (sterm_sem I (f1 fid2 vid1) x) *\<^sub>R id_blinfun)" apply(rule continuous_on_scaleR) apply(auto intro: continuous_intros) apply(rule sterm_continuous') apply(rule good_interp) by(auto simp add: f1_def intro: dfree.intros) have conF:"continuous_on (ll_old.flow 0 (sol 0) ` ll_old.existence_ivl 0 (sol 0)) (\x. Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) x))" apply(rule continuous_on_compose2[of "UNIV" "(\x. Blinfun (\y'. y' * x))" "(ll_old.flow 0 (sol 0) ` ll_old.existence_ivl 0 (sol 0))" "sterm_sem I (f1 fid2 vid1)"]) subgoal by (metis blinfun_mult_left.abs_eq bounded_linear_blinfun_mult_left continuous_on_eq linear_continuous_on) apply(rule sterm_continuous') apply(rule good_interp) by(auto simp add: f1_def intro: dfree.intros) show "continuous_on (ll_old.existence_ivl 0 (sol 0) \ UNIV) (\x. Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) (fst x))))" apply(rule continuous_on_compose2[of "ll_old.existence_ivl 0 (sol 0)" "(\x. Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) x)))" "(ll_old.existence_ivl 0 (sol 0) \ UNIV)" "fst"]) apply(rule continuous_on_compose2[of "(ll_old.flow 0 (sol 0) ` ll_old.existence_ivl 0 (sol 0))" "(\x. Blinfun (\y'. y' * sterm_sem I (f1 fid2 vid1) x))" "(ll_old.existence_ivl 0 (sol 0))" "(ll_old.flow 0 (sol 0))"]) using conF conFlow by (auto intro!: continuous_intros) qed let ?ivl = "ll_old.existence_ivl 0 (sol 0)" \ \Construct solution to ODE for \y'\ here:\ let ?yode = "(\t y. y * sterm_sem I (f1 fid2 vid1) (?flow t) + sterm_sem I (f1 fid3 vid1) (?flow t))" let ?ysol0 = r interpret ll_new: ll_on_open_it "?ivl" "?yode" "UNIV" 0 apply(standard) apply(auto) apply(rule ll) by(rule con) have sol_new:"(ll_new.flow 0 r solves_ode ?yode) (ll_new.existence_ivl 0 r) UNIV" by(rule ll_new.flow_solves_ode, auto) have more_lipschitz:"\tm tM. tm \ ll_old.existence_ivl 0 (sol 0) \ tM \ ll_old.existence_ivl 0 (sol 0) \ \M L. \t\{tm..tM}. \x. 
\x * sterm_sem I (f1 fid2 vid1) (?flow t) + sterm_sem I (f1 fid3 vid1) (?flow t)\ \ M + L * \x\" proof - fix tm tM assume tm:"tm \ ll_old.existence_ivl 0 (sol 0)" assume tM:"tM \ ll_old.existence_ivl 0 (sol 0)" let ?f2 = "(\t. sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) t))" let ?f3 = "(\t. sterm_sem I (f1 fid3 vid1) (ll_old.flow 0 (sol 0) t))" let ?boundLP = "(\L t . (tm \ t \ t \ tM \ \?f2 t\ \ L))" let ?boundL = "(SOME L. (\t. ?boundLP L t))" have compactT:"compact {tm..tM}" by auto have sub:"{tm..tM} \ ll_old.existence_ivl 0 (sol 0)" by (metis atLeastatMost_empty_iff empty_subsetI ll_old.general.segment_subset_existence_ivl real_Icc_closed_segment tM tm) let ?f2abs = "(\x. abs(?f2 x))" have neg_compact:"\S::real set. compact S \ compact ((\x. -x) ` S)" by(rule compact_continuous_image, auto intro: continuous_intros) have compactf2:"compact (?f2 ` {tm..tM})" apply(rule compact_continuous_image) apply(rule continuous_on_compose2[of UNIV "sterm_sem I (f1 fid2 vid1)" "{tm..tM}" "ll_old.flow 0 (sol 0)"]) apply(rule sterm_continuous) apply(rule good_interp) subgoal by (auto intro: dfree.intros simp add: f1_def) apply(rule continuous_on_subset) prefer 2 apply (rule sub) subgoal using ll_old.general.flow_continuous_on by blast by auto then have boundedf2:"bounded (?f2 ` {tm..tM})" using compact_imp_bounded by auto then have boundedf2neg:"bounded ((\x. -x) ` ?f2 ` {tm..tM})" using compact_imp_bounded neg_compact by auto then have bdd_above_f2neg:"bdd_above ((\x. -x) ` ?f2 ` {tm..tM})" by (rule bounded_imp_bdd_above) then have bdd_above_f2:"bdd_above ( ?f2 ` {tm..tM})" using bounded_imp_bdd_above boundedf2 by auto have bdd_above_f2_abs:"bdd_above (abs ` ?f2 ` {tm..tM})" using bdd_above_f2neg bdd_above_f2 unfolding bdd_above_def apply auto subgoal for M1 M2 apply(rule exI[where x="max M1 M2"]) by fastforce done then have theBound:"\L. (\t. ?boundLP L t)" unfolding bdd_above_def norm_conv_dist by (auto simp add: Ball_def Bex_def norm_conv_dist image_iff norm_bcontfun_def dist_blinfun_def) then have boundLP:"\t. ?boundLP (?boundL) t" using someI[of "(\ L. \t. ?boundLP L t)"] by blast let ?boundMP = "(\M t. (tm \ t \ t \ tM \ \?f3 t\ \ M))" let ?boundM = "(SOME M. (\t. ?boundMP M t))" have compactf3:"compact (?f3 ` {tm..tM})" apply(rule compact_continuous_image) apply(rule continuous_on_compose2[of UNIV "sterm_sem I (f1 fid3 vid1)" "{tm..tM}" "ll_old.flow 0 (sol 0)"]) apply(rule sterm_continuous) apply(rule good_interp) subgoal by (auto intro: dfree.intros simp add: f1_def) apply(rule continuous_on_subset) prefer 2 apply (rule sub) subgoal using ll_old.general.flow_continuous_on by blast by auto then have boundedf3:"bounded (?f3 ` {tm..tM})" using compact_imp_bounded by auto then have boundedf3neg:"bounded ((\x. -x) ` ?f3 ` {tm..tM})" using compact_imp_bounded neg_compact by auto then have bdd_above_f3neg:"bdd_above ((\x. -x) ` ?f3 ` {tm..tM})" by (rule bounded_imp_bdd_above) then have bdd_above_f3:"bdd_above ( ?f3 ` {tm..tM})" using bounded_imp_bdd_above boundedf3 by auto have bdd_above_f3_abs:"bdd_above (abs ` ?f3 ` {tm..tM})" using bdd_above_f3neg bdd_above_f3 unfolding bdd_above_def apply auto subgoal for M1 M2 apply(rule exI[where x="max M1 M2"]) by fastforce done then have theBound:"\L. (\t. ?boundMP L t)" unfolding bdd_above_def norm_conv_dist by (auto simp add: Ball_def Bex_def norm_conv_dist image_iff norm_bcontfun_def dist_blinfun_def) then have boundMP:"\t. ?boundMP (?boundM) t" using someI[of "(\ M. \t. ?boundMP M t)"] by blast show "\M L. \t\{tm..tM}. \x. 
\x * ?f2 t + ?f3 t\ \ M + L * \x\" apply(rule exI[where x="?boundM"]) apply(rule exI[where x="?boundL"]) apply auto proof - fix t and x :: real assume ttm:"tm \ t" assume ttM:"t \ tM" from ttm ttM have ttmM:"tm \ t \ t \ tM" by auto have leqf3:"\?f3 t\ \ ?boundM" using boundMP ttmM by auto have leqf2:"\?f2 t\ \ ?boundL" using boundLP ttmM by auto have gr0:" \x\ \ 0" by auto have leqf2x:"\?f2 t\ * \x\ \ ?boundL * \x\" using gr0 leqf2 by (metis (no_types, lifting) real_scaleR_def scaleR_right_mono) have "\x * ?f2 t + ?f3 t\ \ \x\ * \?f2 t\ + \?f3 t\" proof - have f1: "\r ra. \r::real\ * \ra\ = \r * ra\" by (metis norm_scaleR real_norm_def real_scaleR_def) have "\r ra. \(r::real) + ra\ \ \r\ + \ra\" by (metis norm_triangle_ineq real_norm_def) then show ?thesis using f1 by presburger qed moreover have "... = \?f3 t\ + \?f2 t\ * \x\" by auto moreover have "... \ ?boundM + \?f2 t\ * \x\" using leqf3 by linarith moreover have "... \ ?boundM + ?boundL * \x\" using leqf2x by linarith ultimately show "\x * ?f2 t + ?f3 t\ \ ?boundM + ?boundL * \x\" by linarith qed qed have ivls_eq:"(ll_new.existence_ivl 0 r) = (ll_old.existence_ivl 0 (sol 0))" apply(rule ll_new.existence_ivl_eq_domain) apply auto apply (rule more_lipschitz) by auto have sub':"{0--t} \ ll_new.existence_ivl 0 r" using sub ivls_eq by auto have sol_new':"(ll_new.flow 0 r solves_ode ?yode) {0--t} UNIV" by(rule solves_ode_subset, rule sol_new, rule sub') let ?soly = "ll_new.flow 0 r" let ?sol' = "(\t. \ i. if i = vid2 then ?soly t else sol t $ i)" let ?aaba' = "mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (?sol' t)" have duh:"(fst ?aaba', snd ?aaba') = ?aaba'" by auto note bigEx = spec[OF spec[OF bigAll, where x="fst ?aaba'"], where x="snd ?aaba'"] have sol_deriv:"\s. s \ {0..t} \ (sol has_derivative (\xa. xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol s) else 0))) (at s within {0..t})" using sol apply simp by(drule solves_odeD(1), auto simp add: has_vderiv_on_def has_vector_derivative_def) have silly_eq1:"(\t. \ i. sol t $ i) = sol" by(rule ext, rule vec_extensionality, auto) have silly_eq2:"\s. (\xa. \ i. (xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol s) else 0)) $ i) = (\xa. xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol s) else 0))" by(rule ext, rule vec_extensionality, auto) have sol_proj_deriv:"\s i. s \ {0..t} \ ((\ t. sol t $ i) has_derivative (\xa. (xa *\<^sub>R (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (sol s) else 0)) $ i)) (at s within {0..t})" subgoal for s i apply(rule has_derivative_proj) using sol_deriv[of s] silly_eq1 silly_eq2[of s] by auto done have sol_proj_deriv_vid1:"\s. s \ {0..t} \ ((\ t. sol t $ vid1) has_derivative (\xa. xa * sterm_sem I (f1 fid1 vid1) (sol s))) (at s within {0..t})" subgoal for s using sol_proj_deriv[of s vid1] by auto done have sol_proj_deriv_other:"\s i. s \ {0..t} \ i \ vid1 \ ((\ t. sol t $ i) has_derivative (\xa. 0)) (at s within {0..t})" subgoal for s i using sol_proj_deriv[of s i] by auto done have fact:"\x. x \{0..t} \ (ll_new.flow 0 r has_derivative (\xa. xa *\<^sub>R (ll_new.flow 0 r x * sterm_sem I (f1 fid2 vid1) (ll_old.flow 0 (sol 0) x) + sterm_sem I (f1 fid3 vid1) (ll_old.flow 0 (sol 0) x)))) (at x within {0 .. 
t})" using sol_new' apply simp apply(drule solves_odeD(1)) using tclosed unfolding has_vderiv_on_def has_vector_derivative_def by auto have new_sol_deriv:"\s. s \ {0..t} \ (ll_new.flow 0 r has_derivative (\xa. xa *\<^sub>R (ll_new.flow 0 r s * sterm_sem I (f1 fid2 vid1) (sol s) + sterm_sem I (f1 fid3 vid1) (sol s)))) (at s within {0.. t})" subgoal for s using fact[of s] tclosed sol_eq_flow[of s] by auto done have sterm_agree:"\s. Vagree (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i, undefined) (sol s, undefined) {Inl vid1}" subgoal for s unfolding Vagree_def using vne12 by auto done have FVF:"(FVT (f1 fid2 vid1)) = {Inl vid1}" unfolding f1_def expand_singleton apply auto subgoal for x xa by (cases "xa = vid1", auto) done have FVF2:"(FVT (f1 fid3 vid1)) = {Inl vid1}" unfolding f1_def expand_singleton apply auto subgoal for x xa by (cases "xa = vid1", auto) done have sterm_agree_FVF:"\s. Vagree (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i, undefined) (sol s, undefined) (FVT (f1 fid2 vid1))" using sterm_agree FVF by auto have sterm_agree_FVF2:"\s. Vagree (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i, undefined) (sol s, undefined) (FVT (f1 fid3 vid1))" using sterm_agree FVF2 by auto have y_component_sem_eq2:"\s. sterm_sem I (f1 fid2 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) = sterm_sem I (f1 fid2 vid1) (sol s)" using coincidence_sterm[OF sterm_agree_FVF, of I] by auto have y_component_sem_eq3:"\s. sterm_sem I (f1 fid3 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) = sterm_sem I (f1 fid3 vid1) (sol s)" using coincidence_sterm[OF sterm_agree_FVF2, of I] by auto have y_component_ode_eq:"\s. s \ {0..t} \ (\xa. xa * (ll_new.flow 0 r s * sterm_sem I (f1 fid2 vid1) (sol s) + sterm_sem I (f1 fid3 vid1) (sol s))) = (\xa. xa * (sterm_sem I (f1 fid2 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) * ll_new.flow 0 r s + sterm_sem I (f1 fid3 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)))" subgoal for s apply(rule ext) subgoal for xa using y_component_sem_eq2 y_component_sem_eq3 by auto done done have agree_vid1:"\s. Vagree (sol s, undefined) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i, undefined) {Inl vid1}" unfolding Vagree_def using vne12 by auto have FVT_vid1:"FVT(f1 fid1 vid1) = {Inl vid1}" apply(auto simp add: f1_def) subgoal for x xa apply(cases "xa = vid1") by auto done have agree_vid1_FVT:"\s. Vagree (sol s, undefined) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i, undefined) (FVT (f1 fid1 vid1))" using FVT_vid1 agree_vid1 by auto have sterm_eq_vid1:"\s. sterm_sem I (f1 fid1 vid1) (sol s) = sterm_sem I (f1 fid1 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)" subgoal for s using coincidence_sterm[OF agree_vid1_FVT[of s], of I] by auto done have vid1_deriv_eq:"\s. (\xa. xa * sterm_sem I (f1 fid1 vid1) (sol s)) = (\xa. xa * sterm_sem I (f1 fid1 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i))" subgoal for s apply(rule ext) subgoal for x' using sterm_eq_vid1[of s] by auto done done have inner_deriv:"\s. s \ {0..t} \ ((\t. \ i. if i = vid2 then ll_new.flow 0 r t else sol t $ i) has_derivative (\xa. (\ i. xa * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (\ i. 
if i = vid2 then ll_new.flow 0 r s else sol s $ i) else 0)))) (at s within {0..t})" subgoal for s apply(rule has_derivative_vec) subgoal for i apply(cases "i = vid2") subgoal using vne12 using new_sol_deriv[of s] using y_component_ode_eq by auto subgoal apply(cases "i = vid1") using sol_proj_deriv_vid1[of s] vid1_deriv_eq[of s] sol_proj_deriv_other[of s i] by auto done done done have deriv_eta:"\s. (\xa. xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else 0))) = (\xa. (\ i. xa * (if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else 0))) " subgoal for s apply(rule ext) apply(rule vec_extensionality) using vne12 by auto done have sol'_deriv:"\s. s \ {0..t} \ ((\t. \ i. if i = vid2 then ll_new.flow 0 r t else sol t $ i) has_derivative (\xa. xa *\<^sub>R ((\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i) else 0)))) (at s within {0..t})" subgoal for s using inner_deriv[of s] deriv_eta[of s] by auto done have FVT:"\i. FVT (if i = vid1 then trm.Var vid1 else Const 0) \ {Inl vid1}" by auto have agree:"\s. Vagree (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol s)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)) {Inl vid1}" subgoal for s using mk_v_agree [of "I" "(OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))" "(a, b)" "(sol s)"] using mk_v_agree [of I "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0)))))" "(\ y. if vid2 = y then r else fst (a, b) $ y, b)" "(\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)"] unfolding Vagree_def using vne12 by simp done have agree':"\s i. Vagree (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol s)) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)) (FVT (if i = vid1 then trm.Var vid1 else Const 0))" subgoal for s i using agree_sub[OF FVT[of i] agree[of s]] by auto done have safe:"\i. dsafe (if i = vid1 then trm.Var vid1 else Const 0)" subgoal for i apply(cases "i = vid1", auto) done done have dterm_sem_eq:"\s i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol s)) = dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i))" subgoal for s i using coincidence_dterm[OF safe[of i] agree'[of s i], of I] by auto done have dterm_vec_eq:"\s. (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol s))) = (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)))" subgoal for s apply(rule vec_extensionality) subgoal for i using dterm_sem_eq[of i s] by auto done done have pred_same:"\s. s \ {0..t} \ Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol s))) \ Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)))" subgoal for s using dterm_vec_eq[of s] by auto done have sol'_domain:"\s. 0 \ s \ s \ t \ Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) (\ i. if i = vid2 then ll_new.flow 0 r s else sol s $ i)))" subgoal for s using sol apply simp apply(drule solves_odeD(2)) using pred_same[of s] by auto done have sol':"(?sol' solves_ode (\a b. (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) b else 0))) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) (\ y. if vid2 = y then r else fst (a, b) $ y, b) x))}" apply(rule solves_odeI) subgoal unfolding has_vderiv_on_def has_vector_derivative_def using sol'_deriv by auto by(auto, rule sol'_domain, auto) have set_eq:"{y. y = vid2 \ y = vid1 \ y = vid2 \ y = vid1 \ (\x. Inl y \ FVT (if x = vid1 then trm.Var vid1 else Const 0))} = {vid1, vid2}" by auto have "VSagree (?sol' 0) (\ y. 
if vid2 = y then r else fst (a, b) $ y) {vid1, vid2}" using VSA unfolding VSagree_def by simp then have VSA':" VSagree (?sol' 0) (\ y. if vid2 = y then r else fst (a, b) $ y) {y. y = vid2 \ y = vid1 \ y = vid2 \ y = vid1 \ (\x. Inl y \ FVT (if x = vid1 then trm.Var vid1 else Const 0))} " by (auto simp add: set_eq) have bigPre:"(\sol t. (fst ?aaba', snd ?aaba') = mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) ((\ y. if vid2 = y then r else fst (a,b) $ y), b) (sol t) \ 0 \ t \ (sol solves_ode (\a b. (\ i. if i = vid1 then sterm_sem I (f1 fid1 vid1) b else 0) + (\ i. if i = vid2 then sterm_sem I (Plus (Times (f1 fid2 vid1) (trm.Var vid2)) (f1 fid3 vid1)) b else 0))) {0..t} {x. Predicates I vid1 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0))))) ((\ y. if vid2 = y then r else (fst (a,b)) $ y), b) x))} \ VSagree (sol 0) (\ y. if vid2 = y then r else fst (a,b) $ y) {uu. uu = vid2 \ uu = vid1 \ uu = vid2 \ uu = vid1 \ Inl uu \ Inl ` ({x. \xa. Inl x \ FVT (if xa = vid1 then trm.Var vid1 else Const 0)} \ {x. x = vid2 \ (\xa. Inl x \ FVT (if xa = vid1 then trm.Var vid1 else Const 0))}) \ (\x. Inl uu \ FVT (if x = vid1 then trm.Var vid1 else Const 0))})" apply(rule exI[where x="?sol'"]) apply(rule exI[where x=t]) apply(rule conjI) subgoal by simp apply(rule conjI) subgoal by (rule t) apply(rule conjI) apply(rule sol') using VSA' unfolding VSagree_def by auto have pred_sem:"Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) ?aaba')" using mp[OF bigEx bigPre] by auto let ?other_state = "(mk_v I (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol t))" have agree:"Vagree (?aaba') (?other_state) {Inl vid1} " using mk_v_agree [of "I" "(OProd (OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0))) (OSing vid2 (Plus (Times ($f fid2 (\i. if i = vid1 then trm.Var vid1 else Const 0)) (trm.Var vid2)) ($f fid3 (\i. if i = vid1 then trm.Var vid1 else Const 0)))))" "(\ y. if vid2 = y then r else fst (a, b) $ y, b)" "(?sol' t)"] using mk_v_agree [of "I" "(OSing vid1 ($f fid1 (\i. if i = vid1 then trm.Var vid1 else Const 0)))" "(a, b)" "(sol t)"] unfolding Vagree_def using vne12 by simp have sub:"\i. FVT (if i = vid1 then trm.Var vid1 else Const 0) \ {Inl vid1}" by auto have agree':"\i. Vagree (?aaba') (?other_state) (FVT (if i = vid1 then trm.Var vid1 else Const 0)) " subgoal for i using agree_sub[OF sub[of i] agree] by auto done have silly_safe:"\i. dsafe (if i = vid1 then trm.Var vid1 else Const 0)" subgoal for i apply(cases "i = vid1") by (auto simp add: dsafe_Var dsafe_Const) done have dsem_eq:"(\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) ?aaba') = (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) ?other_state)" apply(rule vec_extensionality) subgoal for i using coincidence_dterm[OF silly_safe[of i] agree'[of i], of I] by auto done show "Predicates I vid2 (\ i. dterm_sem I (if i = vid1 then trm.Var vid1 else Const 0) (mk_v I (OSing vid1 ($f fid1 (\i. 
if i = vid1 then trm.Var vid1 else Const 0))) (a, b) (sol t)))" using pred_sem dsem_eq by auto qed done qed end end diff --git a/thys/Ergodic_Theory/Gouezel_Karlsson.thy b/thys/Ergodic_Theory/Gouezel_Karlsson.thy --- a/thys/Ergodic_Theory/Gouezel_Karlsson.thy +++ b/thys/Ergodic_Theory/Gouezel_Karlsson.thy @@ -1,1810 +1,1810 @@ (* Author: Sébastien Gouëzel sebastien.gouezel@univ-rennes1.fr License: BSD *) section \Gouezel-Karlsson\ theory Gouezel_Karlsson imports Asymptotic_Density Kingman begin text \This section is devoted to the proof of the main ergodic result of the article "Subadditive and multiplicative ergodic theorems" by Gouezel and Karlsson~\cite{gouezel_karlsson}. It is a version of Kingman theorem ensuring that, for subadditive cocycles, there are almost surely many times $n$ where the cocycle is nearly additive at \emph{all} times between $0$ and $n$. This theorem is then used in this article to construct horofunctions characterizing the behavior at infinity of compositions of semi-contractions. This requires too many further notions to be implemented in current Isabelle/HOL, but the main ergodic result is completely proved below, in Theorem~\verb+Gouezel_Karlsson+, following the arguments in the paper (but in a slightly more general setting here as we are not making any ergodicity assumption). To simplify the exposition, the theorem is proved assuming that the limit of the subcocycle vanishes almost everywhere, in the locale \verb+Gouezel_Karlsson_Kingman+. The final result is proved by an easy reduction to this case. The main steps of the proof are as follows: \begin{itemize} \item assume first that the map is invertible, and consider the inverse map and the corresponding inverse subcocycle. With combinatorial arguments that only work for this inverse subcocycle, we control the density of bad times given some allowed error $d>0$, in a precise quantitative way, in Lemmas~\verb+upper_density_all_times+ and~\verb+upper_density_large_k+. We put these estimates together in Lemma~\verb+upper_density_delta+. \item These estimates are then transfered to the original time direction and the original subcocycle in Lemma~\verb+upper_density_good_direction_invertible+. The fact that we have quantitative estimates in terms of asymptotic densities is central here, just having some infiniteness statement would not be enough. \item The invertibility assumption is removed in Lemma~\verb+upper_density_good_direction+ by using the result in the natural extension. \item Finally, the main result is deduced in Lemma~\verb+infinite_AE+ (still assuming that the asymptotic limit vanishes almost everywhere), and in full generality in Theorem~\verb+Gouezel_Karlsson_Kingman+. \end{itemize} \ lemma upper_density_eventually_measure: fixes a::real assumes [measurable]: "\n. {x \ space M. P x n} \ sets M" and "emeasure M {x \ space M. upper_asymptotic_density {n. P x n} < a} > b" shows "\N. emeasure M {x \ space M. \n \ N. card ({n. P x n} \ {.. b" proof - define G where "G = {x \ space M. upper_asymptotic_density {n. P x n} < a}" define H where "H = (\N. {x \ space M. \n \ N. card ({n. P x n} \ {.. sets M" "\N. H N \ sets M" unfolding G_def H_def by auto have "G \ (\N. H N)" proof fix x assume "x \ G" then have "x \ space M" unfolding G_def by simp have "eventually (\n. card({n. P x n} \ {..x \ G\ unfolding G_def using upper_asymptotic_densityD by auto then obtain N where "\n. n \ N \ card({n. P x n} \ {.. H N" unfolding H_def using \x \ space M\ by auto then show "x \ (\N. 
H N)" by blast qed have "b < emeasure M G" using assms(2) unfolding G_def by simp also have "... \ emeasure M (\N. H N)" apply (rule emeasure_mono) using \G \ (\N. H N)\ by auto finally have "emeasure M (\N. H N) > b" by simp moreover have "(\N. emeasure M (H N)) \ emeasure M (\N. H N)" apply (rule Lim_emeasure_incseq) unfolding H_def incseq_def by auto ultimately have "eventually (\N. emeasure M (H N) > b) sequentially" by (simp add: order_tendsto_iff) then obtain N where "emeasure M (H N) > b" using eventually_False_sequentially eventually_mono by blast then show ?thesis unfolding H_def by blast qed locale Gouezel_Karlsson_Kingman = pmpt + fixes u::"nat \ 'a \ real" assumes subu: "subcocycle u" and subu_fin: "subcocycle_avg_ereal u > -\" and subu_0: "AE x in M. subcocycle_lim u x = 0" begin lemma int_u [measurable]: "integrable M (u n)" using subu unfolding subcocycle_def by auto text \Next lemma is Lemma 2.1 in~\cite{gouezel_karlsson}.\ lemma upper_density_all_times: assumes "d > (0::real)" shows "\c> (0::real). emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) x \ - c * l} < d} > 1 - d" proof - define f where "f = (\x. abs (u 1 x))" have [measurable]: "f \ borel_measurable M" unfolding f_def by auto define G where "G = {x \ space M. (\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x \ (\n. u n x / n) \ 0}" have [measurable]: "G \ sets M" unfolding G_def by auto have "AE x in M. (\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x" apply (rule birkhoff_theorem_AE_nonergodic) using subu unfolding f_def subcocycle_def by auto moreover have "AE x in M. (\n. u n x / n) \ 0" using subu_0 kingman_theorem_nonergodic(1)[OF subu subu_fin] by auto ultimately have "AE x in M. x \ G" unfolding G_def by auto then have "emeasure M G = 1" by (simp add: emeasure_eq_1_AE) define V where "V = (\c x. {n. \l \ {1..n}. u n x - u (n-l) x \ - c * l})" define Good where "Good = (\c. {x \ G. upper_asymptotic_density (V c x) < d})" have [measurable]: "Good c \ sets M" for c unfolding Good_def V_def by auto have I: "upper_asymptotic_density (V c x) \ real_cond_exp M Invariants f x / c" if "c>0" "x \ G" for c x proof - have [simp]: "c>0" "c \ 0" "c \ 0" using \c>0\ by auto define U where "U = (\n. abs(u 0 x) + birkhoff_sum f n x - c * card (V c x \ {1..n}))" have main: "u n x \ U n" for n proof (rule nat_less_induct) fix n assume H: "\m U m" consider "n = 0" | "n\1 \ n \ V c x" | "n\1 \ n \ V c x" by linarith then show "u n x \ U n" proof (cases) assume "n = 0" then show ?thesis unfolding U_def by auto next assume A: "n\1 \ n \ V c x" then have "n \ 1" by simp then have "n-1 {n}" using \1 \ n\ atLeastLessThanSuc by auto then have *: "card (V c x \ {1..n}) = card (V c x \ {1..n-1})" using A by auto have "u n x \ u (n-1) x + u 1 ((T^^(n-1)) x)" using \n\1\ subu unfolding subcocycle_def by (metis le_add_diff_inverse2) also have "... \ U (n-1) + f ((T^^(n-1)) x)" unfolding f_def using H \n-1 by auto also have "... = abs(u 0 x) + birkhoff_sum f (n-1) x + f ((T^^(n-1)) x) - c * card (V c x \ {1..n-1})" unfolding U_def by auto also have "... = abs(u 0 x) + birkhoff_sum f n x - c * card (V c x \ {1..n})" using * birkhoff_sum_cocycle[of f "n-1" 1 x] \1 \ n\ by auto also have "... 
= U n" unfolding U_def by simp finally show ?thesis by auto next assume B: "n\1 \ n \ V c x" then obtain l where l: "l\{1..n}" "u n x - u (n-l) x \ - c * l" unfolding V_def by blast then have "n-l < n" by simp have m: "- (r * ra) - r * rb = - (r * (rb + ra))" for r ra rb::real by (simp add: algebra_simps) have "card(V c x \ {1..n}) \ card ((V c x \ {1..n-l}) \ {n-l+1..n})" by (rule card_mono, auto) also have "... \ card (V c x \ {1..n-l}) + card {n-l+1..n}" by (rule card_Un_le) also have "... \ card (V c x \ {1..n-l}) + l" by auto finally have "card(V c x \ {1..n}) \ card (V c x \ {1..n-l}) + real l" by auto then have *: "-c * card (V c x \ {1..n-l}) - c * l \ -c * card(V c x \ {1..n})" using m by auto have "birkhoff_sum f ((n-l) + l) x = birkhoff_sum f (n-l) x + birkhoff_sum f l ((T^^(n-l))x)" by (rule birkhoff_sum_cocycle) moreover have "birkhoff_sum f l ((T^^(n-l))x) \ 0" unfolding f_def birkhoff_sum_def using sum_nonneg by auto ultimately have **: "birkhoff_sum f (n-l) x \ birkhoff_sum f n x" using l(1) by auto have "u n x \ u (n-l) x - c * l" using l by simp also have "... \ U (n-l) - c* l" using H \n-l < n\ by auto also have "... = abs(u 0 x) + birkhoff_sum f (n-l) x - c * card (V c x \ {1..n-l}) - c*l" unfolding U_def by auto also have "... \ abs(u 0 x) + birkhoff_sum f n x - c * card (V c x \ {1..n})" using * ** by simp finally show ?thesis unfolding U_def by auto qed qed have "(\n. abs(u 0 x) * (1/n) + birkhoff_sum f n x / n - u n x / n) \ abs(u 0 x) * 0 + real_cond_exp M Invariants f x - 0" apply (intro tendsto_intros) using \x \ G\ unfolding G_def by auto moreover have "(abs(u 0 x) + birkhoff_sum f n x - u n x)/n = abs(u 0 x) * (1/n) + birkhoff_sum f n x / n - u n x / n" for n by (auto simp add: add_divide_distrib diff_divide_distrib) ultimately have "(\n. (abs(u 0 x) + birkhoff_sum f n x - u n x)/n) \ real_cond_exp M Invariants f x" by auto then have a: "limsup (\n. (abs(u 0 x) + birkhoff_sum f n x - u n x)/n) = real_cond_exp M Invariants f x" by (simp add: assms lim_imp_Limsup) have "c * card (V c x \ {1..n})/n \ (abs(u 0 x) + birkhoff_sum f n x - u n x)/n" for n using main[of n] unfolding U_def by (simp add: divide_right_mono) then have "limsup (\n. c * card (V c x \ {1..n})/n) \ limsup (\n. (abs(u 0 x) + birkhoff_sum f n x - u n x)/n)" by (simp add: Limsup_mono) then have b: "limsup (\n. c * card (V c x \ {1..n})/n) \ real_cond_exp M Invariants f x" using a by simp have "ereal(upper_asymptotic_density (V c x)) = limsup (\n. card (V c x \ {1..n})/n)" using upper_asymptotic_density_shift[of "V c x" 1 0] by auto also have "... = limsup (\n. ereal(1/c) * ereal(c * card (V c x \ {1..n})/n))" by auto also have "... = (1/c) * limsup (\n. c * card (V c x \ {1..n})/n)" by (rule limsup_ereal_mult_left, auto) also have "... \ ereal (1/c) * real_cond_exp M Invariants f x" by (rule ereal_mult_left_mono[OF b], auto) finally show "upper_asymptotic_density (V c x) \ real_cond_exp M Invariants f x / c" by auto qed { fix r::real obtain c::nat where "r / d < c" using reals_Archimedean2 by auto then have "r/d < real c+1" by auto then have "r / (real c+1) < d" using \d>0\ by (simp add: divide_less_eq mult.commute) then have "\c::nat. r / (real c+1) < d" by auto } then have unG: "(\c::nat. {x \ G. real_cond_exp M Invariants f x / (c+1) < d}) = G" by auto have *: "r < d * (real n + 1)" if "m \ n" "r < d * (real m + 1)" for m n r proof - have "d * (real m + 1) \ d * (real n + 1)" using \d>0\ \m \ n\ by auto then show ?thesis using \r < d * (real m + 1)\ by auto qed have "(\c. emeasure M {x \ G. 
real_cond_exp M Invariants f x / (real c+1) < d}) \ emeasure M (\c::nat. {x \ G. real_cond_exp M Invariants f x / (c+1) < d})" apply (rule Lim_emeasure_incseq) unfolding incseq_def by (auto simp add: divide_simps *) then have "(\c. emeasure M {x \ G. real_cond_exp M Invariants f x / (real c+1) < d}) \ emeasure M G" using unG by auto then have "(\c. emeasure M {x \ G. real_cond_exp M Invariants f x / (real c+1) < d}) \ 1" using \emeasure M G = 1\ by simp then have "eventually (\c. emeasure M {x \ G. real_cond_exp M Invariants f x / (real c+1) < d} > 1 - d) sequentially" apply (rule order_tendstoD) apply (insert \0, auto simp add: ennreal_1[symmetric] ennreal_lessI simp del: ennreal_1) done then obtain c0 where c0: "emeasure M {x \ G. real_cond_exp M Invariants f x / (real c0+1) < d} > 1 - d" using eventually_sequentially by auto define c where "c = real c0 + 1" then have "c > 0" by auto have *: "emeasure M {x \ G. real_cond_exp M Invariants f x / c < d} > 1 - d" unfolding c_def using c0 by auto have "{x \ G. real_cond_exp M Invariants f x / c < d} \ {x \ space M. upper_asymptotic_density (V c x) < d}" apply auto using G_def apply blast using I[OF \c>0\] by fastforce then have "emeasure M {x \ G. real_cond_exp M Invariants f x / c < d} \ emeasure M {x \ space M. upper_asymptotic_density (V c x) < d}" apply (rule emeasure_mono) unfolding V_def by auto then have "emeasure M {x \ space M. upper_asymptotic_density (V c x) < d} > 1 - d" using * by auto then show ?thesis unfolding V_def using \c>0\ by auto qed text \Next lemma is Lemma 2.2 in~\cite{gouezel_karlsson}.\ lemma upper_density_large_k: assumes "d > (0::real)" "d \ 1" shows "\k::nat. emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} < d} > 1 - d" proof - have [simp]: "d>0" "d \ 0" "d \ 0" using \d>0\ by auto define rho where "rho = d * d * d / 4" have [simp]: "rho > 0" "rho \ 0" "rho \ 0" unfolding rho_def using assms by auto text \First step: choose a time scale $s$ at which all the computations will be done. the integral of $u_s$ should be suitably small -- how small precisely is given by $\rho$.\ have "ennreal(\x. abs(u n x / n) \M) = (\\<^sup>+x. abs(u n x /n - subcocycle_lim u x) \M)" for n proof - have "ennreal(\x. abs(u n x / n) \M) = (\\<^sup>+x. abs(u n x /n) \M)" apply (rule nn_integral_eq_integral[symmetric]) using int_u by auto also have "... = (\\<^sup>+x. abs(u n x /n - subcocycle_lim u x) \M)" apply (rule nn_integral_cong_AE) using subu_0 by auto finally show ?thesis by simp qed moreover have "(\n. \\<^sup>+x. abs(u n x /n - subcocycle_lim u x) \M) \ 0" by (rule kingman_theorem_nonergodic(3)[OF subu subu_fin]) ultimately have "(\n. ennreal(\x. abs(u n x / n) \M)) \ 0" by auto then have "(\n. (\x. abs(u n x / n) \M)) \ 0" by (simp add: ennreal_0[symmetric] del: ennreal_0) then have "eventually (\n. (\x. abs(u n x / n) \M) < rho) sequentially" by (rule order_tendstoD(2), auto) then obtain s::nat where [simp]: "s>0" and s_int: "(\x. abs(u s x / s) \M) < rho" by (metis (mono_tags, lifting) neq0_conv eventually_sequentially gr_implies_not0 linorder_not_le of_nat_0_eq_iff order_refl zero_neq_one) text \Second step: a truncation argument, to decompose $|u_1|$ as a sum of a constant (its contribution will be small if $k$ is large at the end of the argument) and of a function with small integral).\ have "(\n. (\x. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x \M)) \ (\x. 0 \M)" proof (rule integral_dominated_convergence[where ?w = "\x. abs(u 1 x)"]) show "AE x in M. 
norm (abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x) \ abs(u 1 x)" for n unfolding indicator_def by auto { fix x have "abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x = (0::real)" if "n > abs(u 1 x)" for n::nat unfolding indicator_def using that by auto then have "eventually (\n. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x = 0) sequentially" by (metis (mono_tags, lifting) eventually_at_top_linorder reals_Archimedean2 less_le_trans of_nat_le_iff) then have "(\n. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x) \ 0" by (rule tendsto_eventually) } then show "AE x in M. (\n. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x) \ 0" by simp qed (auto simp add: int_u) then have "eventually (\n. (\x. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ n} x \M) < rho) sequentially" by (rule order_tendstoD(2), auto) then obtain Knat::nat where Knat: "Knat > 0" "(\x. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ Knat} x \M) < rho" by (metis (mono_tags, lifting) eventually_sequentially gr_implies_not0 neq0_conv linorder_not_le of_nat_0_eq_iff order_refl zero_neq_one) define K where "K = real Knat" then have [simp]: "K \ 0" "K>0" and K: "(\x. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ K} x \M) < rho" using Knat by auto define F where "F = (\x. abs(u 1 x) * indicator {x. abs(u 1 x) \ K} x)" have int_F [measurable]: "integrable M F" unfolding F_def apply (rule Bochner_Integration.integrable_bound[where ?f = "\x. abs(u 1 x)"]) unfolding indicator_def by (auto simp add: int_u) have "(\x. F x \M) = (\x. abs(u 1 x) * indicator {x \ space M. abs(u 1 x) \ K} x \M)" apply (rule integral_cong_AE) unfolding F_def by (auto simp add: indicator_def) then have F_int: "(\x. F x \M) < rho" using K by auto have F_pos: "F x \ 0" for x unfolding F_def by auto have u1_bound: "abs(u 1 x) \ K + F x" for x unfolding F_def indicator_def apply (cases "x \ {x \ space M. K \ \u 1 x\}") by auto define F2 where "F2 = (\x. F x + abs(u s x/s))" have int_F2 [measurable]: "integrable M F2" unfolding F2_def using int_F int_u[of s] by auto have F2_pos: "F2 x \ 0" for x unfolding F2_def using F_pos by auto have "(\x. F2 x \M) = (\x. F x \M) + (\x. abs(u s x/s) \M)" unfolding F2_def apply (rule Bochner_Integration.integral_add) using int_F int_u by auto then have F2_int: "(\x. F2 x \M) < 2 * rho" using F_int s_int by auto text \We can now choose $k$, large enough. The reason for our choice will only appear at the end of the proof.\ define k where "k = max (2 * s + 1) (nat(ceiling((2 * d * s + 2 * K * s)/(d/2))))" have "k > 2 * s" unfolding k_def by auto have "k \ (2 * d * s + 2 * K * s)/(d/2)" unfolding k_def by linarith then have "(2 * d * s + 2 * K * s)/k \ d/2" using \k > 2 * s\ by (simp add: divide_simps mult.commute) text \Third step: definition of a good set $G$ where everything goes well.\ define G where "G = {x \ space M. (\n. u n x / n) \ 0 \ (\n. birkhoff_sum (\x. abs(u s x / s)) n x / n) \ real_cond_exp M Invariants (\x. abs(u s x / s)) x \ (\n. birkhoff_sum F n x / n) \ real_cond_exp M Invariants F x \ real_cond_exp M Invariants F x + real_cond_exp M Invariants (\x. abs(u s x / s)) x = real_cond_exp M Invariants F2 x}" have [measurable]: "G \ sets M" unfolding G_def by auto have "AE x in M. (\n. u n x / n) \ 0" using kingman_theorem_nonergodic(1)[OF subu subu_fin] subu_0 by auto moreover have "AE x in M.(\n. birkhoff_sum (\x. abs(u s x / s)) n x / n) \ real_cond_exp M Invariants (\x. 
abs(u s x / s)) x" apply (rule birkhoff_theorem_AE_nonergodic) using int_u[of s] by auto moreover have "AE x in M. (\n. birkhoff_sum F n x / n) \ real_cond_exp M Invariants F x" by (rule birkhoff_theorem_AE_nonergodic[OF int_F]) moreover have "AE x in M. real_cond_exp M Invariants F x + real_cond_exp M Invariants (\x. abs(u s x / s)) x = real_cond_exp M Invariants F2 x" unfolding F2_def apply (rule AE_symmetric[OF real_cond_exp_add]) using int_u[of s] int_F int_u[of s] by auto ultimately have "AE x in M. x \ G" unfolding G_def by auto then have "emeasure M G = 1" by (simp add: emeasure_eq_1_AE) text \Estimation of asymptotic densities of bad times, for points in $G$. There is a trivial part, named $U$ below, that has to be treated separately because it creates problematic boundary effects.\ define U where "U = (\x. {n. \l \ {n-s<..n}. u n x - u (n-l) x \ - d * l})" define V where "V = (\x. {n. \l \ {k..n-s}. u n x - u (n-l) x \ - d * l})" text \Trivial estimate for $U(x)$: this set is finite for $x\in G$.\ have densU: "upper_asymptotic_density (U x) = 0" if "x \ G" for x proof - define C where "C = Max {abs(u m x) |m. m {n. u n x \ C - d * n}" proof (auto) fix n assume "n \ U x" then obtain l where l: "l\ {n-s <..n}" "u n x - u (n-l) x \ - d * l" unfolding U_def by auto define m where "m = n-l" have "m < s" unfolding m_def using l by auto have "u n x \ u m x - d * l" using l m_def by auto also have "... \ abs(u m x) - d * n + d * m" unfolding m_def using l by (simp add: algebra_simps of_nat_diff) also have "... \ Max {abs(u m x) |m. mm < s\ apply (auto) by (rule Max_ge, auto) also have "... \ Max {abs(u m x) |m. mm < s\ \d>0\ by auto finally show "u n x \ C - d * n" unfolding C_def by auto qed have "eventually (\n. u n x / n > -d/2) sequentially" apply (rule order_tendstoD(1)) using \x \ G\ \d>0\ unfolding G_def by auto then obtain N where N: "\n. n \ N \ u n x / n > - d/2" using eventually_sequentially by auto { fix n assume *: "u n x \ C - d * n" "n > N" then have "n \ N" "n > 0" by auto have "2 * u n x \ 2 * C - 2 * d * n" using * by auto moreover have "2 * u n x \ - d * n" using N[OF \n \ N\] \n > 0\ by (simp add: divide_simps) ultimately have "- d * n \ 2 * C - 2 * d * n" by auto then have "d * n \ 2 * C" by auto then have "n \ 2 * C / d" using \d>0\ by (simp add: mult.commute divide_simps) } then have "{n. u n x \ C - d * n} \ {..max (nat (floor(2*C/d))) N}" by (auto, meson le_max_iff_disj le_nat_floor not_le) then have "finite {n. u n x \ C - d * n}" using finite_subset by blast then have "finite (U x)" using * finite_subset by blast then show ?thesis using upper_asymptotic_density_finite by auto qed text \Main step: control of $u$ along the sequence $ns+t$, with a term $-(d/2) Card(V(x) \cap [1,ns+t])$ on the right. Then, after averaging in $t$, Birkhoff theorem will imply that $Card(V(x) \cap [1,n])$ is suitably small.\ define Z where "Z = (\t n x. Max {u i x|i. i< s} + (\i {1..n * s + t}))" have Main: "u (n * s + t) x \ Z t n x" if "t < s" for n x t proof (rule nat_less_induct[where ?n = n]) fix n assume H: "\m Z t m x" consider "n = 0"|"n>0 \ V x \ {(n-1) * s+t<..n * s+t} = {}"|"n>0 \ V x \ {(n-1) * s+t<..n * s+t} \ {}" by auto then show "u (n * s+t) x \ Z t n x" proof (cases) assume "n = 0" then have "V x \ {1..n * s + t} = {}" unfolding V_def using \t \k>2* s\ by auto then have *: "card(V x \ {1..n * s+t}) = 0" by simp have **: "0 \ (\in = 0\ by auto also have "... \ Max {u i x|i. i< s}" by (rule Max_ge, auto simp add: \t) also have "... 
\ Z t n x" unfolding Z_def birkhoff_sum_def using \n = 0\ * ** by auto finally show ?thesis by simp next assume A: "n>0 \ V x \ {(n-1) * s+t<..n * s+t} = {}" then have "n\1" by simp have "n * s + t = (n-1) * s + t + s" using \n\1\ by (simp add: add.commute add.left_commute mult_eq_if) have "V x \ {1..n * s + t} = V x \ {1..(n-1) * s + t} \ V x \ {(n-1) * s + t<..n * s + t}" using \n\1\ by (auto, simp add: mult_eq_if) then have *: "card(V x \ {1..n * s+t}) = card(V x \ {1..(n-1) * s+t})" using A by auto have **: "birkhoff_sum F ((n-1) * s + t) x \ birkhoff_sum F (n * s + t) x" unfolding birkhoff_sum_def apply (rule sum_mono2) using \n * s+t = (n-1) * s+t + s\ F_pos by auto have "(\i (\i (\in\1\ lessThan_Suc_atMost sum.lessThan_Suc[of "\i. abs(u s ((T^^(i* s+t))x))" "n-1", symmetric] by auto finally have ***: "(\i (\in\1\ by (simp add: add.commute add.left_commute mult_eq_if) also have "... \ u ((n-1) * s+t) x + u s ((T^^((n-1) * s+t)) x)" using subcocycle_ineq[OF subu, of "(n-1) * s+t" s x] by simp also have "... \ Max {u i x|i. i< s} + (\i {1..(n-1) * s+t}) + u s ((T^^((n-1) * s+t)) x)" using H \n\1\ unfolding Z_def by auto also have "... \ Max {u i x|i. i< s} + (\i {1..n * s+t})" using * ** *** by auto also have "... \ Z t n x" unfolding Z_def by (auto simp add: divide_simps) finally show ?thesis by simp next assume B: "n>0 \ V x \ {(n-1) * s+t<..n * s+t} \ {}" then have [simp]: "n>0" "n\1" "n \ 0" by auto obtain m where m: "m \ V x \ {(n-1) * s + t<..n * s + t}" using B by blast then obtain l where l: "l \ {k..m-s}" "u m x - u (m-l) x \ - d * l" unfolding V_def by auto then have "m-s>0" using \k>2* s\ by auto then have "m-l \ s" using l by auto define p where "p = (m-l-t) div s" have p1: "m-l \ p * s + t" unfolding p_def using \m-l \ s\ \s>t\ minus_mod_eq_div_mult [symmetric, of "m - l - t" s] by simp have p2: "m-l < p* s + t + s" unfolding p_def using \m-l \ s\ \s>t\ div_mult_mod_eq[of "m-l-t" s] mod_less_divisor[OF \s>0\, of "m-l-t"] by linarith then have "l \ m - p * s - t -s" by auto then have "l \ (n-1) * s + t -p * s - t- s" using m by auto then have "l + 2 * s\ (n * s + t) - (p * s+t)" by (simp add: diff_mult_distrib) have "(p+1) * s + t \ (n-1) * s + t" using \k> 2 * s\ m l(1) p1 by (auto simp add: algebra_simps) then have "p+1 \ n-1" using \s>0\ by (meson add_le_cancel_right mult_le_cancel2) then have "p \ n-1" "p (n * s + t)" using m l(1) p1 by (auto simp add: algebra_simps) then have "(1::real) \ ((n * s + t) - (p* s + t)) / k" using \k > 2* s\ by auto have In: "u (n * s + t) x \ u m x + (\i \ {(n-1) * s + t..i \ {(n-1) * s+t.. 0" by (rule sum_nonneg, auto) then show ?thesis using True by auto next case False then have m2: "n * s + t - m >0" "(n-1) * s+t \ m" using m by auto have "birkhoff_sum (u 1) (n * s+t-m) ((T^^m) x) = (\iij \ {m.. (\j \ {m.. (\j \ {(n-1) * s+t..j \ {m..j \ {(n-1) * s+t.. (\j \ {(n-1) * s+t.. u m x + u (n * s+t-m) ((T^^m) x)" using subcocycle_ineq[OF subu, of m "n * s+t-m"] m2 by auto also have "... \ u m x + birkhoff_sum (u 1) (n * s+t-m) ((T^^m) x)" using subcocycle_bounded_by_birkhoff1[OF subu \n * s+t - m >0\, of "(T^^m)x"] by simp finally show ?thesis using * by auto qed have Ip: "u (m-l) x \ u (p* s+t) x + (\i \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x)))" proof (cases "m-l = p* s+t") case True have "(\i \ {p* s+t..<(p+1)* s+t}. 
abs(u 1 ((T^^i) x))) \ 0" by (rule sum_nonneg, auto) then show ?thesis using True by auto next case False then have "m-l - (p* s+t) > 0" using p1 by auto have I: "p * s + t + (m - l - (p * s + t)) = m - l" using p1 by auto have "birkhoff_sum (u 1) (m-l - (p* s+t)) ((T^^(p* s+t)) x) = (\iij \ {p* s+t.. (\j \ {p* s+t.. (\j \ {p* s+t..j \ {m-l..<(p+1)* s+t}. abs(u 1 ((T^^j) x)))" by auto also have "... = (\j \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^j) x)))" apply (rule sum.atLeastLessThan_concat) using p1 p2 by auto finally have *: "birkhoff_sum (u 1) (m-l - (p* s+t)) ((T^^(p* s+t)) x) \ (\j \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^j) x)))" by auto have "u (m-l) x \ u (p* s+t) x + u (m-l - (p* s+t)) ((T^^(p* s+t)) x)" using subcocycle_ineq[OF subu, of "p* s+t" "m-l - (p* s+t)" x] I by auto also have "... \ u (p* s+t) x + birkhoff_sum (u 1) (m-l - (p* s+t)) ((T^^(p* s+t)) x)" using subcocycle_bounded_by_birkhoff1[OF subu \m-l - (p* s+t) > 0\, of "(T^^(p* s+t)) x"] by simp finally show ?thesis using * by auto qed have "(\i \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) \ (\i \ {p* s+t..<(p+1)* s+t}. K + F ((T^^i) x))" apply (rule sum_mono) using u1_bound by auto moreover have "(\i \ {(n-1) * s+t.. (\i \ {(n-1) * s+t..i \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (\i \ {(n-1) * s+t.. (\i \ {p* s+t..<(p+1)* s+t}. K + F ((T^^i) x)) + (\i \ {(n-1) * s+t..i \ {p* s+t..<(p+1)* s+t}. F ((T^^i) x)) + (\i \{(n-1) * s+t.. 2* K * s + (\i \ {p* s+t..<(n-1) * s+t}. F ((T^^i) x)) + (\i \{(n-1) * s+t..(p+1)* s+t\(n-1) * s+t\ F_pos by auto also have "... = 2* K * s + (\i \ {p* s+t..p\n-1\ by auto finally have A0: "(\i \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (\i \ {(n-1) * s+t.. 2* K * s + (\i \ {p* s+t.. {p * s + t<.. n * s+t}) \ card {p * s + t<.. n * s+t}" by (rule card_mono, auto) have "2 * d * s + 2 * K * s > 0" using \K>0\ \s>0\ \d>0\ by (metis add_pos_pos mult_2 mult_zero_left of_nat_0_less_iff pos_divide_less_eq times_divide_eq_right) then have "2 * d * s + 2 * K * s \ ((n * s + t) - (p* s + t)) * ((2 * d * s + 2 * K * s) / k)" using \1 \ ((n * s + t) - (p* s + t)) / k\ by (simp add: le_divide_eq_1 pos_le_divide_eq) also have "... \ ((n * s + t) - (p* s + t)) * (d/2)" apply (rule mult_left_mono) using \(2 * d * s + 2 * K * s)/k \ d/2\ by auto finally have "2 * d * s + 2 * K * s \ ((n * s + t) - (p* s + t)) * (d/2)" by auto then have "-d * ((n * s+t) - (p* s+t)) + 2 * d * s + 2 * K * s \ -d * ((n * s+t) - (p* s+t)) + ((n * s + t) - (p* s + t)) * (d/2)" by linarith also have "... = (-d/2) * card {p * s + t<.. n * s+t}" by auto also have "... \ (-d/2) * card(V x \ {p * s + t<.. n * s+t})" using \card(V x \ {p * s + t<.. n * s+t}) \ card {p * s + t<.. n * s+t}\ by auto finally have A1: "-d * ((n * s+t) - (p* s+t)) + 2 * d * s + 2 * K * s \ (-d/2) * card(V x \ {p * s + t<.. n * s+t})" by simp have "V x \ {1.. n * s+t} = V x \ {1..p * s + t} \ V x \ {p * s + t<.. n * s+t}" using \p * s + t + k \ n * s + t\ by auto then have "card (V x \ {1.. n * s+t}) = card(V x \ {1..p * s + t} \ V x \ {p * s + t<.. n * s+t})" by auto also have "... = card (V x \ {1..p * s + t}) + card (V x \ {p * s + t<.. n * s+t})" by (rule card_Un_disjoint, auto) finally have A2: "card (V x \ {1..p * s + t}) + card (V x \ {p * s + t<.. n * s+t}) = card (V x \ {1.. n * s+t})" by simp have A3: "(\i (\ip\n-1\ by auto have A4: "birkhoff_sum F (p * s + t) x + (\i \ {p* s+t..p\n-1\ by auto have "u (n * s+t) x \ u m x + (\i \ {(n-1) * s+t.. (u m x - u (m-l) x) + u (m-l) x + (\i \ {(n-1) * s+t.. 
- d * l + u (p* s+t) x + (\i \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (\i \ {(n-1) * s+t.. - d * ((n * s+t) - (p* s+t)) + 2*d* s + u (p* s+t) x + (\i \ {p* s+t..<(p+1)* s+t}. abs(u 1 ((T^^i) x))) + (\i \ {(n-1) * s+t..l + 2* s\ (n * s+t) - (p* s+t)\ apply (auto simp add: algebra_simps) - by (metis assms(1) distrib_left mult.commute mult_2 of_nat_add of_nat_le_iff real_mult_le_cancel_iff2) + by (metis assms(1) distrib_left mult.commute mult_2 of_nat_add of_nat_le_iff mult_le_cancel_iff2) also have "... \ -d * ((n * s+t) - (p* s+t)) + 2*d* s + Z t p x + 2* K * s + (\i \ {p* s+t..p by auto also have "... \ Z t p x - d/2 * card(V x \ {p * s + t<.. n * s+t}) + (\i \ {p* s+t..i {1..p * s + t}) - d/2 * card(V x \ {p * s + t<.. n * s+t}) + (\i \ {p* s+t.. Max {u i x |i. i < s} + (\ii \ {p* s+t.. {1..p * s + t}) - d/2 * card(V x \ {p * s + t<.. n * s+t})" using A3 by auto also have "... = Z t n x" unfolding Z_def using A2 A4 by (auto simp add: algebra_simps, metis distrib_left of_nat_add) finally show ?thesis by simp qed qed have Main2: "(d/2) * card(V x \ {1..n}) \ Max {u i x|i. i< s} + birkhoff_sum (\x. abs(u s x/ s)) (n+2* s) x + birkhoff_sum F (n + 2 * s) x + (1/s) * (\i< 2 * s. abs(u (n+i) x))" for n x proof - define N where "N = (n div s) + 1" then have "n \ N * s" using \s > 0\ dividend_less_div_times less_or_eq_imp_le by auto have "N * s \ n + s" by (auto simp add: N_def) have eq_t: "(d/2) * card(V x \ {1..n}) \ abs(u(N* s+t) x) + (Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x) + (\i birkhoff_sum F (n+ 2* s) x" unfolding birkhoff_sum_def apply (rule sum_mono2) using F_pos \N * s \ n + s\ \t by auto have "card(V x \ {1..n}) \ card(V x \ {1..N* s+t})" apply (rule card_mono) using \n \ N * s\ by auto then have "(d/2) * card(V x \ {1..n}) \ (d/2) * card(V x \ {1..N* s+t})" by auto also have "... \ - u (N* s+t) x + Max {u i x|i. i< s} + (\it < s\, of N x] unfolding Z_def by auto also have "... \ abs(u(N* s+t) x) + Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x + (\iti\{N* s.. (\i\{n..n \ N * s\ \N * s \ n + s\ by auto also have "... = (\i<2* s. abs (u (n+i) x))" by (rule sum.reindex_bij_betw[symmetric], rule bij_betw_byWitness[where ?f' = "\i. i - n"], auto) finally have **: "(\t (\i<2* s. abs (u (n+i) x))" by simp have "(\tii (\iN * s \ n + s\ by auto finally have ***: "(\ti s * birkhoff_sum (\x. abs(u s x/ s)) (n+2* s) x" unfolding birkhoff_sum_def using \s>0\ by (auto simp add: sum_divide_distrib[symmetric]) have ****: "s * (\ii {1..n}) = (\t {1..n}))" by auto also have "... \ (\tittti (\i<2* s. abs (u (n+i) x)) + s * (Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x) + s * birkhoff_sum (\x. abs(u s x/ s)) (n+2* s) x" using ** *** by auto also have "... = s * ((1/s) * (\i<2* s. abs (u (n+i) x)) + Max {u i x|i. i< s} + birkhoff_sum F (n + 2* s) x + birkhoff_sum (\x. abs(u s x/ s)) (n+2* s) x)" by (auto simp add: divide_simps mult.commute distrib_left) finally show ?thesis by auto qed have densV: "upper_asymptotic_density (V x) \ (2/d) * real_cond_exp M Invariants F2 x" if "x \ G" for x proof - have *: "(\n. abs(u n x/n)) \ 0" apply (rule tendsto_rabs_zero) using \x\G\ unfolding G_def by auto define Bound where "Bound = (\n. (Max {u i x|i. i< s}*(1/n) + birkhoff_sum (\x. abs(u s x/ s)) (n+2* s) x / n + birkhoff_sum F (n + 2* s) x / n + (1/s) * (\i<2* s. abs(u (n+i) x) / n)))" have "Bound \ (Max {u i x|i. i< s} * 0 + real_cond_exp M Invariants (\x. abs(u s x/s)) x + real_cond_exp M Invariants F x + (1/s) * (\i < 2 * s. 
0))" unfolding Bound_def apply (intro tendsto_intros) using \x\G\ * unfolding G_def by auto moreover have "real_cond_exp M Invariants (\x. abs(u s x/s)) x + real_cond_exp M Invariants F x = real_cond_exp M Invariants F2 x" using \x \ G\ unfolding G_def by auto ultimately have B_conv: "Bound \ real_cond_exp M Invariants F2 x" by simp have *: "(d/2) * card(V x \ {1..n}) / n \ Bound n" for n proof - have "(d/2) * card(V x \ {1..n}) / n \ (Max {u i x|i. i< s} + birkhoff_sum (\x. abs(u s x/ s)) (n+2* s) x + birkhoff_sum F (n + 2* s) x + (1/s) * (\i<2* s. abs(u (n+i) x)))/n" using Main2[of x n] using divide_right_mono of_nat_0_le_iff by blast also have "... = Bound n" unfolding Bound_def by (auto simp add: add_divide_distrib sum_divide_distrib[symmetric]) finally show ?thesis by simp qed have "ereal(d/2 * upper_asymptotic_density (V x)) = ereal(d/2) * ereal(upper_asymptotic_density (V x))" by auto also have "... = ereal (d/2) * limsup(\n. card(V x \ {1..n}) / n)" using upper_asymptotic_density_shift[of "V x" 1 0] by auto also have "... = limsup(\n. ereal (d/2) * (card(V x \ {1..n}) / n))" by (rule limsup_ereal_mult_left[symmetric], auto) also have "... \ limsup Bound" apply (rule Limsup_mono) using * not_eventuallyD by auto also have "... = ereal(real_cond_exp M Invariants F2 x)" using B_conv convergent_limsup_cl convergent_def convergent_real_imp_convergent_ereal limI by force finally have "d/2 * upper_asymptotic_density (V x) \ real_cond_exp M Invariants F2 x" by auto then show ?thesis by (simp add: divide_simps mult.commute) qed define epsilon where "epsilon = 2 * rho / d" have [simp]: "epsilon > 0" "epsilon \ 0" "epsilon \ 0" unfolding epsilon_def by auto have "emeasure M {x\space M. real_cond_exp M Invariants F2 x \ epsilon} \ (1/epsilon) * (\x. real_cond_exp M Invariants F2 x \M)" apply (intro integral_Markov_inequality real_cond_exp_pos real_cond_exp_int(1)) by (auto simp add: int_F2 F2_pos) also have "... = (1/epsilon) * (\x. F2 x \M)" apply (intro arg_cong[where f = ennreal]) by (simp, rule real_cond_exp_int(2), simp add: int_F2) also have "... < (1/epsilon) * 2 * rho" using F2_int by (intro ennreal_lessI) (auto simp add: divide_simps) also have "... = d" unfolding epsilon_def by auto finally have *: "emeasure M {x\space M. real_cond_exp M Invariants F2 x \ epsilon} < d" by simp define G2 where "G2 = {x \ G. real_cond_exp M Invariants F2 x < epsilon}" have [measurable]: "G2 \ sets M" unfolding G2_def by simp have "1 = emeasure M G" using \emeasure M G = 1\ by simp also have "... \ emeasure M (G2 \ {x\space M. real_cond_exp M Invariants F2 x \ epsilon})" apply (rule emeasure_mono) unfolding G2_def using sets.sets_into_space[OF \G \ sets M\] by auto also have "... \ emeasure M G2 + emeasure M {x\space M. real_cond_exp M Invariants F2 x \ epsilon}" by (rule emeasure_subadditive, auto) also have "... < emeasure M G2 + d" using * by auto finally have "1 - d < emeasure M G2" using emeasure_eq_measure \d \ 1\ by (auto intro!: ennreal_less_iff[THEN iffD2] simp del: ennreal_plus simp add: ennreal_plus[symmetric]) have "upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} < d" if "x \ G2" for x proof - have "x \ G" using \x \ G2\ unfolding G2_def by auto have "{n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} \ U x \ V x" unfolding U_def V_def by fastforce then have "upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} \ upper_asymptotic_density (U x \ V x)" by (rule upper_asymptotic_density_subset) also have "... 
\ upper_asymptotic_density (U x) + upper_asymptotic_density (V x)" by (rule upper_asymptotic_density_union) also have "... \ (2/d) * real_cond_exp M Invariants F2 x" using densU[OF \x \ G\] densV[OF \x \ G\] by auto also have "... < (2/d) * epsilon" using \x \ G2\ unfolding G2_def by (simp add: divide_simps) text \This is where the choice of $\rho$ at the beginning of the proof is relevant: we choose it so that the above term is at most $d$.\ also have "... = d" unfolding epsilon_def rho_def by auto finally show ?thesis by simp qed then have "G2 \ {x \ space M. upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} < d}" using sets.sets_into_space[OF \G2 \ sets M\] by blast then have "emeasure M G2 \ emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} < d}" by (rule emeasure_mono, auto) then have "emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - d * l} < d} > 1 -d" using \emeasure M G2 > 1 - d\ by auto then show ?thesis by blast qed text \The two previous lemmas are put together in the following lemma, corresponding to Lemma 2.3 in~\cite{gouezel_karlsson}.\ lemma upper_density_delta: fixes d::real assumes "d > 0" "d \ 1" shows "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ emeasure M {x \ space M. \(N::nat). card {n \{..l \ {1..n}. u n x - u (n-l) x \ - delta l * l} \ d * N} > 1 - d" proof - define d2 where "d2 = d/2" have [simp]: "d2 > 0" unfolding d2_def using assms by simp then have "\ d2 < 0" using not_less [of d2 0] by (simp add: less_le) have "d2/2 > 0" by simp obtain c0 where c0: "c0> (0::real)" "emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) x \ - c0 * l} < d2/2} > 1 - (d2/2)" using upper_density_all_times[OF \d2/2 > 0\] by blast have "\N. emeasure M {x \ space M. \n \ N. card ({n. \l \ {1..n}. u n x - u (n-l) x \ - c0 * l} \ {.. 1 - (d2/2)" apply (rule upper_density_eventually_measure) using c0(2) by auto then obtain N1 where N1: "emeasure M {x \ space M. \B \ N1. card ({n. \l \ {1..n}. u n x - u (n-l) x \ - c0 * l} \ {.. 1 - (d2/2)" by blast define O1 where "O1 = {x \ space M. \B \ N1. card ({n. \l \ {1..n}. u n x - u (n-l) x \ - c0 * l} \ {.. sets M" unfolding O1_def by auto have "emeasure M O1 > 1 - (d2/2)" unfolding O1_def using N1 by auto have *: "\N. emeasure M {x \ space M. \B \ N. card({n. \l \ {N..n}. u n x - u (n-l) x \ - e * l} \ {.. 1 - e" if "e>0" "e \ 1" for e::real proof - obtain k where k: "emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {k..n}. u n x - u (n-l) x \ - e * l} < e} > 1 - e" using upper_density_large_k[OF \e>0\ \e \ 1\] by blast then obtain N0 where N0: "emeasure M {x \ space M. \B \ N0. card({n. \l \ {k..n}. u n x - u (n-l) x \ - e * l} \ {.. 1 - e" using upper_density_eventually_measure[OF _ k] by auto define N where "N = max k N0" have "emeasure M {x \ space M. \B \ N0. card({n. \l \ {k..n}. u n x - u (n-l) x \ - e * l} \ {.. emeasure M {x \ space M. \B \ N. card({n. \l \ {N..n}. u n x - u (n-l) x \ - e * l} \ {.. space M" "\B\N0. card ({n. \l\{k..n}. u n x - u (n - l) x \ - (e * real l)} \ {.. B" have "card({n. \l \ {N..n}. u n x - u (n-l) x \ - (e * real l)} \ {.. card({n. \l \ {k..n}. u n x - u (n-l) x \ -(e * real l)} \ {..l \ {N..n}. u n x - u (n-l) x \ - (e * real l)} \ {.. card({n. \l \ {k..n}. u n x - u (n-l) x \ -(e * real l)} \ {..B\N\ unfolding N_def by auto finally show "card ({n. \l\{N..n}. u n x - u (n - l) x \ - (e * real l)} \ {.. space M. \B \ N. card({n. \l \ {N..n}. 
u n x - u (n-l) x \ - e * l} \ {.. 1 - e" using N0 by simp then show ?thesis by auto qed define Ne where "Ne = (\(e::real). SOME N. emeasure M {x \ space M. \B \ N. card({n. \l \ {N..n}. u n x - u (n-l) x \ - e * l} \ {.. 1 - e)" have Ne: "emeasure M {x \ space M. \B \ Ne e. card({n. \l \ {Ne e..n}. u n x - u (n-l) x \ - e * l} \ {.. 1 - e" if "e>0" "e \ 1" for e::real unfolding Ne_def by (rule someI_ex[OF *[OF that]]) define eps where "eps = (\(n::nat). d2 * (1/2)^n)" have [simp]: "eps n > 0" for n unfolding eps_def by auto then have [simp]: "eps n \ 0" for n by (rule less_imp_le) have "eps n \ (1 / 2) * 1" for n unfolding eps_def d2_def using \d \ 1\ by (intro mult_mono power_le_one) auto also have "\ < 1" by auto finally have [simp]: "eps n < 1" for n by simp then have [simp]: "eps n \ 1" for n by (rule less_imp_le) have "(\n. d2 * (1/2)^n) \ d2 * 0" by (rule tendsto_mult, auto simp add: LIMSEQ_realpow_zero) then have "eps \ 0" unfolding eps_def by auto define Nf where "Nf = (\N. (if (N = 0) then 0 else if (N = 1) then N1 + 1 else max (N1+1) (Max {Ne(eps n)|n. n \ N}) + N))" have "Nf N < Nf (N+1)" for N proof - consider "N = 0" | "N = 1" | "N>1" by fastforce then show ?thesis proof (cases) assume "N>1" have "Max {Ne (eps n) |n. n \ N} \ Max {Ne (eps n) |n. n \ Suc N}" by (rule Max_mono, auto) then show ?thesis unfolding Nf_def by auto qed (auto simp add: Nf_def) qed then have "strict_mono Nf" using strict_mono_Suc_iff by auto define On where "On = (\(N::nat). (if (N = 1) then O1 else {x \ space M. \B \ Nf N. card({n. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N) * l} \ {.. sets M" for N unfolding On_def by auto have "emeasure M (On N) > 1 - eps N" if "N>0" for N proof - consider "N = 1" | "N>1" using \N>0\ by linarith then show ?thesis proof (cases) case 1 then show ?thesis unfolding On_def eps_def using \emeasure M O1 > 1 - (d2/2)\ by auto next case 2 have "Ne (eps N) \ Max {Ne(eps n)|n. n \ N}" by (rule Max.coboundedI, auto) also have "... \ Nf N" unfolding Nf_def using \N>1\ by auto finally have "Ne (eps N) \ Nf N" by simp have "1 - eps N < emeasure M {x \ space M. \B \ Ne(eps N). card({n. \l \ {Ne(eps N)..n}. u n x - u (n-l) x \ - (eps N) * l} \ {.. emeasure M {x \ space M. \B \ Nf N. card({n. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N) * l} \ {.. space M" "\n\Ne (eps N). card ({n. \l\{Ne (eps N)..n}. u n x - u (n - l) x \ - (eps N * l)} \ {.. n" have "card({n. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. card({n. \l \ {Ne(eps N)..n}. u n x - u (n-l) x \ -(eps N) * l} \ {..Ne (eps N) \ Nf N\ by auto then have "real(card({n. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. card({n. \l \ {Ne(eps N)..n}. u n x - u (n-l) x \ -(eps N) * l} \ {..n \ Nf N\ \Ne (eps N) \ Nf N\ by auto finally show "real (card ({n. \l\{Nf N..n}. u n x - u (n - l) x \ - (eps N * l)} \ {..N>1\ by auto finally show ?thesis by simp qed qed then have *: "emeasure M (On (N+1)) > 1 - eps (N+1)" for N by simp define Ogood where "Ogood = (\N. On (N+1))" have [measurable]: "Ogood \ sets M" unfolding Ogood_def by auto have "emeasure M Ogood \ 1 - (\N. eps(N+1))" unfolding Ogood_def apply (intro emeasure_intersection, auto) using * by (auto simp add: eps_def summable_mult summable_divide summable_geometric less_imp_le) moreover have "(\N. 
eps(N+1)) = d2" unfolding eps_def apply (subst suminf_mult) using sums_unique[OF power_half_series, symmetric] by (auto intro!: summable_divide summable_geometric) finally have "emeasure M Ogood \ 1 - d2" by simp then have "emeasure M Ogood > 1 - d" unfolding d2_def using \d>0\ \d \ 1\ by (simp add: emeasure_eq_measure field_sum_of_halves ennreal_less_iff) have Ogood_union: "Ogood = (\(K::nat). Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)})" apply auto using sets.sets_into_space[OF \Ogood \ sets M\] apply blast proof - fix x define M where "M = Max {abs(u n x - u (n-l) x)/l | n l. n \ {1..Nf 1} \ l \ {1..n}}" obtain N::nat where "N > M" using reals_Archimedean2 by blast have "finite { (n, l) | n l. n \ {1..Nf 1} \ l \ {1..n}}" by (rule finite_subset[where ?B = "{1.. Nf 1} \ {1..Nf 1}"], auto) moreover have "{abs(u n x - u (n-l) x)/l | n l. n \ {1..Nf 1} \ l \ {1..n}} = (\ (n, l). abs(u n x - u (n-l) x)/l)` { (n, l) | n l. n \ {1..Nf 1} \ l \ {1..n}}" by auto ultimately have fin: "finite {abs(u n x - u (n-l) x)/l | n l. n \ {1..Nf 1} \ l \ {1..n}}" by auto { fix n l assume nl: "n \ {1..Nf 1} \ l \ {1..n}" then have "real l>0" by simp have "abs(u n x - u (n-l) x)/l \ M" unfolding M_def apply (rule Max_ge) using fin nl by auto then have "abs(u n x - u (n-l) x)/l < real N" using \N>M\ by simp then have "abs(u n x - u (n-l) x)< real N * l" using \0 < real l\ pos_divide_less_eq by blast then have "u n x - u (n-l) x > - (real N * l)" by simp } then have "\n\{Suc 0..Nf (Suc 0)}. \l\{Suc 0..n}. - (real N * real l) < u n x - u (n - l) x" by auto then show "\N. \n\{Suc 0..Nf (Suc 0)}. \l\{Suc 0..n}. - (real N * real l) < u n x - u (n - l) x" by auto qed have "(\K. emeasure M (Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)})) \ emeasure M (\(K::nat). Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)})" apply (rule Lim_emeasure_incseq, auto) unfolding incseq_def apply auto proof - fix m n x na l assume "m \ (n::nat)" "\n\{Suc 0..Nf (Suc 0)}. \l\{Suc 0..n}. - (real m * real l) < u n x - u (n - l) x" "Suc 0 \ l" "l \ na" "na \ Nf (Suc 0)" then have "- (real m * real l) < u na x - u (na - l) x" by auto moreover have "- (real n * real l) \ - (real m * real l)" using \m \ n\ by (simp add: mult_mono) ultimately show "- (real n * real l) < u na x - u (na - l) x" by auto qed moreover have "emeasure M (\(K::nat). Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d" using Ogood_union \emeasure M Ogood > 1 - d\ by auto ultimately have a: "eventually (\K. emeasure M (Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d) sequentially" by (rule order_tendstoD(1)) have b: "eventually (\K. K \ max c0 d2) sequentially" using eventually_at_top_linorder nat_ceiling_le_eq by blast have "eventually (\K. K \ max c0 d2 \ emeasure M (Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d) sequentially" by (rule eventually_elim2[OF a b], auto) then obtain K where K: "K\max c0 d2" "emeasure M (Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. u n x - u (n-l) x > - (real K * l)}) > 1 - d" using eventually_False_sequentially eventually_elim2 by blast define Og where "Og = Ogood \ {x \ space M. \n \ {1..Nf 1}. \l \ {1..n}. 
u n x - u (n-l) x > - (real K * l)}" have [measurable]: "Og \ sets M" unfolding Og_def by simp have "emeasure M Og > 1 - d" unfolding Og_def using K by simp have fin: "finite {N. Nf N \ n}" for n using pseudo_inverse_finite_set[OF filterlim_subseq[OF \strict_mono Nf\]] by auto define prev_N where "prev_N = (\n. Max {N. Nf N \ n})" define delta where "delta = (\l. if (prev_N l \ 1) then K else eps (prev_N l))" have "\l. delta l > 0" unfolding delta_def using \K\max c0 d2\ \c0>0\ by auto have "LIM n sequentially. prev_N n :> at_top" unfolding prev_N_def apply (rule tendsto_at_top_pseudo_inverse2) using \strict_mono Nf\ by (simp add: filterlim_subseq) then have "eventually (\l. prev_N l > 1) sequentially" by (simp add: filterlim_iff) then have "eventually (\l. delta l = eps(prev_N l)) sequentially" unfolding delta_def by (simp add: eventually_mono) moreover have "(\l. eps(prev_N l)) \ 0" by (rule filterlim_compose[OF \eps \ 0\ \LIM n sequentially. prev_N n :> at_top\]) ultimately have "delta \ 0" by (simp add: filterlim_cong) have "delta n \ K" for n proof - have *: "d2 * (1 / 2) ^ prev_N n \ real K * 1" apply (rule mult_mono') using \K \ max c0 d2\ \d2>0\ by (auto simp add: power_le_one less_imp_le) then show ?thesis unfolding delta_def apply auto unfolding eps_def using * by auto qed define bad_times where "bad_times = (\x. {n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ (\N\{2..}. {n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)}))" have card_bad_times: "card (bad_times x \ {.. d2 * B" if "x \ Og" for x B proof - have "(\N\{.. (\N. (1/2)^N)" by (rule sum_le_suminf, auto simp add: summable_geometric) also have "... = 2" using suminf_geometric[of "1/(2::real)"] by auto finally have *: "(\N\{.. 2" by simp have "(\ N \ {2.. (\ N \ {2..N\{2..N\{..N\{..N\{.. (d2 * (1/4) * B) * 2" apply (rule mult_left_mono) using * \d2 > 0\ apply auto by (metis \0 < d2\ mult_eq_0_iff mult_le_0_iff not_le of_nat_eq_0_iff of_nat_le_0_iff) finally have I: "(\ N \ {2.. d2/2 * B" by auto have "x \ On 1" using \x \ Og\ unfolding Og_def Ogood_def by auto then have "x \ O1" unfolding On_def by auto have B1: "real(card({n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. (d2/2) * B" for B proof (cases "B \ N1") case True have "card({n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. card({n. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. (d2/2) * B" using \x \ O1\ unfolding O1_def using True by auto finally show ?thesis by auto next case False then have "B < Nf 1" unfolding Nf_def by auto then have "{n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. (d2/2) * B" using \\ d2 < 0\ by simp finally show ?thesis by simp qed have BN: "real(card ({n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. eps N * B" if "N \ 2" for N B proof - have "x \ On ((N-1) + 1)" using \x \ Og\ unfolding Og_def Ogood_def by auto then have "x \ On N" using \N\2\ by auto show ?thesis proof (cases "B \ Nf N") case True have "card ({n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. card ({n. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. eps N * B" using \x \ On N\ \N\2\ True unfolding On_def by auto finally show ?thesis by simp next case False then have "{n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. eps N * B" by (metis \\n. 
0 < eps n\ le_less mult_eq_0_iff mult_pos_pos of_nat_0 of_nat_0_le_iff) finally show ?thesis by simp qed qed { fix N assume "N \ B" have "Nf N \ B" using seq_suble[OF \strict_mono Nf\, of N] \N \ B\ by simp then have "{Nf N..} \ {.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {..N\{B..}. {n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. {2.. {B..}" by auto then have "(\N\{2..}. {n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. (\N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. (\N\{B..}. {n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {..N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. {.. {n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. (\N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. {.. card({n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. (\N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. card({n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {..N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. card({n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. {.. real(card({n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)} \ {.. N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. (d2/2 * B) + (\ N\{2.. {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)} \ {.. (d2/2 * B) + (\ N \ {2.. (d2/2 * B) + (d2/2*B)" using I by auto finally show ?thesis by simp qed have ineq_on_Og: "u n x - u (n-l) x > - delta l * l" if "l \ {1..n}" "n \ bad_times x" "x \ Og" for n x l proof - consider "n < Nf 1" | "n \ Nf 1 \ prev_N l \ 1" | "n \ Nf 1 \ prev_N l \ 2" by linarith then show ?thesis proof (cases) assume "n < Nf 1" then have "{N. Nf N \ n} = {0}" apply auto using \strict_mono Nf\ unfolding strict_mono_def apply (metis le_trans less_Suc0 less_imp_le_nat linorder_neqE_nat not_less) unfolding Nf_def by auto then have "prev_N n = 0" unfolding prev_N_def by auto moreover have "prev_N l \ prev_N n" unfolding prev_N_def apply (rule Max_mono) using \l \ {1..n}\ fin apply auto unfolding Nf_def by auto ultimately have "prev_N l = 0" using \prev_N l \ prev_N n\ by auto then have "delta l = K" unfolding delta_def by auto have "1 \ {N. Nf N \ n}" using fin[of n] by (metis (full_types) Max_ge \prev_N n = 0\ fin not_one_le_zero prev_N_def) then have "n < Nf 1" by auto moreover have "n \ 1" using \l \ {1..n}\ by auto ultimately have "n \ {1..Nf 1}" by auto then have "u n x - u (n-l) x > - (real K * l)" using \x \ Og\ unfolding Og_def using \l \ {1..n}\ by auto then show ?thesis using \delta l = K\ by auto next assume H: "n \ Nf 1 \ prev_N l \ 1" then have "delta l = K" unfolding delta_def by auto have "n \ {n \ {Nf 1..}. \l\{1..n}. u n x - u (n-l) x \ - (c0 * l)}" using \n \ bad_times x\ unfolding bad_times_def by auto then have "u n x - u (n-l) x > - (c0 * l)" using H \l \ {1..n}\ by force moreover have "- (c0 * l) \ - (real K * l)" using K(1) by (simp add: mult_mono) ultimately show ?thesis using \delta l = K\ by auto next assume H: "n \ Nf 1 \ prev_N l \ 2" define N where "N = prev_N l" have "N \ 2" unfolding N_def using H by auto have "prev_N l \ {N. 
Nf N \ l}" unfolding prev_N_def apply (rule Max_in, auto simp add: fin) unfolding Nf_def by auto then have "Nf N \ l" unfolding N_def by auto then have "Nf N \ n" using \l \ {1..n}\ by auto have "n \ {n \ {Nf N..}. \l \ {Nf N..n}. u n x - u (n-l) x \ - (eps N * l)}" using \n \ bad_times x\ \N\2\ unfolding bad_times_def by auto then have "u n x - u (n-l) x > - (eps N * l)" using \Nf N \ n\ \Nf N \ l\ \l \ {1..n}\ by force moreover have "eps N = delta l" unfolding delta_def N_def using H by auto ultimately show ?thesis by auto qed qed have "Og \ {x \ space M. \(B::nat). card {n \{..l \ {1..n}. u n x - u (n-l) x \ - delta l * l} \ d * B}" proof (auto) fix x assume "x \ Og" then show "x \ space M" unfolding Og_def by auto next fix x B assume "x \ Og" have *: "{n. n < B \ (\l\{Suc 0..n}. u n x - u (n - l) x \ - (delta l * real l))} \ bad_times x \ {..x\Og\ by force have "card {n. n < B \ (\l\{Suc 0..n}. u n x - u (n - l) x \ - (delta l * real l))} \ card (bad_times x \ {.. d2 * B" using card_bad_times \x \ Og\ by auto also have "... \ d * B" unfolding d2_def using \d>0\ by auto finally show "card {n. n < B \ (\l\{Suc 0..n}. u n x - u (n - l) x \ - (delta l * real l))} \ d * B" by simp qed then have "emeasure M Og \ emeasure M {x \ space M. \(B::nat). card {n \{..l \ {1..n}. u n x - u (n-l) x \ - delta l * l} \ d * B}" by (rule emeasure_mono, auto) then have "emeasure M {x \ space M. \(B::nat). card {n \{..l \ {1..n}. u n x - u (n-l) x \ - delta l * l} \ d * B} > 1-d" using \emeasure M Og > 1 - d\ by auto then show ?thesis using \delta \ 0\ \\l. delta l > 0\ by auto qed text \We go back to the natural time direction, by using the previous result for the inverse map and the inverse subcocycle, and a change of variables argument. The price to pay is that the estimates we get are weaker: we have a control on a set of upper asymptotic density close to $1$, while having a set of lower asymptotic density close to $1$ as before would be stronger. This will nevertheless be sufficient for our purposes below.\ lemma upper_density_good_direction_invertible: assumes "invertible_qmpt" "d>(0::real)" "d \ 1" shows "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} \ 1-d} \ ennreal(1-d)" proof - interpret I: Gouezel_Karlsson_Kingman M Tinv "(\n x. u n ((Tinv^^n) x))" proof show "Tinv \ quasi_measure_preserving M M" using Tinv_qmpt[OF \invertible_qmpt\] unfolding qmpt_def qmpt_axioms_def by simp show "Tinv \ measure_preserving M M" using Tinv_mpt[OF \invertible_qmpt\] unfolding mpt_def mpt_axioms_def by simp show "mpt.subcocycle M Tinv (\n x. u n ((Tinv ^^ n) x))" using subcocycle_u_Tinv[OF subu \invertible_qmpt\] by simp show "- \ < subcocycle_avg_ereal (\n x. u n ((Tinv ^^ n) x))" using subcocycle_avg_ereal_Tinv[OF subu \invertible_qmpt\] subu_fin by simp show "AE x in M. fmpt.subcocycle_lim M Tinv (\n x. u n ((Tinv ^^ n) x)) x = 0" using subcocycle_lim_Tinv[OF subu \invertible_qmpt\] subu_0 by auto qed have bij: "bij T" using \invertible_qmpt\ unfolding invertible_qmpt_def by simp define e where "e = d * d / 2" have "e>0" "e\1" unfolding e_def using \d>0\ \d \ 1\ by (auto, meson less_imp_le mult_left_le one_le_numeral order_trans) obtain delta::"nat \ real" where d: "\l. delta l > 0" "delta \ 0" "emeasure M {x \ space M. \N. card {n \ {..l\{1..n}. 
u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) \ - delta l * real l} \ e * real N} > 1-e" using I.upper_density_delta[OF \e>0\ \e\1\] by blast define S where "S = {x \ space M. \N. card {n \ {..l\{1..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) \ - delta l * real l} \ e * real N}" have [measurable]: "S \ sets M" unfolding S_def by auto have "emeasure M S > 1 - e" unfolding S_def using d(3) by simp define Og where "Og = (\n. {x \ space M. \l\{1..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) > - delta l * real l})" have [measurable]: "Og n \ sets M" for n unfolding Og_def by auto define Pg where "Pg = (\n. {x \ space M. \l\{1..n}. u n x - u (n - l) ((T^^l) x) > - delta l * real l})" have [measurable]: "Pg n \ sets M" for n unfolding Pg_def by auto define Bad where "Bad = (\i::nat. {x \ space M. \N\i. card {n \ {.. Pg n} \ (1-d) * real N})" have [measurable]: "Bad i \ sets M" for i unfolding Bad_def by auto then have "range Bad \ sets M" by auto have "incseq Bad" unfolding Bad_def incseq_def by auto have inc: "{x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d} \ (\i. Bad i)" proof fix x assume H: "x \ {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}" then have "x \ space M" by simp define A where "A = {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l}" have "upper_asymptotic_density A < 1-d" using H unfolding A_def by simp then have "\i. \N\i. card (A \ {.. (1-d)* real N" using upper_asymptotic_densityD[of A "1-d"] by (metis (no_types, lifting) eventually_sequentially less_imp_le) then obtain i where "card (A \ {.. (1-d)* real N" if "N\i" for N by blast moreover have "A \ {.. (\l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l)}" for N unfolding A_def by auto ultimately have "x \ Bad i" unfolding Bad_def Pg_def using \x \ space M\ by auto then show "x \ (\i. Bad i)" by blast qed have "emeasure M (Og n) \ emeasure M (Pg n)" for n proof - have *: "(T^^n)-`(Og n) \ space M \ Pg n" for n proof fix x assume x: "x \ (T^^n)-`(Og n) \ space M" define y where "y = (T^^n) x" then have "y \ Og n" using x by auto have "y \ space M" using sets.sets_into_space[OF \Og n \ sets M\] \y \ Og n\ by auto have "x = (Tinv^^n) y" unfolding y_def Tinv_def using inv_fn_o_fn_is_id[OF bij, of n] by (metis comp_apply) { fix l assume "l \ {1..n}" have "(T^^l) x = (T^^l) ((Tinv^^l) ((Tinv^^(n-l))y))" apply (subst \x = (Tinv^^n) y\) using funpow_add[of l "n-l" Tinv] \l \ {1..n}\ by fastforce then have *: "(T^^l) x = (Tinv^^(n-l)) y" unfolding Tinv_def using fn_o_inv_fn_is_id[OF bij] by (metis comp_apply) then have "u n x - u (n-l) ((T^^l) x) = u n ((Tinv^^n) y) - u (n-l) ((Tinv^^(n-l)) y)" using \x = (Tinv^^n) y\ by auto also have "... > - delta l * real l" using \y \ Og n\ \l \ {1..n}\ unfolding Og_def by auto finally have "u n x - u (n-l) ((T^^l) x) > - delta l * real l" by simp } then show "x \ Pg n" unfolding Pg_def using x by auto qed have "emeasure M (Og n) = emeasure M ((T^^n)-`(Og n) \ space M)" using T_vrestr_same_emeasure(2) unfolding vimage_restr_def by auto also have "... \ emeasure M (Pg n)" apply (rule emeasure_mono) using * by auto finally show ?thesis by simp qed { fix N::nat assume "N \ 1" have I: "card {n\{.. Og n} \ (1-e) * real N" if "x \ S" for x proof - have "x \ space M" using \x \ S\ sets.sets_into_space[OF \S \ sets M\] by auto have a: "real (card {n. n < N \ (\l\{Suc 0..n}. 
u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) \ - (delta l * real l))}) \ e * real N" using \x \ S\ unfolding S_def by auto have *: "{n. n < N} = {n. n < N \ (\l\{Suc 0..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) \ - (delta l * real l))} \ {n. n < N \ x \ Og n}" unfolding Og_def using \x \ space M\ by (auto, meson atLeastAtMost_iff linorder_not_le) have "N = card {n. n < N}" by auto also have "... = card {n. n < N \ (\l\{Suc 0..n}. u n ((Tinv ^^ n) x) - u (n - l) ((Tinv ^^ (n - l)) x) \ - (delta l * real l))} + card {n. n < N \ x \ Og n}" apply (subst *, rule card_Un_disjoint) unfolding Og_def by auto ultimately have "real N \ e * real N + card {n. n < N \ x \ Og n}" using a by auto then show ?thesis by (auto simp add: algebra_simps) qed define m where "m = measure M (Bad N)" have "m \ 0" "1-m \ 0" unfolding m_def by auto have *: "1-e \ emeasure M S" using \emeasure M S > 1 - e\ by auto have "ennreal((1-e) * ((1-e) * real N)) = ennreal(1-e) * ennreal((1-e) * real N)" apply (rule ennreal_mult) using \e \ 1\ by auto also have "... \ emeasure M S * ennreal((1-e) * real N)" using mult_right_mono[OF *] by simp also have "... = (\\<^sup>+ x\S. ((1-e) * real N) \M)" by (metis \S \ events\ mult.commute nn_integral_cmult_indicator) also have "... \ (\\<^sup>+x \ S. ennreal(card {n\{.. Og n}) \M)" apply (rule nn_integral_mono) using I unfolding indicator_def by (simp) also have "... \ (\\<^sup>+x \ space M. ennreal(card {n\{.. Og n}) \M)" by (rule nn_set_integral_set_mono, simp only: sets.sets_into_space[OF \S \ sets M\]) also have "... = (\\<^sup>+x. ennreal(card {n\{.. Og n}) \M)" by (rule nn_set_integral_space) also have "... = (\\<^sup>+x. ennreal (\n\{..M)" apply (rule nn_integral_cong) using sum_indicator_eq_card2[of "{..\<^sup>+x. (\n\{..M)" apply (rule nn_integral_cong, auto, simp only: sum_ennreal[symmetric]) by (metis ennreal_0 ennreal_eq_1 indicator_eq_1_iff indicator_simps(2) real_of_nat_indicator) also have "... = (\n \{..\<^sup>+x. (indicator (Og n) x) \M))" by (rule nn_integral_sum, simp) also have "... = (\n \{.. (\n \{..\n. emeasure M (Og n) \ emeasure M (Pg n)\ by simp also have "... = (\n \{..\<^sup>+x. (indicator (Pg n) x) \M))" by simp also have "... = (\\<^sup>+x. (\n\{..M)" by (rule nn_integral_sum[symmetric], simp) also have "... = (\\<^sup>+x. ennreal (\n\{..M)" apply (rule nn_integral_cong, auto, simp only: sum_ennreal[symmetric]) by (metis ennreal_0 ennreal_eq_1 indicator_eq_1_iff indicator_simps(2) real_of_nat_indicator) also have "... = (\\<^sup>+x. ennreal(card {n\{.. Pg n}) \M)" apply (rule nn_integral_cong) using sum_indicator_eq_card2[of "{..\<^sup>+x \ space M. ennreal(card {n\{.. Pg n}) \M)" by (rule nn_set_integral_space[symmetric]) also have "... = (\\<^sup>+x \ Bad N \ (space M - Bad N). ennreal(card {n\{.. Pg n}) \M)" apply (rule nn_integral_cong) unfolding indicator_def by auto also have "... = (\\<^sup>+x \ Bad N. ennreal(card {n\{.. Pg n}) \M) + (\\<^sup>+x \ space M - Bad N. ennreal(card {n\{.. Pg n}) \M)" by (rule nn_integral_disjoint_pair, auto) also have "... \ (\\<^sup>+x \ Bad N. ennreal((1-d) * real N) \M) + (\\<^sup>+x \ space M - Bad N. ennreal(real N) \M)" apply (rule add_mono) apply (rule nn_integral_mono, simp add: Bad_def indicator_def, auto) apply (rule nn_integral_mono, simp add: indicator_def, auto) using card_Collect_less_nat[of N] card_mono[of "{n. n < N}"] by (simp add: Collect_mono_iff) also have "... 
= ennreal((1-d) * real N) * emeasure M (Bad N) + ennreal(real N) * emeasure M (space M - Bad N)" by (simp add: nn_integral_cmult_indicator) also have "... = ennreal((1-d) * real N) * ennreal(m) + ennreal(real N) * ennreal(1-m)" unfolding m_def by (simp add: emeasure_eq_measure prob_compl) also have "... = ennreal((1-d) * real N * m + real N * (1-m))" using \m \ 0\ \1-m \ 0\ \d \ 1\ ennreal_plus ennreal_mult by auto finally have "ennreal((1-e) * ((1-e) * real N)) \ ennreal((1-d) * real N * m + real N * (1-m))" by simp moreover have "(1-d) * real N * m + real N * (1-m) \ 0" using \m \ 0\ \1-m \ 0\ \d \ 1\ by auto ultimately have "(1-e) * ((1-e) * real N) \ (1-d) * real N * m + real N * (1-m)" using ennreal_le_iff by auto then have "0 \ (e * 2 - d * m - e * e) * real N" by (auto simp add: algebra_simps) then have "0 \ e * 2 - d * m - e * e" using \N \ 1\ by (simp add: zero_le_mult_iff) also have "... \ e * 2 - d * m" using \e > 0\ by auto finally have "m \ e * 2 / d" using \d>0\ by (auto simp add: algebra_simps divide_simps) then have "m \ d" unfolding e_def using \d>0\ by (auto simp add: divide_simps) then have "emeasure M (Bad N) \ d" unfolding m_def by (simp add: emeasure_eq_measure ennreal_leI) } then have "emeasure M (\i. Bad i) \ d" using LIMSEQ_le_const2[OF Lim_emeasure_incseq[OF \range Bad \ sets M\ \incseq Bad\]] by auto then have "emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d} \ d" using emeasure_mono[OF inc, of M] by auto then have *: "measure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d} \ d" using emeasure_eq_measure \d>0\ by fastforce have "{x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} \ 1-d} = space M - {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}" by auto then have "measure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} \ 1-d} = measure M (space M - {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d})" by simp also have "... = measure M (space M) - measure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} < 1-d}" by (rule measure_Diff, auto) also have "... \ 1 - d" using * prob_space by linarith finally have "emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l} \ 1-d} \ 1 - d" using emeasure_eq_measure by auto then show ?thesis using d(1) d(2) by blast qed text \Now, we want to remove the invertibility assumption in the previous lemma. The idea is to go to the natural extension of the system, use the result there and project it back. However, if the system is not defined on a polish space, there is no reason why it should have a natural extension, so we have first to project the original system on a polish space on which the subcocycle is defined. This system is obtained by considering the joint distribution of the subcocycle and all its iterates (this is indeed a polish system, as a space of functions from $\mathbb{N}^2$ to $\mathbb{R}$).\ lemma upper_density_good_direction: assumes "d>(0::real)" "d \ 1" shows "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. 
u n x - u (n-l) ((T^^l) x) > - delta l * l} \ 1-d} \ ennreal(1-d)" proof - define U where "U = (\x. (\n. u n x))" define projJ where "projJ = (\x. (\n. U ((T^^n)x)))" define MJ where "MJ = (distr M borel (\x. (\n. U ((T^^n)x))))" define TJ::"(nat \ nat \ real) \ (nat \ nat \ real)" where "TJ = nat_left_shift" have *: "mpt_factor projJ MJ TJ" unfolding projJ_def MJ_def TJ_def apply (rule fmpt_factor_projection) unfolding U_def by (rule measurable_coordinatewise_then_product, simp) interpret J: polish_pmpt MJ TJ unfolding projJ_def polish_pmpt_def apply (auto) apply (rule pmpt_factor) using * apply simp unfolding polish_pmpt_axioms_def MJ_def by auto have [simp]: "projJ \ measure_preserving M MJ" using mpt_factorE(1)[OF *] by simp then have [measurable]: "projJ \ measurable M MJ" by (simp add: measure_preservingE(1)) text \We define a subcocycle $uJ$ in the projection corresponding to the original subcocycle $u$ above. (With the natural definition, it is only a subcocycle almost everywhere.) We check that it shares most properties of $u$.\ define uJ::"nat \ (nat \ nat \ real) \ real" where "uJ = (\n x. x 0 n)" have [measurable]: "uJ n \ borel_measurable borel" for n unfolding uJ_def by (metis measurable_product_coordinates measurable_product_then_coordinatewise) moreover have "measurable borel borel = measurable MJ borel" apply (rule measurable_cong_sets) unfolding MJ_def by auto ultimately have [measurable]: "uJ n \ borel_measurable MJ" for n by fast have uJ_proj: "u n x = uJ n (projJ x)" for n x unfolding uJ_def projJ_def U_def by auto have uJ_int: "integrable MJ (uJ n)" for n apply (rule measure_preserving_preserves_integral'(1)[OF \projJ \ measure_preserving M MJ\]) apply (subst uJ_proj[of n, symmetric]) using int_u[of n] by auto have uJ_int2: "(\x. uJ n x \MJ) = (\x. u n x \M)" for n unfolding uJ_proj apply (rule measure_preserving_preserves_integral'(2)[OF \projJ \ measure_preserving M MJ\]) apply (subst uJ_proj[of n, symmetric]) using int_u[of n] by auto have uJ_AE: "AE x in MJ. uJ (n+m) x \ uJ n x + uJ m ((TJ^^n) x)" for m n proof - have "AE x in M. uJ (n+m) (projJ x) \ uJ n (projJ x) + uJ m (projJ ((T^^n) x))" unfolding uJ_proj[symmetric] using subcocycle_ineq[OF subu] by auto moreover have "AE x in M. projJ ((T^^n) x) = (TJ^^n) (projJ x)" using qmpt_factor_iterates[OF mpt_factor_is_qmpt_factor[OF *]] by auto ultimately have *: "AE x in M. uJ (n+m) (projJ x) \ uJ n (projJ x) + uJ m ((TJ^^n) (projJ x))" by auto show ?thesis apply (rule quasi_measure_preserving_AE'[OF measure_preserving_is_quasi_measure_preserving[OF \projJ \ measure_preserving M MJ\], OF *]) by auto qed have uJ_0: "AE x in MJ. (\n. uJ n x / n) \ 0" proof - have "AE x in M. (\n. u n x / n) \ subcocycle_lim u x" by (rule kingman_theorem_nonergodic(1)[OF subu subu_fin]) moreover have "AE x in M. subcocycle_lim u x = 0" using subu_0 by simp ultimately have *: "AE x in M. (\n. 
uJ n (projJ x) / n) \ 0" unfolding uJ_proj by auto show ?thesis apply (rule quasi_measure_preserving_AE'[OF measure_preserving_is_quasi_measure_preserving[OF \projJ \ measure_preserving M MJ\], OF *]) by auto qed text \Then, we go to the natural extension of $TJ$, to have an invertible system.\ define MI where "MI = J.natural_extension_measure" define TI where "TI = J.natural_extension_map" define projI where "projI = J.natural_extension_proj" interpret I: pmpt MI TI unfolding MI_def TI_def by (rule J.natural_extension(1)) have "I.mpt_factor projI MJ TJ" unfolding projI_def using I.mpt_factorE(1) J.natural_extension(3) MI_def TI_def by auto then have [simp]: "projI \ measure_preserving MI MJ" using I.mpt_factorE(1) by auto then have [measurable]: "projI \ measurable MI MJ" by (simp add: measure_preservingE(1)) have "I.invertible_qmpt" using J.natural_extension(2) MI_def TI_def by auto text \We define a natural subcocycle $uI$ there, and check its properties.\ define uI where uI_proj: "uI = (\n x. uJ n (projI x))" have [measurable]: "uI n \ borel_measurable MI" for n unfolding uI_proj by auto have uI_int: "integrable MI (uI n)" for n unfolding uI_proj by (rule measure_preserving_preserves_integral(1)[OF \projI \ measure_preserving MI MJ\ uJ_int]) have "(\x. uJ n x \MJ) = (\x. uI n x \MI)" for n unfolding uI_proj by (rule measure_preserving_preserves_integral(2)[OF \projI \ measure_preserving MI MJ\ uJ_int]) then have uI_int2: "(\x. uI n x \MI) = (\x. u n x \M)" for n using uJ_int2 by simp have uI_AE: "AE x in MI. uI (n+m) x \ uI n x + uI m (((TI)^^n) x)" for m n proof - have "AE x in MI. uJ (n+m) (projI x) \ uJ n (projI x) + uJ m (((TJ)^^n) (projI x))" apply (rule quasi_measure_preserving_AE[OF measure_preserving_is_quasi_measure_preserving[OF \projI \ measure_preserving MI MJ\]]) using uJ_AE by auto moreover have "AE x in MI. ((TJ)^^n) (projI x) = projI (((TI)^^n) x)" using I.qmpt_factor_iterates[OF I.mpt_factor_is_qmpt_factor[OF \I.mpt_factor projI MJ TJ\]] by auto ultimately show ?thesis unfolding uI_proj by auto qed have uI_0: "AE x in MI. (\n. uI n x / n) \ 0" unfolding uI_proj apply (rule quasi_measure_preserving_AE[OF measure_preserving_is_quasi_measure_preserving[OF \projI \ measure_preserving MI MJ\]]) using uJ_0 by simp text \As $uI$ is only a subcocycle almost everywhere, we correct it to get a genuine subcocycle, to which we will apply Lemma \verb+upper_density_good_direction_invertible+.\ obtain vI where H: "I.subcocycle vI" "AE x in MI. \n. vI n x = uI n x" using I.subcocycle_AE[OF uI_AE uI_int] by blast have [measurable]: "\n. vI n \ borel_measurable MI" "\n. integrable MI (vI n)" using I.subcocycle_integrable[OF H(1)] by auto have "(\x. vI n x \MI) = (\x. uI n x \MI)" for n apply (rule integral_cong_AE) using H(2) by auto then have "(\x. vI n x \MI) = (\x. u n x \M)" for n using uI_int2 by simp then have "I.subcocycle_avg_ereal vI = subcocycle_avg_ereal u" unfolding I.subcocycle_avg_ereal_def subcocycle_avg_ereal_def by auto then have vI_fin: "I.subcocycle_avg_ereal vI > -\" using subu_fin by simp have "AE x in MI. (\n. vI n x / n) \ 0" using uI_0 H(2) by auto moreover have "AE x in MI. (\n. vI n x / n) \ I.subcocycle_lim vI x" by (rule I.kingman_theorem_nonergodic(1)[OF H(1) vI_fin]) ultimately have vI_0: "AE x in MI. I.subcocycle_lim vI x = 0" using LIMSEQ_unique by auto interpret GKK: Gouezel_Karlsson_Kingman MI TI vI apply standard using H(1) vI_fin vI_0 by auto obtain delta where delta: "\l. delta l > 0" "delta \ 0" "emeasure MI {x \ space MI. 
upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} \ 1 - d } \ 1 - d" using GKK.upper_density_good_direction_invertible[OF \I.invertible_qmpt\ \d>0\ \d\1\] by blast text \Then, we need to go back to the original system, showing that the estimates for $TI$ carry over. First, we go to $TJ$.\ have BJ: "emeasure MJ {x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d } \ 1 - d" proof - have *: "AE x in MI. uJ n (projI x) = vI n x" for n using uI_proj H(2) by auto have **: "AE x in MI. \n. uJ n (projI x) = vI n x" by (subst AE_all_countable, auto intro: *) have "AE x in MI. \m n. uJ n (projI ((TI^^m) x)) = vI n ((TI^^m) x)" by (rule I.T_AE_iterates[OF **]) then have "AE x in MI. (\m n. uJ n (projI ((TI^^m) x)) = vI n ((TI^^m) x)) \ (\n. projI ((TI^^n) x) = (TJ^^n) (projI x))" using I.qmpt_factor_iterates[OF I.mpt_factor_is_qmpt_factor[OF \I.mpt_factor projI MJ TJ\]] by auto then obtain ZI where ZI: "\x. x \ space MI - ZI \ (\m n. uJ n (projI ((TI^^m) x)) = vI n ((TI^^m) x)) \ (\n. projI ((TI^^n) x) = (TJ^^n) (projI x))" "ZI \ null_sets MI" using AE_E3 by blast have *: "uJ n (projI x) - uJ (n - l) ((TJ ^^ l) (projI x)) = vI n x - vI (n - l) ((TI ^^ l) x)" if "x \ space MI - ZI" for x n l proof - have "(TI^^0) x = x" "(TJ^^0) (projI x) = (projI x)" by auto then show ?thesis using ZI(1)[OF that] by metis qed have "projI-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space MI - ZI = {x \ space MI - ZI. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n (projI x) - uJ (n - l) ((TJ ^^ l) (projI x))} \ 1 - d}" by (auto simp add: measurable_space[OF \projI \ measurable MI MJ\]) also have "... = {x \ space MI - ZI. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} \ 1 - d}" using * by auto also have "... = {x \ space MI. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} \ 1 - d} - ZI" by auto finally have *: "projI-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space MI - ZI = {x \ space MI. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} \ 1 - d} - ZI" by simp have "emeasure MJ {x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} = emeasure MI (projI-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space MI)" by (rule measure_preservingE(2)[symmetric], auto) also have "... = emeasure MI ((projI-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space MI) - ZI)" by (rule emeasure_Diff_null_set[OF \ZI \ null_sets MI\, symmetric], measurable) also have "... = emeasure MI ({x \ space MI. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} \ 1 - d} - ZI)" using * by simp also have "... = emeasure MI {x \ space MI. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < vI n x - vI (n - l) ((TI ^^ l) x)} \ 1 - d}" by (rule emeasure_Diff_null_set[OF \ZI \ null_sets MI\], measurable) also have "... 
\ 1-d" using delta(3) by simp finally show ?thesis by simp qed text \Then, we go back to $T$ with virtually the same argument.\ have "emeasure M {x \ space M. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} \ 1 - d } \ 1 - d" proof - obtain Z where Z: "\x. x \ space M - Z \ (\n. projJ ((T^^n) x) = (TJ^^n) (projJ x))" "Z \ null_sets M" using AE_E3[OF qmpt_factor_iterates[OF mpt_factor_is_qmpt_factor[OF \mpt_factor projJ MJ TJ\]]] by blast have *: "uJ n (projJ x) - uJ (n - l) ((TJ ^^ l) (projJ x)) = u n x - u (n - l) ((T^^ l) x)" if "x \ space M - Z" for x n l proof - have "(T^^0) x = x" "(TJ^^0) (projJ x) = (projJ x)" by auto then show ?thesis using Z(1)[OF that] uJ_proj by metis qed have "projJ-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space M - Z = {x \ space M - Z. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n (projJ x) - uJ (n - l) ((TJ ^^ l) (projJ x))} \ 1 - d}" by (auto simp add: measurable_space[OF \projJ \ measurable M MJ\]) also have "... = {x \ space M - Z. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} \ 1 - d}" using * by auto also have "... = {x \ space M. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} \ 1 - d} - Z" by auto finally have *: "projJ-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space M - Z = {x \ space M. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} \ 1 - d} - Z" by simp have "emeasure MJ {x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} = emeasure M (projJ-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space M)" by (rule measure_preservingE(2)[symmetric], auto) also have "... = emeasure M ((projJ-`{x \ space MJ. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < uJ n x - uJ (n - l) ((TJ ^^ l) x)} \ 1 - d} \ space M) - Z)" by (rule emeasure_Diff_null_set[OF \Z \ null_sets M\, symmetric], measurable) also have "... = emeasure M ({x \ space M. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} \ 1 - d} - Z)" using * by simp also have "... = emeasure M {x \ space M. upper_asymptotic_density {n. \l\{1..n}. - delta l * real l < u n x - u (n - l) ((T ^^ l) x)} \ 1 - d}" by (rule emeasure_Diff_null_set[OF \Z \ null_sets M\], measurable) finally show ?thesis using BJ by simp qed then show ?thesis using delta(1) delta(2) by auto qed text \From the quantitative lemma above, we deduce the qualitative statement we are after, still in the setting of the locale.\ lemma infinite_AE: shows "AE x in M. \delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l})" proof - have "\deltaf::real \ nat \ real. \d. ((d > 0 \ d \ 1) \ ((\l. deltaf d l > 0) \ (deltaf d \ 0) \ emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l} \ 1-d} \ ennreal(1-d)))" apply (subst choice_iff'[symmetric]) using upper_density_good_direction by auto then obtain deltaf::"real \ nat \ real" where H: "\d. d > 0 \ d \1 \ (\l. deltaf d l > 0) \ (deltaf d \ 0) \ emeasure M {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. 
u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l} \ 1-d} \ ennreal(1-d)" by blast define U where "U = (\d. {x \ space M. upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l} \ 1-d})" have [measurable]: "U d \ sets M" for d unfolding U_def by auto have *: "emeasure M (U d) \ 1 - d" if "d>0 \ d\ 1" for d unfolding U_def using H that by auto define V where "V = (\n::nat. U (1/(n+2)))" have [measurable]: "V \ sets M" unfolding V_def by auto have a: "emeasure M V \ 1 - 1 / (n + 2)" for n::nat proof - have "1 - 1 / (n + 2) = 1 - 1 / (real n + 2)" by auto also have "... \ emeasure M (U (1/(real n+2)))" using *[of "1 / (real n + 2)"] by auto also have "... \ emeasure M V" apply (rule Measure_Space.emeasure_mono) unfolding V_def by auto finally show ?thesis by simp qed have b: "(\n::nat. 1 - 1 / (n + 2)) \ ennreal(1 - 0)" by (intro tendsto_intros LIMSEQ_ignore_initial_segment) have "emeasure M V \ 1 - 0" apply (rule Lim_bounded) using a b by auto then have "emeasure M V = 1" by (simp add: emeasure_ge_1_iff) then have "AE x in M. x \ V" by (simp add: emeasure_eq_measure prob_eq_1) moreover { fix x assume "x \ V" then obtain n::nat where "x \ U (1/(real n+2))" unfolding V_def by blast define d where "d = 1/(real n + 2)" have "0 < d" "d\1" unfolding d_def by auto have "0 < 1-d" unfolding d_def by auto also have "... \ upper_asymptotic_density {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l}" using \x \ U (1/(real n+2))\ unfolding U_def d_def by auto finally have "infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - (deltaf d l) * l}" using upper_asymptotic_density_finite by force then have "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) > - delta l * l})" using H \0 < d\ \d\1\ by auto } ultimately show ?thesis by auto qed end text \Finally, we obtain the full statement, by reducing to the previous situation where the asymptotic average vanishes.\ theorem (in pmpt) Gouezel_Karlsson_Kingman: assumes "subcocycle u" "subcocycle_avg_ereal u > -\" shows "AE x in M. \delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - delta l * l})" proof - have [measurable]: "integrable M (u n)" "u n \ borel_measurable M" for n using subcocycle_integrable[OF assms(1)] by auto define v where "v = birkhoff_sum (\x. -subcocycle_lim u x)" have int [measurable]: "integrable M (\x. -subcocycle_lim u x)" using kingman_theorem_nonergodic(2)[OF assms] by auto have "subcocycle v" unfolding v_def apply (rule subcocycle_birkhoff) using assms \integrable M (\x. -subcocycle_lim u x)\ unfolding subcocycle_def by auto have "subcocycle_avg_ereal v > - \" unfolding v_def using subcocycle_avg_ereal_birkhoff[OF int] kingman_theorem_nonergodic(2)[OF assms] by auto have "AE x in M. subcocycle_lim v x = real_cond_exp M Invariants (\x. -subcocycle_lim u x) x" unfolding v_def by (rule subcocycle_lim_birkhoff[OF int]) moreover have "AE x in M. real_cond_exp M Invariants (\x. - subcocycle_lim u x) x = - subcocycle_lim u x" by (rule real_cond_exp_F_meas[OF int], auto) ultimately have AEv: "AE x in M. subcocycle_lim v x = - subcocycle_lim u x" by auto define w where "w = (\n x. u n x + v n x)" have a: "subcocycle w" unfolding w_def by (rule subcocycle_add[OF assms(1) \subcocycle v\]) have b: "subcocycle_avg_ereal w > -\" unfolding w_def by (rule subcocycle_avg_add(1)[OF assms(1) \subcocycle v\ assms(2) \subcocycle_avg_ereal v > - \\]) have "AE x in M. 
subcocycle_lim w x = subcocycle_lim u x + subcocycle_lim v x" unfolding w_def by (rule subcocycle_lim_add[OF assms(1) \subcocycle v\ assms(2) \subcocycle_avg_ereal v > - \\]) then have c: "AE x in M. subcocycle_lim w x = 0" using AEv by auto interpret Gouezel_Karlsson_Kingman M T w apply standard using a b c by auto have "AE x in M. \delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. w n x - w (n-l) ((T^^l) x) > - delta l * l})" using infinite_AE by auto moreover { fix x assume H: "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. w n x - w (n-l) ((T^^l) x) > - delta l * l})" "x \ space M" have *: "v n x = - n * subcocycle_lim u x" for n unfolding v_def using birkhoff_sum_of_invariants[OF _ \x \ space M\] by auto have **: "v n ((T^^l) x) = - n * subcocycle_lim u x" for n l proof - have "v n ((T^^l) x) = - n * subcocycle_lim u ((T^^l) x)" unfolding v_def using birkhoff_sum_of_invariants[OF _ T_spaceM_stable(2)[OF \x \ space M\]] by auto also have "... = - n * subcocycle_lim u x" using Invariants_func_is_invariant_n[OF subcocycle_lim_meas_Inv(2) \x \ space M\] by auto finally show ?thesis by simp qed have "w n x - w (n-l) ((T^^l) x) = u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x" if "l \ {1..n}" for n l unfolding w_def using *[of n] **[of "n-l" l] that apply (auto simp add: algebra_simps) by (metis comm_semiring_class.distrib diff_add_inverse nat_le_iff_add of_nat_add) then have "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - delta l * l})" using H(1) by auto } ultimately show ?thesis by auto qed text \The previous theorem only contains a lower bound. The corresponding upper bound follows readily from Kingman's theorem. The next statement combines both upper and lower bounds.\ theorem (in pmpt) Gouezel_Karlsson_Kingman': assumes "subcocycle u" "subcocycle_avg_ereal u > -\" shows "AE x in M. \delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l})" proof - { fix x assume x: "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - delta l * l})" "(\l. u l x/l) \ subcocycle_lim u x" then obtain alpha::"nat \ real" where a: "\l. alpha l > 0" "alpha \ 0" "infinite {n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - alpha l * l}" by auto define delta::"nat \ real" where "delta = (\l. alpha l + norm(u l x / l - subcocycle_lim u x))" { fix n assume *: "\l \ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - alpha l * l" have H: "x > -a \ x < a \ abs x < a" for a x::real by simp have "abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l" if "l\{1..n}" for l proof (rule H) have "u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x \ u l x - l * subcocycle_lim u x" using assms(1) subcocycle_ineq[OF assms(1), of l "n-l" x] that by auto also have "... \ l * norm(u l x/l - subcocycle_lim u x)" using that by (auto simp add: algebra_simps divide_simps) also have "... < delta l * l" unfolding delta_def using a(1)[of l] that by auto finally show "u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x < delta l * l" by simp have "- (delta l * l) \ -alpha l * l" unfolding delta_def by (auto simp add: algebra_simps) also have "... 
< u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x" using * that by auto finally show "u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > -(delta l * l)" by simp qed then have "\l \ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l" by auto } then have "{n. \l \ {1..n}. u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x > - alpha l * l} \ {n. \l \ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l}" by auto then have "infinite {n. \l \ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l}" using a(3) finite_subset by blast moreover have "delta \ 0 + 0" unfolding delta_def using x(2) by (intro tendsto_intros a(2) tendsto_norm_zero LIM_zero) moreover have "delta l > 0" for l unfolding delta_def using a(1)[of l] by auto ultimately have "\delta::nat\real. (\l. delta l > 0) \ (delta \ 0) \ (infinite {n. \l \ {1..n}. abs(u n x - u (n-l) ((T^^l) x) - l * subcocycle_lim u x) < delta l * l})" by auto } then show ?thesis using Gouezel_Karlsson_Kingman[OF assms] kingman_theorem_nonergodic(1)[OF assms] by auto qed end (*of Gouezel_Karlsson.thy*) diff --git a/thys/Ergodic_Theory/Invariants.thy b/thys/Ergodic_Theory/Invariants.thy --- a/thys/Ergodic_Theory/Invariants.thy +++ b/thys/Ergodic_Theory/Invariants.thy @@ -1,1796 +1,1796 @@ (* Author: Sébastien Gouëzel sebastien.gouezel@univ-rennes1.fr License: BSD *) section \The invariant sigma-algebra, Birkhoff theorem\ theory Invariants imports Recurrence "HOL-Probability.Conditional_Expectation" begin subsection \The sigma-algebra of invariant subsets\ text \The invariant sigma-algebra of a qmpt is made of those sets that are invariant under the dynamics. When the transformation is ergodic, it is made of sets of zero or full measure. In general, the Birkhoff theorem is expressed in terms of the conditional expectation of an integrable function with respect to the invariant sigma-algebra.\ context qmpt begin text \We define the invariant sigma-algebra, as the sigma algebra of sets which are invariant under the dynamics, i.e., they coincide with their preimage under $T$.\ definition Invariants where "Invariants = sigma (space M) {A \ sets M. T-`A \ space M = A}" text \For this definition to make sense, we need to check that it really defines a sigma-algebra: otherwise, the \verb+sigma+ operation would make garbage out of it. This is the content of the next lemma.\ lemma Invariants_sets: "sets Invariants = {A \ sets M. T-`A \ space M = A}" proof - have "sigma_algebra (space M) {A \ sets M. T-`A \ space M = A}" proof - define I where "I = {A. T-`A \ space M = A}" have i: "\A. A \ I \ A \ space M" unfolding I_def by auto have "algebra (space M) I" proof (subst algebra_iff_Un) have a: "I \ Pow (space M)" using i by auto have b: "{} \ I" unfolding I_def by auto { fix A assume *: "A \ I" then have "T-`(space M - A) = T-`(space M) - T-`A" by auto then have "T-`(space M - A) \ space M = T-`(space M) \ (space M) - T-`A \ (space M)" by auto also have "... = space M - A" using * I_def by (simp add: inf_absorb2 subsetI) finally have "space M - A \ I" unfolding I_def by simp } then have c: "(\a\I. space M - a \ I)" by simp have d: "(\a\I. \b\I. a \ b \ I)" unfolding I_def by auto show "I \ Pow (space M) \ {} \ I \ (\a\I. space M - a \ I) \ (\a\I. \b\I. a \ b \ I)" using a b c d by blast qed moreover have "(\F. range F \ I \ (\i::nat. 
F i) \ I)" unfolding I_def by auto ultimately have "sigma_algebra (space M) I" using sigma_algebra_iff by auto moreover have "sigma_algebra (space M) (sets M)" using measure_space measure_space_def by auto ultimately have "sigma_algebra (space M) (I \ (sets M))" using sigma_algebra_intersection by auto moreover have "I \ sets M = {A \ sets M. T-`A \ space M = A}" unfolding I_def by auto ultimately show ?thesis by simp qed then show ?thesis unfolding Invariants_def using sigma_algebra.sets_measure_of_eq by blast qed text \By definition, the invariant subalgebra is a subalgebra of the original algebra. This is expressed in the following lemmas.\ lemma Invariants_is_subalg: "subalgebra M Invariants" unfolding subalgebra_def using Invariants_sets Invariants_def by (simp add: space_measure_of_conv) lemma Invariants_in_sets: assumes "A \ sets Invariants" shows "A \ sets M" using Invariants_sets assms by blast lemma Invariants_measurable_func: assumes "f \ measurable Invariants N" shows "f \ measurable M N" using Invariants_is_subalg measurable_from_subalg assms by auto text \We give several trivial characterizations of invariant sets or functions.\ lemma Invariants_vrestr: assumes "A \ sets Invariants" shows "T--`A = A" using assms Invariants_sets Invariants_in_sets[OF assms] by auto lemma Invariants_points: assumes "A \ sets Invariants" "x \ A" shows "T x \ A" using assms Invariants_sets by auto lemma Invariants_func_is_invariant: fixes f::"_ \ 'b::t2_space" assumes "f \ borel_measurable Invariants" "x \ space M" shows "f (T x) = f x" proof - have "{f x} \ sets borel" by simp then have "f-`({f x}) \ space M \ Invariants" using assms(1) by (metis (no_types, lifting) Invariants_def measurable_sets space_measure_of_conv) moreover have "x \ f-`({f x}) \ space M" using assms(2) by blast ultimately have "T x \ f-`({f x}) \ space M" by (rule Invariants_points) then show ?thesis by simp qed lemma Invariants_func_is_invariant_n: fixes f::"_ \ 'b::t2_space" assumes "f \ borel_measurable Invariants" "x \ space M" shows "f ((T^^n) x) = f x" by (induction n, auto simp add: assms Invariants_func_is_invariant) lemma Invariants_func_charac: assumes [measurable]: "f \ measurable M N" and "\x. x \ space M \ f(T x) = f x" shows "f \ measurable Invariants N" proof (rule measurableI) fix A assume "A \ sets N" have "space Invariants = space M" using Invariants_is_subalg subalgebra_def by force show "f -` A \ space Invariants \ sets Invariants" apply (subst Invariants_sets) apply (auto simp add: assms \A \ sets N\ \space Invariants = space M\) using \A \ sets N\ assms(1) measurable_sets by blast next fix x assume "x \ space Invariants" have "space Invariants = space M" using Invariants_is_subalg subalgebra_def by force then show "f x \ space N" using assms(1) \x \ space Invariants\ by (metis measurable_space) qed lemma birkhoff_sum_of_invariants: fixes f::" _ \ real" assumes "f \ borel_measurable Invariants" "x \ space M" shows "birkhoff_sum f n x = n * f x" unfolding birkhoff_sum_def using Invariants_func_is_invariant_n[OF assms] by auto text \There are two possible definitions of the invariant sigma-algebra, competing in the literature: one could also use the sets such that $T^{-1}A$ coincides with $A$ up to a measure $0$ set. It turns out that this is equivalent to being invariant (in our sense) up to a measure $0$ set. Therefore, for all interesting purposes, the two definitions would give the same results. 
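In symbols, the proposition \verb+Invariants_quasi_Invariants_sets+ below states that, for a measurable set $A$, the preimage $T^{-1}A$ differs from $A$ by a null set if and only if $A$ differs by a null set from some set $B$ in the invariant sigma-algebra.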
For the proof, we start from an almost invariant set, and build a genuinely invariant set that coincides with it by adding or throwing away null parts. \ proposition Invariants_quasi_Invariants_sets: assumes [measurable]: "A \ sets M" shows "(\B \ sets Invariants. A \ B \ null_sets M) \ (T--`A \ A \ null_sets M)" proof assume "\B \ sets Invariants. A \ B \ null_sets M" then obtain B where "B \ sets Invariants" "A \ B \ null_sets M" by auto then have [measurable]: "B \ sets M" using Invariants_in_sets by simp have "B = T--` B" using Invariants_vrestr \B \ sets Invariants\ by simp then have "T--`A \ B = T--`(A \ B)" by simp moreover have "T--`(A \ B) \ null_sets M" by (rule T_quasi_preserves_null2(1)[OF \A \ B \ null_sets M\]) ultimately have "T--`A \ B \ null_sets M" by simp then show "T--`A \ A \ null_sets M" by (rule null_sym_diff_transitive) (auto simp add: \A \ B \ null_sets M\ Un_commute) next assume H: "T --` A \ A \ null_sets M" have [measurable]: "\n. (T^^n)--`A \ sets M" by simp { fix K assume [measurable]: "K \ sets M" and "T--`K \ K \ null_sets M" fix n::nat have "(T^^n)--`K \ K \ null_sets M" proof (induction n) case 0 have "(T^^0)--` K = K" using T_vrestr_0 by simp then show ?case using Diff_cancel sup.idem by (metis null_sets.empty_sets) next case (Suc n) have "T--`((T^^n)--`K \ K) \ null_sets M" using Suc.IH T_quasi_preserves_null(1)[of "((T^^n)--`K \ K)"] by simp then have *: "(T^^(Suc n))--`K \ T--`K \ null_sets M" using T_vrestr_composed(2)[OF \K \ sets M\] by simp then show ?case by (rule null_sym_diff_transitive, simp add: \T--`K \ K \ null_sets M\ \K \ sets M\, measurable) qed } note * = this define C where "C = (\n. (T^^n)--`A)" have [measurable]: "C \ sets M" unfolding C_def by simp have "C \ A \ (\n. (T^^n)--`A \ A)" unfolding C_def by auto moreover have "(\n. (T^^n)--`A \ A) \ null_sets M" using * null_sets_UN assms \T --` A \ A \ null_sets M\ by auto ultimately have CA: "C \ A \ null_sets M" by (meson \C \ sets M\ assms sets.Diff sets.Un null_sets_subset) then have "T--`(C \ A) \ null_sets M" by (rule T_quasi_preserves_null2(1)) then have "T--`C \ T--`A \ null_sets M" by simp then have "T--`C \ A \ null_sets M" by (rule null_sym_diff_transitive, auto simp add: H) then have TCC: "T--`C \ C \ null_sets M" apply (rule null_sym_diff_transitive) using CA by (auto simp add: Un_commute) have "C \ (\n\{1..}. (T^^n)--`A)" unfolding C_def by auto moreover have "T--`C = (\n\{1..}. (T^^n)--`A)" using T_vrestr_composed(2)[OF assms] by (simp add: C_def atLeast_Suc_greaterThan greaterThan_0) ultimately have "C \ T--`C" by blast then have "(T^^0)--`C \ (T^^1)--`C" using T_vrestr_0 by auto moreover have "(T^^1)--`C \ (\n\{1..}. (T^^n)--`C)" by auto ultimately have "(T^^0)--`C \ (\n\{1..}. (T^^n)--`C)" by auto then have "(T^^0)--`C \ (\n\{1..}. (T^^n)--`C) = (\n\{1..}. (T^^n)--`C)" by auto moreover have "(\n. (T^^n)--`C) = (T^^0)--`C \ (\n\{1..}. (T^^n)--`C)" by (rule union_insert_0) ultimately have C2: "(\n. (T^^n)--`C) = (\n\{1..}. (T^^n)--`C)" by simp define B where "B = (\n. (T^^n)--`C)" have [measurable]: "B \ sets M" unfolding B_def by simp have "B \ C \ (\n. (T^^n)--`C \ C)" unfolding B_def by auto moreover have "(\n. (T^^n)--`C \ C) \ null_sets M" using * null_sets_UN assms TCC by auto ultimately have "B \ C \ null_sets M" by (meson \B \ sets M\ \C \ sets M\ assms sets.Diff sets.Un null_sets_subset) then have "B \ A \ null_sets M" by (rule null_sym_diff_transitive, auto simp add: CA) then have a: "A \ B \ null_sets M" by (simp add: Un_commute) have "T--`B = (\n\{1..}. 
(T^^n)--`C)" using T_vrestr_composed(2)[OF \C \ sets M\] by (simp add: B_def atLeast_Suc_greaterThan greaterThan_0) then have "T--`B = B" unfolding B_def using C2 by simp then have "B \ sets Invariants" using Invariants_sets vimage_restr_def by auto then show "\B \ sets Invariants. A \ B \ null_sets M" using a by blast qed text \In a conservative setting, it is enough for a set to contain its preimage, or to be contained in it, to be almost invariant: the difference set has disjoint preimages, and is therefore null by conservativity.\ lemma (in conservative) preimage_included_then_almost_invariant: assumes [measurable]: "A \ sets M" and "T--`A \ A" shows "A \ (T--`A) \ null_sets M" proof - define B where "B = A - T--`A" then have [measurable]: "B \ sets M" by simp have "(T^^(Suc n))--`A \ (T^^n)--`A" for n using T_vrestr_composed(3)[OF assms(1)] vrestr_inclusion[OF assms(2)] by auto then have "disjoint_family (\n. (T^^n)--`A - (T^^(Suc n))--`A)" by (rule disjoint_family_Suc2[where ?A = "\n. (T^^n)--`A"]) moreover have "(T^^n)--`A - (T^^(Suc n))--`A = (T^^n)--`B" for n unfolding B_def Suc_eq_plus1 using T_vrestr_composed(3)[OF assms(1)] by auto ultimately have "disjoint_family (\n. (T^^n)--` B)" by simp then have "\n. n \ 0 \ ((T^^n)--`B) \ B = {}" unfolding disjoint_family_on_def by (metis UNIV_I T_vrestr_0(1)[OF \B \ sets M\]) then have "\n. n > 0 \ (T^^n)-`B \ B = {}" unfolding vimage_restr_def by (simp add: Int_assoc) then have "B \ null_sets M" using disjoint_then_null[OF \B \ sets M\] Int_commute by auto then show ?thesis unfolding B_def using assms(2) by (simp add: Diff_mono Un_absorb2) qed lemma (in conservative) preimage_includes_then_almost_invariant: assumes [measurable]: "A \ sets M" and "A \ T--`A" shows "A \ (T--`A) \ null_sets M" proof - define B where "B = T--`A - A" then have [measurable]: "B \ sets M" by simp have "\n. (T^^(Suc n))--`A \ (T^^n)--`A" using T_vrestr_composed(3)[OF assms(1)] vrestr_inclusion[OF assms(2)] by auto then have "disjoint_family (\n. (T^^(Suc n))--`A - (T^^n)--`A)" by (rule disjoint_family_Suc[where ?A = "\n. (T^^n)--`A"]) moreover have "\n. (T^^(Suc n))--`A - (T^^n)--`A = (T^^n)--`B" unfolding B_def Suc_eq_plus1 using T_vrestr_composed(3)[OF assms(1)] by auto ultimately have "disjoint_family (\n. (T^^n)--` B)" by simp then have "\n. n \ 0 \ ((T^^n)--`B) \ B = {}" unfolding disjoint_family_on_def by (metis UNIV_I T_vrestr_0(1)[OF \B \ sets M\]) then have "\n. n > 0 \ (T^^n)-`B \ B = {}" unfolding vimage_restr_def by (simp add: Int_assoc) then have "B \ null_sets M" using disjoint_then_null[OF \B \ sets M\] Int_commute by auto then show ?thesis unfolding B_def using assms(2) by (simp add: Diff_mono Un_absorb1) qed text \The above properties for sets are also true for functions: if $f$ and $f \circ T$ coincide almost everywhere, i.e., $f$ is almost invariant, then $f$ coincides almost everywhere with a true invariant function. The idea of the proof is straightforward: throw away the orbits on which $f$ is not really invariant (say this is the complement of the good set), and replace $f$ by $0$ there. However, this does not work directly: the good set is not invariant, as some points may have a non-constant value of $f$ on their orbit but reach the good set eventually. 
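More precisely, the proof below formalizes the good set as \verb+good_set+, the set of points $x$ such that $f(T^{n+1} x) = f(T^n x)$ for all $n$, and \verb+good_time+ gives the first time at which the orbit of a point enters it.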
One can however define $g$ to be equal to the eventual value of $f$ along the orbit, if the orbit reaches the good set, and $0$ elsewhere.\ proposition Invariants_quasi_Invariants_functions: fixes f::"_ \ 'b::{second_countable_topology, t2_space}" assumes f_meas [measurable]: "f \ borel_measurable M" shows "(\g \ borel_measurable Invariants. AE x in M. f x = g x) \ (AE x in M. f(T x) = f x)" proof assume "\g\borel_measurable Invariants. AE x in M. f x = g x" then obtain g where g:"g\borel_measurable Invariants" "AE x in M. f x = g x" by blast then have [measurable]: "g \ borel_measurable M" using Invariants_measurable_func by auto define A where "A = {x \ space M. f x = g x}" have [measurable]: "A \ sets M" unfolding A_def by simp define B where "B = space M - A" have [measurable]: "B \ sets M" unfolding B_def by simp moreover have "AE x in M. x \ B" unfolding B_def A_def using g(2) by auto ultimately have "B \ null_sets M" using AE_iff_null_sets by blast then have "T--`B \ null_sets M" by (rule T_quasi_preserves_null2(1)) then have "B \ T--`B \ null_sets M" using \B \ null_sets M\ by auto then have "AE x in M. x \ (B \ T--`B)" using AE_iff_null_sets null_setsD2 by blast then have i: "AE x in M. x \ space M - (B \ T--`B)" by auto { fix x assume *: "x \ space M - (B \ T--`B)" then have "x \ A" unfolding B_def by blast then have "f x = g x" unfolding A_def by blast have "T x \ A" using * B_def by auto then have "f(T x) = g(T x)" unfolding A_def by blast moreover have "g(T x) = g x" apply (rule Invariants_func_is_invariant) using * by (auto simp add: assms \g\borel_measurable Invariants\) ultimately have "f(T x) = f x" using \f x = g x\ by simp } then show "AE x in M. f(T x) = f x" using i by auto next assume *: "AE x in M. f (T x) = f x" text \\verb+good_set+ is the set of points for which $f$ is constant on their orbit. Here, we define $g = f$. If a point ever enters \verb+good_set+, then we take $g$ to be the value of $f$ there. Otherwise, $g$ takes an arbitrary value, say $y_0$.\ define good_set where "good_set = {x \ space M. \n. f((T^^(Suc n)) x) = f((T^^n) x)}" define good_time where "good_time = (\x. Inf {n. (T^^n) x \ good_set})" have "AE x in M. x \ good_set" using T_AE_iterates[OF *] by (simp add: good_set_def) have [measurable]: "good_set \ sets M" unfolding good_set_def by auto obtain y0::'b where True by auto define g where "g = (\x. if (\n. (T^^n) x \ good_set) then f((T^^(good_time x)) x) else y0)" have [measurable]: "good_time \ measurable M (count_space UNIV)" unfolding good_time_def by measurable have [measurable]: "g \ borel_measurable M" unfolding g_def by measurable have "f x = g x" if "x \ good_set" for x proof - have a: "0 \ {n. (T^^n) x \ good_set}" using that by simp have "good_time x = 0" unfolding good_time_def apply (intro cInf_eq_non_empty) using a by blast+ moreover have "{n. (T^^n) x \ good_set} \ {}" using a by blast ultimately show "f x = g x" unfolding g_def by auto qed then have "AE x in M. f x = g x" using \AE x in M. x \ good_set\ by auto have *: "f((T^^(Suc 0)) x) = f((T^^0) x)" if "x \ good_set" for x using that unfolding good_set_def by blast have good_1: "T x \ good_set \ f(T x) = f x" if "x \ good_set" for x using *[OF that] that unfolding good_set_def apply (auto) unfolding T_Tn_T_compose by blast then have good_k: "\x. x \ good_set \ (T^^k) x \ good_set \ f((T^^k) x) = f x" for k by (induction k, auto) have "g(T x) = g x" if "x \ space M" for x proof (cases) assume *: "\n. (T^^n) (T x) \ good_set" define n where "n = Inf {n. 
(T^^n) (T x) \ good_set}" have "(T^^n)(T x) \ good_set" using * Inf_nat_def1 by (metis empty_iff mem_Collect_eq n_def) then have a: "(T^^(n+1)) x \ good_set" by (metis Suc_eq_plus1 comp_eq_dest_lhs funpow.simps(2) funpow_swap1) then have **: "\m. (T^^m) x \ good_set" by blast define m where "m = Inf {m. (T^^m) x \ good_set}" then have "(T^^m) x \ good_set" using ** Inf_nat_def1 by (metis empty_iff mem_Collect_eq) have "n+1 \ {m. (T^^m) x \ good_set}" using a by simp then have "m \ n+1" using m_def by (simp add: Inf_nat_def Least_le) then obtain k where "n+1 = m + k" using le_iff_add by blast have "g x = f((T^^m) x)" unfolding g_def good_time_def using ** m_def by simp also have "... = f((T^^k) ((T^^m) x))" using \(T^^m) x \ good_set\ good_k by simp also have "... = f((T^^(n+1))x)" using \n+1 = m + k\[symmetric] funpow_add by (metis add.commute comp_apply) also have "... = f((T^^n) (T x))" using funpow_Suc_right by (metis Suc_eq_plus1 comp_apply) also have "... = g(T x)" unfolding g_def good_time_def using * n_def by simp finally show "g(T x) = g x" by simp next assume *: "\(\n. (T^^n) (T x) \ good_set)" then have "g(T x) = y0" unfolding g_def by simp have **: "\(\n. (T^^(Suc n)) x \ good_set)" using funpow_Suc_right * by (metis comp_apply) have "T x \ good_set" using good_k * by blast then have "x \ good_set" using good_1 by auto then have "\(\n. (T^^n) x \ good_set)" using ** using good_1 by fastforce then have "g x = y0" unfolding g_def by simp then show "g(T x) = g x" using \g(T x) = y0\ by simp qed then have "g \ borel_measurable Invariants" by (rule Invariants_func_charac[OF \g \ borel_measurable M\]) then show "\g\borel_measurable Invariants. AE x in M. f x = g x" using \AE x in M. f x = g x\ by blast qed text \In a conservative setting, it suffices to have an almost everywhere inequality to get an almost everywhere equality, as the set where there is strict inequality has $0$ measure as its iterates are disjoint, by conservativity.\ proposition (in conservative) AE_decreasing_then_invariant: fixes f::"_ \ 'b::{linorder_topology, second_countable_topology}" assumes "AE x in M. f(T x) \ f x" and [measurable]: "f \ borel_measurable M" shows "AE x in M. f(T x) = f x" proof - obtain D::"'b set" where D: "countable D" "(\x y. x < y \ (\d \ D. x \ d \ d < y))" using countable_separating_set_linorder2 by blast define A where "A = {x \ space M. f(T x) \ f x}" then have [measurable]: "A \ sets M" by simp define B where "B = {x \ space M. \n. f((T^^(n+1)) x) \ f((T^^n)x)}" then have [measurable]: "B \ sets M" by simp have "space M - A \ null_sets M" unfolding A_def using assms by (simp add: assms(1) AE_iff_null_sets) then have "(\n. (T^^n)--`(space M - A)) \ null_sets M" by (metis null_sets_UN T_quasi_preserves_null2(2)) moreover have "space M - B = (\n. (T^^n)--`(space M - A))" unfolding B_def A_def by auto ultimately have "space M - B \ null_sets M" by simp have *: "B = (\n. (T^^n)--`A)" unfolding B_def A_def by auto then have "T--`B = (\n. T--` (T^^n)--`A)" by auto also have "... = (\n. (T^^(n+1))--`A)" using T_vrestr_composed(2)[OF \A \ sets M\] by simp also have "... \ (\n. (T^^n)--`A)" by blast finally have B1: "B \ T--`B" using * by simp have "B \ A" using * T_vrestr_0[OF \A \ sets M\] by blast then have B2: "\x. x \ B \ f(T x) \ f x" unfolding A_def by auto define C where "C = (\t. {x \ B. 
f x \ t})" { fix t have "C t = B \ f-`{..t} \ space M" unfolding C_def using sets.sets_into_space[OF \B \ sets M\] by auto then have [measurable]: "C t \ sets M" using assms(2) by simp have "C t \ T--`(C t)" using B1 unfolding C_def vimage_restr_def apply auto using B2 order_trans by blast then have "T--`(C t) - C t \ null_sets M" by (metis Diff_mono Un_absorb1 preimage_includes_then_almost_invariant[OF \C t \ sets M\]) } then have "(\d\D. T--`(C d) - C d) \ null_sets M" using \countable D\ by (simp add: null_sets_UN') then have "(space M - B) \ (\d\D. T--`(C d) - C d) \ null_sets M" using \space M - B \ null_sets M\ by auto then have "AE x in M. x \ (space M - B) \ (\d\D. T--`(C d) - C d)" using AE_not_in by blast moreover { fix x assume x: "x \ space M" "x \ (space M - B) \ (\d\D. T--`(C d) - C d)" then have "x \ B" by simp then have "T x \ B" using B1 by auto have "f(T x) = f x" proof (rule ccontr) assume "f(T x) \ f x" then have "f(T x) < f x" using B2[OF \x \ B\] by simp then obtain d where d: "d \ D" "f(T x) \ d \ d < f x" using D by auto then have "T x \ C d" using \T x \ B\ unfolding C_def by simp then have "x \ T--`(C d)" using \x \ space M\ by simp then have "x \ C d" using x \d \ D\ by simp then have "f x \ d" unfolding C_def by simp then show False using d by auto qed } ultimately show ?thesis by auto qed proposition (in conservative) AE_increasing_then_invariant: fixes f::"_ \ 'b::{linorder_topology, second_countable_topology}" assumes "AE x in M. f(T x) \ f x" and [measurable]: "f \ borel_measurable M" shows "AE x in M. f(T x) = f x" proof - obtain D::"'b set" where D: "countable D" "(\x y. x < y \ (\d \ D. x < d \ d \ y))" using countable_separating_set_linorder1 by blast define A where "A = {x \ space M. f(T x) \ f x}" then have [measurable]: "A \ sets M" by simp define B where "B = {x \ space M. \n. f((T^^(n+1)) x) \ f((T^^n)x)}" then have [measurable]: "B \ sets M" by simp have "space M - A \ null_sets M" unfolding A_def using assms by (simp add: assms(1) AE_iff_null_sets) then have "(\n. (T^^n)--`(space M - A)) \ null_sets M" by (metis null_sets_UN T_quasi_preserves_null2(2)) moreover have "space M - B = (\n. (T^^n)--`(space M - A))" unfolding B_def A_def by auto ultimately have "space M - B \ null_sets M" by simp have *: "B = (\n. (T^^n)--`A)" unfolding B_def A_def by auto then have "T--`B = (\n. T--` (T^^n)--`A)" by auto also have "... = (\n. (T^^(n+1))--`A)" using T_vrestr_composed(2)[OF \A \ sets M\] by simp also have "... \ (\n. (T^^n)--`A)" by blast finally have B1: "B \ T--`B" using * by simp have "B \ A" using * T_vrestr_0[OF \A \ sets M\] by blast then have B2: "\x. x \ B \ f(T x) \ f x" unfolding A_def by auto define C where "C = (\t. {x \ B. f x \ t})" { fix t have "C t = B \ f-`{t..} \ space M" unfolding C_def using sets.sets_into_space[OF \B \ sets M\] by auto then have [measurable]: "C t \ sets M" using assms(2) by simp have "C t \ T--`(C t)" using B1 unfolding C_def vimage_restr_def apply auto using B2 order_trans by blast then have "T--`(C t) - C t \ null_sets M" by (metis Diff_mono Un_absorb1 preimage_includes_then_almost_invariant[OF \C t \ sets M\]) } then have "(\d\D. T--`(C d) - C d) \ null_sets M" using \countable D\ by (simp add: null_sets_UN') then have "(space M - B) \ (\d\D. T--`(C d) - C d) \ null_sets M" using \space M - B \ null_sets M\ by auto then have "AE x in M. x \ (space M - B) \ (\d\D. T--`(C d) - C d)" using AE_not_in by blast moreover { fix x assume x: "x \ space M" "x \ (space M - B) \ (\d\D. 
T--`(C d) - C d)" then have "x \ B" by simp then have "T x \ B" using B1 by auto have "f(T x) = f x" proof (rule ccontr) assume "f(T x) \ f x" then have "f(T x) > f x" using B2[OF \x \ B\] by simp then obtain d where d: "d \ D" "f(T x) \ d \ d > f x" using D by auto then have "T x \ C d" using \T x \ B\ unfolding C_def by simp then have "x \ T--`(C d)" using \x \ space M\ by simp then have "x \ C d" using x \d \ D\ by simp then have "f x \ d" unfolding C_def by simp then show False using d by auto qed } ultimately show ?thesis by auto qed text \For an invertible map, the invariants of $T$ and $T^{-1}$ are the same.\ lemma Invariants_Tinv: assumes "invertible_qmpt" shows "qmpt.Invariants M Tinv = Invariants" proof - interpret I: qmpt M Tinv using Tinv_qmpt[OF assms] by auto have "(T -` A \ space M = A) \ (Tinv -` A \ space M = A)" if "A \ sets M" for A proof assume "T -` A \ space M = A" then show "Tinv -` A \ space M = A" using assms that unfolding Tinv_def invertible_qmpt_def apply auto apply (metis IntE UNIV_I bij_def imageE inv_f_f vimageE) apply (metis I.T_spaceM_stable(1) Int_iff Tinv_def bij_inv_eq_iff vimageI) done next assume "Tinv -` A \ space M = A" then show "T -` A \ space M = A" using assms that unfolding Tinv_def invertible_qmpt_def apply auto apply (metis IntE bij_def inv_f_f vimageE) apply (metis T_Tinv_of_set T_meas Tinv_def assms qmpt.vrestr_of_set qmpt_axioms vrestr_image(3)) done qed then have "{A \ sets M. Tinv -` A \ space M = A} = {A \ sets M. T -` A \ space M = A}" by blast then show ?thesis unfolding Invariants_def I.Invariants_def by auto qed end sublocale fmpt \ finite_measure_subalgebra M Invariants unfolding finite_measure_subalgebra_def finite_measure_subalgebra_axioms_def using Invariants_is_subalg by (simp add: finite_measureI) context fmpt begin text \The conditional expectation with respect to the invariant sigma-algebra is the same for $f$ or $f \circ T$, essentially by definition.\ lemma Invariants_of_foTn: fixes f::"'a \ real" assumes [measurable]: "integrable M f" shows "AE x in M. real_cond_exp M Invariants (f o (T^^n)) x = real_cond_exp M Invariants f x" proof (rule real_cond_exp_charact) fix A assume [measurable]: "A \ sets Invariants" then have [measurable]: "A \ sets M" using Invariants_in_sets by blast then have ind_meas [measurable]: "((indicator A)::('a \ real)) \ borel_measurable Invariants" by auto have "set_lebesgue_integral M A (f \ (T^^n)) = (\x. indicator A x * f((T^^n) x) \M)" by (auto simp: comp_def set_lebesgue_integral_def) also have "... = (\x. indicator A ((T^^n) x) * f ((T^^n) x) \M)" by (rule Bochner_Integration.integral_cong, auto simp add: Invariants_func_is_invariant_n[OF ind_meas]) also have "... = (\x. indicator A x * f x \M)" apply (rule Tn_integral_preserving(2)) using integrable_mult_indicator[OF \A \ sets M\ assms] by auto also have "... = (\x. indicator A x * real_cond_exp M Invariants f x \M)" apply (rule real_cond_exp_intg(2)[symmetric]) using integrable_mult_indicator[OF \A \ sets M\ assms] by auto also have "... = set_lebesgue_integral M A (real_cond_exp M Invariants f)" by (auto simp: set_lebesgue_integral_def) finally show "set_lebesgue_integral M A (f \ (T^^n)) = set_lebesgue_integral M A (real_cond_exp M Invariants f)" by simp qed (auto simp add: assms real_cond_exp_int Tn_integral_preserving(1)[OF assms] comp_def) lemma Invariants_of_foT: fixes f::"'a \ real" assumes [measurable]: "integrable M f" shows "AE x in M. 
real_cond_exp M Invariants f x = real_cond_exp M Invariants (f o T) x" using Invariants_of_foTn[OF assms, where ?n = 1] by auto lemma birkhoff_sum_Invariants: fixes f::"'a \ real" assumes [measurable]: "integrable M f" shows "AE x in M. real_cond_exp M Invariants (birkhoff_sum f n) x = n * real_cond_exp M Invariants f x" proof - define F where "F = (\i. f o (T^^i))" have [measurable]: "\i. F i \ borel_measurable M" unfolding F_def by auto have *: "integrable M (F i)" for i unfolding F_def by (subst comp_def, rule Tn_integral_preserving(1)[OF assms, of i]) have "AE x in M. n * real_cond_exp M Invariants f x = (\i\{..i\{..i\{..i\{..x. \i\{..x. \i\{..Birkhoff theorem\ subsubsection \Almost everywhere version of Birkhoff theorem\ text \This paragraph is devoted to the proof of Birkhoff theorem, arguably the most fundamental result of ergodic theory. This theorem asserts that Birkhoff averages of an integrable function $f$ converge almost surely, to the conditional expectation of $f$ with respect to the invariant sigma algebra. This result implies for instance the strong law of large numbers (in probability theory). There are numerous proofs of this statement, but none is really easy. We follow the very efficient argument given in Katok-Hasselblatt. To help the reader, here is the same proof informally. The first part of the proof is formalized in \verb+birkhoff_lemma1+, the second one in \verb+birkhoff_lemma+, and the conclusion in \verb+birkhoff_theorem+. Start with an integrable function $g$. let $G_n(x) = \max_{k\leq n} S_k g(x)$. Then $\limsup S_n g/n \leq 0$ outside of $A$, the set where $G_n$ tends to infinity. Moreover, $G_{n+1} - G_n \circ T$ is bounded by $g$, and tends to $g$ on $A$. It follows from the dominated convergence theorem that $\int_A G_{n+1} - G_n \circ T \to \int_A g$. As $\int_A G_{n+1} - G_n \circ T = \int_A G_{n+1} - G_n \geq 0$, we obtain $\int_A g \geq 0$. Apply now this result to the function $g = f - E(f | I) - \epsilon$, where $\epsilon>0$ is fixed. Then $\int_A g = -\epsilon \mu(A)$, then have $\mu(A) = 0$. Thus, almost surely, $\limsup S_n g/n\leq 0$, i.e., $\limsup S_n f/n \leq E(f|I)+\epsilon$. Letting $\epsilon$ tend to $0$ gives $\limsup S_n f/n \leq E(f|I)$. Applying the same result to $-f$ gives $S_n f/n \to E(f|I)$. \ context fmpt begin lemma birkhoff_aux1: fixes f::"'a \ real" assumes [measurable]: "integrable M f" defines "A \ {x \ space M. limsup (\n. ereal(birkhoff_sum f n x)) = \}" shows "A \ sets Invariants" "(\x. f x * indicator A x \M) \ 0" proof - let ?bsf = "birkhoff_sum f" have [measurable]: "A \ sets M" unfolding A_def by simp have Ainv: "x \ A \ T x \ A" if "x \ space M" for x proof - have "ereal(?bsf (1 + n) x) = ereal(f x) + ereal(?bsf n (T x))" for n unfolding birkhoff_sum_cocycle birkhoff_sum_1 by simp moreover have "limsup (\n. ereal(f x) + ereal(?bsf n (T x))) = ereal(f x) + limsup(\n. ereal(?bsf n (T x)))" by (rule ereal_limsup_lim_add, auto) moreover have "limsup (\n. ereal(?bsf (n+1) x)) = limsup (\n. ereal(?bsf n x))" using limsup_shift by simp ultimately have "limsup (\n. ereal(birkhoff_sum f n x)) = ereal(f x) + limsup (\n. ereal(?bsf n (T x)))" by simp then have "limsup (\n. ereal(?bsf n x)) = \ \ limsup (\n. ereal(?bsf n (T x))) = \" by simp then show "x \ A \ T x \ A" using \x \ space M\ A_def by simp qed then show "A \ sets Invariants" using assms(2) Invariants_sets by auto define F where "F = (\n x. MAX k \{0..n}. ?bsf k x)" have [measurable]: "\n. 
F n \ borel_measurable M" unfolding F_def by measurable have intFn: "integrable M (F n)" for n unfolding F_def by (rule integrable_MAX, auto simp add: birkhoff_sum_integral(1)[OF assms(1)]) have Frec: "F (n+1) x - F n (T x) = max (-F n (T x)) (f x)" for n x proof - have "{0..n+1} = {0} \ {1..n+1}" by auto then have "(\k. ?bsf k x) ` {0..n+1} = (\k. ?bsf k x) ` {0} \ (\k. ?bsf k x) ` {1..n+1}" by blast then have *: "(\k. ?bsf k x) ` {0..n+1} = {0} \ (\k. ?bsf k x) ` {1..n+1}" using birkhoff_sum_1(1) by simp have b: "F (n+1) x = max (Max {0}) (MAX k \{1..n+1}. ?bsf k x)" by (subst F_def, subst *, rule Max.union, auto) have "(\k. ?bsf k x) ` {1..n+1} = (\k. ?bsf (1+k) x) ` {0..n}" using Suc_le_D by fastforce also have "... = (\k. f x + ?bsf k (T x)) ` {0..n}" by (subst birkhoff_sum_cocycle, subst birkhoff_sum_1(2), auto) finally have c: "F (n+1) x = max 0 (MAX k \{0..n}. ?bsf k (T x) + f x)" using b by (simp add: add_ac) have "{f x + birkhoff_sum f k (T x) |k. k \{0..n}} = (+) (f x) ` {birkhoff_sum f k (T x) |k. k \{0..n}}" by blast have "(MAX k \{0..n}. ?bsf k (T x) + f x) = (MAX k \{0..n}. ?bsf k (T x)) + f x" by (rule Max_add_commute) auto also have "... = F n (T x) + f x" unfolding F_def by simp finally have "(MAX k \{0..n}. ?bsf k (T x) + f x) = f x + F n (T x)" by simp then have "F (n+1) x = max 0 (f x + F n (T x))" using c by simp then show "F (n+1) x - F n (T x) = max (-F n (T x)) (f x)" by auto qed have a: "abs((F (n+1) x - F n (T x)) * indicator A x) \ abs(f x)" for n x proof - have "F (n+1) x -F n (T x) \ f x" using Frec by simp then have *: "F (n+1) x -F n (T x) \ - abs(f x)" by simp have "F n (T x) \ birkhoff_sum f 0 (T x)" unfolding F_def apply (rule Max_ge, simp) using atLeastAtMost_iff by blast then have "F n (T x) \ 0" using birkhoff_sum_1(1) by simp then have "-F n (T x) \ abs (f x)" by simp moreover have "f x \ abs(f x)" by simp ultimately have "F (n+1) x -F n (T x) \ abs(f x)" using Frec by simp then have "abs(F (n+1) x - F n (T x)) \ abs(f x)" using * by simp then show "abs((F (n+1) x - F n (T x)) * indicator A x) \ abs(f x)" unfolding indicator_def by auto qed have b: "(\n. (F (n+1) x - F n (T x)) * indicator A x) \ f x * indicator A x" for x proof (rule tendsto_eventually, cases) assume "x \ A" then have "T x \ A" using Ainv A_def by auto then have "limsup (\n. ereal(birkhoff_sum f n (T x))) > ereal(-f x)" unfolding A_def by simp then obtain N where "ereal(?bsf N (T x)) > ereal(-f x)" using Limsup_obtain by blast then have *: "?bsf N (T x) > -f x" by simp { fix n assume "n\N" then have "?bsf N (T x) \ (\k. ?bsf k (T x)) ` {0..n}" by auto then have "F n (T x) \ ?bsf N (T x)" unfolding F_def by simp then have "F n (T x) \ -f x" using * by simp then have "max (-F n (T x)) (f x) = f x" by simp then have "F (n+1) x - F n (T x) = f x" using Frec by simp then have "(F (n+1) x - F n (T x)) * indicator A x = f x * indicator A x" by simp } then show "eventually (\n. (F (n+1) x - F n (T x)) * indicator A x = f x * indicator A x) sequentially" using eventually_sequentially by blast next assume "\(x \ A)" then have "indicator A x = (0::real)" by simp then show "eventually (\n. (F (n+1) x - F n (T x)) * indicator A x = f x * indicator A x) sequentially" by auto qed have lim: "(\n. (\x. (F (n+1) x - F n (T x)) * indicator A x \M)) \ (\x. f x * indicator A x \M)" proof (rule integral_dominated_convergence[where ?w = "(\x. abs(f x))"]) show "integrable M (\x. \f x\)" using assms(1) by auto show "AE x in M. (\n. 
(F (n + 1) x - F n (T x)) * indicator A x) \ f x * indicator A x" using b by auto show "\n. AE x in M. norm ((F (n + 1) x - F n (T x)) * indicator A x) \ \f x\" using a by auto qed (simp_all) have "(\x. (F (n+1) x - F n (T x)) * indicator A x \M) \ 0" for n proof - have "(\x. F n (T x) * indicator A x \M) = (\x. (\x. F n x * indicator A x) (T x) \M)" by (rule Bochner_Integration.integral_cong, auto simp add: Ainv indicator_def) also have "... = (\x. F n x * indicator A x \M)" by (rule T_integral_preserving, auto simp add: intFn integrable_real_mult_indicator) finally have i: "(\x. F n (T x) * indicator A x \M) = (\x. F n x * indicator A x \M)" by simp have "(\x. (F (n+1) x - F n (T x)) * indicator A x \M) = (\x. F (n+1) x * indicator A x - F n (T x) * indicator A x \M)" by (simp add: mult.commute right_diff_distrib) also have "... = (\x. F (n+1) x * indicator A x \M) - (\x. F n (T x) * indicator A x \M)" by (rule Bochner_Integration.integral_diff, auto simp add: intFn integrable_real_mult_indicator T_meas T_integral_preserving(1)) also have "... = (\x. F (n+1) x * indicator A x \M) - (\x. F n x * indicator A x \M)" using i by simp also have "... = (\x. F (n+1) x * indicator A x - F n x * indicator A x \M)" by (rule Bochner_Integration.integral_diff[symmetric], auto simp add: intFn integrable_real_mult_indicator) also have "... = (\x. (F (n+1) x - F n x) * indicator A x \M)" by (simp add: mult.commute right_diff_distrib) finally have *: "(\x. (F (n+1) x - F n (T x)) * indicator A x \M) = (\x. (F (n+1) x - F n x) * indicator A x \M)" by simp have "F n x \ F (n+1) x" for x unfolding F_def by (rule Max_mono, auto) then have "(F (n+1) x - F n x) * indicator A x \ 0" for x by simp then have "integral\<^sup>L M (\x. 0) \ integral\<^sup>L M (\x. (F (n+1) x - F n x) * indicator A x)" by (auto simp add: intFn integrable_real_mult_indicator intro: integral_mono) then have "(\x. (F (n+1) x - F n x) * indicator A x \M) \ 0" by simp then show "(\x. (F (n+1) x - F n (T x)) * indicator A x \M) \ 0" using * by simp qed then show "(\x. f x * indicator A x \M) \ 0" using lim by (simp add: LIMSEQ_le_const) qed lemma birkhoff_aux2: fixes f::"'a \ real" assumes [measurable]: "integrable M f" shows "AE x in M. limsup (\n. ereal(birkhoff_sum f n x / n)) \ real_cond_exp M Invariants f x" proof - { fix \ assume "\ > (0::real)" define g where "g = (\x. f x - real_cond_exp M Invariants f x - \)" then have intg: "integrable M g" using assms real_cond_exp_int(1) assms by auto define A where "A = {x \ space M. limsup (\n. ereal(birkhoff_sum g n x)) = \}" have Ag: "A \ sets Invariants" "(\x. g x * indicator A x \M) \ 0" unfolding A_def by (rule birkhoff_aux1[where ?f = g, OF intg])+ then have [measurable]: "A \ sets M" by (simp add: Invariants_in_sets) have eq: "(\x. indicator A x * real_cond_exp M Invariants f x \M) = (\x. indicator A x * f x \M)" proof (rule real_cond_exp_intg[where ?f = "\x. (indicator A x)::real" and ?g = f]) have "(\x. indicator A x * f x) = (\x. f x * indicator A x)" by auto then show "integrable M (\x. indicator A x * f x)" using integrable_real_mult_indicator[OF \A \ sets M\ assms] by simp show "indicator A \ borel_measurable Invariants" using \A \ sets Invariants\ by measurable qed (simp) have "0 \ (\x. g x * indicator A x \M)" using Ag by simp also have "... = (\x. f x * indicator A x - real_cond_exp M Invariants f x * indicator A x - \ * indicator A x \M)" unfolding g_def by (simp add: left_diff_distrib) also have "... = (\x. f x * indicator A x \M) - (\x. 
real_cond_exp M Invariants f x * indicator A x \M) - (\x. \ * indicator A x \M)" using assms real_cond_exp_int(1)[OF assms] integrable_real_mult_indicator[OF \A \ sets M\] by (auto simp: simp del: integrable_mult_left_iff) also have "... = - (\x. \ * indicator A x \M)" by (auto simp add: eq mult.commute) also have "... = - \ * measure M A" by auto finally have "0 \ - \ * measure M A" by simp then have "measure M A = 0" using \\ > 0\ by (simp add: measure_le_0_iff mult_le_0_iff) then have "A \ null_sets M" by (simp add: emeasure_eq_measure null_setsI) then have "AE x in M. x \ space M - A" by (metis (no_types, lifting) AE_cong Diff_iff AE_not_in) moreover { fix x assume "x \ space M - A" then have "limsup (\n. ereal(birkhoff_sum g n x)) < \" unfolding A_def by auto then obtain C where C: "\n. birkhoff_sum g n x \ C" using limsup_finite_then_bounded by presburger { fix n::nat assume "n > 0" have "birkhoff_sum g n x = birkhoff_sum f n x - birkhoff_sum (real_cond_exp M Invariants f) n x - birkhoff_sum (\x. \) n x" unfolding g_def using birkhoff_sum_add birkhoff_sum_diff by auto moreover have "birkhoff_sum (real_cond_exp M Invariants f) n x = n * real_cond_exp M Invariants f x" using birkhoff_sum_of_invariants using \x \ space M - A\ by auto moreover have "birkhoff_sum (\x. \) n x = n * \" unfolding birkhoff_sum_def by auto ultimately have "birkhoff_sum g n x = birkhoff_sum f n x - n * real_cond_exp M Invariants f x - n * \" by simp then have "birkhoff_sum f n x = birkhoff_sum g n x + n * real_cond_exp M Invariants f x + n * \" by simp then have "birkhoff_sum f n x / n = birkhoff_sum g n x / n + real_cond_exp M Invariants f x + \" using \n > 0\ by (simp add: field_simps) then have "birkhoff_sum f n x / n \ C/n + real_cond_exp M Invariants f x + \" using C[of n] \n > 0\ by (simp add: divide_right_mono) then have "ereal(birkhoff_sum f n x / n) \ ereal(C/n + real_cond_exp M Invariants f x + \)" by simp } then have "eventually (\n. ereal(birkhoff_sum f n x / n) \ ereal(C/n + real_cond_exp M Invariants f x + \)) sequentially" by (simp add: eventually_at_top_dense) then have b: "limsup (\n. ereal(birkhoff_sum f n x / n)) \ limsup (\n. ereal(C/n + real_cond_exp M Invariants f x + \))" by (simp add: Limsup_mono) have "(\n. ereal(C*(1/real n) + real_cond_exp M Invariants f x + \)) \ ereal(C * 0 + real_cond_exp M Invariants f x + \)" by (intro tendsto_intros) then have "limsup (\n. ereal(C/real n + real_cond_exp M Invariants f x + \)) = real_cond_exp M Invariants f x + \" using sequentially_bot tendsto_iff_Liminf_eq_Limsup by force then have "limsup (\n. ereal(birkhoff_sum f n x / n)) \ real_cond_exp M Invariants f x + \" using b by simp } ultimately have "AE x in M. limsup (\n. ereal(birkhoff_sum f n x / n)) \ real_cond_exp M Invariants f x + \" by auto then have "AE x in M. limsup (\n. ereal(birkhoff_sum f n x / n)) \ ereal(real_cond_exp M Invariants f x) + \" by auto } then show ?thesis by (rule AE_upper_bound_inf_ereal) qed theorem birkhoff_theorem_AE_nonergodic: fixes f::"'a \ real" assumes "integrable M f" shows "AE x in M. (\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x" proof - { fix x assume i: "limsup (\n. ereal(birkhoff_sum f n x /n)) \ real_cond_exp M Invariants f x" and ii: "limsup (\n. ereal(birkhoff_sum (\x. -f x) n x / n)) \ real_cond_exp M Invariants (\x. -f x) x" and iii: "real_cond_exp M Invariants (\x. -f x) x = - real_cond_exp M Invariants f x" have "\n. birkhoff_sum (\x. 
-f x) n x = - birkhoff_sum f n x" using birkhoff_sum_cmult[where ?c = "-1" and ?f = f] by auto then have "\n. ereal(birkhoff_sum (\x. -f x) n x / n) = - ereal(birkhoff_sum f n x / n)" by auto moreover have "limsup (\n. - ereal(birkhoff_sum f n x / n)) = - liminf (\n. ereal(birkhoff_sum f n x /n))" by (rule ereal_Limsup_uminus) ultimately have "-liminf (\n. ereal(birkhoff_sum f n x /n)) = limsup (\n. ereal(birkhoff_sum (\x. -f x) n x / n))" by simp then have "-liminf (\n. ereal(birkhoff_sum f n x /n)) \ - real_cond_exp M Invariants f x" using ii iii by simp then have "liminf (\n. ereal(birkhoff_sum f n x /n)) \ real_cond_exp M Invariants f x" by (simp add: ereal_uminus_le_reorder) then have "(\n. birkhoff_sum f n x /n) \ real_cond_exp M Invariants f x" using i by (simp add: limsup_le_liminf_real) } note * = this moreover have "AE x in M. limsup (\n. ereal(birkhoff_sum f n x /n)) \ real_cond_exp M Invariants f x" using birkhoff_aux2 assms by simp moreover have "AE x in M. limsup (\n. ereal(birkhoff_sum (\x. -f x) n x / n)) \ real_cond_exp M Invariants (\x. -f x) x" using birkhoff_aux2 assms by simp moreover have "AE x in M. real_cond_exp M Invariants (\x. -f x) x = - real_cond_exp M Invariants f x" using real_cond_exp_cmult[where ?c = "-1"] assms by force ultimately show ?thesis by auto qed text \If a function $f$ is integrable, then $E(f\circ T - f | I) = E(f\circ T | I) - E(f|I) = 0$. Hence, $S_n(f \circ T - f) / n$ converges almost everywhere to $0$, i.e., $f(T^n x)/n \to 0$. It is remarkable (and sometimes useful) that this holds under the weaker condition that $f\circ T - f$ is integrable (but not necessarily $f$), where this naive argument fails. The reason is that the Birkhoff sum of $f \circ T - f$ is $f\circ T^n - f$. If $n$ is such that $x$ and $T^n(x)$ belong to a set where $f$ is bounded, it follows that this Birkhoff sum is also bounded. Along such a sequence of times, $S_n(f\circ T - f)/n$ tends to $0$. By Poincare recurrence theorem, there are such times for almost every points. As it also converges to $E(f \circ T - f | I)$, it follows that this function is almost everywhere $0$. Then $f (T^n x)/n = S_n(f\circ T^n - f)/n - f/n$ tends almost surely to $E(f\circ T-f |I) = 0$. \ lemma limit_foTn_over_n: fixes f::"'a \ real" assumes [measurable]: "f \ borel_measurable M" and "integrable M (\x. f(T x) - f x)" shows "AE x in M. real_cond_exp M Invariants (\x. f(T x) - f x) x = 0" "AE x in M. (\n. f((T^^n) x) / n) \ 0" proof - define E::"nat \ 'a set" where "E k = {x \ space M. \f x\ \ k}" for k have [measurable]: "E k \ sets M" for k unfolding E_def by auto have *: "(\k. E k) = space M" unfolding E_def by (auto simp add: real_arch_simple) define F::"nat \ 'a set" where "F k = recurrent_subset_infty (E k)" for k have [measurable]: "F k \ sets M" for k unfolding F_def by auto have **: "E k - F k \ null_sets M" for k unfolding F_def using Poincare_recurrence_thm by auto have "space M - (\k. F k) \ null_sets M" apply (rule null_sets_subset[of "(\k. E k - F k)"]) unfolding *[symmetric] using ** by auto with AE_not_in[OF this] have "AE x in M. x \ (\k. F k)" by auto moreover have "AE x in M. (\n. birkhoff_sum (\x. f(T x) - f x) n x / n) \ real_cond_exp M Invariants (\x. f(T x) - f x) x" by (rule birkhoff_theorem_AE_nonergodic[OF assms(2)]) moreover have "real_cond_exp M Invariants (\x. f(T x) - f x) x = 0 \ (\n. f((T^^n) x) / n) \ 0" if H: "(\n. birkhoff_sum (\x. f(T x) - f x) n x / n) \ real_cond_exp M Invariants (\x. f(T x) - f x) x" "x \ (\k. 
F k)" for x proof - have "f((T^^n) x) = birkhoff_sum (\x. f(T x) - f x) n x + f x" for n unfolding birkhoff_sum_def by (induction n, auto) then have "f((T^^n) x) / n = birkhoff_sum (\x. f(T x) - f x) n x / n + f x * (1/n)" for n by (auto simp add: divide_simps) moreover have "(\n. birkhoff_sum (\x. f(T x) - f x) n x / n + f x * (1/n)) \ real_cond_exp M Invariants (\x. f(T x) - f x) x + f x * 0" by (intro tendsto_intros H(1)) ultimately have lim: "(\n. f((T^^n) x) / n) \ real_cond_exp M Invariants (\x. f(T x) - f x) x" by auto obtain k where "x \ F k" using H(2) by auto then have "infinite {n. (T^^n) x \ E k}" unfolding F_def recurrent_subset_infty_inf_returns by auto with infinite_enumerate[OF this] obtain r :: "nat \ nat" where r: "strict_mono r" "\n. r n \ {n. (T^^n) x \ E k}" by auto have A: "(\n. k * (1/r n)) \ real k * 0" apply (intro tendsto_intros) using LIMSEQ_subseq_LIMSEQ[OF lim_1_over_n \strict_mono r\] unfolding comp_def by auto have B: "\f((T^^(r n)) x) / r n\ \ k / (r n)" for n using r(2) unfolding E_def by (auto simp add: divide_simps) have "(\n. f((T^^(r n)) x) / r n) \ 0" apply (rule tendsto_rabs_zero_cancel, rule tendsto_sandwich[of "\n. 0" _ _ "\n. k * (1/r n)"]) using A B by auto moreover have "(\n. f((T^^(r n)) x) / r n) \ real_cond_exp M Invariants (\x. f(T x) - f x) x" using LIMSEQ_subseq_LIMSEQ[OF lim \strict_mono r\] unfolding comp_def by auto ultimately have *: "real_cond_exp M Invariants (\x. f(T x) - f x) x = 0" using LIMSEQ_unique by auto then have "(\n. f((T^^n) x) / n) \ 0" using lim by auto then show ?thesis using * by auto qed ultimately show "AE x in M. real_cond_exp M Invariants (\x. f(T x) - f x) x = 0" "AE x in M. (\n. f((T^^n) x) / n) \ 0" by auto qed text \We specialize the previous statement to the case where $f$ itself is integrable.\ lemma limit_foTn_over_n': fixes f::"'a \ real" assumes [measurable]: "integrable M f" shows "AE x in M. (\n. f((T^^n) x) / n) \ 0" by (rule limit_foTn_over_n, simp, rule Bochner_Integration.integrable_diff) (auto intro: assms T_integral_preserving(1)) text \It is often useful to show that a function is cohomologous to a nicer function, i.e., to prove that a given $f$ can be written as $f = g + u - u \circ T$ where $g$ is nicer than $f$. We show below that any integrable function is cohomologous to a function which is arbitrarily close to $E(f|I)$. This is an improved version of Lemma 2.1 in [Benoist-Quint, Annals of maths, 2011]. Note that the function $g$ to which $f$ is cohomologous is very nice (and, in particular, integrable), but the transfer function is only measurable in this argument. The fact that the control on conditional expectation is nevertheless preserved throughout the argument follows from Lemma~\verb+limit_foTn_over_n+ above.\ text \We start with the lemma (and the proof) of [BQ2011]. It shows that, if a function has a conditional expectation with respect to invariants which is positive, then it is cohomologous to a nonnegative function. The argument is the clever remark that $g = \max (0, \inf_n S_n f)$ and $u = \min (0, \inf_n S_n f)$ work (where these expressions are well defined as $S_n f$ tends to infinity thanks to our assumption).\ lemma cohomologous_approx_cond_exp_aux: fixes f::"'a \ real" assumes [measurable]: "integrable M f" and "AE x in M. real_cond_exp M Invariants f x > 0" shows "\u g. u \ borel_measurable M \ (integrable M g) \ (AE x in M. g x \ 0 \ g x \ max 0 (f x)) \ (\x. f x = g x + u x - u (T x))" proof - define h::"'a \ real" where "h = (\x. (INF n\{1..}. 
birkhoff_sum f n x))" define u where "u = (\x. min (h x) 0)" define g where "g = (\x. f x - u x + u (T x))" have [measurable]: "h \ borel_measurable M" "u \ borel_measurable M" "g \ borel_measurable M" unfolding g_def h_def u_def by auto have "f x = g x + u x - u (T x)" for x unfolding g_def by auto { fix x assume H: "real_cond_exp M Invariants f x > 0" "(\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x" have "eventually (\n. ereal(birkhoff_sum f n x / n) * ereal n = ereal(birkhoff_sum f n x)) sequentially" unfolding eventually_sequentially by (rule exI[of _ 1], auto) moreover have "(\n. ereal(birkhoff_sum f n x / n) * ereal n) \ ereal(real_cond_exp M Invariants f x) * \" apply (intro tendsto_intros) using H by auto ultimately have "(\n. ereal(birkhoff_sum f n x)) \ ereal(real_cond_exp M Invariants f x) * \" by (blast intro: Lim_transform_eventually) then have "(\n. ereal(birkhoff_sum f n x)) \ \" using H by auto then have B: "\C. \n. C \ birkhoff_sum f n x" by (intro liminf_finite_then_bounded_below, simp add: liminf_PInfty) have "h x \ f x" unfolding h_def apply (rule cInf_lower) using B by force+ have "{birkhoff_sum f n (T x) |n. n \ {1..}} = {birkhoff_sum f (1+n) (x) - f x |n. n \ {1..}}" unfolding birkhoff_sum_cocycle by auto also have "... = {birkhoff_sum f n x - f x |n. n \ {2..}}" by (metis (no_types, hide_lams) Suc_1 Suc_eq_plus1_left Suc_le_D Suc_le_mono atLeast_iff) finally have *: "{birkhoff_sum f n (T x) |n. n \ {1..}} = (\t. t - (f x))`{birkhoff_sum f n x |n. n \ {2..}}" by auto have "h(T x) = Inf {birkhoff_sum f n (T x) |n. n \ {1..}}" unfolding h_def by (metis Setcompr_eq_image) also have "... = (\t\{birkhoff_sum f n x |n. n \ {2..}}. t - f x)" by (simp only: *) also have "... = (\t. t - (f x)) (Inf {birkhoff_sum f n x |n. n \ {2..}})" using B by (auto intro!: monoI bijI mono_bij_cInf [symmetric]) finally have I: "Inf {birkhoff_sum f n x |n. n \ {2..}} = f x + h (T x)" by auto have "max 0 (h x) + u x = h x" unfolding u_def by auto also have "... = Inf {birkhoff_sum f n x |n. n \ {1..}}" unfolding h_def by (metis Setcompr_eq_image) also have "... = Inf ({birkhoff_sum f n x |n. n \ {1}} \ {birkhoff_sum f n x |n. n \ {2..}})" by (auto intro!: arg_cong[of _ _ Inf], metis One_nat_def Suc_1 antisym birkhoff_sum_1(2) not_less_eq_eq, force) also have "Inf ({birkhoff_sum f n x |n. n \ {1}} \ {birkhoff_sum f n x |n. n \ {2..}}) = min (Inf {birkhoff_sum f n x |n. n \ {1}}) (Inf {birkhoff_sum f n x |n. n \ {2..}})" unfolding inf_min[symmetric] apply (intro cInf_union_distrib) using B by auto also have "... = min (f x) (f x + h (T x))" using I by auto also have "... = f x + u (T x)" unfolding u_def by auto finally have "max 0 (h x) = f x + u (T x) - u x" by auto then have "g x = max 0 (h x)" unfolding g_def by auto then have "g x \ 0 \ g x \ max 0 (f x)" using \h x \ f x\ by auto } then have *: "AE x in M. g x \ 0 \ g x \ max 0 (f x)" using assms(2) birkhoff_theorem_AE_nonergodic[OF assms(1)] by auto moreover have "integrable M g" apply (rule Bochner_Integration.integrable_bound[of _ f]) using * by (auto simp add: assms) ultimately have "u \ borel_measurable M \ integrable M g \ (AE x in M. 0 \ g x \ g x \ max 0 (f x)) \ (\x. f x = g x + u x - u (T x))" using \\x. f x = g x + u x - u (T x)\ \u \ borel_measurable M\ by auto then show ?thesis by blast qed text \To deduce the stronger version that $f$ is cohomologous to an arbitrarily good approximation of $E(f|I)$, we apply the previous lemma twice, to control successively the negative and the positive side. 
The sign control in the conclusion of the previous lemma implies that the second step does not spoil the first one.\ lemma cohomologous_approx_cond_exp: fixes f::"'a \ real" and B::"'a \ real" assumes [measurable]: "integrable M f" "B \ borel_measurable M" and "AE x in M. B x > 0" shows "\g u. u \ borel_measurable M \ integrable M g \ (\x. f x = g x + u x - u (T x)) \ (AE x in M. abs(g x - real_cond_exp M Invariants f x) \ B x)" proof - define C where "C = (\x. min (B x) 1)" have [measurable]: "integrable M C" apply (rule Bochner_Integration.integrable_bound[of _ "\_. (1::real)"], auto) unfolding C_def using assms(3) by auto have "C x \ B x" for x unfolding C_def by auto have "AE x in M. C x > 0" unfolding C_def using assms(3) by auto have AECI: "AE x in M. real_cond_exp M Invariants C x > 0" by (intro real_cond_exp_gr_c \integrable M C\ \AE x in M. C x > 0\) define f1 where "f1 = (\x. f x - real_cond_exp M Invariants f x)" have "integrable M f1" unfolding f1_def by (intro Bochner_Integration.integrable_diff \integrable M f\ real_cond_exp_int(1)) have "AE x in M. real_cond_exp M Invariants f1 x = real_cond_exp M Invariants f x - real_cond_exp M Invariants (real_cond_exp M Invariants f) x" unfolding f1_def apply (rule real_cond_exp_diff) by (intro Bochner_Integration.integrable_diff \integrable M f\ \integrable M C\ real_cond_exp_int(1))+ moreover have "AE x in M. real_cond_exp M Invariants (real_cond_exp M Invariants f) x = real_cond_exp M Invariants f x" by (intro real_cond_exp_nested_subalg subalg \integrable M f\, auto) ultimately have AEf1: "AE x in M. real_cond_exp M Invariants f1 x = 0" by auto have A [measurable]: "integrable M (\x. f1 x + C x)" by (intro Bochner_Integration.integrable_add \integrable M f1\ \integrable M C\) have "AE x in M. real_cond_exp M Invariants (\x. f1 x + C x) x = real_cond_exp M Invariants f1 x + real_cond_exp M Invariants C x" by (intro real_cond_exp_add \integrable M f1\ \integrable M C\) then have B: "AE x in M. real_cond_exp M Invariants (\x. f1 x + C x) x > 0" using AECI AEf1 by auto obtain u2 g2 where H2: "u2 \ borel_measurable M" "integrable M g2" "AE x in M. g2 x \ 0 \ g2 x \ max 0 (f1 x + C x)" "\x. f1 x + C x = g2 x + u2 x - u2 (T x)" using cohomologous_approx_cond_exp_aux[OF A B] by blast define f2 where "f2 = (\x. (g2 x - C x))" have *: "u2(T x) - u2 x = f2 x -f1 x" for x unfolding f2_def using H2(4)[of x] by auto have "AE x in M. f2 x \ - C x" using H2(3) unfolding f2_def by auto have "integrable M f2" unfolding f2_def by (intro Bochner_Integration.integrable_diff \integrable M g2\ \integrable M C\) have "AE x in M. real_cond_exp M Invariants (\x. u2(T x) - u2 x) x = 0" proof (rule limit_foTn_over_n) show "integrable M (\x. u2(T x) - u2 x)" unfolding * by (intro Bochner_Integration.integrable_diff \integrable M f1\ \integrable M f2\) qed (simp add: \u2 \ borel_measurable M\) then have "AE x in M. real_cond_exp M Invariants (\x. f2 x - f1 x) x = 0" unfolding * by simp moreover have "AE x in M. real_cond_exp M Invariants (\x. f2 x - f1 x) x = real_cond_exp M Invariants f2 x - real_cond_exp M Invariants f1 x" by (intro real_cond_exp_diff \integrable M f2\ \integrable M f1\) ultimately have AEf2: "AE x in M. real_cond_exp M Invariants f2 x = 0" using AEf1 by auto have A [measurable]: "integrable M (\x. C x - f2 x)" by (intro Bochner_Integration.integrable_diff \integrable M f2\ \integrable M C\) have "AE x in M. real_cond_exp M Invariants (\x. 
C x - f2 x) x = real_cond_exp M Invariants C x - real_cond_exp M Invariants f2 x" by (intro real_cond_exp_diff \integrable M f2\ \integrable M C\) then have B: "AE x in M. real_cond_exp M Invariants (\x. C x - f2 x) x > 0" using AECI AEf2 by auto obtain u3 g3 where H3: "u3 \ borel_measurable M" "integrable M g3" "AE x in M. g3 x \ 0 \ g3 x \ max 0 (C x - f2 x)" "\x. C x - f2 x = g3 x + u3 x - u3 (T x)" using cohomologous_approx_cond_exp_aux[OF A B] by blast define f3 where "f3 = (\x. C x - g3 x)" have "AE x in M. f3 x \ min (C x) (f2 x)" unfolding f3_def using H3(3) by auto then have "AE x in M. f3 x \ -C x" using \AE x in M. f2 x \ - C x\ \AE x in M. C x > 0\ by auto moreover have "AE x in M. f3 x \ C x" unfolding f3_def using H3(3) by auto ultimately have "AE x in M. abs(f3 x) \ C x" by auto then have *: "AE x in M. abs(f3 x) \ B x" using order_trans[OF _ \\x. C x \ B x\] by auto define g where "g = (\x. f3 x + real_cond_exp M Invariants f x)" define u where "u = (\x. u2 x - u3 x)" have "AE x in M. abs (g x - real_cond_exp M Invariants f x) \ B x" unfolding g_def using * by auto moreover have "f x = g x + u x - u(T x)" for x using H3(4)[of x] H2(4)[of x] unfolding u_def g_def f3_def f2_def f1_def by auto moreover have "u \ borel_measurable M" unfolding u_def using \u2 \ borel_measurable M\ \u3 \ borel_measurable M\ by auto moreover have "integrable M g" unfolding g_def f3_def by (intro Bochner_Integration.integrable_add Bochner_Integration.integrable_diff \integrable M C\ \integrable M g3\ \integrable M f\ real_cond_exp_int(1)) ultimately show ?thesis by auto qed subsubsection \$L^1$ version of Birkhoff theorem\ text \The $L^1$ convergence in Birkhoff theorem follows from the almost everywhere convergence and general considerations on $L^1$ convergence (Scheffe's lemma) explained in \verb+AE_and_int_bound_implies_L1_conv2+. This argument works neatly for nonnegative functions, the general case reduces to this one by taking the positive and negative parts of a given function. One could also prove it by truncation: for bounded functions, the $L^1$ convergence follows from the boundedness and almost sure convergence. The general case follows by density, but it is a little bit tedious to write as one need to make sure that the conditional expectation of the truncation converges to the conditional expectation of the original function. This is true in $L^1$ as the conditional expectation is a contraction in $L^1$, it follows almost everywhere after taking a subsequence. All in all, the argument based on Scheffe's lemma seems more economical.\ lemma birkhoff_lemma_L1: fixes f::"'a \ real" assumes "\x. f x \ 0" and [measurable]: "integrable M f" shows "(\n. \\<^sup>+x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \M) \ 0" proof (rule Scheffe_lemma2) show i: "integrable M (real_cond_exp M Invariants f)" using assms by (simp add: real_cond_exp_int(1)) show "AE x in M. (\n. birkhoff_sum f n x / real n) \ real_cond_exp M Invariants f x" using birkhoff_theorem_AE_nonergodic assms by simp fix n have [measurable]: "(\x. ennreal \birkhoff_sum f n x\) \ borel_measurable M" by measurable show [measurable]: "(\x. birkhoff_sum f n x / real n) \ borel_measurable M" by measurable have "AE x in M. real_cond_exp M Invariants f x \ 0" using assms(1) real_cond_exp_pos by simp then have *: "AE x in M. norm (real_cond_exp M Invariants f x) = real_cond_exp M Invariants f x" by auto have **: "(\ x. norm (real_cond_exp M Invariants f x) \M) = (\ x. 
real_cond_exp M Invariants f x \M)" apply (rule integral_cong_AE) using * by auto have "(\\<^sup>+x. ennreal (norm (real_cond_exp M Invariants f x)) \M) = (\ x. norm (real_cond_exp M Invariants f x) \M)" by (rule nn_integral_eq_integral) (auto simp add: i) also have "... = (\ x. real_cond_exp M Invariants f x \M)" using ** by simp also have "... = (\ x. f x \M)" using real_cond_exp_int(2) assms(2) by auto also have "... = (\x. norm(f x) \M)" using assms by auto also have "... = (\\<^sup>+x. norm(f x) \M)" by (rule nn_integral_eq_integral[symmetric], auto simp add: assms(2)) finally have eq: "(\\<^sup>+ x. norm (real_cond_exp M Invariants f x) \M) = (\\<^sup>+ x. norm(f x) \M)" by simp { fix x have "norm(birkhoff_sum f n x) \ birkhoff_sum (\x. norm(f x)) n x" using birkhoff_sum_abs by fastforce then have "norm(birkhoff_sum f n x) \ birkhoff_sum (\x. ennreal(norm(f x))) n x" unfolding birkhoff_sum_def by auto } then have "(\\<^sup>+x. norm(birkhoff_sum f n x) \M) \ (\\<^sup>+x. birkhoff_sum (\x. ennreal(norm(f x))) n x \M)" by (simp add: nn_integral_mono) also have "... = n * (\\<^sup>+x. norm(f x) \M)" by (rule birkhoff_sum_nn_integral, auto) also have "... = n * (\\<^sup>+ x. norm (real_cond_exp M Invariants f x) \M)" using eq by simp finally have *: "(\\<^sup>+x. norm(birkhoff_sum f n x) \M) \ n * (\\<^sup>+ x. norm (real_cond_exp M Invariants f x) \M)" by simp show "(\\<^sup>+ x. ennreal (norm (birkhoff_sum f n x / real n)) \M) \ (\\<^sup>+ x. norm (real_cond_exp M Invariants f x) \M)" proof (cases) assume "n = 0" then show ?thesis by auto next assume "\(n = 0)" then have "n > 0" by simp then have "1/ennreal(real n) \ 0" by simp have "(\\<^sup>+ x. ennreal (norm (birkhoff_sum f n x / real n)) \M) = (\\<^sup>+ x. ennreal (norm (birkhoff_sum f n x)) / ennreal(real n) \M)" using \n > 0\ by (auto simp: divide_ennreal) also have "... = (\\<^sup>+ x. (1/ennreal(real n)) * ennreal (norm (birkhoff_sum f n x)) \M)" by (simp add: \0 < n\ divide_ennreal_def mult.commute) also have "... = (1/ennreal(real n) * (\\<^sup>+ x. ennreal (norm (birkhoff_sum f n x)) \M))" by (subst nn_integral_cmult) auto also have "... \ (1/ennreal(real n)) * (ennreal(real n) * (\\<^sup>+ x. norm (real_cond_exp M Invariants f x) \M))" using * by (intro mult_mono) (auto simp: ennreal_of_nat_eq_real_of_nat) also have "... = (\\<^sup>+ x. norm (real_cond_exp M Invariants f x) \M)" using \n > 0\ by (auto simp del: ennreal_1 simp add: ennreal_1[symmetric] divide_ennreal ennreal_mult[symmetric] mult.assoc[symmetric]) simp finally show ?thesis by simp qed qed theorem birkhoff_theorem_L1_nonergodic: fixes f::"'a \ real" assumes [measurable]: "integrable M f" shows "(\n. \\<^sup>+x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \M) \ 0" proof - define g where "g = (\x. max (f x) 0)" have g_int [measurable]: "integrable M g" unfolding g_def using assms by auto define h where "h = (\x. max (-f x) 0)" have h_int [measurable]: "integrable M h" unfolding h_def using assms by auto have "f = (\x. g x - h x)" unfolding g_def h_def by auto { fix n::nat assume "n > 0" have "\x. birkhoff_sum f n x = birkhoff_sum g n x - birkhoff_sum h n x" using birkhoff_sum_diff \f = (\x. g x - h x)\ by auto then have "\x. birkhoff_sum f n x / n = birkhoff_sum g n x / n - birkhoff_sum h n x / n" using \n > 0\ by (simp add: diff_divide_distrib) moreover have "AE x in M. real_cond_exp M Invariants g x - real_cond_exp M Invariants h x = real_cond_exp M Invariants f x" using AE_symmetric[OF real_cond_exp_diff] g_int h_int \f = (\x. 
g x - h x)\ by auto ultimately have "AE x in M. birkhoff_sum f n x / n - real_cond_exp M Invariants f x = (birkhoff_sum g n x / n - real_cond_exp M Invariants g x) - (birkhoff_sum h n x / n - real_cond_exp M Invariants h x)" by auto then have *: "AE x in M. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \ norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) + norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x)" by auto have "(\\<^sup>+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \M) \ (\\<^sup>+ x. ennreal(norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x)) + norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) \M)" apply (rule nn_integral_mono_AE) using * by (simp add: ennreal_plus[symmetric] del: ennreal_plus) also have "... = (\\<^sup>+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) \M) + (\\<^sup>+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) \M)" apply (rule nn_integral_add) apply auto using real_cond_exp_F_meas borel_measurable_cond_exp2 by measurable finally have "(\\<^sup>+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \M) \ (\\<^sup>+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) \M) + (\\<^sup>+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) \M)" by simp } then have *: "eventually (\n. (\\<^sup>+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \M) \ (\\<^sup>+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) \M) + (\\<^sup>+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) \M)) sequentially" using eventually_at_top_dense by auto have **: "eventually (\n. (\\<^sup>+ x. norm(birkhoff_sum f n x / n - real_cond_exp M Invariants f x) \M) \ 0) sequentially" by simp have "(\n. (\\<^sup>+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) \M)) \ 0" apply (rule birkhoff_lemma_L1, auto simp add: g_int) unfolding g_def by auto moreover have "(\n. (\\<^sup>+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) \M)) \ 0" apply (rule birkhoff_lemma_L1, auto simp add: h_int) unfolding h_def by auto ultimately have "(\n. (\\<^sup>+ x. norm(birkhoff_sum g n x / n - real_cond_exp M Invariants g x) \M) + (\\<^sup>+ x. norm(birkhoff_sum h n x / n - real_cond_exp M Invariants h x) \M)) \ 0" using tendsto_add[of _ 0 _ _ 0] by auto then show ?thesis using tendsto_sandwich[OF ** *] by auto qed subsubsection \Conservativity of skew products\ text \The behaviour of skew-products of the form $(x, y) \mapsto (Tx, y + f x)$ is directly related to Birkhoff theorem, as the iterates involve the Birkhoff sums in the fiber. Birkhoff theorem implies that such a skew product is conservative when the function $f$ has vanishing conditional expectation. To prove the theorem, assume by contradiction that a set $A$ with positive measure does not intersect its preimages. Replacing $A$ with a smaller set $C$, we can assume that $C$ is bounded in the $y$-direction, by a constant $N$, and also that all its nonempty vertical fibers, above the projection $Cx$, have a measure bounded from below. Then, by Birkhoff theorem, for any $r>0$, most of the first $n$ preimages of $C$ are contained in the set $\{|y| \leq r n+N\}$, of measure $O(r n)$. Hence, they can not be disjoint if $r < \mu(C)$. To make this argument rigorous, one should only consider the preimages whose $x$-component belongs to a set $Dx$ where the Birkhoff sums are small. 
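The point, made precise in \verb+skew_product_real_iterates+, is that the iterates of the skew product are explicit: $(x,y)$ is mapped at time $n$ to $(T^n x, y + S_n f(x))$. Hence, if $x \in Dx$, i.e., $|S_n f(x)| \leq r n$ for all large enough $n$, and if the image at time $n$ belongs to $C \subseteq \{|y| \leq N\}$, then $|y| \leq r n + N$; for this to happen, the $x$-component must lie in $Dx \cap T^{-n}(Cx)$.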
This condition has a positive measure if $\mu(Cx) + \mu(Dx) > \mu(M)$, which one can ensure by taking $Dx$ large enough.\ theorem (in fmpt) skew_product_conservative: fixes f::"'a \ real" assumes [measurable]: "integrable M f" and "AE x in M. real_cond_exp M Invariants f x = 0" shows "conservative_mpt (M \\<^sub>M lborel) (\(x,y). (T x, y + f x))" proof (rule conservative_mptI) let ?TS = "(\(x,y). (T x, y + f x))" let ?MS = "M \\<^sub>M (lborel::real measure)" have f_meas [measurable]: "f \ borel_measurable M" by auto have "mpt M T" by (simp add: mpt_axioms) with mpt_skew_product_real[OF this f_meas] show "mpt ?MS ?TS" by simp then interpret TS: mpt ?MS ?TS by auto fix A::"('a \ real) set" assume A1 [measurable]: "A \ sets ?MS" and A2: "emeasure ?MS A > 0" have "A = (\N::nat. A \ {(x,y). abs(y) \ N})" by (auto simp add: real_arch_simple) then have *: "emeasure ?MS (\N::nat. A \ {(x,y). abs(y) \ N}) > 0" using A2 by simp have "space ?MS = space M \ space (lborel::real measure)" using space_pair_measure by auto then have A_inc: "A \ space M \ space (lborel::real measure)" using sets.sets_into_space[OF A1] by auto { fix N::nat have "{(x, y). abs(y) \ real N \ x \ space M} = space M \ {-(real N)..(real N)}" by auto then have "{(x, y). \y\ \ real N \ x \ space M} \ sets ?MS" by auto then have "A \ {(x, y). \y\ \ real N \ x \ space M} \ sets ?MS" using A1 by auto moreover have "A \ {(x,y). abs(y) \ real N} = A \ {(x, y). \y\ \ real N \ x \ space M}" using A_inc by blast ultimately have "A \ {(x,y). abs(y) \ real N} \ sets ?MS" by auto } then have [measurable]: "\N::nat. A \ {(x, y). \y\ \ real N} \ sets (M \\<^sub>M borel)" by auto have "\N::nat. emeasure ?MS (A \ {(x,y). abs(y) \ N}) > 0" apply (rule emeasure_pos_unionE) using * by auto then obtain N::nat where N: "emeasure ?MS (A \ {(x,y). abs(y) \ N}) > 0" by auto define B where "B = A \ {(x,y). abs(y) \ N}" have B_meas [measurable]: "B \ sets (M \\<^sub>M lborel)" unfolding B_def by auto have "0 < emeasure (M \\<^sub>M lborel) B" unfolding B_def using N by auto also have "... = (\\<^sup>+x. emeasure lborel (Pair x -` B) \M)" apply (rule sigma_finite_measure.emeasure_pair_measure_alt) using B_meas by (auto simp add: lborel.sigma_finite_measure_axioms) finally have *: "(\\<^sup>+x. emeasure lborel (Pair x -` B) \M) > 0" by simp have "\Cx\sets M. \e::real>0. emeasure M Cx > 0 \ (\x \ Cx. emeasure lborel (Pair x -` B) \ e)" by (rule not_AE_zero_int_ennreal_E, auto simp add: *) then obtain Cx e where [measurable]: "Cx \ sets M" and Cxe: "e>(0::real)" "emeasure M Cx > 0" "\x. x \ Cx \ emeasure lborel (Pair x -` B) \ e" by blast define C where "C = B \ (Cx \ (UNIV::real set))" have C_meas [measurable]: "C \ sets (M \\<^sub>M lborel)" unfolding C_def using B_meas by auto have Cx_fibers: "\x. x \ Cx \ emeasure lborel (Pair x -` C) \ e" using Cxe(3) C_def by auto define c where "c = (measure M Cx)/2" have "c > 0" unfolding c_def using Cxe(2) by (simp add: emeasure_eq_measure) text \We will apply Birkhoff theorem to show that most preimages of $C$ at time $n$ are contained in a cylinder of height roughly $r n$, for some suitably small $r$. How small $r$ should be to get a contradiction can be determined at the end of the proof. 
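To see where the constraint on $r$ comes from, anticipating the construction below: for $n_0 \leq n \leq n_1$, the set $D_n$ of points of $Dx \times [-r n_1 - N, r n_1 + N]$ sent into $C$ at time $n$ has measure at least $e c$ (where $e$ is the lower bound on the nonempty vertical fibers of $C$ and $c = \mu(Cx)/2$), while all these sets are contained in the cylinder $\{|y| \leq r n_1 + N\}$, of measure $\mu(M) (2 r n_1 + 2 N)$. If they were pairwise disjoint, this would give $e c (n_1 - n_0 + 1) \leq \mu(M)(2 r n_1 + 2 N)$, which fails for large $n_1$ as soon as $2 r \mu(M) < e c$.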
It turns out that the good condition is the following one -- this is by no means obvious now.\ define r where "r = (if measure M (space M) = 0 then 1 else e * c / (4 * measure M (space M)))" have "r > 0" using \e > 0\ \c > 0\ unfolding r_def apply auto using measure_le_0_iff by fastforce have pos: "e*c-2*r*measure M (space M) > 0" using \e > 0\ \c > 0\ unfolding r_def by auto define Bgood where "Bgood = {x \ space M. (\n. birkhoff_sum f n x / n) \ 0}" have [measurable]: "Bgood \ sets M" unfolding Bgood_def by auto have *: "AE x in M. x \ Bgood" unfolding Bgood_def using birkhoff_theorem_AE_nonergodic[OF assms(1)] assms(2) by auto then have "emeasure M Bgood = emeasure M (space M)" by (intro emeasure_eq_AE) auto { fix x assume "x \ Bgood" then have "x \ space M" unfolding Bgood_def by auto have "(\n. birkhoff_sum f n x / n) \ 0" using \x \ Bgood\ unfolding Bgood_def by auto moreover have "0 \ {-r<..r>0\ by auto ultimately have "eventually (\n. birkhoff_sum f n x / n \ {-r<..0" "\n. n \ n0 \ birkhoff_sum f n x / n \ {-r<.. n0" then have "n>0" using \n0>0\ by auto with n0(2)[OF \n \ n0\] have "abs(birkhoff_sum f n x / n) \ r" by auto then have "abs(birkhoff_sum f n x) \ r * n" using \n>0\ by (simp add: divide_le_eq) } then have "x \ (\n0. {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n})" using \x \ space M\ by blast } then have "AE x in M. x \ space M - (\n0. {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n})" using * by auto then have eqM: "emeasure M (\n0. {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n}) = emeasure M (space M)" by (intro emeasure_eq_AE) auto have "(\n0. emeasure M {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n} + c) \ emeasure M (\n0. {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n}) + c" by (intro tendsto_intros Lim_emeasure_incseq) (auto simp add: incseq_def) moreover have "emeasure M (\n0. {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n}) + c > emeasure M (space M)" using eqM \c > 0\ emeasure_eq_measure by auto ultimately have "eventually (\n0. emeasure M {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n} + c > emeasure M (space M)) sequentially" unfolding order_tendsto_iff by auto then obtain n0 where n0: "emeasure M {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n} + c > emeasure M (space M)" using eventually_sequentially by auto define Dx where "Dx = {x \ space M. \n\{n0..}. abs(birkhoff_sum f n x) \ r * n}" have Dx_meas [measurable]: "Dx \ sets M" unfolding Dx_def by auto have "emeasure M Dx + c \ emeasure M (space M)" using n0 Dx_def by auto obtain n1::nat where n1: "n1 > max n0 ((measure M (space M) * 2 * N + e*c*n0 - e*c) / (e*c-2*r*measure M (space M)))" by (metis mult.commute mult.left_neutral numeral_One reals_Archimedean3 zero_less_numeral) then have "n1 > n0" by auto have n1_ineq: "n1 * (e*c-2*r*measure M (space M)) > (measure M (space M) * 2 * N + e*c*n0 - e*c)" using n1 pos by (simp add: pos_divide_less_eq) define D where "D = (\n. Dx \ {-r*n1-N..r*n1+N} \ (?TS^^n)-`C)" have Dn_meas [measurable]: "D n \ sets (M \\<^sub>M lborel)" for n unfolding D_def apply (rule TS.T_intersec_meas(2)) using C_meas by auto have "emeasure ?MS (D n) \ e * c" if "n \ {n0..n1}" for n proof - have "n \ n0" "n \ n1" using that by auto { fix x assume [simp]: "x \ space M" define F where "F = {y \ {-r*n1-N..r*n1+N}. 
y + birkhoff_sum f n x \ Pair ((T^^n)x) -`C}" have [measurable]: "F \ sets lborel" unfolding F_def by measurable { fix y::real have "(?TS^^n)(x,y) = ((T^^n)x, y + birkhoff_sum f n x)" using skew_product_real_iterates by simp then have "(indicator C ((?TS^^n) (x,y))::ennreal) = indicator Cx ((T^^n)x) * indicator (Pair ((T^^n)x) -`C) (y + birkhoff_sum f n x)" using C_def by (simp add: indicator_def) moreover have "(indicator (D n) (x, y)::ennreal) = indicator Dx x * indicator {-r*n1-N..r*n1+N} y * indicator C ((?TS^^n) (x,y))" unfolding D_def by (simp add: indicator_def) ultimately have "(indicator (D n) (x, y)::ennreal) = indicator Dx x * indicator {-r*n1-N..r*n1+N} y * indicator Cx ((T^^n)x) * indicator (Pair ((T^^n)x) -`C) (y + birkhoff_sum f n x)" by (simp add: mult.assoc) then have "(indicator (D n) (x, y)::ennreal) = indicator (Dx \ (T^^n)-`Cx) x * indicator F y" unfolding F_def by (simp add: indicator_def) } then have "(\\<^sup>+y. indicator (D n) (x, y) \lborel) = (\\<^sup>+y. indicator (Dx \ (T^^n)-`Cx) x * indicator F y \lborel)" by auto also have "... = indicator (Dx \ (T^^n)-`Cx) x * (\\<^sup>+y. indicator F y \lborel)" by (rule nn_integral_cmult, auto) also have "... = indicator (Dx \ (T^^n)-`Cx) x * emeasure lborel F" using \F \ sets lborel\ by auto finally have A: "(\\<^sup>+y. indicator (D n) (x, y) \lborel) = indicator (Dx \ (T^^n)-`Cx) x * emeasure lborel F" by simp have "(\\<^sup>+y. indicator (D n) (x, y) \lborel) \ ennreal e * indicator (Dx \ (T^^n)-`Cx) x" proof (cases) assume "indicator (Dx \ (T^^n)-`Cx) x = (0::ennreal)" then show ?thesis by auto next assume "\(indicator (Dx \ (T^^n)-`Cx) x = (0::ennreal))" then have "x \ Dx \ (T^^n)-`Cx" by (simp add: indicator_eq_0_iff) then have "x \ Dx" "(T^^n) x \ Cx" by auto then have "abs(birkhoff_sum f n x) \ r * n" using \n \ {n0..n1}\ Dx_def by auto then have *: "abs(birkhoff_sum f n x) \ r * n1" using \n \ n1\ \r>0\ - by (meson of_nat_le_iff order_trans real_mult_le_cancel_iff2) + by (meson of_nat_le_iff order_trans mult_le_cancel_iff2) have F_expr: "F = {-r*n1-N..r*n1+N} \ (+)(birkhoff_sum f n x) -` (Pair ((T^^n)x) -`C)" unfolding F_def by (auto simp add: add.commute) have "(Pair ((T^^n)x) -`C) \ {real_of_int (- int N)..real N}" unfolding C_def B_def by auto then have "((+)(birkhoff_sum f n x)) -` (Pair ((T^^n)x) -`C) \ {-N-birkhoff_sum f n x..N-birkhoff_sum f n x}" by auto also have "... \ {-r * n1 - N .. r * n1 + N}" using * by auto finally have "F = ((+)(birkhoff_sum f n x)) -` (Pair ((T^^n)x) -`C)" unfolding F_expr by auto then have "emeasure lborel F = emeasure lborel ((+)(birkhoff_sum f n x) -` (Pair ((T^^n)x) -`C))" by auto also have "... = emeasure lborel (((+)(birkhoff_sum f n x) -` (Pair ((T^^n)x) -`C)) \ space lborel)" by simp also have "... = emeasure (distr lborel borel ((+) (birkhoff_sum f n x))) (Pair ((T^^n)x) -`C)" apply (rule emeasure_distr[symmetric]) using C_meas by auto also have "... = emeasure lborel (Pair ((T^^n)x) -`C)" using lborel_distr_plus[of "birkhoff_sum f n x"] by simp also have "... \ e" using Cx_fibers \(T^^n) x \ Cx\ by auto finally have "emeasure lborel F \ e" by auto then show ?thesis using A by (simp add: indicator_def) qed } moreover have "emeasure ?MS (D n) = (\\<^sup>+x. (\\<^sup>+y. indicator (D n) (x, y) \lborel) \M)" using Dn_meas lborel.emeasure_pair_measure by blast ultimately have "emeasure ?MS (D n) \ (\\<^sup>+x. ennreal e * indicator (Dx \ (T ^^ n) -` Cx) x \M)" by (simp add: nn_integral_mono) also have "(\\<^sup>+x. 
ennreal e * indicator (Dx \ (T ^^ n) -` Cx) x \M) = e * (\\<^sup>+x. indicator (Dx \ (T ^^ n) -` Cx) x \M)" apply (rule nn_integral_cmult) using \e>0\ by auto also have "... = ennreal e * emeasure M (Dx \ (T ^^ n) -` Cx)" by simp finally have *: "emeasure ?MS (D n) \ ennreal e * emeasure M (Dx \ (T ^^ n) -` Cx)" by auto have "c + emeasure M (space M) \ emeasure M Dx + emeasure M Cx" using \emeasure M Dx + c \ emeasure M (space M)\ unfolding c_def by (auto simp: emeasure_eq_measure ennreal_plus[symmetric] simp del: ennreal_plus) also have "... = emeasure M Dx + emeasure M ((T^^n)--`Cx)" by (simp add: T_vrestr_same_emeasure(2)) also have "... = emeasure M (Dx \ ((T^^n)--`Cx)) + emeasure M (Dx \ ((T^^n)--`Cx))" by (rule emeasure_Un_Int, auto) also have "... \ emeasure M (space M) + emeasure M (Dx \ ((T^^n)-`Cx))" proof - have "emeasure M (Dx \ ((T^^n)--`Cx)) \ emeasure M (space M)" by (rule emeasure_mono, auto simp add: sets.sets_into_space) moreover have "Dx \ ((T^^n)--`Cx) = Dx \ ((T^^n)-`Cx)" by (simp add: vrestr_intersec_in_space) ultimately show ?thesis by (metis add.commute add_left_mono) qed finally have "emeasure M (Dx \ ((T^^n)-`Cx)) \ c" by (simp add: emeasure_eq_measure) then have "ennreal e * emeasure M (Dx \ ((T^^n)-`Cx)) \ ennreal e * c" using \e > 0\ using mult_left_mono by fastforce with * show "emeasure ?MS (D n) \ e * c" using \0 \0 by (auto simp: ennreal_mult[symmetric]) qed have "\(disjoint_family_on D {n0..n1})" proof assume D: "disjoint_family_on D {n0..n1}" have "emeasure lborel {-r*n1-N..r*n1+N} = (r * real n1 + real N) - (- r * real n1 - real N)" apply (rule emeasure_lborel_Icc) using \r>0\ by auto then have *: "emeasure lborel {-r*n1-N..r*n1+N} = ennreal(2 * r * real n1 + 2 * real N)" by (auto simp: ac_simps) have "ennreal(e * c) * (real n1 - real n0 + 1) = ennreal(e*c) * card {n0..n1}" using \n1 > n0\ by (auto simp: ennreal_of_nat_eq_real_of_nat Suc_diff_le ac_simps of_nat_diff) also have "... = (\n\{n0..n1}. ennreal(e*c))" by (simp add: ac_simps) also have "... \ (\n\{n0..n1}. emeasure ?MS (D n))" using \\n. n \ {n0..n1} \ emeasure ?MS (D n) \ e * c\ by (meson sum_mono) also have "... = emeasure ?MS (\n\{n0..n1}. D n)" apply (rule sum_emeasure) using Dn_meas by (auto simp add: D) also have "... \ emeasure ?MS (space M \ {-r*n1-N..r*n1+N})" apply (rule emeasure_mono) unfolding D_def using sets.sets_into_space[OF Dx_meas] by auto also have "... = emeasure M (space M) * emeasure lborel {-r*n1-N..r*n1+N}" by (rule sigma_finite_measure.emeasure_pair_measure_Times, auto simp add: lborel.sigma_finite_measure_axioms) also have "... = emeasure M (space M) * ennreal(2 * r * real n1 + 2 * real N)" using * by auto finally have "ennreal(e * c) * (real n1- real n0+1) \ emeasure M (space M) * ennreal(2 * r * real n1 + 2 * real N)" by simp then have "e*c * (real n1- real n0 + 1) \ measure M (space M) * (2 * r * real n1 + 2 * real N)" using \0 \0 \0 \n0 < n1\ emeasure_eq_measure by (auto simp: ennreal_mult'[symmetric] simp del: ennreal_plus) then have "0 \ measure M (space M) * (2 * r * real n1 + 2 * real N) - e*c * (real n1- real n0 + 1)" by auto also have "... 
= (measure M (space M) * 2 * N + e*c*n0 - e*c) - n1 * (e*c-2*r*measure M (space M))" by algebra finally have "n1 * (e*c-2*r*measure M (space M)) \ measure M (space M) * 2 * N + e*c*n0 - e*c" by linarith then show False using n1_ineq by auto qed then obtain n m where nm: "n D n \ {}" unfolding disjoint_family_on_def by (metis inf_sup_aci(1) linorder_cases) define k where "k = m-n" then have "k>0" "D (n+k) \ D n \ {}" using nm by auto then have "((?TS^^(n+k))-`A) \ ((?TS^^n)-`A) \ {}" unfolding D_def C_def B_def by auto moreover have "((?TS^^(n+k))-`A) \ ((?TS^^n)-`A) = (?TS^^n)-`(((?TS^^k)-`A) \ A)" using funpow_add by (simp add: add.commute funpow_add set.compositionality) ultimately have "((?TS^^k)-`A) \ A \ {}" by auto then show "\k>0. ((?TS^^k)-`A) \ A \ {}" using \k>0\ by auto qed subsubsection \Oscillations around the limit in Birkhoff theorem\ text \In this paragraph, we prove that, in Birkhoff theorem with vanishing limit, the Birkhoff sums are infinitely many times arbitrarily close to $0$, both on the positive and the negative side. In the ergodic case, this statement implies for instance that if the Birkhoff sums of an integrable function tend to infinity almost everywhere, then the integral of the function cannot vanish: it has to be strictly positive (while Birkhoff theorem per se does not exclude convergence to infinity at a rate slower than linear). This converts qualitative information (convergence to infinity at an unknown rate) into quantitative information (linear convergence to infinity). This result (sometimes known as Atkinson's Lemma) has been reinvented many times, for instance by Kesten and by Guivarch. It plays an important role in the study of random products of matrices. This is essentially a consequence of the conservativity of the corresponding skew-product, proved in \verb+skew_product_conservative+. Indeed, this implies that, starting from a small set $X \times [-e/2, e/2]$, the skew-product comes back infinitely often to itself, which implies that the Birkhoff sums at these times are bounded by $e$. Showing that the Birkhoff sums come back to $[0,e]$ is a little trickier. We argue by contradiction, and induce on $A \times [0,e/2]$ where $A$ is the set of points where the Birkhoff sums do not come back to $[0,e]$. Then the second coordinate decreases strictly when one iterates the skew product, which is not compatible with conservativity.\ lemma birkhoff_sum_small_asymp_lemma: assumes [measurable]: "integrable M f" and "AE x in M. real_cond_exp M Invariants f x = 0" "e>(0::real)" shows "AE x in M. infinite {n. birkhoff_sum f n x \ {0..e}}" proof - have [measurable]: "f \ borel_measurable M" by auto have [measurable]: "\N. {x \ space M. \N. \n\{N..}. birkhoff_sum f n x \ {0..e}} \ sets M" by auto { fix N assume "N>(0::nat)" define Ax where "Ax = {x \ space M. \n\{N..}. birkhoff_sum f n x \ {0..e}}" then have [measurable]: "Ax \ sets M" by auto define A where "A = Ax \ {0..e/2}" then have A_meas [measurable]: "A \ sets (M \\<^sub>M lborel)" by auto define TN where "TN = T^^N" interpret TN: fmpt M TN unfolding TN_def using fmpt_power by auto define fN where "fN = birkhoff_sum f N" have "TN.birkhoff_sum fN n x = birkhoff_sum f (n*N) x" for n x proof (induction n) case 0 then show ?case by auto next case (Suc n) have "TN.birkhoff_sum fN (Suc n) x = TN.birkhoff_sum fN n x + fN ((TN^^n) x)" using TN.birkhoff_sum_cocycle[of fN n 1] by auto also have "... = birkhoff_sum f (n*N) x + birkhoff_sum f N ((TN^^n) x)" using Suc.IH fN_def by auto also have "...
= birkhoff_sum f (n*N+N) x" unfolding TN_def by (subst funpow_mult, subst mult.commute[of N n], rule birkhoff_sum_cocycle[of f "n*N" N x, symmetric]) finally show ?case by (simp add: add.commute) qed then have not0e: "\x n. x \ Ax \ n \ 0 \ TN.birkhoff_sum fN n x \ {0..e}" unfolding Ax_def by auto let ?TS = "(\(x,y). (T x, y + f x))" let ?MS = "M \\<^sub>M (lborel::real measure)" interpret TS: conservative_mpt ?MS ?TS by (rule skew_product_conservative, auto simp add: assms) let ?TSN = "(\(x,y). (TN x, y + fN x))" have *:"?TSN = ?TS^^N" unfolding TN_def fN_def using skew_product_real_iterates by auto interpret TSN: conservative_mpt ?MS ?TSN using * TS.conservative_mpt_power by auto define MA TA where "MA = restrict_space ?MS A" and "TA = TSN.induced_map A" interpret TA: conservative_mpt MA TA unfolding MA_def TA_def by (rule TSN.induced_map_conservative_mpt, measurable) have *: "\ x y. snd (TA (x,y)) = snd (x,y) + TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x" unfolding TA_def TSN.induced_map_def using TN.skew_product_real_iterates Pair_def by auto have [measurable]: "snd \ borel_measurable ?MS" by auto then have [measurable]: "snd \ borel_measurable MA" unfolding MA_def using measurable_restrict_space1 by blast have "AE z in MA. z \ TSN.recurrent_subset A" unfolding MA_def using TSN.induced_map_recurrent_typical(1)[OF A_meas]. moreover { fix z assume z: "z \ TSN.recurrent_subset A" define x y where "x = fst z" and "y = snd z" then have "z = (x,y)" by simp have "z \ A" using z "TSN.recurrent_subset_incl"(1) by auto then have "x \ Ax" "y \ {0..e/2}" unfolding A_def x_def y_def by auto define y2 where "y2 = y + TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x" have "y2 = snd (TA z)" unfolding y2_def using * \z = (x, y)\ by force moreover have "TA z \ A" unfolding TA_def using \z \ A\ TSN.induced_map_stabilizes_A by auto ultimately have "y2 \ {0..e/2}" unfolding A_def by auto have "TSN.return_time_function A (x,y) \ 0" using \z = (x,y)\ \z \ TSN.recurrent_subset A\ TSN.return_time0[of A] by fast then have "TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x \ {0..e}" using not0e[OF \x \ Ax\] by auto moreover have "TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x \ {-e..e}" using \y \ {0..e/2}\ \y2 \ {0..e/2}\ y2_def by auto ultimately have "TN.birkhoff_sum fN (TSN.return_time_function A (x,y)) x \ {-e..<0}" by auto then have "y2 < y" using y2_def by auto then have "snd(TA z) < snd z" unfolding y_def using \y2 = snd (TA z)\ by auto } ultimately have a: "AE z in MA. snd(TA z) < snd z" by auto then have "AE z in MA. snd(TA z) \ snd z" by auto then have "AE z in MA. snd(TA z) = snd z" using TA.AE_decreasing_then_invariant[of snd] by auto then have "AE z in MA. False" using a by auto then have "space MA \ null_sets MA" by (simp add: AE_iff_null_sets) then have "emeasure MA A = 0" by (metis A_meas MA_def null_setsD1 space_restrict_space2) then have "emeasure ?MS A = 0" unfolding MA_def by (metis A_meas emeasure_restrict_space sets.sets_into_space sets.top space_restrict_space space_restrict_space2) moreover have "emeasure ?MS A = emeasure M Ax * emeasure lborel {0..e/2}" unfolding A_def by (intro lborel.emeasure_pair_measure_Times) auto ultimately have "emeasure M {x \ space M. \n\{N..}. birkhoff_sum f n x \ {0..e}} = 0" using \e>0\ Ax_def by simp then have "{x \ space M. \n\{N..}. birkhoff_sum f n x \ {0..e}} \ null_sets M" by auto } then have "(\N\{0<..}. {x \ space M. \n\{N..}. 
birkhoff_sum f n x \ {0..e}}) \ null_sets M" by (auto simp: greaterThan_0) moreover have "{x \ space M. \(infinite {n. birkhoff_sum f n x \ {0..e}})} \ (\N\{0<..}. {x \ space M. \n\{N..}. birkhoff_sum f n x \ {0..e}})" proof fix x assume H: "x \ {x \ space M. \(infinite {n. birkhoff_sum f n x \ {0..e}})}" then have "x \ space M" by auto have *: "finite {n. birkhoff_sum f n x \ {0..e}}" using H by auto then obtain N where "\n. n \ N \ n \ {n. birkhoff_sum f n x \ {0..e}}" by (metis finite_nat_set_iff_bounded not_less) then have "x \ {x \ space M. \n\{N+1..}. birkhoff_sum f n x \ {0..e}}" using \x \ space M\ by auto moreover have "N+1>0" by auto ultimately show "x \ (\N\{0<..}. {x \ space M. \n\{N..}. birkhoff_sum f n x \ {0..e}})" by auto qed ultimately show ?thesis unfolding eventually_ae_filter by auto qed theorem birkhoff_sum_small_asymp_pos_nonergodic: assumes [measurable]: "integrable M f" and "e > (0::real)" shows "AE x in M. infinite {n. birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}" proof - define g where "g = (\x. f x - real_cond_exp M Invariants f x)" have g_meas [measurable]: "integrable M g" unfolding g_def using real_cond_exp_int(1)[OF assms(1)] assms(1) by auto have "AE x in M. real_cond_exp M Invariants (real_cond_exp M Invariants f) x = real_cond_exp M Invariants f x" by (rule real_cond_exp_F_meas, auto simp add: real_cond_exp_int(1)[OF assms(1)]) then have *: "AE x in M. real_cond_exp M Invariants g x = 0" unfolding g_def using real_cond_exp_diff[OF assms(1) real_cond_exp_int(1)[OF assms(1)]] by auto have "AE x in M. infinite {n. birkhoff_sum g n x \ {0..e}}" by (rule birkhoff_sum_small_asymp_lemma, auto simp add: \e>0\ * g_meas) moreover { fix x assume "x \ space M" "infinite {n. birkhoff_sum g n x \ {0..e}}" { fix n assume H: "birkhoff_sum g n x \ {0..e}" have "birkhoff_sum g n x = birkhoff_sum f n x - birkhoff_sum (real_cond_exp M Invariants f) n x" unfolding g_def using birkhoff_sum_diff by auto also have "... = birkhoff_sum f n x - n * real_cond_exp M Invariants f x" using birkhoff_sum_of_invariants \x \ space M\ by auto finally have "birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}" using H by simp } then have "{n. birkhoff_sum g n x \ {0..e}} \ {n. birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}" by auto then have "infinite {n. birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x .. n * real_cond_exp M Invariants f x + e}}" using \infinite {n. birkhoff_sum g n x \ {0..e}}\ finite_subset by blast } ultimately show ?thesis by auto qed theorem birkhoff_sum_small_asymp_neg_nonergodic: assumes [measurable]: "integrable M f" and "e > (0::real)" shows "AE x in M. infinite {n. birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}" proof - define g where "g = (\x. real_cond_exp M Invariants f x - f x)" have g_meas [measurable]: "integrable M g" unfolding g_def using real_cond_exp_int(1)[OF assms(1)] assms(1) by auto have "AE x in M. real_cond_exp M Invariants (real_cond_exp M Invariants f) x = real_cond_exp M Invariants f x" by (rule real_cond_exp_F_meas, auto simp add: real_cond_exp_int(1)[OF assms(1)]) then have *: "AE x in M. real_cond_exp M Invariants g x = 0" unfolding g_def using real_cond_exp_diff[OF real_cond_exp_int(1)[OF assms(1)] assms(1)] by auto have "AE x in M. infinite {n. 
birkhoff_sum g n x \ {0..e}}" by (rule birkhoff_sum_small_asymp_lemma, auto simp add: \e>0\ * g_meas) moreover { fix x assume "x \ space M" "infinite {n. birkhoff_sum g n x \ {0..e}}" { fix n assume H: "birkhoff_sum g n x \ {0..e}" have "birkhoff_sum g n x = birkhoff_sum (real_cond_exp M Invariants f) n x - birkhoff_sum f n x" unfolding g_def using birkhoff_sum_diff by auto also have "... = n * real_cond_exp M Invariants f x - birkhoff_sum f n x" using birkhoff_sum_of_invariants \x \ space M\ by auto finally have "birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}" using H by simp } then have "{n. birkhoff_sum g n x \ {0..e}} \ {n. birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}" by auto then have "infinite {n. birkhoff_sum f n x \ {n * real_cond_exp M Invariants f x - e .. n * real_cond_exp M Invariants f x}}" using \infinite {n. birkhoff_sum g n x \ {0..e}}\ finite_subset by blast } ultimately show ?thesis by auto qed subsubsection \Conditional expectation for the induced map\ text \Thanks to Birkhoff theorem, one can relate conditional expectations with respect to the invariant sigma algebra, for a map and for a corresponding induced map, as follows.\ proposition Invariants_cond_exp_induced_map: fixes f::"'a \ real" assumes [measurable]: "A \ sets M" "integrable M f" defines "MA \ restrict_space M A" and "TA \ induced_map A" and "fA \ induced_function A f" shows "AE x in MA. real_cond_exp MA (qmpt.Invariants MA TA) fA x = real_cond_exp M Invariants f x * real_cond_exp MA (qmpt.Invariants MA TA) (return_time_function A) x" proof - interpret A: fmpt MA TA unfolding MA_def TA_def by (rule induced_map_fmpt[OF assms(1)]) have "integrable M fA" unfolding fA_def using induced_function_integral_nonergodic(1) assms by auto then have "integrable MA fA" unfolding MA_def by (metis assms(1) integrable_mult_indicator integrable_restrict_space sets.Int_space_eq2) then have a: "AE x in MA. (\n. A.birkhoff_sum fA n x / n) \ real_cond_exp MA A.Invariants fA x" using A.birkhoff_theorem_AE_nonergodic by auto have "AE x in M. (\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x" using birkhoff_theorem_AE_nonergodic assms(2) by auto then have b: "AE x in MA. (\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x" unfolding MA_def by (metis (mono_tags, lifting) AE_restrict_space_iff assms(1) eventually_mono sets.Int_space_eq2) define phiA where "phiA = (\x. return_time_function A x)" have "integrable M phiA" unfolding phiA_def using return_time_integrable by auto then have "integrable MA phiA" unfolding MA_def by (metis assms(1) integrable_mult_indicator integrable_restrict_space sets.Int_space_eq2) then have c: "AE x in MA. (\n. A.birkhoff_sum (\x. real(phiA x)) n x / n) \ real_cond_exp MA A.Invariants phiA x" using A.birkhoff_theorem_AE_nonergodic by auto have "A-recurrent_subset A \ null_sets M" using Poincare_recurrence_thm(1)[OF assms(1)] by auto then have "A - recurrent_subset A \ null_sets MA" unfolding MA_def by (metis Diff_subset assms(1) emeasure_restrict_space null_setsD1 null_setsD2 null_setsI sets.Int_space_eq2 sets_restrict_space_iff) then have "AE x in MA. x \ recurrent_subset A" by (simp add: AE_iff_null MA_def null_setsD2 set_diff_eq space_restrict_space2) moreover have "\x. x \ recurrent_subset A \ phiA x > 0" unfolding phiA_def using return_time0 by fastforce ultimately have *: "AE x in MA. phiA x > 0" by auto have d: "AE x in MA. 
real_cond_exp MA A.Invariants phiA x > 0" by (rule A.real_cond_exp_gr_c, auto simp add: * \integrable MA phiA\) { fix x assume A: "(\n. A.birkhoff_sum fA n x / n) \ real_cond_exp MA A.Invariants fA x" and B: "(\n. birkhoff_sum f n x / n) \ real_cond_exp M Invariants f x" and C: "(\n. A.birkhoff_sum (\x. real(phiA x)) n x / n) \ real_cond_exp MA A.Invariants phiA x" and D: "real_cond_exp MA A.Invariants phiA x > 0" define R where "R = (\n. A.birkhoff_sum phiA n x)" have D2: "ereal(real_cond_exp MA A.Invariants phiA x) > 0" using D by simp have "\n. real(R n)/ n = A.birkhoff_sum (\x. real(phiA x)) n x / n" unfolding R_def A.birkhoff_sum_def by auto moreover have "(\n. A.birkhoff_sum (\x. real(phiA x)) n x / n) \ real_cond_exp MA A.Invariants phiA x" using C by auto ultimately have Rnn: "(\n. real(R n)/n) \ real_cond_exp MA A.Invariants phiA x" by presburger have "\n. ereal(real(R n))/ n = ereal(A.birkhoff_sum (\x. real(phiA x)) n x / n)" unfolding R_def A.birkhoff_sum_def by auto moreover have "(\n. ereal(A.birkhoff_sum (\x. real(phiA x)) n x / n)) \ real_cond_exp MA A.Invariants phiA x" using C by auto ultimately have i: "(\n. ereal(real(R n))/n) \ real_cond_exp MA A.Invariants phiA x" by auto have ii: "(\n. real n) \ \" by (rule id_nat_ereal_tendsto_PInf) have iii: "(\n. ereal(real(R n))/n * real n) \ \" using tendsto_mult_ereal_PInf[OF i D2 ii] by simp have "\n. n > 0 \ ereal(real(R n))/n * real n = R n" by auto then have "eventually (\n. ereal(real(R n))/n * real n = R n) sequentially" using eventually_at_top_dense by auto then have "(\n. real(R n)) \ \" using iii by (simp add: filterlim_cong) then have "(\n. birkhoff_sum f (R n) x / (R n)) \ real_cond_exp M Invariants f x" using limit_along_weak_subseq B by auto then have l: "(\n. (birkhoff_sum f (R n) x / (R n)) * ((R n)/n)) \ real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x" by (rule tendsto_mult, simp add: Rnn) obtain N where N: "\n. n > N \ R n > 0" using \(\n. real(R n)) \ \\ by (metis (full_types) eventually_at_top_dense filterlim_iff filterlim_weak_subseq) then have "\n. n > N \ (birkhoff_sum f (R n) x / (R n)) * ((R n)/n) = birkhoff_sum f (R n) x / n" by auto then have "eventually (\n. (birkhoff_sum f (R n) x / (R n)) * ((R n)/n) = birkhoff_sum f (R n) x / n) sequentially" by simp with tendsto_cong[OF this] have main_limit: "(\n. birkhoff_sum f (R n) x / n) \ real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x" using l by auto have "\n. birkhoff_sum f (R n) x = A.birkhoff_sum fA n x" unfolding R_def fA_def phiA_def TA_def using induced_function_birkhoff_sum[OF assms(1)] by simp then have "\n. birkhoff_sum f (R n) x /n = A.birkhoff_sum fA n x / n" by simp then have "(\n. A.birkhoff_sum fA n x / n) \ real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x" using main_limit by presburger then have "real_cond_exp MA A.Invariants fA x = real_cond_exp M Invariants f x * real_cond_exp MA A.Invariants phiA x" using A LIMSEQ_unique by auto } then show ?thesis using a b c d unfolding phiA_def by auto qed corollary Invariants_cond_exp_induced_map_0: fixes f::"'a \ real" assumes [measurable]: "A \ sets M" "integrable M f" and "AE x in M. real_cond_exp M Invariants f x = 0" defines "MA \ restrict_space M A" and "TA \ induced_map A" and "fA \ induced_function A f" shows "AE x in MA. real_cond_exp MA (qmpt.Invariants MA TA) fA x = 0" proof - have "AE x in MA. 
real_cond_exp M Invariants f x = 0" unfolding MA_def apply (subst AE_restrict_space_iff) using assms(3) by auto then show ?thesis unfolding MA_def TA_def fA_def using Invariants_cond_exp_induced_map[OF assms(1) assms(2)] by auto qed end (*of locale fmpt*) end (*of Invariants.thy*) diff --git a/thys/Ergodic_Theory/Normalizing_Sequences.thy b/thys/Ergodic_Theory/Normalizing_Sequences.thy --- a/thys/Ergodic_Theory/Normalizing_Sequences.thy +++ b/thys/Ergodic_Theory/Normalizing_Sequences.thy @@ -1,1448 +1,1448 @@ (* Author: Sébastien Gouëzel sebastien.gouezel@univ-rennes1.fr License: BSD *) section \Normalizing sequences\ theory Normalizing_Sequences imports Transfer_Operator Asymptotic_Density begin text \In this file, we prove the main result in~\cite{gouezel_normalizing_sequences}: in a conservative system, if a renormalized sequence $S_n f/B_n$ converges in distribution towards a limit which is not a Dirac mass at $0$, then $B_n$ can not grow exponentially fast. We also prove the easier result that, in a probability preserving system, normalizing sequences grow at most polynomially.\ subsection \Measure of the preimages of disjoint sets.\ text \We start with a general result about conservative maps: If $A_n$ are disjoint sets, and $P$ is a finite mass measure which is absolutely continuous with respect to $M$, then $T^{-n}A_n$ is most often small: $P(T^{-n} A_n)$ tends to $0$ in Cesaro average. The proof is written in terms of densities and positive transfer operators, so we first write it in ennreal.\ theorem (in conservative) disjoint_sets_emeasure_Cesaro_tendsto_zero: fixes P::"'a measure" and A::"nat \ 'a set" assumes [measurable]: "\n. A n \ sets M" and "disjoint_family A" "absolutely_continuous M P" "sets P = sets M" "emeasure P (space M) \ \" shows "(\n. (\i (T^^i)-`(A i)))/n) \ 0" proof (rule order_tendstoI) fix delta::ennreal assume "delta > 0" have "\epsilon. epsilon \ 0 \ epsilon \ \ \ 4 * epsilon < delta" apply (cases delta) apply (rule exI[of _ "delta/5"]) using \delta>0\ apply (auto simp add: ennreal_divide_eq_top_iff ennreal_divide_numeral numeral_mult_ennreal intro!: ennreal_lessI) apply (rule exI[of _ 1]) by auto then obtain epsilon where "epsilon \ 0" "epsilon \ \" "4 * epsilon < delta" by auto then have "epsilon > 0" using not_gr_zero by blast define L::ennreal where "L = (1/epsilon) * (1+ emeasure P (space M))" have "L \ \" unfolding L_def using assms(5) divide_ennreal_def ennreal_mult_eq_top_iff \epsilon \ 0\ by auto have "L \ 0" unfolding L_def using \epsilon \ \\ by (simp add: ennreal_divide_eq_top_iff) have "emeasure P (space M) \ epsilon * L" unfolding L_def using \epsilon \ 0\ \epsilon \ \\ \emeasure P (space M) \ \\ apply (cases epsilon) apply (metis (no_types, lifting) add.commute add.right_neutral add_left_mono ennreal_divide_times infinity_ennreal_def mult.left_neutral mult_divide_eq_ennreal zero_le_one) by simp then have "emeasure P (space M) / L \ epsilon" using \L \ 0\ \L \ \\ by (metis divide_le_posI_ennreal mult.commute not_gr_zero) then have "c * (emeasure P (space M)/L) \ c * epsilon" for c by (rule mult_left_mono, simp) text \We introduce the density of $P$.\ define f where "f = RN_deriv M P" have [measurable]: "f \ borel_measurable M" unfolding f_def by auto have [simp]: "P = density M f" unfolding f_def apply (rule density_RN_deriv[symmetric]) using assms by auto have "space P = space M" by auto interpret Pc: finite_measure P apply standard unfolding \space P = space M\ using assms(5) by auto have *: "AE x in P. eventually (\n. 
(\i L * f x) sequentially" proof - have "AE x in M. f x \ \" unfolding f_def apply (intro RN_deriv_finite Pc.sigma_finite_measure) unfolding \space P = space M\ using assms by auto moreover have "AE x in M. f x > 0 \ (\n. (nn_transfer_operator^^n) f x) = \" using recurrence_series_infinite_transfer_operator by auto ultimately have "AE x in M. f x > 0 \ ((\n. (nn_transfer_operator^^n) f x) = \ \ f x \ \)" by auto then have AEP: "AE x in P. (\n. (nn_transfer_operator^^n) f x) = \ \ f x \ \" unfolding \P = density M f\ using AE_density[of f M] by auto moreover have "eventually (\n. (\i L * f x) sequentially" if "(\n. (nn_transfer_operator^^n) f x) = \ \ f x \ \" for x proof - have "(\n. (\i (\i. (nn_transfer_operator^^i) f x)" by (simp add: summable_LIMSEQ) moreover have "(\i. (nn_transfer_operator^^i) f x) > L * f x" using that \L \ \\ by (auto simp add: ennreal_mult_less_top top.not_eq_extremum) ultimately show ?thesis by (rule order_tendstoD(1)) qed ultimately show ?thesis by auto qed have "\U N. U \ sets P \ (\n \ N. \x \ U. (\i L * f x) \ emeasure P (space P - U) < epsilon" apply (rule Pc.Egorov_lemma[OF _ *]) using \epsilon\0\ by (auto simp add: zero_less_iff_neq_zero) then obtain U N1 where [measurable]: "U \ sets M" and U: "emeasure P (space M - U) < epsilon" "\n x. n \ N1 \ x \ U \ L * f x < (\isets P = sets M\ \space P = space M\ by blast have "U \ space M" by (rule sets.sets_into_space, simp) define K where "K = N1 + 1" have "K \ N1" "K \ 1" unfolding K_def by auto have *: "K * emeasure P (space M) / epsilon \ \" using \emeasure P (space M) \ \\ \epsilon \ 0\ ennreal_divide_eq_top_iff ennreal_mult_eq_top_iff by auto obtain N2::nat where N2: "N2 \ K * emeasure P (space M) / epsilon" using ennreal_archimedean[OF *] by auto define N where "N = 2 * K + N2" have "(\k\{.. (T^^k)-`(A k))) / n < delta" if "n \ N" for n proof - have "n \ 2 * K" "of_nat n \ ((of_nat N2)::ennreal)" using that unfolding N_def by auto then have "n \ K * emeasure P (space M) / epsilon" using N2 order_trans by blast then have "K * emeasure P (space M) \ n * epsilon" using \epsilon > 0\ \epsilon \ \\ by (smt divide_ennreal_def divide_right_mono_ennreal ennreal_mult_divide_eq ennreal_mult_eq_top_iff infinity_ennreal_def mult.commute not_le order_le_less) have "n \ 1" using \n \ 2 * K\ \K \ 1\ by auto have *: "((\k\{K.. (\i\{K..k \ {K.. ((\k\{K-j..j < K\ by auto also have "... = (\i\{K..x. x+j"]) using \j < K\ by auto finally show ?thesis by simp qed have "L * (\ k \ {K.. (T^^k)-`(A k))) = L * (\ k \ {K..\<^sup>+x. indicator (U \ (T^^k)-`(A k)) x \P))" by auto also have "... = (\ k \ {K..\<^sup>+x. L * indicator (U \ (T^^k)-`(A k)) x \P))" unfolding sum_distrib_left by (intro sum.cong nn_integral_cmult[symmetric], auto) also have "... = (\ k \ {K..\<^sup>+x. f x * (L * indicator (U \ (T^^k)-`(A k)) x) \M))" unfolding \P = density M f\ by (intro sum.cong nn_integral_density, auto) also have "... = (\ k \ {K..\<^sup>+x. f x * L * indicator U x * indicator (A k) ((T^^k) x) \M))" by (intro sum.cong nn_integral_cong, auto simp add: algebra_simps indicator_def) also have "... \ (\ k \ {K..\<^sup>+x. (\j \ {..M))" apply (intro sum_mono nn_integral_mono) using U(2)[OF \K \ N1\] unfolding indicator_def using less_imp_le by (auto simp add: algebra_simps) also have "... = (\\<^sup>+x. (\k\{K..j \ {..M)" apply (subst nn_integral_sum, simp) unfolding sum_distrib_right by auto also have "... = (\\<^sup>+x. (\j \ {..k\{K..M)" by (rule nn_integral_cong, rule sum.swap) also have "... = (\j \ {..\<^sup>+x. 
(nn_transfer_operator^^j) f x * (\k\{K..M))" apply (subst nn_integral_sum, simp) unfolding sum_distrib_left by auto also have "... \ (\j \ {..\<^sup>+x. (nn_transfer_operator^^j) f x * (\i\{K..M))" apply (rule sum_mono, rule nn_integral_mono) using * by (auto simp add: mult_left_mono) also have "... = (\i\{K..j \ {..\<^sup>+x. (nn_transfer_operator^^j) f x * indicator (A (i-j)) ((T^^(i-j)) x) \M)))" unfolding sum_distrib_left using sum.swap by (subst nn_integral_sum, auto) also have "... = (\i\{K..j \ {..\<^sup>+x. f x * indicator (A (i-j)) ((T^^(i-j)) ((T^^j) x)) \M)))" by (subst nn_transfer_operator_intg_Tn, auto) also have "... = (\i\{K..\<^sup>+x. f x * (\j \ {..M))" unfolding sum_distrib_left by (subst nn_integral_sum, auto) also have "... = (\i\{K..\<^sup>+x. (\j \ {..P))" unfolding \P = density M f\ funpow_add comp_def apply (rule sum.cong, simp) by (rule nn_integral_density[symmetric], auto) also have "... = (\i\{K..\<^sup>+x. (\j \ {..P))" by auto also have "... \ (\i\{K..\<^sup>+x. (1::ennreal) \P))" apply (rule sum_mono) apply (rule nn_integral_mono) apply (rule disjoint_family_indicator_le_1) using assms(2) apply (auto simp add: disjoint_family_on_def) by (metis Int_iff diff_diff_cancel equals0D le_less le_trans) also have "... \ n * emeasure P (space M)" using assms(4) by (auto intro!: mult_right_mono) finally have *: "L * (\ k \ {K.. (T^^k)-`(A k))) \ n * emeasure P (space M)" by simp have Ineq: "(\ k \ {K.. (T^^k)-`(A k))) \ n * emeasure P (space M) / L" using divide_right_mono_ennreal[OF *, of L] \L \ 0\ by (metis (no_types, lifting) \L \ \\ ennreal_mult_divide_eq infinity_ennreal_def mult.commute) have I: "{.. {K.. {n-K..n \ 2 * K\ by auto have "(\k\{.. (T^^k)-`(A k))) \ (\k\{.. (T^^k)-`(A k)) + epsilon)" proof (rule sum_mono) fix k have "emeasure P (space M \ (T^^k)-`(A k)) \ emeasure P ((U \ (T^^k)-`(A k)) \ (space M - U))" by (rule emeasure_mono, auto) also have "... \ emeasure P (U \ (T^^k)-`(A k)) + emeasure P (space M - U)" by (rule emeasure_subadditive, auto) also have "... \ emeasure P (U \ (T^^k)-`(A k)) + epsilon" using U(1) by auto finally show "emeasure P (space M \ (T ^^ k) -` A k) \ emeasure P (U \ (T ^^ k) -` A k) + epsilon" by simp qed also have "... = (\k\{.. {K.. {n-K.. (T^^k)-`(A k))) + (\k\{..k\{.. (T^^k)-`(A k))) + (\k\{K.. (T^^k)-`(A k))) + (\k\{n-K.. (T^^k)-`(A k))) + n * epsilon" apply (subst sum.union_disjoint) apply simp apply simp using \n \ 2 * K\ apply (simp add: ivl_disj_int_one(2) ivl_disj_un_one(2)) by (subst sum.union_disjoint, auto) also have "... \ (\k\{..k\{n-K..U \ space M\ by auto also have "... = K * emeasure P (space M) + n * emeasure P (space M)/L + K * emeasure P (space M) + n * epsilon" using \n \ 2 * K\ by auto also have "... \ n * epsilon + n * epsilon + n * epsilon + n * epsilon" apply (intro add_mono) using \K * emeasure P (space M) \ n * epsilon\ \of_nat n * (emeasure P (space M)/L) \ of_nat n * epsilon\ ennreal_times_divide by auto also have "... = n * (4 * epsilon)" by (metis (no_types, lifting) add.assoc distrib_right mult.left_commute mult_2 numeral_Bit0) also have "... < n * delta" using \4 * epsilon < delta\ \n \ 1\ by (simp add: ennreal_mult_strict_left_mono ennreal_of_nat_eq_real_of_nat) finally show ?thesis apply (subst divide_less_ennreal) using \n \ 1\ of_nat_less_top by (auto simp add: mult.commute) qed then show "eventually (\n. (\k\{.. (T^^k)-`(A k))) / n < delta) sequentially" unfolding eventually_sequentially by auto qed (simp) text \We state the previous theorem using measures instead of emeasures. 
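Spelled out (an informal restatement for orientation only, writing $\mathbb{P}$ for the real-valued measure of $P$, with preimages intersected with the space of $M$ as in the formal statement below), the conclusion reads
$$\frac{1}{n}\sum_{i=0}^{n-1} \mathbb{P}\bigl(T^{-i} A_i\bigr) \;\longrightarrow\; 0 \qquad (n \to \infty).$$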
This is clearly equivalent, but one has to play with ennreal carefully to prove it.\ theorem (in conservative) disjoint_sets_measure_Cesaro_tendsto_zero: fixes P::"'a measure" and A::"nat \ 'a set" assumes [measurable]: "\n. A n \ sets M" and "disjoint_family A" "absolutely_continuous M P" "sets P = sets M" "emeasure P (space M) \ \" shows "(\n. (\i (T^^i)-`(A i)))/n) \ 0" proof - have "space P = space M" using assms(4) sets_eq_imp_space_eq by blast moreover have "emeasure P Q \ emeasure P (space P)" for Q by (simp add: emeasure_space) ultimately have [simp]: "emeasure P Q \ \" for Q using \emeasure P (space M) \ \\ neq_top_trans by auto have *: "ennreal ((\i (T^^i)-`(A i)))/n) = (\i (T^^i)-`(A i)))/n" if "n > 0" for n apply (subst divide_ennreal[symmetric]) apply (auto intro!: sum_nonneg that simp add: ennreal_of_nat_eq_real_of_nat[symmetric]) apply(subst sum_ennreal[symmetric], simp) apply (subst emeasure_eq_ennreal_measure) by auto have "eventually (\n. ennreal ((\i (T^^i)-`(A i)))/n) = (\i (T^^i)-`(A i)))/n) sequentially" unfolding eventually_sequentially apply (rule exI[of _ 1]) using * by auto then have *: "(\n. ennreal ((\i (T^^i)-`(A i)))/n)) \ ennreal 0" using disjoint_sets_emeasure_Cesaro_tendsto_zero[OF assms] tendsto_cong by force show ?thesis apply (subst tendsto_ennreal_iff[symmetric]) using * apply auto unfolding eventually_sequentially apply (rule exI[of _ 1]) by (auto simp add: divide_simps intro!: sum_nonneg) qed text \As convergence to $0$ in Cesaro mean is equivalent to convergence to $0$ along a density one sequence, we obtain the equivalent formulation of the previous theorem.\ theorem (in conservative) disjoint_sets_measure_density_one_tendsto_zero: fixes P::"'a measure" and A::"nat \ 'a set" assumes [measurable]: "\n. A n \ sets M" and "disjoint_family A" "absolutely_continuous M P" "sets P = sets M" "emeasure P (space M) \ \" shows "\B. lower_asymptotic_density B = 1 \ (\n. measure P (space M \ (T^^n)-`(A n)) * indicator B n) \ 0" by (rule cesaro_imp_density_one[OF _ disjoint_sets_measure_Cesaro_tendsto_zero[OF assms]], simp) subsection \Normalizing sequences do not grow exponentially in conservative systems\ text \We prove the main result in~\cite{gouezel_normalizing_sequences}: in a conservative system, if a renormalized sequence $S_n f/B_n$ converges in distribution towards a limit which is not a Dirac mass at $0$, then $B_n$ cannot grow exponentially fast. The proof is expressed in the following locale. The main theorem is Theorem~\verb+subexponential_growth+ below. To prove it, we need several preliminary estimates.\ text \We will use the fact that a real random variable which is not the Dirac mass at $0$ gives positive mass to a set bounded away from $0$.\ lemma (in real_distribution) not_Dirac_0_imp_positive_mass_away_0: assumes "prob {0} < 1" shows "\a. a > 0 \ prob {x. abs(x) > a} > 0" proof - have "1 = prob UNIV" using prob_space by auto also have "... = prob {0} + prob (UNIV -{0})" by (subst finite_measure_Union[symmetric], auto) finally have "0 < prob (UNIV -{0})" using assms by auto also have "... \ prob (\n::nat. {x. abs(x)>(1/2)^n})" apply (rule finite_measure_mono) by (auto, meson one_less_numeral_iff reals_power_lt_ex semiring_norm(76) zero_less_abs_iff) finally have "prob (\n::nat. {x. abs(x)>(1/2)^n}) \ 0" by simp then have "\n. prob {x. abs(x)>(1/2)^n} \ 0" using measure_countably_zero[of "\n. {x. abs(x)>(1/2)^n}"] by force then obtain N where N: "prob {x.
abs(x) > (1/2)^N} \ 0" by blast show ?thesis apply (rule exI[of _ "(1/2)^N"]) using N by (auto simp add: zero_less_measure_iff) qed locale conservative_limit = conservative M + PS: prob_space P + PZ: real_distribution Z for M::"'a measure" and P::"'a measure" and Z::"real measure" + fixes f g::"'a \ real" and B::"nat \ real" assumes PabsM: "absolutely_continuous M P" and Bpos: "\n. B n > 0" and M [measurable]: "f \ borel_measurable M" "g \ borel_measurable M" "sets P = sets M" and non_trivial: "PZ.prob {0} < 1" and conv: "weak_conv_m (\n. distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) Z" begin text \For measurability statements, we want every question about $Z$ or $P$ to reduce to a question about Borel sets of $M$. We add in the next lemma all the statements that are needed so that this happens automatically.\ lemma PSZ [simp, measurable_cong]: "space P = space M" "h \ borel_measurable P \ h \ borel_measurable M" "A \ sets P \ A \ sets M" using M sets_eq_imp_space_eq real_distribution_def by auto text \The first nontrivial upper bound is the following lemma, asserting that $B_{n+1}$ cannot be much larger than $\max B_i$ for $i \leq n$. This is proved by saying that $S_{n+1} f = f + (S_n f) \circ T$, and we know that $S_n f$ is not too large on a set of very large measure, so the same goes for $(S_n f) \circ T$ by a non-singularity argument. Except that the measure $P$ does not have to be nonsingular for the map $T$, so one has to tweak this idea a little, using transfer operators and conservativity. This is easier to do when the density of $P$ is bounded by $1$, so we first give the proof under this assumption, and then we reduce to this case by replacing $M$ with $M+P$ in the second lemma below.\ text \First, let us prove the lemma assuming that the density $h$ of $P$ is bounded by $1$.\ lemma upper_bound_C_aux: assumes "P = density M h" "\x. h x \ 1" and [measurable]: "h \ borel_measurable M" shows "\C\1. \n. B (Suc n) \ C * Max {B i|i. i \ n}" proof - obtain a0 where a0: "a0 > 0" "PZ.prob {x. abs(x) > a0} > 0" using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast define a where "a = a0/2" have "a > 0" using \a0 > 0\ unfolding a_def by auto define alpha where "alpha = PZ.prob {x. abs (x) > a0}/4" have "alpha > 0" unfolding alpha_def using a0 by auto have "PZ.prob {x. abs (x) > 2 * a} > 3 * alpha" using a0 unfolding a_def alpha_def by auto text \First step: choose $K$ such that, with probability $1-\alpha$, one has $\sum_{1 \leq k < K} h(T^k x) \geq 1$. This follows directly from conservativity.\ have "\K. K \ 1 \ PS.prob {x \ space M. (\i\{1.. 1} \ 1 - alpha" proof - have *: "AE x in P. eventually (\n. (\i 2) sequentially" proof - have "AE x in M. h x > 0 \ (\i. h ((T^^i) x)) = \" using recurrence_series_infinite by auto then have AEP: "AE x in P. (\i. h ((T^^i) x)) = \" unfolding \P = density M h\ using AE_density[of h M] by auto moreover have "eventually (\n. (\i 2) sequentially" if "(\i. h ((T^^i) x)) = \" for x proof - have "(\n. (\i (\i. h ((T^^i) x))" by (simp add: summable_LIMSEQ) moreover have "(\i. h ((T^^i) x)) > 2" using that by auto ultimately show ?thesis by (rule order_tendstoD(1)) qed ultimately show ?thesis by auto qed have "\U N. U \ sets P \ (\n \ N. \x \ U. (\i 2) \ emeasure P (space P - U) < alpha" apply (rule PS.Egorov_lemma) apply measurable using M(3) measurable_ident_sets apply blast using * \alpha > 0\ by auto then obtain U N1 where [measurable]: "U \ sets M" and U: "emeasure P (space M - U) < alpha" "\n x.
n \ N1 \ x \ U \ 2 < (\i space M" by (rule sets.sets_into_space, simp) define K where "K = N1+1" then have "K \ 1" by auto have Ux: "(\i\{1.. 1" if "x \ U" for x proof - have *: "1 < t" if "2 < 1 + t" for t::ennreal apply (cases t) using that apply auto by (metis ennreal_add_left_cancel_less ennreal_less_iff ennreal_numeral le_numeral_extra(1) numeral_One one_add_one) have "2 < (\i \ {..i \ {0}. h ((T^^i) x)) + (\i \ {1..K \ 1\ by auto also have "... = h x + (\i \ {1.. 1 + (\i \ {1.. space M. (\i\{1.. 1} \ 1 - alpha" proof - have "PS.prob (space P - U) < alpha" using U(1) by (simp add: PS.emeasure_eq_measure ennreal_less_iff) then have "1 - alpha < PS.prob U" using PS.prob_compl by auto also have "... \ PS.prob {x \ space M. (\i\{1.. 1}" apply (rule PS.finite_measure_mono) using Ux sets.sets_into_space[OF \U \ sets M\] by auto finally show ?thesis by simp qed then show ?thesis using \K \ 1\ by auto qed then obtain K where K: "K \ 1" "PS.prob {x \ space M. (\i\{1.. 1} \ 1 - alpha" by blast text \Second step: obtain $D$ which controls the tails of the $K$ first Birkhoff sums of $f$.\ have "\D. PS.prob {x \ space M. \k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D} \ 1 - alpha" proof - have D: "\D. PS.prob {x \ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D} < alpha/K \ D \ 1" for k apply (rule PS.random_variable_small_tails) using \K \ 1\ \alpha > 0\ by auto have "\D. \k. PS.prob {x \ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D k} < alpha/K \ D k \ 1" apply (rule choice) using D by auto then obtain D where D: "\k. PS.prob {x \ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D k} < alpha/K" by blast define D0 where "D0 = Max (D`{..K})" have "PS.prob {x \ space M. \k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0} \ 1 - alpha" proof - have D1: "PS.prob {x \ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0} < alpha/K" if "k \ K" for k proof - have "D k \ D0" unfolding D0_def apply (rule Max_ge) using that by auto have "PS.prob {x \ space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0} \ PS.prob {x \ space P. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D k}" apply (rule PS.finite_measure_mono) using \D k \ D0\ by auto then show ?thesis using D[of k] by auto qed have "PS.prob (\k\ {.. space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0}) \ (\k \ {.. space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0})" by (rule PS.finite_measure_subadditive_finite, auto) also have "... \ (\k \ {..K \ 1\ by auto finally have "PS.prob (\k\ {.. space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0}) \ alpha" by simp then have "1 - alpha \ 1 - PS.prob (\k\ {.. space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0})" by simp also have "... = PS.prob (space P - (\k\ {.. space M. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0}))" by (rule PS.prob_compl[symmetric], auto) also have "... \ PS.prob {x \ space M. \k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D0}" by (rule PS.finite_measure_mono, auto) finally show ?thesis by simp qed then show ?thesis by blast qed then obtain D where D: "PS.prob {x \ space M. \k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D} \ 1 - alpha" by blast text \Third step: obtain $\epsilon$ small enough so that, for any set $U$ with probability less than $\epsilon$ and for any $k\leq K$, one has $\int_U \hat T^k h < \delta$, where $\delta$ is very small.\ define delta where "delta = alpha/(2 * K)" then have "delta > 0" using \alpha > 0\ \K \ 1\ by auto have "\epsilon > (0::real). \U \ sets P. \k \ K. 
emeasure P U < epsilon \ (\\<^sup>+x\U. ((nn_transfer_operator^^k) h) x \P) \ delta" proof - have *: "\epsilon>(0::real). \U\sets P. emeasure P U < epsilon \ (\\<^sup>+x\U. ((nn_transfer_operator^^k) h) x \P) < delta" for k proof (rule small_nn_integral_on_small_sets[OF _ \0 < delta\]) have "(\\<^sup>+x. ((nn_transfer_operator^^k) h) x \P) = (\\<^sup>+x. h x * ((nn_transfer_operator^^k) h) x \M)" unfolding \P = density M h\ by (rule nn_integral_density, auto) also have "... \ (\\<^sup>+x. 1 * ((nn_transfer_operator^^k) h) x \M)" apply (intro nn_integral_mono mult_right_mono) using assms(2) by auto also have "... = (\\<^sup>+x. 1 * h x \M)" by (rule nn_transfer_operator_intTn_g, auto) also have "... = emeasure P (space M)" using PS.emeasure_space_1 by (simp add: emeasure_density \P = density M h\) also have "... < \" using PS.emeasure_space_1 by simp finally show "(\\<^sup>+x. ((nn_transfer_operator^^k) h) x \P) \ \" by auto qed (simp) have "\epsilon. \k. epsilon k > (0::real) \ (\U\sets P. emeasure P U < epsilon k \ (\\<^sup>+x\U. ((nn_transfer_operator^^k) h) x \P) < delta)" apply (rule choice) using * by blast then obtain epsilon::"nat \ real" where E: "\k. epsilon k > 0" "\k U. U \ sets P \ emeasure P U < epsilon k \ (\\<^sup>+x\U. ((nn_transfer_operator^^k) h) x \P) < delta" by blast define epsilon0 where "epsilon0 = Min (epsilon`{..K})" have "epsilon0 \ epsilon`{..K}" unfolding epsilon0_def by (rule Min_in, auto) then have "epsilon0 > 0" using E(1) by auto have small_setint: "(\\<^sup>+x\U. ((nn_transfer_operator^^k) h) x \P) \ delta" if "k \ K" "U \ sets P" "emeasure P U < epsilon0" for k U proof - have *: "epsilon0 \ epsilon k" unfolding epsilon0_def apply (rule Min_le) using \k \ K\ by auto show ?thesis apply (rule less_imp_le[OF E(2)[OF \U \ sets P\]]) using ennreal_leI[OF *] \emeasure P U < epsilon0\ by auto qed then show ?thesis using \epsilon0 > 0\ by auto qed then obtain epsilon::real where "epsilon > 0" and small_setint: "\k U. k \ K \ U \ sets P \ emeasure P U < epsilon \ (\\<^sup>+x\U. ((nn_transfer_operator^^k) h) x \P) \ delta" by blast text \Fourth step: obtain an index after which the convergence in distribution ensures that the probability to be larger than $2 a$ and to be very large is comparable for $(g+S_n f)/B_n$ and for $Z$.\ obtain C0 where "PZ.prob {x. abs(x) \ C0} < epsilon" "C0 \ 1" using PZ.random_variable_small_tails[OF \epsilon > 0\, of "\x. x"] by auto have A: "eventually (\n. measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha) sequentially" apply (rule open_set_weak_conv_lsc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv \PZ.prob {x. abs (x) > 2 * a} > 3 * alpha\) have B: "eventually (\n. measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) \ C0} < epsilon) sequentially" apply (rule closed_set_weak_conv_usc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv \PZ.prob {x. abs(x) \ C0} < epsilon\) obtain N where N: "\n. n \ N \ measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha" "\n. n \ N \ measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) \ C0} < epsilon" using eventually_conj[OF A B] unfolding eventually_sequentially by blast text \Fifth step: obtain a trivial control on $B_n$ for $n$ smaller than $N$.\ define C1 where "C1 = Max {B k/B 0 |k. 
k \ N+K+1}" define C where "C = max (max C0 C1) (max (D / (a * B 0)) (C0/a))" have "C \ 1" unfolding C_def using \C0 \ 1\ by auto text \Now, we can put everything together. If $n$ is large enough, we prove that $B_{n+1} \leq C \max_{i\leq n} B_i$, by contradiction.\ have geK: "B (Suc n) \ C * Max {B i |i. i \ n}" if "n > N + K" for n proof (rule ccontr) have "Suc n \ N" using that by auto let ?h = "(\x. (g x + birkhoff_sum f (Suc n) x) / B (Suc n))" have "measure (distr P borel ?h) {x. abs (x) > 2 * a} = measure P (?h-` {x. abs (x) > 2 * a} \ space P)" by (rule measure_distr, auto) also have "... = measure P {x \ space M. abs(?h x) > 2 * a}" by (rule HOL.cong[of "measure P"], auto) finally have A: "PS.prob {x \ space M. abs(?h x) > 2 * a} > 3 * alpha" using N(1)[OF \Suc n \ N\] by auto have *: "PS.prob {y \ space M. C0 \ \g y + birkhoff_sum f (Suc n - k) y\ / \B (Suc n - k)\} < epsilon" if "k \ {1.. N" using that \n > N + K\ by auto let ?h = "(\x. (g x + birkhoff_sum f (Suc n-k) x) / B (Suc n-k))" have "measure (distr P borel ?h) {x. abs (x) \ C0} = measure P (?h-` {x. abs (x) \ C0} \ space P)" by (rule measure_distr, auto) also have "... = measure P {x \ space M. abs(?h x) \ C0}" by (rule HOL.cong[of "measure P"], auto) finally show ?thesis using N(2)[OF \Suc n - k \ N\] by auto qed have P_le_epsilon: "emeasure P {y \ space M. C0 \ \g y + birkhoff_sum f (Suc n - k) y\ / \B (Suc n - k)\} < ennreal epsilon" if "k \ {1..epsilon > 0\ ennreal_lessI unfolding PS.emeasure_eq_measure by auto assume "\(B (Suc n) \ C * Max {B i |i. i \ n})" then have "C * Max {B i |i. i \ n} \ B (Suc n)" by simp moreover have "C * B 0 \ C * Max {B i |i. i \ n}" apply (rule mult_left_mono, rule Max_ge) using \C \ 1\ by auto ultimately have "C * B 0 \ B (Suc n)" by auto have "(D / (a * B 0)) * B 0 \ C * B 0" apply (rule mult_right_mono) unfolding C_def using Bpos[of 0] by auto then have "(D / (a * B 0)) * B 0 \ B (Suc n)" using \C * B 0 \ B (Suc n)\ by simp then have "D \ a * B (Suc n)" using Bpos[of 0] \a > 0\ by (auto simp add: divide_simps algebra_simps) define X where "X = {x \ space M. abs((g x + birkhoff_sum f (Suc n) x)/B(Suc n)) > 2 * a} \ {x \ space M. \k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D} \ {x \ space M. (\i\{1.. 1}" have [measurable]: "X \ sets M" unfolding X_def by auto have "3 * alpha + (1-alpha) + (1-alpha) \ PS.prob {x \ space M. abs((g x + birkhoff_sum f (Suc n) x)/B(Suc n)) > 2 * a} + PS.prob {x \ space M. \k < K. abs(g x + birkhoff_sum f k x - g((T^^k) x)) \ D} + PS.prob {x \ space M. (\i\{1.. 1}" using A D K(2) by auto also have "... \ 2 + PS.prob X" unfolding X_def by (rule PS.sum_measure_le_measure_inter3, auto) finally have "PS.prob X \ alpha" by auto have I: "(\y. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k))) ((T^^k) x) \ C0" if "x \ X" "k \ {1.. abs(g x + birkhoff_sum f (Suc n) x)" using \x \ X\ Bpos[of "Suc n"] unfolding X_def by (auto simp add: divide_simps) also have "... = abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x) + (g x + birkhoff_sum f k x - g((T^^k) x)))" using \n > N+K\ \k \ {1.. birkhoff_sum_cocycle[of f k "Suc n - k" x] by auto also have "... \ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x)) + abs(g x + birkhoff_sum f k x - g((T^^k) x))" by auto also have "... \ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x)) + D" using \x \ X\ \k \ {1.. unfolding X_def by auto also have "... 
\ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x)) + a * B (Suc n)" using \D \ a * B (Suc n)\ by simp finally have *: "a * B (Suc n) \ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x))" by simp have "(C0/a) * B (Suc n - k) \ C * B (Suc n - k)" apply (rule mult_right_mono) unfolding C_def using less_imp_le[OF Bpos] by auto also have "... \ C * Max {B i |i. i \ n}" apply (rule mult_left_mono, rule Max_ge) using \k \ {1.. \C \ 1\ by auto also have "... \ B (Suc n)" by fact finally have "C0 * B (Suc n - k) \ a * B (Suc n)" using \a>0\ by (simp add: divide_simps algebra_simps) then have "C0 * B (Suc n - k) \ abs(g((T^^k) x) + birkhoff_sum f (Suc n -k) ((T^^k) x))" using * by auto then show ?thesis using Bpos[of "Suc n - k"] by (simp add: divide_simps) qed have J: "1 \ (\k\{1..y. h y * indicator {y \ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} y) ((T^^k) x))" if "x \ X" for x proof - have "x \ space M" using \x \ X\ unfolding X_def by auto have "1 \ (\k \ {1..x \ X\ unfolding X_def by auto also have "... = (\k \ {1.. space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} ((T^^k) x))" apply (rule sum.cong) unfolding indicator_def using I[OF \x \ X\] T_spaceM_stable(2)[OF \x \ space M\] by auto finally show ?thesis by simp qed have "ennreal alpha \ emeasure P X" using \PS.prob X \ alpha\ by (simp add: PS.emeasure_eq_measure) also have "... = (\\<^sup>+x. indicator X x \P)" by auto also have "... \ (\\<^sup>+x. (\k\{1..y. h y * indicator {y \ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} y) ((T^^k) x)) \P)" apply (rule nn_integral_mono) using J unfolding indicator_def by fastforce also have "... = (\k\{1..\<^sup>+x. (\y. h y * indicator {y \ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} y) ((T^^k) x) \P))" by (rule nn_integral_sum, auto) also have "... = (\k\{1..\<^sup>+x. (\y. h y * indicator {y \ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} y) ((T^^k) x) * h x \M))" unfolding \P = density M h\ by (auto intro!: sum.cong nn_integral_densityR[symmetric]) also have "... = (\k\{1..\<^sup>+x. h x * indicator {y \ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} x * ((nn_transfer_operator^^k) h) x \M))" by (auto intro!: sum.cong nn_transfer_operator_intTn_g[symmetric]) also have "... = (\k\{1..\<^sup>+x. ((nn_transfer_operator^^k) h) x * indicator {y \ space M. abs((g y + birkhoff_sum f (Suc n - k) y)/ B (Suc n - k)) \ C0} x \P))" unfolding \P = density M h\ by (subst nn_integral_density, auto intro!: sum.cong simp add: algebra_simps) also have "... \ (\k\{1..k\{1..delta > 0\] by (rule sum_ennreal) finally have "alpha \ (\k\{1..delta > 0\ by auto also have "... \ K * delta" using \delta > 0\ by auto finally show False unfolding delta_def using \K \ 1\ \alpha > 0\ by (auto simp add: divide_simps algebra_simps) qed text \If $n$ is not large, we get the same bound in a trivial way, as there are only finitely many cases to consider and we have adjusted the constant $C$ so that it works for all of them.\ have leK: "B (Suc n) \ C * Max {B i |i. i \ n}" if "n \ N+K" for n proof - have "B (Suc n)/B 0 \ Max {B k/B 0 |k. k \ N+K+1}" apply (rule Max_ge, simp) using \n \ N+K\ by auto also have "... \ C" unfolding C_def C1_def by auto finally have "B (Suc n) \ C * B 0" using Bpos[of 0] by (simp add: divide_simps) also have "... \ C * Max {B i |i. 
i \ n}" apply (rule mult_left_mono) apply (rule Max_ge) using \C \ 1\ by auto finally show ?thesis by simp qed have "B (Suc n) \ C * Max {B i |i. i \ n}" for n using geK[of n] leK[of n] by force then show ?thesis using \C \ 1\ by auto qed text \Then, we prove the lemma without further assumptions, reducing to the previous case by replacing $M$ with $M+P$. We do this at the level of densities since the addition of measures is not defined in the library (and it would be problematic as measures carry their sigma-algebra, so what should one do when the sigma-algebras do not coincide?)\ lemma upper_bound_C: "\C\1. \n. B (Suc n) \ C * Max {B i|i. i \ n}" proof - text \We introduce the density of $P$, and show that it is almost everywhere finite.\ define h where "h = RN_deriv M P" have [measurable]: "h \ borel_measurable M" unfolding h_def by auto have P [simp]: "P = density M h" unfolding h_def apply (rule density_RN_deriv[symmetric]) using PabsM by auto have "space P = space M" by auto have *: "AE x in M. h x \ \" unfolding h_def apply (rule RN_deriv_finite) using PS.sigma_finite_measure_axioms PabsM by auto have **: "null_sets (density M (\x. 1 + h x)) = null_sets M" by (rule null_sets_density, auto) text \We introduce the new system with invariant measure $M+P$, given by the density $1+h$.\ interpret A: conservative "density M (\x. 1 + h x)" T apply (rule conservative_density) using * by auto interpret B: conservative_limit T "density M (\x. 1 + h x)" P Z f g B apply standard using conv Bpos non_trivial absolutely_continuousI_density[OF \h \ borel_measurable M\] unfolding absolutely_continuous_def ** by auto text \We obtain the result by applying the lemma above to the new dynamical system. We have to check the additional assumption that the density of $P$ with respect to the new measure $M + P$ is bounded by $1$. Since this density is $h/(1+h)$, this is trivial modulo a computation in ennreal that is not automated (yet?).\ have z: "1 = ennreal 1" by auto have Trivial: "a = (1+a) * (a/(1+a))" if "a \ \" for a::ennreal apply (cases a) apply auto unfolding z ennreal_plus_if apply (subst divide_ennreal) apply simp apply simp apply (subst ennreal_mult'[symmetric]) using that by auto have Trivial2: "a / (1+a) \ 1" for a::ennreal apply (cases a) apply auto unfolding z ennreal_plus_if apply (subst divide_ennreal) by auto show ?thesis apply (rule B.upper_bound_C_aux[of "\x. h x/(1 + h x)"]) using * Trivial Trivial2 by (auto simp add: density_density_eq density_unique_iff) qed text \The second main upper bound is the following. Again, it proves that $B_{n+1} \leq L \max_{i \leq n} B_i$, for some constant $L$, but with two differences. First, $L$ only depends on the distribution of $Z$ (which is stronger). Second, this estimate is only proved along a density $1$ sequence of times (which is weaker). The first point implies that this lemma will also apply to $T^j$, with the same $L$, which amounts to replacing $L$ by $L^{1/j}$, making it in practice arbitrarily close to $1$. The second point is problematic at first sight, but for the exceptional times we will use the bound of the previous lemma so this will not really create problems. For the proof, we split the sum $S_{n+1} f$ as $S_n f + f \circ T^n$. If $B_{n+1}$ is much larger than $B_n$, we deduce that $S_n f$ is much smaller than $S_{n+1}f$ with large probability, which means that $f \circ T^n$ is larger than anything that has been seen before.
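In quantitative terms (a sketch of the estimate carried out in the proof below, where $m_n$ abbreviates $\max_{i \leq n} B_i$): if $B_{n+1} > L m_n$ while $|g(x) + S_n f(x)| < (L-1) a B_n$ and $2 a B_{n+1} < |g(x) + S_{n+1} f(x)| < (L-1) a B_{n+1}$, then
$$a L m_n \;<\; a B_{n+1} \;<\; |f(T^n x)| \;<\; L a B_{n+1} \;=\; a L m_{n+1},$$
so $T^n x$ falls in a range of values of $|f|$ which is disjoint from the corresponding ranges at the other times.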
Since preimages of distinct events have a measure that tends to $0$ along a density $1$ subsequence, this can only happen along a density $0$ subsequence.\ lemma upper_bound_L: fixes a::real and L::real and alpha::real assumes "a > 0" "alpha > 0" "L > 3" "PZ.prob {x. abs (x) > 2 * a} > 3 * alpha" "PZ.prob {x. abs (x) \ (L-1) * a} < alpha" shows "\A. lower_asymptotic_density A = 1 \ (\n\A. B (Suc n) \ L * Max {B i|i. i \ n})" proof - define m where "m = (\n. Max {B i|i. i \ n})" define K where "K = (\n::nat. {x \ space M. abs(f x) \ {a * L * m n <..< a * L * m (Suc n)}})" have [measurable]: "K n \ sets M" for n unfolding K_def by auto have *: "m n \ m p" if "n \ p" for n p unfolding m_def K_def using that by (auto intro!: Max_mono) have "K n \ K p = {}" if "n < p" for n p proof (auto simp add: K_def) fix x assume "\f x\ < a * L * m (Suc n)" "a * L * m p < \f x\" moreover have "a * L * m (Suc n) \ a * L * m p" using *[of "Suc n" p] that \a > 0\ \L > 3\ by auto ultimately show False by auto qed then have "disjoint_family K" unfolding disjoint_family_on_def using nat_neq_iff by auto have "\A0. lower_asymptotic_density A0 = 1 \ (\n. measure P (space M \ (T^^n)-`(K n)) * indicator A0 n) \ 0" apply (rule disjoint_sets_measure_density_one_tendsto_zero) apply fact+ using PabsM by auto then obtain A0 where A0: "lower_asymptotic_density A0 = 1" "(\n. measure P (space M \ (T^^n)-`(K n)) * indicator A0 n) \ 0" by blast obtain N0 where N0: "\n. n \ N0 \ measure P (space M \ (T^^n)-`(K n)) * indicator A0 n < alpha" using order_tendstoD(2)[OF A0(2) \alpha > 0\] unfolding eventually_sequentially by blast have A: "eventually (\n. measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha) sequentially" apply (rule open_set_weak_conv_lsc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv assms) have B: "eventually (\n. measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) \ (L- 1) * a} < alpha) sequentially" apply (rule closed_set_weak_conv_usc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv assms) obtain N where N: "\n. n \ N \ measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) > 2 * a} > 3 * alpha" "\n. n \ N \ measure (distr P borel (\x. (g x + birkhoff_sum f n x) / B n)) {x. abs (x) \ (L-1) * a} < alpha" using eventually_conj[OF A B] unfolding eventually_sequentially by blast have I: "PS.prob {x \ space M. abs((g x + birkhoff_sum f n x) / B n) < (L-1) * a} > 1 - alpha" if "n \ N" for n proof - let ?h = "(\x. (g x + birkhoff_sum f n x) / B n)" have "measure (distr P borel ?h) {x. abs (x) \ (L-1) * a} = measure P (?h-` {x. abs (x) \ (L-1) * a} \ space P)" by (rule measure_distr, auto) also have "... = measure P {x \ space M. abs(?h x) \ (L-1) * a}" by (rule HOL.cong[of "measure P"], auto) finally have A: "PS.prob {x \ space M. abs(?h x) \ (L-1) * a} < alpha" using N(2)[OF that] by auto have *: "{x \ space M. abs(?h x) < (L-1) * a} = space M - {x \ space M. abs(?h x) \ (L-1) * a}" by auto show ?thesis unfolding * using A PS.prob_compl by auto qed have Main: "PS.prob (space M \ (T^^n)-`(K n)) > alpha" if "n \ N" "B (Suc n) > L * m n" for n proof - have "Suc n \ N" using that by auto let ?h = "(\x. (g x + birkhoff_sum f (Suc n) x) / B (Suc n))" have "measure (distr P borel ?h) {x. abs (x) > 2 * a} = measure P (?h-` {x. abs (x) > 2 * a} \ space P)" by (rule measure_distr, auto) also have "... = measure P {x \ space M. 
abs(?h x) > 2 * a}" by (rule HOL.cong[of "measure P"], auto) finally have A: "PS.prob {x \ space M. abs(?h x) > 2 * a} > 3 * alpha" using N(1)[OF \Suc n \ N\] by auto define X where "X = {x \ space M. abs((g x + birkhoff_sum f n x) / B n) < (L-1) * a} \ {x \ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) < (L-1) * a} \ {x \ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) > 2 * a}" have "(1 - alpha) + (1 - alpha) + 3 * alpha < PS.prob {x \ space M. abs((g x + birkhoff_sum f n x) / B n) < (L-1) * a} + PS.prob {x \ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) < (L-1) * a} + PS.prob {x \ space M. abs((g x + birkhoff_sum f (Suc n) x) / B (Suc n)) > 2 * a}" using A I[OF \n \ N\] I[OF \Suc n \ N\] by auto also have "... \ 2 + PS.prob X" unfolding X_def by (rule PS.sum_measure_le_measure_inter3, auto) finally have "PS.prob X > alpha" by auto have "X \ space M \ (T^^n)-`(K n)" proof have *: "B i \ m n" if "i \ n" for i unfolding m_def by (rule Max_ge, auto simp add: that) have **: "B i \ B (Suc n)" if "i \ Suc n" for i proof (cases "i \ n") case True have "m n \ B (Suc n) / L" using \L * m n < B (Suc n)\ \L > 3\ by (simp add: divide_simps algebra_simps) also have "... \ B (Suc n)" using Bpos[of "Suc n"] \L > 3\ by (simp add: divide_simps algebra_simps) finally show ?thesis using *[OF True] by simp next case False then show ?thesis using \i \ Suc n\ le_SucE by blast qed have "m (Suc n) = B (Suc n)" unfolding m_def by (rule Max_eqI, auto simp add: **) fix x assume "x \ X" then have "abs (g x + birkhoff_sum f n x) < (L-1) * a * B n" unfolding X_def using Bpos[of n] by (auto simp add: algebra_simps divide_simps) also have "... \ L * a * m n" using *[of n] \L > 3\ \a > 0\ Bpos[of n] by (auto intro!: mult_mono) also have "... \ a * B (Suc n)" using \B (Suc n) > L * m n\ less_imp_le \a > 0\ by auto finally have A: "abs (g x + birkhoff_sum f n x) < a * B (Suc n)" by simp have B: "abs(g x + birkhoff_sum f (Suc n) x) < (L-1) * a * B (Suc n)" using \x \ X\ unfolding X_def using Bpos[of "Suc n"] by (auto simp add: algebra_simps divide_simps) have *: "f((T^^n) x) = (g x + birkhoff_sum f (Suc n) x) - (g x + birkhoff_sum f n x)" apply (auto simp add: algebra_simps) by (metis add.commute birkhoff_sum_1(2) birkhoff_sum_cocycle plus_1_eq_Suc) have "abs(f((T^^n) x)) \ abs (g x + birkhoff_sum f (Suc n) x) + abs(g x + birkhoff_sum f n x)" unfolding * by simp also have "... < (L-1) * a * B (Suc n) + a * B (Suc n)" using A B by auto also have "... = L * a * m (Suc n)" using \m (Suc n) = B (Suc n)\ by (simp add: algebra_simps) finally have Z1: "abs(f((T^^n) x)) < L * a * m (Suc n)" by simp have "2 * a * B (Suc n) < abs (g x + birkhoff_sum f (Suc n) x)" using \x \ X\ unfolding X_def using Bpos[of "Suc n"] by (auto simp add: algebra_simps divide_simps) also have "... = abs(f((T^^n) x) + (g x + birkhoff_sum f n x))" unfolding * by auto also have "... \ abs(f((T^^n) x)) + abs (g x + birkhoff_sum f n x)" by auto also have "... 
< abs(f((T^^n) x)) + a * B (Suc n)" using A by auto finally have "abs(f((T^^n) x)) > a * B (Suc n)" by simp then have Z2: "abs(f((T^^n) x)) > a * L * m n" using mult_strict_left_mono[OF \B (Suc n) > L * m n\ \a > 0\] by auto show "x \ space M \ (T ^^ n) -` K n" using Z1 Z2 \x \ X\ unfolding K_def X_def by (auto simp add: algebra_simps) qed have "PS.prob X \ PS.prob (space M \ (T^^n)-`(K n))" by (rule PS.finite_measure_mono, fact, auto) then show "alpha < PS.prob (space M \ (T ^^ n) -` K n)" using \alpha < PS.prob X\ by simp qed define A where "A = A0 \ {N + N0..}" have "lower_asymptotic_density A = 1" unfolding A_def by (rule lower_asymptotic_density_one_intersection, fact, simp) moreover have "B (Suc n) \ L * m n" if "n \ A" for n proof (rule ccontr) assume "\(B (Suc n) \ L * m n)" then have "L * m n < B (Suc n)" "n \ N" "n \ N0" using \n \ A\ unfolding A_def by auto then have "PS.prob (space M \ (T^^n)-`(K n)) > alpha" using Main by auto moreover have "PS.prob (space M \ (T^^n)-`(K n)) * indicator A0 n < alpha" using N0[OF \n \ N0\] by simp moreover have "indicator A0 n = (1::real)" using \n \ A\ unfolding A_def indicator_def by auto ultimately show False by simp qed ultimately show ?thesis unfolding m_def by blast qed text \Now, we combine the two previous statements to prove the main theorem.\ theorem subexponential_growth: "(\n. max 0 (ln (B n) /n)) \ 0" proof - obtain a0 where a0: "a0 > 0" "PZ.prob {x. abs (x) > a0} > 0" using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast define a where "a = a0/2" have "a > 0" using \a0 > 0\ unfolding a_def by auto define alpha where "alpha = PZ.prob {x. abs (x) > a0}/4" have "alpha > 0" unfolding alpha_def using a0 by auto have "PZ.prob {x. abs (x) > 2 * a} > 3 * alpha" using a0 unfolding a_def alpha_def by auto obtain C0 where C0: "PZ.prob {x. abs(x) \ C0} < alpha" "C0 \ 3 * a" using PZ.random_variable_small_tails[OF \alpha > 0\, of "\x. x"] by auto define L where "L = C0/a + 1" have "PZ.prob {x. abs(x) \ (L-1) * a} < alpha" unfolding L_def using C0 \a>0\ by auto have "L > 3" unfolding L_def using C0 \a > 0\ by (auto simp add: divide_simps) obtain C where C: "\n. B (Suc n) \ C * Max {B i|i. i \ n}" "C \ 1" using upper_bound_C by blast have C2: "B n \ C * Max {B i|i. i < n}" if "n > 0" for n proof - obtain m where m: "n = Suc m" using \0 < n\ gr0_implies_Suc by auto have *: "i \ m \ i < Suc m" for i by auto show ?thesis using C(1)[of m] unfolding m * by auto qed have Mainj: "eventually (\n. ln (B n) / n \ (1+ln L)/j) sequentially" if "j > 0" for j::nat proof - have *: "\A. lower_asymptotic_density A = 1 \ (\n\A. B (j * Suc n + k) \ L * Max {B (j * i + k) |i. i \ n})" for k proof - interpret Tj0: conservative M "(T^^j)" using conservative_power[of j] by auto have *: "g x + birkhoff_sum f k x + Tj0.birkhoff_sum (\x. birkhoff_sum f j ((T ^^ k) x)) n x = g x + birkhoff_sum f (j * n + k) x" for x n proof - have "birkhoff_sum f (j * n + k) x = (\i \ {.. {k..i \ {..i \ {k..sia. a-k"]) also have "... = birkhoff_sum f k x + Tj0.birkhoff_sum (\x. birkhoff_sum f j ((T ^^ k) x)) n x" unfolding birkhoff_sum_def Tj0.birkhoff_sum_def funpow_mult funpow_add'[symmetric] by (auto simp add: algebra_simps intro!: sum.swap) finally show ?thesis by simp qed interpret Tj: conservative_limit "T^^j" M P Z "\x. birkhoff_sum f j ((T^^k) x)" "\x. g x + birkhoff_sum f k x" "\n. 
B (j * n + k)" apply standard using PabsM Bpos non_trivial conv \j>0\ unfolding * by (auto intro!: weak_conv_m_subseq strict_monoI) show ?thesis apply (rule Tj.upper_bound_L[OF \a > 0\ \alpha > 0\]) by fact+ qed have "\A. \k. lower_asymptotic_density (A k) = 1 \ (\n\A k. B (j * Suc n + k) \ L * Max {B (j * i + k) |i. i \ n})" apply (rule choice) using * by auto then obtain A where A: "\k. lower_asymptotic_density (A k) = 1" "\k n. n \ A k \ B (j * Suc n + k) \ L * Max {B (j * i + k) |i. i \ n}" by blast define Aj where "Aj = (\klower_asymptotic_density Aj = 1\ unfolding Bj_def lower_upper_asymptotic_density_complement by simp define M where "M = (\n. Max {B p |p. p < (n+1) * j})" have "B 0 \ M n" for n unfolding M_def apply (rule Max_ge, auto, rule exI[of _ 0]) using \j > 0\ by auto then have Mpos: "M n > 0" for n by (metis Bpos not_le not_less_iff_gr_or_eq order.strict_trans) have M_L: "M (Suc n) \ L * M n" if "n \ Aj" for n proof - have *: "B s \ L * M n" if "s < (n+2) * j" for s proof (cases "s < (n+1) * j") case True have "B s \ M n" unfolding M_def apply (rule Max_ge) using True by auto also have "... \ L * M n" using \L > 3\ \M n > 0\ by auto finally show ?thesis by simp next case False then obtain k where "k < j" "s = (n+1) * j + k" using \s < (n+2) * j\ le_Suc_ex by force then have "B s = B (j * Suc n + k)" by (auto simp add: algebra_simps) also have "... \ L * Max {B (j * i + k) |i. i \ n}" using A(2)[of n k] \n \ Aj\ unfolding Aj_def using \k < j\ by auto also have "... \ L * Max {B a|a. a < (n+1) * j}" apply (rule mult_left_mono, rule Max_mono) using \L>3\ proof (auto) fix i assume "i \ n" show "\a. B (j * i + k) = B a \ a < j + n * j" apply (rule exI[of _ "j * i + k"]) using \k < j\ \i \ n\ by (auto simp add: add_mono_thms_linordered_field(3) algebra_simps) qed finally show ?thesis unfolding M_def by auto qed show ?thesis unfolding M_def apply (rule Max.boundedI) using * unfolding M_def using \j > 0\ by auto qed have M_C: "M (Suc n) \ C^j * M n" for n proof - have I: "Max {B s|s. s < (n+1) * j + k} \ C^k * M n" for k proof (induction k) case 0 show ?case apply (rule Max.boundedI) unfolding M_def using \j > 0\ by auto next case (Suc k) have *: "B s \ C * C ^ k * M n" if "s < Suc (j + n * j + k)" for s proof (cases "s < j + n * j + k") case True then have "B s \ C^k * M n" using iffD1[OF Max_le_iff, OF _ _ Suc.IH] by auto also have "... \ C * C^k * M n" using \C \ 1\ \M n > 0\ by auto finally show ?thesis by simp next case False then have "s = j + n * j + k" using that by auto then have "B s \ C * Max {B s|s. s < (n+1) * j + k}" using C2[of s] using \j > 0\ by auto also have "... \ C * C^k * M n" using Suc.IH \C \ 1\ by auto finally show ?thesis by simp qed show ?case apply (rule Max.boundedI) using \j > 0\ * by auto qed show ?thesis using I[of j] unfolding M_def by (auto simp add: algebra_simps) qed have I: "ln (M n) \ ln (M 0) + n * ln L + card (Bj \ {.. Bj") case True then have *: "Bj \ {.. {.. {n}" by auto have **: "card (Bj \ {.. {.. ln (C^j * M n)" using M_C \\n. 0 < M n\ less_le_trans ln_le_cancel_iff by blast also have "... = ln (M n) + ln (C^j)" using \C \ 1\ \0 < M n\ ln_mult by auto also have "... \ ln (M 0) + n * ln L + card (Bj \ {.. {.. ln (M 0) + (Suc n) * ln L + card (Bj \ {..L > 3\ by auto finally show ?thesis by auto next case False have "M (Suc n) \ L * M n" apply (rule M_L) using False unfolding Bj_def by auto then have "ln (M (Suc n)) \ ln (L * M n)" using \\n. 0 < M n\ less_le_trans ln_le_cancel_iff by blast also have "... 
= ln (M n) + ln L" using \L > 3\ \0 < M n\ ln_mult by auto also have "... \ ln (M 0) + Suc n * ln L + card (Bj \ {.. ln (M 0) + Suc n * ln L + card (Bj \ {..C \ 1\ by (auto intro!: mult_right_mono card_mono) finally show ?thesis by auto qed qed have "ln (M n)/n \ ln (M 0)* (1/n) + ln L + (card (Bj \ {..1" for n using that apply (auto simp add: algebra_simps divide_simps) by (metis (no_types, hide_lams) I add.assoc mult.commute mult_left_mono of_nat_0_le_iff semiring_normalization_rules(34)) then have A: "eventually (\n. ln (M n)/n \ ln (M 0)* (1/n) + ln L + (card (Bj \ {..n. ln (M 0)*(1/n) + ln L + (card (Bj \ {.. ln (M 0) * 0 + ln L + 0 *ln (C^j)" by (intro tendsto_intros upper_asymptotic_density_zero_lim, fact) have B: "eventually (\n. ln (M 0)*(1/n) + ln L + (card (Bj \ {..n. ln (M n)/n < 1 + ln L) sequentially" using eventually_conj[OF A B] by (simp add: eventually_mono) then obtain N where N: "\n. n \ N \ ln (M n)/n < 1 + ln L" unfolding eventually_sequentially by blast have "ln (B p) / p \ (1+ln L) / j" if "p \ (N+1) * j" for p proof - define n where "n = p div j" have "n \ N+1" unfolding n_def using that by (metis \0 < j\ div_le_mono div_mult_self_is_m) then have "n \ N" "n \ 1" by auto have *: "p < (n+1) * j" "n * j \ p" unfolding n_def using \j > 0\ dividend_less_div_times by auto have "B p \ M n" unfolding M_def apply (rule Max_ge) using * by auto then have "ln (B p) \ ln (M n)" using Bpos Mpos ln_le_cancel_iff by blast also have "... \ n * (1+ln L)" using N[OF \n \ N\] \n \ 1\ by (auto simp add: divide_simps algebra_simps) also have "... \ (p/j) * (1+ln L)" apply (rule mult_right_mono) using *(2) \j > 0\ \L > 3\ apply (auto simp add: divide_simps algebra_simps) using of_nat_mono by fastforce finally show ?thesis using \j > 0\ that by (simp add: algebra_simps divide_simps) qed then show "eventually (\p. ln (B p) / p \ (1+ln L)/j) sequentially" unfolding eventually_sequentially by auto qed show "(\n. max 0 (ln (B n) / real n)) \ 0" proof (rule order_tendstoI) fix e::real assume "e > 0" have *: "(\j. (1+ln L) * (1/j)) \ (1+ln L) * 0" by (intro tendsto_intros) have "eventually (\j. (1+ln L) * (1/j) < e) sequentially" apply (rule order_tendstoD[OF *]) using \e > 0\ by auto then obtain j::nat where j: "j > 0" "(1+ln L) * (1/j) < e" unfolding eventually_sequentially using add.right_neutral le_eq_less_or_eq by fastforce show "eventually (\n. max 0 (ln (B n) / real n) < e) sequentially" using Mainj[OF \j > 0\] j(2) \e > 0\ by (simp add: eventually_mono) qed (simp add: max.strict_coboundedI1) qed end (*of locale conservative_limit*) subsection \Normalizing sequences grow at most polynomially in probability preserving systems\ text \In probability preserving systems, normalizing sequences grow at most polynomially. The proof, also given in~\cite{gouezel_normalizing_sequences}, is considerably easier than the conservative case. We prove that $B_{n+1} \leq C B_n$ (more precisely, this only holds if $B_{n+1}$ is large enough), by arguing that $S_{n+1} f = S_n f + f \circ T^n$, where $f\circ T^n$ is negligible if $B_{n+1}$ is large thanks to the measure preservation. We also prove that $B_{2n} \leq E B_n$, by writing $S_{2n} f = S_n f+ S_n f \circ T^n$ and arguing that the two terms on the right have the same distribution. Finally, combining these two estimates, the polynomial growth follows readily.\ locale pmpt_limit = pmpt M + PZ: real_distribution Z for M::"'a measure" and Z::"real measure" + fixes f::"'a \ real" and B::"nat \ real" assumes Bpos: "\n. 
B n > 0" and M [measurable]: "f \ borel_measurable M" and non_trivial: "PZ.prob {0} < 1" and conv: "weak_conv_m (\n. distr P borel (\x. (birkhoff_sum f n x) / B n)) Z" begin text \First, we prove that $B_{n+1} \leq C B_n$ if $B_{n+1}$ is large enough.\ lemma upper_bound_CD: "\C D. (\n. B (Suc n) \ D \ B (Suc n) \ C * B n) \ C \ 1" proof - obtain a where a: "a > 0" "PZ.prob {x. abs (x) > a} > 0" using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast define alpha where "alpha = PZ.prob {x. abs (x) > a}/4" have "alpha > 0" unfolding alpha_def using a by auto have A: "PZ.prob {x. abs (x) > a} > 3 * alpha" using a unfolding alpha_def by auto obtain C0 where C0: "PZ.prob {x. abs(x) \ C0} < alpha" "C0 \ a" using PZ.random_variable_small_tails[OF \alpha > 0\, of "\x. x"] by auto have A: "eventually (\n. measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs (x) > a} > 3 * alpha) sequentially" apply (rule open_set_weak_conv_lsc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv A) have B: "eventually (\n. measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs (x) \ C0} < alpha) sequentially" apply (rule closed_set_weak_conv_usc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv C0) obtain N where N: "\n. n \ N \ measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs x > a} > 3 * alpha" "\n. n \ N \ measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs x \ C0} < alpha" using eventually_conj[OF A B] unfolding eventually_sequentially by blast obtain Cf where Cf: "prob {x \ space M. abs(f x) \ Cf} < alpha" "Cf \ 1" using random_variable_small_tails[OF \alpha > 0\ M] by auto have Main: "B (Suc n) \ (2*C0/a) * B n" if "n \ N" "B (Suc n) \ 2 * Cf/a" for n proof - have "Suc n \ N" using that by auto let ?h = "(\x. (birkhoff_sum f (Suc n) x) / B (Suc n))" have "measure (distr M borel ?h) {x. abs (x) > a} = measure M (?h-` {x. abs (x) > a} \ space M)" by (rule measure_distr, auto) also have "... = prob {x \ space M. abs(?h x) > a}" by (rule HOL.cong[of "measure M"], auto) finally have A: "prob {x \ space M. abs(?h x) > a} > 3 * alpha" using N(1)[OF \Suc n \ N\] by auto let ?h = "(\x. (birkhoff_sum f n x) / B n)" have "measure (distr M borel ?h) {x. abs (x) \ C0} = measure M (?h-` {x. abs (x) \ C0} \ space M)" by (rule measure_distr, auto) also have "... = measure M {x \ space M. abs(?h x) \ C0}" by (rule HOL.cong[of "measure M"], auto) finally have B0: "prob {x \ space M. abs(?h x) \ C0} < alpha" using N(2)[OF \n \ N\] by auto have *: "{x \ space M. abs(?h x) < C0} = space M - {x \ space M. abs(?h x) \ C0}" by auto have B: "prob {x \ space M. abs(?h x) < C0} > 1- alpha" unfolding * using B0 prob_compl by auto have "prob {x \ space M. abs(f ((T^^n) x)) \ Cf} = prob ((T^^n)-`{x \ space M. abs(f x) \ Cf} \ space M)" by (rule HOL.cong[of "prob"], auto) also have "... = prob {x \ space M. abs(f x) \ Cf}" using T_vrestr_same_measure(2)[of "{x \ space M. abs(f x) \ Cf}" n] unfolding vimage_restr_def by auto finally have C0: "prob {x \ space M. abs(f ((T^^n) x)) \ Cf} < alpha" using Cf by simp have *: "{x \ space M. abs(f ((T^^n) x)) < Cf} = space M - {x \ space M. abs(f ((T^^n) x)) \ Cf}" by auto have C: "prob {x \ space M. abs(f ((T^^n) x)) < Cf} > 1- alpha" unfolding * using C0 prob_compl by auto define X where "X = {x \ space M. abs((birkhoff_sum f n x) / B n) < C0} \ {x \ space M. abs((birkhoff_sum f (Suc n) x) / B (Suc n)) > a} \ {x \ space M. abs(f ((T^^n) x)) < Cf}" have "(1 - alpha) + 3 * alpha + (1 - alpha) < prob {x \ space M. 
abs((birkhoff_sum f n x) / B n) < C0} + prob {x \ space M. abs((birkhoff_sum f (Suc n) x) / B (Suc n)) > a} + prob {x \ space M. abs(f ((T^^n) x)) < Cf}" using A B C by auto also have "... \ 2 + prob X" unfolding X_def by (rule sum_measure_le_measure_inter3, auto) finally have "prob X > alpha" by auto then have "X \ {}" using \alpha > 0\ by auto then obtain x where "x \ X" by auto have *: "abs(birkhoff_sum f n x) \ C0 * B n" "abs(birkhoff_sum f (Suc n) x) \ a * B (Suc n)" "abs(f((T^^n) x)) \ Cf" using \x \ X\ Bpos[of n] Bpos[of "Suc n"] unfolding X_def by (auto simp add: divide_simps) have "a * B (Suc n) \ abs(birkhoff_sum f (Suc n) x)" using * by simp also have "... = abs(birkhoff_sum f n x + f ((T^^n) x))" by (metis Groups.add_ac(2) One_nat_def birkhoff_sum_1(3) birkhoff_sum_cocycle plus_1_eq_Suc) also have "... \ C0 * B n + Cf" using * by auto also have "... \ C0 * B n + (a/2) * B (Suc n)" using \B (Suc n) \ 2 * Cf/a\ \a > 0\ by (auto simp add: divide_simps algebra_simps) finally show "B (Suc n) \ (2 * C0/a) * B n" using \a > 0\ by (auto simp add: divide_simps algebra_simps) qed define C1 where "C1 = Max {B(Suc n)/B n |n. n \ N}" have *: "B (Suc n) \ max ((2 * C0/a)) C1 * B n" if "B (Suc n) > 2 * Cf/a" for n proof (cases "n > N") case True then show ?thesis using Main[OF less_imp_le[OF \n > N\] less_imp_le[OF that]] Bpos[of n] - by (meson max.cobounded1 order_trans real_mult_le_cancel_iff1) + by (meson max.cobounded1 order_trans mult_le_cancel_iff1) next case False then have "n \ N" by simp have "B(Suc n)/B n \ C1" unfolding C1_def apply (rule Max_ge) using \n \ N\ by auto then have "B (Suc n) \ C1 * B n" using Bpos[of n] by (simp add: divide_simps) then show ?thesis - using Bpos[of n] by (meson max.cobounded2 order_trans real_mult_le_cancel_iff1) + using Bpos[of n] by (meson max.cobounded2 order_trans mult_le_cancel_iff1) qed show ?thesis apply (rule exI[of _ "max ((2 * C0/a)) C1"], rule exI[of _ "2 * Cf/a"]) using * linorder_not_less \C0 \ a\ \a > 0\ by (auto intro!: max.coboundedI1) qed text \Second, we prove that $B_{2n} \leq E B_n$.\ lemma upper_bound_E: "\E. \n. B (2 * n) \ E * B n" proof - obtain a where a: "a > 0" "PZ.prob {x. abs (x) > a} > 0" using PZ.not_Dirac_0_imp_positive_mass_away_0[OF non_trivial] by blast define alpha where "alpha = PZ.prob {x. abs (x) > a}/4" have "alpha > 0" unfolding alpha_def using a by auto have A: "PZ.prob {x. abs (x) > a} > 3 * alpha" using a unfolding alpha_def by auto obtain C0 where C0: "PZ.prob {x. abs(x) \ C0} < alpha" "C0 \ a" using PZ.random_variable_small_tails[OF \alpha > 0\, of "\x. x"] by auto have A: "eventually (\n. measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs (x) > a} > 3 * alpha) sequentially" apply (rule open_set_weak_conv_lsc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv A) have B: "eventually (\n. measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs (x) \ C0} < alpha) sequentially" apply (rule closed_set_weak_conv_usc[of _ Z]) by (auto simp add: PZ.real_distribution_axioms conv C0) obtain N where N: "\n. n \ N \ measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs x > a} > 3 * alpha" "\n. n \ N \ measure (distr M borel (\x. (birkhoff_sum f n x) / B n)) {x. abs x \ C0} < alpha" using eventually_conj[OF A B] unfolding eventually_sequentially by blast have Main: "B (2 * n) \ (2*C0/a) * B n" if "n \ N" for n proof - have "2 * n \ N" using that by auto let ?h = "(\x. (birkhoff_sum f (2 * n) x) / B (2 * n))" have "measure (distr M borel ?h) {x. 
abs (x) > a} = measure M (?h-` {x. abs (x) > a} \ space M)" by (rule measure_distr, auto) also have "... = prob {x \ space M. abs(?h x) > a}" by (rule HOL.cong[of "measure M"], auto) finally have A: "prob {x \ space M. abs((birkhoff_sum f (2 * n) x) / B (2 * n)) > a} > 3 * alpha" using N(1)[OF \2 * n \ N\] by auto let ?h = "(\x. (birkhoff_sum f n x) / B n)" have "measure (distr M borel ?h) {x. abs (x) \ C0} = measure M (?h-` {x. abs (x) \ C0} \ space M)" by (rule measure_distr, auto) also have "... = measure M {x \ space M. abs(?h x) \ C0}" by (rule HOL.cong[of "measure M"], auto) finally have B0: "prob {x \ space M. abs(?h x) \ C0} < alpha" using N(2)[OF \n \ N\] by auto have *: "{x \ space M. abs(?h x) < C0} = space M - {x \ space M. abs(?h x) \ C0}" by auto have B: "prob {x \ space M. abs((birkhoff_sum f n x) / B n) < C0} > 1- alpha" unfolding * using B0 prob_compl by auto have "prob {x \ space M. abs(?h ((T^^n) x)) < C0} = prob ((T^^n)-`{x \ space M. abs(?h x) < C0} \ space M)" by (rule HOL.cong[of "prob"], auto) also have "... = prob {x \ space M. abs(?h x) < C0}" using T_vrestr_same_measure(2)[of "{x \ space M. abs(?h x) < C0}" n] unfolding vimage_restr_def by auto finally have C: "prob {x \ space M. abs((birkhoff_sum f n ((T^^n) x)) / B n) < C0} > 1- alpha" using B by simp define X where "X = {x \ space M. abs((birkhoff_sum f n x) / B n) < C0} \ {x \ space M. abs((birkhoff_sum f (2* n) x) / B (2* n)) > a} \ {x \ space M. abs((birkhoff_sum f n ((T^^n) x)) / B n) < C0}" have "(1 - alpha) + 3 * alpha + (1 - alpha) < prob {x \ space M. abs((birkhoff_sum f n x) / B n) < C0} + prob {x \ space M. abs((birkhoff_sum f (2* n) x) / B (2* n)) > a} + prob {x \ space M. abs((birkhoff_sum f n ((T^^n) x)) / B n) < C0}" using A B C by auto also have "... \ 2 + prob X" unfolding X_def by (rule sum_measure_le_measure_inter3, auto) finally have "prob X > alpha" by auto then have "X \ {}" using \alpha > 0\ by auto then obtain x where "x \ X" by auto have *: "abs(birkhoff_sum f n x) \ C0 * B n" "abs((birkhoff_sum f (2 * n) x)) \ a * B (2 * n)" "abs((birkhoff_sum f n ((T^^n) x))) \ C0 * B n" using \x \ X\ Bpos[of n] Bpos[of "2* n"] unfolding X_def by (auto simp add: divide_simps) have "a * B (2 * n) \ abs(birkhoff_sum f (2 * n) x)" using * by simp also have "... = abs(birkhoff_sum f n x + birkhoff_sum f n ((T^^n) x))" unfolding birkhoff_sum_cocycle[of f n n x, symmetric] by (simp add: mult_2) also have "... \ 2 * C0 * B n" using * by auto finally show "B (2 * n) \ (2 * C0/a) * B n" using \a > 0\ by (auto simp add: divide_simps algebra_simps) qed define C1 where "C1 = Max {B(2 * n)/B n |n. n \ N}" have *: "B (2*n) \ max ((2 * C0/a)) C1 * B n" for n proof (cases "n > N") case True then show ?thesis using Main[OF less_imp_le[OF \n > N\]] Bpos[of n] - by (meson max.cobounded1 order_trans real_mult_le_cancel_iff1) + by (meson max.cobounded1 order_trans mult_le_cancel_iff1) next case False then have "n \ N" by simp have "B(2*n)/B n \ C1" unfolding C1_def apply (rule Max_ge) using \n \ N\ by auto then have "B (2*n) \ C1 * B n" using Bpos[of n] by (simp add: divide_simps) then show ?thesis - using Bpos[of n] by (meson max.cobounded2 order_trans real_mult_le_cancel_iff1) + using Bpos[of n] by (meson max.cobounded2 order_trans mult_le_cancel_iff1) qed show ?thesis apply (rule exI[of _ "max ((2 * C0/a)) C1"]) using * by auto qed text \Finally, we combine the estimates in the two lemmas above to show that $B_n$ grows at most polynomially.\ theorem polynomial_growth: "\C K. \n>0. 
B n \ C * (real n)^K" proof - obtain C D where C: "C \ 1" "\n. B (Suc n) \ D \ B (Suc n) \ C * B n" using upper_bound_CD by blast obtain E where E: "\n. B (2 * n) \ E * B n" using upper_bound_E by blast have "E \ 1" using E[of 0] Bpos[of 0] by auto obtain k::nat where "log 2 (max C E) \ k" using real_arch_simple[of "log 2 (max C E)"] by blast then have "max C E \ 2^k" by (meson less_log_of_power not_less one_less_numeral_iff semiring_norm(76)) then have "C \ 2^k" "E \ 2^k" by auto define P where "P = max D (B 0)" have "P > 0" unfolding P_def using Bpos[of 0] by auto have Main: "\n. n < 2^r \ B n \ P * 2^(2 * k * r)" for r proof (induction r) case 0 fix n::nat assume "n < 2^0" then show "B n \ P * 2 ^ (2 * k * 0)" unfolding P_def by auto next case (Suc r) fix n::nat assume "n < 2^(Suc r)" consider "even n" | "B n \ D" | "odd n \ B n > D" by linarith then show "B n \ P * 2 ^ (2 * k * Suc r)" proof (cases) case 1 then obtain m where m: "n = 2 * m" by (rule evenE) have "m < 2^r" using \n < 2^(Suc r)\ unfolding m by auto then have *: "B m \ P * 2^(2 * k * r)" using Suc.IH by auto have "B n \ E * B m" unfolding m using E by simp also have "... \ 2^k * B m" apply (rule mult_right_mono[OF _ less_imp_le[OF Bpos[of m]]]) using \E \ 2^k\ by simp also have "... \ 2^k * (P * 2^(2 * k * r))" apply (rule mult_left_mono[OF *]) by auto also have "... = P * 2^(2 * k * r + k)" by (auto simp add: algebra_simps power_add) also have "... \ P * 2^(2 * k * Suc r)" apply (rule mult_left_mono) using \P > 0\ by auto finally show ?thesis by simp next case 2 have "D \ P * 1" unfolding P_def by auto also have "... \ P * 2^(2 * k * Suc r)" by (rule mult_left_mono[OF _ less_imp_le[OF \P > 0\]], auto) finally show ?thesis using 2 by simp next case 3 then obtain m where m: "n = 2 * m + 1" using oddE by blast have "m < 2^r" using \n < 2^(Suc r)\ unfolding m by auto then have *: "B m \ P * 2^(2 * k * r)" using Suc.IH by auto have "B n > D" using 3 by auto then have "B n \ C * B (2 * m)" unfolding m using C(2)[of "2 * m"] by auto also have "... \ C * (E * B m)" apply (rule mult_left_mono) using \C \ 1\ E[of m] by auto also have "... \ 2^k * (2^k * B m)" apply (intro mult_mono) using \C \ 2^k\ \C \ 1\ \E \ 1\ \E \ 2^k\ Bpos[of m] by auto also have "... \ 2^k * (2^k * (P * 2^(2 * k * r)))" apply (intro mult_left_mono) using * by auto also have "... = P * 2^(2 * k * Suc r)" using \P > 0\ by (simp add: algebra_simps divide_simps mult_2_right power_add) finally show ?thesis by simp qed qed have I: "B n \ (P * 2^(2 * k)) * n^(2 * k)" if "n > 0" for n proof - define r::nat where "r = nat(floor(log 2 (real n)))" have *: "int r = floor(log 2 (real n))" unfolding r_def using \0 < n\ by auto have I: "2^r \ n \ n < 2^(r+1)" using floor_log_nat_eq_powr_iff[OF _ \n > 0\, of 2 r] * by auto then have "B n \ P * 2^(2 * k * (r+1))" using Main[of n "r+1"] by auto also have "... = (P * 2^(2 * k)) * ((2^r)^(2*k))" by (simp add: power_add power_mult[symmetric]) also have "... 
\ (P * 2^(2 * k)) * n^(2 * k)" apply (rule mult_left_mono) using I \P > 0\ by (auto simp add: power_mono) finally show ?thesis by simp qed show ?thesis apply (rule exI[of _ "P * 2^(2 * k)"], rule exI[of _ "2 * k"]) using I by auto qed end (*of locale locale pmpt_limit*) end (*of Normalizing_Sequences.thy*) diff --git a/thys/First_Welfare_Theorem/Utility_Functions.thy b/thys/First_Welfare_Theorem/Utility_Functions.thy --- a/thys/First_Welfare_Theorem/Utility_Functions.thy +++ b/thys/First_Welfare_Theorem/Utility_Functions.thy @@ -1,350 +1,350 @@ (* License: LGPL *) (* Author: Julian Parsert Author: Cezary Kaliszyk *) section \ Utility Functions \ text \ Utility functions and results involving them. \ theory Utility_Functions imports Preferences begin subsection "Ordinal utility functions" text \ Ordinal utility function locale \ locale ordinal_utility = fixes carrier :: "'a set" fixes relation :: "'a relation" fixes u :: "'a \ real" assumes util_def[iff]: "x \ carrier \ y \ carrier \ x \[relation] y \ u x \ u y" assumes not_outside: "x \[relation] y \ x \ carrier" and "x \[relation] y \ y \ carrier" begin lemma util_def_conf: "x \ carrier \ y \ carrier \ u x \ u y \ x \[relation] y" using util_def by blast lemma relation_subset_crossp: "relation \ carrier \ carrier" proof fix x assume "x \ relation" have "\(a,b) \ relation. a \ carrier \ b \ carrier" by (metis (no_types, lifting) case_prod_conv ordinal_utility_axioms ordinal_utility_def surj_pair) then show "x \ carrier \ carrier" using \x \ relation\ by auto qed text \ Utility function implies totality of relation \ lemma util_imp_total: "total_on carrier relation" proof fix x and y assume x_inc: "x \ carrier" and y_inc : "y \ carrier" have fst : "u x \ u y \ u y \ u x" using util_def by auto then show "x \[relation] y \ y \[relation] x" by (simp add: x_inc y_inc) qed lemma x_y_in_carrier: "x \[relation] y \ x \ carrier \ y \ carrier" by (meson ordinal_utility_axioms ordinal_utility_def) text \ Utility function implies transitivity of relation. \ lemma util_imp_trans: "trans relation" proof (rule transI) fix x and y and z assume x_y: "x \[relation] y" assume y_z: "y \[relation] z" have x_ge_y: "x \[relation] y" using x_y by auto then have x_y: "u x \ u y" by (meson x_y_in_carrier ordinal_utility_axioms util_def x_y) have "u y \ u z" by (meson y_z ordinal_utility_axioms ordinal_utility_def) have "x \ carrier" using x_y_in_carrier[of x y] x_ge_y by simp then have "u x \ u z" using \u z \ u y\ order_trans x_y by blast hence "x \[relation] z" by (meson \x \ carrier\ ordinal_utility_axioms ordinal_utility_def y_z) then show "x \[relation] z" . qed lemma util_imp_refl: "refl_on carrier relation" by (simp add: refl_on_def relation_subset_crossp) lemma affine_trans_is_u: shows "\\>0. (\\. ordinal_utility carrier relation (\x. u(x)*\ + \))" proof (rule allI, rule impI, rule allI) fix \::real and \ assume *:"\ > 0" show "ordinal_utility carrier relation (\x. u x * \ + \)" proof (subst ordinal_utility_def, rule conjI, goal_cases) case 1 then show ?case - by (metis * add.commute add_le_cancel_left not_le real_mult_less_iff1 util_def_conf) + by (metis * add.commute add_le_cancel_left not_le mult_less_iff1 util_def_conf) next case 2 then show ?case by (meson refl_on_domain util_imp_refl) qed qed text \ This utility function definition is ordinal. Hence they are only unique up to a monotone transformation. 
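For instance, composing $u$ with any strictly increasing $f$ yields a function $f \circ u$ that represents the same relation, since $u\,x \geq u\,y$ holds exactly when $f(u\,x) \geq f(u\,y)$; this is made precise by the lemmas ordinality_of_utility_function and monotone_comp_is_utility below.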
\ lemma ordinality_of_utility_function : fixes f :: "real \ real" assumes monot: "monotone (>) (>) f" shows "(f \ u) x > (f \ u) y \ u x > u y" proof - let ?func = "(\x. f(u x))" have "\m n . u m \ u n \ ?func m \ ?func n" by (metis le_less monot monotone_def not_less) hence "u x > u y \ ?func x > ?func y" using not_le by blast thus ?thesis by auto qed corollary utility_prefs_corresp : fixes f :: "real \ real" assumes monotonicity : "monotone (>) (>) f" shows "\x\carrier. \y\carrier. (x,y) \ relation \ (f \ u) x \ (f \ u) y" by (meson monotonicity not_less ordinality_of_utility_function util_def_conf) corollary monotone_comp_is_utility: fixes f :: "real \ real" assumes monot: "monotone (>) (>) f" shows "ordinal_utility carrier relation (f \ u)" proof (rule ordinal_utility.intro, goal_cases) case (1 x y) then show ?case using monot utility_prefs_corresp by blast next case (2 x y) then show ?case using not_outside by blast next case (3 x y) then show ?case using x_y_in_carrier by blast qed lemma ordinal_utility_left: assumes "x \[relation] y" shows "u x \ u y" using assms x_y_in_carrier by blast lemma add_right: assumes "\x y. x \[relation] y \ f x \ f y" shows "ordinal_utility carrier relation (\x. u x + f x)" proof (rule ordinal_utility.intro, goal_cases) case (1 x y) assume xy: "x \ carrier" "y \ carrier" then show ?case proof - have "u x \ u y \ (\r. ((x, y) \ relation \ \ r \ u x + f x) \ r \ u y + f y) \ u y \ u x" by (metis (no_types) add_le_cancel_left add_le_cancel_right assms util_def xy(1) xy(2)) moreover show ?thesis by (meson add_mono assms calculation le_cases order_trans util_def xy(1) xy(2)) qed next case (2 x y) then show ?case using not_outside by blast next case (3 x y) then show ?case using x_y_in_carrier by blast qed lemma add_left: assumes "\x y. x \[relation] y \ f x \ f y" shows "ordinal_utility carrier relation (\x. f x + u x)" proof - have "ordinal_utility carrier relation (\x. u x + f x)" by (simp add: add_right assms) thus ?thesis using Groups.ab_semigroup_add_class.add.commute by (simp add: add.commute) qed lemma ordinal_utility_scale_transl: assumes "(c::real) > 0" shows "ordinal_utility carrier relation (\x. c * (u x) + d)" proof - have "monotone (>) (>) (\x. c * x + d)" (is "monotone (>) (>) ?fn" ) by (simp add: assms monotone_def) with monotone_comp_is_utility have "ordinal_utility carrier relation (?fn \ u)" by blast moreover have "?fn \ u = (\x. c * (u x) + d)" by auto finally show ?thesis by auto qed lemma strict_prefernce_iff_strict_utility: assumes "x \ carrier" assumes "y \ carrier" shows "x \[relation] y \ u x > u y" by (meson assms(1) assms(2) less_eq_real_def not_less util_def) end text \ A utility function implies a rational preference relation. Hence a utility function contains exactly the same amount of information as a RPR \ sublocale ordinal_utility \ rational_preference carrier relation proof fix x and y assume xy: "x \[relation] y" then show "x \ carrier" and "y \ carrier" using not_outside by (simp) (meson xy refl_onD2 util_imp_refl) next show "preorder_on carrier relation" proof- have "trans relation" using util_imp_trans by auto then have "preorder_on carrier relation" by (simp add: preorder_on_def util_imp_refl) then show ?thesis . qed next show "total_on carrier relation" by (simp add: util_imp_total) qed text \ Given a finite carrier set. We can guarantee that given a rational preference relation, there must also exist a utility function representing this relation. 
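Concretely, the proof below takes as witness the counting function that sends each $x$ to the cardinality of the set no_better_than x carrier relation, so that weakly preferred alternatives receive weakly larger utility values.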
Construction of witness roughly follows from.\ theorem fnt_carrier_exists_util_fun: assumes "finite carrier" assumes "rational_preference carrier relation" shows "\u. ordinal_utility carrier relation u" proof- define f where f: "f = (\x. card (no_better_than x carrier relation))" have "ordinal_utility carrier relation f" proof fix x y assume x_c: "x \ carrier" assume y_c: "y \ carrier" show "x \[relation] y \ (real (f y) \ real (f x))" proof assume asm: "x \[relation] y" define yn where yn: "yn = no_better_than y carrier relation" define xn where xn: "xn = no_better_than x carrier relation" then have "yn \ xn" by (simp add: asm yn assms(2) rational_preference.no_better_subset_pref) then have "card yn \ card xn" by (simp add: x_c y_c asm assms(1) assms(2) rational_preference.card_leq_pref xn yn) then show "(real (f y) \ real (f x))" using f xn yn by simp next assume "real (f y) \ real (f x)" then show "x \[relation] y" using assms(1) assms(2) f rational_preference.card_leq_pref x_c y_c by fastforce qed next fix x y assume asm: "x \[relation] y" show "x \ carrier" by (meson asm assms(2) preference.not_outside rational_preference.axioms(1)) show "y \ carrier" by (meson asm assms(2) preference_def rational_preference_def) qed then show ?thesis by blast qed corollary obt_u_fnt_carrier: assumes "finite carrier" assumes "rational_preference carrier relation" obtains u where "ordinal_utility carrier relation u" using assms(1) assms(2) fnt_carrier_exists_util_fun by blast theorem ordinal_util_imp_rat_prefs: assumes "ordinal_utility carrier relation u" shows "rational_preference carrier relation" by (metis (full_types) assms order_on_defs(1) ordinal_utility.util_imp_refl ordinal_utility.util_imp_total ordinal_utility.util_imp_trans ordinal_utility_def preference.intro rational_preference.intro rational_preference_axioms_def) subsection \ Utility function on Euclidean Space \ locale eucl_ordinal_utility = ordinal_utility carrier relation u for carrier :: "('a::euclidean_space) set" and relation :: "'a relation" and u :: "'a \ real" sublocale eucl_ordinal_utility \ rational_preference carrier relation using rational_preference_axioms by blast lemma ord_eucl_utility_imp_rpr: "eucl_ordinal_utility s rel u \ real_vector_rpr s rel" using eucl_ordinal_utility.axioms ordinal_util_imp_rat_prefs real_vector_rpr.intro by blast context eucl_ordinal_utility begin text \ Local non-satiation on utility functions \ lemma lns_pref_lns_util [iff]: "local_nonsatiation carrier relation \ (\x\carrier. \e > 0. \y\carrier. norm (y - x) \ e \ u y > u x)" (is "_ \ ?alt") proof assume lns: "local_nonsatiation carrier relation" have "\a b. a \ b \ u a > u b" by (metis less_eq_real_def util_def x_y_in_carrier) then show "?alt" by (meson lns local_nonsatiation_def) next assume lns: "?alt" show "local_nonsatiation carrier relation" proof(rule lns_normI) fix x and e::real assume x_in: "x \ carrier" assume e: "e > 0" have "\x \ carrier. \e>0. \y\carrier. norm (y - x) \ e \ y \ x" by (meson less_eq_real_def linorder_not_less lns util_def) have "\y\carrier. norm (y - x) \ e \ u y > u x" using e x_in lns by blast then show "\y\carrier. norm (y - x) \ e \ y \ x" by (meson compl not_less util_def x_in) qed qed end lemma finite_carrier_rpr_iff_u: assumes "finite carrier" and "(relation::'a relation) \ carrier \ carrier" shows "rational_preference carrier relation \ (\u. ordinal_utility carrier relation u)" proof assume "rational_preference carrier relation" then show "\u. 
ordinal_utility carrier relation u" by (simp add: assms(1) fnt_carrier_exists_util_fun) next assume "\u. ordinal_utility carrier relation u" then show "rational_preference carrier relation" by (metis (full_types) order_on_defs(1) ordinal_utility.util_imp_refl ordinal_utility.util_imp_total ordinal_utility.util_imp_trans ordinal_utility_def preference.intro rational_preference_axioms_def rational_preference_def) qed end \ No newline at end of file diff --git a/thys/Fourier/Fourier.thy b/thys/Fourier/Fourier.thy --- a/thys/Fourier/Fourier.thy +++ b/thys/Fourier/Fourier.thy @@ -1,2742 +1,2742 @@ section\The basics of Fourier series\ text\Ported from HOL Light; thanks to Manuel Eberl for help with the real asymp proof methods\ theory "Fourier" imports Periodic Square_Integrable "HOL-Real_Asymp.Real_Asymp" Confine Fourier_Aux2 begin subsection\Orthonormal system of L2 functions and their Fourier coefficients\ definition orthonormal_system :: "'a::euclidean_space set \ ('b \ 'a \ real) \ bool" where "orthonormal_system S w \ \m n. l2product S (w m) (w n) = (if m = n then 1 else 0)" definition orthonormal_coeff :: "'a::euclidean_space set \ (nat \ 'a \ real) \ ('a \ real) \ nat \ real" where "orthonormal_coeff S w f n = l2product S (w n) f" lemma orthonormal_system_eq: "orthonormal_system S w \ l2product S (w m) (w n) = (if m = n then 1 else 0)" by (simp add: orthonormal_system_def) lemma orthonormal_system_l2norm: "orthonormal_system S w \ l2norm S (w i) = 1" by (simp add: l2norm_def orthonormal_system_def) lemma orthonormal_partial_sum_diff: assumes os: "orthonormal_system S w" and w: "\i. (w i) square_integrable S" and f: "f square_integrable S" and "finite I" shows "(l2norm S (\x. f x - (\i\I. a i * w i x)))\<^sup>2 = (l2norm S f)\<^sup>2 + (\i\I. (a i)\<^sup>2) - 2 * (\i\I. a i * orthonormal_coeff S w f i)" proof - have "S \ sets lebesgue" using f square_integrable_def by blast then have "(\x. \i\I. a i * w i x) square_integrable S" by (intro square_integrable_sum square_integrable_lmult w \finite I\) with assms show ?thesis apply (simp add: l2norm_pow_2 orthonormal_coeff_def orthonormal_system_def) apply (simp add: l2product_rdiff l2product_sym l2product_rsum l2product_rmult algebra_simps power2_eq_square if_distrib [of "\x. _ * x"]) done qed lemma orthonormal_optimal_partial_sum: assumes "orthonormal_system S w" "\i. (w i) square_integrable S" "f square_integrable S" "finite I" shows "l2norm S (\x. f x - (\i\I. orthonormal_coeff S w f i * w i x)) \ l2norm S (\x. f x - (\i\I. a i * w i x))" proof - have "2 * (a i * l2product S f(w i)) \ (a i)\<^sup>2 + (l2product S f(w i))\<^sup>2" for i using sum_squares_bound [of "a i" "l2product S f(w i)"] by (force simp: algebra_simps) then have *: "2 * (\i\I. a i * l2product S f(w i)) \ (\i\I. (a i)\<^sup>2 + (l2product S f(w i))\<^sup>2)" by (simp add: sum_distrib_left sum_mono) have S: "S \ sets lebesgue" using assms square_integrable_def by blast with assms * show ?thesis apply (simp add: l2norm_le square_integrable_sum square_integrable_diff square_integrable_lmult flip: l2norm_pow_2) apply (simp add: orthonormal_coeff_def orthonormal_partial_sum_diff) apply (simp add: l2product_sym power2_eq_square sum.distrib) done qed lemma Bessel_inequality: assumes "orthonormal_system S w" "\i. (w i) square_integrable S" "f square_integrable S" "finite I" shows "(\i\I. 
(orthonormal_coeff S w f i)\<^sup>2) \ (l2norm S f)\<^sup>2" using orthonormal_partial_sum_diff [OF assms, of "orthonormal_coeff S w f"] apply (simp add: algebra_simps flip: power2_eq_square) by (metis (lifting) add_diff_cancel_left' diff_ge_0_iff_ge zero_le_power2) lemma Fourier_series_square_summable: assumes os: "orthonormal_system S w" and w: "\i. (w i) square_integrable S" and f: "f square_integrable S" shows "summable (confine (\i. (orthonormal_coeff S w f i) ^ 2) I)" proof - have "incseq (\n. \i\I \ {..n}. (orthonormal_coeff S w f i)\<^sup>2)" by (auto simp: incseq_def intro: sum_mono2) moreover have "\i. (\i\I \ {..i}. (orthonormal_coeff S w f i)\<^sup>2) \ (l2norm S f)\<^sup>2" by (simp add: Bessel_inequality assms) ultimately obtain L where "(\n. \i\I \ {..n}. (orthonormal_coeff S w f i)\<^sup>2) \ L" using incseq_convergent by blast then have "\r>0. \no. \n\no. \(\i\{..n} \ I. (orthonormal_coeff S w f i)\<^sup>2) - L\ < r" by (simp add: LIMSEQ_iff Int_commute) then show ?thesis by (auto simp: summable_def sums_confine_le LIMSEQ_iff) qed lemma orthonormal_Fourier_partial_sum_diff_squared: assumes os: "orthonormal_system S w" and w: "\i. (w i) square_integrable S" and f: "f square_integrable S" and "finite I" shows "(l2norm S (\x. f x -(\i\I. orthonormal_coeff S w f i * w i x)))\<^sup>2 = (l2norm S f)\<^sup>2 - (\i\I. (orthonormal_coeff S w f i)\<^sup>2)" using orthonormal_partial_sum_diff [OF assms, where a = "orthonormal_coeff S w f"] by (simp add: power2_eq_square) lemma Fourier_series_l2_summable: assumes os: "orthonormal_system S w" and w: "\i. (w i) square_integrable S" and f: "f square_integrable S" obtains g where "g square_integrable S" "(\n. l2norm S (\x. (\i\I \ {..n}. orthonormal_coeff S w f i * w i x) - g x)) \ 0" proof - have S: "S \ sets lebesgue" using f square_integrable_def by blast let ?\ = "\A x. (\i\I \ A. orthonormal_coeff S w f i * w i x)" let ?\ = "confine (\i. (orthonormal_coeff S w f i)\<^sup>2) I" have si: "?\ A square_integrable S" if "finite A" for A by (force intro: square_integrable_lmult w square_integrable_sum S that) have "\N. \m\N. \n\N. l2norm S (\x. ?\ {..m} x - ?\ {..n} x) < e" if "e > 0" for e proof - have "summable ?\" by (rule Fourier_series_square_summable [OF os w f]) then have "Cauchy (\n. sum ?\ {..n})" by (simp add: summable_def sums_def_le convergent_eq_Cauchy) then obtain M where M: "\m n. \m\M; n\M\ \ dist (sum ?\ {..m}) (sum ?\ {..n}) < e\<^sup>2" using \e > 0\ unfolding Cauchy_def by (drule_tac x="e^2" in spec) auto have "\m\M; n\M\ \ l2norm S (\x. ?\ {..m} x - ?\ {..n} x) < e" for m n proof (induct m n rule: linorder_class.linorder_wlog) case (le m n) have sum_diff: "sum f(I \ {..n}) - sum f(I \ {..m}) = sum f(I \ {Suc m..n})" for f :: "nat \ real" proof - have "(I \ {..n}) = (I \ {..m} \ I \ {Suc m..n})" "(I \ {..m}) \ (I \ {Suc m..n}) = {}" using le by auto then show ?thesis by (simp add: algebra_simps flip: sum.union_disjoint) qed have "(l2norm S (\x. ?\ {..n} x - ?\ {..m} x))^2 \ \(\i\I \ {..n}. (orthonormal_coeff S w f i)\<^sup>2) - (\i\I \ {..m}. (orthonormal_coeff S w f i)\<^sup>2)\" proof (simp add: sum_diff) have "(l2norm S (?\ {Suc m..n}))\<^sup>2 = (\i\I \ {Suc m..n}. l2product S (\x. \j\I \ {Suc m..n}. orthonormal_coeff S w f j * w j x) (\x. orthonormal_coeff S w f i * w i x))" by (simp add: l2norm_pow_2 l2product_rsum si w) also have "\ = (\i\I \ {Suc m..n}. \j\I \ {Suc m..n}. 
orthonormal_coeff S w f j * orthonormal_coeff S w f i * l2product S (w j) (w i))" by (simp add: l2product_lsum l2product_lmult l2product_rmult si w flip: mult.assoc) also have "\ \ \\i\I \ {Suc m..n}. (orthonormal_coeff S w f i)\<^sup>2\" by (simp add: orthonormal_system_eq [OF os] power2_eq_square if_distrib [of "(*)_"] cong: if_cong) finally show "(l2norm S (?\ {Suc m..n}))\<^sup>2 \ \\i\I \ {Suc m..n}. (orthonormal_coeff S w f i)\<^sup>2\" . qed also have "\ < e\<^sup>2" using M [OF \n\M\ \m\M\] by (simp add: dist_real_def sum_confine_eq_Int Int_commute) finally have "(l2norm S (\x. ?\ {..m} x - ?\ {..n} x))^2 < e^2" using l2norm_diff [OF si si] by simp with \e > 0\ show ?case by (simp add: power2_less_imp_less) next case (sym a b) then show ?case by (subst l2norm_diff) (auto simp: si) qed then show ?thesis by blast qed with that show thesis by (blast intro: si [of "{.._}"] l2_complete [of "\n. ?\ {..n}"]) qed lemma Fourier_series_l2_summable_strong: assumes os: "orthonormal_system S w" and w: "\i. (w i) square_integrable S" and f: "f square_integrable S" obtains g where "g square_integrable S" "\i. i \ I \ orthonormal_coeff S w (\x. f x - g x) i = 0" "(\n. l2norm S (\x. (\i\I \ {..n}. orthonormal_coeff S w f i * w i x) - g x)) \ 0" proof - have S: "S \ sets lebesgue" using f square_integrable_def by blast obtain g where g: "g square_integrable S" and teng: "(\n. l2norm S (\x. (\i\I \ {..n}. orthonormal_coeff S w f i * w i x) - g x)) \ 0" using Fourier_series_l2_summable [OF assms] . show thesis proof show "orthonormal_coeff S w (\x. f x - g x) i = 0" if "i \ I" for i proof (rule tendsto_unique) let ?\ = "\A x. (\i\I \ A. orthonormal_coeff S w f i * w i x)" let ?f = "\n. l2product S (w i) (\x. (f x - ?\ {..n} x) + (?\ {..n} x - g x))" show "?f \ orthonormal_coeff S w (\x. f x - g x) i" by (simp add: orthonormal_coeff_def) have 1: "(\n. l2product S (w i) (\x. f x - ?\ {..n} x)) \ 0" proof (rule tendsto_eventually) have "l2product S (w i) (\x. f x - ?\ {..j} x) = 0" if "j \ i" for j using that \i \ I\ apply (simp add: l2product_rdiff l2product_rsum l2product_rmult orthonormal_coeff_def w f S) apply (simp add: orthonormal_system_eq [OF os] if_distrib [of "(*)_"] cong: if_cong) done then show "\\<^sub>F n in sequentially. l2product S (w i) (\x. f x - ?\ {..n} x) = 0" using eventually_at_top_linorder by blast qed have 2: "(\n. l2product S (w i) (\x. ?\ {..n} x - g x)) \ 0" proof (intro Lim_null_comparison [OF _ teng] eventuallyI) show "norm (l2product S (w i) (\x. ?\ {..n} x - g x)) \ l2norm S (\x. ?\ {..n} x - g x)" for n using Schwartz_inequality_abs [of "w i" S "(\x. ?\ {..n} x - g x)"] by (simp add: S g f w orthonormal_system_l2norm [OF os]) qed show "?f \ 0" using that tendsto_add [OF 1 2] by (subst l2product_radd) (simp_all add: l2product_radd w f g S) qed auto qed (auto simp: g teng) qed subsection\Actual trigonometric orthogonality relations\ lemma integrable_sin_cx: "integrable (lebesgue_on {-pi..pi}) (\x. sin(x * c))" by (intro continuous_imp_integrable_real continuous_intros) lemma integrable_cos_cx: "integrable (lebesgue_on {-pi..pi}) (\x. cos(x * c))" by (intro continuous_imp_integrable_real continuous_intros) lemma integral_cos_Z' [simp]: assumes "n \ \" shows "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. cos(n * x)) = (if n = 0 then 2 * pi else 0)" by (metis assms integral_cos_Z mult_commute_abs) lemma integral_sin_and_cos: fixes m n::int shows "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. 
cos(m * x) * cos(n * x)) = (if \m\ = \n\ then if n = 0 then 2 * pi else pi else 0)" "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. cos(m * x) * sin(n * x)) = 0" "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin(m * x) * cos(n * x)) = 0" "\m \ 0; n \ 0\ \ integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin (m * x) * sin (n * x)) = (if m = n \ n \ 0 then pi else 0)" "\integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin (m * x) * sin (n * x))\ = (if \m\ = \n\ \ n \ 0 then pi else 0)" by (simp_all add: abs_if sin_times_sin cos_times_sin sin_times_cos cos_times_cos integrable_sin_cx integrable_cos_cx mult_ac flip: distrib_left distrib_right left_diff_distrib right_diff_distrib) lemma integral_sin_and_cos_Z [simp]: fixes m n::real assumes "m \ \" "n \ \" shows "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. cos(m * x) * cos(n * x)) = (if \m\ = \n\ then if n = 0 then 2 * pi else pi else 0)" "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. cos(m * x) * sin(n * x)) = 0" "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin(m * x) * cos(n * x)) = 0" "\integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin (m * x) * sin (n * x))\ = (if \m\ = \n\ \ n \ 0 then pi else 0)" using assms unfolding Ints_def apply safe unfolding integral_sin_and_cos apply auto done lemma integral_sin_and_cos_N [simp]: fixes m n::real assumes "m \ \" "n \ \" shows "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin (m * x) * sin (n * x)) = (if m = n \ n \ 0 then pi else 0)" using assms unfolding Nats_altdef1 by (auto simp: integral_sin_and_cos) lemma integrable_sin_and_cos: fixes m n::int shows "integrable (lebesgue_on {a..b}) (\x. cos(x * m) * cos(x * n))" "integrable (lebesgue_on {a..b}) (\x. cos(x * m) * sin(x * n))" "integrable (lebesgue_on {a..b}) (\x. sin(x * m) * cos(x * n))" "integrable (lebesgue_on {a..b}) (\x. sin(x * m) * sin(x * n))" by (intro continuous_imp_integrable_real continuous_intros)+ lemma sqrt_pi_ge1: "sqrt pi \ 1" using pi_gt3 by auto definition trigonometric_set :: "nat \ real \ real" where "trigonometric_set n \ if n = 0 then \x. 1 / sqrt(2 * pi) else if odd n then \x. sin(real(Suc (n div 2)) * x) / sqrt(pi) else (\x. cos((n div 2) * x) / sqrt pi)" lemma trigonometric_set: "trigonometric_set 0 x = 1 / sqrt(2 * pi)" "trigonometric_set (Suc (2 * n)) x = sin(real(Suc n) * x) / sqrt(pi)" "trigonometric_set (2 * n + 2) x = cos(real(Suc n) * x) / sqrt(pi)" "trigonometric_set (Suc (Suc (2 * n))) x = cos(real(Suc n) * x) / sqrt(pi)" by (simp_all add: trigonometric_set_def algebra_simps add_divide_distrib) lemma trigonometric_set_even: "trigonometric_set(2*k) = (if k = 0 then (\x. 1 / sqrt(2 * pi)) else (\x. 
cos(k * x) / sqrt pi))" by (induction k) (auto simp: trigonometric_set_def add_divide_distrib split: if_split_asm) lemma orthonormal_system_trigonometric_set: "orthonormal_system {-pi..pi} trigonometric_set" proof - have "l2product {-pi..pi} (trigonometric_set m) (trigonometric_set n) = (if m = n then 1 else 0)" for m n proof (induction m rule: odd_even_cases) case 0 show ?case by (induction n rule: odd_even_cases) (auto simp: trigonometric_set l2product_def measure_restrict_space) next case (odd m) show ?case by (induction n rule: odd_even_cases) (auto simp: trigonometric_set l2product_def double_not_eq_Suc_double) next case (even m) show ?case by (induction n rule: odd_even_cases) (auto simp: trigonometric_set l2product_def Suc_double_not_eq_double) qed then show ?thesis unfolding orthonormal_system_def by auto qed lemma square_integrable_trigonometric_set: "(trigonometric_set i) square_integrable {-pi..pi}" proof - have "continuous_on {-pi..pi} (\x. sin ((1 + real n) * x) / sqrt pi)" for n by (intro continuous_intros) auto moreover have "continuous_on {-pi..pi} (\x. cos (real i * x / 2) / sqrt pi) " by (intro continuous_intros) auto ultimately show ?thesis by (simp add: trigonometric_set_def) qed subsection\Weierstrass for trigonometric polynomials\ lemma Weierstrass_trig_1: fixes g :: "real \ real" assumes contf: "continuous_on UNIV g" and periodic: "\x. g(x + 2 * pi) = g x" and 1: "norm z = 1" shows "continuous (at z within (sphere 0 1)) (g \ Im \ Ln)" proof (cases "Re z < 0") let ?f = "complex_of_real \ g \ (\x. x + pi) \ Im \ Ln \ uminus" case True have "continuous (at z within (sphere 0 1)) (complex_of_real \ g \ Im \ Ln)" proof (rule continuous_transform_within) have [simp]: "z \ \\<^sub>\\<^sub>0" using True complex_nonneg_Reals_iff by auto show "continuous (at z within (sphere 0 1)) ?f" by (intro continuous_within_Ln continuous_intros continuous_on_imp_continuous_within [OF contf]) auto show "?f x' = (complex_of_real \ g \ Im \ Ln) x'" if "x' \ (sphere 0 1)" and "dist x' z < 1" for x' proof - have "x' \ 0" using that by auto with that show ?thesis by (auto simp: Ln_minus add.commute periodic) qed qed (use 1 in auto) then have "continuous (at z within (sphere 0 1)) (Re \ complex_of_real \ g \ Im \ Ln)" unfolding o_def by (rule continuous_Re) then show ?thesis by (simp add: o_def) next case False let ?f = "complex_of_real \ g \ Im \ Ln \ uminus" have "z \ 0" using 1 by auto with False have "z \ \\<^sub>\\<^sub>0" by (auto simp: not_less nonpos_Reals_def) then have "continuous (at z within (sphere 0 1)) (complex_of_real \ g \ Im \ Ln)" by (intro continuous_within_Ln continuous_intros continuous_on_imp_continuous_within [OF contf]) auto then have "continuous (at z within (sphere 0 1)) (Re \ complex_of_real \ g \ Im \ Ln)" unfolding o_def by (rule continuous_Re) then show ?thesis by (simp add: o_def) qed inductive_set cx_poly :: "(complex \ real) set" where Re: "Re \ cx_poly" | Im: "Im \ cx_poly" | const: "(\x. c) \ cx_poly" | add: "\f \ cx_poly; g \ cx_poly\ \ (\x. f x + g x) \ cx_poly" | mult: "\f \ cx_poly; g \ cx_poly\ \ (\x. f x * g x) \ cx_poly" declare cx_poly.intros [intro] lemma Weierstrass_trig_polynomial: assumes contf: "continuous_on {-pi..pi} f" and fpi: "f(-pi) = f pi" and "0 < e" obtains n::nat and a b where "\x::real. x \ {-pi..pi} \ \f x - (\k\n. 
a k * sin (k * x) + b k * cos (k * x))\ < e" proof - interpret CR: function_ring_on "cx_poly" "sphere 0 1" proof show "continuous_on (sphere 0 1) f" if "f \ cx_poly" for f using that by induction (assumption | intro continuous_intros)+ fix x y::complex assume "x \ sphere 0 1" and "y \ sphere 0 1" and "x \ y" then consider "Re x \ Re y" | "Im x \ Im y" using complex_eqI by blast then show "\f\cx_poly. f x \ f y" by (metis cx_poly.Im cx_poly.Re) qed (auto simp: cx_poly.intros) have "continuous (at z within (sphere 0 1)) (f \ Im \ Ln)" if "norm z = 1" for z proof - obtain g where contg: "continuous_on UNIV g" and gf: "\x. x \ {-pi..pi} \ g x = f x" and periodic: "\x. g(x + 2*pi) = g x" using Tietze_periodic_interval [OF contf fpi] by auto let ?f = "(g \ Im \ Ln)" show ?thesis proof (rule continuous_transform_within) show "continuous (at z within (sphere 0 1)) ?f" using Weierstrass_trig_1 [OF contg periodic that] by (simp add: sphere_def) show "?f x' = (f \ Im \ Ln) x'" if "x' \ sphere 0 1" "dist x' z < 1" for x' proof - have "x' \ 0" using that by auto then have "Im (Ln x') \ {-pi..pi}" using Im_Ln_le_pi [of x'] mpi_less_Im_Ln [of x'] by simp then show ?thesis using gf by simp qed qed (use that in auto) qed then have "continuous_on (sphere 0 1) (f \ Im \ Ln)" using continuous_on_eq_continuous_within mem_sphere_0 by blast then obtain g where "g \ cx_poly" and g: "\x. x \ sphere 0 1 \ \(f \ Im \ Ln) x - g x\ < e" using CR.Stone_Weierstrass_basic [of "f \ Im \ Ln"] \e > 0\ by meson define trigpoly where "trigpoly \ \f. \n a b. f = (\x. (\k\n. a k * sin(real k * x) + b k * cos(real k * x)))" have tp_const: "trigpoly(\x. c)" for c unfolding trigpoly_def by (rule_tac x=0 in exI) auto have tp_add: "trigpoly(\x. f x + g x)" if "trigpoly f" "trigpoly g" for f g proof - obtain n1 a1 b1 where feq: "f = (\x. \k\n1. a1 k * sin (real k * x) + b1 k * cos (real k * x))" using \trigpoly f\ trigpoly_def by blast obtain n2 a2 b2 where geq: "g = (\x. \k\n2. a2 k * sin (real k * x) + b2 k * cos (real k * x))" using \trigpoly g\ trigpoly_def by blast let ?a = "\n. (if n \ n1 then a1 n else 0) + (if n \ n2 then a2 n else 0)" let ?b = "\n. (if n \ n1 then b1 n else 0) + (if n \ n2 then b2 n else 0)" have eq: "{k. k \ max a b \ k \ a} = {..a}" "{k. k \ max a b \ k \ b} = {..b}" for a b::nat by auto have "(\x. f x + g x) = (\x. \k \ max n1 n2. ?a k * sin (real k * x) + ?b k * cos (real k * x))" by (simp add: feq geq algebra_simps eq sum.distrib if_distrib [of "\u. _ * u"] cong: if_cong flip: sum.inter_filter) then show ?thesis unfolding trigpoly_def by meson qed have tp_sum: "trigpoly(\x. \i\S. f i x)" if "finite S" "\i. i \ S \ trigpoly(f i)" for f and S :: "nat set" using that by induction (auto simp: tp_const tp_add) have tp_cmul: "trigpoly(\x. c * f x)" if "trigpoly f" for f c proof - obtain n a b where feq: "f = (\x. \k\n. a k * sin (real k * x) + b k * cos (real k * x))" using \trigpoly f\ trigpoly_def by blast have "(\x. c * f x) = (\x. \k \ n. (c * a k) * sin (real k * x) + (c * b k) * cos (real k * x))" by (simp add: feq algebra_simps sum_distrib_left) then show ?thesis unfolding trigpoly_def by meson qed have tp_cdiv: "trigpoly(\x. f x / c)" if "trigpoly f" for f c using tp_cmul [OF \trigpoly f\, of "inverse c"] by (simp add: divide_inverse_commute) have tp_diff: "trigpoly(\x. f x - g x)" if "trigpoly f" "trigpoly g" for f g using tp_add [OF \trigpoly f\ tp_cmul [OF \trigpoly g\, of "-1"]] by auto have tp_sin: "trigpoly(\x. 
sin (n * x))" if "n \ \" for n unfolding trigpoly_def proof obtain k where "n = real k" using Nats_cases \n \ \\ by blast then show "\a b. (\x. sin (n * x)) = (\x. \i \ nat\n\. a i * sin (real i * x) + b i * cos (real i * x))" apply (rule_tac x="\i. if i = k then 1 else 0" in exI) apply (rule_tac x="\i. 0" in exI) apply (simp add: if_distrib [of "\u. u * _"] cong: if_cong) done qed have tp_cos: "trigpoly(\x. cos (n * x))" if "n \ \" for n unfolding trigpoly_def proof obtain k where "n = real k" using Nats_cases \n \ \\ by blast then show "\a b. (\x. cos (n * x)) = (\x. \i \ nat\n\. a i * sin (real i * x) + b i * cos (real i * x))" apply (rule_tac x="\i. 0" in exI) apply (rule_tac x="\i. if i = k then 1 else 0" in exI) apply (simp add: if_distrib [of "\u. u * _"] cong: if_cong) done qed have tp_sincos: "trigpoly(\x. sin(i * x) * sin(j * x)) \ trigpoly(\x. sin(i * x) * cos(j * x)) \ trigpoly(\x. cos(i * x) * sin(j * x)) \ trigpoly(\x. cos(i * x) * cos(j * x))" (is "?P i j") for i j::nat proof (rule linorder_wlog [of _ j i]) show "?P j i" if "i \ j" for j i using that by (simp add: sin_times_sin cos_times_cos sin_times_cos cos_times_sin diff_divide_distrib tp_add tp_diff tp_cdiv tp_cos tp_sin flip: left_diff_distrib distrib_right) qed (simp add: mult_ac) have tp_mult: "trigpoly(\x. f x * g x)" if "trigpoly f" "trigpoly g" for f g proof - obtain n1 a1 b1 where feq: "f = (\x. \k\n1. a1 k * sin (real k * x) + b1 k * cos (real k * x))" using \trigpoly f\ trigpoly_def by blast obtain n2 a2 b2 where geq: "g = (\x. \k\n2. a2 k * sin (real k * x) + b2 k * cos (real k * x))" using \trigpoly g\ trigpoly_def by blast then show ?thesis unfolding feq geq by (simp add: algebra_simps sum_product tp_sum tp_add tp_cmul tp_sincos del: mult.commute) qed have *: "trigpoly(\x. f(exp(\ * of_real x)))" if "f \ cx_poly" for f using that proof induction case Re then show ?case using tp_cos [of 1] by (auto simp: Re_exp) next case Im then show ?case using tp_sin [of 1] by (auto simp: Im_exp) qed (auto simp: tp_const tp_add tp_mult) obtain n a b where eq: "(g (iexp x)) = (\k\n. a k * sin (real k * x) + b k * cos (real k * x))" for x using * [OF \g \ cx_poly\] trigpoly_def by meson show thesis proof show "\f \ - (\k\n. a k * sin (real k * \) + b k * cos (real k * \))\ < e" if "\ \ {-pi..pi}" for \ proof - have "f \ - g (iexp \) = (f \ Im \ Ln) (iexp \) - g (iexp \)" proof (cases "\ = -pi") case True then show ?thesis by (simp add: exp_minus fpi) next case False then show ?thesis using that by auto qed then show ?thesis using g [of "exp(\ * of_real \)"] by (simp flip: eq) qed qed qed subsection\A bit of extra hacking round so that the ends of a function are OK\ lemma integral_tweak_ends: fixes a b :: real assumes "a < b" "e > 0" obtains f where "continuous_on {a..b} f" "f a = d" "f b = 0" "l2norm {a..b} f < e" proof - have cont: "continuous_on {a..b} (\x. if x \ a+1 / real(Suc n) then ((Suc n) * d) * ((a+1 / (Suc n)) - x) else 0)" for n proof (cases "a+1/(Suc n) \ b") case True have *: "1 / (1 + real n) > 0" by auto have abeq: "{a..b} = {a..a+1/(Suc n)} \ {a+1/(Suc n)..b}" using \a < b\ True apply auto using "*" by linarith show ?thesis unfolding abeq proof (rule continuous_on_cases) show "continuous_on {a..a+1 / real (Suc n)} (\x. real (Suc n) * d * (a+1 / real (Suc n) - x))" by (intro continuous_intros) qed auto next case False show ?thesis proof (rule continuous_on_eq [where f = "\x. ((Suc n) * d) * ((a+1/(Suc n)) - x)"]) show " continuous_on {a..b} (\x. 
(Suc n) * d * (a+1 / real (Suc n) - x))" by (intro continuous_intros) qed (use False in auto) qed let ?f = "\k x. (if x \ a+1 / (Suc k) then (Suc k) * d * (a+1 / (Suc k) - x) else 0)\<^sup>2" let ?g = "\x. if x = a then d\<^sup>2 else 0" have bmg: "?g \ borel_measurable (lebesgue_on {a..b})" apply (rule measurable_restrict_space1) using borel_measurable_if_I [of _ "{a}", OF borel_measurable_const] by auto have bmf: "?f k \ borel_measurable (lebesgue_on {a..b})" for k proof - have bm: "(\x. (Suc k) * d * (a+1 / real (Suc k) - x)) \ borel_measurable (lebesgue_on {..a+1 / (Suc k)})" by (intro measurable) (auto simp: measurable_completion measurable_restrict_space1) show ?thesis apply (intro borel_measurable_power measurable_restrict_space1) using borel_measurable_if_I [of _ "{.. a+1 / (Suc k)}", OF bm] apply auto done qed have int_d2: "integrable (lebesgue_on {a..b}) (\x. d\<^sup>2)" by (intro continuous_imp_integrable_real continuous_intros) have "(\k. ?f k x) \ ?g x" if x: "x \ {a..b}" for x proof (cases "x = a") case False then have "x > a" using x by auto with x obtain N where "N > 0" and N: "1 / real N < x-a" using real_arch_invD [of "x-a"] by (force simp: inverse_eq_divide) then have "x > a+1 / (1 + real k)" if "k \ N" for k proof - have "a+1 / (1 + real k) < a+1 / real N" using that \0 < N\ by (simp add: field_simps) also have "\ < x" using N by linarith finally show ?thesis . qed then show ?thesis apply (intro tendsto_eventually eventually_sequentiallyI [where c=N]) by (fastforce simp: False) qed auto then have tends: "AE x in (lebesgue_on {a..b}). (\k. ?f k x) \ ?g x" by force have le_d2: "\k. AE x in (lebesgue_on {a..b}). norm (?f k x) \ d\<^sup>2" proof show "norm ((if x \ a+1 / real (Suc k) then real (Suc k) * d * (a+1 / real (Suc k) - x) else 0)\<^sup>2) \ d\<^sup>2" if "x \ space (lebesgue_on {a..b})" for k x using that apply (simp add: abs_mult divide_simps flip: abs_le_square_iff) apply (auto simp: abs_if zero_less_mult_iff mult_left_le) done qed have integ: "integrable (lebesgue_on {a..b}) ?g" using integrable_dominated_convergence [OF bmg bmf int_d2 tends le_d2] by auto have int: "(\k. integral\<^sup>L (lebesgue_on {a..b}) (?f k)) \ integral\<^sup>L (lebesgue_on {a..b}) ?g" using integral_dominated_convergence [OF bmg bmf int_d2 tends le_d2] by auto then obtain N where N: "\k. k \ N \ \integral\<^sup>L (lebesgue_on {a..b}) (?f k) - integral\<^sup>L (lebesgue_on {a..b}) ?g\ < e\<^sup>2" apply (simp add: lim_sequentially dist_real_def) apply (drule_tac x="e^2" in spec) using \e > 0\ by auto obtain M where "M > 0" and M: "1 / real M < b-a" using real_arch_invD [of "b-a"] by (metis \a < b\ diff_gt_0_iff_gt inverse_eq_divide neq0_conv) have *: "\integral\<^sup>L (lebesgue_on {a..b}) (?f (max M N)) - integral\<^sup>L (lebesgue_on {a..b}) ?g\ < e\<^sup>2" using N by force let ?\ = "\x. if x \ a+1/(Suc (max M N)) then ((Suc (max M N)) * d) * ((a+1/(Suc (max M N))) - x) else 0" show ?thesis proof show "continuous_on {a..b} ?\" by (rule cont) have "1 / (1 + real (max M N)) \ 1 / (real M)" by (simp add: \0 < M\ frac_le) then have "\ (b \ a+1 / (1 + real (max M N)))" using M \a < b\ \M > 0\ max.cobounded1 [of M N] by linarith then show "?\ b = 0" by simp have null_a: "{a} \ null_sets (lebesgue_on {a..b})" using \a < b\ by (simp add: null_sets_restrict_space) have "LINT x|lebesgue_on {a..b}. 
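text \
  Informal note: the functions considered here are the tent functions
  $\varphi_n(x) = (n+1)\,d\,\bigl(a + \tfrac{1}{n+1} - x\bigr)$ for $a \le x \le a + \tfrac{1}{n+1}$
  and $\varphi_n(x) = 0$ otherwise ($\varphi_n$ is notation used only in this remark).  They satisfy
  $\varphi_n(a) = d$, they vanish eventually at every $x > a$, and $\varphi_n(x)^2 \le d^2$ on
  $[a,b]$, so dominated convergence gives $\int_a^b \varphi_n^2 \to 0$ and hence an arbitrarily
  small $L^2$ norm.
\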
?g x = 0" by (force intro: integral_eq_zero_AE AE_I' [OF null_a]) then have "l2norm {a..b} ?\ < sqrt (e^2)" unfolding l2norm_def l2product_def power2_eq_square [symmetric] apply (intro real_sqrt_less_mono) using "*" by linarith then show "l2norm {a..b} ?\ < e" using \e > 0\ by auto qed auto qed lemma square_integrable_approximate_continuous_ends: assumes f: "f square_integrable {a..b}" and "a < b" "0 < e" obtains g where "continuous_on {a..b} g" "g b = g a" "g square_integrable {a..b}" "l2norm {a..b} (\x. f x - g x) < e" proof - obtain g where contg: "continuous_on UNIV g" and "g square_integrable {a..b}" and lg: "l2norm {a..b} (\x. f x - g x) < e/2" using f \e > 0\ square_integrable_approximate_continuous by (metis (full_types) box_real(2) half_gt_zero_iff lmeasurable_cbox) obtain h where conth: "continuous_on {a..b} h" and "h a = g b - g a" "h b = 0" and lh: "l2norm {a..b} h < e/2" using integral_tweak_ends \e > 0\ by (metis \a < b\ zero_less_divide_iff zero_less_numeral) have "h square_integrable {a..b}" using \continuous_on {a..b} h\ continuous_imp_square_integrable by blast show thesis proof show "continuous_on {a..b} (\x. g x + h x)" by (blast intro: continuous_on_subset [OF contg] conth continuous_intros) then show "(\x. g x + h x) square_integrable {a..b}" using continuous_imp_square_integrable by blast show "g b + h b = g a + h a" by (simp add: \h a = g b - g a\ \h b = 0\) have "l2norm {a..b} (\x. (f x - g x) + - h x) < e" proof (rule le_less_trans [OF l2norm_triangle [of "\x. f x - g x" "{a..b}" "\x. - (h x)"]]) show "(\x. f x - g x) square_integrable {a..b}" using \g square_integrable {a..b}\ f square_integrable_diff by blast show "(\x. - h x) square_integrable {a..b}" by (simp add: \h square_integrable {a..b}\) show "l2norm {a..b} (\x. f x - g x) + l2norm {a..b} (\x. - h x) < e" using \h square_integrable {a..b}\ l2norm_neg lg lh by auto qed then show "l2norm {a..b} (\x. f x - (g x + h x)) < e" by (simp add: field_simps) qed qed subsection\Hence the main approximation result\ lemma Weierstrass_l2_trig_polynomial: assumes f: "f square_integrable {-pi..pi}" and "0 < e" obtains n a b where "l2norm {-pi..pi} (\x. f x - (\k\n. a k * sin(real k * x) + b k * cos(real k * x))) < e" proof - let ?\ = "\n a b x. \k\n. a k * sin(real k * x) + b k * cos(real k * x)" obtain g where contg: "continuous_on {-pi..pi} g" and geq: "g(-pi) = g pi" and g: "g square_integrable {-pi..pi}" and norm_fg: "l2norm {-pi..pi} (\x. f x - g x) < e/2" using \e > 0\ by (auto intro: square_integrable_approximate_continuous_ends [OF f, of "e/2"]) then obtain n a b where g_phi_less: "\x. x \ {-pi..pi} \ \g x - (?\ n a b x)\ < e/6" using \e > 0\ Weierstrass_trig_polynomial [OF contg geq, of "e/6"] by (meson zero_less_divide_iff zero_less_numeral) show thesis proof have si: "(?\ n u v) square_integrable {-pi..pi}" for n::nat and u v proof (intro square_integrable_sum continuous_imp_square_integrable) show "continuous_on {-pi..pi} (\x. u k * sin (real k * x) + v k * cos (real k * x))" if "k \ {..n}" for k using that by (intro continuous_intros) qed auto have "l2norm {-pi..pi} (\x. f x - (?\ n a b x)) = l2norm {-pi..pi} (\x. (f x - g x) + (g x - (?\ n a b x)))" by simp also have "\ \ l2norm {-pi..pi} (\x. f x - g x) + l2norm {-pi..pi} (\x. g x - (?\ n a b x))" proof (rule l2norm_triangle) show "(\x. f x - g x) square_integrable {-pi..pi}" using f g square_integrable_diff by blast show "(\x. 
g x - (?\ n a b x)) square_integrable {-pi..pi}" using g si square_integrable_diff by blast qed also have "\ < e" proof - have g_phi: "(\x. g x - (?\ n a b x)) square_integrable {-pi..pi}" using g si square_integrable_diff by blast have "l2norm {-pi..pi} (\x. g x - (?\ n a b x)) \ e/2" unfolding l2norm_def l2product_def power2_eq_square [symmetric] proof (rule real_le_lsqrt) show "0 \ LINT x|lebesgue_on {-pi..pi}. (g x - (?\ n a b x))\<^sup>2" by (rule integral_nonneg_AE) auto have "LINT x|lebesgue_on {-pi..pi}. (g x - (?\ n a b x))\<^sup>2 \ LINT x|lebesgue_on {-pi..pi}. (e / 6) ^ 2" proof (rule integral_mono) show "integrable (lebesgue_on {-pi..pi}) (\x. (g x - (?\ n a b x))\<^sup>2)" using g_phi square_integrable_def by auto show "integrable (lebesgue_on {-pi..pi}) (\x. (e / 6)\<^sup>2)" by (intro continuous_intros continuous_imp_integrable_real) show "(g x - (?\ n a b x))\<^sup>2 \ (e / 6)\<^sup>2" if "x \ space (lebesgue_on {-pi..pi})" for x using \e > 0\ g_phi_less that apply (simp add: abs_le_square_iff [symmetric]) using less_eq_real_def by blast qed also have "\ \ (e / 2)\<^sup>2" using \e > 0\ pi_less_4 by (auto simp: power2_eq_square measure_restrict_space) finally show "LINT x|lebesgue_on {-pi..pi}. (g x - (?\ n a b x))\<^sup>2 \ (e / 2)\<^sup>2" . qed (use \e > 0\ in auto) with norm_fg show ?thesis by auto qed finally show "l2norm {-pi..pi} (\x. f x - (?\ n a b x)) < e" . qed qed proposition Weierstrass_l2_trigonometric_set: assumes f: "f square_integrable {-pi..pi}" and "0 < e" obtains n a where "l2norm {-pi..pi} (\x. f x - (\k\n. a k * trigonometric_set k x)) < e" proof - obtain n a b where lee: "l2norm {-pi..pi} (\x. f x - (\k\n. a k * sin(real k * x) + b k * cos(real k * x))) < e" using Weierstrass_l2_trig_polynomial [OF assms] . let ?a = "\k. if k = 0 then sqrt(2 * pi) * b 0 else if even k then sqrt pi * b(k div 2) else if k \ 2 * n then sqrt pi * a((Suc k) div 2) else 0" show thesis proof have [simp]: "Suc (i * 2) \ n * 2 \ i {..k\n. b k * cos (real k * x)) = (\i\n. if i = 0 then b 0 else b i * cos (real i * x))" for x by (rule sum.cong) auto moreover have "(\k\n. a k * sin (real k * x)) = (\i\n. (if Suc (2 * i) \ 2 * n then sqrt pi * a (Suc i) * sin ((1 + real i) * x) else 0) / sqrt pi)" (is "?lhs = ?rhs") for x proof (cases "n=0") case False then obtain n' where n': "n = Suc n'" using not0_implies_Suc by blast have "?lhs = (\k = Suc 0..n. a k * sin (real k * x))" by (simp add: atMost_atLeast0 sum_shift_lb_Suc0_0) also have "\ = (\ij\n'. a (Suc j) * sin (real (Suc j) * x)) = (\i = ?rhs" by (simp add: field_simps if_distrib [of "\x. x/_"] sum.inter_restrict [where B = "{..k\n. a k * sin(real k * x) + b k * cos(real k * x)) = (\k \ Suc(2*n). ?a k * trigonometric_set k x)" for x unfolding sum.in_pairs_0 trigonometric_set_def by (simp add: sum.distrib if_distrib [of "\x. x*_"] cong: if_cong) with lee show "l2norm {-pi..pi} (\x. f x - (\k \ Suc(2*n). ?a k * trigonometric_set k x)) < e" by auto qed qed subsection\Convergence wrt the L2 norm of trigonometric Fourier series\ definition Fourier_coefficient where "Fourier_coefficient \ orthonormal_coeff {-pi..pi} trigonometric_set" lemma Fourier_series_l2: assumes "f square_integrable {-pi..pi}" shows "(\n. l2norm {-pi..pi} (\x. f x - (\i\n. Fourier_coefficient f i * trigonometric_set i x))) \ 0" proof (clarsimp simp add: lim_sequentially dist_real_def Fourier_coefficient_def) let ?h = "\n x. (\i\n. orthonormal_coeff {-pi..pi} trigonometric_set f i * trigonometric_set i x)" show "\N. \n\N. \l2norm {-pi..pi} (\x. 
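text \
  Informal note: the passage from the uniform bound to the $L^2$ bound needs only the crude
  estimate $\int_{-\pi}^{\pi} \bigl(g(x) - p(x)\bigr)^2\,dx \le 2\pi\,(e/6)^2 \le (e/2)^2$, the
  last step because $2\pi < 8 < 9$ (here $p$ abbreviates the trigonometric polynomial
  $\sum_{k\le n} a_k\sin(kx) + b_k\cos(kx)$ of the statement).  Combined with
  $\lVert f - g\rVert_2 < e/2$ this yields $\lVert f - p\rVert_2 < e$.
\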
f x - ?h n x)\ < e" if "0 < e" for e proof - obtain N a where lte: "l2norm {-pi..pi} (\x. f x - (\k\N. a k * trigonometric_set k x)) < e" using Weierstrass_l2_trigonometric_set by (meson \0 < e\ assms) show ?thesis proof (intro exI allI impI) show "\l2norm {-pi..pi} (\x. f x - ?h m x)\ < e" if "N \ m" for m proof - have "\l2norm {-pi..pi} (\x. f x - ?h m x)\ = l2norm {-pi..pi} (\x. f x - ?h m x)" proof (rule abs_of_nonneg) show "0 \ l2norm {-pi..pi} (\x. f x - ?h m x)" apply (intro l2norm_pos_le square_integrable_diff square_integrable_sum square_integrable_lmult square_integrable_trigonometric_set assms, auto) done qed also have "\ \ l2norm {-pi..pi} (\x. f x - (\k\N. a k * trigonometric_set k x))" proof - have "(\i\m. (if i \ N then a i else 0) * trigonometric_set i x) = (\i\N. a i * trigonometric_set i x)" for x using sum.inter_restrict [where A = "{..m}" and B = "{..N}", symmetric] that by (force simp: if_distrib [of "\x. x * _"] min_absorb2 cong: if_cong) moreover have "l2norm {-pi..pi} (\x. f x - ?h m x) \ l2norm {-pi..pi} (\x. f x - (\i\m. (if i \ N then a i else 0) * trigonometric_set i x))" using orthonormal_optimal_partial_sum [OF orthonormal_system_trigonometric_set square_integrable_trigonometric_set assms] by simp ultimately show ?thesis by simp qed also have "\ < e" by (rule lte) finally show ?thesis . qed qed qed qed subsection\Fourier coefficients go to 0 (weak form of Riemann-Lebesgue)\ lemma trigonometric_set_mul_absolutely_integrable: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\x. trigonometric_set n x * f x) absolutely_integrable_on {-pi..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) show "trigonometric_set n \ borel_measurable (lebesgue_on {-pi..pi})" using square_integrable_def square_integrable_trigonometric_set by blast show "bounded (trigonometric_set n ` {-pi..pi})" unfolding bounded_iff using pi_gt3 sqrt_pi_ge1 by (rule_tac x=1 in exI) (auto simp: trigonometric_set_def dist_real_def intro: order_trans [OF abs_sin_le_one] order_trans [OF abs_cos_le_one]) qed (auto simp: assms) lemma trigonometric_set_mul_integrable: "f absolutely_integrable_on {-pi..pi} \ integrable (lebesgue_on {-pi..pi}) (\x. trigonometric_set n x * f x)" using trigonometric_set_mul_absolutely_integrable by (simp add: integrable_restrict_space set_integrable_def) lemma trigonometric_set_integrable [simp]: "integrable (lebesgue_on {-pi..pi}) (trigonometric_set n)" using trigonometric_set_mul_integrable [where f = id] by simp (metis absolutely_integrable_imp_integrable fmeasurableD interval_cbox lmeasurable_cbox square_integrable_imp_absolutely_integrable square_integrable_trigonometric_set) lemma absolutely_integrable_sin_product: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\x. sin(k * x) * f x) absolutely_integrable_on {-pi..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) show "(\x. sin (k * x)) \ borel_measurable (lebesgue_on {-pi..pi})" by (metis borel_measurable_integrable integrable_sin_cx mult_commute_abs) show "bounded ((\x. sin (k * x)) ` {-pi..pi})" by (metis (mono_tags, lifting) abs_sin_le_one bounded_iff imageE real_norm_def) qed (auto simp: assms) lemma absolutely_integrable_cos_product: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\x. cos(k * x) * f x) absolutely_integrable_on {-pi..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) show "(\x. 
cos (k * x)) \ borel_measurable (lebesgue_on {-pi..pi})" by (metis borel_measurable_integrable integrable_cos_cx mult_commute_abs) show "bounded ((\x. cos (k * x)) ` {-pi..pi})" by (metis (mono_tags, lifting) abs_cos_le_one bounded_iff imageE real_norm_def) qed (auto simp: assms) lemma assumes "f absolutely_integrable_on {-pi..pi}" shows Fourier_products_integrable_cos: "integrable (lebesgue_on {-pi..pi}) (\x. cos(k * x) * f x)" and Fourier_products_integrable_sin: "integrable (lebesgue_on {-pi..pi}) (\x. sin(k * x) * f x)" using absolutely_integrable_cos_product absolutely_integrable_sin_product assms by (auto simp: integrable_restrict_space set_integrable_def) lemma Riemann_lebesgue_square_integrable: assumes "orthonormal_system S w" "\i. w i square_integrable S" "f square_integrable S" shows "orthonormal_coeff S w f \ 0" using Fourier_series_square_summable [OF assms, of UNIV] summable_LIMSEQ_zero by force proposition Riemann_lebesgue: assumes "f absolutely_integrable_on {-pi..pi}" shows "Fourier_coefficient f \ 0" unfolding lim_sequentially proof (intro allI impI) fix e::real assume "e > 0" then obtain g where "continuous_on UNIV g" and gabs: "g absolutely_integrable_on {-pi..pi}" and fg_e2: "integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. \f x - g x\) < e/2" using absolutely_integrable_approximate_continuous [OF assms, of "e/2"] by (metis (full_types) box_real(2) half_gt_zero_iff lmeasurable_cbox) have "g square_integrable {-pi..pi}" using \continuous_on UNIV g\ continuous_imp_square_integrable continuous_on_subset by blast then have "orthonormal_coeff {-pi..pi} trigonometric_set g \ 0" using Riemann_lebesgue_square_integrable orthonormal_system_trigonometric_set square_integrable_trigonometric_set by blast with \e > 0\ obtain N where N: "\n. n \ N \ \orthonormal_coeff {-pi..pi} trigonometric_set g n\ < e/2" unfolding lim_sequentially by (metis half_gt_zero_iff norm_conv_dist real_norm_def) have "\Fourier_coefficient f n\ < e" if "n \ N" for n proof - have "\LINT x|lebesgue_on {-pi..pi}. trigonometric_set n x * g x\ < e/2" using N [OF \n \ N\] by (auto simp: orthonormal_coeff_def l2product_def) have "integrable (lebesgue_on {-pi..pi}) (\x. trigonometric_set n x * g x)" using gabs trigonometric_set_mul_integrable by blast moreover have "integrable (lebesgue_on {-pi..pi}) (\x. trigonometric_set n x * f x)" using assms trigonometric_set_mul_integrable by blast ultimately have "\(LINT x|lebesgue_on {-pi..pi}. trigonometric_set n x * g x) - (LINT x|lebesgue_on {-pi..pi}. trigonometric_set n x * f x)\ = \(LINT x|lebesgue_on {-pi..pi}. trigonometric_set n x * (g x - f x))\" by (simp add: algebra_simps flip: Bochner_Integration.integral_diff) also have "\ \ LINT x|lebesgue_on {-pi..pi}. \f x - g x\" proof (rule integral_abs_bound_integral) show "integrable (lebesgue_on {-pi..pi}) (\x. trigonometric_set n x * (g x - f x))" by (simp add: gabs assms trigonometric_set_mul_integrable) have "(\x. f x - g x) absolutely_integrable_on {-pi..pi}" using gabs assms by blast then show "integrable (lebesgue_on {-pi..pi}) (\x. 
\f x - g x\)" by (simp add: absolutely_integrable_imp_integrable) fix x assume "x \ space (lebesgue_on {-pi..pi})" then have "-pi \ x" "x \ pi" by auto have "\trigonometric_set n x\ \ 1" using pi_ge_two apply (simp add: trigonometric_set_def) using sqrt_pi_ge1 abs_sin_le_one order_trans abs_cos_le_one by metis then show "\trigonometric_set n x * (g x - f x)\ \ \f x - g x\" using abs_ge_zero mult_right_mono by (fastforce simp add: abs_mult abs_minus_commute) qed finally have "\(LINT x|lebesgue_on {-pi..pi}. trigonometric_set n x * g x) - (LINT x|lebesgue_on {-pi..pi}. trigonometric_set n x * f x)\ \ LINT x|lebesgue_on {-pi..pi}. \f x - g x\" . then show ?thesis using N [OF \n \ N\] fg_e2 unfolding Fourier_coefficient_def orthonormal_coeff_def l2product_def by linarith qed then show "\no. \n\no. dist (Fourier_coefficient f n) 0 < e" by auto qed lemma Riemann_lebesgue_sin: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\n. integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin(real n * x) * f x)) \ 0" unfolding lim_sequentially proof (intro allI impI) fix e::real assume "e > 0" then obtain N where N: "\n. n \ N \ \Fourier_coefficient f n\ < e/4" using Riemann_lebesgue [OF assms] unfolding lim_sequentially by (metis norm_conv_dist real_norm_def zero_less_divide_iff zero_less_numeral) have "\LINT x|lebesgue_on {-pi..pi}. sin (real n * x) * f x\ < e" if "n > N" for n using that proof (induction n) case (Suc n) have "\Fourier_coefficient f(Suc (2 * n))\ < e/4" using N Suc.prems by auto then have "\LINT x|lebesgue_on {-pi..pi}. sin ((1 + real n) * x) * f x\ < sqrt pi * e / 4" by (simp add: Fourier_coefficient_def orthonormal_coeff_def trigonometric_set_def l2product_def field_simps) also have "\ \ e" using \0 < e\ pi_less_4 real_sqrt_less_mono by (fastforce simp add: field_simps) finally show ?case by simp qed auto then show "\no. \n\no. dist (LINT x|lebesgue_on {-pi..pi}. sin (real n * x) * f x) 0 < e" by (metis (full_types) le_neq_implies_less less_add_same_cancel2 less_trans norm_conv_dist real_norm_def zero_less_one) qed lemma Riemann_lebesgue_cos: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\n. integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. cos(real n * x) * f x)) \ 0" unfolding lim_sequentially proof (intro allI impI) fix e::real assume "e > 0" then obtain N where N: "\n. n \ N \ \Fourier_coefficient f n\ < e/4" using Riemann_lebesgue [OF assms] unfolding lim_sequentially by (metis norm_conv_dist real_norm_def zero_less_divide_iff zero_less_numeral) have "\LINT x|lebesgue_on {-pi..pi}. cos (real n * x) * f x\ < e" if "n > N" for n using that proof (induction n) case (Suc n) have eq: "(x * 2 + x * (real n * 2)) / 2 = x + x * (real n)" for x by simp have "\Fourier_coefficient f(2*n + 2)\ < e/4" using N Suc.prems by auto then have "\LINT x|lebesgue_on {-pi..pi}. f x * cos (x + x * (real n))\ < sqrt pi * e / 4" by (simp add: Fourier_coefficient_def orthonormal_coeff_def trigonometric_set_def l2product_def field_simps eq) also have "\ \ e" using \0 < e\ pi_less_4 real_sqrt_less_mono by (fastforce simp add: field_simps) finally show ?case by (simp add: field_simps) qed auto then show "\no. \n\no. dist (LINT x|lebesgue_on {-pi..pi}. cos (real n * x) * f x) 0 < e" by (metis (full_types) le_neq_implies_less less_add_same_cancel2 less_trans norm_conv_dist real_norm_def zero_less_one) qed lemma Riemann_lebesgue_sin_half: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\n. LINT x|lebesgue_on {-pi..pi}. 
sin ((real n + 1/2) * x) * f x) \ 0" proof (simp add: algebra_simps sin_add) let ?INT = "integral\<^sup>L (lebesgue_on {-pi..pi})" let ?f = "(\n. ?INT (\x. sin(n * x) * cos(1/2 * x) * f x) + ?INT (\x. cos(n * x) * sin(1/2 * x) * f x))" show "(\n. ?INT (\x. f x * (cos (x * real n) * sin (x/2)) + f x * (sin (x * real n) * cos (x/2)))) \ 0" proof (rule Lim_transform_eventually) have sin: "(\x. sin (1/2 * x) * f x) absolutely_integrable_on {-pi..pi}" by (intro absolutely_integrable_sin_product assms) have cos: "(\x. cos (1/2 * x) * f x) absolutely_integrable_on {-pi..pi}" by (intro absolutely_integrable_cos_product assms) show "\\<^sub>F n in sequentially. ?f n = ?INT (\x. f x * (cos (x * real n) * sin (x/2)) + f x * (sin (x * real n) * cos (x/2)))" unfolding mult.assoc apply (rule eventuallyI) apply (subst Bochner_Integration.integral_add [symmetric]) apply (safe intro!: cos absolutely_integrable_sin_product sin absolutely_integrable_cos_product absolutely_integrable_imp_integrable) apply (auto simp: algebra_simps) done have "?f \ 0 + 0" unfolding mult.assoc by (intro tendsto_add Riemann_lebesgue_sin Riemann_lebesgue_cos sin cos) then show "?f \ (0::real)" by simp qed qed lemma Fourier_sum_limit_pair: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\n. \k\2 * n. Fourier_coefficient f k * trigonometric_set k t) \ l \ (\n. \k\n. Fourier_coefficient f k * trigonometric_set k t) \ l" (is "?lhs = ?rhs") proof assume L: ?lhs show ?rhs unfolding lim_sequentially dist_real_def proof (intro allI impI) fix e::real assume "e > 0" then obtain N1 where N1: "\n. n \ N1 \ \Fourier_coefficient f n\ < e/2" using Riemann_lebesgue [OF assms] unfolding lim_sequentially by (metis norm_conv_dist real_norm_def zero_less_divide_iff zero_less_numeral) obtain N2 where N2: "\n. n \ N2 \ \(\k\2 * n. Fourier_coefficient f k * trigonometric_set k t) - l\ < e/2" using L unfolding lim_sequentially dist_real_def by (meson \0 < e\ half_gt_zero_iff) show "\no. \n\no. \(\k\n. Fourier_coefficient f k * trigonometric_set k t) - l\ < e" proof (intro exI allI impI) fix n assume n: "N1 + 2*N2 + 1 \ n" then have "n \ N1" "n \ N2" "n div 2 \ N2" by linarith+ consider "n = 2 * (n div 2)" | "n = Suc(2 * (n div 2))" by linarith then show "\(\k\n. Fourier_coefficient f k * trigonometric_set k t) - l\ < e" proof cases case 1 show ?thesis apply (subst 1) using N2 [OF \n div 2 \ N2\] by linarith next case 2 have "\(\k\2 * (n div 2). Fourier_coefficient f k * trigonometric_set k t) - l\ < e/2" using N2 [OF \n div 2 \ N2\] by linarith moreover have "\Fourier_coefficient f(Suc (2 * (n div 2))) * trigonometric_set (Suc (2 * (n div 2))) t\ < (e/2) * 1" proof - have "\trigonometric_set (Suc (2 * (n div 2))) t\ \ 1" apply (simp add: trigonometric_set) using abs_sin_le_one sqrt_pi_ge1 by (blast intro: order_trans) moreover have "\Fourier_coefficient f(Suc (2 * (n div 2)))\ < e/2" using N1 \N1 \ n\ by auto ultimately show ?thesis unfolding abs_mult - by (meson abs_ge_zero le_less_trans mult_left_mono real_mult_less_iff1 zero_less_one) + by (meson abs_ge_zero le_less_trans mult_left_mono mult_less_iff1 zero_less_one) qed ultimately show ?thesis apply (subst 2) unfolding sum.atMost_Suc by linarith qed qed qed next assume ?rhs then show ?lhs unfolding lim_sequentially by (auto simp: elim!: imp_forward ex_forward) qed subsection\Express Fourier sum in terms of the special expansion at the origin\ lemma Fourier_sum_0: "(\k \ n. Fourier_coefficient f k * trigonometric_set k 0) = (\k \ n div 2. 
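text \
  Informal note: the half-integer frequency variant follows from the addition formula
  $\sin\bigl((n + \tfrac{1}{2})x\bigr) = \sin(nx)\cos(x/2) + \cos(nx)\sin(x/2)$, which splits the
  integral into a sine-frequency term against $\cos(x/2)\,f(x)$ and a cosine-frequency term against
  $\sin(x/2)\,f(x)$; each tends to $0$ by the two previous lemmas.
\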
Fourier_coefficient f(2*k) * trigonometric_set (2*k) 0)" (is "?lhs = ?rhs") proof - have "?lhs = (\k = 2 * 0.. Suc (2 * (n div 2)). Fourier_coefficient f k * trigonometric_set k 0)" proof (rule sum.mono_neutral_left) show "\i\{2 * 0..Suc (2 * (n div 2))} - {..n}. Fourier_coefficient f i * trigonometric_set i 0 = 0" proof clarsimp fix i assume "i \ Suc (2 * (n div 2))" "\ i \ n" then have "i = Suc n" "even n" by presburger+ moreover assume "trigonometric_set i 0 \ 0" ultimately show "Fourier_coefficient f i = 0" by (simp add: trigonometric_set_def) qed qed auto also have "\ = ?rhs" unfolding sum.in_pairs by (simp add: trigonometric_set atMost_atLeast0) finally show ?thesis . qed lemma Fourier_sum_0_explicit: "(\k\n. Fourier_coefficient f k * trigonometric_set k 0) = (Fourier_coefficient f 0 / sqrt 2 + (\k = 1..n div 2. Fourier_coefficient f(2*k))) / sqrt pi" (is "?lhs = ?rhs") proof - have "(\k=0..n. Fourier_coefficient f k * trigonometric_set k 0) = Fourier_coefficient f 0 * trigonometric_set 0 0 + (\k = 1..n. Fourier_coefficient f k * trigonometric_set k 0)" by (simp add: Fourier_sum_0 sum.atLeast_Suc_atMost) also have "\ = ?rhs" proof - have "Fourier_coefficient f 0 * trigonometric_set 0 0 = Fourier_coefficient f 0 / (sqrt 2 * sqrt pi)" by (simp add: real_sqrt_mult trigonometric_set(1)) moreover have "(\k = Suc 0..n. Fourier_coefficient f k * trigonometric_set k 0) = (\k = Suc 0..n div 2. Fourier_coefficient f(2*k)) / sqrt pi" proof (induction n) case (Suc n) show ?case by (simp add: Suc) (auto simp: trigonometric_set_def field_simps) qed auto ultimately show ?thesis by (simp add: add_divide_distrib) qed finally show ?thesis by (simp add: atMost_atLeast0) qed lemma Fourier_sum_0_integrals: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\k\n. Fourier_coefficient f k * trigonometric_set k 0) = (integral\<^sup>L (lebesgue_on {-pi..pi}) f / 2 + (\k = Suc 0..n div 2. integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. cos(k * x) * f x))) / pi" proof - have "integral\<^sup>L (lebesgue_on {-pi..pi}) f / (sqrt (2 * pi) * sqrt 2 * sqrt pi) = integral\<^sup>L (lebesgue_on {-pi..pi}) f / (2 * pi)" by (simp add: algebra_simps real_sqrt_mult) moreover have "(\k = Suc 0..n div 2. LINT x|lebesgue_on {-pi..pi}. trigonometric_set (2*k) x * f x) / sqrt pi = (\k = Suc 0..n div 2. LINT x|lebesgue_on {-pi..pi}. cos (k * x) * f x) / pi" by (simp add: trigonometric_set_def sum_divide_distrib) ultimately show ?thesis unfolding Fourier_sum_0_explicit by (simp add: Fourier_coefficient_def orthonormal_coeff_def l2product_def trigonometric_set add_divide_distrib) qed lemma Fourier_sum_0_integral: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\k\n. Fourier_coefficient f k * trigonometric_set k 0) = integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. (1/2 + (\k = Suc 0..n div 2. cos(k * x))) * f x) / pi" proof - note * = Fourier_products_integrable_cos [OF assms] have "integrable (lebesgue_on {-pi..pi}) (\x. \n = Suc 0..n div 2. f x * cos (x * real n))" using * by (force simp: mult_ac) moreover have "integral\<^sup>L (lebesgue_on {-pi..pi}) f / 2 + (\k = Suc 0..n div 2. LINT x|lebesgue_on {-pi..pi}. f x * cos (x * real k)) = (LINT x|lebesgue_on {-pi..pi}. f x / 2) + (LINT x|lebesgue_on {-pi..pi}. (\n = Suc 0..n div 2. f x * cos (x * real n)))" proof (subst Bochner_Integration.integral_sum) show "integrable (lebesgue_on {-pi..pi}) (\x. 
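text \
  Informal note: at $t = 0$ every sine term vanishes, so for absolutely integrable $f$ the partial
  sum collapses to
  $\sum_{k\le n} \hat f(k)\,t_k(0)
     = \frac{1}{\pi}\Bigl(\tfrac{1}{2}\int_{-\pi}^{\pi} f
         + \sum_{k=1}^{\lfloor n/2\rfloor} \int_{-\pi}^{\pi} \cos(kx)\,f(x)\,dx\Bigr)$,
  the form that reappears below once the Dirichlet kernel is introduced.
\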
f x * cos (x * real i))" if "i \ {Suc 0..n div 2}" for i using that * [of i] by (auto simp: Bochner_Integration.integral_sum mult_ac) qed auto ultimately show ?thesis using Fourier_products_integrable_cos [OF assms] absolutely_integrable_imp_integrable [OF assms] by (simp add: Fourier_sum_0_integrals sum_distrib_left assms algebra_simps) qed subsection\How Fourier coefficients behave under addition etc\ lemma Fourier_coefficient_add: assumes "f absolutely_integrable_on {-pi..pi}" "g absolutely_integrable_on {-pi..pi}" shows "Fourier_coefficient (\x. f x + g x) i = Fourier_coefficient f i + Fourier_coefficient g i" using assms trigonometric_set_mul_integrable by (simp add: Fourier_coefficient_def orthonormal_coeff_def l2product_def distrib_left) lemma Fourier_coefficient_minus: assumes "f absolutely_integrable_on {-pi..pi}" shows "Fourier_coefficient (\x. - f x) i = - Fourier_coefficient f i" using assms trigonometric_set_mul_integrable by (simp add: Fourier_coefficient_def orthonormal_coeff_def l2product_def) lemma Fourier_coefficient_diff: assumes f: "f absolutely_integrable_on {-pi..pi}" and g: "g absolutely_integrable_on {-pi..pi}" shows "Fourier_coefficient (\x. f x - g x) i = Fourier_coefficient f i - Fourier_coefficient g i" proof - have mg: "(\x. - g x) absolutely_integrable_on {-pi..pi}" using g by (simp add: absolutely_integrable_measurable_real) show ?thesis using Fourier_coefficient_add [OF f mg] Fourier_coefficient_minus [OF g] by simp qed lemma Fourier_coefficient_const: "Fourier_coefficient (\x. c) i = (if i = 0 then c * sqrt(2 * pi) else 0)" by (auto simp: Fourier_coefficient_def orthonormal_coeff_def l2product_def trigonometric_set_def divide_simps measure_restrict_space) lemma Fourier_offset_term: fixes f :: "real \ real" assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "Fourier_coefficient (\x. f(x+t)) (2 * n + 2) * trigonometric_set (2 * n + 2) 0 = Fourier_coefficient f(2 * n+1) * trigonometric_set (2 * n+1) t + Fourier_coefficient f(2 * n + 2) * trigonometric_set (2 * n + 2) t" proof - have eq: "(2 + 2 * real n) * x / 2 = (1 + real n) * x" for x by (simp add: divide_simps) have 1: "integrable (lebesgue_on {-pi..pi}) (\x. f x * (cos (x + x * n) * cos (t + t * n)))" using Fourier_products_integrable_cos [of f "Suc n"] f by (simp add: algebra_simps) (simp add: mult.assoc [symmetric]) have 2: "integrable (lebesgue_on {-pi..pi}) (\x. f x * (sin (x + x * n) * sin (t + t * n)))" using Fourier_products_integrable_sin [of f "Suc n"] f by (simp add: algebra_simps) (simp add: mult.assoc [symmetric]) have "has_bochner_integral (lebesgue_on {-pi..pi}) (\x. cos (real (Suc n) * (x + t - t)) * f(x + t)) (LINT x|lebesgue_on {-pi..pi}. cos (real (Suc n) * (x - t)) * f x)" proof (rule has_integral_periodic_offset) have "(\x. cos (real (Suc n) * (x - t)) * f x) absolutely_integrable_on {-pi..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) show "(\x. cos (real (Suc n) * (x - t))) \ borel_measurable (lebesgue_on {-pi..pi})" by (intro continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto show "bounded ((\x. cos (real (Suc n) * (x - t))) ` {-pi..pi})" by (rule boundedI [where B=1]) auto qed (use f in auto) then show "has_bochner_integral (lebesgue_on {-pi..pi}) (\x. cos ((Suc n) * (x - t)) * f x) (LINT x|lebesgue_on {-pi..pi}. 
cos (real (Suc n) * (x - t)) * f x)" by (simp add: has_bochner_integral_integrable integrable_restrict_space set_integrable_def) next fix x show "cos (real (Suc n) * (x + (pi - - pi) - t)) * f(x + (pi - - pi)) = cos (real (Suc n) * (x - t)) * f x" using periodic cos.plus_of_nat [of "(Suc n) * (x - t)" "Suc n"] by (simp add: algebra_simps) qed then have "has_bochner_integral (lebesgue_on {-pi..pi}) (\x. cos (real (Suc n) * x) * f(x + t)) (LINT x|lebesgue_on {-pi..pi}. cos (real (Suc n) * (x - t)) * f x)" by simp then have "LINT x|lebesgue_on {-pi..pi}. cos ((Suc n) * x) * f(x+t) = LINT x|lebesgue_on {-pi..pi}. cos ((Suc n) * (x - t)) * f x" using has_bochner_integral_integral_eq by blast also have "\ = LINT x|lebesgue_on {-pi..pi}. cos ((Suc n) * x - ((Suc n) * t)) * f x" by (simp add: algebra_simps) also have "\ = cos ((Suc n) * t) * (LINT x|lebesgue_on {-pi..pi}. cos ((Suc n) * x) * f x) + sin ((Suc n) * t) * (LINT x|lebesgue_on {-pi..pi}. sin ((Suc n) * x) * f x)" by (simp add: cos_diff algebra_simps 1 2 flip: integral_mult_left_zero integral_mult_right_zero) finally have "LINT x|lebesgue_on {-pi..pi}. cos ((Suc n) * x) * f(x+t) = cos ((Suc n) * t) * (LINT x|lebesgue_on {-pi..pi}. cos ((Suc n) * x) * f x) + sin ((Suc n) * t) * (LINT x|lebesgue_on {-pi..pi}. sin ((Suc n) * x) * f x)" . then show ?thesis unfolding Fourier_coefficient_def orthonormal_coeff_def trigonometric_set_def by (simp add: eq) (simp add: divide_simps algebra_simps l2product_def) qed lemma Fourier_sum_offset: fixes f :: "real \ real" assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\k\2*n. Fourier_coefficient f k * trigonometric_set k t) = (\k\2*n. Fourier_coefficient (\x. f(x+t)) k * trigonometric_set k 0)" (is "?lhs = ?rhs") proof - have "?lhs = Fourier_coefficient f 0 * trigonometric_set 0 t + (\k = Suc 0..2*n. Fourier_coefficient f k * trigonometric_set k t)" by (simp add: atMost_atLeast0 sum.atLeast_Suc_atMost) moreover have "(\k = Suc 0..2*n. Fourier_coefficient f k * trigonometric_set k t) = (\k = Suc 0..2*n. Fourier_coefficient (\x. f(x+t)) k * trigonometric_set k 0)" proof (cases n) case (Suc n') have "(\k = Suc 0..2 * n. Fourier_coefficient f k * trigonometric_set k t) = (\k = Suc 0.. Suc (Suc (2 * n')). Fourier_coefficient f k * trigonometric_set k t)" by (simp add: Suc) also have "\ = (\k \ Suc (2 * n'). Fourier_coefficient f(Suc k) * trigonometric_set (Suc k) t)" unfolding atMost_atLeast0 using sum.shift_bounds_cl_Suc_ivl by blast also have "\ = (\k \ Suc (2 * n'). Fourier_coefficient (\x. f(x+t)) (Suc k) * trigonometric_set (Suc k) 0)" unfolding sum.in_pairs_0 proof (rule sum.cong [OF refl]) show "Fourier_coefficient f(Suc (2 * k)) * trigonometric_set (Suc (2 * k)) t + Fourier_coefficient f(Suc (Suc (2 * k))) * trigonometric_set (Suc (Suc (2 * k))) t = Fourier_coefficient (\x. f(x + t)) (Suc (2 * k)) * trigonometric_set (Suc (2 * k)) 0 + Fourier_coefficient (\x. f(x + t)) (Suc (Suc (2 * k))) * trigonometric_set (Suc (Suc (2 * k))) 0" if "k \ {..n'}" for k using that Fourier_offset_term [OF assms, of t ] by (auto simp: trigonometric_set_def) qed also have "\ = (\k = Suc 0..Suc (Suc (2 * n')). Fourier_coefficient (\x. f(x+t)) k * trigonometric_set k 0)" by (simp only: atMost_atLeast0 sum.shift_bounds_cl_Suc_ivl) also have "\ = (\k = Suc 0..2*n. Fourier_coefficient (\x. f(x+t)) k * trigonometric_set k 0)" by (simp add: Suc) finally show ?thesis . qed auto moreover have "?rhs = Fourier_coefficient (\x. f(x+t)) 0 * trigonometric_set 0 0 + (\k = Suc 0..2*n. 
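text \
  Informal note: the key computation above is, for $2\pi$-periodic $f$,
  $\int_{-\pi}^{\pi} \cos\bigl((n{+}1)x\bigr) f(x+t)\,dx
     = \int_{-\pi}^{\pi} \cos\bigl((n{+}1)(x-t)\bigr) f(x)\,dx
     = \cos\bigl((n{+}1)t\bigr) \int_{-\pi}^{\pi} \cos\bigl((n{+}1)x\bigr) f(x)\,dx
       + \sin\bigl((n{+}1)t\bigr) \int_{-\pi}^{\pi} \sin\bigl((n{+}1)x\bigr) f(x)\,dx$,
  expressing a cosine coefficient of the shifted function through a sine and a cosine coefficient
  of $f$ itself.
\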
Fourier_coefficient (\x. f(x+t)) k * trigonometric_set k 0)" by (simp add: atMost_atLeast0 sum.atLeast_Suc_atMost) moreover have "Fourier_coefficient f 0 * trigonometric_set 0 t = Fourier_coefficient (\x. f(x+t)) 0 * trigonometric_set 0 0" by (simp add: Fourier_coefficient_def orthonormal_coeff_def trigonometric_set_def l2product_def integral_periodic_offset periodic) ultimately show ?thesis by simp qed lemma Fourier_sum_offset_unpaired: fixes f :: "real \ real" assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\k\2*n. Fourier_coefficient f k * trigonometric_set k t) = (\k\n. Fourier_coefficient (\x. f(x+t)) (2*k) * trigonometric_set (2*k) 0)" (is "?lhs = ?rhs") proof - have "?lhs = (\k\n. Fourier_coefficient (\x. f(x+t)) (2*k) * trigonometric_set (2*k) 0 + Fourier_coefficient (\x. f(x+t)) (Suc (2*k)) * trigonometric_set (Suc (2*k)) 0)" apply (simp add: assms Fourier_sum_offset) apply (subst sum.in_pairs_0 [symmetric]) by (simp add: trigonometric_set) also have "\ = ?rhs" by (auto simp: trigonometric_set) finally show ?thesis . qed subsection\Express partial sums using Dirichlet kernel\ definition Dirichlet_kernel where "Dirichlet_kernel \ \n x. if x = 0 then real n + 1/2 else sin((real n + 1/2) * x) / (2 * sin(x/2))" lemma Dirichlet_kernel_0 [simp]: "\x\ < 2 * pi \ Dirichlet_kernel 0 x = 1/2" by (auto simp: Dirichlet_kernel_def sin_zero_iff) lemma Dirichlet_kernel_minus [simp]: "Dirichlet_kernel n (-x) = Dirichlet_kernel n x" by (auto simp: Dirichlet_kernel_def) lemma Dirichlet_kernel_continuous_strong: "continuous_on {-(2 * pi)<..<2 * pi} (Dirichlet_kernel n)" proof - have "isCont (Dirichlet_kernel n) z" if "-(2 * pi) < z" "z < 2 * pi" for z proof (cases "z=0") case True have *: "(\x. sin ((n + 1/2) * x) / (2 * sin (x/2))) \0 \ real n + 1/2" by real_asymp show ?thesis unfolding Dirichlet_kernel_def continuous_at True apply (rule Lim_transform_eventually [where f = "\x. sin((n + 1/2) * x) / (2 * sin(x/2))"]) apply (auto simp: * eventually_at_filter) done next case False have "continuous (at z) (\x. sin((real n + 1/2) * x) / (2 * sin(x/2)))" using that False by (intro continuous_intros) (auto simp: sin_zero_iff) moreover have "\\<^sub>F x in nhds z. x \ 0" using False t1_space_nhds by blast ultimately show ?thesis using False by (force simp: Dirichlet_kernel_def continuous_at eventually_at_filter elim: Lim_transform_eventually) qed then show ?thesis by (simp add: continuous_on_eq_continuous_at) qed lemma Dirichlet_kernel_continuous: "continuous_on {-pi..pi} (Dirichlet_kernel n)" apply (rule continuous_on_subset [OF Dirichlet_kernel_continuous_strong], clarsimp) using pi_gt_zero by linarith lemma absolutely_integrable_mult_Dirichlet_kernel: assumes "f absolutely_integrable_on {-pi..pi}" shows "(\x. Dirichlet_kernel n x * f x) absolutely_integrable_on {-pi..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) show "Dirichlet_kernel n \ borel_measurable (lebesgue_on {-pi..pi})" by (simp add: Dirichlet_kernel_continuous continuous_imp_measurable_on_sets_lebesgue) have "compact (Dirichlet_kernel n ` {-pi..pi})" by (auto simp: compact_continuous_image [OF Dirichlet_kernel_continuous]) then show "bounded (Dirichlet_kernel n ` {-pi..pi})" using compact_imp_bounded by blast qed (use assms in auto) lemma cosine_sum_lemma: "(1/2 + (\k = Suc 0..n. cos(real k * x))) * sin(x/2) = sin((real n + 1/2) * x) / 2" proof - consider "n=0" | "n \ 1" by linarith then show ?thesis proof cases case 2 then have "(\k = Suc 0..n. 
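text \
  Informal note: in conventional notation the kernel just defined is
  $D_n(x) = \dfrac{\sin\bigl((n + \tfrac{1}{2})x\bigr)}{2\sin(x/2)}$ for $x \neq 0$, with the
  removable singularity filled in by its limit $D_n(0) = n + \tfrac{1}{2}$; it is an even function,
  continuous on $(-2\pi, 2\pi)$ (the notation $D_n$ is used only in these remarks).
\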
(sin (real k * x + x/2) * 2 - sin (real k * x - x/2) * 2) / 2) = sin (real n * x + x/2) - sin (x/2)" proof (induction n) case (Suc n) show ?case proof (cases "n=0") case False with Suc show ?thesis by (simp add: divide_simps algebra_simps) qed auto qed auto then show ?thesis by (simp add: distrib_right sum_distrib_right cos_times_sin) qed auto qed lemma Dirichlet_kernel_cosine_sum: assumes "\x\ < 2 * pi" shows "Dirichlet_kernel n x = 1/2 + (\k = Suc 0..n. cos(real k * x))" proof - have "sin ((real n + 1/2) * x) / (2 * sin (x/2)) = 1/2 + (\k = Suc 0..n. cos (real k * x))" if "x \ 0" proof - have "sin(x/2) \ 0" using assms that by (auto simp: sin_zero_iff) then show ?thesis using cosine_sum_lemma [of x n] by (simp add: divide_simps) qed then show ?thesis by (auto simp: Dirichlet_kernel_def) qed lemma integrable_Dirichlet_kernel: "integrable (lebesgue_on {-pi..pi}) (Dirichlet_kernel n)" using Dirichlet_kernel_continuous continuous_imp_integrable_real by blast lemma integral_Dirichlet_kernel [simp]: "integral\<^sup>L (lebesgue_on {-pi..pi}) (Dirichlet_kernel n) = pi" proof - have "integral\<^sup>L (lebesgue_on {-pi..pi}) (Dirichlet_kernel n) = LINT x|lebesgue_on {-pi..pi}. 1/2 + (\k = Suc 0..n. cos (k * x))" using pi_ge_two by (force intro: Bochner_Integration.integral_cong [OF refl Dirichlet_kernel_cosine_sum]) also have "\ = (LINT x|lebesgue_on {-pi..pi}. 1/2) + (LINT x|lebesgue_on {-pi..pi}. (\k = Suc 0..n. cos (k * x)))" proof (rule Bochner_Integration.integral_add) show "integrable (lebesgue_on {-pi..pi}) (\x. \k = Suc 0..n. cos (real k * x))" by (rule Bochner_Integration.integrable_sum) (metis integrable_cos_cx mult_commute_abs) qed auto also have "\ = pi" proof - have "\i. \Suc 0 \ i; i \ n\ \ integrable (lebesgue_on {-pi..pi}) (\x. cos (real i * x))" by (metis integrable_cos_cx mult_commute_abs) then have "LINT x|lebesgue_on {-pi..pi}. (\k = Suc 0..n. cos (real k * x)) = 0" by (simp add: Bochner_Integration.integral_sum) then show ?thesis by (auto simp: measure_restrict_space) qed finally show ?thesis . qed lemma integral_Dirichlet_kernel_half [simp]: "integral\<^sup>L (lebesgue_on {0..pi}) (Dirichlet_kernel n) = pi/2" proof - have "integral\<^sup>L (lebesgue_on {- pi..0}) (Dirichlet_kernel n) + integral\<^sup>L (lebesgue_on {0..pi}) (Dirichlet_kernel n) = pi" using integral_combine [OF integrable_Dirichlet_kernel] integral_Dirichlet_kernel by simp moreover have "integral\<^sup>L (lebesgue_on {- pi..0}) (Dirichlet_kernel n) = integral\<^sup>L (lebesgue_on {0..pi}) (Dirichlet_kernel n)" using integral_reflect_real [of pi 0 "Dirichlet_kernel n"] by simp ultimately show ?thesis by simp qed lemma Fourier_sum_offset_Dirichlet_kernel: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\k\2*n. Fourier_coefficient f k * trigonometric_set k t) = integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. Dirichlet_kernel n x * f(x+t)) / pi" (is "?lhs = ?rhs") proof - have ft: "(\x. f(x+t)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset [OF f, of t] periodic by simp have "?lhs = (\k=0..n. Fourier_coefficient (\x. f(x+t)) (2*k) * trigonometric_set (2*k) 0)" using Fourier_sum_offset_unpaired assms atMost_atLeast0 by auto also have "\ = Fourier_coefficient (\x. f(x+t)) 0 / sqrt (2 * pi) + (\k = Suc 0..n. Fourier_coefficient (\x. f(x+t)) (2*k) / sqrt pi)" by (simp add: sum.atLeast_Suc_atMost trigonometric_set_def) also have "\ = (LINT x|lebesgue_on {-pi..pi}. f(x+t)) / (2 * pi) + (\k = Suc 0..n. (LINT x|lebesgue_on {-pi..pi}. 
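text \
  Informal note: the cosine-sum identity
  $\bigl(\tfrac{1}{2} + \sum_{k=1}^{n} \cos(kx)\bigr)\sin(x/2)
     = \tfrac{1}{2}\sin\bigl((n+\tfrac{1}{2})x\bigr)$
  shows $D_n(x) = \tfrac{1}{2} + \sum_{k=1}^{n} \cos(kx)$ for $\lvert x\rvert < 2\pi$; since each
  $\int_{-\pi}^{\pi} \cos(kx)\,dx = 0$, this gives $\int_{-\pi}^{\pi} D_n = \pi$ and, by symmetry,
  $\int_{0}^{\pi} D_n = \pi/2$.
\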
cos (real k * x) * f(x+t)) / pi)" by (simp add: Fourier_coefficient_def orthonormal_coeff_def trigonometric_set_def l2product_def) also have "\ = LINT x|lebesgue_on {-pi..pi}. f(x+t) / (2 * pi) + (\k = Suc 0..n. (cos (real k * x) * f(x+t)) / pi)" using Fourier_products_integrable_cos [OF ft] absolutely_integrable_imp_integrable [OF ft] by simp also have "\ = (LINT x|lebesgue_on {-pi..pi}. f(x+t) / 2 + (\k = Suc 0..n. cos (real k * x) * f(x+t))) / pi" by (simp add: divide_simps sum_distrib_right mult.assoc) also have "\ = ?rhs" proof - have "LINT x|lebesgue_on {-pi..pi}. f(x + t) / 2 + (\k = Suc 0..n. cos (real k * x) * f(x + t)) = LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * f(x + t)" proof - have eq: "f(x+t) / 2 + (\k = Suc 0..n. cos (real k * x) * f(x + t)) = Dirichlet_kernel n x * f(x + t)" if "- pi \ x" "x \ pi" for x proof (cases "x = 0") case False then have "sin (x/2) \ 0" using that by (auto simp: sin_zero_iff) then have "f(x + t) * (1/2 + (\k = Suc 0..n. cos(real k * x))) = f(x + t) * sin((real n + 1/2) * x) / 2 / sin(x/2)" using cosine_sum_lemma [of x n] by (simp add: divide_simps) then show ?thesis by (simp add: Dirichlet_kernel_def False field_simps sum_distrib_left) qed (simp add: Dirichlet_kernel_def algebra_simps) show ?thesis by (rule Bochner_Integration.integral_cong [OF refl]) (simp add: eq) qed then show ?thesis by simp qed finally show ?thesis . qed lemma Fourier_sum_limit_Dirichlet_kernel: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "((\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ l) \ (\n. LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * f(x + t)) \ pi * l" (is "?lhs = ?rhs") proof - have "?lhs \ (\n. (LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * f(x + t)) / pi) \ l" by (simp add: Fourier_sum_limit_pair [OF f, symmetric] Fourier_sum_offset_Dirichlet_kernel assms) also have "\ \ (\k. (LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel k x * f(x + t)) * inverse pi) \ pi * l * inverse pi" by (auto simp: divide_simps) also have "\ \ ?rhs" using tendsto_mult_right_iff [of "inverse pi", symmetric] by auto finally show ?thesis . qed subsection\A directly deduced sufficient condition for convergence at a point\ lemma simple_Fourier_convergence_periodic: assumes f: "f absolutely_integrable_on {-pi..pi}" and ft: "(\x. (f(x+t) - f t) / sin(x/2)) absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ f t" proof - let ?\ = "\n. \k\n. Fourier_coefficient (\x. f x - f t) k * trigonometric_set k t" let ?\ = "\n. LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * (f(x + t) - f t)" have fft: "(\x. f x - f t) absolutely_integrable_on {-pi..pi}" by (simp add: f absolutely_integrable_measurable_real) have fxt: "(\x. f(x + t)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset assms by auto have *: "?\ \ 0 \ ?\ \ 0" by (simp add: Fourier_sum_limit_Dirichlet_kernel fft periodic) moreover have "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t) - f t) \ 0" if "?\ \ 0" proof (rule Lim_transform_eventually [OF that eventually_sequentiallyI]) show "(\k\n. Fourier_coefficient (\x. f x - f t) k * trigonometric_set k t) = (\k\n. Fourier_coefficient f k * trigonometric_set k t) - f t" if "Suc 0 \ n" for n proof - have "(\k = Suc 0..n. Fourier_coefficient (\x. f x - f t) k * trigonometric_set k t) = (\k = Suc 0..n. 
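text \
  Informal note: combining the offset formula with the cosine-sum representation gives the closed
  form $\sum_{k\le 2n} \hat f(k)\,t_k(t) = \frac{1}{\pi}\int_{-\pi}^{\pi} D_n(x)\,f(x+t)\,dx$ for
  periodic absolutely integrable $f$, so convergence of the Fourier series at $t$ amounts to
  convergence of these kernel integrals to $\pi$ times the limit.
\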
Fourier_coefficient f k * trigonometric_set k t)" proof (rule sum.cong [OF refl]) fix k assume k: "k \ {Suc 0..n}" then have [simp]: "integral\<^sup>L (lebesgue_on {-pi..pi}) (trigonometric_set k) = 0" by (auto simp: trigonometric_set_def) show "Fourier_coefficient (\x. f x - f t) k * trigonometric_set k t = Fourier_coefficient f k * trigonometric_set k t" using k unfolding Fourier_coefficient_def orthonormal_coeff_def l2product_def by (simp add: right_diff_distrib f absolutely_integrable_measurable_real trigonometric_set_mul_integrable) qed moreover have "Fourier_coefficient (\x. f x - f t) 0 * trigonometric_set 0 t = Fourier_coefficient f 0 * trigonometric_set 0 t - f t" using f unfolding Fourier_coefficient_def orthonormal_coeff_def l2product_def by (auto simp: divide_simps trigonometric_set absolutely_integrable_imp_integrable measure_restrict_space) ultimately show ?thesis by (simp add: sum.atLeast_Suc_atMost atMost_atLeast0) qed qed moreover have "?\ \ 0" proof - have eq: "\n. ?\ n = integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin((n + 1/2) * x) * ((f(x+t) - f t) / sin(x/2) / 2))" unfolding Dirichlet_kernel_def by (rule Bochner_Integration.integral_cong [OF refl]) (auto simp: divide_simps sin_zero_iff) show ?thesis unfolding eq by (intro ft set_integrable_divide Riemann_lebesgue_sin_half) qed ultimately show ?thesis by (simp add: LIM_zero_cancel) qed subsection\A more natural sufficient Hoelder condition at a point\ lemma bounded_inverse_sin_half: assumes "d > 0" obtains B where "B>0" "\x. x \ ({-pi..pi} - {-d<.. \inverse (sin (x/2))\ \ B" proof - have "bounded ((\x. inverse (sin (x/2))) ` ({-pi..pi} - {- d<..x \ {-pi..pi}; x \ 0\ \ sin (x/2) \ 0" for x using \0 < d\ by (auto simp: sin_zero_iff) then show "continuous_on ({-pi..pi} - {-d<..x. inverse (sin (x/2)))" using \0 < d\ by (intro continuous_intros) auto show "compact ({-pi..pi} - {-d<.. 0" "a > 0" and ft: "\x. \x-t\ < d \ \f x - f t\ \ M * \x-t\ powr a" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ f t" proof (rule simple_Fourier_convergence_periodic [OF f]) have half: "((\x. sin(x/2)) has_real_derivative 1/2) (at 0)" by (rule derivative_eq_intros refl | force)+ then obtain e0::real where "e0 > 0" and e0: "\x. \x \ 0; \x\ < e0\ \ \sin (x/2) / x - 1/2\ < 1/4" apply (simp add: DERIV_def Lim_at dist_real_def) apply (drule_tac x="1/4" in spec, auto) done obtain e where "e > 0" and e: "\x. \x\ < e \ \(f(x+t) - f t) / sin (x/2)\ \ 4 * (\M\ * \x\ powr (a-1))" proof show "min d e0 > 0" using \d > 0\ \e0 > 0\ by auto next fix x assume x: "\x\ < min d e0" show "\(f(x + t) - f t) / sin (x/2)\ \ 4 * (\M\ * \x\ powr (a - 1))" proof (cases "sin(x/2) = 0 \ x=0") case False have eq: "\(f(x + t) - f t) / sin (x/2)\ = \inverse (sin (x/2) / x)\ * (\f(x + t) - f t\ / \x\)" by simp show ?thesis unfolding eq proof (rule mult_mono) have "\sin (x/2) / x - 1/2\ < 1/4" using e0 [of x] x False by force then have "1/4 \ \sin (x/2) / x\" by (simp add: abs_if field_simps split: if_split_asm) then show "\inverse (sin (x/2) / x)\ \ 4" using False by (simp add: field_simps) have "\f(x + t) - f t\ / \x\ \ M * \x + t - t\ powr (a - 1)" using ft [of "x+t"] x by (auto simp: divide_simps algebra_simps Transcendental.powr_mult_base) also have "\ \ \M\ * \x\ powr (a - 1)" by (simp add: mult_mono) finally show "\f(x + t) - f t\ / \x\ \ \M\ * \x\ powr (a - 1)" . qed auto qed auto qed obtain B where "B>0" and B: "\x. x \ ({-pi..pi} - {- e<.. 
\inverse (sin (x/2))\ \ B" using bounded_inverse_sin_half [OF \e > 0\] by metis let ?g = "\x. max (B * \f(x + t) - f t\) ((4 * \M\) * \x\ powr (a - 1))" show "(\x. (f(x + t) - f t) / sin (x/2)) absolutely_integrable_on {-pi..pi}" proof (rule measurable_bounded_by_integrable_imp_absolutely_integrable) have fxt: "(\x. f(x + t)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset assms by auto show "(\x. (f(x + t) - f t) / sin (x/2)) \ borel_measurable (lebesgue_on {-pi..pi})" proof (intro measurable) show "(\x. f(x + t)) \ borel_measurable (lebesgue_on {-pi..pi})" using absolutely_integrable_on_def fxt integrable_imp_measurable by blast show "(\x. sin (x/2)) \ borel_measurable (lebesgue_on {-pi..pi})" by (intro continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto qed auto have "(\x. f(x + t) - f t) absolutely_integrable_on {-pi..pi}" by (intro set_integral_diff fxt) (simp add: set_integrable_def) moreover have "(\x. \x\ powr (a - 1)) absolutely_integrable_on {-pi..pi}" proof - have "((\x. x powr (a - 1)) has_integral inverse a * pi powr a - inverse a * 0 powr a) {0..pi}" proof (rule fundamental_theorem_of_calculus_interior) show "continuous_on {0..pi} (\x. inverse a * x powr a)" using \a > 0\ by (intro continuous_on_powr' continuous_intros) auto show "((\b. inverse a * b powr a) has_vector_derivative x powr (a - 1)) (at x)" if "x \ {0<..a > 0\ apply (simp flip: has_field_derivative_iff_has_vector_derivative) apply (rule derivative_eq_intros | simp)+ done qed auto then have int: "(\x. x powr (a - 1)) integrable_on {0..pi}" by blast show ?thesis proof (rule nonnegative_absolutely_integrable_1) show "(\x. \x\ powr (a - 1)) integrable_on {-pi..pi}" proof (rule Henstock_Kurzweil_Integration.integrable_combine) show "(\x. \x\ powr (a - 1)) integrable_on {0..pi}" using int integrable_eq by fastforce then show "(\x. \x\ powr (a - 1)) integrable_on {- pi..0}" using Henstock_Kurzweil_Integration.integrable_reflect_real by fastforce qed auto qed auto qed ultimately show "?g integrable_on {-pi..pi}" apply (intro set_lebesgue_integral_eq_integral absolutely_integrable_max_1 absolutely_integrable_bounded_measurable_product_real set_integrable_abs measurable) apply (auto simp: image_constant_conv) done show "norm ((f(x + t) - f t) / sin (x/2)) \ ?g x" if "x \ {-pi..pi}" for x proof (cases "\x\ < e") case True then show ?thesis using e [OF True] by (simp add: max_def) next case False then have "\inverse (sin (x/2))\ \ B" using B that by force then have "\inverse (sin (x/2))\ * \f(x + t) - f t\ \ B * \f(x + t) - f t\" by (simp add: mult_right_mono) then have "\f(x + t) - f t\ / \sin (x/2)\ \ B * \f(x + t) - f t\" by (simp add: divide_simps split: if_split_asm) then show ?thesis by auto qed qed auto qed (auto simp: periodic) text\In particular, a Lipschitz condition at the point\ corollary Lipschitz_Fourier_convergence_periodic: assumes f: "f absolutely_integrable_on {-pi..pi}" and "d > 0" and ft: "\x. \x-t\ < d \ \f x - f t\ \ M * \x-t\" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ f t" using Hoelder_Fourier_convergence_periodic [OF f \d > 0\, of 1] assms by (fastforce simp add:) text\In particular, if left and right derivatives both exist\ proposition bi_differentiable_Fourier_convergence_periodic: assumes f: "f absolutely_integrable_on {-pi..pi}" and f_lt: "f differentiable at_left t" and f_gt: "f differentiable at_right t" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\k\n. 
Fourier_coefficient f k * trigonometric_set k t)) \ f t" proof - obtain D_lt where D_lt: "\e. e > 0 \ \d>0. \x dist x t < d \ dist ((f x - f t) / (x - t)) D_lt < e" using f_lt unfolding has_field_derivative_iff real_differentiable_def Lim_within by (meson lessThan_iff) then obtain d_lt where "d_lt > 0" and d_lt: "\x. \x < t; 0 < \x - t\; \x - t\ < d_lt\ \ \(f x - f t) / (x - t) - D_lt\ < 1" unfolding dist_real_def by (metis dist_commute dist_real_def zero_less_one) obtain D_gt where D_gt: "\e. e > 0 \ \d>0. \x>t. 0 < dist x t \ dist x t < d \ dist ((f x - f t) / (x - t)) D_gt < e" using f_gt unfolding has_field_derivative_iff real_differentiable_def Lim_within by (meson greaterThan_iff) then obtain d_gt where "d_gt > 0" and d_gt: "\x. \x > t; 0 < \x - t\; \x - t\ < d_gt\ \ \(f x - f t) / (x - t) - D_gt\ < 1" unfolding dist_real_def by (metis dist_commute dist_real_def zero_less_one) show ?thesis proof (rule Lipschitz_Fourier_convergence_periodic [OF f]) fix x assume "\x - t\ < min d_lt d_gt" then have *: "\x - t\ < d_lt" "\x - t\ < d_gt" by auto consider "xt" by linarith then show "\f x - f t\ \ (\D_lt\ + \D_gt\ + 1) * \x - t\" proof cases case 1 then have "\(f x - f t) / (x - t) - D_lt\ < 1" using d_lt [OF 1] * by auto then have "\(f x - f t) / (x - t)\ - \D_lt\ < 1" by linarith then have "\f x - f t\ \ (\D_lt\ + 1) * \x - t\" by (simp add: comm_semiring_class.distrib divide_simps split: if_split_asm) also have "\ \ (\D_lt\ + \D_gt\ + 1) * \x - t\" by (simp add: mult_right_mono) finally show ?thesis . next case 3 then have "\(f x - f t) / (x - t) - D_gt\ < 1" using d_gt [OF 3] * by auto then have "\(f x - f t) / (x - t)\ - \D_gt\ < 1" by linarith then have "\f x - f t\ \ (\D_gt\ + 1) * \x - t\" by (simp add: comm_semiring_class.distrib divide_simps split: if_split_asm) also have "\ \ (\D_lt\ + \D_gt\ + 1) * \x - t\" by (simp add: mult_right_mono) finally show ?thesis . qed auto qed (auto simp: \0 < d_gt\ \0 < d_lt\ periodic) qed text\And in particular at points where the function is differentiable\ lemma differentiable_Fourier_convergence_periodic: assumes f: "f absolutely_integrable_on {-pi..pi}" and fdif: "f differentiable (at t)" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ f t" by (rule bi_differentiable_Fourier_convergence_periodic) (auto simp: differentiable_at_withinI assms) text\Use reflection to halve the region of integration\ lemma absolutely_integrable_mult_Dirichlet_kernel_reflected: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\x. Dirichlet_kernel n x * f(t+x)) absolutely_integrable_on {-pi..pi}" "(\x. Dirichlet_kernel n x * f(t-x)) absolutely_integrable_on {-pi..pi}" "(\x. Dirichlet_kernel n x * c) absolutely_integrable_on {-pi..pi}" proof - show "(\x. Dirichlet_kernel n x * f(t+x)) absolutely_integrable_on {-pi..pi}" apply (rule absolutely_integrable_mult_Dirichlet_kernel) using absolutely_integrable_periodic_offset [OF f, of t] periodic apply simp done then show "(\x. Dirichlet_kernel n x * f(t-x)) absolutely_integrable_on {-pi..pi}" by (subst absolutely_integrable_reflect_real [symmetric]) simp show "(\x. Dirichlet_kernel n x * c) absolutely_integrable_on {-pi..pi}" by (simp add: absolutely_integrable_measurable_real borel_measurable_integrable integrable_Dirichlet_kernel) qed lemma absolutely_integrable_mult_Dirichlet_kernel_reflected_part: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "d \ pi" shows "(\x. 
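text \
  Informal note: when $f$ has one-sided derivatives $D_{-}$ and $D_{+}$ at $t$ (notation used only
  in this remark), the difference quotient lies within $1$ of the relevant derivative for $x$ close
  to $t$, giving the local Lipschitz bound
  $\lvert f(x) - f(t)\rvert \le (\lvert D_{-}\rvert + \lvert D_{+}\rvert + 1)\,\lvert x - t\rvert$;
  the Lipschitz corollary then yields convergence of the Fourier series to $f(t)$.
\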
Dirichlet_kernel n x * f(t+x)) absolutely_integrable_on {0..d}" "(\x. Dirichlet_kernel n x * f(t-x)) absolutely_integrable_on {0..d}" "(\x. Dirichlet_kernel n x * c) absolutely_integrable_on {0..d}" using absolutely_integrable_mult_Dirichlet_kernel_reflected [OF f periodic, of n] \d \ pi\ by (force intro: absolutely_integrable_on_subinterval)+ lemma absolutely_integrable_mult_Dirichlet_kernel_reflected_part2: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "d \ pi" shows "(\x. Dirichlet_kernel n x * (f(t+x) + f(t-x))) absolutely_integrable_on {0..d}" "(\x. Dirichlet_kernel n x * ((f(t+x) + f(t-x)) - c)) absolutely_integrable_on {0..d}" using absolutely_integrable_mult_Dirichlet_kernel_reflected_part assms by (simp_all add: distrib_left right_diff_distrib) lemma integral_reflect_and_add: fixes f :: "real \ 'b::euclidean_space" assumes "integrable (lebesgue_on {-a..a}) f" shows "integral\<^sup>L (lebesgue_on {-a..a}) f = integral\<^sup>L (lebesgue_on {0..a}) (\x. f x + f(-x))" proof (cases "a \ 0") case True have f: "integrable (lebesgue_on {0..a}) f" "integrable (lebesgue_on {-a..0}) f" using integrable_subinterval assms by fastforce+ then have fm: "integrable (lebesgue_on {0..a}) (\x. f(-x))" using integrable_reflect_real by fastforce have "integral\<^sup>L (lebesgue_on {-a..a}) f = integral\<^sup>L (lebesgue_on {-a..0}) f + integral\<^sup>L (lebesgue_on {0..a}) f" by (simp add: True assms integral_combine) also have "\ = integral\<^sup>L (lebesgue_on {0..a}) (\x. f(-x)) + integral\<^sup>L (lebesgue_on {0..a}) f" by (metis (no_types) add.inverse_inverse add.inverse_neutral integral_reflect_real) also have "\ = integral\<^sup>L (lebesgue_on {0..a}) (\x. f x + f(-x))" by (simp add: fm f) finally show ?thesis . qed (use assms in auto) lemma Fourier_sum_offset_Dirichlet_kernel_half: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\k\2*n. Fourier_coefficient f k * trigonometric_set k t) - l = (LINT x|lebesgue_on {0..pi}. Dirichlet_kernel n x * (f(t+x) + f(t-x) - 2*l)) / pi" proof - have fxt: "(\x. f(x + t)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset assms by auto have int: "integrable (lebesgue_on {0..pi}) (Dirichlet_kernel n)" using not_integrable_integral_eq by fastforce have "LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * f(x + t) = LINT x|lebesgue_on {0..pi}. Dirichlet_kernel n x * f(x + t) + Dirichlet_kernel n (- x) * f(- x + t)" by (simp add: integral_reflect_and_add absolutely_integrable_imp_integrable absolutely_integrable_mult_Dirichlet_kernel fxt) also have "\ = (LINT x|lebesgue_on {0..pi}. Dirichlet_kernel n x * (f(t+x) + f(t-x) - 2*l)) + pi * l" using absolutely_integrable_mult_Dirichlet_kernel_reflected_part [OF f periodic order_refl, of n t] apply (simp add: algebra_simps absolutely_integrable_imp_integrable int) done finally show ?thesis by (simp add: Fourier_sum_offset_Dirichlet_kernel assms divide_simps) qed lemma Fourier_sum_limit_Dirichlet_kernel_half: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ l \ (\n. (LINT x|lebesgue_on {0..pi}. 
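text \
  Informal note: since $D_n$ is even, the reflection identity
  $\int_{-\pi}^{\pi} g = \int_{0}^{\pi} \bigl(g(x) + g(-x)\bigr)\,dx$ turns the full-interval
  formula into the half-interval one,
  $\sum_{k\le 2n} \hat f(k)\,t_k(t) - l
     = \frac{1}{\pi}\int_{0}^{\pi} D_n(x)\bigl(f(t+x) + f(t-x) - 2l\bigr)\,dx$,
  the constant $2l$ being absorbed via $\int_{0}^{\pi} D_n = \pi/2$.
\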
Dirichlet_kernel n x * (f(t+x) + f(t-x) - 2*l))) \ 0" apply (simp flip: Fourier_sum_limit_pair [OF f]) apply (simp add: Lim_null [where l=l] Fourier_sum_offset_Dirichlet_kernel_half assms) done subsection\Localization principle: convergence only depends on values "nearby"\ proposition Riemann_localization_integral: assumes f: "f absolutely_integrable_on {-pi..pi}" and g: "g absolutely_integrable_on {-pi..pi}" and "d > 0" and d: "\x. \x\ < d \ f x = g x" shows "(\n. integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. Dirichlet_kernel n x * f x) - integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. Dirichlet_kernel n x * g x)) \ 0" (is "?a \ 0") proof - let ?f = "\x. (if \x\ < d then 0 else f x - g x) / sin(x/2) / 2" have eq: "?a n = integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin((n+1/2) * x) * ?f x)" for n proof - have "?a n = integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. Dirichlet_kernel n x * (if \x\ < d then 0 else f x - g x))" apply (simp add: absolutely_integrable_imp_integrable absolutely_integrable_mult_Dirichlet_kernel f g flip: Bochner_Integration.integral_diff) apply (auto simp: d algebra_simps intro: Bochner_Integration.integral_cong) done also have "\ = integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. sin((n+1/2) * x) * ?f x)" using \d > 0\ by (auto simp: Dirichlet_kernel_def intro: Bochner_Integration.integral_cong) finally show ?thesis . qed show ?thesis unfolding eq proof (rule Riemann_lebesgue_sin_half) obtain B where "B>0" and B: "\x. x \ ({-pi..pi} - {-d<.. \inverse (sin (x/2))\ \ B" using bounded_inverse_sin_half [OF \d > 0\] by metis have "(\x. (if \x\ < d then 0 else f x - g x) / sin (x/2)) absolutely_integrable_on {-pi..pi}" proof (rule measurable_bounded_by_integrable_imp_absolutely_integrable) show "(\x. (if \x\ < d then 0 else f x - g x) / sin (x/2)) \ borel_measurable (lebesgue_on {-pi..pi})" proof (intro measurable) show "f \ borel_measurable (lebesgue_on {-pi..pi})" "g \ borel_measurable (lebesgue_on {-pi..pi})" using absolutely_integrable_on_def f g integrable_imp_measurable by blast+ show "(\x. x) \ borel_measurable (lebesgue_on {-pi..pi})" "(\x. sin (x/2)) \ borel_measurable (lebesgue_on {-pi..pi})" by (intro continuous_intros continuous_imp_measurable_on_sets_lebesgue | force)+ qed auto have "(\x. B * \f x - g x\) absolutely_integrable_on {-pi..pi}" using \B > 0\ by (simp add: f g set_integrable_abs) then show "(\x. B * \f x - g x\) integrable_on {-pi..pi}" using absolutely_integrable_on_def by blast next fix x assume x: "x \ {-pi..pi}" then have [simp]: "sin (x/2) = 0 \ x=0" using \0 < d\ by (force simp: sin_zero_iff) show "norm ((if \x\ < d then 0 else f x - g x) / sin (x/2)) \ B * \f x - g x\" proof (cases "\x\ < d") case False then have "1 \ B * \sin (x/2)\" using \B > 0\ B [of x] x \d > 0\ by (auto simp: abs_less_iff divide_simps split: if_split_asm) then have "1 * \f x - g x\ \ B * \sin (x/2)\ * \f x - g x\" by (metis (full_types) abs_ge_zero mult.commute mult_left_mono) then show ?thesis using False by (auto simp: divide_simps algebra_simps) qed (simp add: d) qed auto then show "(\x. (if \x\ < d then 0 else f x - g x) / sin (x/2) / 2) absolutely_integrable_on {-pi..pi}" using set_integrable_divide by blast qed qed lemma Riemann_localization_integral_range: assumes f: "f absolutely_integrable_on {-pi..pi}" and "0 < d" "d \ pi" shows "(\n. integral\<^sup>L (lebesgue_on {-pi..pi}) (\x. Dirichlet_kernel n x * f x) - integral\<^sup>L (lebesgue_on {-d..d}) (\x. Dirichlet_kernel n x * f x)) \ 0" proof - have *: "(\n. (LINT x|lebesgue_on {-pi..pi}. 
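(* Informal sketch of the localization integral: if $f = g$ on $(-d,d)$, then
     \[ \int_{-\pi}^{\pi} D_n(x)\,(f(x)-g(x))\,dx
        = \int_{-\pi}^{\pi} \sin\bigl((n+\tfrac12)x\bigr)\,\frac{f(x)-g(x)}{2\sin(x/2)}\,dx \;\longrightarrow\; 0, \]
   because the factor $(f-g)/(2\sin(x/2))$ vanishes near $0$, is absolutely integrable on the
   rest of the interval, and the Riemann--Lebesgue lemma applies. *)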
Dirichlet_kernel n x * f x) - (LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * (if x \ {-d..d} then f x else 0))) \ 0" proof (rule Riemann_localization_integral [OF f _ \0 < d\]) have "f absolutely_integrable_on {-d..d}" using f absolutely_integrable_on_subinterval \d \ pi\ by fastforce moreover have "(\x. if - pi \ x \ x \ pi then if x \ {-d..d} then f x else 0 else 0) = (\x. if x \ {-d..d} then f x else 0)" using \d \ pi\ by force ultimately show "(\x. if x \ {-d..d} then f x else 0) absolutely_integrable_on {-pi..pi}" apply (subst absolutely_integrable_restrict_UNIV [symmetric]) apply (simp flip: absolutely_integrable_restrict_UNIV [of "{-d..d}" f]) done qed auto then show ?thesis using integral_restrict [of "{-d..d}" "{-pi..pi}" "\x. Dirichlet_kernel _ x * f x"] assms by (simp add: if_distrib cong: if_cong) qed lemma Riemann_localization: assumes f: "f absolutely_integrable_on {-pi..pi}" and g: "g absolutely_integrable_on {-pi..pi}" and perf: "\x. f(x + 2*pi) = f x" and perg: "\x. g(x + 2*pi) = g x" and "d > 0" and d: "\x. \x-t\ < d \ f x = g x" shows "(\n. \k\n. Fourier_coefficient f k * trigonometric_set k t) \ c \ (\n. \k\n. Fourier_coefficient g k * trigonometric_set k t) \ c" proof - have "(\n. LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * f(x + t)) \ pi * c \ (\n. LINT x|lebesgue_on {-pi..pi}. Dirichlet_kernel n x * g (x + t)) \ pi * c" proof (intro Lim_transform_eq Riemann_localization_integral) show "(\x. f(x + t)) absolutely_integrable_on {-pi..pi}" "(\x. g (x + t)) absolutely_integrable_on {-pi..pi}" using assms by (auto intro: absolutely_integrable_periodic_offset) qed (use assms in auto) then show ?thesis by (simp add: Fourier_sum_limit_Dirichlet_kernel assms) qed subsection\Localize the earlier integral\ lemma Riemann_localization_integral_range_half: assumes f: "f absolutely_integrable_on {-pi..pi}" and "0 < d" "d \ pi" shows "(\n. (LINT x|lebesgue_on {0..pi}. Dirichlet_kernel n x * (f x + f(-x))) - (LINT x|lebesgue_on {0..d}. Dirichlet_kernel n x * (f x + f(-x)))) \ 0" proof - have *: "(\x. Dirichlet_kernel n x * f x) absolutely_integrable_on {-d..d}" for n by (metis (full_types) absolutely_integrable_mult_Dirichlet_kernel absolutely_integrable_on_subinterval \d \ pi\ atLeastatMost_subset_iff f neg_le_iff_le) show ?thesis using absolutely_integrable_mult_Dirichlet_kernel [OF f] using Riemann_localization_integral_range [OF assms] apply (simp add: "*" absolutely_integrable_imp_integrable integral_reflect_and_add) apply (simp add: algebra_simps) done qed lemma Fourier_sum_limit_Dirichlet_kernel_part: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and d: "0 < d" "d \ pi" shows "(\n. \k\n. Fourier_coefficient f k * trigonometric_set k t) \ l \ (\n. (LINT x|lebesgue_on {0..d}. Dirichlet_kernel n x * ((f(t+x) + f(t-x)) - 2*l))) \ 0" proof - have "(\x. f(t+x)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset [OF f, of t] periodic by simp then have int: "(\x. f(t+x) - l) absolutely_integrable_on {-pi..pi}" by auto have "(\n. LINT x|lebesgue_on {0..pi}. Dirichlet_kernel n x * (f(t+x) + f(t-x) - 2*l)) \ 0 \ (\n. LINT x|lebesgue_on {0..d}. 
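(* Informal statement of the localization principle proved here: if $f$ and $g$ are absolutely
   integrable on $[-\pi,\pi]$, $2\pi$-periodic, and agree on $(t-d,\,t+d)$ for some $d > 0$, then
   the Fourier series of $f$ at $t$ converges to $c$ exactly when the Fourier series of $g$ at
   $t$ does.  Convergence at a point thus depends only on the values of the function near that
   point. *)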
Dirichlet_kernel n x * (f(t+x) + f(t-x) - 2*l)) \ 0" by (rule Lim_transform_eq) (use Riemann_localization_integral_range_half [OF int d] in auto) then show ?thesis by (simp add: Fourier_sum_limit_Dirichlet_kernel_half assms) qed subsection\Make a harmless simplifying tweak to the Dirichlet kernel\ lemma inte_Dirichlet_kernel_mul_expand: assumes f: "f \ borel_measurable (lebesgue_on S)" and S: "S \ sets lebesgue" shows "(LINT x|lebesgue_on S. Dirichlet_kernel n x * f x = LINT x|lebesgue_on S. sin((n+1/2) * x) * f x / (2 * sin(x/2))) \ (integrable (lebesgue_on S) (\x. Dirichlet_kernel n x * f x) \ integrable (lebesgue_on S) (\x. sin((n+1/2) * x) * f x / (2 * sin(x/2))))" proof (cases "0 \ S") case True have *: "{x. x = 0 \ x \ S} \ sets (lebesgue_on S)" using True by (simp add: S sets_restrict_space_iff cong: conj_cong) have bm1: "(\x. Dirichlet_kernel n x * f x) \ borel_measurable (lebesgue_on S)" unfolding Dirichlet_kernel_def by (force intro: * assms measurable continuous_imp_measurable_on_sets_lebesgue continuous_intros) have bm2: "(\x. sin ((n + 1/2) * x) * f x / (2 * sin (x/2))) \ borel_measurable (lebesgue_on S)" by (intro assms measurable continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto have 0: "{0} \ null_sets (lebesgue_on S)" using True by (simp add: S null_sets_restrict_space) show ?thesis apply (intro conjI integral_cong_AE integrable_cong_AE bm1 bm2 AE_I' [OF 0]) unfolding Dirichlet_kernel_def by auto next case False then show ?thesis unfolding Dirichlet_kernel_def by (auto intro!: Bochner_Integration.integral_cong Bochner_Integration.integrable_cong) qed lemma assumes f: "f \ borel_measurable (lebesgue_on S)" and S: "S \ sets lebesgue" shows integral_Dirichlet_kernel_mul_expand: "(LINT x|lebesgue_on S. Dirichlet_kernel n x * f x) = (LINT x|lebesgue_on S. sin((n+1/2) * x) * f x / (2 * sin(x/2)))" (is "?th1") and integrable_Dirichlet_kernel_mul_expand: "integrable (lebesgue_on S) (\x. Dirichlet_kernel n x * f x) \ integrable (lebesgue_on S) (\x. sin((n+1/2) * x) * f x / (2 * sin(x/2)))" (is "?th2") using inte_Dirichlet_kernel_mul_expand [OF assms] by auto proposition Fourier_sum_limit_sine_part: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and d: "0 < d" "d \ pi" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ l \ (\n. LINT x|lebesgue_on {0..d}. sin((n + 1/2) * x) * ((f(t+x) + f(t-x) - 2*l) / x)) \ 0" (is "?lhs \ ?\ \ 0") proof - have ftx: "(\x. f(t+x)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset assms by auto then have ftmx: "(\x. f(t-x)) absolutely_integrable_on {-pi..pi}" by (simp flip: absolutely_integrable_reflect_real [where f= "(\x. f(t+x))"]) have fbm: "(\x. f(t+x) + f(t-x) - 2*l) absolutely_integrable_on {-pi..pi}" by (force intro: ftmx ftx) let ?\ = "\n. LINT x|lebesgue_on {0..d}. Dirichlet_kernel n x * ((f(t+x) + f(t-x)) - 2*l)" have "?lhs \ ?\ \ 0" by (intro Fourier_sum_limit_Dirichlet_kernel_part assms) moreover have "?\ \ 0 \ ?\ \ 0" proof (rule Lim_transform_eq [OF Lim_transform_eventually]) let ?f = "\n. LINT x|lebesgue_on {0..d}. sin((real n + 1/2) * x) * (1 / (2 * sin(x/2)) - 1/x) * (f(t+x) + f(t-x) - 2*l)" have "?f n = (LINT x|lebesgue_on {-pi..pi}. sin ((n + 1/2) * x) * ((if x \ {0..d} then 1 / (2 * sin (x/2)) - 1/x else 0) * (f(t+x) + f(t-x) - 2*l)))" for n using d by (simp add: integral_restrict if_distrib [of "\u. 
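(* Informal sketch of the reduction carried out in this proof: the function
   $x \mapsto \tfrac{1}{2\sin(x/2)} - \tfrac{1}{x}$ tends to $0$ as $x \to 0^{+}$, so it extends
   continuously to $[0,d]$ and is bounded there.  Replacing the Dirichlet kernel by
   $\sin((n+\tfrac12)x)/x$ therefore changes the integral only by a term that vanishes in the
   limit (Riemann--Lebesgue), and convergence of the partial sums to $\ell$ becomes equivalent to
     \[ \int_0^d \sin\bigl((n+\tfrac12)x\bigr)\,\frac{f(t+x)+f(t-x)-2\ell}{x}\,dx \;\longrightarrow\; 0. \] *)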
_ * (u * _)"] mult.assoc flip: atLeastAtMost_iff cong: if_cong) moreover have "\ \ 0" proof (intro Riemann_lebesgue_sin_half absolutely_integrable_bounded_measurable_product_real) have "(\x. 1 / (2 * sin(x/2)) - 1/x) \ borel_measurable (lebesgue_on {0..d})" by (intro measurable continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto then show "(\x. if x \ {0..d} then 1 / (2 * sin(x/2)) - 1/x else 0) \ borel_measurable (lebesgue_on {-pi..pi})" using d by (simp add: borel_measurable_if_lebesgue_on flip: atLeastAtMost_iff) let ?g = "\x::real. 1 / (2 * sin(x/2)) - 1/x" have limg: "(?g \ ?g a) (at a within {0..d})" \\thanks to Manuel Eberl\ if a: "0 \ a" "a \ d" for a proof - have "(?g \ 0) (at_right 0)" and "(?g \ ?g d) (at_left d)" using d sin_gt_zero[of "d/2"] by (real_asymp simp: field_simps)+ moreover have "(?g \ ?g a) (at a)" if "a > 0" using a that d sin_gt_zero[of "a/2"] that by (real_asymp simp: field_simps) ultimately show ?thesis using that d by (cases "a = 0 \ a = d") (auto simp: at_within_Icc_at at_within_Icc_at_right at_within_Icc_at_left) qed have "((\x. if x \ {0..d} then 1 / (2 * sin(x/2)) - 1/x else 0) ` {-pi..pi}) = ((\x. 1 / (2 * sin(x/2)) - 1/x) ` {0..d})" using d by (auto intro: image_eqI [where x=0]) moreover have "bounded \" by (intro compact_imp_bounded compact_continuous_image) (auto simp: continuous_on limg) ultimately show "bounded ((\x. if x \ {0..d} then 1 / (2 * sin(x/2)) - 1/x else 0) ` {-pi..pi})" by simp qed (auto simp: fbm) ultimately show "?f \ (0::real)" by simp show "\\<^sub>F n in sequentially. ?f n = ?\ n - ?\ n" proof (rule eventually_sequentiallyI [where c = "Suc 0"]) fix n assume "n \ Suc 0" have "integrable (lebesgue_on {0..d}) (\x. sin ((real n + 1/2) * x) * (f(t+x) + f(t-x) - 2*l) / (2 * sin(x/2)))" using d apply (subst integrable_Dirichlet_kernel_mul_expand [symmetric]) apply (intro assms absolutely_integrable_imp_borel_measurable absolutely_integrable_on_subinterval [OF fbm] absolutely_integrable_imp_integrable absolutely_integrable_mult_Dirichlet_kernel_reflected_part2 | force)+ done moreover have "integrable (lebesgue_on {0..d}) (\x. sin ((real n + 1/2) * x) * (f(t+x) + f(t-x) - 2*l) / x)" proof (rule integrable_cong_AE_imp) let ?g = "\x. Dirichlet_kernel n x * (2 * sin(x/2) / x * (f(t+x) + f(t-x) - 2*l))" have *: "\2 * sin (x/2) / x\ \ 1" for x::real using abs_sin_x_le_abs_x [of "x/2"] by (simp add: divide_simps mult.commute abs_mult) have "bounded ((\x. 1 + (x/2)\<^sup>2) ` {-pi..pi})" by (intro compact_imp_bounded compact_continuous_image continuous_intros) auto then have bo: "bounded ((\x. 2 * sin (x/2) / x) ` {-pi..pi})" using * unfolding bounded_real by blast show "integrable (lebesgue_on {0..d}) ?g" using d apply (intro absolutely_integrable_imp_integrable absolutely_integrable_on_subinterval [OF absolutely_integrable_mult_Dirichlet_kernel] absolutely_integrable_bounded_measurable_product_real bo fbm measurable continuous_imp_measurable_on_sets_lebesgue continuous_intros, auto) done show "(\x. 
sin ((n + 1/2) * x) * (f(t+x) + f(t-x) - 2*l) / x) \ borel_measurable (lebesgue_on {0..d})" using d apply (intro measurable absolutely_integrable_imp_borel_measurable absolutely_integrable_on_subinterval [OF ftx] absolutely_integrable_on_subinterval [OF ftmx] absolutely_integrable_continuous_real continuous_intros, auto) done have "Dirichlet_kernel n x * (2 * sin(x/2)) / x = sin ((real n + 1/2) * x) / x" if "0 < x" "x \ d" for x using that d by (simp add: Dirichlet_kernel_def divide_simps sin_zero_pi_iff) then show "AE x in lebesgue_on {0..d}. ?g x = sin ((real n + 1/2) * x) * (f(t+x) + f(t-x) - 2*l) / x" using d by (force intro: AE_I' [where N="{0}"]) qed ultimately have "?f n = (LINT x|lebesgue_on {0..d}. sin ((n + 1/2) * x) * (f(t+x) + f(t-x) - 2*l) / (2 * sin(x/2))) - (LINT x|lebesgue_on {0..d}. sin ((n + 1/2) * x) * (f(t+x) + f(t-x) - 2*l) / x)" by (simp add: right_diff_distrib [of _ _ "1/_"] left_diff_distrib) also have "\ = ?\ n - ?\ n" using d by (simp add: measurable_restrict_mono [OF absolutely_integrable_imp_borel_measurable [OF fbm]] integral_Dirichlet_kernel_mul_expand) finally show "?f n = ?\ n - ?\ n" . qed qed ultimately show ?thesis by simp qed subsection\Dini's test for the convergence of a Fourier series\ proposition Fourier_Dini_test: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and int0d: "integrable (lebesgue_on {0..d}) (\x. \f(t+x) + f(t-x) - 2*l\ / x)" and "0 < d" shows "(\n. (\k\n. Fourier_coefficient f k * trigonometric_set k t)) \ l" proof - define \ where "\ \ \n x. sin((n + 1/2) * x) * ((f(t+x) + f(t-x) - 2*l) / x)" have "(\n. LINT x|lebesgue_on {0..pi}. \ n x) \ 0" unfolding lim_sequentially dist_real_def proof (intro allI impI) fix e :: real assume "e > 0" define \ where "\ \ \x. LINT x|lebesgue_on {0..x}. \f(t+x) + f(t-x) - 2*l\ / x" have [simp]: "\ 0 = 0" by (simp add: \_def integral_eq_zero_null_sets) have cont: "continuous_on {0..d} \" unfolding \_def using indefinite_integral_continuous_real int0d by blast with \d > 0\ have "\e>0. \da>0. \x'\{0..d}. \x' - 0\ < da \ \\ x' - \ 0\ < e" by (force simp: continuous_on_real_range dest: bspec [where x=0]) with \e > 0\ obtain k where "k > 0" and k: "\x'. \0 \ x'; x' < k; x' \ d\ \ \\ x'\ < e/2" by (drule_tac x="e/2" in spec) auto define \ where "\ \ min d (min (k/2) pi)" have e2: "\\ \\ < e/2" and "\ > 0" "\ \ pi" unfolding \_def using k \k > 0\ \d > 0\ by auto have ftx: "(\x. f(t+x)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset assms by auto then have ftmx: "(\x. f(t-x)) absolutely_integrable_on {-pi..pi}" by (simp flip: absolutely_integrable_reflect_real [where f= "(\x. f(t+x))"]) have 1: "\ n absolutely_integrable_on {0..\}" for n::nat unfolding \_def proof (rule absolutely_integrable_bounded_measurable_product_real) show "(\x. sin ((real n + 1/2) * x)) \ borel_measurable (lebesgue_on {0..\})" by (intro continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto show "bounded ((\x. sin ((real n + 1/2) * x)) ` {0..\})" by (simp add: bounded_iff) (use abs_sin_le_one in blast) have "(\x. (f(t+x) + f(t-x) - 2*l) / x) \ borel_measurable (lebesgue_on {0..\})" using \d > 0\ unfolding \_def by (intro measurable absolutely_integrable_imp_borel_measurable absolutely_integrable_on_subinterval [OF ftx] absolutely_integrable_on_subinterval [OF ftmx] absolutely_integrable_continuous_real continuous_intros, auto) moreover have "integrable (lebesgue_on {0..\}) (norm \ (\x. 
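(* Informal statement of Dini's test as formalised here: if $f$ is absolutely integrable on
   $[-\pi,\pi]$, $2\pi$-periodic, and for some $d > 0$
     \[ \int_0^d \frac{|f(t+x)+f(t-x)-2\ell|}{x}\,dx < \infty, \]
   then the Fourier series of $f$ at $t$ converges to $\ell$.  The proof splits $\int_0^\pi$ at a
   small $\delta$: the part over $[0,\delta]$ is small because the indefinite Dini integral is
   continuous and vanishes at $0$, and the part over $[\delta,\pi]$ tends to $0$ by the
   Riemann--Lebesgue lemma. *)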
(f(t+x) + f(t-x) - 2*l) / x))" proof (rule integrable_subinterval [of 0 d]) show "integrable (lebesgue_on {0..d}) (norm \ (\x. (f(t+x) + f(t-x) - 2*l) / x))" using int0d by (subst integrable_cong) (auto simp: o_def) show "{0..\} \ {0..d}" using \d > 0\ by (auto simp: \_def) qed ultimately show "(\x. (f(t+x) + f(t-x) - 2*l) / x) absolutely_integrable_on {0..\}" by (auto simp: absolutely_integrable_measurable) qed auto have 2: "\ n absolutely_integrable_on {\..pi}" for n::nat unfolding \_def proof (rule absolutely_integrable_bounded_measurable_product_real) show "(\x. sin ((n + 1/2) * x)) \ borel_measurable (lebesgue_on {\..pi})" by (intro continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto show "bounded ((\x. sin ((n + 1/2) * x)) ` {\..pi})" by (simp add: bounded_iff) (use abs_sin_le_one in blast) have "(\x. inverse x * (f(t+x) + f(t-x) - 2*l)) absolutely_integrable_on {\..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) have "(\x. 1/x) \ borel_measurable (lebesgue_on {\..pi})" by (auto simp: measurable_completion measurable_restrict_space1) then show "inverse \ borel_measurable (lebesgue_on {\..pi})" by (metis (no_types, lifting) inverse_eq_divide measurable_lebesgue_cong) have "\x\{\..pi}. inverse \x\ \ inverse \" using \0 < \\ by auto then show "bounded (inverse ` {\..pi})" by (auto simp: bounded_iff) show "(\x. f(t+x) + f(t-x) - 2*l) absolutely_integrable_on {\..pi}" proof (rule absolutely_integrable_on_subinterval) show "(\x. (f(t+x) + f(t-x) - 2*l)) absolutely_integrable_on {-pi..pi}" by (fast intro: ftx ftmx absolutely_integrable_on_const) show "{\..pi} \ {-pi..pi}" using \0 < \\ by auto qed qed auto then show "(\x. (f(t+x) + f(t-x) - 2*l) / x) absolutely_integrable_on {\..pi}" by (metis (no_types, lifting) divide_inverse mult.commute set_integrable_cong) qed auto have "(\x. f(t+x) - l) absolutely_integrable_on {-pi..pi}" using ftx by auto moreover have "bounded ((\x. 0) ` {x. \x\ < \})" using bounded_def by blast moreover have "bounded (inverse ` {x. \ \x\ < \})" using \\ > 0\ by (auto simp: divide_simps intro: boundedI [where B = "1/\"]) ultimately have "(\x. (if \x\ < \ then 0 else inverse x) * (if x \ {-pi..pi} then f(t+x) - l else 0)) absolutely_integrable_on UNIV" apply (intro absolutely_integrable_bounded_measurable_product_real measurable set_integral_diff) apply (auto simp: lebesgue_on_UNIV_eq measurable_completion simp flip: absolutely_integrable_restrict_UNIV [where S = "{-pi..pi}"]) done moreover have "(if x \ {-pi..pi} then if \x\ < \ then 0 else (f(t+x) - l) / x else 0) = (if \x\ < \ then 0 else inverse x) * (if x \ {-pi..pi} then f(t+x) - l else 0)" for x by (auto simp: divide_simps) ultimately have *: "(\x. if \x\ < \ then 0 else (f(t+x) - l) / x) absolutely_integrable_on {-pi..pi}" by (simp add: flip: absolutely_integrable_restrict_UNIV [where S = "{-pi..pi}"]) have "(\n. LINT x|lebesgue_on {0..pi}. sin ((n + 1/2) * x) * (if \x\ < \ then 0 else (f(t+x) - l) / x) + sin ((n + 1/2) * -x) * (if \x\ < \ then 0 else (f(t-x) - l) / -x)) \ 0" using Riemann_lebesgue_sin_half [OF *] * by (simp add: integral_reflect_and_add absolutely_integrable_imp_integrable absolutely_integrable_sin_product cong: if_cong) moreover have "sin ((n + 1/2) * x) * (if \x\ < \ then 0 else (f(t+x) - l) / x) + sin ((n + 1/2) * -x) * (if \x\ < \ then 0 else (f(t-x) - l) / -x) = (if \ \x\ < \ then \ n x else 0)" for x n by simp (auto simp: divide_simps algebra_simps \_def) ultimately have "(\n. LINT x|lebesgue_on {0..pi}. 
(if \ \x\ < \ then \ n x else 0)) \ 0" by simp moreover have "(if 0 \ x \ x \ pi then if \ \x\ < \ then \ n x else 0 else 0) = (if \ \ x \ x \ pi then \ n x else 0)" for x n using \\ > 0\ by auto ultimately have \: "(\n. LINT x|lebesgue_on {\..pi}. \ n x) \ 0" by (simp flip: Lebesgue_Measure.integral_restrict_UNIV) then obtain N::nat where N: "\n. n\N \ \LINT x|lebesgue_on {\..pi}. \ n x\ < e/2" unfolding lim_sequentially dist_real_def by (metis (full_types) \0 < e\ diff_zero half_gt_zero_iff) have "\integral\<^sup>L (lebesgue_on {0..pi}) (\ n)\ < e" if "n \ N" for n::nat proof - have int: "integrable (lebesgue_on {0..pi}) (\ (real n))" by (intro integrable_combine [of concl: 0 pi] absolutely_integrable_imp_integrable) (use \\ > 0\ \\ \ pi\ 1 2 in auto) then have "integral\<^sup>L (lebesgue_on {0..pi}) (\ n) = integral\<^sup>L (lebesgue_on {0..\}) (\ n) + integral\<^sup>L (lebesgue_on {\..pi}) (\ n)" by (rule integral_combine) (use \0 < \\ \\ \ pi\ in auto) moreover have "\integral\<^sup>L (lebesgue_on {0..\}) (\ n)\ \ LINT x|lebesgue_on {0..\}. \f(t + x) + f(t - x) - 2 * l\ / x" proof (rule integral_abs_bound_integral) show "integrable (lebesgue_on {0..\}) (\ (real n))" by (meson integrable_subinterval \\ \ pi\ int atLeastatMost_subset_iff order_refl) have "{0..\} \ {0..d}" by (auto simp: \_def) then show "integrable (lebesgue_on {0..\}) (\x. \f(t + x) + f(t - x) - 2 * l\ / x)" by (rule integrable_subinterval [OF int0d]) show "\\ (real n) x\ \ \f(t + x) + f(t - x) - 2 * l\ / x" if "x \ space (lebesgue_on {0..\})" for x using that apply (auto simp: \_def divide_simps abs_mult) by (simp add: mult.commute mult_left_le) qed ultimately have "\integral\<^sup>L (lebesgue_on {0..pi}) (\ n)\ < e/2 + e/2" using N [OF that] e2 unfolding \_def by linarith then show ?thesis by simp qed then show "\N. \n\N. \integral\<^sup>L (lebesgue_on {0..pi}) (\ (real n)) - 0\ < e" by force qed then show ?thesis unfolding \_def using Fourier_sum_limit_sine_part assms pi_gt_zero by blast qed subsection\Cesaro summability of Fourier series using Fejér kernel\ definition Fejer_kernel :: "nat \ real \ real" where "Fejer_kernel \ \n x. if n = 0 then 0 else (\r sin(x/2) = 0") case True have "(\rrx. Fejer_kernel n x * f x) absolutely_integrable_on {-pi..pi}" proof (rule absolutely_integrable_bounded_measurable_product_real) show "Fejer_kernel n \ borel_measurable (lebesgue_on {-pi..pi})" by (simp add: Fejer_kernel_continuous continuous_imp_measurable_on_sets_lebesgue) show "bounded (Fejer_kernel n ` {-pi..pi})" using Fejer_kernel_continuous compact_continuous_image compact_imp_bounded by blast qed (use assms in auto) lemma absolutely_integrable_mult_Fejer_kernel_reflected1: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\x. Fejer_kernel n x * f(t + x)) absolutely_integrable_on {-pi..pi}" using assms by (force intro: absolutely_integrable_mult_Fejer_kernel absolutely_integrable_periodic_offset) lemma absolutely_integrable_mult_Fejer_kernel_reflected2: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\x. Fejer_kernel n x * f(t - x)) absolutely_integrable_on {-pi..pi}" proof - have "(\x. f(t - x)) absolutely_integrable_on {-pi..pi}" using assms apply (subst absolutely_integrable_reflect_real [symmetric]) apply (simp add: absolutely_integrable_periodic_offset) done then show ?thesis by (rule absolutely_integrable_mult_Fejer_kernel) qed lemma absolutely_integrable_mult_Fejer_kernel_reflected3: shows "(\x. 
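(* Informal summary of the Fejér kernel facts used below (writing $F_n$ for the Fejér kernel and
   $D_r$ for the Dirichlet kernel): for $n > 0$,
     \[ F_n(x) = \frac{1}{n}\sum_{r=0}^{n-1} D_r(x) = \frac{\sin^2(nx/2)}{2n\,\sin^2(x/2)}
        \quad (\sin(x/2) \ne 0), \]
   hence $F_n \ge 0$ and $\int_{-\pi}^{\pi} F_n(x)\,dx = \pi$.  Positivity and the fixed total
   mass are what make the Cesàro averages better behaved than the raw partial sums. *)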
Fejer_kernel n x * c) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_on_const absolutely_integrable_mult_Fejer_kernel by blast lemma absolutely_integrable_mult_Fejer_kernel_reflected_part1: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "d \ pi" shows "(\x. Fejer_kernel n x * f(t + x)) absolutely_integrable_on {0..d}" by (rule absolutely_integrable_on_subinterval [OF absolutely_integrable_mult_Fejer_kernel_reflected1]) (auto simp: assms) lemma absolutely_integrable_mult_Fejer_kernel_reflected_part2: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "d \ pi" shows "(\x. Fejer_kernel n x * f(t - x)) absolutely_integrable_on {0..d}" by (rule absolutely_integrable_on_subinterval [OF absolutely_integrable_mult_Fejer_kernel_reflected2]) (auto simp: assms) lemma absolutely_integrable_mult_Fejer_kernel_reflected_part3: assumes "d \ pi" shows "(\x. Fejer_kernel n x * c) absolutely_integrable_on {0..d}" by (rule absolutely_integrable_on_subinterval [OF absolutely_integrable_mult_Fejer_kernel_reflected2]) (auto simp: assms) lemma absolutely_integrable_mult_Fejer_kernel_reflected_part4: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "d \ pi" shows "(\x. Fejer_kernel n x * (f(t + x) + f(t - x))) absolutely_integrable_on {0..d}" unfolding distrib_left by (intro set_integral_add absolutely_integrable_mult_Fejer_kernel_reflected_part1 absolutely_integrable_mult_Fejer_kernel_reflected_part2 assms) lemma absolutely_integrable_mult_Fejer_kernel_reflected_part5: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "d \ pi" shows "(\x. Fejer_kernel n x * ((f(t + x) + f(t - x)) - c)) absolutely_integrable_on {0..d}" unfolding distrib_left right_diff_distrib by (intro set_integral_add set_integral_diff absolutely_integrable_on_const absolutely_integrable_mult_Fejer_kernel_reflected_part1 absolutely_integrable_mult_Fejer_kernel_reflected_part2 assms, auto) lemma Fourier_sum_offset_Fejer_kernel_half: fixes n::nat assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and "n > 0" shows "(\rk\2*r. Fourier_coefficient f k * trigonometric_set k t) / n - l = (LINT x|lebesgue_on {0..pi}. Fejer_kernel n x * (f(t + x) + f(t - x) - 2 * l)) / pi" proof - have "(\rk\2 * r. Fourier_coefficient f k * trigonometric_set k t) - real n * l = (\rk\2 * r. Fourier_coefficient f k * trigonometric_set k t) - l)" by (simp add: sum_subtractf) also have "\ = (\r = real n * ((LINT x|lebesgue_on {0..pi}. Fejer_kernel n x * (f(t + x) + f(t - x) - 2 * l)) / pi)" proof - have "integrable (lebesgue_on {0..pi}) (\x. Dirichlet_kernel i x * (f(t + x) + f(t - x) - 2 * l))" for i using absolutely_integrable_mult_Dirichlet_kernel_reflected_part2(2) f periodic by (force simp: intro!: absolutely_integrable_imp_integrable) then show ?thesis using \n > 0\ apply (simp add: Fejer_kernel_def flip: sum_divide_distrib) apply (simp add: sum_distrib_right flip: Bochner_Integration.integral_sum [symmetric]) done qed finally have "(\rk\2 * r. Fourier_coefficient f k * trigonometric_set k t) - real n * l = real n * ((LINT x|lebesgue_on {0..pi}. Fejer_kernel n x * (f(t + x) + f(t - x) - 2 * l)) / pi)" . with \n > 0\ show ?thesis by (auto simp: mult.commute divide_simps) qed lemma Fourier_sum_limit_Fejer_kernel_half: fixes n::nat assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. ((\rk\2*r. 
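(* Informal sketch (with $\sigma_n(f;t) = \tfrac1n \sum_{r<n} s_{2r}(f;t)$ the Cesàro mean of the
   partial sums, informal shorthand): averaging the Dirichlet-kernel identity over $r < n$ gives,
   for $n > 0$,
     \[ \sigma_n(f;t) - \ell = \frac{1}{\pi}\int_0^{\pi} F_n(x)\,\bigl(f(t+x)+f(t-x)-2\ell\bigr)\,dx, \]
   so Cesàro convergence of the Fourier series at $t$ reduces to showing that this integral
   tends to $0$. *)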
Fourier_coefficient f k * trigonometric_set k t)) / n) \ l \ ((\n. integral\<^sup>L (lebesgue_on {0..pi}) (\x. Fejer_kernel n x * ((f(t + x) + f(t - x)) - 2*l))) \ 0)" (is "?lhs = ?rhs") proof - have "?lhs \ (\n. ((\rk\2*r. Fourier_coefficient f k * trigonometric_set k t)) / n - l) \ 0" by (simp add: LIM_zero_iff) also have "\ \ (\n. ((((\rk\2*r. Fourier_coefficient f k * trigonometric_set k t)) / n) - l) * pi) \ 0" using tendsto_mult_right_iff [OF pi_neq_zero] by simp also have "\ \ ?rhs" apply (intro Lim_transform_eq [OF Lim_transform_eventually [of "\n. 0"]] eventually_sequentiallyI [of 1]) apply (simp_all add: Fourier_sum_offset_Fejer_kernel_half assms) done finally show ?thesis . qed lemma has_integral_Fejer_kernel: "has_bochner_integral (lebesgue_on {-pi..pi}) (Fejer_kernel n) (if n = 0 then 0 else pi)" proof - have "has_bochner_integral (lebesgue_on {-pi..pi}) (\x. (\rrx. (\rr 0" by (simp add: Fejer_kernel) theorem Fourier_Fejer_Cesaro_summable: assumes f: "f absolutely_integrable_on {-pi..pi}" and periodic: "\x. f(x + 2*pi) = f x" and fl: "(f \ l) (at t within atMost t)" and fr: "(f \ r) (at t within atLeast t)" shows "(\n. (\mk\2*m. Fourier_coefficient f k * trigonometric_set k t) / n) \ (l+r) / 2" proof - define h where "h \ \u. (f(t+u) + f(t-u)) - (l+r)" have "(\n. LINT u|lebesgue_on {0..pi}. Fejer_kernel n u * h u) \ 0" proof - have h0: "(h \ 0) (at 0 within atLeast 0)" proof - have l0: "((\u. f(t-u) - l) \ 0) (at 0 within {0..})" using fl unfolding Lim_within apply (elim all_forward imp_forward ex_forward conj_forward asm_rl, clarify) apply (drule_tac x="t-x" in bspec) apply (auto simp: dist_norm) done have r0: "((\u. f(t + u) - r) \ 0) (at 0 within {0..})" using fr unfolding Lim_within apply (elim all_forward imp_forward ex_forward conj_forward asm_rl, clarify) apply (drule_tac x="t+x" in bspec) apply (auto simp: dist_norm) done show ?thesis using tendsto_add [OF l0 r0] by (simp add: h_def algebra_simps) qed show ?thesis unfolding lim_sequentially dist_real_def diff_0_right proof clarify fix e::real assume "e > 0" then obtain x' where "0 < x'" "\x. \0 < x; x < x'\ \ \h x\ < e / (2 * pi)" using h0 unfolding Lim_within dist_real_def by (auto simp: dest: spec [where x="e/2/pi"]) then obtain \ where "0 < \" "\ < pi" and \: "\x. 0 < x \ x \ \ \ \h x\ < e/2/pi" apply (intro that [where \="min x' pi/2"], auto) using m2pi_less_pi by linarith have ftx: "(\x. f(t+x)) absolutely_integrable_on {-pi..pi}" using absolutely_integrable_periodic_offset assms by auto then have ftmx: "(\x. f(t-x)) absolutely_integrable_on {-pi..pi}" by (simp flip: absolutely_integrable_reflect_real [where f= "(\x. f(t+x))"]) have h_aint: "h absolutely_integrable_on {-pi..pi}" unfolding h_def by (intro absolutely_integrable_on_const set_integral_diff set_integral_add, auto simp: ftx ftmx) have "(\n. LINT x|lebesgue_on {\..pi}. Fejer_kernel n x * h x) \ 0" proof (rule Lim_null_comparison) define \ where "\ \ \n. (LINT x|lebesgue_on {\..pi}. \h x\ / (2 * sin(x/2) ^ 2)) / n" show "\\<^sub>F n in sequentially. norm (LINT x|lebesgue_on {\..pi}. Fejer_kernel n x * h x) \ \ n" proof (rule eventually_sequentiallyI) fix n::nat assume "n \ 1" have hint: "(\x. h x / (2 * sin(x/2) ^ 2)) absolutely_integrable_on {\..pi}" unfolding divide_inverse mult.commute [of "h _"] proof (rule absolutely_integrable_bounded_measurable_product_real) have cont: "continuous_on {\..pi} (\x. inverse (2 * (sin (x * inverse 2))\<^sup>2))" using \0 < \\ by (intro continuous_intros) (auto simp: sin_zero_pi_iff) show "(\x. 
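(* Informal statement of the theorem being proved: if $f$ is absolutely integrable on
   $[-\pi,\pi]$, $2\pi$-periodic, and has one-sided limits $f(t^-) = l$ and $f(t^+) = r$, then
   the Cesàro means of its Fourier series at $t$ converge to $(l+r)/2$.  Setting
   $h(u) = f(t+u) + f(t-u) - (l+r)$, so that $h(u) \to 0$ as $u \to 0^{+}$, it suffices to show
   $\int_0^{\pi} F_n(u)\,h(u)\,du \to 0$, which is done by splitting the integral at a small
   $\delta$. *)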
inverse (2 * (sin (x * inverse 2))\<^sup>2)) \ borel_measurable (lebesgue_on {\..pi})" using \0 < \\ by (intro cont continuous_imp_measurable_on_sets_lebesgue) auto show "bounded ((\x. inverse (2 * (sin (x * inverse 2))\<^sup>2)) ` {\..pi})" using cont by (simp add: compact_continuous_image compact_imp_bounded) show "h absolutely_integrable_on {\..pi}" using \0 < \\ \\ < pi\ by (auto intro: absolutely_integrable_on_subinterval [OF h_aint]) qed auto then have *: "integrable (lebesgue_on {\..pi}) (\x. \h x\ / (2 * (sin (x/2))\<^sup>2))" by (simp add: absolutely_integrable_measurable o_def) define \ where "\ \ \x. (if n = 0 then 0 else if x = 0 then n/2 else (sin (real n / 2 * x))\<^sup>2 / (2 * real n * (sin (x/2))\<^sup>2)) * h x" have "\LINT x|lebesgue_on {\..pi}. \ x\ \ (LINT x|lebesgue_on {\..pi}. \h x\ / (2 * (sin (x/2))\<^sup>2) / n)" proof (rule integral_abs_bound_integral) show **: "integrable (lebesgue_on {\..pi}) (\x. \h x\ / (2 * (sin (x/2))\<^sup>2) / n)" using Bochner_Integration.integrable_mult_left [OF *, of "1/n"] by (simp add: field_simps) show \: "\\ x\ \ \h x\ / (2 * (sin (x/2))\<^sup>2) / real n" if "x \ space (lebesgue_on {\..pi})" for x using that \0 < \\ apply (simp add: \_def divide_simps mult_less_0_iff abs_mult) apply (auto simp: square_le_1 mult_left_le_one_le) done show "integrable (lebesgue_on {\..pi}) \" proof (rule measurable_bounded_by_integrable_imp_lebesgue_integrable [OF _ **]) let ?g = "\x. \h x\ / (2 * sin(x/2) ^ 2) / n" have ***: "integrable (lebesgue_on {\..pi}) (\x. (sin (n/2 * x))\<^sup>2 * (inverse (2 * (sin (x/2))\<^sup>2) * h x))" proof (rule absolutely_integrable_imp_integrable [OF absolutely_integrable_bounded_measurable_product_real]) show "(\x. (sin (real n / 2 * x))\<^sup>2) \ borel_measurable (lebesgue_on {\..pi})" by (intro continuous_imp_measurable_on_sets_lebesgue continuous_intros) auto show "bounded ((\x. (sin (real n / 2 * x))\<^sup>2) ` {\..pi})" by (force simp: square_le_1 intro: boundedI [where B=1]) show "(\x. inverse (2 * (sin (x/2))\<^sup>2) * h x) absolutely_integrable_on {\..pi}" using hint by (simp add: divide_simps) qed auto show "\ \ borel_measurable (lebesgue_on {\..pi})" apply (rule borel_measurable_integrable) apply (rule integrable_cong [where f = "\x. sin(n / 2 * x) ^ 2 / (2 * n * sin(x/2) ^ 2) * h x", OF refl, THEN iffD1]) using \0 < \\ ** apply (force simp: \_def divide_simps algebra_simps mult_less_0_iff abs_mult) using Bochner_Integration.integrable_mult_left [OF ***, of "1/n"] by (simp add: field_simps) show "norm (\ x) \ ?g x" if "x \ {\..pi}" for x using that \ by (simp add: \_def) qed auto qed then show "norm (LINT x|lebesgue_on {\..pi}. Fejer_kernel n x * h x) \ \ n" by (simp add: Fejer_kernel \_def \_def flip: Bochner_Integration.integral_divide [OF *]) qed show "\ \ 0" unfolding \_def divide_inverse by (simp add: tendsto_mult_right_zero lim_inverse_n) qed then obtain N where N: "\n. n \ N \ \LINT x|lebesgue_on {\..pi}. Fejer_kernel n x * h x\ < e/2" unfolding lim_sequentially by (metis half_gt_zero_iff norm_conv_dist real_norm_def \e > 0\) show "\N. \n\N. \(LINT u|lebesgue_on {0..pi}. Fejer_kernel n u * h u)\ < e" proof (intro exI allI impI) fix n :: nat assume n: "n \ max 1 N" with N have 1: "\LINT x|lebesgue_on {\..pi}. 
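(* Informal sketch of the tail estimate: since $\sin^2(nx/2) \le 1$, for $\delta \le x \le \pi$
   one has $F_n(x) \le 1/(2n\sin^2(x/2))$, hence
     \[ \Bigl|\int_\delta^{\pi} F_n(x)\,h(x)\,dx\Bigr|
        \;\le\; \frac{1}{n}\int_\delta^{\pi} \frac{|h(x)|}{2\sin^2(x/2)}\,dx \;\longrightarrow\; 0, \]
   because the remaining integral is a fixed finite constant.  Together with the near-origin
   bound $\bigl|\int_0^\delta F_n h\bigr| \le \tfrac{\varepsilon}{2\pi}\int_0^\pi F_n \le \varepsilon/2$,
   this makes $\bigl|\int_0^\pi F_n h\bigr|$ eventually smaller than $\varepsilon$. *)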
Fejer_kernel n x * h x\ < e/2" by simp have "integral\<^sup>L (lebesgue_on {0..\}) (Fejer_kernel n) \ integral\<^sup>L (lebesgue_on {0..pi}) (Fejer_kernel n)" using \\ < pi\ has_bochner_integral_iff has_integral_Fejer_kernel_half by (force intro!: integral_mono_lebesgue_on_AE) also have "\ \ pi" using has_integral_Fejer_kernel_half by (simp add: has_bochner_integral_iff) finally have int_le_pi: "integral\<^sup>L (lebesgue_on {0..\}) (Fejer_kernel n) \ pi" . have 2: "\LINT x|lebesgue_on {0..\}. Fejer_kernel n x * h x\ \ (LINT x|lebesgue_on {0..\}. Fejer_kernel n x * e/2/pi)" proof - have eq: "integral\<^sup>L (lebesgue_on {0..\}) (\x. Fejer_kernel n x * h x) = integral\<^sup>L (lebesgue_on {0..\}) (\x. Fejer_kernel n x * (if x = 0 then 0 else h x))" proof (rule integral_cong_AE) have \: "{u. u \ 0 \ P u} \ {0..\} = {0} \ Collect P \ {0..\}" "{u. u \ 0 \ P u} \ {0..\} = (Collect P \ {0..\}) - {0}" for P using \0 < \\ by auto have *: "- {0} \ A \ {0..\} = A \ {0..\} - {0}" for A by auto show "(\x. Fejer_kernel n x * h x) \ borel_measurable (lebesgue_on {0..\})" using \\ < pi\ by (intro absolutely_integrable_imp_borel_measurable h_aint absolutely_integrable_on_subinterval [OF absolutely_integrable_mult_Fejer_kernel], auto) then show "(\x. Fejer_kernel n x * (if x = 0 then 0 else h x)) \ borel_measurable (lebesgue_on {0..\})" apply (simp add: in_borel_measurable Ball_def vimage_def Collect_conj_eq Collect_imp_eq * flip: Collect_neg_eq) apply (elim all_forward imp_forward asm_rl) using \0 < \\ apply (auto simp: \ sets.insert_in_sets sets_restrict_space_iff cong: conj_cong) done have 0: "{0} \ null_sets (lebesgue_on {0..\})" using \0 < \\ by (simp add: null_sets_restrict_space) then show "AE x in lebesgue_on {0..\}. Fejer_kernel n x * h x = Fejer_kernel n x * (if x = 0 then 0 else h x)" by (auto intro: AE_I' [OF 0]) qed show ?thesis unfolding eq proof (rule integral_abs_bound_integral) have "(\x. if x = 0 then 0 else h x) absolutely_integrable_on {- pi..pi}" proof (rule absolutely_integrable_spike [OF h_aint]) show "negligible {0}" by auto qed auto with \0 < \\ \\ < pi\ show "integrable (lebesgue_on {0..\}) (\x. Fejer_kernel n x * (if x = 0 then 0 else h x))" by (intro absolutely_integrable_imp_integrable h_aint absolutely_integrable_on_subinterval [OF absolutely_integrable_mult_Fejer_kernel]) auto show "integrable (lebesgue_on {0..\}) (\x. Fejer_kernel n x * e / 2 / pi)" by (simp add: absolutely_integrable_imp_integrable \\ < pi\ absolutely_integrable_mult_Fejer_kernel_reflected_part3 less_eq_real_def) show "\Fejer_kernel n x * (if x = 0 then 0 else h x)\ \ Fejer_kernel n x * e / 2 / pi" if "x \ space (lebesgue_on {0..\})" for x using that \ [of x] \e > 0\ by (auto simp: abs_mult eq simp flip: times_divide_eq_right intro: mult_left_mono) qed qed have 3: "\ \ e/2" using int_le_pi \0 < e\ by (simp add: divide_simps mult.commute [of e]) have "integrable (lebesgue_on {0..pi}) (\x. Fejer_kernel n x * h x)" unfolding h_def by (simp add: absolutely_integrable_imp_integrable absolutely_integrable_mult_Fejer_kernel_reflected_part5 assms) then have "LINT x|lebesgue_on {0..pi}. Fejer_kernel n x * h x = (LINT x|lebesgue_on {0..\}. Fejer_kernel n x * h x) + (LINT x|lebesgue_on {\..pi}. Fejer_kernel n x * h x)" by (rule integral_combine) (use \0 < \\ \\ < pi\ in auto) then show "\LINT u|lebesgue_on {0..pi}. 
Fejer_kernel n u * h u\ < e" using 1 2 3 by linarith qed qed qed then show ?thesis unfolding h_def by (simp add: Fourier_sum_limit_Fejer_kernel_half assms add_divide_distrib) qed corollary Fourier_Fejer_Cesaro_summable_simple: assumes f: "continuous_on UNIV f" and periodic: "\x. f(x + 2*pi) = f x" shows "(\n. (\mk\2*m. Fourier_coefficient f k * trigonometric_set k x) / n) \ f x" proof - have "(\n. (\mk\2*m. Fourier_coefficient f k * trigonometric_set k x) / n) \ (f x + f x) / 2" proof (rule Fourier_Fejer_Cesaro_summable) show "f absolutely_integrable_on {- pi..pi}" using absolutely_integrable_continuous_real continuous_on_subset f by blast show "(f \ f x) (at x within {..x})" "(f \ f x) (at x within {x..})" using Lim_at_imp_Lim_at_within continuous_on_def f by blast+ qed (auto simp: periodic Lim_at_imp_Lim_at_within continuous_on_def f) then show ?thesis by simp qed end diff --git a/thys/Gauss_Sums/Polya_Vinogradov.thy b/thys/Gauss_Sums/Polya_Vinogradov.thy --- a/thys/Gauss_Sums/Polya_Vinogradov.thy +++ b/thys/Gauss_Sums/Polya_Vinogradov.thy @@ -1,873 +1,873 @@ (* File: Polya_Vinogradov.thy Authors: Rodrigo Raya, EPFL; Manuel Eberl, TUM The Pólya–Vinogradov inequality, both the general case and the stronger variant for primitive characters. *) section \The Pólya--Vinogradov Inequality\ theory Polya_Vinogradov imports Gauss_Sums "Dirichlet_Series.Divisor_Count" begin unbundle no_vec_lambda_notation subsection \The case of primitive characters\ text \ We first prove a stronger variant of the Pólya--Vinogradov inequality for primitive characters. The fully general variant will then simply be a corollary of this. First, we need some bounds on logarithms, exponentials, and the harmonic numbers: \ (* TODO: Move? *) lemma ln_add_one_self_less_self: fixes x :: real assumes "x > 0" shows "ln (1 + x) < x" proof - have "0 \ x" "0 < x" "exp x > 0" "1+x > 0" using assms by simp+ have "1 + x < 1 + x + x\<^sup>2 / 2" using \0 < x\ by auto also have "\ \ exp x" using exp_lower_Taylor_quadratic[OF \0 \ x\] by blast finally have "1 + x < exp (x)" by blast then have "ln (1 + x) < ln (exp (x))" using ln_less_cancel_iff[OF \1+x > 0\ \exp(x) > 0\] by auto also have "\ = x" using ln_exp by blast finally show ?thesis by auto qed lemma exp_1_bounds: assumes "x > (0::real)" shows "exp 1 > (1 + 1 / x) powr x" and "exp 1 < (1 + 1 / x) powr (x+1)" proof - have "ln (1 + 1 / x) < 1 / x" using ln_add_one_self_less_self assms by simp thus "exp 1 > (1 + 1 / x) powr x" using assms by (simp add: field_simps powr_def) next have "1 < (x + 1) * ln ((x + 1) / x)" (is "_ < ?f x") proof (rule DERIV_neg_imp_decreasing_at_top[where ?f = ?f]) fix t assume t: "x \ t" have "(?f has_field_derivative (ln (1 + 1 / t) - 1 / t)) (at t)" using t assms by (auto intro!: derivative_eq_intros simp:divide_simps) moreover have "ln (1 + 1 / t) - 1 / t < 0" using ln_add_one_self_less_self[of "1 / t"] t assms by auto ultimately show "\y. ((\t. 
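(* Informal summary of the elementary bounds established here: for every real $x > 0$,
     \[ \Bigl(1 + \frac{1}{x}\Bigr)^{x} \;<\; e \;<\; \Bigl(1 + \frac{1}{x}\Bigr)^{x+1}, \]
   equivalently $\tfrac{1}{x+1} < \ln(1 + \tfrac1x) < \tfrac1x$.  These logarithm bounds feed the
   harmonic-number estimate $H_m < \ln(2m+1)$ used in the Pólya--Vinogradov argument below. *)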
(t + 1) * ln ((t + 1) / t)) has_real_derivative y) (at t) \ y < 0" by blast qed real_asymp thus "exp 1 < (1 + 1 / x) powr (x + 1)" using assms by (simp add: powr_def field_simps) qed lemma harm_aux_ineq_1: fixes k :: real assumes "k > 1" shows "1 / k < ln (1 + 1 / (k - 1))" proof - have "k-1 > 0" \k > 0\ using assms by simp+ from exp_1_bounds(2)[OF \k-1 > 0\] have "exp 1 < (1 + 1 / (k - 1)) powr k" by simp then have n_z: "(1 + 1 / (k - 1)) powr k > 0" using assms not_exp_less_zero by auto have "(1::real) = ln (exp(1))" using ln_exp by auto also have "\ < ln ((1 + 1 / (k - 1)) powr k)" using ln_less_cancel_iff[of "exp(1)",simplified,OF \(1 + 1 / (k - 1)) powr k > 0\] exp_1_bounds[OF \k - 1 > 0\] by simp also have "\ = k * ln (1 + 1 / (k - 1))" using ln_powr n_z by simp finally have "1 < k * ln (1 + 1 / (k - 1))" by blast then show ?thesis using assms by (simp add: field_simps) qed lemma harm_aux_ineq_2_lemma: assumes "x \ (0::real)" shows "1 < (x + 1) * ln (1 + 2 / (2 * x + 1))" proof - have "0 < ln (1+2/(2*x+1)) - 1 / (x + 1)" (is "_ < ?f x") proof (rule DERIV_neg_imp_decreasing_at_top[where ?f = ?f]) fix t assume t: "x \ t" from assms t have "3 + 8 * t + 4 * t^2 > 0" by (intro add_pos_nonneg) auto hence *: "3 + 8 * t + 4 * t^2 \ 0" by auto have "(?f has_field_derivative (-1 / ((1 + t)^2 * (3 + 8 * t + 4 * t ^ 2)))) (at t)" apply (insert assms t *, (rule derivative_eq_intros refl | simp add: add_pos_pos)+) apply (auto simp: divide_simps) apply (auto simp: algebra_simps power2_eq_square) done moreover have "-1 / ((1 + t)^2 * (3 + 8 * t + 4 * t^2)) < 0" using t assms by (intro divide_neg_pos mult_pos_pos add_pos_nonneg) auto ultimately show "\y. (?f has_real_derivative y) (at t) \ y < 0" by blast qed real_asymp thus "1 < (x + 1) * ln (1+2/(2*x+1))" using assms by (simp add: field_simps) qed lemma harm_aux_ineq_2: fixes k :: real assumes "k \ 1" shows "1 / (k + 1) < ln (1 + 2 / (2 * k + 1))" proof - have "k > 0" using assms by auto have "1 < (k + 1) * ln (1 + 2 / (2 * k + 1))" using harm_aux_ineq_2_lemma assms by simp then show ?thesis by (simp add: \0 < k\ add_pos_pos mult.commute mult_imp_div_pos_less) qed lemma nat_0_1_induct [case_names 0 1 step]: assumes "P 0" "P 1" "\n. 
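(* Informal summary of the two auxiliary logarithm inequalities and their role: for real $k$,
     \[ \frac{1}{k} < \ln\Bigl(1 + \frac{1}{k-1}\Bigr) \;\;(k > 1), \qquad
        \frac{1}{k+1} < \ln\Bigl(1 + \frac{2}{2k+1}\Bigr) \;\;(k \ge 1). \]
   The second inequality drives the induction in the next lemma:
   $H_{m+1} = H_m + \tfrac{1}{m+1} < \ln(2m+1) + \ln\bigl(\tfrac{2m+3}{2m+1}\bigr) = \ln(2m+3)$,
   which together with the base case $H_1 = 1 < \ln 3$ gives $H_m < \ln(2m+1)$ for all $m \ge 1$. *)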
n \ 1 \ P n \ P (Suc n)" shows "P n" proof (induction n rule: less_induct) case (less n) show ?case using assms(3)[OF _ less.IH[of "n - 1"]] by (cases "n \ 1") (insert assms(1-2),auto simp: eval_nat_numeral le_Suc_eq) qed lemma harm_less_ln: fixes m :: nat assumes "m > 0" shows "harm m < ln (2 * m + 1)" using assms proof (induct m rule: nat_0_1_induct) case 0 then show ?case by blast next case 1 have "harm 1 = (1::real)" unfolding harm_def by simp have "harm 1 < ln (3::real)" by (subst \harm 1 = 1\,subst ln3_gt_1,simp) then show ?case by simp next case (step n) have "harm (n+1) = harm n + 1/(n+1)" by ((subst Suc_eq_plus1[symmetric])+,subst harm_Suc,subst inverse_eq_divide,blast) also have "\ < ln (real (2 * n + 1)) + 1/(n+1)" using step(1-2) by auto also have "\ < ln (real (2 * n + 1)) + ln (1+2/(2*n+1))" proof - from step(1) have "real n \ 1" by simp have "1 / real (n + 1) < ln (1 + 2 / real (2 * n + 1))" using harm_aux_ineq_2[OF \1 \ (real n)\] by (simp add: add.commute) then show ?thesis by auto qed also have "\ = ln ((2 * n + 1) * (1+2/(2*n+1)))" by (rule ln_mult[symmetric],simp,simp add: field_simps) also have "\ = ln (2*(n+1)+1)" proof - have "(2 * n + 1) * (1+2/(2*n+1)) = 2*(n+1)+1" by (simp add: field_simps) then show ?thesis by presburger qed finally show ?case by simp qed (* END TODO *) text\Theorem 8.21\ theorem (in primitive_dchar) polya_vinogradov_inequality_primitive: fixes x :: nat shows "norm (\m=1..x. \ m) < sqrt n * ln n" proof - define \ :: complex where "\ = gauss_sum 1 div sqrt n" have \_mod: "norm \ = 1" using fourier_primitive(2) by (simp add: \_def) { fix m have "\ m = (\ div sqrt n) * (\k = 1..n. (cnj (\ k)) * unity_root n (-m*k))" using fourier_primitive(1)[of m] \_def by blast} note chi_expr = this have "(\m = 1..x. \(m)) = (\m = 1..x. (\ div sqrt n) * (\k = 1..n. (cnj (\ k)) * unity_root n (-m*k)))" by(rule sum.cong[OF refl]) (use chi_expr in blast) also have "\ = (\m = 1..x. (\k = 1..n. (\ div sqrt n) * ((cnj (\ k)) * unity_root n (-m*k))))" by (rule sum.cong,simp,simp add: sum_distrib_left) also have "\ = (\k = 1..n. (\m = 1..x. (\ div sqrt n) * ((cnj (\ k)) * unity_root n (-m*k))))" by (rule sum.swap) also have "\ = (\k = 1..n. (\ div sqrt n) * (cnj (\ k) * (\m = 1..x. unity_root n (-m*k))))" by (rule sum.cong,simp,simp add: sum_distrib_left) also have "\ = (\k = 1.. div sqrt n) * (cnj (\ k) * (\m = 1..x. unity_root n (-m*k))))" using n by (intro sum.mono_neutral_right) (auto intro: eq_zero) also have "\ = (\ div sqrt n) * (\k = 1.. k) * (\m = 1..x. unity_root n (-m*k))))" by (simp add: sum_distrib_left) finally have "(\m = 1..x. \(m)) = (\ div sqrt n) * (\k = 1.. k) * (\m = 1..x. unity_root n (-m*k))))" by blast hence eq: "sqrt n * (\m=1..x. \(m)) = \ * (\k=1.. k) * (\m=1..x. unity_root n (-m*k))))" by auto define f where "f = (\k. (\m = 1..x. unity_root n (-m*k)))" hence "(sqrt n) * norm(\m = 1..x. \(m)) = norm(\ * (\k=1.. k) * (\m = 1..x. unity_root n (-m*k)))))" proof - have "norm(sqrt n * (\m=1..x. \(m))) = norm (sqrt n) * norm((\m = 1..x. \(m)))" by (simp add: norm_mult) also have "\ = (sqrt n) * norm((\m = 1..x. \(m)))" by simp finally have 1: "norm((sqrt n) * (\m = 1..x. \(m))) = (sqrt n) * norm((\m = 1..x. \(m)))" by blast then show ?thesis using eq by algebra qed also have "\ = norm (\k = 1.. k) * (\m = 1..x. unity_root n (-m*k))))" by (simp add: norm_mult \_mod) also have "\ \ (\k = 1.. k) * (\ m = 1..x. unity_root n (-m*k))))" using norm_sum by blast also have "\ = (\k = 1.. k)) * norm((\ m = 1..x. 
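(* Informal sketch of the opening step of the Pólya--Vinogradov argument (here $\chi$ is the
   primitive character mod $n$, $e_n(a) = e^{2\pi i a/n}$, and $\varepsilon$ is the normalised
   Gauss sum, a complex number of modulus $1$; all of this is informal shorthand): the finite
   Fourier expansion
     \[ \chi(m) = \frac{\varepsilon}{\sqrt n}\sum_{k=1}^{n} \overline{\chi(k)}\,e_n(-mk) \]
   turns the character sum into
     \[ \sqrt n\,\sum_{m=1}^{x}\chi(m) = \varepsilon\sum_{k=1}^{n-1}\overline{\chi(k)}\,f(k),
        \qquad f(k) = \sum_{m=1}^{x} e_n(-mk), \]
   and since $|\varepsilon| = 1$ and $|\chi(k)| \le 1$, it follows that
   $\sqrt n\,\bigl|\sum_{m\le x}\chi(m)\bigr| \le \sum_{k=1}^{n-1} |f(k)|$. *)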
unity_root n (-m*k))))" by (rule sum.cong,simp, simp add: norm_mult) also have "\ \ (\k = 1..m = 1..x. unity_root n (-m*k))))" proof - show ?thesis proof (rule sum_mono) fix k assume "k \ {1..m=1..x. unity_root n (- int m * int k))" have "sum_aux \ 0" unfolding sum_aux_def by auto have "norm (cnj (\ k)) \ 1" using norm_le_1[of k] by simp then have "norm (cnj (\ k)) * sum_aux \ 1 * sum_aux" using \sum_aux \ 0\ by (simp add: mult_left_le_one_le) then show " norm (cnj (\ k)) * norm (\m = 1..x. unity_root n (- int m * int k)) \ norm (\m = 1..x. unity_root n (- int m * int k))" unfolding sum_aux_def by argo qed qed also have "\ = (\k = 1..m = 1..x. \(m)) \ (\k = 1..m = 1..x. unity_root n (-m*(n-k)))" unfolding f_def by blast also have "\ = (\m = 1..x. unity_root n (m*k))" proof (rule sum.cong,simp) fix xa assume "xa \ {1..x}" have "(k * int xa - int n * int xa) mod int n = (k * int xa - 0) mod int n" by (intro mod_diff_cong) auto thus "unity_root n (-int xa * (int n - k)) = unity_root n (int xa * k)" unfolding ring_distribs by (intro unity_root_cong) (auto simp: cong_def algebra_simps) qed also have "\ = cnj(f(k))" proof - have "cnj(f(k)) = cnj (\m = 1..x. unity_root n (- int m * k))" unfolding f_def by blast also have "cnj (\m = 1..x. unity_root n (- int m * k)) = (\m = 1..x. cnj(unity_root n (- int m * k)))" by (rule cnj_sum) also have "\ = (\m = 1..x. unity_root n (int m * k))" by (intro sum.cong) (auto simp: unity_root_uminus) finally show ?thesis by auto qed finally show "f(n-k) = cnj(f(k))" by blast qed hence "norm(f(n-k)) = norm(cnj(f(k)))" by simp hence "norm(f(n-k)) = norm(f(k))" by auto } note eq = this have 25: "odd n \ (\k = 1..n - 1. norm (f (int k))) \ 2 * (\k = 1..(n-1) div 2. norm (f (int k)))" "even n \ (\k = 1..n - 1. norm (f (int k))) \ 2 * (\k = 1..(n-2) div 2. norm (f (int k))) + norm(f(n div 2))" proof - assume "odd n" define g where "g = (\k. norm (f k))" have "(n-1) div 2 = n div 2" using \odd n\ n using div_mult_self1_is_m[OF pos2,of "n-1"] odd_two_times_div_two_nat[OF \odd n\] by linarith have "(\i=1..n-1. g i) = (\i\{1..n div 2}\{n div 2<..n-1}. g i)" using n by (intro sum.cong,auto) also have "\ = (\i\{1..n div 2}. g i) + (\i\{n div 2<..n-1}. g i)" by (subst sum.union_disjoint,auto) also have "(\i\{n div 2<..n-1}. g i) = (\i\{1..n - (n div 2 + 1)}. g (n - i))" by (rule sum.reindex_bij_witness[of _ "\i. n - i" "\i. n - i"],auto) also have "\ \ (\i\{1..n div 2}. g (n - i))" by (intro sum_mono2,simp,auto simp add: g_def) finally have 1: "(\i=1..n-1. g i) \ (\i=1..n div 2. g i + g (n - i))" by (simp add: sum.distrib) have "(\i=1..n div 2. g i + g (n - i)) = (\i=1..n div 2. 2 * g i)" unfolding g_def apply(rule sum.cong,simp) using eq int_ops(6) by force also have "\ = 2 * (\i=1..n div 2. g i)" by (rule sum_distrib_left[symmetric]) finally have 2: "(\i=1..n div 2. g i + g (n - i)) = 2 * (\i=1..n div 2. g i)" by blast from 1 2 have "(\i=1..n-1. g i) \ 2 * (\i=1..n div 2. g i)" by algebra then show "(\n = 1..n - 1. norm (f (int n))) \ 2 * (\n = 1..(n-1) div 2. norm (f (int n)))" unfolding g_def \(n-1) div 2 = n div 2\ by blast next assume "even n" define g where "g = (\n. norm (f (n)))" have "(n-2) div 2 = n div 2 - 1" using \even n\ n by simp have "(\i=1..n-1. g i) = (\i\{1.. {n div 2} \ {n div 2<..n-1}. g i)" using n by (intro sum.cong,auto) also have "\ = (\i\{1..i\{n div 2<..n-1}. g i) + g(n div 2)" by (subst sum.union_disjoint,auto) also have "(\i\{n div 2<..n-1}. g i) = (\i\{1..n - (n div 2+1)}. g (n - i))" by (rule sum.reindex_bij_witness[of _ "\i. n - i" "\i. 
n - i"],auto) also have "\ \ (\i\{1..even n\ n by auto then have "n - (n div 2 + 1) < n div 2" using n by (simp add: divide_simps) then show "{1..n - (n div 2 + 1)} \ {1..i=1..n-1. g i) \ (\i=1..i=1..i=1.. = 2 * (\i=1..i=1..i=1..i=1..n-1. g i) \ 2 * (\i=1..i=1..n-1. g i) \ 2 * (\i=1..(n-2) div 2. g i) + g(n div 2)" proof - have "{1..i=1..i=1..(n-2) div 2. g i)" by (rule sum.cong,simp) then show ?thesis using 3 by presburger qed then show "(\k = 1..n - 1. norm (f (int k))) \ 2 * (\n = 1..(n-2) div 2. norm (f (int n))) + g(n div 2)" unfolding g_def by blast qed (* expression for each f(n) *) {fix k :: int assume "1 \ k" "k \ n div 2" have "k \ n - 1" using \k \ n div 2\ n by linarith define y where "y = unity_root n (-k)" define z where "z = exp (-(pi*k/n)* \)" have "z^2 = exp (2*(-(pi*k/n)* \))" unfolding z_def using exp_double[symmetric] by blast also have "\ = y" unfolding y_def unity_root_conv_exp by (simp add: algebra_simps) finally have z_eq: "y = z^2" by blast have z_not_0: "z \ 0" using z_eq by (simp add: z_def) then have "y \ 1" using unity_root_eq_1_iff_int \1 \ k\ \k \ n - 1\ not_less unity_root_eq_1_iff_int y_def zdvd_not_zless by auto have "f(k) = (\m = 1..x . y^m)" unfolding f_def y_def by (subst unity_root_pow,rule sum.cong,simp,simp add: algebra_simps) also have sum: "\ = (\m = 1.. = (\m = 0.. = (y^(x+1) - 1) div (y - 1) - 1" using geometric_sum[OF \y \ 1\, of "x+1"] by (simp add: atLeast0LessThan) also have "\ = (y^(x+1) - 1 - (y-1)) div (y - 1)" proof - have "y - 1 \ 0" using \y \ 1\ by simp show ?thesis using divide_diff_eq_iff[OF \y - 1 \ 0\, of "(y^(x+1) - 1)" 1] by auto qed also have "\ = (y^(x+1) - y) div (y - 1)" by (simp add: algebra_simps) also have "\ = y * (y^x - 1) div (y - 1)" by (simp add: algebra_simps) also have "\ = z^2 * ((z^2)^x - 1) div (z^2 - 1)" unfolding z_eq by blast also have "\ = z^2 * (z^(2*x) - 1) div (z^2 - 1)" by (subst power_mult[symmetric, of z 2 x],blast) also have "\ = z^(x+1)*((z ^x -inverse(z^x))) / (z - inverse(z))" proof - have "z^x \ 0" using z_not_0 by auto have 1: "z ^ (2 * x) - 1 = z^x*(z ^x -inverse(z^x))" by (simp add: semiring_normalization_rules(36) right_inverse[OF \z^x \ 0\] right_diff_distrib') have 2: "z\<^sup>2 - 1 = z*(z - inverse(z))" by (simp add: right_diff_distrib' semiring_normalization_rules(29) right_inverse[OF \z \ 0\]) have 3: "z\<^sup>2 * (z^x / z) = z^(x+1)" proof - have "z\<^sup>2 * (z^x / z) = z\<^sup>2 * (z^x * inverse z)" by (simp add: inverse_eq_divide) also have "\ = z^(x+1)" by (simp add: algebra_simps power2_eq_square right_inverse[OF \z \ 0\]) finally show ?thesis by blast qed have "z\<^sup>2 * (z ^ (2 * x) - 1) / (z\<^sup>2 - 1) = z\<^sup>2 * (z^x*(z ^x -inverse(z^x))) / (z*(z - inverse(z)))" by (subst 1, subst 2,blast) also have "\ = (z\<^sup>2 * (z^x / z)) * ((z ^x -inverse(z^x))) / (z - inverse(z))" by simp also have "\ = z^(x+1) *((z ^x -inverse(z^x))) / (z - inverse(z))" by (subst 3,simp) finally show ?thesis by simp qed finally have "f(k) = z^(x+1) *((z ^x -inverse(z^x))) / (z - inverse(z))" by blast (* inequality for each f(k) *) then have "norm(f(k)) = norm(z^(x+1) * (((z ^x -inverse(z^x))) / (z - inverse(z))))" by auto also have "\ = norm(z^(x+1)) * norm(((z ^x -inverse(z^x))) / (z - inverse(z)))" using norm_mult by blast also have "\ = norm(((z ^x -inverse(z^x))) / (z - inverse(z)))" proof - have "norm(z) = 1" unfolding z_def by auto have "norm(z^(x+1)) = 1" by (subst norm_power,simp add: \norm(z) = 1\) then show ?thesis by simp qed also have "\ = norm((exp (-(x*pi*k/n)* \) - exp ((x*pi*k/n)* 
\)) div (exp (-(pi*k/n)* \) - exp ((pi*k/n)* \)))" proof - have 1: "z ^ x = exp (-(x*pi*k/n)* \)" unfolding z_def by (subst exp_of_nat_mult[symmetric],simp add: algebra_simps) have "inverse (z ^ x) = inverse (exp (-(x*pi*k/n)* \))" using \z ^ x = exp (-(x*pi*k/n)* \)\ by auto also have "\ = (exp ((x*pi*k/n)* \))" by (simp add: exp_minus) finally have 2: "inverse(z^x) = exp ((x*pi*k/n)* \)" by simp have 3: "inverse z = exp ((pi*k/n)* \)" by (simp add: exp_minus z_def) show ?thesis using 1 2 3 z_def by simp qed also have "\ = norm((sin (x*pi*k/n)) div (sin (pi*k/n)))" proof - have num: "(exp (-(x*pi*k/n)* \) - exp ((x*pi*k/n)* \)) = (-2*\* sin((x*pi*k/n)))" proof - have 1: "exp (-(x*pi*k/n)* \) = cos(-(x*pi*k/n)) + \ * sin(-(x*pi*k/n))" "exp ((x*pi*k/n)* \) = cos((x*pi*k/n)) + \ * sin((x*pi*k/n))" using Euler Im_complex_of_real Im_divide_of_nat Im_i_times Re_complex_of_real complex_Re_of_int complex_i_mult_minus exp_zero mult.assoc mult.commute by force+ have "(exp (-(x*pi*k/n)* \) - exp ((x*pi*k/n)* \)) = (cos(-(x*pi*k/n)) + \ * sin(-(x*pi*k/n))) - (cos((x*pi*k/n)) + \ * sin((x*pi*k/n)))" using 1 by argo also have "\ = -2*\* sin((x*pi*k/n))" by simp finally show ?thesis by blast qed have den: "(exp (-(pi*k/n)* \) - exp ((pi*k/n)* \)) = -2*\* sin((pi*k/n))" proof - have 1: "exp (-(pi*k/n)* \) = cos(-(pi*k/n)) + \ * sin(-(pi*k/n))" "exp ((pi*k/n)* \) = cos((pi*k/n)) + \ * sin((pi*k/n))" using Euler Im_complex_of_real Im_divide_of_nat Im_i_times Re_complex_of_real complex_Re_of_int complex_i_mult_minus exp_zero mult.assoc mult.commute by force+ have "(exp (-(pi*k/n)* \) - exp ((pi*k/n)* \)) = (cos(-(pi*k/n)) + \ * sin(-(pi*k/n))) - (cos((pi*k/n)) + \ * sin((pi*k/n)))" using 1 by argo also have "\ = -2*\* sin((pi*k/n))" by simp finally show ?thesis by blast qed have "norm((exp (-(x*pi*k/n)* \) - exp ((x*pi*k/n)* \)) div (exp (-(pi*k/n)* \) - exp ((pi*k/n)* \))) = norm((-2*\* sin((x*pi*k/n))) div (-2*\* sin((pi*k/n))))" using num den by presburger also have "\ = norm(sin((x*pi*k/n)) div sin((pi*k/n)))" by (simp add: norm_divide) finally show ?thesis by blast qed also have "\ = norm((sin (x*pi*k/n))) div norm((sin (pi*k/n)))" by (simp add: norm_divide) also have "\ \ 1 div norm((sin (pi*k/n)))" proof - have "norm((sin (pi*k/n))) \ 0" by simp have "norm (sin (x*pi*k/n)) \ 1" by simp then show ?thesis using divide_right_mono[OF \norm (sin (x*pi*k/n)) \ 1\ \norm((sin (pi*k/n))) \ 0\] by blast qed finally have 26: "norm(f(k)) \ 1 div norm((sin (pi*k/n)))" by blast (* inequality with sin *) { fix t assume "t \ 0" "t \ pi div 2" then have "t \ {0..pi div 2}" by auto have "convex_on {0..pi/2} (\x. -sin x)" by (rule convex_on_realI[where f' = "\x. 
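(* Informal sketch of the bound on each $f(k)$ for $1 \le k \le n/2$: summing the geometric
   series with ratio $e_n(-k) \ne 1$ and rewriting in terms of $z = e^{-\pi i k/n}$ gives
     \[ |f(k)| = \frac{|\sin(\pi k x/n)|}{\sin(\pi k/n)} \;\le\; \frac{1}{\sin(\pi k/n)}
        \;\le\; \frac{n}{2k}, \]
   where the last step uses the concavity bound $\sin\theta \ge \tfrac{2}{\pi}\theta$ on
   $[0,\pi/2]$, applicable because $\pi k/n \le \pi/2$. *)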
- cos x"]) (auto intro!: derivative_eq_intros simp: cos_monotone_0_pi_le) from convex_onD_Icc'[OF this \t \ {0..pi div 2}\] have "sin(t) \ (2 div pi)*t" by simp } note sin_ineq = this have sin_ineq_inst: "sin ((pi*k) / n) \ (2 * k) / n" proof - have "pi / n \ 0" by simp have 1: "(pi*k) / n \ 0" using \1 \ k\ by auto have "(pi*k)/n = (pi / n) * k" by simp also have "\ \ (pi / n) * (n / 2)" using mult_left_mono[of "k" "n / 2" "pi / n"] \k \ n div 2\ \0 \ pi / real n\ by linarith also have "\ \ pi / 2" by (simp add: divide_simps) finally have 2: "(pi*k)/n \ pi / 2" by auto have "(2 / pi) * (pi * k / n) \ sin((pi * k) / n)" using sin_ineq[OF 1 2] by blast then show "sin((pi * k) / n) \ (2*k) / n" by auto qed from 26 have "norm(f(k)) \ 1 div abs((sin (pi*k/n)))" by simp also have "\ \ 1 / abs((2*k) / n)" proof - have "sin (pi*k/n) \ (2*k) / n" using sin_ineq_inst by simp moreover have "(2*k) / n > 0" using n \1 \ k\ by auto ultimately have "abs((sin (pi*k/n))) \ abs((2*k)/n)" by auto have "abs((2*k)/n) > 0" using \(2*k)/n > 0\ by linarith then show "1 div abs((sin (pi*k/n))) \ 1 / abs(((2*k)/n))" using \abs((2*k)/n) > 0\ \abs((sin (pi*k/n))) \ abs(((2*k)/n))\ by (intro frac_le) auto qed also have "\ = n / (2*k)" using \k \ 1\ by simp finally have "norm(f(k)) \ n / (2*k)" by blast } note ineq = this (* inequality for the odd and even case*) have "sqrt n * norm (sum \ {1..x}) < n * ln n" proof (cases "even n") case True have "norm (f(n div 2)) \ 1" proof - have "int (n div 2) \ 1" using n \even n\ by auto show ?thesis using ineq[OF \int (n div 2) \ 1\] True n by force qed from 24 have "sqrt n * norm (sum \ {1..x}) \ (\k = 1.. = (\k = 1..n-1. norm (f (int k)))" by (intro sum.cong) auto also have "\ \ 2 * (\k = 1..(n - 2) div 2. norm (f (int k))) + norm(f(n div 2))" using 25(2)[OF True] by blast also have "\ \ real n * (\k = 1..(n - 2) div 2. 1 / k) + norm(f(n div 2))" proof - have "(\k = 1..(n - 2) div 2. norm (f (int k))) \ (\k = 1..(n - 2) div 2. real n div (2*k))" proof (rule sum_mono) fix k assume "k \ {1..(n - 2) div 2}" then have "1 \ int k" "int k \ n div 2" by auto show "norm (f (int k)) \ real n / (2*k)" using ineq[OF \1 \ int k\ \int k \ n div 2\] by auto qed also have "\ = (\k = 1..(n - 2) div 2. (real n div 2) * (1 / k))" by (rule sum.cong,auto) also have "\ = (real n div 2) * (\k = 1..(n - 2) div 2. 1 / k)" using sum_distrib_left[symmetric] by fast finally have "(\k = 1..(n - 2) div 2. norm (f (int k))) \ (real n div 2) * (\k = 1..(n - 2) div 2. 
1 / k)" by blast then show ?thesis by argo qed also have "\ = real n * harm ((n - 2) div 2) + norm(f(n div 2))" unfolding harm_def inverse_eq_divide by simp also have "\ < n * ln n" proof (cases "n = 2") case True have "real n * harm ((n - 2) div 2) + norm (f (int (n div 2))) \ 1" using \n = 2\ \norm (f (int (n div 2))) \ 1\ unfolding harm_def by simp moreover have "real n * ln (real n) \ 4 / 3" using \n = 2\ ln2_ge_two_thirds by auto ultimately show ?thesis by argo next case False have "n > 3" using n \n \ 2\ \even n\ by auto then have "(n-2) div 2 > 0" by simp then have "harm ((n - 2) div 2) < ln (real (2 * ((n - 2) div 2) + 1))" using harm_less_ln by blast also have "\ = ln (real (n - 1))" using \even n\ \n > 3\ by simp finally have 1: "harm ((n - 2) div 2) < ln (real (n - 1))" by blast then have "real n * harm ((n - 2) div 2) < real n * ln (real (n - 1))" using n by simp then have "real n * harm ((n - 2) div 2) + norm (f (int (n div 2))) < real n * ln (real (n - 1)) + 1" using \norm (f (int (n div 2))) \ 1\ by argo also have "\ = real n * ln (real (n - 1)) + real n * 1 / real n" using n by auto also have "\ < real n * ln (real (n - 1)) + real n * ln (1 + 1 / (real n - 1))" proof - have "real n > 1" "real n > 0" using n by simp+ then have "real n * (1 / real n) < real n * ln (1 + 1 / (real n - 1))" by (intro mult_strict_left_mono harm_aux_ineq_1) auto then show ?thesis by auto qed also have "\ = real n * ( ln (real (n - 1)) + ln (1 + 1 / (real n - 1)))" by argo also have "\ = real n * ( ln (real (n - 1) * (1 + 1 / (real n - 1))))" proof - have "real (n - 1) > 0" "1 + 1 / (real n - 1) > 0" using n by (auto simp add: add_pos_nonneg) show ?thesis by (subst ln_mult [OF \real (n - 1) > 0\ \1 + 1 / (real n - 1) > 0\,symmetric],blast) qed also have "\ = real n * ln n" using n by (auto simp add: divide_simps) finally show ?thesis by blast qed finally show ?thesis by blast next case False from 24 have "sqrt n * norm (sum \ {1..x}) \ (\k= 1.. = (\k= 1..n-1. norm (f (int k)))" by (intro sum.cong) auto also have "\ \ 2 * (\k = 1..(n - 1) div 2. norm (f (int k)))" using 25(1)[OF False] by blast also have "\ \ real n * (\k = 1..(n - 1) div 2. 1 / k)" proof - have "(\k = 1..(n - 1) div 2. norm (f (int k))) \ (\k = 1..(n - 1) div 2. real n div (2*k))" proof (rule sum_mono) fix k assume "k \ {1..(n - 1) div 2}" then have "1 \ int k" "int k \ n div 2" by auto show "norm (f (int k)) \ real n / (2*k)" using ineq[OF \1 \ int k\ \int k \ n div 2\] by auto qed also have "\ = (\k = 1..(n - 1) div 2. (n / 2) * (1 / k))" by (rule sum.cong,auto) also have "\ = (n / 2) * (\k = 1..(n - 1) div 2. 1 / k)" using sum_distrib_left[symmetric] by fast finally have "(\k = 1..(n - 1) div 2. norm (f (int k))) \ (real n div 2) * (\k = 1..(n - 1) div 2. 
1 / k)" by blast then show ?thesis by argo qed also have "\ = real n * harm ((n - 1) div 2)" unfolding harm_def inverse_eq_divide by simp also have "\ < n * ln n" proof - have "n > 2" using n \odd n\ by presburger then have "(n-1) div 2 > 0" by auto then have "harm ((n - 1) div 2) < ln (real (2 * ((n - 1) div 2) + 1))" using harm_less_ln by blast also have "\ = ln (real n)" using \odd n\ by simp finally show ?thesis using n by simp qed finally show ?thesis by blast qed then have 1: "sqrt n * norm (sum \ {1..x}) < n * ln n" by blast show "norm (sum \ {1..x}) < sqrt n * ln n" proof - have 2: "norm (sum \ {1..x}) * sqrt n < n * ln n" using 1 by argo have "sqrt n > 0" using n by simp have 3: "(n * ln n) / sqrt n = sqrt n * ln n" using n by (simp add: field_simps) show "norm (sum \ {1..x}) < sqrt n * ln n" using mult_imp_less_div_pos[OF \sqrt n > 0\ 2] 3 by argo qed qed subsection \General case\ text \ We now first prove the inequality for the general case in terms of the divisor function: \ theorem (in dcharacter) polya_vinogradov_inequality_explicit: assumes nonprincipal: "\ \ principal_dchar n" shows "norm (sum \ {1..x}) < sqrt conductor * ln conductor * divisor_count (n div conductor)" proof - write primitive_extension ("\") write conductor ("c") interpret \: primitive_dchar c "residue_mult_group c" primitive_extension using primitive_primitive_extension nonprincipal by metis have *: "k \ x div b \ b * k \ x" if "b > 0" for b k by (metis that antisym_conv div_le_mono div_mult_self1_is_m less_imp_le not_less times_div_less_eq_dividend) have **: "a > 0" if "a dvd n" for a using n that by (auto intro!: Nat.gr0I) from nonprincipal have "(\m=1..x. \ m) = (\m | m \ {1..x} \ coprime m n. \ m)" by (intro sum.mono_neutral_cong_right) (auto simp: eq_zero_iff principal_decomposition) also have "\ = (\m=1..x. \ m * (\d | d dvd gcd m n. moebius_mu d))" by (subst sum_moebius_mu_divisors', intro sum.mono_neutral_cong_left) (auto simp: coprime_iff_gcd_eq_1 simp del: coprime_imp_gcd_eq_1) also have "\ = (\m=1..x. \d | d dvd gcd m n. \ m * moebius_mu d)" by (simp add: sum_distrib_left) also have "\ = (\m=1..x. \d | d dvd m \ d dvd n. \ m * moebius_mu d)" by (intro sum.cong) auto also have "\ = (\(m, d)\(SIGMA m:{1..x}. {d. d dvd m \ d dvd n}). \ m * moebius_mu d)" using n by (subst sum.Sigma) auto also have "\ = (\(d, q)\(SIGMA d:{d. d dvd n}. {1..x div d}). moebius_mu d * \ (d * q))" by (intro sum.reindex_bij_witness[of _ "\(d,q). (d * q, d)" "\(m,d). (d, m div d)"]) (auto simp: * ** Suc_le_eq) also have "\ = (\d | d dvd n. moebius_mu d * \ d * (\q=1..x div d. \ q))" using n by (subst sum.Sigma [symmetric]) (auto simp: sum_distrib_left mult.assoc) finally have eq: "(\m=1..x. \ m) = \" . have "norm (\m=1..x. \ m) \ (\d | d dvd n. norm (moebius_mu d * \ d) * norm (\q=1..x div d. \ q))" unfolding eq by (intro sum_norm_le) (simp add: norm_mult) also have "\ < (\d | d dvd n. norm (moebius_mu d * \ d) * (sqrt c * ln c))" (is "sum ?lhs _ < sum ?rhs _") proof (rule sum_strict_mono_ex1) show "\d\{d. d dvd n}. ?lhs d \ ?rhs d" by (intro ballI mult_left_mono less_imp_le[OF \.polya_vinogradov_inequality_primitive]) auto show "\d\{d. d dvd n}. ?lhs d < ?rhs d" by (intro bexI[of _ 1] mult_strict_left_mono \.polya_vinogradov_inequality_primitive) auto qed (use n in auto) also have "\ = sqrt c * ln c * (\d | d dvd n. norm (moebius_mu d * \ d))" by (simp add: sum_distrib_left sum_distrib_right mult_ac) also have "(\d | d dvd n. norm (moebius_mu d * \ d)) = (\d | d dvd n \ squarefree d \ coprime d c. 
1)" using n by (intro sum.mono_neutral_cong_right) (auto simp: moebius_mu_def \.eq_zero_iff norm_mult norm_power \.norm) also have "\ = card {d. d dvd n \ squarefree d \ coprime d c}" by simp also have "card {d. d dvd n \ squarefree d \ coprime d c} \ card {d. d dvd (n div c)}" proof (intro card_mono; safe?) show "finite {d. d dvd (n div c)}" using dvd_div_eq_0_iff[of c n] n conductor_dvd by (intro finite_divisors_nat) auto next fix d assume d: "d dvd n" "squarefree d" "coprime d c" hence "d > 0" by (intro Nat.gr0I) auto show "d dvd (n div c)" proof (rule multiplicity_le_imp_dvd) fix p :: nat assume p: "prime p" show "multiplicity p d \ multiplicity p (n div c)" proof (cases "p dvd d") assume "p dvd d" with d \d > 0\ p have "multiplicity p d = 1" by (auto simp: squarefree_factorial_semiring' in_prime_factors_iff) moreover have "p dvd (n div c)" proof - have "p dvd c * (n div c)" using \p dvd d\ \d dvd n\ conductor_dvd by auto moreover have "\(p dvd c)" using d p \p dvd d\ coprime_common_divisor not_prime_unit by blast ultimately show "p dvd (n div c)" using p prime_dvd_mult_iff by blast qed hence "multiplicity p (n div c) \ 1" using n p conductor_dvd dvd_div_eq_0_iff[of c n] by (intro multiplicity_geI) (auto intro: Nat.gr0I) ultimately show ?thesis by simp qed (auto simp: not_dvd_imp_multiplicity_0) qed (use \d > 0\ in simp_all) qed also have "card {d. d dvd (n div c)} = divisor_count (n div c)" by (simp add: divisor_count_def) finally show "norm (sum \ {1..x}) < sqrt c * ln c * divisor_count (n div c)" using conductor_gr_0 by (simp add: mult_left_mono) qed (* TODO: Move? *) text \ Next, we obtain a suitable upper bound on the number of divisors of \n\: \ lemma divisor_count_upper_bound_aux: fixes n :: nat shows "divisor_count n \ 2 * card {d. d dvd n \ d \ sqrt n}" proof (cases "n = 0") case False hence n: "n > 0" by simp have *: "x > 0" if "x dvd n" for x using that n by (auto intro!: Nat.gr0I) have **: "real n = sqrt (real n) * sqrt (real n)" by simp have ***: "n < x * sqrt n \ sqrt n < x" "x * sqrt n < n \ x < sqrt n" for x - by (metis ** n of_nat_0_less_iff real_mult_less_iff1 real_sqrt_gt_0_iff)+ + by (metis ** n of_nat_0_less_iff mult_less_iff1 real_sqrt_gt_0_iff)+ have "divisor_count n = card {d. d dvd n}" by (simp add: divisor_count_def) also have "{d. d dvd n} = {d. d dvd n \ d \ sqrt n} \ {d. d dvd n \ d > sqrt n}" by auto also have "card \ = card {d. d dvd n \ d \ sqrt n} + card {d. d dvd n \ d > sqrt n}" using n by (subst card_Un_disjoint) auto also have "bij_betw (\d. n div d) {d. d dvd n \ d > sqrt n} {d. d dvd n \ d < sqrt n}" using n by (intro bij_betwI[of _ _ _ "\d. n div d"]) (auto simp: Real.real_of_nat_div real_sqrt_divide field_simps * ***) hence "card {d. d dvd n \ d > sqrt n} = card {d. d dvd n \ d < sqrt n}" by (rule bij_betw_same_card) also have "\ \ card {d. d dvd n \ d \ sqrt n}" using n by (intro card_mono) auto finally show "divisor_count n \ 2 * \" by simp qed auto lemma divisor_count_upper_bound: fixes n :: nat shows "divisor_count n \ 2 * nat \sqrt n\" proof (cases "n = 0") case False have "divisor_count n \ 2 * card {d. d dvd n \ d \ sqrt n}" by (rule divisor_count_upper_bound_aux) also have "card {d. 
d dvd n \ d \ sqrt n} \ card {1..nat \sqrt n\}" using False by (intro card_mono) (auto simp: le_nat_iff le_floor_iff Suc_le_eq intro!: Nat.gr0I) also have "\ = nat \sqrt n\" by simp finally show ?thesis by simp qed auto lemma divisor_count_upper_bound': fixes n :: nat shows "real (divisor_count n) \ 2 * sqrt n" proof - have "real (divisor_count n) \ 2 * real (nat \sqrt n\)" using divisor_count_upper_bound[of n] by linarith also have "\ \ 2 * sqrt n" by simp finally show ?thesis . qed (* END TODO *) text \ We are now ready to prove the `regular' Pólya--Vinogradov inequality. Apostol formulates it in the following way (Theorem 13.15, notation adapted): `If \\\ is any nonprincipal character mod \n\, then for all \x \ 2\ we have $\sum_{m\leq x} \chi(m) = O(\sqrt{n}\log n)$.' The precondition \x \ 2\ here is completely unnecessary. The `Big-O' notation is somewhat problematic since it does not make explicit in what way the variables are quantified (in particular the \x\ and the \\\). The statement of the theorem in this way (for a fixed character \\\) seems to suggest that \n\ is fixed here, which would make the use of `Big-O' completely vacuous, since it is an asymptotic statement about \n\. We therefore decided to formulate the inequality in the following more explicit way, even giving an explicit constant factor: \ theorem (in dcharacter) polya_vinogradov_inequality: assumes nonprincipal: "\ \ principal_dchar n" shows "norm (\m=1..x. \ m) < 2 * sqrt n * ln n" proof - have "n div conductor > 0" using n conductor_dvd dvd_div_eq_0_iff[of conductor n] by auto have "norm (\m=1..x. \ m) < sqrt conductor * ln conductor * divisor_count (n div conductor)" using nonprincipal by (rule polya_vinogradov_inequality_explicit) also have "\ \ sqrt conductor * ln conductor * (2 * sqrt (n div conductor))" using conductor_gr_0 \n div conductor > 0\ by (intro mult_left_mono divisor_count_upper_bound') (auto simp: Suc_le_eq) also have "sqrt (n div conductor) = sqrt n / sqrt conductor" using conductor_dvd by (simp add: Real.real_of_nat_div real_sqrt_divide) also have "sqrt conductor * ln conductor * (2 * (sqrt n / sqrt conductor)) = 2 * sqrt n * ln conductor" using conductor_gr_0 n by (simp add: algebra_simps) also have "\ \ 2 * sqrt n * ln n" using conductor_le_modulus conductor_gr_0 by (intro mult_left_mono) auto finally show ?thesis . qed unbundle vec_lambda_notation end \ No newline at end of file diff --git a/thys/GenClock/GenClock.thy b/thys/GenClock/GenClock.thy --- a/thys/GenClock/GenClock.thy +++ b/thys/GenClock/GenClock.thy @@ -1,1357 +1,1357 @@ (* Title: Formalization of Schneider's generalized clock synchronization protocol. Author: Alwen Tiu, LORIA, June 11, 2005 Maintainer: Alwen Tiu *) theory GenClock imports Complex_Main begin subsection\Types and constants definitions\ text\Process is represented by natural numbers. 
The type 'event' corresponds to synchronization rounds.\ type_synonym process = nat type_synonym event = nat (* synchronization rounds *) type_synonym time = real type_synonym Clocktime = real axiomatization \ :: real and \ :: real and \ :: real and rmin :: real and rmax :: real and \ :: real and \ :: real and (* Number of processes *) np :: process and maxfaults :: process and (* Physical clocks *) PC :: "[process, time] \ Clocktime" and (* Virtual / Logical clocks *) VC :: "[process, time] \ Clocktime" and (* Starting (real) time of synchronization rounds *) te :: "[process, event] \ time" and (* Clock readings at each round *) \ :: "[process, event] \ (process \ Clocktime)" and (* Logical clock in-effect at a time interval *) IC :: "[process, event, time] \ Clocktime" and (* Correct clocks *) correct :: "[process, time] \ bool" and (* The averaging function to calculate clock adjustment *) cfn :: "[process, (process \ Clocktime)] \ Clocktime" and \ :: "[Clocktime, Clocktime] \ Clocktime" and \ :: "Clocktime \ Clocktime" definition count :: "[process \ bool, process] \ nat" where "count f n = card {p. p < n \ f p}" definition (* Adjustment to a clock *) Adj :: "[process, event] \ Clocktime" where "Adj = (\ p i. if 0 < i then cfn p (\ p i) - PC p (te p i) - else 0)" + else 0)" definition (* Auxilary predicates used in the definition of precision enhancement *) okRead1 :: "[process \ Clocktime, Clocktime, process \ bool] \ bool" where "okRead1 f x ppred \ (\ l m. ppred l \ ppred m \ \f l - f m\ \ x)" definition - okRead2 :: "[process \ Clocktime, process \ Clocktime, Clocktime, + okRead2 :: "[process \ Clocktime, process \ Clocktime, Clocktime, process \ bool] \ bool" where "okRead2 f g x ppred \ (\ p. ppred p \ \f p - g p\ \ x)" definition rho_bound1 :: "[[process, time] \ Clocktime] \ bool" where "rho_bound1 C \ (\ p s t. correct p t \ s \ t \ C p t - C p s \ (t - s)*(1 + \))" definition rho_bound2 :: "[[process, time] \ Clocktime] \ bool" where "rho_bound2 C \ (\ p s t. correct p t \ s \ t \ (t - s)*(1 - \) \ C p t - C p s)" subsection\Clock conditions\ text\Some general assumptions\ axiomatization where - constants_ax: "0 < \ \ 0 < \ \ 0 < rmin + constants_ax: "0 < \ \ 0 < \ \ 0 < rmin \ rmin \ rmax \ 0 < \ \ 0 < np \ maxfaults \ np" axiomatization where PC_monotone: "\ p s t. correct p t \ s \ t \ PC p s \ PC p t" axiomatization where VClock: "\ p t i. correct p t \ te p i \ t \ t < te p (i + 1) \ VC p t = IC p i t" axiomatization where IClock: "\ p t i. correct p t \ IC p i t = PC p t + Adj p i" text\Condition 1: initial skew\ axiomatization where init: "\ p. correct p 0 \ 0 \ PC p 0 \ PC p 0 \ \" text\Condition 2: bounded drift\ axiomatization where rate_1: "\ p s t. correct p t \ s \ t \ PC p t - PC p s \ (t - s)*(1 + \)" and rate_2: "\ p s t. correct p t \ s \ t \ (t - s)*(1 - \) \ PC p t - PC p s" text\Condition 3: bounded interval\ axiomatization where rts0: "\ p t i. correct p t \ t \ te p (i+1) \ t - te p i \ rmax" and rts1: "\ p t i. correct p t \ te p (i+1) \ t \ rmin \ t - te p i" text\Condition 4 : bounded delay\ axiomatization where rts2a: "\ p q t i. correct p t \ correct q t \ te q i + \ \ t \ te p i \ t" and rts2b: "\ p q i. correct p (te p i) \ correct q (te q i) \ abs(te p i - te q i) \ \" text\Condition 5: initial synchronization\ axiomatization where synch0: "\ p. te p 0 = 0" text\Condition 6: nonoverlap\ axiomatization where nonoverlap: "\ \ rmin" text\Condition 7: reading errors\ axiomatization where - readerror: "\ p q i. 
correct p (te p (i+1)) \ correct q (te p (i+1)) \ + readerror: "\ p q i. correct p (te p (i+1)) \ correct q (te p (i+1)) \ abs(\ p (i+1) q - IC q i (te p (i+1))) \ \" text\Condition 8: bounded faults\ axiomatization where correct_closed: "\ p s t. s \ t \ correct p t \ correct p s" and correct_count: "\ t. np - maxfaults \ count (\ p. correct p t) np" text\Condition 9: Translation invariance\ axiomatization where trans_inv: "\ p f x. 0 \ x \ cfn p (\ y. f y + x) = cfn p f + x" text\Condition 10: precision enhancement\ axiomatization where - prec_enh: - "\ ppred p q f g x y. - np - maxfaults \ count ppred np \ - okRead1 f y ppred \ okRead1 g y ppred \ - okRead2 f g x ppred \ ppred p \ ppred q + prec_enh: + "\ ppred p q f g x y. + np - maxfaults \ count ppred np \ + okRead1 f y ppred \ okRead1 g y ppred \ + okRead2 f g x ppred \ ppred p \ ppred q \ abs(cfn p f - cfn q g) \ \ x y" text\Condition 11: accuracy preservation\ axiomatization where acc_prsv: "\ ppred p q f x. okRead1 f x ppred \ np - maxfaults \ count ppred np \ ppred p \ ppred q \ abs(cfn p f - f q) \ \ x" subsubsection\Some derived properties of clocks\ -lemma rts0d: +lemma rts0d: assumes cp: "correct p (te p (i+1))" shows "te p (i+1) - te p i \ rmax" using cp rts0 by simp lemma rts1d: assumes cp: "correct p (te p (i+1))" shows "rmin \ te p (i+1) - te p i" using cp rts1 by simp -lemma rte: +lemma rte: assumes cp: "correct p (te p (i+1))" shows "te p i \ te p (i+1)" proof- from cp rts1d have "rmin \ te p (i+1) - te p i" by simp from this constants_ax show ?thesis by arith qed lemma beta_bound1: assumes corr_p: "correct p (te p (i+1))" and corr_q: "correct q (te p (i+1))" shows "0 \ te p (i+1) - te q i" proof- from corr_p rte have "te p i \ te p (i+1)" by simp from this corr_p correct_closed have corr_pi: "correct p (te p i)" by blast from corr_p rts1d nonoverlap have "rmin \ te p (i+1) - te p i" by simp from this nonoverlap have "\ \ te p (i+1) - te p i" by simp hence "te p i + \ \ te p (i+1)" by simp - from this corr_p corr_q rts2a + from this corr_p corr_q rts2a have "te q i \ te p (i+1)" by blast thus ?thesis by simp qed lemma beta_bound2: assumes corr_p: "correct p (te p (i+1))" and corr_q: "correct q (te q i)" shows "te p (i+1) - te q i \ rmax + \" proof- from corr_p rte have "te p i \ te p (i+1)" by simp from this corr_p correct_closed have corr_pi: "correct p (te p i)" by blast have split: "te p (i+1) - te q i = (te p (i+1) - te p i) + (te p i - te q i)" by (simp) from corr_q corr_pi rts2b have Eq1: "abs(te p i - te q i) \ \" by simp have Eq2: "te p i - te q i \ \" proof cases assume "te q i \ te p i" from this Eq1 show ?thesis by (simp add: abs_if) next assume "\ (te q i \ te p i)" from this Eq1 show ?thesis by (simp add: abs_if) qed from corr_p rts0d have "te p (i+1) - te p i \ rmax" by simp from this split Eq2 show ?thesis by simp qed subsubsection\Bounded-drift for logical clocks (IC)\ -lemma bd: +lemma bd: assumes ie: "s \ t" and rb1: "rho_bound1 C" and rb2: "rho_bound2 D" and PC_ie: "D q t - D q s \ C p t - C p s" and corr_p: "correct p t" and corr_q: "correct q t" shows "\ C p t - D q t \ \ \ C p s - D q s \ + 2*\*(t - s)" proof- let ?Dt = "C p t - D q t" let ?Ds = "C p s - D q s" let ?Bp = "C p t - C p s" let ?Bq = "D q t - D q s" let ?I = "t - s" have "\ ?Bp - ?Bq \ \ 2*\*(t - s)" proof- from PC_ie have Eq1: "\ ?Bp - ?Bq \ = ?Bp - ?Bq" by (simp add: abs_if) from corr_p ie rb1 have Eq2: "?Bp - ?Bq \ ?I*(1+\) - ?Bq" (is "?E1 \ ?E2") by(simp add: rho_bound1_def) from corr_q ie rb2 have "?I*(1 - \) \ ?Bq" by(simp add: 
rho_bound2_def) from this have Eq3: "?E2 \ ?I*(1+\) - ?I*(1 - \)" by(simp) have Eq4: "?I*(1+\) - ?I*(1 - \) = 2*\*?I" by(simp add: algebra_simps) from Eq1 Eq2 Eq3 Eq4 show ?thesis by simp qed moreover have "\?Dt\ \ \?Bp - ?Bq\ + \?Ds\" by(simp add: abs_if) ultimately show ?thesis by simp qed -lemma bounded_drift: +lemma bounded_drift: assumes ie: "s \ t" and rb1: "rho_bound1 C" and rb2: "rho_bound2 C" and rb3: "rho_bound1 D" and rb4: "rho_bound2 D" and corr_p: "correct p t" and corr_q: "correct q t" shows "\C p t - D q t\ \ \C p s - D q s\ + 2*\*(t - s)" proof- let ?Bp = "C p t - C p s" let ?Bq = "D q t - D q s" show ?thesis proof cases assume "?Bq \ ?Bp" from this ie rb1 rb4 corr_p corr_q bd show ?thesis by simp next assume "\ (?Bq \ ?Bp)" hence "?Bp \ ?Bq" by simp - from this ie rb2 rb3 corr_p corr_q bd + from this ie rb2 rb3 corr_p corr_q bd have "\D q t - C p t\ \ \D q s - C p s\ + 2*\*(t - s)" by simp from this show ?thesis by (simp add: abs_minus_commute) qed qed text\Drift rate of logical clocks\ lemma IC_rate1: "rho_bound1 (\ p t. IC p i t)" proof- { fix p::process fix s::time fix t::time assume cp: "correct p t" assume ie: "s \ t" from cp ie correct_closed have cps: "correct p s" by blast have "IC p i t - IC p i s \ (t - s)*(1+\)" proof- from cp IClock have "IC p i t = PC p t + Adj p i" by simp moreover from cps IClock have "IC p i s = PC p s + Adj p i" by simp moreover from cp ie rate_1 have "PC p t - PC p s \ (t - s)*(1+\)" by simp ultimately show ?thesis by simp qed } thus ?thesis by (simp add: rho_bound1_def) qed lemma IC_rate2: "rho_bound2 (\ p t. IC p i t)" proof- { fix p::process fix s::time fix t::time assume cp: "correct p t" assume ie: "s \ t" from cp ie correct_closed have cps: "correct p s" by blast have "(t - s)*(1 - \) \ IC p i t - IC p i s" proof- from cp IClock have "IC p i t = PC p t + Adj p i" by simp moreover from cps IClock have "IC p i s = PC p s + Adj p i" by simp moreover from cp ie rate_2 have "(t - s)*(1-\) \ PC p t - PC p s" by simp ultimately show ?thesis by simp qed } thus ?thesis by (simp add: rho_bound2_def) qed text\Auxilary function \ICf\: we introduce this to avoid some unification problem in some tactic of isabelle.\ definition ICf :: "nat \ (process \ time \ Clocktime)" where "ICf i = (\ p t. IC p i t)" -lemma IC_bd: +lemma IC_bd: assumes ie: "s \ t" and corr_p: "correct p t" and corr_q: "correct q t" - shows "\IC p i t - IC q j t\ \ \IC p i s - IC q j s\ + 2*\*(t - s)" + shows "\IC p i t - IC q j t\ \ \IC p i s - IC q j s\ + 2*\*(t - s)" proof- let ?C = "ICf i" let ?D = "ICf j" let ?G = "\?C p t - ?D q t\ \ \?C p s - ?D q s\ + 2*\*(t - s)" - from IC_rate1 have rb1: "rho_bound1 (ICf i) \ rho_bound1 (ICf j)" + from IC_rate1 have rb1: "rho_bound1 (ICf i) \ rho_bound1 (ICf j)" by (simp add: ICf_def) - from IC_rate2 have rb2: "rho_bound2 (ICf i) \ rho_bound2 (ICf j)" + from IC_rate2 have rb2: "rho_bound2 (ICf i) \ rho_bound2 (ICf j)" by (simp add: ICf_def) from ie rb1 rb2 corr_p corr_q bounded_drift have ?G by simp from this show ?thesis by (simp add: ICf_def) qed -lemma event_bound: +lemma event_bound: assumes ie1: "0 \ (t::real)" and corr_p: "correct p t" and corr_q: "correct q t" shows "\ i. t < max (te p i) (te q i)" proof (rule ccontr) assume A: "\ (\ i. t < max (te p i) (te q i))" - show False + show False proof- have F1: "\ i. 
te p i \ t" - proof + proof fix i :: nat from A have "\ (t < max (te p i) (te q i))" by simp hence Eq1: "max (te p i) (te q i) \ t" by arith have Eq2: "te p i \ max (te p i) (te q i)" by (simp add: max_def) from Eq1 Eq2 show "te p i \ t" by simp qed - + have F2: "\ (i :: nat). correct p (te p i)" proof fix i :: nat from F1 have "te p i \ t" by simp - from this corr_p correct_closed + from this corr_p correct_closed show "correct p (te p i)" by blast qed - + have F3: "\ (i :: nat). real i * rmin \ te p i" proof fix i :: nat show "real i * rmin \ te p i" proof (induct i) from synch0 show "real (0::nat) * rmin \ te p 0" by simp next fix i :: nat assume ind_hyp: "real i * rmin \ te p i" - + show "real (Suc i) * rmin \ te p (Suc i)" proof- - have Eq1: "real i * rmin + rmin = (real i + 1)*rmin" - by (simp add: distrib_right) + have Eq1: "real i * rmin + rmin = (real i + 1)*rmin" + by (simp add: distrib_right) have Eq2: "real i + 1 = real (i+1)" by simp - from Eq1 Eq2 + from Eq1 Eq2 have Eq3: "real i * rmin + rmin = real (i+1) * rmin" by presburger from F2 have cp1: "correct p (te p (i+1))" by simp from F2 have cp2: "correct p (te p i)" by simp from cp1 rts1d have "rmin \ te p (i+1) - te p i" by simp hence Eq4: "te p i + rmin \ te p (i+1)" by simp from ind_hyp have "real i * rmin + rmin \ te p i + rmin" by (simp) from this Eq4 have "real i * rmin + rmin \ te p (i+1)" by simp from this Eq3 show ?thesis by simp qed qed qed - + have F4: "\ (i::nat). real i * rmin \ t" proof fix i::nat from F1 have "te p i \ t" by simp moreover from F3 have "real i * rmin \ te p i" by simp ultimately show "real i * rmin \ t" by simp qed from constants_ax have "0 < rmin" by simp - from this reals_Archimedean3 + from this reals_Archimedean3 have Archi: "\ (k::nat). t < real k * rmin" by blast - + from Archi obtain k::nat where C: "t < real k * rmin" .. - - from F4 have "real k * rmin \ t" by simp + + from F4 have "real k * rmin \ t" by simp hence notC: "\ (t < real k * rmin)" by simp - + from C notC show False by simp qed qed subsection\Agreement property\ definition "\1 x = \ (2*\*\ + 2*\) (2*\ + x + 2*\*(rmax + \))" definition "\2 x = x + 2*\*rmax" definition "\3 x = \ (2*\ + x + 2*\*(rmax + \)) + \ + 2*\*\" definition okmaxsync :: "[nat, Clocktime] \ bool" where "okmaxsync i x \ (\ p q. correct p (max (te p i) (te q i)) - \ correct q (max (te p i) (te q i)) \ + \ correct q (max (te p i) (te q i)) \ \IC p i (max (te p i) (te q i)) - IC q i (max (te p i) (te q i))\ \ x)" definition okClocks :: "[process, process, nat] \ bool" where "okClocks p q i \ (\ t. 
0 \ t \ t < max (te p i) (te q i) - \ correct p t \ correct q t + \ correct p t \ correct q t \ \VC p t - VC q t\ \ \)" lemma okClocks_sym: assumes ok_pq: "okClocks p q i" shows "okClocks q p i" proof- { fix t :: time assume ie1: "0 \ t" assume ie2: "t < max (te q i) (te p i)" assume corr_q: "correct q t" assume corr_p: "correct p t" have "max (te q i) (te p i) = max (te p i) (te q i)" by (simp add: max_def) - from this ok_pq ie1 ie2 corr_p corr_q + from this ok_pq ie1 ie2 corr_p corr_q have "\VC q t - VC p t\ \ \" by(simp add: abs_minus_commute okClocks_def) } thus ?thesis by (simp add: okClocks_def) qed -lemma ICp_Suc: +lemma ICp_Suc: assumes corr_p: "correct p (te p (i+1))" shows "IC p (i+1) (te p (i+1)) = cfn p (\ p (i+1)) " using corr_p IClock by(simp add: Adj_def) lemma IC_trans_inv: -assumes ie1: "te q (i+1) \ te p (i+1)" +assumes ie1: "te q (i+1) \ te p (i+1)" and corr_p: "correct p (te p (i+1))" and corr_q: "correct q (te p (i+1))" -shows -"IC q (i+1) (te p (i+1)) = +shows +"IC q (i+1) (te p (i+1)) = cfn q (\ n. \ q (i+1) n + (PC q (te p (i+1)) - PC q (te q (i+1))))" (is "?T1 = ?T2") proof- let ?X = "PC q (te p (i+1)) - PC q (te q (i+1))" from corr_q ie1 PC_monotone have posX: "0 \ ?X" by (simp add: le_diff_eq) from IClock corr_q have "?T1 = cfn q (\ q (i+1)) + ?X" by(simp add: Adj_def) from this posX trans_inv show ?thesis by simp qed -lemma beta_rho: +lemma beta_rho: assumes ie: "te q (i+1) \ te p (i+1)" and corr_p: "correct p (te p (i+1))" and corr_q: "correct q (te p (i+1))" and corr_l: "correct l (te p (i+1))" shows "\(PC l (te p (i+1)) - PC l (te q (i+1))) - (te p (i+1) - te q (i+1))\ \ \*\" proof- let ?X = "(PC l (te p (i+1)) - PC l (te q (i+1)))" - let ?D = "te p (i+1) - te q (i+1)" + let ?D = "te p (i+1) - te q (i+1)" from ie have posD: "0 \ ?D" by simp - from ie PC_monotone corr_l have posX: "0 \ ?X" + from ie PC_monotone corr_l have posX: "0 \ ?X" by (simp add: le_diff_eq) from ie corr_l rate_1 have bound1: "?X \ ?D * (1 + \)" by simp from ie corr_l correct_closed have corr_l_tq: "correct l (te q (i+1))" by(blast) from ie corr_q correct_closed have corr_q_tq: "correct q (te q (i+1))" by blast from corr_q_tq corr_p rts2b have "\?D\ \ \" by(simp) from this constants_ax posD have D_beta: "?D*\ \ \*\" by(simp add: abs_if) - show ?thesis + show ?thesis proof cases assume A: "?D \ ?X" from posX posD A have absEq: "\?X - ?D\ = ?X - ?D" by(simp add: abs_if) - from bound1 have bound2: "?X - ?D \ ?D*\" + from bound1 have bound2: "?X - ?D \ ?D*\" by(simp add: mult.commute distrib_right) from D_beta absEq bound2 show ?thesis by simp next assume notA: "\ (?D \ ?X)" from this have absEq2: "\?X - ?D\ = ?D - ?X" by(simp add: abs_if) from ie corr_l rate_2 have bound3: "?D*(1 - \) \ ?X" by simp from this have "?D - ?X \ ?D*\" by (simp add: algebra_simps) from this absEq2 D_beta show ?thesis by simp qed qed -text\This lemma (and the next one pe-cond2) proves an assumption used +text\This lemma (and the next one pe-cond2) proves an assumption used in the precision enhancement.\ -lemma pe_cond1: -assumes ie: "te q (i+1) \ te p (i+1)" +lemma pe_cond1: +assumes ie: "te q (i+1) \ te p (i+1)" and corr_p: "correct p (te p (i+1))" and corr_q: "correct q (te p (i + 1))" and corr_l: "correct l (te p (i+1))" -shows "\\ q (i+1) l + (PC q (te p (i+1)) - PC q (te q (i+1))) - +shows "\\ q (i+1) l + (PC q (te p (i+1)) - PC q (te q (i+1))) - \ p (i+1) l\ \ 2* \ * \ + 2*\" (is "?M \ ?N") proof- let ?Xl = "(PC l (te p (i+1)) - PC l (te q (i+1)))" let ?Xq = "(PC q (te p (i+1)) - PC q (te q (i+1)))" - let ?D 
= "te p (i+1) - te q (i+1)" + let ?D = "te p (i+1) - te q (i+1)" let ?T = "\ p (i+1) l - \ q (i+1) l" let ?RE1 = "\ p (i+1) l - IC l i (te p (i+1))" let ?RE2 = "\ q (i+1) l - IC l i (te q (i+1))" let ?ICT = "IC l i (te p (i+1)) - IC l i (te q (i+1))" have "?M = \(?Xq - ?D) - (?T - ?D)\" by(simp add: abs_if) hence Split: "?M \ \?Xq - ?D\ + \?T - ?D\" by(simp add: abs_if) from ie corr_q correct_closed have corr_q_tq: "correct q (te q (i+1))" by(blast) from ie corr_l correct_closed have corr_l_tq: "correct l (te q (i+1))" by blast - from corr_p corr_q corr_l ie beta_rho + from corr_p corr_q corr_l ie beta_rho have XlD: "\?Xl - ?D\ \ \*\" by simp - - from corr_p corr_q ie beta_rho + + from corr_p corr_q ie beta_rho have XqD: "\?Xq - ?D\ \ \*\" by simp - have TD: "\?T - ?D\ \ 2*\ + \*\" + have TD: "\?T - ?D\ \ 2*\ + \*\" proof- have Eq1: "\?T - ?D\ = \(?T - ?ICT) + (?ICT - ?D)\" (is "?E1 = ?E2") by (simp add: abs_if) - - have Eq2: "?E2 \ \?T - ?ICT\ + \?ICT - ?D\" + + have Eq2: "?E2 \ \?T - ?ICT\ + \?ICT - ?D\" by(simp add: abs_if) have Eq3: "\?T - ?ICT\ \ \?RE1\ + \?RE2\" by(simp add: abs_if) - from readerror corr_p corr_l + from readerror corr_p corr_l have Eq4: "\?RE1\ \ \" by simp - - from corr_l_tq corr_q_tq this readerror + + from corr_l_tq corr_q_tq this readerror have Eq5: "\?RE2\ \ \" by simp - + from Eq3 Eq4 Eq5 have Eq6: "\?T - ?ICT\ \ 2*\" by simp have Eq7: "?ICT - ?D = ?Xl - ?D" proof- from corr_p rte have "te p i \ te p (i+1)" by(simp) from this corr_l correct_closed have corr_l_tpi: "correct l (te p i)" by blast from corr_q_tq rte have "te q i \ te q (i+1)" by simp from this corr_l_tq correct_closed have corr_l_tqi: "correct l (te q i)" by blast from IClock corr_l have F1: "IC l i (te p (i+1)) = PC l (te p (i+1)) + Adj l i" by(simp) - from IClock corr_l_tq + from IClock corr_l_tq have F2: "IC l i (te q (i+1)) = PC l (te q (i+1)) + Adj l i" by simp from F1 F2 show ?thesis by(simp) qed from this XlD have Eq8: "\?ICT - ?D\ \ \*\" by arith - from Eq1 Eq2 Eq6 Eq8 show ?thesis + from Eq1 Eq2 Eq6 Eq8 show ?thesis by(simp) qed from Split XqD TD have F1: "?M \ 2* \ * \ + 2*\" by(simp) - have F2: "2 * \ * \ + 2*\ = 2* \ * \ + 2*\" + have F2: "2 * \ * \ + 2*\ = 2* \ * \ + 2*\" by simp from F1 show ?thesis by (simp only: F2) qed -lemma pe_cond2: +lemma pe_cond2: assumes ie: "te m i \ te l i" and corr_k: "correct k (te k (i+1))" and corr_l_tk: "correct l (te k (i+1))" and corr_m_tk: "correct m (te k (i+1))" and ind_hyp: "\IC l i (te l i) - IC m i (te l i)\ \ \S" shows "\\ k (i+1) l - \ k (i+1) m\ \ 2*\ + \S + 2*\*(rmax + \)" proof- let ?X = "\ k (i+1) l - \ k (i+1) m" let ?N = "2*\ + \S + 2*\*(rmax + \)" let ?D1 = "\ k (i+1) l - IC l i (te k (i+1))" let ?D2 = "\ k (i+1) m - IC m i (te k (i+1))" let ?ICS = "IC l i (te k (i+1)) - IC m i (te k (i+1))" let ?tlm = "te l i" let ?IC = "IC l i ?tlm - IC m i ?tlm" - have Eq1: "\?X\ = \(?D1 - ?D2) + ?ICS\" (is "?E1 = ?E2") + have Eq1: "\?X\ = \(?D1 - ?D2) + ?ICS\" (is "?E1 = ?E2") by (simp add: abs_if) have Eq2: "?E2 \ \?D1 - ?D2\ + \?ICS\" by (simp add: abs_if) - from corr_l_tk corr_k beta_bound1 have ie_lk: "te l i \ te k (i+1)" + from corr_l_tk corr_k beta_bound1 have ie_lk: "te l i \ te k (i+1)" by (simp add: le_diff_eq) - + from this corr_l_tk correct_closed have corr_l: "correct l (te l i)" by blast from ie_lk corr_l_tk corr_m_tk IC_bd have Eq3: "\?ICS\ \ \?IC\ + 2*\*(te k (i+1) - ?tlm)" by simp from this ind_hyp have Eq4: "\?ICS\ \ \S + 2*\*(te k (i+1) - ?tlm)" by simp from corr_l corr_k beta_bound2 have "te k (i+1) - ?tlm \ rmax + \" by simp 
from this constants_ax have "2*\*(te k (i+1) - ?tlm) \ 2*\*(rmax + \)" - by (simp add: real_mult_le_cancel_iff2) + by simp from this Eq4 have Eq4a: "\?ICS\ \ \S + 2*\*(rmax + \)" - by (simp) + by simp - from corr_k corr_l_tk readerror + from corr_k corr_l_tk readerror have Eq5: "\?D1\ \ \" by simp - from corr_k corr_m_tk readerror + from corr_k corr_m_tk readerror have Eq6: "\?D2\ \ \" by simp have "\?D1 - ?D2\ \ \?D1\ + \?D2\" by (simp add: abs_if) from this Eq5 Eq6 have Eq7: "\?D1 - ?D2\ \ 2*\" by (simp) from Eq1 Eq2 Eq4a Eq7 split show ?thesis by (simp) qed lemma theta_bound: assumes corr_l: "correct l (te p (i+1))" and corr_m: "correct m (te p (i+1))" and corr_p: "correct p (te p (i+1))" -and IC_bound: +and IC_bound: "\IC l i (max (te l i) (te m i)) - IC m i (max (te l i) (te m i))\ \ \S" shows "\\ p (i+1) l - \ p (i+1) m\ \ 2*\ + \S + 2*\*(rmax + \)" proof- from corr_p corr_l beta_bound1 have tli_le_tp: "te l i \ te p (i+1)" by (simp add: le_diff_eq) from corr_p corr_m beta_bound1 have tmi_le_tp: "te m i \ te p (i+1)" by (simp add: le_diff_eq) - + let ?tml = "max (te l i) (te m i)" from tli_le_tp tmi_le_tp have tml_le_tp: "?tml \ te p (i+1)" by simp from tml_le_tp corr_l correct_closed have corr_l_tml: "correct l ?tml" by blast from tml_le_tp corr_m correct_closed have corr_m_tml: "correct m ?tml" by blast let ?Y = "2*\ + \S + 2*\*(rmax + \)" show "\\ p (i+1) l - \ p (i+1) m\ \ ?Y" proof cases assume A: "te m i < te l i" - from this IC_bound + from this IC_bound have "\IC l i (te l i) - IC m i (te l i)\ \ \S" by(simp add: max_def) - from this A corr_p corr_l corr_m pe_cond2 - show ?thesis by(simp) + from this A corr_p corr_l corr_m pe_cond2 + show ?thesis by(simp) next assume "\ (te m i < te l i)" hence Eq1: "te l i \ te m i" by simp - from this IC_bound + from this IC_bound have Eq2: "\IC l i (te m i) - IC m i (te m i)\ \ \S" by(simp add: max_def) hence "\IC m i (te m i) - IC l i (te m i)\ \ \S" by (simp add: abs_minus_commute) - from this Eq1 corr_p corr_l corr_m pe_cond2 + from this Eq1 corr_p corr_l corr_m pe_cond2 have "\\ p (i+1) m - \ p (i+1) l\ \ ?Y" by(simp) thus ?thesis by (simp add: abs_minus_commute) qed qed -lemma four_one_ind_half: +lemma four_one_ind_half: assumes ie1: "\ \ rmin" and ie2: "\ \ \S" and ie3: "\1 \S \ \S" and ind_hyp: "okmaxsync i \S" and ie4: "te q (i+1) \ te p (i+1)" and corr_p: "correct p (te p (i+1))" and corr_q: "correct q (te p (i+1))" shows "\IC p (i+1) (te p (i+1)) - IC q (i+1) (te p (i+1))\ \ \S" proof- let ?tpq = "te p (i+1)" let ?f = "\ n. \ q (i+1) n + (PC q (te p (i+1)) - PC q (te q (i+1)))" let ?g = "\ p (i+1)" from ie4 corr_q correct_closed have corr_q_tq: "correct q (te q (i+1))" by blast - - have Eq_IC_cfn: "\IC p (i+1) ?tpq - IC q (i+1) ?tpq\ = + + have Eq_IC_cfn: "\IC p (i+1) ?tpq - IC q (i+1) ?tpq\ = \cfn q ?f - cfn p ?g\" proof- - from corr_p ICp_Suc have Eq1: "IC p (i+1) ?tpq = cfn p ?g" by simp - - from ie4 corr_p corr_q IC_trans_inv + from corr_p ICp_Suc have Eq1: "IC p (i+1) ?tpq = cfn p ?g" by simp + + from ie4 corr_p corr_q IC_trans_inv have Eq2: "IC q (i+1) ?tpq = cfn q ?f" by simp - + from Eq1 Eq2 show ?thesis by(simp add: abs_if) qed let ?ppred = "\ l. correct l (te p (i+1))" - + let ?X = "2*\*\ + 2*\" have "\ l. 
?ppred l \ \?f l - ?g l\ \ ?X" proof - { fix l assume "?ppred l" from ie4 corr_p corr_q this pe_cond1 have "\?f l - ?g l\ \ (2*\*\ + 2*\)" by(auto) } thus ?thesis by blast qed - hence cond1: "okRead2 ?f ?g ?X ?ppred" + hence cond1: "okRead2 ?f ?g ?X ?ppred" by(simp add: okRead2_def) - + let ?Y = "2*\ + \S + 2*\*(rmax + \)" have "\ l m. ?ppred l \ ?ppred m \ \?f l - ?f m\ \ ?Y" proof- { fix l m assume corr_l: "?ppred l" assume corr_m: "?ppred m" from corr_p corr_l beta_bound1 have tli_le_tp: "te l i \ te p (i+1)" by (simp add: le_diff_eq) from corr_p corr_m beta_bound1 have tmi_le_tp: "te m i \ te p (i+1)" by (simp add: le_diff_eq) let ?tlm = "max (te l i) (te m i)" from tli_le_tp tmi_le_tp have tlm_le_tp: "?tlm \ te p (i+1)" by simp from ie4 corr_l correct_closed have corr_l_tq: "correct l (te q (i+1))" by blast from ie4 corr_m correct_closed have corr_m_tq: "correct m (te q (i+1))" by blast from tlm_le_tp corr_l correct_closed have corr_l_tlm: "correct l ?tlm" by blast from tlm_le_tp corr_m correct_closed have corr_m_tlm: "correct m ?tlm" by blast - from ind_hyp corr_l_tlm corr_m_tlm + from ind_hyp corr_l_tlm corr_m_tlm have EqAbs1: "\IC l i ?tlm - IC m i ?tlm\ \ \S" by(auto simp add: okmaxsync_def) have EqAbs3: "\?f l - ?f m\ = \\ q (i+1) l - \ q (i+1) m\" by (simp add: abs_if) from EqAbs1 corr_q_tq corr_l_tq corr_m_tq theta_bound have "\\ q (i+1) l - \ q (i+1) m\ \ ?Y" by simp from this EqAbs3 have "\?f l - ?f m\ \ ?Y" by simp } thus ?thesis by simp qed - hence cond2a: "okRead1 ?f ?Y ?ppred" by (simp add: okRead1_def) + hence cond2a: "okRead1 ?f ?Y ?ppred" by (simp add: okRead1_def) have "\ l m. ?ppred l \ ?ppred m \ \?g l - ?g m\ \ ?Y" proof- { fix l m assume corr_l: "?ppred l" assume corr_m: "?ppred m" from corr_p corr_l beta_bound1 have tli_le_tp: "te l i \ te p (i+1)" by (simp add: le_diff_eq) from corr_p corr_m beta_bound1 have tmi_le_tp: "te m i \ te p (i+1)" by (simp add: le_diff_eq) let ?tlm = "max (te l i) (te m i)" from tli_le_tp tmi_le_tp have tlm_le_tp: "?tlm \ te p (i+1)" by simp from tlm_le_tp corr_l correct_closed have corr_l_tlm: "correct l ?tlm" by blast from tlm_le_tp corr_m correct_closed have corr_m_tlm: "correct m ?tlm" by blast - from ind_hyp corr_l_tlm corr_m_tlm + from ind_hyp corr_l_tlm corr_m_tlm have EqAbs1: "\IC l i ?tlm - IC m i ?tlm\ \ \S" by(auto simp add: okmaxsync_def) from EqAbs1 corr_p corr_l corr_m theta_bound have "\?g l - ?g m\ \ ?Y" by simp } thus ?thesis by simp qed - hence cond2b: "okRead1 ?g ?Y ?ppred" by (simp add: okRead1_def) + hence cond2b: "okRead1 ?g ?Y ?ppred" by (simp add: okRead1_def) from correct_count have "np - maxfaults \ count ?ppred np" by simp - from this corr_p corr_q cond1 cond2a cond2b prec_enh - have "\cfn q ?f - cfn p ?g\ \ \ ?X ?Y" + from this corr_p corr_q cond1 cond2a cond2b prec_enh + have "\cfn q ?f - cfn p ?g\ \ \ ?X ?Y" by blast from ie3 this have "\cfn q ?f - cfn p ?g\ \ \S" by (simp add: \1_def) from this Eq_IC_cfn show ?thesis by (simp) qed text\Theorem 4.1 in Shankar's paper.\ -theorem four_one: +theorem four_one: assumes ie1: "\ \ rmin" and ie2: "\ \ \S" and ie3: "\1 \S \ \S" shows "okmaxsync i \S" proof(induct i) show "okmaxsync 0 \S" proof- { - fix p q + fix p q assume corr_p: "correct p (max (te p 0) (te q 0))" assume corr_q: "correct q (max (te p 0) (te q 0))" from corr_p synch0 have cp0: "correct p 0" by simp from corr_q synch0 have cq0: "correct q 0" by simp - from synch0 cp0 cq0 IClock - have IC_eq_PC: + from synch0 cp0 cq0 IClock + have IC_eq_PC: "\IC p 0 (max (te p 0) (te q 0)) - IC q 0 (max (te p 0) (te 
q 0))\ = \PC p 0 - PC q 0\" (is "?T1 = ?T2") by(simp add: Adj_def) - from ie2 init synch0 cp0 have range1: "0 \ PC p 0 \ PC p 0 \ \S" + from ie2 init synch0 cp0 have range1: "0 \ PC p 0 \ PC p 0 \ \S" by auto from ie2 init synch0 cq0 have range2: "0 \ PC q 0 \ PC q 0 \ \S" by auto have "?T2 \ \S" proof cases assume A:"PC p 0 < PC q 0" - from A range1 range2 show ?thesis + from A range1 range2 show ?thesis by(auto simp add: abs_if) next assume notA: "\ (PC p 0 < PC q 0)" from notA range1 range2 show ?thesis by(auto simp add: abs_if) qed from this IC_eq_PC have "?T1 \ \S" by simp } thus ?thesis by (simp add: okmaxsync_def) qed next fix i assume ind_hyp: "okmaxsync i \S" show "okmaxsync (Suc i) \S" proof- { fix p q assume corr_p: "correct p (max (te p (i + 1)) (te q (i + 1)))" assume corr_q: "correct q (max (te p (i + 1)) (te q (i + 1)))" let ?tp = "te p (i + 1)" let ?tq = "te q (i + 1)" let ?tpq = "max ?tp ?tq" have "\IC p (i+1) ?tpq - IC q (i+1) ?tpq\ \ \S" (is "?E1 \ \S") proof cases assume A: "?tq < ?tp" - from A corr_p have cp1: "correct p (te p (i+1))" + from A corr_p have cp1: "correct p (te p (i+1))" by (simp add: max_def) - from A corr_q have cq1: "correct q (te p (i+1))" + from A corr_q have cq1: "correct q (te p (i+1))" by (simp add: max_def) - from A - have Eq1: "?E1 = \IC p (i+1) (te p (i+1)) - IC q (i+1) (te p (i+1))\" + from A + have Eq1: "?E1 = \IC p (i+1) (te p (i+1)) - IC q (i+1) (te p (i+1))\" (is "?E1 = ?E2") by (simp add: max_def) - from A cp1 cq1 corr_p corr_q ind_hyp ie1 ie2 ie3 - four_one_ind_half + from A cp1 cq1 corr_p corr_q ind_hyp ie1 ie2 ie3 + four_one_ind_half have "?E2 \ \S" by (simp) from this Eq1 show ?thesis by simp next assume notA: "\ (?tq < ?tp)" - from this corr_p have cp2: "correct p (te q (i+1))" + from this corr_p have cp2: "correct p (te q (i+1))" by (simp add: max_def) from notA corr_q have cq2: "correct q (te q (i+1))" by (simp add: max_def) - from notA - have Eq2: "?E1 = \IC q (i+1) (te q (i+1)) - IC p (i+1) (te q (i+1))\" + from notA + have Eq2: "?E1 = \IC q (i+1) (te q (i+1)) - IC p (i+1) (te q (i+1))\" (is "?E1 = ?E3") by (simp add: max_def abs_minus_commute) from notA have "?tp \ ?tq" by simp from this cp2 cq2 ind_hyp ie1 ie2 ie3 four_one_ind_half have "?E3 \ \S" by simp from this Eq2 show ?thesis by (simp) qed } thus ?thesis by (simp add: okmaxsync_def) qed qed -lemma VC_cfn: +lemma VC_cfn: assumes corr_p: "correct p (te p (i+1))" and ie: "te p (i+1) < te p (i+2)" shows "VC p (te p (i+1)) = cfn p (\ p (i+1))" proof- from ie corr_p VClock have "VC p (te p (i+1)) = IC p (i+1) (te p (i+1))" by simp moreover - from corr_p IClock + from corr_p IClock have "IC p (i+1) (te p (i+1)) = PC p (te p (i+1)) + Adj p (i+1)" by blast moreover have "PC p (te p (i+1)) + Adj p (i+1) = cfn p (\ p (i+1))" by(simp add: Adj_def) ultimately show ?thesis by simp qed text\Lemma for the inductive case in Theorem 4.2\ lemma four_two_ind: assumes ie1: "\ \ rmin" and ie2: "\ \ \S" and ie3: "\1 \S \ \S" and ie4: "\2 \S \ \" and ie5: "\3 \S \ \" and ie6: "te q (i+1) \ te p (i+1)" and ind_hyp: "okClocks p q i" and t_bound1: "0 \ t" and t_bound2: "t < max (te p (i+1)) (te q (i+1))" and t_bound3: "max (te p i) (te q i) \ t" and tpq_bound: "max (te p i) (te q i) < max (te p (i+1)) (te q (i+1))" and corr_p: "correct p t" and corr_q: "correct q t" shows "\VC p t - VC q t\ \ \" proof cases assume A: "t < te q (i+1)" - + let ?tpq = "max (te p i) (te q i)" - - have Eq1: "te p i \ t \ te q i \ t" + + have Eq1: "te p i \ t \ te q i \ t" proof cases assume "te p i \ te q i" from this 
t_bound3 show ?thesis by (simp add: max_def) next assume "\ (te p i \ te q i)" from this t_bound3 show ?thesis by (simp add: max_def) qed - + from ie6 have tp_max: "max (te p (i+1)) (te q (i+1)) = te p (i+1)" by(simp add: max_def) from this t_bound2 have Eq2: "t < te p (i+1)" by simp from VClock Eq1 Eq2 corr_p have Eq3: "VC p t = IC p i t" by simp - + from VClock Eq1 A corr_q have Eq4: "VC q t = IC q i t" by simp from Eq3 Eq4 have Eq5: "\VC p t - VC q t\ = \IC p i t - IC q i t\" by simp from t_bound3 corr_p corr_q correct_closed have corr_tpq: "correct p ?tpq \ correct q ?tpq" by(blast) from t_bound3 IC_bd corr_p corr_q have Eq6: "\IC p i t - IC q i t\ \ \IC p i ?tpq - IC q i ?tpq\ + 2*\*(t - ?tpq)" (is "?E1 \ ?E2") by(blast) - + from ie1 ie2 ie3 four_one have "okmaxsync i \S" by simp from this corr_tpq have "\IC p i ?tpq - IC q i ?tpq\ \ \S" by(simp add: okmaxsync_def) - + from Eq6 this have Eq7: "?E1 \ \S + 2*\*(t - ?tpq)" by simp from corr_p Eq2 rts0 have "t - te p i \ rmax" by simp from this have "t - ?tpq \ rmax" by (simp add: max_def) - from this constants_ax have "2*\*(t - ?tpq) \ 2*\*rmax" - by (simp add: real_mult_le_cancel_iff1) + from this constants_ax have "2*\*(t - ?tpq) \ 2*\*rmax" + by simp hence "\S + 2*\*(t - ?tpq) \ \S + 2*\*rmax" by simp from this Eq7 have "?E1 \ \S + 2*\*rmax" by simp from this Eq5 ie4 show ?thesis by(simp add: \2_def) next assume "\ (t < te q (i+1))" hence B: "te q (i+1) \ t" by simp - from ie6 t_bound2 + from ie6 t_bound2 have tp_max: "max (te p (i+1)) (te q (i+1)) = te p (i+1)" by(simp add: max_def) have "te p i \ max (te p i) (te q i)" by(simp add: max_def) from this t_bound3 have tp_bound1: "te p i \ t" by simp from tp_max t_bound2 have tp_bound2: "t < te p (i+1)" by simp have tq_bound1: "t < te q (i+2)" proof (rule ccontr) assume "\ (t < te q (i+2))" hence C: "te q (i+2) \ t" by simp - from C corr_q correct_closed + from C corr_q correct_closed have corr_q_t2: "correct q (te q (i+2))" by blast have "te q (i+1) + \ \ t" proof- from corr_q_t2 rts1d have "rmin \ te q (i+2) - te q (i+1)" by simp from this ie1 have "\ \ te q (i+2) - te q (i+1)" by simp hence "te q (i+1) + \ \ te q (i+2)" by simp from this C show ?thesis by simp qed from this corr_p corr_q rts2a have "te p (i+1) \ t" by blast hence "\ (t < te p (i+1))" by simp from this tp_bound2 show False by simp qed from tq_bound1 B have tq_bound2: "te q (i+1) < te q (i+2)" by simp from B tp_bound2 have tq_bound3: "te q (i+1) < te p (i+1)" by simp - from B corr_p correct_closed + from B corr_p correct_closed have corr_p_tq1: "correct p (te q (i+1))" by blast - from B correct_closed corr_q + from B correct_closed corr_q have corr_q_tq1: "correct q (te q (i+1))" by blast - from corr_p_tq1 corr_q_tq1 beta_bound1 - have tq_bound4: "te p i \ te q (i+1)" + from corr_p_tq1 corr_q_tq1 beta_bound1 + have tq_bound4: "te p i \ te q (i+1)" by(simp add: le_diff_eq) - from tq_bound1 VClock B corr_q + from tq_bound1 VClock B corr_q have Eq1: "VC q t = IC q (i+1) t" by simp - + from VClock tp_bound1 tp_bound2 corr_p have Eq2: "VC p t = IC p i t" by simp - + from Eq1 Eq2 have Eq3: "\VC p t - VC q t\ = \IC p i t - IC q (i+1) t\" by simp - - from B corr_p corr_q IC_bd - have "\IC p i t - IC q (i+1) t\ \ - \IC p i (te q (i+1)) - IC q (i+1) (te q (i+1))\ + 2*\*(t - te q (i+1))" + + from B corr_p corr_q IC_bd + have "\IC p i t - IC q (i+1) t\ \ + \IC p i (te q (i+1)) - IC q (i+1) (te q (i+1))\ + 2*\*(t - te q (i+1))" by simp - from this Eq3 - have VC_split: "\VC p t - VC q t\ \ - \IC p i (te q (i+1)) - IC q (i+1) (te q 
(i+1))\ + 2*\*(t - te q (i+1))" + from this Eq3 + have VC_split: "\VC p t - VC q t\ \ + \IC p i (te q (i+1)) - IC q (i+1) (te q (i+1))\ + 2*\*(t - te q (i+1))" by simp from tq_bound2 VClock corr_q_tq1 have Eq4: "VC q (te q (i+1)) = IC q (i+1) (te q (i+1))" by simp - from this tq_bound2 VC_cfn corr_q_tq1 + from this tq_bound2 VC_cfn corr_q_tq1 have Eq5: "IC q (i+1) (te q (i+1)) = cfn q (\ q (i+1))" by simp - - hence IC_eq_cfn: "IC p i (te q (i+1)) - IC q (i+1) (te q (i+1)) = + + hence IC_eq_cfn: "IC p i (te q (i+1)) - IC q (i+1) (te q (i+1)) = IC p i (te q (i+1)) - cfn q (\ q (i+1))" - (is "?E1 = ?E2") + (is "?E1 = ?E2") by simp let ?f = "\ q (i+1)" let ?ppred = "\ l. correct l (te q (i+1))" let ?X = "2*\ + \S + 2*\*(rmax + \)" have "\ l m. ?ppred l \ ?ppred m \ \\ q (i+1) l - \ q (i+1) m\ \ ?X" proof- { fix l :: process fix m :: process assume corr_l: "?ppred l" assume corr_m: "?ppred m" - + let ?tlm = "max (te l i) (te m i)" have tlm_bound: "?tlm \ te q (i+1)" proof- from corr_l corr_q_tq1 beta_bound1 have "te l i \ te q (i+1)" by (simp add: le_diff_eq) moreover from corr_m corr_q_tq1 beta_bound1 have "te m i \ te q (i+1)" by (simp add: le_diff_eq) ultimately show ?thesis by simp qed - - from tlm_bound corr_l corr_m correct_closed + + from tlm_bound corr_l corr_m correct_closed have corr_tlm: "correct l ?tlm \ correct m ?tlm" by blast have "\IC l i ?tlm - IC m i ?tlm\ \ \S" proof- from ie1 ie2 ie3 four_one have "okmaxsync i \S" by simp from this corr_tlm show ?thesis by(simp add: okmaxsync_def) qed from this corr_l corr_m corr_q_tq1 theta_bound have "\\ q (i+1) l - \ q (i+1) m\ \ ?X" by simp } thus ?thesis by blast qed hence readOK: "okRead1 (\ q (i+1)) ?X ?ppred" by(simp add: okRead1_def) let ?E3 = "cfn q (\ q (i+1)) - \ q (i+1) p" let ?E4 = "\ q (i+1) p - IC p i (te q (i+1))" have "\?E2\ = \?E3 + ?E4\" by (simp add: abs_if) hence Eq8: "\?E2\ \ \?E3\ + \?E4\" by (simp add: abs_if) - + from correct_count have ppredOK: "np - maxfaults \ count ?ppred np" by simp from readOK ppredOK corr_p_tq1 corr_q_tq1 acc_prsv have "\?E3\ \ \ ?X" by blast from this Eq8 have Eq9: "\?E2\ \ \ ?X + \?E4\" by simp from corr_p_tq1 corr_q_tq1 readerror have "\?E4\ \ \" by simp - + from this Eq9 have Eq10: "\?E2\ \ \ ?X + \" by simp - - from this VC_split IC_eq_cfn - have almost_right: - "\VC p t - VC q t\ \ - \ ?X + \ + 2*\*(t - te q (i+1))" + + from this VC_split IC_eq_cfn + have almost_right: + "\VC p t - VC q t\ \ + \ ?X + \ + 2*\*(t - te q (i+1))" by simp have "t - te q (i+1) \ \" proof (rule ccontr) assume "\ (t - te q (i+1) \ \)" hence "te q (i+1) + \ \ t" by simp from this corr_p corr_q rts2a have "te p (i+1) \ t" by auto hence "\ (t < te p (i+1))" by simp from this tp_bound2 show False by simp qed - from this constants_ax - have "\ ?X + \ + 2*\*(t - te q (i+1)) + from this constants_ax + have "\ ?X + \ + 2*\*(t - te q (i+1)) \ \ ?X + \ + 2*\*\" by (simp) - from this almost_right - have "\VC p t - VC q t\ \ \ ?X + \ + 2*\*\" + from this almost_right + have "\VC p t - VC q t\ \ \ ?X + \ + 2*\*\" by arith from this ie5 show ?thesis by (simp add: \3_def) qed text\Theorem 4.2 in Shankar's paper.\ -theorem four_two: +theorem four_two: assumes ie1: "\ \ rmin" and ie2: "\ \ \S" and ie3: "\1 \S \ \S" and ie4: "\2 \S \ \" and ie5: "\3 \S \ \" shows "okClocks p q i" proof (induct i) show "okClocks p q 0" proof- { fix t :: time assume t_bound1: "0 \ t" assume t_bound2: "t < max (te p 0) (te q 0)" assume corr_p: "correct p t" assume corr_q: "correct q t" from t_bound2 synch0 have "t < 0" by(simp add: max_def) from 
this t_bound1 have False by simp hence "\VC p t - VC q t\ \ \" by simp } thus ?thesis by (simp add: okClocks_def) qed next fix i::nat assume ind_hyp: "okClocks p q i" show "okClocks p q (Suc i)" proof - { fix t :: time assume t_bound1: "0 \ t" assume t_bound2: "t < max (te p (i+1)) (te q (i+1))" assume corr_p: "correct p t" assume corr_q: "correct q t" - + let ?tpq1 = "max (te p i) (te q i)" let ?tpq2 = "max (te p (i+1)) (te q (i+1))" have "\VC p t - VC q t\ \ \" proof cases assume tpq_bound: "?tpq1 < ?tpq2" - show ?thesis + show ?thesis proof cases assume "t < ?tpq1" from t_bound1 this corr_p corr_q ind_hyp show ?thesis by(simp add: okClocks_def) next assume "\ (t < ?tpq1)" hence tpq_le_t: "?tpq1 \ t" by arith - show ?thesis + show ?thesis proof cases assume A: "te q (i+1) \ te p (i+1)" - from this tpq_le_t tpq_bound ie1 ie2 ie3 ie4 ie5 - ind_hyp t_bound1 t_bound2 + from this tpq_le_t tpq_bound ie1 ie2 ie3 ie4 ie5 + ind_hyp t_bound1 t_bound2 corr_p corr_q tpq_bound four_two_ind show ?thesis by(simp) next assume "\ (te q (i+1) \ te p (i+1))" hence B: "te p (i+1) \ te q (i+1)" by simp - - from ind_hyp okClocks_sym have ind_hyp1: "okClocks q p i" + + from ind_hyp okClocks_sym have ind_hyp1: "okClocks q p i" by blast have maxsym1: "max (te p (i+1)) (te q (i+1)) = max (te q (i+1)) (te p (i+1))" by (simp add: max_def) - have maxsym2: "max (te p i) (te q i) = max (te q i) (te p i)" + have maxsym2: "max (te p i) (te q i) = max (te q i) (te p i)" by (simp add: max_def) - from maxsym1 t_bound2 + from maxsym1 t_bound2 have t_bound21: "t < max (te q (i+1)) (te p (i+1))" by simp - - from maxsym1 maxsym2 tpq_bound + + from maxsym1 maxsym2 tpq_bound have tpq_bound1: "max (te q i) (te p i) < max (te q (i+1)) (te p (i+1))" by simp - from maxsym2 tpq_le_t + from maxsym2 tpq_le_t have tpq_le_t1: "max (te q i) (te p i) \ t" by simp - from B tpq_le_t1 tpq_bound1 ie1 ie2 ie3 ie4 ie5 - ind_hyp1 t_bound1 t_bound21 + from B tpq_le_t1 tpq_bound1 ie1 ie2 ie3 ie4 ie5 + ind_hyp1 t_bound1 t_bound21 corr_p corr_q tpq_bound four_two_ind have "\VC q t - VC p t\ \ \" by(simp) thus ?thesis by (simp add: abs_minus_commute) qed qed next assume "\ (?tpq1 < ?tpq2)" hence "?tpq2 \ ?tpq1" by arith from t_bound2 this have "t < ?tpq1" by arith from t_bound1 this corr_p corr_q ind_hyp show ?thesis by(simp add: okClocks_def) qed } thus ?thesis by (simp add: okClocks_def) qed qed - + text\The main theorem: all correct clocks are synchronized within the bound delta.\ -theorem agreement: +theorem agreement: assumes ie1: "\ \ rmin" and ie2: "\ \ \S" and ie3: "\1 \S \ \S" and ie4: "\2 \S \ \" and ie5: "\3 \S \ \" and ie6: "0 \ t" and cpq: "correct p t \ correct q t" shows "\VC p t - VC q t\ \ \" proof- from ie6 cpq event_bound have "\ i :: nat. t < max (te p i) (te q i)" by simp from this obtain i :: nat where t_bound: "t < max (te p i) (te q i)" .. - + from t_bound ie1 ie2 ie3 ie4 ie5 four_two have "okClocks p q i" by simp - from ie6 this t_bound cpq show ?thesis + from ie6 this t_bound cpq show ?thesis by (simp add: okClocks_def) qed end diff --git a/thys/Girth_Chromatic/Girth_Chromatic.thy b/thys/Girth_Chromatic/Girth_Chromatic.thy --- a/thys/Girth_Chromatic/Girth_Chromatic.thy +++ b/thys/Girth_Chromatic/Girth_Chromatic.thy @@ -1,737 +1,737 @@ theory Girth_Chromatic imports Ugraphs Girth_Chromatic_Misc "HOL-Probability.Probability" "HOL-Decision_Procs.Approximation" begin section \Probability Space on Sets of Edges\ definition cylinder :: "'a set \ 'a set \ 'a set \ 'a set set" where "cylinder S A B = {T \ Pow S. 
A \ T \ B \ T = {}}" lemma full_sum: fixes p :: real assumes "finite S" shows "(\A\Pow S. p^card A * (1 - p)^card (S - A)) = 1" using assms proof induct case (insert s S) have "inj_on (insert s) (Pow S)" and "\x. S - insert s x = S - x" and "Pow S \ insert s ` Pow S = {}" and "\x. x \ Pow S \ card (insert s S - x) = Suc (card (S - x))" using insert(1-2) by (auto simp: insert_Diff_if intro!: inj_onI) moreover have "\x. x \ S \ card (insert s x) = Suc (card x)" using insert(1-2) by (subst card.insert) (auto dest: finite_subset) ultimately show ?case by (simp add: sum.reindex sum_distrib_left[symmetric] ac_simps insert.hyps sum.union_disjoint Pow_insert) qed simp text \Definition of the probability space on edges:\ locale edge_space = fixes n :: nat and p :: real assumes p_prob: "0 \ p" "p \ 1" begin definition S_verts :: "nat set" where "S_verts \ {1..n}" definition S_edges :: "uedge set" where "S_edges = all_edges S_verts" definition edge_ugraph :: "uedge set \ ugraph" where "edge_ugraph es \ (S_verts, es \ S_edges)" definition "P = point_measure (Pow S_edges) (\s. p^card s * (1 - p)^card (S_edges - s))" lemma finite_verts[intro!]: "finite S_verts" by (auto simp: S_verts_def) lemma finite_edges[intro!]: "finite S_edges" by (auto simp: S_edges_def all_edges_def finite_verts) lemma finite_graph[intro!]: "finite (uverts (edge_ugraph es))" unfolding edge_ugraph_def by auto lemma uverts_edge_ugraph[simp]: "uverts (edge_ugraph es) = S_verts" by (simp add: edge_ugraph_def) lemma uedges_edge_ugraph[simp]: "uedges (edge_ugraph es) = es \ S_edges" unfolding edge_ugraph_def by simp lemma space_eq: "space P = Pow S_edges" by (simp add: P_def space_point_measure) lemma sets_eq: "sets P = Pow (Pow S_edges)" by (simp add: P_def sets_point_measure) lemma emeasure_eq: "emeasure P A = (if A \ Pow S_edges then (\edges\A. p^card edges * (1 - p)^card (S_edges - edges)) else 0)" using finite_edges p_prob by (simp add: P_def space_point_measure emeasure_point_measure_finite sets_point_measure emeasure_notin_sets) lemma integrable_P[intro, simp]: "integrable P (f::_ \ real)" using finite_edges by (simp add: integrable_point_measure_finite P_def) lemma borel_measurable_P[measurable]: "f \ borel_measurable P" unfolding P_def by simp lemma prob_space_P: "prob_space P" proof show "emeasure P (space P) = 1" \ \Sum of probabilities equals 1\ using finite_edges by (simp add: emeasure_eq full_sum one_ereal_def space_eq) qed end sublocale edge_space \ prob_space P by (rule prob_space_P) context edge_space begin lemma prob_eq: "prob A = (if A \ Pow S_edges then (\edges\A. p^card edges * (1 - p)^card (S_edges - edges)) else 0)" using emeasure_eq[of A] p_prob unfolding emeasure_eq_measure by (simp add: sum_nonneg) lemma integral_finite_singleton: "integral\<^sup>L P f = (\x\Pow S_edges. f x * measure P {x})" using p_prob prob_eq unfolding P_def by (subst lebesgue_integral_point_measure_finite) (auto intro!: sum.cong) text \Probability of cylinder sets:\ lemma cylinder_prob: assumes "A \ S_edges" "B \ S_edges" "A \ B = {}" shows "prob (cylinder S_edges A B) = p ^ (card A) * (1 - p) ^ (card B)" (is "_ = ?pp A B") proof - have "Pow S_edges \ cylinder S_edges A B = cylinder S_edges A B" "\x. x \ cylinder S_edges A B \ A \ x = x" "\x. x \ cylinder S_edges A B \ finite x" "\x. x \ cylinder S_edges A B \ B \ (S_edges - B - x) = {}" "\x. x \ cylinder S_edges A B \ B \ (S_edges - B - x) = S_edges - x" "finite A" "finite B" using assms by (auto simp add: cylinder_def intro: finite_subset) then have "(\T\cylinder S_edges A B. 
?pp T (S_edges - T)) = (\T \ cylinder S_edges A B. p^(card A + card (T - A)) * (1 - p)^(card B + card ((S_edges - B) - T)))" using finite_edges by (simp add: card_Un_Int) also have "\ = ?pp A B * (\T\cylinder S_edges A B. ?pp (T - A) (S_edges - B - T))" by (simp add: power_add sum_distrib_left ac_simps) also have "\ = ?pp A B" proof - have "\T. T \ cylinder S_edges A B \ S_edges - B - T = (S_edges - A) - B - (T - A)" "Pow (S_edges - A - B) = (\x. x - A) ` cylinder S_edges A B" "inj_on (\x. x - A) (cylinder S_edges A B)" "finite (S_edges - A - B)" using assms by (auto simp: cylinder_def intro!: inj_onI) with full_sum[of "S_edges - A - B"] show ?thesis by (simp add: sum.reindex) qed finally show ?thesis by (auto simp add: prob_eq cylinder_def) qed lemma Markov_inequality: fixes a :: real and X :: "uedge set \ real" assumes "0 < c" "\x. 0 \ f x" shows "prob {x \ space P. c \ f x} \ (\x. f x \ P) / c" proof - from assms have "(\\<^sup>+ x. ennreal (f x) \P) = (\x. f x \P)" by (intro nn_integral_eq_integral) auto with assms show ?thesis using nn_integral_Markov_inequality[of f P "space P" "1 / c"] by (simp cong: nn_integral_cong add: emeasure_eq_measure ennreal_mult[symmetric]) qed end subsection \Graph Probabilities outside of @{term Edge_Space} locale\ text \ These abbreviations allow a compact expression of probabilities about random graphs outside of the @{term Edge_Space} locale. We also transfer a few of the lemmas we need from the locale into the toplevel theory. \ abbreviation MGn :: "(nat \ real) \ nat \ (uedge set) measure" where "MGn p n \ (edge_space.P n (p n))" abbreviation probGn :: "(nat \ real) \ nat \ (uedge set \ bool) \ real" where "probGn p n P \ measure (MGn p n) {es \ space (MGn p n). P es}" lemma probGn_le: assumes p_prob: "0 < p n" "p n < 1" assumes sub: "\n es. es \ space (MGn p n) \ P n es \ Q n es" shows "probGn p n (P n) \ probGn p n (Q n)" proof - from p_prob interpret E: edge_space n "p n" by unfold_locales auto show ?thesis by (auto intro!: E.finite_measure_mono sub simp: E.space_eq E.sets_eq) qed section \Short cycles\ definition short_cycles :: "ugraph \ nat \ uwalk set" where "short_cycles G k \ {p \ ucycles G. uwalk_length p \ k}" text \obtains a vertex in a short cycle:\ definition choose_v :: "ugraph \ nat \ uvert" where "choose_v G k \ SOME u. \p. p \ short_cycles G k \ u \ set p" partial_function (tailrec) kill_short :: "ugraph \ nat \ ugraph" where "kill_short G k = (if short_cycles G k = {} then G else (kill_short (G -- (choose_v G k)) k))" lemma ksc_simps[simp]: "short_cycles G k = {} \ kill_short G k = G" "short_cycles G k \ {} \ kill_short G k = kill_short (G -- (choose_v G k)) k" by (auto simp: kill_short.simps) lemma assumes "short_cycles G k \ {}" shows choose_v__in_uverts: "choose_v G k \ uverts G" (is ?t1) and choose_v__in_short: "\p. p \ short_cycles G k \ choose_v G k \ set p" (is ?t2) proof - from assms obtain p where "p \ ucycles G" "uwalk_length p \ k" unfolding short_cycles_def by auto moreover then obtain u where "u \ set p" unfolding ucycles_def by (cases p) (auto simp: uwalk_length_conv) ultimately have "\u p. 
p \ short_cycles G k \ u \ set p" by (auto simp: short_cycles_def) then show ?t2 by (auto simp: choose_v_def intro!: someI_ex) then show ?t1 by (auto simp: short_cycles_def ucycles_def uwalks_def) qed lemma kill_step_smaller: assumes "short_cycles G k \ {}" shows "short_cycles (G -- (choose_v G k)) k \ short_cycles G k" proof - let ?cv = "choose_v G k" from assms obtain p where "p \ short_cycles G k" "?cv \ set p" by atomize_elim (rule choose_v__in_short) have "short_cycles (G -- ?cv) k \ short_cycles G k" proof fix p assume "p \ short_cycles (G -- ?cv) k" then show "p \ short_cycles G k" unfolding short_cycles_def ucycles_def uwalks_def using edges_Gu[of G ?cv] by (auto simp: verts_Gu) qed moreover have "p \ short_cycles (G -- ?cv) k" using \?cv \ set p\ by (auto simp: short_cycles_def ucycles_def uwalks_def verts_Gu) ultimately show ?thesis using \p \ short_cycles G k\ by auto qed text \Induction rule for @{term kill_short}:\ lemma kill_short_induct[consumes 1, case_names empty kill_vert]: assumes fin: "finite (uverts G)" assumes a_empty: "\G. short_cycles G k = {} \ P G k" assumes a_kill: "\G. finite (short_cycles G k) \ short_cycles G k \ {} \ P (G -- (choose_v G k)) k \ P G k" shows "P G k" proof - have "finite (short_cycles G k)" using finite_ucycles[OF fin] by (auto simp: short_cycles_def) then show ?thesis by (induct "short_cycles G k" arbitrary: G rule: finite_psubset_induct) (metis kill_step_smaller a_kill a_empty) qed text \Large Girth (after @{term kill_short}):\ lemma kill_short_large_girth: assumes "finite (uverts G)" shows "k < girth (kill_short G k)" using assms proof (induct G k rule: kill_short_induct) case (empty G) then have "\p. p \ ucycles G \ k < enat (uwalk_length p)" by (auto simp: short_cycles_def) with empty show ?case by (auto simp: girth_def intro: enat_less_INF_I) qed simp text \Order of graph (after @{term kill_short}):\ lemma kill_short_order_of_graph: assumes "finite (uverts G)" shows "card (uverts G) - card (short_cycles G k) \ card (uverts (kill_short G k))" using assms assms proof (induct G k rule: kill_short_induct) case (kill_vert G) let ?oG = "G -- (choose_v G k)" have "finite (uverts ?oG)" using kill_vert by (auto simp: remove_vertex_def) moreover have "uverts (kill_short G k) = uverts (kill_short ?oG k)" using kill_vert by simp moreover have "card (uverts G) = Suc (card (uverts ?oG))" using choose_v__in_uverts kill_vert by (simp add: remove_vertex_def card_Suc_Diff1 del: card_Diff_insert) moreover have "card (short_cycles ?oG k) < card (short_cycles G k)" by (intro psubset_card_mono kill_vert.hyps kill_step_smaller) ultimately show ?case using kill_vert.hyps by presburger qed simp text \Independence number (after @{term kill_short}):\ lemma kill_short_\: assumes "finite (uverts G)" shows "\ (kill_short G k) \ \ G" using assms proof (induct G k rule: kill_short_induct) case (kill_vert G) note kill_vert(3) also have "\ (G -- (choose_v G k)) \ \ G" by (rule \_remove_le) finally show ?case using kill_vert by simp qed simp text \Wellformedness (after @{term kill_short}):\ lemma kill_short_uwellformed: assumes "finite (uverts G)" "uwellformed G" shows "uwellformed (kill_short G k)" using assms proof (induct G k rule: kill_short_induct) case (kill_vert G) from kill_vert.prems have "uwellformed (G -- (choose_v G k))" by (auto simp: uwellformed_def remove_vertex_def) with kill_vert.hyps show ?case by simp qed simp section \The Chromatic-Girth Theorem\ text \Probability of Independent Edges:\ lemma (in edge_space) random_prob_independent: assumes "n \ k" "k \ 
2" shows "prob {es \ space P. k \ \ (edge_ugraph es)} \ (n choose k)*(1-p)^(k choose 2)" proof - let "?k_sets" = "{vs. vs \ S_verts \ card vs = k}" { fix vs assume A: "vs \ ?k_sets" then have B: "all_edges vs \ S_edges" unfolding all_edges_def S_edges_def by blast have "{es \ space P. vs \ independent_sets (edge_ugraph es)} = cylinder S_edges {} (all_edges vs)" (is "?L = _") using A by (auto simp: independent_sets_def edge_ugraph_def space_eq cylinder_def) then have "prob ?L = (1-p)^(k choose 2)" using A B finite by (auto simp: cylinder_prob card_all_edges dest: finite_subset) } note prob_k_indep = this \ \probability that a fixed set of k vertices is independent in a random graph\ have "{es \ space P. k \ card ` independent_sets (edge_ugraph es)} = (\vs \ ?k_sets. {es \ space P. vs \ independent_sets (edge_ugraph es)})" (is "?L = ?R") unfolding image_def space_eq independent_sets_def by auto then have "prob ?L \ (\vs \ ?k_sets. prob {es \ space P. vs \ independent_sets (edge_ugraph es)})" by (auto intro!: finite_measure_subadditive_finite simp: space_eq sets_eq) also have "\ = (n choose k)*((1 - p) ^ (k choose 2))" by (simp add: prob_k_indep S_verts_def n_subsets) finally show ?thesis using \k \ 2\ by (simp add: le_\_iff) qed text \Almost never many independent edges:\ lemma almost_never_le_\: fixes k :: nat and p :: "nat \ real" assumes p_prob: "\\<^sup>\ n. 0 < p n \ p n < 1" assumes [arith]: "k > 0" assumes N_prop: "\\<^sup>\ n. (6 * k * ln n)/n \ p n" shows "(\n. probGn p n (\es. 1/2*n/k \ \ (edge_space.edge_ugraph n es))) \ 0" (is "(\n. ?prob_fun n) \ 0") proof - let "?prob_fun_raw n" = "probGn p n (\es. nat(ceiling (1/2*n/k)) \ \ (edge_space.edge_ugraph n es))" define r where "r n = 1 / 2 * n / k" for n :: nat let ?nr = "\n. nat(ceiling (r n))" have r_pos: "\n. 0 < n \ 0 < r n " by (auto simp: r_def field_simps) have nr_bounds: "\\<^sup>\ n. 2 \ ?nr n \ ?nr n \ n" by (intro eventually_sequentiallyI[of "4 * k"]) (simp add: r_def nat_ceiling_le_eq le_natceiling_iff field_simps) from nr_bounds p_prob have ev_prob_fun_raw_le: "\\<^sup>\ n. probGn p n (\es. ?nr n\ \ (edge_space.edge_ugraph n es)) \ (n * exp (- p n * (real (?nr n) - 1) / 2)) powr ?nr n" (is "\\<^sup>\ n. ?prob_fun_raw_le n") proof (rule eventually_elim2) fix n :: nat assume A: "2 \ ?nr n \ ?nr n \ n" "0 < p n \p n < 1" then interpret pG: edge_space n "p n" by unfold_locales auto have r: "real (?nr n - Suc 0) = real (?nr n) - Suc 0" using A by auto have [simp]: "n>0" using A by linarith have "probGn p n (\es. ?nr n \ \ (edge_space.edge_ugraph n es)) \ (n choose ?nr n) * (1 - p n)^(?nr n choose 2)" using A by (auto intro: pG.random_prob_independent) also have "\ \ n powr ?nr n * (1 - p n) powr (?nr n choose 2)" using A by (simp add: powr_realpow of_nat_power [symmetric] binomial_le_pow del: of_nat_power) also have "\ = n powr ?nr n * (1 - p n) powr (?nr n * (?nr n - 1) / 2)" by (cases "even (?nr n - 1)") (auto simp add: n_choose_2_nat real_of_nat_div) also have "\ = n powr ?nr n * ((1 - p n) powr ((?nr n - 1) / 2)) powr ?nr n" by (auto simp add: powr_powr r ac_simps) also have "\ \ (n * exp (- p n * (?nr n - 1) / 2)) powr ?nr n" proof - have "(1 - p n) powr ((?nr n - 1) / 2) \ exp (- p n) powr ((?nr n - 1) / 2)" using A by (auto simp: powr_mono2 diff_conv_add_uminus simp del: add_uminus_conv_diff) also have "\ = exp (- p n * (?nr n - 1) / 2)" by (auto simp: powr_def) finally show ?thesis using A by (auto simp: powr_mono2 powr_mult) qed finally show "probGn p n (\es. 
?nr n \ \ (edge_space.edge_ugraph n es)) \ (n * exp (- p n * (real (?nr n) - 1) / 2)) powr ?nr n" using A r by simp qed from p_prob N_prop have ev_expr_bound: "\\<^sup>\ n. n * exp (-p n * (real (?nr n) - 1) / 2) \ (exp 1 / n) powr (1 / 2)" proof (elim eventually_rev_mp, intro eventually_sequentiallyI conjI impI) fix n assume n_bound[arith]: "2 \ n" and p_bound: "0 < p n \ p n < 1" "(6 * k * ln n) / n \ p n" have r_bound: "r n \ ?nr n" by (rule real_nat_ceiling_ge) have "n * exp (-p n * (real (?nr n)- 1) / 2) \ n * exp (- 3 / 2 * ln n + p n / 2)" proof - have "0 < ln n" using "n_bound" by auto then have "(3 / 2) * ln n \ ((6 * k * ln n) / n) * (?nr n / 2)" using r_bound le_of_int_ceiling[of "n/2*k"] by (simp add: r_def field_simps del: le_of_int_ceiling) also have "\ \ p n * (?nr n / 2)" using n_bound p_bound r_bound r_pos[of n] by (auto simp: field_simps) finally show ?thesis using r_bound by (auto simp: field_simps) qed also have "\ \ n * n powr (- 3 / 2) * exp 1 powr (1 / 2)" using p_bound by (simp add: powr_def exp_add [symmetric]) also have "\ \ n powr (-1 / 2) * exp 1 powr (1 / 2)" by (simp add: powr_mult_base) also have "\ = (exp 1 / n) powr (1/2)" by (simp add: powr_divide powr_minus_divide) finally show "n * exp (- p n * (real (?nr n) - 1) / 2) \ (exp 1 / n) powr (1 / 2)" . qed have ceil_bound: "\G n. 1/2*n/k \ \ G \ nat(ceiling (1/2*n/k)) \ \ G" by (case_tac "\ G") (auto simp: nat_ceiling_le_eq) show ?thesis proof (unfold ceil_bound, rule real_tendsto_sandwich) show "(\n. 0) \ 0" "(\n. (exp 1 / n) powr (1 / 2)) \ 0" "\\<^sup>\ n. 0 \ ?prob_fun_raw n" using p_prob by (auto intro: measure_nonneg LIMSEQ_inv_powr elim: eventually_mono) next from nr_bounds ev_expr_bound ev_prob_fun_raw_le show "\\<^sup>\ n. ?prob_fun_raw n \ (exp 1 / n) powr (1 / 2)" proof (elim eventually_rev_mp, intro eventually_sequentiallyI impI conjI) fix n assume A: "3 \ n" and nr_bounds: "2 \ ?nr n \ ?nr n \ n" and prob_fun_raw_le: "?prob_fun_raw_le n" and expr_bound: "n * exp (- p n * (real (nat(ceiling (r n))) - 1) / 2) \ (exp 1 / n) powr (1 / 2)" have "exp 1 < (3 :: real)" by (approximation 6) then have "(exp 1 / n) powr (1 / 2) \ 1 powr (1 / 2)" using A by (intro powr_mono2) (auto simp: field_simps) then have ep_bound: "(exp 1 / n) powr (1 / 2) \ 1" by simp have "?prob_fun_raw n \ (n * exp (- p n * (real (?nr n) - 1) / 2)) powr (?nr n)" using prob_fun_raw_le by (simp add: r_def) also have "\ \ ((exp 1 / n) powr (1 / 2)) powr ?nr n" using expr_bound A by (auto simp: powr_mono2) also have "\ \ ((exp 1 / n) powr (1 / 2))" using nr_bounds ep_bound A by (auto simp: powr_le_one_le) finally show "?prob_fun_raw n \ (exp 1 / n) powr (1 / 2)" . qed qed qed text \Mean number of k-cycles in a graph. (Or rather of paths describing a circle of length @{term k}):\ lemma (in edge_space) mean_k_cycles: assumes "3 \ k" "k < n" shows "(\es. card {c \ ucycles (edge_ugraph es). uwalk_length c = k} \ P) = of_nat (fact n div fact (n - k)) * p ^ k" proof - let ?k_cycle = "\es c k. c \ ucycles (edge_ugraph es) \ uwalk_length c = k" define C where "C k = {c. ?k_cycle S_edges c k}" for k \ \@{term "C k"} is the set of all possible cycles of size @{term k} in @{term "edge_ugraph S_edges"}\ define XG where "XG es = {c. ?k_cycle es c k}" for es \ \@{term "XG es"} is the set of cycles contained in a @{term "edge_ugraph es"}\ define XC where "XC c = {es \ space P. ?k_cycle es c k}" for c \ \"@{term "XC c"} is the set of graphs (edge sets) containing a cycle c"\ then have XC_in_sets: "\c. XC c \ sets P" and XC_cyl: "\c. 
c \ C k \ XC c = cylinder S_edges (set (uwalk_edges c)) {}" by (auto simp: ucycles_def space_eq uwalks_def C_def cylinder_def sets_eq) have "(\es. card {c \ ucycles (edge_ugraph es). uwalk_length c = k} \ P) = (\x\space P. card (XG x) * prob {x})" by (simp add: XG_def integral_finite_singleton space_eq) also have "\ = (\c\C k. prob (cylinder S_edges (set (uwalk_edges c)) {}))" proof - have XG_Int_C: "\s. s \ space P \ C k \ XG s = XG s" unfolding XG_def C_def ucycles_def uwalks_def edge_ugraph_def by auto have fin_XC: "\k. finite (XC k)" and fin_C: "finite (C k)" unfolding C_def XC_def by (auto simp: finite_edges space_eq intro!: finite_ucycles) have "(\x\space P. card (XG x) * prob {x}) = (\x\space P. (\c \ XG x. prob {x}))" by simp also have "\ = (\x\space P. (\c \ C k. if c \ XG x then prob {x} else 0))" using fin_C by (simp add: sum.If_cases) (simp add: XG_Int_C) also have "\ = (\c \ C k. (\ x \ space P \ XC c. prob {x}))" using finite_edges by (subst sum.swap) (simp add: sum.inter_restrict XG_def XC_def space_eq) also have "\ = (\c \ C k. prob (XC c))" using fin_XC XC_in_sets by (auto simp add: prob_eq sets_eq space_eq intro!: sum.cong) finally show ?thesis by (simp add: XC_cyl) qed also have "\ = (\c\C k. p ^ k)" proof - have "\x. x \ C k \ card (set (uwalk_edges x)) = uwalk_length x" by (auto simp: uwalk_length_def C_def ucycles_distinct_edges intro: distinct_card) then show ?thesis by (auto simp: C_def ucycles_def uwalks_def cylinder_prob) qed also have "\ = of_nat (fact n div fact (n - k)) * p ^ k" proof - have inj_last_Cons: "\A. inj_on (\es. last es # es) A" by (rule inj_onI) simp { fix xs A assume "3 \ length xs - Suc 0" "hd xs = last xs" then have "xs \ (\xs. last xs # xs) ` A \ tl xs \ A" by (cases xs) (auto simp: inj_image_mem_iff[OF inj_last_Cons] split: if_split_asm) } note image_mem_iff_inst = this { fix xs have "xs \ uwalks (edge_ugraph S_edges) \ set (tl xs) \ S_verts" unfolding uwalks_def by (induct xs) auto } moreover { fix xs assume "set xs \ S_verts" "2 \ length xs" "distinct xs" then have "(last xs # xs) \ uwalks (edge_ugraph S_edges)" proof (induct xs rule: uwalk_edges.induct) case (3 x y ys) have S_edges_memI: "\x y. x \ S_verts \ y \ S_verts \ x \ y \ {x, y} \ S_edges" unfolding S_edges_def all_edges_def image_def by auto have "ys \ [] \ set ys \ S_verts \ last ys \ S_verts" by auto with 3 show ?case by (auto simp add: uwalks_def Suc_le_eq intro: S_edges_memI) qed simp_all} moreover note \3 \ k\ ultimately have "C k = (\xs. last xs # xs) ` {xs. length xs = k \ distinct xs \ set xs \ S_verts}" by (auto simp: C_def ucycles_def uwalk_length_conv image_mem_iff_inst) moreover have "card S_verts = n" by (simp add: S_verts_def) ultimately have "card (C k) = fact n div fact (n - k)" using \k < n\ by (simp add: card_image[OF inj_last_Cons] card_lists_distinct_length_eq' fact_div_fact) then show ?thesis by simp qed finally show ?thesis by simp qed text \Girth-Chromatic number theorem:\ theorem girth_chromatic: fixes l :: nat shows "\G. 
uwellformed G \ l < girth G \ l < chromatic_number G" proof - define k where "k = max 3 l" define \ where "\ = 1 / (2 * k)" define p where "p n = real n powr (\ - 1)" for n :: nat let ?ug = edge_space.edge_ugraph define short_count where "short_count g = card (short_cycles g k)" for g \ \This random variable differs from the one used in the proof of theorem 11.2.2, as we count the number of paths describing a circle, not the circles themselves\ from k_def have "3 \ k" "l \ k" by auto from \3 \ k\ have \_props: "0 < \" "\ < 1 / k" "\ < 1" by (auto simp: \_def field_simps) have ev_p: "\\<^sup>\ n. 0 < p n \ p n < 1" proof (rule eventually_sequentiallyI) fix n :: nat assume "2 \ n" with \\ < 1\ have "n powr (\ - 1) < 1" by (auto intro!: powr_less_one) then show "0 < p n \ p n < 1" using \2 \ n\ by (auto simp: p_def) qed then have prob_short_count_le: "\\<^sup>\ n. probGn p n (\es. (real n/2) \ short_count (?ug n es)) \ 2 * (k - 2) * n powr (\ * k - 1)" (is "\\<^sup>\ n. ?P n") proof (elim eventually_rev_mp, intro eventually_sequentiallyI impI) fix n :: nat assume A: "Suc k \ n" "0 < p n \ p n < 1" then interpret pG: edge_space n "p n" by unfold_locales auto have "1 \ n" using A by auto define mean_short_count where "mean_short_count = (\es. short_count (?ug n es) \ pG.P)" have mean_short_count_le: "mean_short_count \ (k - 2) * n powr (\ * k)" proof - have small_empty: "\es k. k \ 2 \ short_cycles (edge_space.edge_ugraph n es) k = {}" by (auto simp add: short_cycles_def ucycles_def) have short_count_conv: "\es. short_count (?ug n es) = (\i=3..k. real (card {c \ ucycles (?ug n es). uwalk_length c = i}))" proof (unfold short_count_def, induct k) case 0 with small_empty show ?case by auto next case (Suc k) show ?case proof (cases "Suc k \ 2") case True with small_empty show ?thesis by auto next case False have "{c \ ucycles (?ug n es). uwalk_length c \ Suc k} = {c \ ucycles (?ug n es). uwalk_length c \ k} \ {c \ ucycles (?ug n es). uwalk_length c = Suc k}" by auto moreover have "finite (uverts (edge_space.edge_ugraph n es))" by auto ultimately have "card {c \ ucycles (?ug n es). uwalk_length c \ Suc k} = card {c \ ucycles (?ug n es). uwalk_length c \ k} + card {c \ ucycles (?ug n es). uwalk_length c = Suc k}" using finite_ucycles by (subst card_Un_disjoint[symmetric]) auto then show ?thesis using Suc False unfolding short_cycles_def by (auto simp: not_le) qed qed have "mean_short_count = (\i=3..k. \es. card {c \ ucycles (?ug n es). uwalk_length c = i} \ pG.P)" unfolding mean_short_count_def short_count_conv by (subst Bochner_Integration.integral_sum) (auto intro: pG.integral_finite_singleton) also have "\ = (\i\{3..k}. of_nat (fact n div fact (n - i)) * p n ^ i)" using A by (simp add: pG.mean_k_cycles) also have "\ \ (\ i\{3..k}. n ^ i * p n ^ i)" apply (rule sum_mono) - by (meson A fact_div_fact_le_pow Suc_leD atLeastAtMost_iff of_nat_le_iff order_trans real_mult_le_cancel_iff1 zero_less_power) + by (meson A fact_div_fact_le_pow Suc_leD atLeastAtMost_iff of_nat_le_iff order_trans mult_le_cancel_iff1 zero_less_power) also have "... \ (\ i\{3..k}. n powr (\ * k))" using \1 \ n\ \0 < \\ A by (intro sum_mono) (auto simp: p_def field_simps powr_mult_base powr_powr powr_realpow[symmetric] powr_mult[symmetric] powr_add[symmetric]) finally show ?thesis by simp qed have "pG.prob {es \ space pG.P. 
n/2 \ short_count (?ug n es)} \ mean_short_count / (n/2)" unfolding mean_short_count_def using \1 \ n\ by (intro pG.Markov_inequality) (auto simp: short_count_def) also have "\ \ 2 * (k - 2) * n powr (\ * k - 1)" proof - have "mean_short_count / (n / 2) \ 2 * (k - 2) * (1 / n powr 1) * n powr (\ * k)" using mean_short_count_le \1 \ n\ by (simp add: field_simps) then show ?thesis by (simp add: powr_diff algebra_simps) qed finally show "?P n" . qed define pf_short_count pf_\ where "pf_short_count n = probGn p n (\es. n/2 \ short_count (?ug n es))" and "pf_\ n = probGn p n (\es. 1/2 * n/k \ \ (edge_space.edge_ugraph n es))" for n have ev_short_count_le: "\\<^sup>\ n. pf_short_count n < 1 / 2" proof - have "\ * k - 1 < 0" using \_props \3 \ k\ by (auto simp: field_simps) then have "(\n. 2 * (k - 2) * n powr (\ * k - 1)) \ 0" (is "?bound \ 0") by (intro tendsto_mult_right_zero LIMSEQ_neg_powr) then have "\\<^sup>\ n. dist (?bound n) 0 < 1 / 2" by (rule tendstoD) simp with prob_short_count_le show ?thesis by (rule eventually_elim2) (auto simp: dist_real_def pf_short_count_def) qed have lim_\: "pf_\ \ 0" proof - have "0 < k" using \3 \ k\ by simp have "\\<^sup>\ n. (6*k) * ln n / n \ p n \ (6*k) * ln n * n powr - \ \ 1" proof (rule eventually_sequentiallyI) fix n :: nat assume "1 \ n" then have "(6 * k) * ln n / n \ p n \ (6*k) * ln n * (n powr - 1) \ n powr (\ - 1)" by (subst powr_minus) (simp add: divide_inverse p_def) also have "\ \ (6*k) * ln n * ((n powr - 1) / (n powr (\ - 1))) \ n powr (\ - 1) / (n powr (\ - 1))" using \1 \ n\ by (auto simp: field_simps) also have "\ \ (6*k) * ln n * n powr - \ \ 1" by (simp add: powr_diff [symmetric] ) finally show "(6*k) * ln n / n \ p n \ (6*k) * ln n * n powr - \ \ 1" . qed then have "(\\<^sup>\ n. (6 * k) * ln n / real n \ p n) \ (\\<^sup>\ n. (6*k) * ln n * n powr - \ \ 1)" by (rule eventually_subst) also have "\\<^sup>\ n. (6*k) * ln n * n powr - \ \ 1" proof - { fix n :: nat assume "0 < n" have "ln (real n) \ n powr (\/2) / (\/2)" using \0 < n\ \0 < \\ by (intro ln_powr_bound) auto also have "\ \ 2/\ * n powr (\/2)" by (auto simp: field_simps) finally have "(6*k) * ln n * (n powr - \) \ (6*k) * (2/\ * n powr (\/2)) * (n powr - \)" using \0 < n\ \0 < k\ by (intro mult_right_mono mult_left_mono) auto also have "\ = 12*k/\ * n powr (-\/2)" unfolding divide_inverse by (auto simp: field_simps powr_minus[symmetric] powr_add[symmetric]) finally have "(6*k) * ln n * (n powr - \) \ 12*k/\ * n powr (-\/2)" . } then have "\\<^sup>\ n. (6*k) * ln n * (n powr - \) \ 12*k/\ * n powr (-\/2)" by (intro eventually_sequentiallyI[of 1]) auto also have "\\<^sup>\ n. 12*k/\ * n powr (-\/2) \ 1" proof - have "(\n. 12*k/\ * n powr (-\/2)) \ 0" using \0 < \\ by (intro tendsto_mult_right_zero LIMSEQ_neg_powr) auto then show ?thesis using \0 < \\ by - (drule tendstoD[where e=1], auto elim: eventually_mono) qed finally (eventually_le_le) show ?thesis . qed finally have "\\<^sup>\ n. real (6 * k) * ln (real n) / real n \ p n" . with ev_p \0 < k\ show ?thesis unfolding pf_\_def by (rule almost_never_le_\) qed from ev_short_count_le lim_\[THEN tendstoD, of "1/2"] ev_p have "\\<^sup>\ n. 0 < p n \ p n < 1 \ pf_short_count n < 1/2 \ pf_\ n < 1/2" by simp (elim eventually_rev_mp, auto simp: eventually_sequentially dist_real_def) then obtain n where "0 < p n" "p n < 1" and [arith]: "0 < n" and probs: "pf_short_count n < 1/2" "pf_\ n < 1/2" by (auto simp: eventually_sequentially) then interpret ES: edge_space n "(p n)" by unfold_locales auto have rest_compl: "\A P. A - {x\A. 
P x} = {x\A. \P x}" by blast from probs have "ES.prob ({es \ space ES.P. n/2 \ short_count (?ug n es)} \ {es \ space ES.P. 1/2 * n/k \ \ (?ug n es)}) \ pf_short_count n + pf_\ n" unfolding pf_short_count_def pf_\_def by (subst ES.finite_measure_subadditive) auto also have "\ < 1" using probs by auto finally have "0 < ES.prob (space ES.P - ({es \ space ES.P. n/2 \ short_count (?ug n es)} \ {es \ space ES.P. 1/2 * n/k \ \ (?ug n es)}))" (is "0 < ES.prob ?S") by (subst ES.prob_compl) auto also have "?S = {es \ space ES.P. short_count (?ug n es) < n/2 \ \ (?ug n es) < 1/2* n/k}" (is "\ = ?C") by (auto simp: not_less rest_compl) finally have "?C \ {}" by (intro notI) (simp only:, auto) then obtain es where es_props: "es \ space ES.P" "short_count (?ug n es) < n/2" "\ (?ug n es) < 1/2 * n/k" by auto \ \now we obtained a high colored graph (few independent nodes) with almost no short cycles\ define G where "G = ?ug n es" define H where "H = kill_short G k" have G_props: "uverts G = {1..n}" "finite (uverts G)" "short_count G < n/2" "\ G < 1/2 * n/k" unfolding G_def using es_props by (auto simp: ES.S_verts_def) have "uwellformed G" by (auto simp: G_def uwellformed_def all_edges_def ES.S_edges_def) with G_props have T1: "uwellformed H" unfolding H_def by (intro kill_short_uwellformed) have "enat l \ enat k" using \l \ k\ by simp also have "\ < girth H" using G_props by (auto simp: kill_short_large_girth H_def) finally have T2: "l < girth H" . have card_H: "n/2 \ card (uverts H)" using G_props es_props kill_short_order_of_graph[of G k] by (simp add: short_count_def H_def) then have uverts_H: "uverts H \ {}" "0 < card (uverts H)" by auto then have "0 < \ H" using zero_less_\ uverts_H by auto have \_HG: "\ H \ \ G" unfolding H_def G_def by (auto intro: kill_short_\) have "enat l \ ereal k" using \l \ k\ by auto also have "\ < (n/2) / \ G" using G_props \3 \ k\ by (cases "\ G") (auto simp: field_simps) also have "\ \ (n/2) / \ H" using \_HG \0 < \ H\ by (auto simp: ereal_of_enat_pushout intro!: ereal_divide_left_mono) also have "\ \ card (uverts H) / \ H" using card_H \0 < \ H\ by (auto intro!: ereal_divide_right_mono) also have "\ \ chromatic_number H" using uverts_H T1 by (intro chromatic_lb) auto finally have T3: "l < chromatic_number H" by (simp del: ereal_of_enat_simps) from T1 T2 T3 show ?thesis by fast qed end diff --git a/thys/Green/CircExample.thy b/thys/Green/CircExample.thy --- a/thys/Green/CircExample.thy +++ b/thys/Green/CircExample.thy @@ -1,794 +1,794 @@ section \The Circle Example\ theory CircExample imports Green SymmetricR2Shapes begin locale circle = R2 + fixes d::real assumes d_gt_0: "0 < d" begin definition circle_y where "circle_y t = sqrt (1/4 - t * t)" definition circle_cube where "circle_cube = (\(x,y). ((x - 1/2) * d, (2 * y - 1) * d * sqrt (1/4 - (x -1/2)*(x -1/2))))" lemma circle_cube_nice: shows "circle_cube = (\(x,y). 
(d * x_coord x, (2 * y - 1) * d * circle_y (x_coord x)))" by (auto simp add: circle_cube_def circle_y_def x_coord_def) definition rot_circle_cube where "rot_circle_cube = prod.swap \ (circle_cube) \ prod.swap" abbreviation "rot_y t1 t2 \ ((t1-1/2)/(2* circle_y (x_coord (rot_x t1 t2))) +1/2::real)" definition "x_coord_inv (x::real) = (1/2) + x" lemma x_coord_inv_1: "x_coord_inv (x_coord (x::real)) = x" by (auto simp add: x_coord_inv_def x_coord_def) lemma x_coord_inv_2: "x_coord (x_coord_inv (x::real)) = x" by (auto simp add: x_coord_inv_def x_coord_def) definition "circle_y_inv = circle_y" abbreviation "rot_x'' (x::real) (y::real) \ (x_coord_inv ((2 * y - 1) * circle_y (x_coord x)))" lemma circle_y_bounds: assumes "-1/2 \ (x::real) \ x \ 1/2" shows "0 \ circle_y x" "circle_y x \ 1/2" unfolding circle_y_def real_sqrt_ge_0_iff proof - show "0 \ 1/4 - x * x" using assms by (sos "(((A<0 * R<1) + ((A<=0 * (A<=1 * R<1)) * (R<1 * [1]^2))))") show "sqrt (1/4 - x * x) \ 1/2" apply (rule real_le_lsqrt) using assms apply(auto simp add: divide_simps algebra_simps) by (sos "(((A<0 * R<1) + ((A<=0 * (A<=1 * R<1)) * (R<1 * [1]^2))))") qed lemma circle_y_x_coord_bounds: assumes "0 \ (x::real)" "x \ 1" shows "0 \ circle_y (x_coord x) \ circle_y (x_coord x) \ 1/2" using circle_y_bounds[OF x_coord_bounds[OF assms]] by auto lemma rot_x_ivl: assumes "(0::real) \ x" "x \ 1" "0 \ y" "y \ 1" shows "0 \ rot_x'' x y \ rot_x'' x y \ 1" proof have "\a::real. 0 \ a \ a \ 1/2 \ 0 \ 1/2 + (2 * y - 1) * a" using assms by (sos "(((A<0 * R<1) + (((A<=4 * R<1) * (R<1/2 * [1]^2)) + (((A<=1 * (A<=5 * R<1)) * (R<1 * [1]^2)) + ((A<=0 * (A<=4 * R<1)) * (R<1 * [1]^2))))))") then show "0 \ rot_x'' x y" using assms circle_y_x_coord_bounds by(auto simp add: x_coord_inv_def) have "\a::real. 0 \ a \ a \ 1/2 \ 1/2 + (2 * y - 1) * a \ 1" using assms by (sos "(((A<0 * R<1) + (((A<=5 * R<1) * (R<1/2 * [1]^2)) + (((A<=1 * (A<=4 * R<1)) * (R<1 * [1]^2)) + ((A<=0 * (A<=5 * R<1)) * (R<1 * [1]^2))))))") then show "rot_x'' x y \ 1" using assms circle_y_x_coord_bounds by (auto simp add: x_coord_inv_def) qed abbreviation "rot_y'' (x::real) (y::real) \ (x_coord x)/(2* (circle_y (x_coord (rot_x'' x y)))) + 1/2" lemma rot_y_ivl: assumes "(0::real) \ x" "x \ 1" "0 \ y" "y \ 1" shows "0 \ rot_y'' x y \ rot_y'' x y \ 1" proof show "0 \ rot_y'' x y" proof(cases "(x_coord x) < 0") case True have i: "\a b::real. a < 0 \ 0 \ a + b \ (0 \ a/(2*(b)) + 1/2)" by(auto simp add: algebra_simps divide_simps) have *: "(1/2 - x) \ sqrt (x * x + (1/4 + (x * (y * 4) + x * (x * (y * (y * 4))))) - (x + (x * (x * (y * 4)) + x * (y * (y * 4)))))" apply (rule real_le_rsqrt) using assms apply (simp add: algebra_simps power2_eq_square mult_left_le_one_le) by (sos "(((A<0 * R<1) + ((A<=0 * (A<=1 * (A<=2 * (A<=3 * R<1)))) * (R<4 * [1]^2))))") have rw: "\x - x * x\ = x - x * x" using assms by (sos "(() & (((((A<0 * A<1) * R<1) + ((A<=0 * (A<=1 * (A<1 * R<1))) * (R<1 * [1]^2)))) & ((((A<0 * A<1) * R<1) + ((A<=0 * (A<=1 * (A<1 * R<1))) * (R<1 * [1]^2))))))") have "0 \ x_coord x + (circle_y (x_coord (rot_x'' x y)))" using * apply (auto simp add: x_coord_inv_2) by (auto simp add: circle_y_def algebra_simps rw x_coord_def) then show ?thesis using True i by blast next case False have i: "\a b::real. 
0 \ a \ 0 \ b \ (0 \ a/(2*b) + 1/2)" by(auto simp add: algebra_simps divide_simps) have "0 \ circle_y (x_coord (x_coord_inv ((2 * y - 1) * circle_y (x_coord x))))" proof - have rw: "\x - x * x\ = x - x * x" using assms by (sos "(() & (((((A<0 * A<1) * R<1) + ((A<=0 * (A<=1 * (A<1 * R<1))) * (R<1 * [1]^2)))) & ((((A<0 * A<1) * R<1) + ((A<=0 * (A<=1 * (A<1 * R<1))) * (R<1 * [1]^2))))))") have "\x. 0 \ x \ x \ 1/2 \ -1/2\ (2 * y - 1) * x" using assms by (sos "(((A<0 * R<1) + (((A<=4 * R<1) * (R<1/2 * [1]^2)) + (((A<=1 * (A<=5 * R<1)) * (R<1 * [1]^2)) + ((A<=0 * (A<=4 * R<1)) * (R<1 * [1]^2))))))") then have "- 1/2 \ (2 * y - 1) * circle_y (x_coord x)" using circle_y_x_coord_bounds assms(1-2) by auto moreover have "\x. 0 \ x \ x \ 1/2 \ (2 * y - 1) * x \ 1/2 " using assms by (sos "(((A<0 * R<1) + (((A<=5 * R<1) * (R<1/2 * [1]^2)) + (((A<=1 * (A<=4 * R<1)) * (R<1 * [1]^2)) + ((A<=0 * (A<=5 * R<1)) * (R<1 * [1]^2))))))") then have "(2 * y - 1) * circle_y (x_coord x) \ 1/2" using circle_y_x_coord_bounds assms(1-2) by auto ultimately show "0 \ circle_y (x_coord (x_coord_inv ((2 * y - 1) * circle_y (x_coord x))))" by (simp add: circle_y_bounds(1) x_coord_inv_2) qed then show ?thesis using False by auto qed have i: "\a b::real. a < 0 \ 0 \ b \ (a/(2*(b)) + 1/2) \ 1" by(auto simp add: algebra_simps divide_simps) show "rot_y'' x y \ 1" proof(cases "(x_coord x) < 0") case True have i: "\a b::real. a < 0 \ 0 \ b \ (a/(2*(b)) + 1/2) \ 1" by(auto simp add: algebra_simps divide_simps) have "\x. 0 \ x \ x \ 1/2 \ -1/2\ (2 * y - 1) * x" using assms by (sos "(((A<0 * R<1) + (((A<=4 * R<1) * (R<1/2 * [1]^2)) + (((A<=1 * (A<=5 * R<1)) * (R<1 * [1]^2)) + ((A<=0 * (A<=4 * R<1)) * (R<1 * [1]^2))))))") then have "- 1/2 \ (2 * y - 1) * circle_y (x_coord x)" using circle_y_x_coord_bounds assms(1-2) by auto moreover have "\x. 0 \ x \ x \ 1/2 \ (2 * y - 1) * x \ 1/2 " using assms by (sos "(((A<0 * R<1) + (((A<=5 * R<1) * (R<1/2 * [1]^2)) + (((A<=1 * (A<=4 * R<1)) * (R<1 * [1]^2)) + ((A<=0 * (A<=5 * R<1)) * (R<1 * [1]^2))))))") then have "(2 * y - 1) * circle_y (x_coord x) \ 1/2" using circle_y_x_coord_bounds assms(1-2) by auto ultimately have "0 \ circle_y (x_coord (x_coord_inv ((2 * y - 1) * circle_y (x_coord x))))" by (simp add: circle_y_bounds(1) x_coord_inv_2) then show ?thesis by (simp add: True i) next case False have i: "\a b::real. 
0 \ a \ a \ b \ (a/(2*b) + 1/2) \ 1" by(auto simp add: algebra_simps divide_simps) have "(x - 1/2) * (x - 1/2) \ (x * x + (1/4 + (x * (y * 4) + x * (x * (y * (y * 4))))) - (x + (x * (x * (y * 4)) + x * (y * (y * 4)))))" using assms False apply(auto simp add: x_coord_def) by (sos "(((A<0 * R<1) + (((A<=0 * (A<=1 * (A<=2 * R<1))) * (R<2 * [1]^2)) + ((A<=0 * (A<=1 * (A<=2 * (A<=3 * R<1)))) * (R<2 * [1]^2)))))") then have "sqrt ((x - 1/2) * (x - 1/2)) \ sqrt (x * x + (1/4 + (x * (y * 4) + x * (x * (y * (y * 4))))) - (x + (x * (x * (y * 4)) + x * (y * (y * 4)))))" using real_sqrt_le_mono by blast then have *: "(x - 1/2) \ sqrt (x * x + (1/4 + (x * (y * 4) + x * (x * (y * (y * 4))))) - (x + (x * (x * (y * 4)) + x * (y * (y * 4)))))" using assms False by(auto simp add: x_coord_def) have rw: "\x - x * x\ = x - x * x" using assms by (sos "(() & (((((A<0 * A<1) * R<1) + ((A<=0 * (A<=1 * (A<1 * R<1))) * (R<1 * [1]^2)))) & ((((A<0 * A<1) * R<1) + ((A<=0 * (A<=1 * (A<1 * R<1))) * (R<1 * [1]^2))))))") have "x_coord x \ circle_y (x_coord (x_coord_inv ((2 * y - 1) * circle_y (x_coord x))))" using * unfolding x_coord_inv_2 by (auto simp add: circle_y_def algebra_simps rw x_coord_def) then show ?thesis using False i by auto qed qed lemma circle_eq_rot_circle: assumes "0 \ x" "x \ 1" "0 \ y" "y \ 1" shows "(circle_cube (x, y)) = (rot_circle_cube (rot_y'' x y, rot_x'' x y))" proof have rw: "\1/4 - x_coord x * x_coord x\ = 1/4 - x_coord x * x_coord x" apply(rule abs_of_nonneg) using assms mult_left_le by (auto simp add: x_coord_def divide_simps algebra_simps) show "fst (circle_cube (x, y)) = fst (rot_circle_cube (rot_y'' x y, rot_x'' x y))" using assms d_gt_0 apply(simp add: circle_cube_nice rot_circle_cube_def x_coord_inv_2 circle_y_def algebra_simps rw) apply(auto simp add: x_coord_def algebra_simps) by (sos "(((((A<0 * A<1) * ((A<0 * A<1) * R<1)) + (([~4*d^2] * A=0) + (((A<=1 * (A<=2 * (A<=3 * R<1))) * (R<8 * [d]^2)) + ((A<=1 * (A<=2 * (A<=3 * (A<1 * R<1)))) * (R<8 * [d]^2)))))) & ((((A<0 * A<1) * ((A<0 * A<1) * R<1)) + (([~4*d^2] * A=0) + (((A<=0 * (A<=2 * (A<=3 * R<1))) * (R<8 * [d]^2)) + ((A<=0 * (A<=2 * (A<=3 * (A<1 * R<1)))) * (R<8 * [d]^2)))))))") (*Be patient: it takes a bit of time, still better than creeping its proof manually.*) show "snd (circle_cube (x, y)) = snd (rot_circle_cube (rot_y'' x y, rot_x'' x y))" using assms by(auto simp add: circle_cube_def rot_circle_cube_def x_coord_inv_def circle_y_def x_coord_def) qed lemma rot_circle_eq_circle: assumes "0 \ x" "x \ 1" "0 \ y" "y \ 1" shows "(rot_circle_cube (x, y)) = (circle_cube (rot_x'' y x, rot_y'' y x))" proof show "fst (rot_circle_cube (x, y)) = fst (circle_cube (rot_x'' y x, rot_y'' y x))" using assms by(auto simp add: circle_cube_def rot_circle_cube_def x_coord_inv_def circle_y_def x_coord_def) have rw: "\1/4 - x_coord y * x_coord y\ = 1/4 - x_coord y * x_coord y" apply(rule abs_of_nonneg) using assms mult_left_le by (auto simp add: x_coord_def divide_simps algebra_simps) show "snd (rot_circle_cube (x, y)) = snd (circle_cube (rot_x'' y x, rot_y'' y x))" using assms d_gt_0 apply(simp add: circle_cube_nice rot_circle_cube_def x_coord_inv_2 circle_y_def algebra_simps rw) apply(auto simp add: x_coord_def algebra_simps) by (sos "(((((A<0 * A<1) * ((A<0 * A<1) * R<1)) + (([~4*d^2] * A=0) + (((A<=0 * (A<=1 * (A<=3 * R<1))) * (R<8 * [d]^2)) + ((A<=0 * (A<=1 * (A<=3 * (A<1 * R<1)))) * (R<8 * [d]^2)))))) & ((((A<0 * A<1) * ((A<0 * A<1) * R<1)) + (([~4*d^2] * A=0) + (((A<=0 * (A<=1 * (A<=2 * R<1))) * (R<8 * [d]^2)) + ((A<=0 * (A<=1 * (A<=2 * (A<1 * 
R<1)))) * (R<8 * [d]^2)))))))") (*Be patient: it takes a bit of time, still better than creeping its proof manually.*) qed lemma rot_img_eq: assumes "0 < d" shows "(cubeImage (circle_cube )) = (cubeImage (rot_circle_cube))" apply(auto simp add: cubeImage_def image_def cbox_def real_pair_basis) by (meson rot_y_ivl rot_x_ivl assms circle_eq_rot_circle rot_circle_eq_circle)+ lemma rot_circle_div_circle: assumes "0 < (d::real)" shows "gen_division (cubeImage circle_cube) (cubeImage ` {rot_circle_cube})" using rot_img_eq[OF assms] by(auto simp add: gen_division_def) lemma circle_cube_boundary_valid: assumes "(k,\)\boundary circle_cube" shows "valid_path \" proof - have f01: "finite{0,1}" by simp show ?thesis using assms unfolding boundary_def horizontal_boundary_def vertical_boundary_def circle_cube_def valid_path_def piecewise_C1_differentiable_on_def by safe (rule derivative_intros continuous_intros f01 exI ballI conjI refl | force simp add: field_simps)+ qed lemma rot_circle_cube_boundary_valid: assumes "(k,\)\boundary rot_circle_cube" shows "valid_path \" using assms swap_valid_boundaries circle_cube_boundary_valid by (fastforce simp add: rot_circle_cube_def) lemma diff_divide_cancel: fixes z::real shows "z \ 0 \ (a * z - a * (b * z)) / z = (a - a * b)" by (auto simp: field_simps) lemma circle_cube_is_type_I: assumes "0 < d" shows "typeI_twoCube circle_cube" unfolding typeI_twoCube_def proof (intro exI conjI ballI) have f01: "finite{-d/2,d/2}" by simp show "-d/2 < d/2" using assms by simp show "(\x. d * sqrt (1/4 - (x/d) * (x/d))) piecewise_C1_differentiable_on {- d/2..d/2}" using assms unfolding piecewise_C1_differentiable_on_def apply (intro exI conjI) apply (rule ballI refl f01 derivative_intros continuous_intros | simp)+ apply (auto simp: field_simps) by sos show "(\x. -d * sqrt (1/4 - (x/d) * (x/d))) piecewise_C1_differentiable_on {- d/2..d/2}" using assms unfolding piecewise_C1_differentiable_on_def apply (intro exI conjI) apply (rule ballI refl f01 derivative_intros continuous_intros | simp)+ apply (auto simp: field_simps) by sos show "- d * sqrt (1/4 - x / d * (x / d)) \ d * sqrt (1/4 - x / d * (x / d))" if "x \ {- d/2..d/2}" for x proof - have *: "x^2 \ (d/2)^2" using real_sqrt_le_iff that by fastforce show ?thesis apply (rule mult_right_mono) using assms * apply (simp_all add: divide_simps power2_eq_square) done qed qed (auto simp add: circle_cube_def divide_simps algebra_simps diff_divide_cancel) lemma rot_circle_cube_is_type_II: shows "typeII_twoCube rot_circle_cube" using d_gt_0 swap_typeI_is_typeII circle_cube_is_type_I by (auto simp add: rot_circle_cube_def) definition circle_bot_edge where "circle_bot_edge = (1::int, \t. (x_coord t * d, - d * circle_y (x_coord t)))" definition circle_top_edge where "circle_top_edge = (- 1::int, \t. (x_coord t * d, d * circle_y (x_coord t)))" definition circle_right_edge where "circle_right_edge = (1::int, \y. (d/2, 0))" definition circle_left_edge where "circle_left_edge = (- 1::int, \y. (- (d/2), 0))" lemma circle_cube_boundary_explicit: "boundary circle_cube = {circle_left_edge,circle_right_edge,circle_bot_edge,circle_top_edge}" by (auto simp add: valid_two_cube_def boundary_def horizontal_boundary_def vertical_boundary_def circle_cube_def circle_top_edge_def circle_bot_edge_def circle_cube_nice x_coord_def circle_y_def circle_left_edge_def circle_right_edge_def) definition rot_circle_right_edge where "rot_circle_right_edge = (1::int, \t. 
(d * circle_y (x_coord t), x_coord t * d))" definition rot_circle_left_edge where "rot_circle_left_edge = (- 1::int, \t. (- d * circle_y (x_coord t), x_coord t * d))" definition rot_circle_top_edge where "rot_circle_top_edge = (- 1::int, \y. (0, d/2))" definition rot_circle_bot_edge where "rot_circle_bot_edge = (1::int, \y. (0, - (d/2)))" lemma rot_circle_cube_boundary_explicit: "boundary (rot_circle_cube) = {rot_circle_top_edge,rot_circle_bot_edge,rot_circle_right_edge,rot_circle_left_edge}" by (auto simp add: rot_circle_cube_def valid_two_cube_def boundary_def horizontal_boundary_def vertical_boundary_def circle_cube_def rot_circle_right_edge_def rot_circle_left_edge_def x_coord_def circle_y_def rot_circle_top_edge_def rot_circle_bot_edge_def) lemma rot_circle_cube_vertical_boundary_explicit: "vertical_boundary rot_circle_cube = {rot_circle_right_edge,rot_circle_left_edge}" by (auto simp add: rot_circle_cube_def valid_two_cube_def vertical_boundary_def circle_cube_def rot_circle_right_edge_def rot_circle_left_edge_def x_coord_def circle_y_def) lemma circ_left_edge_neq_top: "(- 1::int, \y::real. (- (d/2), 0)) \ (- 1, \x. ((x - 1/2) * d, d * sqrt (1/4 - (x - 1/2) * (x - 1/2))))" by (metis (no_types, lifting) add_diff_cancel_right' d_gt_0 mult.commute mult_cancel_left order_less_irrefl prod.inject) lemma circle_cube_valid_two_cube: "valid_two_cube (circle_cube)" proof (auto simp add: valid_two_cube_def boundary_def horizontal_boundary_def vertical_boundary_def circle_cube_def) have iv: "(- 1::int, \y::real. (- (d/2), 0)) \ (- 1, \x. ((x - 1/2) * d, d * sqrt (1/4 - (x - 1/2) * (x - 1/2))))" using d_gt_0 apply (auto simp add: algebra_simps) by (metis (no_types, hide_lams) add_diff_cancel_right' add_uminus_conv_diff cancel_comm_monoid_add_class.diff_cancel less_eq_real_def linorder_not_le mult.left_neutral prod.simps(1)) have v: "(1::int, \y. (d/2, 0)) \ (1, \x. ((x - 1/2) * d, - (d * sqrt (1/4 - (x - 1/2) * (x - 1/2)))))" using d_gt_0 apply (auto simp add: algebra_simps) by (metis (no_types, hide_lams) diff_0 equal_neg_zero mult_zero_left nonzero_mult_div_cancel_left order_less_irrefl prod.sel(1) times_divide_eq_right zero_neq_numeral) show " card {(- 1::int, \y. (- (d/2), 0)), (1, \y. (d/2, 0)), (1, \x. ((x - 1/2) * d, - (d * sqrt (1/4 - (x - 1/2) * (x - 1/2))))), (- 1, \x. ((x - 1/2) * d, d * sqrt (1/4 - (x - 1/2) * (x - 1/2))))} = 4" using iv v by auto qed lemma rot_circle_cube_valid_two_cube: shows "valid_two_cube rot_circle_cube" using valid_cube_valid_swap circle_cube_valid_two_cube d_gt_0 rot_circle_cube_def by force definition circle_arc_0 where "circle_arc_0 = (1, \t::real. 
(0,0))" lemma circle_top_bot_edges_neq' [simp]: shows "circle_top_edge \ circle_bot_edge" by (simp add: circle_top_edge_def circle_bot_edge_def) lemma rot_circle_top_left_edges_neq [simp]: "rot_circle_top_edge \ rot_circle_left_edge" apply (simp add: rot_circle_left_edge_def rot_circle_top_edge_def x_coord_def) by (metis (mono_tags, hide_lams) cancel_comm_monoid_add_class.diff_cancel d_gt_0 divide_eq_0_iff mult_zero_left order_less_irrefl prod.sel(2) zero_neq_numeral) lemma rot_circle_bot_left_edges_neq [simp]: "rot_circle_bot_edge \ rot_circle_left_edge" by (simp add: rot_circle_left_edge_def rot_circle_bot_edge_def x_coord_def) lemma rot_circle_top_right_edges_neq [simp]: "rot_circle_top_edge \ rot_circle_right_edge" by (simp add: rot_circle_right_edge_def rot_circle_top_edge_def x_coord_def) lemma rot_circle_bot_right_edges_neq [simp]: "rot_circle_bot_edge \ rot_circle_right_edge" apply (simp add: rot_circle_right_edge_def rot_circle_bot_edge_def x_coord_def) by (metis (mono_tags, hide_lams) cancel_comm_monoid_add_class.diff_cancel d_gt_0 divide_eq_0_iff mult_zero_left neg_0_equal_iff_equal order_less_irrefl prod.sel(2) zero_neq_numeral) lemma rot_circle_right_top_edges_neq' [simp]: "rot_circle_right_edge \ rot_circle_left_edge" by (simp add: rot_circle_left_edge_def rot_circle_right_edge_def) lemma rot_circle_left_bot_edges_neq [simp]: "rot_circle_left_edge \ rot_circle_top_edge" apply (simp add: rot_circle_top_edge_def rot_circle_left_edge_def) by (metis (no_types, hide_lams) cancel_comm_monoid_add_class.diff_cancel d_gt_0 mult.commute mult_zero_right nonzero_mult_div_cancel_left order_less_irrefl prod.sel(2) times_divide_eq_right x_coord_def zero_neq_numeral) lemma circle_right_top_edges_neq [simp]: "circle_right_edge \ circle_top_edge" proof - have "circle_right_edge = (1, (\r::real. (d / 2, 0::real)))" using circle.circle_right_edge_def circle_axioms by blast then show ?thesis using circle.circle_top_edge_def circle_axioms by auto qed lemma circle_left_bot_edges_neq [simp]: "circle_left_edge \ circle_bot_edge" proof - have "circle_bot_edge = (1, \r. (x_coord r * d, - d * circle_y (x_coord r)))" using circle.circle_bot_edge_def circle_axioms by blast then show ?thesis by (simp add: circle_left_edge_def) qed lemma circle_left_top_edges_neq [simp]: "circle_left_edge \ circle_top_edge" proof - have "\r. ((r - 1 / 2) * d, d * sqrt (1 / 4 - (r - 1 / 2) * (r - 1 / 2))) \ (- (d / 2), 0)" by (metis circ_left_edge_neq_top) then have "(\r. d * r \ - (d / 2)) \ (\r ra. (x_coord (x_coord_inv r) * d, d * circle_y (x_coord (x_coord_inv r))) = (x_coord ra * d, d * circle_y (x_coord ra)) \ d * circle_y r \ 0)" by (metis mult.commute) then have "(\r. (x_coord r * d, d * circle_y (x_coord r))) \ (\r. (- (d / 2), 0))" by (metis mult.commute prod.simps(1) x_coord_inv_2) then show ?thesis by (simp add: circle_left_edge_def circle_top_edge_def) qed lemma circle_right_bot_edges_neq [simp]: "circle_right_edge \ circle_bot_edge" - by (smt Pair_inject circle_bot_edge_def d_gt_0 circle.circle_right_edge_def circle_axioms real_mult_le_cancel_iff1 x_coord_def) + by (smt Pair_inject circle_bot_edge_def d_gt_0 circle.circle_right_edge_def circle_axioms mult_le_cancel_iff1 x_coord_def) definition circle_polar where "circle_polar t = ((d/2) * cos (2 * pi * t), (d/2) * sin (2 * pi * t))" lemma circle_polar_smooth: "(circle_polar) C1_differentiable_on {0..1}" proof - have "inj ((*) (2 * pi))" by (auto simp: inj_on_def) then have *: "\x. finite ({0..1} \ (*) (2 * pi) -` {x})" by (simp add: finite_vimageI) have "(\t. 
((d/2) * cos (2 * pi * t), (d/2) * sin (2 * pi * t))) C1_differentiable_on {0..1}" by (rule * derivative_intros)+ then show ?thesis apply (rule eq_smooth_gen) by(simp add: circle_polar_def) qed abbreviation "custom_arccos \ (\x. (if -1 \ x \ x \ 1 then arccos x else (if x < -1 then -x + pi else 1 -x )))" lemma cont_custom_arccos: assumes "S \ {-1..1}" shows "continuous_on S custom_arccos" proof - have "continuous_on ({-1..1} \ {}) custom_arccos" by (auto intro!: continuous_on_cases continuous_intros) with assms show ?thesis using continuous_on_subset by auto qed lemma custom_arccos_has_deriv: assumes "- 1 < x" "x < 1" shows "DERIV custom_arccos x :> inverse (- sqrt (1 - x\<^sup>2))" proof - have x1: "\x\\<^sup>2 < 1\<^sup>2" by (simp add: abs_less_iff abs_square_less_1 assms) show ?thesis apply (rule DERIV_inverse_function [where f=cos and a="-1" and b=1]) apply (rule x1 derivative_eq_intros | simp add: sin_arccos)+ using assms x1 cont_custom_arccos [of "{-1<..<1}"] apply (auto simp: continuous_on_eq_continuous_at greaterThanLessThan_subseteq_atLeastAtMost_iff) done qed declare custom_arccos_has_deriv[THEN DERIV_chain2, derivative_intros] custom_arccos_has_deriv[THEN DERIV_chain2, unfolded has_field_derivative_def, derivative_intros] lemma circle_boundary_reparams: shows rot_circ_left_edge_reparam_polar_circ_split: "reparam (rec_join [(rot_circle_left_edge)]) (rec_join [(subcube (1/4) (1/2) (1, circle_polar)), (subcube (1/2) (3/4) (1, circle_polar))])" (is ?P1) and circ_top_edge_reparam_polar_circ_split: "reparam (rec_join [(circle_top_edge)]) (rec_join [(subcube 0 (1/4) (1, circle_polar)), (subcube (1/4) (1/2) (1, circle_polar))])" (is ?P2) and circ_bot_edge_reparam_polar_circ_split: "reparam (rec_join [(circle_bot_edge)]) (rec_join [(subcube (1/2) (3/4) (1, circle_polar)), (subcube (3/4) 1 (1, circle_polar))])" (is ?P3) and rot_circ_right_edge_reparam_polar_circ_split: "reparam (rec_join [(rot_circle_right_edge)]) (rec_join [(subcube (3/4) 1 (1, circle_polar)), (subcube 0 (1/4) (1, circle_polar))])" (is ?P4) proof - let ?\ = "((*) (1/pi) \ custom_arccos \ (\t. 2 * x_coord (1 - t)))" let ?LHS1 = "(\x. (- (d * sqrt (1/4 - x_coord (1 - x) * x_coord (1 - x))), x_coord (1 - x) * d))" let ?RHS1 = "((\x. if x * 2 \ 1 then (d * cos (2 * pi * (2 * x/4 + 1/4))/2, d * sin (2 * pi * (2 * x/4 + 1/4))/2) else (d * cos (2 * pi * ((2 * x - 1)/4 + 1/2))/2, d * sin (2 * pi * ((2 * x - 1)/4 + 1/2))/2)) \ ?\)" let ?LHS2 = "\x. ((x_coord (1 - x) * d, d * sqrt (1/4 - x_coord (1 - x) * x_coord (1 - x))))" let ?RHS2 = "((\x. if x * 2 \ 1 then (d * cos (2 * x * pi/2)/2, d * sin (2 * x * pi/2)/2) else (d * cos (2 * pi * ((2 * x - 1)/4 + 1/4))/2, d * sin (2 * pi * ((2 * x - 1)/4 + 1/4))/2)) \ ?\)" let ?LHS3 = "\x. (x_coord x * d, - (d * sqrt (1/4 - x_coord x * x_coord x)))" let ?RHS3 = "(\x. if x * 2 \ 1 then (d * cos (2 * pi * (2 * x/4 + 1/2))/2, d * sin (2 * pi * (2 * x/4 + 1/2))/2) else (d * cos (2 * pi * ((2 * x - 1)/4 + 3/4))/2, d * sin (2 * pi * ((2 * x - 1)/4 + 3/4))/2))" let ?LHS4 = "\x. (d * sqrt (1/4 - x_coord x * x_coord x), x_coord x * d)" let ?RHS4 = "(\x. 
if x * 2 \ 1 then (d * cos (2 * pi * (2 * x/4 + 3/4))/2, d * sin (2 * pi * (2 * x/4 + 3/4))/2) else (d * cos ((2 * x - 1) * pi/2)/2, d * sin ((2 * x - 1) * pi/2)/2))" have phi_diff: "?\ piecewise_C1_differentiable_on {0..1}" unfolding piecewise_C1_differentiable_on_def proof show "continuous_on {0..1} ?\" unfolding x_coord_def by (intro continuous_on_compose cont_custom_arccos continuous_intros) auto have "?\ C1_differentiable_on {0..1} - {0,1}" unfolding x_coord_def piecewise_C1_differentiable_on_def C1_differentiable_on_def valid_path_def by (simp | rule has_vector_derivative_pair_within DERIV_image_chain derivative_eq_intros continuous_intros exI ballI conjI | force simp add: field_simps | sos)+ then show "\s. finite s \ ?\ C1_differentiable_on {0..1} - s" by force qed have inj: "inj ?\" apply (intro comp_inj_on inj_on_cases inj_on_arccos) apply (auto simp add: inj_on_def x_coord_def) using pi_ge_zero apply auto[1] apply (smt arccos) by (smt arccos_lbound) then have fin: "\x. \0 \ x; x \ 1\ \ finite (?\ -` {x})" by (simp add: finite_vimageI) have "?\ ` {0..1} = {0..1}" proof show "?\ ` {0..1} \ {0..1}" by (auto simp add: x_coord_def divide_simps arccos_lbound arccos_bounded) have "arccos (1 - 2 * ((1 - cos (x * pi))/2)) = x * pi" if "0 \ x" "x \ 1" for x using that by (simp add: field_simps arccos_cos) then show "{0..1} \ ?\ ` {0..1}" apply (auto simp: x_coord_def o_def image_def) by (rule_tac x="(1 - cos (x * pi))/2" in bexI) auto qed then have bij_phi: "bij_betw ?\ {0..1} {0..1}" unfolding bij_betw_def using inj inj_on_subset by blast have phi01: "?\ -` {0..1} \ {0..1}" by (auto simp add: subset_iff x_coord_def divide_simps) { fix x::real assume x: "0 \ x" "x \ 1" then have i: "- 1 \ (2 * x - 1)" "(2 * x - 1) \ 1" by auto have ii: "cos (pi / 2 + arccos (1 - x * 2)) = -sin (arccos (1 - x * 2))" using minus_sin_cos_eq[symmetric, where ?x = "arccos (1 - x * 2)"] by (auto simp add: add.commute) have "2 * sqrt (x - x * x) = sqrt (4*x - 4*x * x)" by (metis mult.assoc real_sqrt_four real_sqrt_mult right_diff_distrib) also have "... = sqrt (1 - (2 * x - 1) * (2 * x - 1))" by (simp add: algebra_simps) finally have iii: "2 * sqrt (x - x * x) = cos (arcsin (2 * x - 1)) \ 2 * sqrt (x - x * x) = sin (arccos (1 - 2 * x))" using arccos_minus[OF i] unfolding minus_diff_eq sin_pi_minus by (simp add: cos_arcsin i power2_eq_square sin_arccos) then have 1: "?LHS1 x = ?RHS1 x" using d_gt_0 i x apply (simp add: x_coord_def field_simps) apply (auto simp add: diff_divide_distrib add_divide_distrib right_diff_distrib mult.commute ii) using cos_sin_eq[where ?x = "- arccos (1 - x * 2)"] by (simp add: cos_sin_eq[where ?x = "- arccos (1 - x * 2)"] right_diff_distrib) have 2: "?LHS2 x = ?RHS2 x" using x d_gt_0 iii by (auto simp add: x_coord_def diff_divide_distrib algebra_simps) have a: "cos (pi / 2 - arccos (x * 2 - 1)) = cos (pi / 2 - arccos (1 - x * 2))" by (smt arccos_minus arccos_cos2 arccos_lbound cos_arccos cos_ge_minus_one cos_le_one i(1) i(2) pi_def pi_half) have b: "cos (arccos (1 - x * 2) + pi * 3 / 2) = cos ((pi / 2 - arccos (x * 2 - 1)) + 2 * pi)" using arccos_minus[OF i] by(auto simp add: mult.commute add.commute) then have c: "... = cos (pi / 2 - arccos (x * 2 - 1))" using cos_periodic by blast have "cos (- pi - arccos (1 - x * 2)) = cos (- (pi + arccos (1 - x * 2)))" by (auto simp add: minus_add[where b = "pi" and a = "arccos (1 - x * 2)", symmetric]) moreover have "... = cos ((pi + arccos (1 - x * 2)))" using cos_minus by blast moreover have "... 
= cos (2*pi - arccos (x * 2 - 1))" using arccos_minus[OF i] by (auto simp add: mult.commute add.commute) moreover have "... = cos (arccos (x * 2 - 1))" using cos_2pi_minus by auto ultimately have d: "cos (- pi - arccos (1 - x * 2)) = (x * 2 - 1)" using cos_arccos[OF i] mult.commute by metis have cosm: "\x. cos (x - pi*2) = cos x" by (metis cos_periodic eq_diff_eq' mult.commute) have 34: "?LHS3 x = (?RHS3 \ ?\) x" "?LHS4 x = (?RHS4 \ ?\) x" using d_gt_0 x a b c iii cos_periodic [of "pi / 2 - arccos (x * 2 - 1)"] apply (auto simp add: x_coord_def algebra_simps diff_divide_distrib power2_eq_square) apply (auto simp add: sin_cos_eq cosm) using d apply (auto simp add: right_diff_distrib) by (smt cos_minus) note 1 2 34 } note * = this show ?P1 ?P2 ?P3 ?P4 apply (auto simp add: subcube_def circle_bot_edge_def circle_top_edge_def circle_polar_def reversepath_def subpath_def joinpaths_def circle_y_def rot_circle_left_edge_def rot_circle_right_edge_def) unfolding reparam_def by (rule ballI exI conjI impI phi_diff bij_phi phi01 fin * | force simp add: x_coord_def)+ qed definition circle_cube_boundary_to_polarcircle where "circle_cube_boundary_to_polarcircle \ \ if (\ = (circle_top_edge)) then {subcube 0 (1/4) (1, circle_polar), subcube (1/4) (1/2) (1, circle_polar)} else if (\ = (circle_bot_edge)) then {subcube (1/2) (3/4) (1, circle_polar), subcube (3/4) 1 (1, circle_polar)} else {}" definition rot_circle_cube_boundary_to_polarcircle where "rot_circle_cube_boundary_to_polarcircle \ \ if (\ = (rot_circle_left_edge )) then {subcube (1/4) (1/2) (1, circle_polar), subcube (1/2) (3/4) (1, circle_polar)} else if (\ = (rot_circle_right_edge)) then {subcube (3/4) 1 (1, circle_polar), subcube 0 (1/4) (1, circle_polar)} else {}" lemma circle_arcs_neq: assumes "0 \ k" "k \ 1" "0 \ n" "n \ 1" "n < k" "k + n < 1" shows "subcube k m (1, circle_polar) \ subcube n q (1, circle_polar)" proof (simp add: subcube_def subpath_def circle_polar_def) have "cos (2 * pi * k) \ cos(2 * pi * n)" unfolding cos_eq proof safe show False if "2 * pi * k = 2 * pi * n + 2 * m * pi" "m \ \" for m proof - have "2 * pi * (k - n ) = 2 * m * pi" using distrib_left that by (simp add: left_diff_distrib mult.commute) then have a: "m = (k - n)" by auto have "\k - n\ = 0 " using assms by (simp add: floor_eq_iff) then have "k - n \ \" using assms by (auto simp only: frac_eq_0_iff[symmetric] frac_def) then show False using that a by auto qed show False if "2 * pi * k = - (2 * pi * n) + 2 * m * pi" "m \ \" for m proof - have "2 * pi * (k + n ) = 2 * m * pi" using that by (auto simp add: distrib_left) then have a: "m = (k + n)" by auto have "\k + n\ = 0 " using assms by (simp add: floor_eq_iff) then have "k + n \ \" using Ints_def assms by force then show False using that a by auto qed qed then have "(\x. (d * cos (2 * pi * ((m - k) * x + k))/2, d * sin (2 * pi * ((m - k) * x + k))/2)) 0 \ (\x. (d * cos (2 * pi * ((q - n) * x + n))/2, d * sin (2 * pi * ((q - n) * x + n))/2)) 0" using d_gt_0 by auto then show "(\x. (d * cos (2 * pi * ((m - k) * x + k))/2, d * sin (2 * pi * ((m - k) * x + k))/2)) \ (\x. 
(d * cos (2 * pi * ((q - n) * x + n))/2, d * sin (2 * pi * ((q - n) * x + n))/2))" by metis qed lemma circle_arcs_neq_2: assumes "0 \ k" "k \ 1" "0 \ n" "n \ 1" "n < k" "0 < n" and kn12: "1/2 < k + n" and "k + n < 3/2" shows "subcube k m (1, circle_polar) \ subcube n q (1, circle_polar)" proof (simp add: subcube_def subpath_def circle_polar_def) have "sin (2 * pi * k) \ sin(2 * pi * n)" unfolding sin_eq proof safe show False if "m \ \" "2 * pi * k = 2 * pi * n + 2 * m * pi" for m proof - have "2 * pi * (k - n) = 2 * m * pi" using that by (simp add: left_diff_distrib mult.commute) then have a: "m = (k - n)" by auto have "\k - n\ = 0 " using assms by (simp add: floor_eq_iff) then have "k - n \ \" using assms by (auto simp only: frac_eq_0_iff[symmetric] frac_def ) then show False using that a by auto qed show False if "2 * pi * k = - (2 * pi * n) + (2 * m + 1) * pi" "m \ \" for m proof - have i: "\pi. 0 < pi \ 2 * pi * (k + n ) = 2 * m * pi + pi \ m = (k + n) - 1/2" by (sos "(((((A<0 * A<1) * R<1) + ([1/2] * A=0))) & ((((A<0 * A<1) * R<1) + ([~1/2] * A=0))))") have "2 * pi * (k + n) = 2 * m * pi + pi" using that by (simp add: algebra_simps) then have a: "m = (k + n) - 1/2" using i[OF pi_gt_zero] by fastforce have "\k + n - 1/2\ = 0 " using assms by (auto simp add: floor_eq_iff) then have "k + n - 1/2 \ \" by (metis Ints_cases add.commute add.left_neutral add_diff_cancel_left' add_diff_eq kn12 floor_of_int of_int_0 order_less_irrefl) then show False using that a by auto qed qed then have "(\x. (d * cos (2 * pi * ((m - k) * x + k))/2, d * sin (2 * pi * ((m - k) * x + k))/2)) 0 \ (\x. (d * cos (2 * pi * ((q - n) * x + n))/2, d * sin (2 * pi * ((q - n) * x + n))/2)) 0" using d_gt_0 by auto then show "(\x. (d * cos (2 * pi * ((m - k) * x + k))/2, d * sin (2 * pi * ((m - k) * x + k))/2)) \ (\x. (d * cos (2 * pi * ((q - n) * x + n))/2, d * sin (2 * pi * ((q - n) * x + n))/2))" by metis qed lemma circle_cube_is_only_horizontal_div_of_rot: shows "only_horizontal_division (boundary (circle_cube)) {rot_circle_cube}" unfolding only_horizontal_division_def proof (rule exI [of _ "{}"], simp, intro conjI ballI) show "finite (boundary circle_cube)" using circle.circle_cube_boundary_explicit circle_axioms by auto show "boundary_chain (boundary circle_cube)" by (simp add: two_cube_boundary_is_boundary) show "\x. x \ boundary circle_cube \ case x of (k, x) \ valid_path x" using circle_cube_boundary_valid by blast let ?\ = "(boundary (circle_cube))" let ?pi = "{circle_left_edge, circle_right_edge}" let ?pj = "{rot_circle_top_edge, rot_circle_bot_edge}" let ?f = "circle_cube_boundary_to_polarcircle" let ?one_chaini = "boundary (circle_cube) - ?pi" have c: "common_reparam_exists ?\ (two_chain_vertical_boundary {rot_circle_cube})" unfolding common_reparam_exists_def proof (intro exI conjI) let ?subdiv = "{(subcube 0 (1/4) (1, circle_polar)), (subcube (1/4) (1/2) (1, circle_polar)), (subcube (1/2) (3/4) (1, circle_polar)), (subcube (3/4) 1 (1, circle_polar))}" show "(\(k, \)\?subdiv. 
\ C1_differentiable_on {0..1})" using subpath_smooth[OF circle_polar_smooth] by (auto simp add: subcube_def) have 1: "finite ?subdiv" by auto show "boundary_chain ?subdiv" by (simp add: boundary_chain_def subcube_def) show "chain_reparam_chain' (boundary (circle_cube) - ?pi) ?subdiv" unfolding chain_reparam_chain'_def proof (intro exI conjI impI) show "\ (?f ` ?one_chaini) = ?subdiv" apply (simp add: circle_cube_boundary_to_polarcircle_def circle_cube_boundary_explicit) using circle_top_bot_edges_neq' by metis let ?l = "[subcube 0 (1/4) (1, circle_polar), subcube (1/4) (1/2) (1, circle_polar)]" have "chain_reparam_weak_path (coeff_cube_to_path (circle_top_edge)) {subcube 0 (1/4) (1, circle_polar), subcube (1/4) (1/2) (1, circle_polar)}" unfolding chain_reparam_weak_path_def proof (intro exI conjI) show "valid_chain_list ?l" by (auto simp add: subcube_def circle_top_edge_def x_coord_def circle_y_def pathfinish_def pathstart_def reversepath_def subpath_def joinpaths_def) show "reparam (coeff_cube_to_path circle_top_edge) (rec_join ?l)" using circ_top_edge_reparam_polar_circ_split by auto show "distinct ?l" apply simp apply (subst neq_commute) apply (simp add: circle_arcs_neq) done qed auto moreover have "chain_reparam_weak_path (coeff_cube_to_path (circle_bot_edge)) {subcube (1/2) (3/4) (1, circle_polar), subcube (3/4) 1 (1, circle_polar)}" unfolding chain_reparam_weak_path_def proof let ?l = "[subcube (1/2) (3/4) (1, circle_polar), subcube (3/4) 1 (1, circle_polar)]" have a: "valid_chain_list ?l" by (auto simp add: subcube_def circle_top_edge_def x_coord_def circle_y_def pathfinish_def pathstart_def reversepath_def subpath_def joinpaths_def) have b: "reparam (rec_join [circle_bot_edge]) (rec_join ?l)" using circ_bot_edge_reparam_polar_circ_split by auto have c: "subcube (3/4) 1 (1, circle_polar) \ subcube (1/2) (3/4) (1, circle_polar)" apply(rule circle_arcs_neq_2) using d_gt_0(1) by auto show "set ?l = {subcube (1/2) (3/4) (1, circle_polar), subcube (3/4) 1 (1, circle_polar)} \ distinct ?l \ reparam (coeff_cube_to_path (circle_bot_edge)) (rec_join ?l) \ valid_chain_list ?l \ ?l \ []" using a b c by auto qed ultimately show "(\cube\?one_chaini. chain_reparam_weak_path (rec_join [cube]) (?f cube))" by (auto simp add: circle_cube_boundary_to_polarcircle_def UNION_eq circle_cube_boundary_explicit) show "(\p\?one_chaini. \p'\?one_chaini. p \ p' \ ?f p \ ?f p' = {})" using circle_arcs_neq circle_arcs_neq_2 apply (auto simp add: circle_cube_boundary_to_polarcircle_def UNION_eq circle_cube_boundary_explicit neq_commute d_gt_0) using circle_top_bot_edges_neq' d_gt_0 apply auto[1] apply (smt atLeastAtMost_iff divide_less_eq_1_pos zero_less_divide_1_iff) apply (smt atLeastAtMost_iff divide_less_eq_1_pos zero_less_divide_iff) apply (smt atLeastAtMost_iff divide_cancel_left divide_less_eq_1_pos field_sum_of_halves zero_less_divide_1_iff) done show "(\x\?one_chaini. finite (?f x))" by (auto simp add: circle_cube_boundary_to_polarcircle_def circle_cube_boundary_explicit) qed show "(\(k, \)\?pi. 
point_path \)" using d_gt_0 by (auto simp add: point_path_def circle_left_edge_def circle_right_edge_def) let ?f = "rot_circle_cube_boundary_to_polarcircle" let ?one_chain2.0 = "two_chain_vertical_boundary {rot_circle_cube} - ?pj" show "chain_reparam_chain' (two_chain_vertical_boundary {rot_circle_cube} - ?pj) ?subdiv" unfolding chain_reparam_chain'_def proof (intro exI conjI) have rw: "?one_chain2.0 = {rot_circle_left_edge, rot_circle_right_edge}" by(auto simp add: rot_circle_cube_vertical_boundary_explicit two_chain_vertical_boundary_def) show "\ (?f ` ?one_chain2.0) = ?subdiv" using rot_circle_right_top_edges_neq' by (auto simp add: rot_circle_cube_boundary_to_polarcircle_def rw) show "(\cube\?one_chain2.0. chain_reparam_weak_path (rec_join [cube]) (?f cube))" proof (clarsimp simp add: rot_circle_cube_boundary_to_polarcircle_def rw, intro conjI) let ?l = "[subcube (1/4) (1/2) (1, circle_polar), subcube (1/2) (3/4) (1, circle_polar)]" show "chain_reparam_weak_path (coeff_cube_to_path (rot_circle_left_edge)) {subcube (1/4) (1/2) (1, circle_polar), subcube (1/2) (3/4) (1, circle_polar)}" unfolding chain_reparam_weak_path_def proof (intro exI conjI) show "valid_chain_list ?l" by (auto simp add: subcube_def pathfinish_def pathstart_def reversepath_def subpath_def joinpaths_def) show "reparam (coeff_cube_to_path rot_circle_left_edge) (rec_join ?l)" using rot_circ_left_edge_reparam_polar_circ_split by auto show "distinct ?l" apply simp apply (subst neq_commute) apply (simp add: circle_arcs_neq) done qed auto show "chain_reparam_weak_path (coeff_cube_to_path (rot_circle_right_edge)) {subcube (3/4) 1 (1, circle_polar), subcube 0 (1/4) (1, circle_polar)}" unfolding chain_reparam_weak_path_def proof (intro exI conjI) let ?l = "[subcube (3/4) 1 (1, circle_polar), subcube 0 (1/4) (1, circle_polar)]" show "valid_chain_list ?l" by (auto simp add: circle_polar_def subcube_def pathfinish_def pathstart_def reversepath_def subpath_def joinpaths_def) show "reparam (coeff_cube_to_path rot_circle_right_edge) (rec_join ?l)" using rot_circ_right_edge_reparam_polar_circ_split by auto show "distinct ?l" by (simp add: circle_arcs_neq) qed auto qed show "(\p\?one_chain2.0. \p'\?one_chain2.0. p \ p' \ ?f p \ ?f p' = {})" using circle_arcs_neq circle_arcs_neq_2 apply (auto simp add: rot_circle_cube_boundary_to_polarcircle_def neq_commute) apply (metis add.right_neutral divide_less_eq_1_pos dual_order.order_iff_strict num.distinct(1) one_less_numeral_iff prod.sel(1) prod.sel(2) semiring_norm(68) subcube_def zero_less_divide_1_iff zero_less_numeral) apply (smt field_sum_of_halves) done show "(\x\?one_chain2.0. finite (?f x))" by (auto simp add: rot_circle_cube_boundary_to_polarcircle_def UNION_eq rw) qed show "(\(k, \)\?pj. point_path \)" using d_gt_0 by (auto simp add: point_path_def rot_circle_top_edge_def rot_circle_bot_edge_def) qed then show "common_sudiv_exists (two_chain_vertical_boundary {rot_circle_cube}) (boundary circle_cube) \ common_reparam_exists (boundary circle_cube) (two_chain_vertical_boundary {rot_circle_cube})" by blast qed lemma GreenThm_cirlce: assumes "\twoC\{circle_cube}. analytically_valid (cubeImage twoC) (\x. F x \ i) j" "\twoC\{rot_circle_cube}. analytically_valid (cubeImage twoC) (\x. F x \ j) i" shows "integral (cubeImage (circle_cube)) (\x. partial_vector_derivative (\x. F x \ j) i x - partial_vector_derivative (\x. 
F x \ i) j x) = one_chain_line_integral F {i, j} (boundary (circle_cube))" proof(rule green_typeI_typeII_chain.GreenThm_typeI_typeII_divisible_region_finite_holes[of "(cubeImage (circle_cube))" i j F "{circle_cube}" "{rot_circle_cube}", OF _ _ _ circle_cube_is_only_horizontal_div_of_rot _], auto) show "\ a b. (a, b) \ boundary circle_cube \ valid_path b" using circle_cube_boundary_valid by auto show "green_typeI_typeII_chain (cubeImage circle_cube) i j F {circle_cube} {rot_circle_cube}" using assms proof(auto simp add: green_typeI_typeII_chain_def green_typeI_chain_def green_typeII_chain_def green_typeI_chain_axioms_def green_typeII_chain_axioms_def intro!: circle_cube_is_type_I rot_circle_cube_is_type_II d_gt_0 R2_axioms) show "gen_division (cubeImage circle_cube) {cubeImage circle_cube}" by (simp add: gen_division_def) show "gen_division (cubeImage (circle_cube)) ({cubeImage rot_circle_cube})" using rot_circle_div_circle d_gt_0 by auto show "valid_two_chain {rot_circle_cube}" "valid_two_chain {circle_cube}" apply (auto simp add: valid_two_chain_def) using rot_circle_cube_valid_two_cube circle_cube_valid_two_cube assms(1) by auto qed show "only_vertical_division (boundary (circle_cube)) {circle_cube}" using twoChainVertDiv_of_itself[of "{circle_cube}"] apply(simp add: two_chain_boundary_def) using circle_cube_boundary_valid by auto qed end end diff --git a/thys/Gromov_Hyperbolicity/Metric_Completion.thy b/thys/Gromov_Hyperbolicity/Metric_Completion.thy --- a/thys/Gromov_Hyperbolicity/Metric_Completion.thy +++ b/thys/Gromov_Hyperbolicity/Metric_Completion.thy @@ -1,482 +1,482 @@ (* Author: Sébastien Gouëzel sebastien.gouezel@univ-rennes1.fr License: BSD *) section \The metric completion of a metric space\ theory Metric_Completion imports Isometries begin text \Any metric space can be completed, by adding the missing limits of Cauchy sequences. Formally, there exists an isometric embedding of the space in a complete space, with dense image. In this paragraph, we construct this metric completion. This is exactly the same construction as the way in which real numbers are constructed from rational numbers.\ subsection \Definition of the metric completion\ quotient_type (overloaded) 'a metric_completion = "nat \ ('a::metric_space)" / partial: "\u v. (Cauchy u) \ (Cauchy v) \ (\n. dist (u n) (v n)) \ 0" unfolding part_equivp_def proof(auto intro!: ext) show "\x. Cauchy x" by (rule exI[of _ "\_. undefined"]) (simp add: convergent_Cauchy convergent_const) fix x y z::"nat \ 'a" assume H: "(\n. dist (x n) (y n)) \ 0" "(\n. dist (x n) (z n)) \ 0" have *: "(\n. dist (x n) (y n) + dist (x n) (z n)) \ 0 + 0" by (rule tendsto_add) (auto simp add: H) show "(\n. dist (y n) (z n)) \ 0" apply (rule tendsto_sandwich[of "\_. 0" _ _ "\n. dist (x n) (y n) + dist (x n) (z n)"]) using * by (auto simp add: dist_triangle3) next fix x y z::"nat \ 'a" assume H: "(\n. dist (x n) (y n)) \ 0" "(\n. dist (y n) (z n)) \ 0" have *: "(\n. dist (x n) (y n) + dist (y n) (z n)) \ 0 + 0" by (rule tendsto_add) (auto simp add: H) show "(\n. dist (x n) (z n)) \ 0" apply (rule tendsto_sandwich[of "\_. 0" _ _ "\n. dist (x n) (y n) + dist (y n) (z n)"]) using * by (auto simp add: dist_triangle) next fix x y::"nat \ 'a" assume H: "Cauchy x" "(\v. Cauchy v \ (\n. dist (x n) (v n)) \ 0) = (\v. Cauchy v \ (\n. dist (y n) (v n)) \ 0)" have "Cauchy x \ (\n. dist (x n) (x n)) \ 0" using H by auto then have "(\n. 
dist (y n) (x n))\ 0" using H by meson moreover have "dist (x n) (y n) = dist (y n) (x n)" for n using dist_commute by auto ultimately show "(\n. dist (x n) (y n)) \ 0" by auto qed text \We have to show that the metric completion is indeed a metric space, that the original space embeds isometrically into it, and that it is complete. Before we prove these statements, we start with two simple lemmas that will be needed later on.\ lemma convergent_Cauchy_dist: fixes u v::"nat \ ('a::metric_space)" assumes "Cauchy u" "Cauchy v" shows "convergent (\n. dist (u n) (v n))" proof (rule real_Cauchy_convergent, intro CauchyI) fix e::real assume "e > 0" obtain Nu where Nu: "\n \ Nu. \m \ Nu. dist (u n) (u m) < e/2" using assms(1) by (metis \0 < e\ less_divide_eq_numeral1(1) metric_CauchyD mult_zero_left) obtain Nv where Nv: "\n \ Nv. \m \ Nv. dist (v n) (v m) < e/2" using assms(2) by (metis \0 < e\ less_divide_eq_numeral1(1) metric_CauchyD mult_zero_left) define M where "M = max Nu Nv" { fix n m assume H: "n \ M" "m \ M" have *: "dist (u n) (u m) < e/2" "dist (v n) (v m) < e/2" using Nu Nv H unfolding M_def by auto have "dist (u m) (v m) - dist (u n) (v n) \ dist (u m) (u n) + dist (v n) (v m)" by (simp add: algebra_simps) (metis add_le_cancel_left dist_commute dist_triangle2 dist_triangle_le) also have "... < e/2 + e/2" using * by (simp add: dist_commute) finally have A: "dist (u m) (v m) - dist (u n) (v n) < e" by simp have "dist (u n) (v n) - dist (u m) (v m) \ dist (u m) (u n) + dist (v n) (v m)" by (simp add: algebra_simps) (metis add_le_cancel_left dist_commute dist_triangle2 dist_triangle_le) also have "... < e/2 + e/2" using * by (simp add: dist_commute) finally have "dist (u n) (v n) - dist (u m) (v m) < e" by simp then have "norm(dist (u m) (v m) - dist (u n) (v n)) < e" using A by auto } then show "\M. \m \ M. \n \ M. norm (dist (u m) (v m) - dist (u n) (v n)) < e" by auto qed lemma convergent_add_null: fixes u v::"nat \ ('a::real_normed_vector)" assumes "convergent u" "(\n. v n - u n) \ 0" shows "convergent v" "lim v = lim u" proof - have "(\n. u n + (v n - u n)) \ lim u + 0" apply (rule tendsto_add) using assms convergent_LIMSEQ_iff by auto then have *: "v \ lim u" by auto show "convergent v" using * by (simp add: Lim_def convergentI) show "lim v = lim u" using * by (simp add: limI) qed text \Let us now prove that the metric completion is a metric space: the distance between two Cauchy sequences is the limit of the distances of points in the sequence. The convergence follows from Lemma~\verb+convergent_Cauchy_dist+ above.\ instantiation metric_completion :: (metric_space) metric_space begin lift_definition dist_metric_completion::"('a::metric_space) metric_completion \ 'a metric_completion \ real" is "\x y. lim (\n. dist (x n) (y n))" proof - fix x y z t::"nat \ 'a" assume H: "Cauchy x \ Cauchy y \ (\n. dist (x n) (y n)) \ 0" "Cauchy z \ Cauchy t \ (\n. dist (z n) (t n)) \ 0" show "lim (\n. dist (x n) (z n)) = lim (\n. dist (y n) (t n))" proof (rule convergent_add_null(2)) show "convergent (\n. dist (y n) (t n))" apply (rule convergent_Cauchy_dist) using H by auto have a: "(\n. - dist (t n) (z n) - dist (x n) (y n)) \ -0 -0" apply (intro tendsto_intros) using H by (auto simp add: dist_commute) have b:"(\n. 
dist (t n) (z n) + dist (x n) (y n)) \ 0 + 0" apply (rule tendsto_add) using H by (auto simp add: dist_commute) have I: "dist (x n) (z n) \ dist (t n) (y n) + (dist (t n) (z n) + dist (x n) (y n))" for n using dist_triangle[of "x n" "z n" "y n"] dist_triangle[of "y n" "z n" "t n"] by (auto simp add: dist_commute add.commute) show "(\n. dist (x n) (z n) - dist (y n) (t n)) \ 0" apply (rule tendsto_sandwich[of "\n. -(dist (x n) (y n) + dist (z n) (t n))" _ _ "\n. dist (x n) (y n) + dist (z n) (t n)"]) apply (auto intro!: always_eventually simp add: algebra_simps dist_commute I) apply (meson add_left_mono dist_triangle3 dist_triangle_le) using a b by auto qed qed lemma dist_metric_completion_limit: fixes x y::"'a metric_completion" shows "(\n. dist (rep_metric_completion x n) (rep_metric_completion y n)) \ dist x y" proof - have C: "Cauchy (rep_metric_completion x)" "Cauchy (rep_metric_completion y)" using Quotient3_metric_completion Quotient3_rep_reflp by fastforce+ show ?thesis unfolding dist_metric_completion_def using C apply auto using convergent_Cauchy_dist[OF C] convergent_LIMSEQ_iff by force qed lemma dist_metric_completion_limit': fixes x y::"nat \ 'a" assumes "Cauchy x" "Cauchy y" shows "(\n. dist (x n) (y n)) \ dist (abs_metric_completion x) (abs_metric_completion y)" apply (subst dist_metric_completion.abs_eq) using assms convergent_Cauchy_dist[OF assms] by (auto simp add: convergent_LIMSEQ_iff) text \To define a metric space in the current library of Isabelle/HOL, one should also introduce a uniformity structure and a topology, as follows (they are prescribed by the distance):\ definition uniformity_metric_completion::"(('a metric_completion) \ ('a metric_completion)) filter" where "uniformity_metric_completion = (INF e\{0 <..}. principal {(x, y). dist x y < e})" definition open_metric_completion :: "'a metric_completion set \ bool" where "open_metric_completion U = (\x\U. eventually (\(x', y). x' = x \ y \ U) uniformity)" instance proof fix x y::"'a metric_completion" have C: "Cauchy (rep_metric_completion x)" "Cauchy (rep_metric_completion y)" using Quotient3_metric_completion Quotient3_rep_reflp by fastforce+ show "(dist x y = 0) = (x = y)" apply (subst Quotient3_rel_rep[OF Quotient3_metric_completion, symmetric]) unfolding dist_metric_completion_def using C apply auto using convergent_Cauchy_dist[OF C] convergent_LIMSEQ_iff apply force by (simp add: limI) next fix x y z::"'a metric_completion" have a: "(\n. dist (rep_metric_completion x n) (rep_metric_completion y n)) \ dist x y" using dist_metric_completion_limit by auto have b: "(\n. dist (rep_metric_completion x n) (rep_metric_completion z n) + dist (rep_metric_completion y n) (rep_metric_completion z n)) \ dist x z + dist y z" apply (rule tendsto_add) using dist_metric_completion_limit by auto show "dist x y \ dist x z + dist y z" by (rule LIMSEQ_le[OF a b], rule exI[of _ 0], auto simp add: dist_triangle2) qed (auto simp add: uniformity_metric_completion_def open_metric_completion_def) end text \Let us now show that the distance thus defined on the metric completion is indeed complete. This is essentially by design.\ instance metric_completion :: (metric_space) complete_space proof fix X::"nat \ 'a metric_completion" assume "Cauchy X" have *: "\N. \n \ N. dist (rep_metric_completion (X k) N) (rep_metric_completion (X k) n) < (1/Suc k)" for k proof - have "Cauchy (rep_metric_completion (X k))" using Quotient3_metric_completion Quotient3_rep_reflp by fastforce+ then have "\N. \m \ N. \n \ N. 
dist (rep_metric_completion (X k) m) (rep_metric_completion (X k) n) < (1/Suc k)" unfolding Cauchy_def by auto then show ?thesis by auto qed have "\N. \k. \n \ N k. dist (rep_metric_completion (X k) (N k)) (rep_metric_completion (X k) n) < (1/Suc k)" apply (rule choice) using * by auto then obtain N::"nat \ nat" where N: "dist (rep_metric_completion (X k) (N k)) (rep_metric_completion (X k) n) < (1/Suc k)" if "n \ N k" for n k by auto define u where "u = (\k. rep_metric_completion (X k) (N k))" have "Cauchy u" proof (rule metric_CauchyI) fix e::real assume "e > 0" obtain K::nat where "K > 4/e" using reals_Archimedean2 by blast obtain L::nat where L: "\m \ L. \n \ L. dist (X m) (X n) < e/2" using metric_CauchyD[OF \Cauchy X\, of "e/2"] \e > 0\ by auto { fix m n assume "m \ max K L" "n \ max K L" then have "dist (X m) (X n) < e/2" using L by auto then have "eventually (\p. dist (rep_metric_completion (X m) p) (rep_metric_completion (X n) p) < e/2) sequentially" using dist_metric_completion_limit[of "X m" "X n"] by (metis order_tendsto_iff) then obtain p where p: "p \ max (N m) (N n)" "dist (rep_metric_completion (X m) p) (rep_metric_completion (X n) p) < e/2" using eventually_False_sequentially eventually_elim2 eventually_ge_at_top by blast have "dist (u m) (rep_metric_completion (X m) p) < 1 / real (Suc m)" unfolding u_def using N[of m p] p(1) by auto also have "... < e/4" using \m \ max K L\ \K > 4/e\ \e > 0\ apply (auto simp add: divide_simps algebra_simps) - by (metis leD le_less_trans less_add_same_cancel2 linear of_nat_le_iff real_mult_le_cancel_iff2) + by (metis leD le_less_trans less_add_same_cancel2 linear of_nat_le_iff mult_le_cancel_iff2) finally have Im: "dist (u m) (rep_metric_completion (X m) p) < e/4" by simp have "dist (u n) (rep_metric_completion (X n) p) < 1 / real (Suc n)" unfolding u_def using N[of n p] p(1) by auto also have "... < e/4" using \n \ max K L\ \K > 4/e\ \e > 0\ apply (auto simp add: divide_simps algebra_simps) - by (metis leD le_less_trans less_add_same_cancel2 linear of_nat_le_iff real_mult_le_cancel_iff2) + by (metis leD le_less_trans less_add_same_cancel2 linear of_nat_le_iff mult_le_cancel_iff2) finally have In: "dist (u n) (rep_metric_completion (X n) p) < e/4" by simp have "dist (u m) (u n) \ dist (u m) (rep_metric_completion (X m) p) + dist (rep_metric_completion (X m) p) (rep_metric_completion (X n) p) + dist (rep_metric_completion (X n) p) (u n)" by (metis add.commute add_left_mono dist_commute dist_triangle_le dist_triangle) also have "... < e/4 + e/2 + e/4" using In Im p(2) by (simp add: dist_commute) also have "... = e" by auto finally have "dist (u m) (u n) < e" by auto } then show "\M. \m \ M. \n \ M. dist (u m) (u n) < e" by meson qed have *: "(\n. dist (abs_metric_completion u) (X n)) \ 0" proof (rule order_tendstoI, auto simp add: less_le_trans eventually_sequentially) fix e::real assume "e > 0" obtain K::nat where "K > 4/e" using reals_Archimedean2 by blast obtain L::nat where L: "\m \ L. \n \ L. dist (u m) (u n) < e/4" using metric_CauchyD[OF \Cauchy u\, of "e/4"] \e > 0\ by auto { fix n assume n: "n \ max K L" { fix p assume p: "p \ max (N n) L" have "dist (u n) (rep_metric_completion (X n) p) < 1/(Suc n)" unfolding u_def using N p by simp also have "... 
< e/4" using \n \ max K L\ \K > 4/e\ \e > 0\ apply (auto simp add: divide_simps algebra_simps) - by (metis leD le_less_trans less_add_same_cancel2 linear of_nat_le_iff real_mult_le_cancel_iff2) + by (metis leD le_less_trans less_add_same_cancel2 linear of_nat_le_iff mult_le_cancel_iff2) finally have *: "dist (u n) (rep_metric_completion (X n) p) < e/4" by fastforce have "dist (u p) (rep_metric_completion (X n) p) \ dist (u p) (u n) + dist (u n) (rep_metric_completion (X n) p)" using dist_triangle by auto also have "... < e/4 + e/4" using * L n p by force finally have "dist (u p) (rep_metric_completion (X n) p) \ e/2" by auto } then have A: "eventually (\p. dist (u p) (rep_metric_completion (X n) p) \ e/2) sequentially" using eventually_at_top_linorder by blast have B: "(\p. dist (u p) (rep_metric_completion (X n) p)) \ dist (abs_metric_completion u) (X n)" using dist_metric_completion_limit'[OF \Cauchy u\, of "rep_metric_completion (X n)"] unfolding Quotient3_abs_rep[OF Quotient3_metric_completion, of "X n"] using Quotient3_rep_reflp[OF Quotient3_metric_completion] by auto have "dist (abs_metric_completion u) (X n) \ e/2" apply (rule LIMSEQ_le_const2[OF B]) using A unfolding eventually_sequentially by auto then have "dist (abs_metric_completion u) (X n) < e" using \e > 0\ by auto } then show "\N. \n \ N. dist (abs_metric_completion u) (X n) < e" by blast qed have "X \ abs_metric_completion u" apply (rule tendstoI) using * by (auto simp add: order_tendsto_iff dist_commute) then show "convergent X" unfolding convergent_def by auto qed subsection \Isometric embedding of a space in its metric completion\ text \The canonical embedding of a space into its metric completion is obtained by taking the Cauchy sequence which is constant, equal to the given point. This is indeed an isometric embedding with dense image, as we prove in the lemmas below.\ definition to_metric_completion::"('a::metric_space) \ 'a metric_completion" where "to_metric_completion x = abs_metric_completion (\n. x)" lemma to_metric_completion_isometry: "isometry_on UNIV to_metric_completion" proof (rule isometry_onI) fix x y::'a have "(\n. dist (x) (y)) \ dist (to_metric_completion x) (to_metric_completion y)" unfolding to_metric_completion_def apply (rule dist_metric_completion_limit') unfolding Cauchy_def by auto then show "dist (to_metric_completion x) (to_metric_completion y) = dist x y" by (simp add: LIMSEQ_const_iff) qed lemma to_metric_completion_dense: assumes "open U" "U \ {}" shows "\x. to_metric_completion x \ U" proof - obtain y where "y \ U" using \U \ {}\ by auto obtain e::real where e: "e > 0" "\z. dist z y < e \ z \ U" using \y \ U\ \open U\ by (metis open_dist) have *: "Cauchy (rep_metric_completion y)" using Quotient3_metric_completion Quotient3_rep_reflp by fastforce then obtain N where N: "\n \ N. \m \ N. dist (rep_metric_completion y n) (rep_metric_completion y m) < e/2" using \e > 0\ unfolding Cauchy_def by (meson divide_pos_pos zero_less_numeral) define x where "x = rep_metric_completion y N" have "(\n. dist x (rep_metric_completion y n)) \ dist (to_metric_completion x) (abs_metric_completion (rep_metric_completion y))" unfolding to_metric_completion_def apply (rule dist_metric_completion_limit') using * unfolding Cauchy_def by auto then have "(\n. dist x (rep_metric_completion y n)) \ dist (to_metric_completion x) y" unfolding Quotient3_abs_rep[OF Quotient3_metric_completion] by simp moreover have "eventually (\n. 
dist x (rep_metric_completion y n) \ e/2) sequentially" unfolding eventually_sequentially x_def apply (rule exI[of _ N]) using N less_imp_le by auto ultimately have "dist (to_metric_completion x) y \ e/2" using LIMSEQ_le_const2 unfolding eventually_sequentially by metis then have "to_metric_completion x \ U" using e by auto then show ?thesis by auto qed lemma to_metric_completion_dense': "closure (range to_metric_completion) = UNIV" apply (auto simp add: closure_iff_nhds_not_empty) using to_metric_completion_dense by fastforce text \The main feature of the completion is that a uniformly continuous function on the original space can be extended to a uniformly continuous function on the completion, i.e., it can be written as the composition of a new function and of the inclusion \verb+to_metric_completion+.\ lemma lift_to_metric_completion: fixes f::"('a::metric_space) \ ('b::complete_space)" assumes "uniformly_continuous_on UNIV f" shows "\g. (uniformly_continuous_on UNIV g) \ (f = g o to_metric_completion) \ (\x \ range to_metric_completion. g x = f (inv to_metric_completion x))" proof - define I::"'a metric_completion \ 'a" where "I = inv to_metric_completion" have "uniformly_continuous_on (range to_metric_completion) I" using isometry_on_uniformly_continuous[OF isometry_on_inverse(1)[OF to_metric_completion_isometry]] I_def by auto then have UC: "uniformly_continuous_on (range to_metric_completion) (\x. f (I x))" using assms uniformly_continuous_on_compose by (metis I_def bij_betw_imp_surj_on bij_betw_inv_into isometry_on_inverse(4) to_metric_completion_isometry) obtain g where g: "uniformly_continuous_on (closure(range to_metric_completion)) g" "\x. x \ range to_metric_completion \ f (I x) = g x" using uniformly_continuous_on_extension_on_closure[OF UC] by metis have "uniformly_continuous_on UNIV g" using to_metric_completion_dense' g(1) by metis moreover have "f x = g (to_metric_completion x)" for x using g(2) by (metis I_def UNIV_I isometry_on_inverse(2) range_eqI to_metric_completion_isometry) moreover have "g x = f (inv to_metric_completion x)" if "x \ range to_metric_completion" for x using I_def g(2) that by auto ultimately show ?thesis unfolding comp_def by auto qed text \When the function is an isometry, the lifted function is also an isometry (and its range is the closure of the range of the original function). This shows that the metric completion is unique, up to isometry:\ lemma lift_to_metric_completion_isometry: fixes f::"('a::metric_space) \ ('b::complete_space)" assumes "isometry_on UNIV f" shows "\g. isometry_on UNIV g \ range g = closure(range f) \ f = g o to_metric_completion \ (\x \ range to_metric_completion. g x = f (inv to_metric_completion x))" proof - have *: "uniformly_continuous_on UNIV f" using assms isometry_on_uniformly_continuous by force obtain g where g: "uniformly_continuous_on UNIV g" "f = g o to_metric_completion" "\x. 
x \ range to_metric_completion \ g x = f (inv to_metric_completion x)" using lift_to_metric_completion[OF *] by blast have *: "isometry_on (range to_metric_completion) g" apply (rule isometry_on_cong[OF _ g(3)], rule isometry_on_compose[of _ _ f]) using assms isometry_on_inverse[OF to_metric_completion_isometry] isometry_on_subset by (auto) (fastforce) then have "isometry_on UNIV g" unfolding to_metric_completion_dense'[symmetric] apply (rule isometry_on_closure) using continuous_on_subset[OF uniformly_continuous_imp_continuous[OF g(1)]] by auto have "g`(range to_metric_completion) \ range f" using g unfolding comp_def by auto moreover have "g`(closure (range to_metric_completion)) \ closure (g`(range to_metric_completion))" using uniformly_continuous_imp_continuous[OF g(1)] by (meson closed_closure closure_subset continuous_on_subset image_closure_subset top_greatest) ultimately have "range g \ closure (range f)" unfolding to_metric_completion_dense' by (simp add: g(2) image_comp) have "range f \ range g" using g(2) by auto moreover have "closed (range g)" using isometry_on_complete_image[OF \isometry_on UNIV g\] by (simp add: complete_eq_closed) ultimately have "closure (range f) \ range g" by (simp add: closure_minimal) then have "range g = closure (range f)" using \range g \ closure (range f)\ by auto then show ?thesis using \isometry_on UNIV g\ g by metis qed subsection \The metric completion of a second countable space is second countable\ text \We want to show that the metric completion of a second countable space is still second countable. This is most easily expressed using the fact that a metric space is second countable if and only if there exists a dense countable subset. We prove the equivalence in the next lemma, and use it then to prove that the metric completion is still second countable.\ lemma second_countable_iff_dense_countable_subset: "(\B::'a::metric_space set set. countable B \ topological_basis B) \ (\A::'a set. countable A \ closure A = UNIV)" proof assume "\B::'a set set. countable B \ topological_basis B" then obtain B::"'a set set" where "countable B" "topological_basis B" by auto define A where "A = (\U. SOME x. x \ U)`B" have "countable A" unfolding A_def using \countable B\ by auto moreover have "closure A = UNIV" proof (auto simp add: closure_approachable) fix x::'a and e::real assume "e > 0" obtain U where "U \ B" "x \ U" "U \ ball x e" by (rule topological_basisE[OF \topological_basis B\, of "ball x e" x], auto simp add: \e > 0\) define y where "y = (\U. SOME x. x \ U) U" have "y \ U" unfolding y_def using \x \ U\ some_in_eq by fastforce then have "dist y x < e" using \U \ ball x e\ by (metis dist_commute mem_ball subset_iff) moreover have "y \ A" unfolding A_def y_def using \U \ B\ by auto ultimately show "\y\A. dist y x < e" by auto qed ultimately show "\A::'a set. countable A \ closure A = UNIV" by auto next assume "\A::'a set. countable A \ closure A = UNIV" then obtain A::"'a set" where "countable A" "closure A = UNIV" by auto define B where "B = (\(x, (n::nat)). 
ball x (1/n))`(A \ UNIV)" have "countable B" unfolding B_def using \countable A\ by auto moreover have "topological_basis B" proof (rule topological_basisI) fix x::'a and U assume "x \ U" "open U" then obtain e where "e > 0" "ball x e \ U" using openE by blast obtain n::nat where "n > 2/e" using reals_Archimedean2 by auto then have "n > 0" using \e > 0\ not_less by fastforce then have "1/n > 0" using zero_less_divide_iff by fastforce then obtain y where y: "y \ A" "dist x y < 1/n" by (metis \closure A = UNIV\ UNIV_I closure_approachable dist_commute) then have "ball y (1/n) \ B" unfolding B_def by auto moreover have "x \ ball y (1/n)" using y(2) by (auto simp add: dist_commute) moreover have "ball y (1/n) \ U" proof (auto) fix z assume z: "dist y z < 1/n" have "dist z x \ dist z y + dist y x" using dist_triangle by auto also have "... < 1/n + 1/n" using z y(2) by (auto simp add: dist_commute) also have "... < e" using \n > 2/e\ \e > 0\ \n > 0\ by (auto simp add: divide_simps mult.commute) finally have "z \ ball x e" by (auto simp add: dist_commute) then show "z \ U" using \ball x e \ U\ by auto qed ultimately show "\V\B. x \ V \ V \ U" by metis qed (auto simp add: B_def) ultimately show "\B::'a set set. countable B \ topological_basis B" by auto qed lemma second_countable_metric_dense_subset: "\A::'a::{metric_space, second_countable_topology} set. countable A \ closure A = UNIV" using ex_countable_basis by (auto simp add: second_countable_iff_dense_countable_subset[symmetric]) instance metric_completion::("{metric_space, second_countable_topology}") second_countable_topology proof obtain A::"'a set" where "countable A" "closure A = UNIV" using second_countable_metric_dense_subset by auto define Ab where "Ab = to_metric_completion`A" have "range to_metric_completion \ closure Ab" unfolding Ab_def by (metis \closure A = UNIV\ isometry_on_continuous[OF to_metric_completion_isometry] closed_closure closure_subset image_closure_subset) then have "closure Ab = UNIV" by (metis (no_types) to_metric_completion_dense'[symmetric] \range to_metric_completion \ closure Ab\ closure_closure closure_mono top.extremum_uniqueI) moreover have "countable Ab" unfolding Ab_def using \countable A\ by auto ultimately have "\Ab::'a metric_completion set. countable Ab \ closure Ab = UNIV" by auto then show "\B::'a metric_completion set set. countable B \ open = generate_topology B" using second_countable_iff_dense_countable_subset topological_basis_imp_subbasis by auto qed instance metric_completion::("{metric_space, second_countable_topology}") polish_space by standard end (*of theory Metric_Completion*) diff --git a/thys/Interval_Arithmetic_Word32/Interval_Word32.thy b/thys/Interval_Arithmetic_Word32/Interval_Word32.thy --- a/thys/Interval_Arithmetic_Word32/Interval_Word32.thy +++ b/thys/Interval_Arithmetic_Word32/Interval_Word32.thy @@ -1,4143 +1,4143 @@ (* Author: Brandon Bohrer *) theory Interval_Word32 imports Complex_Main Word_Lib.Word_Lib_Sumo begin abbreviation signed_real_of_word :: \'a::len word \ real\ where \signed_real_of_word \ signed\ lemma (in linordered_idom) signed_less_numeral_iff: \signed w < numeral n \ sint w < numeral n\ (is \?P \ ?Q\) proof - have \?Q \ of_int (sint w) < of_int (numeral n)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by (transfer fixing: less less_eq n) simp finally show ?thesis .. 
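(* The signed-word comparison lemmas in this block all follow the same two-step pattern: the integer comparison ?Q is first rewritten to a comparison of of_int images (via of_int_less_iff, of_int_le_iff or of_int_eq_iff), and the resulting statement is then identified with the real-valued comparison ?P by transfer or simp. *)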
qed lemma (in linordered_idom) signed_less_neg_numeral_iff: \signed w < - numeral n \ sint w < - numeral n\ (is \?P \ ?Q\) proof - have \?Q \ of_int (sint w) < of_int (- numeral n)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by (transfer fixing: less less_eq uminus n) simp finally show ?thesis .. qed lemma (in linordered_idom) numeral_less_signed_iff: \numeral n < signed w \ numeral n < sint w\ (is \?P \ ?Q\) proof - have \?Q \ of_int (numeral n) < of_int (sint w)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by (transfer fixing: less less_eq n) simp finally show ?thesis .. qed lemma (in linordered_idom) neg_numeral_less_signed_iff: \- numeral n < signed w \ - numeral n < sint w\ (is \?P \ ?Q\) proof - have \?Q \ of_int (- numeral n) < of_int (sint w)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by (transfer fixing: less less_eq uminus n) simp finally show ?thesis .. qed lemma (in linordered_idom) signed_nonnegative_iff: \0 \ signed w \ 0 \ sint w\ (is \?P \ ?Q\) proof - have \?Q \ of_int 0 \ of_int (sint w)\ by (simp only: of_int_le_iff) also have \\ \ ?P\ by (transfer fixing: less_eq) simp finally show ?thesis .. qed lemma signed_real_of_word_plus_numeral_eq_signed_real_of_word_iff: \signed_real_of_word v + numeral n = signed_real_of_word w \ sint v + numeral n = sint w\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (sint v + numeral n) = real_of_int (sint w)\ by (simp only: of_int_eq_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma signed_real_of_word_sum_less_eq_numeral_iff: \signed_real_of_word v + signed_real_of_word w \ numeral n \ sint v + sint w \ numeral n\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (sint v + sint w) \ real_of_int (numeral n)\ by (simp only: of_int_le_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma signed_real_of_word_sum_less_eq_neg_numeral_iff: \signed_real_of_word v + signed_real_of_word w \ - numeral n \ sint v + sint w \ - numeral n\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (sint v + sint w) \ real_of_int (- numeral n)\ by (simp only: of_int_le_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma signed_real_of_word_sum_less_numeral_iff: \signed_real_of_word v + signed_real_of_word w < numeral n \ sint v + sint w < numeral n\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (sint v + sint w) < real_of_int (numeral n)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma signed_real_of_word_sum_less_neg_numeral_iff: \signed_real_of_word v + signed_real_of_word w < - numeral n \ sint v + sint w < - numeral n\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (sint v + sint w) < real_of_int (- numeral n)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma numeral_less_eq_signed_real_of_word_sum: \numeral n \ signed_real_of_word v + signed_real_of_word w \ numeral n \ sint v + sint w\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (numeral n) \ real_of_int (sint v + sint w)\ by (simp only: of_int_le_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma neg_numeral_less_eq_signed_real_of_word_sum: \- numeral n \ signed_real_of_word v + signed_real_of_word w \ - numeral n \ sint v + sint w\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (- numeral n) \ real_of_int (sint v + sint w)\ by (simp only: of_int_le_iff) also have \\ \ ?P\ by simp finally show ?thesis .. 
qed lemma numeral_less_signed_real_of_word_sum: \numeral n < signed_real_of_word v + signed_real_of_word w \ numeral n < sint v + sint w\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (numeral n) < real_of_int (sint v + sint w)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemma neg_numeral_less_signed_real_of_word_sum: \- numeral n < signed_real_of_word v + signed_real_of_word w \ - numeral n < sint v + sint w\ (is \?P \ ?Q\) proof - have \?Q \ real_of_int (- numeral n) < real_of_int (sint v + sint w)\ by (simp only: of_int_less_iff) also have \\ \ ?P\ by simp finally show ?thesis .. qed lemmas real_of_word_simps [simp] = signed_less_numeral_iff [where ?'a = real] numeral_less_signed_iff [where ?'a = real] signed_less_neg_numeral_iff [where ?'a = real] neg_numeral_less_signed_iff [where ?'a = real] signed_nonnegative_iff [where ?'a = real] lemmas more_real_of_word_simps = signed_real_of_word_plus_numeral_eq_signed_real_of_word_iff signed_real_of_word_sum_less_eq_numeral_iff signed_real_of_word_sum_less_eq_neg_numeral_iff signed_real_of_word_sum_less_numeral_iff signed_real_of_word_sum_less_neg_numeral_iff numeral_less_eq_signed_real_of_word_sum neg_numeral_less_eq_signed_real_of_word_sum numeral_less_signed_real_of_word_sum neg_numeral_less_signed_real_of_word_sum declare more_real_of_word_simps [simp] text\Interval-Word32.thy implements conservative interval arithmetic operators on 32-bit word values, with explicit infinities for values outside the representable bounds. It is suitable for use in interpreters for languages which must have a well-understood low-level behavior (see Interpreter.thy). This work was originally part of the paper by Bohrer \emph{et al.}~\cite{BohrerTMMP18}. It is worth noting that this is not the first formalization of interval arithmetic in Isabelle/HOL. This article is presented regardless because it has unique goals in mind which have led to unique design decisions. Our goal is to generate code which can be used to perform conservative arithmetic in implementations extracted from a proof. The Isabelle standard library now features interval arithmetic, for example in Approximation.thy. Ours differs in two ways: 1) We use intervals with explicit positive and negative infinities, and with overflow checking. Such checking is often relevant in implementation-level code with unknown inputs. To promote memory-efficient implementations, we moreover use sentinel values for infinities, rather than datatype constructors. This is especially important in real-time settings where the garbage collection required for datatypes can be a concern. 2) Our goal is not to use interval arithmetic to discharge Isabelle goals, but to generate useful proven-correct implementation code, see Interpreter.thy. On the other hand, we are not concerned with producing interval-based automation for arithmetic goals in HOL. In practice, much of the work in this theory comes down to sheer case-analysis. Bounds-checking requires many edge cases in arithmetic functions, which come with many cases in proofs. Where possible, we attempt to offload interesting facts about word representations of numbers into reusable lemmas, but even then the main results require many subcases, each with a certain amount of arithmetic grunt work. \ section \Interval arithmetic definitions\ subsection \Syntax\ text\Words are 32-bit\ type_synonym word = "32 Word.word" text\Sentinel values for infinities.
Note that we leave the maximum value ($2^{31}$) completely unused, so that negation of $(2^{31})-1$ is not an edge case\ definition NEG_INF::"word" where NEG_INF_def[simp]:"NEG_INF = -((2 ^ 31) -1)" definition NegInf::"real" where NegInf[simp]:"NegInf = real_of_int (sint NEG_INF)" definition POS_INF::"word" where POS_INF_def[simp]:"POS_INF = (2^31) - 1" definition PosInf::"real" where PosInf[simp]:"PosInf = real_of_int (sint POS_INF)" text\Subtype of words that represent a finite value. \ typedef bword = "{n::word. sint n \ sint NEG_INF \ sint n \ sint POS_INF}" apply(rule exI[where x=NEG_INF]) by (auto) text\Numeric literals\ type_synonym lit = bword setup_lifting type_definition_bword lift_definition bword_zero::"bword" is "0::32 Word.word" by auto lift_definition bword_one::"bword" is "1::32 Word.word" by(auto simp add: sint_uint) lift_definition bword_neg_one::"bword" is "-1::32 Word.word" by(auto) definition word::"word \ bool" where word_def[simp]:"word w \ w \ {NEG_INF..POS_INF}" named_theorems rep_simps "Simplifications for representation functions" text\Definitions of interval containment and word representation: repe w r iff word w encodes the real number r\ inductive repe ::"word \ real \ bool" (infix "\\<^sub>E" 10) where repPOS_INF:"r \ real_of_int (sint POS_INF) \ repe POS_INF r" | repNEG_INF:"r \ real_of_int (sint NEG_INF) \ repe NEG_INF r" | repINT:"(sint w) < real_of_int(sint POS_INF) \ (sint w) > real_of_int(sint NEG_INF) \ repe w (sint w)" inductive_simps repePos_simps[rep_simps]:"repe POS_INF r" and repeNeg_simps[rep_simps]:"repe NEG_INF r" and repeInt_simps[rep_simps]:"repe w (sint w)" text\repU w r if w represents an upper bound of r\ definition repU ::"word \ real \ bool" (infix "\\<^sub>U" 10) where "repU w r \ \ r'. r' \ r \ repe w r'" lemma repU_leq:"repU w r \ r' \ r \ repU w r'" unfolding repU_def using order_trans by auto text\repL w r if w represents a lower bound of r\ definition repL ::"word \ real \ bool" (infix "\\<^sub>L" 10) where "repL w r \ \ r'. r' \ r \ repe w r'" lemma repL_geq:"repL w r \ r' \ r \ repL w r'" unfolding repL_def using order_trans by auto text\repP (l,u) r iff l and u encode lower and upper bounds of r\ definition repP ::"word * word \ real \ bool" (infix "\\<^sub>P" 10) where "repP w r \ let (w1, w2) = w in repL w1 r \ repU w2 r" lemma int_not_posinf: assumes b1:"real_of_int (sint ra) < real_of_int (sint POS_INF)" assumes b2:"real_of_int (sint NEG_INF) < real_of_int (sint ra)" shows "ra \ POS_INF" using b1 b2 by auto lemma int_not_neginf: assumes b1:" real_of_int (sint ra) < real_of_int (sint POS_INF)" assumes b2:" real_of_int (sint NEG_INF) < real_of_int (sint ra)" shows "ra \ NEG_INF" using b1 b2 by auto lemma int_not_undef: assumes b1:"real_of_int (sint ra) < real_of_int (sint POS_INF)" assumes b2:"real_of_int (sint NEG_INF) < real_of_int (sint ra)" shows "ra \ NEG_INF-1" using b1 b2 by auto lemma sint_range: assumes b1:"real_of_int (sint ra) < real_of_int (sint POS_INF)" assumes b2:"real_of_int (sint NEG_INF) < real_of_int (sint ra)" shows "sint ra \ {i.
i > -((2^31)-1) \ i < (2^31)-1}" using b1 b2 by auto lemma word_size_neg: fixes w :: "32 Word.word" shows "size (-w) = size w" using Word.word_size[of w] Word.word_size[of "-w"] by auto lemma uint_distinct: fixes w1 w2 shows "w1 \ w2 \ uint w1 \ uint w2" by auto section \Preliminary lemmas\ subsection \Case analysis lemmas\ text\Case analysis principle for pairs of intervals, used in proofs of arithmetic operations\ lemma ivl_zero_case: fixes l1 u1 l2 u2 :: real assumes ivl1:"l1 \ u1" assumes ivl2:"l2 \ u2" shows "(l1 \ 0 \ 0 \ u1 \ l2 \ 0 \ 0 \ u2) \(l1 \ 0 \ 0 \ u1 \ 0 \ l2) \(l1 \ 0 \ 0 \ u1 \ u2 \ 0) \(0 \ l1 \ l2 \ 0 \ 0 \ u2) \(u1 \ 0 \ l2 \ 0 \ 0 \ u2) \(u1 \ 0 \ u2 \ 0) \(u1 \ 0 \ 0 \ l2) \(0 \ l1 \ u2 \ 0) \(0 \ l1 \ 0 \ l2)" using ivl1 ivl2 by (metis le_cases) lemma case_ivl_zero [consumes 2, case_names ZeroZero ZeroPos ZeroNeg PosZero NegZero NegNeg NegPos PosNeg PosPos]: fixes l1 u1 l2 u2 :: real shows "l1 \ u1 \ l2 \ u2 \ ((l1 \ 0 \ 0 \ u1 \ l2 \ 0 \ 0 \ u2) \ P) \ ((l1 \ 0 \ 0 \ u1 \ 0 \ l2) \ P) \ ((l1 \ 0 \ 0 \ u1 \ u2 \ 0) \ P) \ ((0 \ l1 \ l2 \ 0 \ 0 \ u2) \ P) \ ((u1 \ 0 \ l2 \ 0 \ 0 \ u2) \ P) \ ((u1 \ 0 \ u2 \ 0) \ P) \ ((u1 \ 0 \ 0 \ l2) \ P) \ ((0 \ l1 \ u2 \ 0) \ P) \ ((0 \ l1 \ 0 \ l2) \ P) \ P" using ivl_zero_case[of l1 u1 l2 u2] by auto lemma case_inf2[case_names PosPos PosNeg PosNum NegPos NegNeg NegNum NumPos NumNeg NumNum]: shows "\w1 w2 P. (w1 = POS_INF \ w2 = POS_INF \ P w1 w2) \ (w1 = POS_INF \ w2 = NEG_INF \ P w1 w2) \ (w1 = POS_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ (w1 = NEG_INF \ w2 = POS_INF \ P w1 w2) \ (w1 = NEG_INF \ w2 = NEG_INF \ P w1 w2) \ (w1 = NEG_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 = POS_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 = NEG_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ P w1 w2" by(auto) lemma case_pu_inf[case_names PosAny AnyPos NegNeg NegNum NumNeg NumNum]: shows "\w1 w2 P. (w1 = POS_INF \ P w1 w2) \ (w2 = POS_INF \ P w1 w2) \ (w1 = NEG_INF \ w2 = NEG_INF \ P w1 w2) \ (w1 = NEG_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 = NEG_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ P w1 w2" by(auto) lemma case_pl_inf[case_names NegAny AnyNeg PosPos PosNum NumPos NumNum]: shows "\w1 w2 P. 
(w1 = NEG_INF \ P w1 w2) \ (w2 = NEG_INF \ P w1 w2) \ (w1 = POS_INF \ w2 = POS_INF \ P w1 w2) \ (w1 = POS_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 = POS_INF \ P w1 w2) \ (w1 \ POS_INF \ w1 \ NEG_INF \ w2 \ POS_INF \ w2 \ NEG_INF \ P w1 w2) \ P w1 w2" by(auto) lemma word_trichotomy[case_names Less Equal Greater]: fixes w1 w2 :: word shows "(w1 P w1 w2) \ (w1 = w2 \ P w1 w2) \ (w2 P w1 w2) \ P w1 w2" using signed.linorder_cases by auto lemma case_times_inf [case_names PosPos NegPos PosNeg NegNeg PosLo PosHi PosZero NegLo NegHi NegZero LoPos HiPos ZeroPos LoNeg HiNeg ZeroNeg AllFinite]: fixes w1 w2 P assumes pp:"(w1 = POS_INF \ w2 = POS_INF \ P w1 w2)" and np:"(w1 = NEG_INF \ w2 = POS_INF \ P w1 w2)" and pn:"(w1 = POS_INF \ w2 = NEG_INF \ P w1 w2)" and nn:"(w1 = NEG_INF \ w2 = NEG_INF \ P w1 w2)" and pl:"(w1 = POS_INF \ w2 \ NEG_INF \ w2 P w1 w2)" and ph:"(w1 = POS_INF \ w2 \ POS_INF \ 0 P w1 w2)" and pz:"(w1 = POS_INF \ w2 = 0 \ P w1 w2)" and nl:"(w1 = NEG_INF \ w2 \ NEG_INF \ w2 P w1 w2)" and nh:"(w1 = NEG_INF \ w2 \ POS_INF \ 0 P w1 w2)" and nz:"(w1 = NEG_INF \ 0 = w2 \ P w1 w2)" and lp:"(w1 \ NEG_INF \ w1 w2 = POS_INF \ P w1 w2)" and hp:"(w1 \ POS_INF \ 0 w2 = POS_INF \ P w1 w2)" and zp:"(0 = w1 \ w2 = POS_INF \ P w1 w2)" and ln:"(w1 \ NEG_INF \ w1 w2 = NEG_INF \ P w1 w2)" and hn:"(w1 \ POS_INF \ 0 w2 = NEG_INF \ P w1 w2)" and zn:"(0 = w1 \ w2 = NEG_INF \ P w1 w2)" and allFinite:"w1 \ NEG_INF \ w1 \ POS_INF \ w2 \ NEG_INF \ w2 \ POS_INF \ P w1 w2" shows " P w1 w2" proof (cases rule: word_trichotomy[of w1 0]) case Less then have w1l:"w1 Trivial arithmetic lemmas\ lemma max_diff_pos:"0 \ 9223372034707292161 + ((-(2 ^ 31))::real)" by auto lemma max_less:"2 ^ 31 < (9223372039002259455::int)" by auto lemma sints64:"sints 64 = {i. - (2 ^ 63) \ i \ i < 2 ^ 63}" using sints_def[of 64] range_sbintrunc[of 63] by auto lemma sints32:"sints 32 = {i. - (2 ^ 31) \ i \ i < 2 ^ 31}" using sints_def[of 32] range_sbintrunc[of 31] by auto lemma upcast_max:"sint((scast(0x80000001::word))::64 Word.word)=sint((0x80000001::32 Word.word))" by auto lemma upcast_min:"(0xFFFFFFFF80000001::64 Word.word) = ((scast (-0x7FFFFFFF::word))::64 Word.word)" by auto lemma min_extend_neg:"sint ((0xFFFFFFFF80000001)::64 Word.word) < 0" by auto lemma min_extend_val':"sint ((-0x7FFFFFFF)::64 Word.word) = (-0x7FFFFFFF)" by auto lemma min_extend_val:"(-0x7FFFFFFF::64 Word.word) = 0xFFFFFFFF80000001" by auto lemma range2s:"\x::int. x \ 2 ^ 31 - 1 \ x + (- 2147483647) < 2147483647" by auto section \Arithmetic operations\ text\This section defines operations which conservatively compute upper and lower bounds of arithmetic functions given upper and lower bounds on their arguments. Each function comes with a proof that it rounds in the advertised direction. 
\ subsection \Addition upper bound\ text\Upper bound of w1 + w2\ fun pu :: "word \ word \ word" where "pu w1 w2 = (if w1 = POS_INF then POS_INF else if w2 = POS_INF then POS_INF else if w1 = NEG_INF then (if w2 = NEG_INF then NEG_INF else (let sum::64 Word.word = ((scast w2)::64 Word.word) + ((scast NEG_INF)::64 Word.word) in if (sum::64 Word.word) <=s ((scast NEG_INF)::64 Word.word) then NEG_INF else scast sum)) else if w2 = NEG_INF then (let sum::64 Word.word = ((scast w1)::64 Word.word) + ((scast NEG_INF)::64 Word.word) in if (sum::64 Word.word) <=s ((scast NEG_INF)::64 Word.word) then NEG_INF else scast sum) else (let sum::64 Word.word = ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word) in if ((scast POS_INF)::64 Word.word) <=s (sum::64 Word.word) then POS_INF else if (sum::64 Word.word) <=s ((scast NEG_INF)::64 Word.word) then NEG_INF else scast sum))" lemma scast_down_range: fixes w::"'a::len Word.word" assumes "sint w \ sints (len_of (TYPE('b::len)))" shows "sint w = sint ((scast w)::'b Word.word)" using word_sint.Abs_inverse [OF assms] by simp lemma pu_lemma: fixes w1 w2 fixes r1 r2 :: real assumes up1:"w1 \\<^sub>U (r1::real)" assumes up2:"w2 \\<^sub>U (r2::real)" shows "pu w1 w2 \\<^sub>U (r1 + r2)" proof - have scast_eq1:"sint((scast w1)::64 Word.word) = sint w1" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have scast_eq2:"sint((scast (0x80000001::word))::64 Word.word) = sint ((0x80000001::32 Word.word))" by auto have scast_eq3:"sint((scast w2)::64 Word.word) = sint w2" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have w2Geq:"sint ((scast w2)::64 Word.word) \ - (2 ^ 31) " using word_sint.Rep[of "(w2)::32 Word.word"] sints32 Word.word_size scast_eq1 upcast_max scast_eq3 len32 mem_Collect_eq by auto have "sint ((scast w2)::64 Word.word) \ 2 ^ 31" apply (auto simp add: word_sint.Rep[of "(w2)::32 Word.word"] sints32 scast_eq3 len32) using word_sint.Rep[of "(w2)::32 Word.word"] len32[of "TYPE(32)"] sints32 by auto then have w2Less:"sint ((scast w2)::64 Word.word) < 9223372039002259455" by auto have w2Range: "-(2 ^ (size ((scast w2)::64 Word.word) - 1)) \ sint ((scast w2)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word) \ sint ((scast w2)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word) \ 2 ^ (size ((scast w2)::64 Word.word) - 1) - 1" apply(auto simp add: Word.word_size scast_eq1 upcast_max sints64 max_less) using max_diff_pos max_less w2Less w2Geq by auto have w2case1a:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (-0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF w2Range]) have w1Lower:"sint ((scast w1)::64 Word.word) \ - (2 ^ 31) " using word_sint.Rep[of "(w1)::32 Word.word"] sints32 Word.word_size scast_eq1 scast_eq2 scast_eq3 len32 mem_Collect_eq by auto have w1Leq:"sint ((scast w1)::64 Word.word) \ 2 ^ 31" apply (auto simp add: word_sint.Rep[of "(w1)::32 Word.word"] sints32 scast_eq1 len32) using word_sint.Rep[of "(w1)::32 Word.word"] len32[of "TYPE(32)"] sints32 by auto then have w1Less:"sint ((scast w1)::64 Word.word) < 9223372039002259455" using max_less by auto have w1MinusBound:" - (2 ^ (size ((scast w1)::64 Word.word) - 1)) \ sint ((scast w1)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word) \ sint ((scast w1)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word) \ 2 ^ (size ((scast w1)::64 Word.word) - 1) - 1" apply(auto simp add: Word.word_size[of "(scast w1)::64 Word.word"] Word.word_size[of "(scast (-0x7FFFFFFF))::64 Word.word"] scast_eq3 scast_eq2 
word_sint.Rep[of "(w1)::32 Word.word"] word_sint.Rep[of "0x80000001::32 Word.word"] word_sint.Rep[of "(scast w1)::64 Word.word"] word_sint.Rep[of "-0x7FFFFFFF::64 Word.word"] sints64 sints32) using w1Lower w1Less by auto have w1case1a:"sint (((scast w1)::64 Word.word) + (-0x7FFFFFFF::64 Word.word)) = sint ((scast w1)::64 Word.word) + sint (-0x7FFFFFFF::64 Word.word)" by (rule signed_arith_sint(1)[of "(scast w1)::64 Word.word" "(- 0x7FFFFFFF)", OF w1MinusBound]) have w1case1a':"sint (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001) = sint ((scast w1)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word)" using min_extend_val w1case1a by auto have w1Leq':"sint w1 \ 2^31 - 1" using word_sint.Rep[of "(w1)::32 Word.word"] by (auto simp add: sints32 len32[of "TYPE(32)"]) have neg64:"(((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) = ((scast w2)::64 Word.word) + (-0x7FFFFFFF)" by auto have arith:"\x::int. x \ 2 ^ 31 - 1 \ x + (- 2147483647) < 2147483647" by auto obtain r'\<^sub>1 and r'\<^sub>2 where geq1:"r'\<^sub>1\r1" and equiv1:"w1 \\<^sub>E r'\<^sub>1" and geq2:"r'\<^sub>2\r2" and equiv2:"w2 \\<^sub>E r'\<^sub>2" using up1 up2 unfolding repU_def by auto show ?thesis proof (cases rule: case_pu_inf[where ?w1.0=w1, where ?w2.0=w2]) case PosAny then show ?thesis apply (auto simp add: repU_def repe.simps) using linear by blast next case AnyPos then show ?thesis apply (auto simp add: repU_def repe.simps) using linear by blast next case NegNeg then show ?thesis using up1 up2 by (auto simp add: repU_def repe.simps) next case NegNum assume neq1:"w2 \ POS_INF" assume eq2:"w1 = NEG_INF" assume neq3:"w2 \ NEG_INF" let ?sum = "(scast w2 + scast NEG_INF)::64 Word.word" have leq1:"r'\<^sub>1 \ (real_of_int (sint NEG_INF))" using equiv1 neq1 eq2 neq3 by (auto simp add: repe.simps) have leq2:"r'\<^sub>2 = (real_of_int (sint w2))" using equiv2 neq1 eq2 neq3 by (auto simp add: repe.simps) have case1:"?sum <=s ((scast NEG_INF)::64 Word.word) \ NEG_INF \\<^sub>U r1 + r2" using up1 up2 apply (simp add: repU_def repe.simps word_sle_eq) apply (rule exI [where x= "r1 + r2"]) apply auto using w2case1a apply (auto simp add: eq2 scast_eq3) subgoal for r' proof - assume \r1 \ r'\ \r' \ - 2147483647\ \r2 \ signed w2\ \sint w2 \ 0\ from \sint w2 \ 0\ have \real_of_int (sint w2) \ real_of_int 0\ by (simp only: of_int_le_iff) then have \signed w2 \ (0::real)\ by simp from \r1 \ r'\ \r' \ - 2147483647\ have \r1 \ - 2147483647\ by (rule order_trans) moreover from \r2 \ signed w2\ \signed w2 \ (0::real)\ have \r2 \ 0\ by (rule order_trans) ultimately show \r1 + r2 \ - 2147483647\ by simp qed done have case2:"\(?sum <=s scast NEG_INF) \ scast ?sum \\<^sub>U r1 + r2" apply(simp add: repU_def repe.simps word_sle_def up1 up2) apply(rule exI[where x= "r'\<^sub>2 - 0x7FFFFFFF"]) apply(rule conjI) subgoal proof - assume " \ sint (scast w2 + 0xFFFFFFFF80000001) \ - 2147483647" have bound1:"r1 \ - 2147483647" using leq1 geq1 by (auto) have bound2:"r2 \ r'\<^sub>2" using leq2 geq2 by auto show "r1 + r2 \ r'\<^sub>2 - 2147483647" using bound1 bound2 by(linarith) qed apply(rule disjI2) apply(rule disjI2) apply(auto simp add: not_le) subgoal proof - assume a:"sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) > - 2147483647" then have sintw2_bound:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF)) > - 2147483647" unfolding min_extend_val by auto have case1a:" sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (-0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF w2Range]) have 
"- 0x7FFFFFFF < sint w2 + (- 0x7FFFFFFF)" using sintw2_bound case1a min_extend_val' scast_eq3 by linarith then have w2bound:"0 < sint w2" using less_add_same_cancel2 by blast have rightSize:"sint (((scast w2)::64 Word.word) + - 0x7FFFFFFF) \ sints (len_of TYPE(32))" using case1a scast_eq3 min_extend_val' word_sint.Rep[of "(w2)::32 Word.word"] w2bound by (auto simp add: sints32 len32[of "TYPE(32)"]) have downcast:"sint ((scast (((scast w2)::64 Word.word) + ((- 0x7FFFFFFF))))::word) = sint (((scast w2)::64 Word.word) + ((- 0x7FFFFFFF)::64 Word.word)) " using scast_down_range[OF rightSize] by auto then have b:"sint ((scast (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001))::word) = sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001)" using min_extend_val by auto have c:"sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) = sint ((scast w2)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word)" using min_extend_val case1a by auto show \r'\<^sub>2 - 2147483647 = signed (SCAST(64 \ 32) (SCAST(32 \ 64) w2 + 0xFFFFFFFF80000001))\ using a b min_extend_val' scast_eq3 leq2 case1a [symmetric] apply (simp add: algebra_simps) apply transfer apply simp done qed subgoal proof - have range2a:" - (2 ^ (size ((scast w2)::64 Word.word) - 1)) \ sint ((scast w2)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word) \ sint ((scast w2)::64 Word.word) + sint ((-0x7FFFFFFF)::64 Word.word) \ 2 ^ (size ((scast w2)::64 Word.word) - 1) - 1" apply(auto simp add: Word.word_size scast_eq1 upcast_max sints64 sints32 max_less) using max_diff_pos max_less w2Geq w2Less by auto have case2a:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (-0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF range2a]) have neg64:"(((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) = ((scast w2)::64 Word.word) + (-0x7FFFFFFF)" by auto assume "sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) > - 2147483647" then have sintw2_bound:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF)) > - 2147483647" unfolding neg64 by auto have a:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF)) = sint((scast w2)::64 Word.word) + sint((-0x7FFFFFFF)::64 Word.word)" using case2a by auto have b:"sint ((scast w2)::64 Word.word) = sint w2" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have d:"sint w2 \ 2^31 - 1" using word_sint.Rep[of "(w2)::32 Word.word"] by (auto simp add: sints32 len32[of "TYPE(32)"]) have "- 0x7FFFFFFF < sint w2 + (- 0x7FFFFFFF)" using sintw2_bound case2a min_extend_val' scast_eq3 by linarith then have w2bound:"0 < sint w2" using less_add_same_cancel2 by blast have rightSize:"sint (((scast w2)::64 Word.word) + - 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case2a b min_extend_val' using word_sint.Rep[of "(w2)::32 Word.word"] w2bound by (auto simp add: sints32 len32[of "TYPE(32)"]) have downcast:"sint ((scast (((scast w2)::64 Word.word) + ((- 0x7FFFFFFF))))::word) = sint (((scast w2)::64 Word.word) + ((- 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto have "sint (scast (((scast w2)::64 Word.word) + (-0x7FFFFFFF))::word) < 2147483647" unfolding downcast a b min_extend_val' using range2s[of "sint w2", OF d] by auto then show "sint (scast (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001)::word) < 2147483647" by auto qed subgoal proof - assume notLeq:"sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) > - 2147483647" then have gr:"sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) > - 2147483647" by auto have case2a:"sint (((scast 
w2)::64 Word.word) + (-0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (-0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF w2Range]) from neg64 have sintw2_bound:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF)) > - 2147483647" unfolding neg64 using notLeq by auto have a:"sint (((scast w2)::64 Word.word) + (-0x7FFFFFFF)) = sint((scast w2)::64 Word.word) + sint((-0x7FFFFFFF)::64 Word.word)" using case2a by auto have c:"sint((-0x7FFFFFFF)::64 Word.word) = -0x7FFFFFFF" by auto have d:"sint w2 \ 2^31 - 1" using word_sint.Rep[of "(w2)::32 Word.word"] by (auto simp add: sints32 len32[of "TYPE(32)"]) have "- 0x7FFFFFFF < sint w2 + (- 0x7FFFFFFF)" using sintw2_bound case2a c scast_eq3 by linarith then have w2bound:"0 < sint w2" using less_add_same_cancel2 by blast have rightSize:"sint (((scast w2)::64 Word.word) + - 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case2a scast_eq3 using word_sint.Rep[of "(w2)::32 Word.word"] w2bound by (auto simp add: sints32 len32[of "TYPE(32)"]) have downcast:"sint ((scast (((scast w2)::64 Word.word) + ((- 0x7FFFFFFF))))::word) = sint (((scast w2)::64 Word.word) + ((- 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto have sintEq:" sint ((scast (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001))::word) = sint (((scast w2)::64 Word.word) + 0xFFFFFFFF80000001) " using downcast by auto show "- 2147483647 < sint (SCAST(64 \ 32) (SCAST(32 \ 64) w2 + 0xFFFFFFFF80000001))" unfolding sintEq using gr of_int_less_iff of_int_minus of_int_numeral by linarith qed done have castEquiv:"\(?sum <=s scast NEG_INF) \ (scast ?sum) \\<^sub>U r1 + r2" using up1 up2 case1 case2 by fastforce have letRep:"(let sum = ?sum in if sum <=s scast NEG_INF then NEG_INF else scast sum) \\<^sub>U r1 + r2" using case1 case2 by(cases "?sum <=s scast NEG_INF"; auto) show "pu w1 w2 \\<^sub>U r1 + r2" using letRep eq2 neq1 by(auto) next case NumNeg assume neq3:"w1 \ NEG_INF" assume neq1:"w1 \ POS_INF" assume eq2:"w2 = NEG_INF" let ?sum = "(scast w1 + scast NEG_INF)::64 Word.word" have case1:"?sum <=s ((scast NEG_INF)::64 Word.word) \ NEG_INF \\<^sub>U r1 + r2" using up1 up2 apply (simp add: repU_def repe.simps word_sle_def) apply(rule exI[where x= "r1 + r2"]) apply(auto) using w1case1a min_extend_neg apply (auto simp add: neq1 eq2 neq3 repINT repU_def repe.simps repeInt_simps up2 word_sless_alt) using repINT repU_def repe.simps repeInt_simps up2 word_sless_alt proof - fix r' assume a1:"sint ((scast w1)::64 Word.word) \ 0" then have "sint w1 \ 0" using scast_eq1 by auto then have h3: \signed w1 \ (0::real)\ by transfer simp assume a2:"r2 \ r'" assume a3:"r1 \ signed w1" assume a4:"r' \ (- 2147483647)" from a2 a4 have h1:"r2 \ - 2147483647" by auto from a1 a3 h3 have h2:"r1 \ 0" using dual_order.trans of_int_le_0_iff by blast show "r1 + r2 \ (- 2147483647)" using h1 h2 add.right_neutral add_mono by fastforce qed have leq1:"r'\<^sub>2 \ (real_of_int (sint NEG_INF))" and leq2:"r'\<^sub>1 = (real_of_int (sint w1))" using equiv1 equiv2 neq1 eq2 neq3 unfolding repe.simps by auto have case1a:"sint (((scast w1)::64 Word.word) + (-0x7FFFFFFF::64 Word.word)) = sint ((scast w1)::64 Word.word) + sint (-0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF w1MinusBound]) have case2:"\(?sum <=s scast NEG_INF) \ scast ?sum \\<^sub>U r1 + r2" apply (simp add: repU_def repe.simps word_sle_def up1 up2) apply(rule exI[where x= "r'\<^sub>1 - 0x7FFFFFFF"]) (*r1 + r2*) apply(rule conjI) subgoal using leq1 leq2 geq1 geq2 by auto apply(rule disjI2) apply(rule disjI2) 
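(* The witness r'\<^sub>1 - 0x7FFFFFFF is a finite value, so the two disjI2 steps pick the finite-word alternative of repe; auto then leaves three subgoals: the downcast of the 64-bit sum equals this witness, it lies below 2147483647, and it lies above - 2147483647. *)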
apply(auto) subgoal proof - have f:"r'\<^sub>1 = (real_of_int (sint w1))" by (simp add: leq1 leq2 ) assume a:"\ sint (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001) \ - 2147483647" then have sintw2_bound:"sint (((scast w1)::64 Word.word) + (-0x7FFFFFFF)) > - 2147483647" unfolding min_extend_val by auto have "- 0x7FFFFFFF < sint w1 + (- 0x7FFFFFFF)" using sintw2_bound case1a min_extend_val' scast_eq1 by linarith then have w2bound:"0 < sint w1" using less_add_same_cancel2 by blast have rightSize:"sint (((scast w1)::64 Word.word) + - 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding w1case1a using w2bound word_sint.Rep[of "(w1)::32 Word.word"] by (auto simp add: sints32 len32[of "TYPE(32)"] scast_eq1) have downcast:"sint ((scast (((scast w1)::64 Word.word) + ((- 0x7FFFFFFF))))::word) = sint (((scast w1)::64 Word.word) + ((- 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto then have "sint ((scast (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001))::word) = sint (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001)" using min_extend_val by auto then have \signed (SCAST(64 \ 32) (SCAST(32 \ 64) w1 + 0xFFFFFFFF80000001)) = (signed (SCAST(32 \ 64) w1 + 0xFFFFFFFF80000001) :: real)\ by transfer simp moreover have "r'\<^sub>1 - (real_of_int 2147483647) = (real_of_int (sint ((scast w1)::64 Word.word ) - 2147483647))" by (simp add: scast_eq1 leq2) moreover from w1case1a' have \signed (SCAST(32 \ 64) w1 + 0xFFFFFFFF80000001) = signed (SCAST(32 \ 64) w1) + (signed (- 0x7FFFFFFF :: 64 Word.word) :: real)\ by transfer simp ultimately show "r'\<^sub>1 - 2147483647 = (signed ((scast (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001))::word))" by simp qed subgoal proof - assume "\ sint (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001) \ - 2147483647" then have sintw2_bound:"sint (((scast w1)::64 Word.word) + (-0x7FFFFFFF)) > - 2147483647" unfolding neg64 by auto have "- 0x7FFFFFFF < sint w1 + (- 0x7FFFFFFF)" using sintw2_bound case1a min_extend_val' scast_eq1 by linarith then have w2bound:"0 < sint w1" using less_add_same_cancel2 by blast have rightSize:"sint (((scast w1)::64 Word.word) + - 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case1a scast_eq1 w1case1a' using word_sint.Rep[of "(w1)::32 Word.word"] w2bound by(auto simp add: sints32 len32[of "TYPE(32)"]) have downcast:"sint ((scast (((scast w1)::64 Word.word) + ((- 0x7FFFFFFF))))::word) = sint (((scast w1)::64 Word.word) + ((- 0x7FFFFFFF)::64 Word.word)) " using scast_down_range[OF rightSize] by auto show "sint (scast (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001)::word) < 2147483647" using downcast min_extend_val' w1case1a' scast_eq1 arith[of "sint w1", OF w1Leq'] by auto qed subgoal proof - assume notLeq:"\ sint (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001) \ - 2147483647" then have gr:"sint (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001) > - 2147483647" by auto then have sintw2_bound:"sint (((scast w1)::64 Word.word) + (-0x7FFFFFFF)) > - 2147483647" unfolding neg64 using notLeq by auto have "- 0x7FFFFFFF < sint w1 + (- 0x7FFFFFFF)" using sintw2_bound case1a min_extend_val' scast_eq1 by linarith then have w2bound:"0 < sint w1" using less_add_same_cancel2 by blast have rightSize:"sint (((scast w1)::64 Word.word) + - 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case1a scast_eq1 w1case1a' using word_sint.Rep[of "(w1)::32 Word.word"] w2bound by (auto simp add: sints32 len32[of "TYPE(32)"]) show "- 2147483647 < sint ((scast (((scast w1)::64 Word.word) + 0xFFFFFFFF80000001))::word)" using scast_down_range[OF rightSize] 
gr of_int_less_iff of_int_minus of_int_numeral by simp qed done have letUp:"(let sum=?sum in if sum <=s scast NEG_INF then NEG_INF else scast sum) \\<^sub>U r1+r2" using case1 case2 by (auto simp add: Let_def) have puSimp:"pu w1 w2=(let sum = ?sum in if sum <=s scast NEG_INF then NEG_INF else scast sum)" using neq3 neq1 eq2 equiv1 leq2 repeInt_simps by force then show "pu w1 w2 \\<^sub>U r1 + r2" using letUp puSimp by auto next case NumNum assume notinf1:"w1 \ POS_INF" assume notinf2:"w2 \ POS_INF" assume notneginf1:"w1 \ NEG_INF" assume notneginf2:"w2 \ NEG_INF" let ?sum = "((scast w1)::64 Word.word) + ((scast w2):: 64 Word.word)" have inf_case:"scast POS_INF <=s ?sum \ POS_INF \\<^sub>U r1 + r2" using repU_def repePos_simps by (meson dual_order.strict_trans not_less order_refl) have truth:" - (2 ^ (size ((scast w1)::64 Word.word) - 1)) \ sint ((scast w1)::64 Word.word) + sint ((scast w2)::64 Word.word) \ sint ((scast w1)::64 Word.word) + sint ((scast w2)::64 Word.word) \ 2 ^ (size ((scast w1)::64 Word.word) - 1) - 1" using Word.word_size[of "(scast w2)::64 Word.word"] Word.word_size[of "(scast w1)::64 Word.word"] scast_eq1 scast_eq3 word_sint.Rep[of "(w1)::32 Word.word"] word_sint.Rep[of "(w2)::32 Word.word"] word_sint.Rep[of "(scast w1)::64 Word.word"] word_sint.Rep[of "(scast w2)::64 Word.word"] sints64 sints32 by auto have sint_eq:"sint((scast w1 + scast w2)::64 Word.word) = sint w1 + sint w2" using signed_arith_sint(1)[of "(scast w1)::64 Word.word" "(scast w2)::64 Word.word", OF truth] scast_eq1 scast_eq3 by auto have bigOne:"scast w1 + scast w2 <=s ((- 0x7FFFFFFF)::64 Word.word) \ \r'\r1 + r2. r' \ (- 0x7FFFFFFF)" proof - assume "scast w1 + scast w2 <=s ((- 0x7FFFFFFF)::64 Word.word)" then have "sint w1 + sint w2 \ - 0x7FFFFFFF" using sint_eq unfolding word_sle_eq by auto then have sum_leq: \real_of_int (sint w1 + sint w2) \ real_of_int (- 0x7FFFFFFF)\ by (simp only: of_int_le_iff) obtain r'\<^sub>1 r'\<^sub>2 ::real where bound1:"r'\<^sub>1 \ r1 \ (w1 \\<^sub>E r'\<^sub>1)" and bound2:"r'\<^sub>2 \ r2 \ (w2 \\<^sub>E r'\<^sub>2)" using up1 up2 unfolding repU_def by auto have somethingA:"r'\<^sub>1 \ sint w1" and somethingB:"r'\<^sub>2 \ sint w2" using \scast w1 + scast w2 <=s - 0x7FFFFFFF\ word_sle_def notinf1 notinf2 bound1 bound2 unfolding repe.simps by auto have something:"r'\<^sub>1 + r'\<^sub>2 \ sint w1 + sint w2" using somethingA somethingB add_mono by fastforce show "\r'\r1 + r2. r' \ (- 0x7FFFFFFF)" apply(rule exI[where x = "r'\<^sub>1 + r'\<^sub>2"]) using bound1 bound2 add_mono something sum_leq apply (auto intro: order_trans [of _ \signed_real_of_word w1 + signed_real_of_word w2\]) done qed have anImp:"\r'. (r'\r1 + r2 \ r' \ (- 2147483647)) \ (\r. - (2 ^ 31 - 1) = - (2 ^ 31 - 1) \ r' = r \ r \ (real_of_int (sint ((- (2 ^ 31 - 1))::32 Word.word))))" by auto have anEq:"((scast ((- (2 ^ 31 - 1))::32 Word.word))::64 Word.word) = (- 0x7FFFFFFF)" by auto have bigTwo: "\(((scast POS_INF)::64 Word.word) <=s ?sum) \ \(?sum <=s ((scast NEG_INF)::64 Word.word)) \ \r'\r1 + r2. 
r' = (real_of_int (sint (scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word))::word))) \ (r' < 0x7FFFFFFF \ (-0x7FFFFFFF) < r')" proof - assume "\(((scast POS_INF)::64 Word.word) <=s ?sum)" and "\(?sum <=s ((scast NEG_INF)::64 Word.word))" then have interval_int: "sint w1 + sint w2 < 0x7FFFFFFF" "(- 0x7FFFFFFF) < sint w1 + sint w2" unfolding word_sle_eq POS_INF_def NEG_INF_def using sint_eq by auto then have interval: \real_of_int (sint w1 + sint w2) < real_of_int (0x7FFFFFFF)\ \real_of_int (- 0x7FFFFFFF) < real_of_int (sint w1 + sint w2)\ by (simp_all only: of_int_less_iff) obtain r'\<^sub>1 r'\<^sub>2 ::real where bound1:"r'\<^sub>1 \ r1 \ (w1 \\<^sub>E r'\<^sub>1)" and bound2:"r'\<^sub>2 \ r2 \ (w2 \\<^sub>E r'\<^sub>2)" using up1 up2 unfolding repU_def by auto have somethingA:"r'\<^sub>1 \ sint w1" and somethingB:"r'\<^sub>2 \ sint w2" using word_sle_def notinf1 notinf2 bound1 bound2 unfolding repe.simps by auto have something:"r'\<^sub>1 + r'\<^sub>2 \ sint w1 + sint w2" using somethingA somethingB add_mono by fastforce have "(w1 \\<^sub>E r'\<^sub>1)" using bound1 by auto then have r1w1:"r'\<^sub>1 = (real_of_int (sint w1))" and w1U:" (real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and w1L:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" unfolding repe.simps using notinf1 notinf2 notneginf1 notneginf2 by (auto) have "(w2 \\<^sub>E r'\<^sub>2)" using bound2 by auto then have r2w1:"r'\<^sub>2 = (real_of_int (sint w2))" and w2U:" (real_of_int (sint w2)) < (real_of_int (sint POS_INF))" and w2L:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" unfolding repe.simps using notinf1 notinf2 notneginf1 notneginf2 by (auto) have "sint (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)) = sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word)" apply(rule scast_down_range) unfolding sint_eq using sints32 interval_int by auto then have cast_eq:"(sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word)) = sint w1 + sint w2" using scast_down_range sints32 interval_int sint_eq by auto from something and cast_eq have r12_sint_scast:"r'\<^sub>1 + r'\<^sub>2 = (sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word))" using r1w1 w1U w1L r2w1 w2U w2L by (simp) show ?thesis using bound1 bound2 add_mono r12_sint_scast cast_eq interval \r'\<^sub>1 + r'\<^sub>2 = (real_of_int (sint (scast (scast w1 + scast w2))))\ by simp qed have neg_inf_case:"?sum <=s ((scast ((NEG_INF)::word))::64 Word.word) \ NEG_INF \\<^sub>U r1 + r2" proof (unfold repU_def NEG_INF_def repe.simps) assume "scast w1 + scast w2 <=s ((scast ((- (2 ^ 31 - 1))::word))::64 Word.word)" then have "scast w1 + scast w2 <=s ((- 0x7FFFFFFF)::64 Word.word)" by (metis anEq) then obtain r' where geq:"(r' \ r1 + r2)" and leq:"(r' \ (- 0x7FFFFFFF))" using bigOne by auto show "(\r'\plus r1 r2. (\r. uminus (minus(2 ^ 31) 1) = POS_INF \ r' = r \ (real_of_int (sint POS_INF)) \ r) \ (\r. uminus (minus(2 ^ 31) 1) = uminus (minus(2 ^ 31) 1) \ r' = r \ r \ real_of_int (sint ((uminus (minus(2 ^ 31) 1))::word))) \ (\w. 
uminus (minus(2 ^ 31) 1) = w \ r' = real_of_int (sint w) \ (real_of_int (sint w)) < (real_of_int (sint POS_INF)) \ less (real_of_int (sint (uminus (minus(2 ^ 31) 1)))) (real_of_int (sint w))))" using leq anImp geq by meson qed have int_case:"\(((scast POS_INF)::64 Word.word) <=s ?sum) \ \ (?sum <=s ((scast NEG_INF)::64 Word.word)) \ ((scast ?sum)::word) \\<^sub>U r1 + r2" proof - assume bound1:"\ ((scast POS_INF)::64 Word.word) <=s scast w1 + scast w2" assume bound2:"\ scast w1 + scast w2 <=s ((scast NEG_INF)::64 Word.word)" obtain r'::real where rDef:"r' = (real_of_int (sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word)))" and r12:"r'\r1 + r2" and boundU:"r' < 0x7FFFFFFF" and boundL:"(-0x7FFFFFFF) < r'" using bigTwo[OF bound1 bound2] by auto obtain w::word where wdef:"w = (scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word))::word)" by auto then have wr:"r' = (real_of_int (sint w))" using r12 bound1 bound2 rDef by blast show ?thesis unfolding repU_def repe.simps using r12 wdef rDef boundU boundL wr by auto qed have almost:"(let sum::64 Word.word = scast w1 + scast w2 in if scast POS_INF <=s sum then POS_INF else if sum <=s scast NEG_INF then NEG_INF else scast sum) \\<^sub>U r1 + r2" apply(cases "((scast POS_INF)::64 Word.word) <=s ((?sum)::64 Word.word)") subgoal using inf_case Let_def int_case neg_inf_case by auto apply(cases "?sum <=s scast NEG_INF") subgoal using inf_case Let_def int_case neg_inf_case proof - assume "\ (scast POS_INF::64 Word.word) <=s scast w1 + scast w2" then have "\ (scast w1::64 Word.word) + scast w2 <=s scast NEG_INF \ \ (scast POS_INF::64 Word.word) <=s scast w1 + scast w2 \ \ (scast w1::64 Word.word) + scast w2 <=s scast NEG_INF \ ((let w = scast w1 + scast w2 in if scast POS_INF <=s (w::64 Word.word) then POS_INF else if w <=s scast NEG_INF then NEG_INF else scast w) \\<^sub>U r1 + r2)" using neg_inf_case by presburger then show ?thesis using int_case by force qed subgoal using inf_case Let_def int_case neg_inf_case proof - assume a1: "\ (scast POS_INF::64 Word.word) <=s scast w1 + scast w2" assume "\ (scast w1::64 Word.word) + scast w2 <=s scast NEG_INF" have "\ (scast w1::64 Word.word) + scast w2 <=s scast NEG_INF \ \ (scast POS_INF::64 Word.word) <=s scast w1 + scast w2 \ ((let w = scast w1 + scast w2 in if scast POS_INF <=s (w::64 Word.word) then POS_INF else if w <=s scast NEG_INF then NEG_INF else scast w) \\<^sub>U r1 + r2)" using a1 neg_inf_case by presburger then show ?thesis using int_case by force qed done then show ?thesis using notinf1 notinf2 notneginf1 notneginf2 by auto qed qed text\Lower bound of w1 + w2\ fun pl :: "word \ word \ word" where "pl w1 w2 = (if w1 = NEG_INF then NEG_INF else if w2 = NEG_INF then NEG_INF else if w1 = POS_INF then (if w2 = POS_INF then POS_INF else (let sum::64 Word.word = ((scast w2)::64 Word.word) + ((scast POS_INF)::64 Word.word) in if ((scast POS_INF)::64 Word.word) <=s(sum::64 Word.word) then POS_INF else scast sum)) else if w2 = POS_INF then (let sum::64 Word.word = ((scast w1)::64 Word.word) + ((scast POS_INF)::64 Word.word) in if ((scast POS_INF)::64 Word.word) <=s(sum::64 Word.word) then POS_INF else scast sum) else (let sum::64 Word.word = ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word) in if ((scast POS_INF)::64 Word.word) <=s (sum::64 Word.word) then POS_INF else if (sum::64 Word.word) <=s ((scast NEG_INF)::64 Word.word) then NEG_INF else scast sum))" subsection \Addition lower bound\ text\Correctness of lower bound of w1 + w2\ lemma pl_lemma: assumes 
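(* repL ("\\<^sub>L") means the word represents some real number at most r, so pl_lemma states that pl propagates lower bounds through addition, the counterpart of pu_lemma for upper bounds. *)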
lo1:"w1 \\<^sub>L (r1::real)" assumes lo2:"w2 \\<^sub>L (r2::real)" shows "pl w1 w2 \\<^sub>L (r1 + r2)" proof - have scast_eq1:"sint((scast w1)::64 Word.word) = sint w1" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have scast_eq2:"sint((scast (0x80000001::word))::64 Word.word)=sint((0x80000001::32 Word.word))" by auto have scast_eq3:"sint((scast w2)::64 Word.word) = sint w2" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have sints64:"sints 64 = {i. - (2 ^ 63) \ i \ i < 2 ^ 63}" using sints_def[of 64] range_sbintrunc[of 63] by auto have sints32:"sints 32 = {i. - (2 ^ 31) \ i \ i < 2 ^ 31}" using sints_def[of 32] range_sbintrunc[of 31] by auto have thing1:"0 \ 9223372034707292161 + ((-(2 ^ 31))::real)" by auto have "sint (( w2)) \ (-(2 ^ 31))" using word_sint.Rep[of "(w2)::32 Word.word"] sints32 mem_Collect_eq Word.word_size[of "(scast w2)::64 Word.word"] scast_eq1 scast_eq2 scast_eq3 len32 by auto then have thing4:"sint ((scast w2)::64 Word.word) \ (-(2 ^ 31))" using scast_down_range sint_ge sints_num using scast_eq3 by linarith have aLeq2:"(-(2 ^ 31)::int) \ -9223372039002259455" by auto then have thing2:" (0::int) \ 9223372039002259455 + sint ((scast w2)::64 Word.word)" using thing4 aLeq2 by (metis ab_group_add_class.ab_left_minus add.commute add_mono neg_le_iff_le) have aLeq:"2 ^ 31 \ (9223372039002259455::int)" by auto have bLeq:"sint ((scast w2)::64 Word.word) \ 2 ^ 31" apply ( auto simp add: word_sint.Rep[of "(w2)::32 Word.word"] sints32 scast_eq3 len32) using word_sint.Rep[of "(w2)::32 Word.word"] len32[of "TYPE(32)"] sints32 by auto have thing3:" sint ((scast w2)::64 Word.word) \ 9223372034707292160 " using aLeq bLeq by auto have truth:" - (2 ^ (size ((scast w2)::64 Word.word) - 1)) \ sint ((scast w2)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word) \ sint ((scast w2)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word) \ 2 ^ (size ((scast w2)::64 Word.word) - 1) - 1" by(auto simp add: Word.word_size[of "(scast w2)::64 Word.word"] Word.word_size[of "(scast (0x7FFFFFFF))::64 Word.word"] scast_eq1 scast_eq2 sints64 sints32 thing2 thing1 thing3) have case1a:" sint (((scast w2)::64 Word.word) + (0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF truth]) have case1b:"sint ((0xFFFFFFFF80000001)::64 Word.word) < 0" by auto have arith:"\x::int. 
x \ 2 ^ 31 - 1 \ x + (- 2147483647) < 2147483647" by auto have neg64:"(((scast w2)::64 Word.word) + 0x7FFFFFFF) = ((scast w2)::64 Word.word) + (0x7FFFFFFF)" by auto obtain r'\<^sub>1 and r'\<^sub>2 where geq1:"r'\<^sub>1\r1" and equiv1:"w1 \\<^sub>E r'\<^sub>1" and geq2:"r'\<^sub>2\r2" and equiv2:"w2 \\<^sub>E r'\<^sub>2" using lo1 lo2 unfolding repL_def by auto show ?thesis proof (cases rule: case_pl_inf[where ?w1.0=w1, where ?w2.0=w2]) case NegAny then show ?thesis apply (auto simp add: repL_def repe.simps) using lo1 lo2 linear by auto next case AnyNeg then show ?thesis apply (auto simp add: repL_def repe.simps) using linear by auto next case PosPos then show ?thesis using lo1 lo2 by (auto simp add: repL_def repe.simps) next case PosNum assume neq1:"w2 \ POS_INF" assume eq2:"w1 = POS_INF" assume neq3:"w2 \ NEG_INF" let ?sum = "(scast w2 + scast POS_INF)::64 Word.word" have case1:"(((scast POS_INF)::64 Word.word) <=s ?sum) \ POS_INF \\<^sub>L r1 + r2" using lo1 lo2 apply (simp add: repL_def repe.simps word_sle_def) apply(rule exI[where x= "r1 + r2"]) using case1a case1b apply (auto simp add: neq1 eq2 neq3 repINT repL_def repe.simps repeInt_simps lo2 word_sless_alt) proof - fix r' assume a1:"0 \ sint (((scast w2)::64 Word.word))" from a1 have h3:"2147483647 \ sint w2 + 0x7FFFFFFF " using scast_eq3 by auto assume a2:"r' \ r1" assume a3:"signed w2 \ r2" assume a4:"(2147483647) \ r'" from a2 a4 have h1:"2147483647 \ r1" by auto from a1 a3 h3 have h2:"0 \ r2" using of_int_le_0_iff le_add_same_cancel2 apply simp apply transfer apply simp apply (metis (full_types) of_int_0 of_int_le_iff order_trans signed_take_bit_nonnegative_iff) done show "(2147483647) \ r1 + r2 " using h1 h2 h3 add.right_neutral add_mono by fastforce qed have leq1:"r'\<^sub>1 \ (real_of_int (sint POS_INF))" using equiv1 neq1 eq2 neq3 unfolding repe.simps by auto have leq2:"r'\<^sub>2 = (real_of_int (sint w2))" using equiv2 neq1 eq2 neq3 unfolding repe.simps by auto have case2:"\(scast POS_INF <=s ?sum) \ scast ?sum \\<^sub>L r1 + r2" apply (simp add: repL_def repe.simps word_sle_def lo1 lo2) apply(rule exI[where x= "r'\<^sub>2 + 0x7FFFFFFF"]) (*r1 + r2*) apply(rule conjI) subgoal proof - assume "\ 2147483647 \ sint (scast w2 + 0x7FFFFFFF)" have bound1:"2147483647 \ r1" using leq1 geq1 by (auto) have bound2:"r'\<^sub>2 \ r2 " using leq2 geq2 by auto show "r'\<^sub>2 + 2147483647 \ r1 + r2" using bound1 bound2 by linarith qed apply(rule disjI2) apply(rule disjI2) apply(auto) subgoal proof - assume a:"\ 2147483647 \ sint (((scast w2)::64 Word.word) + 0x7FFFFFFF)" then have sintw2_bound:"2147483647 > sint (((scast w2)::64 Word.word) + (0x7FFFFFFF))" by auto have case1a:"sint (((scast w2)::64 Word.word) + (0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF truth]) have a1:"sint (((scast w2)::64 Word.word) + (0x7FFFFFFF)) = sint((scast w2)::64 Word.word) + sint((0x7FFFFFFF)::64 Word.word)" using case1a by auto have c1:"sint((0x7FFFFFFF)::64 Word.word) = 0x7FFFFFFF" by auto have "sint w2 + ( 0x7FFFFFFF) < 0x7FFFFFFF" using sintw2_bound case1a c1 scast_eq3 by linarith then have w2bound:"sint w2 < 0" using add_less_same_cancel2 by blast have rightSize:"sint (((scast w2)::64 Word.word) + 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case1a scast_eq3 c1 using word_sint.Rep[of "(w2)::32 Word.word"] w2bound by (auto simp add: sints32 len32[of "TYPE(32)"]) have downcast:"sint ((scast (((scast w2)::64 Word.word) + (( 0x7FFFFFFF))))::word) = sint (((scast w2)::64 
Word.word) + (( 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto then have b:"sint ((scast (((scast w2)::64 Word.word) + 0x7FFFFFFF))::word) = sint (((scast w2)::64 Word.word) + 0x7FFFFFFF)" by auto have c:"sint (((scast w2)::64 Word.word) + 0x7FFFFFFF) = sint ((scast w2)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word)" using case1a by auto have d:"sint ((0x7FFFFFFF)::64 Word.word) = (0x7FFFFFFF)" by auto have f:"r'\<^sub>2 = (real_of_int (sint w2))" by (simp add: leq2) show "r'\<^sub>2 + 2147483647 = (signed ((scast (((scast w2)::64 Word.word) + 0x7FFFFFFF))::word))" using a b c d scast_eq3 f leq2 of_int_numeral by auto qed subgoal proof - have truth2a:"-(2^(size ((scast w2)::64 Word.word)-1)) \ sint ((scast w2)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word) \ sint ((scast w2)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word) \ 2 ^ (size ((scast w2)::64 Word.word) - 1) - 1" apply(auto simp add: Word.word_size[of "(scast w2)::64 Word.word"] Word.word_size[of "(scast (0x7FFFFFFF))::64 Word.word"] scast_eq1 scast_eq2 sints64 sints32 thing2) using thing1 thing2 thing3 by auto have case2a:" sint (((scast w2)::64 Word.word) + (0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF truth2a]) have min_cast:"(0x7FFFFFFF::64 Word.word) =((scast (0x7FFFFFFF::word))::64 Word.word)" by auto assume "\ 2147483647 \ sint (((scast w2)::64 Word.word) + 0x7FFFFFFF)" then have sintw2_bound:"2147483647 > sint (((scast w2)::64 Word.word) + (0x7FFFFFFF))" using neg64 by auto have a:"sint (((scast w2)::64 Word.word) + (0x7FFFFFFF)) = sint((scast w2)::64 Word.word) + sint((0x7FFFFFFF)::64 Word.word)" using case2a by auto have c:"sint((0x7FFFFFFF)::64 Word.word) = 0x7FFFFFFF" by auto have " 0x7FFFFFFF > sint w2 + ( 0x7FFFFFFF)" using sintw2_bound case2a c scast_eq3 by linarith then have w2bound:" sint w2 < 0" by simp have rightSize:"sint (((scast w2)::64 Word.word) + 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case2a scast_eq3 c apply (auto simp add: sints32 len32[of "TYPE(32)"]) using word_sint.Rep[of "(w2)::32 Word.word"] sints32 len32[of "TYPE(32)"] w2bound by auto have downcast:"sint ((scast (((scast w2)::64 Word.word) + (( 0x7FFFFFFF))))::word) = sint (((scast w2)::64 Word.word) + (( 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto then show "sint (scast (((scast w2)::64 Word.word) + 0x7FFFFFFF)::word) < 2147483647" unfolding downcast a scast_eq3 c using w2bound by auto qed subgoal proof - assume notLeq:"\ 2147483647 \ sint (((scast w2)::64 Word.word) + 0x7FFFFFFF)" then have gr:"sint (((scast w2)::64 Word.word) + 0x7FFFFFFF) < 2147483647" by auto have case2a:" sint (((scast w2)::64 Word.word) + (0x7FFFFFFF::64 Word.word)) = sint ((scast w2)::64 Word.word) + sint (0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF truth]) have min_cast:"(0x7FFFFFFF::64 Word.word) =((scast (0x7FFFFFFF::word))::64 Word.word)" by auto have neg64:"(((scast w2)::64 Word.word) + 0x7FFFFFFF) = ((scast w2)::64 Word.word) + (0x7FFFFFFF)" by auto then have sintw2_bound:"sint (((scast w2)::64 Word.word) + (0x7FFFFFFF)) < 2147483647" using neg64 using notLeq by auto have a:"sint (((scast w2)::64 Word.word) + (0x7FFFFFFF)) = sint((scast w2)::64 Word.word) + sint((0x7FFFFFFF)::64 Word.word)" using case2a by auto have c:"sint((0x7FFFFFFF)::64 Word.word) = 0x7FFFFFFF" by auto have "- 2147483647 \ w2" using neq3 unfolding NEG_INF_def by auto then have "sint((- 2147483647)::word) \ sint w2" using 
word_sint.Rep_inject by blast then have n1:"- 2147483647 \ sint w2" by auto have "- 2147483648 \ w2" apply(rule repe.cases[OF equiv2]) by auto then have "sint(- 2147483648::word) \ sint w2" using word_sint.Rep_inject by blast then have n2:"- 2147483648 \ sint w2" by auto then have d:"sint w2 > - 2147483647" using word_sint.Rep[of "(w2)::32 Word.word"] sints32 len32[of "TYPE(32)"] neq3 n1 n2 by auto have w2bound:"- 2147483647 < sint w2 + 0x7FFFFFFF" using sintw2_bound case2a c scast_eq3 d by linarith have rightSize:"sint (((scast w2)::64 Word.word) + 0x7FFFFFFF) \ sints (len_of TYPE(32))" using sints32 len32[of "TYPE(32)"] w2bound word_sint.Rep[of "(w2)::32 Word.word"] c case2a scast_eq3 sintw2_bound by (auto simp add: sints32 len32[of "TYPE(32)"]) have downcast:"sint ((scast (((scast w2)::64 Word.word) + (( 0x7FFFFFFF))))::word) = sint (((scast w2)::64 Word.word) + (( 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto have sintEq:" sint ((scast (((scast w2)::64 Word.word) + 0x7FFFFFFF))::word) = sint (((scast w2)::64 Word.word) + 0x7FFFFFFF) " using downcast by auto show "- 2147483647 < sint ((scast (((scast w2)::64 Word.word) + 0x7FFFFFFF))::word)" unfolding sintEq using gr of_int_less_iff of_int_minus of_int_numeral c case2a scast_eq3 w2bound by simp qed done have "(let sum = ?sum in if scast POS_INF <=s sum then POS_INF else scast sum) \\<^sub>L r1 + r2" using case1 case2 by (auto simp add: Let_def) then show ?thesis using lo1 lo2 neq1 eq2 neq3 by (auto) next case NumPos assume neq3:"w1 \ NEG_INF" assume neq1:"w1 \ POS_INF" assume eq2:"w2 = POS_INF" let ?sum = "(scast w1 + scast POS_INF)::64 Word.word" have thing1:"0 \ 9223372034707292161 + ((-(2 ^ 31))::real)" by auto have "sint (( w1)) \ (-(2 ^ 31))" using word_sint.Rep[of "(w1)::32 Word.word"] scast_eq1 scast_eq2 scast_eq3 Word.word_size[of "(scast w1)::64 Word.word"] sints32 len32 mem_Collect_eq by auto then have thing4:"sint ((scast w1)::64 Word.word) \ (-(2 ^ 31))" using scast_down_range sint_ge sints_num scast_eq3 scast_eq1 by linarith have aLeq2:"(-(2 ^ 31)::int) \ -9223372039002259455" by auto then have thing2:" (0::int) \ 9223372039002259455 + sint ((scast w1)::64 Word.word)" using thing4 aLeq2 by (metis ab_group_add_class.ab_left_minus add.commute add_mono neg_le_iff_le) have aLeq:"2 ^ 31 \ (9223372039002259455::int)" by auto have bLeq:"sint ((scast w1)::64 Word.word) \ 2 ^ 31" apply (auto simp add: word_sint.Rep[of "(w1)::32 Word.word"] sints32 scast_eq1 len32) using word_sint.Rep[of "(w1)::32 Word.word"] len32[of "TYPE(32)"] sints32 by clarsimp have thing3:" sint ((scast w1)::64 Word.word) \ 9223372034707292160 " using aLeq bLeq by auto have truth:" - (2 ^ (size ((scast w1)::64 Word.word) - 1)) \ sint ((scast w1)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word) \ sint ((scast w1)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word) \ 2 ^ (size ((scast w1)::64 Word.word) - 1) - 1" by(auto simp add: Word.word_size[of "(scast w1)::64 Word.word"] Word.word_size[of "(scast (0x7FFFFFFF))::64 Word.word"] scast_eq3 scast_eq2 sints64 sints32 thing2 thing1 thing3) have case1a:"sint (((scast w1)::64 Word.word) + (0x7FFFFFFF::64 Word.word)) = sint ((scast w1)::64 Word.word) + sint (0x7FFFFFFF::64 Word.word)" by(rule signed_arith_sint(1)[OF truth]) have case1b:"sint ((0xFFFFFFFF80000001)::64 Word.word) < 0" by auto have g:"(0x7FFFFFFF::64 Word.word) = 0x7FFFFFFF" by auto have c:"sint (((scast w1)::64 Word.word) + 0x7FFFFFFF) = sint ((scast w1)::64 Word.word) + sint ((0x7FFFFFFF)::64 Word.word)" using g case1a by 
blast have d:"sint ((0x7FFFFFFF)::64 Word.word) = (0x7FFFFFFF)" by auto have e:"sint ((scast w1)::64 Word.word) = sint w1" using scast_eq1 by blast have d2:"sint w1 \ 2^31 - 1" using word_sint.Rep[of "(w1)::32 Word.word"] by (auto simp add: sints32 len32[of "TYPE(32)"]) have case1:"scast POS_INF <=s ?sum \ POS_INF \\<^sub>L r1 + r2" using lo1 lo2 apply (simp add: repL_def repe.simps word_sle_def) apply(rule exI[where x= "r1 + r2"]) apply(auto) using case1a case1b apply (auto simp add: neq1 eq2 neq3 repINT repL_def repe.simps repeInt_simps lo2 word_sless_alt) (* close 4 goals *) proof - fix r' have h3:"sint (((scast w1)::64 Word.word) + 0x7FFFFFFF) = sint (((scast w1)::64 Word.word)) + sint(0x7FFFFFFF::64 Word.word)" using case1a by auto have h4:"sint(0x7FFFFFFF::64 Word.word) = 2147483647" by auto assume a1:"0 \ sint ((scast w1)::64 Word.word)" then have h3:"sint w1 \ 0" using scast_eq1 h3 h4 by auto assume a2:"r' \ r2" assume a3:"signed w1 \ r1" assume a4:"(2147483647) \ r'" from a2 a4 have h1:"r2 \ 2147483647" by auto from a3 h3 have h2:"r1 \ 0" by (auto intro: order_trans [of _ \signed_real_of_word w1\]) show " 2147483647 \ r1 + r2" using h1 h2 add.right_neutral add_mono by fastforce qed have leq1:"r'\<^sub>2 \ (real_of_int (sint POS_INF))" and leq2:"r'\<^sub>1 = (real_of_int (sint w1))" using equiv1 equiv2 neq1 eq2 neq3 unfolding repe.simps by auto have neg64:"(((scast w1)::64 Word.word) + 0xFFFFFFFF80000001) = ((scast w1)::64 Word.word) + (-0x7FFFFFFF)" by auto have case2:"\(scast POS_INF <=s ?sum) \ scast ?sum \\<^sub>L r1 + r2" apply (simp add: repL_def repe.simps word_sle_def lo1 lo2) apply(rule exI[where x= "r'\<^sub>1 + 0x7FFFFFFF"]) apply(rule conjI) subgoal proof - assume "\ 2147483647 \ sint (scast w1 + 0x7FFFFFFF)" have bound1:"r2 \ 2147483647" using leq1 geq2 by (auto) have bound2:"r1 \ r'\<^sub>1" using leq2 geq1 by auto show "r'\<^sub>1 + 2147483647 \ r1 + r2" using bound1 bound2 by linarith qed apply(rule disjI2) apply(rule disjI2) apply(auto) subgoal proof - have f:"r'\<^sub>1 = (real_of_int (sint w1))" by (simp add: leq1 leq2 ) assume a:"\ 2147483647 \ sint (((scast w1)::64 Word.word) + 0x7FFFFFFF)" then have sintw2_bound:"2147483647 > sint (((scast w1)::64 Word.word) + (0x7FFFFFFF))" by auto have "0x7FFFFFFF > sint w1 + (0x7FFFFFFF)" using sintw2_bound case1a d scast_eq1 by linarith then have w2bound:"0 > sint w1" using add_less_same_cancel2 by blast have rightSize:"sint (((scast w1)::64 Word.word) + 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case1a e using w2bound word_sint.Rep[of "(w1)::32 Word.word"] by (auto simp add: sints32 len32[of "TYPE(32)"] ) have arith:"\x::int. 
x \ 2 ^ 31 - 1 \ x + (- 2147483647) < 2147483647" by auto have downcast:"sint ((scast (((scast w1)::64 Word.word) + (( 0x7FFFFFFF))))::word) = sint (((scast w1)::64 Word.word) + (( 0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto then have b:"sint((scast (((scast w1)::64 Word.word) + 0x7FFFFFFF))::word) = sint(((scast w1)::64 Word.word) + 0x7FFFFFFF)" using g by auto show "r'\<^sub>1 + 2147483647 = ((signed_real_of_word ((scast (((scast w1)::64 Word.word) + 0x7FFFFFFF))::word)))" using a b c d e f proof - have "(real_of_int (sint ((scast w1)::64 Word.word ) + 2147483647)) = r'\<^sub>1 + (real_of_int 2147483647)" using e leq2 by auto from this [symmetric] show ?thesis using b c d of_int_numeral by simp qed qed subgoal proof - assume "\ 2147483647 \ sint (((scast w1)::64 Word.word) + 0x7FFFFFFF)" then have sintw2_bound:"sint (((scast w1)::64 Word.word) + (0x7FFFFFFF)) < 2147483647" unfolding neg64 by auto have "0x7FFFFFFF > sint w1 + (0x7FFFFFFF)" using sintw2_bound case1a d scast_eq1 by linarith then have w2bound:"0 > sint w1" using add_less_same_cancel2 by blast have rightSize:"sint (((scast w1)::64 Word.word) + 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case1a e c using word_sint.Rep[of "(w1)::32 Word.word"] w2bound by (auto simp add: sints32 len32[of "TYPE(32)"]) have arith:"\x::int. x \ 2 ^ 31 - 1 \ x + (- 2147483647) < 2147483647" by auto have downcast:"sint ((scast (((scast w1)::64 Word.word) + 0x7FFFFFFF))::word) = sint (((scast w1)::64 Word.word) + ((0x7FFFFFFF)::64 Word.word))" using scast_down_range[OF rightSize] by auto show "sint (scast (((scast w1)::64 Word.word) + 0x7FFFFFFF)::word) < 2147483647" using downcast d e c arith[of "sint w1", OF d2] sintw2_bound by linarith qed subgoal proof - assume notLeq:"\ 2147483647 \ sint (((scast w1)::64 Word.word) + 0x7FFFFFFF)" then have gr:"2147483647 > sint (((scast w1)::64 Word.word) + 0x7FFFFFFF)" by auto then have sintw2_bound:"sint (((scast w1)::64 Word.word) + (0x7FFFFFFF)) < 2147483647" unfolding neg64 using notLeq by auto have "0x7FFFFFFF > sint w1 + ( 0x7FFFFFFF)" using sintw2_bound case1a d scast_eq1 by linarith then have useful:"0 > sint w1" using add_less_same_cancel2 by blast have rightSize:"sint (((scast w1)::64 Word.word) + 0x7FFFFFFF) \ sints (len_of TYPE(32))" unfolding case1a e using word_sint.Rep[of "(w1)::32 Word.word"] sints32 len32[of "TYPE(32)"] useful by auto have "- 2147483647 \ w1" using neq3 unfolding NEG_INF_def by auto then have "sint((- 2147483647)::word) \ sint w1" using word_sint.Rep_inject by blast then have n1:"- 2147483647 \ sint w1" by auto have "- 2147483648 \ w1" apply(rule repe.cases[OF equiv1]) using int_not_undef[of w1] by auto then have "sint(- 2147483648::word) \ sint w1" using word_sint.Rep_inject by blast then have n2:"- 2147483648 \ sint w1" by auto then have d:"sint w1 > - 2147483647" using word_sint.Rep[of "(w1)::32 Word.word"] sints32 len32[of "TYPE(32)"] n1 n2 neq3 by (simp) have d2:"sint (0x7FFFFFFF::64 Word.word) > 0" by auto from d d2 have d3:"- 2147483647 < sint w1 + sint (0x7FFFFFFF::64 Word.word)" by auto have d4:"sint ((scast (((scast w1)::64 Word.word) + 0x7FFFFFFF))::word) = sint w1 + sint (0x7FFFFFFF::64 Word.word)" using case1a rightSize scast_down_range scast_eq1 by fastforce then show "- 2147483647 < sint (SCAST(64 \ 32) (SCAST(32 \ 64) w1 + 0x7FFFFFFF))" using d3 d4 by auto qed done have "(let sum = ?sum in if scast POS_INF <=s sum then POS_INF else scast sum) \\<^sub>L r1 + r2" using case1 case2 by (auto simp add: Let_def) then show ?thesis 
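(* In case NumPos, w2 = POS_INF and w1 is finite, so pl w1 w2 unfolds to exactly the let-expression just established; the remaining NumNum case adds two finite words in 64 bits and clips the result against scast POS_INF and scast NEG_INF. *)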
using neq1 eq2 neq3 by (auto) next case NumNum assume notinf1:"w1 \ POS_INF" assume notinf2:"w2 \ POS_INF" assume notneginf1:"w1 \ NEG_INF" assume notneginf2:"w2 \ NEG_INF" let ?sum = "((scast w1)::64 Word.word) + ((scast w2):: 64 Word.word)" have truth:" - (2 ^ (size ((scast w1)::64 Word.word) - 1)) \ sint ((scast w1)::64 Word.word) + sint ((scast w2)::64 Word.word) \ sint ((scast w1)::64 Word.word) + sint ((scast w2)::64 Word.word) \ 2 ^ (size ((scast w1)::64 Word.word) - 1) - 1" using Word.word_size[of "(scast w2)::64 Word.word"] Word.word_size[of "(scast w1)::64 Word.word"] scast_eq1 scast_eq3 sints64 sints32 word_sint.Rep[of "(w1)::32 Word.word"] word_sint.Rep[of "(w2)::32 Word.word"] by auto have sint_eq:"sint((scast w1 + scast w2)::64 Word.word) = sint w1 + sint w2" using signed_arith_sint(1)[of "(scast w1)::64 Word.word" "(scast w2)::64 Word.word", OF truth] scast_eq1 scast_eq3 by auto have bigOne:"scast w1 + scast w2 <=s ((- 0x7FFFFFFF)::64 Word.word) \ \r'\r1 + r2. r' \ -0x7FFFFFFF" proof - assume "scast w1 + scast w2 <=s ((- 0x7FFFFFFF)::64 Word.word)" then have sum_leq:"sint w1 + sint w2 \ - 0x7FFFFFFF" and sum_leq':" (sint w1 + sint w2) \ (- 2147483647)" using sint_eq unfolding word_sle_eq by auto obtain r'\<^sub>1 r'\<^sub>2 ::real where bound1:"r'\<^sub>1 \ r1 \ (w1 \\<^sub>E r'\<^sub>1)" and bound2:"r'\<^sub>2 \ r2 \ (w2 \\<^sub>E r'\<^sub>2)" using lo1 lo2 unfolding repL_def by auto have somethingA:"r'\<^sub>1 \ sint w1" and somethingB:"r'\<^sub>2 \ sint w2" using bound1 bound2 \scast w1 + scast w2 <=s -0x7FFFFFFF\ word_sle_def notinf1 notinf2 unfolding repe.simps by auto have something:"r'\<^sub>1 + r'\<^sub>2 \ sint w1 + sint w2" using somethingA somethingB add_mono by fastforce show "\r'\r1 + r2. r' \ (-0x7FFFFFFF)" apply (rule exI[where x = "r'\<^sub>1 + r'\<^sub>2"]) using bound1 bound2 add_mono something sum_leq' apply (auto intro: order_trans [of _ \signed_real_of_word w1 + signed_real_of_word w2\]) done qed have anImp:"\r'. (r'\r1 + r2 \ r' \ (- 2147483647)) \ (\r. - (2 ^ 31 - 1) = - (2 ^ 31 - 1) \ r' = r \ r \ (real_of_int (sint ((- (2 ^ 31 - 1))::32 Word.word))))" by auto have anEq:"((scast ((- (2 ^ 31 - 1))::32 Word.word))::64 Word.word) = (- 0x7FFFFFFF)" by auto have bigTwo: "\(((scast POS_INF)::64 Word.word) <=s ?sum) \ \(?sum <=s ((scast NEG_INF)::64 Word.word)) \ \r'\r1 + r2. 
r' = (real_of_int (sint (scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word))::word))) \ (r' < 0x7FFFFFFF \ (-0x7FFFFFFF) < r')" proof - assume "\(((scast POS_INF)::64 Word.word) <=s ?sum)" then have sum_leq:"sint w1 + sint w2 < 0x7FFFFFFF" unfolding word_sle_eq using sint_eq by auto then have sum_leq':" (sint w1 + sint w2) < (2147483647)" by auto assume "\(?sum <=s ((scast NEG_INF)::64 Word.word))" then have sum_geq:"(- 0x7FFFFFFF) < sint w1 + sint w2" unfolding word_sle_eq using sint_eq by auto then have sum_geq':" (- 2147483647) < (sint w1 + sint w2)" by auto obtain r'\<^sub>1 r'\<^sub>2 ::real where bound1:"r'\<^sub>1 \ r1 \ (w1 \\<^sub>E r'\<^sub>1)" and bound2:"r'\<^sub>2 \ r2 \ (w2 \\<^sub>E r'\<^sub>2)" using lo1 lo2 unfolding repL_def by auto have somethingA:"r'\<^sub>1 \ sint w1" and somethingB:"r'\<^sub>2 \ sint w2" using word_sle_def notinf1 notinf2 bound1 bound2 unfolding repe.simps by auto have something:"r'\<^sub>1 + r'\<^sub>2 \ sint w1 + sint w2" using somethingA somethingB add_mono by fastforce have "(w1 \\<^sub>E r'\<^sub>1)" using bound1 by auto then have r1w1:"r'\<^sub>1 = (real_of_int (sint w1))" and w1U:"(real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and w1L:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" unfolding repe.simps using notinf1 notinf2 notneginf1 notneginf2 by (auto) have "(w2 \\<^sub>E r'\<^sub>2)" using bound2 by auto then have r2w1:"r'\<^sub>2 = (real_of_int (sint w2))" and w2U:"(real_of_int (sint w2)) < (real_of_int (sint POS_INF))" and w2L:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" unfolding repe.simps using notinf1 notinf2 notneginf1 notneginf2 by (auto) have "sint (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)) = sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word)" apply(rule scast_down_range) unfolding sint_eq using sints32 sum_geq sum_leq by auto then have cast_eq:"(sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word)) = sint w1 + sint w2" using scast_down_range sints32 sum_geq sum_leq sint_eq by auto from something and cast_eq have r12_sint_scast:"r'\<^sub>1 + r'\<^sub>2 = (sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word))" using r1w1 w1U w1L r2w1 w2U w2L by (simp) have leq_ref:"\x y ::real. x = y ==> x \ y" by auto show ?thesis apply(rule exI[where x="r'\<^sub>1 + r'\<^sub>2"]) apply(rule conjI) subgoal using r12_sint_scast cast_eq leq_ref r2w1 r1w1 add_mono[of r'\<^sub>1 r1 r'\<^sub>2 r2] bound1 bound2 by auto using bound1 bound2 add_mono r12_sint_scast cast_eq sum_leq sum_leq' sum_geq' sum_geq \r'\<^sub>1 + r'\<^sub>2 = (real_of_int (sint (scast (scast w1 + scast w2))))\ by auto qed have neg_inf_case:"?sum <=s ((scast ((NEG_INF)::word))::64 Word.word) \ NEG_INF \\<^sub>L r1 + r2" proof (unfold repL_def NEG_INF_def repe.simps) assume "scast w1 + scast w2 <=s ((scast ((- (2 ^ 31 - 1))::word))::64 Word.word)" then have "scast w1 + scast w2 <=s ((- 0x7FFFFFFF)::64 Word.word)" by (metis anEq) then obtain r' where geq:"(r' \ r1 + r2)" and leq:"(r' \ (-0x7FFFFFFF))" using bigOne by auto show "(\r'\plus r1 r2. (\r. uminus (minus(2 ^ 31) 1) = POS_INF \ r' = r \ (real_of_int (sint POS_INF)) \ r) \ (\r. uminus (minus(2 ^ 31) 1) = uminus (minus(2 ^ 31) 1) \ r' = r \ r \ (real_of_int (sint ((uminus (minus(2 ^ 31) 1))::word)))) \ (\w. 
uminus (minus(2 ^ 31) 1) = w \ r' = (real_of_int (sint w)) \ (real_of_int (sint w)) < (real_of_int (sint POS_INF)) \ less ( (real_of_int (sint (uminus (minus(2 ^ 31) 1))))) ((real_of_int (sint w)))))" using leq geq by (meson dual_order.strict_trans linorder_not_le order_less_irrefl) qed have bigThree:"0x7FFFFFFF <=s ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word) \ \r'\r1 + r2. 2147483647 \ r'" proof - assume "0x7FFFFFFF <=s ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)" then have sum_leq:"0x7FFFFFFF \ sint w1 + sint w2 " and sum_leq':" 2147483647 \ (sint w1 + sint w2)" using sint_eq unfolding word_sle_eq by auto obtain r'\<^sub>1 r'\<^sub>2 ::real where bound1:"r'\<^sub>1 \ r1 \ (w1 \\<^sub>E r'\<^sub>1)" and bound2:"r'\<^sub>2 \ r2 \ (w2 \\<^sub>E r'\<^sub>2)" using lo1 lo2 unfolding repL_def by auto have somethingA:"r'\<^sub>1 \ sint w1" and somethingB:"r'\<^sub>2 \ sint w2" using \ 0x7FFFFFFF <=s scast w1 + scast w2 \ word_sle_def notinf1 notinf2 bound1 bound2 repe.simps by auto have something:"r'\<^sub>1 + r'\<^sub>2 \ sint w1 + sint w2" using somethingA somethingB add_mono of_int_add by fastforce show "\r'\ r1 + r2. (2147483647) \ r'" apply(rule exI[where x = "r'\<^sub>1 + r'\<^sub>2"]) using bound1 bound2 add_mono something sum_leq' order.trans proof - have f1: " (real_of_int (sint w2)) = r'\<^sub>2" by (metis bound2 notinf2 notneginf2 repe.cases) have " (real_of_int (sint w1)) = r'\<^sub>1" by (metis bound1 notinf1 notneginf1 repe.cases) then have f2: " (real_of_int 2147483647) \ r'\<^sub>2 + r'\<^sub>1" using f1 sum_leq' by auto have "r'\<^sub>2 + r'\<^sub>1 \ r2 + r1" by (meson add_left_mono add_right_mono bound1 bound2 order.trans) then show "r'\<^sub>1 + r'\<^sub>2 \ r1 + r2 \ 2147483647 \ r'\<^sub>1 + r'\<^sub>2" using f2 by (simp add: add.commute) qed qed have inf_case:"((scast POS_INF)::64 Word.word) <=s ?sum \ POS_INF \\<^sub>L r1 + r2" proof - assume "((scast POS_INF)::64 Word.word) <=s ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)" then have "((scast ((2 ^ 31 - 1)::word))::64 Word.word) <=s ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)" unfolding repL_def repe.simps by auto then have "(0x7FFFFFFF::64 Word.word) <=s ((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)" by auto then obtain r' where geq:"(r' \ r1 + r2)" and leq:"(0x7FFFFFFF \ r')" using bigThree by auto show "?thesis" unfolding repL_def repe.simps using leq geq by auto qed have int_case:"\(((scast POS_INF)::64 Word.word) <=s ?sum) \ \ (?sum <=s ((scast NEG_INF)::64 Word.word)) \ ((scast ?sum)::word) \\<^sub>L r1 + r2" proof - assume bound1:"\ ((scast POS_INF)::64 Word.word) <=s scast w1 + scast w2" assume bound2:"\ scast w1 + scast w2 <=s ((scast NEG_INF)::64 Word.word)" obtain r'::real where rDef:"r' = (real_of_int (sint ((scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word)))::word)))" and r12:"r'\r1 + r2" and boundU:"r' < 0x7FFFFFFF" and boundL:" (-0x7FFFFFFF) < r'" using bigTwo[OF bound1 bound2] by auto obtain w::word where wdef:"w = (scast (((scast w1)::64 Word.word) + ((scast w2)::64 Word.word))::word)" by auto then have wr:"r' = (real_of_int (sint w))" using r12 bound1 bound2 rDef by blast show ?thesis unfolding repL_def repe.simps using r12 wdef rDef boundU boundL wr by auto qed have "(let sum = ?sum in if scast POS_INF <=s sum then POS_INF else if sum <=s scast NEG_INF then NEG_INF else scast sum) \\<^sub>L r1 + r2" apply(cases "((scast POS_INF)::64 Word.word) <=s ?sum") apply(cases "?sum <=s scast NEG_INF") using inf_case neg_inf_case int_case by 
(auto simp add: Let_def) then show ?thesis using notinf1 notinf2 notneginf1 notneginf2 by(auto) qed qed subsection \Max function\ text\Maximum of w1 and w2 in 2s-complement\ fun wmax :: "word \ word \ word" where "wmax w1 w2 = (if w1 <s w2 then w2 else w1)" text\Correctness of wmax\ lemma wmax_lemma: assumes eq1:"w1 \\<^sub>E (r1::real)" assumes eq2:"w2 \\<^sub>E (r2::real)" shows "wmax w1 w2 \\<^sub>E (max r1 r2)" proof(cases rule: case_inf2[where ?w1.0=w1, where ?w2.0=w2]) case PosPos from PosPos eq1 eq2 have bound1:"(real_of_int (sint POS_INF)) \ r1" and bound2:"(real_of_int (sint POS_INF)) \ r2" by (auto simp add: repe.simps) have eqInf:"wmax w1 w2 = POS_INF" using PosPos unfolding wmax.simps by auto have pos_eq:"POS_INF \\<^sub>E max r1 r2" apply(rule repPOS_INF) using bound1 bound2 by linarith show ?thesis using pos_eq eqInf by auto next case PosNeg from PosNeg have bound1:"(real_of_int (sint POS_INF)) \ r1" and bound2:"r2 \ (real_of_int (sint NEG_INF))" using eq1 eq2 by (auto simp add: repe.simps) have eqNeg:"wmax w1 w2 = POS_INF" unfolding eq1 eq2 wmax.simps PosNeg word_sless_def word_sle_def by(auto) have neg_eq:"POS_INF \\<^sub>E max r1 r2" apply(rule repPOS_INF) using bound1 bound2 eq1 eq2 by auto show "?thesis" using eqNeg neg_eq by auto next case PosNum from PosNum eq1 eq2 have bound1:" (real_of_int (sint POS_INF)) \ r1" and bound2a:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" and bound2b:" (real_of_int (sint w2)) < (real_of_int (sint POS_INF))" by (auto simp add: repe.simps) have eqNeg:"wmax w1 w2 = POS_INF" using PosNum bound2b unfolding wmax.simps word_sless_def word_sle_def by auto have neg_eq:"POS_INF \\<^sub>E max r1 r2" apply (rule repPOS_INF) using bound1 bound2a bound2b word_sless_alt le_max_iff_disj unfolding eq1 eq2 by blast show "?thesis" using eqNeg neg_eq by auto next case NegPos from NegPos eq1 eq2 have bound1:"r1 \ (real_of_int (sint NEG_INF))" and bound2:" (real_of_int (sint POS_INF)) \ r2" by (auto simp add: repe.simps) have eqNeg:"wmax w1 w2 = POS_INF" unfolding NegPos word_sless_def word_sle_def by(auto) have neg_eq:"POS_INF \\<^sub>E max r1 r2" apply(rule repPOS_INF) using bound1 bound2 by auto show "wmax w1 w2 \\<^sub>E max r1 r2" using eqNeg neg_eq by auto next case NegNeg from NegNeg eq1 eq2 have bound1:"r1 \ (real_of_int (sint NEG_INF))" and bound2:"r2 \ (real_of_int (sint NEG_INF))" by (auto simp add: repe.simps) have eqNeg:"NEG_INF \\<^sub>E max r1 r2" apply(rule repNEG_INF) using eq1 eq2 bound1 bound2 by(auto) have neg_eq:"wmax w1 w2 = NEG_INF" using NegNeg by auto show "wmax w1 w2 \\<^sub>E max r1 r2" using eqNeg neg_eq by auto next case NegNum from NegNum eq1 eq2 have eq3:"r2 = (real_of_int (sint w2))" and bound2a:"(real_of_int (sint w2)) < (real_of_int (sint POS_INF))" and bound2b:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" and bound1:"r1 \ (real_of_int (sint NEG_INF))" by (auto simp add: repe.simps) have eqNeg:"max r1 r2 = (real_of_int (sint w2))" using NegNum assms(2) bound2a eq3 repeInt_simps bound1 bound2a bound2b by (metis less_irrefl max.bounded_iff max_def not_less) then have extra_eq:"(wmax w1 w2) = w2" using assms(2) bound2a eq3 NegNum repeInt_simps by (simp add: word_sless_alt) have neg_eq:"wmax w1 w2 \\<^sub>E (real_of_int (sint (wmax w1 w2)))" apply(rule repINT) using extra_eq eq3 bound2a bound2b by(auto) show "wmax w1 w2 \\<^sub>E max r1 r2" using eqNeg neg_eq extra_eq by auto next case NumPos from NumPos eq1 eq2 have p2:"w2 = POS_INF" and eq1:"r1 = (real_of_int (sint w1))" and eq2:"r2 = r2" and bound1a:"(real_of_int (sint w1)) <
(real_of_int (sint POS_INF))" and bound1b:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" and bound2:"(real_of_int (sint POS_INF)) \ r2" by (auto simp add: repe.simps) have res1:"wmax w1 w2 = POS_INF" using NumPos p2 eq1 eq2 assms(1) bound1b p2 repeInt_simps by (simp add: word_sless_alt) have res3:"POS_INF \\<^sub>E max r1 r2" using repPOS_INF NumPos bound2 le_max_iff_disj by blast show "wmax w1 w2 \\<^sub>E max r1 r2" using res1 res3 by auto next case NumNeg from NumNeg eq1 eq2 have n2:"w2 = NEG_INF" and rw1:"r1 = (real_of_int (sint w1))" and bound1a:"(real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and bound1b:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" and bound2:"r2 \ (real_of_int (sint NEG_INF))" by (auto simp add: repe.simps) have res1:"max r1 r2 = (real_of_int (sint (wmax w1 w2)))" using bound1b bound2 NumNeg less_trans wmax.simps of_int_less_iff word_sless_alt rw1 antisym_conv2 less_imp_le max_def by metis have res2:"wmax w1 w2 \\<^sub>E (real_of_int (sint (wmax w1 w2)))" apply(rule repINT) using bound1a bound1b bound2 NumNeg leD leI less_trans n2 wmax.simps by (auto simp add: word_sless_alt) show "wmax w1 w2 \\<^sub>E max r1 r2" using res1 res2 by auto next case NumNum from NumNum eq1 eq2 have eq1:"r1 = (real_of_int (sint w1))" and eq2:"r2 = (real_of_int (sint w2))" and bound1a:"(real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and bound1b:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" and bound2a:"(real_of_int (sint w2)) < (real_of_int (sint POS_INF))" and bound2b:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" by (auto simp add: repe.simps) have res1:"max r1 r2 = (real_of_int (sint (wmax w1 w2)))" using eq1 eq2 bound1a bound1b bound2a bound2b apply (auto simp add: max_def word_sless_alt not_less; transfer) apply simp_all done have res2:"wmax w1 w2 \\<^sub>E (real_of_int (sint (wmax w1 w2)))" apply (rule repINT) using bound1a bound1b bound2a bound2b by (simp add: \max r1 r2 = (real_of_int (sint (wmax w1 w2)))\ eq2 min_less_iff_disj)+ show "wmax w1 w2 \\<^sub>E max r1 r2" using res1 res2 by auto qed lemma max_repU1: assumes "w1 \\<^sub>U x" assumes "w2 \\<^sub>U y" shows "wmax w1 w2 \\<^sub>U x " using wmax_lemma assms repU_def by (meson le_max_iff_disj) lemma max_repU2: assumes "w1 \\<^sub>U y" assumes "w2 \\<^sub>U x" shows "wmax w1 w2 \\<^sub>U x" using wmax_lemma assms repU_def by (meson le_max_iff_disj) text\Product of w1 * w2 with bounds checking\ fun wtimes :: "word \ word \ word" where "wtimes w1 w2 = (if w1 = POS_INF \ w2 = POS_INF then POS_INF else if w1 = NEG_INF \ w2 = POS_INF then NEG_INF else if w1 = POS_INF \ w2 = NEG_INF then NEG_INF else if w1 = NEG_INF \ w2 = NEG_INF then POS_INF else if w1 = POS_INF \ w2 0 0 = w2 then 0 else if w1 = NEG_INF \ w2 0 0 = w2 then 0 else if w1 w2 = POS_INF then NEG_INF else if 0 w2 = POS_INF then POS_INF else if 0 = w1 \ w2 = POS_INF then 0 else if w1 w2 = NEG_INF then POS_INF else if 0 w2 = NEG_INF then NEG_INF else if 0 = w1 \ w2 = NEG_INF then 0 else (let prod::64 Word.word = (scast w1) * (scast w2) in if prod <=s (scast NEG_INF) then NEG_INF else if (scast POS_INF) <=s prod then POS_INF else (scast prod)))" subsection \Multiplication upper bound\ text\Product of 32-bit numbers fits in 64 bits\ lemma times_upcast_lower: fixes x y::int assumes a1:"x \ -2147483648" assumes a2:"y \ -2147483648" assumes a3:"x \ 2147483648" assumes a4:"y \ 2147483648" shows "- 4611686018427387904 \ x * y" proof - let ?thesis = "- 4611686018427387904 \ x * y" have is_neg:"- 4611686018427387904 < 
(0::int)" by auto have case1:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"x * y \ 0" using a5 a6 by (simp add: zero_le_mult_iff) then show ?thesis using is_neg by auto qed have case2:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"-2147483648 * (2147483648) \ x * 2147483648" using a1 a2 a3 a4 a5 a6 by linarith have h2:"-2147483648 \ y" using a6 by auto have h3:"x * 2147483648 \ x * y" using a1 a2 a3 a4 a5 a6 h2 using mult_left_mono_neg by blast show ?thesis using h1 h2 h3 by auto qed have case3:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"2147483648 * (-2147483648) \ 2147483648 * y" using a1 a2 a3 a4 a5 a6 by linarith have h2:"-2147483648 \ x" using a5 by auto have h3:"2147483648 * y \ x * y" using a1 a2 a3 a4 a5 a6 h2 using mult_left_mono_neg mult_right_mono_neg by blast show ?thesis using h1 h2 h3 by auto qed have case4:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"x * y \ 0" using a5 a6 by (simp add: zero_le_mult_iff) then show ?thesis using is_neg by auto qed show ?thesis using case1 case2 case3 case4 by linarith qed text\Product of 32-bit numbers fits in 64 bits\ lemma times_upcast_upper: fixes x y ::int assumes a1:"x \ -2147483648" assumes a2:"y \ -2147483648" assumes a3:"x \ 2147483648" assumes a4:"y \ 2147483648" shows "x * y \ 4611686018427387904" proof - let ?thesis = "x * y \ 4611686018427387904" have case1:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"2147483648 * 2147483648 \ x * 2147483648" using a1 a2 a3 a4 a5 a6 by linarith have h2:"x * 2147483648 \ x * y" using a1 a2 a3 a4 a5 a6 by (simp add: mult_mono) show ?thesis using h1 h2 by auto qed have case2:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"2147483648 * 2147483648 \ (0::int)" by auto have h2:"0 \ x * y" using a5 a6 mult_nonneg_nonpos2 by blast show ?thesis using h1 h2 by auto qed have case3:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"2147483648 * 2147483648 \ (0::int)" by auto have h2:"0 \ x * y" using a5 a6 mult_nonneg_nonpos by blast show ?thesis using h1 h2 by auto qed have case4:"x \ 0 \ y \ 0 \ ?thesis" proof - assume a5:"x \ 0" and a6:"y \ 0" have h1:"-2147483648 * -2147483648 \ x * -2147483648" using a1 a2 a3 a4 a5 a6 by linarith have h2:"x * -2147483648 \ x * y" using a1 a2 a3 a4 a5 a6 mult_left_mono_neg by blast show ?thesis using h1 h2 by auto qed show "x * y \ 4611686018427387904" using case1 case2 case3 case4 by linarith qed text\Correctness of 32x32 bit multiplication\ subsection \Exact multiplication\ lemma wtimes_exact: assumes eq1:"w1 \\<^sub>E r1" assumes eq2:"w2 \\<^sub>E r2" shows "wtimes w1 w2 \\<^sub>E r1 * r2" proof - have POS_cast:"sint ((scast POS_INF)::64 Word.word) = sint POS_INF" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have POS_sint:"sint POS_INF = (2^31)-1" by auto have w1_cast:"sint ((scast w1)::64 Word.word) = sint w1" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have w2_cast:"sint ((scast w2)::64 Word.word) = sint w2" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have NEG_cast:"sint ((scast NEG_INF)::64 Word.word) = sint NEG_INF" apply(rule Word.sint_up_scast) unfolding Word.is_up by auto have rangew1:"sint ((scast w1)::64 Word.word) \ {- (2 ^ 31).. (2^31)} " using word_sint.Rep[of "(w1)::32 Word.word"] sints32 len32 mem_Collect_eq POS_cast w1_cast by auto have rangew2:"sint ((scast w2)::64 Word.word) \ {- (2 ^ 31).. 
(2^31)} " using word_sint.Rep[of "(w2)::32 Word.word"] sints32 len32 mem_Collect_eq POS_cast w2_cast by auto show ?thesis proof (cases rule: case_times_inf[of w1 w2]) case PosPos then have a1: "PosInf \ r1" and a2: "PosInf \ r2" using "PosPos" eq1 eq2 repe.simps by (auto) have f3: "\n e::real. 1 \ max ( (numeral n)) e" by (simp add: le_max_iff_disj) have "\n e::real. 0 \ max ( (numeral n)) e" by (simp add: le_max_iff_disj) then have "r1 \ r1 * r2" using f3 "PosPos" eq1 eq2 repe.simps using eq1 eq2 by (auto simp add: repe.simps) then have "PosInf \ r1 * r2" using a1 by linarith then show ?thesis using "PosPos" by(auto simp add: repe.simps) next case NegPos from "NegPos" have notPos:"w1 \ POS_INF" unfolding POS_INF_def NEG_INF_def by auto have a1: "NegInf \ r1" using eq1 "NegPos" by (auto simp add: repe.simps) have a2: "PosInf \ r2" using eq2 "NegPos" by (auto simp add: repe.simps) have f1: "real_of_int Numeral1 = 1" by simp have f3: "(real_of_int 3) \ - r1" using a1 by (auto) have f4:"0 \ r2" using f1 a2 by(auto) have f5: "r1 \ - 1" using f3 by auto have fact:"r1 * r2 \ - r2" using f5 f4 mult_right_mono by fastforce show ?thesis using a1 a2 fact by (auto simp add: repe.simps "NegPos") next case PosNeg have a1: "PosInf \ r1" using eq1 "PosNeg" by (auto simp add: repe.simps) then have h1:"r1 \ 1" by (auto) have a2: " NegInf \ r2" using eq2 "PosNeg" by (auto simp add: repe.simps) have f1: "\ NegInf * (- 1) \ 1" by (auto) have f2: "\e ea::real. (e * (- 1) \ ea) = (ea * (- 1) \ e)" by force then have f3: "\ 1 * (- 1::real) \ NegInf" using f1 by blast have f4: "r1 * (- 1) \ NegInf" using f2 a1 by(auto) have f5: "\e ea eb. (if (ea::real) \ eb then e \ eb else e \ ea) = (e \ ea \ e \ eb)" by force have " 0 * (- 1::real) \ 1" by simp then have "r1 * (- 1) \ 0" using f5 f4 f3 f2 by meson then have f6: "0 \ r1" by fastforce have "1 * (- 1) \ (- 1::real)" using f2 by force then have fact:"r2 \ (- 1)" using f3 a2 by fastforce have rule:"\c. c > 0 \ r1 \ c \ r2 \ -1 \ r1 * r2 \ -c" apply auto by (metis (no_types, hide_lams) f5 mult_less_cancel_left_pos mult_minus1_right neg_le_iff_le not_less) have "r1 * r2 \ NegInf" using "PosNeg" f6 fact rule[of PosInf] a1 by(auto) then show ?thesis using "PosNeg" by (auto simp add: repe.simps) next case NegNeg have a1: "(-2147483647) \ r1" using eq1 "NegNeg" by (auto simp add: repe.simps) then have h1:"r1 \ -1" using max.bounded_iff max_def one_le_numeral by auto have a2: " (-2147483647) \ r2" using eq2 "NegNeg" by (auto simp add: repe.simps) have f1: "\e ea eb. \ (e::real) \ ea \ \ 0 \ eb \ eb * e \ eb * ea" using mult_left_mono by metis have f2: "- 1 = (- 1::real)" by force have f3: " 0 \ (1::real)" by simp have f4: "\e ea eb. 
(ea::real) \ e \ \ ea \ eb \ \ eb \ e" by (meson less_le_trans not_le) have f5: " 0 \ (2147483647::real)" by simp have f6: "- (- 2147483647) = (2147483647::real)" by force then have f7: "- ( (- 2147483647) * r1) = (2147483647 * r1)" by (metis mult_minus_left) have f8: "- ( (- 2147483647) * (- 1)) = 2147483647 * (- 1::real)" by simp have " 2147483647 = - 1 * (- 2147483647::real)" by simp then have f9: "r1 \ (- 1) \ 2147483647 \ r1 * (- 2147483647)" using f8 f7 f5 f2 f1 by linarith have f10: "- 2147483647 = (- 2147483647::real)" by fastforce have f11: "- (r2 * 1 * (r1 * (- 1))) = r1 * r2" by (simp add: mult.commute) have f12: "r1 * (- 1) = - (r1 * 1)" by simp have "r1 * 1 * ( (- 2147483647) * 1) = (- 2147483647) * r1" by (simp add: mult.commute) then have f13: "r1 * (- 1) * ( (- 2147483647) * 1) = 2147483647 * r1" using f12 f6 by (metis (no_types) mult_minus_left) have " 1 * r1 \ 1 * (- 2147483647)" using a1 by (auto simp add: a1) then have " 2147483647 \ r1 * (- 1)" by fastforce then have "0 \ r1 * (- 1)" using f5 f4 by (metis) then have "r1 \ (- 1) \ - (r1 * 2147483647) \ - (r2 * 1 * (r1 * (- 1)))" by (metis a2 f11 h1 mult_left_mono_neg minus_mult_right mult_minus1_right neg_0_le_iff_le) then have "r1 \ (- 1) \ r1 * (- 2147483647) \ r2 * r1" using f11 f10 by (metis mult_minus_left mult.commute) then have fact:" 2147483647 \ r2 * r1" using f9 f4 by blast show ?thesis using a1 a2 h1 fact by (auto simp add: repe.simps "NegNeg" mult.commute) next case PosLo from "PosLo" have w2NotPinf:"w2 \ POS_INF" and w2NotNinf:"w2 \ NEG_INF" by (auto) from eq1 "PosLo" have upper:" (real_of_int (sint POS_INF)) \ r1 " by (auto simp add: repe.simps) have lower1:"sint w2 < 0" using "PosLo" apply (auto simp add: word_sless_def word_sle_def) by (simp add: dual_order.order_iff_strict) then have lower2:"sint w2 \ -1" by auto from eq2 have rw2:"r2 = (real_of_int (sint w2))" using repe.simps "PosLo" by (auto simp add: repe.simps) have f4: "r1 * (- 1) \ (- 2147483647)" using upper by (auto) have f5: "r2 \ (- 1)" using lower2 rw2 by transfer simp have "0 < r1" using upper by (auto) have "\r. 
r < - 2147483647 \ \ r < r1 * - 1" using f4 less_le_trans by blast then have "r1 * (real_of_int (sint w2)) \ (- 2147483647)" using f5 f4 upper lower2 rw2 mult_left_mono by (metis \0 < r1\ dual_order.order_iff_strict f5 mult_left_mono rw2) then have "r1 * r2 \ real_of_int (sint NEG_INF)" using upper lower2 rw2 by (auto) then show ?thesis using "PosLo" by (auto simp add: repe.simps) next case PosHi from "PosHi" have w2NotPinf:"w2 \ POS_INF" and w2NotNinf:"w2 \ NEG_INF" by (auto) from eq1 "PosHi" have upper:"(real_of_int (sint POS_INF)) \ r1 " by (auto simp add: repe.simps) have lower1:"sint w2 > 0" using "PosHi" apply (auto simp add: word_sless_def word_sle_def) by (simp add: dual_order.order_iff_strict) then have lower2:"sint w2 \ 1" by auto from eq2 have rw2:"r2 = (real_of_int (sint w2))" using repe.simps "PosHi" by (auto) have "0 \ r1" using upper by (auto) then have "r1 \ r1 * r2" using rw2 lower2 by (metis (no_types) mult_left_mono mult.right_neutral of_int_1_le_iff) have "PosInf \ r1 * r2" using upper lower2 rw2 apply (auto) using \0 \ r1\ mult_numeral_1_right numeral_One of_int_1_le_iff zero_le_one apply simp using mult_mono [of 2147483647 r1 1 \signed_real_of_word (w2::32 Word.word)\] apply simp apply transfer apply simp done then show ?thesis using "PosHi" by (auto simp add: repe.simps) next case PosZero from "PosZero" have w2NotPinf:"w2 \ POS_INF" and w2NotNinf:"w2 \ NEG_INF" by (auto) from eq1 "PosZero" have upper:" (real_of_int (sint POS_INF)) \ r1 " by (auto simp add: repe.simps) have lower1:"sint w2 = 0" using "PosZero" by (auto simp add: word_sless_def word_sle_def) from eq2 have rw2:"r2 = (real_of_int (sint w2))" using repe.simps "PosZero" by auto have "0 = r1 * r2" using "PosZero" rw2 by auto then show ?thesis using "PosZero" by (auto simp add: repe.simps) next case NegHi have w2NotPinf:"w2 \ POS_INF" and w2NotNinf:"w2 \ NEG_INF" using "NegHi" by (auto) from eq1 "NegHi" have upper:"(real_of_int (sint NEG_INF)) \ r1 " by (auto simp add: repe.simps) have low:"sint w2 > 0" using "NegHi" apply (auto simp add: word_sless_def word_sle_def) by (simp add: dual_order.order_iff_strict) then have lower1:"(real_of_int (sint w2)) > 0" by transfer simp then have lower2:"(real_of_int (sint w2)) \ 1" using low by transfer simp from eq1 have rw1:"r1 \ (real_of_int (sint w1))" using repe.simps "NegHi" by (simp add: upper) from eq2 have rw2:"r2 = (real_of_int (sint w2))" using repe.simps "NegHi" by (auto) have mylem:"\x y z::real. 
x \ -1 \ y \ 1 \ z \ -1 \ x \ z \ x * y \ z" proof - fix x y z::real assume a1:"x \ -1" assume a2:"y \ 1" then have h1:"-1 \ -y" by auto assume a3:"z \ -1" then have a4:"z < 0" by auto from a4 have h2:"-z > 0" using leD leI by auto from a3 have h5:"-z \ 1" by (simp add: leD leI) assume a5:"x \ z" then have h6:"-x \ -z" by auto have h3:"-x * -z = x * z" by auto show "x * y \ z" using a1 a2 a3 a5 a4 h1 h2 h3 h6 h5 a5 dual_order.trans leD mult.right_neutral by (metis dual_order.order_iff_strict mult_less_cancel_left2) qed have prereqs:"r1 \ - 1" "1 \ (real_of_int (sint w2))" " (- 2147483647::real) \ - 1 " "r1 \ (-2147483647)" using rw1 rw2 "NegHi" lower2 by (auto simp add: word_sless_def word_sle_def) have "r1 * r2 \ real_of_int (sint NEG_INF)" using upper lower1 lower2 rw1 rw2 apply (auto simp add: word_sless_def word_sle_def) using mylem[of "r1" " (real_of_int (sint w2))" " (- 2147483647)"] prereqs by auto then show ?thesis using "NegHi" by (auto simp add: repe.simps) next case NegLo from "NegLo" have w2NotPinf:"w2 \ POS_INF" and w2NotNinf:"w2 \ NEG_INF" by (auto) from eq1 "NegLo" have upper:"(real_of_int (sint NEG_INF)) \ r1" by (auto simp add: repe.simps) have low:"sint w2 < 0" using "NegLo" by (auto simp add: word_sless_def word_sle_def dual_order.order_iff_strict) then have lower1:"(real_of_int (sint w2)) < 0" by transfer simp from eq1 have rw1:"r1 \ (real_of_int (sint w1))" using repe.simps "NegLo" by (simp add: upper) from eq2 have rw2:"r2 = (real_of_int (sint w2))" using repe.simps "NegLo" by (auto) have hom:"(- 2147483647) = -(2147483647::real)" by auto have mylem:"\x y z::real. y < 0 \ x \ y \ z \ -1 \ -y \ x * z" proof - fix x y z::real assume a1:"y < 0" assume a2:"x \ y" then have h1:"-x \ -y" by auto assume a3:"z \ -1" then have a4:"z < 0" by auto from a4 have h2:"-z > 0" using leD leI by auto from a3 have h5:"-z \ 1" by (simp add: leD leI) have h4:"-x * -z \ -y" using a1 a2 a3 a4 h1 h2 h5 dual_order.trans mult.right_neutral - by (metis mult.commute neg_0_less_iff_less real_mult_le_cancel_iff1) + by (metis mult.commute neg_0_less_iff_less mult_le_cancel_iff1) have h3:"-x * -z = x * z" by auto show "- y \ x * z " using a1 a2 a3 a4 h1 h2 h3 h4 h5 by simp qed have prereqs:"- 2147483647 < (0::real)" " r1 \ - 2147483647" using rw1 rw2 "NegLo" by (auto simp add: word_sless_def word_sle_def) moreover have \sint w2 \ - 1\ using low by simp then have \real_of_int (sint w2) \ real_of_int (- 1)\ by (simp only: of_int_le_iff) then have \signed_real_of_word w2 \ - 1\ by simp ultimately have "2147483647 \ r1 * r2" using upper lower1 rw1 rw2 mylem[of "-2147483647" "r1" "(real_of_int (sint w2))"] by (auto simp add: word_sless_def word_sle_def) then show ?thesis using "NegLo" by (auto simp add: repe.simps) next case NegZero from "NegZero" have w2NotPinf:"w2 \ POS_INF" and w2NotNinf:"w2 \ NEG_INF" by (auto) from eq2 "NegZero" have "r2 = 0" using repe.simps "NegZero" by (auto) then show ?thesis using "NegZero" by (auto simp add: repe.simps) next case LoPos from "LoPos" have w2NotPinf:"w1 \ POS_INF" and w2NotNinf:"w1 \ NEG_INF" by (auto) from eq2 "LoPos" have upper:"(real_of_int (sint POS_INF)) \ r2 " by (auto simp add: repe.simps) have lower1:"sint w1 < 0" using "LoPos" apply (auto simp add: word_sless_def word_sle_def) by (simp add: dual_order.order_iff_strict) then have lower2:"sint w1 \ -1" by auto from eq1 have rw1:"r1 = (real_of_int (sint w1))" using repe.simps "LoPos" by (auto simp add: repe.simps) have f4: "r2 * (- 1) \ (- 2147483647)" using upper by(auto) have f5: "r1 \ (- 1)" using lower2 
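(* Informal reading of case LoPos: w1 encodes a finite negative value and w2 = POS_INF, so r1 <= -1 (lower2, rw1) and r2 >= 2147483647 (upper); hence r1 * r2 <= -2147483647 and the product is safely represented by NEG_INF, which is what wtimes returns in this case. *)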
rw1 by transfer simp have "0 < r2" using upper by(auto) then have "r2 * r1 \ r2 * (- 1)" by (metis dual_order.order_iff_strict mult_right_mono f5 mult.commute) then have "r2 * r1 \ (- 2147483647)" by (meson f4 less_le_trans not_le) then have "(real_of_int (sint w1)) * r2 \ (- 2147483647)" using f5 f4 rw1 less_le_trans not_le mult.commute rw1 by (auto simp add: mult.commute) then have "r1 * r2 \ NegInf" using rw1 by (auto) then show ?thesis using "LoPos" by (auto simp: repe.simps) next case HiPos from "HiPos" have w2NotPinf:"w1 \ POS_INF" and w2NotNinf:"w1 \ NEG_INF" by (auto) from eq2 "HiPos" have upper:"(real_of_int (sint POS_INF)) \ r2 " by (auto simp add: repe.simps) have lower1:"sint w1 > 0" using "HiPos" by (auto simp add: word_sless_def word_sle_def dual_order.order_iff_strict) then have lower2:"sint w1 \ 1" by auto from eq1 have rw2:"r1 = (real_of_int (sint w1))" using "HiPos" by (auto simp add: repe.simps) have "0 \ r2" using upper by(auto) then have "r2 \ r2 * r1" using lower2 rw2 by (metis (no_types) mult_left_mono mult.right_neutral of_int_1_le_iff) have "2147483647 \ r1 * r2" using upper lower2 rw2 apply (simp add: word_sless_def word_sle_def) using mult_mono [of 1 \signed_real_of_word w1\ 2147483647 r2] apply simp apply transfer apply simp done then show ?thesis using "HiPos" by (auto simp add: repe.simps) next case ZeroPos from "ZeroPos" have w2NotPinf:"w1 \ POS_INF" and w2NotNinf:"w1 \ NEG_INF" by (auto) from eq2 "ZeroPos" have upper:" (real_of_int (sint POS_INF)) \ r2 " by (auto simp add: repe.simps) have lower1:"sint w1 = 0" using "ZeroPos" by (auto simp add: word_sless_def word_sle_def) from eq1 have rw2:"r1 = (real_of_int (sint w1))" using repe.simps "ZeroPos" by (auto) have "r1 = 0" using lower1 rw2 by auto then show ?thesis using "ZeroPos" by (auto simp add: repe.simps) next case ZeroNeg from "ZeroNeg" have w2NotPinf:"w1 \ POS_INF" and w2NotNinf:"w1 \ NEG_INF" by (auto) from eq2 "ZeroNeg" have upper:"(real_of_int (sint NEG_INF)) \ r2 " by (auto simp add: repe.simps) have lower1:"sint w1 = 0" using "ZeroNeg" by (auto simp add: word_sless_def word_sle_def) from eq1 have rw2:"r1 = (real_of_int (sint w1))" using repe.simps "ZeroNeg" by (auto) have "r1 = 0" using lower1 rw2 by auto then show ?thesis using "ZeroNeg" by (auto simp add: repe.simps) next case LoNeg from "LoNeg" have w2NotPinf:"w1 \ POS_INF" and w2NotNinf:"w1 \ NEG_INF" by (auto) from eq2 "LoNeg" have upper:" (real_of_int (sint NEG_INF)) \ r2 " by (auto simp add: repe.simps) have low:"sint w1 < 0" using "LoNeg" apply (auto simp add: word_sless_def word_sle_def) by (simp add: dual_order.order_iff_strict) then have lower1:"(real_of_int (sint w1)) < 0" by transfer simp from low have \sint w1 \ - 1\ by simp then have lower2:"(real_of_int (sint w1)) \ -1" by transfer simp from eq1 have rw1:"r2 \ (real_of_int (sint w2))" using "LoNeg" upper by auto from eq1 have rw2:"r1 = (real_of_int (sint w1))" using "LoNeg" by (auto simp add: upper repe.simps) have hom:"(- 2147483647::real) = -(2147483647)" by auto have mylem:"\x y z::real. 
y < 0 \ x \ y \ z \ -1 \ -y \ x * z" proof - fix x y z::real assume a1:"y < 0" assume a2:"x \ y" then have h1:"-x \ -y" by auto assume a3:"z \ -1" then have a4:"z < 0" by auto from a4 have h2:"-z > 0" using leD leI by auto from a3 have h5:"-z \ 1" by (simp add: leD leI) have h4:"-x * -z \ -y" using a1 a2 a3 a4 h1 h2 h5 dual_order.trans mult_left_mono mult.right_neutral mult.commute by (metis dual_order.order_iff_strict mult_minus_right mult_zero_right neg_le_iff_le) have h3:"-x * -z = x * z" by auto show "- y \ x * z " using a1 a2 a3 a4 h1 h2 h3 h4 h5 by simp qed have prereqs:"- 2147483647 < (0::real)" " r2 \ - 2147483647" " (real_of_int (sint w1)) \ - 1" using rw1 rw2 "LoNeg" lower2 by (auto simp add: word_sless_def word_sle_def lower2) have "2147483647 \ r1 * r2" using upper lower1 lower2 rw1 rw2 mylem[of "-2147483647" "r2" "(real_of_int (sint w1))"] prereqs by (auto simp add:word_sless_def word_sle_def mult.commute) then show ?thesis using "LoNeg" by (auto simp add: repe.simps) next case HiNeg from HiNeg have w1NotPinf:"w1 \ POS_INF" and w1NotNinf:"w1 \ NEG_INF" by (auto) have upper:" (real_of_int (sint NEG_INF)) \ r2 " using HiNeg eq2 by (auto simp add: repe.simps ) have low:"sint w1 > 0" using HiNeg apply (auto simp add: word_sless_def word_sle_def) by (simp add: dual_order.order_iff_strict) then have lower1:"(real_of_int (sint w1)) > 0" by transfer simp from low have \sint w1 \ 1\ by simp then have lower2:"(real_of_int (sint w1)) \ 1" by transfer simp from eq2 have rw1:"r2 \ (real_of_int (sint w2))" using repe.simps HiNeg by (simp add: upper) from eq1 have rw2:"r1 = (real_of_int (sint w1))" using repe.simps HiNeg by (auto) have mylem:"\x y z::real. x \ -1 \ y \ 1 \ z \ -1 \ x \ z \ x * y \ z" proof - fix x y z::real assume a1:"x \ -1" assume a2:"y \ 1" then have h1:"-1 \ -y" by auto assume a3:"z \ -1" then have a4:"z < 0" by auto from a4 have h2:"-z > 0" using leD leI by auto from a3 have h5:"-z \ 1" by (simp add: leD leI) assume a5:"x \ z" then have h6:"-x \ -z" by auto have h3:"-x * -z = x * z" by auto show "x * y \ z" using a1 a2 a3 a4 h1 h2 h3 h6 h5 a5 dual_order.trans less_eq_real_def by (metis mult_less_cancel_left1 not_le) qed have prereqs:"r2 \ - 1" "1 \ (real_of_int (sint w1))" " (- 2147483647) \ - (1::real )" "r2 \ (- 2147483647)" using rw1 rw2 HiNeg lower2 by (auto simp add: word_sless_def word_sle_def) have "r1 * r2 \ - 2147483647" using upper lower1 lower2 rw1 rw2 apply (auto simp add: word_sless_def word_sle_def) using mylem[of "r2" "(real_of_int (sint w1))" " (- 2147483647)"] prereqs by (auto simp add: mult.commute) then show ?thesis using HiNeg by(auto simp add: repe.simps) next case AllFinite let ?prod = "(((scast w1)::64 Word.word) * ((scast w2)::64 Word.word))" consider (ProdNeg) "?prod <=s ((scast NEG_INF)::64 Word.word)" | (ProdPos) "(((scast POS_INF)::64 Word.word) <=s ?prod)" | (ProdFin) "\(?prod <=s ((scast NEG_INF)::64 Word.word)) \ \((scast POS_INF)::64 Word.word) <=s ?prod" by (auto) then show ?thesis proof (cases) case ProdNeg have bigLeq:"(4611686018427387904::real) \ 9223372036854775807" by auto have set_cast:"\x::int. 
(x \ {-(2^31)..2^31}) = ( (real_of_int x) \ {-(2^31)..2^31})" by auto have eq3:"sint(((scast w1)::64 Word.word) * ((scast w2)::64 Word.word)) = sint ((scast w1)::64 Word.word) * sint ((scast w2)::64 Word.word)" apply(rule Word_Lemmas.signed_arith_sint(4)) using rangew1 rangew2 w1_cast w2_cast using Word.word_size[of "((scast w1)::64 Word.word)"] using Word.word_size[of "((scast w2)::64 Word.word)"] using times_upcast_upper[of "sint w1" "sint w2"] using times_upcast_lower[of "sint w1" "sint w2"] by auto assume "?prod <=s ((scast NEG_INF)::64 Word.word)" then have sint_leq:"sint ?prod \ sint ((scast NEG_INF)::64 Word.word)" using word_sle_def by blast have neqs:"w1 \ POS_INF" " w1 \ NEG_INF" "w2 \ POS_INF" "w2 \ NEG_INF" using AllFinite word_sless_def signed.not_less_iff_gr_or_eq by force+ from eq1 have rw1:"r1 = (real_of_int (sint w1))" using neqs by (auto simp add: repe.simps) from eq2 have rw2:"r2 = (real_of_int (sint w2))" using neqs by (auto simp add: repe.simps) show ?thesis using AllFinite ProdNeg w1_cast w2_cast rw1 rw2 sint_leq apply (auto simp add: repe.simps eq3) apply (subst (asm) of_int_le_iff [symmetric, where ?'a = real]) apply simp done next case ProdPos have bigLeq:"(4611686018427387904::real) \ 9223372036854775807" by auto have set_cast:"\x::int. (x \ {-(2^31)..2^31}) = ( (real_of_int x) \ {-(2^31)..2^31})" by auto have eq3:"sint(((scast w1)::64 Word.word) * ((scast w2)::64 Word.word)) = sint ((scast w1)::64 Word.word) * sint ((scast w2)::64 Word.word)" apply(rule Word_Lemmas.signed_arith_sint(4)) using rangew1 rangew2 POS_cast POS_sint w1_cast w2_cast using Word.word_size[of "((scast w1)::64 Word.word)"] using Word.word_size[of "((scast w2)::64 Word.word)"] using times_upcast_upper[of "sint w1" "sint w2"] using times_upcast_lower[of "sint w1" "sint w2"] by auto assume cast:"((scast POS_INF)::64 Word.word) <=s ?prod" then have sint_leq:"sint ((scast POS_INF)::64 Word.word) \ sint ?prod" using word_sle_def by blast have neqs:"w1 \ POS_INF" " w1 \ NEG_INF" "w2 \ POS_INF" "w2 \ NEG_INF" using AllFinite word_sless_def signed.not_less_iff_gr_or_eq by force+ from eq1 have rw1:"r1 = (real_of_int (sint w1))" using repe.simps AllFinite neqs by auto from eq2 have rw2:"r2 = (real_of_int (sint w2))" using repe.simps AllFinite neqs by auto have prodHi:"r1 * r2 \ PosInf" using w1_cast w2_cast rw1 rw2 sint_leq apply (auto simp add: eq3) apply (subst (asm) of_int_le_iff [symmetric, where ?'a = real]) apply simp done have infs:"SCAST(32 \ 64) NEG_INF <s SCAST(32 \ 64) POS_INF" by (auto) have casted:"SCAST(32 \ 64) POS_INF <=s SCAST(32 \ 64) w1 * SCAST(32 \ 64) w2" using cast by auto have almostContra:"SCAST(32 \ 64) NEG_INF <s SCAST(32 \ 64) w1 * SCAST(32 \ 64) w2" using infs cast signed.order.strict_trans2 by blast have contra:"\(SCAST(32 \ 64) w1 * SCAST(32 \ 64) w2 <=s SCAST(32 \ 64) NEG_INF)" using eq3 almostContra by auto have wtimesCase:"wtimes w1 w2 = POS_INF" using neqs ProdPos almostContra wtimes.simps AllFinite ProdPos by (auto simp add: repe.simps Let_def) show ?thesis using prodHi apply(simp only: repe.simps) apply(rule disjI1) apply(rule exI[where x= "r1*r2"]) apply(rule conjI) apply(rule wtimesCase) using prodHi by auto next case ProdFin have bigLeq:"(4611686018427387904::real) \ 9223372036854775807" by auto have set_cast:"\x::int.
(x \ {-(2^31)..2^31}) = ( (real_of_int x) \ {-(2^31)..2^31})" by auto have eq3:"sint(((scast w1)::64 Word.word) * ((scast w2)::64 Word.word)) = sint ((scast w1)::64 Word.word) * sint ((scast w2)::64 Word.word)" apply(rule Word_Lemmas.signed_arith_sint(4)) using rangew1 rangew2 POS_cast POS_sint w1_cast w2_cast using Word.word_size[of "((scast w1)::64 Word.word)"] using Word.word_size[of "((scast w2)::64 Word.word)"] using times_upcast_upper[of "sint w1" "sint w2"] using times_upcast_lower[of "sint w1" "sint w2"] by auto from ProdFin have a1:"\(?prod <=s ((scast NEG_INF)::64 Word.word))" by auto then have sintGe:"sint (?prod) > sint (((scast NEG_INF)::64 Word.word))" using word_sle_def dual_order.order_iff_strict signed.linear by fastforce from ProdFin have a2:"\((scast POS_INF)::64 Word.word) <=s ?prod" by auto then have sintLe:"sint (((scast POS_INF)::64 Word.word)) > sint (?prod)" using word_sle_def dual_order.order_iff_strict signed.linear by fastforce have neqs:"w1 \ POS_INF" " w1 \ NEG_INF" "w2 \ POS_INF" "w2 \ NEG_INF" using AllFinite word_sless_def signed.not_less_iff_gr_or_eq by force+ from eq1 have rw1:"r1 = (real_of_int (sint w1))" using neqs by(auto simp add: repe.simps) from eq2 have rw2:"r2 = (real_of_int (sint w2))" using neqs by(auto simp add: repe.simps) from rw1 rw2 have "r1 * r2 = (real_of_int ((sint w1) * (sint w2)))" by simp have rightSize:"sint (((scast w1)::64 Word.word) * ((scast w2)::64 Word.word)) \ sints (len_of TYPE(32))" using sintLe sintGe sints32 by (simp) have downcast:"sint ((scast (((scast w1)::64 Word.word) * ((scast w2)::64 Word.word)))::word) = sint (((scast w1)::64 Word.word) * ((scast w2)::64 Word.word))" using scast_down_range[OF rightSize] by auto then have res_eq:"r1 * r2 = real_of_int(sint((scast (((scast w1)::64 Word.word)*((scast w2)::64 Word.word)))::word))" using rw1 rw2 eq3 POS_cast POS_sint w1_cast w2_cast downcast \r1 * r2 = (real_of_int (sint w1 * sint w2))\ by (auto) have res_up:"sint (scast (((scast w1)::64 Word.word) * ((scast w2)::64 Word.word))::word) < sint POS_INF" using rw1 rw2 eq3 POS_cast POS_sint w1_cast w2_cast downcast \r1 * r2 = (real_of_int (sint w1 * sint w2))\ \sint (scast w1 * scast w2) < sint (scast POS_INF)\ of_int_eq_iff res_eq by presburger have res_lo:"sint NEG_INF < sint (scast (((scast w1)::64 Word.word) * ((scast w2)::64 Word.word))::word)" using rw1 rw2 eq3 POS_cast POS_sint w1_cast w2_cast NEG_cast downcast \r1 * r2 = (real_of_int (sint w1 * sint w2))\ \sint (scast NEG_INF) < sint (scast w1 * scast w2)\ of_int_eq_iff res_eq by presburger have "scast ?prod \\<^sub>E (r1 * r2)" using res_eq res_up res_lo apply (auto simp add: rep_simps) using repeInt_simps by auto then show ?thesis using AllFinite ProdFin by(auto) qed qed qed subsection \Multiplication upper bound\ text\Upper bound of multiplication from upper and lower bounds\ fun tu :: "word \ word \ word \ word \ word" where "tu w1l w1u w2l w2u = wmax (wmax (wtimes w1l w2l) (wtimes w1u w2l)) (wmax (wtimes w1l w2u) (wtimes w1u w2u))" lemma tu_lemma: assumes u1:"u\<^sub>1 \\<^sub>U (r1::real)" assumes u2:"u\<^sub>2 \\<^sub>U (r2::real)" assumes l1:"l\<^sub>1 \\<^sub>L (r1::real)" assumes l2:"l\<^sub>2 \\<^sub>L (r2::real)" shows "tu l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>U (r1 * r2)" proof - obtain rl1 rl2 ru1 ru2 :: real where gru1:"ru1 \ r1" and gru2:"ru2 \ r2" and grl1:"rl1 \ r1" and grl2:"rl2 \ r2" and eru1:"u\<^sub>1 \\<^sub>E ru1" and eru2:"u\<^sub>2 \\<^sub>E ru2" and erl1:"l\<^sub>1 \\<^sub>E rl1" and erl2:"l\<^sub>2 \\<^sub>E rl2" using u1 u2 l1 
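(* Sketch of the bound, informally: for r1 in [rl1, ru1] and r2 in [rl2, ru2], the product r1 * r2 is bounded above by the largest of the four corner products rl1*rl2, ru1*rl2, rl1*ru2, ru1*ru2, which is exactly what tu computes with wmax over the exact corner products from wtimes_exact.  For instance, for the (hypothetical) intervals [-2, 3] and [-4, 5] the corners are 8, -12, -10 and 15, so 15 bounds every such product. *)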
l2 unfolding repU_def repL_def by auto have timesuu:"wtimes u\<^sub>1 u\<^sub>2 \\<^sub>E ru1 * ru2" using wtimes_exact[OF eru1 eru2] by auto have timesul:"wtimes u\<^sub>1 l\<^sub>2 \\<^sub>E ru1 * rl2" using wtimes_exact[OF eru1 erl2] by auto have timeslu:"wtimes l\<^sub>1 u\<^sub>2 \\<^sub>E rl1 * ru2" using wtimes_exact[OF erl1 eru2] by auto have timesll:"wtimes l\<^sub>1 l\<^sub>2 \\<^sub>E rl1 * rl2" using wtimes_exact[OF erl1 erl2] by auto have maxt12:"wmax (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2) \\<^sub>E max (rl1 * rl2) (ru1 * rl2)" by (rule wmax_lemma[OF timesll timesul]) have maxt34:"wmax (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2) \\<^sub>E max (rl1 * ru2) (ru1 * ru2)" by (rule wmax_lemma[OF timeslu timesuu]) have bigMax:"wmax (wmax (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2)) (wmax (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2)) \\<^sub>E max (max (rl1 * rl2) (ru1 * rl2)) (max (rl1 * ru2) (ru1 * ru2))" by (rule wmax_lemma[OF maxt12 maxt34]) obtain maxt12val :: real where maxU12:"wmax (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2) \\<^sub>U max (rl1 * rl2) (ru1 * rl2)" using maxt12 unfolding repU_def by blast obtain maxt34val :: real where maxU34:"wmax (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2) \\<^sub>U max (rl1 * ru2) (ru1 * ru2)" using maxt34 unfolding repU_def by blast obtain bigMaxU:"wmax (wmax (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2)) (wmax (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2)) \\<^sub>U max (max (rl1 * rl2) (ru1 * rl2)) (max (rl1 * ru2) (ru1 * ru2))" using bigMax unfolding repU_def by blast have ivl1:"rl1 \ ru1" using grl1 gru1 by auto have ivl2:"rl2 \ ru2" using grl2 gru2 by auto let ?thesis = "tu l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>U r1 * r2" show ?thesis using ivl1 ivl2 proof(cases rule: case_ivl_zero) case ZeroZero assume "rl1 \ 0 \ 0 \ ru1 \ rl2 \ 0 \ 0 \ ru2" then have geq1:"ru1 \ 0" and geq2:"ru2 \ 0" by auto consider "r1 \ 0 \ r2 \ 0" | "r1 \ 0 \ r2 \ 0" | "r1 \ 0 \ r2 \ 0" | "r1 \ 0 \ r2 \ 0" using le_cases by auto then show "tu l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>U r1 * r2" proof (cases) case 1 have g1:"ru1 * ru2 \ ru1 * r2" using "1" geq1 geq2 grl2 gru2 by (simp add: mult_left_mono) have g2:"ru1 * r2 \ r1 * r2" using "1" geq1 geq2 grl1 grl2 gru1 gru2 by (simp add: mult_right_mono) from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up eru1 eru2 erl1 erl2 repU_def timesuu tu.simps max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] by (metis wmax.elims) next case 2 have g1:"ru1 * ru2 \ 0" using "2" geq1 geq2 grl2 gru2 by (simp) have g2:"0 \ r1 * r2" using "2" by (simp add: mult_le_0_iff) from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.coboundedI1 max.commute maxt34 by (metis repU_def tu.simps) next case 3 have g1:"ru1 * ru2 \ 0" using "3" geq1 geq2 by simp have g2:"0 \ r1 * r2" using "3" by (simp add: mult_le_0_iff) from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] repU_def tu.simps timesuu by (metis max.coboundedI1 max.commute maxt34) next case 4 have g1:"rl1 * rl2 \ rl1 * r2" using "4" geq1 geq2 grl1 grl2 gru1 gru2 using \rl1 \ 0 \ 0 \ ru1 \ rl2 \ 0 \ 0 \ ru2\ less_eq_real_def by (metis mult_left_mono_neg) have g2:"rl1 * 
r2 \ r1 * r2" using "4" geq1 geq2 grl1 grl2 gru1 gru2 \rl1 \ 0 \ 0 \ ru1 \ rl2 \ 0 \ 0 \ ru2\ by (metis mult_left_mono_neg mult.commute) from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.commute maxt34 by (metis max_repU1 repU_def timesll tu.simps) qed next case ZeroPos assume bounds:"rl1 \ 0 \ 0 \ ru1 \ 0 \ rl2" have r2:"r2 \ 0" using bounds dual_order.trans grl2 by blast consider "r1 \ 0" | "r1 \ 0" using le_cases by (auto) then show ?thesis proof (cases) case 1 assume r1:"r1 \ 0" have g1:"ru1 * ru2 \ ru1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_left_mono by blast have g2:"ru1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_right_mono by blast from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.coboundedI1 max.commute maxt34 by (metis repU_def tu.simps) next case 2 assume r1:"r1 \ 0" have g1:"ru1 * ru2 \ 0" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_left_mono by (simp add: mult_less_0_iff less_le_trans not_less) have g2:"0 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_right_mono by (simp add: mult_le_0_iff) from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.coboundedI1 max.commute maxt34 by (metis repU_def tu.simps) qed next case ZeroNeg assume bounds:"rl1 \ 0 \ 0 \ ru1 \ ru2 \ 0" have r2:"r2 \ 0" using bounds dual_order.trans gru2 by blast have case1:"r1 \ 0 \ ?thesis" proof - assume r1:"r1 \ 0" have g1:"rl1 * rl2 \ 0" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_less_0_iff less_le_trans not_less by metis have g2:"0 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_right_mono by (simp add: mult_le_0_iff) from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.commute maxt34 by (metis max_repU2 max_repU1 repU_def timesll tu.simps) qed have case2:"r1 \ 0 \ ?thesis" proof - assume r1:"r1 \ 0" have g1:"rl1 * rl2 \ rl1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 by (metis mult_left_mono_neg) have g2:"rl1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult.commute by (metis mult_left_mono_neg) from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.commute maxt34 by (metis max_repU1 repU_def timesll tu.simps) qed show "tu l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>U r1 * r2" using case1 case2 le_cases by blast next case PosZero assume bounds:"0 \ rl1 \ rl2 \ 0 \ 0 \ ru2" have r1:"r1 \ 0" using bounds dual_order.trans grl1 by blast consider "r2 \ 0" | "r2 \ 0" using le_cases by auto then show ?thesis proof (cases) case 1 have g1:"ru1 * ru2 \ ru1 * r2" using "1" bounds grl1 grl2 gru1 gru2 using mult_left_mono using leD leI less_le_trans by metis have g2:"ru1 * r2 \ r1 * r2" using "1" bounds grl1 grl2 gru1 gru2 using mult_right_mono by blast from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.coboundedI1 max.commute maxt34 by (metis 
repU_def tu.simps) next case 2 have g1:"ru1 * ru2 \ 0" using r1 bounds grl2 gru2 gru1 leD leI less_le_trans by auto have g2:"0 \ r1 * r2" using r1 "2" by (simp add: mult_le_0_iff) from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.coboundedI1 max.commute maxt34 by (metis repU_def tu.simps) qed next case NegZero assume bounds:"ru1 \ 0 \ rl2 \ 0 \ 0 \ ru2" have r1:"r1 \ 0" using bounds dual_order.trans gru1 by blast consider "r2 \ 0" | "r2 \ 0" using le_cases by auto then show ?thesis proof (cases) case 1 have g1:"ru1 * rl2 \ 0" using r1 "1" bounds grl1 grl2 gru1 gru2 mult_less_0_iff not_less by metis have g2:"0 \ r1 * r2" using r1 "1" bounds grl1 grl2 gru1 gru2 by (simp add: mult_le_0_iff) from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.commute maxt34 by (metis max_repU1 repU_def timesul tu.simps) next case 2 have lower:"rl1 \ 0" using bounds dual_order.trans grl1 r1 by blast have g1:"rl1 * rl2 \ rl1 * r2" using r1 "2" bounds grl1 grl2 gru1 gru2 less_eq(1) less_le_trans not_less mult_le_cancel_left by metis have g2:"rl1 * r2 \ r1 * r2" using r1 "2" bounds grl1 grl2 gru1 gru2 mult.commute not_le lower mult_le_cancel_left by metis from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.commute maxt34 by (metis max_repU1 repU_def timesll tu.simps) qed next case NegNeg assume bounds:"ru1 \ 0 \ ru2 \ 0" have r1:"r1 \ 0" using bounds dual_order.trans gru1 by blast have r2:"r2 \ 0" using bounds dual_order.trans gru2 by blast have lower1:"rl1 \ 0" using bounds dual_order.trans grl1 r1 by blast have lower2:"rl2 \ 0" using bounds dual_order.trans grl2 r2 by blast have g1:"rl1 * rl2 \ rl1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 less_eq(1) mult_le_cancel_left less_le_trans not_less by metis have g2:"rl1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult.commute not_le lower1 lower2 mult_le_cancel_left by metis from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max_repU2 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] max.commute maxt34 by (metis max_repU1 repU_def timesll tu.simps) next case NegPos assume bounds:"ru1 \ 0 \ 0 \ rl2" have r1:"r1 \ 0" using bounds dual_order.trans gru1 by blast have r2:"r2 \ 0" using bounds dual_order.trans grl2 by blast have lower1:"rl1 \ 0" using bounds dual_order.trans grl1 r1 by blast have lower2:"rl2 \ 0" using bounds by auto have upper1:"ru1 \ 0" using bounds by auto have upper2:"ru2 \ 0" using bounds dual_order.trans gru2 r2 by blast have g1:"ru1 * rl2 \ ru1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 not_less upper1 lower2 mult_le_cancel_left by metis have g2:"ru1 * r2 \ r1 * r2" using r1 upper1 r2 mult_right_mono gru1 by metis from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims maxt34 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] by (metis max_repU1 repU_def timesul tu.simps) next case PosNeg assume bounds:"0 \ rl1 \ ru2 \ 0" have r1:"r1 \ 0" using bounds dual_order.trans grl1 by blast have r2:"r2 \ 0" using bounds dual_order.trans gru2 by blast have lower1:"rl1 \ 0" using bounds by auto have lower2:"rl2 \ 0" 
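(* Case PosNeg, informally: here r1 >= 0 and r2 <= 0, so every product r1 * r2 is nonpositive and is bounded above by the corner product rl1 * ru2 (the least nonnegative factor times the greatest nonpositive factor), one of the four candidates maximised by tu. *)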
using dual_order.trans grl2 r2 by blast have upper1:"ru1 \ 0" using dual_order.trans gru1 u1 r1 by blast have upper2:"ru2 \ 0" using bounds by auto have g1:"rl1 * ru2 \ rl1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 not_less upper2 lower1 mult_le_cancel_left by metis have g2:"rl1 * r2 \ r1 * r2" using r1 lower1 r2 not_less gru2 gru1 grl1 grl2 by (metis mult_le_cancel_left mult.commute) from g1 and g2 have up:"rl1 * ru2 \ r1 * r2" by auto show "tu l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>U r1 * r2" using up maxU12 maxU34 bigMaxU wmax.elims max.coboundedI1 max.commute maxt34 max_repU2[OF maxU12] max_repU2[OF maxU34] max_repU2[OF bigMaxU] by (metis repU_def tu.simps) next case PosPos assume bounds:"0 \ rl1 \ 0 \ rl2" have r1:"r1 \ 0" using bounds dual_order.trans grl1 by blast have r2:"r2 \ 0" using bounds dual_order.trans grl2 by blast have lower1:"rl1 \ 0" using bounds by auto have lower2:"rl2 \ 0" using bounds by auto have upper1:"ru1 \ 0" using dual_order.trans gru1 u1 r1 by blast have upper2:"ru2 \ 0" using dual_order.trans gru2 u2 r2 bounds by blast have g1:"ru1 * ru2 \ ru1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_left_mono leD leI less_le_trans by metis have g2:"ru1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_right_mono by metis from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims max.coboundedI1 max.commute maxt34 max_repU2[OF bigMaxU] max_repU2[OF maxU12] max_repU2[OF maxU34] by (metis repU_def tu.simps) qed qed subsection \Minimum function\ text\Minimum of 2s-complement words\ fun wmin :: "word \ word \ word" where "wmin w1 w2 = (if w1 <s w2 then w1 else w2)" text\Correctness of wmin\ lemma wmin_lemma: assumes eq1:"w1 \\<^sub>E (r1::real)" assumes eq2:"w2 \\<^sub>E (r2::real)" shows "wmin w1 w2 \\<^sub>E (min r1 r2)" proof(cases rule: case_inf2[where ?w1.0=w1, where ?w2.0=w2]) case PosPos assume p1:"w1 = POS_INF" and p2:"w2 = POS_INF" then have bound1:"(real_of_int (sint POS_INF)) \ r1" and bound2:"(real_of_int (sint POS_INF)) \ r2" using eq1 eq2 by (auto simp add: rep_simps repe.simps) have eqInf:"wmin w1 w2 = POS_INF" using p1 p2 unfolding wmin.simps by auto have pos_eq:"POS_INF \\<^sub>E min r1 r2" apply(rule repPOS_INF) using bound1 bound2 unfolding eq1 eq2 by auto show ?thesis using pos_eq eqInf by auto next case PosNeg assume p1:"w1 = POS_INF" assume n2:"w2 = NEG_INF" obtain r ra :: real where bound1:" (real_of_int (sint POS_INF)) \ r" and bound2:"ra \ (real_of_int (sint NEG_INF))" and eq1:"r1 = r" and eq2:"r2 = ra" using p1 n2 eq1 eq2 by(auto simp add: rep_simps repe.simps) have eqNeg:"wmin w1 w2 = NEG_INF" unfolding eq1 eq2 wmin.simps p1 n2 word_sless_def word_sle_def by(auto) have neg_eq:"NEG_INF \\<^sub>E min r1 r2" apply(rule repNEG_INF) using bound1 bound2 eq1 eq2 by auto show "?thesis" using eqNeg neg_eq by auto next case PosNum assume p1:"w1 = POS_INF" assume np2:"w2 \ POS_INF" assume nn2:"w2 \ NEG_INF" have eq2:"r2 = (real_of_int (sint w2))" and bound1:"(real_of_int (sint POS_INF)) \ r1" and bound2a:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" and bound2b:"(real_of_int (sint w2)) < (real_of_int (sint POS_INF))" using p1 np2 nn2 eq1 eq2 by(auto simp add: rep_simps repe.simps) have eqNeg:"min r1 r2 = sint w2" using p1 by (metis bound1 bound2b dual_order.trans eq2 min_def not_less) have neg_eq:"wmin w1 w2 \\<^sub>E (real_of_int (sint (wmin w1 w2)))" apply (rule repINT) using bound1 bound2a bound2b bound2b p1 unfolding eq1 eq2 by (auto simp add: word_sless_alt) show "?thesis" using eqNeg neg_eq by
(metis bound2b less_eq_real_def not_less of_int_less_iff p1 wmin.simps word_sless_alt) next case NegPos assume n1:"w1 = NEG_INF" assume p2:"w2 = POS_INF" have bound1:"r1 \ (real_of_int (sint NEG_INF))" and bound2:"(real_of_int (sint POS_INF)) \ r2" using n1 p2 eq1 eq2 by(auto simp add: rep_simps repe.simps) have eqNeg:"wmin w1 w2 = NEG_INF" unfolding eq1 eq2 wmin.simps n1 p2 word_sless_def word_sle_def by(auto) have neg_eq:"NEG_INF \\<^sub>E min r1 r2" apply(rule repNEG_INF) using bound1 bound2 unfolding eq1 eq2 by auto show "wmin w1 w2 \\<^sub>E min r1 r2" using eqNeg neg_eq by auto next case NegNeg assume n1:"w1 = NEG_INF" assume n2:"w2 = NEG_INF" have bound1:"r1 \ (real_of_int (sint NEG_INF))" and bound2:"r2 \ (real_of_int (sint NEG_INF))" using n1 n2 eq1 eq2 by(auto simp add: rep_simps repe.simps) have eqNeg:"NEG_INF \\<^sub>E min r1 r2" apply(rule repNEG_INF) using eq1 eq2 bound1 bound2 unfolding NEG_INF_def by (auto) have neg_eq:"wmin w1 w2 = NEG_INF" using n1 n2 unfolding NEG_INF_def wmin.simps by auto show "wmin w1 w2 \\<^sub>E min r1 r2" using eqNeg neg_eq by auto next case NegNum assume n1:"w1 = NEG_INF" and nn2:"w2 \ NEG_INF" and np2:"w2 \ POS_INF" have eq2:"r2 = (real_of_int (sint w2))" and bound2a:"(real_of_int (sint w2)) < (real_of_int (sint POS_INF))" and bound2b:"(real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" and bound1:"r1 \ (real_of_int (sint NEG_INF))" using n1 nn2 np2 eq2 eq1 eq2 by (auto simp add: rep_simps repe.simps) have eqNeg:"wmin w1 w2 = NEG_INF" using n1 assms(2) bound2a eq2 n1 repeInt_simps by (auto simp add: word_sless_alt) have neg_eq:"NEG_INF \\<^sub>E min r1 r2" apply(rule repNEG_INF) using bound1 bound2a bound2b eq1 min_le_iff_disj by blast show "wmin w1 w2 \\<^sub>E min r1 r2" using eqNeg neg_eq by auto next case NumPos assume p2:"w2 = POS_INF" and nn1:"w1 \ NEG_INF" and np1:"w1 \ POS_INF" have eq1:"r1 = (real_of_int (sint w1))" and bound1a:" (real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and bound1b:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" and bound2:" (real_of_int (sint POS_INF)) \ r2" using nn1 np1 p2 eq2 eq1 eq2 by (auto simp add: rep_simps repe.simps) have res1:"wmin w1 w2 = w1" using p2 eq1 eq2 assms(1) bound1b p2 repeInt_simps by (auto simp add: word_sless_alt) have res2:"min r1 r2 = (real_of_int (sint w1))" using eq1 eq2 bound1a bound1b bound2 by transfer (auto simp add: less_imp_le less_le_trans min_def) have res3:"wmin w1 w2 \\<^sub>E (real_of_int (sint (wmin w1 w2)))" apply(rule repINT) using p2 bound1a res1 bound1a bound1b bound2 by auto show "wmin w1 w2 \\<^sub>E min r1 r2" using res1 res2 res3 by auto next case NumNeg assume nn1:"w1 \ NEG_INF" assume np1:"w1 \ POS_INF" assume n2:"w2 = NEG_INF" have eq1:"r1 = (real_of_int (sint w1))" and bound1a:" (real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and bound1b:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" and bound2:"r2 \ (real_of_int (sint NEG_INF))" using nn1 np1 n2 eq2 eq1 eq2 by (auto simp add: rep_simps repe.simps) have res1:"wmin w1 w2 = NEG_INF" using n2 bound1b by (metis min.absorb_iff2 min_def n2 not_less of_int_less_iff wmin.simps word_sless_alt) have res2:"NEG_INF \\<^sub>E min r1 r2" apply(rule repNEG_INF) using eq1 eq2 bound1a bound1b bound2 min_le_iff_disj by blast show "wmin w1 w2 \\<^sub>E min r1 r2" using res1 res2 by auto next case NumNum assume np1:"w1 \ POS_INF" assume nn1:"w1 \ NEG_INF" assume np2:"w2 \ POS_INF" assume nn2:"w2 \ NEG_INF" have eq1:"r1 = (real_of_int (sint w1))" and eq2:"r2 = (real_of_int (sint w2))" and 
bound1a:" (real_of_int (sint w1)) < (real_of_int (sint POS_INF))" and bound1b:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w1))" and bound2a:" (real_of_int (sint w2)) < (real_of_int (sint POS_INF))" and bound2b:" (real_of_int (sint NEG_INF)) < (real_of_int (sint w2))" using nn1 np1 nn2 np2 eq2 eq1 eq2 by (auto simp add: rep_simps repe.simps) have res1:"min r1 r2 = (real_of_int (sint (wmin w1 w2)))" using eq1 eq2 bound1a bound1b bound2a bound2b apply (simp add: min_def word_sless_alt not_less) apply transfer apply simp done have res2:"wmin w1 w2 \\<^sub>E (real_of_int (sint (wmin w1 w2)))" apply (rule repINT) using bound1a bound1b bound2a bound2b by (simp add: \min r1 r2 = (real_of_int (sint (wmin w1 w2)))\ eq2 min_less_iff_disj)+ show "wmin w1 w2 \\<^sub>E min r1 r2" using res1 res2 by auto qed lemma min_repU1: assumes "w1 \\<^sub>L x" assumes "w2 \\<^sub>L y" shows "wmin w1 w2 \\<^sub>L x " using wmin_lemma assms repL_def by (meson min_le_iff_disj) lemma min_repU2: assumes "w1 \\<^sub>L y" assumes "w2 \\<^sub>L x" shows "wmin w1 w2 \\<^sub>L x" using wmin_lemma assms repL_def by (meson min_le_iff_disj) subsection \Multiplication lower bound\ text\Multiplication lower bound\ fun tl :: "word \ word \ word \ word \ word" where "tl w1l w1u w2l w2u = wmin (wmin (wtimes w1l w2l) (wtimes w1u w2l)) (wmin (wtimes w1l w2u) (wtimes w1u w2u))" text\Correctness of multiplication lower bound\ lemma tl_lemma: assumes u1:"u\<^sub>1 \\<^sub>U (r1::real)" assumes u2:"u\<^sub>2 \\<^sub>U (r2::real)" assumes l1:"l\<^sub>1 \\<^sub>L (r1::real)" assumes l2:"l\<^sub>2 \\<^sub>L (r2::real)" shows "tl l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>L (r1 * r2)" proof - obtain rl1 rl2 ru1 ru2 :: real where gru1:"ru1 \ r1" and gru2:"ru2 \ r2" and grl1:"rl1 \ r1" and grl2:"rl2 \ r2" and eru1:"u\<^sub>1 \\<^sub>E ru1" and eru2:"u\<^sub>2 \\<^sub>E ru2" and erl1:"l\<^sub>1 \\<^sub>E rl1" and erl2:"l\<^sub>2 \\<^sub>E rl2" using u1 u2 l1 l2 unfolding repU_def repL_def by auto have timesuu:"wtimes u\<^sub>1 u\<^sub>2 \\<^sub>E ru1 * ru2" using wtimes_exact[OF eru1 eru2] by auto have timesul:"wtimes u\<^sub>1 l\<^sub>2 \\<^sub>E ru1 * rl2" using wtimes_exact[OF eru1 erl2] by auto have timeslu:"wtimes l\<^sub>1 u\<^sub>2 \\<^sub>E rl1 * ru2" using wtimes_exact[OF erl1 eru2] by auto have timesll:"wtimes l\<^sub>1 l\<^sub>2 \\<^sub>E rl1 * rl2" using wtimes_exact[OF erl1 erl2] by auto have maxt12:"wmin (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2) \\<^sub>E min (rl1 * rl2) (ru1 * rl2)" by (rule wmin_lemma[OF timesll timesul]) have maxt34:"wmin (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2) \\<^sub>E min (rl1 * ru2) (ru1 * ru2)" by (rule wmin_lemma[OF timeslu timesuu]) have bigMax:"wmin (wmin (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2)) (wmin (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2)) \\<^sub>E min (min(rl1 * rl2) (ru1 * rl2)) (min (rl1 * ru2) (ru1 * ru2))" by (rule wmin_lemma[OF maxt12 maxt34]) obtain maxt12val :: real where maxU12:"wmin (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2) \\<^sub>L min (rl1 * rl2) (ru1 * rl2)" using maxt12 unfolding repL_def by blast obtain maxt34val :: real where maxU34:"wmin (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2) \\<^sub>L min (rl1 * ru2) (ru1 * ru2)" using maxt34 unfolding repL_def by blast obtain bigMaxU:"wmin (wmin (wtimes l\<^sub>1 l\<^sub>2) (wtimes u\<^sub>1 l\<^sub>2)) (wmin (wtimes l\<^sub>1 u\<^sub>2) (wtimes u\<^sub>1 u\<^sub>2)) \\<^sub>L min (min (rl1 * rl2) (ru1 * rl2)) (min (rl1 * ru2) (ru1 
* ru2))" using bigMax unfolding repL_def by blast have ivl1:"rl1 \ ru1" using grl1 gru1 by auto have ivl2:"rl2 \ ru2" using grl2 gru2 by auto let ?thesis = "tl l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>L r1 * r2" show ?thesis using ivl1 ivl2 proof(cases rule: case_ivl_zero) case ZeroZero assume "rl1 \ 0 \ 0 \ ru1 \ rl2 \ 0 \ 0 \ ru2" then have geq1:"ru1 \ 0" and geq2:"ru2 \ 0" and geq3:"rl1 \ 0" and geq4:"rl2 \ 0" by auto consider "r1 \ 0 \ r2 \ 0" | "r1 \ 0 \ r2 \ 0" | "r1 \ 0 \ r2 \ 0" | "r1 \ 0 \ r2 \ 0" using le_cases by auto then show ?thesis proof (cases) case 1 have g1:"rl1 * ru2 \ 0" using "1" geq1 geq2 geq3 geq4 grl2 gru2 mult_le_0_iff by blast have g2:"0 \ r1 * r2" using "1" geq1 geq2 grl1 grl2 gru1 gru2 by (simp) from g1 and g2 have up:"rl1 * ru2 \ r1 * r2" by auto show ?thesis using up eru1 eru2 erl1 erl2 min_repU1 min_repU2 repL_def repU_def timeslu tl.simps wmin.elims by (metis bigMax min_le_iff_disj) next case 2 have g1:"rl1 * ru2 \ rl1 * r2" using "2" geq1 geq2 grl2 gru2 by (metis mult_le_cancel_left geq3 leD) have g2:"rl1 * r2 \ r1 * r2" using "2" geq1 geq2 grl2 gru2 by (simp add: mult_right_mono grl1) from g1 and g2 have up:"rl1 * ru2 \ r1 * r2" by auto show ?thesis by (metis up maxU12 min_repU2 repL_def tl.simps min.coboundedI1 maxt34) next case 3 have g1:"ru1 * rl2 \ ru1 * r2" using "3" geq1 geq2 grl2 gru2 by (simp add: mult_left_mono) have g2:"ru1 * r2 \ r1 * r2" using "3" geq1 geq2 grl1 grl2 gru1 gru2 mult_minus_right mult_right_mono by (simp add: mult_right_mono_neg) from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 maxt34 timesul by (metis repL_def tl.simps) next case 4 have g1:"ru1 * rl2 \ 0" using "4" geq1 geq2 grl1 grl2 gru1 gru2 \rl1 \ 0 \ 0 \ ru1 \ rl2 \ 0 \ 0 \ ru2\ mult_less_0_iff less_eq_real_def not_less by auto have g2:"0 \ r1 * r2" using "4" geq1 geq2 grl1 grl2 gru1 gru2 by (metis mult_less_0_iff not_less) from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show ?thesis by (metis up maxU12 maxU34 wmin.elims min_repU1 min_repU2 repL_def timesul tl.simps) qed next case ZeroPos assume bounds:"rl1 \ 0 \ 0 \ ru1 \ 0 \ rl2" have r2:"r2 \ 0" using bounds dual_order.trans grl2 by blast consider "r1 \ 0" | "r1 \ 0" using le_cases by auto then show ?thesis proof (cases) case 1 have g1:"rl1 * rl2 \ 0" using "1" r2 bounds grl1 grl2 gru1 gru2 by (simp add: mult_le_0_iff) have g2:"0 \ r1 * r2" using "1" r2 bounds grl1 grl2 gru1 gru2 by (simp) from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis by (metis repL_def timesll tl.simps up maxU12 maxU34 wmin.elims min_repU2 min_repU1) next case 2 have bound:"ru2 \ 0" using "2" r2 bounds grl1 grl2 gru1 gru2 dual_order.trans by auto then have g1:"rl1 * ru2 \ rl1 * r2" using "2" r2 bounds grl1 grl2 gru1 gru2 mult_le_cancel_left by fastforce have g2:"rl1 * r2 \ r1 * r2" using "2" r2 bounds grl1 grl2 gru1 gru2 mult_le_0_iff mult_le_cancel_right by fastforce from g1 and g2 have up:"rl1 * ru2 \ r1 * r2" by auto show ?thesis by (metis up maxU12 wmin.elims min_repU2 min.coboundedI1 maxt34 repL_def tl.simps) qed next case ZeroNeg assume bounds:"rl1 \ 0 \ 0 \ ru1 \ ru2 \ 0" have r2:"r2 \ 0" using bounds dual_order.trans gru2 by blast consider (Pos) "r1 \ 0" | (Neg) "r1 \ 0" using le_cases by auto then show ?thesis proof (cases) case Pos have bound:"rl2 \ 0" using Pos r2 bounds grl1 grl2 gru1 gru2 dual_order.trans by auto then have g1:"ru1 * rl2 \ ru1 * r2" using Pos bounds grl1 grl2 gru1 gru2 mult_le_cancel_left by fastforce have 
p1:"\a::real. (0 \ - a) = (a \ 0)" by(auto) have p2:"\a b::real. (- a \ - b) = (b \ a)" by auto have g2:"ru1 * r2 \ r1 * r2" using Pos r2 bounds grl1 grl2 gru1 gru2 p1 p2 by (simp add: mult_right_mono_neg) from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show ?thesis by (metis up maxU12 maxU34 wmin.elims min_repU2 min_repU1 repL_def timesul tl.simps) next case Neg have g1:"ru1 * ru2 \ 0" using Neg r2 bounds grl1 grl2 gru1 gru2 mult_le_0_iff by blast have g2:"0 \ r1 * r2" using Neg r2 zero_le_mult_iff by blast from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 min.coboundedI1 min.commute maxt34 by (metis repL_def tl.simps) qed next case PosZero assume bounds:"0 \ rl1 \ rl2 \ 0 \ 0 \ ru2" have r1:"r1 \ 0" using bounds dual_order.trans grl1 by blast have bound:"0 \ ru1" using r1 bounds grl1 grl2 gru1 gru2 dual_order.trans by auto consider "r2 \ 0" | "r2 \ 0" using le_cases by auto then show ?thesis proof (cases) case 1 have g1:"rl1 * rl2 \ 0" using r1 "1" bounds grl1 grl2 gru1 gru2 mult_le_0_iff by blast have g2:"0 \ r1 * r2" using r1 "1" bounds grl1 grl2 gru1 gru2 zero_le_mult_iff by blast from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmax.elims min_repU2 min_repU1 min.coboundedI1 min.commute maxt12 maxt34 repL_def timesll tl.simps by metis next case 2 have g1:"ru1 * rl2 \ ru1 * r2" using r1 "2" bounds bound grl1 grl2 gru1 gru2 using mult_left_mono by blast have g2:"ru1 * r2 \ r1 * r2" using r1 "2" bounds bound grl2 gru2 by (metis mult_left_mono_neg gru1 mult.commute) from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 maxt34 by (metis repL_def timesul tl.simps) qed next case NegZero assume bounds:"ru1 \ 0 \ rl2 \ 0 \ 0 \ ru2" have r1:"r1 \ 0" using bounds dual_order.trans gru1 by blast have bound:"rl1 \ 0" using r1 bounds grl1 grl2 gru1 gru2 dual_order.trans by auto consider "r2 \ 0" | "r2 \ 0" using le_cases by auto then show ?thesis proof (cases) case 1 assume r2:"r2 \ 0" have g1:"rl1 * ru2 \ rl1 * r2" using r1 r2 bounds bound grl1 grl2 gru1 gru2 by (metis mult_le_cancel_left leD) have g2:"rl1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_right_mono by (simp add: mult_le_0_iff) from g1 and g2 have up:"rl1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU min_repU2 min_repU1 min.coboundedI1 maxt34 by (metis min_repU2 repL_def tl.simps) next case 2 assume r2:"r2 \ 0" have lower:"rl1 \ 0" using bounds dual_order.trans grl1 r1 by blast have g1:"ru1 * ru2 \ 0" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_le_0_iff by blast have g2:"0 \ r1 * r2" using r1 r2 by (simp add: zero_le_mult_iff) from g1 and g2 have up:"ru1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 min.coboundedI1 min.commute maxt34 by (metis repL_def tl.simps) qed next case NegNeg assume bounds:"ru1 \ 0 \ ru2 \ 0" have r1:"r1 \ 0" using bounds dual_order.trans gru1 by blast have r2:"r2 \ 0" using bounds dual_order.trans gru2 by blast have lower1:"rl1 \ 0" using bounds dual_order.trans grl1 r1 by blast have lower2:"rl2 \ 0" using bounds dual_order.trans grl2 r2 by blast have g1:"ru1 * ru2 \ ru1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using not_less mult_le_cancel_left by metis have g2:"ru1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_le_cancel_left mult.commute not_le lower1 lower2 by metis from g1 and g2 have up:"ru1 * ru2 
\ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 min.coboundedI1 min.commute maxt34 by (metis repL_def tl.simps) next case NegPos assume bounds:"ru1 \ 0 \ 0 \ rl2" have r1:"r1 \ 0" using bounds dual_order.trans gru1 by blast have r2:"r2 \ 0" using bounds dual_order.trans grl2 by blast have lower1:"rl1 \ 0" using bounds dual_order.trans grl1 r1 by blast have lower2:"rl2 \ 0" using bounds by auto have upper1:"ru1 \ 0" using bounds by auto have upper2:"ru2 \ 0" using bounds dual_order.trans gru2 r2 by blast have g1:"rl1 * ru2 \ rl1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 less_le_trans upper1 lower2 by (metis mult_le_cancel_left not_less) have g2:"rl1 * r2 \ r1 * r2" using r1 upper1 r2 mult_right_mono mult_le_0_iff grl1 by blast from g1 and g2 have up:"rl1 * ru2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 maxt12 maxt34 by (metis repL_def timeslu tl.simps) next case PosNeg assume bounds:"0 \ rl1 \ ru2 \ 0" have r1:"r1 \ 0" using bounds dual_order.trans grl1 by blast have r2:"r2 \ 0" using bounds dual_order.trans gru2 by blast have lower1:"rl1 \ 0" using bounds by auto have lower2:"rl2 \ 0" using dual_order.trans grl2 r2 by blast have upper1:"ru1 \ 0" using dual_order.trans gru1 u1 r1 by blast have upper2:"ru2 \ 0" using bounds by auto have g1:"ru1 * rl2 \ ru1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 mult_left_mono less_le_trans not_less by metis have g2:"ru1 * r2 \ r1 * r2" using r1 lower1 r2 not_less gru2 gru1 grl1 grl2 by (metis mult_le_cancel_left mult.commute) from g1 and g2 have up:"ru1 * rl2 \ r1 * r2" by auto show "tl l\<^sub>1 u\<^sub>1 l\<^sub>2 u\<^sub>2 \\<^sub>L r1 * r2" using up maxU12 maxU34 bigMaxU wmin.elims min_repU2 min_repU1 by (metis repL_def timesul tl.simps) next case PosPos assume bounds:"0 \ rl1 \ 0 \ rl2" have r1:"r1 \ 0" using bounds dual_order.trans grl1 by blast have r2:"r2 \ 0" using bounds dual_order.trans grl2 by blast have lower1:"rl1 \ 0" using bounds by auto have lower2:"rl2 \ 0" using bounds by auto have upper1:"ru1 \ 0" using dual_order.trans gru1 u1 r1 by blast have upper2:"ru2 \ 0" using dual_order.trans gru2 u2 r2 bounds by blast have g1:"rl1 * rl2 \ rl1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_left_mono using leD leI less_le_trans by auto have g2:"rl1 * r2 \ r1 * r2" using r1 r2 bounds grl1 grl2 gru1 gru2 using mult_right_mono by blast from g1 and g2 have up:"rl1 * rl2 \ r1 * r2" by auto show ?thesis using up maxU12 maxU34 bigMaxU min_repU2 min_repU1 min.coboundedI1 maxt12 maxt34 by (metis repL_def tl.simps) qed qed text\Most significant bit only changes under successor when all other bits are 1\ lemma msb_succ: fixes w :: "32 Word.word" assumes neq1:"uint w \ 0xFFFFFFFF" assumes neq2:"uint w \ 0x7FFFFFFF" shows "msb (w + 1) = msb w" proof - have "w \ 0xFFFFFFFF" using neq1 by auto then have neqneg1:"w \ -1" by auto have "w \ 0x7FFFFFFF" using neq2 by auto then have neqneg2:"w \ (2^31)-1" by auto show ?thesis using neq1 neq2 unfolding msb_big using Word_Lemmas.word_le_make_less[of "w + 1" "0x80000000"] Word_Lemmas.word_le_make_less[of "w " "0x80000000"] neqneg1 neqneg2 by auto qed text\Negation commutes with msb except at edge cases\ lemma msb_non_min: fixes w :: "32 Word.word" assumes neq1:"uint w \ 0" assumes neq2:"uint w \ ((2^(len_of (TYPE(31)))))" shows "msb (uminus w) = HOL.Not(msb(w))" proof - have fact1:"uminus w = word_succ (~~ w)" by (rule twos_complement) have fact2:"msb (~~w) = HOL.Not(msb w)" using word_ops_msb[of w] by auto 
have neqneg1:"w \ 0" using neq1 by auto have not_undef:"w \ 0x80000000" using neq2 by auto then have neqneg2:"w \ (2^31)" by auto from \w \ 0\ have \~~ w \ ~~ 0\ by (simp only: bit.compl_eq_compl_iff) simp then have "(~~ w) \ 0xFFFFFFFF" by auto then have uintNeq1:"uint (~~ w) \ 0xFFFFFFFF" using uint_distinct[of "~~w" "0xFFFFFFFF"] by auto from \w \ 2 ^ 31\ have \~~ w \ ~~ 2 ^ 31\ by (simp only: bit.compl_eq_compl_iff) simp then have "(~~ w) \ 0x7FFFFFFF" by auto then have uintNeq2:" uint (~~ w) \ 0x7FFFFFFF" using uint_distinct[of "~~w" "0x7FFFFFFF"] by auto have fact3:"msb ((~~w) + 1) = msb (~~w)" apply(rule msb_succ[of "~~w"]) using neq1 neq2 uintNeq1 uintNeq2 by auto show "msb (uminus w) = HOL.Not(msb(w))" using fact1 fact2 fact3 by (simp add: word_succ_p1) qed text\Only 0x80000000 preserves msb=1 under negation\ lemma msb_min_neg: fixes w::"word" assumes msb1:"msb (- w)" assumes msb2:"msb w" shows "uint w = ((2^(len_of (TYPE(31)))))" proof (rule ccontr) from \msb w\ have \w \ 0\ using word_msb_0 by auto then have \uint w \ 0\ by transfer simp moreover assume \uint w \ 2 ^ LENGTH(31)\ ultimately have \msb (- w) \ \ msb w\ by (rule msb_non_min) with assms show False by simp qed text\Only 0x00000000 preserves msb=0 under negation\ lemma msb_zero: fixes w::"word" assumes msb1:"\ msb (- w)" assumes msb2:"\ msb w" shows "uint w = 0" proof - have neq:"w \ ((2 ^ len_of TYPE(31))::word)" using msb1 msb2 by auto have eq:"uint ((2 ^ len_of TYPE(31))::word) = 2 ^ len_of TYPE(31)" by auto then have neq:"uint w \ uint ((2 ^ len_of TYPE(31))::word)" using uint_distinct[of w "2^len_of TYPE(31)"] neq eq by auto show ?thesis using msb1 msb2 minus_zero msb_non_min[of w] neq by force qed text\Finite numbers alternate msb under negation\ lemma msb_pos: fixes w::"word" assumes msb1:"msb (- w)" assumes msb2:"\ msb w" shows "uint w \ {1 .. (2^((len_of TYPE(32)) - 1))-1}" proof - have main: "w \ {1 .. (2^((len_of TYPE(32)) - 1))-1}" using msb1 msb2 apply(clarsimp) unfolding word_msb_sint apply(rule conjI) apply (metis neg_equal_0_iff_equal not_le word_less_1) proof - have imp:"w \ 0x80000000 \ False" proof - assume geq:"w \ 0x80000000" then have "msb w" using Word_Lemmas.msb_big[of w] by auto then show False using msb2 by auto qed have mylem:"\w1 w2::word. uint w1 \ uint w2 \ w1 \ w2" subgoal for w1 w2 by (simp add: word_le_def) done have mylem2:"\w1 w2::word. w1 > w2 \ uint w1 > uint w2" subgoal for w1 w2 by (simp add: word_less_def) done have gr_to_geq:"w > 0x7FFFFFFF \ w \ 0x80000000" apply(rule mylem) using mylem2[of "0x7FFFFFFF" "w"] by auto have taut:"w \ 0x7FFFFFFF \ w > 0x7FFFFFFF" by auto then show "w \ 0x7FFFFFFF" using imp taut gr_to_geq by auto qed have set_eq:"(uint ` (({1..(minus(2 ^ (minus(len_of TYPE(32)) 1)) 1)})::word set)) = ({1..minus(2 ^ (minus (len_of TYPE(32)) 1)) 1}::int set)" apply(auto simp add: word_le_def) subgoal for xa proof - assume lower:"1 \ xa" and upper:"xa \ 2147483647" then have in_range:"xa \ {0 .. 
2^32-1}" by auto then have "xa \ range (uint::word \ int)" unfolding word_uint.Rep_range uints_num by auto then obtain w::word where xaw:"xa = uint w" by auto then have "w \ {1..0x7FFFFFFF} " using lower upper apply(clarsimp, auto) by (auto simp add: word_le_def) then show ?thesis using uint_distinct uint_distinct main image_eqI word_le_def xaw by blast qed done then show "uint w \ {1..2 ^ (len_of TYPE(32) - 1) - 1}" using uint_distinct uint_distinct main image_eqI by blast qed lemma msb_neg: fixes w::"word" assumes msb1:"\ msb (- w)" assumes msb2:"msb w" shows "uint w \ {2^((len_of TYPE(32) - 1))+1 .. 2^((len_of TYPE(32)))-1}" proof - have mylem:"\w1 w2::word. uint w1 \ uint w2 \ w1 \ w2" by (simp add: word_le_def) have mylem2:"\w1 w2::word. w1 > w2 \ uint w1 > uint w2" by (simp add: word_less_def) have gr_to_geq:"w > 0x80000000 \ w \ 0x80000001" apply(rule mylem) using mylem2[of "0x80000000" "w"] by auto have taut:"w \ 0x80000000 \ 0x80000000 < w" by auto have imp:"w \ 0x80000000 \ False" proof - assume geq:"w \ 0x80000000" then have "(msb (-w))" using Word_Lemmas.msb_big[of "-w"] Word_Lemmas.msb_big[of "w"] by (simp add: msb2) then show False using msb1 by auto qed have main: "w \ {2^((len_of TYPE(32)) - 1)+1 .. 2^((len_of TYPE(32)))-1}" using msb1 msb2 apply(clarsimp) unfolding word_msb_sint proof - show "0x80000001 \ w" using imp taut gr_to_geq by auto qed have set_eq:"(uint ` (({2^((len_of TYPE(32) - 1))+1 .. 2^((len_of TYPE(32)))-1})::word set)) = {2^((len_of TYPE(32) - 1))+1 .. 2^((len_of TYPE(32)))-1}" apply(auto) subgoal for xa by (simp add: word_le_def) subgoal for w using uint_lt [of w] by simp subgoal for xa proof - assume lower:"2147483649 \ xa" and upper:"xa \ 4294967295" then have in_range:"xa \ {0x80000000 .. 0xFFFFFFFF}" by auto then have "xa \ range (uint::word \ int)" unfolding word_uint.Rep_range uints_num by auto then obtain w::word where xaw:"xa = uint w" by auto then have the_in:"w \ {0x80000001 .. 0xFFFFFFFF} " using lower upper by (auto simp add: word_le_def) have the_eq:"(0xFFFFFFFF::word) = -1" by auto from the_in the_eq have "w \ {0x80000001 .. -1}" by auto then show ?thesis using uint_distinct uint_distinct main image_eqI word_le_def xaw by blast qed done then show "uint w \ {2^((len_of TYPE(32)) - 1)+1 .. 2^((len_of TYPE(32)))-1}" using uint_distinct uint_distinct main image_eqI by blast qed text\2s-complement commutes with negation except edge cases\ lemma sint_neg_hom: fixes w :: "32 Word.word" shows "uint w \ ((2^(len_of (TYPE(31))))) \ (sint(-w) = -(sint w))" unfolding word_sint_msb_eq apply auto subgoal using msb_min_neg by auto prefer 3 subgoal using msb_zero[of w] by (simp add: msb_zero) proof - assume msb1:"msb (- w)" assume msb2:"\ msb w" have "uint w \ {1 .. (2^((len_of TYPE(32)) - 1))-1}" using msb_pos[OF msb1 msb2] by auto then have bound:"uint w \ {1 .. 0x7FFFFFFF}" by auto have size:"size (w::32 Word.word) = 32" using Word.word_size[of w] by auto have lem:"\x::int. \n::nat. 
x \ {1..(2^n)-1} \ ((- x) mod (2^n)) - (2^n) = - x" subgoal for x n apply(cases "x mod 2^n = 0") by(auto simp add: Divides.zmod_zminus1_eq_if[of x "2^n"]) done have lem_rule:"uint w \ {1..2 ^ 32 - 1} \ (- uint w mod 4294967296) - 4294967296 = - uint w" using lem[of "uint w" 32] by auto have almost:"- uint w mod 4294967296 - 4294967296 = - uint w" apply(rule lem_rule) using bound by auto show "uint (- w) - 2 ^ size (- w) = - uint w" using bound unfolding Word.uint_word_ariths word_size_neg by (auto simp add: size almost) next assume neq:"uint w \ 0x80000000" assume msb1:"\ msb (- w)" assume msb2:"msb w" have bound:"uint w \ {0x80000001.. 0xFFFFFFFF}" using msb1 msb2 msb_neg by auto have size:"size (w::32 Word.word) = 32" using Word.word_size[of w] by auto have lem:"\x::int. \n::nat. x \ {1..(2^n)-1} \ (-x mod (2^n)) = (2^n) - x" subgoal for x n apply(auto) apply(cases "x mod 2^n = 0") by (simp add: Divides.zmod_zminus1_eq_if[of x "2^n"])+ done from bound have wLeq: "uint w \ 4294967295" and wGeq: "2147483649 \ uint w" by auto from wLeq have wLeq':"uint w \ 4294967296" by fastforce have f3: "(0 \ 4294967296 + - 1 * uint w + - 1 * ((4294967296 + - 1 * uint w) mod 4294967296)) = (uint w + (4294967296 + - 1 * uint w) mod 4294967296 \ 4294967296)" by auto have f4: "(0 \ 4294967296 + - 1 * uint w) = (uint w \ 4294967296)" by auto have f5: "\i ia. \ (0::int) \ i \ 0 \ i + - 1 * (i mod ia)" by (simp add: zmod_le_nonneg_dividend) then have f6: "uint w + (4294967296 + - 1 * uint w) mod 4294967296 \ 4294967296" using f4 f3 wLeq' by blast have f7: "4294967296 + - 1 * uint w + - 4294967296 = - 1 * uint w" by auto have f8: "- (1::int) * 4294967296 = - 4294967296" by auto have f9: "(0 \ - 1 * uint w) = (uint w \ 0)" by auto have f10: "(4294967296 + -1 * uint w + -1 * ((4294967296 + -1 * uint w) mod 4294967296) \ 0) = (4294967296 \ uint w + (4294967296 + - 1 * uint w) mod 4294967296)" by auto have f11: "\ 4294967296 \ (0::int)" by auto have f12: "\x0. ((0::int) < x0) = (\ x0 \ 0)" by auto have f13: "\x0 x1. ((x1::int) < x0) = (\ 0 \ x1 + - 1 * x0)" by auto have f14: "\x0 x1. 
((x1::int) \ x1 mod x0) = (x1 + - 1 * (x1 mod x0) \ 0)" by auto have "\ uint w \ 0" using wGeq by fastforce then have "4294967296 \ uint w + (4294967296 + - 1 * uint w) mod 4294967296" using f14 f13 f12 f11 f10 f9 f8 f7 by (metis (no_types) int_mod_ge) then show "uint (- w) = 2 ^ size w - uint w" using f6 unfolding Word.uint_word_ariths by (auto simp add: size f4) qed text\2s-complement encoding is injective\ lemma sint_dist: fixes x y ::word assumes "x \ y" shows "sint x \ sint y" by (simp add: assms) subsection\Negation\ fun wneg :: "word \ word" where "wneg w = (if w = NEG_INF then POS_INF else if w = POS_INF then NEG_INF else -w)" text\word negation is correct\ lemma wneg_lemma: assumes eq:"w \\<^sub>E (r::real)" shows "wneg w \\<^sub>E -r" apply(rule repe.cases[OF eq]) apply(auto intro!: repNEG_INF repPOS_INF simp add: repe.simps)[2] subgoal for ra proof - assume eq:"w = ra" assume i:"r = (real_of_int (sint ra))" assume bounda:" (real_of_int (sint ra)) < (real_of_int (sint POS_INF))" assume boundb:" (real_of_int (sint NEG_INF)) < (real_of_int (sint ra))" have raNeq:"ra \ 2147483647" using sint_range[OF bounda boundb] by (auto) have raNeqUndef:"ra \ 2147483648" using int_not_undef[OF bounda boundb] by (auto) have "uint ra \ uint ((2 ^ len_of TYPE(31))::word)" apply (rule uint_distinct) using raNeqUndef by auto then have raNeqUndefUint:"uint ra \ ((2 ^ len_of TYPE(31)))" by auto have res1:"wneg w \\<^sub>E (real_of_int (sint (wneg w)))" apply (rule repINT) using sint_range[OF bounda boundb] sint_neg_hom[of ra, OF raNeqUndefUint] raNeq raNeqUndefUint raNeqUndef eq by(auto) have res2:"- r = (real_of_int (sint (wneg w)))" using eq bounda boundb i sint_neg_hom[of ra, OF raNeqUndefUint] raNeq raNeqUndef eq apply auto apply transfer apply simp done show ?thesis using res1 res2 by auto qed done subsection\Comparison\ fun wgreater :: "word \ word \ bool" where "wgreater w1 w2 = (sint w1 > sint w2)" lemma neg_less_contra:"\x. 
Suc x < - (Suc x) \ False" by auto

text\Comparison > of words is correct\
lemma wgreater_lemma:"w1 \\<^sub>L (r1::real) \ w2 \\<^sub>U r2 \ wgreater w1 w2 \ r1 > r2"
proof (auto simp add: repU_def repL_def) fix r'\<^sub>1 r'\<^sub>2 assume sint_le:"sint w1 > sint w2" then have sless:"(w2 <s w1)" using word_sless_alt by auto assume r1_leq:"r'\<^sub>1 \ r1" assume r2_leq:"r2 \ r'\<^sub>2" assume wr1:"w1 \\<^sub>E r'\<^sub>1" assume wr2:"w2 \\<^sub>E r'\<^sub>2" have greater:"r'\<^sub>1 > r'\<^sub>2" using wr1 wr2 apply(auto simp add: repe.simps) prefer 4 using sless sint_le apply (auto simp add: less_le_trans not_le) apply transfer apply simp apply transfer apply simp apply transfer apply simp done show "r1 > r2" using r1_leq r2_leq greater by auto qed

text\Comparison $\geq$ of words\
fun wgeq :: "word \ word \ bool" where "wgeq w1 w2 = ((\ ((w2 = NEG_INF \ w1 = NEG_INF) \(w2 = POS_INF \ w1 = POS_INF))) \ (sint w2 \ sint w1))"

text\Comparison $\geq$ of words is correct\
lemma wgeq_lemma:"w1 \\<^sub>L r1 \ w2 \\<^sub>U (r2::real) \ wgeq w1 w2 \ r1 \ r2" proof (unfold wgeq.simps) assume assms:"\ (w2 = NEG_INF \ w1 = NEG_INF \ w2 = POS_INF \ w1 = POS_INF) \ sint w2 \ sint w1" assume a1:"w1 \\<^sub>L r1" and a2:"w2 \\<^sub>U (r2::real)" from assms have sint_le:"sint w2 \ sint w1" by auto then have sless:"(w2 <=s w1)" using word_sless_alt word_sle_def by auto obtain r'\<^sub>1 r'\<^sub>2 where r1_leq:"r'\<^sub>1 \ r1" and r2_leq:"r2 \ r'\<^sub>2" and wr1:"w1 \\<^sub>E r'\<^sub>1" and wr2:"w2 \\<^sub>E r'\<^sub>2" using a1 a2 unfolding repU_def repL_def by auto from assms have check1:"\ (w1 = NEG_INF \ w2 = NEG_INF)" by auto from assms have check2:"\ (w1 = POS_INF \ w2 = POS_INF)" by auto have less:"r'\<^sub>2 \ r'\<^sub>1" using sless sint_le check1 check2 repe.simps wr2 wr1 apply (auto simp add: repe.simps) apply transfer apply simp apply transfer apply simp apply transfer apply simp apply transfer apply simp apply transfer apply simp apply transfer apply simp apply transfer apply simp apply transfer apply simp done show "r1 \ r2" using r1_leq r2_leq less by auto qed

subsection\Absolute value\
text\Absolute value of word\
fun wabs :: "word \ word" where "wabs l1 = (wmax l1 (wneg l1))"

text\Correctness of wabs\
lemma wabs_lemma: assumes eq:"w \\<^sub>E (r::real)" shows "wabs w \\<^sub>E (abs r)" proof - have w:"wmax w (wneg w) \\<^sub>E max r (-r)" by (rule wmax_lemma[OF eq wneg_lemma[OF eq]]) have r:"max r (-r) = abs r" by auto from w r show ?thesis by auto qed

declare more_real_of_word_simps [simp del] end

diff --git a/thys/Irrational_Series_Erdos_Straus/Irrational_Series_Erdos_Straus.thy b/thys/Irrational_Series_Erdos_Straus/Irrational_Series_Erdos_Straus.thy
--- a/thys/Irrational_Series_Erdos_Straus/Irrational_Series_Erdos_Straus.thy
+++ b/thys/Irrational_Series_Erdos_Straus/Irrational_Series_Erdos_Straus.thy
@@ -1,2017 +1,2017 @@
(* Title: Irrational_Series_Erdos_Straus.thy
   Author: Angeliki Koutsoukou-Argyraki and Wenda Li, University of Cambridge, UK.
   We formalise certain irrationality criteria for infinite series by P. Erdos and E.G. Straus. In particular, we formalise Theorem 2.1, Corollary 2.10 and Theorem 3.1 in [1]. The latter is an application of Theorem 2.1 involving the prime numbers.
   References:
   [1] P. Erdos and E.G. Straus, On the irrationality of certain series, Pacific Journal of Mathematics, Vol.
55, No 1, 1974 https://projecteuclid.org/euclid.pjm/1102911140 *) theory "Irrational_Series_Erdos_Straus" imports Prime_Number_Theorem.Prime_Number_Theorem Prime_Distribution_Elementary.PNT_Consequences begin section \Miscellaneous\ lemma suminf_comparison: assumes "summable f" and gf: "\n. norm (g n) \ f n" shows "suminf g \ suminf f" proof (rule suminf_le) show "g n \ f n" for n using gf[of n] by auto show "summable g" using assms summable_comparison_test' by blast show "summable f" using assms(1) . qed lemma tendsto_of_int_diff_0: assumes "(\n. f n - of_int(g n)) \ (0::real)" "\\<^sub>F n in sequentially. f n > 0" shows "\\<^sub>F n in sequentially. 0 \ g n" proof - have "\\<^sub>F n in sequentially. \f n - of_int(g n)\ < 1 / 2" using assms(1)[unfolded tendsto_iff,rule_format,of "1/2"] by auto then show ?thesis using assms(2) apply eventually_elim by linarith qed lemma eventually_mono_sequentially: assumes "eventually P sequentially" assumes "\x. P (x+k) \ Q (x+k)" shows "eventually Q sequentially" using sequentially_offset[OF assms(1),of k] apply (subst eventually_sequentially_seg[symmetric,of _ k]) apply (elim eventually_mono) by fact lemma frequently_eventually_at_top: fixes P Q::"'a::linorder \ bool" assumes "frequently P at_top" "eventually Q at_top" shows "frequently (\x. P x \ (\y\x. Q y) ) at_top" using assms unfolding frequently_def eventually_at_top_linorder by (metis (mono_tags, hide_lams) le_cases order_trans) lemma eventually_at_top_mono: fixes P Q::"'a::linorder \ bool" assumes event_P:"eventually P at_top" assumes PQ_imp:"\x. x\z \ \y\x. P y \ Q x" shows "eventually Q at_top" proof - obtain N where N_P:"\n\N. P n" using event_P[unfolded eventually_at_top_linorder] by auto define N' where "N' = max N z" have "Q x" when "x\N'" for x apply (rule PQ_imp) using N_P that unfolding N'_def by auto then show ?thesis unfolding eventually_at_top_linorder by auto qed lemma frequently_at_top_elim: fixes P Q::"'a::linorder \ bool" assumes "\\<^sub>Fx in at_top. P x" assumes "\i. P i \ \j>i. Q j" shows "\\<^sub>Fx in at_top. Q x" using assms unfolding frequently_def eventually_at_top_linorder by (meson leD le_cases less_le_trans) lemma less_Liminf_iff: fixes X :: "_ \ _ :: complete_linorder" shows "Liminf F X < C \ (\yx. y \ X x) F)" apply (subst Not_eq_iff[symmetric]) apply (simp add:not_less not_frequently not_le le_Liminf_iff) by force lemma sequentially_even_odd_imp: assumes "\\<^sub>F N in sequentially. P (2*N)" "\\<^sub>F N in sequentially. P (2*N+1)" shows "\\<^sub>F n in sequentially. P n" proof - obtain N where N_P:"\x\N. P (2 * x) \ P (2 * x + 1)" using eventually_conj[OF assms] unfolding eventually_at_top_linorder by auto define N' where "N'=2*N " have "P n" when "n\2*N" for n proof - define n' where "n'= n div 2" then have "n'\N" using that by auto then have "P (2 * n') \ P (2 * n' + 1)" using N_P by auto then show ?thesis unfolding n'_def apply (cases "even n") by auto qed then show ?thesis unfolding eventually_at_top_linorder by auto qed section \Theorem 2.1 and Corollary 2.10\ context fixes a b ::"nat\int " assumes a_pos:"\ n. a n >0 " and a_large:"\\<^sub>F n in sequentially. a n > 1" and ab_tendsto: "(\n. \b n\ / (a (n-1)*a n)) \ 0" begin private lemma aux_series_summable: "summable (\n. b n / (\k\n. a k))" proof - have "\e>0. \\<^sub>F x in sequentially. \b x\ / (a (x-1) * a x) < e" using ab_tendsto[unfolded tendsto_iff] apply (simp add:of_int_abs[symmetric] abs_mult del:of_int_abs) by (subst (asm) (2) abs_of_pos,use \\ n. 
a n > 0\ in auto)+ from this[rule_format,of 1] have "\\<^sub>F x in sequentially. \real_of_int(b x)\ < (a (x-1) * a x)" using \\ n. a n >0\ by auto moreover have "\n. (\k\n. real_of_int (a k)) > 0" using a_pos by (auto intro!:linordered_semidom_class.prod_pos) ultimately have "\\<^sub>F n in sequentially. \b n\ / (\k\n. a k) < (a (n-1) * a n) / (\k\n. a k)" apply (elim eventually_mono) by (auto simp add:field_simps) moreover have "\b n\ / (\k\n. a k) = norm (b n / (\k\n. a k))" for n using \\n. (\k\n. real_of_int (a k)) > 0\[rule_format,of n] by auto ultimately have "\\<^sub>F n in sequentially. norm (b n / (\k\n. a k)) < (a (n-1) * a n) / (\k\n. a k)" by algebra moreover have "summable (\n. (a (n-1) * a n) / (\k\n. a k))" proof - obtain s where a_gt_1:"\ n\s. a n >1" using a_large[unfolded eventually_at_top_linorder] by auto define cc where "cc= (\k0" unfolding cc_def apply (rule linordered_semidom_class.prod_pos) using a_pos by auto have "(\k\n+s. a k) \ cc * 2^n" for n proof - have "prod a {s.. 2^n" proof (induct n) case 0 then show ?case using a_gt_1 by auto next case (Suc n) moreover have "a (s + Suc n) \ 2" using a_gt_1 by (smt le_add1) ultimately show ?case apply (subst prod.atLeastLessThan_Suc,simp) using mult_mono'[of 2 "a (Suc (s + n))" " 2 ^ n" "prod a {s..cc>0\ unfolding cc_def by (simp add: atLeast0AtMost) qed then have "1/(\k\n+s. a k) \ 1/(cc * 2^n)" for n proof - assume asm:"\n. cc * 2 ^ n \ prod a {..n + s}" then have "real_of_int (cc * 2 ^ n) \ prod a {..n + s}" using of_int_le_iff by blast moreover have "prod a {..n + s} >0" using \cc>0\ by (simp add: a_pos prod_pos) ultimately show ?thesis using \cc>0\ by (auto simp:field_simps simp del:of_int_prod) qed moreover have "summable (\n. 1/(cc * 2^n))" proof - have "summable (\n. 1/(2::int)^n)" using summable_geometric[of "1/(2::int)"] by (simp add:power_one_over) from summable_mult[OF this,of "1/cc"] show ?thesis by auto qed ultimately have "summable (\n. 1 / (\k\n+s. a k))" apply (elim summable_comparison_test'[where N=0]) apply (unfold real_norm_def, subst abs_of_pos) by (auto simp add: \\n. 0 < (\k\n. real_of_int (a k))\) then have "summable (\n. 1 / (\k\n. a k))" apply (subst summable_iff_shift[where k=s,symmetric]) by simp then have "summable (\n. (a (n+1) * a (n+2)) / (\k\n+2. a k))" proof - assume asm:"summable (\n. 1 / real_of_int (prod a {..n}))" have "1 / real_of_int (prod a {..n}) = (a (n+1) * a (n+2)) / (\k\n+2. a k)" for n proof - have "a (Suc (Suc n)) \ 0" "a (Suc n) \0" using a_pos by (metis less_irrefl)+ then show ?thesis by (simp add: atLeast0_atMost_Suc atMost_atLeast0) qed then show ?thesis using asm by auto qed then show "summable (\n. (a (n-1) * a n) / (\k\n. a k))" apply (subst summable_iff_shift[symmetric,of _ 2]) by auto qed ultimately show ?thesis apply (elim summable_comparison_test_ev[rotated]) by (simp add: eventually_mono) qed private fun get_c::"(nat \ int) \ (nat \ int) \ int \ nat \ (nat \ int)" where "get_c a' b' B N 0 = round (B * b' N / a' N)"| "get_c a' b' B N (Suc n) = get_c a' b' B N n * a' (n+N) - B * b' (n+N)" lemma ab_rationality_imp: assumes ab_rational:"(\n. (b n / (\i \ n. a i))) \ \" shows "\ (B::int)>0. \ c::nat\ int. (\\<^sub>F n in sequentially. B*b n = c n * a n - c(n+1) \ \c(n+1)\ (\n. c (Suc n) / a n) \ 0" proof - have [simp]:"a n \ 0" for n using a_pos by (metis less_numeral_extra(3)) obtain A::int and B::int where AB_eq:"(\n. real_of_int (b n) / real_of_int (prod a {..n})) = A / B" and "B>0" proof - obtain q::rat where "(\n. 
real_of_int (b n) / real_of_int (prod a {..n})) = real_of_rat q" using ab_rational by (rule Rats_cases) simp moreover obtain A::int and B::int where "q = Rat.Fract A B" "B > 0" "coprime A B" by (rule Rat_cases) auto ultimately show ?thesis by (auto intro!: that[of A B] simp:of_rat_rat) qed define f where "f = (\n. b n / real_of_int (prod a {..n}))" define R where "R = (\N. (\n. B*b (n+N+1) / prod a {N..n+N+1}))" have all_e_ubound:"\e>0. \\<^sub>F M in sequentially. \n. \B*b (n+M+1) / prod a {M..n+M+1}\ < e/4 * 1/2^n" proof safe fix e::real assume "e>0" obtain N where N_a2: "\n \ N. a n \ 2" and N_ba: "\n \ N. \b n\ / (a (n-1) * a n) < e/(4*B)" proof - have "\\<^sub>F n in sequentially. \b n\ / (a (n - 1) * a n) < e/(4*B)" using order_topology_class.order_tendstoD[OF ab_tendsto,of "e/(4*B)"] \B>0\ \e>0\ by auto moreover have "\\<^sub>F n in sequentially. a n \ 2" using a_large by (auto elim: eventually_mono) ultimately have "\\<^sub>F n in sequentially. \b n\ / (a (n - 1) * a n) < e/(4*B) \ a n \ 2" by eventually_elim auto then show ?thesis unfolding eventually_at_top_linorder using that by auto qed have geq_N_bound:"\B*b (n+M+1) / prod a {M..n+M+1}\ < e/4 * 1/2^n" when "M\N" for n M proof - define D where "D = B*b (n+M+1)/ (a (n+M) * a (n+M+1))" have "\B*b (n+M+1) / prod a {M..n+M+1}\ = \D / prod a {M.." proof - have "{M..n+M+1} = {M.. {n+M,n+M+1}" by auto then have "prod a {M..n+M+1} = a (n+M) * a (n+M+1)* prod a {M..e/4 * (1/prod a {M.." proof - have "\D\ < e/ 4" unfolding D_def using N_ba[rule_format, of "n+M+1"] \B>0\ \M \ N\ \e>0\ a_pos by (auto simp:field_simps abs_mult abs_of_pos) from mult_strict_right_mono[OF this,of "1/prod a {M..e>0\ show ?thesis apply (auto simp:abs_prod abs_mult prod_pos) by (subst (2) abs_of_pos,auto)+ qed also have "... \ e/4 * 1/2^n" proof - have "prod a {M.. 2^n" proof (induct n) case 0 then show ?case by simp next case (Suc n) then show ?case using \M\N\ by (simp add: N_a2 mult.commute mult_mono' prod.atLeastLessThan_Suc) qed then have "real_of_int (prod a {M.. 2^n" using numeral_power_le_of_int_cancel_iff by blast then show ?thesis using \e>0\ by (auto simp add:divide_simps) qed finally show ?thesis . qed show "\\<^sub>F M in sequentially. \n. \real_of_int (B * b (n + M + 1)) / real_of_int (prod a {M..n + M + 1})\ < e / 4 * 1 / 2 ^ n" apply (rule eventually_sequentiallyI[of N]) using geq_N_bound by blast qed have R_tendsto_0:"R \ 0" proof (rule tendstoI) fix e::real assume "e>0" show "\\<^sub>F x in sequentially. dist (R x) 0 < e" using all_e_ubound[rule_format,OF \e>0\] proof eventually_elim case (elim M) define g where "g = (\n. B*b (n+M+1) / prod a {M..n+M+1})" have g_lt:"\g n\ < e/4 * 1/2^n" for n using elim unfolding g_def by auto have g_abs_summable:"summable (\n. \g n\)" proof - have "summable (\n. e/4 * 1/2^n)" using summable_geometric[of "1/2",THEN summable_mult,of "e/4",simplified] by (auto simp add:algebra_simps power_divide) then show ?thesis apply (elim summable_comparison_test') using g_lt less_eq_real_def by auto qed have "\\n. g n\ \ (\n. \g n\)" by (rule summable_rabs[OF g_abs_summable]) also have "... \(\n. e/4 * 1/2^n)" proof (rule suminf_comparison) show "summable (\n. e/4 * 1/2^n)" using summable_geometric[of "1/2",THEN summable_mult,of "e/4",simplified] by (auto simp add:algebra_simps power_divide) show "\n. norm \g n\ \ e / 4 * 1 / 2 ^ n" using g_lt less_eq_real_def by auto qed also have "... = (e/4) * (\n. 
(1/2)^n)" apply (subst suminf_mult[symmetric]) subgoal apply (rule complete_algebra_summable_geometric) by simp subgoal by (auto simp:algebra_simps power_divide) done also have "... = e/2" by (simp add:suminf_geometric[of "1/2"]) finally have "\\n. g n\ \ e / 2" . then show "dist (R M) 0 < e" unfolding R_def g_def using \e>0\ by auto qed qed obtain N where R_N_bound:"\M \ N. \R M\ \ 1 / 4" and N_geometric:"\M\N. \n. \real_of_int (B * b (n + M + 1)) / (prod a {M..n + M + 1})\ < 1 / 2 ^ n" proof - obtain N1 where N1:"\M \ N1. \R M\ \ 1 / 4" using metric_LIMSEQ_D[OF R_tendsto_0,of "1/4"] all_e_ubound[rule_format,of 4,unfolded eventually_sequentially] by (auto simp:less_eq_real_def) obtain N2 where N2:"\M\N2. \n. \real_of_int (B * b (n + M + 1)) / (prod a {M..n + M + 1})\ < 1 / 2 ^ n" using all_e_ubound[rule_format,of 4,unfolded eventually_sequentially] by (auto simp:less_eq_real_def) define N where "N=max N1 N2" show ?thesis using that[of N] N1 N2 unfolding N_def by simp qed define C where "C = B * prod a {..nn. f n)" unfolding AB_eq f_def using \B>0\ by auto also have "... = B * prod a {..nn. f (n+N+1)))" using suminf_split_initial_segment[OF \summable f\, of "N+1"] by auto also have "... = B * prod a {..nn. f (n+N+1)))" using sum.atLeast0_lessThan_Suc by simp also have "... = C + B * b N / a N + (\n. B*b (n+N+1) / prod a {N..n+N+1})" proof - have "B * prod a {.. {N}" using ivl_disj_un_singleton(2) by blast then show ?thesis unfolding f_def by auto qed moreover have "B * prod a {..n. f (n+N+1)) = (\n. B*b (n+N+1) / prod a {N..n+N+1})" proof - have "summable (\n. f (n + N + 1))" using \summable f\ summable_iff_shift[of f "N+1"] by auto moreover have "prod a {.. {N..n + N + 1}" by auto then show ?thesis unfolding f_def apply simp apply (subst prod.union_disjoint) by auto qed ultimately show ?thesis apply (subst suminf_mult[symmetric]) by (auto simp add: mult.commute mult.left_commute) qed ultimately show ?thesis unfolding C_def by (auto simp:algebra_simps) qed also have "... = C +B * b N / a N + R N" unfolding R_def by simp finally show ?thesis . qed have R_bound:"\R M\ \ 1 / 4" and R_Suc:"R (Suc M) = a M * R M - B * b (Suc M) / a (Suc M)" when "M \ N" for M proof - define g where "g = (\n. B*b (n+M+1) / prod a {M..n+M+1})" have g_abs_summable:"summable (\n. \g n\)" proof - have "summable (\n.(1::real)/2^n)" using summable_geometric[of "(1::real)/2",simplified] by (auto elim!: back_subst[of "summable"] simp:field_simps) moreover have "\g n\ < 1/2^n" for n using N_geometric[rule_format,OF that] unfolding g_def by simp ultimately show ?thesis apply (elim summable_comparison_test') using less_eq_real_def by auto qed show "\R M\ \ 1 / 4" using R_N_bound[rule_format,OF that] . have "R M = (\n. g n)" unfolding R_def g_def by simp also have "... = g 0 + (\n. g (Suc n))" apply (subst suminf_split_head) using summable_rabs_cancel[OF g_abs_summable] by auto also have "... = g 0 + 1/a M * (\n. a M * g (Suc n))" apply (subst suminf_mult) by (auto simp add: g_abs_summable summable_Suc_iff summable_rabs_cancel) also have "... = g 0 + 1/a M * R (Suc M)" proof - have "a M * g (Suc n) = B * b (n + M + 2) / prod a {Suc M..n + M + 2}" for n proof - have "{M..Suc (Suc (M + n))} = {M} \ {Suc M..Suc (Suc (M + n))}" by auto then show ?thesis unfolding g_def using \B>0\ by (auto simp add:algebra_simps) qed then have "(\n. a M * g (Suc n)) = R (Suc M)" unfolding R_def by auto then show ?thesis by auto qed finally have "R M = g 0 + 1 / a M * R (Suc M)" . 
then have "R (Suc M) = a M * R M - g 0 * a M" by (auto simp add:algebra_simps) moreover have "{M..Suc M} = {M,Suc M}" by auto ultimately show "R (Suc M) = a M * R M - B * b (Suc M) / a (Suc M)" unfolding g_def by auto qed define c where "c = (\n. if n\N then get_c a b B N (n-N) else undefined)" have c_rec:"c (n+1) = c n * a n - B * b n" when "n \ N" for n unfolding c_def using that by (auto simp:Suc_diff_le) have c_R:"c (Suc n) / a n = R n" when "n \ N" for n using that proof (induct rule:nat_induct_at_least) case base have "\ c (N+1) / a N \ \ 1/2" proof - have "c N = round (B * b N / a N)" unfolding c_def by simp moreover have "c (N+1) / a N = c N - B * b N / a N" using a_pos[rule_format,of N] by (auto simp add:c_rec[of N,simplified] divide_simps) ultimately show ?thesis using of_int_round_abs_le by auto qed moreover have "\R N\ \ 1 / 4" using R_bound[of N] by simp ultimately have "\c (N+1) / a N - R N \ < 1" by linarith moreover have "c (N+1) / a N - R N \ \" proof - have "c (N+1) / a N = c N - B * b N / a N" using a_pos[rule_format,of N] by (auto simp add:c_rec[of N,simplified] divide_simps) moreover have " B * b N / a N + R N \ \" proof - have "C = B * (\nn {..n}" if "nB>0\ apply simp apply (subst prod.union_disjoint) by auto qed finally have "C = real_of_int (B * (\n \" using Ints_of_int by blast moreover note \A * prod a {.. ultimately show ?thesis by (metis Ints_diff Ints_of_int add.assoc add_diff_cancel_left') qed ultimately show ?thesis by (simp add: diff_diff_add) qed ultimately have "c (N+1) / a N - R N = 0" by (metis Ints_cases less_irrefl of_int_0 of_int_lessD) then show ?case by simp next case (Suc n) have "c (Suc (Suc n)) / a (Suc n) = c (Suc n) - B * b (Suc n) / a (Suc n)" apply (subst c_rec[of "Suc n",simplified]) using \N \ n\ by (auto simp add: divide_simps) also have "... = a n * R n - B * b (Suc n) / a (Suc n)" using Suc by (auto simp: divide_simps) also have "... = R (Suc n)" using R_Suc[OF \N \ n\] by simp finally show ?case . qed have ca_tendsto_zero:"(\n. c (Suc n) / a n) \ 0" using R_tendsto_0 apply (elim filterlim_mono_eventually) using c_R by (auto intro!:eventually_sequentiallyI[of N]) have ca_bound:"\c (n + 1)\ < a n / 2" when "n \ N" for n proof - have "\c (Suc n)\ / a n = \c (Suc n) / a n\" using a_pos[rule_format,of n] by auto also have "... = \R n\" using c_R[OF that] by auto also have "... < 1/2" using R_bound[OF that] by auto finally have "\c (Suc n)\ / a n < 1/2" . then show ?thesis using a_pos[rule_format,of n] by auto qed (* (* the following part corresponds to (2.7) (2.8) in the original paper, but turns out to be not necessary. *) have c_round:"c n = round (B * b n / a n)" when "n \ N" for n proof (cases "n=N") case True then show ?thesis unfolding c_def by simp next case False with \n\N\ obtain n' where n_Suc:"n=Suc n'" and "n' \ N" by (metis le_eq_less_or_eq lessE less_imp_le_nat) have "B * b n / a n = c n - R n" proof - have "R n = c n - B * b n / a n" using c_R[OF \n'\N\,symmetric,folded n_Suc] R_Suc[OF \n'\N\,folded n_Suc] by (auto simp:field_simps) then show ?thesis by (auto simp:field_simps) qed then have "\B * b n / a n - c n\ = \R n\" by auto then have "\B * b n / a n - c n\ < 1/2" using R_bound[OF \n \ N\] by auto from round_unique'[OF this] show ?thesis by auto qed *) show "\B>0. \c. (\\<^sub>F n in sequentially. B * b n = c n * a n - c (n + 1) \ real_of_int \c (n + 1)\ < a n / 2) \ (\n. 
c (Suc n) / a n) \ 0" unfolding eventually_at_top_linorder apply (rule exI[of _ B],use \B>0\ in simp) apply (intro exI[of _c] exI[of _ N]) using c_rec ca_bound ca_tendsto_zero by fastforce qed private lemma imp_ab_rational: assumes "\ (B::int)>0. \ c::nat\ int. (\\<^sub>F n in sequentially. B*b n = c n * a n - c(n+1) \ \c(n+1)\n. (b n / (\i \ n. a i))) \ \" proof - obtain B::int and c::"nat\int" and N::nat where "B>0" and large_n:"\n\N. B * b n = c n * a n - c (n + 1) \ real_of_int \c (n + 1)\ < a n / 2 \ a n\2" proof - obtain B c where "B>0" and event1:"\\<^sub>F n in sequentially. B * b n = c n * a n - c (n + 1) \ real_of_int \c (n + 1)\ < real_of_int (a n) / 2" using assms by auto from eventually_conj[OF event1 a_large,unfolded eventually_at_top_linorder] obtain N where "\n\N. (B * b n = c n * a n - c (n + 1) \ real_of_int \c (n + 1)\ < real_of_int (a n) / 2) \ 2 \ a n" by fastforce then show ?thesis using that[of B N c] \B>0\ by auto qed define f where "f=(\n. real_of_int (b n) / real_of_int (prod a {..n}))" define S where "S = (\n. f n)" have "summable f" unfolding f_def by (rule aux_series_summable) define C where "C=B*prod a {..nn. (c (n+N) * a (n+N)) / prod a {N..n+N})" define h2 where "h2 = (\n. c (n+N+1) / prod a {N..n+N})" have f_h12:"B*prod a {..n. B * b (n+N))" define g2 where "g2 = (\n. prod a {.. {N..n + N}) = prod a {.. {N..n + N}) = prod a {..n+N}" by (simp add: ivl_disj_un_one(4)) ultimately show ?thesis unfolding g2_def apply simp using a_pos by (metis less_irrefl) qed ultimately have "B*prod a {..nn. f (n+N)))" using suminf_split_initial_segment[OF \summable f\,of N] unfolding S_def by (auto simp:algebra_simps) also have "... = C + B*prod a {..n. f (n+N))" unfolding C_def by (auto simp:algebra_simps) also have "... = C + (\n. h1 n - h2 n)" apply (subst suminf_mult[symmetric]) subgoal using \summable f\ by (simp add: summable_iff_shift) subgoal using f_h12 by auto done also have "... = C + h1 0" proof - have "(\n. \i\n. h1 i - h2 i) \ (\i. h1 i - h2 i)" proof (rule summable_LIMSEQ') have "(\i. h1 i - h2 i) = (\i. real_of_int (B * prod a {..i. h1 i - h2 i)" using \summable f\ by (simp add: summable_iff_shift summable_mult) qed moreover have "(\i\n. h1 i - h2 i) = h1 0 - h2 n" for n proof (induct n) case 0 then show ?case by simp next case (Suc n) have "(\i\Suc n. h1 i - h2 i) = (\i\n. h1 i - h2 i) + h1 (n+1) - h2 (n+1)" by auto also have "... = h1 0 - h2 n + h1 (n+1) - h2 (n+1)" using Suc by auto also have "... = h1 0 - h2 (n+1)" proof - have "h2 n = h1 (n+1)" unfolding h2_def h1_def apply (auto simp:prod.nat_ivl_Suc') using a_pos by (metis less_numeral_extra(3)) then show ?thesis by auto qed finally show ?case by simp qed ultimately have "(\n. h1 0 - h2 n) \ (\i. h1 i - h2 i)" by simp then have "h2 \ (h1 0 - (\i. h1 i - h2 i))" apply (elim metric_tendsto_imp_tendsto) by (auto intro!:eventuallyI simp add:dist_real_def) moreover have "h2 \ 0" proof - have h2_n:"\h2 n\ < (1 / 2)^(n+1)" for n proof - have "\h2 n\ = \c (n + N + 1)\ / prod a {N..n + N}" unfolding h2_def abs_divide using a_pos by (simp add: abs_of_pos prod_pos) also have "... < (a (N+n) / 2) / prod a {N..n + N}" unfolding h2_def apply (rule divide_strict_right_mono) subgoal using large_n[rule_format,of "N+n"] by (auto simp add:algebra_simps) subgoal using a_pos by (simp add: prod_pos) done also have "... = 1 / (2*prod a {N.. (1/2)^(n+1)" proof (induct n) case 0 then show ?case by auto next case (Suc n) define P where "P=1 / real_of_int (2 * prod a {N.. 
( (1 / 2) ^ (n + 1) ) / a (n+N) " apply (rule divide_right_mono) subgoal unfolding P_def using Suc by auto subgoal by (simp add: a_pos less_imp_le) done also have "... \ ( (1 / 2) ^ (n + 1) ) / 2 " apply (rule divide_left_mono) using large_n[rule_format,of "n+N",simplified] by auto also have "... = (1 / 2) ^ (n + 2)" by auto finally show ?case by simp qed finally show ?thesis . qed have "(\n. (1 / 2)^(n+1)) \ (0::real)" using tendsto_mult_right_zero[OF LIMSEQ_abs_realpow_zero2[of "1/2",simplified],of "1/2"] by auto then show ?thesis apply (elim Lim_null_comparison[rotated]) using h2_n less_eq_real_def by (auto intro!:eventuallyI) qed ultimately have "(\i. h1 i - h2 i) = h1 0" using LIMSEQ_unique by fastforce then show ?thesis by simp qed also have "... = C + c N" unfolding h1_def using a_pos by auto (metis less_irrefl) finally show ?thesis . qed then have "S = (C + real_of_int (c N)) / (B*prod a {..0 < B\ a_pos less_irrefl mult.commute mult_pos_pos nonzero_mult_div_cancel_right of_int_eq_0_iff prod_pos) moreover have "... \ \" unfolding C_def f_def by (intro Rats_divide Rats_add Rats_mult Rats_of_int Rats_sum) ultimately show "S \ \" by auto qed theorem theorem_2_1_Erdos_Straus : "(\n. (b n / (\i \ n. a i))) \ \ \ (\ (B::int)>0. \ c::nat\ int. (\\<^sub>F n in sequentially. B*b n = c n * a n - c(n+1) \ \c(n+1)\The following is a Corollary to Theorem 2.1. \ corollary corollary_2_10_Erdos_Straus: assumes ab_event:"\\<^sub>F n in sequentially. b n > 0 \ a (n+1) \ a n" and ba_lim_leq:"lim (\n. (b(n+1) - b n )/a n) \ 0" and ba_lim_exist:"convergent (\n. (b(n+1) - b n )/a n)" and "liminf (\n. a n / b n) = 0 " shows "(\n. (b n / (\i \ n. a i))) \ \" proof assume "(\n. (b n / (\i \ n. a i))) \ \" then obtain B c where "B>0" and abc_event:"\\<^sub>F n in sequentially. B * b n = c n * a n - c (n + 1) \ \c (n + 1)\ < a n / 2" and ca_vanish: "(\n. c (Suc n) / a n) \ 0" using ab_rationality_imp by auto have bac_close:"(\n. B * b n / a n - c n) \ 0" proof - have "\\<^sub>F n in sequentially. B * b n - c n * a n + c (n + 1) = 0" using abc_event by (auto elim!:eventually_mono) then have "\\<^sub>F n in sequentially. (B * b n - c n * a n + c (n+1)) / a n = 0 " apply eventually_elim by auto then have "\\<^sub>F n in sequentially. B * b n / a n - c n + c (n + 1) / a n = 0" apply eventually_elim using a_pos by (auto simp:divide_simps) (metis less_irrefl) then have "(\n. B * b n / a n - c n + c (n + 1) / a n) \ 0" by (simp add: eventually_mono tendsto_iff) from tendsto_diff[OF this ca_vanish] show ?thesis by auto qed have c_pos:"\\<^sub>F n in sequentially. c n > 0" proof - from bac_close have *:"\\<^sub>F n in sequentially. c n \ 0" apply (elim tendsto_of_int_diff_0) using ab_event a_large apply (eventually_elim) using \B>0\ by auto show ?thesis proof (rule ccontr) assume "\ (\\<^sub>F n in sequentially. c n > 0)" moreover have "\\<^sub>F n in sequentially. c (Suc n) \ 0 \ c n\0" using * eventually_sequentially_Suc[of "\n. c n\0"] by (metis (mono_tags, lifting) eventually_at_top_linorder le_Suc_eq) ultimately have "\\<^sub>F n in sequentially. c n = 0 \ c (Suc n) \ 0" using eventually_elim2 frequently_def by fastforce moreover have "\\<^sub>F n in sequentially. b n > 0 \ B * b n = c n * a n - c (n + 1)" using ab_event abc_event by eventually_elim auto ultimately have "\\<^sub>F n in sequentially. 
c n = 0 \ c (Suc n) \ 0 \ b n > 0 \ B * b n = c n * a n - c (n + 1)" using frequently_eventually_frequently by fastforce from frequently_ex[OF this] obtain n where "c n = 0" "c (Suc n) \ 0" "b n > 0" "B * b n = c n * a n - c (n + 1)" by auto then have "B * b n \ 0" by auto then show False using \b n>0\ \B > 0\ using mult_pos_pos not_le by blast qed qed have bc_epsilon:"\\<^sub>F n in sequentially. b (n+1) / b n > (c (n+1) - \) / c n" when "\>0" "\<1" for \::real proof - have "\\<^sub>F x in sequentially. \c (Suc x) / a x\ < \ / 2" using ca_vanish[unfolded tendsto_iff,rule_format, of "\/2"] \\>0\ by auto moreover then have "\\<^sub>F x in sequentially. \c (x+2) / a (x+1)\ < \ / 2" apply (subst (asm) eventually_sequentially_Suc[symmetric]) by simp moreover have "\\<^sub>F n in sequentially. B * b (n+1) = c (n+1) * a (n+1) - c (n + 2)" using abc_event apply (subst (asm) eventually_sequentially_Suc[symmetric]) by (auto elim:eventually_mono) moreover have "\\<^sub>F n in sequentially. c n > 0 \ c (n+1) > 0 \ c (n+2) > 0" proof - have "\\<^sub>F n in sequentially. 0 < c (Suc n)" using c_pos by (subst eventually_sequentially_Suc) simp moreover then have "\\<^sub>F n in sequentially. 0 < c (Suc (Suc n))" using c_pos by (subst eventually_sequentially_Suc) simp ultimately show ?thesis using c_pos by eventually_elim auto qed ultimately show ?thesis using ab_event abc_event proof eventually_elim case (elim n) define \\<^sub>0 \\<^sub>1 where "\\<^sub>0 = c (n+1) / a n" and "\\<^sub>1 = c (n+2) / a (n+1)" have "\\<^sub>0 > 0" "\\<^sub>1 > 0" "\\<^sub>0 < \/2" "\\<^sub>1 < \/2" using a_pos elim by (auto simp add: \\<^sub>0_def \\<^sub>1_def) have "(\ - \\<^sub>1) * c n > 0" apply (rule mult_pos_pos) using \\\<^sub>1 > 0\ \\\<^sub>1 < \/2\ \\>0\ elim by auto moreover have "\\<^sub>0 * (c (n+1) - \) > 0" apply (rule mult_pos_pos[OF \\\<^sub>0 > 0\]) using elim(4) that(2) by linarith ultimately have "(\ - \\<^sub>1) * c n + \\<^sub>0 * (c (n+1) - \) > 0" by auto moreover have "c n - \\<^sub>0 > 0" using \\\<^sub>0 < \ / 2\ elim(4) that(2) by linarith moreover have "c n > 0" by (simp add: elim(4)) ultimately have "(c (n+1) - \) / c n < (c (n+1) - \\<^sub>1) / (c n - \\<^sub>0)" by (auto simp add: field_simps) also have "... \ (c (n+1) - \\<^sub>1) / (c n - \\<^sub>0) * (a (n+1) / a n)" proof - have "(c (n+1) - \\<^sub>1) / (c n - \\<^sub>0) > 0" by (smt \0 < (\ - \\<^sub>1) * real_of_int (c n)\ \0 < real_of_int (c n) - \\<^sub>0\ divide_pos_pos elim(4) mult_le_0_iff of_int_less_1_iff that(2)) moreover have "(a (n+1) / a n) \ 1" using a_pos elim(5) by auto - ultimately show ?thesis by (metis mult_cancel_left1 real_mult_le_cancel_iff2) + ultimately show ?thesis by (metis mult_cancel_left1 mult_le_cancel_iff2) qed also have "... = (B * b (n+1)) / (B * b n)" proof - have "B * b n = c n * a n - c (n + 1)" using elim by auto also have "... = a n * (c n - \\<^sub>0)" using a_pos[rule_format,of n] unfolding \\<^sub>0_def by (auto simp:field_simps) finally have "B * b n = a n * (c n - \\<^sub>0)" . moreover have "B * b (n+1) = a (n+1) * (c (n+1) - \\<^sub>1)" unfolding \\<^sub>1_def using a_pos[rule_format,of "n+1"] apply (subst \B * b (n + 1) = c (n + 1) * a (n + 1) - c (n + 2)\) by (auto simp:field_simps) ultimately show ?thesis by (simp add: mult.commute) qed also have "... = b (n+1) / b n" using \B>0\ by auto finally show ?case . qed qed have eq_2_11:"\\<^sub>F n in sequentially. b (n+1) > b n + (1 - \)^2 * a n / B" when "\>0" "\<1" "\ (\\<^sub>F n in sequentially. 
c (n+1) \ c n)" for \::real proof - have "\\<^sub>F x in sequentially. c x < c (Suc x) " using that(3) by (simp add:not_eventually frequently_elim1) moreover have "\\<^sub>F x in sequentially. \c (Suc x) / a x\ < \" using ca_vanish[unfolded tendsto_iff,rule_format, of \] \\>0\ by auto moreover have "\\<^sub>F n in sequentially. c n > 0 \ c (n+1) > 0" proof - have "\\<^sub>F n in sequentially. 0 < c (Suc n)" using c_pos by (subst eventually_sequentially_Suc) simp then show ?thesis using c_pos by eventually_elim auto qed ultimately show ?thesis using ab_event abc_event bc_epsilon[OF \\>0\ \\<1\] proof (elim frequently_rev_mp,eventually_elim) case (elim n) then have "c (n+1) / a n < \" using a_pos[rule_format,of n] by auto also have "... \ \ * c n" using elim(7) that(1) by auto finally have "c (n+1) / a n < \ * c n" . then have "c (n+1) / c n < \ * a n" using a_pos[rule_format,of n] elim by (auto simp:field_simps) then have "(1 - \) * a n < a n - c (n+1) / c n" by (auto simp:algebra_simps) then have "(1 - \)^2 * a n / B < (1 - \) * (a n - c (n+1) / c n) / B" - apply (subst (asm) real_mult_less_iff1[symmetric, of "(1-\)/B"]) - using \\<1\ \B>0\ by (auto simp:divide_simps power2_eq_square) + apply (subst (asm) mult_less_iff1[symmetric, of "(1-\)/B"]) + using \\<1\ \B>0\ by (auto simp: divide_simps power2_eq_square mult_less_iff1) then have "b n + (1 - \)^2 * a n / B < b n + (1 - \) * (a n - c (n+1) / c n) / B" using \B>0\ by auto also have "... = b n + (1 - \) * ((c n *a n - c (n+1)) / c n) / B" using elim by (auto simp:field_simps) also have "... = b n + (1 - \) * (b n / c n)" proof - have "B * b n = c n * a n - c (n + 1)" using elim by auto from this[symmetric] show ?thesis using \B>0\ by simp qed also have "... = (1+(1-\)/c n) * b n" by (auto simp:algebra_simps) also have "... = ((c n+1-\)/c n) * b n" using elim by (auto simp:divide_simps) also have "... \ ((c (n+1) -\)/c n) * b n" proof - define cp where "cp = c n+1" have "c (n+1) \ cp" unfolding cp_def using \c n < c (Suc n)\ by auto moreover have "c n>0" "b n>0" using elim by auto ultimately show ?thesis apply (fold cp_def) by (auto simp:divide_simps) qed also have "... < b (n+1)" using elim by (auto simp:divide_simps) finally show ?case . qed qed have "\\<^sub>F n in sequentially. c (n+1) \ c n" proof (rule ccontr) assume "\ (\\<^sub>F n in sequentially. c (n + 1) \ c n)" from eq_2_11[OF _ _ this,of "1/2"] have "\\<^sub>F n in sequentially. b (n+1) > b n + 1/4 * a n / B" by (auto simp:algebra_simps power2_eq_square) then have *:"\\<^sub>F n in sequentially. (b (n+1) - b n) / a n > 1 / (B * 4)" apply (elim frequently_elim1) subgoal for n using a_pos[rule_format,of n] by (auto simp:field_simps) done define f where "f = (\n. (b (n+1) - b n) / a n)" have "f \ lim f" using convergent_LIMSEQ_iff ba_lim_exist unfolding f_def by auto from this[unfolded tendsto_iff,rule_format, of "1 / (B*4)"] have "\\<^sub>F x in sequentially. \f x - lim f\ < 1 / (B * 4)" using \B>0\ by (auto simp:dist_real_def) moreover have "\\<^sub>F n in sequentially. f n > 1 / (B * 4)" using * unfolding f_def by auto ultimately have "\\<^sub>F n in sequentially. f n > 1 / (B * 4) \ \f n - lim f\ < 1 / (B * 4)" by (auto elim:frequently_eventually_frequently[rotated]) from frequently_ex[OF this] obtain n where "f n > 1 / (B * 4)" "\f n - lim f\ < 1 / (B * 4)" by auto moreover have "lim f \ 0" using ba_lim_leq unfolding f_def by auto ultimately show False by linarith qed then obtain N where N_dec:"\n\N. 
c (n+1) \ c n" by (meson eventually_at_top_linorder) define max_c where "max_c = (MAX n \ {..N}. c n)" have max_c:"c n \ max_c" for n proof (cases "n\N") case True then show ?thesis unfolding max_c_def by simp next case False then have "n\N" by auto then have "c n\c N" proof (induct rule:nat_induct_at_least) case base then show ?case by simp next case (Suc n) then have "c (n+1) \ c n" using N_dec by auto then show ?case using \c n \ c N\ by auto qed moreover have "c N \ max_c" unfolding max_c_def by auto ultimately show ?thesis by auto qed have "max_c > 0 " proof - obtain N where "\n\N. 0 < c n" using c_pos[unfolded eventually_at_top_linorder] by auto then have "c N > 0" by auto then show ?thesis using max_c[of N] by simp qed have ba_limsup_bound:"1/(B*(B+1)) \ limsup (\n. b n/a n)" "limsup (\n. b n/a n) \ max_c / B + 1 / (B+1)" proof - define f where "f = (\n. b n/a n)" from tendsto_mult_right_zero[OF bac_close,of "1/B"] have "(\n. f n - c n / B) \ 0" unfolding f_def using \B>0\ by (auto simp:algebra_simps) from this[unfolded tendsto_iff,rule_format,of "1/(B+1)"] have "\\<^sub>F x in sequentially. \f x - c x / B\ < 1 / (B+1)" using \B>0\ by auto then have *:"\\<^sub>F n in sequentially. 1/(B*(B+1)) \ ereal (f n) \ ereal (f n) \ max_c / B + 1 / (B+1)" using c_pos proof eventually_elim case (elim n) then have "f n - c n / B < 1 / (B+1)" by auto then have "f n < c n / B + 1 / (B+1)" by simp also have "... \ max_c / B + 1 / (B+1)" using max_c[of n] using \B>0\ by (auto simp:divide_simps) finally have *:"f n < max_c / B + 1 / (B+1)" . have "1/(B*(B+1)) = 1/B - 1 / (B+1)" using \B>0\ by (auto simp:divide_simps) also have "... \ c n/B - 1 / (B+1)" using \0 < c n\ \B>0\ by (auto,auto simp:divide_simps) also have "... < f n" using elim by auto finally have "1/(B*(B+1)) < f n" . with * show ?case by simp qed show "limsup f \ max_c / B + 1 / (B+1)" apply (rule Limsup_bounded) using * by (auto elim:eventually_mono) have "1/(B*(B+1)) \ liminf f" apply (rule Liminf_bounded) using * by (auto elim:eventually_mono) also have "liminf f \ limsup f" by (simp add: Liminf_le_Limsup) finally show "1/(B*(B+1)) \ limsup f" . qed have "0 < inverse (ereal (max_c / B + 1 / (B+1)))" using \max_c > 0\ \B>0\ by (simp add: pos_add_strict) also have "... \ inverse (limsup (\n. b n/a n))" proof (rule ereal_inverse_antimono[OF _ ba_limsup_bound(2)]) have "0<1/(B*(B+1))" using \B>0\ by auto also have "... \ limsup (\n. b n/a n)" using ba_limsup_bound(1) . finally show "0\limsup (\n. b n/a n)" using zero_ereal_def by auto qed also have "... = liminf (\n. inverse (ereal ( b n/a n)))" apply (subst Liminf_inverse_ereal[symmetric]) using a_pos ab_event by (auto elim!:eventually_mono simp:divide_simps) also have "... = liminf (\n. ( a n/b n))" apply (rule Liminf_eq) using a_pos ab_event apply (auto elim!:eventually_mono) by (metis less_int_code(1)) finally have "liminf (\n. ( a n/b n)) > 0" . then show False using \liminf (\n. a n / b n) = 0\ by simp qed end section\Some auxiliary results on the prime numbers. \ lemma nth_prime_nonzero[simp]:"nth_prime n \ 0" by (simp add: prime_gt_0_nat prime_nth_prime) lemma nth_prime_gt_zero[simp]:"nth_prime n >0" by (simp add: prime_gt_0_nat prime_nth_prime) lemma ratio_of_consecutive_primes: "(\n. nth_prime (n+1)/nth_prime n) \1" proof - define f where "f=(\x. real (nth_prime (Suc x)) /real (nth_prime x))" define g where "g=(\x. (real x * ln (real x)) / (real (Suc x) * ln (real (Suc x))))" have p_n:"(\x. 
real (nth_prime x) / (real x * ln (real x))) \ 1" using nth_prime_asymptotics[unfolded asymp_equiv_def,simplified] . moreover have p_sn:"(\n. real (nth_prime (Suc n)) / (real (Suc n) * ln (real (Suc n)))) \ 1" using nth_prime_asymptotics[unfolded asymp_equiv_def,simplified ,THEN LIMSEQ_Suc] . ultimately have "(\x. f x * g x) \ 1" using tendsto_divide[OF p_sn p_n] unfolding f_def g_def by (auto simp:algebra_simps) moreover have "g \ 1" unfolding g_def by real_asymp ultimately have "(\x. if g x = 0 then 0 else f x) \ 1" apply (drule_tac tendsto_divide[OF _ \g \ 1\]) by auto then have "f \ 1" proof (elim filterlim_mono_eventually) have "\\<^sub>F x in sequentially. (if g (x+3) = 0 then 0 else f (x+3)) = f (x+3)" unfolding g_def by auto then show "\\<^sub>F x in sequentially. (if g x = 0 then 0 else f x) = f x" apply (subst (asm) eventually_sequentially_seg) by simp qed auto then show ?thesis unfolding f_def by auto qed lemma nth_prime_double_sqrt_less: assumes "\ > 0" shows "\\<^sub>F n in sequentially. (nth_prime (2*n) - nth_prime n) / sqrt (nth_prime n) < n powr (1/2+\)" proof - define pp ll where "pp=(\n. (nth_prime (2*n) - nth_prime n) / sqrt (nth_prime n))" and "ll=(\x::nat. x * ln x)" have pp_pos:"pp (n+1) > 0" for n unfolding pp_def by simp have "(\x. nth_prime (2 * x)) \[sequentially] (\x. (2 * x) * ln (2 * x))" using nth_prime_asymptotics[THEN asymp_equiv_compose ,of "(*) 2" sequentially,unfolded comp_def] using mult_nat_left_at_top pos2 by blast also have "... \[sequentially] (\x. 2 *x * ln x)" by real_asymp finally have "(\x. nth_prime (2 * x)) \[sequentially] (\x. 2 *x * ln x)" . from this[unfolded asymp_equiv_def, THEN tendsto_mult_left,of 2] have "(\x. nth_prime (2 * x) / (x * ln x)) \ 2" unfolding asymp_equiv_def by auto moreover have *:"(\x. nth_prime x / (x * ln x)) \ 1" using nth_prime_asymptotics unfolding asymp_equiv_def by auto ultimately have "(\x. (nth_prime (2 * x) - nth_prime x) / ll x) \ 1" unfolding ll_def apply - apply (drule (1) tendsto_diff) apply (subst of_nat_diff,simp) by (subst diff_divide_distrib,simp) moreover have "(\x. sqrt (nth_prime x) / sqrt (ll x)) \ 1" unfolding ll_def using tendsto_real_sqrt[OF *] by (auto simp: real_sqrt_divide) ultimately have "(\x. pp x * (sqrt (ll x) / (ll x))) \ 1" apply - apply (drule (1) tendsto_divide,simp) by (auto simp:field_simps of_nat_diff pp_def) moreover have "\\<^sub>F x in sequentially. sqrt (ll x) / ll x = 1/sqrt (ll x)" apply (subst eventually_sequentially_Suc[symmetric]) by (auto intro!:eventuallyI simp:ll_def divide_simps) ultimately have "(\x. pp x / sqrt (ll x)) \ 1" apply (elim filterlim_mono_eventually) by (auto elim!:eventually_mono) (metis mult.right_neutral times_divide_eq_right) moreover have "(\x. sqrt (ll x) / x powr (1/2+\)) \ 0" unfolding ll_def using \\>0\ by real_asymp ultimately have "(\x. pp x / x powr (1/2+\) * (sqrt (ll x) / sqrt (ll x))) \ 0" apply - apply (drule (1) tendsto_mult) by (auto elim:filterlim_mono_eventually) moreover have "\\<^sub>F x in sequentially. sqrt (ll x) / sqrt (ll x) = 1" apply (subst eventually_sequentially_Suc[symmetric]) by (auto intro!:eventuallyI simp:ll_def ) ultimately have "(\x. pp x / x powr (1/2+\)) \ 0" apply (elim filterlim_mono_eventually) by (auto elim:eventually_mono) from tendstoD[OF this, of 1,simplified] show "\\<^sub>F x in sequentially. 
pp x < x powr (1 / 2 + \)" apply (elim eventually_mono_sequentially[of _ 1]) using pp_pos by auto qed section \Theorem 3.1\ text\Theorem 3.1 is an application of Theorem 2.1 with the sequences considered involving the prime numbers.\ theorem theorem_3_10_Erdos_Straus: fixes a::"nat \ int" assumes a_pos:"\ n. a n >0" and "mono a" and nth_1:"(\n. nth_prime n / (a n)^2) \ 0" and nth_2:"liminf (\n. a n / nth_prime n) = 0" shows "(\n. (nth_prime n / (\i \ n. a i))) \ \" proof assume asm:"(\n. (nth_prime n / (\i \ n. a i))) \ \" have a2_omega:"(\n. (a n)^2) \ \(\x. x * ln x)" proof - have "(\n. real (nth_prime n)) \ o(\n. real_of_int ((a n)\<^sup>2))" apply (rule smalloI_tendsto[OF nth_1]) using a_pos by (metis (mono_tags, lifting) less_int_code(1) not_eventuallyD of_int_0_eq_iff zero_eq_power2) moreover have "(\x. real (nth_prime x)) \ \(\x. real x * ln (real x))" using nth_prime_bigtheta by blast ultimately show ?thesis using landau_omega.small_big_trans smallo_imp_smallomega by blast qed have a_gt_1:"\\<^sub>F n in sequentially. 1 < a n" proof - have "\\<^sub>F x in sequentially. \x * ln x\ \ (a x)\<^sup>2" using a2_omega[unfolded smallomega_def,simplified,rule_format,of 1] by auto then have "\\<^sub>F x in sequentially. \(x+3) * ln (x+3)\ \ (a (x+3))\<^sup>2" apply (subst (asm) eventually_sequentially_seg[symmetric, of _ 3]) by simp then have "\\<^sub>F n in sequentially. 1 < a ( n+3)" proof (elim eventually_mono) fix x assume "\real (x + 3) * ln (real (x + 3))\ \ real_of_int ((a (x + 3))\<^sup>2)" moreover have "\real (x + 3) * ln (real (x + 3))\ > 3" proof - have "ln (real (x + 3)) > 1" apply simp using ln3_gt_1 ln_gt_1 by force moreover have "real(x+3) \ 3" by simp ultimately have "(x+3)*ln (real (x + 3)) > 3*1 " apply (rule_tac mult_le_less_imp_less) by auto then show ?thesis by auto qed ultimately have "real_of_int ((a (x + 3))\<^sup>2) > 3" by auto then show "1 < a (x + 3)" by (smt Suc3_eq_add_3 a_pos add.commute of_int_1 one_power2) qed then show ?thesis apply (subst eventually_sequentially_seg[symmetric, of _ 3]) by auto qed obtain B::int and c where "B>0" and Bc_large:"\\<^sub>F n in sequentially. B * nth_prime n = c n * a n - c (n + 1) \ \c (n + 1)\ < a n / 2" and ca_vanish: "(\n. c (Suc n) / real_of_int (a n)) \ 0" proof - note a_gt_1 moreover have "(\n. real_of_int \int (nth_prime n)\ / real_of_int (a (n - 1) * a n)) \ 0" proof - define f where "f=(\n. nth_prime (n+1) / (a n * a (n+1)))" define g where "g=(\n. 2*nth_prime n / (a n)^2)" have "\\<^sub>F x in sequentially. norm (f x) \ g x" proof - have "\\<^sub>F n in sequentially. nth_prime (n+1) < 2*nth_prime n" using ratio_of_consecutive_primes[unfolded tendsto_iff ,rule_format,of 1,simplified] apply (elim eventually_mono) by (auto simp :divide_simps dist_norm) moreover have "\\<^sub>F n in sequentially. real_of_int (a n * a (n+1)) \ (a n)^2" apply (rule eventuallyI) using \mono a\ by (auto simp:power2_eq_square a_pos incseq_SucD) ultimately show ?thesis unfolding f_def g_def apply eventually_elim apply (subst norm_divide) apply (rule_tac linordered_field_class.frac_le) using a_pos[rule_format, THEN order.strict_implies_not_eq ] by auto qed moreover have "g \ 0 " using nth_1[THEN tendsto_mult_right_zero,of 2] unfolding g_def by auto ultimately have "f \ 0" using Lim_null_comparison[of f g sequentially] by auto then show ?thesis unfolding f_def by (rule_tac LIMSEQ_imp_Suc) auto qed moreover have "(\n. real_of_int (int (nth_prime n)) / real_of_int (prod a {..n})) \ \" using asm by simp ultimately have "\B>0. \c. 
(\\<^sub>F n in sequentially. B * int (nth_prime n) = c n * a n - c (n + 1) \ real_of_int \c (n + 1)\ < real_of_int (a n) / 2) \ (\n. real_of_int (c (Suc n)) / real_of_int (a n)) \ 0" using ab_rationality_imp[OF a_pos,of nth_prime] by fast then show thesis apply clarify apply (rule_tac c=c and B=B in that) by auto qed have bac_close:"(\n. B * nth_prime n / a n - c n) \ 0" proof - have "\\<^sub>F n in sequentially. B * nth_prime n - c n * a n + c (n + 1) = 0" using Bc_large by (auto elim!:eventually_mono) then have "\\<^sub>F n in sequentially. (B * nth_prime n - c n * a n + c (n+1)) / a n = 0 " apply eventually_elim by auto then have "\\<^sub>F n in sequentially. B * nth_prime n / a n - c n + c (n + 1) / a n = 0" apply eventually_elim using a_pos by (auto simp:divide_simps) (metis less_irrefl) then have "(\n. B * nth_prime n / a n - c n + c (n + 1) / a n) \ 0" by (simp add: eventually_mono tendsto_iff) from tendsto_diff[OF this ca_vanish] show ?thesis by auto qed have c_pos:"\\<^sub>F n in sequentially. c n > 0" proof - from bac_close have *:"\\<^sub>F n in sequentially. c n \ 0" apply (elim tendsto_of_int_diff_0) using a_gt_1 apply (eventually_elim) using \B>0\ by auto show ?thesis proof (rule ccontr) assume "\ (\\<^sub>F n in sequentially. c n > 0)" moreover have "\\<^sub>F n in sequentially. c (Suc n) \ 0 \ c n\0" using * eventually_sequentially_Suc[of "\n. c n\0"] by (metis (mono_tags, lifting) eventually_at_top_linorder le_Suc_eq) ultimately have "\\<^sub>F n in sequentially. c n = 0 \ c (Suc n) \ 0" using eventually_elim2 frequently_def by fastforce moreover have "\\<^sub>F n in sequentially. nth_prime n > 0 \ B * nth_prime n = c n * a n - c (n + 1)" using Bc_large by eventually_elim auto ultimately have "\\<^sub>F n in sequentially. c n = 0 \ c (Suc n) \ 0 \ B * nth_prime n = c n * a n - c (n + 1)" using frequently_eventually_frequently by fastforce from frequently_ex[OF this] obtain n where "c n = 0" "c (Suc n) \ 0" "B * nth_prime n = c n * a n - c (n + 1)" by auto then have "B * nth_prime n \ 0" by auto then show False using \B > 0\ by (simp add: mult_le_0_iff) qed qed have B_nth_prime:"\\<^sub>F n in sequentially. nth_prime n > B" proof - have "\\<^sub>F x in sequentially. B+1 \ nth_prime x" using nth_prime_at_top[unfolded filterlim_at_top_ge[where c="nat B+1"] ,rule_format,of "nat B + 1",simplified] apply (elim eventually_mono) using \B>0\ by auto then show ?thesis apply (elim eventually_mono) by auto qed have bc_epsilon:"\\<^sub>F n in sequentially. nth_prime (n+1) / nth_prime n > (c (n+1) - \) / c n" when "\>0" "\<1" for \::real proof - have "\\<^sub>F x in sequentially. \c (Suc x) / a x\ < \ / 2" using ca_vanish[unfolded tendsto_iff,rule_format, of "\/2"] \\>0\ by auto moreover then have "\\<^sub>F x in sequentially. \c (x+2) / a (x+1)\ < \ / 2" apply (subst (asm) eventually_sequentially_Suc[symmetric]) by simp moreover have "\\<^sub>F n in sequentially. B * nth_prime (n+1) = c (n+1) * a (n+1) - c (n + 2)" using Bc_large apply (subst (asm) eventually_sequentially_Suc[symmetric]) by (auto elim:eventually_mono) moreover have "\\<^sub>F n in sequentially. c n > 0 \ c (n+1) > 0 \ c (n+2) > 0" proof - have "\\<^sub>F n in sequentially. 0 < c (Suc n)" using c_pos by (subst eventually_sequentially_Suc) simp moreover then have "\\<^sub>F n in sequentially. 
0 < c (Suc (Suc n))" using c_pos by (subst eventually_sequentially_Suc) simp ultimately show ?thesis using c_pos by eventually_elim auto qed ultimately show ?thesis using Bc_large proof eventually_elim case (elim n) define \\<^sub>0 \\<^sub>1 where "\\<^sub>0 = c (n+1) / a n" and "\\<^sub>1 = c (n+2) / a (n+1)" have "\\<^sub>0 > 0" "\\<^sub>1 > 0" "\\<^sub>0 < \/2" "\\<^sub>1 < \/2" using a_pos elim \mono a\ by (auto simp add: \\<^sub>0_def \\<^sub>1_def abs_of_pos) have "(\ - \\<^sub>1) * c n > 0" apply (rule mult_pos_pos) using \\\<^sub>1 > 0\ \\\<^sub>1 < \/2\ \\>0\ elim by auto moreover have "\\<^sub>0 * (c (n+1) - \) > 0" apply (rule mult_pos_pos[OF \\\<^sub>0 > 0\]) using elim(4) that(2) by linarith ultimately have "(\ - \\<^sub>1) * c n + \\<^sub>0 * (c (n+1) - \) > 0" by auto moreover have "c n - \\<^sub>0 > 0" using \\\<^sub>0 < \ / 2\ elim(4) that(2) by linarith moreover have "c n > 0" by (simp add: elim(4)) ultimately have "(c (n+1) - \) / c n < (c (n+1) - \\<^sub>1) / (c n - \\<^sub>0)" by (auto simp add:field_simps) also have "... \ (c (n+1) - \\<^sub>1) / (c n - \\<^sub>0) * (a (n+1) / a n)" proof - have "(c (n+1) - \\<^sub>1) / (c n - \\<^sub>0) > 0" by (smt \0 < (\ - \\<^sub>1) * real_of_int (c n)\ \0 < real_of_int (c n) - \\<^sub>0\ divide_pos_pos elim(4) mult_le_0_iff of_int_less_1_iff that(2)) moreover have "(a (n+1) / a n) \ 1" using a_pos \mono a\ by (simp add: mono_def) - ultimately show ?thesis by (metis mult_cancel_left1 real_mult_le_cancel_iff2) + ultimately show ?thesis by (metis mult_cancel_left1 mult_le_cancel_iff2) qed also have "... = (B * nth_prime (n+1)) / (B * nth_prime n)" proof - have "B * nth_prime n = c n * a n - c (n + 1)" using elim by auto also have "... = a n * (c n - \\<^sub>0)" using a_pos[rule_format,of n] unfolding \\<^sub>0_def by (auto simp:field_simps) finally have "B * nth_prime n = a n * (c n - \\<^sub>0)" . moreover have "B * nth_prime (n+1) = a (n+1) * (c (n+1) - \\<^sub>1)" unfolding \\<^sub>1_def using a_pos[rule_format,of "n+1"] apply (subst \B * nth_prime (n + 1) = c (n + 1) * a (n + 1) - c (n + 2)\) by (auto simp:field_simps) ultimately show ?thesis by (simp add: mult.commute) qed also have "... = nth_prime (n+1) / nth_prime n" using \B>0\ by auto finally show ?case . qed qed have c_ubound:"\x. \n. c n > x" proof (rule ccontr) assume " \ (\x. \n. x < c n)" then obtain ub where "\n. c n \ ub" "ub > 0" by (meson dual_order.trans int_one_le_iff_zero_less le_cases not_le) define pa where "pa = (\n. nth_prime n / a n)" have pa_pos:"\n. pa n > 0" unfolding pa_def by (simp add: a_pos) have "liminf (\n. 1 / pa n) = 0" using nth_2 unfolding pa_def by auto then have "(\y\<^sub>F x in sequentially. ereal (1 / pa x) \ y)" apply (subst less_Liminf_iff[symmetric]) using \0 < B\ \0 < ub\ by auto then have "\\<^sub>F x in sequentially. 1 / pa x < B/(ub+1)" by (meson frequently_mono le_less_trans less_ereal.simps(1)) then have "\\<^sub>F x in sequentially. B*pa x > (ub+1)" apply (elim frequently_elim1) by (metis \0 < ub\ mult.left_neutral of_int_0_less_iff pa_pos pos_divide_less_eq pos_less_divide_eq times_divide_eq_left zless_add1_eq) moreover have "\\<^sub>F x in sequentially. c x \ ub" using \\n. c n \ ub\ by simp ultimately have "\\<^sub>F x in sequentially. B*pa x - c x > 1" apply (elim frequently_rev_mp eventually_mono) by linarith moreover have "(\n. B * pa n - c n) \0" unfolding pa_def using bac_close by auto from tendstoD[OF this,of 1] have "\\<^sub>F n in sequentially. 
\B * pa n - c n\ < 1" by auto ultimately have "\\<^sub>F x in sequentially. B*pa x - c x > 1 \ \B * pa x - c x\ < 1" using frequently_eventually_frequently by blast then show False by (simp add: frequently_def) qed have eq_2_11:"\\<^sub>F n in sequentially. c (n+1)>c n \ nth_prime (n+1) > nth_prime n + (1 - \)^2 * a n / B" when "\>0" "\<1" for \::real proof - have "\\<^sub>F x in sequentially. \c (Suc x) / a x\ < \" using ca_vanish[unfolded tendsto_iff,rule_format, of \] \\>0\ by auto moreover have "\\<^sub>F n in sequentially. c n > 0 \ c (n+1) > 0" proof - have "\\<^sub>F n in sequentially. 0 < c (Suc n)" using c_pos by (subst eventually_sequentially_Suc) simp then show ?thesis using c_pos by eventually_elim auto qed ultimately show ?thesis using Bc_large bc_epsilon[OF \\>0\ \\<1\] proof (eventually_elim, rule_tac impI) case (elim n) assume "c n < c (n + 1)" have "c (n+1) / a n < \" using a_pos[rule_format,of n] using elim(1,2) by auto also have "... \ \ * c n" using elim(2) that(1) by auto finally have "c (n+1) / a n < \ * c n" . then have "c (n+1) / c n < \ * a n" using a_pos[rule_format,of n] elim by (auto simp:field_simps) then have "(1 - \) * a n < a n - c (n+1) / c n" by (auto simp:algebra_simps) then have "(1 - \)^2 * a n / B < (1 - \) * (a n - c (n+1) / c n) / B" - apply (subst (asm) real_mult_less_iff1[symmetric, of "(1-\)/B"]) - using \\<1\ \B>0\ by (auto simp:divide_simps power2_eq_square) + apply (subst (asm) mult_less_iff1[symmetric, of "(1-\)/B"]) + using \\<1\ \B>0\ by (auto simp: divide_simps power2_eq_square mult_less_iff1) then have "nth_prime n + (1 - \)^2 * a n / B < nth_prime n + (1 - \) * (a n - c (n+1) / c n) / B" using \B>0\ by auto also have "... = nth_prime n + (1 - \) * ((c n *a n - c (n+1)) / c n) / B" using elim by (auto simp:field_simps) also have "... = nth_prime n + (1 - \) * (nth_prime n / c n)" proof - have "B * nth_prime n = c n * a n - c (n + 1)" using elim by auto from this[symmetric] show ?thesis using \B>0\ by simp qed also have "... = (1+(1-\)/c n) * nth_prime n" by (auto simp:algebra_simps) also have "... = ((c n+1-\)/c n) * nth_prime n" using elim by (auto simp:divide_simps) also have "... \ ((c (n+1) -\)/c n) * nth_prime n" proof - define cp where "cp = c n+1" have "c (n+1) \ cp" unfolding cp_def using \c n < c (n + 1)\ by auto moreover have "c n>0" "nth_prime n>0" using elim by auto ultimately show ?thesis apply (fold cp_def) by (auto simp:divide_simps) qed also have "... < nth_prime (n+1)" using elim by (auto simp:divide_simps) finally show "real (nth_prime n) + (1 - \)\<^sup>2 * real_of_int (a n) / real_of_int B < real (nth_prime (n + 1))" . qed qed have c_neq_large:"\\<^sub>F n in sequentially. c (n+1) \ c n" proof (rule ccontr) assume "\ (\\<^sub>F n in sequentially. c (n + 1) \ c n)" then have that:"\\<^sub>F n in sequentially. c (n + 1) = c n" unfolding frequently_def . have "\\<^sub>F x in sequentially. (B * int (nth_prime x) = c x * a x - c (x + 1) \ \real_of_int (c (x + 1))\ < real_of_int (a x) / 2) \ 0 < c x \ B < int (nth_prime x) \ (c (x+1)>c x \ nth_prime (x+1) > nth_prime x + a x / (2* B))" using Bc_large c_pos B_nth_prime eq_2_11[of "1-1/ sqrt 2",simplified] by eventually_elim (auto simp:divide_simps) then have "\\<^sub>F m in sequentially. nth_prime (m+1) > (1+1/(2*B))*nth_prime m" proof (elim frequently_eventually_at_top[OF that, THEN frequently_at_top_elim]) fix n assume "c (n + 1) = c n \ (\y\n. 
(B * int (nth_prime y) = c y * a y - c (y + 1) \ \real_of_int (c (y + 1))\ < real_of_int (a y) / 2) \ 0 < c y \ B < int (nth_prime y) \ (c y < c (y + 1) \ real (nth_prime y) + real_of_int (a y) / real_of_int (2 * B) < real (nth_prime (y + 1))))" then have "c (n + 1) = c n" and Bc_eq:"\y\n. B * int (nth_prime y) = c y * a y - c (y + 1) \ 0 < c y \ \real_of_int (c (y + 1))\ < real_of_int (a y) / 2 \ B < int (nth_prime y) \ (c y < c (y + 1) \ real (nth_prime y) + real_of_int (a y) / real_of_int (2 * B) < real (nth_prime (y + 1)))" by auto obtain m where "n c n" "c nN. N > n \ c N > c n" using c_ubound[rule_format, of "MAX x\{..n}. c x"] by (metis Max_ge atMost_iff dual_order.trans finite_atMost finite_imageI image_eqI linorder_not_le order_refl) then obtain N where "N>n" "c N>c n" by auto define A m where "A={m. n (m+1)\N \ c (m+1) > c n}" and "m = Min A" have "finite A" unfolding A_def by (metis (no_types, lifting) A_def add_leE finite_nat_set_iff_bounded_le mem_Collect_eq) moreover have "N-1\A" unfolding A_def using \c n < c N\ \n < N\ \c (n + 1) = c n\ by (smt Suc_diff_Suc Suc_eq_plus1 Suc_leI Suc_pred add.commute add_diff_inverse_nat add_leD1 diff_is_0_eq' mem_Collect_eq nat_add_left_cancel_less zero_less_one) ultimately have "m\A" using Min_in unfolding m_def by auto then have "n0" unfolding m_def A_def by auto moreover have "c m \ c n" proof (rule ccontr) assume " \ c m \ c n" then have "m-1\A" using \m\A\ \c (n + 1) = c n\ unfolding A_def by auto (smt One_nat_def Suc_eq_plus1 Suc_lessI less_diff_conv) from Min_le[OF \finite A\ this,folded m_def] \m>0\ show False by auto qed ultimately show ?thesis using that[of m] by auto qed have "(1 + 1 / (2 * B)) * nth_prime m < nth_prime m + a m / (2*B)" proof - have "nth_prime m < a m" proof - have "B * int (nth_prime m) < c m * (a m - 1)" using Bc_eq[rule_format,of m] \c m \ c n\ \c n < c (m + 1)\ \n < m\ by (auto simp:algebra_simps) also have "... \ c n * (a m - 1)" by (simp add: \c m \ c n\ a_pos mult_right_mono) finally have "B * int (nth_prime m) < c n * (a m - 1)" . moreover have "c n\B" proof - have " B * int (nth_prime n) = c n * (a n - 1)" "B < int (nth_prime n)" and c_a:"\real_of_int (c (n + 1))\ < real_of_int (a n) / 2" using Bc_eq[rule_format,of n] \c (n + 1) = c n\ by (auto simp:algebra_simps) from this(1) have " c n dvd (B * int (nth_prime n))" by simp moreover have "coprime (c n) (int (nth_prime n))" proof - have "c n < int (nth_prime n)" proof (rule ccontr) assume "\ c n < int (nth_prime n)" then have asm:"c n \ int (nth_prime n)" by auto then have "a n > 2 * nth_prime n" using c_a \c (n + 1) = c n\ by auto then have "a n -1 \ 2 * nth_prime n" by simp then have "a n - 1 > 2 * B" using \B < int (nth_prime n)\ by auto from mult_le_less_imp_less[OF asm this] \B>0\ have "int (nth_prime n) * (2 * B) < c n * (a n - 1)" by auto then show False using \B * int (nth_prime n) = c n * (a n - 1)\ by (smt \0 < B\ \B < int (nth_prime n)\ combine_common_factor mult.commute mult_pos_pos) qed then have "\ nth_prime n dvd c n" by (simp add: Bc_eq zdvd_not_zless) then have "coprime (int (nth_prime n)) (c n)" by (auto intro!:prime_imp_coprime_int) then show ?thesis using coprime_commute by blast qed ultimately have "c n dvd B" using coprime_dvd_mult_left_iff by auto then show ?thesis using \0 < B\ zdvd_imp_le by blast qed moreover have "c n > 0 " using Bc_eq by blast ultimately show ?thesis using \B>0\ by (smt a_pos mult_mono) qed then show ?thesis using \B>0\ by (auto simp:field_simps) qed also have "... 
< nth_prime (m+1)" using Bc_eq[rule_format, of m] \n \c m \ c n\ \c n < c (m+1)\ by linarith finally show "\j>n. (1 + 1 / real_of_int (2 * B)) * real (nth_prime j) < real (nth_prime (j + 1))" using \m>n\ by auto qed then have "\\<^sub>F m in sequentially. nth_prime (m+1)/nth_prime m > (1+1/(2*B))" by (auto elim:frequently_elim1 simp:field_simps) moreover have "\\<^sub>F m in sequentially. nth_prime (m+1)/nth_prime m < (1+1/(2*B))" using ratio_of_consecutive_primes[unfolded tendsto_iff,rule_format,of "1/(2*B)"] \B>0\ unfolding dist_real_def by (auto elim!:eventually_mono simp:algebra_simps) ultimately show False by (simp add: eventually_mono frequently_def) qed have c_gt_half:"\\<^sub>F N in sequentially. card {n\{N..<2*N}. c n > c (n+1)} > N / 2" proof - define h where "h=(\n. (nth_prime (2*n) - nth_prime n) / sqrt (nth_prime n))" have "\\<^sub>F n in sequentially. h n < n / 2" proof - have "\\<^sub>F n in sequentially. h n < n powr (5/6)" using nth_prime_double_sqrt_less[of "1/3"] unfolding h_def by auto moreover have "\\<^sub>F n in sequentially. n powr (5/6) < (n /2)" by real_asymp ultimately show ?thesis by eventually_elim auto qed moreover have "\\<^sub>F n in sequentially. sqrt (nth_prime n) / a n < 1 / (2*B)" using nth_1[THEN tendsto_real_sqrt,unfolded tendsto_iff ,rule_format,of "1/(2*B)"] \B>0\ a_pos by (auto simp:real_sqrt_divide abs_of_pos) ultimately have "\\<^sub>F x in sequentially. c (x+1) \ c x \ sqrt (nth_prime x) / a x < 1 / (2*B) \ h x < x / 2 \ (c (x+1)>c x \ nth_prime (x+1) > nth_prime x + a x / (2* B))" using c_neq_large B_nth_prime eq_2_11[of "1-1/ sqrt 2",simplified] by eventually_elim (auto simp:divide_simps) then show ?thesis proof (elim eventually_at_top_mono) fix N assume "N\1" and N_asm:"\y\N. c (y + 1) \ c y \ sqrt (real (nth_prime y)) / real_of_int (a y) < 1 / real_of_int (2 * B) \ h y < y / 2 \ (c y < c (y + 1) \ real (nth_prime y) + real_of_int (a y) / real_of_int (2 * B) < real (nth_prime (y + 1)))" define S where "S={n \ {N..<2 * N}. c n < c (n + 1)}" define g where "g=(\n. (nth_prime (n+1) - nth_prime n) / sqrt (nth_prime n))" define f where "f=(\n. nth_prime (n+1) - nth_prime n)" have g_gt_1:"g n>1" when "n\N" "c n < c (n + 1)" for n proof - have "nth_prime n + sqrt (nth_prime n) < nth_prime (n+1)" proof - have "nth_prime n + sqrt (nth_prime n) < nth_prime n + a n / (2*B)" using N_asm[rule_format,OF \n\N\] a_pos by (auto simp:field_simps) also have "... < nth_prime (n+1)" using N_asm[rule_format,OF \n\N\] \c n < c (n + 1)\ by auto finally show ?thesis . qed then show ?thesis unfolding g_def using \c n < c (n + 1)\ by auto qed have g_geq_0:"g n \ 0" for n unfolding g_def by auto have "finite S" "\x\S. x\N \ c x sum g S" proof (induct S) case empty then show ?case by auto next case (insert x F) moreover have "g x>1" proof - have "c x < c (x+1)" "x\N" using insert(4) by auto then show ?thesis using g_gt_1 by auto qed ultimately show ?case by simp qed also have "... \ sum g {N..<2*N}" apply (rule sum_mono2) unfolding S_def using g_geq_0 by auto also have "... \ sum (\n. f n/sqrt (nth_prime N)) {N..<2*N}" apply (rule sum_mono) unfolding f_def g_def by (auto intro!:divide_left_mono) also have "... = sum f {N..<2*N} / sqrt (nth_prime N)" unfolding sum_divide_distrib[symmetric] by auto also have "... 
= (nth_prime (2*N) - nth_prime N) / sqrt (nth_prime N)" proof - have "sum f {N..<2 * N} = nth_prime (2 * N) - nth_prime N" proof (induct N) case 0 then show ?case by simp next case (Suc N) have ?case if "N=0" proof - have "sum f {Suc N..<2 * Suc N} = sum f {1}" using that by (simp add: numeral_2_eq_2) also have "... = nth_prime 2 - nth_prime 1" unfolding f_def by (simp add:numeral_2_eq_2) also have "... = nth_prime (2 * Suc N) - nth_prime (Suc N)" using that by auto finally show ?thesis . qed moreover have ?case if "N\0" proof - have "sum f {Suc N..<2 * Suc N} = sum f {N..<2 * Suc N} - f N" apply (subst (2) sum.atLeast_Suc_lessThan) using that by auto also have "... = sum f {N..<2 * N}+ f (2*N) + f(2*N+1) - f N" by auto also have "... = nth_prime (2 * Suc N) - nth_prime (Suc N)" using Suc unfolding f_def by auto finally show ?thesis . qed ultimately show ?case by blast qed then show ?thesis by auto qed also have "... = h N" unfolding h_def by auto also have "... < N/2" using N_asm by auto finally have "card S < N/2" . define T where "T={n \ {N..<2 * N}. c n > c (n + 1)}" have "T \ S = {N..<2 * N}" "T \ S = {}" "finite T" unfolding T_def S_def using N_asm by fastforce+ then have "card T + card S = card {N..<2 * N}" using card_Un_disjoint \finite S\ by metis also have "... = N" by simp finally have "card T + card S = N" . with \card S < N/2\ show "card T > N/2" by linarith qed qed text\Inequality (3.5) in the original paper required a slight modification: \ have a_gt_plus:"\\<^sub>F n in sequentially. c n > c (n+1) \a (n+1) > a n + (a n - c(n+1) - 1) / c (n+1)" proof - note a_gt_1[THEN eventually_all_ge_at_top] c_pos[THEN eventually_all_ge_at_top] moreover have "\\<^sub>F n in sequentially. B * int (nth_prime (n+1)) = c (n+1) * a (n+1) - c (n + 2)" using Bc_large apply (subst (asm) eventually_sequentially_Suc[symmetric]) by (auto elim:eventually_mono) moreover have "\\<^sub>F n in sequentially. B * int (nth_prime n) = c n * a n - c (n + 1) \ \c (n + 1)\ < a n / 2" using Bc_large by (auto elim:eventually_mono) ultimately show ?thesis apply (eventually_elim) proof (rule impI) fix n assume "\y\n. 1 < a y" "\y\n. 0 < c y" and Suc_n_eq:"B * int (nth_prime (n + 1)) = c (n + 1) * a (n + 1) - c (n + 2)" and "B * int (nth_prime n) = c n * a n - c (n + 1) \ real_of_int \c (n + 1)\ < real_of_int (a n) / 2" and "c (n + 1) < c n" then have n_eq:"B * int (nth_prime n) = c n * a n - c (n + 1)" and c_less_a: "real_of_int \c (n + 1)\ < real_of_int (a n) / 2" by auto from \\y\n. 1 < a y\ \\y\n. 0 < c y\ have *:"a n>1" "a (n+1) > 1" "c n > 0" "c (n+1) > 0" "c (n+2) > 0" by auto then have "(1+1/c (n+1))* (a n - 1)/a (n+1) = (c (n+1)+1) * ((a n - 1) / (c (n+1) * a (n+1)))" by (auto simp:field_simps) also have "... \ c n * ((a n - 1) / (c (n+1) * a (n+1)))" apply (rule mult_right_mono) subgoal using \c (n + 1) < c n\ by auto subgoal by (smt \0 < c (n + 1)\ a_pos divide_nonneg_pos mult_pos_pos of_int_0_le_iff of_int_0_less_iff) done also have "... = (c n * (a n - 1)) / (c (n+1) * a (n+1))" by auto also have "... < (c n * (a n - 1)) / (c (n+1) * a (n+1) - c (n+2))" apply (rule divide_strict_left_mono) subgoal using \c (n+2) > 0\ by auto unfolding Suc_n_eq[symmetric] using * \B>0\ by auto also have "... < (c n * a n - c (n+1)) / (c (n+1) * a (n+1) - c (n+2))" apply (rule frac_less) unfolding Suc_n_eq[symmetric] using * \B>0\ \c (n + 1) < c n\ by (auto simp:algebra_simps) also have "... = nth_prime n / nth_prime (n+1)" unfolding Suc_n_eq[symmetric] n_eq[symmetric] using \B>0\ by auto also have "... 
< 1" by auto finally have "(1 + 1 / real_of_int (c (n + 1))) * real_of_int (a n - 1) / real_of_int (a (n + 1)) < 1 " . then show "a n + (a n - c (n + 1) - 1) / (c (n + 1)) < (a (n + 1))" using * by (auto simp:field_simps) qed qed have a_gt_1:"\\<^sub>F n in sequentially. c n > c (n+1) \ a (n+1) > a n + 1" using Bc_large a_gt_plus c_pos[THEN eventually_all_ge_at_top] apply eventually_elim proof (rule impI) fix n assume "c (n + 1) < c n \ a n + (a n - c (n + 1) - 1) / c (n + 1) < a (n + 1)" "c (n + 1) < c n" and B_eq:"B * int (nth_prime n) = c n * a n - c (n + 1) \ \real_of_int (c (n + 1))\ < real_of_int (a n) / 2" and c_pos:"\y\n. 0 < c y" from this(1,2) have "a n + (a n - c (n + 1) - 1) / c (n + 1) < a (n + 1)" by auto moreover have "a n - 2 * c (n+1) > 0" using B_eq c_pos[rule_format,of "n+1"] by auto then have "a n - 2 * c (n+1) \ 1" by simp then have "(a n - c (n + 1) - 1) / c (n + 1) \ 1" using c_pos[rule_format,of "n+1"] by (auto simp:field_simps) ultimately show "a n + 1 < a (n + 1)" by auto qed text\The following corresponds to inequality (3.6) in the paper, which had to be slightly corrected: \ have a_gt_sqrt:"\\<^sub>F n in sequentially. c n > c (n+1) \ a (n+1) > a n + (sqrt n - 2)" proof - have a_2N:"\\<^sub>F N in sequentially. a (2*N) \ N /2 +1" using c_gt_half a_gt_1[THEN eventually_all_ge_at_top] proof eventually_elim case (elim N) define S where "S={n \ {N..<2 * N}. c (n + 1) < c n}" define f where "f = (\n. a (Suc n) - a n)" have f_1:"\x\S. f x\1" and f_0:"\x. f x\0" subgoal using elim unfolding S_def f_def by auto subgoal using \mono a\[THEN incseq_SucD] unfolding f_def by auto done have "N / 2 < card S" using elim unfolding S_def by auto also have "... \ sum f S" unfolding of_int_sum apply (rule sum_bounded_below[of _ 1,simplified]) using f_1 by auto also have "... \ sum f {N..<2 * N}" unfolding of_int_sum apply (rule sum_mono2) unfolding S_def using f_0 by auto also have "... = a (2*N) - a N" unfolding of_int_sum f_def of_int_diff apply (rule sum_Suc_diff') by auto finally have "N / 2 < a (2*N) - a N" . then show ?case using a_pos[rule_format,of N] by linarith qed have a_n4:"\\<^sub>F n in sequentially. a n > n/4" proof - obtain N where a_N:"\n\N. a (2*n) \ n /2+1" using a_2N unfolding eventually_at_top_linorder by auto have "a n>n/4" when "n\2*N" for n proof - define n' where "n'=n div 2" have "n'\N" unfolding n'_def using that by auto have "n/4 < n' /2+1" unfolding n'_def by auto also have "... \ a (2*n')" using a_N \n'\N\ by auto also have "... \a n" unfolding n'_def apply (cases "even n") subgoal by simp subgoal by (simp add: assms(2) incseqD) done finally show ?thesis . qed then show ?thesis unfolding eventually_at_top_linorder by auto qed have c_sqrt:"\\<^sub>F n in sequentially. c n < sqrt n / 4" proof - have "\\<^sub>F x in sequentially. x>1" by simp moreover have "\\<^sub>F x in sequentially. real (nth_prime x) / (real x * ln (real x)) < 2" using nth_prime_asymptotics[unfolded asymp_equiv_def,THEN order_tendstoD(2),of 2] by simp ultimately have "\\<^sub>F n in sequentially. c n < B*8 *ln n + 1" using a_n4 Bc_large proof eventually_elim case (elim n) from this(4) have "c n=(B*nth_prime n+c (n+1))/a n" using a_pos[rule_format,of n] by (auto simp:divide_simps) also have "... = (B*nth_prime n)/a n+c (n+1)/a n" by (auto simp:divide_simps) also have "... < (B*nth_prime n)/a n + 1" proof - have "c (n+1)/a n < 1" using elim(4) by auto then show ?thesis by auto qed also have "... 
< B*8 * ln n + 1" proof - have "B*nth_prime n < 2*B*n*ln n" using \real (nth_prime n) / (real n * ln (real n)) < 2\ \B>0\ \ 1 < n\ by (auto simp:divide_simps) moreover have "real n / 4 < real_of_int (a n)" by fact ultimately have "(B*nth_prime n) / a n < (2*B*n*ln n) / (n/4)" apply (rule_tac frac_less) using \B>0\ \ 1 < n\ by auto also have "... = B*8 * ln n" using \ 1 < n\ by auto finally show ?thesis by auto qed finally show ?case . qed moreover have "\\<^sub>F n in sequentially. B*8 *ln n + 1 < sqrt n / 4" by real_asymp ultimately show ?thesis by eventually_elim auto qed have "\\<^sub>F n in sequentially. 0 < c (n+1)" "\\<^sub>F n in sequentially. c (n+1) < sqrt (n+1) / 4" "\\<^sub>F n in sequentially. n > 4" "\\<^sub>F n in sequentially. (n - 4) / sqrt (n + 1) + 1 > sqrt n" subgoal using c_pos[THEN eventually_all_ge_at_top] by eventually_elim auto subgoal using c_sqrt[THEN eventually_all_ge_at_top] by eventually_elim (use le_add1 in blast) subgoal by simp subgoal by real_asymp done then show ?thesis using a_gt_plus a_n4 apply eventually_elim proof (rule impI) fix n assume asm:"0 < c (n + 1)" "c (n + 1) < sqrt (real (n + 1)) / 4" and a_ineq:"c (n + 1) < c n \ a n + (a n - c (n + 1) - 1) / c (n + 1) < a (n + 1)" "c (n + 1) < c n" and "n / 4 < a n" "n > 4" and n_neq:" sqrt (real n) < real (n - 4) / sqrt (real (n + 1)) + 1" have "(n-4) / sqrt(n+1) = (n/4 - 1)/ (sqrt (real (n + 1)) / 4)" using \n>4\ by (auto simp:divide_simps) also have "... < (a n - 1) / c (n + 1)" apply (rule frac_less) using \n > 4\ \n / 4 < a n\ \0 < c (n + 1)\ \c (n + 1) < sqrt (real (n + 1)) / 4\ by auto also have "... - 1 = (a n - c (n + 1) - 1) / c (n + 1)" using \0 < c (n + 1)\ by (auto simp:field_simps) also have "a n + ... < a (n+1)" using a_ineq by auto finally have "a n + ((n - 4) / sqrt (n + 1) - 1) < a (n + 1)" by simp moreover have "(n - 4) / sqrt (n + 1) - 1 > sqrt n - 2" using n_neq[THEN diff_strict_right_mono,of 2] \n>4\ by (auto simp:algebra_simps of_nat_diff) ultimately show "real_of_int (a n) + (sqrt (real n) - 2) < real_of_int (a (n + 1))" by argo qed qed text\The following corresponds to inequality $ a_{2N} > N^{3/2}/2$ in the paper, which had to be slightly corrected: \ have a_2N_sqrt:"\\<^sub>F N in sequentially. a (2*N) > real N * (sqrt (real N)/2 - 1)" using c_gt_half a_gt_sqrt[THEN eventually_all_ge_at_top] eventually_gt_at_top[of 4] proof eventually_elim case (elim N) define S where "S={n \ {N..<2 * N}. c (n + 1) < c n}" define f where "f = (\n. a (Suc n) - a n)" have f_N:"\x\S. f x\sqrt N - 2" proof fix x assume "x\S" then have "sqrt (real x) - 2 < f x" "x\N" using elim unfolding S_def f_def by auto moreover have "sqrt x - 2 \ sqrt N - 2" using \x\N\ by simp ultimately show "sqrt (real N) - 2 \ real_of_int (f x)" by argo qed have f_0:"\x. f x\0" using \mono a\[THEN incseq_SucD] unfolding f_def by auto have "(N / 2) * (sqrt N - 2) < card S * (sqrt N - 2)" apply (rule mult_strict_right_mono) subgoal using elim unfolding S_def by auto subgoal using \N>4\ by (metis diff_gt_0_iff_gt numeral_less_real_of_nat_iff real_sqrt_four real_sqrt_less_iff) done also have "... \ sum f S" unfolding of_int_sum apply (rule sum_bounded_below) using f_N by auto also have "... \ sum f {N..<2 * N}" unfolding of_int_sum apply (rule sum_mono2) unfolding S_def using f_0 by auto also have "... = a (2*N) - a N" unfolding of_int_sum f_def of_int_diff apply (rule sum_Suc_diff') by auto finally have "real N / 2 * (sqrt (real N) - 2) < real_of_int (a (2 * N) - a N)" . 
then have "real N / 2 * (sqrt (real N) - 2) < a (2 * N)" using a_pos[rule_format,of N] by linarith then show ?case by (auto simp:field_simps) qed text\The following part is required to derive the final contradiction of the proof.\ have a_n_sqrt:"\\<^sub>F n in sequentially. a n > (((n-1)/2) powr (3/2) - (n-1)) /2" proof (rule sequentially_even_odd_imp) define f where "f=(\N. ((real (2 * N - 1) / 2) powr (3 / 2) - real (2 * N - 1)) / 2)" define g where "g=(\N. real N * (sqrt (real N) / 2 - 1))" have "\\<^sub>F N in sequentially. g N > f N" unfolding f_def g_def by real_asymp moreover have "\\<^sub>F N in sequentially. a (2 * N) > g N" unfolding g_def using a_2N_sqrt . ultimately show "\\<^sub>F N in sequentially. f N < a (2 * N)" by eventually_elim auto next define f where "f=(\N. ((real (2 * N + 1 - 1) / 2) powr (3 / 2) - real (2 * N + 1 - 1)) / 2)" define g where "g=(\N. real N * (sqrt (real N) / 2 - 1))" have "\\<^sub>F N in sequentially. g N = f N" using eventually_gt_at_top[of 0] apply eventually_elim unfolding f_def g_def by (auto simp:algebra_simps powr_half_sqrt[symmetric] powr_mult_base) moreover have "\\<^sub>F N in sequentially. a (2 * N) > g N" unfolding g_def using a_2N_sqrt . moreover have "\\<^sub>F N in sequentially. a (2 * N + 1) \ a (2*N)" apply (rule eventuallyI) using \mono a\ by (simp add: incseqD) ultimately show "\\<^sub>F N in sequentially. f N < (a (2 * N + 1))" apply eventually_elim by auto qed have a_nth_prime_gt:"\\<^sub>F n in sequentially. a n / nth_prime n > 1" proof - define f where "f=(\n::nat. (((n-1)/2) powr (3/2) - (n-1)) /2)" have "\\<^sub>F x in sequentially. real (nth_prime x) / (real x * ln (real x)) < 2" using nth_prime_asymptotics[unfolded asymp_equiv_def,THEN order_tendstoD(2),of 2] by simp from this[] eventually_gt_at_top[of 1] have "\\<^sub>F n in sequentially. real (nth_prime n) < 2*(real n * ln n)" apply eventually_elim by (auto simp:field_simps) moreover have *:"\\<^sub>F N in sequentially. f N >0 " unfolding f_def by real_asymp moreover have " \\<^sub>F n in sequentially. f n < a n" using a_n_sqrt unfolding f_def . ultimately have "\\<^sub>F n in sequentially. a n / nth_prime n > f n / (2*(real n * ln n))" apply eventually_elim apply (rule frac_less2) by auto moreover have "\\<^sub>F n in sequentially. (f n)/ (2*(real n * ln n)) > 1" unfolding f_def by real_asymp ultimately show ?thesis by eventually_elim argo qed have a_nth_prime_lt:"\\<^sub>F n in sequentially. a n / nth_prime n < 1" proof - have "liminf (\x. a x / nth_prime x) < 1" using nth_2 by auto from this[unfolded less_Liminf_iff] show ?thesis apply (auto elim!:frequently_elim1) by (meson divide_less_eq_1 ereal_less_eq(7) leD leI nth_prime_nonzero of_nat_eq_0_iff of_nat_less_0_iff order.trans) qed from a_nth_prime_gt a_nth_prime_lt show False by (simp add: eventually_mono frequently_def) qed section\Acknowledgements\ text\A.K.-A. and W.L. were supported by the ERC Advanced Grant ALEXANDRIA (Project 742178) funded by the European Research Council and led by Professor Lawrence Paulson at the University of Cambridge, UK.\ end \ No newline at end of file diff --git a/thys/LLL_Basis_Reduction/LLL.thy b/thys/LLL_Basis_Reduction/LLL.thy --- a/thys/LLL_Basis_Reduction/LLL.thy +++ b/thys/LLL_Basis_Reduction/LLL.thy @@ -1,1745 +1,1745 @@ (* Authors: Jose Divasón Maximilian Haslbeck Sebastiaan Joosten René Thiemann Akihisa Yamada License: BSD *) section \The LLL Algorithm\ text \Soundness of the LLL algorithm is proven in four steps. 
In the basic version, we do recompute the Gram-Schmidt orthogonal (GSO) basis in every step. This basic version will have a full functional soundness proof, i.e., termination and the property that the returned basis is reduced. Then in LLL-Number-Bounds we will strengthen the invariant and prove that all intermediate numbers stay polynomial in size. Moreover, in LLL-Impl we will refine the basic version, so that the GSO does not need to be recomputed in every step. Finally, in LLL-Complexity, we develop a cost-annotated version of the refined algorithm and prove a polynomial upper bound on the number of arithmetic operations.\ text \This theory provides a basic implementation and a soundness proof of the LLL algorithm to compute a "short" vector in a lattice.\ theory LLL imports Gram_Schmidt_2 Missing_Lemmas Jordan_Normal_Form.Determinant "Abstract-Rewriting.SN_Order_Carrier" begin subsection \Core Definitions, Invariants, and Theorems for Basic Version\ (* Note/TODO by Max Haslbeck: Up to here I refactored the code in Gram_Schmidt_2 and Gram_Schmidt_Int which now makes heavy use of locales. In the future I would also like to do this here (instead of using LLL_invariant everywhere). *) locale LLL = fixes n :: nat (* n-dimensional vectors, *) and m :: nat (* number of vectors *) and fs_init :: "int vec list" (* initial basis *) and \ :: rat (* approximation factor *) begin sublocale vec_module "TYPE(int)" n. abbreviation RAT where "RAT \ map (map_vec rat_of_int)" abbreviation SRAT where "SRAT xs \ set (RAT xs)" abbreviation Rn where "Rn \ carrier_vec n :: rat vec set" sublocale gs: gram_schmidt_fs n "RAT fs_init" . abbreviation lin_indep where "lin_indep fs \ gs.lin_indpt_list (RAT fs)" abbreviation gso where "gso fs \ gram_schmidt_fs.gso n (RAT fs)" abbreviation \ where "\ fs \ gram_schmidt_fs.\ n (RAT fs)" abbreviation reduced where "reduced fs \ gram_schmidt_fs.reduced n (RAT fs) \" abbreviation weakly_reduced where "weakly_reduced fs \ gram_schmidt_fs.weakly_reduced n (RAT fs) \" text \lattice of initial basis\ definition "L = lattice_of fs_init" text \maximum squared norm of initial basis\ definition "N = max_list (map (nat \ sq_norm) fs_init)" text \maximum absolute value in initial basis\ definition "M = Max ({abs (fs_init ! i $ j) | i j. i < m \ j < n} \ {0})" text \This is the core invariant which enables to prove functional correctness.\ definition "\_small fs i = (\ j < i. abs (\ fs i j) \ 1/2)" definition LLL_invariant_weak :: "int vec list \ bool" where "LLL_invariant_weak fs = ( gs.lin_indpt_list (RAT fs) \ lattice_of fs = L \ length fs = m)" lemma LLL_inv_wD: assumes "LLL_invariant_weak fs" shows "lin_indep fs" "length (RAT fs) = m" "set fs \ carrier_vec n" "\ i. i < m \ fs ! i \ carrier_vec n" "\ i.
i < m \ gso fs i \ carrier_vec n" "length fs = m" "lattice_of fs = L" proof (atomize (full), goal_cases) case 1 interpret gs': gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use assms LLL_invariant_weak_def gs.lin_indpt_list_def in auto) show ?case using assms gs'.fs_carrier gs'.f_carrier gs'.gso_carrier by (auto simp add: LLL_invariant_weak_def gram_schmidt_fs.reduced_def) qed lemma LLL_inv_wI: assumes "set fs \ carrier_vec n" "length fs = m" "lattice_of fs = L" "lin_indep fs" shows "LLL_invariant_weak fs" unfolding LLL_invariant_weak_def Let_def using assms by auto definition LLL_invariant :: "bool \ nat \ int vec list \ bool" where "LLL_invariant upw i fs = ( gs.lin_indpt_list (RAT fs) \ lattice_of fs = L \ reduced fs i \ i \ m \ length fs = m \ (upw \ \_small fs i) )" lemma LLL_inv_imp_w: "LLL_invariant upw i fs \ LLL_invariant_weak fs" unfolding LLL_invariant_def LLL_invariant_weak_def by blast lemma LLL_invD: assumes "LLL_invariant upw i fs" shows "lin_indep fs" "length (RAT fs) = m" "set fs \ carrier_vec n" "\ i. i < m \ fs ! i \ carrier_vec n" "\ i. i < m \ gso fs i \ carrier_vec n" "length fs = m" "lattice_of fs = L" "weakly_reduced fs i" "i \ m" "reduced fs i" "upw \ \_small fs i" proof (atomize (full), goal_cases) case 1 interpret gs': gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use assms LLL_invariant_def gs.lin_indpt_list_def in auto) show ?case using assms gs'.fs_carrier gs'.f_carrier gs'.gso_carrier by (auto simp add: LLL_invariant_def gram_schmidt_fs.reduced_def) qed lemma LLL_invI: assumes "set fs \ carrier_vec n" "length fs = m" "lattice_of fs = L" "i \ m" "lin_indep fs" "reduced fs i" "upw \ \_small fs i" shows "LLL_invariant upw i fs" unfolding LLL_invariant_def Let_def split using assms by auto end locale fs_int' = fixes n m fs_init fs assumes LLL_inv: "LLL.LLL_invariant_weak n m fs_init fs" sublocale fs_int' \ fs_int_indpt using LLL_inv unfolding LLL.LLL_invariant_weak_def by (unfold_locales) blast context LLL begin lemma gso_cong: assumes "\ i. i \ x \ f1 ! i = f2 ! i" "x < length f1" "x < length f2" shows "gso f1 x = gso f2 x" by (rule gs.gso_cong, insert assms, auto) lemma \_cong: assumes "\ k. j < i \ k \ j \ f1 ! k = f2 ! k" and i: "i < length f1" "i < length f2" and "j < i \ f1 ! i = f2 ! i" shows "\ f1 i j = \ f2 i j" by (rule gs.\_cong, insert assms, auto) definition reduction where "reduction = (4+\)/(4*\)" definition d :: "int vec list \ nat \ int" where "d fs k = gs.Gramian_determinant fs k" definition D :: "int vec list \ nat" where "D fs = nat (\ i < m. 
d fs i)" definition "d\ gs i j = int_of_rat (of_int (d gs (Suc j)) * \ gs i j)" definition logD :: "int vec list \ nat" where "logD fs = (if \ = 4/3 then (D fs) else nat (floor (log (1 / of_rat reduction) (D fs))))" definition LLL_measure :: "nat \ int vec list \ nat" where "LLL_measure i fs = (2 * logD fs + m - i)" context fixes fs assumes Linv: "LLL_invariant_weak fs" begin interpretation fs: fs_int' n m fs_init fs by (standard) (use Linv in auto) lemma Gramian_determinant: assumes k: "k \ m" shows "of_int (gs.Gramian_determinant fs k) = (\ j 0" (is ?g2) using assms fs.Gramian_determinant LLL_inv_wD[OF Linv] by auto lemma LLL_d_pos [intro]: assumes k: "k \ m" shows "d fs k > 0" unfolding d_def using fs.Gramian_determinant k LLL_inv_wD[OF Linv] by auto lemma LLL_d_Suc: assumes k: "k < m" shows "of_int (d fs (Suc k)) = sq_norm (gso fs k) * of_int (d fs k)" using assms fs.fs_int_d_Suc LLL_inv_wD[OF Linv] unfolding fs.d_def d_def by auto lemma LLL_D_pos: shows "D fs > 0" using fs.fs_int_D_pos LLL_inv_wD[OF Linv] unfolding D_def fs.D_def fs.d_def d_def by auto end text \Condition when we can increase the value of $i$\ lemma increase_i: assumes Linv: "LLL_invariant upw i fs" assumes i: "i < m" and upw: "upw \ i = 0" and red_i: "i \ 0 \ sq_norm (gso fs (i - 1)) \ \ * sq_norm (gso fs i)" shows "LLL_invariant True (Suc i) fs" "LLL_measure i fs > LLL_measure (Suc i) fs" proof - note inv = LLL_invD[OF Linv] from inv(8,10) have red: "weakly_reduced fs i" and sred: "reduced fs i" by (auto) from red red_i i have red: "weakly_reduced fs (Suc i)" unfolding gram_schmidt_fs.weakly_reduced_def by (intro allI impI, rename_tac ii, case_tac "Suc ii = i", auto) from inv(11) upw have sred_i: "\ j. j < i \ \\ fs i j\ \ 1 / 2" unfolding \_small_def by auto from sred sred_i have sred: "reduced fs (Suc i)" unfolding gram_schmidt_fs.reduced_def by (intro conjI[OF red] allI impI, rename_tac ii j, case_tac "ii = i", auto) show "LLL_invariant True (Suc i) fs" by (intro LLL_invI, insert inv red sred i, auto) show "LLL_measure i fs > LLL_measure (Suc i) fs" unfolding LLL_measure_def using i by auto qed text \Standard addition step which makes $\mu_{i,j}$ small\ definition "\_small_row i fs j = (\ j'. j \ j' \ j' < i \ abs (\ fs i j') \ inverse 2)" lemma basis_reduction_add_row_main: assumes Linv: "LLL_invariant_weak fs" and i: "i < m" and j: "j < i" and fs': "fs' = fs[ i := fs ! i - c \\<^sub>v fs ! j]" shows "LLL_invariant_weak fs'" "LLL_invariant True i fs \ LLL_invariant True i fs'" "c = round (\ fs i j) \ \_small_row i fs (Suc j) \ \_small_row i fs' j" (* mu-value at position i j gets small *) "c = round (\ fs i j) \ abs (\ fs' i j) \ 1/2" (* mu-value at position i j gets small *) "LLL_measure i fs' = LLL_measure i fs" (* new values of gso: no change *) "\ i. i < m \ gso fs' i = gso fs i" (* new values of mu *) "\ i' j'. i' < m \ j' < m \ \ fs' i' j' = (if i' = i \ j' \ j then \ fs i j' - of_int c * \ fs j j' else \ fs i' j')" (* new values of d *) "\ ii. ii \ m \ d fs' ii = d fs ii" proof - define bnd :: rat where bnd: "bnd = 4 ^ (m - 1 - Suc j) * of_nat (N ^ (m - 1) * m)" define M where "M = map (\i. map (\ fs i) [0.. i" "j < m" and jstrict: "j < i" and add: "set fs \ carrier_vec n" "i < length fs" "j < length fs" "i \ j" and len: "length fs = m" and indep: "lin_indep fs" using inv j i by auto let ?R = rat_of_int let ?RV = "map_vec ?R" from inv i j have Fij: "fs ! i \ carrier_vec n" "fs ! j \ carrier_vec n" by auto let ?x = "fs ! i - c \\<^sub>v fs ! 
j" let ?g = "gso fs" let ?g' = "gso fs'" let ?mu = "\ fs" let ?mu' = "\ fs'" from inv j i have Fi:"\ i. i < length (RAT fs) \ (RAT fs) ! i \ carrier_vec n" and gs_carr: "?g j \ carrier_vec n" "?g i \ carrier_vec n" "\ i. i < j \ ?g i \ carrier_vec n" "\ j. j < i \ ?g j \ carrier_vec n" and len': "length (RAT fs) = m" and add':"set (map ?RV fs) \ carrier_vec n" by auto have RAT_F1: "RAT fs' = (RAT fs)[i := (RAT fs) ! i - ?R c \\<^sub>v (RAT fs) ! j]" unfolding fs' proof (rule nth_equalityI[rule_format], goal_cases) case (2 k) show ?case proof (cases "k = i") case False thus ?thesis using 2 by auto next case True hence "?thesis = (?RV (fs ! i - c \\<^sub>v fs ! j) = ?RV (fs ! i) - ?R c \\<^sub>v ?RV (fs ! j))" using 2 add by auto also have "\" by (rule eq_vecI, insert Fij, auto) finally show ?thesis by simp qed qed auto hence RAT_F1_i:"RAT fs' ! i = (RAT fs) ! i - ?R c \\<^sub>v (RAT fs) ! j" (is "_ = _ - ?mui") using i len by auto have uminus: "fs ! i - c \\<^sub>v fs ! j = fs ! i + -c \\<^sub>v fs ! j" by (subst minus_add_uminus_vec, insert Fij, auto) have "lattice_of fs' = lattice_of fs" unfolding fs' uminus by (rule lattice_of_add[OF add, of _ "- c"], auto) with inv have lattice: "lattice_of fs' = L" by auto from add len have "k < length fs \ \ k \ i \ fs' ! k \ carrier_vec n" for k unfolding fs' by (metis (no_types, lifting) nth_list_update nth_mem subset_eq carrier_dim_vec index_minus_vec(2) index_smult_vec(2)) hence "k < length fs \ fs' ! k \ carrier_vec n" for k unfolding fs' using add len by (cases "k \ i",auto) with len have F1: "set fs' \ carrier_vec n" "length fs' = m" unfolding fs' by (auto simp: set_conv_nth) hence F1': "length (RAT fs') = m" "SRAT fs' \ Rn" by auto from indep have dist: "distinct (RAT fs)" by (auto simp: gs.lin_indpt_list_def) have Fij': "(RAT fs) ! i \ Rn" "(RAT fs) ! j \ Rn" using add'[unfolded set_conv_nth] i \j < m\ len by auto have uminus': "(RAT fs) ! i - ?R c \\<^sub>v (RAT fs) ! j = (RAT fs) ! i + - ?R c \\<^sub>v (RAT fs) ! j" by (subst minus_add_uminus_vec[where n = n], insert Fij', auto) have span_F_F1: "gs.span (SRAT fs) = gs.span (SRAT fs')" unfolding RAT_F1 uminus' by (rule gs.add_vec_span, insert len add, auto) have **: "?RV (fs ! i) + - ?R c \\<^sub>v (RAT fs) ! j = ?RV (fs ! i - c \\<^sub>v fs ! j)" by (rule eq_vecI, insert Fij len i j, auto) from i j len have "j < length (RAT fs)" "i < length (RAT fs)" "i \ j" by auto from gs.lin_indpt_list_add_vec[OF this indep, of "- of_int c"] have "gs.lin_indpt_list ((RAT fs) [i := (RAT fs) ! i + - ?R c \\<^sub>v (RAT fs) ! j])" (is "gs.lin_indpt_list ?F1") . also have "?F1 = RAT fs'" unfolding fs' using i len Fij' ** by (auto simp: map_update) finally have indep_F1: "lin_indep fs'" . have conn1: "set (RAT fs) \ carrier_vec n" "length (RAT fs) = m" "distinct (RAT fs)" "gs.lin_indpt (set (RAT fs))" using inv unfolding gs.lin_indpt_list_def by auto have conn2: "set (RAT fs') \ carrier_vec n" "length (RAT fs') = m" "distinct (RAT fs')" "gs.lin_indpt (set (RAT fs'))" using indep_F1 F1' unfolding gs.lin_indpt_list_def by auto interpret gs1: gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use inv gs.lin_indpt_list_def in auto) interpret gs2: gram_schmidt_fs_lin_indpt n "RAT fs'" by (standard) (use indep_F1 F1' gs.lin_indpt_list_def in auto) let ?G = "map ?g [0 ..< m]" let ?G' = "map ?g' [0 ..< m]" from gs1.span_gso gs2.span_gso gs1.gso_carrier gs2.gso_carrier conn1 conn2 span_F_F1 len have span_G_G1: "gs.span (set ?G) = gs.span (set ?G')" and lenG: "length ?G = m" and Gi: "i < length ?G \ ?G ! 
i \ Rn" and G1i: "i < length ?G' \ ?G' ! i \ Rn" for i by auto have eq: "x \ i \ RAT fs' ! x = (RAT fs) ! x" for x unfolding RAT_F1 by auto hence eq_part: "x < i \ ?g' x = ?g x" for x by (intro gs.gso_cong, insert len, auto) have G: "i < m \ (RAT fs) ! i \ Rn" "i < m \ fs ! i \ carrier_vec n" for i by(insert add len', auto) note carr1[intro] = this[OF i] this[OF ji(2)] have "x < m \ ?g x \ Rn" "x < m \ ?g' x \ Rn" "x < m \ dim_vec (gso fs x) = n" "x < m \ dim_vec (gso fs' x) = n" for x using inv G1i by (auto simp:o_def Gi G1i) hence carr2[intro!]:"?g i \ Rn" "?g' i \ Rn" "?g ` {0.. Rn" "?g ` {0.. Rn" using i by auto have F1_RV: "?RV (fs' ! i) = RAT fs' ! i" using i F1 by auto have F_RV: "?RV (fs ! i) = (RAT fs) ! i" using i len by auto from eq_part have span_G1_G: "gs.span (?g' ` {0.. ?rs" using gs2.oc_projection_exist[of i] conn2 i unfolding span_G1_G by auto from \j < i\ have Gj_mem: "(RAT fs) ! j \ (\ x. ((RAT fs) ! x)) ` {0 ..< i}" by auto have id1: "set (take i (RAT fs)) = (\x. ?RV (fs ! x)) ` {0..i < m\ len by (subst nth_image[symmetric], force+) have "(RAT fs) ! j \ ?rs \ (RAT fs) ! j \ gs.span ((\x. ?RV (fs ! x)) ` {0..i < m\ id1 inv by auto also have "(\x. ?RV (fs ! x)) ` {0..x. ((RAT fs) ! x)) ` {0..i < m\ len by force also have "(RAT fs) ! j \ gs.span \" by (rule gs.span_mem[OF _ Gj_mem], insert \i < m\ G, auto) finally have "(RAT fs) ! j \ ?rs" . hence in2:"?mui \ ?rs" apply(intro gs.prod_in_span) by force+ have ineq:"((RAT fs) ! i - ?g' i) + ?mui - ?mui = ((RAT fs) ! i - ?g' i)" using carr1 carr2 by (intro eq_vecI, auto) have cong': "A = B \ A \ C \ B \ C" for A B :: "'a vec" and C by auto have *: "?g ` {0.. Rn" by auto have in_span: "(RAT fs) ! i - ?g' i \ ?rs" by (rule cong'[OF eq_vecI gs.span_add1[OF * in1 in2,unfolded ineq]], insert carr1 carr2, auto) { fix x assume x:"x < i" hence "x < m" "i \ x" using i by auto from gs2.orthogonal this inv assms have "?g' i \ ?g' x = 0" by auto } hence G1_G: "?g' i = ?g i" by (intro gs1.oc_projection_unique) (use inv i eq_part in_span in auto) show eq_fs:"x < m \ ?g' x = ?g x" for x proof(induct x rule:nat_less_induct[rule_format]) case (1 x) hence ind: "m < x \ ?g' m = ?g m" for m by auto { assume "x > i" hence ?case unfolding gs2.gso.simps[of x] gs1.gso.simps[of x] unfolding gs1.\.simps gs2.\.simps using ind eq by (auto intro: cong[OF _ cong[OF refl[of "gs.sumlist"]]]) } note eq_rest = this show ?case by (rule linorder_class.linorder_cases[of x i],insert G1_G eq_part eq_rest,auto) qed hence Hs:"?G' = ?G" by (auto simp:o_def) have red: "weakly_reduced fs i \ weakly_reduced fs' i" using eq_fs \i < m\ unfolding gram_schmidt_fs.weakly_reduced_def by simp let ?Mi = "M ! i ! j" have Gjn: "dim_vec (fs ! 
j) = n" using Fij(2) carrier_vecD by blast define E where "E = addrow_mat m (- ?R c) i j" define M' where "M' = gs1.M m" define N' where "N' = gs2.M m" have E: "E \ carrier_mat m m" unfolding E_def by simp have M: "M' \ carrier_mat m m" unfolding gs1.M_def M'_def by auto have N: "N' \ carrier_mat m m" unfolding gs2.M_def N'_def by auto let ?mat = "mat_of_rows n" let ?GsM = "?mat ?G" have Gs: "?GsM \ carrier_mat m n" by auto hence GsT: "?GsM\<^sup>T \ carrier_mat n m" by auto have Gnn: "?mat (RAT fs) \ carrier_mat m n" unfolding mat_of_rows_def using len by auto have "?mat (RAT fs') = addrow (- ?R c) i j (?mat (RAT fs))" unfolding RAT_F1 by (rule eq_matI, insert Gjn ji(2), auto simp: len mat_of_rows_def) also have "\ = E * ?mat (RAT fs)" unfolding E_def by (rule addrow_mat, insert j i, auto simp: mat_of_rows_def len) finally have HEG: "?mat (RAT fs') = E * ?mat (RAT fs)" . (* lemma 16.12(i), part 1 *) have "(E * M') * ?mat ?G = E * (M' * ?mat ?G)" by (rule assoc_mult_mat[OF E M Gs]) also have "M' * ?GsM = ?mat (RAT fs)" using gs1.matrix_equality conn1 M'_def by simp also have "E * \ = ?mat (RAT fs')" unfolding HEG .. also have "\ = N' * ?mat ?G'" using gs2.matrix_equality conn2 unfolding N'_def by simp also have "?mat ?G' = ?GsM" unfolding Hs .. finally have "(E * M') * ?GsM = N' * ?GsM" . from arg_cong[OF this, of "\ x. x * ?GsM\<^sup>T"] E M N have EMN: "(E * M') * (?GsM * ?GsM\<^sup>T) = N' * (?GsM * ?GsM\<^sup>T)" by (subst (1 2) assoc_mult_mat[OF _ Gs GsT, of _ m, symmetric], auto) have "det (?GsM * ?GsM\<^sup>T) = gs.Gramian_determinant ?G m" unfolding gs.Gramian_determinant_def by (subst gs.Gramian_matrix_alt_def, auto simp: Let_def) also have "\ > 0" proof - have 1: "gs.lin_indpt_list ?G" using conn1 gs1.orthogonal_gso gs1.gso_carrier by (intro gs.orthogonal_imp_lin_indpt_list) (auto) interpret G: gram_schmidt_fs_lin_indpt n ?G by (standard) (use 1 gs.lin_indpt_list_def in auto) show ?thesis by (intro G.Gramian_determinant) auto qed finally have "det (?GsM * ?GsM\<^sup>T) \ 0" by simp from vec_space.det_nonzero_congruence[OF EMN this _ _ N] Gs E M have EMN: "E * M' = N'" by auto (* lemma 16.12(i), part 2 *) { fix i' j' assume ij: "i' < m" "j' < m" and choice: "i' \ i \ j < j'" have "?mu' i' j' = N' $$ (i',j')" using ij F1 unfolding N'_def gs2.M_def by auto also have "\ = addrow (- ?R c) i j M' $$ (i',j')" unfolding EMN[symmetric] E_def by (subst addrow_mat[OF M], insert ji, auto) also have "\ = (if i = i' then - ?R c * M' $$ (j, j') + M' $$ (i', j') else M' $$ (i', j'))" by (rule index_mat_addrow, insert ij M, auto) also have "\ = M' $$ (i', j')" proof (cases "i = i'") case True with choice have jj: "j < j'" by auto have "M' $$ (j, j') = ?mu j j'" using ij ji len unfolding M'_def gs1.M_def by auto also have "\ = 0" unfolding gs1.\.simps using jj by auto finally show ?thesis using True by auto qed auto also have "\ = ?mu i' j'" using ij len unfolding M'_def gs1.M_def by auto also note calculation } note mu_no_change = this { fix j' assume jj': "j' \ j" with j i have j': "j' < m" by auto have "?mu' i j' = N' $$ (i,j')" using jj' j i F1 unfolding N'_def gs2.M_def by auto also have "\ = addrow (- ?R c) i j M' $$ (i,j')" unfolding EMN[symmetric] E_def by (subst addrow_mat[OF M], insert ji, auto) also have "\ = - ?R c * M' $$ (j, j') + M' $$ (i, j')" by (rule index_mat_addrow, insert j' i M, auto) also have "\ = M' $$ (i, j') - ?R c * M' $$ (j, j')" by simp also have "M' $$ (i, j') = ?mu i j'" using i j' len unfolding M'_def gs1.M_def by auto also have "M' $$ (j, j') = ?mu j j'" using i j 
j' len unfolding M'_def gs1.M_def by auto finally have "?mu' i j' = ?mu i j' - ?R c * ?mu j j'" by auto } note mu_change = this show mu_update: "i' < m \ j' < m \ ?mu' i' j' = (if i' = i \ j' \ j then ?mu i j' - ?R c * ?mu j j' else ?mu i' j')" for i' j' using mu_change[of j'] mu_no_change[of i' j'] by auto { assume "LLL_invariant True i fs" from LLL_invD[OF this] have "weakly_reduced fs i" and sred: "reduced fs i" by auto from red[OF this(1)] have red: "weakly_reduced fs' i" . have sred: "reduced fs' i" unfolding gram_schmidt_fs.reduced_def proof (intro conjI[OF red] impI allI, goal_cases) case (1 i' j) with mu_no_change[of i' j] sred[unfolded gram_schmidt_fs.reduced_def, THEN conjunct2, rule_format, of i' j] i show ?case by auto qed show "LLL_invariant True i fs'" by (intro LLL_invI[OF F1 lattice \i \ m\ indep_F1 sred], auto) } show Linv': "LLL_invariant_weak fs'" by (intro LLL_inv_wI[OF F1 lattice indep_F1]) have mudiff:"?mu i j - of_int c = ?mu' i j" by (subst mu_change, auto simp: gs1.\.simps) have lin_indpt_list_fs: "gs.lin_indpt_list (RAT fs')" unfolding gs.lin_indpt_list_def using conn2 by auto { assume c: "c = round (\ fs i j)" have small: "abs (?mu i j - of_int c) \ inverse 2" unfolding j c using of_int_round_abs_le by (auto simp add: abs_minus_commute) from this[unfolded mudiff] show mu'_2: "abs (?mu' i j) \ 1 / 2" by simp assume mu_small: "\_small_row i fs (Suc j)" show "\_small_row i fs' j" unfolding \_small_row_def proof (intro allI, goal_cases) case (1 j') show ?case using mu'_2 mu_small[unfolded \_small_row_def, rule_format, of j'] by (cases "j' > j", insert mu_update[of i j'] i, auto) qed } { fix i assume i: "i \ m" have "rat_of_int (d fs' i) = of_int (d fs i)" unfolding d_def Gramian_determinant(1)[OF Linv i] Gramian_determinant(1)[OF Linv' i] by (rule prod.cong[OF refl], subst eq_fs, insert i, auto) thus "d fs' i = d fs i" by simp } note d = this have D: "D fs' = D fs" unfolding D_def by (rule arg_cong[of _ _ nat], rule prod.cong[OF refl], auto simp: d) show "LLL_measure i fs' = LLL_measure i fs" unfolding LLL_measure_def logD_def D .. qed text \Addition step which can be skipped since $\mu$-value is already small\ lemma basis_reduction_add_row_main_0: assumes Linv: "LLL_invariant_weak fs" and i: "i < m" and j: "j < i" and 0: "round (\ fs i j) = 0" and mu_small: "\_small_row i fs (Suc j)" shows "\_small_row i fs j" (is ?g1) proof - note inv = LLL_inv_wD[OF Linv] from inv(5)[OF i] inv(5)[of j] i j have id: "fs[i := fs ! i - 0 \\<^sub>v fs ! j] = fs" by (intro nth_equalityI, insert inv i, auto) show ?g1 using basis_reduction_add_row_main[OF Linv i j _, of fs] 0 id mu_small by auto qed lemma \_small_row_refl: "\_small_row i fs i" unfolding \_small_row_def by auto lemma basis_reduction_add_row_done: assumes Linv: "LLL_invariant True i fs" and i: "i < m" and mu_small: "\_small_row i fs 0" shows "LLL_invariant False i fs" proof - note inv = LLL_invD[OF Linv] from mu_small have mu_small: "\_small fs i" unfolding \_small_row_def \_small_def by auto show ?thesis using i mu_small by (intro LLL_invI[OF inv(3,6,7,9,1,10)], auto) qed (* lemma 16.16 (ii), one case *) lemma d_swap_unchanged: assumes len: "length F1 = m" and i0: "i \ 0" and i: "i < m" and ki: "k \ i" and km: "k \ m" and swap: "F2 = F1[i := F1 ! (i - 1), i - 1 := F1 ! i]" shows "d F1 k = d F2 k" proof - let ?F1_M = "mat k n (\(i, y). F1 ! i $ y)" let ?F2_M = "mat k n (\(i, y). F2 ! i $ y)" have "\ P. 
P \ carrier_mat k k \ det P \ {-1, 1} \ ?F2_M = P * ?F1_M" proof cases assume ki: "k < i" hence H: "?F2_M = ?F1_M" unfolding swap by (intro eq_matI, auto) let ?P = "1\<^sub>m k" have "?P \ carrier_mat k k" "det ?P \ {-1, 1}" "?F2_M = ?P * ?F1_M" unfolding H by auto thus ?thesis by blast next assume "\ k < i" with ki have ki: "k > i" by auto let ?P = "swaprows_mat k i (i - 1)" from i0 ki have neq: "i \ i - 1" and kmi: "i - 1 < k" by auto have *: "?P \ carrier_mat k k" "det ?P \ {-1, 1}" using det_swaprows_mat[OF ki kmi neq] ki by auto from i len have iH: "i < length F1" "i - 1 < length F1" by auto have "?P * ?F1_M = swaprows i (i - 1) ?F1_M" by (subst swaprows_mat[OF _ ki kmi], auto) also have "\ = ?F2_M" unfolding swap by (intro eq_matI, rename_tac ii jj, case_tac "ii = i", (insert iH, simp add: nth_list_update)[1], case_tac "ii = i - 1", insert iH neq ki, auto simp: nth_list_update) finally show ?thesis using * by metis qed then obtain P where P: "P \ carrier_mat k k" and detP: "det P \ {-1, 1}" and H': "?F2_M = P * ?F1_M" by auto have "d F2 k = det (gs.Gramian_matrix F2 k)" unfolding d_def gs.Gramian_determinant_def by simp also have "\ = det (?F2_M * ?F2_M\<^sup>T)" unfolding gs.Gramian_matrix_def Let_def by simp also have "?F2_M * ?F2_M\<^sup>T = ?F2_M * (?F1_M\<^sup>T * P\<^sup>T)" unfolding H' by (subst transpose_mult[OF P], auto) also have "\ = P * (?F1_M * (?F1_M\<^sup>T * P\<^sup>T))" unfolding H' by (subst assoc_mult_mat[OF P], auto) also have "det \ = det P * det (?F1_M * (?F1_M\<^sup>T * P\<^sup>T))" by (rule det_mult[OF P], insert P, auto) also have "?F1_M * (?F1_M\<^sup>T * P\<^sup>T) = (?F1_M * ?F1_M\<^sup>T) * P\<^sup>T" by (subst assoc_mult_mat, insert P, auto) also have "det \ = det (?F1_M * ?F1_M\<^sup>T) * det P" by (subst det_mult, insert P, auto simp: det_transpose) also have "det (?F1_M * ?F1_M\<^sup>T) = det (gs.Gramian_matrix F1 k)" unfolding gs.Gramian_matrix_def Let_def by simp also have "\ = d F1 k" unfolding d_def gs.Gramian_determinant_def by simp finally have "d F2 k = (det P * det P) * d F1 k" by simp also have "det P * det P = 1" using detP by auto finally show "d F1 k = d F2 k" by simp qed definition base where "base = real_of_rat ((4 * \) / (4 + \))" definition g_bound :: "int vec list \ bool" where "g_bound fs = (\ i < m. sq_norm (gso fs i) \ of_nat N)" end locale LLL_with_assms = LLL + assumes \: "\ \ 4/3" and lin_dep: "lin_indep fs_init" and len: "length fs_init = m" begin lemma \0: "\ > 0" "\ \ 0" using \ by auto lemma fs_init: "set fs_init \ carrier_vec n" using lin_dep[unfolded gs.lin_indpt_list_def] by auto lemma reduction: "0 < reduction" "reduction \ 1" "\ > 4/3 \ reduction < 1" "\ = 4/3 \ reduction = 1" using \ unfolding reduction_def by auto lemma base: "\ > 4/3 \ base > 1" using reduction(1,3) unfolding reduction_def base_def by auto lemma basis_reduction_swap_main: assumes Linvw: "LLL_invariant_weak fs" and small: "LLL_invariant False i fs \ abs (\ fs i (i - 1)) \ 1/2" and i: "i < m" and i0: "i \ 0" and norm_ineq: "sq_norm (gso fs (i - 1)) > \ * sq_norm (gso fs i)" and fs'_def: "fs' = fs[i := fs ! (i - 1), i - 1 := fs ! i]" shows "LLL_invariant_weak fs'" and "LLL_invariant False i fs \ LLL_invariant False (i - 1) fs'" and "LLL_measure i fs > LLL_measure (i - 1) fs'" (* new values of gso *) and "\ k. k < m \ gso fs' k = (if k = i - 1 then gso fs i + \ fs i (i - 1) \\<^sub>v gso fs (i - 1) else if k = i then gso fs (i - 1) - (RAT fs ! (i - 1) \ gso fs' (i - 1) / sq_norm (gso fs' (i - 1))) \\<^sub>v gso fs' (i - 1) else gso fs k)" (is "\ k. 
_ \ _ = ?newg k") (* new values of norms of gso *) and "\ k. k < m \ sq_norm (gso fs' k) = (if k = i - 1 then sq_norm (gso fs i) + (\ fs i (i - 1) * \ fs i (i - 1)) * sq_norm (gso fs (i - 1)) else if k = i then sq_norm (gso fs i) * sq_norm (gso fs (i - 1)) / sq_norm (gso fs' (i - 1)) else sq_norm (gso fs k))" (is "\ k. _ \ _ = ?new_norm k") (* new values of \-values *) and "\ ii j. ii < m \ j < ii \ \ fs' ii j = ( if ii = i - 1 then \ fs i j else if ii = i then if j = i - 1 then \ fs i (i - 1) * sq_norm (gso fs (i - 1)) / sq_norm (gso fs' (i - 1)) else \ fs (i - 1) j else if ii > i \ j = i then \ fs ii (i - 1) - \ fs i (i - 1) * \ fs ii i else if ii > i \ j = i - 1 then \ fs ii (i - 1) * \ fs' i (i - 1) + \ fs ii i * sq_norm (gso fs i) / sq_norm (gso fs' (i - 1)) else \ fs ii j)" (is "\ ii j. _ \ _ \ _ = ?new_mu ii j") (* new d-values *) and "\ ii. ii \ m \ of_int (d fs' ii) = (if ii = i then sq_norm (gso fs' (i - 1)) / sq_norm (gso fs (i - 1)) * of_int (d fs i) else of_int (d fs ii))" proof - note inv = LLL_inv_wD[OF Linvw] interpret fs: fs_int' n m fs_init fs by (standard) (use Linvw in auto) let ?mu1 = "\ fs" let ?mu2 = "\ fs'" let ?g1 = "gso fs" let ?g2 = "gso fs'" have m12: "\?mu1 i (i - 1)\ \ inverse 2" using small proof assume "LLL_invariant False i fs" from LLL_invD(11)[OF this] i0 show ?thesis unfolding \_small_def by auto qed auto note d = d_def note Gd = Gramian_determinant(1) note Gd12 = Gd[OF Linvw] let ?x = "?g1 (i - 1)" let ?y = "?g1 i" let ?cond = "\ * sq_norm ?y < sq_norm ?x" from inv have len: "length fs = m" and HC: "set fs \ carrier_vec n" and L: "lattice_of fs = L" using i by auto from i0 inv i have swap: "set fs \ carrier_vec n" "i < length fs" "i - 1 < length fs" "i \ i - 1" unfolding Let_def by auto have RAT_fs': "RAT fs' = (RAT fs)[i := (RAT fs) ! (i - 1), i - 1 := (RAT fs) ! i]" unfolding fs'_def using swap by (intro nth_equalityI, auto simp: nth_list_update) have span': "gs.span (SRAT fs) = gs.span (SRAT fs')" unfolding fs'_def by (rule arg_cong[of _ _ gs.span], insert swap, auto) have lfs': "lattice_of fs' = lattice_of fs" unfolding fs'_def by (rule lattice_of_swap[OF swap refl]) with inv have lattice: "lattice_of fs' = L" by auto have len': "length fs' = m" using inv unfolding fs'_def by auto have fs': "set fs' \ carrier_vec n" using swap unfolding fs'_def set_conv_nth by (auto, rename_tac k, case_tac "k = i", force, case_tac "k = i - 1", auto) let ?rv = "map_vec rat_of_int" from inv(1) have indepH: "lin_indep fs" . from i i0 len have "i < length (RAT fs)" "i - 1 < length (RAT fs)" by auto with distinct_swap[OF this] len have "distinct (RAT fs') = distinct (RAT fs)" unfolding RAT_fs' by (auto simp: map_update) with len' fs' span' indepH have indepH': "lin_indep fs'" unfolding fs'_def using i i0 by (auto simp: gs.lin_indpt_list_def) have lenR': "length (RAT fs') = m" using len' by auto have conn1: "set (RAT fs) \ carrier_vec n" "length (RAT fs) = m" "distinct (RAT fs)" "gs.lin_indpt (set (RAT fs))" using inv unfolding gs.lin_indpt_list_def by auto have conn2: "set (RAT fs') \ carrier_vec n" "length (RAT fs') = m" "distinct (RAT fs')" "gs.lin_indpt (set (RAT fs'))" using indepH' lenR' unfolding gs.lin_indpt_list_def by auto interpret gs2: gram_schmidt_fs_lin_indpt n "RAT fs'" by (standard) (use indepH' lenR' gs.lin_indpt_list_def in auto) have fs'_fs: "k < i - 1 \ fs' ! k = fs ! 
k" for k unfolding fs'_def by auto { fix k assume ki: "k < i - 1" with i have kn: "k < m" by simp have "?g2 k = ?g1 k" by (rule gs.gso_cong, insert ki kn len, auto simp: fs'_def) } note G2_G = this have take_eq: "take (Suc i - 1 - 1) fs' = take (Suc i - 1 - 1) fs" by (intro nth_equalityI, insert len len' i swap(2-), auto intro!: fs'_fs) have i1n: "i - 1 < m" using i by auto let ?R = rat_of_int let ?RV = "map_vec ?R" let ?f1 = "\ i. RAT fs ! i" let ?f2 = "\ i. RAT fs' ! i" let ?n1 = "\ i. sq_norm (?g1 i)" let ?n2 = "\ i. sq_norm (?g2 i)" have heq:"fs ! (i - 1) = fs' ! i" "take (i-1) fs = take (i-1) fs'" "?f2 (i - 1) = ?f1 i" "?f2 i = ?f1 (i - 1)" unfolding fs'_def using i len i0 by auto have norm_pos2: "j < m \ ?n2 j > 0" for j using gs2.sq_norm_pos len' by simp have norm_pos1: "j < m \ ?n1 j > 0" for j using fs.gs.sq_norm_pos inv by simp have norm_zero2: "j < m \ ?n2 j \ 0" for j using norm_pos2[of j] by linarith have norm_zero1: "j < m \ ?n1 j \ 0" for j using norm_pos1[of j] by linarith have gs: "\ j. j < m \ ?g1 j \ Rn" using inv by blast have gs2: "\ j. j < m \ ?g2 j \ Rn" using fs.gs.gso_carrier conn2 by auto have g: "\ j. j < m \ ?f1 j \ Rn" using inv by auto have g2: "\ j. j < m \ ?f2 j \ Rn" using gs2.f_carrier conn2 by blast let ?fs1 = "?f1 ` {0..< (i - 1)}" have G: "?fs1 \ Rn" using g i by auto let ?gs1 = "?g1 ` {0..< (i - 1)}" have G': "?gs1 \ Rn" using gs i by auto let ?S = "gs.span ?fs1" let ?S' = "gs.span ?gs1" have S'S: "?S' = ?S" by (rule fs.gs.partial_span', insert conn1 i, auto) have "gs.is_oc_projection (?g2 (i - 1)) (gs.span (?g2 ` {0..< (i - 1)})) (?f2 (i - 1))" using i len' by (intro gs2.gso_oc_projection_span(2)) auto also have "?f2 (i - 1) = ?f1 i" unfolding fs'_def using len i by auto also have "gs.span (?g2 ` {0 ..< (i - 1)}) = gs.span (?f2 ` {0 ..< (i - 1)})" using i len' by (intro gs2.partial_span') auto also have "?f2 ` {0 ..< (i - 1)} = ?fs1" by (rule image_cong[OF refl], insert len i, auto simp: fs'_def) finally have claim1: "gs.is_oc_projection (?g2 (i - 1)) ?S (?f1 i)" . have list_id: "[0..j. ?mu1 i j \\<^sub>v ?g1 j) [0 ..< i]) + ?g1 i" (is "_ = ?sum + _") apply(subst fs.gs.fi_is_sum_of_mu_gso, insert len i, force) unfolding map_append list_id by (subst gs.M.sumlist_snoc, insert i gs conn1, auto simp: fs.gs.\.simps) have f1im1_sum: "?f1 (i - 1) = gs.sumlist (map (\j. ?mu1 (i - 1) j \\<^sub>v ?g1 j) [0...simps) have sum: "?sum \ Rn" by (rule gs.sumlist_carrier, insert gs i, auto) have sum1: "?sum1 \ Rn" by (rule gs.sumlist_carrier, insert gs i, auto) from gs.span_closed[OF G] have S: "?S \ Rn" by auto from gs i have gs': "\ j. j < i - 1 \ ?g1 j \ Rn" and gsi: "?g1 (i - 1) \ Rn" by auto have "[0 ..< i] = [0 ..< Suc (i - 1)]" using i0 by simp also have "\ = [0 ..< i - 1] @ [i - 1]" by simp finally have list: "[0 ..< i] = [0 ..< i - 1] @ [i - 1]" . { (* d does not change for k \ i *) fix k assume kn: "k \ m" and ki: "k \ i" from d_swap_unchanged[OF len i0 i ki kn fs'_def] have "d fs k = d fs' k" by simp } note d = this (* new value of g (i-1) *) have g2_im1: "?g2 (i - 1) = ?g1 i + ?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1)" (is "_ = _ + ?mu_f1") proof (rule gs.is_oc_projection_eq[OF claim1 _ S g[OF i]]) show "gs.is_oc_projection (?g1 i + ?mu_f1) ?S (?f1 i)" unfolding gs.is_oc_projection_def proof (intro conjI allI impI) let ?sum' = "gs.sumlist (map (\j. 
?mu1 i j \\<^sub>v ?g1 j) [0 ..< i - 1])" have sum': "?sum' \ Rn" by (rule gs.sumlist_carrier, insert gs i, auto) show inRn: "(?g1 i + ?mu_f1) \ Rn" using gs[OF i] gsi i by auto have carr: "?sum \ Rn" "?g1 i \ Rn" "?mu_f1 \ Rn" "?sum' \ Rn" using sum' sum gs[OF i] gsi i by auto have "?f1 i - (?g1 i + ?mu_f1) = (?sum + ?g1 i) - (?g1 i + ?mu_f1)" unfolding f1i_sum by simp also have "\ = ?sum - ?mu_f1" using carr by auto also have "?sum = gs.sumlist (map (\j. ?mu1 i j \\<^sub>v ?g1 j) [0 ..< i - 1] @ [?mu_f1])" unfolding list by simp also have "\ = ?sum' + ?mu_f1" by (subst gs.sumlist_append, insert gs' gsi, auto) also have "\ - ?mu_f1 = ?sum'" using sum' gsi by auto finally have id: "?f1 i - (?g1 i + ?mu_f1) = ?sum'" . show "?f1 i - (?g1 i + ?mu_f1) \ gs.span ?S" unfolding id gs.span_span[OF G] proof (rule gs.sumlist_in_span[OF G]) fix v assume "v \ set (map (\j. ?mu1 i j \\<^sub>v ?g1 j) [0 ..< i - 1])" then obtain j where j: "j < i - 1" and v: "v = ?mu1 i j \\<^sub>v ?g1 j" by auto show "v \ ?S" unfolding v by (rule gs.smult_in_span[OF G], unfold S'S[symmetric], rule gs.span_mem, insert gs i j, auto) qed fix x assume "x \ ?S" hence x: "x \ ?S'" using S'S by simp show "(?g1 i + ?mu_f1) \ x = 0" proof (rule gs.orthocompl_span[OF _ G' inRn x]) fix x assume "x \ ?gs1" then obtain j where j: "j < i - 1" and x_id: "x = ?g1 j" by auto from j i x_id gs[of j] have x: "x \ Rn" by auto { fix k assume k: "k > j" "k < m" have "?g1 k \ x = 0" unfolding x_id by (rule fs.gs.orthogonal, insert conn1 k, auto) } from this[of i] this[of "i - 1"] j i have main: "?g1 i \ x = 0" "?g1 (i - 1) \ x = 0" by auto have "(?g1 i + ?mu_f1) \ x = ?g1 i \ x + ?mu_f1 \ x" by (rule add_scalar_prod_distrib[OF gs[OF i] _ x], insert gsi, auto) also have "\ = 0" using main by (subst smult_scalar_prod_distrib[OF gsi x], auto) finally show "(?g1 i + ?mu_f1) \ x = 0" . qed qed qed { (* 16.13 (i): for g, only g_i and g_{i-1} can change *) fix k assume kn: "k < m" and ki: "k \ i" "k \ i - 1" have "?g2 k = gs.oc_projection (gs.span (?g2 ` {0..i") case True hence "k < i - 1" using ki by auto then show ?thesis apply(intro image_cong) unfolding fs'_def using len i by auto next case False have "?f2 ` {0.. = ?f1 ` {0.. = gs.span (?g1 ` {0.. = ?g1 k" by (subst fs.gs.gso_oc_projection_span, insert kn conn1, auto) finally have "?g2 k = ?g1 k" . } note g2_g1_identical = this (* calculation of new mu-values *) { (* no change of mu for lines before line i - 1 *) fix jj ii assume ii: "ii < i - 1" have "?mu2 ii jj = ?mu1 ii jj" using ii i len by (subst gs.\_cong[of _ _ "RAT fs" "RAT fs'"], auto simp: fs'_def) } note mu'_mu_small_i = this { (* swap of mu-values in lines i - 1 and i for j < i - 1 *) fix jj assume jj: "jj < i - 1" hence id1: "jj < i - 1 \ True" "jj < i \ True" by auto have id2: "?g2 jj = ?g1 jj" by (subst g2_g1_identical, insert jj i, auto) have "?mu2 i jj = ?mu1 (i - 1) jj" "?mu2 (i - 1) jj = ?mu1 i jj" unfolding gs2.\.simps fs.gs.\.simps id1 id2 if_True using len i i0 by (auto simp: fs'_def) } note mu'_mu_i_im1_j = this have im1: "i - 1 < m" using i by auto (* calculation of new value of g_i *) let ?g2_im1 = "?g2 (i - 1)" have g2_im1_Rn: "?g2_im1 \ Rn" using i conn2 by (auto intro!: fs.gs.gso_carrier) { let ?mu2_f2 = "\ j. - ?mu2 i j \\<^sub>v ?g2 j" let ?sum = "gs.sumlist (map (\j. 
- ?mu1 (i - 1) j \\<^sub>v ?g1 j) [0 ..< i - 1])" have mhs: "?mu2_f2 (i - 1) \ Rn" using i conn2 by (auto intro!: fs.gs.gso_carrier) have sum': "?sum \ Rn" by (rule gs.sumlist_carrier, insert gs i, auto) have gim1: "?f1 (i - 1) \ Rn" using g i by auto have "?g2 i = ?f2 i + gs.sumlist (map ?mu2_f2 [0 ..< i-1] @ [?mu2_f2 (i-1)])" unfolding gs2.gso.simps[of i] list by simp also have "?f2 i = ?f1 (i - 1)" unfolding fs'_def using len i i0 by auto also have "map ?mu2_f2 [0 ..< i-1] = map (\j. - ?mu1 (i - 1) j \\<^sub>v ?g1 j) [0 ..< i - 1]" by (rule map_cong[OF refl], subst g2_g1_identical, insert i, auto simp: mu'_mu_i_im1_j) also have "gs.sumlist (\ @ [?mu2_f2 (i - 1)]) = ?sum + ?mu2_f2 (i - 1)" by (subst gs.sumlist_append, insert gs i mhs, auto) also have "?f1 (i - 1) + \ = (?f1 (i - 1) + ?sum) + ?mu2_f2 (i - 1)" using gim1 sum' mhs by auto also have "?f1 (i - 1) + ?sum = ?g1 (i - 1)" unfolding fs.gs.gso.simps[of "i - 1"] by simp also have "?mu2_f2 (i - 1) = - (?f2 i \ ?g2_im1 / sq_norm ?g2_im1) \\<^sub>v ?g2_im1" unfolding gs2.\.simps using i0 by simp also have "\ = - ((?f2 i \ ?g2_im1 / sq_norm ?g2_im1) \\<^sub>v ?g2_im1)" by auto also have "?g1 (i - 1) + \ = ?g1 (i - 1) - ((?f2 i \ ?g2_im1 / sq_norm ?g2_im1) \\<^sub>v ?g2_im1)" by (rule sym, rule minus_add_uminus_vec[of _ n], insert gsi g2_im1_Rn, auto) also have "?f2 i = ?f1 (i - 1)" by fact finally have "?g2 i = ?g1 (i - 1) - (?f1 (i - 1) \ ?g2 (i - 1) / sq_norm (?g2 (i - 1))) \\<^sub>v ?g2 (i - 1)" . } note g2_i = this let ?n1 = "\ i. sq_norm (?g1 i)" let ?n2 = "\ i. sq_norm (?g2 i)" (* calculation of new norms *) { (* norm of g (i - 1) *) have "?n2 (i - 1) = sq_norm (?g1 i + ?mu_f1)" unfolding g2_im1 by simp also have "\ = (?g1 i + ?mu_f1) \ (?g1 i + ?mu_f1)" by (simp add: sq_norm_vec_as_cscalar_prod) also have "\ = (?g1 i + ?mu_f1) \ ?g1 i + (?g1 i + ?mu_f1) \ ?mu_f1" by (rule scalar_prod_add_distrib, insert gs i, auto) also have "(?g1 i + ?mu_f1) \ ?g1 i = ?g1 i \ ?g1 i + ?mu_f1 \ ?g1 i" by (rule add_scalar_prod_distrib, insert gs i, auto) also have "(?g1 i + ?mu_f1) \ ?mu_f1 = ?g1 i \ ?mu_f1 + ?mu_f1 \ ?mu_f1" by (rule add_scalar_prod_distrib, insert gs i, auto) also have "?mu_f1 \ ?g1 i = ?g1 i \ ?mu_f1" by (rule comm_scalar_prod, insert gs i, auto) also have "?g1 i \ ?g1 i = sq_norm (?g1 i)" by (simp add: sq_norm_vec_as_cscalar_prod) also have "?g1 i \ ?mu_f1 = ?mu1 i (i - 1) * (?g1 i \ ?g1 (i - 1))" by (rule scalar_prod_smult_right, insert gs[OF i] gs[OF \i - 1 < m\], auto) also have "?g1 i \ ?g1 (i - 1) = 0" using orthogonalD[OF fs.gs.orthogonal_gso, of i "i - 1"] i len i0 by (auto simp: o_def) also have "?mu_f1 \ ?mu_f1 = ?mu1 i (i - 1) * (?mu_f1 \ ?g1 (i - 1))" by (rule scalar_prod_smult_right, insert gs[OF i] gs[OF \i - 1 < m\], auto) also have "?mu_f1 \ ?g1 (i - 1) = ?mu1 i (i - 1) * (?g1 (i - 1) \ ?g1 (i - 1))" by (rule scalar_prod_smult_left, insert gs[OF i] gs[OF \i - 1 < m\], auto) also have "?g1 (i - 1) \ ?g1 (i - 1) = sq_norm (?g1 (i - 1))" by (simp add: sq_norm_vec_as_cscalar_prod) finally have "?n2 (i - 1) = ?n1 i + (?mu1 i (i - 1) * ?mu1 i (i - 1)) * ?n1 (i - 1)" by (simp add: ac_simps o_def) } note sq_norm_g2_im1 = this from norm_pos1[OF i] norm_pos1[OF im1] norm_pos2[OF i] norm_pos2[OF im1] have norm0: "?n1 i \ 0" "?n1 (i - 1) \ 0" "?n2 i \ 0" "?n2 (i - 1) \ 0" by auto hence norm0': "?n2 (i - 1) \ 0" using i by auto { (* new norm of g i *) have si: "Suc i \ m" and im1: "i - 1 \ m" using i by auto have det1: "gs.Gramian_determinant (RAT fs) (Suc i) = (\jfs.gs.gso j\\<^sup>2)" using fs.gs.Gramian_determinant si len 
by auto have det2: "gs.Gramian_determinant (RAT fs') (Suc i) = (\jgs2.gso j\\<^sup>2)" using gs2.Gramian_determinant si len' by auto from norm_zero1[OF less_le_trans[OF _ im1]] have 0: "(\j < i-1. ?n1 j) \ 0" by (subst prod_zero_iff, auto) have "rat_of_int (d fs' (Suc i)) = rat_of_int (d fs (Suc i))" using d_swap_unchanged[OF len i0 i _ si fs'_def] by auto also have "rat_of_int (d fs' (Suc i)) = gs.Gramian_determinant (RAT fs') (Suc i)" unfolding d_def by (subst fs.of_int_Gramian_determinant[symmetric], insert conn2 i g fs', auto simp: set_conv_nth) also have "\ = (\j = (\jj\ ?set. ?n2 j) = ?n2 i * ?n2 (i - 1) * (\j < i-1. ?n2 j)" using i0 by (subst prod.insert; (subst prod.insert)?; auto) also have "(\j\ ?set. ?n1 j) = ?n1 i * ?n1 (i - 1) * (\j < i-1. ?n1 j)" using i0 by (subst prod.insert; (subst prod.insert)?; auto) also have "(\j < i-1. ?n2 j) = (\j < i-1. ?n1 j)" by (rule prod.cong, insert G2_G, auto) finally have "?n2 i = ?n1 i * ?n1 (i - 1) / ?n2 (i - 1)" using 0 norm0' by (auto simp: field_simps) } note sq_norm_g2_i = this (* mu values in rows > i do not change with j \ {i, i - 1} *) { fix ii j assume ii: "ii > i" "ii < m" and ji: "j \ i" "j \ i - 1" { assume j: "j < ii" have "?mu2 ii j = (?f2 ii \ ?g2 j) / sq_norm (?g2 j)" unfolding gs2.\.simps using j by auto also have "?f2 ii = ?f1 ii" using ii len unfolding fs'_def by auto also have "?g2 j = ?g1 j" using g2_g1_identical[of j] j ii ji by auto finally have "?mu2 ii j = ?mu1 ii j" unfolding fs.gs.\.simps using j by auto } hence "?mu2 ii j = ?mu1 ii j" by (cases "j < ii", auto simp: gs2.\.simps fs.gs.\.simps) } note mu_no_change_large_row = this { (* the new value of mu i (i - 1) *) have "?mu2 i (i - 1) = (?f2 i \ ?g2 (i - 1)) / ?n2 (i - 1)" unfolding gs2.\.simps using i0 by auto also have "?f2 i \ ?g2 (i - 1) = ?f1 (i - 1) \ ?g2 (i - 1)" using len i i0 unfolding fs'_def by auto also have "\ = ?f1 (i - 1) \ (?g1 i + ?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1))" unfolding g2_im1 by simp also have "\ = ?f1 (i - 1) \ ?g1 i + ?f1 (i - 1) \ (?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1))" by (rule scalar_prod_add_distrib[of _ n], insert i gs g, auto) also have "?f1 (i - 1) \ ?g1 i = 0" by (subst fs.gs.fi_scalar_prod_gso, insert conn1 im1 i i0, auto simp: fs.gs.\.simps fs.gs.\.simps) also have "?f1 (i - 1) \ (?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1)) = ?mu1 i (i - 1) * (?f1 (i - 1) \ ?g1 (i - 1))" by (rule scalar_prod_smult_distrib, insert gs g i, auto) also have "?f1 (i - 1) \ ?g1 (i - 1) = ?n1 (i - 1)" by (subst fs.gs.fi_scalar_prod_gso, insert conn1 im1, auto simp: fs.gs.\.simps) finally have "?mu2 i (i - 1) = ?mu1 i (i - 1) * ?n1 (i - 1) / ?n2 (i - 1)" by (simp add: sq_norm_vec_as_cscalar_prod) } note mu'_mu_i_im1 = this { (* the new values of mu ii (i - 1) for ii > i *) fix ii assume iii: "ii > i" and ii: "ii < m" hence iii1: "i - 1 < ii" by auto have "?mu2 ii (i - 1) = (?f2 ii \ ?g2 (i - 1)) / ?n2 (i - 1)" unfolding gs2.\.simps using i0 iii1 by auto also have "?f2 ii \ ?g2 (i-1) = ?f1 ii \ ?g2 (i - 1)" using len i i0 iii ii unfolding fs'_def by auto also have "\ = ?f1 ii \ (?g1 i + ?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1))" unfolding g2_im1 by simp also have "\ = ?f1 ii \ ?g1 i + ?f1 ii \ (?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1))" by (rule scalar_prod_add_distrib[of _ n], insert i ii gs g, auto) also have "?f1 ii \ ?g1 i = ?mu1 ii i * ?n1 i" by (rule fs.gs.fi_scalar_prod_gso, insert conn1 ii i, auto) also have "?f1 ii \ (?mu1 i (i - 1) \\<^sub>v ?g1 (i - 1)) = ?mu1 i (i - 1) * (?f1 ii \ ?g1 (i - 1))" by (rule scalar_prod_smult_distrib, insert gs g i ii, 
auto) also have "?f1 ii \ ?g1 (i - 1) = ?mu1 ii (i - 1) * ?n1 (i - 1)" by (rule fs.gs.fi_scalar_prod_gso, insert conn1 ii im1, auto) finally have "?mu2 ii (i - 1) = ?mu1 ii (i - 1) * ?mu2 i (i - 1) + ?mu1 ii i * ?n1 i / ?n2 (i - 1)" unfolding mu'_mu_i_im1 using norm0 by (auto simp: field_simps) } note mu'_mu_large_row_im1 = this { (* the new values of mu ii i for ii > i *) fix ii assume iii: "ii > i" and ii: "ii < m" have "?mu2 ii i = (?f2 ii \ ?g2 i) / ?n2 i" unfolding gs2.\.simps using i0 iii by auto also have "?f2 ii \ ?g2 i = ?f1 ii \ ?g2 i" using len i i0 iii ii unfolding fs'_def by auto also have "\ = ?f1 ii \ (?g1 (i - 1) - (?f1 (i - 1) \ ?g2 (i - 1) / ?n2 (i - 1)) \\<^sub>v ?g2 (i - 1))" unfolding g2_i by simp also have "?f1 (i - 1) = ?f2 i" using i i0 len unfolding fs'_def by auto also have "?f2 i \ ?g2 (i - 1) / ?n2 (i - 1) = ?mu2 i (i - 1)" unfolding gs2.\.simps using i i0 by auto also have "?f1 ii \ (?g1 (i - 1) - ?mu2 i (i - 1) \\<^sub>v ?g2 (i - 1)) = ?f1 ii \ ?g1 (i - 1) - ?f1 ii \ (?mu2 i (i - 1) \\<^sub>v ?g2 (i - 1))" by (rule scalar_prod_minus_distrib[OF g gs], insert gs2 ii i, auto) also have "?f1 ii \ ?g1 (i - 1) = ?mu1 ii (i - 1) * ?n1 (i - 1)" by (rule fs.gs.fi_scalar_prod_gso, insert conn1 ii im1, auto) also have "?f1 ii \ (?mu2 i (i - 1) \\<^sub>v ?g2 (i - 1)) = ?mu2 i (i - 1) * (?f1 ii \ ?g2 (i - 1))" by (rule scalar_prod_smult_distrib, insert gs gs2 g i ii, auto) also have "?f1 ii \ ?g2 (i - 1) = (?f1 ii \ ?g2 (i - 1) / ?n2 (i - 1)) * ?n2 (i - 1)" using norm0 by (auto simp: field_simps) also have "?f1 ii \ ?g2 (i - 1) = ?f2 ii \ ?g2 (i - 1)" using len ii iii unfolding fs'_def by auto also have "\ / ?n2 (i - 1) = ?mu2 ii (i - 1)" unfolding gs2.\.simps using iii by auto finally have "?mu2 ii i = (?mu1 ii (i - 1) * ?n1 (i - 1) - ?mu2 i (i - 1) * ?mu2 ii (i - 1) * ?n2 (i - 1)) / ?n2 i" by simp also have "\ = (?mu1 ii (i - 1) - ?mu1 i (i - 1) * ?mu2 ii (i - 1)) * ?n2 (i - 1) / ?n1 i" unfolding sq_norm_g2_i mu'_mu_i_im1 using norm0 by (auto simp: field_simps) also have "\ = (?mu1 ii (i - 1) * ?n2 (i - 1) - ?mu1 i (i - 1) * ((?mu1 ii i * ?n1 i + ?mu1 i (i - 1) * ?mu1 ii (i - 1) * ?n1 (i - 1)))) / ?n1 i" unfolding mu'_mu_large_row_im1[OF iii ii] mu'_mu_i_im1 using norm0 by (auto simp: field_simps) also have "\ = ?mu1 ii (i - 1) - ?mu1 i (i - 1) * ?mu1 ii i" unfolding sq_norm_g2_im1 using norm0 by (auto simp: field_simps) finally have "?mu2 ii i = ?mu1 ii (i - 1) - ?mu1 i (i - 1) * ?mu1 ii i" . 
} note mu'_mu_large_row_i = this { fix k assume k: "k < m" show "?g2 k = ?newg k" unfolding g2_i[symmetric] unfolding g2_im1[symmetric] using g2_g1_identical[OF k] by auto show "?n2 k = ?new_norm k" unfolding sq_norm_g2_i[symmetric] unfolding sq_norm_g2_im1[symmetric] using g2_g1_identical[OF k] by auto fix j assume jk: "j < k" hence j: "j < m" using k by auto have "k < i - 1 \ k = i - 1 \ k = i \ k > i" by linarith thus "?mu2 k j = ?new_mu k j" unfolding mu'_mu_i_im1[symmetric] using mu'_mu_large_row_i[OF _ k] mu'_mu_large_row_im1 [OF _ k] mu_no_change_large_row[OF _ k, of j] mu'_mu_small_i mu'_mu_i_im1_j jk j k by auto } note new_g = this { (* 16.13 (ii) : norm of g (i - 1) decreases by reduction factor *) note sq_norm_g2_im1 also have "?n1 i + (?mu1 i (i - 1) * ?mu1 i (i - 1)) * ?n1 (i - 1) < 1/\ * (?n1 (i - 1)) + (1/2 * 1/2) * (?n1 (i - 1))" proof (rule add_less_le_mono[OF _ mult_mono]) from norm_ineq[unfolded mult.commute[of \], THEN linordered_field_class.mult_imp_less_div_pos[OF \0(1)]] show "?n1 i < 1/\ * ?n1 (i - 1)" using len i by auto from m12 have abs: "abs (?mu1 i (i - 1)) \ 1/2" by auto have "?mu1 i (i - 1) * ?mu1 i (i - 1) \ abs (?mu1 i (i - 1)) * abs (?mu1 i (i - 1))" by auto also have "\ \ 1/2 * 1/2" using mult_mono[OF abs abs] by auto finally show "?mu1 i (i - 1) * ?mu1 i (i - 1) \ 1/2 * 1/2" by auto qed auto also have "\ = reduction * sq_norm (?g1 (i - 1))" unfolding reduction_def using \0 by (simp add: ring_distribs add_divide_distrib) finally have "?n2 (i - 1) < reduction * ?n1 (i - 1)" . } note g_reduction = this (* Lemma 16.13 (ii) *) have lin_indpt_list_fs': "gs.lin_indpt_list (RAT fs')" unfolding gs.lin_indpt_list_def using conn2 by auto { (* stay reduced *) assume "LLL_invariant False i fs" note inv = LLL_invD[OF this] from inv have "weakly_reduced fs i" by auto hence "weakly_reduced fs (i - 1)" unfolding gram_schmidt_fs.weakly_reduced_def by auto hence red: "weakly_reduced fs' (i - 1)" unfolding gram_schmidt_fs.weakly_reduced_def using i G2_G by simp from inv have sred: "reduced fs i" by auto have sred: "reduced fs' (i - 1)" unfolding gram_schmidt_fs.reduced_def proof (intro conjI[OF red] allI impI, goal_cases) case (1 i' j) with sred have "\?mu1 i' j\ \ 1 / 2" unfolding gram_schmidt_fs.reduced_def by auto thus ?case using mu'_mu_small_i[OF 1(1)] by simp qed have mu_small: "\_small fs' (i - 1)" unfolding \_small_def proof (intro allI impI, goal_cases) case (1 j) thus ?case using inv(11) unfolding mu'_mu_i_im1_j[OF 1] \_small_def by auto qed show "LLL_invariant False (i - 1) fs'" by (rule LLL_invI, insert lin_indpt_list_fs' conn2 mu_small span' lattice fs' sred i, auto) } (* invariant is established *) show newInvw: "LLL_invariant_weak fs'" by (rule LLL_inv_wI, insert lin_indpt_list_fs' conn2 span' lattice fs', auto) (* show decrease in measure *) { (* 16.16 (ii), the decreasing case *) have ile: "i \ m" using i by auto from Gd[OF newInvw, folded d_def, OF ile] have "?R (d fs' i) = (\j = prod ?n2 ({0 ..< i-1} \ {i - 1})" by (rule sym, rule prod.cong, (insert i0, auto)[1], insert i, auto) also have "\ = ?n2 (i - 1) * prod ?n2 ({0 ..< i-1})" by simp also have "prod ?n2 ({0 ..< i-1}) = prod ?n1 ({0 ..< i-1})" by (rule prod.cong[OF refl], subst g2_g1_identical, insert i, auto) also have "\ = (prod ?n1 ({0 ..< i-1} \ {i - 1})) / ?n1 (i - 1)" by (subst prod.union_disjoint, insert norm_pos1[OF im1], auto) also have "prod ?n1 ({0 ..< i-1} \ {i - 1}) = prod ?n1 {0.. 
= (\j = ?R (d fs i)" unfolding d_def Gd[OF Linvw ile] by (rule prod.cong[OF refl], insert i, auto) finally have new_di: "?R (d fs' i) = ?n2 (i - 1) / ?n1 (i - 1) * ?R (d fs i)" by simp also have "\ < (reduction * ?n1 (i - 1)) / ?n1 (i - 1) * ?R (d fs i)" by (rule mult_strict_right_mono[OF divide_strict_right_mono[OF g_reduction norm_pos1[OF im1]]], insert LLL_d_pos[OF Linvw] i, auto) also have "\ = reduction * ?R (d fs i)" using norm_pos1[OF im1] by auto finally have "d fs' i < real_of_rat reduction * d fs i" using of_rat_less of_rat_mult of_rat_of_int_eq by metis note this new_di } note d_i = this show "ii \ m \ ?R (d fs' ii) = (if ii = i then ?n2 (i - 1) / ?n1 (i - 1) * ?R (d fs i) else ?R (d fs ii))" for ii using d_i d by auto have pos: "k < m \ 0 < d fs' k" "k < m \ 0 \ d fs' k" for k using LLL_d_pos[OF newInvw, of k] by auto have prodpos:"0< (\ix\{0.. (\x\{0..iaii j \ {0 ..< m} - {i} \ {i}. d fs' j)" by (rule prod.cong, insert i, auto) also have "real_of_int \ = real_of_int (\ j \ {0 ..< m} - {i}. d fs' j) * real_of_int (d fs' i)" by (subst prod.union_disjoint, auto) also have "\ < (\ j \ {0 ..< m} - {i}. d fs' j) * (of_rat reduction * d fs i)" by(rule mult_strict_left_mono[OF d_i(1)],insert prod_pos',auto) also have "(\ j \ {0 ..< m} - {i}. d fs' j) = (\ j \ {0 ..< m} - {i}. d fs j)" by (rule prod.cong, insert d, auto) also have "\ * (of_rat reduction * d fs i) = of_rat reduction * (\ j \ {0 ..< m} - {i} \ {i}. d fs j)" by (subst prod.union_disjoint, auto) also have "(\ j \ {0 ..< m} - {i} \ {i}. d fs j) = (\ j = 4/3") case True show ?thesis using D unfolding reduction(4)[OF True] logD_def unfolding True by simp next case False hence False': "\ = 4/3 \ False" by simp from False \ have "\ > 4/3" by simp with reduction have reduction1: "reduction < 1" by simp let ?new = "real (D fs')" let ?old = "real (D fs)" let ?log = "log (1/of_rat reduction)" note pos = LLL_D_pos[OF newInvw] LLL_D_pos[OF Linvw] from reduction have "real_of_rat reduction > 0" by auto hence gediv:"1/real_of_rat reduction > 0" by auto have "(1/of_rat reduction) * ?new \ ((1/of_rat reduction) * of_rat reduction) * ?old" - unfolding mult.assoc real_mult_le_cancel_iff2[OF gediv] using D by simp + unfolding mult.assoc mult_le_cancel_iff2[OF gediv] using D by simp also have "(1/of_rat reduction) * of_rat reduction = 1" using reduction by auto finally have "(1/of_rat reduction) * ?new \ ?old" by auto hence "?log ((1/of_rat reduction) * ?new) \ ?log ?old" by (subst log_le_cancel_iff, auto simp: pos reduction1 reduction) hence "floor (?log ((1/of_rat reduction) * ?new)) \ floor (?log ?old)" by (rule floor_mono) hence "nat (floor (?log ((1/of_rat reduction) * ?new))) \ nat (floor (?log ?old))" by simp also have "\ = logD fs" unfolding logD_def False' by simp also have "?log ((1/of_rat reduction) * ?new) = 1 + ?log ?new" by (subst log_mult, insert reduction reduction1, auto simp: pos ) also have "floor (1 + ?log ?new) = 1 + floor (?log ?new)" by simp also have "nat (1 + floor (?log ?new)) = 1 + nat (floor (?log ?new))" by (subst nat_add_distrib, insert pos reduction reduction1, auto) also have "nat (floor (?log ?new)) = logD fs'" unfolding logD_def False' by simp finally show "logD fs' < logD fs" by simp qed show "LLL_measure i fs > LLL_measure (i - 1) fs'" unfolding LLL_measure_def using i logD by simp qed lemma LLL_inv_initial_state: "LLL_invariant True 0 fs_init" proof - from lin_dep[unfolded gs.lin_indpt_list_def] have "set (RAT fs_init) \ Rn" by auto hence fs_init: "set fs_init \ carrier_vec n" by auto show ?thesis by 
(rule LLL_invI[OF fs_init len _ _ lin_dep], auto simp: L_def gs.reduced_def gs.weakly_reduced_def) qed lemma LLL_inv_m_imp_reduced: assumes "LLL_invariant True m fs" shows "reduced fs m" using LLL_invD[OF assms] by blast lemma basis_reduction_short_vector: assumes LLL_inv: "LLL_invariant True m fs" and v: "v = hd fs" and m0: "m \ 0" shows "v \ carrier_vec n" "v \ L - {0\<^sub>v n}" "h \ L - {0\<^sub>v n} \ rat_of_int (sq_norm v) \ \ ^ (m - 1) * rat_of_int (sq_norm h)" "v \ 0\<^sub>v j" proof - let ?L = "lattice_of fs_init" have a1: "\ \ 1" using \ by auto from LLL_invD[OF LLL_inv] have L: "lattice_of fs = L" and red: "gram_schmidt_fs.weakly_reduced n (RAT fs) \ (length (RAT fs))" and basis: "lin_indep fs" and lenH: "length fs = m" and H: "set fs \ carrier_vec n" by (auto simp: gs.lin_indpt_list_def gs.reduced_def) from lin_dep have G: "set fs_init \ carrier_vec n" unfolding gs.lin_indpt_list_def by auto with m0 len have "dim_vec (hd fs_init) = n" by (cases fs_init, auto) from v m0 lenH v have v: "v = fs ! 0" by (cases fs, auto) interpret gs1: gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use assms LLL_invariant_def gs.lin_indpt_list_def in auto) let ?r = "rat_of_int" let ?rv = "map_vec ?r" let ?F = "RAT fs" let ?h = "?rv h" { assume h:"h \ L - {0\<^sub>v n}" (is ?h_req) from h[folded L] have h: "h \ lattice_of fs" "h \ 0\<^sub>v n" by auto { assume f: "?h = 0\<^sub>v n" have "?h = ?rv (0\<^sub>v n)" unfolding f by (intro eq_vecI, auto) hence "h = 0\<^sub>v n" using of_int_hom.vec_hom_zero_iff[of h] of_int_hom.vec_hom_inj by auto with h have False by simp } hence h0: "?h \ 0\<^sub>v n" by auto with lattice_of_of_int[OF H h(1)] have "?h \ gs.lattice_of ?F - {0\<^sub>v n}" by auto } from gs1.weakly_reduced_imp_short_vector[OF red this a1] lenH show "h \ L - {0\<^sub>v n} \ ?r (sq_norm v) \ \ ^ (m - 1) * ?r (sq_norm h)" using basis unfolding L v gs.lin_indpt_list_def by (auto simp: sq_norm_of_int) from m0 H lenH show vn: "v \ carrier_vec n" unfolding v by (cases fs, auto) have vL: "v \ L" unfolding L[symmetric] v using m0 H lenH by (intro basis_in_latticeI, cases fs, auto) { assume "v = 0\<^sub>v n" hence "hd ?F = 0\<^sub>v n" unfolding v using m0 lenH by (cases fs, auto) with gs.lin_indpt_list_nonzero[OF basis] have False using m0 lenH by (cases fs, auto) } with vL show v: "v \ L - {0\<^sub>v n}" by auto have jn:"0\<^sub>v j \ carrier_vec n \ j = n" unfolding zero_vec_def carrier_vec_def by auto with v vn show "v \ 0\<^sub>v j" by auto qed lemma LLL_mu_d_Z: assumes inv: "LLL_invariant_weak fs" and j: "j \ ii" and ii: "ii < m" shows "of_int (d fs (Suc j)) * \ fs ii j \ \" proof - interpret fs: fs_int' n m fs_init fs by standard (use inv in auto) show ?thesis using assms fs.fs_int_mu_d_Z LLL_inv_wD[OF inv] unfolding d_def fs.d_def by auto qed context fixes fs assumes Linv: "LLL_invariant_weak fs" and gbnd: "g_bound fs" begin interpretation gs1: gram_schmidt_fs_lin_indpt n "RAT fs" by (standard) (use Linv LLL_invariant_weak_def gs.lin_indpt_list_def in auto) lemma LLL_inv_N_pos: assumes m: "m \ 0" shows "N > 0" proof - let ?r = rat_of_int note inv = LLL_inv_wD[OF Linv] from inv have F: "RAT fs ! 0 \ Rn" "fs ! 0 \ carrier_vec n" using m by auto from m have upt: "[0..< m] = 0 # [1 ..< m]" using upt_add_eq_append[of 0 1 "m - 1"] by auto from inv(6) m have "map_vec ?r (fs ! 0) \ 0\<^sub>v n" using gs.lin_indpt_list_nonzero[OF inv(1)] unfolding set_conv_nth by force hence F0: "fs ! 0 \ 0\<^sub>v n" by auto hence "sq_norm (fs ! 0) \ 0" using F by simp hence 1: "sq_norm (fs ! 
0) \ 1" using sq_norm_vec_ge_0[of "fs ! 0"] by auto from gbnd m have "sq_norm (gso fs 0) \ of_nat N" unfolding g_bound_def by auto also have "gso fs 0 = RAT fs ! 0" unfolding upt using F by (simp add: gs1.gso.simps[of 0]) also have "RAT fs ! 0 = map_vec ?r (fs ! 0)" using inv(6) m by auto also have "sq_norm \ = ?r (sq_norm (fs ! 0))" by (simp add: sq_norm_of_int) finally show ?thesis using 1 by (cases N, auto) qed (* equation (3) in front of Lemma 16.18 *) lemma d_approx_main: assumes i: "ii \ m" "m \ 0" shows "rat_of_int (d fs ii) \ rat_of_nat (N^ii)" proof - note inv = LLL_inv_wD[OF Linv] from LLL_inv_N_pos i have A: "0 < N" by auto note main = inv(2)[unfolded gram_schmidt_int_def gram_schmidt_wit_def] have "rat_of_int (d fs ii) = (\jgso fs j\\<^sup>2)" unfolding d_def using i by (auto simp: Gramian_determinant [OF Linv]) also have "\ \ (\j = (of_nat N)^ii" unfolding prod_constant by simp also have "\ = of_nat (N^ii)" by simp finally show ?thesis by simp qed lemma d_approx: assumes i: "ii < m" shows "rat_of_int (d fs ii) \ rat_of_nat (N^ii)" using d_approx_main[of ii] assms by auto lemma d_bound: assumes i: "ii < m" shows "d fs ii \ N^ii" using d_approx[OF assms] unfolding d_def by linarith lemma D_approx: "D fs \ N ^ (m * m)" proof - note inv = LLL_inv_wD[OF Linv] from LLL_inv_N_pos have N: "m \ 0 \ 0 < N" by auto note main = inv(2)[unfolded gram_schmidt_int_def gram_schmidt_wit_def] have "rat_of_int (\ii \ (\i \ (\i = (of_nat N)^(m * m)" unfolding prod_constant power_mult by simp also have "\ = of_nat (N ^ (m * m))" by simp finally have "(\i N ^ (m * m)" by linarith also have "(\i N ^ (m * m)" by linarith qed lemma LLL_measure_approx: assumes "\ > 4/3" "m \ 0" shows "LLL_measure i fs \ m + 2 * m * m * log base N" proof - have b1: "base > 1" using base assms by auto have id: "base = 1 / real_of_rat reduction" unfolding base_def reduction_def using \0 by (auto simp: field_simps of_rat_divide) from LLL_D_pos[OF Linv] have D1: "real (D fs) \ 1" by auto note invD = LLL_inv_wD[OF Linv] from invD have F: "set fs \ carrier_vec n" and len: "length fs = m" by auto have N0: "N > 0" using LLL_inv_N_pos[OF assms(2)] . from D_approx have D: "D fs \ N ^ (m * m)" . hence "real (D fs) \ real (N ^ (m * m))" by linarith also have "\ = real N ^ (m * m)" by simp finally have log: "log base (real (D fs)) \ log base (real N ^ (m * m))" by (subst log_le_cancel_iff[OF b1], insert D1 N0, auto) have "real (logD fs) = real (nat \log base (real (D fs))\)" unfolding logD_def id using assms by auto also have "\ \ log base (real (D fs))" using b1 D1 by auto also have "\ \ log base (real N ^ (m * m))" by fact also have "\ = (m * m) * log base (real N)" by (rule log_nat_power, insert N0, auto) finally have main: "logD fs \ m * m * log base N" by simp have "real (LLL_measure i fs) = real (2 * logD fs + m - i)" unfolding LLL_measure_def split invD(1) by simp also have "\ \ 2 * real (logD fs) + m" using invD by simp also have "\ \ 2 * (m * m * log base N) + m" using main by auto finally show ?thesis by simp qed end lemma g_bound_fs_init: "g_bound fs_init" proof - { fix i assume i: "i < m" let ?N = "map (nat o sq_norm) fs_init" let ?r = rat_of_int from i have mem: "nat (sq_norm (fs_init ! i)) \ set ?N" using fs_init len unfolding set_conv_nth by force interpret gs: gram_schmidt_fs_lin_indpt n "RAT fs_init" by (standard) (use len lin_dep LLL_invariant_def gs.lin_indpt_list_def in auto) from mem_set_imp_le_max_list[OF _ mem] have FN: "nat (sq_norm (fs_init ! i)) \ N" unfolding N_def by force hence "\fs_init ! 
i\\<^sup>2 \ int N" using i by auto also have "\ \ int (N * m)" using i by fastforce finally have f_bnd: "\fs_init ! i\\<^sup>2 \ int (N * m)" . from FN have "rat_of_nat (nat (sq_norm (fs_init ! i))) \ rat_of_nat N" by simp also have "rat_of_nat (nat (sq_norm (fs_init ! i))) = ?r (sq_norm (fs_init ! i))" using sq_norm_vec_ge_0[of "fs_init ! i"] by auto also have "\ = sq_norm (RAT fs_init ! i)" unfolding sq_norm_of_int[symmetric] using fs_init len i by auto finally have "sq_norm (RAT fs_init ! i) \ rat_of_nat N" . with gs.sq_norm_gso_le_f i len lin_dep have g_bnd: "\gs.gso i\\<^sup>2 \ rat_of_nat N" unfolding gs.lin_indpt_list_def by fastforce note f_bnd g_bnd } thus "g_bound fs_init" unfolding g_bound_def by auto qed lemma LLL_measure_approx_fs_init: "LLL_invariant upw i fs_init \ 4 / 3 < \ \ m \ 0 \ real (LLL_measure i fs_init) \ real m + real (2 * m * m) * log base (real N)" using LLL_measure_approx[OF LLL_inv_imp_w g_bound_fs_init] . lemma N_le_MMn: assumes m0: "m \ 0" shows "N \ nat M * nat M * n" unfolding N_def proof (rule max_list_le, unfold set_map o_def) fix ni assume "ni \ (\x. nat \x\\<^sup>2) ` set fs_init" then obtain fi where ni: "ni = nat (\fi\\<^sup>2)" and fi: "fi \ set fs_init" by auto from fi len obtain i where fii: "fi = fs_init ! i" and i: "i < m" unfolding set_conv_nth by auto from fi fs_init have fi: "fi \ carrier_vec n" by auto let ?set = "{\fs_init ! i $ j\ |i j. i < m \ j < n} \ {0}" have id: "?set = (\ (i,j). abs (fs_init ! i $ j)) ` ({0.. {0.. {0}" by force have fin: "finite ?set" unfolding id by auto { fix j assume "j < n" hence "M \ \fs_init ! i $ j\" unfolding M_def using i by (intro Max_ge[of _ "abs (fs_init ! i $ j)"], intro fin, auto) } note M = this from Max_ge[OF fin, of 0] have M0: "M \ 0" unfolding M_def by auto have "ni = nat (\fi\\<^sup>2)" unfolding ni by auto also have "\ \ nat (int n * \fi\\<^sub>\\<^sup>2)" using sq_norm_vec_le_linf_norm[OF fi] by (intro nat_mono, auto) also have "\ = n * nat (\fi\\<^sub>\\<^sup>2)" by (simp add: nat_mult_distrib) also have "\ \ n * nat (M^2)" proof (rule mult_left_mono[OF nat_mono]) have fi: "\fi\\<^sub>\ \ M" unfolding linf_norm_vec_def proof (rule max_list_le, unfold set_append set_map, rule ccontr) fix x assume "x \ abs ` set (list_of_vec fi) \ set [0]" and xM: "\ x \ M" with M0 obtain fij where fij: "fij \ set (list_of_vec fi)" and x: "x = abs fij" by auto from fij fi obtain j where j: "j < n" and fij: "fij = fi $ j" unfolding set_list_of_vec vec_set_def by auto from M[OF j] xM[unfolded x fij fii] show False by auto qed auto show "\fi\\<^sub>\\<^sup>2 \ M^2" unfolding abs_le_square_iff[symmetric] using fi using linf_norm_vec_ge_0[of fi] by auto qed auto finally show "ni \ nat M * nat M * n" using M0 by (subst nat_mult_distrib[symmetric], auto simp: power2_eq_square ac_simps) qed (insert m0 len, auto) subsection \Basic LLL implementation based on previous results\ text \We now assemble a basic implementation of the LLL algorithm, where only the lattice basis is updated, and where the GSO and the $\mu$-values are always computed from scratch. This enables a simple soundness proof and permits to separate an efficient implementation from the soundness reasoning.\ fun basis_reduction_add_rows_loop where "basis_reduction_add_rows_loop i fs 0 = fs" | "basis_reduction_add_rows_loop i fs (Suc j) = ( let c = round (\ fs i j); fs' = (if c = 0 then fs else fs[ i := fs ! i - c \\<^sub>v fs ! 
j]) in basis_reduction_add_rows_loop i fs' j)" definition basis_reduction_add_rows where "basis_reduction_add_rows upw i fs = (if upw then basis_reduction_add_rows_loop i fs i else fs)" definition basis_reduction_swap where "basis_reduction_swap i fs = (False, i - 1, fs[i := fs ! (i - 1), i - 1 := fs ! i])" definition basis_reduction_step where "basis_reduction_step upw i fs = (if i = 0 then (True, Suc i, fs) else let fs' = basis_reduction_add_rows upw i fs in if sq_norm (gso fs' (i - 1)) \ \ * sq_norm (gso fs' i) then (True, Suc i, fs') else basis_reduction_swap i fs')" function basis_reduction_main where "basis_reduction_main (upw,i,fs) = (if i < m \ LLL_invariant upw i fs then basis_reduction_main (basis_reduction_step upw i fs) else fs)" by pat_completeness auto definition "reduce_basis = basis_reduction_main (True, 0, fs_init)" definition "short_vector = hd reduce_basis" text \Soundness of this implementation is easily proven\ lemma basis_reduction_add_rows_loop: assumes inv: "LLL_invariant True i fs" and mu_small: "\_small_row i fs j" and res: "basis_reduction_add_rows_loop i fs j = fs'" and i: "i < m" and j: "j \ i" shows "LLL_invariant False i fs'" "LLL_measure i fs' = LLL_measure i fs" proof (atomize(full), insert assms, induct j arbitrary: fs) case (0 fs) thus ?case using basis_reduction_add_row_done[of i fs] by auto next case (Suc j fs) hence j: "j < i" by auto let ?c = "round (\ fs i j)" show ?case proof (cases "?c = 0") case True thus ?thesis using Suc(1)[OF Suc(2) basis_reduction_add_row_main_0[OF LLL_inv_imp_w[OF Suc(2)] i j True Suc(3)]] Suc(2-) by auto next case False note step = basis_reduction_add_row_main(2-)[OF LLL_inv_imp_w[OF Suc(2)] i j refl] note step = step(1)[OF Suc(2)] step(2-) show ?thesis using Suc(1)[OF step(1-2)] False Suc(2-) step(4) by simp qed qed lemma basis_reduction_add_rows: assumes inv: "LLL_invariant upw i fs" and res: "basis_reduction_add_rows upw i fs = fs'" and i: "i < m" shows "LLL_invariant False i fs'" "LLL_measure i fs' = LLL_measure i fs" proof (atomize(full), goal_cases) case 1 note def = basis_reduction_add_rows_def show ?case proof (cases upw) case False with res inv show ?thesis by (simp add: def) next case True with inv have "LLL_invariant True i fs" by auto note start = this \_small_row_refl[of i fs] from res[unfolded def] True have "basis_reduction_add_rows_loop i fs i = fs'" by auto from basis_reduction_add_rows_loop[OF start this i] show ?thesis by auto qed qed lemma basis_reduction_swap: assumes inv: "LLL_invariant False i fs" and res: "basis_reduction_swap i fs = (upw',i',fs')" and cond: "sq_norm (gso fs (i - 1)) > \ * sq_norm (gso fs i)" and i: "i < m" "i \ 0" shows "LLL_invariant upw' i' fs'" (is ?g1) "LLL_measure i' fs' < LLL_measure i fs" (is ?g2) proof - note invw = LLL_inv_imp_w[OF inv] note def = basis_reduction_swap_def from res[unfolded basis_reduction_swap_def] have id: "upw' = False" "i' = i - 1" "fs' = fs[i := fs ! (i - 1), i - 1 := fs ! 
i]" by auto from basis_reduction_swap_main(2-3)[OF invw _ i cond id(3)] inv show ?g1 ?g2 unfolding id by auto qed lemma basis_reduction_step: assumes inv: "LLL_invariant upw i fs" and res: "basis_reduction_step upw i fs = (upw',i',fs')" and i: "i < m" shows "LLL_invariant upw' i' fs'" "LLL_measure i' fs' < LLL_measure i fs" proof (atomize(full), goal_cases) case 1 note def = basis_reduction_step_def note invw = LLL_inv_imp_w[OF inv] obtain fs'' where fs'': "basis_reduction_add_rows upw i fs = fs''" by auto show ?case proof (cases "i = 0") case True from increase_i[OF inv i] True res show ?thesis by (auto simp: def) next case False hence id: "(i = 0) = False" by auto note res = res[unfolded def id if_False fs'' Let_def] let ?x = "sq_norm (gso fs'' (i - 1))" let ?y = "\ * sq_norm (gso fs'' i)" from basis_reduction_add_rows[OF inv fs'' i] have inv: "LLL_invariant False i fs''" and meas: "LLL_measure i fs'' = LLL_measure i fs" by auto note invw = LLL_inv_imp_w[OF inv] show ?thesis proof (cases "?x \ ?y") case True from increase_i[OF inv i] id True res meas show ?thesis by simp next case gt: False hence "?x > ?y" by auto from basis_reduction_swap[OF inv _ this i False] gt res meas show ?thesis by auto qed qed qed termination by (relation "measure (\ (upw,i,fs). LLL_measure i fs)", insert basis_reduction_step, auto split: prod.splits) declare basis_reduction_main.simps[simp del] lemma basis_reduction_main: assumes "LLL_invariant upw i fs" and res: "basis_reduction_main (upw,i,fs) = fs'" shows "LLL_invariant True m fs'" using assms proof (induct "LLL_measure i fs" arbitrary: i fs upw rule: less_induct) case (less i fs upw) have id: "LLL_invariant upw i fs = True" using less by auto note res = less(3)[unfolded basis_reduction_main.simps[of upw i fs] id] note inv = less(2) note IH = less(1) show ?case proof (cases "i < m") case i: True obtain i' fs' upw' where step: "basis_reduction_step upw i fs = (upw',i',fs')" (is "?step = _") by (cases ?step, auto) from IH[OF basis_reduction_step(2,1)[OF inv step i]] res[unfolded step] i show ?thesis by auto next case False with LLL_invD[OF inv] have i: "i = m" by auto with False res inv have "LLL_invariant upw m fs'" by auto thus "LLL_invariant True m fs'" unfolding LLL_invariant_def by auto qed qed lemma reduce_basis_inv: assumes res: "reduce_basis = fs" shows "LLL_invariant True m fs" using basis_reduction_main[OF LLL_inv_initial_state res[unfolded reduce_basis_def]] . 
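(* Illustration only, not part of the formal development: the basic implementation above,
   where only the basis is updated and the GSO and the mu-values are always recomputed
   from scratch, corresponds to the following small Python sketch. All names (basic_lll,
   gso, dot, alpha) are ad hoc; the sketch only loosely mirrors basis_reduction_add_rows_loop,
   basis_reduction_step, basis_reduction_swap and basis_reduction_main (for instance it
   ignores the upw flag), and it assumes linearly independent input rows and alpha >= 4/3,
   as in the locale.

   from fractions import Fraction

   def dot(v, w):
       # exact rational inner product
       return sum(Fraction(a) * Fraction(b) for a, b in zip(v, w))

   def gso(fs):
       # Gram-Schmidt orthogonalisation over Q, recomputed from scratch:
       # returns the orthogonal vectors gs and the mu-matrix of the current basis fs
       m = len(fs)
       gs, mu = [], [[Fraction(0)] * m for _ in range(m)]
       for i, f in enumerate(fs):
           g = [Fraction(x) for x in f]
           for j in range(i):
               mu[i][j] = dot(f, gs[j]) / dot(gs[j], gs[j])
               g = [gi - mu[i][j] * gj for gi, gj in zip(g, gs[j])]
           gs.append(g)
       return gs, mu

   def basic_lll(fs, alpha=Fraction(3, 2)):
       # main loop: i plays the role of the index in basis_reduction_main
       fs = [list(f) for f in fs]
       i, m = 0, len(fs)
       while i < m:
           if i == 0:
               i += 1                               # basis_reduction_step, case i = 0
               continue
           gs, mu = gso(fs)
           for j in range(i - 1, -1, -1):           # basis_reduction_add_rows_loop
               c = round(mu[i][j])
               if c != 0:
                   fs[i] = [a - c * b for a, b in zip(fs[i], fs[j])]
                   gs, mu = gso(fs)                 # GSO and mu recomputed from scratch
           if dot(gs[i - 1], gs[i - 1]) <= alpha * dot(gs[i], gs[i]):
               i += 1                               # norm condition of basis_reduction_step
           else:
               fs[i], fs[i - 1] = fs[i - 1], fs[i]  # basis_reduction_swap
               i -= 1
       return fs

   # usage sketch: basic_lll([[1, 1, 1], [-1, 0, 2], [3, 5, 6]]) returns a reduced basis
   # of the lattice spanned by the input rows.
*)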
lemma reduce_basis: assumes res: "reduce_basis = fs" shows "lattice_of fs = L" "reduced fs m" "lin_indep fs" "length fs = m" using LLL_invD[OF reduce_basis_inv[OF res]] by blast+ lemma short_vector: assumes res: "short_vector = v" and m0: "m \ 0" shows "v \ carrier_vec n" "v \ L - {0\<^sub>v n}" "h \ L - {0\<^sub>v n} \ rat_of_int (sq_norm v) \ \ ^ (m - 1) * rat_of_int (sq_norm h)" "v \ 0\<^sub>v j" using basis_reduction_short_vector[OF reduce_basis_inv[OF refl] res[symmetric, unfolded short_vector_def] m0] by blast+ end end diff --git a/thys/Matrices_for_ODEs/MTX_Examples.thy b/thys/Matrices_for_ODEs/MTX_Examples.thy --- a/thys/Matrices_for_ODEs/MTX_Examples.thy +++ b/thys/Matrices_for_ODEs/MTX_Examples.thy @@ -1,237 +1,237 @@ (* Title: Verification Examples Author: Jonathan Julián Huerta y Munive, 2020 Maintainer: Jonathan Julián Huerta y Munive *) section \ Verification examples \ theory MTX_Examples imports MTX_Flows "Hybrid_Systems_VCs.HS_VC_Spartan" begin subsection \ Examples \ abbreviation hoareT :: "('a \ bool) \ ('a \ 'a set) \ ('a \ bool) \ bool" ("PRE_ HP _ POST _" [85,85]85) where "PRE P HP X POST Q \ (P \ |X]Q)" subsubsection \ Verification by uniqueness. \ abbreviation mtx_circ :: "2 sq_mtx" ("A") where "A \ mtx ([0, 1] # [-1, 0] # [])" abbreviation mtx_circ_flow :: "real \ real^2 \ real^2" ("\") where "\ t s \ (\ i. if i = 1 then s$1 * cos t + s$2 * sin t else - s$1 * sin t + s$2 * cos t)" lemma mtx_circ_flow_eq: "exp (t *\<^sub>R A) *\<^sub>V s = \ t s" apply(rule local_flow.eq_solution[OF local_flow_sq_mtx_linear, symmetric]) apply(rule ivp_solsI, simp_all add: sq_mtx_vec_mult_eq vec_eq_iff) unfolding UNIV_2 using exhaust_2 by (force intro!: poly_derivatives simp: matrix_vector_mult_def)+ lemma mtx_circ: "PRE(\s. r\<^sup>2 = (s $ 1)\<^sup>2 + (s $ 2)\<^sup>2) HP x\=(*\<^sub>V) A & G POST (\s. r\<^sup>2 = (s $ 1)\<^sup>2 + (s $ 2)\<^sup>2)" apply(subst local_flow.fbox_g_ode[OF local_flow_sq_mtx_linear]) unfolding mtx_circ_flow_eq by auto no_notation mtx_circ ("A") and mtx_circ_flow ("\") subsubsection \ Flow of diagonalisable matrix. \ abbreviation mtx_hOsc :: "real \ real \ 2 sq_mtx" ("A") where "A a b \ mtx ([0, 1] # [a, b] # [])" abbreviation mtx_chB_hOsc :: "real \ real \ 2 sq_mtx" ("P") where "P a b \ mtx ([a, b] # [1, 1] # [])" lemma inv_mtx_chB_hOsc: "a \ b \ (P a b)\<^sup>-\<^sup>1 = (1/(a - b)) *\<^sub>R mtx ([ 1, -b] # [-1, a] # [])" apply(rule sq_mtx_inv_unique, unfold scaleR_mtx2 times_mtx2) by (simp add: diff_divide_distrib[symmetric] one_mtx2)+ lemma invertible_mtx_chB_hOsc: "a \ b \ mtx_invertible (P a b)" apply(rule mtx_invertibleI[of _ "(P a b)\<^sup>-\<^sup>1"]) apply(unfold inv_mtx_chB_hOsc scaleR_mtx2 times_mtx2 one_mtx2) by (subst sq_mtx_eq_iff, simp add: vector_def frac_diff_eq1)+ lemma mtx_hOsc_diagonalizable: fixes a b :: real defines "\\<^sub>1 \ (b - sqrt (b^2+4*a))/2" and "\\<^sub>2 \ (b + sqrt (b^2+4*a))/2" assumes "b\<^sup>2 + a * 4 > 0" and "a \ 0" shows "A a b = P (-\\<^sub>2/a) (-\\<^sub>1/a) * (\\\\ i. 
if i = 1 then \\<^sub>1 else \\<^sub>2) * (P (-\\<^sub>2/a) (-\\<^sub>1/a))\<^sup>-\<^sup>1" unfolding assms apply(subst inv_mtx_chB_hOsc) using assms(3,4) apply(simp_all add: diag2_eq[symmetric]) unfolding sq_mtx_times_eq sq_mtx_scaleR_eq UNIV_2 apply(subst sq_mtx_eq_iff) using exhaust_2 assms by (auto simp: field_simps, auto simp: field_power_simps) lemma mtx_hOsc_solution_eq: fixes a b :: real defines "\\<^sub>1 \ (b - sqrt (b\<^sup>2+4*a))/2" and "\\<^sub>2 \ (b + sqrt (b\<^sup>2+4*a))/2" defines "\ t \ mtx ( [\\<^sub>2*exp(t*\\<^sub>1) - \\<^sub>1*exp(t*\\<^sub>2), exp(t*\\<^sub>2)-exp(t*\\<^sub>1)]# [a*exp(t*\\<^sub>2) - a*exp(t*\\<^sub>1), \\<^sub>2*exp(t*\\<^sub>2)-\\<^sub>1*exp(t*\\<^sub>1)]#[])" assumes "b\<^sup>2 + a * 4 > 0" and "a \ 0" shows "P (-\\<^sub>2/a) (-\\<^sub>1/a) * (\\\\ i. exp (t * (if i=1 then \\<^sub>1 else \\<^sub>2))) * (P (-\\<^sub>2/a) (-\\<^sub>1/a))\<^sup>-\<^sup>1 = (1/sqrt (b\<^sup>2 + a * 4)) *\<^sub>R (\ t)" unfolding assms apply(subst inv_mtx_chB_hOsc) using assms apply(simp_all add: mtx_times_scaleR_commute, subst sq_mtx_eq_iff) unfolding UNIV_2 sq_mtx_times_eq sq_mtx_scaleR_eq sq_mtx_uminus_eq apply(simp_all add: axis_def) by (auto simp: field_simps, auto simp: field_power_simps)+ lemma local_flow_mtx_hOsc: fixes a b defines "\\<^sub>1 \ (b - sqrt (b^2+4*a))/2" and "\\<^sub>2 \ (b + sqrt (b^2+4*a))/2" defines "\ t \ mtx ( [\\<^sub>2*exp(t*\\<^sub>1) - \\<^sub>1*exp(t*\\<^sub>2), exp(t*\\<^sub>2)-exp(t*\\<^sub>1)]# [a*exp(t*\\<^sub>2) - a*exp(t*\\<^sub>1), \\<^sub>2*exp(t*\\<^sub>2)-\\<^sub>1*exp(t*\\<^sub>1)]#[])" assumes "b\<^sup>2 + a * 4 > 0" and "a \ 0" shows "local_flow ((*\<^sub>V) (A a b)) UNIV UNIV (\t. (*\<^sub>V) ((1/sqrt (b\<^sup>2 + a * 4)) *\<^sub>R \ t))" unfolding assms using local_flow_sq_mtx_linear[of "A a b"] assms apply(subst (asm) exp_scaleR_diagonal2[OF invertible_mtx_chB_hOsc mtx_hOsc_diagonalizable]) apply(simp, simp, simp) by (subst (asm) mtx_hOsc_solution_eq) simp_all lemma overdamped_door_arith: assumes "b\<^sup>2 + a * 4 > 0" and "a < 0" and "b \ 0" and "t \ 0" and "s1 > 0" shows "0 \ ((b + sqrt (b\<^sup>2 + 4 * a)) * exp (t * (b - sqrt (b\<^sup>2 + 4 * a)) / 2) / 2 - (b - sqrt (b\<^sup>2 + 4 * a)) * exp (t * (b + sqrt (b\<^sup>2 + 4 * a)) / 2) / 2) * s1 / sqrt (b\<^sup>2 + a * 4)" proof(subst diff_divide_distrib[symmetric], simp) have f0: "s1 / (2 * sqrt (b\<^sup>2 + a * 4)) > 0" (is "s1/?c3 > 0") using assms(1,5) by simp have f1: "(b - sqrt (b\<^sup>2 + 4 * a)) < (b + sqrt (b\<^sup>2 + 4 * a))" (is "?c2 < ?c1") and f2: "(b + sqrt (b\<^sup>2 + 4 * a)) < 0" using sqrt_ge_absD[of b "b\<^sup>2 + 4 * a"] assms by (force, linarith) hence f3: "exp (t * ?c2 / 2) \ exp (t * ?c1 / 2)" (is "exp ?t1 \ exp ?t2") unfolding exp_le_cancel_iff using assms(4) by (case_tac "t=0", simp_all) hence "?c2 * exp ?t2 \ ?c2 * exp ?t1" - using f1 f2 real_mult_le_cancel_iff2[of "-?c2" "exp ?t1" "exp ?t2"] by linarith + using f1 f2 mult_le_cancel_iff2[of "-?c2" "exp ?t1" "exp ?t2"] by linarith also have "... < ?c1 * exp ?t1" using f1 by auto also have"... \ ?c1 * exp ?t1" using f1 f2 by auto ultimately show "0 \ (?c1 * exp ?t1 - ?c2 * exp ?t2) * s1 / ?c3" using f0 f1 assms(5) by auto qed lemma overdamped_door: assumes "b\<^sup>2 + a * 4 > 0" and "a < 0" and "b \ 0" and "0 \ t" shows "PRE (\s. s$1 = 0) HP (LOOP (\s. {s. s$1 > 0 \ s$2 = 0}); (x\=(*\<^sub>V) (A a b) & G on {0..t} UNIV @ 0) INV (\s. 0 \ s$1)) POST (\s. 
0 \ s $ 1)" apply(rule fbox_loopI, simp_all add: le_fun_def) apply(subst local_flow.fbox_g_ode_ivl[OF local_flow_mtx_hOsc[OF assms(1)]]) using assms apply(simp_all add: le_fun_def fbox_def) unfolding sq_mtx_scaleR_eq UNIV_2 sq_mtx_vec_mult_eq by (clarsimp simp: overdamped_door_arith) no_notation mtx_hOsc ("A") and mtx_chB_hOsc ("P") subsubsection \ Flow of non-diagonalisable matrix. \ abbreviation mtx_cnst_acc :: "3 sq_mtx" ("K") where "K \ mtx ( [0,1,0] # [0,0,1] # [0,0,0] # [])" lemma pow2_scaleR_mtx_cnst_acc: "(t *\<^sub>R K)\<^sup>2 = mtx ( [0,0,t\<^sup>2] # [0,0,0] # [0,0,0] # [])" unfolding power2_eq_square apply(subst sq_mtx_eq_iff) unfolding sq_mtx_times_eq UNIV_3 by auto lemma powN_scaleR_mtx_cnst_acc: "n > 2 \ (t *\<^sub>R K)^n = 0" apply(induct n, simp, case_tac "n \ 2") apply(subgoal_tac "n = 2", erule ssubst) unfolding power_Suc2 pow2_scaleR_mtx_cnst_acc sq_mtx_times_eq UNIV_3 by (auto simp: sq_mtx_eq_iff) lemma exp_mtx_cnst_acc: "exp (t *\<^sub>R K) = ((t *\<^sub>R K)\<^sup>2/\<^sub>R 2) + (t *\<^sub>R K) + 1" unfolding exp_def apply(subst suminf_eq_sum[of 2]) using powN_scaleR_mtx_cnst_acc by (simp_all add: numeral_2_eq_2) lemma exp_mtx_cnst_acc_simps: "exp (t *\<^sub>R K) $$ 1 $ 1 = 1" "exp (t *\<^sub>R K) $$ 1 $ 2 = t" "exp (t *\<^sub>R K) $$ 1 $ 3 = t^2/2" "exp (t *\<^sub>R K) $$ 2 $ 1 = 0" "exp (t *\<^sub>R K) $$ 2 $ 2 = 1" "exp (t *\<^sub>R K) $$ 2 $ 3 = t" "exp (t *\<^sub>R K) $$ 3 $ 1 = 0" "exp (t *\<^sub>R K) $$ 3 $ 2 = 0" "exp (t *\<^sub>R K) $$ 3 $ 3 = 1" unfolding exp_mtx_cnst_acc one_mtx3 pow2_scaleR_mtx_cnst_acc by simp_all lemma exp_mtx_cnst_acc_vec_mult_eq: "exp (t *\<^sub>R K) *\<^sub>V s = vector [s$3 * t^2/2 + s$2 * t + s$1, s$3 * t + s$2, s$3]" apply(simp add: sq_mtx_vec_mult_eq vector_def) unfolding UNIV_3 apply (simp add: exp_mtx_cnst_acc_simps fun_eq_iff) using exhaust_3 exp_mtx_cnst_acc_simps(7,8,9) by fastforce lemma local_flow_mtx_cnst_acc: "local_flow ((*\<^sub>V) K) UNIV UNIV (\t s. ((t *\<^sub>R K)\<^sup>2/\<^sub>R 2 + (t *\<^sub>R K) + 1) *\<^sub>V s)" using local_flow_sq_mtx_linear[of K] unfolding exp_mtx_cnst_acc . lemma docking_station_arith: assumes "(d::real) > x" and "v > 0" shows "(v = v\<^sup>2 * t / (2 * d - 2 * x)) \ (v * t - v\<^sup>2 * t\<^sup>2 / (4 * d - 4 * x) + x = d)" proof assume "v = v\<^sup>2 * t / (2 * d - 2 * x)" hence "v * t = 2 * (d - x)" using assms by (simp add: eq_divide_eq power2_eq_square) hence "v * t - v\<^sup>2 * t\<^sup>2 / (4 * d - 4 * x) + x = 2 * (d - x) - 4 * (d - x)\<^sup>2 / (4 * (d - x)) + x" apply(subst power_mult_distrib[symmetric]) by (erule ssubst, subst power_mult_distrib, simp) also have "... = d" apply(simp only: mult_divide_mult_cancel_left_if) using assms by (auto simp: power2_eq_square) finally show "v * t - v\<^sup>2 * t\<^sup>2 / (4 * d - 4 * x) + x = d" . next assume "v * t - v\<^sup>2 * t\<^sup>2 / (4 * d - 4 * x) + x = d" hence "0 = v\<^sup>2 * t\<^sup>2 / (4 * (d - x)) + (d - x) - v * t" by auto hence "0 = (4 * (d - x)) * (v\<^sup>2 * t\<^sup>2 / (4 * (d - x)) + (d - x) - v * t)" by auto also have "... = v\<^sup>2 * t\<^sup>2 + 4 * (d - x)\<^sup>2 - (4 * (d - x)) * (v * t)" using assms apply(simp add: distrib_left right_diff_distrib) apply(subst right_diff_distrib[symmetric])+ by (simp add: power2_eq_square) also have "... = (v * t - 2 * (d - x))\<^sup>2" by (simp only: power2_diff, auto simp: field_simps power2_diff) finally have "0 = (v * t - 2 * (d - x))\<^sup>2" . 
hence "v * t = 2 * (d - x)" by auto thus "v = v\<^sup>2 * t / (2 * d - 2 * x)" apply(subst power2_eq_square, subst mult.assoc) apply(erule ssubst, subst right_diff_distrib[symmetric]) using assms by auto qed lemma docking_station: assumes "d > x\<^sub>0" and "v\<^sub>0 > 0" shows "PRE (\s. s$1 = x\<^sub>0 \ s$2 = v\<^sub>0) HP ((3 ::= (\s. -(v\<^sub>0^2/(2*(d-x\<^sub>0))))); x\=(*\<^sub>V) K & G) POST (\s. s$2 = 0 \ s$1 = d)" apply(clarsimp simp: le_fun_def local_flow.fbox_g_ode[OF local_flow_sq_mtx_linear[of K]]) unfolding exp_mtx_cnst_acc_vec_mult_eq using assms by (simp add: docking_station_arith) no_notation mtx_cnst_acc ("K") end \ No newline at end of file diff --git a/thys/Neumann_Morgenstern_Utility/Expected_Utility.thy b/thys/Neumann_Morgenstern_Utility/Expected_Utility.thy --- a/thys/Neumann_Morgenstern_Utility/Expected_Utility.thy +++ b/thys/Neumann_Morgenstern_Utility/Expected_Utility.thy @@ -1,274 +1,274 @@ (* License: LGPL *) theory Expected_Utility imports Neumann_Morgenstern_Utility_Theorem begin section \ Definition of vNM-utility function \ text \ We define a version of the vNM Utility function using the locale mechanism. Currently this definition and system U have no proven relation yet. \ text \ Important: u is actually not the von Neuman Utility Function, but a Bernoulli Utility Function. The Expected value p given u is the von Neumann Utility Function. \ locale vNM_utility = fixes outcomes :: "'a set" fixes relation :: "'a pmf relation" fixes u :: "'a \ real" assumes "relation \ (lotteries_on outcomes \ lotteries_on outcomes)" assumes "\p q. p \ lotteries_on outcomes \ q \ lotteries_on outcomes \ p \[relation] q \ measure_pmf.expectation p u \ measure_pmf.expectation q u" begin lemma vNM_utilityD: shows "relation \ (lotteries_on outcomes \ lotteries_on outcomes)" and "p \ lotteries_on outcomes \ q \ lotteries_on outcomes \ p \[relation] q \ measure_pmf.expectation p u \ measure_pmf.expectation q u" using vNM_utility_axioms vNM_utility_def by (blast+) lemma not_outside: assumes "p \[relation] q" shows "p \ lotteries_on outcomes" and "q \ lotteries_on outcomes" proof (goal_cases) case 1 then show ?case by (meson assms contra_subsetD mem_Sigma_iff vNM_utility_axioms vNM_utility_def) next case 2 then show ?case by (metis assms mem_Sigma_iff subsetCE vNM_utility_axioms vNM_utility_def) qed lemma utility_ge: assumes "p \[relation] q" shows "measure_pmf.expectation p u \ measure_pmf.expectation q u" using assms vNM_utility_axioms vNM_utility_def by (metis (no_types, lifting) not_outside(1) not_outside(2)) end (* vNM_Utility *) sublocale vNM_utility \ ordinal_utility "(lotteries_on outcomes)" relation "(\p. measure_pmf.expectation p u)" proof (standard, goal_cases) case (2 x y) then show ?case using not_outside(1) by blast next case (3 x y) then show ?case by (auto simp add: not_outside(2)) qed (metis (mono_tags, lifting) vNM_utility_axioms vNM_utility_def) context vNM_utility begin lemma strict_preference_iff_strict_utility: assumes "p \ lotteries_on outcomes" assumes "q \ lotteries_on outcomes" shows "p \[relation] q \ measure_pmf.expectation p u > measure_pmf.expectation q u" by (meson assms(1) assms(2) less_eq_real_def not_le util_def) lemma pos_distrib_left: assumes "c > 0" shows "(\z\outcomes. pmf q z * (c * u z)) = c * (\z\outcomes. pmf q z * (u z))" proof - have "(\z\outcomes. pmf q z * (c * u z)) = (\z\outcomes. pmf q z * c * u z)" by (simp add: ab_semigroup_mult_class.mult_ac(1)) also have "... = (\z\outcomes. 
c * pmf q z * u z)" by (simp add: mult.commute) also have "... = c * (\z\outcomes. pmf q z * u z)" by (simp add: ab_semigroup_mult_class.mult_ac(1) sum_distrib_left) finally show ?thesis . qed lemma sum_pmf_util_commute: "(\a\outcomes. pmf p a * u a) = (\a\outcomes. u a * pmf p a)" by (simp add: mult.commute) section \ Finite outcomes \ context assumes fnt: "finite outcomes" begin lemma sum_equals_pmf_expectation: assumes "p \ lotteries_on outcomes" shows"(\z\outcomes. (pmf p z) * (u z)) = measure_pmf.expectation p u" proof - have fnt: "finite outcomes" by (simp add: vNM_utilityD(1) fnt) have "measure_pmf.expectation p u = (\a\outcomes. pmf p a * u a)" using support_in_outcomes assms fnt integral_measure_pmf_real sum_pmf_util_commute by fastforce then show ?thesis using real_scaleR_def by presburger qed lemma expected_utility_weak_preference: assumes "p \ lotteries_on outcomes" and "q \ lotteries_on outcomes" shows "p \[relation] q \ (\z\outcomes. (pmf p z) * (u z)) \ (\z\outcomes. (pmf q z) * (u z))" using sum_equals_pmf_expectation[of p, OF assms(1)] sum_equals_pmf_expectation[of q, OF assms(2)] vNM_utility_def assms(1) assms(2) util_def_conf by presburger lemma diff_leq_zero_weak_preference: assumes "p \ lotteries_on outcomes" and "q \ lotteries_on outcomes" shows "p \ q \ ((\a\outcomes. pmf q a * u a) - (\a\outcomes. pmf p a * u a) \ 0)" using assms(1) assms(2) diff_le_0_iff_le by (metis (mono_tags, lifting) expected_utility_weak_preference) lemma expected_utility_strict_preference: assumes "p \ lotteries_on outcomes" and "q \ lotteries_on outcomes" shows "p \[relation] q \ measure_pmf.expectation p u > measure_pmf.expectation q u" using assms expected_utility_weak_preference less_eq_real_def not_le by (metis (no_types, lifting) util_def_conf) lemma scale_pos_left: assumes "c > 0" shows "vNM_utility outcomes relation (\x. c * u x)" proof(standard, goal_cases) case 1 then show ?case using vNM_utility_axioms vNM_utility_def by blast next case (2 p q) have "q \ lotteries_on outcomes" and "p \ lotteries_on outcomes" using "2"(2) by (simp add: fnt "2"(1))+ then have *: "p \ q = (measure_pmf.expectation q u \ measure_pmf.expectation p u)" using expected_utility_weak_preference[of p q] assms by blast have dist_c: "(\z\outcomes. (pmf q z) * (c * u z)) = c * (\z\outcomes. (pmf q z) * (u z))" using pos_distrib_left[of c q] assms by blast have dist_c': "(\z\outcomes. (pmf p z) * (c * u z)) = c * (\z\outcomes. (pmf p z) * (u z))" using pos_distrib_left[of c p] assms by blast have "p \ q \ ((\z\outcomes. (pmf q z) * (c * u z)) \ (\z\outcomes. (pmf p z) * (c * u z)))" proof (rule iffI) assume "p \ q" then have "(\z\outcomes. pmf q z * (u z)) \ (\z\outcomes. pmf p z * (u z))" using utility_ge using "2"(1) "2"(2) sum_equals_pmf_expectation by presburger then show "(\z\outcomes. pmf q z * (c * u z)) \ (\z\outcomes. pmf p z * (c * u z))" using dist_c dist_c' by (simp add: assms) next assume "(\z\outcomes. pmf q z * (c * u z)) \ (\z\outcomes. pmf p z * (c * u z))" then have "(\z\outcomes. pmf q z * (u z)) \ (\z\outcomes. pmf p z * (u z))" - using "2"(1) real_mult_le_cancel_iff2 assms by (simp add: dist_c dist_c') + using "2"(1) mult_le_cancel_iff2 assms by (simp add: dist_c dist_c') then show "p \ q" using "2"(2) assms "2"(1) by (simp add: * sum_equals_pmf_expectation) qed then show ?case by (simp add: "*" assms) qed lemma strict_alt_def: assumes "p \ lotteries_on outcomes" and "q \ lotteries_on outcomes" shows "p \[relation] q \ (\z\outcomes. (pmf p z) * (u z)) > (\z\outcomes. 
(pmf q z) * (u z))" using sum_equals_pmf_expectation[of p, OF assms(1)] assms(1) assms(2) sum_equals_pmf_expectation[of q, OF assms(2)] strict_prefernce_iff_strict_utility by presburger lemma strict_alt_def_utility_g: assumes "p \[relation] q" shows "(\z\outcomes. (pmf p z) * (u z)) > (\z\outcomes. (pmf q z) * (u z))" using assms not_outside(1) not_outside(2) strict_alt_def by meson end (* finite outcomes *) end (* Definition of vNM Utility Function as locale *) lemma vnm_utility_is_ordinal_utility: assumes "vNM_utility outcomes relation u" shows "ordinal_utility (lotteries_on outcomes) relation (\p. measure_pmf.expectation p u)" proof (standard, goal_cases) case (1 x y) then show ?case using assms vNM_utility_def by blast next case (2 x y) then show ?case using assms vNM_utility.not_outside(1) by blast next case (3 x y) then show ?case using assms vNM_utility.not_outside(2) by blast qed lemma vnm_utility_imp_reational_prefs: assumes "vNM_utility outcomes relation u" shows "rational_preference (lotteries_on outcomes) relation" proof (standard,goal_cases) case (1 x y) then show ?case using assms vNM_utility.not_outside(1) by blast next case (2 x y) then show ?case using assms vNM_utility.not_outside(2) by blast next case 3 have t: "trans relation" using assms ordinal_utility.util_imp_trans vnm_utility_is_ordinal_utility by blast have "refl_on (lotteries_on outcomes) relation" by (meson assms order_refl refl_on_def vNM_utility_def) then show ?case using preorder_on_def t by blast next case 4 have "total_on (lotteries_on outcomes) relation" using ordinal_utility.util_imp_total[of "lotteries_on outcomes" relation "(\p. (\z\outcomes. (pmf p z) * (u z)))"] assms vnm_utility_is_ordinal_utility using ordinal_utility.util_imp_total by blast then show ?case by simp qed theorem expected_utilty_theorem_form_vnm_utility: assumes fnt: "finite outcomes" and "outcomes \ {}" shows "rational_preference (lotteries_on outcomes) \ \ independent_vnm (lotteries_on outcomes) \ \ continuous_vnm (lotteries_on outcomes) \ \ (\u. vNM_utility outcomes \ u)" proof assume "rational_preference (\

outcomes) \ \ independent_vnm (\ outcomes) \ \ continuous_vnm (\ outcomes) \" with Von_Neumann_Morgenstern_Utility_Theorem[of outcomes \, OF assms] have "(\u. ordinal_utility (\ outcomes) \ (\x. measure_pmf.expectation x u))" using assms by blast then obtain u where u: "ordinal_utility (\ outcomes) \ (\x. measure_pmf.expectation x u)" by auto have "vNM_utility outcomes \ u" proof (standard, goal_cases) case 1 then show ?case using u ordinal_utility.relation_subset_crossp by blast next case (2 p q) then show ?case using assms(2) expected_value_is_utility_function fnt u by blast qed then show "\u. vNM_utility outcomes \ u" by blast next assume a: "\u. vNM_utility outcomes \ u" then have "rational_preference (\ outcomes) \" using vnm_utility_imp_reational_prefs by auto moreover have "independent_vnm (\ outcomes) \" using a by (meson assms(2) fnt vNM_utility_implies_independence vnm_utility_is_ordinal_utility) moreover have "continuous_vnm (\ outcomes) \" using a by (meson assms(2) fnt vNM_utilty_implies_continuity vnm_utility_is_ordinal_utility) ultimately show "rational_preference (\ outcomes) \ \ independent_vnm (\ outcomes) \ \ continuous_vnm (\
outcomes) \" by auto qed end diff --git a/thys/Ordinary_Differential_Equations/Ex/Examples_One_Step_Method.thy b/thys/Ordinary_Differential_Equations/Ex/Examples_One_Step_Method.thy --- a/thys/Ordinary_Differential_Equations/Ex/Examples_One_Step_Method.thy +++ b/thys/Ordinary_Differential_Equations/Ex/Examples_One_Step_Method.thy @@ -1,481 +1,481 @@ theory Examples_One_Step_Method imports "HOL-ODE-Numerics.ODE_Numerics" begin subsection \Example 1\ experiment begin schematic_goal e1_fas: "[1, (X!1)\<^sup>2 - X!0] = interpret_floatariths ?fas X" unfolding power2_eq_square\ \TODO: proper affine implementation of power\ by (reify_floatariths) concrete_definition e1_fas uses e1_fas interpretation e1: ode_interpretation true_form UNIV e1_fas "\(x, y). (1, y\<^sup>2 - x)::real*real" "d::2" for d by unfold_locales (auto simp: e1_fas_def less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def mk_ode_ops_def eucl_of_list_prod power2_eq_square intro!: isFDERIV_I) lemma e1_0: "t \ {4 .. 4} \ (x, y) \ {(0, 0) .. (0, 0.71875)} \ t \ e1.existence_ivl0 (x, y) \ e1.flow0 (x, y) t \ {(3.999, -1.96)..(4, -1.9)}" by (tactic \ode_bnds_tac @{thms e1_fas_def} 30 20 7 12 [(0, 1, "0x000000")] (* "out_e1_0.out" *) "" @{context} 1\) lemma e1_1: "t \ {4 .. 4} \ (x, y) \ {(0, 0.71875) .. (0, 0.71875)} \ t \ e1.existence_ivl0 (x, y) \ e1.flow0 (x, y) t \ {(3.999, -1.921)..(4, -1.919)}" by (tactic \ode_bnds_tac @{thms e1_fas_def} 30 20 7 12 [(0, 1, "0xff0000")] (* "out_e1_1.out" *) "" @{context} 1\) end subsection \Example Exponential\ experiment begin schematic_goal exp_fas: "[1, (X!1)] = interpret_floatariths ?fas X" by (reify_floatariths) concrete_definition exp_fas uses exp_fas interpretation exp_ivp: ode_interpretation true_form UNIV exp_fas "\(t::real, x). (1, x::real)" "one::2" by standard (auto simp: exp_fas_def less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def mk_ode_ops_def eucl_of_list_prod power2_eq_square intro!: isFDERIV_I) subsubsection \connection to exponential function\ lemma exp_ivp_existence_ivl0: "exp_ivp.existence_ivl0 p = UNIV" by (rule exp_ivp.existence_ivl_eq_domain) (auto intro!: exI[where x=1] order_trans[OF norm_Pair_le] order_trans[OF _ norm_snd_le]) lemma exp_eq_exp_ivp_aux: "exp_ivp.flow0 (0, 1) x = (x, exp x)" apply (rule exp_ivp.flow_unique[where phi="\x. (x, exp x)"]) unfolding exp_ivp_existence_ivl0 by (auto simp: has_vector_derivative_def intro!: derivative_eq_intros) lemma ex_eq_exp_ivp: "exp x = snd (exp_ivp.flow0 (0, 1) x)" unfolding exp_eq_exp_ivp_aux by simp text \concrete example\ lemma exp_ode_result: "t \ {1 .. 1} \ (s, x) \ {(0, 1) .. (0, 1)} \ t \ exp_ivp.existence_ivl0 (s, x) \ exp_ivp.flow0 (s, x) t \ {(0.99, 2.718281) .. (1, 2.718284)}" by (tactic \ode_bnds_tac @{thms exp_fas_def} 30 20 7 20 [(0, 1, "0x000000")] (* "out_exp.out" *) "" @{context} 1\) lemma exp1: "exp 1 \ {2.718281 .. 2.718284::real}" using exp_ode_result[of 1 0 1] by (auto simp: exp_eq_exp_ivp_aux) end subsection \Van der Pol\ experiment begin schematic_goal vdp_fas: "[X!1, X!1 * (1 - (X!0)\<^sup>2) - X!0] = interpret_floatariths ?fas X" unfolding power2_eq_square\ \TODO: proper affine implementation of power\ by (reify_floatariths) concrete_definition vdp_fas uses vdp_fas interpretation vdp: ode_interpretation true_form UNIV vdp_fas "\(x, y). 
(y, y * (1 - x\<^sup>2) - x)::real*real" "n::2" for n by standard (auto simp: vdp_fas_def less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def mk_ode_ops_def eucl_of_list_prod power2_eq_square intro!: isFDERIV_I) lemma vdp_c0: "t \ point_ivl 7 \ (x, y) \ point_ivl (1.4, 2.4) \ t \ vdp.existence_ivl0 (x, y) \ vdp.flow0 (x, y) t \ {(1.870, 0.9887) .. (1.875, 1.001)}" by (tactic \ode_bnds_tac @{thms vdp_fas_def} 30 35 9 14 [(0, 1, "0x000000")] (* "out_vdp_c0.out" *) "" @{context} 1\) lemma vdp_c1: "t \ point_ivl 7 \ (x, y) \ point_ivl (1.4, 2.4) \ t \ vdp.existence_ivl0 (x, y) \ vdp.flow0 (x, y) t \ {(1.870, 0.9887) .. (1.875, 1.001)} \ vdp.Dflow (x, y) t o\<^sub>L blinfun_of_list [1,0, 0,1] \ vdp.blinfuns_of_lvivl ([-0.197,-0.399, 0.835, 1.71], [-0.189, -0.387, 0.858, 1.75])" by (tactic \ode'_bnds_tac @{thms vdp_fas_def} 30 80 40 14 [(0, 1, "0x000000", [0,1])] ["0x7f0000", "0x00007f"] (* "out_vdp_c1.out" *) "" @{context} 1\) end subsection \Example Classical Lorenz Equations\ experiment begin schematic_goal lorenz_fas: "[10 * (X!1 - X!0), X!0 * (28 - X!2) - X!1, X!0 * X!1 - 8 /3 * X!2] = interpret_floatariths ?fas X" by (reify_floatariths) concrete_definition lorenz_fas uses lorenz_fas interpretation lorenz: ode_interpretation true_form UNIV lorenz_fas "\(x, y, z). (10 * (y - x), x * (28 - z) - y, x * y - 8 / 3 * z) ::real*real*real" "three::3" by standard (auto simp: lorenz_fas_def less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def mk_ode_ops_def eucl_of_list_prod power2_eq_square intro!: isFDERIV_I) text \Taken from "A database of rigorous and high-precision periodic orbits of the Lorenz model" (2015) by Barrio, Dena, Tucker\ lemma lorenz_c0: "t \ point_ivl 1.558652210 \ (x, y, z) \ point_ivl ( -2.147367631, 2.078048211, 27) \ t \ lorenz.existence_ivl0 (x, y, z) \ lorenz.flow0 (x, y, z) t \ {(-2.1515, 2.0738, 26.992) .. (-2.1434, 2.0824, 27.009)}" by (tactic \ode_bnds_tac @{thms lorenz_fas_def} 30 35 9 16 [(0, 1, "0x7f0000"), (0, 2, "0x00007f")] (* "out_lorenz_c0.out" *) "" @{context} 1\) lemma lorenz_c1: "t \ point_ivl (FloatR 52299689 (- 25) \ \for C1-info, the target time needs to be a float\) \ (x, y, z) \ point_ivl ( -2.147367631, 2.078048211, 27) \ t \ lorenz.existence_ivl0 (x, y, z) \ lorenz.flow0 (x, y, z) t \ {(-2.158, 2.064, 26.98) .. (-2.137, 2.092, 27.02)} \ lorenz.Dflow (x, y, z) t o\<^sub>L blinfun_of_list [1,0,0, 0,1,0, 0,0,1] \ lorenz.blinfuns_of_lvivl ([-0.535,-1.15,-0.794, 1.49,4.01,0.651, 2.71,6.85,2.11], [-0.479,-1.03,-0.751, 1.58,4.14,0.703, 2.82,7.00,2.19])" by (tactic \ode'_bnds_tac @{thms lorenz_fas_def} 30 80 40 16 [(0, 1, "0x000000", [0,1,2]), (0, 2, "0x7f7f7f", [0,1,2])] ["0x7f0000", "0x007f00", "0x00007f"] (* "out_lorenz_c1.out" *) "" @{context} 1\) end subsection \Bessel Function inspired example\ text \\label{sec:bessel}\ experiment begin unbundle floatarith_notation text \encoding a constant parameter and higher derivatives in the ode...\ schematic_goal bessel_fas: "[0, -1, -(X!3), (((X!1)\<^sup>2 - (X!0)\<^sup>2) * X!2 + (X!1) * (X!3)) / (X!1)\<^sup>2] = interpret_floatariths ?fas X" unfolding power2_eq_square\ \TODO: proper affine implementation of power\ by (reify_floatariths) concrete_definition bessel_fas uses bessel_fas interpretation bessel: ode_interpretation "Less (Var 1) (Num 0)" "{(mu, s, _, _). s < 0}" bessel_fas "\(mu::real, s::real, x::real, x'::real). 
(0::real, -1::real, -x'::real, (((s\<^sup>2 - mu\<^sup>2) * x + s * x') / s\<^sup>2))" "four::4" by standard (auto simp: bessel_fas_def less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def inverse_eq_divide mk_ode_ops_def eucl_of_list_prod power2_eq_square intro!: isFDERIV_I) definition "J0 = 0.765197686557966551449717526103" definition "J0' = 0.440050585744933515959682203719" lemma bessel_result: "t \ {10 .. 10} \ (x, y, j0, j0') \ {(0, -1, 0.765197686557966551449717526103, 0.440050585744933515959682203719) .. (0, -1, 0.765197686557966551449717526103, 0.440050585744933515959682203719)} \ t \ bessel.existence_ivl0 (x, y, j0, j0') \ bessel.flow0 (x, y, j0, j0') t \ {(0, -11.01, -0.18, -0.185) .. (0, -11, -0.165, -0.169)}" by (tactic \ode_bnds_tac @{thms bessel_fas_def} 30 20 7 12 [(1, 2, "0x007f00"), (1, 3, "0x00007f")] (* "out_bessel.out" *) "" @{context} 1\) end subsection \ Oil reservoir in Affine arithmetic \ text \\label{sec:exampleoil}\ experiment begin schematic_goal oil_fas: "[X!1, (X!1)\<^sup>2 - 3 / (0.001 + (X!0)\<^sup>2)] = interpret_floatariths ?fas X" unfolding power2_eq_square\ \TODO: proper affine implementation of power\ by (reify_floatariths) concrete_definition oil_fas uses oil_fas lemma oil_deriv_ok: fixes y::real shows "1 / 1000 + y*y = 0 \ False" proof - have "1 / 1000 + y*y > 0" by (auto intro!: add_pos_nonneg) thus ?thesis by auto qed interpretation oil: ode_interpretation true_form UNIV oil_fas "\(y, z). (z, z\<^sup>2 - 3 / (0.001 + y\<^sup>2))::real*real" "two::2" by standard (auto simp: oil_fas_def oil_deriv_ok less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def inverse_eq_divide mk_ode_ops_def eucl_of_list_prod isDERIV_Power_iff power2_eq_square intro!: isFDERIV_I) theorem oil_20: "t \ {50 .. 50} \ (x, y) \ {(10, 0) .. (10, 0)} \ t \ oil.existence_ivl0 (x, y) \ oil.flow0 (x, y) t \ {(-8.310, -0.2252) .. (-8.257, -0.2236)}" by (tactic \ode_bnds_tac @{thms oil_fas_def} 30 20 7 20 [(0, 1, "0x000000")] (* "out_oil_20.out" *) "" @{context} 1\) theorem oil_30: "t \ {50 .. 50} \ (x, y) \ {(10, 0) .. (10, 0)} \ t \ oil.existence_ivl0 (x, y) \ oil.flow0 (x, y) t \ {(-8.279, -0.2246) .. (-8.276, -0.2245)}" by (tactic \ode_bnds_tac @{thms oil_fas_def} 30 20 7 30 [(0, 1, "0xff0000")] (* "out_oil_30.out" *) "" @{context} 1\) end subsection \Example V in Walter's textbook~\cite{walter}\ experiment begin schematic_goal e3_fas: "[1, (X!1)\<^sup>2 + (X!0)\<^sup>2] = interpret_floatariths ?fas X" unfolding power2_eq_square\ \TODO: proper affine implementation of power\ by (reify_floatariths) concrete_definition e3_fas uses e3_fas interpretation e3: ode_interpretation true_form UNIV e3_fas "\(t, x). (1, x\<^sup>2 + t\<^sup>2)::real*real" "d::2" for d by standard (auto simp: e3_fas_def less_Suc_eq_0_disj nth_Basis_list_prod Basis_list_real_def mk_ode_ops_def eucl_of_list_prod power2_eq_square intro!: isFDERIV_I) lemma e3_flow_mem: "t \ {0.125 .. 0.125} \ (s, x) \ {(0, 1) .. (0, 1)} \ t \ e3.existence_ivl0 (s, x) \ (e3.flow0 (s, x) t \ {(0.124, 1.14347) .. 
(0.126, 1.14376)})" by (tactic \ode_bnds_tac @{thms e3_fas_def} 30 20 7 11 [(0, 1, "0x000000")] (* "out_e2.out" *) "" @{context} 1\) subsubsection \Walter's analytical obtained by in section 9, Example V.\ lemma cos_neq_zeroI: assumes "-pi / 2 < x" "x < pi / 2" shows "cos x \ 0" using assms div2_less_self by (fastforce simp: cos_zero_iff) lemma quadratic_real_max_eq: fixes a b c x::real defines "xm \ -b / (2 * a)" shows "a < 0 \ a * x\<^sup>2 + b * x + c \ a * xm\<^sup>2 + b * xm + c" unfolding xm_def by (sos "(((((A<0 * (A<1 * A<2)) * R<1) + ((A<2 * R<1) * (R<1 * [a^2]^2)))) & ((((A<0 * (A<1 * A<2)) * R<1) + ((R<1 * (R<1/2 * [2*a^4*x + a^3*b]^2)) + ((A<0 * (A<1 * R<1)) * (R<1/2 * [2*a^2*x + a*b]^2))))))") definition "f = (\t x. t\<^sup>2 + x\<^sup>2::real)" lemma ll: "local_lipschitz UNIV UNIV f" by (rule c1_implies_local_lipschitz[where f'="\(t, x). blinfun_scaleR_left (blinfun_scaleR_left id_blinfun 2) x"]) (auto intro!: continuous_intros derivative_eq_intros ext simp: f_def split_beta' blinfun.bilinear_simps) lemma cont: "continuous_on UNIV (\t. f t x)" by (auto intro!: continuous_intros simp: f_def) interpretation ll_on_open_real UNIV f UNIV by unfold_locales (auto intro!: ll cont) definition f1::"real \ real \ real" where "f1 t x \ x\<^sup>2" definition v::"real \ real" where "v t = 1 / (1 - t)" lemma v: "(v solves_ode f1) {0..<1} UNIV" and "v 0 = 1" by (auto intro!: solves_odeI derivative_eq_intros ext simp: v_def has_vderiv_on_def f1_def has_vector_derivative_def divide_simps power2_eq_square) lemma lower_bound: assumes y: "(y solves_ode f) {0 ..< t1} UNIV" and iv: "y 0 = 1" and t: "t1 < 1" "t \ {0 ..< t1}" shows "v t \ y t" using y[THEN solves_ode_on_subset] solves_odeD(1)[OF v, THEN has_vderiv_on_subset] apply (rule lower_solution[where ?t0.0 = 0]) using t by (auto simp: f1_def f_def v_def iv) definition f2::"real \ real \ real" where "f2 t x \ 1 + x\<^sup>2" definition w::"real \ real" where "w t = tan (t + pi / 4)" lemma w: "(w solves_ode f2) {0.. {0 ..< t1}" shows "y t \ w t" using y[THEN solves_ode_on_subset] solves_odeD(1)[OF w,THEN has_vderiv_on_subset] apply (rule upper_solution[where ?t0.0 = 0]) using t pi_less_4 by (auto simp: f2_def f_def w_def iv abs_square_less_1 tan_45) context fixes a::real assumes a: "a > 1" assumes cond: "1 < 16 * a\<^sup>2 * (a - 1)" begin definition w1::"real \ real" where "w1 t = 1 / (1 - a * t)" definition w1'::"real \ real" where "w1' t = a / ((1 - a * t)\<^sup>2)" lemma w1_lower: assumes s: "s < 1 / a" "0 < s" shows "f s (w1 s) < w1' s" proof - define smax where "smax \ 1 / (2 * a)" have "a - 1 > (1 - a * s)\<^sup>2 * s\<^sup>2" proof - have "sqrt ((1 - a * s)\<^sup>2 * s\<^sup>2) = (1 - a * s) * s" using a s by (auto simp: power_mult_distrib[symmetric] algebra_simps divide_simps split: if_split_asm) also have "\ = (- a) * s\<^sup>2 + 1 * s + 0" by (simp add: algebra_simps power2_eq_square) also have "\ \ (1 - a * smax) * smax" apply (rule order_trans[OF quadratic_real_max_eq]) using a s by (auto simp: smax_def divide_simps algebra_simps power2_eq_square) finally have "((1 - a * s)\<^sup>2 * s\<^sup>2) \ (1 - a * smax)\<^sup>2 * smax\<^sup>2" unfolding power_mult_distrib[symmetric] by (simp add: sqrt_le_D) also have "\ < a - 1" using a s cond by (auto simp add: smax_def power2_eq_square algebra_simps divide_simps split: if_splits) finally show ?thesis . 
qed then have "s\<^sup>2 < (a - 1) / (1 - a * s)\<^sup>2" using a s by (auto simp: divide_simps algebra_simps) then have "s\<^sup>2 + (1 / (1 - a * s))\<^sup>2 < a / (1 - a * s)\<^sup>2" by (simp add: diff_divide_distrib algebra_simps power_one_over) then show ?thesis by (simp add: w1_def w1'_def f_def) qed lemma w1: "(w1 has_vderiv_on w1') {0..<1/a}" by (auto intro!: derivative_eq_intros split: if_splits simp: w1_def w1'_def has_vderiv_on_def has_vector_derivative_def algebra_simps divide_simps power2_eq_square) lemma upper_bound1: assumes y: "(y solves_ode f) {0 ..< t1} UNIV" and iv: "y 0 = 1" and t: "t1 < 1 / a" "t \ {0 ..< t1}" shows "y t \ w1 t" proof - have less_one: "s * a < 1" if a1: "s < t1" and a2: "t1 * a < 1" for s proof - have "0 < a" using a by linarith then have "a * s \ t1 * a" - using a1 by (metis (no_types) less_eq_real_def mult.commute real_mult_le_cancel_iff2) + using a1 by (metis (no_types) less_eq_real_def mult.commute mult_le_cancel_iff2) then have "a * s < 1" using a2 by linarith then show ?thesis by (metis mult.commute) qed show ?thesis using y[THEN solves_ode_on_subset] w1[THEN has_vderiv_on_subset] w1_lower apply (rule upper_solution[where ?t0.0 = 0 and S = "{0 ..< t1}"]) using t a by (auto simp: less_one w1_def f_def w1'_def iv divide_simps) qed end definition w16::"real \ real" where "w16 t = 16 / (16 - 17 * t)" lemma upper_bound16: assumes y: "(y solves_ode f) {0 ..< t1} UNIV" and iv: "y 0 = 1" and t: "t1 < 16 / 17" "t \ {0 ..< t1}" shows "y t \ w16 t" using upper_bound1[of "17/16" y t1 t] assms by (simp add: divide_simps w1_def w16_def) lemma approx_lower_bound: "1.142857139 \ v 0.125" unfolding v_def by (approximation 40) lemma approx_upper_bound: "w 0.125 \ 1.287426955" unfolding w_def by (approximation 40) lemma approx_upper_bound16: "w16 0.125 \ 1.153153155" unfolding w16_def by (approximation 40) end subsection \Building\ experiment begin text \Benchmark from the ARCH friendly competition: group AFF. This example is out of the scope of the current ODE solver: dimension is very high, and without matrix exponential, the ODE solver is too unstable.\ definition "x1' x == x ! 25" definition "x2' x == x ! 26" definition "x3' x == x ! 27" definition "x4' x == x ! 28" definition "x5' x == x ! 29" definition "x6' x == x ! 30" definition "x7' x == x ! 31" definition "x8' x == x ! 32" definition "x9' x == x ! 33" definition "x10' x == x ! 34" definition "x11' x == x ! 35" definition "x12' x == x ! 36" definition "x13' x == x ! 37" definition "x14' x == x ! 38" definition "x15' x == x ! 39" definition "x16' x == x ! 40" definition "x17' x == x ! 41" definition "x18' x == x ! 42" definition "x19' x == x ! 43" definition "x20' x == x ! 44" definition "x21' x == x ! 45" definition "x22' x == x ! 46" definition "x23' x == x ! 47" definition "x24' x == x ! 
48" definition "x25' x == 0.013697*(x!49) - 606.16*x!1 + 34.67*x!2 - 5.0432*x!3 - 734.04*x!4 + 124.55*x!5 - 302.91*x!6 - 98.21*x!7 - 2.204*x!8 - 10.668*x!9 - 41.864*x!10 + 8.1784*x!11 + 51.36*x!12 - 10.956*x!13 - 2.3086*x!14 - 11.998*x!15 - 42.003*x!16 + 4.9418*x!17 - 1.5216*x!18 - 10.35*x!19 + 8.6969*x!20 + 14.96*x!21 + 37.885*x!22 - 24.546*x!23 - 19.3*x!24 - 1.1333*x!25 + 0.036523*x!26 - 0.0053128*x!27 - 0.77328*x!28 + 0.13121*x!29 - 0.3191*x!30 - 0.10346*x!31 - 0.0023218*x!32 - 0.011238*x!33 - 0.044102*x!34 + 0.0086155*x!35 + 0.054106*x!36 - 0.011541*x!37 - 0.002432*x!38 - 0.012639*x!39 - 0.044248*x!40 + 0.0052059*x!41 - 0.0016029*x!42 - 0.010904*x!43 + 0.0091618*x!44 + 0.01576*x!45 + 0.03991*x!46 - 0.025858*x!47 - 0.020331*x!48" definition "x26' x == 33.51*x!1 - 664.52*x!2 + 79.773*x!3 + 207.45*x!4 + 829.03*x!5 + 94.055*x!6 - 4.3133*x!7 - 136.48*x!8 + 16.276*x!9 + 28.243*x!10 - 76.936*x!11 - 0.55452*x!12 + 30.931*x!13 + 51.904*x!14 + 11.87*x!15 + 9.6948*x!16 - 15.613*x!17 + 15.14*x!18 + 7.6133*x!19 - 5.7122*x!20 + 3.7841*x!21 - 21.172*x!22 - 29.014*x!23 + 10.245*x!24 + 0.035301*x!25 - 1.1948*x!26 + 0.084037*x!27 + 0.21854*x!28 + 0.87334*x!29 + 0.099082*x!30 - 0.0045438*x!31 - 0.14378*x!32 + 0.017146*x!33 + 0.029753*x!34 - 0.081048*x!35 - 0.00058416*x!36 + 0.032585*x!37 + 0.054679*x!38 + 0.012505*x!39 + 0.010213*x!40 - 0.016447*x!41 + 0.01595*x!42 + 0.0080202*x!43 - 0.0060176*x!44 + 0.0039864*x!45 - 0.022303*x!46 - 0.030565*x!47 + 0.010792*x!48" definition "x27' x == 76.572*x!2 - 3.4577*x!1 - 799.41*x!3 - 347.78*x!4 - 142.59*x!5 + 867.7*x!6 - 165.08*x!7 + 36.992*x!8 + 91.256*x!9 - 66.266*x!10 + 26.434*x!11 + 39.519*x!12 + 67.373*x!13 - 32.177*x!14 + 17.164*x!15 + 133.45*x!16 + 34.378*x!17 + 6.5652*x!18 + 27.869*x!19 - 51.473*x!20 + 41.979*x!21 - 44.928*x!22 + 34.417*x!23 + 30.824*x!24 - 0.0036425*x!25 + 0.080665*x!26 - 1.3369*x!27 - 0.36637*x!28 - 0.15021*x!29 + 0.91408*x!30 - 0.17391*x!31 + 0.038969*x!32 + 0.096134*x!33 - 0.069808*x!34 + 0.027847*x!35 + 0.041632*x!36 + 0.070975*x!37 - 0.033897*x!38 + 0.018082*x!39 + 0.14058*x!40 + 0.036215*x!41 + 0.0069161*x!42 + 0.029358*x!43 - 0.054224*x!44 + 0.044223*x!45 - 0.04733*x!46 + 0.036257*x!47 + 0.032471*x!48" definition "x28' x == 206.8*x!2 - 734.86*x!1 - 348.32*x!3 - 1910.4*x!4 - 135.41*x!5 - 218.05*x!6 - 1130.0*x!7 + 263.61*x!8 - 178.6*x!9 - 285.02*x!10 + 65.125*x!11 + 125.09*x!12 - 78.164*x!13 - 2.0288*x!14 + 11.414*x!15 - 54.481*x!16 - 17.625*x!17 - 14.273*x!18 - 13.334*x!19 + 11.514*x!20 + 88.076*x!21 + 56.304*x!22 - 12.53*x!23 + 19.497*x!24 - 0.77414*x!25 + 0.21785*x!26 - 0.36694*x!27 - 2.5072*x!28 - 0.14264*x!29 - 0.2297*x!30 - 1.1904*x!31 + 0.2777*x!32 - 0.18815*x!33 - 0.30026*x!34 + 0.068606*x!35 + 0.13177*x!36 - 0.082342*x!37 - 0.0021372*x!38 + 0.012024*x!39 - 0.057393*x!40 - 0.018567*x!41 - 0.015036*x!42 - 0.014047*x!43 + 0.012129*x!44 + 0.092784*x!45 + 0.059314*x!46 - 0.013199*x!47 + 0.020539*x!48" definition "x29' x == 126.41*x!1 + 830.41*x!2 - 142.11*x!3 - 134.84*x!4 - 1968.7*x!5 + 54.561*x!6 + 141.8*x!7 + 1183.9*x!8 + 118.23*x!9 - 32.897*x!10 + 295.19*x!11 - 13.727*x!12 - 6.9809*x!13 - 150.49*x!14 - 16.631*x!15 + 76.503*x!16 + 25.453*x!17 + 26.848*x!18 + 8.696*x!19 - 20.18*x!20 - 16.511*x!21 + 8.533*x!22 + 112.09*x!23 + 9.1024*x!24 + 0.13317*x!25 + 0.8748*x!26 - 0.14971*x!27 - 0.14205*x!28 - 2.5687*x!29 + 0.057478*x!30 + 0.14938*x!31 + 1.2472*x!32 + 0.12455*x!33 - 0.034656*x!34 + 0.31097*x!35 - 0.014461*x!36 - 0.0073541*x!37 - 0.15853*x!38 - 0.01752*x!39 + 0.080592*x!40 + 0.026813*x!41 + 0.028283*x!42 + 0.0091609*x!43 - 
0.021259*x!44 - 0.017394*x!45 + 0.0089891*x!46 + 0.11808*x!47 + 0.009589*x!48" definition "x30' x == 89.14*x!2 - 308.62*x!1 + 845.85*x!3 - 220.15*x!4 + 49.494*x!5 - 2326.8*x!6 + 132.76*x!7 + 67.169*x!8 - 1400.6*x!9 + 24.168*x!10 + 24.278*x!11 - 85.266*x!12 - 294.4*x!13 + 127.92*x!14 - 24.456*x!15 - 379.06*x!16 - 100.16*x!17 - 46.537*x!18 - 67.411*x!19 + 169.3*x!20 - 100.19*x!21 + 200.07*x!22 - 126.91*x!23 - 44.045*x!24 - 0.32511*x!25 + 0.093904*x!26 + 0.89106*x!27 - 0.23192*x!28 + 0.052139*x!29 - 2.9459*x!30 + 0.13986*x!31 + 0.07076*x!32 - 1.4755*x!33 + 0.02546*x!34 + 0.025576*x!35 - 0.089824*x!36 - 0.31014*x!37 + 0.13476*x!38 - 0.025763*x!39 - 0.39932*x!40 - 0.10551*x!41 - 0.049024*x!42 - 0.071015*x!43 + 0.17835*x!44 - 0.10555*x!45 + 0.21077*x!46 - 0.13369*x!47 - 0.046399*x!48" definition "x31' x == 140.17*x!5 - 3.2644*x!2 - 166.03*x!3 - 1130.1*x!4 - 97.135*x!1 + 149.9*x!6 - 2042.2*x!7 + 77.153*x!8 + 122.7*x!9 - 1223.6*x!10 - 66.505*x!11 + 249.18*x!12 - 16.632*x!13 + 2.6274*x!14 + 56.399*x!15 - 46.871*x!16 - 118.4*x!17 - 62.011*x!18 - 2.1658*x!19 - 30.269*x!20 + 130.25*x!21 + 5.2191*x!22 - 24.931*x!23 + 6.3303*x!24 - 0.10233*x!25 - 0.0034389*x!26 - 0.17491*x!27 - 1.1905*x!28 + 0.14767*x!29 + 0.15792*x!30 - 2.6461*x!31 + 0.081277*x!32 + 0.12926*x!33 - 1.289*x!34 - 0.07006*x!35 + 0.2625*x!36 - 0.01752*x!37 + 0.0027678*x!38 + 0.059414*x!39 - 0.049377*x!40 - 0.12472*x!41 - 0.065326*x!42 - 0.0022816*x!43 - 0.031887*x!44 + 0.13721*x!45 + 0.005498*x!46 - 0.026264*x!47 + 0.0066687*x!48" definition "x32' x == 34.596*x!3 - 138.29*x!2 - 2.5435*x!1 + 264.02*x!4 + 1185.0*x!5 + 58.931*x!6 + 77.078*x!7 - 2124.4*x!8 - 54.178*x!9 + 240.2*x!10 - 1279.6*x!11 + 14.033*x!12 + 37.313*x!13 + 238.99*x!14 + 1.7648*x!15 - 96.468*x!16 + 33.324*x!17 - 103.99*x!18 - 129.69*x!19 + 35.961*x!20 + 34.916*x!21 - 10.09*x!22 - 99.84*x!23 - 13.713*x!24 - 0.0026795*x!25 - 0.14568*x!26 + 0.036445*x!27 + 0.27814*x!28 + 1.2484*x!29 + 0.062081*x!30 + 0.081198*x!31 - 2.7327*x!32 - 0.057074*x!33 + 0.25304*x!34 - 1.348*x!35 + 0.014783*x!36 + 0.039308*x!37 + 0.25177*x!38 + 0.0018592*x!39 - 0.10162*x!40 + 0.035106*x!41 - 0.10955*x!42 - 0.13662*x!43 + 0.037884*x!44 + 0.036783*x!45 - 0.010629*x!46 - 0.10518*x!47 - 0.014446*x!48" definition "x33' x == 54.576*x!3 - 4.3846*x!2 - 11.068*x!1 - 167.43*x!4 + 119.06*x!5 - 1413.1*x!6 + 116.3*x!7 - 50.978*x!8 - 2668.8*x!9 + 58.12*x!10 + 61.221*x!11 - 299.51*x!12 - 1455.4*x!13 + 600.59*x!14 - 14.962*x!15 - 609.81*x!16 - 179.98*x!17 - 140.89*x!18 - 67.425*x!19 + 261.44*x!20 - 215.8*x!21 + 374.46*x!22 - 267.83*x!23 - 167.39*x!24 - 0.01166*x!25 - 0.004619*x!26 + 0.057493*x!27 - 0.17638*x!28 + 0.12543*x!29 - 1.4886*x!30 + 0.12251*x!31 - 0.053703*x!32 - 3.3062*x!33 + 0.061226*x!34 + 0.064493*x!35 - 0.31552*x!36 - 1.5332*x!37 + 0.63269*x!38 - 0.015762*x!39 - 0.6424*x!40 - 0.1896*x!41 - 0.14842*x!42 - 0.071029*x!43 + 0.27541*x!44 - 0.22734*x!45 + 0.39448*x!46 - 0.28215*x!47 - 0.17634*x!48" definition "x34' x == 29.028*x!2 - 42.219*x!1 - 59.833*x!3 - 284.78*x!4 - 31.96*x!5 + 16.189*x!6 - 1220.6*x!7 + 238.17*x!8 + 45.965*x!9 - 2290.9*x!10 + 219.74*x!11 + 1329.3*x!12 - 104.55*x!13 - 231.45*x!14 + 196.73*x!15 - 91.293*x!16 - 418.1*x!17 - 203.17*x!18 + 77.147*x!19 - 64.407*x!20 + 127.6*x!21 - 66.422*x!22 + 17.647*x!23 + 14.772*x!24 - 0.044476*x!25 + 0.03058*x!26 - 0.063031*x!27 - 0.3*x!28 - 0.033668*x!29 + 0.017054*x!30 - 1.2858*x!31 + 0.2509*x!32 + 0.048422*x!33 - 2.9081*x!34 + 0.23149*x!35 + 1.4003*x!36 - 0.11014*x!37 - 0.24382*x!38 + 0.20725*x!39 - 0.096173*x!40 - 0.44045*x!41 - 0.21403*x!42 + 0.08127*x!43 
- 0.06785*x!44 + 0.13442*x!45 - 0.069972*x!46 + 0.01859*x!47 + 0.015562*x!48" definition "x35' x == 5.3147*x!1 - 77.32*x!2 + 33.098*x!3 + 62.873*x!4 + 295.95*x!5 + 11.239*x!6 - 66.808*x!7 - 1279.2*x!8 + 57.198*x!9 + 221.07*x!10 - 2387.6*x!11 + 120.01*x!12 + 572.58*x!13 + 1229.1*x!14 + 124.63*x!15 - 56.089*x!16 + 180.3*x!17 - 312.48*x!18 - 445.95*x!19 - 24.274*x!20 + 86.343*x!21 - 2.8317*x!22 - 45.574*x!23 - 33.346*x!24 + 0.0055988*x!25 - 0.081453*x!26 + 0.034867*x!27 + 0.066234*x!28 + 0.31177*x!29 + 0.011839*x!30 - 0.070379*x!31 - 1.3476*x!32 + 0.060255*x!33 + 0.23288*x!34 - 3.0099*x!35 + 0.12643*x!36 + 0.60318*x!37 + 1.2948*x!38 + 0.13129*x!39 - 0.059087*x!40 + 0.18994*x!41 - 0.32918*x!42 - 0.46979*x!43 - 0.025571*x!44 + 0.090958*x!45 - 0.002983*x!46 - 0.04801*x!47 - 0.035129*x!48" definition "x36' x == 48.409*x!1 - 3.7335*x!2 + 32.385*x!3 + 123.4*x!4 - 10.509*x!5 - 100.46*x!6 + 251.0*x!7 + 15.115*x!8 - 306.96*x!9 + 1327.9*x!10 + 119.59*x!11 - 2059.6*x!12 - 316.77*x!13 + 309.66*x!14 - 870.95*x!15 - 176.05*x!16 + 612.8*x!17 + 311.81*x!18 - 23.027*x!19 + 201.61*x!20 - 171.24*x!21 + 169.57*x!22 - 91.873*x!23 - 44.001*x!24 + 0.050997*x!25 - 0.0039331*x!26 + 0.034116*x!27 + 0.13*x!28 - 0.011071*x!29 - 0.10583*x!30 + 0.26442*x!31 + 0.015922*x!32 - 0.32337*x!33 + 1.3989*x!34 + 0.12598*x!35 - 2.6645*x!36 - 0.3337*x!37 + 0.32621*x!38 - 0.9175*x!39 - 0.18546*x!40 + 0.64555*x!41 + 0.32848*x!42 - 0.024258*x!43 + 0.21239*x!44 - 0.18039*x!45 + 0.17864*x!46 - 0.096784*x!47 - 0.046353*x!48" definition "x37' x == 16.772*x!2 - 18.711*x!1 + 31.337*x!3 - 86.799*x!4 - 1.8261*x!5 - 326.22*x!6 - 26.875*x!7 + 48.423*x!8 - 1444.1*x!9 - 111.75*x!10 + 580.84*x!11 - 316.97*x!12 - 2618.4*x!13 + 213.69*x!14 - 227.06*x!15 - 1560.3*x!16 - 475.12*x!17 + 287.08*x!18 + 62.345*x!19 + 349.71*x!20 - 307.69*x!21 + 551.86*x!22 - 413.54*x!23 - 278.79*x!24 - 0.019712*x!25 + 0.017669*x!26 + 0.033012*x!27 - 0.091439*x!28 - 0.0019237*x!29 - 0.34366*x!30 - 0.028311*x!31 + 0.051011*x!32 - 1.5212*x!33 - 0.11773*x!34 + 0.61189*x!35 - 0.33391*x!36 - 3.2531*x!37 + 0.22511*x!38 - 0.23919*x!39 - 1.6437*x!40 - 0.50052*x!41 + 0.30242*x!42 + 0.065677*x!43 + 0.36841*x!44 - 0.32414*x!45 + 0.58136*x!46 - 0.43564*x!47 - 0.29369*x!48" definition "x38' x == 2.1605*x!1 + 59.152*x!2 - 19.397*x!3 + 2.4357*x!4 - 153.32*x!5 + 143.74*x!6 + 5.1672*x!7 + 235.26*x!8 + 606.81*x!9 - 229.22*x!10 + 1225.8*x!11 + 308.88*x!12 + 220.47*x!13 - 2060.7*x!14 - 41.024*x!15 + 847.46*x!16 - 549.36*x!17 + 704.76*x!18 + 575.93*x!19 - 182.76*x!20 + 62.77*x!21 - 246.62*x!22 + 238.45*x!23 + 159.35*x!24 + 0.002276*x!25 + 0.062314*x!26 - 0.020434*x!27 + 0.0025659*x!28 - 0.16151*x!29 + 0.15142*x!30 + 0.0054434*x!31 + 0.24784*x!32 + 0.63924*x!33 - 0.24147*x!34 + 1.2913*x!35 + 0.32539*x!36 + 0.23225*x!37 - 2.6656*x!38 - 0.043217*x!39 + 0.89276*x!40 - 0.57872*x!41 + 0.74243*x!42 + 0.60672*x!43 - 0.19253*x!44 + 0.066125*x!45 - 0.2598*x!46 + 0.25119*x!47 + 0.16786*x!48" definition "x39' x == 11.65*x!2 - 11.108*x!1 + 6.8996*x!3 + 10.745*x!4 - 14.165*x!5 - 24.224*x!6 + 58.6*x!7 + 1.0473*x!8 - 23.411*x!9 + 194.71*x!10 + 124.74*x!11 - 874.49*x!12 - 232.17*x!13 - 36.538*x!14 - 982.44*x!15 - 164.62*x!16 + 658.46*x!17 + 612.29*x!18 + 81.858*x!19 + 340.63*x!20 - 202.91*x!21 + 81.196*x!22 - 56.024*x!23 - 15.279*x!24 - 0.011702*x!25 + 0.012273*x!26 + 0.0072684*x!27 + 0.01132*x!28 - 0.014922*x!29 - 0.025519*x!30 + 0.061732*x!31 + 0.0011033*x!32 - 0.024662*x!33 + 0.20512*x!34 + 0.1314*x!35 - 0.92123*x!36 - 0.24458*x!37 - 0.038491*x!38 - 1.5297*x!39 - 0.17342*x!40 + 0.69366*x!41 + 0.64502*x!42 + 
0.086233*x!43 + 0.35883*x!44 - 0.21375*x!45 + 0.085536*x!46 - 0.059019*x!47 - 0.016096*x!48" definition "x40' x == 0.34677*x!2 - 45.16*x!1 + 87.266*x!3 - 58.989*x!4 + 81.86*x!5 - 393.99*x!6 - 43.617*x!7 - 87.105*x!8 - 585.65*x!9 - 95.775*x!10 - 55.621*x!11 - 175.4*x!12 - 1550.2*x!13 + 849.01*x!14 - 150.24*x!15 - 2831.0*x!16 - 714.73*x!17 - 306.41*x!18 - 276.74*x!19 + 1235.6*x!20 - 213.26*x!21 + 900.37*x!22 - 708.02*x!23 - 408.51*x!24 - 0.047574*x!25 + 0.00036531*x!26 + 0.09193*x!27 - 0.062142*x!28 + 0.086236*x!29 - 0.41505*x!30 - 0.045948*x!31 - 0.091761*x!32 - 0.61696*x!33 - 0.10089*x!34 - 0.058594*x!35 - 0.18478*x!36 - 1.633*x!37 + 0.89439*x!38 - 0.15827*x!39 - 3.477*x!40 - 0.75293*x!41 - 0.32279*x!42 - 0.29153*x!43 + 1.3016*x!44 - 0.22465*x!45 + 0.94849*x!46 - 0.74586*x!47 - 0.43035*x!48" definition "x41' x == 3.5059*x!1 - 19.002*x!2 + 24.585*x!3 - 17.341*x!4 + 25.7*x!5 - 100.44*x!6 - 121.83*x!7 + 37.286*x!8 - 153.78*x!9 - 417.65*x!10 + 181.1*x!11 + 615.65*x!12 - 480.91*x!13 - 550.2*x!14 + 663.43*x!15 - 703.07*x!16 - 2922.4*x!17 - 629.58*x!18 + 982.78*x!19 - 72.585*x!20 + 517.72*x!21 + 573.36*x!22 - 34.103*x!23 - 92.294*x!24 + 0.0036933*x!25 - 0.020017*x!26 + 0.025899*x!27 - 0.018268*x!28 + 0.027073*x!29 - 0.1058*x!30 - 0.12834*x!31 + 0.039279*x!32 - 0.162*x!33 - 0.43998*x!34 + 0.19078*x!35 + 0.64856*x!36 - 0.50662*x!37 - 0.57961*x!38 + 0.69889*x!39 - 0.74065*x!40 - 3.5734*x!41 - 0.66323*x!42 + 1.0353*x!43 - 0.076465*x!44 + 0.54539*x!45 + 0.60401*x!46 - 0.035926*x!47 - 0.097227*x!48" definition "x42' x == 11.689*x!2 - 6.541*x!1 + 14.446*x!3 - 16.841*x!4 + 27.253*x!5 - 50.506*x!6 - 63.95*x!7 - 102.32*x!8 - 140.13*x!9 - 204.16*x!10 - 309.41*x!11 + 316.39*x!12 + 287.25*x!13 + 700.16*x!14 + 615.11*x!15 - 292.97*x!16 - 628.33*x!17 - 1965.7*x!18 - 1181.8*x!19 - 160.14*x!20 + 504.08*x!21 + 10.684*x!22 - 328.24*x!23 - 67.729*x!24 - 0.0068906*x!25 + 0.012313*x!26 + 0.015219*x!27 - 0.017741*x!28 + 0.028709*x!29 - 0.053205*x!30 - 0.067368*x!31 - 0.10779*x!32 - 0.14762*x!33 - 0.21507*x!34 - 0.32595*x!35 + 0.33331*x!36 + 0.3026*x!37 + 0.73758*x!38 + 0.64799*x!39 - 0.30863*x!40 - 0.66191*x!41 - 2.5655*x!42 - 1.245*x!43 - 0.16869*x!44 + 0.53103*x!45 + 0.011255*x!46 - 0.34578*x!47 - 0.071349*x!48" definition "x43' x == 5.9383*x!2 - 11.411*x!1 + 26.834*x!3 - 15.359*x!4 + 6.1346*x!5 - 51.136*x!6 - 1.6072*x!7 - 127.63*x!8 - 64.662*x!9 + 76.069*x!10 - 444.22*x!11 - 20.062*x!12 + 77.799*x!13 + 570.73*x!14 + 84.558*x!15 - 263.82*x!16 + 983.07*x!17 - 1184.4*x!18 - 2653.4*x!19 + 240.05*x!20 + 17.029*x!21 - 433.29*x!22 - 721.56*x!23 - 85.171*x!24 - 0.012021*x!25 + 0.0062557*x!26 + 0.028268*x!27 - 0.01618*x!28 + 0.0064625*x!29 - 0.05387*x!30 - 0.0016931*x!31 - 0.13445*x!32 - 0.068118*x!33 + 0.080135*x!34 - 0.46796*x!35 - 0.021135*x!36 + 0.081958*x!37 + 0.60124*x!38 + 0.089078*x!39 - 0.27792*x!40 + 1.0356*x!41 - 1.2477*x!42 - 3.2899*x!43 + 0.25288*x!44 + 0.017939*x!45 - 0.45645*x!46 - 0.76013*x!47 - 0.089724*x!48" definition "x44' x == 10.661*x!1 + 0.75629*x!2 - 31.37*x!3 + 18.757*x!4 - 29.414*x!5 + 193.91*x!6 - 34.395*x!7 + 32.908*x!8 + 271.89*x!9 - 54.429*x!10 - 33.158*x!11 + 199.94*x!12 + 354.88*x!13 - 181.87*x!14 + 333.88*x!15 + 1248.3*x!16 - 59.084*x!17 - 157.94*x!18 + 239.26*x!19 - 1528.9*x!20 + 772.83*x!21 - 858.92*x!22 + 734.27*x!23 + 372.63*x!24 + 0.011231*x!25 + 0.00079672*x!26 - 0.033047*x!27 + 0.01976*x!28 - 0.030986*x!29 + 0.20427*x!30 - 0.036234*x!31 + 0.034667*x!32 + 0.28643*x!33 - 0.057338*x!34 - 0.03493*x!35 + 0.21063*x!36 + 0.37385*x!37 - 0.19159*x!38 + 0.35173*x!39 + 1.315*x!40 - 0.062242*x!41 
- 0.16638*x!42 + 0.25204*x!43 - 2.1053*x!44 + 0.81414*x!45 - 0.90483*x!46 + 0.77352*x!47 + 0.39255*x!48" definition "x45' x == 21.145*x!1 - 0.67362*x!2 + 25.339*x!3 + 82.664*x!4 - 18.143*x!5 - 108.58*x!6 + 131.04*x!7 + 40.675*x!8 - 205.5*x!9 + 125.37*x!10 + 92.038*x!11 - 169.72*x!12 - 294.36*x!13 + 57.772*x!14 - 200.48*x!15 - 211.37*x!16 + 512.5*x!17 + 500.79*x!18 + 15.006*x!19 + 785.7*x!20 - 3685.6*x!21 + 248.17*x!22 - 125.37*x!23 - 428.07*x!24 + 0.022275*x!25 - 0.00070963*x!26 + 0.026694*x!27 + 0.087083*x!28 - 0.019113*x!29 - 0.11438*x!30 + 0.13805*x!31 + 0.04285*x!32 - 0.21648*x!33 + 0.13207*x!34 + 0.096957*x!35 - 0.17879*x!36 - 0.31009*x!37 + 0.06086*x!38 - 0.2112*x!39 - 0.22266*x!40 + 0.5399*x!41 + 0.52756*x!42 + 0.015808*x!43 + 0.8277*x!44 - 4.3773*x!45 + 0.26143*x!46 - 0.13208*x!47 - 0.45095*x!48" definition "x46' x == 30.438*x!1 - 10.312*x!2 - 14.559*x!3 + 71.32*x!4 + 9.4074*x!5 + 217.12*x!6 + 3.4082*x!7 - 22.61*x!8 + 366.85*x!9 - 62.437*x!10 - 17.337*x!11 + 170.53*x!12 + 551.59*x!13 - 246.57*x!14 + 70.631*x!15 + 918.77*x!16 + 591.41*x!17 + 17.745*x!18 - 424.27*x!19 - 874.95*x!20 + 255.27*x!21 - 4294.4*x!22 + 510.73*x!23 + 1033.1*x!24 + 0.032064*x!25 - 0.010863*x!26 - 0.015337*x!27 + 0.075133*x!28 + 0.0099102*x!29 + 0.22872*x!30 + 0.0035904*x!31 - 0.023818*x!32 + 0.38646*x!33 - 0.065775*x!34 - 0.018263*x!35 + 0.17964*x!36 + 0.58107*x!37 - 0.25975*x!38 + 0.074406*x!39 + 0.96788*x!40 + 0.62302*x!41 + 0.018694*x!42 - 0.44695*x!43 - 0.92172*x!44 + 0.26891*x!45 - 5.0186*x!46 + 0.53803*x!47 + 1.0883*x!48" definition "x47' x == 11.835*x!3 - 37.47*x!2 - 18.027*x!1 - 25.981*x!4 + 110.26*x!5 - 133.09*x!6 - 21.113*x!7 - 89.428*x!8 - 265.96*x!9 + 11.486*x!10 - 31.545*x!11 - 88.775*x!12 - 389.13*x!13 + 228.95*x!14 - 47.085*x!15 - 703.45*x!16 - 45.639*x!17 - 333.53*x!18 - 724.09*x!19 + 748.64*x!20 - 119.88*x!21 + 488.53*x!22 - 3962.5*x!23 - 875.81*x!24 - 0.01899*x!25 - 0.039473*x!26 + 0.012468*x!27 - 0.027369*x!28 + 0.11615*x!29 - 0.1402*x!30 - 0.022242*x!31 - 0.094208*x!32 - 0.28018*x!33 + 0.0121*x!34 - 0.033231*x!35 - 0.093521*x!36 - 0.40993*x!37 + 0.24119*x!38 - 0.049602*x!39 - 0.74105*x!40 - 0.048079*x!41 - 0.35136*x!42 - 0.7628*x!43 + 0.78865*x!44 - 0.12629*x!45 + 0.51464*x!46 - 4.669*x!47 - 0.92262*x!48" definition "x48' x == 12.708*x!1 + 4.8669*x!2 - 6.4356*x!3 + 9.4963*x!4 + 3.5537*x!5 - 74.352*x!6 + 13.537*x!7 - 16.221*x!8 - 158.43*x!9 + 2.6078*x!10 - 31.934*x!11 - 48.095*x!12 - 252.14*x!13 + 158.72*x!14 - 25.236*x!15 - 416.39*x!16 - 103.09*x!17 - 75.108*x!18 - 90.118*x!19 + 375.82*x!20 - 425.25*x!21 + 1006.8*x!22 - 878.11*x!23 - 4455.6*x!24 + 0.013388*x!25 + 0.0051271*x!26 - 0.0067796*x!27 + 0.010004*x!28 + 0.0037436*x!29 - 0.078327*x!30 + 0.01426*x!31 - 0.017088*x!32 - 0.1669*x!33 + 0.0027471*x!34 - 0.033641*x!35 - 0.050665*x!36 - 0.26562*x!37 + 0.16721*x!38 - 0.026585*x!39 - 0.43865*x!40 - 0.1086*x!41 - 0.079123*x!42 - 0.094935*x!43 + 0.39591*x!44 - 0.44798*x!45 + 1.0606*x!46 - 0.92505*x!47 - 5.1884*x!48" definition "t' x == 1" definition "u1 x == x ! 
49" schematic_goal building_fas: "[t' X, x1' X, x2' X, x3' X, x4' X, x5' X, x6' X, x7' X, x8' X, x9' X, x10' X, x11' X, x12' X, x13' X, x14' X, x15' X, x16' X, x17' X, x18' X, x19' X, x20' X, x21' X, x22' X, x23' X, x24' X, x25' X, x26' X, x27' X, x28' X, x29' X, x30' X, x31' X, x32' X, x33' X, x34' X, x35' X, x36' X, x37' X, x38' X, x39' X, x40' X, x41' X, x42' X, x43' X, x44' X, x45' X, x46' X, x47' X, x48' X, u1 X] = interpret_floatariths ?fas X" unfolding t'_def x1'_def x2'_def x3'_def x4'_def x5'_def x6'_def x7'_def x8'_def x9'_def x10'_def x11'_def x12'_def x13'_def x14'_def x15'_def x16'_def x17'_def x18'_def x19'_def x20'_def x21'_def x22'_def x23'_def x24'_def x25'_def x26'_def x27'_def x28'_def x29'_def x30'_def x31'_def x32'_def x33'_def x34'_def x35'_def x36'_def x37'_def x38'_def x39'_def x40'_def x41'_def x42'_def x43'_def x44'_def x45'_def x46'_def x47'_def x48'_def u1_def by (reify_floatariths) concrete_definition building_fas uses building_fas end end diff --git a/thys/Perron_Frobenius/Perron_Frobenius.thy b/thys/Perron_Frobenius/Perron_Frobenius.thy --- a/thys/Perron_Frobenius/Perron_Frobenius.thy +++ b/thys/Perron_Frobenius/Perron_Frobenius.thy @@ -1,272 +1,272 @@ (* Author: R. Thiemann *) subsection \Perron-Frobenius theorem via Brouwer's fixpoint theorem.\ theory Perron_Frobenius imports "HOL-Analysis.Brouwer_Fixpoint" Perron_Frobenius_Aux begin text \We follow the textbook proof of Serre \cite[Theorem 5.2.1]{SerreMatrices}.\ context fixes A :: "complex ^ 'n ^ 'n :: finite" assumes rnnA: "real_non_neg_mat A" begin private abbreviation(input) sr where "sr \ spectral_radius A" private definition max_v_ev :: "(complex^'n) \ complex" where "max_v_ev = (SOME v_ev. eigen_vector A (fst v_ev) (snd v_ev) \ norm (snd v_ev) = sr)" private definition "max_v = (1 / norm1 (fst max_v_ev)) *\<^sub>R fst max_v_ev" private definition "max_ev = snd max_v_ev" private lemma max_v_ev: "eigen_vector A max_v max_ev" "norm max_ev = sr" "norm1 max_v = 1" proof - obtain v ev where id: "max_v_ev = (v,ev)" by force from spectral_radius_ev[of A] someI_ex[of "\ v_ev. eigen_vector A (fst v_ev) (snd v_ev) \ norm (snd v_ev) = sr", folded max_v_ev_def, unfolded id] have v: "eigen_vector A v ev" and ev: "norm ev = sr" by auto from normalize_eigen_vector[OF v] ev show "eigen_vector A max_v max_ev" "norm max_ev = sr" "norm1 max_v = 1" unfolding max_v_def max_ev_def id by auto qed text \In the definition of S, we use the linear norm instead of the default euclidean norm which is defined via the type-class. The reason is that S is not convex if one uses the euclidean norm.\ private definition B :: "real ^ 'n ^ 'n" where "B \ \ i j. Re (A $ i $ j)" private definition S where "S = {v :: real ^ 'n . norm1 v = 1 \ (\ i. v $ i \ 0) \ (\ i. 
(B *v v) $ i \ sr * (v $ i))}" private definition f :: "real ^ 'n \ real ^ 'n" where "f v = (1 / norm1 (B *v v)) *\<^sub>R (B *v v)" private lemma closedS: "closed S" unfolding S_def matrix_vector_mult_def[abs_def] proof (intro closed_Collect_conj closed_Collect_all closed_Collect_le closed_Collect_eq) show "continuous_on UNIV norm1" by (simp add: continuous_at_imp_continuous_on) qed (auto intro!: continuous_intros continuous_on_component) private lemma boundedS: "bounded S" proof - { fix v :: "real ^ 'n" from norm1_ge_norm[of v] have "norm1 v = 1 \ norm v \ 1" by auto } thus ?thesis unfolding S_def bounded_iff by (auto intro!: exI[of _ 1]) qed private lemma compactS: "compact S" using boundedS closedS by (simp add: compact_eq_bounded_closed) private lemmas rnn = real_non_neg_matD[OF rnnA] lemma B_norm: "B $ i $ j = norm (A $ i $ j)" using rnn[of i j] by (cases "A $ i $ j", auto simp: B_def) lemma mult_B_mono: assumes "\ i. v $ i \ w $ i" shows "(B *v v) $ i \ (B *v w) $ i" unfolding matrix_vector_mult_def vec_lambda_beta by (rule sum_mono, rule mult_left_mono[OF assms], unfold B_norm, auto) private lemma non_emptyS: "S \ {}" proof - let ?v = "(\ i. norm (max_v $ i)) :: real ^ 'n" have "norm1 max_v = 1" by (rule max_v_ev(3)) hence nv: "norm1 ?v = 1" unfolding norm1_def by auto { fix i have "sr * (?v $ i) = sr * norm (max_v $ i)" by auto also have "\ = (norm max_ev) * norm (max_v $ i)" using max_v_ev by auto also have "\ = norm ((max_ev *s max_v) $ i)" by (auto simp: norm_mult) also have "max_ev *s max_v = A *v max_v" using max_v_ev(1)[unfolded eigen_vector_def] by auto also have "norm ((A *v max_v) $ i) \ (B *v ?v) $ i" unfolding matrix_vector_mult_def vec_lambda_beta by (rule sum_norm_le, auto simp: norm_mult B_norm) finally have "sr * (?v $ i) \ (B *v ?v) $ i" . } note le = this have "?v \ S" unfolding S_def using nv le by auto thus ?thesis by blast qed private lemma convexS: "convex S" proof (rule convexI) fix v w a b assume *: "v \ S" "w \ S" "0 \ a" "0 \ b" "a + b = (1 :: real)" let ?lin = "a *\<^sub>R v + b *\<^sub>R w" from * have 1: "norm1 v = 1" "norm1 w = 1" unfolding S_def by auto have "norm1 ?lin = a * norm1 v + b * norm1 w" unfolding norm1_def sum_distrib_left sum.distrib[symmetric] proof (rule sum.cong) fix i :: 'n from * have "v $ i \ 0" "w $ i \ 0" unfolding S_def by auto thus "norm (?lin $ i) = a * norm (v $ i) + b * norm (w $ i)" using *(3-4) by auto qed simp also have "\ = 1" using *(5) 1 by auto finally have norm1: "norm1 ?lin = 1" . { fix i from * have "0 \ v $ i" "sr * v $ i \ (B *v v) $ i" unfolding S_def by auto with \a \ 0\ have a: "a * (sr * v $ i) \ a * (B *v v) $ i" by (intro mult_left_mono) from * have "0 \ w $ i" "sr * w $ i \ (B *v w) $ i" unfolding S_def by auto with \b \ 0\ have b: "b * (sr * w $ i) \ b * (B *v w) $ i" by (intro mult_left_mono) from a b have "a * (sr * v $ i) + b * (sr * w $ i) \ a * (B *v v) $ i + b * (B *v w) $ i" by auto } note le = this have switch[simp]: "\ x y. x * a * y = a * x * y" "\ x y. x * b * y = b * x * y" by auto have [simp]: "x \ {v,w} \ a * (r * x $h i) = r * (a * x $h i)" for a r i x by auto show "a *\<^sub>R v + b *\<^sub>R w \ S" using * norm1 le unfolding S_def by (auto simp: matrix_vect_scaleR matrix_vector_right_distrib ring_distribs) qed private abbreviation (input) r :: "real \ complex" where "r \ of_real" private abbreviation rv :: "real ^'n \ complex ^'n" where "rv v \ \ i. 
r (v $ i)" private lemma rv_0: "(rv v = 0) = (v = 0)" by (simp add: of_real_hom.map_vector_0 map_vector_def vec_eq_iff) private lemma rv_mult: "A *v rv v = rv (B *v v)" proof - have "map_matrix r B = A" using rnnA unfolding map_matrix_def B_def real_non_neg_mat_def map_vector_def elements_mat_h_def by vector thus ?thesis using of_real_hom.matrix_vector_mult_hom[of B, where 'a = complex] unfolding map_vector_def by auto qed context assumes zero_no_ev: "\ v. v \ S \ A *v rv v \ 0" begin private lemma normB_S: assumes v: "v \ S" shows "norm1 (B *v v) \ 0" proof - from zero_no_ev[OF v, unfolded rv_mult rv_0] show ?thesis by auto qed private lemma image_f: "f ` S \ S" proof - { fix v assume v: "v \ S" hence norm: "norm1 v = 1" and ge: "\ i. v $ i \ 0" "\ i. sr * v $ i \ (B *v v) $ i" unfolding S_def by auto from normB_S[OF v] have normB: "norm1 (B *v v) > 0" using norm1_nonzero by auto have fv: "f v = (1 / norm1 (B *v v)) *\<^sub>R (B *v v)" unfolding f_def by auto from normB have Bv0: "B *v v \ 0" unfolding norm1_0_iff[symmetric] by linarith have norm: "norm1 (f v) = 1" unfolding fv using normB Bv0 by simp define c where "c = (1 / norm1 (B *v v))" have c: "c > 0" unfolding c_def using normB by auto { fix i have 1: "f v $ i \ 0" unfolding fv c_def[symmetric] using c ge by (auto simp: matrix_vector_mult_def sum_distrib_left B_norm intro!: sum_nonneg) have id1: "\ i. (B *v f v) $ i = c * ((B *v (B *v v)) $ i)" unfolding f_def c_def matrix_vect_scaleR by simp have id3: "\ i. sr * f v $ i = c * ((B *v (sr *\<^sub>R v)) $ i)" unfolding f_def c_def[symmetric] matrix_vect_scaleR by auto have 2: "sr * f v $ i \ (B *v f v) $ i" unfolding id1 id3 - unfolding real_mult_le_cancel_iff2[OF \c > 0\] + unfolding mult_le_cancel_iff2[OF \c > 0\] by (rule mult_B_mono, insert ge(2), auto) note 1 2 } with norm have "f v \ S" unfolding S_def by auto } thus ?thesis by blast qed private lemma cont_f: "continuous_on S f" unfolding f_def[abs_def] continuous_on using normB_S unfolding norm1_def by (auto intro!: tendsto_eq_intros) qualified lemma perron_frobenius_positive_ev: "\ v. eigen_vector A v (r sr) \ real_non_neg_vec v" proof - from brouwer[OF compactS convexS non_emptyS cont_f image_f] obtain v where v: "v \ S" and fv: "f v = v" by auto define ev where "ev = norm1 (B *v v)" from normB_S[OF v] have "ev \ 0" unfolding ev_def by auto with norm1_ge_0[of "B *v v", folded ev_def] have norm: "ev > 0" by auto from arg_cong[OF fv[unfolded f_def], of "\ (w :: real ^ 'n). ev *\<^sub>R w"] norm have ev: "B *v v = ev *s v" unfolding ev_def[symmetric] scalar_mult_eq_scaleR by simp with v[unfolded S_def] have ge: "\ i. sr * v $ i \ ev * v $ i" by auto have "A *v rv v = rv (B *v v)" unfolding rv_mult .. also have "\ = ev *s rv v" unfolding ev vec_eq_iff by (simp add: scaleR_conv_of_real scaleR_vec_def) finally have ev: "A *v rv v = ev *s rv v" . from v have v0: "v \ 0" unfolding S_def by auto hence "rv v \ 0" unfolding rv_0 . with ev have ev: "eigen_vector A (rv v) ev" unfolding eigen_vector_def by auto hence "eigen_value A ev" unfolding eigen_value_def by auto from spectral_radius_max[OF this] have le: "norm (r ev) \ sr" . 
from v0 obtain i where "v $ i \ 0" unfolding vec_eq_iff by auto from v have "v $ i \ 0" unfolding S_def by auto with \v $ i \ 0\ have "v $ i > 0" by auto with ge[of i] have ge: "sr \ ev" by auto with le have sr: "r sr = ev" by auto from v have *: "real_non_neg_vec (rv v)" unfolding S_def real_non_neg_vec_def vec_elements_h_def by auto show ?thesis unfolding sr by (rule exI[of _ "rv v"], insert * ev norm, auto) qed end qualified lemma perron_frobenius_both: "\ v. eigen_vector A v (r sr) \ real_non_neg_vec v" proof (cases "\ v \ S. A *v rv v \ 0") case True show ?thesis by (rule Perron_Frobenius.perron_frobenius_positive_ev[OF rnnA], insert True, auto) next case False then obtain v where v: "v \ S" and A0: "A *v rv v = 0" by auto hence id: "A *v rv v = 0 *s rv v" and v0: "v \ 0" unfolding S_def by auto from v0 have "rv v \ 0" unfolding rv_0 . with id have ev: "eigen_vector A (rv v) 0" unfolding eigen_vector_def by auto hence "eigen_value A 0" unfolding eigen_value_def .. from spectral_radius_max[OF this] have 0: "0 \ sr" by auto from v[unfolded S_def] have ge: "\ i. sr * v $ i \ (B *v v) $ i" by auto from v[unfolded S_def] have rnn: "real_non_neg_vec (rv v)" unfolding real_non_neg_vec_def vec_elements_h_def by auto from v0 obtain i where "v $ i \ 0" unfolding vec_eq_iff by auto from v have "v $ i \ 0" unfolding S_def by auto with \v $ i \ 0\ have vi: "v $ i > 0" by auto from rv_mult[of v, unfolded A0] have "rv (B *v v) = 0" by simp hence "B *v v = 0" unfolding rv_0 . from ge[of i, unfolded this] vi have ge: "sr \ 0" by (simp add: mult_le_0_iff) with \0 \ sr\ have "sr = 0" by auto show ?thesis unfolding \sr = 0\ using rnn ev by auto qed end text \Perron Frobenius: The largest complex eigenvalue of a real-valued non-negative matrix is a real one, and it has a real-valued non-negative eigenvector.\ lemma perron_frobenius: assumes "real_non_neg_mat A" shows "\v. eigen_vector A v (of_real (spectral_radius A)) \ real_non_neg_vec v" by (rule Perron_Frobenius.perron_frobenius_both[OF assms]) text \And a version which ignores the eigenvector.\ lemma perron_frobenius_eigen_value: assumes "real_non_neg_mat A" shows "eigen_value A (of_real (spectral_radius A))" using perron_frobenius[OF assms] unfolding eigen_value_def by blast end diff --git a/thys/Poincare_Bendixson/Invariance.thy b/thys/Poincare_Bendixson/Invariance.thy --- a/thys/Poincare_Bendixson/Invariance.thy +++ b/thys/Poincare_Bendixson/Invariance.thy @@ -1,299 +1,299 @@ section \Invariance\ theory Invariance imports ODE_Misc begin context auto_ll_on_open begin definition "invariant M \ (\x\M. trapped x M)" definition "positively_invariant M \ (\x\M. trapped_forward x M)" definition "negatively_invariant M \ (\x\M. trapped_backward x M)" lemma positively_invariant_iff: "positively_invariant M \ (\x\M. flow0 x ` (existence_ivl0 x \ {0..})) \ M" unfolding positively_invariant_def trapped_forward_def by auto lemma negatively_invariant_iff: "negatively_invariant M \ (\x\M. flow0 x ` (existence_ivl0 x \ {..0})) \ M" unfolding negatively_invariant_def trapped_backward_def by auto lemma invariant_iff_pos_and_neg_invariant: "invariant M \ positively_invariant M \ negatively_invariant M" unfolding invariant_def trapped_def positively_invariant_def negatively_invariant_def by blast lemma invariant_iff: "invariant M \ (\x\M. 
flow0 x ` (existence_ivl0 x)) \ M" unfolding invariant_iff_pos_and_neg_invariant positively_invariant_iff negatively_invariant_iff by (metis (mono_tags) SUP_le_iff invariant_def invariant_iff_pos_and_neg_invariant negatively_invariant_iff positively_invariant_iff trapped_iff_on_existence_ivl0) lemma positively_invariant_restrict_dom: "positively_invariant M = positively_invariant (M \ X)" unfolding positively_invariant_def trapped_forward_def by (auto intro!: flow_in_domain dest: mem_existence_ivl_iv_defined) lemma negatively_invariant_restrict_dom: "negatively_invariant M = negatively_invariant (M \ X)" unfolding negatively_invariant_def trapped_backward_def by (auto intro!: flow_in_domain dest: mem_existence_ivl_iv_defined) lemma invariant_restrict_dom: "invariant M = invariant (M \ X)" using invariant_iff_pos_and_neg_invariant negatively_invariant_restrict_dom positively_invariant_restrict_dom by auto (* lemma positively_invariant_imp_subset: "M \ X" if "positively_invariant M" using positively_invariant_iff that by blast lemma negatively_invariant_imp_subset: "M \ X" if "negatively_invariant M" using negatively_invariant_iff that by blast lemma invariant_imp_subset: "M \ X" if "invariant M" using invariant_iff that by blast *) end context auto_ll_on_open begin lemma positively_invariant_eq_rev: "positively_invariant M = rev.negatively_invariant M" unfolding positively_invariant_def rev.negatively_invariant_def by (simp add: rev.trapped_backward_iff_rev_trapped_forward) lemma negatively_invariant_eq_rev: "negatively_invariant M = rev.positively_invariant M" unfolding negatively_invariant_def rev.positively_invariant_def by (simp add: trapped_backward_iff_rev_trapped_forward) lemma invariant_eq_rev: "invariant M = rev.invariant M" unfolding invariant_iff_pos_and_neg_invariant rev.invariant_iff_pos_and_neg_invariant positively_invariant_eq_rev negatively_invariant_eq_rev by auto lemma negatively_invariant_complI: "negatively_invariant (X-M)" if "positively_invariant M" unfolding negatively_invariant_def trapped_backward_def proof clarsimp fix x t assume x: "x \ X" "x \ M" "t \ existence_ivl0 x" "t \ 0" have a1:"flow0 x t \ X" using x using flow_in_domain by blast have a2:"flow0 x t \ M" proof (rule ccontr) assume "\ flow0 x t \ M" then have "trapped_forward (flow0 x t) M" using positively_invariant_def that by auto moreover have "flow0 (flow0 x t) (-t) = x" using \t \ existence_ivl0 x\ flows_reverse by auto moreover have "-t \ existence_ivl0 (flow0 x t) \ {0..}" using existence_ivl_reverse x(3) x(4) by auto ultimately have "x \ M" unfolding trapped_forward_def by (metis image_subset_iff) thus False using x(2) by auto qed show "flow0 x t \ X \ flow0 x t \ M" using a1 a2 by auto qed end context auto_ll_on_open begin lemma negatively_invariant_complD: "positively_invariant M" if "negatively_invariant (X-M)" proof - have "rev.positively_invariant (X-M)" using that by (simp add: negatively_invariant_eq_rev) then have "rev.negatively_invariant (X-(X-M))" by (simp add: rev.negatively_invariant_complI) then have "positively_invariant (X-(X-M))" using rev.negatively_invariant_eq_rev by auto thus ?thesis using Diff_Diff_Int by (metis inf_commute positively_invariant_restrict_dom) qed lemma pos_invariant_iff_compl_neg_invariant: "positively_invariant M \ negatively_invariant (X - M)" by (safe intro!: negatively_invariant_complI dest!: negatively_invariant_complD) lemma neg_invariant_iff_compl_pos_invariant: shows "negatively_invariant M \ positively_invariant (X - M)" by (simp add: 
auto_ll_on_open.pos_invariant_iff_compl_neg_invariant negatively_invariant_eq_rev positively_invariant_eq_rev rev.auto_ll_on_open_axioms) lemma invariant_iff_compl_invariant: shows "invariant M \ invariant (X - M)" using invariant_iff_pos_and_neg_invariant neg_invariant_iff_compl_pos_invariant pos_invariant_iff_compl_neg_invariant by blast lemma invariant_iff_pos_invariant_and_compl_pos_invariant: shows "invariant M \ positively_invariant M \ positively_invariant (X-M)" by (simp add: invariant_iff_pos_and_neg_invariant neg_invariant_iff_compl_pos_invariant) end subsection \Tools for proving invariance\ context auto_ll_on_open begin lemma positively_invariant_left_inter: assumes "positively_invariant C" assumes "\x \ C \ D. trapped_forward x D" shows "positively_invariant (C \ D)" using assms positively_invariant_def trapped_forward_def by auto lemma trapped_forward_le: fixes V :: "'a \ real" assumes "V x \ 0" assumes contg: "continuous_on (flow0 x ` (existence_ivl0 x \ {0..})) g" assumes "\x. (V has_derivative V' x) (at x)" assumes "\s. s \ existence_ivl0 x \ {0..} \ V' (flow0 x s) (f (flow0 x s)) \ g (flow0 x s) * V (flow0 x s)" shows "trapped_forward x {x. V x \ 0}" unfolding trapped_forward_def proof clarsimp fix t assume t: "t \ existence_ivl0 x" "0 \ t" then have ex:"{0..t} \ existence_ivl0 x" by (simp add: local.ivl_subset_existence_ivl) have contV: "continuous_on UNIV V" using assms(3) has_derivative_continuous_on by blast have 1: "continuous_on {0..t} (g \ flow0 x)" apply (rule continuous_on_compose) using continuous_on_subset ex local.flow_continuous_on apply blast by (meson Int_subset_iff atLeastAtMost_iff atLeast_iff contg continuous_on_subset ex image_mono subsetI) have 2: "(\s. s \ {0..t} \ (V \ flow0 x has_real_derivative (V' (flow0 x s) \ f \ flow0 x) s) (at s))" apply (auto simp add:o_def has_field_derivative_def) proof - fix s assume "0 \ s" "s \ t" then have "s \ existence_ivl0 x" using ex by auto from flow_has_derivative[OF this] have "(flow0 x has_derivative (\i. i *\<^sub>R f (flow0 x s))) (at s)" . from has_derivative_compose[OF this assms(3)] have "((\t. V (flow0 x t)) has_derivative (\t. V' (flow0 x s) (t *\<^sub>R f (flow0 x s)))) (at s)" . moreover have "linear (V' (flow0 x s))" using assms(3) has_derivative_linear by blast ultimately have "((\t. V (flow0 x t)) has_derivative (\t. t *\<^sub>R V' (flow0 x s) (f (flow0 x s)))) (at s)" unfolding linear_cmul[OF \linear (V' (flow0 x s))\] by blast thus "((\t. V (flow0 x t)) has_derivative (*) (V' (flow0 x s) (f (flow0 x s)))) (at s)" by (auto intro!: derivative_eq_intros simp add: mult_commute_abs) qed have 3: "(\s. s \ {0..t} \ (V' (flow0 x s) \ f \ flow0 x) s \ (g \ flow0 x) s *\<^sub>R (V \ flow0 x) s)" using ex by (auto intro!:assms(4)) from comparison_principle_le_linear[OF 1 2 _ 3] assms(1) have "\s \ {0..t}. (V \ flow0 x) s \ 0" using local.mem_existence_ivl_iv_defined(2) t(1) by auto thus " V (flow0 x t) \ 0" by (simp add: t(2)) qed lemma positively_invariant_le_domain: fixes V :: "'a \ real" assumes "positively_invariant D" assumes contg: "continuous_on D g" assumes "\x. (V has_derivative V' x) (at x)" (* TODO: domain can be added here too ? *) assumes "\s. s \ D \ V' s (f s) \ g s * V s" shows "positively_invariant (D \ {x. 
V x \ 0})" apply (auto intro!:positively_invariant_left_inter[OF assms(1)]) proof - fix x assume "x \ D" "V x \ 0" have "continuous_on (flow0 x ` (existence_ivl0 x \ {0..})) g" by (meson \x \ D\ assms(1) contg continuous_on_subset positively_invariant_def trapped_forward_def) from trapped_forward_le[OF \V x \ 0\ this assms(3)] show "trapped_forward x {x. V x \ 0}" using assms(4) using \x \ D\ assms(1) positively_invariant_def trapped_forward_def by auto qed lemma positively_invariant_barrier_domain: fixes V :: "'a \ real" assumes "positively_invariant D" assumes "\x. (V has_derivative V' x) (at x)" assumes "continuous_on D (\x. V' x (f x))" assumes "\s. s \ D \ V s = 0 \ V' s (f s) < 0" shows "positively_invariant (D \ {x. V x \ 0})" apply (auto intro!:positively_invariant_left_inter[OF assms(1)]) proof - fix x assume "x \ D" "V x \ 0" have contV: "continuous_on UNIV V" using assms(2) has_derivative_continuous_on by blast then have *: "continuous_on (flow0 x ` (existence_ivl0 x \ {0..})) V" using continuous_on_subset by blast have sub: "flow0 x ` (existence_ivl0 x \ {0..}) \ D" using \x \ D\ assms(1) positively_invariant_def trapped_forward_def by auto then have contV': "continuous_on (flow0 x ` (existence_ivl0 x \ {0..})) (\x. V' x (f x))" by (metis assms(3) continuous_on_subset) have nz: "\i t. t \ existence_ivl0 x \ 0 \ t \ max (-V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2) > 0" proof - fix i t assume "t \ existence_ivl0 x" "0 \ t" then have "flow0 x t \ D" using \x \ D\ assms(1) positively_invariant_def trapped_forward_def by auto then have "V (flow0 x t) = 0 \ - V' (flow0 x t) (f (flow0 x t)) > 0" using assms(4) by simp then have "(V (flow0 x t))^2 > 0 \ - V' (flow0 x t) (f (flow0 x t)) > 0" by simp thus "max (-V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2) > 0" unfolding less_max_iff_disj by auto qed have *: "continuous_on (flow0 x ` (existence_ivl0 x \ {0..})) (\x. V' x (f x) * V x / max (- V' x (f x)) ((V x)^2))" apply (auto intro!:continuous_intros continuous_on_max simp add: * contV') using nz by fastforce have "(\t. t \ existence_ivl0 x \ {0..} \ V' (flow0 x t) (f (flow0 x t)) \ (V' (flow0 x t) (f (flow0 x t)) * V (flow0 x t) / max (- V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2)) * V (flow0 x t))" proof clarsimp fix t assume "t \ existence_ivl0 x" "0 \ t" then have p: "max (-V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2) > 0" using nz by auto have " V' (flow0 x t) (f (flow0 x t)) * max (- V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2) \ V' (flow0 x t) (f (flow0 x t)) * (V (flow0 x t))\<^sup>2" - by (smt mult_minus_left mult_minus_right power2_eq_square real_mult_le_cancel_iff2) + by (smt mult_minus_left mult_minus_right power2_eq_square mult_le_cancel_iff2) then have "V' (flow0 x t) (f (flow0 x t)) \ V' (flow0 x t) (f (flow0 x t)) * (V (flow0 x t))\<^sup>2 / max (- V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2)" using p pos_le_divide_eq by blast thus " V' (flow0 x t) (f (flow0 x t)) \ V' (flow0 x t) (f (flow0 x t)) * (V (flow0 x t)) * V (flow0 x t) / max (- V' (flow0 x t) (f (flow0 x t))) ((V (flow0 x t))\<^sup>2)" by (simp add: power2_eq_square) qed from trapped_forward_le[OF \V x \ 0\ * assms(2) this] show "trapped_forward x {x. 
V x \ 0}" by auto qed lemma positively_invariant_UNIV: shows "positively_invariant UNIV" using positively_invariant_iff by blast lemma positively_invariant_conj: assumes "positively_invariant C" assumes "positively_invariant D" shows "positively_invariant (C \ D)" using assms positively_invariant_def using positively_invariant_left_inter by auto lemma positively_invariant_le: fixes V :: "'a \ real" assumes contg: "continuous_on UNIV g" assumes "\x. (V has_derivative V' x) (at x)" assumes "\s. V' s (f s) \ g s * V s" shows "positively_invariant {x. V x \ 0}" proof - from positively_invariant_le_domain[OF positively_invariant_UNIV assms] have "positively_invariant (UNIV \ {x. V x \ 0})" . thus ?thesis by auto qed lemma positively_invariant_barrier: fixes V :: "'a \ real" assumes "\x. (V has_derivative V' x) (at x)" assumes "continuous_on UNIV (\x. V' x (f x))" assumes "\s. V s = 0 \ V' s (f s) < 0" shows "positively_invariant {x. V x \ 0}" proof - from positively_invariant_barrier_domain[OF positively_invariant_UNIV assms] have "positively_invariant (UNIV \ {x. V x \ 0})" . thus ?thesis by auto qed end end \ No newline at end of file diff --git a/thys/QHLProver/Quantum_Program.thy b/thys/QHLProver/Quantum_Program.thy --- a/thys/QHLProver/Quantum_Program.thy +++ b/thys/QHLProver/Quantum_Program.thy @@ -1,1110 +1,1109 @@ section \Quantum programs\ theory Quantum_Program imports Matrix_Limit begin subsection \Syntax\ text \Datatype for quantum programs\ datatype com = SKIP | Utrans "complex mat" | Seq com com ("_;;/ _" [60, 61] 60) | Measure nat "nat \ complex mat" "com list" | While "nat \ complex mat" com text \A state corresponds to the density operator\ type_synonym state = "complex mat" text \List of dimensions of quantum states\ locale state_sig = fixes dims :: "nat list" begin definition d :: nat where "d = prod_list dims" text \Wellformedness of commands\ fun well_com :: "com \ bool" where "well_com SKIP = True" | "well_com (Utrans U) = (U \ carrier_mat d d \ unitary U)" | "well_com (Seq S1 S2) = (well_com S1 \ well_com S2)" | "well_com (Measure n M S) = (measurement d n M \ length S = n \ list_all well_com S)" | "well_com (While M S) = (measurement d 2 M \ well_com S)" subsection \Denotational semantics\ text \Denotation of going through the while loop n times\ fun denote_while_n_iter :: "complex mat \ complex mat \ (state \ state) \ nat \ state \ state" where "denote_while_n_iter M0 M1 DS 0 \ = \" | "denote_while_n_iter M0 M1 DS (Suc n) \ = denote_while_n_iter M0 M1 DS n (DS (M1 * \ * adjoint M1))" fun denote_while_n :: "complex mat \ complex mat \ (state \ state) \ nat \ state \ state" where "denote_while_n M0 M1 DS n \ = M0 * denote_while_n_iter M0 M1 DS n \ * adjoint M0" fun denote_while_n_comp :: "complex mat \ complex mat \ (state \ state) \ nat \ state \ state" where "denote_while_n_comp M0 M1 DS n \ = M1 * denote_while_n_iter M0 M1 DS n \ * adjoint M1" lemma denote_while_n_iter_assoc: "denote_while_n_iter M0 M1 DS (Suc n) \ = DS (M1 * (denote_while_n_iter M0 M1 DS n \) * adjoint M1)" proof (induct n arbitrary: \) case 0 show ?case by auto next case (Suc n) show ?case apply (subst denote_while_n_iter.simps) apply (subst Suc, auto) done qed lemma denote_while_n_iter_dim: "\ \ carrier_mat m m \ partial_density_operator \ \ M1 \ carrier_mat m m \ adjoint M1 * M1 \\<^sub>L 1\<^sub>m m \ (\\. 
\ \ carrier_mat m m \ partial_density_operator \ \ DS \ \ carrier_mat m m \ partial_density_operator (DS \)) \ denote_while_n_iter M0 M1 DS n \ \ carrier_mat m m \ partial_density_operator (denote_while_n_iter M0 M1 DS n \)" proof (induct n arbitrary: \) case 0 then show ?case unfolding denote_while_n_iter.simps by auto next case (Suc n) then have dr: "\ \ carrier_mat m m" and dM1: "M1 \ carrier_mat m m" by auto have dMr: "M1 * \ * adjoint M1 \ carrier_mat m m" using dr dM1 by fastforce have pdoMr: "partial_density_operator (M1 * \ * adjoint M1)" using pdo_close_under_measurement Suc by auto from Suc dMr pdoMr have d: "DS (M1 * \ * adjoint M1) \ carrier_mat m m" and "partial_density_operator (DS (M1 * \ * adjoint M1))" by auto then show ?case unfolding denote_while_n_iter.simps using Suc by auto qed lemma pdo_denote_while_n_iter: "\ \ carrier_mat m m \ partial_density_operator \ \ M1 \ carrier_mat m m \ adjoint M1 * M1 \\<^sub>L 1\<^sub>m m \ (\\. \ \ carrier_mat m m \ partial_density_operator \ \ partial_density_operator (DS \)) \ (\\. \ \ carrier_mat m m \ partial_density_operator \ \ DS \ \ carrier_mat m m) \ partial_density_operator (denote_while_n_iter M0 M1 DS n \)" proof (induct n arbitrary: \) case 0 then show ?case unfolding denote_while_n_iter.simps by auto next case (Suc n) have "partial_density_operator (M1 * \ * adjoint M1)" using Suc pdo_close_under_measurement by auto moreover have "M1 * \ * adjoint M1 \ carrier_mat m m" using Suc by auto ultimately have p: "partial_density_operator (DS (M1 * \ * adjoint M1))" and d: "DS (M1 * \ * adjoint M1) \ carrier_mat m m "using Suc by auto show ?case unfolding denote_while_n_iter.simps using Suc(1)[OF d p Suc(4) Suc(5)] Suc by auto qed text \Denotation of while is simply the infinite sum of denote\_while\_n\ definition denote_while :: "complex mat \ complex mat \ (state \ state) \ state \ state" where "denote_while M0 M1 DS \ = matrix_inf_sum d (\n. denote_while_n M0 M1 DS n \)" lemma denote_while_n_dim: assumes "\ \ carrier_mat d d" "M0 \ carrier_mat d d" "M1 \ carrier_mat d d" "partial_density_operator \" "\\'. \' \ carrier_mat d d \ partial_density_operator \' \ positive (DS \') \ trace (DS \') \ trace \' \ DS \' \ carrier_mat d d" shows "denote_while_n M0 M1 DS n \ \ carrier_mat d d" proof (induction n arbitrary: \) case 0 then show ?case proof - have "M0 * \ * adjoint M0 \ carrier_mat d d" using assms assoc_mult_mat by auto then show ?thesis by auto qed next case (Suc n) then show ?case proof - have "denote_while_n M0 M1 DS n (DS (M1 * \ * adjoint M1)) \ carrier_mat d d" using Suc assms by auto then show ?thesis by auto qed qed lemma denote_while_n_sum_dim: assumes "\ \ carrier_mat d d" "M0 \ carrier_mat d d" "M1 \ carrier_mat d d" "partial_density_operator \" "\\'. \' \ carrier_mat d d \ partial_density_operator \' \ positive (DS \') \ trace (DS \') \ trace \' \ DS \' \ carrier_mat d d" shows "matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \ carrier_mat d d" proof (induct n) case 0 then show ?case by auto next case (Suc n) then show ?case proof - have " denote_while_n M0 M1 DS n \ \ carrier_mat d d" using denote_while_n_dim assms by auto then have "matrix_sum d (\n. 
denote_while_n M0 M1 DS n \) (Suc n) \ carrier_mat d d" using Suc by auto then show ?thesis by auto qed qed lemma trace_decrease_mul_adj: assumes pdo: "partial_density_operator \" and dimr: "\ \ carrier_mat d d" and dimx: "x \ carrier_mat d d" and un: "adjoint x * x \\<^sub>L 1\<^sub>m d " shows "trace (x * \ * adjoint x) \ trace \" proof - have ad: "adjoint x * x \ carrier_mat d d" using adjoint_dim index_mult_mat dimx by auto have "trace (x * \ * adjoint x) = trace ((adjoint x * x) * \)" using dimx dimr by (mat_assoc d) also have "\ \ trace (1\<^sub>m d * \)" using lowner_le_trace un ad dimr pdo by auto also have "\ = trace \" using dimr by auto ultimately show ?thesis by auto qed lemma denote_while_n_positive: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and un: "adjoint M1 * M1 \\<^sub>L 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" shows "partial_density_operator \ \ \ \ carrier_mat d d \ positive (denote_while_n M0 M1 DS n \)" proof (induction n arbitrary: \) case 0 then show ?case using positive_close_under_left_right_mult_adjoint dim0 unfolding partial_density_operator_def by auto next case (Suc n) then show ?case proof - have pdoM: "partial_density_operator (M1 * \ * adjoint M1)" using pdo_close_under_measurement Suc dim1 un by auto moreover have cM: "M1 * \ * adjoint M1 \ carrier_mat d d" using Suc dim1 adjoint_dim index_mult_mat by auto ultimately have DSM1: "positive (DS (M1 * \ * adjoint M1)) \ trace (DS (M1 * \ * adjoint M1)) \ trace (M1 * \ * adjoint M1) \ DS (M1 * \ * adjoint M1) \ carrier_mat d d" using DS by auto moreover have "trace (M1 * \ * adjoint M1) \ trace \" using trace_decrease_mul_adj Suc dim1 un by auto ultimately have "partial_density_operator (DS (M1 * \ * adjoint M1))" using Suc unfolding partial_density_operator_def by auto then have "positive (M0 * denote_while_n_iter M0 M1 DS n (DS (M1 * \ * adjoint M1)) * adjoint M0)" using Suc DSM1 by auto then have "positive (denote_while_n M0 M1 DS (Suc n) \)" by auto then show ?thesis by auto qed qed lemma denote_while_n_sum_positive: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and un: "adjoint M1 * M1 \\<^sub>L 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" and pdo: "partial_density_operator \" and r: " \ \ carrier_mat d d" shows "positive (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n)" proof - have "\k. k < n \ positive (denote_while_n M0 M1 DS k \)" using assms denote_while_n_positive by auto moreover have "\k. k < n \ denote_while_n M0 M1 DS k \ \ carrier_mat d d" using denote_while_n_dim assms by auto ultimately show ?thesis using matrix_sum_positive by auto qed lemma trace_measure2_id: assumes dM0: "M0 \ carrier_mat n n" and dM1: "M1 \ carrier_mat n n" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m n" and dA: "A \ carrier_mat n n" shows "trace (M0 * A * adjoint M0) + trace (M1 * A * adjoint M1) = trace A" proof - have "trace (M0 * A * adjoint M0) + trace (M1 * A * adjoint M1) = trace ((adjoint M0 * M0 + adjoint M1 * M1) * A)" using assms by (mat_assoc n) also have "\ = trace (1\<^sub>m n * A)" using id by auto also have "\ = trace A" using dA by auto finally show ?thesis. 
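(* In conventional notation the identity just proved reads
     tr (M0 A M0^+) + tr (M1 A M1^+) = tr ((M0^+ M0 + M1^+ M1) A) = tr A,
   using cyclicity of the trace and the completeness assumption
   adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m n; this is the usual trace-preservation
   property of a two-outcome measurement. *)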
qed lemma measurement_lowner_le_one1: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m d" shows "adjoint M1 * M1 \\<^sub>L 1\<^sub>m d" proof - have paM0: "positive (adjoint M0 * M0)" apply (subgoal_tac "adjoint M0 * adjoint (adjoint M0) = adjoint M0 * M0") subgoal using positive_if_decomp[of "adjoint M0 * M0"] dim0 adjoint_dim[OF dim0] by fastforce using adjoint_adjoint[of M0] by auto have le1: "adjoint M0 * M0 + adjoint M1 * M1 \\<^sub>L 1\<^sub>m d" using id lowner_le_refl[of "1\<^sub>m d"] by fastforce show "adjoint M1 * M1 \\<^sub>L 1\<^sub>m d" using add_positive_le_reduce2[OF _ _ _ paM0 le1] dim0 dim1 by fastforce qed lemma denote_while_n_sum_trace: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" and r: " \ \ carrier_mat d d" and pdor: "partial_density_operator \" shows "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n) \ trace \" proof - have un: "adjoint M1 * M1 \\<^sub>L 1\<^sub>m d" using measurement_lowner_le_one1 using dim0 dim1 id by auto have DS': "(DS \ \ carrier_mat d d) \ partial_density_operator (DS \)" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ proof - have res: "positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" using DS that by auto moreover have "trace \ \ 1" using that partial_density_operator_def by auto ultimately have "trace (DS \) \ 1" by auto with res show ?thesis unfolding partial_density_operator_def by auto qed have dWk: "denote_while_n_iter M0 M1 DS k \ \ carrier_mat d d" for k using denote_while_n_iter_dim[OF r pdor dim1 un] DS' dim0 dim1 by auto have pdoWk: "partial_density_operator (denote_while_n_iter M0 M1 DS k \)" for k using pdo_denote_while_n_iter[OF r pdor dim1 un] DS' dim0 dim1 by auto have dW0k: "denote_while_n M0 M1 DS k \ \ carrier_mat d d" for k using denote_while_n_dim r dim0 dim1 pdor by auto then have dsW0k: "matrix_sum d (\n. denote_while_n M0 M1 DS n \) k \ carrier_mat d d" for k using matrix_sum_dim[of k "\k. denote_while_n M0 M1 DS k \"] by auto have "(denote_while_n_comp M0 M1 DS n \) \ carrier_mat d d" for n unfolding denote_while_n_comp.simps using dim1 dWk by auto moreover have pdoW1k: "partial_density_operator (denote_while_n_comp M0 M1 DS n \)" for n unfolding denote_while_n_comp.simps using pdo_close_under_measurement[OF dim1 dWk pdoWk un] by auto ultimately have "trace (DS (denote_while_n_comp M0 M1 DS n \)) \ trace (denote_while_n_comp M0 M1 DS n \)" for n using DS by auto moreover have "trace (denote_while_n_iter M0 M1 DS (Suc n) \) = trace (DS (denote_while_n_comp M0 M1 DS n \))" for n using denote_while_n_iter_assoc[folded denote_while_n_comp.simps] by auto ultimately have leq3: "trace (denote_while_n_iter M0 M1 DS (Suc n) \) \ trace (denote_while_n_comp M0 M1 DS n \)" for n by auto have mainleq: "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)) + trace (denote_while_n_comp M0 M1 DS n \) \ trace \" for n proof (induct n) case 0 then show ?case unfolding matrix_sum.simps denote_while_n.simps denote_while_n_comp.simps denote_while_n_iter.simps apply (subgoal_tac "M0 * \ * adjoint M0 + 0\<^sub>m d d = M0 * \ * adjoint M0") using trace_measure2_id[OF dim0 dim1 id r] dim0 apply simp using dim0 by auto next case (Suc n) have eq1: "trace (matrix_sum d (\n. 
denote_while_n M0 M1 DS n \) (Suc (Suc n))) = trace (denote_while_n M0 M1 DS (Suc n) \) + trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n))" unfolding matrix_sum.simps using trace_add_linear dW0k[of "Suc n"] dsW0k[of "Suc n"] by auto have eq2: "trace (denote_while_n M0 M1 DS (Suc n) \) + trace (denote_while_n_comp M0 M1 DS (Suc n) \) = trace (denote_while_n_iter M0 M1 DS (Suc n) \)" unfolding denote_while_n.simps denote_while_n_comp.simps using trace_measure2_id[OF dim0 dim1 id dWk[of "Suc n"]] by auto have "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc (Suc n))) + trace (denote_while_n_comp M0 M1 DS (Suc n) \) = trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)) + trace (denote_while_n M0 M1 DS (Suc n) \) + trace (denote_while_n_comp M0 M1 DS (Suc n) \)" using eq1 by auto also have "\ = trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)) + trace (denote_while_n_iter M0 M1 DS (Suc n) \)" using eq2 by auto also have "\ \ trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)) + trace (denote_while_n_comp M0 M1 DS n \)" using leq3 by auto also have "\ \ trace \" using Suc by auto finally show ?case. qed have reduce_le_complex: "(b::complex) \ 0 \ a + b \ c \ a \ c" for a b c by simp have "positive (denote_while_n_comp M0 M1 DS n \)" for n using pdoW1k unfolding partial_density_operator_def by auto then have "trace (denote_while_n_comp M0 M1 DS n \) \ 0" for n using positive_trace using \\n. denote_while_n_comp M0 M1 DS n \ \ carrier_mat d d\ by blast then have "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)) \ trace \" for n using mainleq reduce_le_complex[of "trace (denote_while_n_comp M0 M1 DS n \)"] by auto moreover have "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) 0) \ trace \" unfolding matrix_sum.simps using trace_zero positive_trace pdor unfolding partial_density_operator_def using r by auto ultimately show "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n) \ trace \" for n apply (induct n) by auto qed lemma denote_while_n_sum_partial_density: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" and pdo: "partial_density_operator \" and r: " \ \ carrier_mat d d" shows "(partial_density_operator (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n))" proof - have "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n) \ trace \" using denote_while_n_sum_trace assms by auto then have "trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n) \ 1" using pdo unfolding partial_density_operator_def by auto moreover have "positive (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n)" using assms DS denote_while_n_sum_positive measurement_lowner_le_one1[OF dim0 dim1 id] by auto ultimately show ?thesis unfolding partial_density_operator_def by auto qed lemma denote_while_n_sum_lowner_le: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" and pdo: "partial_density_operator \" and dimr: " \ \ carrier_mat d d" shows "(matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \\<^sub>L matrix_sum d (\n. 
denote_while_n M0 M1 DS n \) (Suc n))" proof auto have whilenc: "denote_while_n M0 M1 DS n \ \ carrier_mat d d" using denote_while_n_dim assms by auto have sumc: "matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \ carrier_mat d d" using denote_while_n_sum_dim assms by auto have "denote_while_n M0 M1 DS n \ + matrix_sum d (\n. denote_while_n M0 M1 DS n \) n - matrix_sum d (\n. denote_while_n M0 M1 DS n \) n = denote_while_n M0 M1 DS n \ + matrix_sum d (\n. denote_while_n M0 M1 DS n \) n + (- matrix_sum d (\n. denote_while_n M0 M1 DS n \) n)" using minus_add_uminus_mat[of "matrix_sum d (\n. denote_while_n M0 M1 DS n \) n" d d "matrix_sum d (\n. denote_while_n M0 M1 DS n \) n"] by auto also have "\ = denote_while_n M0 M1 DS n \ + 0\<^sub>m d d" by (smt assoc_add_mat minus_add_uminus_mat minus_r_inv_mat sumc uminus_carrier_mat whilenc) also have "\ = denote_while_n M0 M1 DS n \" using whilenc by auto finally have simp: "denote_while_n M0 M1 DS n \ + matrix_sum d (\n. denote_while_n M0 M1 DS n \) n - matrix_sum d (\n. denote_while_n M0 M1 DS n \) n = denote_while_n M0 M1 DS n \ " by auto have "positive (denote_while_n M0 M1 DS n \)" using denote_while_n_positive assms measurement_lowner_le_one1[OF dim0 dim1 id] by auto then have "matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \\<^sub>L (denote_while_n M0 M1 DS n \ + matrix_sum d (\n. denote_while_n M0 M1 DS n \) n)" unfolding lowner_le_def using simp by auto then show "matrix_sum d (\n. M0 * denote_while_n_iter M0 M1 DS n \ * adjoint M0) n \\<^sub>L (M0 * denote_while_n_iter M0 M1 DS n \ * adjoint M0 + matrix_sum d (\n. M0 * denote_while_n_iter M0 M1 DS n \ * adjoint M0) n)" by auto qed lemma lowner_is_lub_matrix_sum: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" and pdo: "partial_density_operator \" and dimr: " \ \ carrier_mat d d" shows "matrix_seq.lowner_is_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \)) (matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \)))" proof- have sumdd: "\n. matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \ carrier_mat d d" using denote_while_n_sum_dim assms by auto have sumtr: "\n. trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n) \ trace \" using denote_while_n_sum_trace assms by auto have sumpar: "\n. partial_density_operator (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n)" using denote_while_n_sum_partial_density assms by auto have sumle:"\n. matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \\<^sub>L matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)" using denote_while_n_sum_lowner_le assms by auto have seqd: "matrix_seq d (matrix_sum d (\n. denote_while_n M0 M1 DS n \))" using matrix_seq_def sumdd sumpar sumle by auto then show ?thesis using matrix_seq.lowner_lub_prop[of d "(matrix_sum d (\n. denote_while_n M0 M1 DS n \))"] by auto qed lemma denote_while_dim_positive: assumes dim0: "M0 \ carrier_mat d d" and dim1: "M1 \ carrier_mat d d" and id: "adjoint M0 * M0 + adjoint M1 * M1 = 1\<^sub>m d" and DS: "\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive (DS \) \ trace (DS \) \ trace \ \ DS \ \ carrier_mat d d" and pdo: "partial_density_operator \" and dimr: " \ \ carrier_mat d d" shows "denote_while M0 M1 DS \ \ carrier_mat d d \ positive (denote_while M0 M1 DS \) \ trace (denote_while M0 M1 DS \) \ trace \" proof - have sumdd: "\n. matrix_sum d (\n. 
denote_while_n M0 M1 DS n \) n \ carrier_mat d d" using denote_while_n_sum_dim assms by auto have sumtr: "\n. trace (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n) \ trace \" using denote_while_n_sum_trace assms by auto have sumpar: "\n. partial_density_operator (matrix_sum d (\n. denote_while_n M0 M1 DS n \) n)" using denote_while_n_sum_partial_density assms by auto have sumle:"\n. matrix_sum d (\n. denote_while_n M0 M1 DS n \) n \\<^sub>L matrix_sum d (\n. denote_while_n M0 M1 DS n \) (Suc n)" using denote_while_n_sum_lowner_le assms by auto have seqd: "matrix_seq d (matrix_sum d (\n. denote_while_n M0 M1 DS n \))" using matrix_seq_def sumdd sumpar sumle by auto have "matrix_seq.lowner_is_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \)) (matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \)))" using lowner_is_lub_matrix_sum assms by auto then have "matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \)) \ carrier_mat d d \ positive (matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \))) \ trace (matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n M0 M1 DS n \))) \ trace \" using matrix_seq.lowner_is_lub_dim seqd matrix_seq.lowner_lub_is_positive matrix_seq.lowner_lub_trace sumtr by auto then show ?thesis unfolding denote_while_def matrix_inf_sum_def by auto qed definition denote_measure :: "nat \ (nat \ complex mat) \ ((state \ state) list) \ state \ state" where "denote_measure n M DS \ = matrix_sum d (\k. (DS!k) ((M k) * \ * adjoint (M k))) n" lemma denote_measure_dim: assumes "\ \ carrier_mat d d" "measurement d n M" "\\' k. \' \ carrier_mat d d \ k < n \ (DS!k) \' \ carrier_mat d d" shows "denote_measure n M DS \ \ carrier_mat d d" proof - have dMk: "k < n \ M k \ carrier_mat d d" for k using assms measurement_def by auto have d: "k < n \ (M k) * \ * adjoint (M k) \ carrier_mat d d" for k using mult_carrier_mat[OF mult_carrier_mat[OF dMk assms(1)] adjoint_dim[OF dMk]] by auto then have "k < n \ (DS!k) ((M k) * \ * adjoint (M k)) \ carrier_mat d d" for k using assms(3) by auto then show ?thesis unfolding denote_measure_def using matrix_sum_dim[of n "\k. (DS!k) ((M k) * \ * adjoint (M k))"] by auto qed lemma measure_well_com: assumes "well_com (Measure n M S)" shows "\k. k < n \ well_com (S ! k)" using assms unfolding well_com.simps using list_all_length by auto text \Semantics of commands\ fun denote :: "com \ state \ state" where "denote SKIP \ = \" | "denote (Utrans U) \ = U * \ * adjoint U" | "denote (Seq S1 S2) \ = denote S2 (denote S1 \)" | "denote (Measure n M S) \ = denote_measure n M (map denote S) \" | "denote (While M S) \ = denote_while (M 0) (M 1) (denote S) \" lemma denote_measure_expand: assumes m: "m \ n" and wc: "well_com (Measure n M S)" shows "denote (Measure m M S) \ = matrix_sum d (\k. denote (S!k) ((M k) * \ * adjoint (M k))) m" unfolding denote.simps denote_measure_def proof - have "k < m \ map denote S ! k = denote (S!k)" for k using wc m by auto then have "k < m \ (map denote S ! k) (M k * \ * adjoint (M k)) = denote (S!k) ((M k) * \ * adjoint (M k))" for k by auto then show "matrix_sum d (\k. (map denote S ! k) (M k * \ * adjoint (M k))) m = matrix_sum d (\k. denote (S ! k) (M k * \ * adjoint (M k))) m" using matrix_sum_cong[of m "\k. (map denote S ! k) (M k * \ * adjoint (M k))" "\k. denote (S ! k) (M k * \ * adjoint (M k))"] by auto qed lemma matrix_sum_trace_le: fixes f :: "nat \ complex mat" and g :: "nat \ complex mat" assumes "(\k. k < n \ f k \ carrier_mat d d)" "(\k. 
k < n \ g k \ carrier_mat d d)" "(\k. k < n \ trace (f k) \ trace (g k))" shows "trace (matrix_sum d f n) \ trace (matrix_sum d g n)" proof - have "sum (\k. trace (f k)) {0.. sum (\k. trace (g k)) {0.. carrier_mat d d" "partial_density_operator x4" "\x3aa \. x3aa \ set x3a \ well_com x3aa \ \ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x3aa \) \ trace (denote x3aa \) \ trace \ \ denote x3aa \ \ carrier_mat d d" shows "\k < x1. positive ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ carrier_mat d d \ trace ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ trace (x2a k * x4 * adjoint (x2a k))" proof - have x2ak: "\ k < x1. x2a k \ carrier_mat d d" using assms(1) measurement_dim by auto then have x2aa:"\ k < x1. (x2a k * x4 * adjoint (x2a k)) \ carrier_mat d d" using assms(2) by fastforce have posct: "positive ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ carrier_mat d d \ trace ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ trace (x2a k * x4 * adjoint (x2a k))" if k: "k < x1" for k proof - have lea: "adjoint (x2a k) * x2a k \\<^sub>L 1\<^sub>m d" using measurement_le_one_mat assms(1) k by auto have "(x2a k * x4 * adjoint (x2a k)) \ carrier_mat d d" using k x2aa assms(2) by fastforce moreover have "(x3a ! k) \ set x3a" using k assms(1) by simp moreover have "well_com (x3a ! k)" using k assms(1) using measure_well_com by blast moreover have "partial_density_operator (x2a k * x4 * adjoint (x2a k))" using pdo_close_under_measurement x2ak assms(2,3) lea k by blast ultimately have "positive (denote (x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ (denote (x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ carrier_mat d d \ trace (denote (x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ trace (x2a k * x4 * adjoint (x2a k))" using assms(4) by auto then show ?thesis using assms(1) k by auto qed then show ?thesis by auto qed lemma denote_measure_positive_trace_dim: assumes "well_com (Measure x1 x2a x3a)" "x4 \ carrier_mat d d" "partial_density_operator x4" "\x3aa \. x3aa \ set x3a \ well_com x3aa \ \ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x3aa \) \ trace (denote x3aa \) \ trace \ \ denote x3aa \ \ carrier_mat d d" shows "positive (denote (Measure x1 x2a x3a) x4) \ trace (denote (Measure x1 x2a x3a) x4) \ trace x4 \ (denote (Measure x1 x2a x3a) x4) \ carrier_mat d d" proof - have x2ak: "\ k < x1. x2a k \ carrier_mat d d" using assms(1) measurement_dim by auto then have x2aa:"\ k < x1. (x2a k * x4 * adjoint (x2a k)) \ carrier_mat d d" using assms(2) by fastforce have posct:"\ k < x1. positive ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ carrier_mat d d \ trace ((map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) \ trace (x2a k * x4 * adjoint (x2a k))" using map_denote_positive_trace_dim assms by auto have "trace (matrix_sum d (\k. (map denote x3a ! k) (x2a k * x4 * adjoint (x2a k))) x1) \ trace (matrix_sum d (\k. (x2a k * x4 * adjoint (x2a k))) x1)" using posct matrix_sum_trace_le[of x1 "(\k. (map denote x3a ! k) (x2a k * x4 * adjoint (x2a k)))" "(\k. x2a k * x4 * adjoint (x2a k)) "] x2aa by auto also have "\ = trace x4" using trace_measurement[of d "x1" "x2a" x4] assms(1,2) by auto finally have " trace (matrix_sum d (\k. (map denote x3a ! 
k) (x2a k * x4 * adjoint (x2a k))) x1) \ trace x4" by auto then have "trace (denote_measure x1 x2a (map denote x3a) x4) \ trace x4" unfolding denote_measure_def by auto then have "trace (denote (Measure x1 x2a x3a) x4) \ trace x4" by auto moreover from posct have "positive (denote (Measure x1 x2a x3a) x4)" apply auto unfolding denote_measure_def using matrix_sum_positive by auto moreover have "(denote (Measure x1 x2a x3a) x4) \ carrier_mat d d" apply auto unfolding denote_measure_def using matrix_sum_dim posct by (simp add: matrix_sum_dim) ultimately show ?thesis by auto qed lemma denote_positive_trace_dim: "well_com S \ \ \ carrier_mat d d \ partial_density_operator \ \ (positive (denote S \) \ trace (denote S \) \ trace \ \ denote S \ \ carrier_mat d d)" proof (induction arbitrary: \) case SKIP then show ?case unfolding partial_density_operator_def by auto next case (Utrans x) then show ?case proof - assume wc: "well_com (Utrans x)" and r: "\ \ carrier_mat d d" and pdo: "partial_density_operator \" show "positive (denote (Utrans x) \) \ trace (denote (Utrans x) \) \ trace \ \ denote (Utrans x) \ \ carrier_mat d d" proof - have "trace (x * \ * adjoint x) = trace ((adjoint x * x) * \)" using r apply (mat_assoc d) using wc by auto also have "\ = trace (1\<^sub>m d * \)" using wc inverts_mat_def inverts_mat_symm adjoint_dim by auto also have "\ = trace \" using r by auto finally have fst: "trace (x * \ * adjoint x) = trace \" by auto moreover have "positive (x * \ * adjoint x)" using positive_close_under_left_right_mult_adjoint r pdo wc unfolding partial_density_operator_def by auto moreover have "x * \ * adjoint x \ carrier_mat d d" using r wc adjoint_dim index_mult_mat by auto ultimately show ?thesis by auto qed qed next case (Seq x1 x2a) then show ?case proof - assume dx1: "(\\. well_com x1 \ \ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x1 \) \ trace (denote x1 \) \ trace \ \ denote x1 \ \ carrier_mat d d)" and dx2a: "(\\. well_com x2a \ \ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x2a \) \ trace (denote x2a \) \ trace \ \ denote x2a \ \ carrier_mat d d)" and wc: "well_com (Seq x1 x2a)" and r: "\ \ carrier_mat d d" and pdo: "partial_density_operator \" show "positive (denote (Seq x1 x2a) \) \ trace (denote (Seq x1 x2a) \) \ trace \ \ denote (Seq x1 x2a) \ \ carrier_mat d d" proof - have ptc: "positive (denote x1 \) \ trace (denote x1 \) \ trace \ \ denote x1 \ \ carrier_mat d d" using wc r pdo dx1 by auto then have "partial_density_operator (denote x1 \)" using pdo unfolding partial_density_operator_def by auto then show ?thesis using ptc dx2a wc dual_order.trans by auto qed qed next case (Measure x1 x2a x3a) then show ?case using denote_measure_positive_trace_dim by auto next case (While x1 x2a) then show ?case proof - have "adjoint (x1 0) * (x1 0) + adjoint (x1 1) * (x1 1) = 1\<^sub>m d" using measurement_id2 While by auto moreover have "(\\. 
\ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x2a \) \ trace (denote x2a \) \ trace \ \ denote x2a \ \ carrier_mat d d)" using While by fastforce moreover have "x1 0 \ carrier_mat d d \ x1 1 \ carrier_mat d d" using measurement_dim While by fastforce ultimately have "denote_while (x1 0) (x1 1) (denote x2a) \ \ carrier_mat d d \ positive (denote_while (x1 0) (x1 1) (denote x2a) \) \ trace (denote_while (x1 0) (x1 1) (denote x2a) \) \ trace \" using denote_while_dim_positive[of "x1 0" "x1 1" "denote x2a" "\"] While by fastforce then show ?thesis by auto qed qed lemma denote_dim_pdo: "well_com S \ \ \ carrier_mat d d \ partial_density_operator \ \ (denote S \ \ carrier_mat d d) \ (partial_density_operator (denote S \))" using denote_positive_trace_dim unfolding partial_density_operator_def by fastforce lemma denote_dim: "well_com S \ \ \ carrier_mat d d \ partial_density_operator \ \ (denote S \ \ carrier_mat d d)" using denote_positive_trace_dim by auto lemma denote_trace: "well_com S \ \ \ carrier_mat d d \ partial_density_operator \ \ trace (denote S \) \ trace \" using denote_positive_trace_dim by auto lemma denote_partial_density_operator: assumes "well_com S" "partial_density_operator \" "\ \ carrier_mat d d" shows "partial_density_operator (denote S \)" using assms denote_positive_trace_dim unfolding partial_density_operator_def using dual_order.trans by blast lemma denote_while_n_sum_mat_seq: assumes "\ \ carrier_mat d d" and "x1 0 \ carrier_mat d d" and "x1 1 \ carrier_mat d d" and "partial_density_operator \" and wc: "well_com x2" and mea: "measurement d 2 x1" shows "matrix_seq d (matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \))" proof - let ?A = "x1 0" and ?B = "x1 1" have dx2:"\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive ((denote x2) \) \ trace ((denote x2) \) \ trace \ \ (denote x2) \ \ carrier_mat d d" using denote_positive_trace_dim wc by auto have lo1: "adjoint ?A * ?A + adjoint ?B * ?B = 1\<^sub>m d" using measurement_id2 assms by auto have "\n. matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n \ carrier_mat d d" using assms dx2 by (metis denote_while_n_dim matrix_sum_dim) moreover have "(\n. partial_density_operator (matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n))" using assms dx2 lo1 by (metis denote_while_n_sum_partial_density) moreover have "(\n. matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n \\<^sub>L matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) (Suc n))" using assms dx2 lo1 by (metis denote_while_n_sum_lowner_le) ultimately show ?thesis unfolding matrix_seq_def by auto qed lemma denote_while_n_add: assumes M0: "x1 0 \ carrier_mat d d" and M1: "x1 1 \ carrier_mat d d" and wc: "well_com x2" and mea: "measurement d 2 x1" and DS: "(\\\<^sub>1 \\<^sub>2. 
\\<^sub>1 \ carrier_mat d d \ \\<^sub>2 \ carrier_mat d d \ partial_density_operator \\<^sub>1 \ partial_density_operator \\<^sub>2 \ trace (\\<^sub>1 + \\<^sub>2) \ 1 \ denote x2 (\\<^sub>1 + \\<^sub>2) = denote x2 \\<^sub>1 + denote x2 \\<^sub>2)" shows "\\<^sub>1 \ carrier_mat d d \ \\<^sub>2 \ carrier_mat d d \ partial_density_operator \\<^sub>1 \ partial_density_operator \\<^sub>2 \ trace (\\<^sub>1 + \\<^sub>2) \ 1 \ denote_while_n (x1 0) (x1 1) (denote x2) k (\\<^sub>1 + \\<^sub>2) = denote_while_n (x1 0) (x1 1) (denote x2) k \\<^sub>1 + denote_while_n (x1 0) (x1 1) (denote x2) k \\<^sub>2" proof (auto, induct k arbitrary: \\<^sub>1 \\<^sub>2) case 0 then show ?case apply auto using M0 by (mat_assoc d) next case (Suc k) then show ?case proof - let ?A = "x1 0" and ?B = "x1 1" have dx2:"(\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive ((denote x2) \) \ trace ((denote x2) \) \ trace \ \ (denote x2) \ \ carrier_mat d d) " using denote_positive_trace_dim wc by auto have lo1: "adjoint ?B * ?B \\<^sub>L 1\<^sub>m d" using measurement_le_one_mat assms by auto have dim1: "x1 1 * \\<^sub>1 * adjoint (x1 1) \ carrier_mat d d" using assms Suc by (metis adjoint_dim mult_carrier_mat) moreover have pdo1: "partial_density_operator (x1 1 * \\<^sub>1 * adjoint (x1 1))" using pdo_close_under_measurement assms(2) Suc(2,4) lo1 by auto ultimately have dimr1: "denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1)) \ carrier_mat d d" using dx2 by auto have dim2: "x1 1 * \\<^sub>2 * adjoint (x1 1) \ carrier_mat d d" using assms Suc by (metis adjoint_dim mult_carrier_mat) moreover have pdo2: "partial_density_operator (x1 1 * \\<^sub>2 * adjoint (x1 1))" using pdo_close_under_measurement assms(2) Suc lo1 by auto ultimately have dimr2: "denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1)) \ carrier_mat d d" using dx2 by auto have pdor1: "partial_density_operator (denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1)))" using denote_partial_density_operator assms dim1 pdo1 by auto have pdor2: "partial_density_operator (denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1)))" using denote_partial_density_operator assms dim2 pdo2 by auto have "trace (denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1))) \ trace (x1 1 * \\<^sub>1 * adjoint (x1 1))" using dx2 dim1 pdo1 by auto also have tr1: "\ \ trace \\<^sub>1" using trace_decrease_mul_adj assms Suc lo1 by auto finally have trr1:" trace (denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1))) \ trace \\<^sub>1" by auto have "trace (denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1))) \ trace (x1 1 * \\<^sub>2 * adjoint (x1 1))" using dx2 dim2 pdo2 by auto also have tr2: "\ \ trace \\<^sub>2" using trace_decrease_mul_adj assms Suc lo1 by auto finally have trr2:" trace (denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1))) \ trace \\<^sub>2" by auto from tr1 tr2 Suc have "trace ( (x1 1 * \\<^sub>1 * adjoint (x1 1)) + (x1 1 * \\<^sub>2 * adjoint (x1 1))) \ trace (\\<^sub>1 + \\<^sub>2)" using trace_add_linear trace_add_linear[of "(x1 1 * \\<^sub>1 * adjoint (x1 1))" d "(x1 1 * \\<^sub>2 * adjoint (x1 1))"] trace_add_linear[of \\<^sub>1 d \\<^sub>2] using dim1 dim2 by auto then have trless1: "trace ( (x1 1 * \\<^sub>1 * adjoint (x1 1)) + (x1 1 * \\<^sub>2 * adjoint (x1 1))) \ 1" using Suc by auto from trr1 trr2 Suc have "trace (denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1)) + denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1))) \ trace (\\<^sub>1 + \\<^sub>2)" using trace_add_linear[of "denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1))" d "denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1))"] trace_add_linear[of \\<^sub>1 d 
\\<^sub>2] using dimr1 dimr2 by auto then have trless2: "trace (denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1)) + denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1))) \ 1" using Suc by auto have "x1 1 * (\\<^sub>1 + \\<^sub>2) * adjoint (x1 1) = (x1 1 * \\<^sub>1 * adjoint (x1 1)) + (x1 1 * \\<^sub>2 * adjoint (x1 1))" using M1 Suc by (mat_assoc d) then have deadd: "denote x2 (x1 1 * (\\<^sub>1 + \\<^sub>2) * adjoint (x1 1)) = denote x2 (x1 1 * \\<^sub>1 * adjoint (x1 1)) + denote x2 (x1 1 * \\<^sub>2 * adjoint (x1 1))" using assms(5) dim1 dim2 pdo1 pdo2 trless1 by auto from dimr1 dimr2 pdor1 pdor2 trless2 Suc(1) deadd show ?thesis by auto qed qed lemma denote_while_add: assumes r1: "\\<^sub>1 \ carrier_mat d d" and r2: "\\<^sub>2 \ carrier_mat d d" and M0: "x1 0 \ carrier_mat d d" and M1: "x1 1 \ carrier_mat d d" and pdo1: "partial_density_operator \\<^sub>1" and pdo2: "partial_density_operator \\<^sub>2" and tr12: "trace (\\<^sub>1 + \\<^sub>2) \ 1" and wc: "well_com x2" and mea: "measurement d 2 x1" and DS: "(\\\<^sub>1 \\<^sub>2. \\<^sub>1 \ carrier_mat d d \ \\<^sub>2 \ carrier_mat d d \ partial_density_operator \\<^sub>1 \ partial_density_operator \\<^sub>2 \ trace (\\<^sub>1 + \\<^sub>2) \ 1 \ denote x2 (\\<^sub>1 + \\<^sub>2) = denote x2 \\<^sub>1 + denote x2 \\<^sub>2)" shows "denote_while (x1 0) (x1 1) (denote x2) (\\<^sub>1 + \\<^sub>2) = denote_while (x1 0) (x1 1) (denote x2) \\<^sub>1 + denote_while (x1 0) (x1 1) (denote x2) \\<^sub>2" proof - let ?A = "x1 0" and ?B = "x1 1" have dx2:"(\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive ((denote x2) \) \ trace ((denote x2) \) \ trace \ \ (denote x2) \ \ carrier_mat d d) " using denote_positive_trace_dim wc by auto have lo1: "adjoint ?A * ?A + adjoint ?B * ?B = 1\<^sub>m d" using measurement_id2 assms by auto have pdo12: "partial_density_operator (\\<^sub>1 + \\<^sub>2)" using pdo1 pdo2 unfolding partial_density_operator_def using tr12 positive_add assms by auto have ms1: "matrix_seq d (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1))" using denote_while_n_sum_mat_seq assms by auto have ms2: "matrix_seq d (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2))" using denote_while_n_sum_mat_seq assms by auto have dim1: "(\n. matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \\<^sub>1) n \ carrier_mat d d)" using assms dx2 by (metis denote_while_n_dim matrix_sum_dim) have dim2: "(\n. matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \\<^sub>2) n \ carrier_mat d d)" using assms dx2 by (metis denote_while_n_dim matrix_sum_dim) have "trace (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1) n + matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2) n) \ trace (\\<^sub>1 + \\<^sub>2)" for n proof - have "trace (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1) n) \ trace \\<^sub>1" using denote_while_n_sum_trace dx2 lo1 assms by auto moreover have "trace (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2) n) \ trace \\<^sub>2" using denote_while_n_sum_trace dx2 lo1 assms by auto ultimately show ?thesis using trace_add_linear dim1 dim2 by (metis add_mono_thms_linordered_semiring(1) r1 r2) qed then have "\n. trace (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1) n + matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2) n) \ 1" using assms(7) dual_order.trans by blast then have lladd: "matrix_seq.lowner_lub (\n. (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1)) n + (matrix_sum d (\n. 
denote_while_n ?A ?B (denote x2) n \\<^sub>2)) n) = matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1)) + matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2))" using lowner_lub_add ms1 ms2 by auto have "matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n (\\<^sub>1 + \\<^sub>2)) m = matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1) m + matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2) m" for m proof - have "(\k. k < m \ denote_while_n (x1 0) (x1 1) (denote x2) k (\\<^sub>1 + \\<^sub>2) \ carrier_mat d d)" using denote_while_n_dim dx2 pdo12 assms measurement_dim by auto moreover have "(\k. k < m \ denote_while_n (x1 0) (x1 1) (denote x2) k \\<^sub>1 \ carrier_mat d d)" using denote_while_n_dim dx2 assms measurement_dim by auto moreover have "(\k. k < m \ denote_while_n (x1 0) (x1 1) (denote x2) k \\<^sub>2 \ carrier_mat d d)" using denote_while_n_dim dx2 assms measurement_dim by auto moreover have "(\ k < m. denote_while_n (x1 0) (x1 1) (denote x2) k (\\<^sub>1 + \\<^sub>2) = denote_while_n (x1 0) (x1 1) (denote x2) k \\<^sub>1 + denote_while_n (x1 0) (x1 1) (denote x2) k \\<^sub>2)" using denote_while_n_add assms by auto ultimately show ?thesis using matrix_sum_add[of m "(\n. denote_while_n (x1 0) (x1 1) (denote x2) n (\\<^sub>1 + \\<^sub>2))" d "(\n. denote_while_n (x1 0) (x1 1) (denote x2) n \\<^sub>1)" "(\n. denote_while_n (x1 0) (x1 1) (denote x2) n \\<^sub>2)"] by auto qed then have "matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n (\\<^sub>1 + \\<^sub>2))) = matrix_seq.lowner_lub (\n. (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>1)) n + (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \\<^sub>2)) n)" using lladd by presburger then show ?thesis unfolding denote_while_def matrix_inf_sum_def using lladd by auto qed lemma denote_add: "well_com S \ \\<^sub>1 \ carrier_mat d d \ \\<^sub>2 \ carrier_mat d d \ partial_density_operator \\<^sub>1 \ partial_density_operator \\<^sub>2 \ trace (\\<^sub>1 + \\<^sub>2) \ 1 \ denote S (\\<^sub>1 + \\<^sub>2) = denote S \\<^sub>1 + denote S \\<^sub>2" proof (induction arbitrary: \\<^sub>1 \\<^sub>2) case SKIP then show ?case by auto next case (Utrans U) then show ?case by (clarsimp, mat_assoc d) next case (Seq x1 x2a) then show ?case proof - have dim1: "denote x1 \\<^sub>1 \ carrier_mat d d" using denote_positive_trace_dim Seq by auto have dim2: "denote x1 \\<^sub>2 \ carrier_mat d d" using denote_positive_trace_dim Seq by auto have "trace (denote x1 \\<^sub>1) \ trace \\<^sub>1" using denote_positive_trace_dim Seq by auto moreover have "trace (denote x1 \\<^sub>2) \ trace \\<^sub>2" using denote_positive_trace_dim Seq by auto ultimately have tr: "trace (denote x1 \\<^sub>1 + denote x1 \\<^sub>2) \ 1" using Seq(4,5,8) trace_add_linear dim1 dim2 by (smt add_mono order_trans) have "denote (Seq x1 x2a) (\\<^sub>1 + \\<^sub>2) = denote x2a (denote x1 (\\<^sub>1 + \\<^sub>2))" by auto moreover have "denote x1 (\\<^sub>1 + \\<^sub>2) = denote x1 \\<^sub>1 + denote x1 \\<^sub>2" using Seq by auto moreover have "partial_density_operator (denote x1 \\<^sub>1)" using denote_partial_density_operator Seq by auto moreover have "partial_density_operator (denote x1 \\<^sub>2)" using denote_partial_density_operator Seq by auto ultimately show ?thesis using Seq dim1 dim2 tr by auto qed next case (Measure x1 x2a x3a) then show ?case proof - have ptc: "\x3aa \. 
x3aa \ set x3a \ well_com x3aa \ \ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x3aa \) \ trace (denote x3aa \) \ trace \ \ denote x3aa \ \ carrier_mat d d" using denote_positive_trace_dim Measure by auto then have map:"\\. \ \ carrier_mat d d \ partial_density_operator \ \ \ k < x1. positive ((map denote x3a ! k) (x2a k * \ * adjoint (x2a k))) \ ((map denote x3a ! k) (x2a k * \ * adjoint (x2a k))) \ carrier_mat d d \ trace ((map denote x3a ! k) (x2a k * \ * adjoint (x2a k))) \ trace (x2a k * \ * adjoint (x2a k))" using Measure map_denote_positive_trace_dim by auto from map have mapd1: "\k. k < x1 \ (map denote x3a ! k) (x2a k * \\<^sub>1 * adjoint (x2a k)) \ carrier_mat d d" using Measure by auto from map have mapd2: "\k. k < x1 \ (map denote x3a ! k) (x2a k * \\<^sub>2 * adjoint (x2a k)) \ carrier_mat d d" using Measure by auto have dim1:"\k. k < x1 \ x2a k * \\<^sub>1 * adjoint (x2a k) \ carrier_mat d d" using well_com.simps(5) measurement_dim Measure by fastforce have dim2: "\k. k < x1 \ x2a k * \\<^sub>2 * adjoint (x2a k) \ carrier_mat d d" using well_com.simps(5) measurement_dim Measure by fastforce have "\k. k < x1 \ (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) \ carrier_mat d d" using well_com.simps(5) measurement_dim Measure by fastforce have lea: "\k. k < x1 \ adjoint (x2a k) * x2a k \\<^sub>L 1\<^sub>m d" using measurement_le_one_mat Measure by auto moreover have dimx: "\k. k < x1 \ x2a k \ carrier_mat d d" using Measure measurement_dim by auto ultimately have pdo12:"\k. k < x1 \ partial_density_operator (x2a k * \\<^sub>1 * adjoint (x2a k)) \ partial_density_operator (x2a k * \\<^sub>2 * adjoint (x2a k))" using pdo_close_under_measurement Measure measurement_dim by blast have trless: "trace (x2a k * \\<^sub>1 * adjoint (x2a k) + x2a k * \\<^sub>2 * adjoint (x2a k)) \ 1" if k: "k < x1" for k proof - have "trace (x2a k * \\<^sub>1 * adjoint (x2a k)) \ trace \\<^sub>1" using trace_decrease_mul_adj dimx Measure lea k by auto moreover have "trace (x2a k * \\<^sub>2 * adjoint (x2a k)) \ trace \\<^sub>2" using trace_decrease_mul_adj dimx Measure lea k by auto ultimately have "trace (x2a k * \\<^sub>1 * adjoint (x2a k) + x2a k * \\<^sub>2 * adjoint (x2a k)) \ trace (\\<^sub>1 + \\<^sub>2)" using trace_add_linear dim1 dim2 Measure k by (metis add_mono_thms_linordered_semiring(1)) then show ?thesis using Measure(7) by auto qed have dist: "(x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) = (x2a k * \\<^sub>1 * adjoint (x2a k)) + (x2a k * \\<^sub>2 * adjoint (x2a k))" if k: "k < x1" for k proof - have "(x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) = ((x2a k * \\<^sub>1 + x2a k * \\<^sub>2) * adjoint (x2a k))" using mult_add_distrib_mat Measure well_com.simps(4) measurement_dim by (metis k) also have "\ = (x2a k * \\<^sub>1 * adjoint (x2a k)) + (x2a k * \\<^sub>2 * adjoint (x2a k))" apply (mat_assoc d) using Measure k well_com.simps(4) measurement_dim by auto finally show ?thesis by auto qed have mapadd: "(map denote x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) = (map denote x3a ! k) (x2a k * \\<^sub>1 * adjoint (x2a k)) + (map denote x3a ! k) (x2a k * \\<^sub>2 * adjoint (x2a k))" if k: "k < x1" for k proof - have "(map denote x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) = denote (x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k))" using Measure.prems(1) k by auto then have mapx: "(map denote x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) = denote (x3a ! 
k) ((x2a k * \\<^sub>1 * adjoint (x2a k)) + (x2a k * \\<^sub>2 * adjoint (x2a k)))" using dist k by auto have "denote (x3a ! k) ((x2a k * \\<^sub>1 * adjoint (x2a k)) + (x2a k * \\<^sub>2 * adjoint (x2a k))) = denote (x3a ! k) (x2a k * \\<^sub>1 * adjoint (x2a k)) + denote (x3a ! k) (x2a k * \\<^sub>2 * adjoint (x2a k))" using Measure(1,2) dim1 dim2 pdo12 trless k by (simp add: list_all_length) then show ?thesis using Measure.prems(1) mapx k by auto qed then have mapd12:"(\k. k < x1 \ (map denote x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)) \ carrier_mat d d)" using mapd1 mapd2 by auto have "matrix_sum d (\k. (map denote x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k))) x1 = matrix_sum d (\k. (map denote x3a ! k) (x2a k * \\<^sub>1 * adjoint (x2a k))) x1 + matrix_sum d (\k. (map denote x3a ! k) (x2a k * \\<^sub>2 * adjoint (x2a k))) x1" using matrix_sum_add[of x1 "(\k. (map denote x3a ! k) (x2a k * (\\<^sub>1 + \\<^sub>2) * adjoint (x2a k)))" d "(\k. (map denote x3a ! k) (x2a k * \\<^sub>1 * adjoint (x2a k)))" "(\k. (map denote x3a ! k) (x2a k * \\<^sub>2 * adjoint (x2a k)))"] using mapd12 mapd1 mapd2 mapadd by auto then show ?thesis using denote.simps(4) unfolding denote_measure_def by auto qed next case (While x1 x2) then show ?case apply auto using denote_while_add measurement_dim by auto qed lemma mulfact: fixes c:: real and a:: complex and b:: complex assumes "c\0" "a \ b" shows "c * a \ c * b" - using assms Im_complex_of_real Re_complex_of_real less_eq_complex_def mult_eq_0_iff real_mult_le_cancel_iff2 times_complex.simps - using eq_iff by fastforce + using assms mult_le_cancel_iff2 by force lemma denote_while_n_scale: fixes c:: real assumes "c\0" "measurement d 2 x1" "well_com x2" "(\\. \ \ carrier_mat d d \ partial_density_operator \ \ trace (c \\<^sub>m \) \ 1 \ denote x2 (c \\<^sub>m \) = c \\<^sub>m denote x2 \)" shows "\ \ carrier_mat d d \ partial_density_operator \ \ trace (c \\<^sub>m \) \ 1 \ denote_while_n (x1 0) (x1 1) (denote x2) n (complex_of_real c \\<^sub>m \) = c \\<^sub>m (denote_while_n (x1 0) (x1 1) (denote x2) n \)" proof (auto, induct n arbitrary: \) case 0 then show ?case apply auto apply (mat_assoc d) using assms measurement_dim by auto next case (Suc n) then show ?case proof - let ?A = "x1 0" and ?B = "x1 1" have dx2:"(\\. 
\ \ carrier_mat d d \ partial_density_operator \ \ positive ((denote x2) \) \ trace ((denote x2) \) \ trace \ \ (denote x2) \ \ carrier_mat d d) " using denote_positive_trace_dim assms by auto have lo1: "adjoint ?B * ?B \\<^sub>L 1\<^sub>m d" using measurement_le_one_mat assms by auto have dim1: "x1 1 * \ * adjoint (x1 1) \ carrier_mat d d" using assms(2) Suc(2) measurement_dim by (meson adjoint_dim mult_carrier_mat one_less_numeral_iff semiring_norm(76)) moreover have pdo1: "partial_density_operator (x1 1 * \ * adjoint (x1 1))" using pdo_close_under_measurement assms Suc lo1 measurement_dim by (metis One_nat_def lessI numeral_2_eq_2) ultimately have dimr: "denote x2 (x1 1 * \ * adjoint (x1 1)) \ carrier_mat d d" using dx2 by auto have pdor: "partial_density_operator (denote x2 (x1 1 * \ * adjoint (x1 1)))" using denote_partial_density_operator assms dim1 pdo1 by auto have "trace (denote x2 (x1 1 * \ * adjoint (x1 1))) \ trace (x1 1 * \ * adjoint (x1 1))" using dx2 dim1 pdo1 by auto also have trr1: "\ \ trace \" using trace_decrease_mul_adj assms Suc lo1 measurement_dim by auto finally have trr: "trace (denote x2 (x1 1 * \ * adjoint (x1 1))) \ trace \" by auto moreover have "trace (c \\<^sub>m denote x2 (x1 1 * \ * adjoint (x1 1))) = c * trace (denote x2 (x1 1 * \ * adjoint (x1 1)))" using trace_smult dimr by auto moreover have trcr: "trace (c \\<^sub>m \) = c * trace \" using trace_smult Suc by auto ultimately have "trace (c \\<^sub>m denote x2 (x1 1 * \ * adjoint (x1 1))) \ trace (c \\<^sub>m \)" using assms(1) state_sig.mulfact by auto then have trrc: "trace (c \\<^sub>m denote x2 (x1 1 * \ * adjoint (x1 1))) \ 1" using Suc by auto have "trace (c \\<^sub>m (x1 1 * \ * adjoint (x1 1))) = c * trace (x1 1 * \ * adjoint (x1 1))" using trace_smult dim1 by auto then have "trace (c \\<^sub>m (x1 1 * \ * adjoint (x1 1))) \ trace (c \\<^sub>m \)" using trcr trr1 assms(1) using state_sig.mulfact by auto then have trrle: "trace (c \\<^sub>m (x1 1 * \ * adjoint (x1 1))) \ 1" using Suc by auto have "x1 1 * (complex_of_real c \\<^sub>m \) * adjoint (x1 1) = complex_of_real c \\<^sub>m (x1 1 * \ * adjoint (x1 1))" apply (mat_assoc d) using Suc.prems(1) assms measurement_dim by auto then have "denote x2 (x1 1 * (complex_of_real c \\<^sub>m \) * adjoint (x1 1)) = (denote x2 (c \\<^sub>m (x1 1 * (\) * adjoint (x1 1))))" by auto moreover have "denote x2 (c \\<^sub>m (x1 1 * \ * adjoint (x1 1))) = c \\<^sub>m denote x2 (x1 1 * \ * adjoint (x1 1))" using assms(4) dim1 pdo1 trrle by auto ultimately have "denote x2 (x1 1 * (complex_of_real c \\<^sub>m \) * adjoint (x1 1)) = c \\<^sub>m denote x2 (x1 1 * \ * adjoint (x1 1))" using assms by auto then show ?thesis using Suc dimr pdor trrc by auto qed qed lemma denote_while_scale: fixes c:: real assumes "\ \ carrier_mat d d" "partial_density_operator \" "trace (c \\<^sub>m \) \ 1" "c \ 0" "measurement d 2 x1" "well_com x2" "(\\. \ \ carrier_mat d d \ partial_density_operator \ \ trace (c \\<^sub>m \) \ 1 \ denote x2 (c \\<^sub>m \) = c \\<^sub>m denote x2 \)" shows "denote_while (x1 0) (x1 1) (denote x2) (c \\<^sub>m \) = c \\<^sub>m denote_while (x1 0) (x1 1) (denote x2) \" proof - let ?A = "x1 0" and ?B = "x1 1" have dx2:"(\\. \ \ carrier_mat d d \ partial_density_operator \ \ positive ((denote x2) \) \ trace ((denote x2) \) \ trace \ \ (denote x2) \ \ carrier_mat d d) " using denote_positive_trace_dim assms by auto have lo1: "adjoint ?A * ?A + adjoint ?B * ?B = 1\<^sub>m d" using measurement_id2 assms by auto have ms: "matrix_seq d (matrix_sum d (\n. 
denote_while_n ?A ?B (denote x2) n \))" using denote_while_n_sum_mat_seq assms measurement_dim by auto have trcless: "trace (c \\<^sub>m matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n) \ 1" for n proof - have dimr: "matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n \ carrier_mat d d" using assms dx2 denote_while_n_dim matrix_sum_dim using matrix_seq.dim ms by auto have "trace (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \) n) \ trace \" using denote_while_n_sum_trace dx2 lo1 assms measurement_dim by auto moreover have "trace (c \\<^sub>m matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n) = c * trace (matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n)" using trace_smult dimr by auto moreover have "trace (c \\<^sub>m \) = c * trace \" using trace_smult assms by auto ultimately have "trace (c \\<^sub>m matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \) n) \ trace (c \\<^sub>m \)" using assms(4) by (simp add: ordered_comm_semiring_class.comm_mult_left_mono) then show ?thesis using assms by auto qed have llscale: "matrix_seq.lowner_lub (\n. c \\<^sub>m (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \)) n) = c \\<^sub>m matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \))" using lowner_lub_scale[of d "(matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n \))" c] ms trcless assms(4) by auto have "matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n (complex_of_real c \\<^sub>m \)) m = c \\<^sub>m (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \)) m" for m proof - have dim:"(\k. k < m \ denote_while_n (x1 0) (x1 1) (denote x2) k \ \ carrier_mat d d)" using denote_while_n_dim dx2 assms measurement_dim by auto then have dimr: "(\k. k < m \ c \\<^sub>m denote_while_n (x1 0) (x1 1) (denote x2) k \ \ carrier_mat d d)" using smult_carrier_mat by auto have "\ n\<^sub>m \) = c \\<^sub>m (denote_while_n (x1 0) (x1 1) (denote x2) n \)" using denote_while_n_scale assms by auto then have "(matrix_sum d (\n. c \\<^sub>m denote_while_n ?A ?B (denote x2) n \)) m = matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n (complex_of_real c \\<^sub>m \)) m " using matrix_sum_cong[of m "\n. complex_of_real c \\<^sub>m denote_while_n (x1 0) (x1 1) (denote x2) n \"] dimr by fastforce moreover have "(matrix_sum d (\n. c \\<^sub>m denote_while_n ?A ?B (denote x2) n \)) m = c \\<^sub>m (matrix_sum d (\n. denote_while_n ?A ?B (denote x2) n \)) m" using matrix_sum_smult[of m "(\n. denote_while_n (x1 0) (x1 1) (denote x2) n \)" d c] dim by auto ultimately show ?thesis by auto qed then have "matrix_seq.lowner_lub (matrix_sum d (\n. denote_while_n (x1 0) (x1 1) (denote x2) n (complex_of_real c \\<^sub>m \))) = matrix_seq.lowner_lub (\n. c \\<^sub>m (matrix_sum d (\n. 
denote_while_n ?A ?B (denote x2) n \)) n)" by meson then show ?thesis unfolding denote_while_def matrix_inf_sum_def using llscale by auto qed lemma denote_scale: fixes c :: real assumes "c\0" shows "well_com S \ \ \ carrier_mat d d \ partial_density_operator \ \ trace (c \\<^sub>m \) \ 1 \ denote S (c \\<^sub>m \) = c \\<^sub>m denote S \" proof (induction arbitrary: \) case SKIP then show ?case by auto next case (Utrans x) then show ?case unfolding denote.simps apply (mat_assoc d) using Utrans by auto next case (Seq x1 x2a) then show ?case proof - have cd: "denote x1 (c \\<^sub>m \) = c \\<^sub>m denote x1 \" using Seq by auto have x1: "denote x1 \ \ carrier_mat d d \ partial_density_operator (denote x1 \) \ trace (denote x1 \) \ trace \" using denote_positive_trace_dim Seq denote_partial_density_operator by auto have "trace (c \\<^sub>m denote x1 \) = c * trace (denote x1 \)" using trace_smult x1 by auto also have "\ \ c * trace \" using x1 assms by (metis Seq.prems cd denote_positive_trace_dim partial_density_operator_def positive_scale smult_carrier_mat trace_smult well_com.simps(3)) also have "\ \ 1" using Seq(6) trace_smult Seq(4) by (simp add: trace_smult) finally have "trace (c \\<^sub>m denote x1 \) \1" by auto then have "denote x2a (c \\<^sub>m denote x1 \) = c \\<^sub>m denote x2a ( denote x1 \)" using x1 Seq by auto then show ?thesis using denote.simps(4) cd by auto qed next case (Measure x1 x2a x3a) then show ?case proof - have ptc: "\x3aa \. x3aa \ set x3a \ well_com x3aa \ \ \ carrier_mat d d \ partial_density_operator \ \ positive (denote x3aa \) \ trace (denote x3aa \) \ trace \ \ denote x3aa \ \ carrier_mat d d" using denote_positive_trace_dim Measure by auto have cad: "x2a k * (c \\<^sub>m \) * adjoint (x2a k) = c \\<^sub>m (x2a k * \ * adjoint (x2a k))" if k: "k < x1" for k apply (mat_assoc d) using well_com.simps Measure measurement_dim k by auto have "\k * adjoint (x2a k) \ carrier_mat d d" using Measure(2) measurement_dim Measure(3) by fastforce have lea: "\k\<^sub>L 1\<^sub>m d" using measurement_le_one_mat Measure(2) by auto then have pdox: "\ k * adjoint (x2a k))" using pdo_close_under_measurement Measure(2,3,4) measurement_dim by (meson state_sig.well_com.simps(4)) have x2aa:"\ k < x1. (x2a k * \ * adjoint (x2a k)) \ carrier_mat d d" using Measure(2,3) measurement_dim by fastforce have dimm: "(\k. k < x1 \ (map denote x3a ! k) (x2a k * \ * adjoint (x2a k)) \ carrier_mat d d)" using map_denote_positive_trace_dim Measure(2,3,4) ptc by auto then have dimcm: "(\k. k < x1 \ c \\<^sub>m (map denote x3a ! k) (x2a k * \ * adjoint (x2a k)) \ carrier_mat d d)" using smult_carrier_mat by auto have tra: "\ k < x1. trace ((x2a k * \ * adjoint (x2a k))) \ trace \" using trace_decrease_mul_adj Measure lea measurement_dim by auto have tra1: "trace (c \\<^sub>m (x2a k * \ * adjoint (x2a k))) \ 1" if k: "k < x1" for k proof - have trle: "trace (x2a k * \ * adjoint (x2a k)) \ trace \" using tra k by auto have "trace (c \\<^sub>m (x2a k * \ * adjoint (x2a k))) = c * trace ((x2a k * \ * adjoint (x2a k)))" using trace_smult x2aa k by auto also have "\ \ c * trace \" using trle assms mulfact by auto also have "\ \ 1" using Measure(3,5) trace_smult by metis finally show ?thesis by auto qed have "(map denote x3a ! k) (x2a k * (c \\<^sub>m \) * adjoint (x2a k)) = c \\<^sub>m (map denote x3a ! k) (x2a k * \ * adjoint (x2a k))" if k: "k < x1" for k proof - have "denote (x3a ! k) (x2a k * (c \\<^sub>m \) * adjoint (x2a k)) = denote (x3a ! 
k) (c \\<^sub>m (x2a k * \ * adjoint (x2a k)))" using cad k by auto also have "\ = c \\<^sub>m denote (x3a ! k) ( (x2a k * \ * adjoint (x2a k)))" using Measure(1,2) pdox x2aa tra1 k using measure_well_com by auto finally have "denote (x3a ! k) (x2a k * (complex_of_real c \\<^sub>m \) * adjoint (x2a k)) = complex_of_real c \\<^sub>m denote (x3a ! k) (x2a k * \ * adjoint (x2a k))" by auto then show ?thesis using Measure.prems(1) k by auto qed then have "matrix_sum d (\k. c \\<^sub>m (map denote x3a ! k) (x2a k * \ * adjoint (x2a k))) x1 = matrix_sum d (\k. (map denote x3a ! k) (x2a k * (c \\<^sub>m \) * adjoint (x2a k))) x1" using matrix_sum_cong[of x1 "(\k. complex_of_real c \\<^sub>m (map denote x3a ! k) (x2a k * \ * adjoint (x2a k)))" "(\k. (map denote x3a ! k) (x2a k * (complex_of_real c \\<^sub>m \) * adjoint (x2a k)))"] dimcm by auto then have "matrix_sum d (\k. (map denote x3a ! k) (x2a k * (c \\<^sub>m \) * adjoint (x2a k))) x1 = c \\<^sub>m matrix_sum d (\k. (map denote x3a ! k) (x2a k * \ * adjoint (x2a k))) x1" using matrix_sum_smult[of x1 "(\k. (map denote x3a ! k) (x2a k * \ * adjoint (x2a k)))" d c] dimm by auto then have "denote (Measure x1 x2a x3a) (c \\<^sub>m \) = c \\<^sub>m denote (Measure x1 x2a x3a) \" using denote.simps(4)[of x1 x2a x3a "c \\<^sub>m \"] using denote.simps(4)[of x1 x2a x3a "\"] unfolding denote_measure_def by auto then show ?thesis by auto qed next case (While x1 x2a) then show ?case apply auto using denote_while_scale assms by auto qed lemma limit_mat_denote_while_n: assumes wc: "well_com (While M S)" and dr: "\ \ carrier_mat d d" and pdor: "partial_density_operator \" shows "limit_mat (matrix_sum d (\k. denote_while_n (M 0) (M 1) (denote S) k \)) (denote (While M S) \) d" proof - have m: "measurement d 2 M" using wc by auto then have dM0: "M 0 \ carrier_mat d d" and dM1: "M 1 \ carrier_mat d d" and id: "adjoint (M 0) * (M 0) + adjoint (M 1) * (M 1) = 1\<^sub>m d" using measurement_id2 m measurement_def by auto have wcs: "well_com S" using wc by auto have DS: "positive (denote S \) \ trace (denote S \) \ trace \ \ denote S \ \ carrier_mat d d" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ using wcs that denote_positive_trace_dim by auto have sumdd: "(\n. matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \) n \ carrier_mat d d)" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ using denote_while_n_sum_dim dM0 dM1 DS that by auto have sumtr: "\ n. trace (matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \) n) \ trace \" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ using denote_while_n_sum_trace[OF dM0 dM1 id DS] that by auto have sumpar: "(\n. partial_density_operator (matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \) n))" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ using denote_while_n_sum_partial_density[OF dM0 dM1 id DS] that by auto have sumle:"(\n. matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \) n \\<^sub>L matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \) (Suc n))" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ using denote_while_n_sum_lowner_le[OF dM0 dM1 id DS] that by auto have seqd: "matrix_seq d (matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \))" if "\ \ carrier_mat d d" and "partial_density_operator \" for \ using matrix_seq_def sumdd sumpar sumle that by auto have "matrix_seq.lowner_is_lub (matrix_sum d (\n. denote_while_n (M 0) (M 1) (denote S) n \)) (matrix_seq.lowner_lub (matrix_sum d (\n. 
denote_while_n (M 0) (M 1) (denote S) n \)))" using DS lowner_is_lub_matrix_sum dM0 dM1 id pdor dr by auto then show "limit_mat (matrix_sum d (\k. denote_while_n (M 0) (M 1) (denote S) k \)) (denote (While M S) \) d" unfolding denote.simps denote_while_def matrix_inf_sum_def using matrix_seq.lowner_lub_is_limit[OF seqd[OF dr pdor]] by auto qed end end diff --git a/thys/Root_Balanced_Tree/Root_Balanced_Tree_Tab.thy b/thys/Root_Balanced_Tree/Root_Balanced_Tree_Tab.thy --- a/thys/Root_Balanced_Tree/Root_Balanced_Tree_Tab.thy +++ b/thys/Root_Balanced_Tree/Root_Balanced_Tree_Tab.thy @@ -1,109 +1,109 @@ section "Tabulating the Balanced Predicate" theory Root_Balanced_Tree_Tab imports Root_Balanced_Tree "HOL-Decision_Procs.Approximation" "HOL-Library.IArray" begin locale Min_tab = fixes p :: "nat \ nat \ bool" fixes tab :: "nat list" assumes mono_p: "n \ n' \ p n h \ p n' h" assumes p: "\n. p n h" assumes tab_LEAST: "h < length tab \ tab!h = (LEAST n. p n h)" begin lemma tab_correct: "h < length tab \ p n h = (n \ tab ! h)" apply auto using not_le_imp_less not_less_Least tab_LEAST apply auto[1] by (metis LeastI mono_p p tab_LEAST) end definition bal_tab :: "nat list" where "bal_tab = [0, 1, 1, 2, 4, 6, 10, 16, 25, 40, 64, 101, 161, 256, 406, 645, 1024, 1625, 2580, 4096, 6501, 10321, 16384, 26007, 41285, 65536, 104031, 165140, 262144, 416127, 660561, 1048576, 1664510, 2642245, 4194304, 6658042, 10568983, 16777216, 26632170, 42275935, 67108864, 106528681, 169103740, 268435456, 426114725, 676414963, 1073741824, 1704458900, 2705659852, 4294967296\<^cancel>\, 6817835603\]" (*ML\floor (Math.pow(2.0,5.0/1.5))\*) axiomatization where c_def: "c = 3/2" fun is_floor :: "nat \ nat \ bool" where "is_floor n h = (let m = floor((2::real) powr ((real(h)-1)/c)) in n \ m \ m \ n)" text\Note that @{prop"n \ m \ m \ n"} avoids the technical restriction of the \approximation\ method which does not support \=\, even on integers.\ lemma bal_tab_correct: "\i < length bal_tab. is_floor (bal_tab!i) i" apply(simp add: bal_tab_def c_def All_less_Suc) apply (approximation 50) done (* FIXME mv *) lemma ceiling_least_real: "ceiling(r::real) = (LEAST i. r \ i)" by (metis Least_equality ceiling_le le_of_int_ceiling) lemma floor_greatest_real: "floor(r::real) = (GREATEST i. i \ r)" by (metis Greatest_equality le_floor_iff of_int_floor_le) lemma LEAST_eq_floor: "(LEAST n. int h \ \c * log 2 (real n + 1)\) = floor((2::real) powr ((real(h)-1)/c))" proof - have "int h \ \c * log 2 (real n + 1)\ \ 2 powr ((real(h)-1)/c) < real(n)+1" (is "?L = ?R") for n proof - have "?L \ h < c * log 2 (real n + 1) + 1" by linarith also have "\ \ (real h-1)/c < log 2 (real n + 1)" using c1 by(simp add: field_simps) also have "\ \ 2 powr ((real h-1)/c) < 2 powr (log 2 (real n + 1))" by(simp del: powr_log_cancel) also have "\ \ ?R" by(simp) finally show ?thesis . qed moreover have "((LEAST n::nat. r < n+1) = nat(floor r))" for r :: real by(rule Least_equality) linarith+ ultimately show ?thesis by simp qed interpretation Min_tab where p = bal_i and tab = bal_tab proof(unfold bal_i_def, standard, goal_cases) case (1 n n' h) have "int h \ ceiling(c * log 2 (real n + 1))" by(rule 1[unfolded bal_i_def]) also have "\ \ ceiling(c * log 2 (real n' + 1))" using c1 "1"(1) by (simp add: ceiling_mono) finally show ?case . 
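(* Quick numeric sanity check relating bal_tab, bal_i and LEAST_eq_floor (informal,
   not part of this interpretation proof; the sample value h = 3 is chosen only for
   illustration).  With c = 3/2, bal_i n h holds iff int h <= ceiling (c * log 2 (n + 1)).
   For h = 3: n = 1 gives ceiling (1.5 * log 2 2) = 2 < 3, while n = 2 gives
   ceiling (1.5 * log 2 3) = ceiling 2.377... = 3, so the least such n is 2, which is
   exactly bal_tab ! 3.  This agrees with LEAST_eq_floor, since
   floor (2 powr ((3 - 1) / (3/2))) = floor (2 powr 1.333...) = floor 2.519... = 2,
   the value certified entry-wise by bal_tab_correct. *)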
next case (2 h) show ?case proof show "int h \ \c * log 2 (real (2 ^ h - 1) + 1)\" apply(simp add: of_nat_diff log_nat_power) using c1 - by (metis ceiling_mono ceiling_of_nat order.order_iff_strict mult.left_neutral mult_eq_0_iff of_nat_0_le_iff real_mult_le_cancel_iff1) + by (metis ceiling_mono ceiling_of_nat order.order_iff_strict mult.left_neutral mult_eq_0_iff of_nat_0_le_iff mult_le_cancel_iff1) qed next case 3 thus ?case using bal_tab_correct LEAST_eq_floor by (simp add: eq_iff[symmetric]) (metis nat_int) qed text\Now we replace the list by an immutable array:\ definition bal_array :: "nat iarray" where "bal_array = IArray bal_tab" text\A trick for code generation: how to get rid of the precondition:\ lemma bal_i_code: "bal_i n h = (if h < IArray.length bal_array then IArray.sub bal_array h \ n else bal_i n h)" by (simp add: bal_array_def tab_correct) end diff --git a/thys/Safe_Distance/Safe_Distance.thy b/thys/Safe_Distance/Safe_Distance.thy --- a/thys/Safe_Distance/Safe_Distance.thy +++ b/thys/Safe_Distance/Safe_Distance.thy @@ -1,1472 +1,1472 @@ \<^marker>\creator "Albert Rizaldi" "Fabian Immler\ section \Safe Distance\ theory Safe_Distance imports "HOL-Analysis.Multivariate_Analysis" "HOL-Decision_Procs.Approximation" Sturm_Sequences.Sturm begin text \ This theory is about formalising the safe distance rule. The safe distance rule is obtained from Vienna Convention which basically states the following thing. ``The car at all times must maintain a safe distance towards the vehicle in front of it, such that whenever the vehicle in front and the ego vehicle apply maximum deceleration, there will not be a collision.'' To formalise this safe distance rule we have to define first what is a safe distance. To define this safe distance, we have to model the physics of the movement of the vehicle. The following model is sufficient. \s = s\<^sub>0 + v\<^sub>0 * t + 1 / 2 * a\<^sub>0 * t\<^sup>2\ Assumptions in this model are : \<^item> Both vehicles are assumed to be point mass. The exact location of the ego vehicle is the front-most occupancy of the ego vehicle. Similarly for the other vehicle, its exact location is the rearmost occupancy of the other vehicle. \<^item> Both cars can never drive backward. \ lemmas [simp del] = div_mult_self1 div_mult_self2 div_mult_self3 div_mult_self4 subsection \Quadratic Equations\ lemma discriminant: "a * x\<^sup>2 + b * x + c = (0::real) \ 0 \ b\<^sup>2 - 4 * a * c" by (sos "(((A<0 * R<1) + (R<1 * (R<1 * [2*a*x + b]^2))))") lemma quadratic_eq_factoring: assumes D : "D = b\<^sup>2 - 4 * a * c" assumes nn: "0 \ D" assumes x1: "x\<^sub>1 = (-b + sqrt D) / (2 * a)" assumes x2: "x\<^sub>2 = (-b - sqrt D) / (2 * a)" assumes a : "a \ 0" shows "a * x\<^sup>2 + b * x + c = a * (x - x\<^sub>1) * (x - x\<^sub>2)" using nn by (simp add: D x1 x2) (simp add: assms power2_eq_square power3_eq_cube field_split_simps) lemma quadratic_eq_zeroes_iff: assumes D : "D = b\<^sup>2 - 4 * a * c" assumes x1: "x\<^sub>1 = (-b + sqrt D) / (2 * a)" assumes x2: "x\<^sub>2 = (-b - sqrt D) / (2 * a)" assumes a : "a \ 0" shows "a * x\<^sup>2 + b * x + c = 0 \ (D \ 0 \ (x = x\<^sub>1 \ x = x\<^sub>2))" (is "?z \ _") using quadratic_eq_factoring[OF D _ x1 x2 a, of x] discriminant[of a x b c] a by (auto simp: D) subsection \Convexity Condition\ lemma p_convex: fixes a b c x y z :: real assumes p_def: "p = (\x. 
a * x\<^sup>2 + b * x + c)" assumes less : "x < y" "y < z" and ge : "p x > p y" "p y \ p z" shows "a > 0" using less ge unfolding p_def by (sos "((((A<0 * (A<1 * A<2)) * R<1) + (((A<2 * R<1) * (R<1/4 * [y + ~1*z]^2)) + (((A<=1 * R<1) * (R<1 * [x + ~1*y]^2)) + (((A<=1 * (A<0 * (A<1 * R<1))) * (R<1/4 * [1]^2)) + (((A<=0 * R<1) * (R<1/4 * [~1*y^2 + x*y + ~1*x*z + y*z]^2)) + ((A<=0 * (A<0 * (A<1 * R<1))) * (R<1 * [x + ~1/2*y + ~1/2*z]^2))))))))") definition root_in :: "real \ real \ (real \ real) \ bool" where "root_in m M f = (\x\{m .. M}. f x = 0)" definition quadroot_in :: "real \ real \ real \ real \ real \ bool" where "quadroot_in m M a b c = root_in m M (\x. a * x^2 + b * x + c)" lemma card_iff_exists: "0 < card X \ finite X \ (\x. x \ X)" by (auto simp: card_gt_0_iff) lemma quadroot_in_sturm[code]: "quadroot_in m M a b c \ (a = 0 \ b = 0 \ c = 0 \ m \ M) \ (m \ M \ poly [:c, b, a:] m = 0) \ count_roots_between [:c, b, a:] m M > 0" apply (cases "a = 0 \ b = 0 \ c = 0 \ m \ M") apply (force simp: quadroot_in_def root_in_def) apply (cases "m \ M \ poly [:c, b, a:] m = 0") apply (force simp: quadroot_in_def root_in_def algebra_simps power2_eq_square count_roots_between_correct card_iff_exists) proof - assume H: "\ (a = 0 \ b = 0 \ c = 0 \ m \ M)" "\ (m \ M \ poly [:c, b, a:] m = 0)" hence "poly [:c, b, a:] m \ 0 \ m > M" by auto then have "quadroot_in m M a b c \ 0 < count_roots_between [:c, b, a:] m M" proof (rule disjE) assume pnz: "poly [:c, b, a:] m \ 0" then have nz: "[:c, b, a:] \ 0" by auto show ?thesis unfolding count_roots_between_correct card_iff_exists apply safe apply (rule finite_subset[where B="{x. poly [:c, b, a:] x = 0}"]) apply force apply (rule poly_roots_finite) apply (rule nz) using pnz apply (auto simp add: count_roots_between_correct quadroot_in_def root_in_def card_iff_exists algebra_simps power2_eq_square) apply (case_tac "x = m") apply (force simp: algebra_simps) apply force done qed (auto simp: quadroot_in_def count_roots_between_correct root_in_def card_eq_0_iff) then show "quadroot_in m M a b c = (a = 0 \ b = 0 \ c = 0 \ m \ M \ m \ M \ poly [:c, b, a:] m = 0 \ 0 < count_roots_between [:c, b, a:] m M)" using H by metis qed lemma check_quadroot_linear: fixes a b c :: real assumes "a = 0" shows "\ quadroot_in m M a b c \ ((b = 0 \ c = 0 \ M < m) \ (b = 0 \ c \ 0) \ (b \ 0 \ (let x = - c / b in m > x \ x > M)))" proof - have "quadroot_in m M a b c \ (b = 0 \ quadroot_in m M a b c) \ (b \ 0 \ quadroot_in m M a b c)" by auto also have "(b = 0 \ quadroot_in m M a b c) \ ((b = 0 \ c = 0 \ m \ M) \ (b \ 0 \ c = 0))" by (auto simp: quadroot_in_def Let_def root_in_def assms field_split_simps intro!: bexI[where x="-c / b"]) also have "(b \ 0 \ quadroot_in m M a b c) \ (b = 0 \ (let x = -c / b in m \ x \ x \ M))" apply (auto simp: quadroot_in_def Let_def root_in_def assms field_split_simps intro!: bexI[where x="-c / b"]) - by (metis mult.commute mult_le_cancel_left_neg add_eq_0_iff real_mult_le_cancel_iff2)+ + by (metis mult.commute mult_le_cancel_left_neg add_eq_0_iff mult_le_cancel_iff2)+ finally show ?thesis by (simp add: Let_def not_less not_le) qed lemma check_quadroot_nonlinear: assumes "a \ 0" shows "quadroot_in m M a b c = (let D = b^2 - 4 * a * c in D \ 0 \ ((let x = (-b + sqrt D)/(2*a) in m \ x \ x \ M) \ (let x = (-b - sqrt D)/(2*a) in m \ x \ x \ M)))" by (auto simp: quadroot_in_def Let_def root_in_def quadratic_eq_zeroes_iff[OF refl refl refl assms]) lemma ncheck_quadroot: shows "\quadroot_in m M a b c \ (a = 0 \\quadroot_in m M a b c) \ (a = 0 \ \quadroot_in m 
M a b c)" by auto subsection \Movement\ locale movement = fixes a v s0 :: real begin text \ Function to compute the distance using the equation \s(t) = s\<^sub>0 + v\<^sub>0 * t + 1 / 2 * a\<^sub>0 * t\<^sup>2\ Input parameters: \<^item> \s\<^sub>0\: initial distance \<^item> \v\<^sub>0\: initial velocity (positive means forward direction and the converse is true) \<^item> \a\: acceleration (positive for increasing and negative for decreasing) \<^item> \t\: time For the time \t < 0\, we assume the output of the function is \s\<^sub>0\. Otherwise, the output is calculated according to the equation above. \ subsubsection \Continuous Dynamics\ definition p :: "real \ real" where "p t = s0 + v * t + 1/2 * a * t\<^sup>2" lemma p_all_zeroes: assumes D: "D = v\<^sup>2 - 2 * a * s0" shows "p t = 0 \ ((a \ 0 \ 0 \ D \ ((t = (- v + sqrt D) / a) \ t = (- v - sqrt D) / a)) \ (a = 0 \ v = 0 \ s0 = 0) \ (a = 0 \ v \ 0 \ t = (- s0 / v)))" using quadratic_eq_zeroes_iff[OF refl refl refl, of "a / 2" t v s0] by (auto simp: movement.p_def D power2_eq_square field_split_simps) lemma p_zero[simp]: "p 0 = s0" by (simp add: p_def) lemma p_continuous[continuous_intros]: "continuous_on T p" unfolding p_def by (auto intro!: continuous_intros) lemma isCont_p[continuous_intros]: "isCont p x" using p_continuous[of UNIV] by (auto simp: continuous_on_eq_continuous_at) definition p' :: "real \ real" where "p' t = v + a * t" lemma p'_zero: "p' 0 = v" by (simp add: p'_def) lemma p_has_vector_derivative[derivative_intros]: "(p has_vector_derivative p' t) (at t within s)" by (auto simp: p_def[abs_def] p'_def has_vector_derivative_def algebra_simps intro!: derivative_eq_intros) lemma p_has_real_derivative[derivative_intros]: "(p has_real_derivative p' t) (at t within s)" using p_has_vector_derivative by (simp add: has_field_derivative_iff_has_vector_derivative) definition p'' :: "real \ real" where "p'' t = a" lemma p'_has_vector_derivative[derivative_intros]: "(p' has_vector_derivative p'' t) (at t within s)" by (auto simp: p'_def[abs_def] p''_def has_vector_derivative_def algebra_simps intro!: derivative_eq_intros) lemma p'_has_real_derivative[derivative_intros]: "(p' has_real_derivative p'' t) (at t within s)" using p'_has_vector_derivative by (simp add: has_field_derivative_iff_has_vector_derivative) definition t_stop :: real where "t_stop = - v / a" lemma p'_stop_zero: "p' t_stop = (if a = 0 then v else 0)" by (auto simp: p'_def t_stop_def) lemma p'_pos_iff: "p' x > 0 \ (if a > 0 then x > -v / a else if a < 0 then x < -v / a else v > 0)" by (auto simp: p'_def field_split_simps) lemma le_t_stop_iff: "a \ 0 \ x \ t_stop \ (if a < 0 then p' x \ 0 else p' x \ 0)" by (auto simp: p'_def field_split_simps t_stop_def) lemma p'_continuous[continuous_intros]: "continuous_on T p'" unfolding p'_def by (auto intro: continuous_intros) lemma isCont_p'[continuous_intros]: "isCont p' x" using p'_continuous[of UNIV] by (auto simp: continuous_on_eq_continuous_at) definition p_max :: real where "p_max = p t_stop" lemmas p_t_stop = p_max_def[symmetric] lemma p_max_eq: "p_max = s0 - v\<^sup>2 / a / 2" by (auto simp: p_max_def p_def t_stop_def field_split_simps power2_eq_square) subsubsection \Hybrid Dynamics\ definition s :: "real \ real" where "s t = ( if t \ 0 then s0 else if t \ t_stop then p t else p_max)" definition q :: "real \ real" where "q t = s0 + v * t" definition q' :: "real \ real" where "q' t = v" lemma init_q: "q 0 = s0" unfolding q_def by auto lemma q_continuous[continuous_intros]: "continuous_on T q" unfolding q_def by (auto 
intro: continuous_intros) lemma isCont_q[continuous_intros]: "isCont q x" using q_continuous[of UNIV] by (auto simp:continuous_on_eq_continuous_at) lemma q_has_vector_derivative[derivative_intros]: "(q has_vector_derivative q' t) (at t within u)" by (auto simp: q_def[abs_def] q'_def has_vector_derivative_def algebra_simps intro!: derivative_eq_intros) lemma q_has_real_derivative[derivative_intros]: "(q has_real_derivative q' t) (at t within u)" using q_has_vector_derivative by (simp add:has_field_derivative_iff_has_vector_derivative) lemma s_cond_def: "t \ 0 \ s t = s0" "0 \ t \ t \ t_stop \ s t = p t" by (simp_all add: s_def) end locale braking_movement = movement + assumes decel: "a < 0" assumes nonneg_vel: "v \ 0" begin lemma t_stop_nonneg: "0 \ t_stop" using decel nonneg_vel by (auto simp: t_stop_def divide_simps) lemma t_stop_pos: assumes "v \ 0" shows "0 < t_stop" using decel nonneg_vel assms by (auto simp: t_stop_def divide_simps) lemma t_stop_zero: assumes "t_stop = 0" shows "v = 0" using assms decel by (auto simp: t_stop_def) lemma t_stop_zero_not_moving: "t_stop = 0 \ q t = s0" unfolding q_def using t_stop_zero by auto abbreviation "s_stop \ s t_stop" lemma s_t_stop: "s_stop = p_max" using t_stop_nonneg by (auto simp: s_def t_stop_def p_max_def p_def) lemma s0_le_s_stop: "s0 \ s_stop" proof (rule subst[where t="s_stop" and s="p_max"]) show "p_max = s_stop" by (rule sym[OF s_t_stop]) next show "s0 \ p_max" proof (rule subst[where t="p_max" and s="s0 - v\<^sup>2 / a / 2"]) show " s0 - v\<^sup>2 / a / 2 = p_max" using p_max_eq by auto next have "0 \ - v\<^sup>2 / a / 2" using decel zero_le_square[of v] proof - have f1: "a \ 0" using \a < 0\ by linarith have "(- 1 * v\<^sup>2 \ 0) = (0 \ v\<^sup>2)" by auto then have "0 \ - 1 * v\<^sup>2 / a" using f1 by (meson zero_le_divide_iff zero_le_power2) then show ?thesis by force qed thus "s0 \ s0 - v\<^sup>2 / a / 2" by auto qed qed lemma p_mono: "x \ y \ y \ t_stop \ p x \ p y" using decel proof - assume "x \ y" and "y \ t_stop" and "a < 0" hence "x + y \ - 2 * v / a" unfolding t_stop_def by auto hence "-1 / 2 * a * (x + y) \ v" (is "?lhs0 \ ?rhs0") using `a < 0` by (auto simp add: field_simps) hence "?lhs0 * (x- y) \ ?rhs0 * (x - y)" using `x \ y` by sos hence "v * x + 1 / 2 * a * x\<^sup>2 \ v * y + 1 / 2 * a * y\<^sup>2" by (auto simp add: field_simps power_def) thus " p x \ p y" unfolding p_max_def p_def t_stop_def by auto qed lemma p_antimono: "x \ y \ t_stop \ x \ p y \ p x" using decel proof - assume "x \ y" and "t_stop \ x" and "a < 0" hence "- 2 * v / a \ x + y" unfolding t_stop_def by auto hence "v \ - 1/ 2 * a * (x + y)" using `a < 0` by (auto simp add: field_simps) hence "v * (x - y) \ -1/2 * a * (x\<^sup>2 - y\<^sup>2) " using `x \ y` by sos hence "v * y + 1/2 * a * y\<^sup>2 \ v * x + 1/2 * a * x\<^sup>2" by (auto simp add: field_simps) thus "p y \ p x" unfolding p_max_def p_def t_stop_def by auto qed lemma p_strict_mono: "x < y \ y \ t_stop \ p x < p y" using decel proof - assume "x < y" and "y \ t_stop" and "a < 0" hence "x + y < - 2 * v / a" unfolding t_stop_def by auto hence "-1 / 2 * a * (x + y) < v" (is "?lhs0 < ?rhs0") using `a < 0` by (auto simp add: field_simps) hence "?lhs0 * (x- y) > ?rhs0 * (x - y)" using `x < y` by sos hence "v * x + 1 / 2 * a * x\<^sup>2 < v * y + 1 / 2 * a * y\<^sup>2" by (auto simp add: field_simps power_def) thus " p x < p y" unfolding p_max_def p_def t_stop_def by auto qed lemma p_strict_antimono: "x < y \ t_stop \ x\ p y < p x" using decel proof - assume "x < y" and "t_stop \ x" and "a < 0" 
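    (* Added explanatory comment (informal sketch of the algebra that the following steps
       mechanise; not part of the original proof script): since t_stop = -v/a and
       t_stop <= x < y, we get -2*v/a < x + y, which for a < 0 is equivalent to
       v < -a*(x + y)/2.  Multiplying by the negative factor (x - y) flips the inequality,
       giving v*(x - y) > -a*(x^2 - y^2)/2, and rearranging yields
       v*y + a*y^2/2 < v*x + a*x^2/2, i.e. p y < p x by the definition of p. *)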
hence "- 2 * v / a < x + y" unfolding t_stop_def by auto hence "v < - 1/ 2 * a * (x + y)" using `a < 0` by (auto simp add: field_simps) hence "v * (x - y) > -1/2 * a * (x\<^sup>2 - y\<^sup>2) " using `x < y` by sos hence "v * y + 1/2 * a * y\<^sup>2 < v * x + 1/2 * a * x\<^sup>2" by (auto simp add: field_simps) thus "p y < p x" unfolding p_max_def p_def t_stop_def by auto qed lemma p_max: "p x \ p_max" unfolding p_max_def by (cases "x \ t_stop") (auto intro: p_mono p_antimono) lemma continuous_on_s[continuous_intros]: "continuous_on T s" unfolding s_def[abs_def] using t_stop_nonneg by (intro continuous_on_subset[where t=T and s = "{.. 0}\({0 .. t_stop} \ {t_stop ..})"] continuous_on_If) (auto simp: p_continuous p_max_def antisym_conv[where x=0]) lemma isCont_s[continuous_intros]: "isCont s x" using continuous_on_s[of UNIV] by (auto simp: continuous_on_eq_continuous_at) definition s' :: "real \ real" where "s' t = (if t \ t_stop then p' t else 0)" lemma s_has_real_derivative: assumes "t \ 0" "v / a \ 0" "a \ 0" shows "(s has_real_derivative s' t) (at t within {0..})" proof - from assms have *: "t \ t_stop \ t \ {0 .. t_stop}" by simp from assms have "0 \ t_stop" by (auto simp: t_stop_def) have "((\t. if t \ {0 .. t_stop} then p t else p_max) has_real_derivative (if t \ {0..t_stop} then p' t else 0)) (at t within {0..})" unfolding s_def[abs_def] s'_def has_field_derivative_iff_has_vector_derivative apply (rule has_vector_derivative_If_within_closures[where T = "{t_stop ..}"]) using \0 \ t_stop\ \a \ 0\ by (auto simp: assms p'_stop_zero p_t_stop max_def insert_absorb intro!: p_has_vector_derivative) from _ _ this show ?thesis unfolding has_vector_derivative_def has_field_derivative_iff_has_vector_derivative s'_def s_def[abs_def] * by (rule has_derivative_transform) (auto simp: assms s_def p_max_def t_stop_def) qed lemma s_has_vector_derivative[derivative_intros]: assumes "t \ 0" "v / a \ 0" "a \ 0" shows "(s has_vector_derivative s' t) (at t within {0..})" using s_has_real_derivative[OF assms] by (simp add: has_field_derivative_iff_has_vector_derivative) lemma s_has_field_derivative[derivative_intros]: assumes "t \ 0" "v / a \ 0" "a \ 0" shows "(s has_field_derivative s' t) (at t within {0..})" using s_has_vector_derivative[OF assms] by(simp add: has_field_derivative_iff_has_vector_derivative) lemma s_has_real_derivative_at: assumes "0 < x" "0 \ v" "a < 0" shows "(s has_real_derivative s' x) (at x)" proof - from assms have "(s has_real_derivative s' x) (at x within {0 ..})" by (intro s_has_real_derivative) (auto intro!: divide_nonneg_nonpos) then have "(s has_real_derivative s' x) (at x within {0<..})" by (rule DERIV_subset) auto then show "(s has_real_derivative s' x) (at x)" using assms by (subst (asm) at_within_open) auto qed lemma s_delayed_has_field_derivative[derivative_intros]: assumes "\ < t" "0 \ v" "a < 0" shows "((\x. s (x - \)) has_field_derivative s' (t - \)) (at t within {\<..})" proof - from assms have "((\x. s (x + - \)) has_real_derivative s' (t - \)) (at t)" using DERIV_shift[of "s" "(s' (t - \))" t "-\"] s_has_real_derivative_at by auto thus "((\x. s (x - \)) has_field_derivative s' (t - \)) (at t within {\<..})" using has_field_derivative_at_within by auto qed lemma s_delayed_has_vector_derivative[derivative_intros]: assumes "\ < t" "0 \ v" "a < 0" shows "((\x. 
s (x - \)) has_vector_derivative s' (t - \)) (at t within {\<..})" using s_delayed_has_field_derivative[OF assms] by(simp add:has_field_derivative_iff_has_vector_derivative) lemma s'_nonneg: "0 \ v \ a \ 0 \ 0 \ s' x" by (auto simp: s'_def p'_def t_stop_def field_split_simps) lemma s'_pos: "0 \ x \ x < t_stop \ 0 \ v \ a \ 0 \ 0 < s' x" by (intro le_neq_trans s'_nonneg) (auto simp: s'_def p'_def t_stop_def field_split_simps) subsubsection \Monotonicity of Movement\ lemma s_mono: assumes "t \ u" "u \ 0" shows "s t \ s u" using p_mono[of u t] assms p_max[of u] by (auto simp: s_def) lemma s_strict_mono: assumes "u < t" "t \ t_stop" "u \ 0" shows "s u < s t" using p_strict_mono[of u t] assms p_max[of u] by (auto simp: s_def) lemma s_antimono: assumes "x \ y" assumes "t_stop \ x" shows "s y \ s x" proof - from assms have "t_stop \ y" by auto hence "s y \ p_max" unfolding s_def p_max_eq using p_max_def p_max_eq s0_le_s_stop s_t_stop by auto also have "... \ s x" using \t_stop \ x\ s_mono s_t_stop t_stop_nonneg by fastforce ultimately show "s y \ s x" by auto qed lemma init_s : "t \ 0 \ s t = s0" using decel nonneg_vel by (simp add: divide_simps s_def) lemma q_min: "0 \ t \ s0 \ q t" unfolding q_def using nonneg_vel by auto lemma q_mono: "x \ y \ q x \ q y" unfolding q_def using nonneg_vel by (auto simp: mult_left_mono) subsubsection \Maximum at Stopping Time\ lemma s_max: "s x \ s_stop" using p_max[of x] p_max[of 0] unfolding s_t_stop by (auto simp: s_def) lemma s_eq_s_stop: "NO_MATCH t_stop x \ x \ t_stop \ s x = s_stop" using t_stop_nonneg by (auto simp: s_def p_max_def) end subsection \Safe Distance\ locale safe_distance = fixes a\<^sub>e v\<^sub>e s\<^sub>e :: real fixes a\<^sub>o v\<^sub>o s\<^sub>o :: real assumes nonneg_vel_ego : "0 \ v\<^sub>e" assumes nonneg_vel_other : "0 \ v\<^sub>o" assumes decelerate_ego : "a\<^sub>e < 0" assumes decelerate_other : "a\<^sub>o < 0" assumes in_front : "s\<^sub>e < s\<^sub>o" begin lemmas hyps = nonneg_vel_ego nonneg_vel_other decelerate_ego decelerate_other in_front sublocale ego: braking_movement a\<^sub>e v\<^sub>e s\<^sub>e by (unfold_locales; rule hyps) sublocale other: braking_movement a\<^sub>o v\<^sub>o s\<^sub>o by (unfold_locales; rule hyps) sublocale ego_other: movement "a\<^sub>o - a\<^sub>e" "v\<^sub>o - v\<^sub>e" "s\<^sub>o - s\<^sub>e" by unfold_locales subsubsection \Collision\ definition collision :: "real set \ bool" where "collision time_set \ (\t \ time_set. ego.s t = other.s t )" abbreviation no_collision :: "real set \ bool" where "no_collision time_set \ \ collision time_set" lemma no_collision_initially : "no_collision {.. 0}" using decelerate_ego nonneg_vel_ego using decelerate_other nonneg_vel_other in_front by (auto simp: divide_simps collision_def ego.s_def other.s_def) lemma no_collisionI: "(\t. t \ S \ ego.s t \ other.s t) \ no_collision S" unfolding collision_def by blast theorem cond_1: "ego.s_stop < s\<^sub>o \ no_collision {0..}" proof (rule no_collisionI, simp) fix t::real assume "t \ 0" have "ego.s t \ ego.s_stop" by (rule ego.s_max) also assume "\ < s\<^sub>o" also have "\ = other.s 0" by (simp add: other.init_s) also have "\ \ other.s t" using \0 \ t\ hyps by (intro other.s_mono) auto finally show "ego.s t \ other.s t" by simp qed lemma ego_other_strict_ivt: assumes "ego.s t > other.s t" shows "collision {0 ..< t}" proof cases have [simp]: "s\<^sub>e < s\<^sub>o \ ego.s 0 \ other.s 0" by (simp add: movement.s_def) assume "0 \ t" with assms in_front have "\x\0. 
x \ t \ other.s x - ego.s x = 0" by (intro IVT2, auto simp: continuous_diff other.isCont_s ego.isCont_s) then show ?thesis using assms by (auto simp add: algebra_simps collision_def Bex_def order.order_iff_strict) qed (insert assms hyps, auto simp: collision_def ego.init_s other.init_s intro!: bexI[where x=0]) lemma collision_subset: "collision s \ s \ t \ collision t" by (auto simp: collision_def) lemma ego_other_ivt: assumes "ego.s t \ other.s t" shows "collision {0 .. t}" proof cases assume "ego.s t > other.s t" from ego_other_strict_ivt[OF this] show ?thesis by (rule collision_subset) auto qed (insert hyps assms; cases "t \ 0"; force simp: collision_def ego.init_s other.init_s) theorem cond_2: assumes "ego.s_stop \ other.s_stop" shows "collision {0 ..}" using assms apply (intro collision_subset[where t="{0 ..}" and s = "{0 .. max ego.t_stop other.t_stop}"]) apply (intro ego_other_ivt[where t = "max ego.t_stop other.t_stop"]) apply (auto simp: ego.s_eq_s_stop other.s_eq_s_stop) done abbreviation D2 :: "real" where "D2 \ (v\<^sub>o - v\<^sub>e)^2 - 2 * (a\<^sub>o - a\<^sub>e) * (s\<^sub>o - s\<^sub>e)" abbreviation t\<^sub>D' :: "real" where "t\<^sub>D' \ sqrt (2 * (ego.s_stop - other.s_stop) / a\<^sub>o)" lemma pos_via_half_dist: "dist a b < b / 2 \ b > 0 \ a > 0" by (auto simp: dist_real_def abs_real_def split: if_splits) lemma collision_within_p: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "collision {0..} \ (\t\0. ego.p t = other.p t \ t < ego.t_stop \ t < other.t_stop)" proof (auto simp: collision_def, goal_cases) case (2 t) then show ?case by (intro bexI[where x = t]) (auto simp: ego.s_def other.s_def) next case (1 t) then show ?case using assms hyps ego.t_stop_nonneg other.t_stop_nonneg apply (auto simp: ego.s_def other.s_def ego.s_t_stop other.s_t_stop ego.p_t_stop other.p_t_stop not_le split: if_splits) defer proof goal_cases case 1 from 1 have le: "ego.t_stop \ other.t_stop" by auto from 1 have "ego.t_stop < t" by simp from other.s_strict_mono[OF this] 1 have "other.s ego.t_stop < other.s t" by auto also have "\ = ego.s ego.t_stop" using ego.s_t_stop ego.t_stop_nonneg 1 other.s_def by auto finally have "other.s ego.t_stop < ego.s ego.t_stop" . from ego_other_strict_ivt[OF this] le in_front show ?case by (auto simp add: collision_def) (auto simp: movement.s_def split: if_splits) next case 2 from 2 have "other.p_max = ego.p t" by simp also have "\ \ ego.p ego.t_stop" using 2 by (intro ego.p_mono) auto also have "\ = ego.p_max" by (simp add: ego.p_t_stop) also note \\ < other.p_max\ finally show ?case by arith next case 3 thus "\t\0. ego.p t = other.p t \ t < ego.t_stop \ t < other.t_stop" apply (cases "t = other.t_stop") apply (simp add: other.p_t_stop ) apply (metis (no_types) ego.p_max not_le) apply (cases "t = ego.t_stop") apply (simp add: ego.p_t_stop) defer apply force proof goal_cases case (1) let ?d = "\t. other.p' t - ego.p' t" define d' where "d' = ?d ego.t_stop / 2" have d_cont: "isCont ?d ego.t_stop" unfolding ego.t_stop_def other.p'_def ego.p'_def by simp have "?d ego.t_stop > 0" using 1 by (simp add: ego.p'_stop_zero other.p'_pos_iff) (simp add: ego.t_stop_def other.t_stop_def) then have "d' > 0" by (auto simp: d'_def) from d_cont[unfolded continuous_at_eps_delta, THEN spec, rule_format, OF \d' > 0\] obtain e where e: "e > 0" "\x. 
dist x ego.t_stop < e \ ?d x > 0" unfolding d'_def using \?d ego.t_stop > 0\ pos_via_half_dist by force define t' where "t' = ego.t_stop - min (ego.t_stop / 2) (e / 2)" have "0 < ego.t_stop" using 1 by auto have "other.p t' - ego.p t' < other.p ego.t_stop - ego.p ego.t_stop" apply (rule DERIV_pos_imp_increasing[of t']) apply (force simp: t'_def e min_def \0 < ego.t_stop\) apply (auto intro!: exI[where x = "?d x" for x] intro!: derivative_intros e) using \e > 0\ apply (auto simp: t'_def dist_real_def algebra_simps) done also have "\ = 0" using 1 by (simp add: ego.p_t_stop) finally have less: "other.p t' < ego.p t'" by simp have "t' > 0" using 1 by (auto simp: t'_def algebra_simps min_def) have "t' < ego.t_stop" by (auto simp: t'_def \e > 0\ \ego.t_stop > 0\) from less_le_trans[OF \t' < ego.t_stop\ \ego.t_stop \ other.t_stop\] have "t' < other.t_stop" . from ego_other_strict_ivt[of t'] less have "collision {0..t' > 0\ \t' < ego.t_stop\ \t' < other.t_stop\ by (auto simp: other.s_def ego.s_def split: if_splits) thus ?case using \t' > 0\ \t' < ego.t_stop\ \t' < other.t_stop\ apply (auto simp: collision_def ego.s_def other.s_def movement.p_def split: if_splits) apply (rule_tac x = t in exI) apply (auto simp: movement.p_def)[] done qed qed qed lemma collision_within_eq: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "collision {0..} \ collision {0 ..< min ego.t_stop other.t_stop}" unfolding collision_within_p[OF assms] unfolding collision_def by (safe; force simp: ego.s_def other.s_def movement.p_def ego.t_stop_def other.t_stop_def split: if_splits) lemma collision_excluded: "(\t. t \ T \ ego.s t \ other.s t) \ collision S \ collision (S - T)" by (auto simp: collision_def) lemma collision_within_less: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "collision {0..} \ collision {0 <..< min ego.t_stop other.t_stop}" proof - note collision_within_eq[OF assms] also have "collision {0 ..< min ego.t_stop other.t_stop} \ collision ({0 ..< min ego.t_stop other.t_stop} - {0})" using hyps assms by (intro collision_excluded) (auto simp: ego.s_def other.s_def) also have "{0 ..< min ego.t_stop other.t_stop} - {0} = {0 <..< min ego.t_stop other.t_stop}" by auto finally show ?thesis unfolding collision_def by (safe; force simp: ego.s_def other.s_def movement.p_def ego.t_stop_def other.t_stop_def split: if_splits) qed theorem cond_3: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "collision {0..} \ (a\<^sub>o > a\<^sub>e \ v\<^sub>o < v\<^sub>e \ 0 \ D2 \ sqrt D2 > v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o)" proof - have "v\<^sub>o \ 0" using assms(1) assms(2) movement.s_def movement.t_stop_def by auto with hyps have "v\<^sub>o > 0" by auto note hyps = hyps this define t1 where "t1 = (- (v\<^sub>o - v\<^sub>e) + sqrt D2) / (a\<^sub>o - a\<^sub>e)" define t2 where "t2 = (- (v\<^sub>o - v\<^sub>e) - sqrt D2) / (a\<^sub>o - a\<^sub>e)" define bounded where "bounded \ \t. (0 \ t \ t \ ego.t_stop \ t \ other.t_stop)" have ego_other_conv: "\t. 
bounded t \ ego.p t = other.p t \ ego_other.p t = 0" by (auto simp: movement.p_def field_split_simps) let ?r = "{0 <..< min ego.t_stop other.t_stop}" have D2: "D2 = (v\<^sub>o - v\<^sub>e)\<^sup>2 - 4 * ((a\<^sub>o - a\<^sub>e) / 2) * (s\<^sub>o - s\<^sub>e)" by simp define D where "D = D2" note D = D_def[symmetric] define x1 where "x1 \ (- (v\<^sub>o - v\<^sub>e) + sqrt D2) / (2 * ((a\<^sub>o - a\<^sub>e) / 2))" define x2 where "x2 \ (- (v\<^sub>o - v\<^sub>e) - sqrt D2) / (2 * ((a\<^sub>o - a\<^sub>e) / 2))" have x2: "x2 =(- (v\<^sub>o - v\<^sub>e) - sqrt D2) / (a\<^sub>o - a\<^sub>e)" by (simp add: x2_def field_split_simps) have x1: "x1 =(- (v\<^sub>o - v\<^sub>e) + sqrt D2) / (a\<^sub>o - a\<^sub>e)" by (simp add: x1_def field_split_simps) from collision_within_less[OF assms] have coll_eq: "collision {0..} = collision ?r" by (auto simp add: bounded_def) also have "\ \ (a\<^sub>o > a\<^sub>e \ v\<^sub>o < v\<^sub>e \ 0 \ D2 \ sqrt D2 > v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o)" proof safe assume H: "a\<^sub>e < a\<^sub>o" "v\<^sub>o < v\<^sub>e" "0 \ D2" assume sqrt: "sqrt D2 > v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o" have nz: "(a\<^sub>o - a\<^sub>e) / 2 \ 0" using \a\<^sub>e < a\<^sub>o\ by simp note sol = quadratic_eq_zeroes_iff[OF D2 x1_def[THEN meta_eq_to_obj_eq] x2_def[THEN meta_eq_to_obj_eq] nz] from sol[of x2] \0 \ D2\ have "other.p x2 = ego.p x2" by (auto simp: ego.p_def other.p_def field_split_simps) moreover have "x2 > 0" proof (rule ccontr) assume "\ 0 < x2" then have "ego_other.p x2 \ ego_other.p 0" using H hyps by (intro DERIV_nonpos_imp_nonincreasing[of x2]) (auto intro!: exI[where x="ego_other.p' x" for x] derivative_eq_intros simp: ego_other.p'_def add_nonpos_nonpos mult_nonneg_nonpos) also have "ego_other.p 0 > 0" using hyps by (simp add: ego_other.p_def) finally (xtrans) show False using \other.p x2 = ego.p x2\ by (simp add: movement.p_def field_split_simps power2_eq_square) qed moreover have "x2 < other.t_stop" using sqrt H hyps by (auto simp: x2 other.t_stop_def field_split_simps power2_eq_square) ultimately show "collision {0<.. \ ego.p ego.t_stop" by (auto intro!: ego.p_antimono) also have "\ = ego.s x2" using 2 by (auto simp: ego.s_def ego.p_t_stop) finally have "other.s x2 \ ego.s x2" . from ego_other_ivt[OF this] show ?thesis unfolding coll_eq[symmetric] by (rule collision_subset) auto qed (auto simp: collision_def ego.s_def other.s_def not_le intro!: bexI[where x=x2]) next let ?max = "max ego.t_stop other.t_stop" let ?min = "min ego.t_stop other.t_stop" assume "collision ?r" then obtain t where t: "ego.p t = other.p t" "0 < t" "t < ?min" by (auto simp: collision_def ego.s_def other.s_def) then have "t < - (v\<^sub>e / a\<^sub>e)" "t < - (v\<^sub>o / a\<^sub>o)" "t < other.t_stop" by (simp_all add: ego.t_stop_def other.t_stop_def) from t have "ego_other.p t = 0" by (auto simp: movement.p_def field_split_simps) from t have "t < ?max" by auto from hyps assms have "0 < ego_other.p 0" by simp from ego_other.p_def[abs_def, THEN meta_eq_to_obj_eq] have eop_eq: "ego_other.p = (\t. 1 / 2 * (a\<^sub>o - a\<^sub>e) * t\<^sup>2 + (v\<^sub>o - v\<^sub>e) * t + (s\<^sub>o - s\<^sub>e))" by (simp add: algebra_simps) show "a\<^sub>o > a\<^sub>e" proof - have "ego.p other.t_stop \ ego.p_max" by (rule ego.p_max) also have "... 
\ other.p other.t_stop" using hyps assms by (auto simp:other.s_def ego.s_def ego.p_t_stop split:if_splits) finally have "0 \ ego_other.p other.t_stop" by (auto simp add:movement.p_def field_simps) from p_convex[OF eop_eq, of 0 t other.t_stop, simplified \ego_other.p t = 0\, OF \0 < t\ \t < other.t_stop\ \0 < ego_other.p 0\ \0 \ ego_other.p other.t_stop\] show "a\<^sub>o > a\<^sub>e" by (simp add: algebra_simps) qed have rewr: "4 * ((a\<^sub>o - a\<^sub>e) / 2) = 2 * (a\<^sub>o - a\<^sub>e)" by simp from \a\<^sub>o > a\<^sub>e\ \ego_other.p t = 0\ ego_other.p_all_zeroes[OF D2[symmetric], of t] have "0 \ D2" and disj: "(t = (- (v\<^sub>o - v\<^sub>e) + sqrt D2) / (a\<^sub>o - a\<^sub>e) \ t = (- (v\<^sub>o - v\<^sub>e) - sqrt D2) / (a\<^sub>o - a\<^sub>e))" using hyps assms unfolding rewr by simp_all show "0 \ D2" by fact from add_strict_mono[OF \t < - (v\<^sub>e / a\<^sub>e)\ \t < - (v\<^sub>o / a\<^sub>o)\] `0 < t` \a\<^sub>o > a\<^sub>e\ have "0 < - (v\<^sub>e / a\<^sub>e) + - (v\<^sub>o / a\<^sub>o)" by (simp add: divide_simps) then have "0 > v\<^sub>e * a\<^sub>o + a\<^sub>e * v\<^sub>o" using hyps by (simp add: field_split_simps split: if_splits) show "v\<^sub>o < v\<^sub>e" using `a\<^sub>e < a\<^sub>o` `movement.p (a\<^sub>o - a\<^sub>e) (v\<^sub>o - v\<^sub>e) (s\<^sub>o - s\<^sub>e) t = 0` in_front t(2) apply (auto simp: movement.p_def divide_less_0_iff algebra_simps power2_eq_square) by (smt divide_less_0_iff mult_le_cancel_right mult_mono mult_nonneg_nonneg nonneg_vel_ego) from disj have "x2 < ?min" proof rule assume "t = (- (v\<^sub>o - v\<^sub>e) - sqrt D2) / (a\<^sub>o - a\<^sub>e)" then show ?thesis using \t < ?min\ by (simp add: x2) next assume "t = (- (v\<^sub>o - v\<^sub>e) + sqrt D2) / (a\<^sub>o - a\<^sub>e)" also have "\ \ x2" unfolding x2 apply (rule divide_right_mono) apply (subst (2) diff_conv_add_uminus) apply (rule add_left_mono) using \a\<^sub>o > a\<^sub>e\ \D2 \ 0\ by auto also (xtrans) note \t < ?min\ finally show ?thesis . qed then show "sqrt D2 > v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o" using hyps \a\<^sub>o > a\<^sub>e\ by (auto simp: x2 field_split_simps other.t_stop_def) qed finally show ?thesis . 
qed subsubsection \Formalising Safe Distance\ text \First definition for Safe Distance based on \cond_1\.\ definition absolute_safe_distance :: real where "absolute_safe_distance = - v\<^sub>e\<^sup>2 / (2 * a\<^sub>e)" lemma absolute_safe_distance: assumes "s\<^sub>o - s\<^sub>e > absolute_safe_distance" shows "no_collision {0..}" proof - from assms hyps absolute_safe_distance_def have "ego.s_stop < s\<^sub>o" by (auto simp add:ego.s_def ego.p_def ego.t_stop_def power_def) thus ?thesis by (rule cond_1) qed text \First Fallback for Safe Distance.\ definition fst_safe_distance :: real where "fst_safe_distance = v\<^sub>o\<^sup>2 / (2 * a\<^sub>o) - v\<^sub>e\<^sup>2 / (2 * a\<^sub>e)" definition distance_leq_d2 :: real where "distance_leq_d2 = (a\<^sub>e + a\<^sub>o) / (2 * a\<^sub>o\<^sup>2) * v\<^sub>o\<^sup>2 - v\<^sub>o * v\<^sub>e / a\<^sub>o" lemma snd_leq_fst_exp: "distance_leq_d2 \ fst_safe_distance" proof - have "0 \ (other.t_stop - ego.t_stop)\<^sup>2" by auto hence "- ego.t_stop\<^sup>2 \ other.t_stop\<^sup>2 - 2 * other.t_stop * ego.t_stop" by (simp add:power_def algebra_simps) with hyps(3) have "- ego.t_stop\<^sup>2 * (a\<^sub>e / 2) \ (other.t_stop\<^sup>2 - 2 * other.t_stop * ego.t_stop) * (a\<^sub>e / 2)" by (smt half_gt_zero_iff mult_le_cancel_right) with ego.t_stop_def other.t_stop_def hyps have "- v\<^sub>e\<^sup>2 / (2 * a\<^sub>e) \ a\<^sub>e * v\<^sub>o\<^sup>2 / (2 * a\<^sub>o\<^sup>2) - v\<^sub>o * v\<^sub>e / a\<^sub>o" by (simp add:power_def algebra_simps) with fst_safe_distance_def distance_leq_d2_def have 1: "fst_safe_distance \ a\<^sub>e * v\<^sub>o\<^sup>2 / (2 * a\<^sub>o\<^sup>2) - v\<^sub>o * v\<^sub>e / a\<^sub>o + v\<^sub>o\<^sup>2 / (2 * a\<^sub>o)" by (auto simp add:algebra_simps) have "a\<^sub>e * v\<^sub>o\<^sup>2 / (2 * a\<^sub>o\<^sup>2) - v\<^sub>o * v\<^sub>e / a\<^sub>o + v\<^sub>o\<^sup>2 / (2 * a\<^sub>o) = distance_leq_d2" (is "?LHS = _") proof - have "?LHS = a\<^sub>e * v\<^sub>o\<^sup>2 / (2 * a\<^sub>o\<^sup>2) - v\<^sub>o * v\<^sub>e / a\<^sub>o + a\<^sub>o * v\<^sub>o\<^sup>2 / (2 * a\<^sub>o\<^sup>2)" by (auto simp add: algebra_simps power_def) also have "... = distance_leq_d2" by (auto simp add: power_def field_split_simps distance_leq_d2_def) finally show ?thesis by auto qed with 1 show ?thesis by auto qed lemma sqrt_D2_leq_stop_time_diff: assumes "a\<^sub>e < a\<^sub>o" assumes "0 \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o " assumes "s\<^sub>o - s\<^sub>e \ distance_leq_d2" shows "sqrt D2 \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o" proof - from assms have "- 2 * (a\<^sub>o - a\<^sub>e) * (s\<^sub>o - s\<^sub>e) \ - 2 * (a\<^sub>o - a\<^sub>e) * distance_leq_d2" (is "?L \ ?R") by simp hence "D2 \ (v\<^sub>o - v\<^sub>e)\<^sup>2 - 2 * (a\<^sub>o - a\<^sub>e) * distance_leq_d2" by (simp add: algebra_simps) also have "... = (v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o)\<^sup>2" proof - from distance_leq_d2_def have 1: "(v\<^sub>o - v\<^sub>e)\<^sup>2 - 2 * (a\<^sub>o - a\<^sub>e) * distance_leq_d2 = (v\<^sub>o - v\<^sub>e)\<^sup>2 - (a\<^sub>o - a\<^sub>e) * (a\<^sub>e + a\<^sub>o) / a\<^sub>o\<^sup>2 * v\<^sub>o\<^sup>2 + 2 * (a\<^sub>o - a\<^sub>e) * v\<^sub>o * v\<^sub>e / a\<^sub>o" by (auto simp add: field_split_simps) with hyps(4) have "... 
= (v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o)\<^sup>2" by (auto simp add: power_def field_split_simps) with 1 show ?thesis by auto qed finally show ?thesis by (smt assms(2) real_le_lsqrt real_sqrt_le_0_iff) qed lemma cond2_imp_pos_vo: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "v\<^sub>o \ 0" proof (rule ccontr) assume "\ v\<^sub>o \ 0" with other.s_def other.t_stop_def have "other.s_stop = s\<^sub>o" by auto with assms(2) have "ego.s_stop < s\<^sub>o" by auto with assms(1) show "False" by auto qed lemma cond2_imp_gt_fst_sd: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "fst_safe_distance < s\<^sub>o - s\<^sub>e" proof (cases "v\<^sub>e \ 0") case True from fst_safe_distance_def assms ego.s_def ego.t_stop_pos[OF \v\<^sub>e \ 0\] ego.p_def ego.t_stop_def other.s_def other.t_stop_pos[OF cond2_imp_pos_vo[OF assms]] other.p_def other.t_stop_def hyps show ?thesis by (simp add: power_def algebra_simps) next case False with fst_safe_distance_def have "fst_safe_distance = v\<^sub>o\<^sup>2 / (2 * a\<^sub>o)" by auto also have "... \ 0" by (simp add: divide_nonneg_neg hyps) also have "... < s\<^sub>o - s\<^sub>e" by (simp add: algebra_simps hyps) finally show ?thesis by auto qed text \Second Fallback for Safe Distance.\ definition snd_safe_distance :: real where "snd_safe_distance = (v\<^sub>o - v\<^sub>e)\<^sup>2 / (2 * (a\<^sub>o - a\<^sub>e))" lemma fst_leq_snd_safe_distance: assumes "a\<^sub>e < a\<^sub>o" shows"fst_safe_distance \ snd_safe_distance" proof - have "0 \ (v\<^sub>o / a\<^sub>o - v\<^sub>e / a\<^sub>e)\<^sup>2" by auto hence 1: "0 \ (v\<^sub>o / a\<^sub>o)\<^sup>2 - 2 * v\<^sub>o * v\<^sub>e / (a\<^sub>o * a\<^sub>e) + (v\<^sub>e / a\<^sub>e)\<^sup>2" by (auto simp add: power_def algebra_simps) from hyps have "0 \ a\<^sub>o * a\<^sub>e" by (simp add: mult_nonpos_nonpos) from mult_right_mono[OF 1 this] hyps have "0 \ v\<^sub>o\<^sup>2 * a\<^sub>e / a\<^sub>o - 2 * v\<^sub>o * v\<^sub>e + v\<^sub>e\<^sup>2 * a\<^sub>o / a\<^sub>e" by (auto simp add: power_def algebra_simps) with hyps have 2: "(v\<^sub>o\<^sup>2 / (2 * a\<^sub>o) - v\<^sub>e\<^sup>2 / (2 * a\<^sub>e)) * (2 * (a\<^sub>o - a\<^sub>e)) \ (v\<^sub>o - v\<^sub>e)\<^sup>2" by (auto simp add: power_def field_split_simps) from assms have "0 \ 2 * (a\<^sub>o - a\<^sub>e)" by auto from divide_right_mono[OF 2 this] assms fst_safe_distance_def snd_safe_distance_def show ?thesis by auto qed lemma snd_safe_distance_iff_nonneg_D2: assumes "a\<^sub>e < a\<^sub>o" shows "s\<^sub>o - s\<^sub>e \ snd_safe_distance \ 0 \ D2" proof - from snd_safe_distance_def assms pos_le_divide_eq[of "2 * (a\<^sub>o - a\<^sub>e)"] have "s\<^sub>o - s\<^sub>e \ snd_safe_distance \ (s\<^sub>o - s\<^sub>e) * (2 * (a\<^sub>o - a\<^sub>e)) \ (v\<^sub>o - v\<^sub>e)\<^sup>2" by auto also have "... \ 0 \ D2" by (auto simp add: algebra_simps) finally show ?thesis by auto qed lemma t_stop_diff_neg_means_leq_D2: assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" "a\<^sub>e < a\<^sub>o" "0 \ D2" shows "v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0 \ sqrt D2 > v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o" proof assume only_if: "v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0" from assms have "... 
\ sqrt D2" by auto with only_if show "v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < sqrt D2" by linarith next assume if_part: "v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < sqrt D2" from cond2_imp_gt_fst_sd[OF assms(1) assms(2)] snd_leq_fst_exp have "distance_leq_d2 \ s\<^sub>o - s\<^sub>e" by auto from if_part and sqrt_D2_leq_stop_time_diff [OF \a\<^sub>e < a\<^sub>o\ _ \distance_leq_d2 \ s\<^sub>o - s\<^sub>e\] show " v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0" by linarith qed theorem cond_3': assumes "s\<^sub>o \ ego.s_stop" "ego.s_stop < other.s_stop" shows "collision {0..} \ (a\<^sub>o > a\<^sub>e \ v\<^sub>o < v\<^sub>e \ s\<^sub>o - s\<^sub>e \ snd_safe_distance \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0)" proof (cases "a\<^sub>o \ a\<^sub>e \ v\<^sub>o \ v\<^sub>e") case True with cond_3[OF assms] show ?thesis by auto next case False from \\ (a\<^sub>o \ a\<^sub>e \ v\<^sub>e \ v\<^sub>o)\ have "a\<^sub>o > a\<^sub>e" by auto from \\ (a\<^sub>o \ a\<^sub>e \ v\<^sub>e \ v\<^sub>o)\ have "v\<^sub>o < v\<^sub>e" by auto show ?thesis proof - from snd_safe_distance_iff_nonneg_D2 [OF \a\<^sub>o > a\<^sub>e\] have 1: "(a\<^sub>e < a\<^sub>o \ v\<^sub>o < v\<^sub>e \ s\<^sub>o - s\<^sub>e \ snd_safe_distance \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0) \ (a\<^sub>e < a\<^sub>o \ v\<^sub>o < v\<^sub>e \ 0 \ D2 \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0)" by auto from t_stop_diff_neg_means_leq_D2[OF assms \a\<^sub>e < a\<^sub>o\] have "... = (a\<^sub>e < a\<^sub>o \ v\<^sub>o < v\<^sub>e \ 0 \ D2 \ sqrt D2 > v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o)" by auto with 1 cond_3[OF assms] show ?thesis by blast qed qed definition d :: "real \ real" where "d t = ( if t \ 0 then s\<^sub>o - s\<^sub>e else if t \ ego.t_stop \ t \ other.t_stop then ego_other.p t else if ego.t_stop \ t \ t \ other.t_stop then other.p t - ego.s_stop else if other.t_stop \ t \ t \ ego.t_stop then other.s_stop - ego.p t else other.s_stop - ego.s_stop )" lemma d_diff: "d t = other.s t - ego.s t" by (auto simp: d_def ego.s_eq_s_stop other.s_eq_s_stop ego.s_cond_def other.s_cond_def movement.p_def field_split_simps) lemma collision_d: "collision S \ (\t\S. d t = 0)" by (force simp: d_diff collision_def ) lemma collision_restrict: "collision {0..} \ collision {0..max ego.t_stop other.t_stop}" by (auto simp: max.coboundedI1 ego.t_stop_nonneg min_def ego.s_eq_s_stop other.s_eq_s_stop collision_def intro!: bexI[where x = "min t (max (movement.t_stop a\<^sub>e v\<^sub>e) (movement.t_stop a\<^sub>o v\<^sub>o))" for t]) lemma collision_union: "collision (A \ B) \ collision A \ collision B" by (auto simp: collision_def) lemma symbolic_checker: "collision {0..} \ (quadroot_in 0 (min ego.t_stop other.t_stop) (1/2 * (a\<^sub>o - a\<^sub>e)) (v\<^sub>o - v\<^sub>e) (s\<^sub>o - s\<^sub>e)) \ (quadroot_in ego.t_stop other.t_stop (1/2 * a\<^sub>o) v\<^sub>o (s\<^sub>o - ego.s_stop)) \ (quadroot_in other.t_stop ego.t_stop (1/2 * a\<^sub>e) v\<^sub>e (s\<^sub>e - other.s_stop))" (is "_ \ ?q1 \ ?q2 \ ?q3") proof - have *: "{0..max ego.t_stop other.t_stop} = {0 .. min ego.t_stop other.t_stop} \ {ego.t_stop .. other.t_stop} \ {other.t_stop .. ego.t_stop}" using ego.t_stop_nonneg other.t_stop_nonneg by auto have "collision {0..min (movement.t_stop a\<^sub>e v\<^sub>e) (movement.t_stop a\<^sub>o v\<^sub>o)} = ?q1" by (force simp: collision_def quadroot_in_def root_in_def d_def power2_eq_square field_split_simps movement.p_def movement.s_cond_def) moreover have "collision {ego.t_stop .. 
other.t_stop} = ?q2" using ego.t_stop_nonneg by (force simp: collision_def quadroot_in_def root_in_def d_def ego.s_eq_s_stop movement.s_cond_def movement.p_def) moreover have "collision {other.t_stop .. ego.t_stop} = ?q3" using other.t_stop_nonneg by (force simp: collision_def quadroot_in_def root_in_def d_def other.s_eq_s_stop movement.s_cond_def movement.p_def) ultimately show ?thesis unfolding collision_restrict * collision_union by auto qed end subsection \Checker Design\ definition rel_dist_to_stop :: "real \ real \ real" where "rel_dist_to_stop v a \ - v\<^sup>2 / (2 * a)" context includes floatarith_notation begin definition rel_dist_to_stop_expr :: "nat \ nat \ floatarith" where "rel_dist_to_stop_expr v a = Mult (Minus (Power (Var v) 2)) (Inverse (Mult (Num 2) (Var a)))" definition rel_dist_to_stop' :: "nat \ float interval option \ float interval option \ float interval option" where "rel_dist_to_stop' p v a = approx p (rel_dist_to_stop_expr 0 1) [v, a]" lemma rel_dist_to_stop': "interpret_floatarith (rel_dist_to_stop_expr 0 1) [v, a] = rel_dist_to_stop v a" by (simp add: rel_dist_to_stop_def rel_dist_to_stop_expr_def inverse_eq_divide) definition first_safe_dist :: "real \ real \ real" where "first_safe_dist v\<^sub>e a\<^sub>e \ rel_dist_to_stop v\<^sub>e a\<^sub>e" definition second_safe_dist :: "real \ real \ real \ real \ real" where "second_safe_dist v\<^sub>e a\<^sub>e v\<^sub>o a\<^sub>o \ rel_dist_to_stop v\<^sub>e a\<^sub>e - rel_dist_to_stop v\<^sub>o a\<^sub>o" definition second_safe_dist_expr :: "nat \ nat \ nat \ nat \ floatarith" where "second_safe_dist_expr ve ae vo ao = Add (rel_dist_to_stop_expr ve ae) (Minus (rel_dist_to_stop_expr vo ao))" definition second_safe_dist' :: "nat \ float interval option \ float interval option \ float interval option \ float interval option \ float interval option" where "second_safe_dist' p v\<^sub>e a\<^sub>e v\<^sub>o a\<^sub>o = approx p (second_safe_dist_expr 0 1 2 3) [v\<^sub>e, a\<^sub>e, v\<^sub>o, a\<^sub>o]" lemma second_safe_dist': "interpret_floatarith (second_safe_dist_expr 0 1 2 3) [v, a, v', a'] = second_safe_dist v a v' a'" by (simp add: second_safe_dist_def second_safe_dist_expr_def rel_dist_to_stop_def rel_dist_to_stop_expr_def inverse_eq_divide) definition t_stop :: "real \ real \ real" where "t_stop v a \ - v / a" definition t_stop_expr :: "nat \ nat \ floatarith" where "t_stop_expr v a = Minus (Mult (Var v) (Inverse (Var a)))" end definition s_stop :: "real \ real \ real \ real" where "s_stop s v a \ s + rel_dist_to_stop v a" definition discriminant :: "real \ real \ real \ real \ real \ real \ real" where "discriminant s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ (v\<^sub>o - v\<^sub>e)\<^sup>2 - 2 * (a\<^sub>o - a\<^sub>e) * (s\<^sub>o - s\<^sub>e)" definition suff_cond_safe_dist2 :: "real \ real \ real \ real \ real \ real \ bool" where "suff_cond_safe_dist2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ let D2 = discriminant s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o in \ (a\<^sub>e < a\<^sub>o \ v\<^sub>o < v\<^sub>e \ 0 \ D2 \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < sqrt D2 )" lemma less_sqrt_iff: "y \ 0 \ x < sqrt y \ (x \ 0 \ x\<^sup>2 < y)" by (smt real_le_lsqrt real_less_rsqrt real_sqrt_ge_zero) lemma suff_cond_safe_dist2_code[code]: "suff_cond_safe_dist2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o = (let D2 = discriminant s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o in (a\<^sub>e < a\<^sub>o \ v\<^sub>o < v\<^sub>e 
\ 0 \ D2 \ (v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o \ 0 \ (v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o)\<^sup>2 \ D2)))" using real_sqrt_ge_zero real_less_rsqrt less_sqrt_iff by (auto simp: suff_cond_safe_dist2_def Let_def) text \ There are two expressions for safe distance. The first safe distance \first_safe_dist\ is always valid. Whenever the distance is greater than \first_safe_dist\, it is guaranteed to be collision free. The second one is \second_safe_dist\. If the sufficient condition \suff_cond_safe_dist2\ is satisfied and the distance is greater than \second_safe_dist\, it is guaranteed to be collision free. \ definition check_precond :: "real \ real \ real \ real \ real \ real \ bool" where "check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ s\<^sub>o > s\<^sub>e \ 0 \ v\<^sub>e \ 0 \ v\<^sub>o \ a\<^sub>e < 0 \ a\<^sub>o < 0 " lemma check_precond_safe_distance: "check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o = safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o" proof assume "safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o" then interpret safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o . show "check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" by (auto simp: check_precond_def in_front nonneg_vel_ego other.nonneg_vel ego.decel other.decel) qed (unfold_locales; auto simp: check_precond_def) subsubsection \Prescriptive Checker\ definition checker :: "real \ real \ real \ real \ real \ real \ bool" where "checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ let distance = s\<^sub>o - s\<^sub>e; precond = check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o; safe_dist1 = first_safe_dist v\<^sub>e a\<^sub>e; safe_dist2 = second_safe_dist v\<^sub>e a\<^sub>e v\<^sub>o a\<^sub>o; cond2 = suff_cond_safe_dist2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o in precond \ (safe_dist1 < distance \ (safe_dist2 < distance \ distance \ safe_dist1 \ cond2))" lemma aux_logic: assumes "a \ b" assumes "b \ a \ c" shows "a \ b \ c" using assms by blast theorem soundness_correctness: "checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ safe_distance.no_collision a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o {0..}" proof (rule aux_logic, simp add: checker_def Let_def) assume cp: "check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" then have in_front': "s\<^sub>o > s\<^sub>e" and nonneg_vel_ego: "0 \ v\<^sub>e" and nonneg_vel_other: "0 \ v\<^sub>o" and decelerate_ego: "a\<^sub>e < 0" and decelerate_other: "a\<^sub>o < 0" by (auto simp: check_precond_def) from in_front' have in_front: "0 < s\<^sub>o - s\<^sub>e" by arith interpret safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o by (unfold_locales; fact) interpret ego: braking_movement a\<^sub>e v\<^sub>e s\<^sub>e by (unfold_locales; fact) interpret other: braking_movement a\<^sub>o v\<^sub>o s\<^sub>o by (unfold_locales; fact) have "ego.p_max < s\<^sub>o \ other.p_max \ ego.p_max \ s\<^sub>o \ ego.p_max \ ego.p_max < other.p_max" by arith then show "checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o = safe_distance.no_collision a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o {0..}" proof (elim disjE) assume "ego.p_max < s\<^sub>o" then have "checker s\<^sub>e v\<^sub>e 
a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" using \a\<^sub>e < 0\ cp by (simp add: checker_def Let_def first_safe_dist_def rel_dist_to_stop_def ego.p_max_def ego.p_def ego.t_stop_def algebra_simps power2_eq_square) moreover have "no_collision {0..}" using \ego.p_max < s\<^sub>o\ by (intro cond_1) (auto simp: ego.s_t_stop) ultimately show ?thesis by auto next assume "other.p_max \ ego.p_max" then have "\ checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" using \a\<^sub>e < 0\ \a\<^sub>o < 0\ other.nonneg_vel by (auto simp add: checker_def Let_def first_safe_dist_def second_safe_dist_def rel_dist_to_stop_def movement.p_max_def movement.p_def movement.t_stop_def algebra_simps power2_eq_square) (smt divide_nonneg_neg mult_nonneg_nonneg) moreover have "collision {0..}" using \other.p_max \ ego.p_max\ by (intro cond_2) (auto simp: other.s_t_stop ego.s_t_stop) ultimately show ?thesis by auto next assume H: "s\<^sub>o \ ego.p_max \ ego.p_max < other.p_max" then have "checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o = (\ (a\<^sub>e < a\<^sub>o \ v\<^sub>o < v\<^sub>e \ 0 \ D2 \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < sqrt D2))" using \a\<^sub>e < 0\ \a\<^sub>o < 0\ cp by (simp add: checker_def Let_def first_safe_dist_def rel_dist_to_stop_def ego.p_max_def ego.p_def ego.t_stop_def algebra_simps power2_eq_square second_safe_dist_def suff_cond_safe_dist2_def discriminant_def not_less other.p_max_def other.p_def other.t_stop_def) also have "\ = no_collision {0..}" using H unfolding Not_eq_iff by (intro cond_3[symmetric]) (auto simp: ego.s_t_stop other.s_t_stop) finally show ?thesis by auto qed qed definition checker2 :: "real \ real \ real \ real \ real \ real \ bool" where "checker2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ let distance = s\<^sub>o - s\<^sub>e; precond = check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o; safe_dist1 = first_safe_dist v\<^sub>e a\<^sub>e; safe_dist2 = second_safe_dist v\<^sub>e a\<^sub>e v\<^sub>o a\<^sub>o; safe_dist3 = - rel_dist_to_stop (v\<^sub>o - v\<^sub>e) (a\<^sub>o - a\<^sub>e) in if \ precond then False else if distance > safe_dist1 then True else if a\<^sub>o > a\<^sub>e \ v\<^sub>o < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0 then distance > safe_dist3 else distance > safe_dist2" theorem checker_eq_checker2: "checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ checker2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" proof (cases "check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o") case False with checker_def checker2_def show ?thesis by auto next case True with check_precond_def safe_distance_def have "safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o" by (simp add: check_precond_safe_distance) from this interpret safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o by auto interpret ego: braking_movement a\<^sub>e v\<^sub>e s\<^sub>e by (unfold_locales; fact) interpret other: braking_movement a\<^sub>o v\<^sub>o s\<^sub>o by (unfold_locales; fact) from \check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o\ cond_3 cond_3'[symmetric] fst_leq_snd_safe_distance ego.s_t_stop ego.p_max_def ego.p_def ego.t_stop_def hyps other.s_t_stop other.p_max_def other.p_def other.t_stop_def checker2_def checker_def suff_cond_safe_dist2_def fst_safe_distance_def first_safe_dist_def snd_safe_distance_def second_safe_dist_def rel_dist_to_stop_def 
discriminant_def show ?thesis by (auto simp add:power_def Let_def split: if_splits) qed definition checker3 :: "real \ real \ real \ real \ real \ real \ bool" where "checker3 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ let distance = s\<^sub>o - s\<^sub>e; precond = check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o; s_stop_e = s\<^sub>e + rel_dist_to_stop v\<^sub>e a\<^sub>e; s_stop_o = s\<^sub>o + rel_dist_to_stop v\<^sub>o a\<^sub>o in precond \ (s_stop_e < s\<^sub>o \ (s\<^sub>o \ s_stop_e \ s_stop_e < s_stop_o \ (\(a\<^sub>o > a\<^sub>e \ v\<^sub>o < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o < 0 \ distance * (a\<^sub>o - a\<^sub>e) \ (v\<^sub>o - v\<^sub>e)\<^sup>2 / 2))))" theorem checker2_eq_checker3: "checker2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ checker3 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" apply (auto simp: checker2_def checker3_def Let_def first_safe_dist_def not_less suff_cond_safe_dist2_def second_safe_dist_def rel_dist_to_stop_def check_precond_def) proof goal_cases case 1 then interpret safe_distance by unfold_locales auto from fst_leq_snd_safe_distance 1 show ?case by (auto simp: fst_safe_distance_def snd_safe_distance_def) next case 2 then interpret safe_distance by unfold_locales auto from fst_leq_snd_safe_distance 2 show ?case by (auto simp: fst_safe_distance_def snd_safe_distance_def field_split_simps) next case 3 then interpret safe_distance by unfold_locales auto from fst_leq_snd_safe_distance 3 show ?case by (auto simp: fst_safe_distance_def snd_safe_distance_def field_split_simps) qed subsubsection \Approximate Checker\ lemma checker2_def': "checker2 a b c d e f = ( let distance = d - a; precond = check_precond a b c d e f; safe_dist1 = first_safe_dist b c; safe_dist2 = second_safe_dist b c e f; C = c < f \ e < b \ b * f > c * e; P1 = (e - b)\<^sup>2 < 2 * distance * (f - c); P2 = - b\<^sup>2 / c + e\<^sup>2 / f < 2 * distance in precond \ (safe_dist1 < distance \ safe_dist1 \ distance \ (C \ P1 \ \C \ P2)))" unfolding checker2_def by (auto simp: Let_def field_split_simps check_precond_def second_safe_dist_def rel_dist_to_stop_def) lemma power2_less_sqrt_iff: "(x::real)\<^sup>2 < y \ (y \ 0 \ abs x < sqrt y)" apply (auto simp: real_less_rsqrt abs_real_def less_sqrt_iff) apply (meson le_less le_less_trans not_less power2_less_0)+ done schematic_goal checker_form: "interpret_form ?x ?y \ checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" unfolding checker_eq_checker2 checker2_eq_checker3 checker3_def check_precond_def first_safe_dist_def second_safe_dist_def suff_cond_safe_dist2_def Let_def t_stop_def s_stop_def rel_dist_to_stop_def discriminant_def not_le not_less de_Morgan_conj de_Morgan_disj power2_less_sqrt_iff apply (tactic \(Reification.tac @{context} @{thms interpret_form.simps interpret_floatarith.simps interpret_floatarith_divide interpret_floatarith_diff}) NONE 1\) apply assumption done context includes floatarith_notation begin definition "checker' p s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o = approx_form p (Conj (Conj (Less (Var (Suc (Suc 0))) (Var (Suc (Suc (Suc 0))))) (Conj (LessEqual (Var (Suc (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))) (Var (Suc (Suc (Suc (Suc (Suc 0))))))) (Conj (LessEqual (Var (Suc (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))) (Var (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))) (Conj (Less (Var 0) (Var (Suc (Suc (Suc (Suc (Suc (Suc (Suc 0))))))))) (Less (Var (Suc 0)) (Var (Suc (Suc (Suc (Suc (Suc (Suc (Suc 
0))))))))))))) (Disj (Less (Add (Var (Suc (Suc 0))) (Mult (Minus (Power (Var (Suc (Suc (Suc (Suc (Suc 0)))))) 2)) (Inverse (Mult (Var (Suc (Suc (Suc (Suc 0))))) (Var 0))))) (Var (Suc (Suc (Suc 0))))) (Conj (LessEqual (Var (Suc (Suc (Suc 0)))) (Add (Var (Suc (Suc 0))) (Mult (Minus (Power (Var (Suc (Suc (Suc (Suc (Suc 0)))))) 2)) (Inverse (Mult (Var (Suc (Suc (Suc (Suc 0))))) (Var 0)))))) (Conj (Less (Add (Var (Suc (Suc 0))) (Mult (Minus (Power (Var (Suc (Suc (Suc (Suc (Suc 0)))))) 2)) (Inverse (Mult (Var (Suc (Suc (Suc (Suc 0))))) (Var 0))))) (Add (Var (Suc (Suc (Suc 0)))) (Mult (Minus (Power (Var (Suc (Suc (Suc (Suc (Suc (Suc 0))))))) 2)) (Inverse (Mult (Var (Suc (Suc (Suc (Suc 0))))) (Var (Suc 0))))))) (Disj (LessEqual (Var (Suc 0)) (Var 0)) (Disj (LessEqual (Var (Suc (Suc (Suc (Suc (Suc 0)))))) (Var (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))) (Disj (LessEqual (Var (Suc (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))) (Add (Var (Suc (Suc (Suc (Suc (Suc 0)))))) (Minus (Mult (Mult (Var 0) (Inverse (Var (Suc 0)))) (Var (Suc (Suc (Suc (Suc (Suc (Suc 0))))))))))) (Less (Mult (Power (Add (Var (Suc (Suc (Suc (Suc (Suc (Suc 0))))))) (Minus (Var (Suc (Suc (Suc (Suc (Suc 0)))))))) 2) (Inverse (Var (Suc (Suc (Suc (Suc 0))))))) (Mult (Add (Var (Suc (Suc (Suc 0)))) (Minus (Var (Suc (Suc 0))))) (Add (Var (Suc 0)) (Minus (Var 0)))))))))))) ([a\<^sub>e, a\<^sub>o, s\<^sub>e, s\<^sub>o, Interval' (Float 2 0) (Float 2 0), v\<^sub>e, v\<^sub>o, Interval' (Float 0 1) (Float 0 1)]) (replicate 8 0)" lemma less_Suc_iff_disj: "i < Suc x \ i = x \ i < x" by auto lemma checker'_soundness_correctness: assumes "a \ {real_of_float al .. real_of_float au}" assumes "b \ {real_of_float bl .. real_of_float bu}" assumes "c \ {real_of_float cl .. real_of_float cu}" assumes "d \ {real_of_float dl .. real_of_float du}" assumes "e \ {real_of_float el .. real_of_float eu}" assumes "f \ {real_of_float fl .. real_of_float fu}" assumes chk: "checker' p (Interval' al au) (Interval' bl bu) (Interval' cl cu) (Interval' dl du) (Interval' el eu) (Interval' fl fu)" shows "checker a b c d e f" apply (rule checker_form) apply (rule approx_form_aux) apply (rule chk[unfolded checker'_def]) using assms(1-6) unfolding bounded_by_def proof (auto split: option.splits) fix i x2 assume *: "[Interval' cl cu, Interval' fl fu, Interval' al au, Interval' dl du, Interval' (Float 2 0) (Float 2 0), Interval' bl bu, Interval' el eu, Interval' 0 0] ! i = Some x2" assume " i < Suc (Suc (Suc (Suc (Suc (Suc (Suc (Suc 0)))))))" then consider "i = 0" | "i = 1" | "i = 2" | "i = 3" | "i = 4" | "i = 5" | "i = 6" | "i = 7" by linarith thus " [c, f, a, d, 2, b, e, 0] ! i \\<^sub>r x2" apply cases using assms(1-6) * by (auto intro!: in_real_intervalI dest!: Interval'_eq_Some) qed lemma approximate_soundness_correctness: assumes "a \ {real_of_float al .. real_of_float au}" assumes "b \ {real_of_float bl .. real_of_float bu}" assumes "c \ {real_of_float cl .. real_of_float cu}" assumes "d \ {real_of_float dl .. real_of_float du}" assumes "e \ {real_of_float el .. real_of_float eu}" assumes "f \ {real_of_float fl .. 
real_of_float fu}" assumes chk: "checker' p (Interval' al au) (Interval' bl bu) (Interval' cl cu) (Interval' dl du) (Interval' el eu) (Interval' fl fu)" shows checker'_precond: "check_precond a b c d e f" and checker'_no_collision: "safe_distance.no_collision c b a f e d {0..}" unfolding atomize_conj apply (subst soundness_correctness[symmetric]) using checker'_soundness_correctness[OF assms] by (auto simp: checker_def Let_def) subsubsection \Symbolic Checker\ definition symbolic_checker :: "real \ real \ real \ real \ real \ real \ bool" where "symbolic_checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ let e_stop = - v\<^sub>e / a\<^sub>e; o_stop = - v\<^sub>o / a\<^sub>o in check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ (\quadroot_in 0 (min e_stop o_stop) (1/2 * (a\<^sub>o - a\<^sub>e)) (v\<^sub>o - v\<^sub>e) (s\<^sub>o - s\<^sub>e) \ \quadroot_in e_stop o_stop (1/2 * a\<^sub>o) v\<^sub>o (s\<^sub>o - movement.p a\<^sub>e v\<^sub>e s\<^sub>e e_stop) \ \quadroot_in o_stop e_stop (1/2 * a\<^sub>e) v\<^sub>e (s\<^sub>e - movement.p a\<^sub>o v\<^sub>o s\<^sub>o o_stop))" theorem symbolic_soundness_correctness: "symbolic_checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ safe_distance.no_collision a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o {0..}" proof - { assume c: "check_precond s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o" then interpret safe_distance a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o by (simp add: check_precond_safe_distance) have "symbolic_checker s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o = no_collision {0..}" using c unfolding symbolic_checker symbolic_checker_def ego.s_t_stop other.s_t_stop ego.p_max_def other.p_max_def by (auto simp: Let_def movement.t_stop_def) } then show ?thesis by (auto simp: symbolic_checker_def Let_def) qed end end \ No newline at end of file diff --git a/thys/Safe_Distance/Safe_Distance_Reaction.thy b/thys/Safe_Distance/Safe_Distance_Reaction.thy --- a/thys/Safe_Distance/Safe_Distance_Reaction.thy +++ b/thys/Safe_Distance/Safe_Distance_Reaction.thy @@ -1,1757 +1,1757 @@ \<^marker>\creator "Albert Rizaldi" "Fabian Immler\ section \Safe Distance with Reaction Time\ theory Safe_Distance_Reaction imports Safe_Distance begin subsection \Normal Safe Distance\ locale safe_distance_normal = safe_distance + fixes \ :: real assumes pos_react: "0 < \" begin sublocale ego2: braking_movement a\<^sub>e v\<^sub>e "(ego.q \)" .. lemma ego2_s_init: "ego2.s 0 = ego.q \" unfolding ego2.s_def by auto definition \ :: "real \ real" where "\ t = t - \" definition \' :: "real \ real" where "\' t = 1" lemma \_continuous[continuous_intros]: "continuous_on T \" unfolding \_def by (auto intro: continuous_intros) lemma isCont_\[continuous_intros]: "isCont \ x" using \_continuous[of UNIV] by (auto simp: continuous_on_eq_continuous_at) lemma del_has_vector_derivative[derivative_intros]: "(\ has_vector_derivative \' t) (at t within u)" by (auto simp: \_def[abs_def] \'_def has_vector_derivative_def algebra_simps intro!: derivative_eq_intros) lemma del_has_real_derivative[derivative_intros]: "(\ has_real_derivative \' t) (at t within u)" using del_has_vector_derivative by (simp add:has_field_derivative_iff_has_vector_derivative) lemma delay_image: "\ ` {\..} = {0..}" proof (rule subset_antisym, unfold image_def, unfold \_def) show "{y. \x\{\..}. 
y = x - \} \ {0..}" by auto next show "{0..} \ {y. \x\{\..}. y = x - \}" proof (rule subsetI) fix a assume "(a::real) \ {0..}" hence "0 \ a" by simp hence "\x\{\..}. a = x - \" using bexI[where x = "a + \"] by auto thus "a \ {y. \x\{\..}. y = x - \}" by auto qed qed lemma s_delayed_has_real_derivative[derivative_intros]: assumes "\ \ t" shows "((ego2.s \ \) has_field_derivative ego2.s' (t - \) * \' t) (at t within {\..})" proof (rule DERIV_image_chain) from assms have 0: "0 \ t - \" by simp from ego2.t_stop_nonneg have 1: "v\<^sub>e / a\<^sub>e \ 0" unfolding ego2.t_stop_def by simp from ego2.decel have 2: "a\<^sub>e \ 0" by simp show "(ego2.s has_real_derivative ego2.s' (t - \)) (at (\ t) within \ ` {\..})" using ego2.s_has_real_derivative[OF 0 1 2] sym[OF delay_image] unfolding \_def by simp next from del_has_real_derivative show "(\ has_real_derivative \' t) (at t within {\..})" by auto qed lemma s_delayed_has_real_derivative' [derivative_intros]: assumes "\ \ t" shows "((ego2.s \ \) has_field_derivative (ego2.s' \ \) t) (at t within {\..})" proof - from s_delayed_has_real_derivative[OF assms] have "((ego2.s \ \) has_field_derivative ego2.s' (t - \) * \' t) (at t within {\..})" by auto hence "((ego2.s \ \) has_field_derivative ego2.s' (t - \) * 1) (at t within {\..})" using \'_def[of t] by metis hence "((ego2.s \ \) has_field_derivative ego2.s' (t - \)) (at t within {\..})" by (simp add:algebra_simps) thus ?thesis unfolding comp_def \_def by auto qed lemma s_delayed_has_vector_derivative' [derivative_intros]: assumes "\ \ t" shows "((ego2.s \ \) has_vector_derivative (ego2.s' \ \) t) (at t within {\..})" using s_delayed_has_real_derivative'[OF assms] by (simp add:has_field_derivative_iff_has_vector_derivative) definition u :: "real \ real" where "u t = ( if t \ 0 then s\<^sub>e else if t \ \ then ego.q t else (ego2.s \ \) t)" lemma init_u: "t \ 0 \ u t = s\<^sub>e" unfolding u_def by auto lemma u_delta: "u \ = ego2.s 0" proof - have "u \ = ego.q \" using pos_react unfolding u_def by auto also have "... = ego2.s 0" unfolding ego2.s_def by auto finally show "u \ = ego2.s 0" . qed lemma q_delta: "ego.q \ = ego2.s 0" using u_delta pos_react unfolding u_def by auto definition u' :: "real \ real" where "u' t = (if t \ \ then ego.q' t else ego2.s' (t - \))" lemma u'_delta: "u' \ = ego2.s' 0" proof - have "u' \ = ego.q' \" unfolding u'_def by auto also have "... = v\<^sub>e" unfolding ego2.q'_def by simp also have "... = ego2.p' 0" unfolding ego2.p'_def by simp also have "... = ego2.s' 0" using ego2.t_stop_nonneg unfolding ego2.s'_def by auto finally show "u' \ = ego.s' 0" . qed lemma q'_delta: "ego.q' \ = ego2.s' 0" using u'_delta unfolding u'_def by auto lemma u_has_real_derivative[derivative_intros]: assumes nonneg_t: "t \ 0" shows "(u has_real_derivative u' t) (at t within {0..})" proof - from pos_react have "0 \ \" by simp have temp: "((\t. if t \ {0 .. 
\} then ego.q t else (ego2.s \ \) t) has_real_derivative (if t \ {0..\} then ego.q' t else (ego2.s' \ \) t)) (at t within {0..})" (is "(?f1 has_real_derivative ?f2) (?net)") unfolding u_def[abs_def] u'_def has_field_derivative_iff_has_vector_derivative apply (rule has_vector_derivative_If_within_closures[where T = "{\..}"]) using \0 \ \\ q_delta q'_delta ego.s_has_vector_derivative[OF assms] ego.decel ego.t_stop_nonneg s_delayed_has_vector_derivative'[of "t"] \_def unfolding comp_def by (auto simp: assms max_def insert_absorb intro!: ego.q_has_vector_derivative) show ?thesis unfolding has_vector_derivative_def has_field_derivative_iff_has_vector_derivative u'_def u_def[abs_def] proof (rule has_derivative_transform[where f="(\t. if t \ {0..\} then ego.q t else (ego2.s \ \) t)"]) from nonneg_t show " t \ {0..}" by auto next fix x assume "(x::real) \ {0..}" hence "x \ \ \ x \ {0 .. \}" by simp thus " (if x \ 0 then s\<^sub>e else if x \ \ then ego.q x else (ego2.s \ \) x) = (if x \ {0..\} then ego.q x else (ego2.s \ \) x)" using pos_react unfolding ego.q_def by auto next from temp have "(?f1 has_vector_derivative ?f2) ?net" using has_field_derivative_iff_has_vector_derivative by auto moreover with assms have "t \ {0 .. \} \ t \ \" by auto ultimately show " ((\t. if t \ {0..\} then ego.q t else (ego2.s \ \) t) has_derivative (\x. x *\<^sub>R (if t \ \ then ego2.q' t else ego2.s' (t - \)))) (at t within {0..})" unfolding comp_def \_def has_vector_derivative_def by auto qed qed definition t_stop :: real where "t_stop = ego2.t_stop + \" lemma t_stop_nonneg: "0 \ t_stop" unfolding t_stop_def using ego2.t_stop_nonneg pos_react by auto lemma t_stop_pos: "0 < t_stop" unfolding t_stop_def using ego2.t_stop_nonneg pos_react by auto lemma t_stop_zero: assumes "t_stop \ x" assumes "x \ \" shows "v\<^sub>e = 0" using assms unfolding t_stop_def using ego2.t_stop_nonneg pos_react ego2.t_stop_zero by auto lemma u'_stop_zero: "u' t_stop = 0" unfolding u'_def t_stop_def ego2.q'_def ego2.s'_def using ego2.t_stop_nonneg ego2.p'_stop_zero decelerate_ego ego2.t_stop_zero by auto definition u_max :: real where "u_max = u (ego2.t_stop + \)" lemma u_max_eq: "u_max = ego.q \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2" proof (cases "ego2.t_stop = 0") assume "ego2.t_stop = 0" hence "v\<^sub>e = 0" using ego2.t_stop_zero by simp with \ego2.t_stop = 0\ show "u_max = ego.q \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2" unfolding u_max_def u_def using pos_react by auto next assume "ego2.t_stop \ 0" hence "u_max = (ego2.s \ \) (ego2.t_stop + \)" unfolding u_max_def u_def using ego2.t_stop_nonneg pos_react by auto moreover have "... = ego2.s ego2.t_stop" unfolding comp_def \_def by auto moreover have "... = ego2.p_max" unfolding ego2.s_def ego2.p_max_def using \ego2.t_stop \ 0\ ego2.t_stop_nonneg by auto moreover have "... = ego.q \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2" using ego2.p_max_eq . 
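(* A small numeric illustration of the formula just derived, added as an aside and not part of the formal development: with assumed sample values s\<^sub>e = 0, v\<^sub>e = 10, a\<^sub>e = -2 and \ = 1, we get ego.q \ = s\<^sub>e + v\<^sub>e * \ = 10, so u_max = ego.q \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2 = 10 - 100 / (-2) / 2 = 10 + 25 = 35; the ego vehicle covers 10 during the reaction time and another 25 while braking to a standstill. *)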
ultimately show ?thesis by auto qed lemma u_mono: assumes "x \ y" "y \ t_stop" shows "u x \ u y" proof - have "y \ 0 \ (0 < y \ y \ \) \ \ < y" by auto moreover { assume "y \ 0" with assms have "x \ 0" by auto with \y \ 0\ have "u x \ u y" unfolding u_def by auto } moreover { assume "0 < y \ y \ \" with assms have "x \ \" by auto hence "u x \ u y" proof (cases "x \ 0") assume "x \ 0" with \x \ \\ and \0 < y \ y \ \\ show "u x \ u y" unfolding u_def using ego.q_min by auto next assume "\ x \ 0" with \0 < y \ y \ \\ and assms show "u x \ u y" unfolding u_def using ego.q_mono by auto qed } moreover { assume "\ < y" have "u x \ u y" proof (cases "\ < x") assume "\ < x" with pos_react have "\ x \ 0" by auto moreover from \\ < y\ and pos_react have "\ y \ 0" by auto ultimately show "u x \ u y" unfolding u_def comp_def using assms ego2.s_mono[of "x - \" "y - \"] \\ < y\ \\ < x\ by (auto simp:\_def) next assume "\ \ < x" hence "x \ \" by simp hence "u x \ ego.q \" unfolding u_def using pos_react nonneg_vel_ego by (auto simp add:ego.q_def mult_left_mono) also have "... = ego2.s (\ \)" unfolding ego2.s_def unfolding \_def by auto also have "... \ ego2.s (\ y)" unfolding \_def using \\ < y\ by (auto simp add:ego2.s_mono) also have "... = u y" unfolding u_def using \\ < y\ pos_react by auto ultimately show "u x \ u y" by auto qed } ultimately show "u x \ u y" by auto qed lemma u_antimono: "x \ y \ t_stop \ x \ u y \ u x" proof - assume 1: "x \ y" assume 2: "t_stop \ x" hence "\ \ x" unfolding \_def t_stop_def using pos_react ego2.t_stop_nonneg by auto with 1 have "\ \ y" by auto from 1 and 2 have 3: "t_stop \ y" by auto show "u y \ u x" proof (cases "x \ \ \ y \ \") assume "x \ \ \ y \ \" hence "x \ \" and "y \ \" by auto have "u y \ (ego2.s \ \) y" unfolding u_def using \\ \ y\ \y \ \\ pos_react by auto also have "... \ (ego2.s \ \) x" unfolding comp_def proof (intro ego2.s_antimono) show "\ x \ \ y" unfolding \_def using \x \ y\ by auto next show "ego2.t_stop \ \ x" unfolding \_def using \t_stop \ x\ by (auto simp: t_stop_def) qed also have "... \ u x" unfolding u_def using \\ \ x\\x \ \\ pos_react by auto ultimately show "u y \ u x" by auto next assume "\ (x \ \ \ y \ \)" have "x \ \ \ y \ \" proof (rule impI; erule contrapos_pp[where Q="\ x = \"]) assume "\ y \ \" hence "y = \" by simp with \t_stop \ y\ have "ego2.t_stop = 0" unfolding t_stop_def using ego2.t_stop_nonneg by auto with \t_stop \ x\ have "x = \" unfolding t_stop_def using \x \ y\ \y = \\ by auto thus "\ x \ \" by auto qed with \\ (x \ \ \ y \ \)\ have "(x = \ \ y = \) \ (x = \)" by auto moreover { assume "x = \ \ y = \" hence "x = \" and "y = \" by auto hence "u y \ ego.q \" unfolding u_def using pos_react by auto also have "... \ u x" unfolding u_def using \x = \\ pos_react by auto ultimately have "u y \ u x" by auto } moreover { assume "x = \" hence "ego2.t_stop = 0" using \t_stop \ x\ ego2.t_stop_nonneg by (auto simp:t_stop_def) hence "v\<^sub>e = 0" by (rule ego2.t_stop_zero) hence "u y \ ego.q \" using pos_react \x = \\ \x \ y\ \v\<^sub>e = 0\ unfolding u_def comp_def \_def ego2.s_def ego2.p_def ego2.p_max_def ego2.t_stop_def by auto also have "... 
\ u x" using \x = \\ pos_react unfolding u_def by auto ultimately have "u y \ u x" by auto } ultimately show ?thesis by auto qed qed lemma u_max: "u x \ u_max" unfolding u_max_def using t_stop_def by (cases "x \ t_stop") (auto intro: u_mono u_antimono) lemma u_eq_u_stop: "NO_MATCH t_stop x \ x \ t_stop \ u x = u_max" proof - assume "t_stop \ x" with t_stop_pos have "0 < x" by auto from \t_stop \ x\ have "\ \ x" unfolding t_stop_def using ego2.t_stop_nonneg by auto show "u x = u_max" proof (cases "x \ \") assume "x \ \" with \t_stop \ x\ have "v\<^sub>e = 0" by (rule t_stop_zero) also have "x = \" using \x \ \\ and \\ \ x\ by auto ultimately have "u x = ego.q \" unfolding u_def using pos_react by auto also have "... = u_max" unfolding u_max_eq using \v\<^sub>e = 0\ by auto ultimately show "u x = u_max" by simp next assume "\ x \ \" hence "\ < x" by auto hence "u x = (ego2.s \ \) x" unfolding u_def using pos_react by auto also have "... = ego2.s ego2.t_stop" proof (unfold comp_def; unfold \_def; intro order.antisym) have "x - \ \ ego2.t_stop" using \t_stop \ x\ unfolding t_stop_def by auto thus "ego2.s (x - \) \ ego2.s ego2.t_stop" by (rule ego2.s_antimono) simp next have "x - \ \ ego2.t_stop" using \t_stop \ x\ unfolding t_stop_def by auto thus "ego2.s ego2.t_stop \ ego2.s (x - \)" using ego2.t_stop_nonneg by (rule ego2.s_mono) qed also have "... = u_max" unfolding u_max_eq ego2.s_t_stop ego2.p_max_eq by auto ultimately show "u x = u_max" by auto qed qed lemma at_least_delta: assumes "x \ \" assumes "t_stop \ x" shows "ego.q x = ego2.s (x - \)" using assms ego2.t_stop_nonneg unfolding t_stop_def ego2.s_def less_eq_real_def by auto lemma continuous_on_u[continuous_intros]: "continuous_on T u" unfolding u_def[abs_def] using t_stop_nonneg pos_react at_least_delta proof (intro continuous_on_subset[where t=T and s = "{..0} \ ({0..\} \ ({\ .. t_stop} \ {t_stop ..}))"] continuous_on_If continuous_intros) fix x assume " \ x \ \" assume "x \ {0..\}" hence "0 \ x" and "x \ \" by auto thus "ego.q x = (ego2.s \ \) x" unfolding comp_def \_def ego2.s_def using \\ x \ \\ by auto next fix x assume "x \ {\..t_stop} \ {t_stop..}" hence "\ \ x" unfolding t_stop_def using pos_react ego.t_stop_nonneg by auto also assume "x \ \" ultimately have "x = \" by auto thus "ego.q x = (ego2.s \ \) x" unfolding comp_def \_def ego2.s_def by auto next fix t::real assume "t \ {.. 0}" hence "t \ 0" by auto also assume "\ t \ 0" ultimately have "t = 0" by auto hence "s\<^sub>e = ego.q t" unfolding ego.q_def by auto with pos_react \t = 0\ show "s\<^sub>e = (if t \ \ then ego.q t else (ego2.s \ \) t)" by auto next fix t::real assume "t \ {0..\} \ ({\..t_stop} \ {t_stop..})" hence "0 \ t" using pos_react ego2.t_stop_nonneg by (auto simp: t_stop_def) also assume "t \ 0" ultimately have "t = 0" by auto hence " s\<^sub>e = (if t \ \ then ego.q t else (ego2.s \ \) t)" using pos_react ego.init_q by auto thus "s\<^sub>e = (if t \ \ then ego.q t else (ego2.s \ \) t)" by auto next show "T \ {..0} \ ({0..\} \ ({\..t_stop} \ {t_stop..}))" by auto qed lemma isCont_u[continuous_intros]: "isCont u x" using continuous_on_u[of UNIV] by (auto simp:continuous_on_eq_continuous_at) definition collision_react :: "real set \ bool" where "collision_react time_set \ (\t\time_set. u t = other.s t )" abbreviation no_collision_react :: "real set \ bool" where "no_collision_react time_set \ \ collision_react time_set" lemma no_collision_reactI: assumes "\t. 
t \ S \ u t \ other.s t" shows "no_collision_react S" using assms unfolding collision_react_def by blast lemma no_collision_union: assumes "no_collision_react S" assumes "no_collision_react T" shows "no_collision_react (S \ T)" using assms unfolding collision_react_def by auto lemma collision_trim_subset: assumes "collision_react S" assumes "no_collision_react T" assumes "T \ S" shows "collision_react (S - T)" using assms unfolding collision_react_def by auto theorem cond_1r : "u_max < s\<^sub>o \ no_collision_react {0..}" proof (rule no_collision_reactI, simp) fix t :: real assume "0 \ t" have "u t \ u_max" by (rule u_max) also assume "... < s\<^sub>o" also have "... = other.s 0" by (simp add: other.init_s) also have "... \ other.s t" using \0 \ t\ hyps by (intro other.s_mono) auto finally show "u t \ other.s t" by simp qed definition safe_distance_1r :: real where "safe_distance_1r = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2" lemma sd_1r_eq: "(s\<^sub>o - s\<^sub>e > safe_distance_1r) = (u_max < s\<^sub>o)" proof - have "(s\<^sub>o - s\<^sub>e > safe_distance_1r) = (s\<^sub>o - s\<^sub>e > v\<^sub>e * \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2)" unfolding safe_distance_1r_def by auto moreover have "... = (s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2 < s\<^sub>o)" by auto ultimately show ?thesis using u_max_eq ego.q_def by auto qed lemma sd_1r_correct: assumes "s\<^sub>o - s\<^sub>e > safe_distance_1r" shows "no_collision_react {0..}" proof - from assms have "u_max < s\<^sub>o" using sd_1r_eq by auto thus ?thesis by (rule cond_1r) qed lemma u_other_strict_ivt: assumes "u t > other.s t" shows "collision_react {0..<t}" proof cases assume "0 \ t" with assms in_front have "\x\0. x \ t \ other.s x - u x = 0" by (intro IVT2) (auto simp: init_u other.init_s continuous_diff isCont_u other.isCont_s) then show ?thesis using assms by (auto simp add: algebra_simps collision_react_def Bex_def order.order_iff_strict) qed(insert assms hyps, auto simp: collision_react_def init_u other.init_s) lemma collision_react_subset: "collision_react s \ s \ t \ collision_react t" by (auto simp:collision_react_def) lemma u_other_ivt: assumes "u t \ other.s t" shows "collision_react {0 .. t}" proof cases assume "u t > other.s t" from u_other_strict_ivt[OF this] show ?thesis by (rule collision_react_subset) auto qed (insert hyps assms; cases "t \ 0"; force simp: collision_react_def init_u other.init_s) theorem cond_2r: assumes "u_max \ other.s_stop" shows "collision_react {0 ..}" using assms apply(intro collision_react_subset[where t="{0..}" and s ="{0 .. 
max t_stop other.t_stop}"]) apply(intro u_other_ivt[where t ="max t_stop other.t_stop"]) apply(auto simp: u_eq_u_stop other.s_eq_s_stop) done definition ego_other2 :: "real \ real" where "ego_other2 t = other.s t - u t" lemma continuous_on_ego_other2[continuous_intros]: "continuous_on T ego_other2" unfolding ego_other2_def[abs_def] by (intro continuous_intros) lemma isCont_ego_other2[continuous_intros]: "isCont ego_other2 x" using continuous_on_ego_other2[of UNIV] by (auto simp: continuous_on_eq_continuous_at) definition ego_other2' :: "real \ real" where "ego_other2' t = other.s' t - u' t" lemma ego_other2_has_real_derivative[derivative_intros]: assumes "0 \ t" shows "(ego_other2 has_real_derivative ego_other2' t) (at t within {0..})" using assms other.t_stop_nonneg decelerate_other unfolding other.t_stop_def by (auto simp: ego_other2_def[abs_def] ego_other2'_def algebra_simps intro!: derivative_eq_intros) theorem cond_3r_1: assumes "u \ \ other.s \" shows "collision_react {0 .. \}" proof (unfold collision_react_def) have 1: "\t\0. t \ \ \ ego_other2 t = 0" proof (intro IVT2) show "ego_other2 \ \ 0" unfolding ego_other2_def using assms by auto next show "0 \ ego_other2 0" unfolding ego_other2_def using other.init_s[of 0] init_u[of 0] in_front by auto next show "0 \ \" using pos_react by auto next show "\t. 0 \ t \ t \ \ \ isCont ego_other2 t" using isCont_ego_other2 by auto qed then obtain t where "0 \ t \ t \ \ \ ego_other2 t = 0" by auto hence "t \ {0 .. \}" and "u t = other.s t" unfolding ego_other2_def by auto thus "\t\{0..\}. u t = other.s t" by (intro bexI) qed definition distance0 :: real where "distance0 = v\<^sub>e * \ - v\<^sub>o * \ - a\<^sub>o * \\<^sup>2 / 2" definition distance0_2 :: real where "distance0_2 = v\<^sub>e * \ + 1 / 2 * v\<^sub>o\<^sup>2 / a\<^sub>o" theorem cond_3r_1': assumes "s\<^sub>o - s\<^sub>e \ distance0" assumes "\ \ other.t_stop" shows "collision_react {0 .. \}" proof - from assms have "u \ \ other.s \" unfolding distance0_def other.s_def other.p_def u_def ego.q_def using pos_react by auto thus ?thesis using cond_3r_1 by auto qed theorem distance0_2_eq: assumes "\ > other.t_stop" shows "(u \ < other.s \) = (s\<^sub>o - s\<^sub>e > distance0_2)" proof - from assms have "(u \ < other.s \) = (ego.q \ < other.p_max)" using u_def other.s_def pos_react by auto also have "... = (s\<^sub>e + v\<^sub>e * \ < s\<^sub>o + v\<^sub>o * (- v\<^sub>o / a\<^sub>o) + 1 / 2 * a\<^sub>o * (- v\<^sub>o / a\<^sub>o)\<^sup>2)" using ego.q_def other.p_max_def other.p_def other.t_stop_def by auto also have "... = (v\<^sub>e * \ - v\<^sub>o * (- v\<^sub>o / a\<^sub>o) - 1 / 2 * a\<^sub>o * (- v\<^sub>o / a\<^sub>o)\<^sup>2 < s\<^sub>o - s\<^sub>e)" by linarith also have "... = (v\<^sub>e * \ + v\<^sub>o\<^sup>2 / a\<^sub>o - 1 / 2 * v\<^sub>o\<^sup>2 / a\<^sub>o < s\<^sub>o - s\<^sub>e)" using other.p_def other.p_max_def other.p_max_eq other.t_stop_def by auto also have "... = (v\<^sub>e * \ + 1 / 2 * v\<^sub>o\<^sup>2 / a\<^sub>o < s\<^sub>o - s\<^sub>e)" by linarith thus ?thesis using distance0_2_def by (simp add: calculation) qed theorem cond_3r_1'_2: assumes "s\<^sub>o - s\<^sub>e \ distance0_2" assumes "\ > other.t_stop" shows "collision_react {0 .. 
\}" proof - from assms distance0_2_eq have "u \ \ other.s \" unfolding distance0_def other.s_def other.p_def u_def ego.q_def using pos_react by auto thus ?thesis using cond_3r_1 by auto qed definition safe_distance_3r :: real where "safe_distance_3r = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2" lemma distance0_at_most_sd3r: "distance0 \ safe_distance_3r" unfolding distance0_def safe_distance_3r_def using nonneg_vel_ego decelerate_ego by (auto simp add:field_simps) definition safe_distance_4r :: real where "safe_distance_4r = (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 + v\<^sub>e * \" lemma distance0_at_most_sd4r: assumes "a\<^sub>o > a\<^sub>e" shows "distance0 \ safe_distance_4r" proof - from assms have "a\<^sub>o \ a\<^sub>e" by auto have "0 \ (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / (2 * a\<^sub>o - 2 * a\<^sub>e)" by (rule divide_nonneg_nonneg) (auto simp add:assms `a\<^sub>e \ a\<^sub>o`) thus ?thesis unfolding distance0_def safe_distance_4r_def by auto qed definition safe_distance_2r :: real where "safe_distance_2r = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o" lemma vo_start_geq_ve: assumes "\ \ other.t_stop" assumes "other.s' \ \ v\<^sub>e" shows "u \ < other.s \" proof - from assms have "v\<^sub>e \ v\<^sub>o + a\<^sub>o * \" unfolding other.s'_def other.p'_def by auto with mult_right_mono[OF this, of "\"] have "v\<^sub>e * \ \ v\<^sub>o * \ + a\<^sub>o * \\<^sup>2" (is "?l0 \ ?r0") using pos_react by (auto simp add:field_simps power_def) hence "s\<^sub>e + ?l0 \ s\<^sub>e + ?r0" by auto also have "... < s\<^sub>o + ?r0" using in_front by auto also have "... 
< s\<^sub>o + v\<^sub>o * \ + a\<^sub>o * \\<^sup>2 / 2" using decelerate_other pos_react by auto finally show ?thesis using pos_react assms(1) unfolding u_def ego.q_def other.s_def other.t_stop_def other.p_def by auto qed theorem so_star_stop_leq_se_stop: assumes "\ \ other.t_stop" assumes "other.s' \ < v\<^sub>e" assumes "\ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" shows "0 \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2 + (v\<^sub>o + a\<^sub>o * \)\<^sup>2 / a\<^sub>o / 2" proof - consider "v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ \ 0" | "\ (v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ \ 0)" by auto thus ?thesis proof (cases) case 1 hence "v\<^sub>e - a\<^sub>e / a\<^sub>o * (v\<^sub>o + a\<^sub>o * \) \ 0" unfolding other.s'_def other.p'_def by (auto simp add:assms(1)) hence "v\<^sub>e - a\<^sub>e / a\<^sub>o * v\<^sub>o - a\<^sub>e * \ \ 0" (is "?l0 \ 0") using decelerate_other by (auto simp add:field_simps) hence "?l0 / a\<^sub>e \ 0" using divide_right_mono_neg[OF `?l0 \ 0`] decelerate_ego by auto hence "0 \ v\<^sub>e / a\<^sub>e - v\<^sub>o / a\<^sub>o - \" using decelerate_ego by (auto simp add:field_simps) hence *: "- v\<^sub>e / a\<^sub>e \ - (v\<^sub>o + a\<^sub>o * \) / a\<^sub>o" using decelerate_other by (auto simp add:field_simps) from assms have **: "v\<^sub>o + a\<^sub>o * \ \ v\<^sub>e" unfolding other.s'_def other.p'_def by auto have vo_star_nneg: "v\<^sub>o + a\<^sub>o * \ \ 0" proof - from assms(1) have "- v\<^sub>o \ a\<^sub>o * \" unfolding other.t_stop_def using decelerate_other by (auto simp add:field_simps) thus ?thesis by auto qed from mult_mono[OF * ** _ `0 \ v\<^sub>o + a\<^sub>o * \`] have "- (v\<^sub>o + a\<^sub>o * \) / a\<^sub>o * (v\<^sub>o + a\<^sub>o * \) \ - v\<^sub>e / a\<^sub>e * v\<^sub>e" using nonneg_vel_ego decelerate_ego by (auto simp add:field_simps) hence "- (v\<^sub>o + a\<^sub>o * \)\<^sup>2 / a\<^sub>o \ - v\<^sub>e\<^sup>2 / a\<^sub>e " by (auto simp add: field_simps power_def) thus ?thesis by (auto simp add:field_simps) next case 2 with assms have "a\<^sub>o \ a\<^sub>e" by auto from assms(2) have "(v\<^sub>o + a\<^sub>o * \) \ v\<^sub>e" unfolding other.s'_def using assms unfolding other.p'_def by auto have vo_star_nneg: "v\<^sub>o + a\<^sub>o * \ \ 0" proof - from assms(1) have "- v\<^sub>o \ a\<^sub>o * \" unfolding other.t_stop_def using decelerate_other by (auto simp add:field_simps) thus ?thesis by auto qed with mult_mono[OF `v\<^sub>o + a\<^sub>o * \ \ v\<^sub>e` `v\<^sub>o + a\<^sub>o * \ \ v\<^sub>e`] have *: "(v\<^sub>o + a\<^sub>o * \)\<^sup>2 \ v\<^sub>e\<^sup>2" using nonneg_vel_ego by (auto simp add:power_def) from `a\<^sub>o \ a\<^sub>e` have "- 1 /a\<^sub>o \ - 1 / a\<^sub>e" using decelerate_ego decelerate_other by (auto simp add:field_simps) from mult_mono[OF * this] have "(v\<^sub>o + a\<^sub>o * \)\<^sup>2 * (- 1 / a\<^sub>o) \ v\<^sub>e\<^sup>2 * (- 1 / a\<^sub>e)" using nonneg_vel_ego decelerate_other by (auto simp add:field_simps) then show ?thesis by auto qed qed theorem distance0_at_most_distance2r: assumes "\ \ other.t_stop" assumes "other.s' \ < v\<^sub>e" assumes "\ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" shows "distance0 \ safe_distance_2r" proof - from so_star_stop_leq_se_stop[OF assms] have " 0 \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2 + (v\<^sub>o + a\<^sub>o * \)\<^sup>2 / a\<^sub>o / 2 " (is "0 \ ?term") by auto have "safe_distance_2r = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + 
v\<^sub>o\<^sup>2 / 2 / a\<^sub>o" unfolding safe_distance_2r_def by auto also have "... = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + (v\<^sub>o + a\<^sub>o * \)\<^sup>2 / 2 / a\<^sub>o - v\<^sub>o * \ - a\<^sub>o * \\<^sup>2 / 2" using decelerate_other by (auto simp add:field_simps power_def) also have "... = v\<^sub>e * \ - v\<^sub>o * \ - a\<^sub>o * \\<^sup>2 / 2 + ?term" (is "_ = ?left + ?term") by (auto simp add:field_simps) finally have "safe_distance_2r = distance0 + ?term" unfolding distance0_def by auto with `0 \ ?term` show "distance0 \ safe_distance_2r" by auto qed theorem dist0_sd2r_1: assumes "\ \ other.t_stop" assumes "\ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" assumes "s\<^sub>o - s\<^sub>e > safe_distance_2r" shows "s\<^sub>o - s\<^sub>e > distance0" proof (cases "other.s' \ \ v\<^sub>e") assume "v\<^sub>e \ other.s' \" from vo_start_geq_ve[OF assms(1) this] have "u \ < other.s \" by auto thus ?thesis unfolding distance0_def u_def using pos_react assms(1) unfolding ego.q_def other.s_def other.p_def by auto next assume "\ v\<^sub>e \ other.s' \" hence "v\<^sub>e > other.s' \" by auto from distance0_at_most_distance2r[OF assms(1) this assms(2)] have "distance0 \ safe_distance_2r" by auto with assms(3) show ?thesis by auto qed theorem sd2r_eq: assumes "\ > other.t_stop" shows "(u_max < other.s \) = (s\<^sub>o - s\<^sub>e > safe_distance_2r)" proof - from assms have "(u_max < other.s \) = (ego2.s (- v\<^sub>e / a\<^sub>e) < other.p_max)" using u_max_def ego2.t_stop_def u_def other.s_def \_def pos_react ego2.p_max_eq ego2.s_t_stop u_max_eq by auto also have "... = (s\<^sub>e + v\<^sub>e * \ + v\<^sub>e * (- v\<^sub>e / a\<^sub>e) + 1 / 2 * a\<^sub>e * (- v\<^sub>e / a\<^sub>e)\<^sup>2 < s\<^sub>o + v\<^sub>o * (- v\<^sub>o / a\<^sub>o) + 1 / 2 * a\<^sub>o * (- v\<^sub>o / a\<^sub>o)\<^sup>2)" using ego2.s_def ego2.p_def ego.q_def other.p_max_def other.p_def other.t_stop_def ego2.p_max_def ego2.s_t_stop ego2.t_stop_def by auto also have "... = (v\<^sub>e * \ + v\<^sub>e * (- v\<^sub>e / a\<^sub>e) + 1 / 2 * a\<^sub>e * (- v\<^sub>e / a\<^sub>e)\<^sup>2 - v\<^sub>o * (- v\<^sub>o / a\<^sub>o) - 1 / 2 * a\<^sub>o * (- v\<^sub>o / a\<^sub>o)\<^sup>2 < s\<^sub>o - s\<^sub>e)" by linarith also have "... = (v\<^sub>e * \ - v\<^sub>e\<^sup>2 / a\<^sub>e + 1 / 2 * v\<^sub>e\<^sup>2 / a\<^sub>e + v\<^sub>o\<^sup>2 / a\<^sub>o - 1 / 2 * v\<^sub>o\<^sup>2 / a\<^sub>o < s\<^sub>o - s\<^sub>e)" using ego2.p_def ego2.p_max_def ego2.p_max_eq ego2.t_stop_def other.p_def other.p_max_def other.p_max_eq other.t_stop_def by auto also have "... 
= (v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o < s\<^sub>o - s\<^sub>e)" by linarith thus ?thesis using distance0_2_def by (simp add: calculation safe_distance_2r_def) qed theorem dist0_sd2r_2: assumes "\ > - v\<^sub>o / a\<^sub>o" assumes "s\<^sub>o - s\<^sub>e > safe_distance_2r" shows "s\<^sub>o - s\<^sub>e > distance0_2" proof - have "- v\<^sub>e\<^sup>2 / 2 / a\<^sub>e \ 0" using zero_le_power2 hyps(3) divide_nonneg_neg by (auto simp add:field_simps) hence "v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o \ v\<^sub>e * \ + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o" by simp hence "safe_distance_2r \ distance0_2" using safe_distance_2r_def distance0_2_def by auto thus ?thesis using assms(2) by linarith qed end subsection \Safe Distance Delta\ locale safe_distance_no_collsion_delta = safe_distance_normal + assumes no_collision_delta: "u \ < other.s \" begin sublocale delayed_safe_distance: safe_distance a\<^sub>e v\<^sub>e "ego.q \" a\<^sub>o "other.s' \" "other.s \" proof (unfold_locales) from nonneg_vel_ego show "0 \ v\<^sub>e" by auto next from nonneg_vel_other show "0 \ other.s' \" unfolding other.s'_def other.p'_def other.t_stop_def using decelerate_other by (auto simp add: field_split_simps) next from decelerate_ego show "a\<^sub>e < 0" by auto next from decelerate_other show "a\<^sub>o < 0" by auto next from no_collision_delta show "ego.q \ < other.s \" unfolding u_def using pos_react by auto qed lemma no_collision_react_initially_strict: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" shows "no_collision_react {0 <..< \}" proof (rule no_collision_reactI) fix t::real assume "t \ {0 <..< \}" show "u t \ other.s t" proof (rule ccontr) assume "\ u t \ other.s t" hence "ego_other2 t = 0" unfolding ego_other2_def by auto from \t \ {0 <..< \}\ have "ego_other2 t = other.s t - ego.q t" unfolding ego_other2_def u_def using ego.init_q by auto have "\ \ other.t_stop \ other.t_stop < \" by auto moreover { assume le_t_stop: "\ \ other.t_stop" with \ego_other2 t = other.s t - ego.q t\ have "ego_other2 t = other.p t - ego.q t" unfolding other.s_def using \t \ {0 <..< \}\ by auto with \ego_other2 t = 0\ have "other.p t - ego.q t = 0" by auto hence eq: "(s\<^sub>o- s\<^sub>e) + (v\<^sub>o - v\<^sub>e) * t + (1/2 * a\<^sub>o) * t\<^sup>2 = 0" unfolding other.p_def ego.q_def by (auto simp: algebra_simps) define p where "p \ \x. (1/2 * a\<^sub>o) * x\<^sup>2 + (v\<^sub>o - v\<^sub>e) * x + (s\<^sub>o - s\<^sub>e)" have "0 < 1/2 * a\<^sub>o" proof (intro p_convex[where p=p and b="v\<^sub>o - v\<^sub>e" and c="s\<^sub>o - s\<^sub>e"]) show "0 < t" using \t \ {0 <..< \}\ by auto next show "t < \" using \t \ {0 <..< \}\ by auto next show "p t < p 0" unfolding p_def using eq in_front by (auto simp: algebra_simps) next from eq have "p t = 0" unfolding p_def by auto also have "... < p \" using no_collision_delta pos_react le_t_stop unfolding p_def u_def other.s_def ego.q_def other.p_def by (auto simp:algebra_simps) finally have "p t < p \" by simp thus "p t \ p \" by auto next show "p = (\x. 
1 / 2 * a\<^sub>o * x\<^sup>2 + (v\<^sub>o - v\<^sub>e) * x + (s\<^sub>o - s\<^sub>e))" unfolding p_def by (rule refl) qed hence "0 < a\<^sub>o" by auto with decelerate_other have False by simp } moreover { assume gt_t_stop: "\ > other.t_stop" have t_lt_t_stop: "t < other.t_stop" proof (rule ccontr) assume "\ t < other.t_stop" hence "other.t_stop \ t" by simp from \ego_other2 t = 0\ have "ego.q t = other.p_max" unfolding ego_other2_def u_def other.s_def comp_def \_def other.p_max_def using \t \ {0 <..< \}\ \other.t_stop \ t\ gt_t_stop by (auto split:if_splits) have "ego.q t = u t" unfolding u_def using \t \ {0 <..< \}\ by auto also have "... \ u_max" using u_max by auto also have "... < other.p_max" using assms(2) other.s_t_stop by auto finally have "ego.q t < other.p_max" by auto with \ego.q t = other.p_max\ show False by auto qed with \ego_other2 t = other.s t - ego.q t\ have "ego_other2 t = other.p t - ego.q t" unfolding other.s_def using \t \ {0 <..< \}\ by auto with \ego_other2 t = 0\ have "other.p t - ego.q t = 0" by auto hence eq: "(s\<^sub>o- s\<^sub>e) + (v\<^sub>o - v\<^sub>e) * t + (1/2 * a\<^sub>o) * t\<^sup>2 = 0" unfolding other.p_def ego.q_def by (auto simp: algebra_simps) define p where "p \ \x. (1/2 * a\<^sub>o) * x\<^sup>2 + (v\<^sub>o - v\<^sub>e) * x + (s\<^sub>o - s\<^sub>e)" have "0 < 1/2 * a\<^sub>o" proof (intro p_convex[where p=p and b="v\<^sub>o - v\<^sub>e" and c="s\<^sub>o - s\<^sub>e"]) show "0 < t" using \t \ {0 <..< \}\ by auto next show "t < other.t_stop" using t_lt_t_stop by auto next show "p t < p 0" unfolding p_def using eq in_front by (auto simp: algebra_simps) next from eq have zero: "p t = 0" unfolding p_def by auto have eq: "p other.t_stop = ego_other2 other.t_stop" unfolding ego_other2_def other.s_t_stop u_def ego.q_def other.s_def other.p_def p_def using \\ > other.t_stop\ other.t_stop_nonneg other.t_stop_def by (auto simp: diff_divide_distrib left_diff_distrib') have "u other.t_stop \ u_max" using u_max by auto also have "... < other.s_stop" using assms by auto finally have "0 \ other.s_stop - u other.t_stop" by auto hence "0 \ ego_other2 other.t_stop" unfolding ego_other2_def by auto hence "0 \ p other.t_stop" using eq by auto with zero show "p t \ p other.t_stop" by auto next show "p = (\x. 1 / 2 * a\<^sub>o * x\<^sup>2 + (v\<^sub>o - v\<^sub>e) * x + (s\<^sub>o - s\<^sub>e))" unfolding p_def by (rule refl) qed hence False using decelerate_other by auto } ultimately show False by auto qed qed lemma no_collision_react_initially: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" shows "no_collision_react {0 .. \}" proof - have "no_collision_react {0 <..< \}" by (rule no_collision_react_initially_strict[OF assms]) have "u 0 \ other.s 0" using init_u other.init_s in_front by auto hence "no_collision_react {0}" unfolding collision_react_def by auto with \no_collision_react {0 <..< \}\ have "no_collision_react ({0} \ {0 <..< \})" using no_collision_union[of "{0}" "{0 <..< \}"] by auto moreover have "{0} \ {0 <..< \} = {0 ..< \}" using pos_react by auto ultimately have "no_collision_react {0 ..< \}" by auto have "u \ \ other.s \" using no_collision_delta by auto hence "no_collision_react {\}" unfolding collision_react_def by auto with \no_collision_react {0 ..< \}\ have "no_collision_react ({\} \ {0 ..< \})" using no_collision_union[of "{\}" "{0 ..< \}"] by auto moreover have "{\} \ {0 ..< \} = {0 .. \}" using pos_react by auto ultimately show "no_collision_react {0 .. 
\}" by auto qed lemma collision_after_delta: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" shows "collision_react {0 ..} \ collision_react {\..}" proof assume "collision_react {0 ..}" have "no_collision_react {0 .. \}" by (rule no_collision_react_initially[OF assms]) with \collision_react {0..}\ have "collision_react ({0..} - {0 .. \})" using pos_react by (auto intro: collision_trim_subset) moreover have "{0..} - {0 .. \} = {\ <..}" using pos_react by auto ultimately have "collision_react {\ <..}" by auto thus "collision_react {\ ..}" by (auto intro:collision_react_subset) next assume "collision_react {\..}" moreover have "{\..} \ {0 ..}" using pos_react by auto ultimately show "collision_react {0 ..}" by (rule collision_react_subset) qed lemma collision_react_strict: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" shows "collision_react {\ ..} \ collision_react {\ <..}" proof assume asm: "collision_react {\ ..}" have "no_collision_react {\}" using no_collision_delta unfolding collision_react_def by auto moreover have "{\ <..} \ {\ ..}" by auto ultimately have "collision_react ({\ ..} - {\})" using asm collision_trim_subset by simp moreover have "{\ <..} = {\ ..} - {\}" by auto ultimately show "collision_react {\ <..}" by auto next assume "collision_react {\ <..}" thus "collision_react {\ ..}" using collision_react_subset[where t="{\ ..}" and s="{\ <..}"] by fastforce qed lemma delayed_other_s_stop_eq: "delayed_safe_distance.other.s_stop = other.s_stop" proof (unfold other.s_t_stop; unfold delayed_safe_distance.other.s_t_stop; unfold movement.p_max_eq) have "\ \ other.t_stop \ other.t_stop < \" by auto moreover { assume "\ \ other.t_stop" hence "other.s \ - (other.s' \)\<^sup>2 / a\<^sub>o / 2 = s\<^sub>o - v\<^sub>o\<^sup>2 / a\<^sub>o / 2" unfolding other.s_def other.s'_def using pos_react decelerate_other by (auto simp add: other.p_def other.p'_def power2_eq_square field_split_simps) } moreover { assume "other.t_stop < \" hence "other.s \ - (other.s' \)\<^sup>2 / a\<^sub>o / 2 = s\<^sub>o - v\<^sub>o\<^sup>2 / a\<^sub>o / 2" unfolding other.s_def other.s'_def other.p_max_eq using pos_react decelerate_other by (auto) } ultimately show "other.s \ - (other.s' \)\<^sup>2 / a\<^sub>o / 2 = s\<^sub>o - v\<^sub>o\<^sup>2 / a\<^sub>o / 2" by auto qed lemma delayed_cond3': assumes "other.s \ \ u_max" assumes "u_max < other.s_stop" shows "delayed_safe_distance.collision {0 ..} \ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ other.s \ - ego.q \ \ delayed_safe_distance.snd_safe_distance \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" proof (rule delayed_safe_distance.cond_3') have "other.s \ \ u_max" using \other.s \ \ u_max\ . also have "... = ego2.s_stop" unfolding u_max_eq ego2.s_t_stop ego2.p_max_eq by (rule refl) finally show "other.s \ \ ego2.s_stop" by auto next have "ego2.s_stop = u_max" unfolding ego2.s_t_stop ego2.p_max_eq u_max_eq by (rule refl) also have "... < other.s_stop" using assms by auto also have "... 
\ delayed_safe_distance.other.s_stop" using delayed_other_s_stop_eq by auto finally show "ego2.s_stop < delayed_safe_distance.other.s_stop" by auto qed lemma delayed_other_t_stop_eq: assumes "\ \ other.t_stop" shows "delayed_safe_distance.other.t_stop + \ = other.t_stop" using assms decelerate_other unfolding delayed_safe_distance.other.t_stop_def other.t_stop_def other.s'_def movement.t_stop_def other.p'_def by (auto simp add: field_split_simps) lemma delayed_other_s_eq: assumes "0 \ t" shows "delayed_safe_distance.other.s t = other.s (t + \)" proof (cases "\ \ other.t_stop") assume 1: "\ \ other.t_stop" have "t + \ \ other.t_stop \ other.t_stop < t + \" by auto moreover { assume "t + \ \ other.t_stop" hence "delayed_safe_distance.other.s t = delayed_safe_distance.other.p t" using delayed_other_t_stop_eq [OF 1] assms unfolding delayed_safe_distance.other.s_def by auto also have "... = other.p (t + \)" unfolding movement.p_def other.s_def other.s'_def other.p'_def using pos_react 1 by (auto simp add: power2_eq_square field_split_simps) also have "... = other.s (t + \)" unfolding other.s_def using assms pos_react \t + \ \ other.t_stop\ by auto finally have "delayed_safe_distance.other.s t = other.s (t + \)" by auto } moreover { assume "other.t_stop < t + \" hence "delayed_safe_distance.other.s t = delayed_safe_distance.other.p_max" using delayed_other_t_stop_eq [OF 1] assms delayed_safe_distance.other.t_stop_nonneg unfolding delayed_safe_distance.other.s_def by auto also have "... = other.p_max" unfolding movement.p_max_eq other.s_def other.s'_def other.p_def other.p'_def using pos_react 1 decelerate_other by (auto simp add: power2_eq_square field_split_simps) also have "... = other.s (t + \)" unfolding other.s_def using assms pos_react \other.t_stop < t + \\ by auto finally have "delayed_safe_distance.other.s t = other.s (t + \)" by auto } ultimately show ?thesis by auto next assume "\ \ \ other.t_stop" hence "other.t_stop < \" by auto hence "other.s' \ = 0" and "other.s \ = other.p_max" unfolding other.s'_def other.s_def using pos_react by auto hence "delayed_safe_distance.other.s t = delayed_safe_distance.other.p_max" unfolding delayed_safe_distance.other.s_def using assms decelerate_other by (auto simp add:movement.p_max_eq movement.p_def movement.t_stop_def) also have "... = other.p_max" unfolding movement.p_max_eq using \other.s' \ = 0\ \other.s \ = other.p_max\ using other.p_max_eq by auto also have "... = other.s (t + \)" unfolding other.s_def using pos_react assms \other.t_stop < \\ by auto finally show "delayed_safe_distance.other.s t = other.s (t + \)" by auto qed lemma translate_collision_range: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" shows "delayed_safe_distance.collision {0 ..} \ collision_react {\ ..}" proof assume "delayed_safe_distance.collision {0 ..}" then obtain t where eq: "ego2.s t = delayed_safe_distance.other.s t" and "0 \ t" unfolding delayed_safe_distance.collision_def by auto have "ego2.s t = (ego2.s \ \) (t + \)" unfolding comp_def \_def by auto also have "... 
= u (t + \)" unfolding u_def using \0 \ t\ pos_react by (auto simp: \_def ego2.init_s) finally have left:"ego2.s t = u (t + \)" by auto have right: "delayed_safe_distance.other.s t = other.s (t + \)" using delayed_other_s_eq pos_react \0 \ t\ by auto with eq and left have "u (t + \) = other.s (t + \)" by auto moreover have "\ \ t + \" using \0 \ t\ by auto ultimately show "collision_react {\ ..}" unfolding collision_react_def by auto next assume "collision_react {\ ..}" hence "collision_react {\ <..}" using collision_react_strict[OF assms] by simp then obtain t where eq: "u t = other.s t" and "\ < t" unfolding collision_react_def by auto moreover hence "u t = (ego2.s \ \) t" unfolding u_def using pos_react by auto moreover have "other.s t = delayed_safe_distance.other.s (t - \)" using delayed_other_s_eq \\ < t\ by auto ultimately have "ego2.s (t - \) = delayed_safe_distance.other.s (t - \)" unfolding comp_def \_def by auto with \\ < t\ show "delayed_safe_distance.collision {0 ..}" unfolding delayed_safe_distance.collision_def by auto qed theorem cond_3r_2: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" assumes "other.s \ \ u_max" shows "collision_react {0 ..} \ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ other.s \ - ego.q \ \ delayed_safe_distance.snd_safe_distance \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" proof - have "collision_react {0 ..} \ collision_react {\ ..}" by (rule collision_after_delta[OF assms(1) assms(2)]) also have "... \ delayed_safe_distance.collision {0 ..}" by (simp add: translate_collision_range[OF assms(1) assms(2)]) also have "... \ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ other.s \ - ego.q \ \ delayed_safe_distance.snd_safe_distance \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" by (rule delayed_cond3'[OF assms(3) assms(2)]) finally show "collision_react {0 ..} \ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ other.s \ - ego.q \ \ delayed_safe_distance.snd_safe_distance \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" by auto qed lemma sd_2r_correct_for_3r_2: assumes "s\<^sub>o - s\<^sub>e > safe_distance_2r" assumes "other.s \ \ u_max" assumes "\ (a\<^sub>o > a\<^sub>e \ other.s' \ < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * other.s' \ < 0)" shows "no_collision_react {0..}" proof - from assms have "s\<^sub>o - s\<^sub>e > v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o" unfolding safe_distance_2r_def by auto hence "s\<^sub>o - v\<^sub>o\<^sup>2 / 2 / a\<^sub>o > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" by auto hence "s\<^sub>o - v\<^sub>o\<^sup>2 / a\<^sub>o + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" by auto hence "s\<^sub>o + v\<^sub>o * (- v\<^sub>o / a\<^sub>o) + 1/2 * a\<^sub>o * (-v\<^sub>o / a\<^sub>o)\<^sup>2 > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" using other.p_def other.p_max_def other.p_max_eq other.t_stop_def by auto hence "other.s_stop > u_max" unfolding other.s_def using u_max_eq other.t_stop_def using ego.q_def other.p_def other.p_max_def other.s_def other.s_t_stop by auto thus ?thesis using assms(2) assms(3) collision_after_delta cond_1r delayed_cond3' translate_collision_range by linarith qed lemma sd2_at_most_sd4: assumes "a\<^sub>o > a\<^sub>e" shows "safe_distance_2r \ safe_distance_4r" proof - have "a\<^sub>o \ 0" and "a\<^sub>e \ 0" and "a\<^sub>o - a\<^sub>e \ 0" and "0 < 2 * (a\<^sub>o - a\<^sub>e)" using hyps assms(1) by auto have "0 \ (- 
v\<^sub>e * a\<^sub>o + v\<^sub>o * a\<^sub>e + a\<^sub>o * a\<^sub>e * \) * (- v\<^sub>e * a\<^sub>o + v\<^sub>o * a\<^sub>e + a\<^sub>o * a\<^sub>e * \)" (is "0 \ (?l1 + ?l2 + ?l3) * ?r") by auto also have "... = v\<^sub>e\<^sup>2 * a\<^sub>o\<^sup>2 + v\<^sub>o\<^sup>2 * a\<^sub>e\<^sup>2 + a\<^sub>o\<^sup>2 * a\<^sub>e\<^sup>2 * \\<^sup>2 - 2 * v\<^sub>e * a\<^sub>o * v\<^sub>o * a\<^sub>e - 2 * a\<^sub>o\<^sup>2 * a\<^sub>e * \ * v\<^sub>e + 2 * a\<^sub>o * a\<^sub>e\<^sup>2 * \ * v\<^sub>o" (is "?lhs = ?rhs") by (auto simp add:algebra_simps power_def) finally have "0 \ ?rhs" by auto hence "(- v\<^sub>e\<^sup>2 * a\<^sub>o / a\<^sub>e - v\<^sub>o\<^sup>2 * a\<^sub>e / a\<^sub>o) * (a\<^sub>o * a\<^sub>e) \ (a\<^sub>o * a\<^sub>e * \\<^sup>2 - 2 * v\<^sub>e * v\<^sub>o - 2 * a\<^sub>o * \ * v\<^sub>e + 2 * a\<^sub>e * \ * v\<^sub>o) * (a\<^sub>o * a\<^sub>e)" by (auto simp add: algebra_simps power_def) hence "2 * v\<^sub>e * \ * (a\<^sub>o - a\<^sub>e) - v\<^sub>e\<^sup>2 * a\<^sub>o / a\<^sub>e + v\<^sub>e\<^sup>2 + v\<^sub>o\<^sup>2 - v\<^sub>o\<^sup>2 * a\<^sub>e / a\<^sub>o \ v\<^sub>o\<^sup>2 + a\<^sub>o\<^sup>2 * \\<^sup>2 + v\<^sub>e\<^sup>2 + 2 * v\<^sub>o * \ * a\<^sub>o - 2 * v\<^sub>e * v\<^sub>o - 2 * a\<^sub>o * \ * v\<^sub>e - 2 * v\<^sub>o * \ * a\<^sub>o + 2 * a\<^sub>e * \ * v\<^sub>o - a\<^sub>o\<^sup>2 * \\<^sup>2 + a\<^sub>o * a\<^sub>e * \\<^sup>2 + 2 * v\<^sub>e * \ * (a\<^sub>o - a\<^sub>e)" by (auto simp add: ego2.decel other.decel) hence "2 * v\<^sub>e * \ * (a\<^sub>o - a\<^sub>e) - v\<^sub>e\<^sup>2 * a\<^sub>o / a\<^sub>e + v\<^sub>e\<^sup>2 + v\<^sub>o\<^sup>2 - v\<^sub>o\<^sup>2 * a\<^sub>e / a\<^sub>o \ (v\<^sub>o + \ * a\<^sub>o - v\<^sub>e)\<^sup>2 - 2 * v\<^sub>o * \ * a\<^sub>o + 2 * a\<^sub>e * \ * v\<^sub>o - a\<^sub>o\<^sup>2 * \\<^sup>2 + a\<^sub>o * a\<^sub>e * \\<^sup>2 + 2 * v\<^sub>e * \ * (a\<^sub>o - a\<^sub>e)" by (auto simp add: algebra_simps power_def) hence "v\<^sub>e * \ * 2 * (a\<^sub>o - a\<^sub>e) - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e * 2 * a\<^sub>o + v\<^sub>e\<^sup>2 / 2 / a\<^sub>e * 2 * a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o * 2 * a\<^sub>o - v\<^sub>o\<^sup>2 / 2 / a\<^sub>o * 2 * a\<^sub>e \ (v\<^sub>o + \ * a\<^sub>o - v\<^sub>e)\<^sup>2 - v\<^sub>o * \ * 2 * a\<^sub>o - v\<^sub>o * \ * 2 * -a\<^sub>e - a\<^sub>o * \\<^sup>2 / 2 * 2 * a\<^sub>o - a\<^sub>o * \\<^sup>2 / 2 * 2 * -a\<^sub>e + v\<^sub>e * \ * 2 * (a\<^sub>o - a\<^sub>e)" (is "?lhs1 \ ?rhs1") by (simp add: \a\<^sub>o \ 0\ \a\<^sub>e \ 0\ power2_eq_square algebra_simps) hence "v\<^sub>e * \ * 2 * (a\<^sub>o - a\<^sub>e) - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e * 2 * (a\<^sub>o - a\<^sub>e) + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o * 2 * (a\<^sub>o - a\<^sub>e) \ (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) * 2 * (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ * 2 * (a\<^sub>o - a\<^sub>e) - a\<^sub>o * \\<^sup>2 / 2 * 2 * (a\<^sub>o - a\<^sub>e) + v\<^sub>e * \ * 2 *(a\<^sub>o - a\<^sub>e)" (is "?lhs2 \ ?rhs2") proof - assume "?lhs1 \ ?rhs1" have "?lhs1 = ?lhs2" by (auto simp add:field_simps) moreover have "?rhs1 = ?rhs2" using `a\<^sub>o - a\<^sub>e \ 0` by (auto simp add:field_simps) ultimately show ?thesis using `?lhs1 \ ?rhs1` by auto qed hence "(v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o) * 2 * (a\<^sub>o - a\<^sub>e) \ ((v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 + v\<^sub>e * \) * 2 *(a\<^sub>o - a\<^sub>e)" by (simp 
add: algebra_simps) hence "v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o \ (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 + v\<^sub>e * \" - using \a\<^sub>o > a\<^sub>e\ real_mult_le_cancel_iff1[OF `0 < 2 * (a\<^sub>o - a\<^sub>e)`, of "(v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o)" + using \a\<^sub>o > a\<^sub>e\ mult_le_cancel_iff1[OF `0 < 2 * (a\<^sub>o - a\<^sub>e)`, of "(v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o)" "(v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 + v\<^sub>e * \"] semiring_normalization_rules(18) by (metis (no_types, lifting)) thus ?thesis using safe_distance_2r_def safe_distance_4r_def by auto qed lemma sd_4r_correct: assumes "s\<^sub>o - s\<^sub>e > safe_distance_4r" assumes "other.s \ \ u_max" assumes "\ \ other.t_stop" assumes "a\<^sub>o > a\<^sub>e" shows "no_collision_react {0..}" proof - from assms have "s\<^sub>o - s\<^sub>e > (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 + v\<^sub>e * \" unfolding safe_distance_4r_def by auto hence "s\<^sub>o + v\<^sub>o * \ + 1/2 * a\<^sub>o * \\<^sup>2 - s\<^sub>e - v\<^sub>e * \ > (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e)" by linarith hence "other.s \ - ego.q \ > (other.s' \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e)" using assms(3) ego.q_def other.p_def other.s_def other.p'_def other.s'_def pos_react by auto hence "other.s \ - ego.q \ > delayed_safe_distance.snd_safe_distance" by (simp add: delayed_safe_distance.snd_safe_distance_def) hence c: "\ (other.s \ - ego.q \ \ delayed_safe_distance.snd_safe_distance)" by linarith have "u_max < other.s_stop" unfolding u_max_eq other.s_t_stop other.p_max_eq ego.q_def using assms(1) sd2_at_most_sd4[OF assms(4)] unfolding safe_distance_4r_def safe_distance_2r_def by auto consider "s\<^sub>o \ u_max" | "s\<^sub>o > u_max" by linarith thus ?thesis proof (cases) case 1 from cond_3r_2[OF this `u_max < other.s_stop` assms(2)] show ?thesis using c by auto next case 2 then show ?thesis using cond_1r by auto qed qed text \Irrelevant, this Safe Distance is unreachable in the checker.\ definition safe_distance_5r :: real where "safe_distance_5r = v\<^sub>e\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o + v\<^sub>e * \" lemma sd_5r_correct: assumes "s\<^sub>o - s\<^sub>e > safe_distance_5r" assumes "u_max < other.s_stop" assumes "other.s \ \ u_max" assumes "\ > other.t_stop" shows "no_collision_react {0..}" proof - from assms have "s\<^sub>o - s\<^sub>e > v\<^sub>e\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o + v\<^sub>e * \" unfolding safe_distance_5r_def by auto hence "s\<^sub>o + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o - s\<^sub>e - v\<^sub>e * \ > (0 - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e)" using assms(2-4) unfolding other.s_def other.s_t_stop apply (auto simp: movement.p_t_stop split: if_splits) using cond_1r cond_2r other.s_t_stop by linarith+ hence "other.s \ - ego.q \ > (other.s' \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e)" using assms(2) assms(3) assms(4) other.s_def other.s_t_stop by auto hence "other.s \ - ego.q \ > delayed_safe_distance.snd_safe_distance" by (simp add: delayed_safe_distance.snd_safe_distance_def) hence 
"\ (other.s \ - ego.q \ \ delayed_safe_distance.snd_safe_distance)" by linarith thus ?thesis using assms(2) assms(3) cond_1r cond_3r_2 by linarith qed lemma translate_no_collision_range: "delayed_safe_distance.no_collision {0 ..} \ no_collision_react {\ ..}" proof assume left: "delayed_safe_distance.no_collision {0 ..}" show "no_collision_react {\ ..}" proof (unfold collision_react_def; simp; rule ballI) fix t::real assume "t \ {\ ..}" hence "\ \ t" by simp with pos_react have "0 \ t - \" by simp with left have ineq: "ego2.s (t - \) \ delayed_safe_distance.other.s (t - \)" unfolding delayed_safe_distance.collision_def by auto have "ego2.s (t - \) = (ego2.s \ \) t" unfolding comp_def \_def by auto also have "... = u t" unfolding u_def using \\ \ t\ pos_react by (auto simp: \_def ego2.init_s) finally have "ego2.s (t - \) = u t" by auto moreover have "delayed_safe_distance.other.s (t - \) = other.s t" using delayed_other_s_eq pos_react \\ \ t\ by auto ultimately show "u t \ other.s t" using ineq by auto qed next assume right: "no_collision_react {\ ..}" show "delayed_safe_distance.no_collision {0 ..}" proof (unfold delayed_safe_distance.collision_def; simp; rule ballI) fix t ::real assume "t \ {0 ..}" hence "0 \ t" by auto hence "\ \ t + \" by auto with right have ineq: "u (t + \) \ other.s (t + \)" unfolding collision_react_def by auto have "u (t + \) = ego2.s t" unfolding u_def comp_def \_def using \0 \ t\ pos_react \\ \ t+ \\ by (auto simp add:ego2.init_s) moreover have "other.s (t + \) = delayed_safe_distance.other.s t" using delayed_other_s_eq[of t] using \0 \ t\ by auto ultimately show "ego2.s t \ delayed_safe_distance.other.s t" using ineq by auto qed qed lemma delayed_cond1: assumes "other.s \ > u_max" shows "delayed_safe_distance.no_collision {0 ..}" proof - have "ego2.s_stop = u_max" unfolding ego2.s_t_stop ego2.p_max_eq u_max_eq by auto also have "... < other.s \" using assms by simp finally have "ego2.s_stop < other.s \" by auto thus "delayed_safe_distance.no_collision {0 ..}" by (simp add: delayed_safe_distance.cond_1) qed theorem cond_3r_3: assumes "s\<^sub>o \ u_max" assumes "u_max < other.s_stop" assumes "other.s \ > u_max" shows "no_collision_react {0 ..}" proof - have eq: "{0 ..} = {0 .. \} \ {\ ..}" using pos_react by auto show ?thesis unfolding eq proof (intro no_collision_union) show "no_collision_react {0 .. 
\}" by (rule no_collision_react_initially[OF assms(1) assms(2)]) next have "delayed_safe_distance.no_collision {0 ..}" by (rule delayed_cond1[OF assms(3)]) with translate_no_collision_range show "no_collision_react {\ ..}" by auto qed qed lemma sd_2r_correct_for_3r_3: assumes "s\<^sub>o - s\<^sub>e > safe_distance_2r" assumes "other.s \ > u_max" shows "no_collision_react {0..}" proof - from assms have "s\<^sub>o - s\<^sub>e > v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o" unfolding safe_distance_2r_def by auto hence "s\<^sub>o - v\<^sub>o\<^sup>2 / 2 / a\<^sub>o > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" by auto hence "s\<^sub>o - v\<^sub>o\<^sup>2 / a\<^sub>o + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" by auto hence "s\<^sub>o + v\<^sub>o * (- v\<^sub>o / a\<^sub>o) + 1/2 * a\<^sub>o * (-v\<^sub>o / a\<^sub>o)\<^sup>2 > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" using other.p_def other.p_max_def other.p_max_eq other.t_stop_def by auto hence "other.s_stop > u_max" unfolding other.s_def using u_max_eq other.t_stop_def using ego.q_def other.p_def other.p_max_def other.s_def other.s_t_stop by auto thus ?thesis using assms(2) cond_1r cond_3r_3 by linarith qed lemma sd_3r_correct: assumes "s\<^sub>o - s\<^sub>e > safe_distance_3r" assumes "\ \ other.t_stop" shows "no_collision_react {0 ..}" proof - from assms have "s\<^sub>o - s\<^sub>e > v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e - v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2" unfolding safe_distance_3r_def by auto hence "s\<^sub>o + v\<^sub>o * \ + 1/2 * a\<^sub>o * \\<^sup>2 > s\<^sub>e + v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e" by auto hence "other.s \ > u_max" using other.s_def u_max_eq assms(2) ego.q_def other.p_def pos_react by auto thus ?thesis using cond_1r cond_3r_3 delayed_other_s_stop_eq delayed_safe_distance.other.s0_le_s_stop by linarith qed lemma sd_2_at_least_sd_3: assumes "\ \ other.t_stop" shows "safe_distance_3r \ safe_distance_2r" proof - from assms have "\ = other.t_stop \ \ < other.t_stop" by auto then have "safe_distance_3r = safe_distance_2r \ safe_distance_3r > safe_distance_2r" proof (rule Meson.disj_forward) assume "\ = other.t_stop" hence "\ = - v\<^sub>o / a\<^sub>o" unfolding other.t_stop_def by auto hence "- v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 = - v\<^sub>o * other.t_stop - 1/2 * a\<^sub>o * other.t_stop\<^sup>2" by (simp add: movement.t_stop_def) thus "safe_distance_3r = safe_distance_2r" using other.p_def other.p_max_def other.p_max_eq safe_distance_2r_def safe_distance_3r_def by auto next assume "\ < other.t_stop" hence "\ < - v\<^sub>o / a\<^sub>o" unfolding other.t_stop_def by auto hence "0 < v\<^sub>o + a\<^sub>o * \" using other.decel other.p'_def other.p'_pos_iff by auto hence "0 < v\<^sub>o + 1/2 * a\<^sub>o * (\ + other.t_stop)" by (auto simp add:field_simps other.t_stop_def) hence "0 > v\<^sub>o * (\ - other.t_stop) + 1/2 * a\<^sub>o * (\ + other.t_stop) * (\ - other.t_stop)" using `\ < other.t_stop` mult_less_cancel_left_neg[where c="\ - other.t_stop" and a ="v\<^sub>o + 1 / 2 * a\<^sub>o * (\ + other.t_stop)" and b="0"] by (auto simp add: field_simps) hence " (\ + other.t_stop) * (\ - other.t_stop) = (\\<^sup>2 - other.t_stop\<^sup>2)" by (simp add: power2_eq_square square_diff_square_factored) hence "0 > v\<^sub>o * (\ - other.t_stop) + 1/2 * a\<^sub>o * (\\<^sup>2 - other.t_stop\<^sup>2)" by (metis (no_types, hide_lams) \v\<^sub>o * (\ - 
other.t_stop) + 1 / 2 * a\<^sub>o * (\ + other.t_stop) * (\ - other.t_stop) < 0\ divide_divide_eq_left divide_divide_eq_right times_divide_eq_left) hence "0 > v\<^sub>o * \ - v\<^sub>o * other.t_stop + 1/2 * a\<^sub>o * \\<^sup>2 - 1/2 * a\<^sub>o * other.t_stop\<^sup>2 " by (simp add: right_diff_distrib) hence "- v\<^sub>o * \ - 1/2 * a\<^sub>o * \\<^sup>2 > - v\<^sub>o * (- v\<^sub>o / a\<^sub>o) - 1/2 * a\<^sub>o * (- v\<^sub>o / a\<^sub>o)\<^sup>2" unfolding movement.t_stop_def by argo thus "safe_distance_3r > safe_distance_2r" using other.p_def other.p_max_def other.p_max_eq other.t_stop_def safe_distance_2r_def safe_distance_3r_def by auto qed thus ?thesis by auto qed end subsection \Checker Design\ text \ We define two checkers for different cases: \<^item> one checker for the case that \\ \ other.t_stop (other.t_stop = - v\<^sub>o / a\<^sub>o)\ \<^item> a second checker for the case that \\ > other.t_stop\ \ definition check_precond_r1 :: "real \ real \ real \ real \ real \ real \ real \ bool" where "check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ s\<^sub>o > s\<^sub>e \ 0 \ v\<^sub>e \ 0 \ v\<^sub>o \ a\<^sub>e < 0 \ a\<^sub>o < 0 \ 0 < \ \ \ \ - v\<^sub>o / a\<^sub>o" definition safe_distance0 :: "real \ real \ real \ real \ real" where "safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \ = v\<^sub>e * \ - v\<^sub>o * \ - a\<^sub>o * \\<^sup>2 / 2" definition safe_distance_1r :: "real \ real \ real \ real" where "safe_distance_1r a\<^sub>e v\<^sub>e \ = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / a\<^sub>e / 2" definition safe_distance_2r :: "real \ real \ real \ real \ real \ real" where "safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e + v\<^sub>o\<^sup>2 / 2 / a\<^sub>o" definition safe_distance_4r :: "real \ real \ real \ real \ real \ real" where "safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = (v\<^sub>o + a\<^sub>o * \ - v\<^sub>e)\<^sup>2 / 2 / (a\<^sub>o - a\<^sub>e) - v\<^sub>o * \ - 1 / 2 * a\<^sub>o * \\<^sup>2 + v\<^sub>e * \" definition safe_distance_3r :: "real \ real \ real \ real \ real \ real" where "safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = v\<^sub>e * \ - v\<^sub>e\<^sup>2 / 2 / a\<^sub>e - v\<^sub>o * \ - 1 / 2 * a\<^sub>o * \\<^sup>2" definition checker_r1 :: "real \ real \ real \ real \ real \ real \ real \ bool" where "checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ let distance = s\<^sub>o - s\<^sub>e; precond = check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \; vo_star = v\<^sub>o + a\<^sub>o * \; t_stop_o_star = - vo_star / a\<^sub>o; t_stop_e = - v\<^sub>e / a\<^sub>e; safe_dist0 = safe_distance_1r a\<^sub>e v\<^sub>e \; safe_dist1 = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \; safe_dist2 = safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \; safe_dist3 = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ in if \ precond then False else if distance > safe_dist0 \ distance > safe_dist3 then True else if (a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star) then distance > safe_dist2 else distance > safe_dist1" theorem checker_r1_correctness: "(checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..})" proof assume asm: "checker_r1 
s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" have pre: "check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" proof (rule ccontr) assume "\ check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" with asm show "False" unfolding checker_r1_def Let_def by auto qed from pre have sdn': "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by (unfold_locales) (auto simp add: check_precond_r1_def) interpret sdn: safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ rewrites "sdn.distance0 = safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_1r = safe_distance_1r a\<^sub>e v\<^sub>e \" and "sdn.safe_distance_2r = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_4r = safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_3r = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" proof - from sdn' show "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by auto next show "safe_distance_normal.distance0 v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \ " unfolding safe_distance_normal.distance0_def[OF sdn'] safe_distance0_def by auto next show "safe_distance_normal.safe_distance_1r a\<^sub>e v\<^sub>e \ = safe_distance_1r a\<^sub>e v\<^sub>e \" unfolding safe_distance_normal.safe_distance_1r_def[OF sdn'] safe_distance_1r_def by auto next show "safe_distance_normal.safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.safe_distance_2r_def[OF sdn'] safe_distance_2r_def by auto next show "safe_distance_normal.safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ " unfolding safe_distance_normal.safe_distance_4r_def[OF sdn'] safe_distance_4r_def by auto next show "safe_distance_normal.safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.safe_distance_3r_def[OF sdn'] safe_distance_3r_def by auto qed have "0 < \" and "\ \ - v\<^sub>o / a\<^sub>o" using pre unfolding check_precond_r1_def by auto define so_delta where "so_delta = s\<^sub>o + v\<^sub>o * \ + a\<^sub>o * \\<^sup>2 / 2" define q_e_delta where "q_e_delta \ s\<^sub>e + v\<^sub>e * \" define u_stop_e where "u_stop_e \ q_e_delta - v\<^sub>e\<^sup>2 / (2 * a\<^sub>e)" define vo_star where "vo_star = v\<^sub>o + a\<^sub>o * \" define t_stop_o_star where "t_stop_o_star \ - vo_star / a\<^sub>o" define t_stop_e where "t_stop_e = - v\<^sub>e / a\<^sub>e" define distance where "distance \ s\<^sub>o - s\<^sub>e" define distance0 where "distance0 = safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist0 where "safe_dist0 = safe_distance_1r a\<^sub>e v\<^sub>e \" define safe_dist2 where "safe_dist2 \ safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist1 where "safe_dist1 \ safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist3 where "safe_dist3 = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" note abb = so_delta_def q_e_delta_def u_stop_e_def vo_star_def t_stop_o_star_def t_stop_e_def distance_def safe_dist2_def safe_dist1_def safe_dist0_def safe_dist3_def distance0_def consider "distance > safe_dist0" | "distance > safe_dist3" | "distance \ safe_dist0 \ 
distance \ safe_dist3" by linarith hence "sdn.no_collision_react {0..}" proof (cases) case 1 then show ?thesis using sdn.sd_1r_correct unfolding abb by auto next case 2 hence pre2: "distance > distance0" using sdn.distance0_at_most_sd3r unfolding abb by auto hence "sdn.u \ < sdn.other.s \" using pre unfolding sdn.u_def sdn.ego.q_def sdn.other.s_def sdn.other.t_stop_def sdn.other.p_def abb check_precond_r1_def sdn.distance0_def by auto from pre interpret sdr: safe_distance_no_collsion_delta a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ by (unfold_locales) (auto simp add:check_precond_r1_def `sdn.u \ < sdn.other.s \`) show ?thesis using sdr.sd_3r_correct 2 pre unfolding check_precond_r1_def abb sdn.other.t_stop_def by auto next case 3 hence "distance \ safe_dist3" by auto hence "sdn.other.s \ \ sdn.u_max" using pre unfolding check_precond_r1_def sdn.other.s_def sdn.other.t_stop_def sdn.other.p_def sdn.u_max_eq sdn.ego.q_def abb sdn.safe_distance_3r_def by auto have " (a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star) \ \ (a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star) " by auto moreover { assume cond: "(a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star)" with 3 pre have "distance > safe_dist2" using asm unfolding checker_r1_def Let_def abb by auto with sdn.distance0_at_most_sd4r have "distance > distance0" unfolding abb using cond by auto hence "sdn.u \ < sdn.other.s \" using pre unfolding sdn.u_def sdn.ego.q_def sdn.other.s_def sdn.other.t_stop_def sdn.other.p_def abb check_precond_r1_def sdn.distance0_def by auto from pre interpret sdr: safe_distance_no_collsion_delta a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ by (unfold_locales) (auto simp add:check_precond_r1_def `sdn.u \ < sdn.other.s \`) from sdr.sd_4r_correct[OF _ `sdn.other.s \ \ sdn.u_max`] `distance > safe_dist2` have ?thesis using pre cond unfolding check_precond_r1_def sdn.other.t_stop_def abb by auto } moreover { assume not_cond: "\ (a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star)" with 3 pre have "distance > safe_dist1" using asm unfolding checker_r1_def Let_def abb by auto with sdn.dist0_sd2r_1 have "distance > distance0" using pre not_cond unfolding check_precond_r1_def sdn.other.t_stop_def sdn.other.s'_def sdn.other.p'_def abb by (auto simp add:field_simps) hence "sdn.u \ < sdn.other.s \" using pre unfolding sdn.u_def sdn.ego.q_def sdn.other.s_def sdn.other.t_stop_def sdn.other.p_def abb check_precond_r1_def sdn.distance0_def by auto from pre interpret sdr: safe_distance_no_collsion_delta a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ by (unfold_locales) (auto simp add:check_precond_r1_def `sdn.u \ < sdn.other.s \`) from sdr.sd_2r_correct_for_3r_2[OF _ `sdn.other.s \ \ sdn.u_max`] not_cond `distance > safe_dist1` have ?thesis using pre unfolding abb sdn.other.s'_def check_precond_r1_def sdn.other.t_stop_def sdn.other.p'_def by (auto simp add:field_simps) } ultimately show ?thesis by auto qed with pre show " check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ sdn.no_collision_react {0..}" by auto next assume "check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}" hence pre: "check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" and as2: "safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e 
s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}" by auto show "checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ " proof (rule ccontr) assume as1: "\ checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" from pre have "0 < \" and "\ \ - v\<^sub>o / a\<^sub>o" unfolding check_precond_r1_def by auto define so_delta where "so_delta = s\<^sub>o + v\<^sub>o * \ + a\<^sub>o * \\<^sup>2 / 2" define q_e_delta where "q_e_delta \ s\<^sub>e + v\<^sub>e * \" define u_stop_e where "u_stop_e \ q_e_delta - v\<^sub>e\<^sup>2 / (2 * a\<^sub>e)" define vo_star where "vo_star \ v\<^sub>o + a\<^sub>o * \" define t_stop_o_star where "t_stop_o_star \ - vo_star / a\<^sub>o" define t_stop_e where "t_stop_e \ - v\<^sub>e / a\<^sub>e" define distance where "distance \ s\<^sub>o - s\<^sub>e" define distance0 where "distance0 \ safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist0 where "safe_dist0 \ safe_distance_1r a\<^sub>e v\<^sub>e \" define safe_dist2 where "safe_dist2 \ safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist1 where "safe_dist1 \ safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist3 where "safe_dist3 \ safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" note abb = so_delta_def q_e_delta_def u_stop_e_def vo_star_def t_stop_o_star_def t_stop_e_def distance_def safe_dist2_def safe_dist1_def safe_dist0_def safe_dist3_def distance0_def from pre have sdn': "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by (unfold_locales) (auto simp add: check_precond_r1_def) interpret sdn: safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ rewrites "sdn.distance0 = safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_1r = safe_distance_1r a\<^sub>e v\<^sub>e \" and "sdn.safe_distance_2r = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_4r = safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_3r = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" proof - from sdn' show "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by auto next show "safe_distance_normal.distance0 v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance0 v\<^sub>e a\<^sub>o v\<^sub>o \ " unfolding safe_distance_normal.distance0_def[OF sdn'] safe_distance0_def by auto next show "safe_distance_normal.safe_distance_1r a\<^sub>e v\<^sub>e \ = safe_distance_1r a\<^sub>e v\<^sub>e \" unfolding safe_distance_normal.safe_distance_1r_def[OF sdn'] safe_distance_1r_def by auto next show "safe_distance_normal.safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.safe_distance_2r_def[OF sdn'] safe_distance_2r_def by auto next show "safe_distance_normal.safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ " unfolding safe_distance_normal.safe_distance_4r_def[OF sdn'] safe_distance_4r_def by auto next show "safe_distance_normal.safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.safe_distance_3r_def[OF sdn'] safe_distance_3r_def by auto qed have "\ distance > distance0 \ distance > distance0" by auto moreover { assume "\ distance > distance0" hence "distance \ distance0" by auto with sdn.cond_3r_1' have 
"sdn.collision_react {0..\}" using pre unfolding check_precond_r1_def abb sdn.other.t_stop_def by auto with sdn.collision_react_subset have "sdn.collision_react {0..}" by auto with as2 have "False" by auto } moreover { assume if2: "distance > distance0" have "\ (distance > safe_dist0 \ distance > safe_dist3)" proof (rule ccontr) assume "\ \ (safe_dist0 < distance \ safe_dist3 < distance)" hence "(safe_dist0 < distance \ safe_dist3 < distance)" by auto with as1 show "False" using pre if2 unfolding checker_r1_def Let_def abb by auto qed hence if31: "distance \ safe_dist0" and if32: "distance \ safe_dist3" by auto have "sdn.u \ < sdn.other.s \" using if2 pre unfolding sdn.u_def sdn.ego.q_def sdn.other.s_def sdn.other.t_stop_def sdn.other.p_def abb check_precond_r1_def sdn.distance0_def by auto from pre interpret sdr: safe_distance_no_collsion_delta a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ by (unfold_locales) (auto simp add:check_precond_r1_def `sdn.u \ < sdn.other.s \`) have " s\<^sub>o \ sdn.u_max" using if31 unfolding sdn.u_max_eq sdn.ego.q_def abb sdn.safe_distance_1r_def by auto have "sdn.other.s \ \ sdn.u_max" using if32 pre unfolding sdn.other.s_def check_precond_r1_def sdn.other.t_stop_def sdn.other.p_def sdn.u_max_eq sdn.ego.q_def abb sdn.safe_distance_3r_def by auto consider "(a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star)" | "\ (a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star)" by auto hence "False" proof (cases) case 1 hence rest_conjunct:"(a\<^sub>e < a\<^sub>o \ sdn.other.s' \ < v\<^sub>e \ v\<^sub>e - a\<^sub>e / a\<^sub>o * sdn.other.s' \ < 0)" using pre unfolding check_precond_r1_def unfolding sdn.other.s'_def sdn.other.t_stop_def sdn.other.p'_def abb by (auto simp add:field_simps) from 1 have "distance \ safe_dist2" using as1 pre if2 if31 if32 unfolding checker_r1_def Let_def abb by auto hence cond_f: "sdn.other.s \ - sdn.ego.q \ \ sdr.delayed_safe_distance.snd_safe_distance" using pre unfolding check_precond_r1_def sdn.other.s_def sdn.other.t_stop_def sdn.other.p_def sdn.ego.q_def sdr.delayed_safe_distance.snd_safe_distance_def using sdn.other.s'_def[of "\"] unfolding sdn.other.t_stop_def sdn.other.p'_def abb sdn.safe_distance_4r_def by auto have "distance > safe_dist1 \ distance \ safe_dist1" by auto moreover { assume "distance > safe_dist1" hence "sdn.u_max < sdn.other.s_stop" unfolding sdn.u_max_eq sdn.ego.q_def sdn.other.s_t_stop sdn.other.p_max_eq abb sdn.safe_distance_2r_def by (auto simp add:field_simps) from sdr.cond_3r_2[OF `s\<^sub>o \ sdn.u_max` this `sdn.other.s \ \ sdn.u_max`] have "sdn.collision_react {0..}" using cond_f rest_conjunct by auto with as2 have "False" by auto } moreover { assume "distance \ safe_dist1" hence "sdn.u_max \ sdn.other.s_stop" unfolding sdn.u_max_eq sdn.ego.q_def sdn.other.s_t_stop sdn.other.p_max_eq abb sdn.safe_distance_2r_def by (auto simp add:field_simps) with sdn.cond_2r[OF this] have "sdn.collision_react {0..}" by auto with as2 have "False" by auto } ultimately show ?thesis by auto next case 2 hence "distance \ safe_dist1" using as1 pre if2 if31 if32 unfolding checker_r1_def Let_def abb by auto hence "sdn.u_max \ sdn.other.s_stop" unfolding sdn.u_max_eq sdn.ego.q_def sdn.other.s_t_stop sdn.other.p_max_eq abb sdn.safe_distance_2r_def by (auto simp add:field_simps) with sdn.cond_2r[OF this] have "sdn.collision_react {0..}" by auto with as2 show "False" by auto qed } ultimately show "False" by auto qed qed definition check_precond_r2 :: "real \ real \ real \ real \ 
real \ real \ real \ bool" where "check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ s\<^sub>o > s\<^sub>e \ 0 \ v\<^sub>e \ 0 \ v\<^sub>o \ a\<^sub>e < 0 \ a\<^sub>o < 0 \ 0 < \ \ \ > - v\<^sub>o / a\<^sub>o" definition safe_distance0_2 :: "real \ real \ real \ real \ real" where "safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \ = v\<^sub>e * \ + 1 / 2 * v\<^sub>o\<^sup>2 / a\<^sub>o" definition checker_r2 :: "real \ real \ real \ real \ real \ real \ real \ bool" where "checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ let distance = s\<^sub>o - s\<^sub>e; precond = check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \; safe_dist0 = safe_distance_1r a\<^sub>e v\<^sub>e \; safe_dist1 = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ in if \ precond then False else if distance > safe_dist0 then True else distance > safe_dist1" theorem checker_r2_correctness: "(checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..})" proof assume asm: "checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" have pre: "check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" proof (rule ccontr) assume "\ check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" with asm show "False" unfolding checker_r2_def Let_def by auto qed from pre have sdn': "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by (unfold_locales) (auto simp add: check_precond_r2_def) interpret sdn: safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ rewrites "sdn.distance0_2 = safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_1r = safe_distance_1r a\<^sub>e v\<^sub>e \" and "sdn.safe_distance_2r = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" proof - from sdn' show "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by auto next show "safe_distance_normal.distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.distance0_2_def[OF sdn'] safe_distance0_2_def by auto next show "safe_distance_normal.safe_distance_1r a\<^sub>e v\<^sub>e \ = safe_distance_1r a\<^sub>e v\<^sub>e \" unfolding safe_distance_normal.safe_distance_1r_def[OF sdn'] safe_distance_1r_def by auto next show "safe_distance_normal.safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.safe_distance_2r_def[OF sdn'] safe_distance_2r_def by auto qed have "0 < \" and "\ > - v\<^sub>o / a\<^sub>o" using pre unfolding check_precond_r2_def by auto define distance where "distance \ s\<^sub>o - s\<^sub>e" define distance0_2 where "distance0_2 = safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist0 where "safe_dist0 = safe_distance_1r a\<^sub>e v\<^sub>e \" define safe_dist1 where "safe_dist1 \ safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" note abb = distance_def safe_dist1_def safe_dist0_def distance0_2_def consider "distance > safe_dist0" | "distance \ safe_dist0" by linarith hence "sdn.no_collision_react {0..}" proof (cases) case 1 then show ?thesis using sdn.sd_1r_correct unfolding abb by auto next 
case 2 hence "(s\<^sub>o \ sdn.u_max)" using distance_def safe_dist0_def sdn.sd_1r_eq by linarith with 2 pre have "distance > safe_dist1" using asm unfolding checker_r2_def Let_def abb by auto with sdn.dist0_sd2r_2 have "distance > distance0_2" using abb \- v\<^sub>o / a\<^sub>o < \\ by auto hence "sdn.u \ < sdn.other.s \" using abb sdn.distance0_2_eq \\ > - v\<^sub>o / a\<^sub>o\ sdn.other.t_stop_def by auto have "sdn.u_max < sdn.other.s \" using abb sdn.sd2r_eq \\ > - v\<^sub>o / a\<^sub>o\ sdn.other.t_stop_def `distance > safe_dist1` by auto from pre interpret sdr: safe_distance_no_collsion_delta a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ by (unfold_locales) (auto simp add:check_precond_r2_def `sdn.u \ < sdn.other.s \`) from sdr.sd_2r_correct_for_3r_3[OF] `distance > safe_dist1` `sdn.u \ < sdn.other.s \` `sdn.u_max < sdn.other.s \` show ?thesis using pre unfolding abb sdn.other.s'_def check_precond_r2_def sdn.other.t_stop_def sdn.other.p'_def by (auto simp add:field_simps) qed with pre show " check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ sdn.no_collision_react {0..}" by auto next assume "check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}" hence pre: "check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" and as2: "safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}" by auto show "checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" proof (rule ccontr) assume as1: "\ checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" from pre have "0 < \" and "\ > - v\<^sub>o / a\<^sub>o" unfolding check_precond_r2_def by auto define distance where "distance \ s\<^sub>o - s\<^sub>e" define distance0_2 where "distance0_2 = safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \" define safe_dist0 where "safe_dist0 = safe_distance_1r a\<^sub>e v\<^sub>e \" define safe_dist1 where "safe_dist1 \ safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" note abb = distance_def safe_dist1_def safe_dist0_def distance0_2_def from pre have sdn': "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by (unfold_locales) (auto simp add: check_precond_r2_def) interpret sdn: safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ rewrites "sdn.distance0_2 = safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \" and "sdn.safe_distance_1r = safe_distance_1r a\<^sub>e v\<^sub>e \" and "sdn.safe_distance_2r = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" proof - from sdn' show "safe_distance_normal a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \" by auto next show "safe_distance_normal.distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance0_2 v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.distance0_2_def[OF sdn'] safe_distance0_2_def by auto next show "safe_distance_normal.safe_distance_1r a\<^sub>e v\<^sub>e \ = safe_distance_1r a\<^sub>e v\<^sub>e \" unfolding safe_distance_normal.safe_distance_1r_def[OF sdn'] safe_distance_1r_def by auto next show "safe_distance_normal.safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \" unfolding safe_distance_normal.safe_distance_2r_def[OF sdn'] safe_distance_2r_def by auto qed have "\ distance > distance0_2 \ 
distance > distance0_2" by auto moreover { assume "\ distance > distance0_2" hence "distance \ distance0_2" by auto with sdn.cond_3r_1'_2 have "sdn.collision_react {0..\}" using pre unfolding check_precond_r2_def abb sdn.other.t_stop_def by auto with sdn.collision_react_subset have "sdn.collision_react {0..}" by auto with as2 have "False" by auto } moreover { assume if2: "distance > distance0_2" have "\ (distance > safe_dist0)" proof (rule ccontr) assume "\ \ (safe_dist0 < distance)" hence "(safe_dist0 < distance)" by auto with as1 show "False" using pre if2 unfolding checker_r2_def Let_def abb by auto qed hence if3: "distance \ safe_dist0" by auto with pre have "distance \ safe_dist1" using as1 unfolding checker_r2_def Let_def abb by auto have "sdn.u \ < sdn.other.s \" using abb if2 sdn.distance0_2_eq \\ > - v\<^sub>o / a\<^sub>o\ sdn.other.t_stop_def by auto from pre interpret sdr: safe_distance_no_collsion_delta a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ by (unfold_locales) (auto simp add:check_precond_r2_def `sdn.u \ < sdn.other.s \`) have "sdn.u_max \ sdn.other.s \" using abb sdn.sd2r_eq \\ > - v\<^sub>o / a\<^sub>o\ sdn.other.t_stop_def `distance \ safe_dist1` by auto with `\ > - v\<^sub>o / a\<^sub>o` have "sdn.u_max \ sdn.other.s_stop" using sdn.other.s_mono sdn.other.t_stop_nonneg sdn.other.p_t_stop sdn.other.p_zero sdn.other.t_stop_def apply (auto simp: sdn.other.s_def movement.t_stop_def split: if_splits) using sdn.other.p_zero by auto hence "sdn.collision_react {0..}" using sdn.cond_2r by auto with as2 have "False" by auto } ultimately show "False" by auto qed qed text \Combine the two checkers into one.\ definition check_precond_r :: "real \ real \ real \ real \ real \ real \ real \ bool" where "check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ s\<^sub>o > s\<^sub>e \ 0 \ v\<^sub>e \ 0 \ v\<^sub>o \ a\<^sub>e < 0 \ a\<^sub>o < 0 \ 0 < \" definition checker_r :: "real \ real \ real \ real \ real \ real \ real \ bool" where "checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ let distance = s\<^sub>o - s\<^sub>e; precond = check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \; vo_star = v\<^sub>o + a\<^sub>o * \; t_stop_o_star = -vo_star / a\<^sub>o; t_stop_e = -v\<^sub>e / a\<^sub>e; t_stop_o = -v\<^sub>o / a\<^sub>o; safe_dist0 = safe_distance_1r a\<^sub>e v\<^sub>e \; safe_dist1 = safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \; safe_dist2 = safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \; safe_dist3 = safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ in if \ precond then False else if distance > safe_dist0 then True else if \ \ t_stop_o \ distance > safe_dist3 then True else if \ \ t_stop_o \ (a\<^sub>o > a\<^sub>e \ vo_star < v\<^sub>e \ t_stop_e < t_stop_o_star) then distance > safe_dist2 else distance > safe_dist1" theorem checker_eq_1: "checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ \ \ - v\<^sub>o / a\<^sub>o \ checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" proof - have "checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ \ \ - v\<^sub>o / a\<^sub>o \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ (s\<^sub>o - s\<^sub>e > safe_distance_1r a\<^sub>e v\<^sub>e \ \ s\<^sub>o - s\<^sub>e > safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ \ (((a\<^sub>o > a\<^sub>e \ v\<^sub>o + a\<^sub>o * \ < v\<^sub>e \ - v\<^sub>e / a\<^sub>e < - 
(v\<^sub>o + a\<^sub>o * \) / a\<^sub>o) \ s\<^sub>o - s\<^sub>e > safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \) \ (\ (a\<^sub>o > a\<^sub>e \ v\<^sub>o + a\<^sub>o * \ < v\<^sub>e \ - v\<^sub>e / a\<^sub>e < - (v\<^sub>o + a\<^sub>o * \) / a\<^sub>o) \ s\<^sub>o - s\<^sub>e > safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \))) \ \ \ - v\<^sub>o / a\<^sub>o" using checker_r_def by metis also have "... \ check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ (s\<^sub>o - s\<^sub>e > safe_distance_1r a\<^sub>e v\<^sub>e \ \ s\<^sub>o - s\<^sub>e > safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \ \ (((a\<^sub>o > a\<^sub>e \ v\<^sub>o + a\<^sub>o * \ < v\<^sub>e \ - v\<^sub>e / a\<^sub>e < - (v\<^sub>o + a\<^sub>o * \) / a\<^sub>o) \ s\<^sub>o - s\<^sub>e > safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \) \ (\ (a\<^sub>o > a\<^sub>e \ v\<^sub>o + a\<^sub>o * \ < v\<^sub>e \ - v\<^sub>e / a\<^sub>e < - (v\<^sub>o + a\<^sub>o * \) / a\<^sub>o) \ s\<^sub>o - s\<^sub>e > safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \)))" by (auto simp add:check_precond_r_def check_precond_r1_def) also have "... \ checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" by (metis checker_r1_def) finally show ?thesis by auto qed theorem checker_eq_2: "checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ \ > - v\<^sub>o / a\<^sub>o \ checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" proof - have "checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ \ > - v\<^sub>o / a\<^sub>o \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ (\ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ s\<^sub>o - s\<^sub>e > safe_distance_1r a\<^sub>e v\<^sub>e \ \ (\ \ - v\<^sub>o / a\<^sub>o \ s\<^sub>o - s\<^sub>e > safe_distance_3r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \) \ (\ \ - v\<^sub>o / a\<^sub>o \ a\<^sub>o > a\<^sub>e \ v\<^sub>o + a\<^sub>o * \ < v\<^sub>e \ - v\<^sub>e / a\<^sub>e < - (v\<^sub>o + a\<^sub>o * \) / a\<^sub>o \ s\<^sub>o - s\<^sub>e > safe_distance_4r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \) \ s\<^sub>o - s\<^sub>e > safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \) \ \ > - v\<^sub>o / a\<^sub>o" unfolding checker_r_def Let_def if_splits by auto also have "... \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ (s\<^sub>o - s\<^sub>e > safe_distance_1r a\<^sub>e v\<^sub>e \ \ s\<^sub>o - s\<^sub>e > safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \) \ \ > - v\<^sub>o / a\<^sub>o" by (auto simp add:HOL.disjE) also have "... \ check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ (s\<^sub>o - s\<^sub>e > safe_distance_1r a\<^sub>e v\<^sub>e \ \ s\<^sub>o - s\<^sub>e > safe_distance_2r a\<^sub>e v\<^sub>e a\<^sub>o v\<^sub>o \)" by (auto simp add:check_precond_r_def check_precond_r2_def) also have "... 
\ checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" by (auto simp add:checker_r2_def Let_def if_splits) thus ?thesis using calculation by auto qed theorem checker_r_correctness: "(checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..})" proof - have "checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ (checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ \ \ - v\<^sub>o / a\<^sub>o) \ (checker_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ \ > - v\<^sub>o / a\<^sub>o)" by auto also have "... \ checker_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ checker_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \" using checker_eq_1 checker_eq_2 by auto also have "... \ (check_precond_r1 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}) \ (check_precond_r2 s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..})" using checker_r1_correctness checker_r2_correctness by auto also have "... \ (\ \ - v\<^sub>o / a\<^sub>o \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}) \ (\ > - v\<^sub>o / a\<^sub>o \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..})" by (auto simp add:check_precond_r_def check_precond_r1_def check_precond_r2_def) also have "... \ check_precond_r s\<^sub>e v\<^sub>e a\<^sub>e s\<^sub>o v\<^sub>o a\<^sub>o \ \ safe_distance_normal.no_collision_react a\<^sub>e v\<^sub>e s\<^sub>e a\<^sub>o v\<^sub>o s\<^sub>o \ {0..}" by auto finally show ?thesis by auto qed end \ No newline at end of file