diff --git a/thys/First_Welfare_Theorem/Microeconomics/Arrow_Debreu_Model.thy b/thys/First_Welfare_Theorem/Microeconomics/Arrow_Debreu_Model.thy --- a/thys/First_Welfare_Theorem/Microeconomics/Arrow_Debreu_Model.thy +++ b/thys/First_Welfare_Theorem/Microeconomics/Arrow_Debreu_Model.thy @@ -1,503 +1,502 @@ (* License: LGPL *) (* Author: Julian Parsert Author: Cezary Kaliszyk *) section \ Arrow-Debreu model \ theory Arrow_Debreu_Model imports - "HOL-Analysis.Analysis" "../Preferences" "../Preferences" "../Utility_Functions" "../Argmax" Consumers Common begin locale pre_arrow_debreu_model = fixes production_sets :: "'f \ ('a::ordered_euclidean_space) set" fixes consumption_set :: "'a set" fixes agents :: "'i set" fixes firms :: "'f set" fixes \ :: "'i \ 'a" ("\[_]") fixes Pref :: "'i \ 'a relation" ("Pr[_]") fixes U :: "'i \ 'a \ real" ("U[_]") fixes \ :: "'i \ 'f \ real" ("\[_,_]") assumes cons_set_props: "arrow_debreu_consum_set consumption_set" assumes agent_props: "i \ agents \ eucl_ordinal_utility consumption_set (Pr[i]) (U[i])" assumes firms_comp_owned: "j \ firms \ (\i\agents. \[i,j]) = 1" assumes finite_nonepty_agents: "finite agents" and "agents \ {}" sublocale pre_arrow_debreu_model \ pareto_ordering agents U . (*sublocale pre_arrow_debreu_model \ exchange_economy consumption_set agents \ Pref U Price by (metis exchange_economy.intro pre_arrow_debreu_model_axioms pre_arrow_debreu_model_def) *) context pre_arrow_debreu_model begin text \ Calculate wealth of individual i in context of Private Ownership economy. \ context begin private abbreviation poe_wealth where "poe_wealth P i Y \ P \ \[i] + (\j\firms. \[i,j] *\<^sub>R (P \ Y j))" subsection \ Feasiblity \ private abbreviation feasible where "feasible X Y \ feasible_private_ownership agents firms \ consumption_set production_sets X Y" private abbreviation calculate_value where "calculate_value P x \ P \ x" subsection \ Profit maximisation \ text \ In a production economy (which this is) we need to specify profit maximisation. \ definition profit_maximisation where "profit_maximisation P S = arg_max_set (\x. P \ x) S" subsection \ Competitive Equilibirium \ text \ Competitive equilibrium in context of production economy with private ownership. This includes the profit maximisation condition. \ definition competitive_equilibrium where "competitive_equilibrium P X Y \ feasible X Y \ (\j \ firms. (Y j) \ profit_maximisation P (production_sets j)) \ (\i \ agents. (X i) \ arg_max_set U[i] (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)))" lemma competitive_equilibriumD [dest]: assumes "competitive_equilibrium P X Y" shows "feasible X Y \ (\j \ firms. (Y j) \ profit_maximisation P (production_sets j)) \ (\i \ agents. (X i) \ arg_max_set U[i] (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)))" using assms by (simp add: competitive_equilibrium_def) lemma compet_max_profit: assumes "j \ firms" assumes "competitive_equilibrium P X Y" shows "Y j \ profit_maximisation P (production_sets j)" using assms(1) assms(2) by blast subsection \ Pareto Optimality \ definition pareto_optimal where "pareto_optimal X Y \ (feasible X Y \ (\X' Y'. feasible X' Y' \ X' \Pareto X))" lemma pareto_optimalI[intro]: assumes "feasible X Y" and "\X' Y'. feasible X' Y' \ X' \Pareto X" shows "pareto_optimal X Y" using pareto_optimal_def assms(1) assms(2) by blast lemma pareto_optimalD[dest]: assumes "pareto_optimal X Y" shows "feasible X Y" and "\X' Y'. 
feasible X' Y' \ X' \Pareto X" using pareto_optimal_def assms by auto lemma util_fun_def_holds: assumes "i \ agents" and "x \ consumption_set" and "y \ consumption_set" shows "x \[Pr[i]] y \ U[i] x \ U[i] y" proof assume "x \[Pr[i]] y" show "U[i] x \ U[i] y" by (meson \x \[Pr[i]] y\ agent_props assms eucl_ordinal_utility_def ordinal_utility_def) next assume "U[i] x \ U[i] y" have "eucl_ordinal_utility consumption_set (Pr[i]) (U[i])" by (simp add: agent_props assms) then show "x \[Pr[i]] y" by (meson \U[i] y \ U[i] x\ assms(2) assms(3) eucl_ordinal_utility_def ordinal_utility.util_def_conf) qed lemma base_pref_is_ord_eucl_rpr: "i \ agents \ rational_preference consumption_set Pr[i]" using agent_props ord_eucl_utility_imp_rpr real_vector_rpr.have_rpr by blast lemma prof_max_ge_all_in_pset: assumes "j \ firms" assumes "Y j \ profit_maximisation P (production_sets j)" shows "\y \ production_sets j. P \ Y j \ P \ y" using all_leq assms(2) profit_maximisation_def by fastforce subsection \ Lemmas for final result \ text \ Strictly preferred bundles are strictly more expensive. \ lemma all_prefered_are_more_expensive: assumes i_agt: "i \ agents" assumes equil: "competitive_equilibrium P \ \" assumes "z \ consumption_set" assumes "(U i) z > (U i) (\ i)" shows "z \ P > P \ (\ i)" proof (rule ccontr) assume neg_as : "\(z \ P > P \ (\ i))" have xp_leq : "z \ P \ P \ (\ i)" using \\z \ P > P \ (\ i)\ by auto have x_in_argmax: "(\ i) \ arg_max_set U[i] (budget_constraint (calculate_value P) consumption_set (poe_wealth P i \))" using equil i_agt by blast hence x_in: "\ i \ (budget_constraint (calculate_value P) consumption_set (poe_wealth P i \))" using argmax_sol_in_s [of "(\ i)" "U[i]" "budget_constraint (calculate_value P) consumption_set (poe_wealth P i \)"] by blast hence z_in_budget: "z \ (budget_constraint (calculate_value P) consumption_set (poe_wealth P i \))" proof - have z_leq_endow: "P \ z \ P \ (\ i)" by (metis xp_leq inner_commute) have z_in_cons: "z \ consumption_set" using assms by auto then show ?thesis using x_in budget_constraint_def z_leq_endow proof - have "\r. P \ \ i \ r \ P \ z \ r" using z_leq_endow by linarith then show ?thesis using budget_constraint_def x_in z_in_cons by (simp add: budget_constraint_def) qed qed have nex_prop: "\e. e \ (budget_constraint (calculate_value P) consumption_set (poe_wealth P i \)) \ U[i] e > U[i] (\ i)" using no_better_in_s[of "\ i" "U[i]" "budget_constraint (calculate_value P) consumption_set (poe_wealth P i \)"] x_in_argmax by blast have "z \ budget_constraint (calculate_value P) consumption_set (poe_wealth P i \) \ U[i] z > U[i] (\ i)" using assms z_in_budget by blast thus False using nex_prop by blast qed text \ Given local non-satiation, argmax will use the entire budget. \ lemma am_utilises_entire_bgt: assumes i_agts: "i \ agents" assumes lns : "local_nonsatiation consumption_set Pr[i]" assumes argmax_sol : "X \ arg_max_set U[i] (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y))" shows "P \ X = P \ \[i] + (\j\firms. \[i,j] *\<^sub>R (P \ Y j))" proof - let ?wlt = "P \ \[i] + (\j\firms. 
\[i,j] *\<^sub>R (P \ Y j))" let ?bc = "budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)" have xin: "X \ budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)" using argmax_sol_in_s [of "X" "U[i]" ?bc] argmax_sol by blast hence is_leq: "X \ P \ (poe_wealth P i Y)" by (metis (mono_tags, lifting) budget_constraint_def inner_commute mem_Collect_eq) have not_less: "\X \ P < (poe_wealth P i Y)" proof assume neg: "X \ P < (poe_wealth P i Y)" have bgt_leq: "\x\ ?bc. U[i] X \ U[i] x" using leq_all_in_sol [of "X" "U[i]" "?bc"] all_leq [of "X" "U[i]" "?bc"] argmax_sol by blast define s_low where "s_low = {x . P \ x < ?wlt}" have "\e > 0. ball X e \ s_low" proof - have x_in_budget: "P \ X < ?wlt" by (metis inner_commute neg) have s_low_open: "open s_low" using open_halfspace_lt s_low_def by blast then show ?thesis using s_low_open open_contains_ball_eq s_low_def x_in_budget by blast qed obtain e where "e > 0" and e: "ball X e \ s_low" using \\e>0. ball X e \ s_low\ by blast obtain y where y_props: "y \ ball X e" "y \[Pref i] X" using \0 < e\ xin assms(2) budget_constraint_def by (metis (no_types, lifting) lns_alt_def2 mem_Collect_eq) have "y \ budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)" proof - have "y \ s_low" using \y \ ball X e\ e by blast moreover have "y \ consumption_set" by (meson agent_props eucl_ordinal_utility_def i_agts ordinal_utility_def y_props(2)) moreover have "P \ y \ poe_wealth P i Y" using calculation(1) s_low_def by auto ultimately show ?thesis by (simp add: budget_constraint_def) qed then show False using bgt_leq i_agts y_props(2) util_fun_def_holds xin budget_constraint_def by (metis (no_types, lifting) mem_Collect_eq) qed then show ?thesis by (metis inner_commute is_leq less_eq_real_def) qed corollary x_equil_x_ext_budget: assumes i_agt: "i \ agents" assumes lns : "local_nonsatiation consumption_set Pr[i]" assumes equilibrium : "competitive_equilibrium P X Y" shows "P \ X i = P \ \[i] + (\j\firms. \[i,j] *\<^sub>R (P \ Y j))" proof - have "X i \ arg_max_set U[i] (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y))" using equilibrium i_agt by blast then show ?thesis using am_utilises_entire_bgt i_agt lns by blast qed lemma same_price_in_argmax : assumes i_agt: "i \ agents" assumes lns : "local_nonsatiation consumption_set Pr[i]" assumes "x \ arg_max_set (U[i]) (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y))" assumes "y \ arg_max_set (U[i]) (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y))" shows "(P \ x) = (P \ y)" using am_utilises_entire_bgt assms lns by (metis (no_types) am_utilises_entire_bgt assms(3) assms(4) i_agt lns) text \ Greater or equal utility implies greater or equal value. \ lemma utility_ge_price_ge : assumes agts: "i \ agents" assumes lns : "local_nonsatiation consumption_set Pr[i]" assumes equil: "competitive_equilibrium P X Y" assumes geq: "U[i] z \ U[i] (X i)" and "z \ consumption_set" shows "P \ z \ P \ (X i)" proof - let ?bc = "(budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y))" have not_in : "z \ arg_max_set (U[i]) ?bc \ P \ z > (P \ \[i]) + (\j\(firms). 
(\[i,j] *\<^sub>R (P \ Y j)))" proof- assume "z \ arg_max_set (U[i]) ?bc" moreover have "X i \ arg_max_set (U[i]) ?bc" using competitive_equilibriumD assms pareto_optimal_def by auto ultimately have "z \ budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)" by (meson geq leq_all_in_sol) then show ?thesis using budget_constraint_def assms by (simp add: budget_constraint_def) qed have x_in_argmax: "(X i) \ arg_max_set U[i] ?bc" using agts equil by blast hence x_in_budget: "(X i) \ ?bc" using argmax_sol_in_s [of "(X i)" "U[i]" "?bc"] by blast have "U[i] z = U[i] (X i) \ P \ z \ P \ (X i)" proof(rule contrapos_pp) assume con_neg: "\ P \ z \ P \ (X i)" then have "P \ z < P \ (X i)" by linarith then have z_in_argmax: "z \ arg_max_set U[i] ?bc" proof - have "P \(X i) = P \ \[i] + (\j\firms. \[i,j] *\<^sub>R (P \ Y j))" using agts am_utilises_entire_bgt lns x_in_argmax by blast then show ?thesis by (metis (no_types) con_neg less_eq_real_def not_in) qed have z_budget_utilisation: "P \ z = P \ (X i)" by (metis (no_types) agts am_utilises_entire_bgt lns x_in_argmax z_in_argmax) have "P \ (X i) = P \ \[i] + (\j\firms. \[i,j] *\<^sub>R (P \ Y j))" using agts am_utilises_entire_bgt lns x_in_argmax by blast show "\ U[i] z = U[i] (X i)" using z_budget_utilisation con_neg by linarith qed thus ?thesis by (metis (no_types) agts am_utilises_entire_bgt eq_iff eucl_less_le_not_le lns not_in x_in_argmax) qed lemma commutativity_sums_over_funs: fixes X :: "'x set" fixes Y :: "'y set" shows "(\i\X. \j\Y. (f i j *\<^sub>R C \ g j)) = (\j\Y.\i\X. (f i j *\<^sub>R C \ g j))" using Groups_Big.comm_monoid_add_class.sum.swap by auto lemma assoc_fun_over_sum: fixes X :: "'x set" fixes Y :: "'y set" shows "(\j\Y. \i\X. f i j *\<^sub>R C \ g j) = (\j\Y. (\i\X. f i j) *\<^sub>R C \ g j)" by (simp add: inner_sum_left scaleR_left.sum) text \ Walras' law in context of production economy with private ownership. That is, in an equilibrium demand equals supply. \ lemma walras_law: assumes "\i. i\agents \ local_nonsatiation consumption_set Pr[i]" assumes "(\i \ agents. (X i) \ arg_max_set U[i] (budget_constraint (calculate_value P) consumption_set (poe_wealth P i Y)))" shows "P \ (\i\agents. (X i)) = P \ ((\i\agents. \[i]) + (\j\firms. Y j))" proof - have value_equal: "P \ (\i\agents. (X i)) = P \ (\i\agents. \[i]) + (\i\agents. \f\firms. \[i,f] *\<^sub>R (P \ Y f))" proof - have all_exhaust_bgt: "\i\agents. P \ (X i) = P \ \[i] + (\j\firms. \[i,j] *\<^sub>R (P \ (Y j)))" using assms am_utilises_entire_bgt by blast then show ?thesis by (simp add:all_exhaust_bgt inner_sum_right sum.distrib) qed have eq_1: "(\i\agents. \j\firms. (\[i,j] *\<^sub>R P \ Y j)) = (\j\firms. \i\agents. (\[i,j] *\<^sub>R P \ Y j))" using commutativity_sums_over_funs [of \ P Y firms agents] by blast hence eq_2: "P \ (\i\agents. X i) = P \ (\i\agents. \[i]) + (\j\firms. \i\agents. \[i,j] *\<^sub>R P \ Y j)" using value_equal by auto also have eq_3: "...= P \ (\i\agents. \[i]) + (\j\firms. (\i\agents. \[i,j]) *\<^sub>R P \ Y j)" using assoc_fun_over_sum[of "\" P Y agents firms] by auto also have eq_4: "... = P \ (\i\agents. \[i]) + (\f\firms. P \ Y f)" using firms_comp_owned by auto have comp_wise_inner: "P \ (\i\agents. X i) - (P \ (\i\agents. \[i])) - (\f\firms. P \ Y f) = 0" using eq_1 eq_2 eq_3 eq_4 by linarith then show ?thesis by (simp add: inner_right_distrib inner_sum_right) qed lemma walras_law_in_compeq: assumes "\i. 
i\agents \ local_nonsatiation consumption_set Pr[i]" assumes "competitive_equilibrium P X Y" shows "P \ ((\i\agents. (X i)) - (\i\agents. \[i]) - (\j\firms. Y j)) = 0" proof- have "P \ (\i\agents. (X i)) = P \ ((\i\agents. \[i]) + (\j\firms. Y j))" using assms(1) assms(2) walras_law by auto then show ?thesis by (simp add: inner_diff_right inner_right_distrib) qed subsection \ First Welfare Theorem \ text \ Proof of First Welfare Theorem in context of production economy with private ownership. \ theorem first_welfare_theorem_priv_own: assumes "\i. i \ agents \ local_nonsatiation consumption_set Pr[i]" and "Price > 0" assumes "competitive_equilibrium Price \ \" shows "pareto_optimal \ \" proof (rule ccontr) assume neg_as: "\ pareto_optimal \ \" have equili_feasible : "feasible \ \" using assms by (simp add: competitive_equilibrium_def) obtain X' Y' where xprime_pareto: "feasible X' Y' \ (\i \ agents. U[i] (X' i) \ U[i] (\ i)) \ (\i \ agents. U[i] (X' i) > U[i] (\ i))" using equili_feasible pareto_optimal_def pareto_dominating_def neg_as by auto have is_feasible: "feasible X' Y'" using xprime_pareto by blast have xprime_leq_y: "\i \ agents. (Price \ (X' i) \ (Price \ \[i]) + (\j\(firms). \[i,j] *\<^sub>R (Price \ \ j)))" proof fix i assume as: "i \ agents" have xprime_cons: "X' i \ consumption_set" using feasible_private_ownershipD as is_feasible by blast have x_leq_xprime: "U[i] (X' i) \ U[i] (\ i)" using \i \ agents\ xprime_pareto by blast have lns_pref: "local_nonsatiation consumption_set Pr[i]" using as assms by blast hence xprime_ge_x: "Price \ (X' i) \ Price \ (\ i)" using x_leq_xprime xprime_cons as assms utility_ge_price_ge by blast then show "Price \ (X' i) \ (Price \ \[i]) + (\j\(firms). \[i,j] *\<^sub>R (Price \ \ j))" using xprime_ge_x \i \ agents\ lns_pref assms x_equil_x_ext_budget by fastforce qed have ex_greater_value : "\i \ agents. Price \ (X' i) > Price \ (\ i)" proof(rule ccontr) assume cpos : "\(\i \ agents. Price \ (X' i) > Price \ (\ i))" obtain i where obt_witness : "i \ agents" "(U[i]) (X' i) > U[i] (\ i)" using xprime_pareto by blast show False by (metis all_prefered_are_more_expensive assms(3) cpos feasible_private_ownershipD(2) inner_commute xprime_pareto) qed have dom_g : "Price \ (\i\agents. X' i) > Price \ (\i\agents. (\ i))" (is "_ > _ \ ?x_sum") proof- have "(\i\agents. Price \ X' i) > (\i\agents. Price \ (\ i))" by (metis (mono_tags, lifting) xprime_leq_y assms(1,3) ex_greater_value finite_nonepty_agents sum_strict_mono_ex1 x_equil_x_ext_budget) thus "Price \ (\i\agents. X' i) > Price \ ?x_sum" by (simp add: inner_sum_right) qed let ?y_sum = "(\j\firms. \ j)" have equili_walras_law: "Price \ ?x_sum = (\i\agents. Price \ \[i] + (\j\firms. \[i,j] *\<^sub>R (Price \ \ j)))" (is "_ = ?ws") proof- have "\i\agents. Price \ \ i = Price \ \[i] + (\j\firms. \[i,j] *\<^sub>R (Price \ \ j))" by (metis (no_types, lifting) assms(1,3) x_equil_x_ext_budget) then show ?thesis by (simp add: inner_sum_right) qed also have remove_firm_pct: "... = Price \ (\i\agents. \[i]) + (Price \ ?y_sum)" proof- have equals_inner_price:"0 = Price \ (?x_sum - ((\i\agents. \ i) + ?y_sum))" by (metis (no_types) diff_diff_add assms(1,3) walras_law_in_compeq) have "Price \ ?x_sum = Price \ ((\i\agents. \ i) + ?y_sum)" by (metis (no_types) equals_inner_price inner_diff_right right_minus_eq) then show ?thesis by (simp add: equili_walras_law inner_right_distrib) qed have xp_l_yp: "(\i\agents. X' i) \ (\i\agents. \[i]) + (\f\firms. 
Y' f)" using feasible_private_ownership_def is_feasible by blast hence yprime_sgr_y: "Price \ (\i\agents. \[i]) + Price \ (\f\firms. Y' f) > ?ws" proof - have "Price \ (\i\agents. X' i) \ Price \ ((\i\agents. \[i]) + (\j\firms. Y' j))" by (metis xp_l_yp atLeastAtMost_iff inner_commute interval_inner_leI(2) less_imp_le order_refl assms(2)) hence "?ws < Price \ ((\i\agents. \ i) + (\j\firms. Y' j))" using dom_g equili_walras_law by linarith then show ?thesis by (simp add: inner_right_distrib) qed have Y_is_optimum: "\j\firms. \y \ production_sets j. Price \ \ j \ Price \ y" using assms prof_max_ge_all_in_pset by blast have yprime_in_prod_set: "\j \ firms. Y' j \ production_sets j" using feasible_private_ownershipD xprime_pareto by fastforce hence "\j \ firms. \y \ production_sets j. Price \ \ j \ Price \ y" using Y_is_optimum by blast hence Y_ge_yprime: "\j \ firms. Price \ \ j \ Price \ Y' j" using yprime_in_prod_set by blast hence yprime_p_leq_Y: "Price \ (\f\firms. Y' f) \ Price \ ?y_sum" by (simp add: Y_ge_yprime inner_sum_right sum_mono) then show False using remove_firm_pct yprime_sgr_y by linarith qed text \ Equilibrium cannot be Pareto dominated. \ lemma equilibria_dom_eachother: assumes "\i. i \ agents \ local_nonsatiation consumption_set Pr[i]" and "Price > 0" assumes equil: "competitive_equilibrium Price \ \" shows "\X' Y'. competitive_equilibrium P X' Y' \ X' \Pareto \" proof - have "pareto_optimal \ \" by (meson equil first_welfare_theorem_priv_own assms) hence "\X' Y'. feasible X' Y' \ X' \Pareto \" using pareto_optimal_def by blast thus ?thesis by auto qed text \ Using monotonicity instead of local non-satiation proves the First Welfare Theorem. \ corollary first_welfare_thm_monotone: assumes "\M \ carrier. (\x > M. x \ carrier)" assumes "\i. i \ agents \ monotone_preference consumption_set Pr[i]" and "Price > 0" assumes "competitive_equilibrium Price \ \" shows "pareto_optimal \ \" by (meson arrow_debreu_consum_set_def assms cons_set_props first_welfare_theorem_priv_own unbounded_above_mono_imp_lns) end end end diff --git a/thys/Ptolemys_Theorem/Ptolemys_Theorem.thy b/thys/Ptolemys_Theorem/Ptolemys_Theorem.thy --- a/thys/Ptolemys_Theorem/Ptolemys_Theorem.thy +++ b/thys/Ptolemys_Theorem/Ptolemys_Theorem.thy @@ -1,441 +1,441 @@ (* Author: Lukas Bulwahn *) section \Ptolemy's Theorem\ theory Ptolemys_Theorem imports - "HOL-Analysis.Analysis" + "HOL-Analysis.Multivariate_Analysis" begin subsection \Preliminaries\ subsubsection \Additions to Rat theory\ hide_const (open) normalize subsubsection \Additions to Transcendental theory\ text \ Lemmas about @{const arcsin} and @{const arccos} commonly involve to show that their argument is in the domain of those partial functions, i.e., the argument @{term y} is between @{term "-1::real"} and @{term "1::real"}. As the argumentation for @{term "(-1::real) \ y"} and @{term "y \ (1::real)"} is often very similar, we prefer to prove @{term "\y\ \ (1::real)"} to the two goals above. The lemma for rewriting the term @{term "cos (arccos y)"} is already provided in the Isabelle distribution with name @{thm [source] cos_arccos_abs}. Here, we further provide the analogue on @{term "arcsin"} for rewriting @{term "sin (arcsin y)"}. \ lemma sin_arcsin_abs: "\y\ \ 1 \ sin (arcsin y) = y" by (simp add: abs_le_iff) text \ The further lemmas are the required variants from existing lemmas @{thm [source] arccos_lbound} and @{thm [source] arccos_ubound}. 
\ lemma arccos_lbound_abs [simp]: "\y\ \ 1 \ 0 \ arccos y" by (simp add: arccos_lbound) lemma arccos_ubound_abs [simp]: "\y\ \ 1 \ arccos y \ pi" by (simp add: arccos_ubound) text \ As we choose angles to be between @{term "0::real"} between @{term "2 * pi"}, we need some lemmas to reason about the sign of @{term "sin x"} for angles @{term "x"}. \ lemma sin_ge_zero_iff: assumes "0 \ x" "x < 2 * pi" shows "0 \ sin x \ x \ pi" proof assume "0 \ sin x" show "x \ pi" proof (rule ccontr) assume "\ x \ pi" from this \x < 2 * pi\ have "sin x < 0" using sin_lt_zero by auto from this \0 \ sin x\ show False by auto qed next assume "x \ pi" from this \0 \ x\ show "0 \ sin x" by (simp add: sin_ge_zero) qed lemma sin_less_zero_iff: assumes "0 \ x" "x < 2 * pi" shows "sin x < 0 \ pi < x" using assms sin_ge_zero_iff by fastforce subsubsection \Addition to Finite-Cartesian-Product theory\ text \ Here follow generally useful additions and specialised equations for two-dimensional real-valued vectors. \ lemma axis_nth_eq_0 [simp]: assumes "i \ j" shows "axis i x $ j = 0" using assms unfolding axis_def by simp lemma norm_axis: fixes x :: real shows "norm (axis i x) = abs x" by (simp add: norm_eq_sqrt_inner inner_axis_axis) lemma norm_eq_on_real_2_vec: fixes x :: "real ^ 2" shows "norm x = sqrt ((x $ 1) ^ 2 + (x $ 2) ^ 2)" by (simp add: norm_eq_sqrt_inner inner_vec_def UNIV_2 power2_eq_square) lemma dist_eq_on_real_2_vec: fixes a b :: "real ^ 2" shows "dist a b = sqrt ((a $ 1 - b $ 1) ^ 2 + (a $ 2 - b $ 2) ^ 2)" unfolding dist_norm norm_eq_on_real_2_vec by simp subsection \Polar Form of Two-Dimensional Real-Valued Vectors\ subsubsection \Definitions to Transfer to Polar Form and Back\ definition of_radiant :: "real \ real ^ 2" where "of_radiant \ = axis 1 (cos \) + axis 2 (sin \)" definition normalize :: "real ^ 2 \ real ^ 2" where "normalize p = (if p = 0 then axis 1 1 else (1 / norm p) *\<^sub>R p)" definition radiant_of :: "real ^ 2 \ real" where "radiant_of p = (THE \. 0 \ \ \ \ < 2 * pi \ of_radiant \ = normalize p)" text \ The vector @{term "of_radiant \"} is the vector with length @{term "1::real"} and angle @{term "\"} to the first axis. We normalize vectors to length @{term "1::real"} keeping their orientation with the normalize function. Conversely, @{term "radiant_of p"} is the angle of vector @{term p} to the first axis, where we choose @{term "radiant_of"} to return angles between @{term "0::real"} and @{term "2 * pi"}, following the usual high-school convention. With these definitions, we can express the main result @{term "norm p *\<^sub>R of_radiant (radiant_of p) = p"}. Note that the main result holds for any definition of @{term "radiant_of 0"}. So, we choose to define @{term "normalize 0"} and @{term "radiant_of 0"}, such that @{term "radiant_of 0 = 0"}. \ subsubsection \Lemmas on @{const of_radiant}\ lemma nth_of_radiant_1 [simp]: "of_radiant \ $ 1 = cos \" unfolding of_radiant_def by simp lemma nth_of_radiant_2 [simp]: "of_radiant \ $ 2 = sin \" unfolding of_radiant_def by simp lemma norm_of_radiant: "norm (of_radiant \) = 1" unfolding of_radiant_def norm_eq_on_real_2_vec by simp lemma of_radiant_plus_2pi: "of_radiant (\ + 2 * pi) = of_radiant \" unfolding of_radiant_def by simp lemma of_radiant_minus_2pi: "of_radiant (\ - 2 * pi) = of_radiant \" proof - have "of_radiant (\ - 2 * pi) = of_radiant (\ - 2 * pi + 2 * pi)" by (simp only: of_radiant_plus_2pi) also have "\ = of_radiant \" by simp finally show ?thesis . 
qed subsubsection \Lemmas on @{const normalize}\ lemma normalize_eq: "norm p *\<^sub>R normalize p = p" unfolding normalize_def by simp lemma norm_normalize: "norm (normalize p) = 1" unfolding normalize_def by (auto simp add: norm_axis) lemma nth_normalize [simp]: "\normalize p $ i\ \ 1" using norm_normalize component_le_norm_cart by metis lemma normalize_square: "(normalize p $ 1)\<^sup>2 + (normalize p $ 2)\<^sup>2 = 1" using dot_square_norm[of "normalize p"] by (simp add: inner_vec_def UNIV_2 power2_eq_square norm_normalize) lemma nth_normalize_ge_zero_iff: "0 \ normalize p $ i \ 0 \ p $ i" proof assume "0 \ normalize p $ i" from this show "0 \ p $ i" unfolding normalize_def by (auto split: if_split_asm simp add: zero_le_divide_iff) next assume "0 \ p $ i" have "0 \ axis 1 (1 :: real) $ i" using exhaust_2[of i] by auto from this \0 \ p $ i\ show "0 \ normalize p $ i" unfolding normalize_def by auto qed lemma nth_normalize_less_zero_iff: "normalize p $ i < 0 \ p $ i < 0" using nth_normalize_ge_zero_iff leD leI by metis lemma normalize_boundary_iff: "\normalize p $ 1\ = 1 \ p $ 2 = 0" proof assume "\normalize p $ 1\ = 1" from this have 1: "(p $ 1) ^ 2 = norm p ^ 2" unfolding normalize_def by (auto split: if_split_asm simp add: power2_eq_iff) moreover have "(p $ 1) ^ 2 + (p $ 2) ^ 2 = norm p ^ 2" using norm_eq_on_real_2_vec by auto ultimately show "p $ 2 = 0" by simp next assume "p $ 2 = 0" from this have "\p $ 1\ = norm p" by (auto simp add: norm_eq_on_real_2_vec) from this show "\normalize p $ 1\ = 1" unfolding normalize_def by simp qed lemma between_normalize_if_distant_from_0: assumes "norm p \ 1" shows "between (0, p) (normalize p)" using assms by (auto simp add: between_mem_segment closed_segment_def normalize_def) lemma between_normalize_if_near_0: assumes "norm p \ 1" shows "between (0, normalize p) p" proof - have "0 \ norm p" by simp from assms have "p = (norm p / norm p) *\<^sub>R p \ 0 \ norm p \ norm p \ 1" by auto from this have "\u. 
p = (u / norm p) *\<^sub>R p \ 0 \ u \ u \ 1" by blast from this show ?thesis by (auto simp add: between_mem_segment closed_segment_def normalize_def) qed subsubsection \Lemmas on @{const radiant_of}\ lemma radiant_of: "0 \ radiant_of p \ radiant_of p < 2 * pi \ of_radiant (radiant_of p) = normalize p" proof - let ?a = "if 0 \ p $ 2 then arccos (normalize p $ 1) else pi + arccos (- (normalize p $ 1))" have "0 \ ?a \ ?a < 2 * pi \ of_radiant ?a = normalize p" proof - have "0 \ ?a" by auto moreover have "?a < 2 * pi" proof cases assume "0 \ p $ 2" from this have "?a \ pi" by simp from this show ?thesis using pi_gt_zero by linarith next assume "\ 0 \ p $ 2" have "arccos (- normalize p $ 1) < pi" proof - have "\normalize p $ 1\ \ 1" using \\ 0 \ p $ 2\ by (simp only: normalize_boundary_iff) from this have "arccos (- normalize p $ 1) \ pi" unfolding arccos_minus_1[symmetric] by (subst arccos_eq_iff) auto moreover have "arccos (- normalize p $ 1) \ pi" by simp ultimately show "arccos (- normalize p $ 1) < pi" by linarith qed from this \\ 0 \ p $ 2\ show ?thesis by simp qed moreover have "of_radiant ?a = normalize p" proof - have "of_radiant ?a $ i = normalize p $ i" for i proof - have "of_radiant ?a $ 1 = normalize p $ 1" unfolding of_radiant_def by (simp add: cos_arccos_abs) moreover have "of_radiant ?a $ 2 = normalize p $ 2" proof cases assume "0 \ p $ 2" have "sin (arccos (normalize p $ 1)) = sqrt (1 - (normalize p $ 1) ^ 2)" by (simp add: sin_arccos_abs) also have "\ = normalize p $ 2" proof - have "1 - (normalize p $ 1)\<^sup>2 = (normalize p $ 2)\<^sup>2" using normalize_square[of p] by auto from this \0 \ p $ 2\ show ?thesis by (simp add: nth_normalize_ge_zero_iff) qed finally show ?thesis using \0 \ p $ 2\ unfolding of_radiant_def by auto next assume "\ 0 \ p $ 2" have "- sin (arccos (- normalize p $ 1)) = - sqrt (1 - (normalize p $ 1)\<^sup>2)" by (simp add: sin_arccos_abs) also have "\ = normalize p $ 2" proof - have "1 - (normalize p $ 1)\<^sup>2 = (normalize p $ 2)\<^sup>2" using normalize_square[of p] by auto from this \\ 0 \ p $ 2\ show ?thesis using nth_normalize_ge_zero_iff by fastforce qed finally show ?thesis using \\ 0 \ p $ 2\ unfolding of_radiant_def by auto qed ultimately show ?thesis by (metis exhaust_2[of i]) qed from this show ?thesis by (simp add: vec_eq_iff) qed ultimately show ?thesis by blast qed moreover { fix \ assume "0 \ \ \ \ < 2 * pi \ of_radiant \ = normalize p" from this have "0 \ \" "\ < 2 * pi" "normalize p = of_radiant \" by auto from this have "cos \ = normalize p $ 1" "sin \ = normalize p $ 2" by auto have "\ = ?a" proof cases assume "0 \ p $ 2" from this have "\ \ pi" using \0 \ \\ \\ < 2 * pi\ \sin \ = normalize p $ 2\ by (simp add: sin_ge_zero_iff[symmetric] nth_normalize_ge_zero_iff) from \0 \ \\ this have "\ = arccos (cos \)" by (simp add: arccos_cos) from \cos \ = normalize p $ 1\ this have "\ = arccos (normalize p $ 1)" by (simp add: arccos_eq_iff) from this show "\ = ?a" using \0 \ p $ 2\ by auto next assume "\ 0 \ p $ 2" from this have "\ > pi" using \0 \ \\ \\ < 2 * pi\ \sin \ = normalize p $ 2\ by (simp add: sin_less_zero_iff[symmetric] nth_normalize_less_zero_iff) from this \\ < 2 * pi\ have "\ - pi = arccos (cos (\ - pi))" by (auto simp only: arccos_cos) from this \cos \ = normalize p $ 1\ have "\ - pi = arccos (- normalize p $ 1)" by simp from this have "\ = pi + arccos (- normalize p $ 1)" by simp from this show "\ = ?a" using \\ 0 \ p $ 2\ by auto qed } ultimately show ?thesis unfolding radiant_of_def by (rule theI) qed lemma 
radiant_of_bounds [simp]: "0 \ radiant_of p" "radiant_of p < 2 * pi" using radiant_of by auto lemma radiant_of_weak_ubound [simp]: "radiant_of p \ 2 * pi" using radiant_of_bounds(2)[of p] by linarith subsubsection \Main Equations for Transforming to Polar Form\ lemma polar_form_eq: "norm p *\<^sub>R of_radiant (radiant_of p) = p" using radiant_of normalize_eq by simp lemma relative_polar_form_eq: "Q + dist P Q *\<^sub>R of_radiant (radiant_of (P - Q)) = P" proof - have "norm (P - Q) *\<^sub>R of_radiant (radiant_of (P - Q)) = P - Q" unfolding polar_form_eq .. moreover have "dist P Q = norm (P - Q)" by (simp add: dist_norm) ultimately show ?thesis by (metis add.commute diff_add_cancel) qed subsection \Ptolemy's Theorem\ lemma dist_circle_segment: assumes "0 \ radius" "0 \ \" "\ \ \" "\ \ 2 * pi" shows "dist (center + radius *\<^sub>R of_radiant \) (center + radius *\<^sub>R of_radiant \) = 2 * radius * sin ((\ - \) / 2)" (is "?lhs = ?rhs") proof - have trigonometry: "(cos \ - cos \)\<^sup>2 + (sin \ - sin \)\<^sup>2 = (2 * sin ((\ - \) / 2))\<^sup>2" proof - have sin_diff_minus: "sin ((\ - \) / 2) = - sin ((\ - \) / 2)" by (simp only: sin_minus[symmetric] minus_divide_left minus_diff_eq) have "(cos \ - cos \)\<^sup>2 + (sin \ - sin \)\<^sup>2 = (2 * sin ((\ + \) / 2) * sin ((\ - \) / 2))\<^sup>2 + (2 * sin ((\ - \) / 2) * cos ((\ + \) / 2))\<^sup>2" by (simp only: cos_diff_cos sin_diff_sin) also have "\ = (2 * sin ((\ - \) / 2))\<^sup>2 * ((sin ((\ + \) / 2))\<^sup>2 + (cos ((\ + \) / 2))\<^sup>2)" unfolding sin_diff_minus by algebra also have "\ = (2 * sin ((\ - \) / 2))\<^sup>2" by simp finally show ?thesis . qed from assms have "0 \ sin ((\ - \) / 2)" by (simp add: sin_ge_zero) have "?lhs = sqrt (radius\<^sup>2 * ((cos \ - cos \)\<^sup>2 + (sin \ - sin \)\<^sup>2))" unfolding dist_eq_on_real_2_vec by simp algebra also have "\ = sqrt (radius\<^sup>2 * (2 * sin ((\ - \) / 2))\<^sup>2)" by (simp add: trigonometry) also have "\ = ?rhs" using \0 \ radius\ \0 \ sin ((\ - \) / 2)\ by (simp add: real_sqrt_mult) finally show ?thesis . qed theorem ptolemy_trigonometric: fixes \\<^sub>1 \\<^sub>2 \\<^sub>3 :: real shows "sin (\\<^sub>1 + \\<^sub>2) * sin (\\<^sub>2 + \\<^sub>3) = sin \\<^sub>1 * sin \\<^sub>3 + sin \\<^sub>2 * sin (\\<^sub>1 + \\<^sub>2 + \\<^sub>3)" proof - have "sin (\\<^sub>1 + \\<^sub>2) * sin (\\<^sub>2 + \\<^sub>3) = ((sin \\<^sub>2)\<^sup>2 + (cos \\<^sub>2)\<^sup>2) * sin \\<^sub>1 * sin \\<^sub>3 + sin \\<^sub>2 * sin (\\<^sub>1 + \\<^sub>2 + \\<^sub>3)" by (simp only: sin_add cos_add) algebra also have "\ = sin \\<^sub>1 * sin \\<^sub>3 + sin \\<^sub>2 * sin (\\<^sub>1 + \\<^sub>2 + \\<^sub>3)" by simp finally show ?thesis . 
qed theorem ptolemy: fixes A B C D center :: "real ^ 2" assumes "dist center A = radius" and "dist center B = radius" assumes "dist center C = radius" and "dist center D = radius" assumes ordering_of_points: "radiant_of (A - center) \ radiant_of (B - center)" "radiant_of (B - center) \ radiant_of (C - center)" "radiant_of (C - center) \ radiant_of (D - center)" shows "dist A C * dist B D = dist A B * dist C D + dist A D * dist B C" proof - from \dist center A = radius\ have "0 \ radius" by auto define \ \ \ \ where "\ = radiant_of (A - center)" and "\ = radiant_of (B - center)" and "\ = radiant_of (C - center)" and "\ = radiant_of (D - center)" from ordering_of_points have angle_basics: "\ \ \" "\ \ \" "\ \ \" "0 \ \" "\ \ 2 * pi" "0 \ \" "\ \ 2 * pi" "0 \ \" "\ \ 2 * pi" "0 \ \" "\ \ 2 * pi" unfolding \_def \_def \_def \_def by auto from assms(1-4) have "A = center + radius *\<^sub>R of_radiant \" "B = center + radius *\<^sub>R of_radiant \" "C = center + radius *\<^sub>R of_radiant \" "D = center + radius *\<^sub>R of_radiant \" unfolding \_def \_def \_def \_def using relative_polar_form_eq dist_commute by metis+ from this have dist_eqs: "dist A C = 2 * radius * sin ((\ - \) / 2)" "dist B D = 2 * radius * sin ((\ - \) / 2)" "dist A B = 2 * radius * sin ((\ - \) / 2)" "dist C D = 2 * radius * sin ((\ - \) / 2)" "dist A D = 2 * radius * sin ((\ - \) / 2)" "dist B C = 2 * radius * sin ((\ - \) / 2)" using angle_basics \radius \ 0\ dist_circle_segment by (auto) have "dist A C * dist B D = 4 * radius ^ 2 * sin ((\ - \) / 2) * sin ((\ - \) / 2)" unfolding dist_eqs by (simp add: power2_eq_square) also have "\ = 4 * radius ^ 2 * (sin ((\ - \) / 2) * sin ((\ - \) / 2) + sin ((\ - \) / 2) * sin ((\ - \) / 2))" proof - define \\<^sub>1 \\<^sub>2 \\<^sub>3 where "\\<^sub>1 = (\ - \) / 2" and "\\<^sub>2 = (\ - \) / 2" and "\\<^sub>3 = (\ - \) / 2" have "(\ - \) / 2 = \\<^sub>1 + \\<^sub>2" and "(\ - \) / 2 = \\<^sub>2 + \\<^sub>3" and "(\ - \) / 2 = \\<^sub>1 + \\<^sub>2 + \\<^sub>3" unfolding \\<^sub>1_def \\<^sub>2_def \\<^sub>3_def by (auto simp add: field_simps) have "sin ((\ - \) / 2) * sin ((\ - \) / 2) = sin (\\<^sub>1 + \\<^sub>2) * sin (\\<^sub>2 + \\<^sub>3)" using \(\ - \) / 2 = \\<^sub>1 + \\<^sub>2\ \(\ - \) / 2 = \\<^sub>2 + \\<^sub>3\ by (simp only:) also have "\ = sin \\<^sub>1 * sin \\<^sub>3 + sin \\<^sub>2 * sin (\\<^sub>1 + \\<^sub>2 + \\<^sub>3)" by (rule ptolemy_trigonometric) also have "\ = (sin ((\ - \) / 2) * sin ((\ - \) / 2) + sin ((\ - \) / 2) * sin ((\ - \) / 2))" using \\<^sub>1_def \\<^sub>2_def \\<^sub>3_def \(\ - \) / 2 = \\<^sub>1 + \\<^sub>2 + \\<^sub>3\ by (simp only:) finally show ?thesis by simp qed also have "\ = dist A B * dist C D + dist A D * dist B C" unfolding dist_eqs by (simp add: distrib_left power2_eq_square) finally show ?thesis . 
qed end diff --git a/thys/Tarskis_Geometry/Metric.thy b/thys/Tarskis_Geometry/Metric.thy --- a/thys/Tarskis_Geometry/Metric.thy +++ b/thys/Tarskis_Geometry/Metric.thy @@ -1,64 +1,64 @@ (* Title: Metric and semimetric spaces Author: Tim Makarios , 2012 Maintainer: Tim Makarios *) section "Metric and semimetric spaces" theory Metric -imports "HOL-Analysis.Analysis" +imports "HOL-Analysis.Multivariate_Analysis" begin locale semimetric = fixes dist :: "'p \ 'p \ real" assumes nonneg [simp]: "dist x y \ 0" and eq_0 [simp]: "dist x y = 0 \ x = y" and symm: "dist x y = dist y x" begin lemma refl [simp]: "dist x x = 0" by simp end locale metric = fixes dist :: "'p \ 'p \ real" assumes [simp]: "dist x y = 0 \ x = y" and triangle: "dist x z \ dist y x + dist y z" sublocale metric < semimetric proof { fix w have "dist w w = 0" by simp } note [simp] = this fix x y show "0 \ dist x y" proof - from triangle [of y y x] show "0 \ dist x y" by simp qed show "dist x y = 0 \ x = y" by simp show "dist x y = dist y x" proof - { fix w z have "dist w z \ dist z w" proof - from triangle [of w z z] show "dist w z \ dist z w" by simp qed } hence "dist x y \ dist y x" and "dist y x \ dist x y" by simp+ thus "dist x y = dist y x" by simp qed qed definition norm_dist :: "('a::real_normed_vector) \ 'a \ real" where [simp]: "norm_dist x y \ norm (x - y)" interpretation norm_metric: metric norm_dist proof fix x y show "norm_dist x y = 0 \ x = y" by simp fix z from norm_triangle_ineq [of "x - y" "y - z"] have "norm (x - z) \ norm (x - y) + norm (y - z)" by simp with norm_minus_commute [of x y] show "norm_dist x z \ norm_dist y x + norm_dist y z" by simp qed end diff --git a/thys/Triangle/Angles.thy b/thys/Triangle/Angles.thy --- a/thys/Triangle/Angles.thy +++ b/thys/Triangle/Angles.thy @@ -1,253 +1,253 @@ (* File: Angles.thy Author: Manuel Eberl Definition of angles between vectors and between three points. 
*) section \Definition of angles\ theory Angles imports - "HOL-Analysis.Analysis" + "HOL-Analysis.Multivariate_Analysis" begin lemma collinear_translate_iff: "collinear (((+) a) ` A) \ collinear A" by (auto simp: collinear_def) definition vangle where "vangle u v = (if u = 0 \ v = 0 then pi / 2 else arccos (u \ v / (norm u * norm v)))" definition angle where "angle a b c = vangle (a - b) (c - b)" lemma angle_altdef: "angle a b c = arccos ((a - b) \ (c - b) / (dist a b * dist c b))" by (simp add: angle_def vangle_def dist_norm) lemma vangle_0_left [simp]: "vangle 0 v = pi / 2" and vangle_0_right [simp]: "vangle u 0 = pi / 2" by (simp_all add: vangle_def) lemma vangle_refl [simp]: "u \ 0 \ vangle u u = 0" by (simp add: vangle_def dot_square_norm power2_eq_square) lemma angle_refl [simp]: "angle a a b = pi / 2" "angle a b b = pi / 2" by (simp_all add: angle_def) lemma angle_refl_mid [simp]: "a \ b \ angle a b a = 0" by (simp add: angle_def) lemma cos_vangle: "cos (vangle u v) = u \ v / (norm u * norm v)" unfolding vangle_def using Cauchy_Schwarz_ineq2[of u v] by (auto simp: field_simps) lemma cos_angle: "cos (angle a b c) = (a - b) \ (c - b) / (dist a b * dist c b)" by (simp add: angle_def cos_vangle dist_norm) lemma inner_conv_angle: "(a - b) \ (c - b) = dist a b * dist c b * cos (angle a b c)" by (simp add: cos_angle) lemma vangle_commute: "vangle u v = vangle v u" by (simp add: vangle_def inner_commute mult.commute) lemma angle_commute: "angle a b c = angle c b a" by (simp add: angle_def vangle_commute) lemma vangle_nonneg: "vangle u v \ 0" and vangle_le_pi: "vangle u v \ pi" using Cauchy_Schwarz_ineq2[of u v] by (auto simp: vangle_def field_simps intro!: arccos_lbound arccos_ubound) lemmas vangle_bounds = vangle_nonneg vangle_le_pi lemma angle_nonneg: "angle a b c \ 0" and angle_le_pi: "angle a b c \ pi" using vangle_bounds unfolding angle_def by blast+ lemmas angle_bounds = angle_nonneg angle_le_pi lemma sin_vangle_nonneg: "sin (vangle u v) \ 0" using vangle_bounds by (rule sin_ge_zero) lemma sin_angle_nonneg: "sin (angle a b c) \ 0" using angle_bounds by (rule sin_ge_zero) lemma vangle_eq_0D: assumes "vangle u v = 0" shows "norm u *\<^sub>R v = norm v *\<^sub>R u" proof - from assms have "u \ v = norm u * norm v" using arccos_eq_iff[of "(u \ v) / (norm u * norm v)" 1] Cauchy_Schwarz_ineq2[of u v] by (fastforce simp: vangle_def split: if_split_asm) thus ?thesis by (subst (asm) norm_cauchy_schwarz_eq) simp_all qed lemma vangle_eq_piD: assumes "vangle u v = pi" shows "norm u *\<^sub>R v + norm v *\<^sub>R u = 0" proof - from assms have "(-u) \ v = norm (-u) * norm v" using arccos_eq_iff[of "(u \ v) / (norm u * norm v)" "-1"] Cauchy_Schwarz_ineq2[of u v] by (simp add: field_simps vangle_def split: if_split_asm) thus ?thesis by (subst (asm) norm_cauchy_schwarz_eq) simp_all qed lemma dist_triangle_eq: fixes a b c :: "'a :: real_inner" shows "(dist a c = dist a b + dist b c) \ dist a b *\<^sub>R (c - b) + dist b c *\<^sub>R (a - b) = 0" using norm_triangle_eq[of "b - a" "c - b"] by (simp add: dist_norm norm_minus_commute algebra_simps) lemma angle_eq_pi_imp_dist_additive: assumes "angle a b c = pi" shows "dist a c = dist a b + dist b c" using vangle_eq_piD[OF assms[unfolded angle_def]] by (subst dist_triangle_eq) (simp add: dist_norm norm_minus_commute) lemma orthogonal_iff_vangle: "orthogonal u v \ vangle u v = pi / 2" using arccos_eq_iff[of "u \ v / (norm u * norm v)" 0] Cauchy_Schwarz_ineq2[of u v] by (auto simp: vangle_def orthogonal_def) lemma cos_minus1_imp_pi: assumes "cos x = -1" "x \ 
0" "x < 3 * pi" shows "x = pi" proof - have "cos (x - pi) = 1" by (simp add: assms) then obtain n :: int where n: "of_int n = (x / pi - 1) / 2" by (subst (asm) cos_one_2pi_int) (auto simp: field_simps) also from assms have "\ \ {-1<..<1}" by (auto simp: field_simps) finally have "n = 0" by simp with n show ?thesis by simp qed lemma vangle_eqI: assumes "u \ 0" "v \ 0" "w \ 0" "x \ 0" assumes "(u \ v) * norm w * norm x = (w \ x) * norm u * norm v" shows "vangle u v = vangle w x" using assms Cauchy_Schwarz_ineq2[of u v] Cauchy_Schwarz_ineq2[of w x] unfolding vangle_def by (auto simp: arccos_eq_iff field_simps) lemma angle_eqI: assumes "a \ b" "a \ c" "d \ e" "d \ f" assumes "((b-a) \ (c-a)) * dist d e * dist d f = ((e-d) \ (f-d)) * dist a b * dist a c" shows "angle b a c = angle e d f" using assms unfolding angle_def by (intro vangle_eqI) (simp_all add: dist_norm norm_minus_commute) lemma cos_vangle_eqD: "cos (vangle u v) = cos (vangle w x) \ vangle u v = vangle w x" by (rule cos_inj_pi) (simp_all add: vangle_bounds) lemma cos_angle_eqD: "cos (angle a b c) = cos (angle d e f) \ angle a b c = angle d e f" unfolding angle_def by (rule cos_vangle_eqD) lemma sin_vangle_zero_iff: "sin (vangle u v) = 0 \ vangle u v \ {0, pi}" proof assume "sin (vangle u v) = 0" then obtain n :: int where n: "of_int n = vangle u v / pi" by (subst (asm) sin_zero_iff_int2) auto also have "\ \ {0..1}" using vangle_bounds by (auto simp: field_simps) finally have "n \ {0,1}" by auto thus "vangle u v \ {0,pi}" using n by (auto simp: field_simps) qed auto lemma sin_angle_zero_iff: "sin (angle a b c) = 0 \ angle a b c \ {0, pi}" unfolding angle_def by (simp only: sin_vangle_zero_iff) lemma vangle_collinear: "vangle u v \ {0, pi} \ collinear {0, u, v}" apply (subst norm_cauchy_schwarz_equal [symmetric]) apply (subst norm_cauchy_schwarz_abs_eq) apply (auto dest!: vangle_eq_0D vangle_eq_piD simp: eq_neg_iff_add_eq_0) done lemma angle_collinear: "angle a b c \ {0, pi} \ collinear {a, b, c}" apply (unfold angle_def, drule vangle_collinear) apply (subst collinear_translate_iff[symmetric, of _ "-b"]) apply (auto simp: insert_commute) done lemma not_collinear_vangle: "\collinear {0,u,v} \ vangle u v \ {0<.. vangle u v = pi") auto lemma not_collinear_angle: "\collinear {a,b,c} \ angle a b c \ {0<.. 
angle a b c = pi") auto subsection\Contributions from Lukas Bulwahn\ lemma vangle_scales: assumes "0 < c" shows "vangle (c *\<^sub>R v\<^sub>1) v\<^sub>2 = vangle v\<^sub>1 v\<^sub>2" using assms unfolding vangle_def by auto lemma vangle_inverse: "vangle (- v\<^sub>1) v\<^sub>2 = pi - vangle v\<^sub>1 v\<^sub>2" proof - have "\v\<^sub>1 \ v\<^sub>2 / (norm v\<^sub>1 * norm v\<^sub>2)\ \ 1" proof cases assume "v\<^sub>1 \ 0 \ v\<^sub>2 \ 0" from this show ?thesis by (simp add: Cauchy_Schwarz_ineq2) next assume "\ (v\<^sub>1 \ 0 \ v\<^sub>2 \ 0)" from this show ?thesis by auto qed from this show ?thesis unfolding vangle_def by (simp add: arccos_minus_abs) qed lemma orthogonal_iff_angle: shows "orthogonal (A - B) (C - B) \ angle A B C = pi / 2" unfolding angle_def by (auto simp only: orthogonal_iff_vangle) lemma angle_inverse: assumes "between (A, C) B" assumes "A \ B" "B \ C" shows "angle A B D = pi - angle C B D" proof - from \between (A, C) B\ obtain u where u: "u \ 0" "u \ 1" and X: "B = u *\<^sub>R A + (1 - u) *\<^sub>R C" by (metis add.commute betweenE between_commute) from \A \ B\ \B \ C\ X have "u \ 0" "u \ 1" by auto have "0 < ((1 - u) / u)" using \u \ 0\ \u \ 1\ \u \ 0\ \u \ 1\ by simp from X have "A - B = - (1 - u) *\<^sub>R (C - A)" by (simp add: real_vector.scale_right_diff_distrib real_vector.scale_left_diff_distrib) moreover from X have "C - B = u *\<^sub>R (C - A)" by (simp add: scaleR_diff_left real_vector.scale_right_diff_distrib) ultimately have "A - B = - (((1 - u) / u) *\<^sub>R (C - B))" using \u \ 0\ by simp (metis minus_diff_eq real_vector.scale_minus_left) from this have "vangle (A - B) (D - B) = pi - vangle (C - B) (D - B)" using \0 < (1 - u) / u\ by (simp add: vangle_inverse vangle_scales) from this show ?thesis unfolding angle_def by simp qed lemma strictly_between_implies_angle_eq_pi: assumes "between (A, C) B" assumes "A \ B" "B \ C" shows "angle A B C = pi" proof - from \between (A, C) B\ obtain u where u: "u \ 0" "u \ 1" and X: "B = u *\<^sub>R A + (1 - u) *\<^sub>R C" by (metis add.commute betweenE between_commute) from \A \ B\ \B \ C\ X have "u \ 0" "u \ 1" by auto from \A \ B\ \B \ C\ \between (A, C) B\ have "A \ C" by auto from X have "A - B = - (1 - u) *\<^sub>R (C - A)" by (simp add: real_vector.scale_right_diff_distrib real_vector.scale_left_diff_distrib) moreover from this have "dist A B = norm ((1 - u) *\<^sub>R (C - A))" using \u \ 0\ \u \ 1\ by (simp add: dist_norm) moreover from X have "C - B = u *\<^sub>R (C - A)" by (simp add: scaleR_diff_left real_vector.scale_right_diff_distrib) moreover from this have "dist C B = norm (u *\<^sub>R (C - A))" by (simp add: dist_norm) ultimately have "(A - B) \ (C - B) / (dist A B * dist C B) = u * (u - 1) / (\1 - u\ * \u\)" using \A \ C\ by (simp add: dot_square_norm power2_eq_square) also have "\ = - 1" using \u \ 0\ \u \ 1\ \u \ 0\ \u \ 1\ by (simp add: divide_eq_minus_1_iff) finally show ?thesis unfolding angle_altdef by simp qed end diff --git a/thys/Triangle/Triangle.thy b/thys/Triangle/Triangle.thy --- a/thys/Triangle/Triangle.thy +++ b/thys/Triangle/Triangle.thy @@ -1,354 +1,352 @@ (* File: Triangle.thy Author: Manuel Eberl Sine and cosine laws, angle sum in a triangle, congruence theorems, Isosceles Triangle Theorem *) section \Basic Properties of Triangles\ theory Triangle imports - Complex_Main - "HOL-Analysis.Topology_Euclidean_Space" Angles begin text \ We prove a number of basic geometric properties of triangles. All theorems hold in any real inner product space. 
\ subsection \Thales' theorem\ theorem thales: fixes A B C :: "'a :: real_inner" assumes "dist B (midpoint A C) = dist A C / 2" shows "orthogonal (A - B) (C - B)" proof - have "dist A C ^ 2 = dist B (midpoint A C) ^ 2 * 4" by (subst assms) (simp add: field_simps power2_eq_square) thus ?thesis by (auto simp: orthogonal_def dist_norm power2_norm_eq_inner midpoint_def algebra_simps inner_commute) qed subsection \Sine and cosine laws\ text \ The proof of the Law of Cosines follows trivially from the definition of the angle, the definition of the norm in vector spaces with an inner product and the bilinearity of the inner product. \ lemma cosine_law_vector: "norm (u - v) ^ 2 = norm u ^ 2 + norm v ^ 2 - 2 * norm u * norm v * cos (vangle u v)" by (simp add: power2_norm_eq_inner cos_vangle algebra_simps inner_commute) lemma cosine_law_triangle: "dist b c ^ 2 = dist a b ^ 2 + dist a c ^ 2 - 2 * dist a b * dist a c * cos (angle b a c)" using cosine_law_vector[of "b - a" "c - a"] by (simp add: dist_norm angle_def vangle_commute norm_minus_commute) text \ According to our definition, angles are always between $0$ and $\pi$ and therefore, the sign of an angle is always non-negative. We can therefore look at $\sin(\alpha)^2$, which we can express in terms of $\cos(\alpha)$ using the identity $\sin(\alpha)^2 + \cos(\alpha)^2 = 1$. The remaining proof is then a trivial consequence of the definitions. \ lemma sine_law_triangle: "sin (angle a b c) * dist b c = sin (angle b a c) * dist a c" (is "?A = ?B") proof (cases "a = b") assume neq: "a \ b" show ?thesis proof (rule power2_eq_imp_eq) from neq have "(sin (angle a b c) * dist b c) ^ 2 * dist a b ^ 2 = dist a b ^ 2 * dist b c ^ 2 - ((a - b) \ (c - b)) ^ 2" by (simp add: sin_squared_eq cos_angle dist_commute field_simps) also have "\ = dist a b ^ 2 * dist a c ^ 2 - ((b - a) \ (c - a)) ^ 2" by (simp only: dist_norm power2_norm_eq_inner) (simp add: power2_eq_square algebra_simps inner_commute) also from neq have "\ = (sin (angle b a c) * dist a c) ^ 2 * dist a b ^ 2" by (simp add: sin_squared_eq cos_angle dist_commute field_simps) finally show "?A^2 = ?B^2" using neq by (subst (asm) mult_cancel_right) simp_all qed (auto intro!: mult_nonneg_nonneg sin_angle_nonneg) qed simp_all text \ The following forms of the Law of Sines/Cosines are more convenient for eliminating sines/cosines from a goal completely. \ lemma cosine_law_triangle': "2 * dist a b * dist a c * cos (angle b a c) = (dist a b ^ 2 + dist a c ^ 2 - dist b c ^ 2)" using cosine_law_triangle[of b c a] by simp lemma cosine_law_triangle'': "cos (angle b a c) = (dist a b ^ 2 + dist a c ^ 2 - dist b c ^ 2) / (2 * dist a b * dist a c)" using cosine_law_triangle[of b c a] by simp lemma sine_law_triangle': "b \ c \ sin (angle a b c) = sin (angle b a c) * dist a c / dist b c" using sine_law_triangle[of a b c] by (simp add: divide_simps) lemma sine_law_triangle'': "b \ c \ sin (angle c b a) = sin (angle b a c) * dist a c / dist b c" using sine_law_triangle[of a b c] by (simp add: divide_simps angle_commute) subsection \Sum of angles\ context begin private lemma gather_squares: "a * (a * b) = a^2 * (b :: real)" by (simp_all add: power2_eq_square) private lemma eval_power: "x ^ numeral n = x * x ^ pred_numeral n" by (subst numeral_eq_Suc, subst power_Suc) simp text \ The proof that the sum of the angles in a triangle is $\pi$ is somewhat more involved. 
Following the HOL Light proof by John Harrison, we first prove that $\cos(\alpha + \beta + \gamma) = -1$ and $\alpha + \beta + \gamma \in [0;3\pi)$, which then implies the theorem. The main work is proving $\cos(\alpha + \beta + \gamma)$. This is done using the addition theorems for the sine and cosine, then using the Laws of Sines to eliminate all $\sin$ terms save $\sin(\gamma)^2$, which only appears squared in the remaining goal. We then use $\sin(\gamma)^2 = 1 - \cos(\gamma)^2$ to eliminate this term and apply the law of cosines to eliminate this term as well. The remaining goal is a non-linear equation containing only the length of the sides of the triangle. It can be shown by simple algebraic rewriting. \ lemma angle_sum_triangle: assumes "a \ b \ b \ c \ a \ c" shows "angle c a b + angle a b c + angle b c a = pi" proof (rule cos_minus1_imp_pi) show "cos (angle c a b + angle a b c + angle b c a) = - 1" proof (cases "a \ b") case True thus "cos (angle c a b + angle a b c + angle b c a) = -1" apply (simp add: cos_add sin_add cosine_law_triangle'' field_simps sine_law_triangle''[of a b c] sine_law_triangle''[of b a c] angle_commute dist_commute gather_squares sin_squared_eq) apply (simp add: eval_power algebra_simps dist_commute) done qed (insert assms, auto) show "angle c a b + angle a b c + angle b c a < 3 * pi" proof (rule ccontr) assume "\(angle c a b + angle a b c + angle b c a < 3 * pi)" with angle_le_pi[of c a b] angle_le_pi[of a b c] angle_le_pi[of b c a] have A: "angle c a b = pi" "angle a b c = pi" by simp_all thus False using angle_eq_pi_imp_dist_additive[of c a b] angle_eq_pi_imp_dist_additive[of a b c] by (simp add: dist_commute) qed qed (auto intro!: add_nonneg_nonneg angle_nonneg) end subsection \Congruence Theorems\ text \ If two triangles agree on two angles at a non-degenerate side, the third angle must also be equal. \ lemma similar_triangle_aa: assumes "b1 \ c1" "b2 \ c2" assumes "angle a1 b1 c1 = angle a2 b2 c2" assumes "angle b1 c1 a1 = angle b2 c2 a2" shows "angle b1 a1 c1 = angle b2 a2 c2" proof - from assms angle_sum_triangle[of a1 b1 c1] angle_sum_triangle[of a2 b2 c2, symmetric] show ?thesis by (auto simp: algebra_simps angle_commute) qed text \ A triangle is defined by its three angles and the lengths of three sides up to congruence. Two triangles are congruent if they have their angles are the same and their sides have the same length. 
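Writing $\left|x\,y\right|$ for the distance between $x$ and $y$ and $\angle\,x\,y\,z$ for the angle at the middle point $y$, the locale below records the side equalities
\[ \left|a_1 b_1\right| = \left|a_2 b_2\right|, \qquad \left|a_1 c_1\right| = \left|a_2 c_2\right|, \qquad \left|b_1 c_1\right| = \left|b_2 c_2\right| \]
together with the equalities of the corresponding three angles $\angle\,b_1 a_1 c_1$, $\angle\,a_1 b_1 c_1$ and $\angle\,a_1 c_1 b_1$.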
\ locale congruent_triangle = fixes a1 b1 c1 :: "'a :: real_inner" and a2 b2 c2 :: "'b :: real_inner" assumes sides': "dist a1 b1 = dist a2 b2" "dist a1 c1 = dist a2 c2" "dist b1 c1 = dist b2 c2" and angles': "angle b1 a1 c1 = angle b2 a2 c2" "angle a1 b1 c1 = angle a2 b2 c2" "angle a1 c1 b1 = angle a2 c2 b2" begin lemma sides: "dist a1 b1 = dist a2 b2" "dist a1 c1 = dist a2 c2" "dist b1 c1 = dist b2 c2" "dist b1 a1 = dist a2 b2" "dist c1 a1 = dist a2 c2" "dist c1 b1 = dist b2 c2" "dist a1 b1 = dist b2 a2" "dist a1 c1 = dist c2 a2" "dist b1 c1 = dist c2 b2" "dist b1 a1 = dist b2 a2" "dist c1 a1 = dist c2 a2" "dist c1 b1 = dist c2 b2" using sides' by (simp_all add: dist_commute) lemma angles: "angle b1 a1 c1 = angle b2 a2 c2" "angle a1 b1 c1 = angle a2 b2 c2" "angle a1 c1 b1 = angle a2 c2 b2" "angle c1 a1 b1 = angle b2 a2 c2" "angle c1 b1 a1 = angle a2 b2 c2" "angle b1 c1 a1 = angle a2 c2 b2" "angle b1 a1 c1 = angle c2 a2 b2" "angle a1 b1 c1 = angle c2 b2 a2" "angle a1 c1 b1 = angle b2 c2 a2" "angle c1 a1 b1 = angle c2 a2 b2" "angle c1 b1 a1 = angle c2 b2 a2" "angle b1 c1 a1 = angle b2 c2 a2" using angles' by (simp_all add: angle_commute) end lemmas congruent_triangleD = congruent_triangle.sides congruent_triangle.angles text \ Given two triangles that agree on a subset of its side lengths and angles that are sufficient to define a triangle uniquely up to congruence, one can conclude that they must also agree on all remaining quantities, i.e. that they are congruent. The following four congruence theorems state what constitutes such a uniquely-defining subset of quantities. Each theorem states in its name which quantities are required and in which order (clockwise or counter-clockwise): an ``s'' stands for a side, an ``a'' stands for an angle. The lemma ``congruent-triangleI-sas, for example, requires that two adjacent sides and the angle inbetween are the same in both triangles. 
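In these terms, ``sss'' requires the three side lengths, ``sas'' two sides together with the angle enclosed by them, ``aas'' two angles together with a side adjacent to only one of them, and ``asa'' two angles together with the side joining their vertices.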
\ lemma congruent_triangleI_sss: fixes a1 b1 c1 :: "'a :: real_inner" and a2 b2 c2 :: "'b :: real_inner" assumes "dist a1 b1 = dist a2 b2" assumes "dist b1 c1 = dist b2 c2" assumes "dist a1 c1 = dist a2 c2" shows "congruent_triangle a1 b1 c1 a2 b2 c2" proof - have A: "angle a1 b1 c1 = angle a2 b2 c2" if "dist a1 b1 = dist a2 b2" "dist b1 c1 = dist b2 c2" "dist a1 c1 = dist a2 c2" for a1 b1 c1 :: 'a and a2 b2 c2 :: 'b proof - from that cosine_law_triangle''[of a1 b1 c1] cosine_law_triangle''[of a2 b2 c2] show ?thesis by (intro cos_angle_eqD) (simp add: dist_commute) qed from assms show ?thesis by unfold_locales (auto intro!: A simp: dist_commute) qed lemmas congruent_triangle_sss = congruent_triangleD[OF congruent_triangleI_sss] lemma congruent_triangleI_sas: assumes "dist a1 b1 = dist a2 b2" assumes "dist b1 c1 = dist b2 c2" assumes "angle a1 b1 c1 = angle a2 b2 c2" shows "congruent_triangle a1 b1 c1 a2 b2 c2" proof (rule congruent_triangleI_sss) show "dist a1 c1 = dist a2 c2" proof (rule power2_eq_imp_eq) from cosine_law_triangle[of a1 c1 b1] cosine_law_triangle[of a2 c2 b2] assms show "(dist a1 c1)\<^sup>2 = (dist a2 c2)\<^sup>2" by (simp add: dist_commute) qed simp_all qed fact+ lemmas congruent_triangle_sas = congruent_triangleD[OF congruent_triangleI_sas] lemma congruent_triangleI_aas: assumes "angle a1 b1 c1 = angle a2 b2 c2" assumes "angle b1 c1 a1 = angle b2 c2 a2" assumes "dist a1 b1 = dist a2 b2" assumes "\collinear {a1,b1,c1}" shows "congruent_triangle a1 b1 c1 a2 b2 c2" proof (rule congruent_triangleI_sas) from \\collinear {a1,b1,c1}\ have neq: "a1 \ b1" by auto with assms(3) have neq': "a2 \ b2" by auto have A: "angle c1 a1 b1 = angle c2 a2 b2" using neq neq' assms using angle_sum_triangle[of a1 b1 c1] angle_sum_triangle[of a2 b2 c2] by simp from assms have B: "angle b1 a1 c1 \ {0<..collinear {a1, b1, c1}" shows "congruent_triangle a1 b1 c1 a2 b2 c2" proof (rule congruent_triangleI_aas) from assms have neq: "a1 \ b1" "a2 \ b2" by auto show "angle b1 c1 a1 = angle b2 c2 a2" by (rule similar_triangle_aa) (insert assms neq, simp_all add: angle_commute) qed fact+ lemmas congruent_triangle_asa = congruent_triangleD[OF congruent_triangleI_asa] subsection \Isosceles Triangle Theorem\ text \ We now prove the Isosceles Triangle Theorem: in a triangle where two sides have the same length, the two angles that are adjacent to only one of the two sides must be equal. \ lemma isosceles_triangle: assumes "dist a c = dist b c" shows "angle b a c = angle a b c" by (rule congruent_triangle_sss) (insert assms, simp_all add: dist_commute) text \ For the non-degenerate case (i.e. the three points are not collinear), We also prove the converse. 
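Without this assumption the converse can fail: for instance, for the collinear points $a = 0$, $b = 4$, $c = 1$ on the real line both angles are $0$, yet the distances of $a$ and $b$ to $c$ are $1$ and $3$, respectively.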
\ lemma isosceles_triangle_converse: assumes "angle a b c = angle b a c" "\collinear {a,b,c}" shows "dist a c = dist b c" by (rule congruent_triangle_asa[OF assms(1) _ _ assms(2)]) (simp_all add: dist_commute angle_commute assms) subsection\Contributions by Lukas Bulwahn\ lemma Pythagoras: fixes A B C :: "'a :: real_inner" assumes "orthogonal (A - C) (B - C)" shows "(dist B C) ^ 2 + (dist C A) ^ 2 = (dist A B) ^ 2" proof - from assms have "cos (angle A C B) = 0" by (metis orthogonal_iff_angle cos_pi_half) from this show ?thesis by (simp add: cosine_law_triangle[of A B C] dist_commute) qed lemma isosceles_triangle_orthogonal_on_midpoint: fixes A B C :: "'a :: euclidean_space" assumes "dist C A = dist C B" shows "orthogonal (C - midpoint A B) (A - midpoint A B)" proof (cases "A = B") assume "A \ B" let ?M = "midpoint A B" from \A \ B\ have "angle A ?M C = pi - angle B ?M C" by (intro angle_inverse between_midpoint) (auto simp: between_midpoint eq_commute[of _ "midpoint A B" for A B]) moreover have "angle A ?M C = angle C ?M B" proof - have congruence: "congruent_triangle C A ?M C B ?M" proof (rule congruent_triangleI_sss) show "dist C A = dist C B" using assms . show "dist A ?M = dist B ?M" by (simp add: dist_midpoint) show "dist C (midpoint A B) = dist C (midpoint A B)" .. qed from this show ?thesis by (simp add: congruent_triangle.angles(6)) qed ultimately have "angle A ?M C = pi / 2" by (simp add: angle_commute) from this show ?thesis by (simp add: orthogonal_iff_angle orthogonal_commute) next assume "A = B" from this show ?thesis by (simp add: orthogonal_clauses(1)) qed end diff --git a/thys/UpDown_Scheme/Grid_Point.thy b/thys/UpDown_Scheme/Grid_Point.thy --- a/thys/UpDown_Scheme/Grid_Point.thy +++ b/thys/UpDown_Scheme/Grid_Point.thy @@ -1,186 +1,186 @@ section \ Grid Points \ theory Grid_Point -imports "HOL-Analysis.Analysis" +imports "HOL-Analysis.Multivariate_Analysis" begin type_synonym grid_point = "(nat \ int) list" definition lv :: "grid_point \ nat \ nat" where "lv p d = fst (p ! d)" definition ix :: "grid_point \ nat \ int" where "ix p d = snd (p ! d)" definition level :: "grid_point \ nat" where "level p = (\ i < length p. lv p i)" lemma level_all_eq: assumes "\d. d < length p \ lv p d = lv p' d" and "length p = length p'" shows "level p' = level p" unfolding level_def using assms by auto datatype dir = left | right fun sgn :: "dir \ int" where "sgn left = -1" | "sgn right = 1" fun inv :: "dir \ dir" where "inv left = right" | "inv right = left" lemma inv_inv[simp]: "inv (inv dir) = dir" by (cases dir) simp_all lemma sgn_inv[simp]: "sgn (inv dir) = - sgn dir" by (cases dir, auto) definition child :: "grid_point \ dir \ nat \ grid_point" where "child p dir d = p[d := (lv p d + 1, 2 * (ix p d) + sgn dir)]" lemma child_length[simp]: "length (child p dir d) = length p" unfolding child_def by simp lemma child_odd[simp]: "d < length p \ odd (ix (child p dir d) d)" unfolding child_def ix_def by (cases dir, auto) lemma child_eq: "p ! d = (l, i) \ \ j. child p dir d = p[d := (l + 1, j)]" by (auto simp add: child_def ix_def lv_def) lemma child_other: "d \ d' \ child p dir d ! d' = p ! d'" unfolding child_def lv_def ix_def by (cases "d' < length p", auto) lemma child_invariant: assumes "d' < length p" shows "(child p dir d ! d' = p ! d') = (d \ d')" proof - obtain l i where "p ! d' = (l, i)" using prod.exhaust . 
with assms show ?thesis unfolding child_def ix_def lv_def by auto qed lemma child_single_level: "d < length p \ lv (child p dir d) d > lv p d" unfolding lv_def child_def by simp lemma child_lv: "d < length p \ lv (child p dir d) d = lv p d + 1" unfolding child_def lv_def by simp lemma child_lv_other: assumes "d' \ d" shows "lv (child p dir d') d = lv p d" using child_other[OF assms] unfolding lv_def by simp lemma child_ix_left: "d < length p \ ix (child p left d) d = 2 * ix p d - 1" unfolding child_def ix_def by simp lemma child_ix_right: "d < length p \ ix (child p right d) d = 2 * ix p d + 1" unfolding child_def ix_def by simp lemma child_ix: "d < length p \ ix (child p dir d) d = 2 * ix p d + sgn dir" unfolding child_def ix_def by simp lemma child_level[simp]: assumes "d < length p" shows "level (child p dir d) = level p + 1" proof - have inter: "{0.. {d} = {d}" using assms by auto have "level (child p dir d) = (\ d' = 0.. {d} then lv p d + 1 else lv p d')" by (auto intro!: sum.cong simp add: child_lv_other child_lv level_def) moreover have "level p + 1 = (\ d' = 0.. {d} then lv p d else lv p d') + 1" by (auto intro!: sum.cong simp add: child_lv_other child_lv level_def) ultimately show ?thesis unfolding sum.If_cases[OF finite_atLeastLessThan] inter using assms by auto qed lemma child_ex_neighbour: "\ b'. child b dir d = child b' (inv dir) d" proof show "child b dir d = child (b[d := (lv b d, ix b d + sgn dir)]) (inv dir) d" unfolding child_def ix_def lv_def by (cases "d < length b", auto simp add: algebra_simps) qed lemma child_level_gt[simp]: "level (child p dir d) >= level p" by (cases "d < length p", simp, simp add: child_def) lemma child_estimate_child: assumes "d < length p" and "l \ lv p d" and i'_range: "ix p d < (i + 1) * 2^(lv p d - l) \ ix p d > (i - 1) * 2^(lv p d - l)" (is "?top p \ ?bottom p") and is_child: "p' = child p dir d" shows "?top p' \ ?bottom p'" proof from is_child and \d < length p\ have "lv p' d = lv p d + 1" by (auto simp add: child_def ix_def lv_def) hence "lv p' d - l = lv p d - l + 1" using \lv p d >= l\ by auto hence pow_l'': "(2^(lv p' d - l) :: int) = 2 * 2^(lv p d - l)" by auto show "?top p'" proof - from is_child and \d < length p\ have "ix p' d \ 2 * (ix p d) + 1" by (cases dir, auto simp add: child_def lv_def ix_def) also have "\ < (i + 1) * (2 * 2^(lv p d - l))" using i'_range by auto finally show ?thesis using pow_l'' by auto qed show "?bottom p'" proof - have "(i - 1) * 2^(lv p' d - l) = 2 * ((i - 1) * 2^(lv p d - l))" using pow_l'' by simp also have "\ < 2 * ix p d - 1" using i'_range by auto finally show ?thesis using is_child and \d < length p\ by (cases dir, auto simp add: child_def lv_def ix_def) qed qed lemma child_neighbour: assumes "child p (inv dir) d = child ps dir d" (is "?c_p = ?c_ps") shows "ps = p[d := (lv p d, ix p d - sgn dir)]" (is "_ = ?ps") proof (rule nth_equalityI) have "length ?c_ps = length ?c_p" using \?c_p = ?c_ps\ by simp hence len_eq: "length ps = length p" by simp thus "length ps = length ?ps" by simp show "ps ! i = ?ps ! i" if "i < length ps" for i proof - have "i < length p" using that len_eq by auto show "ps ! i = ?ps ! i" proof (cases "d = i") case [simp]: True have "?c_p ! i = ?c_ps ! 
i" using \?c_p = ?c_ps\ by auto hence "ix p i = ix ps d + sgn dir" and "lv p i = lv ps i" by (auto simp add: child_def nth_list_update_eq[OF \i < length p\] nth_list_update_eq[OF \i < length ps\]) thus ?thesis by (simp add: lv_def ix_def \i < length p\) next assume "d \ i" with child_other[OF this, of ps dir] child_other[OF this, of p "inv dir"] show ?thesis using assms by auto qed qed qed definition start :: "nat \ grid_point" where "start dm = replicate dm (0, 1)" lemma start_lv[simp]: "d < dm \ lv (start dm) d = 0" unfolding start_def lv_def by simp lemma start_ix[simp]: "d < dm \ ix (start dm) d = 1" unfolding start_def ix_def by simp lemma start_length[simp]: "length (start dm) = dm" unfolding start_def by auto lemma level_start_0[simp]: "level (start dm) = 0" using level_def by auto end diff --git a/thys/UpDown_Scheme/Triangular_Function.thy b/thys/UpDown_Scheme/Triangular_Function.thy --- a/thys/UpDown_Scheme/Triangular_Function.thy +++ b/thys/UpDown_Scheme/Triangular_Function.thy @@ -1,420 +1,422 @@ section \ Hat Functions \ theory Triangular_Function -imports "HOL-Analysis.Analysis" Grid +imports + "HOL-Analysis.Equivalence_Lebesgue_Henstock_Integration" + Grid begin lemma continuous_on_max[continuous_intros]: fixes f :: "_ \ 'a::linorder_topology" shows "continuous_on S f \ continuous_on S g \ continuous_on S (\x. max (f x) (g x))" by (auto simp: continuous_on_def intro: tendsto_max) definition \ :: "(nat \ int) \ real \ real" where "\ \ (\(l,i) x. max 0 (1 - \ x * 2^(l + 1) - real_of_int i \))" definition \ :: "(nat \ int) list \ (nat \ real) \ real" where "\ p x = (\d (p ! d) (x d))" definition l2_\ where "l2_\ p1 p2 = (\x. \ p1 x * \ p2 x \lborel)" definition l2 where "l2 a b = (\x. \ a x * \ b x \(\\<^sub>M d\{..[measurable]: "\ p \ borel_measurable borel" by (cases p) (simp add: \_def) lemma \_nonneg: "0 \ \ p x" by (simp add: \_def split: prod.split) lemma \_zero_iff: "\ (l,i) x = 0 \ x \ {real_of_int (i - 1) / 2^(l + 1) <..< real_of_int (i + 1) / 2^(l + 1)}" by (auto simp: \_def field_simps split: split_max) lemma \_zero: "x \ {real_of_int (i - 1) / 2^(l + 1) <..< real_of_int (i + 1) / 2^(l + 1)} \ \ (l,i) x = 0" unfolding \_zero_iff by simp lemma \_eq_0: assumes x: "x < 0 \ 1 < x" and i: "0 < i" "i < 2^Suc l" shows "\ (l, i) x = 0" using x proof assume "x < 0" also have "0 \ real_of_int (i - 1) / 2^(l + 1)" using i by (auto simp: field_simps) finally show ?thesis by (auto intro!: \_zero simp: field_simps) next have "real_of_int (i + 1) / 2^(l + 1) \ 1" using i by (subst divide_le_eq_1_pos) (auto simp del: of_int_add power_Suc) also assume "1 < x" finally show ?thesis by (auto intro!: \_zero simp: field_simps) qed lemma ix_lt: "p \ sparsegrid dm lm \ d < dm \ ix p d < 2^(lv p d + 1)" unfolding sparsegrid_def lgrid_def using grid_estimate[of d "start dm" p "{0 ..< dm}"] by auto lemma ix_gt: "p \ sparsegrid dm lm \ d < dm \ 0 < ix p d" unfolding sparsegrid_def lgrid_def using grid_estimate[of d "start dm" p "{0 ..< dm}"] by auto lemma \_eq_0: assumes x: "\d 1 < x d" and p: "p \ sparsegrid dm lm" shows "\ p x = 0" unfolding \_def proof (rule prod_zero) from x guess d .. with p[THEN ix_lt, of d] p[THEN ix_gt, of d] p show "\a\{.. (p ! a) (x a) = 0" apply (cases "p!d") apply (intro bexI[of _ d]) apply (auto intro!: \_eq_0 simp: sparsegrid_length ix_def lv_def) done qed simp lemma \_left_support': "x \ {real_of_int (i - 1) / 2^(l + 1) .. 
real_of_int i / 2^(l + 1)} \ \ (l,i) x = 1 + x * 2^(l + 1) - real_of_int i" by (auto simp: \_def field_simps split: split_max) lemma \_left_support: "x \ {-1 .. 0::real} \ \ (l,i) ((x + real_of_int i) / 2^(l + 1)) = 1 + x" by (auto simp: \_def field_simps split: split_max) lemma \_right_support': "x \ {real_of_int i / 2^(l + 1) .. real_of_int (i + 1) / 2^(l + 1)} \ \ (l,i) x = 1 - x * 2^(l + 1) + real_of_int i" by (auto simp: \_def field_simps split: split_max) lemma \_right_support: "x \ {0 .. 1::real} \ \ (l,i) ((x + real i) / 2^(l + 1)) = 1 - x" by (auto simp: \_def field_simps split: split_max) lemma integrable_\: "integrable lborel (\ p)" proof (induct p) case (Pair l i) have "integrable lborel (\x. indicator {real_of_int (i - 1) / 2^(l + 1) .. real_of_int (i + 1) / 2^(l + 1)} x *\<^sub>R \ (l, i) x)" unfolding \_def by (intro borel_integrable_compact) (auto intro!: continuous_intros) then show ?case by (rule integrable_cong[THEN iffD1, rotated -1]) (auto simp: \_zero_iff) qed lemma integrable_\2: "integrable lborel (\x. \ p x * \ q x)" proof (cases p q rule: prod.exhaust[case_product prod.exhaust]) case (Pair_Pair l i l' i') have "integrable lborel (\x. indicator {real_of_int (i - 1) / 2^(l + 1) .. real_of_int (i + 1) / 2^(l + 1)} x *\<^sub>R (\ (l, i) x * \ (l', i') x))" unfolding \_def by (intro borel_integrable_compact) (auto intro!: continuous_intros) then show ?thesis unfolding Pair_Pair by (rule integrable_cong[THEN iffD1, rotated -1]) (auto simp: \_zero_iff) qed lemma l2_\I_DERIV: assumes n: "\ x. x \ { (real_of_int i' - 1) / 2^(l' + 1) .. real_of_int i' / 2^(l' + 1) } \ DERIV \_n x :> (\ (l', i') x * \ (l, i) x)" (is "\ x. x \ {?a..?b} \ DERIV _ _ :> ?P x") and p: "\ x. x \ { real_of_int i' / 2^(l' + 1) .. (real_of_int i' + 1) / 2^(l' + 1) } \ DERIV \_p x :> (\ (l', i') x * \ (l, i) x)" (is "\ x. x \ {?b..?c} \ _") shows "l2_\ (l', i') (l, i) = (\_n ?b - \_n ?a) + (\_p ?c - \_p ?b)" proof - have "has_bochner_integral lborel (\x. ?P x * indicator {?a..?b} x + ?P x * indicator {?b..?c} x) ((\_n ?b - \_n ?a) + (\_p ?c - \_p ?b))" by (intro has_bochner_integral_add has_bochner_integral_FTC_Icc_nonneg n p) (auto simp: \_nonneg field_simps) then have "has_bochner_integral lborel?P ((\_n ?b - \_n ?a) + (\_p ?c - \_p ?b))" by (rule has_bochner_integral_discrete_difference[where X="{?b}", THEN iffD1, rotated -1]) (auto simp: power_add intro!: \_zero integral_cong split: split_indicator) then show ?thesis by (simp add: has_bochner_integral_iff l2_\_def) qed lemma l2_eq: "length a = length b \ l2 a b = (\d (a!d) (b!d))" unfolding l2_def l2_\_def \_def apply (simp add: prod.distrib[symmetric]) proof (rule product_sigma_finite.product_integral_prod) show "product_sigma_finite (\d. lborel)" .. qed (auto intro: integrable_\2) lemma l2_when_disjoint: assumes "l \ l'" defines "d == l' - l" assumes "(i + 1) * 2^d < i' \ i' < (i - 1) * 2^d" (is "?right \ ?left") shows "l2_\ (l', i') (l, i) = 0" proof - let ?sup = "\l i. {real_of_int (i - 1) / 2^(l + 1) <..< real_of_int (i + 1) / 2^(l + 1)}" have l': "l' = l + d" using assms by simp have *: "\i l. 2 ^ l = real_of_int (2 ^ l::int)" by simp have [arith]: "0 < (2^d::int)" by simp from \?right \ ?left\ \l \ l'\ have empty_support: "?sup l i \ ?sup l' i' = {}" by (auto simp add: min_def max_def divide_simps l' power_add * of_int_mult[symmetric] simp del: of_int_diff of_int_add of_int_mult of_int_power) (simp_all add: field_simps) then have "\x. 
\ (l', i') x * \ (l, i) x = 0" unfolding \_zero_iff mult_eq_0_iff by blast then show ?thesis by (simp add: l2_\_def del: mult_eq_0_iff vector_space_over_itself.scale_eq_0_iff) qed lemma l2_commutative: "l2_\ p q = l2_\ q p" by (simp add: l2_\_def mult.commute) lemma l2_when_same: "l2_\ (l, i) (l, i) = 1/3 / 2^l" proof (subst l2_\I_DERIV) let ?l = "(2 :: real)^(l + 1)" let ?in = "real_of_int i - 1" let ?ip = "real_of_int i + 1" let ?\ = "\ (l,i)" let ?\2 = "\x. ?\ x * ?\ x" { fix x assume "x \ {?in / ?l .. real_of_int i / ?l}" hence \_eq: "?\ x = ?l * x - ?in" using \_left_support' by auto show "DERIV (\x. x^3 / 3 * ?l^2 + x * ?in^2 - x^2/2 * 2 * ?l * ?in) x :> ?\2 x" by (auto intro!: derivative_eq_intros simp add: power2_eq_square field_simps \_eq) } { fix x assume "x \ {real_of_int i / ?l .. ?ip / ?l}" hence \_eq: "?\ x = ?ip - ?l * x" using \_right_support' by auto show "DERIV (\x. x^3 / 3 * ?l^2 + x * ?ip^2 - x^2/2 * 2 * ?l * ?ip) x :> ?\2 x" by (auto intro!: derivative_eq_intros simp add: power2_eq_square field_simps \_eq) } qed (simp_all add: field_simps power_eq_if[of _ 2] power_eq_if[of _ 3]) lemma l2_when_left_child: assumes "l < l'" and i'_bot: "i' > (i - 1) * 2^(l' - l)" and i'_top: "i' < i * 2^(l' - l)" shows "l2_\ (l', i') (l, i) = (1 + real_of_int i' / 2^(l' - l) - real_of_int i) / 2^(l' + 1)" proof (subst l2_\I_DERIV) let ?l' = "(2 :: real)^(l' + 1)" let ?in' = "real_of_int i' - 1" let ?ip' = "real_of_int i' + 1" let ?l = "(2 :: real)^(l + 1)" let ?i = "real_of_int i - 1" let ?\' = "\ (l',i')" let ?\ = "\ (l, i)" let "?\2 x" = "?\' x * ?\ x" define \_n where "\_n x = x^3 / 3 * ?l' * ?l + x * ?i * ?in' - x^2 / 2 * (?in' * ?l + ?i * ?l')" for x define \_p where "\_p x = x^2 / 2 * (?ip' * ?l + ?i * ?l') - x^3 / 3 * ?l' * ?l - x * ?i * ?ip'" for x have level_diff: "2^(l' - l) = 2^l' / (2^l :: real)" using power_diff[of "2::real" l l'] \l < l'\ by auto { fix x assume x: "x \ {?in' / ?l' .. ?ip' / ?l'}" have "?i * 2^(l' - l) \ ?in'" using i'_bot int_less_real_le by auto hence "?i / ?l \ ?in' / ?l'" using level_diff by (auto simp: field_simps) hence "?i / ?l \ x" using x by auto moreover have "?ip' \ real_of_int i * 2^(l' - l)" using i'_top int_less_real_le by auto hence ip'_le_i: "?ip' / ?l' \ real_of_int i / ?l" using level_diff by (auto simp: field_simps) hence "x \ real_of_int i / ?l" using x by auto ultimately have "?\ x = ?l * x - ?i" using \_left_support' by auto } note \_eq = this { fix x assume x: "x \ {?in' / ?l' .. real_of_int i' / ?l'}" hence \'_eq: "?\' x = ?l' * x - ?in'" using \_left_support' by auto from x have x': "x \ {?in' / ?l' .. ?ip' / ?l'}" by (auto simp add: field_simps) show "DERIV \_n x :> ?\2 x" unfolding \_eq[OF x'] \'_eq \_n_def by (auto intro!: derivative_eq_intros simp add: power2_eq_square algebra_simps) } { fix x assume x: "x \ {real_of_int i' / ?l' .. ?ip' / ?l'}" hence \'_eq: "?\' x = ?ip' - ?l' * x" using \_right_support' by auto from x have x': "x \ {?in' / ?l' .. 
?ip' / ?l'}" by (simp add: field_simps) show "DERIV \_p x :> ?\2 x" unfolding \_eq[OF x'] \'_eq \_p_def by (auto intro!: derivative_eq_intros simp add: power2_eq_square algebra_simps) } qed (simp_all add: field_simps power_eq_if[of _ 2] power_eq_if[of _ 3] power_diff[of "2::real", OF _ \l < l'\[THEN less_imp_le]] ) lemma l2_when_right_child: assumes "l < l'" and i'_bot: "i' > i * 2^(l' - l)" and i'_top: "i' < (i + 1) * 2^(l' - l)" shows "l2_\ (l', i') (l, i) = (1 - real_of_int i' / 2^(l' - l) + real_of_int i) / 2^(l' + 1)" proof (subst l2_\I_DERIV) let ?l' = "(2 :: real)^(l' + 1)" let ?in' = "real_of_int i' - 1" let ?ip' = "real_of_int i' + 1" let ?l = "(2 :: real)^(l + 1)" let ?i = "real_of_int i + 1" let ?\' = "\ (l',i')" let ?\ = "\ (l, i)" let ?\2 = "\x. ?\' x * ?\ x" define \_n where "\_n x = x^2 / 2 * (?in' * ?l + ?i * ?l') - x^3 / 3 * ?l' * ?l - x * ?i * ?in'" for x define \_p where "\_p x = x^3 / 3 * ?l' * ?l + x * ?i * ?ip' - x^2 / 2 * (?ip' * ?l + ?i * ?l')" for x have level_diff: "2^(l' - l) = 2^l' / (2^l :: real)" using power_diff[of "2::real" l l'] \l < l'\ by auto { fix x assume x: "x \ {?in' / ?l' .. ?ip' / ?l'}" have "real_of_int i * 2^(l' - l) \ ?in'" using i'_bot int_less_real_le by auto hence "real_of_int i / ?l \ ?in' / ?l'" using level_diff by (auto simp: field_simps) hence "real_of_int i / ?l \ x" using x by auto moreover have "?ip' \ ?i * 2^(l' - l)" using i'_top int_less_real_le by auto hence ip'_le_i: "?ip' / ?l' \ ?i / ?l" using level_diff by (auto simp: field_simps) hence "x \ ?i / ?l" using x by auto ultimately have "?\ x = ?i - ?l * x" using \_right_support' by auto } note \_eq = this { fix x assume x: "x \ {?in' / ?l' .. real_of_int i' / ?l'}" hence \'_eq: "?\' x = ?l' * x - ?in'" using \_left_support' by auto from x have x': "x \ {?in' / ?l' .. ?ip' / ?l'}" by (simp add: field_simps) show "DERIV \_n x :> ?\2 x" unfolding \_n_def \_eq[OF x'] \'_eq by (auto intro!: derivative_eq_intros simp add: simp add: power2_eq_square algebra_simps) } { fix x assume x: "x \ {real_of_int i' / ?l' .. ?ip' / ?l'}" hence \'_eq: "?\' x = ?ip' - ?l' * x" using \_right_support' by auto from x have x': "x \ {?in' / ?l' .. ?ip' / ?l'}" by (auto simp: field_simps) show "DERIV \_p x :> ?\2 x" unfolding \_eq[OF x'] \'_eq \_p_def by (auto intro!: derivative_eq_intros simp add: power2_eq_square algebra_simps) } qed (simp_all add: field_simps power_eq_if[of _ 2] power_eq_if[of _ 3] power_diff[of "2::real", OF _ \l < l'\[THEN less_imp_le]] ) lemma level_shift: "lc > l \ (x :: real) / 2 ^ (lc - Suc l) = x * 2 / 2 ^ (lc - l)" by (auto simp add: power_diff) lemma l2_child: assumes "d < length b" and p_grid: "p \ grid (child b dir d) ds" (is "p \ grid ?child ds") shows "l2_\ (p ! d) (b ! 
d) = (1 - real_of_int (sgn dir) * (real_of_int (ix p d) / 2^(lv p d - lv b d) - real_of_int (ix b d))) / 2^(lv p d + 1)" proof - have "lv ?child d \ lv p d" using \d < length b\ and p_grid using grid_single_level by auto hence "lv b d < lv p d" using \d < length b\ and p_grid using child_lv by auto let ?i_c = "ix ?child d" and ?l_c = "lv ?child d" let ?i_p = "ix p d" and ?l_p = "lv p d" let ?i_b = "ix b d" and ?l_b = "lv b d" have "(2::int) * 2^(?l_p - ?l_c) = 2^Suc (?l_p - ?l_c)" by auto also have "\ = 2^(Suc ?l_p - ?l_c)" proof - have "Suc (?l_p - ?l_c) = Suc ?l_p - ?l_c" using \lv ?child d \ lv p d\ by auto thus ?thesis by auto qed also have "\ = 2^(?l_p - ?l_b)" using \d < length b\ and \lv b d < lv p d\ by (auto simp add: child_def lv_def) finally have level: "2^(?l_p - ?l_b) = (2::int) * 2^(?l_p - ?l_c)" .. from \d < length b\ and p_grid have range_left: "?i_p > (?i_c - 1) * 2^(?l_p - ?l_c)" and range_right: "?i_p < (?i_c + 1) * 2^(?l_p - ?l_c)" using grid_estimate by auto show ?thesis proof (cases dir) case left with child_ix_left[OF \d < length b\] have "(?i_b - 1) * 2^(?l_p - ?l_b) = (?i_c - 1) * 2^(?l_p - ?l_c)" and "?i_b * 2^(?l_p - ?l_b) = (?i_c + 1) * 2^(?l_p - ?l_c)" using level by auto hence "?i_p > (?i_b - 1) * 2^(?l_p - ?l_b)" and "?i_p < ?i_b * 2^(?l_p - ?l_b)" using range_left and range_right by auto with \?l_b < ?l_p\ have "l2_\ (?l_p, ?i_p) (?l_b, ?i_b) = (1 + real_of_int ?i_p / 2^(?l_p - ?l_b) - real_of_int ?i_b) / 2^(?l_p + 1)" by (rule l2_when_left_child) thus ?thesis using left by (auto simp add: ix_def lv_def) next case right hence "?i_c = 2 * ?i_b + 1" using child_ix_right and \d < length b\ by auto hence "?i_b * 2^(?l_p - ?l_b) = (?i_c - 1) * 2^(?l_p - ?l_c)" and "(?i_b + 1) * 2^(?l_p - ?l_b) = (?i_c + 1) * 2^(?l_p - ?l_c)" using level by auto hence "?i_p > ?i_b * 2^(?l_p - ?l_b)" and "?i_p < (?i_b + 1) * 2^(?l_p - ?l_b)" using range_left and range_right by auto with \?l_b < ?l_p\ have "l2_\ (?l_p, ?i_p) (?l_b, ?i_b) = (1 - real_of_int ?i_p / 2^(?l_p - ?l_b) + real_of_int ?i_b) / 2^(?l_p + 1)" by (rule l2_when_right_child) thus ?thesis using right by (auto simp add: ix_def lv_def) qed qed lemma l2_same: "l2_\ (p!d) (p!d) = 1/3 / 2^(lv p d)" proof - have "l2_\ (p!d) (p!d) = l2_\ (lv p d, ix p d) (lv p d, ix p d)" by (auto simp add: lv_def ix_def) thus ?thesis using l2_when_same by auto qed lemma l2_disjoint: assumes "d < length b" and "p \ grid b {d}" and "p' \ grid b {d}" and "p' \ grid p {d}" and "lv p' d \ lv p d" shows "l2_\ (p' ! d) (p ! d) = 0" proof - have range: "ix p' d > (ix p d + 1) * 2^(lv p' d - lv p d) \ ix p' d < (ix p d - 1) * 2^(lv p' d - lv p d)" proof (rule ccontr) assume "\ ?thesis" hence "ix p' d \ (ix p d + 1) * 2^(lv p' d - lv p d)" and "ix p' d \ (ix p d - 1) * 2^(lv p' d - lv p d)" by auto with \p' \ grid b {d}\ and \p \ grid b {d}\ and \lv p' d \ lv p d\ and \d < length b\ have "p' \ grid p {d}" using grid_part[where p=p and b=b and d=d and p'=p'] by auto with \p' \ grid p {d}\ show False by auto qed have "l2_\ (p' ! d) (p ! d) = l2_\ (lv p' d, ix p' d) (lv p d, ix p d)" by (auto simp add: ix_def lv_def) also have "\ = 0" using range and \lv p' d \ lv p d\ and l2_when_disjoint by auto finally show ?thesis . qed lemma l2_down2: fixes pc pd p assumes "d < length pd" assumes pc_in_grid: "pc \ grid (child pd dir d) {d}" assumes pd_is_child: "pd = child p dir d" (is "pd = ?pd") shows "l2_\ (pc ! d) (pd ! d) / 2 = l2_\ (pc ! d) (p ! 
d)" proof - have "d < length p" using pd_is_child \d < length pd\ by auto moreover have "pc \ grid ?pd {d}" using pd_is_child and grid_child and pc_in_grid by auto hence "lv p d < lv pc d" using grid_child_level and \d < length pd\ and pd_is_child by auto moreover have "real_of_int (sgn dir) * real_of_int (sgn dir) = 1" by (cases dir, auto) ultimately show ?thesis unfolding l2_child[OF \d < length pd\ pc_in_grid] l2_child[OF \d < length p\ \pc \ grid ?pd {d}\] using child_lv and child_ix and pd_is_child and level_shift by (auto simp add: algebra_simps diff_divide_distrib add_divide_distrib) qed lemma l2_zigzag: assumes "d < length p" and p_child: "p = child p_p dir d" and p'_grid: "p' \ grid (child p (inv dir) d) {d}" and ps_intro: "child p (inv dir) d = child ps dir d" (is "?c_p = ?c_ps") shows "l2_\ (p' ! d) (p_p ! d) = l2_\ (p' ! d) (ps ! d) + l2_\ (p' ! d) (p ! d) / 2" proof - have "length p = length ?c_p" by auto also have "\ = length ?c_ps" using ps_intro by auto finally have "length p = length ps" using ps_intro by auto hence "d < length p_p" using p_child and \d < length p\ by auto moreover from ps_intro have "ps = p[d := (lv p d, ix p d - sgn dir)]" by (rule child_neighbour) hence "lv ps d = lv p d" and "real_of_int (ix ps d) = real_of_int (ix p d) - real_of_int (sgn dir)" using lv_def and ix_def and \length p = length ps\ and \d < length p\ by auto moreover have "d < length ps" and *: "p' \ grid (child ps dir d) {d}" using p'_grid ps_intro \length p = length ps\ \d < length p\ by auto have "p' \ grid p {d}" using p'_grid and grid_child by auto hence p_p_grid: "p' \ grid (child p_p dir d) {d}" using p_child by auto hence "lv p' d > lv p_p d" using grid_child_level and \d < length p_p\ by auto moreover have "real_of_int (sgn dir) * real_of_int (sgn dir) = 1" by (cases dir, auto) ultimately show ?thesis unfolding l2_child[OF \d < length p\ p'_grid] l2_child[OF \d < length ps\ *] l2_child[OF \d < length p_p\ p_p_grid] using child_lv and child_ix and p_child level_shift by (auto simp add: add_divide_distrib algebra_simps diff_divide_distrib) qed end diff --git a/thys/pGCL/Misc.thy b/thys/pGCL/Misc.thy --- a/thys/pGCL/Misc.thy +++ b/thys/pGCL/Misc.thy @@ -1,455 +1,455 @@ (* * Copyright (C) 2014 NICTA * All rights reserved. *) (* Author: David Cock - David.Cock@nicta.com.au *) section \Miscellaneous Mathematics\ theory Misc imports - "HOL-Analysis.Analysis" + "HOL-Analysis.Multivariate_Analysis" begin text_raw \\label{s:misc}\ lemma sum_UNIV: fixes S::"'a::finite set" assumes complete: "\x. x\S \ f x = 0" shows "sum f S = sum f UNIV" proof - from complete have "sum f S = sum f (UNIV - S) + sum f S" by(simp) also have "... = sum f UNIV" by(auto intro: sum.subset_diff[symmetric]) finally show ?thesis . qed lemma cInf_mono: fixes A::"'a::conditionally_complete_lattice set" assumes lower: "\b. b \ B \ \a\A. a \ b" and bounded: "\a. a \ A \ c \ a" and ne: "B \ {}" shows "Inf A \ Inf B" proof(rule cInf_greatest[OF ne]) fix b assume bin: "b \ B" with lower obtain a where ain: "a \ A" and le: "a \ b" by(auto) from ain bounded have "Inf A \ a" by(intro cInf_lower bdd_belowI, auto) also note le finally show "Inf A \ b" . 
qed lemma max_distrib: fixes c::real assumes nn: "0 \ c" shows "c * max a b = max (c * a) (c * b)" proof(cases "a \ b") case True moreover with nn have "c * a \ c * b" by(auto intro:mult_left_mono) ultimately show ?thesis by(simp add:max.absorb2) next case False then have "b \ a" by(auto) moreover with nn have "c * b \ c * a" by(auto intro:mult_left_mono) ultimately show ?thesis by(simp add:max.absorb1) qed lemma mult_div_mono_left: fixes c::real assumes nnc: "0 \ c" and nzc: "c \ 0" and inv: "a \ inverse c * b" shows "c * a \ b" proof - from nnc inv have "c * a \ (c * inverse c) * b" by(auto simp:mult.assoc intro:mult_left_mono) also from nzc have "... = b" by(simp) finally show "c * a \ b" . qed lemma mult_div_mono_right: fixes c::real assumes nnc: "0 \ c" and nzc: "c \ 0" and inv: "inverse c * a \ b" shows "a \ c * b" proof - from nzc have "a = (c * inverse c) * a " by(simp) also from nnc inv have "(c * inverse c) * a \ c * b" by(auto simp:mult.assoc intro:mult_left_mono) finally show "a \ c * b" . qed lemma min_distrib: fixes c::real assumes nnc: "0 \ c" shows "c * min a b = min (c * a) (c * b)" proof(cases "a \ b") case True moreover with nnc have "c * a \ c * b" by(blast intro:mult_left_mono) ultimately show ?thesis by(auto) next case False hence "b \ a" by(auto) moreover with nnc have "c * b \ c * a" by(blast intro:mult_left_mono) ultimately show ?thesis by(simp add:min.absorb2) qed lemma finite_set_least: fixes S::"'a::linorder set" assumes finite: "finite S" and ne: "S \ {}" shows "\x\S. \y\S. x \ y" proof - have "S = {} \ (\x\S. \y\S. x \ y)" proof(rule finite_induct, simp_all add:assms) fix x::'a and S::"'a set" assume IH: "S = {} \ (\x\S. \y\S. x \ y)" show "(\y\S. x \ y) \ (\x'\S. x' \ x \ (\y\S. x' \ y))" proof(cases "S={}") case True then show ?thesis by(auto) next case False with IH have "\x\S. \y\S. x \ y" by(auto) then obtain z where zin: "z \ S" and zmin: "\y\S. z \ y" by(auto) thus ?thesis by(cases "z \ x", auto) qed qed with ne show ?thesis by(auto) qed lemma cSup_add: fixes c::real assumes ne: "S \ {}" and bS: "\x. x\S \ x \ b" shows "Sup S + c = Sup {x + c |x. x \ S}" proof(rule antisym) from ne bS show "Sup {x + c |x. x \ S} \ Sup S + c" by(auto intro!:cSup_least add_right_mono cSup_upper bdd_aboveI) have "Sup S \ Sup {x + c |x. x \ S} - c" proof(intro cSup_least ne) fix x assume xin: "x \ S" from bS have "\x. x\S \ x + c \ b + c" by(auto intro:add_right_mono) hence "bdd_above {x + c |x. x \ S}" by(intro bdd_aboveI, blast) with xin have "x + c \ Sup {x + c |x. x \ S}" by(auto intro:cSup_upper) thus "x \ Sup {x + c |x. x \ S} - c" by(auto) qed thus "Sup S + c \ Sup {x + c |x. x \ S}" by(auto) qed lemma cSup_mult: fixes c::real assumes ne: "S \ {}" and bS: "\x. x\S \ x \ b" and nnc: "0 \ c" shows "c * Sup S = Sup {c * x |x. x \ S}" proof(cases) assume "c = 0" moreover from ne have "\x. x \ S" by(auto) ultimately show ?thesis by(simp) next assume cnz: "c \ 0" show ?thesis proof(rule antisym) from bS have baS: "bdd_above S" by(intro bdd_aboveI, auto) with ne nnc show "Sup {c * x |x. x \ S} \ c * Sup S" by(blast intro!:cSup_least mult_left_mono[OF cSup_upper]) have "Sup S \ inverse c * Sup {c * x |x. x \ S}" proof(intro cSup_least ne) fix x assume xin: "x\S" moreover from bS nnc have "\x. x\S \ c * x \ c * b" by(auto intro:mult_left_mono) ultimately have "c * x \ Sup {c * x |x. x \ S}" by(auto intro!:cSup_upper bdd_aboveI) moreover from nnc have "0 \ inverse c" by(auto) ultimately have "inverse c * (c * x) \ inverse c * Sup {c * x |x. 
x \ S}" by(auto intro:mult_left_mono) with cnz show "x \ inverse c * Sup {c * x |x. x \ S}" by(simp add:mult.assoc[symmetric]) qed with nnc have "c * Sup S \ c * (inverse c * Sup {c * x |x. x \ S})" by(auto intro:mult_left_mono) with cnz show "c * Sup S \ Sup {c * x |x. x \ S}" by(simp add:mult.assoc[symmetric]) qed qed lemma closure_contains_Sup: fixes S :: "real set" assumes neS: "S \ {}" and bS: "\x\S. x \ B" shows "Sup S \ closure S" proof - let ?T = "uminus ` S" from neS have neT: "?T \ {}" by(auto) from bS have bT: "\x\?T. -B \ x" by(auto) hence bbT: "bdd_below ?T" by(intro bdd_belowI, blast) have "Sup S = - Inf ?T" proof(rule antisym) from neT bbT have "\x. x\S \ Inf (uminus ` S) \ -x" by(blast intro:cInf_lower) hence "\x. x\S \ -1 * -x \ -1 * Inf (uminus ` S)" by(rule mult_left_mono_neg, auto) hence lenInf: "\x. x\S \ x \ - Inf (uminus ` S)" by(simp) with neS bS show "Sup S \ - Inf ?T" by(blast intro:cSup_least) have "- Sup S \ Inf ?T" proof(rule cInf_greatest[OF neT]) fix x assume "x \ uminus ` S" then obtain y where yin: "y \ S" and rwx: "x = -y" by(auto) from yin bS have "y \ Sup S" by(intro cSup_upper bdd_belowI, auto) hence "-1 * Sup S \ -1 * y" by(simp add:mult_left_mono_neg) with rwx show "- Sup S \ x" by(simp) qed hence "-1 * Inf ?T \ -1 * (- Sup S)" by(simp add:mult_left_mono_neg) thus "- Inf ?T \ Sup S" by(simp) qed also { from neT bbT have "Inf ?T \ closure ?T" by(rule closure_contains_Inf) hence "- Inf ?T \ uminus ` closure ?T" by(auto) } also { have "linear uminus" by(auto intro:linearI) hence "uminus ` closure ?T \ closure (uminus ` ?T)" by(rule closure_linear_image_subset) } also { have "uminus ` ?T \ S" by(auto) hence "closure (uminus ` ?T) \ closure S" by(rule closure_mono) } finally show "Sup S \ closure S" . qed lemma tendsto_min: fixes x y::real assumes ta: "a \ x" and tb: "b \ y" shows "(\i. min (a i) (b i)) \ min x y" proof(rule LIMSEQ_I, simp) fix e::real assume pe: "0 < e" from ta pe obtain noa where balla: "\n\noa. abs (a n - x) < e" by(auto dest:LIMSEQ_D) from tb pe obtain nob where ballb: "\n\nob. 
abs (b n - y) < e" by(auto dest:LIMSEQ_D) { fix n assume ge: "max noa nob \ n" hence gea: "noa \ n" and geb: "nob \ n" by(auto) have "abs (min (a n) (b n) - min x y) < e" proof cases assume le: "min (a n) (b n) \ min x y" show ?thesis proof cases assume "a n \ b n" hence rwmin: "min (a n) (b n) = a n" by(auto) with le have "a n \ min x y" by(simp) moreover from gea balla have "abs (a n - x) < e" by(simp) moreover have "min x y \ x" by(auto) ultimately have "abs (a n - min x y) < e" by(auto) with rwmin show "abs (min (a n) (b n) - min x y) < e" by(simp) next assume "\ a n \ b n" hence "b n \ a n" by(auto) hence rwmin: "min (a n) (b n) = b n" by(auto) with le have "b n \ min x y" by(simp) moreover from geb ballb have "abs (b n - y) < e" by(simp) moreover have "min x y \ y" by(auto) ultimately have "abs (b n - min x y) < e" by(auto) with rwmin show "abs (min (a n) (b n) - min x y) < e" by(simp) qed next assume "\ min (a n) (b n) \ min x y" hence le: "min x y \ min (a n) (b n)" by(auto) show ?thesis proof cases assume "x \ y" hence rwmin: "min x y = x" by(auto) with le have "x \ min (a n) (b n)" by(simp) moreover from gea balla have "abs (a n - x) < e" by(simp) moreover have "min (a n) (b n) \ a n" by(auto) ultimately have "abs (min (a n) (b n) - x) < e" by(auto) with rwmin show "abs (min (a n) (b n) - min x y) < e" by(simp) next assume "\ x \ y" hence "y \ x" by(auto) hence rwmin: "min x y = y" by(auto) with le have "y \ min (a n) (b n)" by(simp) moreover from geb ballb have "abs (b n - y) < e" by(simp) moreover have "min (a n) (b n) \ b n" by(auto) ultimately have "abs (min (a n) (b n) - y) < e" by(auto) with rwmin show "abs (min (a n) (b n) - min x y) < e" by(simp) qed qed } thus "\no. \n\no. \min (a n) (b n) - min x y\ < e" by(blast) qed definition supp :: "('s \ real) \ 's set" where "supp f = {x. f x \ 0}" definition dist_remove :: "('s \ real) \ 's \ 's \ real" where "dist_remove p x = (\y. if y=x then 0 else p y / (1 - p x))" lemma supp_dist_remove: "p x \ 0 \ p x \ 1 \ supp (dist_remove p x) = supp p - {x}" by(auto simp:dist_remove_def supp_def) lemma supp_empty: "supp f = {} \ f x = 0" by(simp add:supp_def) lemma nsupp_zero: "x \ supp f \ f x = 0" by(simp add:supp_def) lemma sum_supp: fixes f::"'a::finite \ real" shows "sum f (supp f) = sum f UNIV" proof - have "sum f (UNIV - supp f) = 0" by(simp add:supp_def) hence "sum f (supp f) = sum f (UNIV - supp f) + sum f (supp f)" by(simp) also have "... = sum f UNIV" by(simp add:sum.subset_diff[symmetric]) finally show ?thesis . 
qed subsection \Truncated Subtraction\ text_raw \\label{s:trunc_sub}\ definition tminus :: "real \ real \ real" (infixl "\" 60) where "x \ y = max (x - y) 0" lemma minus_le_tminus[intro!,simp]: "a - b \ a \ b" unfolding tminus_def by(auto) lemma tminus_cancel_1: "0 \ a \ a + 1 \ 1 = a" unfolding tminus_def by(simp) lemma tminus_zero_imp_le: "x \ y \ 0 \ x \ y" by(simp add:tminus_def) lemma tminus_zero[simp]: "0 \ x \ x \ 0 = x" by(simp add:tminus_def) lemma tminus_left_mono: "a \ b \ a \ c \ b \ c" unfolding tminus_def by(case_tac "a \ c", simp_all) lemma tminus_less: "\ 0 \ a; 0 \ b \ \ a \ b \ a" unfolding tminus_def by(force) lemma tminus_left_distrib: assumes nna: "0 \ a" shows "a * (b \ c) = a * b \ a * c" proof(cases "b \ c") case True note le = this hence "a * max (b - c) 0 = 0" by(simp add:max.absorb2) also { from nna le have "a * b \ a * c" by(blast intro:mult_left_mono) hence "0 = max (a * b - a * c) 0" by(simp add:max.absorb1) } finally show ?thesis by(simp add:tminus_def) next case False hence le: "c \ b" by(auto) hence "a * max (b - c) 0 = a * (b - c)" by(simp only:max.absorb1) also { from nna le have "a * c \ a * b" by(blast intro:mult_left_mono) hence "a * (b - c) = max (a * b - a * c) 0" by(simp add:max.absorb1 field_simps) } finally show ?thesis by(simp add:tminus_def) qed lemma tminus_le[simp]: "b \ a \ a \ b = a - b" unfolding tminus_def by(simp) lemma tminus_le_alt[simp]: "a \ b \ a \ b = 0" by(simp add:tminus_def) lemma tminus_nle[simp]: "\b \ a \ a \ b = 0" unfolding tminus_def by(simp) lemma tminus_add_mono: "(a+b) \ (c+d) \ (a\c) + (b\d)" proof(cases "0 \ a - c") case True note pac = this show ?thesis proof(cases "0 \ b - d") case True note pbd = this from pac and pbd have "(c + d) \ (a + b)" by(simp) with pac and pbd show ?thesis by(simp) next case False with pac show ?thesis by(cases "c + d \ a + b", auto) qed next case False note nac = this show ?thesis proof(cases "0 \ b - d") case True with nac show ?thesis by(cases "c + d \ a + b", auto) next case False note nbd = this with nac have "\(c + d) \ (a + b)" by(simp) with nac and nbd show ?thesis by(simp) qed qed lemma tminus_sum_mono: assumes fS: "finite S" shows "sum f S \ sum g S \ sum (\x. f x \ g x) S" (is "?X S") proof(rule finite_induct) from fS show "finite S" . show "?X {}" by(simp) fix x and F assume fF: "finite F" and xniF: "x \ F" and IH: "?X F" have "f x + sum f F \ g x + sum g F \ (f x \ g x) + (sum f F \ sum g F)" by(rule tminus_add_mono) also from IH have "... \ (f x \ g x) + (\x\F. f x \ g x)" by(rule add_left_mono) finally show "?X (insert x F)" by(simp add:sum.insert[OF fF xniF]) qed lemma tminus_nneg[simp,intro]: "0 \ a \ b" by(cases "b \ a", auto) lemma tminus_right_antimono: assumes clb: "c \ b" shows "a \ b \ a \ c" proof(cases "b \ a") case True moreover with clb have "c \ a" by(auto) moreover note clb ultimately show ?thesis by(simp) next case False then show ?thesis by(simp) qed lemma min_tminus_distrib: "min a b \ c = min (a \ c) (b \ c)" unfolding tminus_def by(auto) end
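As a quick worked instance of the truncated-subtraction bound tminus_add_mono above (a minimal sketch with arbitrarily chosen values a = 1, b = 5, c = 3, d = 2; only the definition x ⊖ y = max (x - y) 0 from this theory is assumed):

\[ (a+b) \ominus (c+d) \;=\; 6 \ominus 5 \;=\; \max(6-5,\,0) \;=\; 1, \qquad (a \ominus c) + (b \ominus d) \;=\; \max(1-3,\,0) + \max(5-2,\,0) \;=\; 0 + 3 \;=\; 3. \]

So the claimed inequality reads 1 ≤ 3 here, and it is strict precisely because the negative difference 1 - 3 on the right-hand side is truncated to 0.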