diff --git a/thys/Optics/Lens_Algebra.thy b/thys/Optics/Lens_Algebra.thy --- a/thys/Optics/Lens_Algebra.thy +++ b/thys/Optics/Lens_Algebra.thy @@ -1,399 +1,418 @@ section \Lens Algebraic Operators\ theory Lens_Algebra imports Lens_Laws begin subsection \Lens Composition, Plus, Unit, and Identity\ text \ \begin{figure} \begin{center} \includegraphics[width=7cm]{figures/Composition} \end{center} \vspace{-5ex} \caption{Lens Composition} \label{fig:Comp} \end{figure} We introduce the algebraic lens operators; for more information please see our paper~\cite{Foster16a}. Lens composition, illustrated in Figure~\ref{fig:Comp}, constructs a lens by composing the source of one lens with the view of another.\ -definition lens_comp :: "('a \ 'b) \ ('b \ 'c) \ ('a \ 'c)" (infixr ";\<^sub>L" 80) where +definition lens_comp :: "('a \ 'b) \ ('b \ 'c) \ ('a \ 'c)" (infixl ";\<^sub>L" 80) where [lens_defs]: "lens_comp Y X = \ lens_get = get\<^bsub>Y\<^esub> \ lens_get X , lens_put = (\ \ v. lens_put X \ (lens_put Y (lens_get X \) v)) \" text \ \begin{figure} \begin{center} \includegraphics[width=7cm]{figures/Sum} \end{center} \vspace{-5ex} \caption{Lens Sum} \label{fig:Sum} \end{figure} Lens plus, as illustrated in Figure~\ref{fig:Sum} parallel composes two independent lenses, resulting in a lens whose view is the product of the two underlying lens views.\ definition lens_plus :: "('a \ 'c) \ ('b \ 'c) \ 'a \ 'b \ 'c" (infixr "+\<^sub>L" 75) where [lens_defs]: "X +\<^sub>L Y = \ lens_get = (\ \. (lens_get X \, lens_get Y \)) , lens_put = (\ \ (u, v). lens_put X (lens_put Y \ v) u) \" text \The product functor lens similarly parallel composes two lenses, but in this case the lenses have different sources and so the resulting source is also a product.\ definition lens_prod :: "('a \ 'c) \ ('b \ 'd) \ ('a \ 'b \ 'c \ 'd)" (infixr "\\<^sub>L" 85) where [lens_defs]: "lens_prod X Y = \ lens_get = map_prod get\<^bsub>X\<^esub> get\<^bsub>Y\<^esub> , lens_put = \ (u, v) (x, y). (put\<^bsub>X\<^esub> u x, put\<^bsub>Y\<^esub> v y) \" text \The $\lfst$ and $\lsnd$ lenses project the first and second elements, respectively, of a product source type.\ definition fst_lens :: "'a \ 'a \ 'b" ("fst\<^sub>L") where [lens_defs]: "fst\<^sub>L = \ lens_get = fst, lens_put = (\ (\, \) u. (u, \)) \" definition snd_lens :: "'b \ 'a \ 'b" ("snd\<^sub>L") where [lens_defs]: "snd\<^sub>L = \ lens_get = snd, lens_put = (\ (\, \) u. (\, u)) \" lemma get_fst_lens [simp]: "get\<^bsub>fst\<^sub>L\<^esub> (x, y) = x" by (simp add: fst_lens_def) lemma get_snd_lens [simp]: "get\<^bsub>snd\<^sub>L\<^esub> (x, y) = y" by (simp add: snd_lens_def) text \The swap lens is a bijective lens which swaps over the elements of the product source type.\ abbreviation swap_lens :: "'a \ 'b \ 'b \ 'a" ("swap\<^sub>L") where "swap\<^sub>L \ snd\<^sub>L +\<^sub>L fst\<^sub>L" text \The zero lens is an ineffectual lens whose view is a unit type. This means the zero lens cannot distinguish or change the source type.\ definition zero_lens :: "unit \ 'a" ("0\<^sub>L") where [lens_defs]: "0\<^sub>L = \ lens_get = (\ _. ()), lens_put = (\ \ x. \) \" text \The identity lens is a bijective lens where the source and view type are the same.\ definition id_lens :: "'a \ 'a" ("1\<^sub>L") where [lens_defs]: "1\<^sub>L = \ lens_get = id, lens_put = (\ _. id) \" text \The quotient operator $X \lquot Y$ shortens lens $X$ by cutting off $Y$ from the end. 
It is thus the dual of the composition operator.\ definition lens_quotient :: "('a \ 'c) \ ('b \ 'c) \ 'a \ 'b" (infixr "'/\<^sub>L" 90) where [lens_defs]: "X /\<^sub>L Y = \ lens_get = \ \. get\<^bsub>X\<^esub> (create\<^bsub>Y\<^esub> \) , lens_put = \ \ v. get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> (create\<^bsub>Y\<^esub> \) v) \" -text \Lens override uses a lens to replace part of a source type with a given value for the - corresponding view.\ - -definition lens_override :: "'a \ 'a \ ('b \ 'a) \ 'a" ("_ \\<^sub>L _ on _" [95,0,96] 95) where -[lens_defs]: "S\<^sub>1 \\<^sub>L S\<^sub>2 on X = put\<^bsub>X\<^esub> S\<^sub>1 (get\<^bsub>X\<^esub> S\<^sub>2)" - text \Lens inverse take a bijective lens and swaps the source and view types.\ definition lens_inv :: "('a \ 'b) \ ('b \ 'a)" ("inv\<^sub>L") where [lens_defs]: "lens_inv x = \ lens_get = create\<^bsub>x\<^esub>, lens_put = \ \. get\<^bsub>x\<^esub> \" subsection \Closure Poperties\ text \We show that the core lenses combinators defined above are closed under the key lens classes.\ lemma id_wb_lens: "wb_lens 1\<^sub>L" by (unfold_locales, simp_all add: id_lens_def) lemma source_id_lens: "\\<^bsub>1\<^sub>L\<^esub> = UNIV" by (simp add: id_lens_def lens_source_def) lemma unit_wb_lens: "wb_lens 0\<^sub>L" by (unfold_locales, simp_all add: zero_lens_def) lemma source_zero_lens: "\\<^bsub>0\<^sub>L\<^esub> = UNIV" by (simp_all add: zero_lens_def lens_source_def) lemma comp_weak_lens: "\ weak_lens x; weak_lens y \ \ weak_lens (x ;\<^sub>L y)" by (unfold_locales, simp_all add: lens_comp_def) lemma comp_wb_lens: "\ wb_lens x; wb_lens y \ \ wb_lens (x ;\<^sub>L y)" by (unfold_locales, auto simp add: lens_comp_def wb_lens_def weak_lens.put_closure) lemma comp_mwb_lens: "\ mwb_lens x; mwb_lens y \ \ mwb_lens (x ;\<^sub>L y)" by (unfold_locales, auto simp add: lens_comp_def mwb_lens_def weak_lens.put_closure) lemma source_lens_comp: "\ mwb_lens x; mwb_lens y \ \ \\<^bsub>x ;\<^sub>L y\<^esub> = {s \ \\<^bsub>y\<^esub>. 
get\<^bsub>y\<^esub> s \ \\<^bsub>x\<^esub>}" by (auto simp add: lens_comp_def lens_source_def, blast, metis mwb_lens.put_put mwb_lens_def weak_lens.put_get) lemma id_vwb_lens [simp]: "vwb_lens 1\<^sub>L" by (unfold_locales, simp_all add: id_lens_def) lemma unit_vwb_lens [simp]: "vwb_lens 0\<^sub>L" by (unfold_locales, simp_all add: zero_lens_def) lemma comp_vwb_lens: "\ vwb_lens x; vwb_lens y \ \ vwb_lens (x ;\<^sub>L y)" by (unfold_locales, simp_all add: lens_comp_def weak_lens.put_closure) lemma unit_ief_lens: "ief_lens 0\<^sub>L" by (unfold_locales, simp_all add: zero_lens_def) text \Lens plus requires that the lenses be independent to show closure.\ lemma plus_mwb_lens: assumes "mwb_lens x" "mwb_lens y" "x \ y" shows "mwb_lens (x +\<^sub>L y)" using assms apply (unfold_locales) apply (simp_all add: lens_plus_def prod.case_eq_if lens_indep_sym) apply (simp add: lens_indep_comm) done lemma plus_wb_lens: assumes "wb_lens x" "wb_lens y" "x \ y" shows "wb_lens (x +\<^sub>L y)" using assms apply (unfold_locales, simp_all add: lens_plus_def) apply (simp add: lens_indep_sym prod.case_eq_if) done -lemma plus_vwb_lens: +lemma plus_vwb_lens [simp]: assumes "vwb_lens x" "vwb_lens y" "x \ y" shows "vwb_lens (x +\<^sub>L y)" using assms apply (unfold_locales, simp_all add: lens_plus_def) apply (simp add: lens_indep_sym prod.case_eq_if) apply (simp add: lens_indep_comm prod.case_eq_if) done lemma source_plus_lens: assumes "mwb_lens x" "mwb_lens y" "x \ y" shows "\\<^bsub>x +\<^sub>L y\<^esub> = \\<^bsub>x\<^esub> \ \\<^bsub>y\<^esub>" apply (auto simp add: lens_source_def lens_plus_def) apply (meson assms(3) lens_indep_comm) apply (metis assms(1) mwb_lens.weak_get_put mwb_lens_weak weak_lens.put_closure) done lemma prod_mwb_lens: "\ mwb_lens X; mwb_lens Y \ \ mwb_lens (X \\<^sub>L Y)" by (unfold_locales, simp_all add: lens_prod_def prod.case_eq_if) lemma prod_wb_lens: "\ wb_lens X; wb_lens Y \ \ wb_lens (X \\<^sub>L Y)" by (unfold_locales, simp_all add: lens_prod_def prod.case_eq_if) lemma prod_vwb_lens: "\ vwb_lens X; vwb_lens Y \ \ vwb_lens (X \\<^sub>L Y)" by (unfold_locales, simp_all add: lens_prod_def prod.case_eq_if) lemma prod_bij_lens: "\ bij_lens X; bij_lens Y \ \ bij_lens (X \\<^sub>L Y)" by (unfold_locales, simp_all add: lens_prod_def prod.case_eq_if) lemma fst_vwb_lens: "vwb_lens fst\<^sub>L" by (unfold_locales, simp_all add: fst_lens_def prod.case_eq_if) lemma snd_vwb_lens: "vwb_lens snd\<^sub>L" by (unfold_locales, simp_all add: snd_lens_def prod.case_eq_if) lemma id_bij_lens: "bij_lens 1\<^sub>L" by (unfold_locales, simp_all add: id_lens_def) lemma inv_id_lens: "inv\<^sub>L 1\<^sub>L = 1\<^sub>L" by (auto simp add: lens_inv_def id_lens_def lens_create_def) +lemma inv_inv_lens: "bij_lens X \ inv\<^sub>L (inv\<^sub>L X) = X" + apply (cases X) + apply (auto simp add: lens_defs fun_eq_iff) + apply (metis (no_types) bij_lens.strong_get_put bij_lens_def select_convs(2) weak_lens.put_get) + done + lemma lens_inv_bij: "bij_lens X \ bij_lens (inv\<^sub>L X)" by (unfold_locales, simp_all add: lens_inv_def lens_create_def) lemma swap_bij_lens: "bij_lens swap\<^sub>L" by (unfold_locales, simp_all add: lens_plus_def prod.case_eq_if fst_lens_def snd_lens_def) subsection \Composition Laws\ text \Lens composition is monoidal, with unit @{term "1\<^sub>L"}, as the following theorems demonstrate. It also has @{term "0\<^sub>L"} as a right annihilator. 
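 A small worked example, added purely for illustration: composing the first-projection lens with itself, $\lfst \lcomp \lfst$, views the first component of the first component of a nested pair, so its get sends $((1, 2), 3)$ to $1$ and putting $9$ through it gives $((9, 2), 3)$. The identity laws below then specialise to @{text "1\<^sub>L ;\<^sub>L fst\<^sub>L = fst\<^sub>L"} and @{text "fst\<^sub>L ;\<^sub>L 1\<^sub>L = fst\<^sub>L"}.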
\ -lemma lens_comp_assoc: "(X ;\<^sub>L Y) ;\<^sub>L Z = X ;\<^sub>L (Y ;\<^sub>L Z)" +lemma lens_comp_assoc: "X ;\<^sub>L (Y ;\<^sub>L Z) = (X ;\<^sub>L Y) ;\<^sub>L Z" by (auto simp add: lens_comp_def) lemma lens_comp_left_id [simp]: "1\<^sub>L ;\<^sub>L X = X" by (simp add: id_lens_def lens_comp_def) lemma lens_comp_right_id [simp]: "X ;\<^sub>L 1\<^sub>L = X" by (simp add: id_lens_def lens_comp_def) lemma lens_comp_anhil [simp]: "wb_lens X \ 0\<^sub>L ;\<^sub>L X = 0\<^sub>L" by (simp add: zero_lens_def lens_comp_def comp_def) +lemma lens_comp_anhil_right [simp]: "wb_lens X \ X ;\<^sub>L 0\<^sub>L = 0\<^sub>L" + by (simp add: zero_lens_def lens_comp_def comp_def) + subsection \Independence Laws\ text \The zero lens @{term "0\<^sub>L"} is independent of any lens. This is because nothing can be observed or changed using @{term "0\<^sub>L"}. \ lemma zero_lens_indep [simp]: "0\<^sub>L \ X" by (auto simp add: zero_lens_def lens_indep_def) lemma zero_lens_indep' [simp]: "X \ 0\<^sub>L" by (auto simp add: zero_lens_def lens_indep_def) text \Lens independence is irreflexive, but only for effectual lenses as otherwise nothing can be observed.\ lemma lens_indep_quasi_irrefl: "\ wb_lens x; eff_lens x \ \ \ (x \ x)" by (auto simp add: lens_indep_def ief_lens_def ief_lens_axioms_def, metis (full_types) wb_lens.get_put) text \Lens independence is a congruence with respect to composition, as the following properties demonstrate.\ lemma lens_indep_left_comp [simp]: "\ mwb_lens z; x \ y \ \ (x ;\<^sub>L z) \ (y ;\<^sub>L z)" apply (rule lens_indepI) apply (auto simp add: lens_comp_def) apply (simp add: lens_indep_comm) apply (simp add: lens_indep_sym) done lemma lens_indep_right_comp: "y \ z \ (x ;\<^sub>L y) \ (x ;\<^sub>L z)" apply (auto intro!: lens_indepI simp add: lens_comp_def) using lens_indep_comm lens_indep_sym apply fastforce apply (simp add: lens_indep_sym) done lemma lens_indep_left_ext [intro]: "y \ z \ (x ;\<^sub>L y) \ z" apply (auto intro!: lens_indepI simp add: lens_comp_def) apply (simp add: lens_indep_comm) apply (simp add: lens_indep_sym) done lemma lens_indep_right_ext [intro]: "x \ z \ x \ (y ;\<^sub>L z)" by (simp add: lens_indep_left_ext lens_indep_sym) lemma lens_comp_indep_cong_left: "\ mwb_lens Z; X ;\<^sub>L Z \ Y ;\<^sub>L Z \ \ X \ Y" apply (rule lens_indepI) apply (rename_tac u v \) apply (drule_tac u=u and v=v and \="create\<^bsub>Z\<^esub> \" in lens_indep_comm) apply (simp add: lens_comp_def) apply (meson mwb_lens_weak weak_lens.view_determination) apply (rename_tac v \) apply (drule_tac v=v and \="create\<^bsub>Z\<^esub> \" in lens_indep_get) apply (simp add: lens_comp_def) apply (drule lens_indep_sym) apply (rename_tac u \) apply (drule_tac v=u and \="create\<^bsub>Z\<^esub> \" in lens_indep_get) apply (simp add: lens_comp_def) done lemma lens_comp_indep_cong: "mwb_lens Z \ (X ;\<^sub>L Z) \ (Y ;\<^sub>L Z) \ X \ Y" using lens_comp_indep_cong_left lens_indep_left_comp by blast text \The first and second lenses are independent since the view different parts of a product source.\ lemma fst_snd_lens_indep [simp]: "fst\<^sub>L \ snd\<^sub>L" by (simp add: lens_indep_def fst_lens_def snd_lens_def) lemma snd_fst_lens_indep [simp]: "snd\<^sub>L \ fst\<^sub>L" by (simp add: lens_indep_def fst_lens_def snd_lens_def) lemma split_prod_lens_indep: assumes "mwb_lens X" shows "(fst\<^sub>L ;\<^sub>L X) \ (snd\<^sub>L ;\<^sub>L X)" using assms fst_snd_lens_indep lens_indep_left_comp vwb_lens_mwb by blast text \Lens independence is preserved by summation.\ lemma plus_pres_lens_indep 
[simp]: "\ X \ Z; Y \ Z \ \ (X +\<^sub>L Y) \ Z" apply (rule lens_indepI) apply (simp_all add: lens_plus_def prod.case_eq_if) apply (simp add: lens_indep_comm) apply (simp add: lens_indep_sym) done lemma plus_pres_lens_indep' [simp]: "\ X \ Y; X \ Z \ \ X \ Y +\<^sub>L Z" by (auto intro: lens_indep_sym plus_pres_lens_indep) text \Lens independence is preserved by product.\ lemma lens_indep_prod: "\ X\<^sub>1 \ X\<^sub>2; Y\<^sub>1 \ Y\<^sub>2 \ \ X\<^sub>1 \\<^sub>L Y\<^sub>1 \ X\<^sub>2 \\<^sub>L Y\<^sub>2" apply (rule lens_indepI) apply (auto simp add: lens_prod_def prod.case_eq_if lens_indep_comm map_prod_def) apply (simp_all add: lens_indep_sym) -done + done + +subsection \ Compatibility Laws \ + +lemma zero_lens_compat [simp]: "0\<^sub>L ##\<^sub>L X" + by (auto simp add: zero_lens_def lens_override_def lens_compat_def) + +lemma id_lens_compat [simp]: "vwb_lens X \ 1\<^sub>L ##\<^sub>L X" + by (auto simp add: id_lens_def lens_override_def lens_compat_def) subsection \Algebraic Laws\ text \Lens plus distributes to the right through composition.\ lemma plus_lens_distr: "mwb_lens Z \ (X +\<^sub>L Y) ;\<^sub>L Z = (X ;\<^sub>L Z) +\<^sub>L (Y ;\<^sub>L Z)" by (auto simp add: lens_comp_def lens_plus_def comp_def) text \The first lens projects the first part of a summation.\ lemma fst_lens_plus: "wb_lens y \ fst\<^sub>L ;\<^sub>L (x +\<^sub>L y) = x" by (simp add: fst_lens_def lens_plus_def lens_comp_def comp_def) text \The second law requires independence as we have to apply x first, before y\ lemma snd_lens_plus: "\ wb_lens x; x \ y \ \ snd\<^sub>L ;\<^sub>L (x +\<^sub>L y) = y" apply (simp add: snd_lens_def lens_plus_def lens_comp_def comp_def) apply (subst lens_indep_comm) apply (simp_all) done text \The swap lens switches over a summation.\ lemma lens_plus_swap: "X \ Y \ swap\<^sub>L ;\<^sub>L (X +\<^sub>L Y) = (Y +\<^sub>L X)" by (auto simp add: lens_plus_def fst_lens_def snd_lens_def id_lens_def lens_comp_def lens_indep_comm) text \The first, second, and swap lenses are all closely related.\ lemma fst_snd_id_lens: "fst\<^sub>L +\<^sub>L snd\<^sub>L = 1\<^sub>L" by (auto simp add: lens_plus_def fst_lens_def snd_lens_def id_lens_def) lemma swap_lens_idem: "swap\<^sub>L ;\<^sub>L swap\<^sub>L = 1\<^sub>L" by (simp add: fst_snd_id_lens lens_indep_sym lens_plus_swap) lemma swap_lens_fst: "fst\<^sub>L ;\<^sub>L swap\<^sub>L = snd\<^sub>L" by (simp add: fst_lens_plus fst_vwb_lens) lemma swap_lens_snd: "snd\<^sub>L ;\<^sub>L swap\<^sub>L = fst\<^sub>L" by (simp add: lens_indep_sym snd_lens_plus snd_vwb_lens) text \The product lens can be rewritten as a sum lens.\ lemma prod_as_plus: "X \\<^sub>L Y = X ;\<^sub>L fst\<^sub>L +\<^sub>L Y ;\<^sub>L snd\<^sub>L" by (auto simp add: lens_prod_def fst_lens_def snd_lens_def lens_comp_def lens_plus_def) lemma prod_lens_id_equiv: "1\<^sub>L \\<^sub>L 1\<^sub>L = 1\<^sub>L" by (auto simp add: lens_prod_def id_lens_def) lemma prod_lens_comp_plus: "X\<^sub>2 \ Y\<^sub>2 \ ((X\<^sub>1 \\<^sub>L Y\<^sub>1) ;\<^sub>L (X\<^sub>2 +\<^sub>L Y\<^sub>2)) = (X\<^sub>1 ;\<^sub>L X\<^sub>2) +\<^sub>L (Y\<^sub>1 ;\<^sub>L Y\<^sub>2)" by (auto simp add: lens_comp_def lens_plus_def lens_prod_def prod.case_eq_if fun_eq_iff) text \The following laws about quotient are similar to their arithmetic analogues. 
Lens quotient reverse the effect of a composition.\ lemma lens_comp_quotient: "weak_lens Y \ (X ;\<^sub>L Y) /\<^sub>L Y = X" by (simp add: lens_quotient_def lens_comp_def) lemma lens_quotient_id: "weak_lens X \ (X /\<^sub>L X) = 1\<^sub>L" by (force simp add: lens_quotient_def id_lens_def) lemma lens_quotient_id_denom: "X /\<^sub>L 1\<^sub>L = X" by (simp add: lens_quotient_def id_lens_def lens_create_def) lemma lens_quotient_unit: "weak_lens X \ (0\<^sub>L /\<^sub>L X) = 0\<^sub>L" by (simp add: lens_quotient_def zero_lens_def) - +lemma lens_obs_eq_zero: "s\<^sub>1 \\<^bsub>0\<^sub>L\<^esub> s\<^sub>2 = (s\<^sub>1 = s\<^sub>2)" + by (simp add: lens_defs) + +lemma lens_obs_eq_one: "s\<^sub>1 \\<^bsub>1\<^sub>L\<^esub> s\<^sub>2" + by (simp add: lens_defs) + +lemma lens_obs_eq_as_override: "vwb_lens X \ s\<^sub>1 \\<^bsub>X\<^esub> s\<^sub>2 \ (s\<^sub>2 = s\<^sub>1 \\<^sub>L s\<^sub>2 on X)" + by (auto simp add: lens_defs; metis vwb_lens.put_eq) + end \ No newline at end of file diff --git a/thys/Optics/Lens_Instances.thy b/thys/Optics/Lens_Instances.thy --- a/thys/Optics/Lens_Instances.thy +++ b/thys/Optics/Lens_Instances.thy @@ -1,296 +1,345 @@ section \Lens Instances\ theory Lens_Instances - imports Lens_Order "HOL-Eisbach.Eisbach" + imports Lens_Order Lens_Symmetric "HOL-Eisbach.Eisbach" keywords "alphabet" :: "thy_defn" begin text \In this section we define a number of concrete instantiations of the lens locales, including functions lenses, list lenses, and record lenses.\ subsection \Function Lens\ text \A function lens views the valuation associated with a particular domain element @{typ "'a"}. We require that range type of a lens function has cardinality of at least 2; this ensures that properties of independence are provable.\ definition fun_lens :: "'a \ ('b::two \ ('a \ 'b))" where [lens_defs]: "fun_lens x = \ lens_get = (\ f. f x), lens_put = (\ f u. f(x := u)) \" -lemma fun_wb_lens: "wb_lens (fun_lens x)" +lemma fun_vwb_lens: "vwb_lens (fun_lens x)" by (unfold_locales, simp_all add: fun_lens_def) text \Two function lenses are independent if and only if the domain elements are different.\ lemma fun_lens_indep: "fun_lens x \ fun_lens y \ x \ y" proof - obtain u v :: "'a::two" where "u \ v" using two_diff by auto thus ?thesis by (auto simp add: fun_lens_def lens_indep_def) qed subsection \Function Range Lens\ text \The function range lens allows us to focus on a particular region of a function's range.\ definition fun_ran_lens :: "('c \ 'b) \ (('a \ 'b) \ '\) \ (('a \ 'c) \ '\)" where [lens_defs]: "fun_ran_lens X Y = \ lens_get = \ s. get\<^bsub>X\<^esub> \ get\<^bsub>Y\<^esub> s , lens_put = \ s v. put\<^bsub>Y\<^esub> s (\ x::'a. put\<^bsub>X\<^esub> (get\<^bsub>Y\<^esub> s x) (v x)) \" lemma fun_ran_mwb_lens: "\ mwb_lens X; mwb_lens Y \ \ mwb_lens (fun_ran_lens X Y)" by (unfold_locales, auto simp add: fun_ran_lens_def) lemma fun_ran_wb_lens: "\ wb_lens X; wb_lens Y \ \ wb_lens (fun_ran_lens X Y)" by (unfold_locales, auto simp add: fun_ran_lens_def) lemma fun_ran_vwb_lens: "\ vwb_lens X; vwb_lens Y \ \ vwb_lens (fun_ran_lens X Y)" by (unfold_locales, auto simp add: fun_ran_lens_def) subsection \Map Lens\ text \The map lens allows us to focus on a particular region of a partial function's range. It is only a mainly well-behaved lens because it does not satisfy the PutGet law when the view is not in the domain.\ definition map_lens :: "'a \ ('b \ ('a \ 'b))" where [lens_defs]: "map_lens x = \ lens_get = (\ f. the (f x)), lens_put = (\ f u. 
f(x \ u)) \" lemma map_mwb_lens: "mwb_lens (map_lens x)" by (unfold_locales, simp_all add: map_lens_def) lemma source_map_lens: "\\<^bsub>map_lens x\<^esub> = {f. x \ dom(f)}" by (force simp add: map_lens_def lens_source_def) subsection \List Lens\ text \The list lens allows us to view a particular element of a list. In order to show it is mainly well-behaved we need to define to additional list functions. The following function adds a number undefined elements to the end of a list.\ definition list_pad_out :: "'a list \ nat \ 'a list" where "list_pad_out xs k = xs @ replicate (k + 1 - length xs) undefined" text \The following function is like @{term "list_update"} but it adds additional elements to the list if the list isn't long enough first.\ definition list_augment :: "'a list \ nat \ 'a \ 'a list" where "list_augment xs k v = (list_pad_out xs k)[k := v]" text \The following function is like @{term nth} but it expressly returns @{term undefined} when the list isn't long enough.\ definition nth' :: "'a list \ nat \ 'a" where "nth' xs i = (if (length xs > i) then xs ! i else undefined)" text \We can prove some additional laws about list update and append.\ lemma list_update_append_lemma1: "i < length xs \ xs[i := v] @ ys = (xs @ ys)[i := v]" by (simp add: list_update_append) lemma list_update_append_lemma2: "i < length ys \ xs @ ys[i := v] = (xs @ ys)[i + length xs := v]" by (simp add: list_update_append) text \We can also prove some laws about our new operators.\ lemma nth'_0 [simp]: "nth' (x # xs) 0 = x" by (simp add: nth'_def) lemma nth'_Suc [simp]: "nth' (x # xs) (Suc n) = nth' xs n" by (simp add: nth'_def) lemma list_augment_0 [simp]: "list_augment (x # xs) 0 y = y # xs" by (simp add: list_augment_def list_pad_out_def) lemma list_augment_Suc [simp]: "list_augment (x # xs) (Suc n) y = x # list_augment xs n y" by (simp add: list_augment_def list_pad_out_def) - + lemma list_augment_twice: - "list_augment (list_augment xs i u) j v = (list_pad_out xs (max i j))[i := u, j := v]" + "list_augment (list_augment xs i u) j v = (list_pad_out xs (max i j))[i:=u, j:=v]" apply (auto simp add: list_augment_def list_pad_out_def list_update_append_lemma1 replicate_add[THEN sym] max_def) apply (metis Suc_le_mono add.commute diff_diff_add diff_le_mono le_add_diff_inverse2) done lemma list_augment_last [simp]: "list_augment (xs @ [y]) (length xs) z = xs @ [z]" by (induct xs, simp_all) lemma list_augment_idem [simp]: "i < length xs \ list_augment xs i (xs ! i) = xs" by (simp add: list_augment_def list_pad_out_def) text \We can now prove that @{term list_augment} is commutative for different (arbitrary) indices.\ lemma list_augment_commute: "i \ j \ list_augment (list_augment \ j v) i u = list_augment (list_augment \ i u) j v" by (simp add: list_augment_twice list_update_swap max.commute) text \We can also prove that we can always retrieve an element we have added to the list, since @{term list_augment} extends the list when necessary. This isn't true of @{term list_update}. \ lemma nth_list_augment: "list_augment xs k v ! k = v" by (simp add: list_augment_def list_pad_out_def) lemma nth'_list_augment: "nth' (list_augment xs k v) k = v" by (auto simp add: nth'_def nth_list_augment list_augment_def list_pad_out_def) text \ The length is expanded if not already long enough, or otherwise left as it is. 
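 For example, as an illustrative calculation: @{text "list_augment [a, b] 3 z"} first pads the two-element list with @{term undefined} elements and then updates index 3, giving the four-element list @{text "[a, b, undefined, z]"}, in line with the first lemma below; augmenting at index 0 or 1 instead leaves the length at 2, in line with the second.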
\ lemma length_list_augment_1: "k \ length xs \ length (list_augment xs k v) = Suc k" by (simp add: list_augment_def list_pad_out_def) lemma length_list_augment_2: "k < length xs \ length (list_augment xs k v) = length xs" by (simp add: list_augment_def list_pad_out_def) text \We also have it that @{term list_augment} cancels itself.\ lemma list_augment_same_twice: "list_augment (list_augment xs k u) k v = list_augment xs k v" by (simp add: list_augment_def list_pad_out_def) lemma nth'_list_augment_diff: "i \ j \ nth' (list_augment \ i v) j = nth' \ j" by (auto simp add: list_augment_def list_pad_out_def nth_append nth'_def) text \Finally we can create the list lenses, of which there are three varieties. One that allows us to view an index, one that allows us to view the head, and one that allows us to view the tail. They are all mainly well-behaved lenses.\ definition list_lens :: "nat \ ('a::two \ 'a list)" where [lens_defs]: "list_lens i = \ lens_get = (\ xs. nth' xs i) , lens_put = (\ xs x. list_augment xs i x) \" -abbreviation "hd_lens \ list_lens 0" +abbreviation hd_lens ("hd\<^sub>L") where "hd_lens \ list_lens 0" -definition tl_lens :: "'a list \ 'a list" where +definition tl_lens :: "'a list \ 'a list" ("tl\<^sub>L") where [lens_defs]: "tl_lens = \ lens_get = (\ xs. tl xs) , lens_put = (\ xs xs'. hd xs # xs') \" lemma list_mwb_lens: "mwb_lens (list_lens x)" by (unfold_locales, simp_all add: list_lens_def nth'_list_augment list_augment_same_twice) text \ The set of constructible sources is precisely those where the length is greater than the given index. \ lemma source_list_lens: "\\<^bsub>list_lens i\<^esub> = {xs. length xs > i}" apply (auto simp add: lens_source_def list_lens_def) apply (metis length_list_augment_1 length_list_augment_2 lessI not_less) apply (metis list_augment_idem) done lemma tail_lens_mwb: - "mwb_lens tl_lens" + "mwb_lens tl\<^sub>L" by (unfold_locales, simp_all add: tl_lens_def) -lemma source_tail_lens: "\\<^bsub>tl_lens\<^esub> = {xs. xs \ []}" +lemma source_tail_lens: "\\<^bsub>tl\<^sub>L\<^esub> = {xs. xs \ []}" using list.exhaust_sel by (auto simp add: tl_lens_def lens_source_def) text \Independence of list lenses follows when the two indices are different.\ lemma list_lens_indep: "i \ j \ list_lens i \ list_lens j" by (simp add: list_lens_def lens_indep_def list_augment_commute nth'_list_augment_diff) lemma hd_tl_lens_indep [simp]: - "hd_lens \ tl_lens" + "hd\<^sub>L \ tl\<^sub>L" apply (rule lens_indepI) apply (simp_all add: list_lens_def tl_lens_def) apply (metis hd_conv_nth hd_def length_greater_0_conv list.case(1) nth'_def nth'_list_augment) apply (metis (full_types) hd_conv_nth hd_def length_greater_0_conv list.case(1) nth'_def) - apply (metis Nitpick.size_list_simp(2) One_nat_def add_Suc_right append.simps(1) append_Nil2 diff_Suc_Suc diff_zero hd_Cons_tl list.inject list.size(4) list_augment_0 list_augment_def list_augment_same_twice list_pad_out_def nth_list_augment replicate.simps(1) replicate.simps(2) tl_Nil) + apply (metis One_nat_def diff_Suc_Suc diff_zero length_0_conv length_list_augment_1 length_tl linorder_not_less list.exhaust list.sel(2) list.sel(3) list_augment_0 not_less_zero) done +lemma hd_tl_lens_pbij: "pbij_lens (hd\<^sub>L +\<^sub>L tl\<^sub>L)" + by (unfold_locales, auto simp add: lens_defs) + subsection \Record Field Lenses\ text \We also add support for record lenses. Every record created can yield a lens for each field. These cannot be created generically and thus must be defined case by case as new records are created. 
We thus create a new Isabelle outer syntax command \textbf{alphabet} which enables this. We first create syntax that allows us to obtain a lens from a given field using the internal record syntax translations.\ abbreviation (input) "fld_put f \ (\ \ u. f (\_. u) \)" -syntax "_FLDLENS" :: "id \ ('a \ 'r)" ("FLDLENS _") -translations "FLDLENS x" => "\ lens_get = x, lens_put = CONST fld_put (_update_name x) \" +syntax + "_FLDLENS" :: "id \ logic" ("FLDLENS _") +translations + "FLDLENS x" => "\ lens_get = x, lens_put = CONST fld_put (_update_name x) \" + +text \ We also allow the extraction of the "base lens", which characterises all the fields added + by a record without the extension. \ + +syntax + "_BASELENS" :: "id \ logic" ("BASELENS _") + +abbreviation (input) "base_lens t e m \ \ lens_get = t, lens_put = \ s v. e v (m s) \" + +ML \ + fun baselens_tr [Free (name, _)] = + let + val extend = Free (name ^ ".extend", dummyT); + val truncate = Free (name ^ ".truncate", dummyT); + val more = Free (name ^ ".more", dummyT); + in + Const (@{const_syntax "base_lens"}, dummyT) $ truncate $ extend $ more + end + | baselens_tr _ = raise Match; +\ + +parse_translation \[(@{syntax_const "_BASELENS"}, K baselens_tr)]\ text \We also introduce the \textbf{alphabet} command that creates a record with lenses for each field. For each field a lens is created together with a proof that it is very well-behaved, and for each pair of lenses an independence theorem is generated. Alphabets can also be extended which yields sublens proofs between the extension field lens and record extension lenses. \ ML_file \Lens_Record.ML\ text \The following theorem attribute stores splitting theorems for alphabet types which which is useful for proof automation.\ named_theorems alpha_splits +subsection \Type Definition Lens\ + +text \ Every type defined by a \<^bold>\typedef\ command induces a partial bijective lens constructed + using the abstraction and representation functions. \ + +context type_definition +begin + +definition typedef_lens :: "'b \ 'a" ("typedef\<^sub>L") where +[lens_defs]: "typedef\<^sub>L = \ lens_get = Abs, lens_put = (\ s. Rep) \" + +lemma pbij_typedef_lens [simp]: "pbij_lens typedef\<^sub>L" + by (unfold_locales, simp_all add: lens_defs Rep_inverse) + +lemma source_typedef_lens: "\\<^bsub>typedef\<^sub>L\<^esub> = A" + using Rep_cases by (auto simp add: lens_source_def lens_defs Rep) + +lemma bij_typedef_lens_UNIV: "A = UNIV \ bij_lens typedef\<^sub>L" + by (auto intro: pbij_vwb_is_bij_lens simp add: mwb_UNIV_src_is_vwb_lens source_typedef_lens) + +end + subsection \Mapper Lenses\ definition lmap_lens :: "(('\ \ '\) \ ('\ \ '\)) \ (('\ \ '\) \ '\ \ '\) \ ('\ \ '\) \ ('\ \ '\) \ ('\ \ '\)" where [lens_defs]: "lmap_lens f g h l = \ lens_get = f (get\<^bsub>l\<^esub>), lens_put = g o (put\<^bsub>l\<^esub>) o h \" text \ The parse translation below yields a heterogeneous mapping lens for any record type. This achieved through the utility function above that constructs a functorial lens. This takes as input a heterogeneous mapping function that lifts a function on a record's extension type to an update on the entire record, and also the record's ``more'' function. The first input is given twice as it has different polymorphic types, being effectively a type functor construction which are not explicitly supported by HOL. 
We note that the \more_update\ function does something similar to the extension lifting, but is not precisely suitable here since it only considers homogeneous functions, namely of type \'a \ 'a\ rather than \'a \ 'b\. \ syntax "_lmap" :: "id \ logic" ("lmap[_]") ML \ fun lmap_tr [Free (name, _)] = let val extend = Free (name ^ ".extend", dummyT); val truncate = Free (name ^ ".truncate", dummyT); val more = Free (name ^ ".more", dummyT); val map_ext = Abs ("f", dummyT, Abs ("r", dummyT, extend $ (truncate $ Bound 0) $ (Bound 1 $ (more $ (Bound 0))))) in Const (@{const_syntax "lmap_lens"}, dummyT) $ map_ext $ map_ext $ more end | lmap_tr _ = raise Match; \ parse_translation \[(@{syntax_const "_lmap"}, K lmap_tr)]\ subsection \Lens Interpretation\ named_theorems lens_interp_laws locale lens_interp = interp begin declare meta_interp_law [lens_interp_laws] declare all_interp_law [lens_interp_laws] declare exists_interp_law [lens_interp_laws] end subsection \ Tactic \ text \ A simple tactic for simplifying lens expressions \ declare split_paired_all [alpha_splits] method lens_simp = (simp add: alpha_splits lens_defs prod.case_eq_if) -end \ No newline at end of file +end diff --git a/thys/Optics/Lens_Laws.thy b/thys/Optics/Lens_Laws.thy --- a/thys/Optics/Lens_Laws.thy +++ b/thys/Optics/Lens_Laws.thy @@ -1,317 +1,461 @@ section \Core Lens Laws\ theory Lens_Laws imports Two Interp begin subsection \Lens Signature\ text \This theory introduces the signature of lenses and indentifies the core algebraic hierarchy of lens classes, including laws for well-behaved, very well-behaved, and bijective lenses~\cite{Foster07,Fischer2015,Gibbons17}.\ record ('a, 'b) lens = lens_get :: "'b \ 'a" ("get\") lens_put :: "'b \ 'a \ 'b" ("put\") type_notation lens (infixr "\" 0) +text \ Alternative parameters ordering, inspired by Back and von Wright's refinement + calculus~\cite{Back1998}, which similarly uses two functions to characterise updates to variables. \ + +abbreviation (input) lens_set :: "('a \ 'b) \ 'a \ 'b \ 'b" ("lset\") where +"lens_set \ (\ X v s. put\<^bsub>X\<^esub> s v)" + text \ \begin{figure} \begin{center} \includegraphics[width=6cm]{figures/Lens} \end{center} \vspace{-5ex} \caption{Visualisation of a simple lens} \label{fig:Lens} \end{figure} A lens $X : \view \lto \src$, for source type $\src$ and view type $\view$, identifies $\view$ with a subregion of $\src$~\cite{Foster07,Foster09}, as illustrated in Figure~\ref{fig:Lens}. The arrow denotes $X$ and the hatched area denotes the subregion $\view$ it characterises. Transformations on $\view$ can be performed without affecting the parts of $\src$ outside the hatched area. The lens signature consists of a pair of functions $\lget_X : \src \Rightarrow \view$ that extracts a view from a source, and $\lput_X : \src \Rightarrow \view \Rightarrow \src$ that updates a view within a given source. \ named_theorems lens_defs -text \ \lens_source\ gives the set of constructible sources; that is those that can be built +text \ @{text lens_source} gives the set of constructible sources; that is those that can be built by putting a value into an arbitrary source. \ definition lens_source :: "('a \ 'b) \ 'b set" ("\\") where "lens_source X = {s. \ v s'. s = put\<^bsub>X\<^esub> s' v}" abbreviation some_source :: "('a \ 'b) \ 'b" ("src\") where "some_source X \ (SOME s. 
s \ \\<^bsub>X\<^esub>)" definition lens_create :: "('a \ 'b) \ 'a \ 'b" ("create\") where [lens_defs]: "create\<^bsub>X\<^esub> v = put\<^bsub>X\<^esub> (src\<^bsub>X\<^esub>) v" text \ Function $\lcreate_X~v$ creates an instance of the source type of $X$ by injecting $v$ as the view, and leaving the remaining context arbitrary. \ + +definition lens_update :: "('a \ 'b) \ ('a \ 'a) \ ('b \ 'b)" ("update\") where +[lens_defs]: "lens_update X f \ = put\<^bsub>X\<^esub> \ (f (get\<^bsub>X\<^esub> \))" + +text \ The update function is analogous to the record update function which lifts a function + on a view type to one on the source type. \ + +definition lens_obs_eq :: "('b \ 'a) \ 'a \ 'a \ bool" (infix "\\" 50) where +[lens_defs]: "s\<^sub>1 \\<^bsub>X\<^esub> s\<^sub>2 = (s\<^sub>1 = put\<^bsub>X\<^esub> s\<^sub>2 (get\<^bsub>X\<^esub> s\<^sub>1))" + +text \ This relation states that two sources are equivalent outside of the region characterised + by lens $X$. \ + +definition lens_override :: "('b \ 'a) \ 'a \ 'a \ 'a" (infixl "\\" 95) where +[lens_defs]: "S\<^sub>1 \\<^bsub>X\<^esub> S\<^sub>2 = put\<^bsub>X\<^esub> S\<^sub>1 (get\<^bsub>X\<^esub> S\<^sub>2)" + +abbreviation (input) lens_override' :: "'a \ 'a \ ('b \ 'a) \ 'a" ("_ \\<^sub>L _ on _" [95,0,96] 95) where +"S\<^sub>1 \\<^sub>L S\<^sub>2 on X \ S\<^sub>1 \\<^bsub>X\<^esub> S\<^sub>2" + +text \Lens override uses a lens to replace part of a source type with a given value for the + corresponding view.\ subsection \Weak Lenses\ text \ Weak lenses are the least constrained class of lenses in our algebraic hierarchy. They simply require that the PutGet law~\cite{Foster09,Fischer2015} is satisfied, meaning that $\lget$ is the inverse of $\lput$. \ locale weak_lens = fixes x :: "'a \ 'b" (structure) assumes put_get: "get (put \ v) = v" begin lemma source_nonempty: "\ s. s \ \" by (auto simp add: lens_source_def) lemma put_closure: "put \ v \ \" by (auto simp add: lens_source_def) lemma create_closure: "create v \ \" by (simp add: lens_create_def put_closure) lemma src_source [simp]: "src \ \" using some_in_eq source_nonempty by auto lemma create_get: "get (create v) = v" by (simp add: lens_create_def put_get) lemma create_inj: "inj create" by (metis create_get injI) - text \ The update function is analogous to the record update function which lifts a function - on a view type to one on the source type. \ - - definition update :: "('a \ 'a) \ ('b \ 'b)" where - [lens_defs]: "update f \ = put \ (f (get \))" - lemma get_update: "get (update f \) = f (get \)" - by (simp add: put_get update_def) + by (simp add: put_get lens_update_def) lemma view_determination: assumes "put \ u = put \ v" shows "u = v" by (metis assms put_get) lemma put_inj: "inj (put \)" by (simp add: injI view_determination) + end declare weak_lens.put_get [simp] declare weak_lens.create_get [simp] subsection \Well-behaved Lenses\ text \ Well-behaved lenses add to weak lenses that requirement that the GetPut law~\cite{Foster09,Fischer2015} is satisfied, meaning that $\lput$ is the inverse of $\lget$. \ locale wb_lens = weak_lens + assumes get_put: "put \ (get \) = \" begin lemma put_twice: "put (put \ v) v = put \ v" by (metis get_put put_get) lemma put_surjectivity: "\ \ v. put \ v = \" using get_put by blast lemma source_stability: "\ v. 
put \ v = \" using get_put by auto lemma source_UNIV [simp]: "\ = UNIV" by (metis UNIV_eq_I put_closure wb_lens.source_stability wb_lens_axioms) end declare wb_lens.get_put [simp] lemma wb_lens_weak [simp]: "wb_lens x \ weak_lens x" by (simp add: wb_lens_def) subsection \ Mainly Well-behaved Lenses \ text \ Mainly well-behaved lenses extend weak lenses with the PutPut law that shows how one put override a previous one. \ locale mwb_lens = weak_lens + assumes put_put: "put (put \ v) u = put \ u" begin lemma update_comp: "update f (update g \) = update (f \ g) \" - by (simp add: put_get put_put update_def) + by (simp add: put_get put_put lens_update_def) text \ Mainly well-behaved lenses give rise to a weakened version of the $get-put$ law, where the source must be within the set of constructible sources. \ lemma weak_get_put: "\ \ \ \ put \ (get \) = \" by (auto simp add: lens_source_def put_get put_put) lemma weak_source_determination: assumes "\ \ \" "\ \ \" "get \ = get \" "put \ v = put \ v" shows "\ = \" by (metis assms put_put weak_get_put) lemma weak_put_eq: assumes "\ \ \" "get \ = k" "put \ u = put \ v" shows "put \ k = \" by (metis assms put_put weak_get_put) text \ Provides $s$ is constructible, then @{term get} can be uniquely determined from @{term put} \ lemma weak_get_via_put: "s \ \ \ get s = (THE v. put s v = s)" by (rule sym, auto intro!: the_equality weak_get_put, metis put_get) end +abbreviation (input) "partial_lens \ mwb_lens" + declare mwb_lens.put_put [simp] declare mwb_lens.weak_get_put [simp] lemma mwb_lens_weak [simp]: "mwb_lens x \ weak_lens x" by (simp add: mwb_lens.axioms(1)) subsection \Very Well-behaved Lenses\ -text \Very well-behaved lenses combine all three laws, as in the literature~\cite{Foster09,Fischer2015}.\ +text \Very well-behaved lenses combine all three laws, as in the literature~\cite{Foster09,Fischer2015}. + The same set of axioms can be found in Back and von Wright's refinement calculus~\cite{Back1998}, + though with different names for the functions. \ locale vwb_lens = wb_lens + mwb_lens begin lemma source_determination: assumes "get \ = get \" "put \ v = put \ v" shows "\ = \" by (metis assms get_put put_put) lemma put_eq: assumes "get \ = k" "put \ u = put \ v" shows "put \ k = \" using assms weak_put_eq[of \ k u \ v] by (simp) text \ @{term get} can be uniquely determined from @{term put} \ lemma get_via_put: "get s = (THE v. put s v = s)" by (simp add: weak_get_via_put) + lemma get_surj: "surj get" + by (metis put_get surjI) + + text \ Observation equivalence is an equivalence relation. \ + + lemma lens_obs_equiv: "equivp (\)" + proof (rule equivpI) + show "reflp (\)" + by (rule reflpI, simp add: lens_obs_eq_def get_put) + show "symp (\)" + by (rule sympI, simp add: lens_obs_eq_def, metis get_put put_put) + show "transp (\)" + by (rule transpI, simp add: lens_obs_eq_def, metis put_put) + qed + end +abbreviation (input) "total_lens \ vwb_lens" + lemma vwb_lens_wb [simp]: "vwb_lens x \ wb_lens x" by (simp add: vwb_lens_def) lemma vwb_lens_mwb [simp]: "vwb_lens x \ mwb_lens x" using vwb_lens_def by auto +lemma mwb_UNIV_src_is_vwb_lens: + "\ mwb_lens X; \\<^bsub>X\<^esub> = UNIV \ \ vwb_lens X" + using vwb_lens_def wb_lens_axioms_def wb_lens_def by fastforce + +text \ Alternative characterisation: a very well-behaved (i.e. total) lens is a mainly well-behaved + (i.e. partial) lens whose source is the universe set. 
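 For illustration, the map lens of theory \textit{Lens\_Instances} sits strictly on the partial side of this divide: it is mainly well-behaved, but its source is restricted to those maps that define the chosen key, a proper subset of the universe, and so it is not very well-behaved.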
\ + +lemma vwb_lens_iff_mwb_UNIV_src: + "vwb_lens X \ (mwb_lens X \ \\<^bsub>X\<^esub> = UNIV)" + by (meson mwb_UNIV_src_is_vwb_lens vwb_lens_def wb_lens.source_UNIV) + subsection \ Ineffectual Lenses \ text \Ineffectual lenses can have no effect on the view type -- application of the $\lput$ function always yields the same source. They are thus, trivially, very well-behaved lenses.\ locale ief_lens = weak_lens + assumes put_inef: "put \ v = \" begin sublocale vwb_lens proof fix \ v u show "put \ (get \) = \" by (simp add: put_inef) show "put (put \ v) u = put \ u" by (simp add: put_inef) qed lemma ineffectual_const_get: "\ v. \ \\\. get \ = v" using put_get put_inef by auto end abbreviation "eff_lens X \ (weak_lens X \ (\ ief_lens X))" +subsection \ Partially Bijective Lenses \ + +locale pbij_lens = weak_lens + + assumes put_det: "put \ v = put \ v" +begin + + sublocale mwb_lens + proof + fix \ v u + show "put (put \ v) u = put \ u" + using put_det by blast + qed + + lemma put_is_create: "put \ v = create v" + by (simp add: lens_create_def put_det) + + lemma partial_get_put: "\ \ \ \ put \ (get \) = \" + by (metis put_det weak_get_put) + +end + +lemma pbij_lens_weak [simp]: + "pbij_lens x \ weak_lens x" + by (simp_all add: pbij_lens_def) + +lemma pbij_lens_mwb [simp]: "pbij_lens x \ mwb_lens x" + by (simp add: mwb_lens_axioms.intro mwb_lens_def pbij_lens.put_is_create) + +lemma pbij_alt_intro: + "\ weak_lens X; \ s. s \ \\<^bsub>X\<^esub> \ create\<^bsub>X\<^esub> (get\<^bsub>X\<^esub> s) = s \ \ pbij_lens X" + by (metis pbij_lens_axioms_def pbij_lens_def weak_lens.put_closure weak_lens.put_get) + subsection \ Bijective Lenses \ text \Bijective lenses characterise the situation where the source and view type are equivalent: in other words the view type full characterises the whole source type. It is often useful when the view type and source type are syntactically different, but nevertheless correspond precisely in terms of what they observe. Bijective lenses are formulates using the strong GetPut law~\cite{Foster09,Fischer2015}.\ locale bij_lens = weak_lens + assumes strong_get_put: "put \ (get \) = \" begin +sublocale pbij_lens +proof + fix \ v \ + show "put \ v = put \ v" + by (metis put_get strong_get_put) +qed + sublocale vwb_lens proof fix \ v u show "put \ (get \) = \" by (simp add: strong_get_put) - show "put (put \ v) u = put \ u" - by (metis bij_lens.strong_get_put bij_lens_axioms put_get) qed - + lemma put_bij: "bij_betw (put \) UNIV UNIV" by (metis bijI put_inj strong_get_put surj_def) - lemma put_is_create: "\ \ \ \ put \ v = create v" - by (metis create_get strong_get_put) - - lemma get_create: "\ \ \ \ create (get \) = \" + lemma get_create: "create (get \) = \" by (simp add: lens_create_def strong_get_put) end declare bij_lens.strong_get_put [simp] declare bij_lens.get_create [simp] lemma bij_lens_weak [simp]: "bij_lens x \ weak_lens x" by (simp_all add: bij_lens_def) +lemma bij_lens_pbij [simp]: + "bij_lens x \ pbij_lens x" + by (metis bij_lens.get_create bij_lens_def pbij_lens_axioms.intro pbij_lens_def weak_lens.put_get) + lemma bij_lens_vwb [simp]: "bij_lens x \ vwb_lens x" by (metis bij_lens.strong_get_put bij_lens_weak mwb_lens.intro mwb_lens_axioms.intro vwb_lens_def wb_lens.intro wb_lens_axioms.intro weak_lens.put_get) +text \ Alternative characterisation: a bijective lens is a partial bijective lens that is also + very well-behaved (i.e. total). 
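 For illustration, the sum of the head and tail lenses from theory \textit{Lens\_Instances} is partially bijective, since putting a pair $(v, vs)$ always yields the list with head $v$ and tail $vs$ regardless of the original source, yet it is not bijective because the empty list lies outside its source.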
\ + +lemma pbij_vwb_is_bij_lens: + "\ pbij_lens X; vwb_lens X \ \ bij_lens X" + by (unfold_locales, simp_all, meson pbij_lens.put_det vwb_lens.put_eq) + +lemma bij_lens_iff_pbij_vwb: + "bij_lens X \ (pbij_lens X \ vwb_lens X)" + using pbij_vwb_is_bij_lens by auto + subsection \Lens Independence\ text \ \begin{figure} \begin{center} \includegraphics[width=6cm]{figures/Independence} \end{center} \vspace{-5ex} \caption{Lens Independence} \label{fig:Indep} \end{figure} Lens independence shows when two lenses $X$ and $Y$ characterise disjoint regions of the source type, as illustrated in Figure~\ref{fig:Indep}. We specify this by requiring that the $\lput$ functions of the two lenses commute, and that the $\lget$ function of each lens is unaffected by application of $\lput$ from the corresponding lens. \ locale lens_indep = fixes X :: "'a \ 'c" and Y :: "'b \ 'c" assumes lens_put_comm: "put\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> \ v) u = put\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> \ u) v" and lens_put_irr1: "get\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> \ v) = get\<^bsub>X\<^esub> \" and lens_put_irr2: "get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> \ u) = get\<^bsub>Y\<^esub> \" notation lens_indep (infix "\" 50) lemma lens_indepI: - "\ \ u v \. lens_put x (lens_put y \ v) u = lens_put y (lens_put x \ u) v; - \ v \. lens_get x (lens_put y \ v) = lens_get x \; - \ u \. lens_get y (lens_put x \ u) = lens_get y \ \ \ x \ y" + "\ \ u v \. put\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) u = put\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) v; + \ v \. get\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) = get\<^bsub>x\<^esub> \; + \ u \. get\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) = get\<^bsub>y\<^esub> \ \ \ x \ y" by (simp add: lens_indep_def) text \Lens independence is symmetric.\ lemma lens_indep_sym: "x \ y \ y \ x" by (simp add: lens_indep_def) lemma lens_indep_comm: - "x \ y \ lens_put x (lens_put y \ v) u = lens_put y (lens_put x \ u) v" + "x \ y \ put\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) u = put\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) v" by (simp add: lens_indep_def) lemma lens_indep_get [simp]: assumes "x \ y" - shows "lens_get x (lens_put y \ v) = lens_get x \" + shows "get\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) = get\<^bsub>x\<^esub> \" using assms lens_indep_def by fastforce +text \ Characterisation of independence for two very well-behaved lenses \ + +lemma lens_indep_vwb_iff: + assumes "vwb_lens x" "vwb_lens y" + shows "x \ y \ (\ u v \. put\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) u = put\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) v)" +proof + assume "x \ y" + thus "\ u v \. put\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) u = put\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) v" + by (simp add: lens_indep_comm) +next + assume a: "\ u v \. put\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) u = put\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) v" + show "x \ y" + proof (unfold_locales) + fix \ v u + from a show "put\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) u = put\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) v" + by auto + show "get\<^bsub>x\<^esub> (put\<^bsub>y\<^esub> \ v) = get\<^bsub>x\<^esub> \" + by (metis a assms(1) vwb_lens.put_eq vwb_lens_wb wb_lens_def weak_lens.put_get) + show "get\<^bsub>y\<^esub> (put\<^bsub>x\<^esub> \ u) = get\<^bsub>y\<^esub> \" + by (metis a assms(2) vwb_lens.put_eq vwb_lens_wb wb_lens_def weak_lens.put_get) + qed +qed + +subsection \ Lens Compatibility \ + +text \ Lens compatibility is a weaker notion than independence. 
It allows that two lenses can overlap + so long as they manipulate the source in the same way in that region. It is most easily defined + in terms of a function for copying a region from one source to another using a lens. \ + +definition lens_compat (infix "##\<^sub>L" 50) where +[lens_defs]: "lens_compat X Y = (\s\<^sub>1 s\<^sub>2. s\<^sub>1 \\<^bsub>X\<^esub> s\<^sub>2 \\<^bsub>Y\<^esub> s\<^sub>2 = s\<^sub>1 \\<^bsub>Y\<^esub> s\<^sub>2 \\<^bsub>X\<^esub> s\<^sub>2)" + +lemma lens_compat_idem [simp]: "x ##\<^sub>L x" + by (simp add: lens_defs) + +lemma lens_compat_sym: "x ##\<^sub>L y \ y ##\<^sub>L x" + by (simp add: lens_defs) + +lemma lens_indep_compat [simp]: "x \ y \ x ##\<^sub>L y" + by (simp add: lens_override_def lens_compat_def lens_indep_comm) + end \ No newline at end of file diff --git a/thys/Optics/Lens_Order.thy b/thys/Optics/Lens_Order.thy --- a/thys/Optics/Lens_Order.thy +++ b/thys/Optics/Lens_Order.thy @@ -1,582 +1,634 @@ section \Order and Equivalence on Lenses\ theory Lens_Order imports Lens_Algebra begin subsection \Sub-lens Relation\ text \A lens $X$ is a sub-lens of $Y$ if there is a well-behaved lens $Z$ such that $X = Z \lcomp Y$, or in other words if $X$ can be expressed purely in terms of $Y$.\ definition sublens :: "('a \ 'c) \ ('b \ 'c) \ bool" (infix "\\<^sub>L" 55) where [lens_defs]: "sublens X Y = (\ Z :: ('a, 'b) lens. vwb_lens Z \ X = Z ;\<^sub>L Y)" text \Various lens classes are downward closed under the sublens relation.\ lemma sublens_pres_mwb: "\ mwb_lens Y; X \\<^sub>L Y \ \ mwb_lens X" by (unfold_locales, auto simp add: sublens_def lens_comp_def) lemma sublens_pres_wb: "\ wb_lens Y; X \\<^sub>L Y \ \ wb_lens X" by (unfold_locales, auto simp add: sublens_def lens_comp_def) lemma sublens_pres_vwb: "\ vwb_lens Y; X \\<^sub>L Y \ \ vwb_lens X" by (unfold_locales, auto simp add: sublens_def lens_comp_def) text \Sublens is a preorder as the following two theorems show.\ lemma sublens_refl: "X \\<^sub>L X" using id_vwb_lens sublens_def by fastforce lemma sublens_trans [trans]: "\ X \\<^sub>L Y; Y \\<^sub>L Z \ \ X \\<^sub>L Z" apply (auto simp add: sublens_def lens_comp_assoc) apply (rename_tac Z\<^sub>1 Z\<^sub>2) apply (rule_tac x="Z\<^sub>1 ;\<^sub>L Z\<^sub>2" in exI) apply (simp add: lens_comp_assoc) using comp_vwb_lens apply blast done -text \Sublens has a least element -- \0\<^sub>L\ -- and a greatest element -- \1\<^sub>L\. +text \Sublens has a least element -- @{text "0\<^sub>L"} -- and a greatest element -- @{text "1\<^sub>L"}. Intuitively, this shows that sublens orders how large a portion of the source type a particular - lens views, with \0\<^sub>L\ observing the least, and \1\<^sub>L\ observing the most.\ + lens views, with @{text "0\<^sub>L"} observing the least, and @{text "1\<^sub>L"} observing the most.\ lemma sublens_least: "wb_lens X \ 0\<^sub>L \\<^sub>L X" using sublens_def unit_vwb_lens by fastforce lemma sublens_greatest: "vwb_lens X \ X \\<^sub>L 1\<^sub>L" by (simp add: sublens_def) text \If $Y$ is a sublens of $X$ then any put using $X$ will necessarily erase any put using $Y$. 
Similarly, any two source types are observationally equivalent by $Y$ when performed following a put using $X$.\ lemma sublens_put_put: "\ mwb_lens X; Y \\<^sub>L X \ \ put\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> \ v) u = put\<^bsub>X\<^esub> \ u" by (auto simp add: sublens_def lens_comp_def) lemma sublens_obs_get: "\ mwb_lens X; Y \\<^sub>L X \ \ get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> \ v) = get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> \ v)" by (auto simp add: sublens_def lens_comp_def) text \Sublens preserves independence; in other words if $Y$ is independent of $Z$, then also any $X$ smaller than $Y$ is independent of $Z$.\ lemma sublens_pres_indep: "\ X \\<^sub>L Y; Y \ Z \ \ X \ Z" apply (auto intro!:lens_indepI simp add: sublens_def lens_comp_def lens_indep_comm) apply (simp add: lens_indep_sym) done +lemma sublens_pres_indep': + "\ X \\<^sub>L Y; Z \ Y \ \ Z \ X" + by (meson lens_indep_sym sublens_pres_indep) + +lemma sublens_compat: "\ vwb_lens X; vwb_lens Y; X \\<^sub>L Y \ \ X ##\<^sub>L Y" + unfolding lens_compat_def lens_override_def + by (metis (no_types, hide_lams) sublens_obs_get sublens_put_put vwb_lens_mwb vwb_lens_wb wb_lens.get_put) + text \Well-behavedness of lens quotient has sublens as a proviso. This is because we can only remove $X$ from $Y$ if $X$ is smaller than $Y$. \ lemma lens_quotient_mwb: "\ mwb_lens Y; X \\<^sub>L Y \ \ mwb_lens (X /\<^sub>L Y)" by (unfold_locales, auto simp add: lens_quotient_def lens_create_def sublens_def lens_comp_def comp_def) subsection \Lens Equivalence\ text \Using our preorder, we can also derive an equivalence on lenses as follows. It should be noted that this equality, like sublens, is heterogeneously typed -- it can compare lenses whose view types are different, so long as the source types are the same. We show that it is reflexive, symmetric, and transitive. \ definition lens_equiv :: "('a \ 'c) \ ('b \ 'c) \ bool" (infix "\\<^sub>L" 51) where [lens_defs]: "lens_equiv X Y = (X \\<^sub>L Y \ Y \\<^sub>L X)" lemma lens_equivI [intro]: "\ X \\<^sub>L Y; Y \\<^sub>L X \ \ X \\<^sub>L Y" by (simp add: lens_equiv_def) lemma lens_equiv_refl: "X \\<^sub>L X" by (simp add: lens_equiv_def sublens_refl) lemma lens_equiv_sym: "X \\<^sub>L Y \ Y \\<^sub>L X" by (simp add: lens_equiv_def) lemma lens_equiv_trans [trans]: "\ X \\<^sub>L Y; Y \\<^sub>L Z \ \ X \\<^sub>L Z" by (auto intro: sublens_trans simp add: lens_equiv_def) +lemma lens_equiv_pres_indep: + "\ X \\<^sub>L Y; Y \ Z \ \ X \ Z" + using lens_equiv_def sublens_pres_indep by blast + +lemma lens_equiv_pres_indep': + "\ X \\<^sub>L Y; Z \ Y \ \ Z \ X" + using lens_equiv_def sublens_pres_indep' by blast + +lemma lens_comp_cong_1: "X \\<^sub>L Y \ X ;\<^sub>L Z \\<^sub>L Y ;\<^sub>L Z" + unfolding lens_equiv_def + by (metis (no_types, lifting) lens_comp_assoc sublens_def) + subsection \Further Algebraic Laws\ text \This law explains the behaviour of lens quotient.\ lemma lens_quotient_comp: "\ weak_lens Y; X \\<^sub>L Y \ \ (X /\<^sub>L Y) ;\<^sub>L Y = X" by (auto simp add: lens_quotient_def lens_comp_def comp_def sublens_def) text \Plus distributes through quotient.\ lemma lens_quotient_plus: "\ mwb_lens Z; X \\<^sub>L Z; Y \\<^sub>L Z \ \ (X +\<^sub>L Y) /\<^sub>L Z = (X /\<^sub>L Z) +\<^sub>L (Y /\<^sub>L Z)" apply (auto simp add: lens_quotient_def lens_plus_def sublens_def lens_comp_def comp_def) apply (rule ext) apply (rule ext) apply (simp add: prod.case_eq_if) done text \There follows a number of laws relating sublens and summation. Firstly, sublens is preserved by summation. 
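 As a concrete instance, noted here for illustration: both $\lfst$ and $\lsnd$ are sublenses of @{text "1\<^sub>L"} and are independent, so the law below makes their sum a sublens of @{text "1\<^sub>L"} as well, which is consistent with the earlier observation that this sum is in fact equal to @{text "1\<^sub>L"}.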
\ lemma plus_pred_sublens: "\ mwb_lens Z; X \\<^sub>L Z; Y \\<^sub>L Z; X \ Y \ \ (X +\<^sub>L Y) \\<^sub>L Z" apply (auto simp add: sublens_def) apply (rename_tac Z\<^sub>1 Z\<^sub>2) apply (rule_tac x="Z\<^sub>1 +\<^sub>L Z\<^sub>2" in exI) apply (auto intro!: plus_wb_lens) apply (simp add: lens_comp_indep_cong_left plus_vwb_lens) apply (simp add: plus_lens_distr) done text \Intuitively, lens plus is associative. However we cannot prove this using HOL equality due to monomorphic typing of this operator. But since sublens and lens equivalence are both heterogeneous we can now prove this in the following three lemmas.\ lemma lens_plus_sub_assoc_1: "X +\<^sub>L Y +\<^sub>L Z \\<^sub>L (X +\<^sub>L Y) +\<^sub>L Z" apply (simp add: sublens_def) apply (rule_tac x="(fst\<^sub>L ;\<^sub>L fst\<^sub>L) +\<^sub>L (snd\<^sub>L ;\<^sub>L fst\<^sub>L) +\<^sub>L snd\<^sub>L" in exI) apply (auto) apply (rule plus_vwb_lens) apply (simp add: comp_vwb_lens fst_vwb_lens) apply (rule plus_vwb_lens) apply (simp add: comp_vwb_lens fst_vwb_lens snd_vwb_lens) apply (simp add: snd_vwb_lens) apply (simp add: lens_indep_left_ext) apply (rule lens_indep_sym) apply (rule plus_pres_lens_indep) using fst_snd_lens_indep fst_vwb_lens lens_indep_left_comp lens_indep_sym vwb_lens_mwb apply blast using fst_snd_lens_indep lens_indep_left_ext lens_indep_sym apply blast apply (auto simp add: lens_plus_def lens_comp_def fst_lens_def snd_lens_def prod.case_eq_if split_beta')[1] done lemma lens_plus_sub_assoc_2: "(X +\<^sub>L Y) +\<^sub>L Z \\<^sub>L X +\<^sub>L Y +\<^sub>L Z" apply (simp add: sublens_def) apply (rule_tac x="(fst\<^sub>L +\<^sub>L (fst\<^sub>L ;\<^sub>L snd\<^sub>L)) +\<^sub>L (snd\<^sub>L ;\<^sub>L snd\<^sub>L)" in exI) apply (auto) apply (rule plus_vwb_lens) apply (rule plus_vwb_lens) apply (simp add: fst_vwb_lens) apply (simp add: comp_vwb_lens fst_vwb_lens snd_vwb_lens) apply (rule lens_indep_sym) apply (rule lens_indep_left_ext) using fst_snd_lens_indep lens_indep_sym apply blast apply (auto intro: comp_vwb_lens simp add: snd_vwb_lens) apply (rule plus_pres_lens_indep) apply (simp add: lens_indep_left_ext lens_indep_sym) apply (simp add: snd_vwb_lens) apply (auto simp add: lens_plus_def lens_comp_def fst_lens_def snd_lens_def prod.case_eq_if split_beta')[1] done lemma lens_plus_assoc: "(X +\<^sub>L Y) +\<^sub>L Z \\<^sub>L X +\<^sub>L Y +\<^sub>L Z" by (simp add: lens_equivI lens_plus_sub_assoc_1 lens_plus_sub_assoc_2) text \We can similarly show that it is commutative.\ lemma lens_plus_sub_comm: "X \ Y \ X +\<^sub>L Y \\<^sub>L Y +\<^sub>L X" apply (simp add: sublens_def) apply (rule_tac x="snd\<^sub>L +\<^sub>L fst\<^sub>L" in exI) apply (auto) apply (simp add: fst_vwb_lens lens_indep_sym plus_vwb_lens snd_vwb_lens) apply (simp add: lens_indep_sym lens_plus_swap) done lemma lens_plus_comm: "X \ Y \ X +\<^sub>L Y \\<^sub>L Y +\<^sub>L X" by (simp add: lens_equivI lens_indep_sym lens_plus_sub_comm) + + text \Any composite lens is larger than an element of the lens, as demonstrated by the following four laws.\ lemma lens_plus_ub: "wb_lens Y \ X \\<^sub>L X +\<^sub>L Y" by (metis fst_lens_plus fst_vwb_lens sublens_def) lemma lens_plus_right_sublens: "\ vwb_lens Y; Y \ Z; X \\<^sub>L Z \ \ X \\<^sub>L Y +\<^sub>L Z" apply (auto simp add: sublens_def) apply (rename_tac Z') apply (rule_tac x="Z' ;\<^sub>L snd\<^sub>L" in exI) apply (auto) - using comp_vwb_lens snd_vwb_lens apply blast - apply (simp add: lens_comp_assoc snd_lens_plus) -done + using comp_vwb_lens snd_vwb_lens apply blast + apply (metis 
lens_comp_assoc snd_lens_plus vwb_lens_def) + done lemma lens_plus_mono_left: "\ Y \ Z; X \\<^sub>L Y \ \ X +\<^sub>L Z \\<^sub>L Y +\<^sub>L Z" apply (auto simp add: sublens_def) apply (rename_tac Z') apply (rule_tac x="Z' \\<^sub>L 1\<^sub>L" in exI) apply (subst prod_lens_comp_plus) apply (simp_all) using id_vwb_lens prod_vwb_lens apply blast -done + done lemma lens_plus_mono_right: "\ X \ Z; Y \\<^sub>L Z \ \ X +\<^sub>L Y \\<^sub>L X +\<^sub>L Z" by (metis prod_lens_comp_plus prod_vwb_lens sublens_def sublens_refl) text \If we compose a lens $X$ with lens $Y$ then naturally the resulting lens must be smaller than $Y$, as $X$ views a part of $Y$. \ lemma lens_comp_lb [simp]: "vwb_lens X \ X ;\<^sub>L Y \\<^sub>L Y" by (auto simp add: sublens_def) -text \We can now also show that \0\<^sub>L\ is the unit of lens plus\ +lemma sublens_comp [simp]: + assumes "vwb_lens b" "c \\<^sub>L a" + shows "(b ;\<^sub>L c) \\<^sub>L a" + by (metis assms sublens_def sublens_trans) + +text \We can now also show that @{text "0\<^sub>L"} is the unit of lens plus\ lemma lens_unit_plus_sublens_1: "X \\<^sub>L 0\<^sub>L +\<^sub>L X" by (metis lens_comp_lb snd_lens_plus snd_vwb_lens zero_lens_indep unit_wb_lens) lemma lens_unit_prod_sublens_2: "0\<^sub>L +\<^sub>L X \\<^sub>L X" apply (auto simp add: sublens_def) apply (rule_tac x="0\<^sub>L +\<^sub>L 1\<^sub>L" in exI) apply (auto) - apply (rule plus_vwb_lens) - apply (simp_all) apply (auto simp add: lens_plus_def zero_lens_def lens_comp_def id_lens_def prod.case_eq_if comp_def) apply (rule ext) apply (rule ext) apply (auto) done lemma lens_plus_left_unit: "0\<^sub>L +\<^sub>L X \\<^sub>L X" by (simp add: lens_equivI lens_unit_plus_sublens_1 lens_unit_prod_sublens_2) lemma lens_plus_right_unit: "X +\<^sub>L 0\<^sub>L \\<^sub>L X" using lens_equiv_trans lens_indep_sym lens_plus_comm lens_plus_left_unit zero_lens_indep by blast text \We can also show that both sublens and equivalence are congruences with respect to lens plus and lens product.\ lemma lens_plus_subcong: "\ Y\<^sub>1 \ Y\<^sub>2; X\<^sub>1 \\<^sub>L Y\<^sub>1; X\<^sub>2 \\<^sub>L Y\<^sub>2 \ \ X\<^sub>1 +\<^sub>L X\<^sub>2 \\<^sub>L Y\<^sub>1 +\<^sub>L Y\<^sub>2" by (metis prod_lens_comp_plus prod_vwb_lens sublens_def) lemma lens_plus_eq_left: "\ X \ Z; X \\<^sub>L Y \ \ X +\<^sub>L Z \\<^sub>L Y +\<^sub>L Z" by (meson lens_equiv_def lens_plus_mono_left sublens_pres_indep) lemma lens_plus_eq_right: "\ X \ Y; Y \\<^sub>L Z \ \ X +\<^sub>L Y \\<^sub>L X +\<^sub>L Z" by (meson lens_equiv_def lens_indep_sym lens_plus_mono_right sublens_pres_indep) lemma lens_plus_cong: assumes "X\<^sub>1 \ X\<^sub>2" "X\<^sub>1 \\<^sub>L Y\<^sub>1" "X\<^sub>2 \\<^sub>L Y\<^sub>2" shows "X\<^sub>1 +\<^sub>L X\<^sub>2 \\<^sub>L Y\<^sub>1 +\<^sub>L Y\<^sub>2" proof - have "X\<^sub>1 +\<^sub>L X\<^sub>2 \\<^sub>L Y\<^sub>1 +\<^sub>L X\<^sub>2" by (simp add: assms(1) assms(2) lens_plus_eq_left) moreover have "... 
\\<^sub>L Y\<^sub>1 +\<^sub>L Y\<^sub>2" using assms(1) assms(2) assms(3) lens_equiv_def lens_plus_eq_right sublens_pres_indep by blast ultimately show ?thesis using lens_equiv_trans by blast qed lemma prod_lens_sublens_cong: "\ X\<^sub>1 \\<^sub>L X\<^sub>2; Y\<^sub>1 \\<^sub>L Y\<^sub>2 \ \ (X\<^sub>1 \\<^sub>L Y\<^sub>1) \\<^sub>L (X\<^sub>2 \\<^sub>L Y\<^sub>2)" apply (auto simp add: sublens_def) apply (rename_tac Z\<^sub>1 Z\<^sub>2) apply (rule_tac x="Z\<^sub>1 \\<^sub>L Z\<^sub>2" in exI) apply (auto) using prod_vwb_lens apply blast apply (auto simp add: lens_prod_def lens_comp_def prod.case_eq_if) apply (rule ext, rule ext) apply (auto simp add: lens_prod_def lens_comp_def prod.case_eq_if) done lemma prod_lens_equiv_cong: "\ X\<^sub>1 \\<^sub>L X\<^sub>2; Y\<^sub>1 \\<^sub>L Y\<^sub>2 \ \ (X\<^sub>1 \\<^sub>L Y\<^sub>1) \\<^sub>L (X\<^sub>2 \\<^sub>L Y\<^sub>2)" by (simp add: lens_equiv_def prod_lens_sublens_cong) text \We also have the following "exchange" law that allows us to switch over a lens product and plus.\ lemma lens_plus_prod_exchange: "(X\<^sub>1 +\<^sub>L X\<^sub>2) \\<^sub>L (Y\<^sub>1 +\<^sub>L Y\<^sub>2) \\<^sub>L (X\<^sub>1 \\<^sub>L Y\<^sub>1) +\<^sub>L (X\<^sub>2 \\<^sub>L Y\<^sub>2)" proof (rule lens_equivI) show "(X\<^sub>1 +\<^sub>L X\<^sub>2) \\<^sub>L (Y\<^sub>1 +\<^sub>L Y\<^sub>2) \\<^sub>L (X\<^sub>1 \\<^sub>L Y\<^sub>1) +\<^sub>L (X\<^sub>2 \\<^sub>L Y\<^sub>2)" apply (simp add: sublens_def) apply (rule_tac x="((fst\<^sub>L ;\<^sub>L fst\<^sub>L) +\<^sub>L (fst\<^sub>L ;\<^sub>L snd\<^sub>L)) +\<^sub>L ((snd\<^sub>L ;\<^sub>L fst\<^sub>L) +\<^sub>L (snd\<^sub>L ;\<^sub>L snd\<^sub>L))" in exI) apply (auto) apply (auto intro!: plus_vwb_lens comp_vwb_lens fst_vwb_lens snd_vwb_lens lens_indep_right_comp) apply (auto intro!: lens_indepI simp add: lens_comp_def lens_plus_def fst_lens_def snd_lens_def) apply (auto simp add: lens_prod_def lens_plus_def lens_comp_def fst_lens_def snd_lens_def prod.case_eq_if comp_def)[1] apply (rule ext, rule ext, auto simp add: prod.case_eq_if) done show "(X\<^sub>1 \\<^sub>L Y\<^sub>1) +\<^sub>L (X\<^sub>2 \\<^sub>L Y\<^sub>2) \\<^sub>L (X\<^sub>1 +\<^sub>L X\<^sub>2) \\<^sub>L (Y\<^sub>1 +\<^sub>L Y\<^sub>2)" apply (simp add: sublens_def) apply (rule_tac x="((fst\<^sub>L ;\<^sub>L fst\<^sub>L) +\<^sub>L (fst\<^sub>L ;\<^sub>L snd\<^sub>L)) +\<^sub>L ((snd\<^sub>L ;\<^sub>L fst\<^sub>L) +\<^sub>L (snd\<^sub>L ;\<^sub>L snd\<^sub>L))" in exI) apply (auto) apply (auto intro!: plus_vwb_lens comp_vwb_lens fst_vwb_lens snd_vwb_lens lens_indep_right_comp) apply (auto intro!: lens_indepI simp add: lens_comp_def lens_plus_def fst_lens_def snd_lens_def) apply (auto simp add: lens_prod_def lens_plus_def lens_comp_def fst_lens_def snd_lens_def prod.case_eq_if comp_def)[1] apply (rule ext, rule ext, auto simp add: lens_prod_def prod.case_eq_if) done qed lemma lens_get_put_quasi_commute: "\ vwb_lens Y; X \\<^sub>L Y \ \ get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> s v) = put\<^bsub>X /\<^sub>L Y\<^esub> (get\<^bsub>Y\<^esub> s) v" proof - assume a1: "vwb_lens Y" assume a2: "X \\<^sub>L Y" have "\l la. put\<^bsub>l ;\<^sub>L la\<^esub> = (\b c. put\<^bsub>la\<^esub> (b::'b) (put\<^bsub>l\<^esub> (get\<^bsub>la\<^esub> b::'a) (c::'c)))" by (simp add: lens_comp_def) then have "\l la b c. 
get\<^bsub>l\<^esub> (put\<^bsub>la ;\<^sub>L l\<^esub> (b::'b) (c::'c)) = put\<^bsub>la\<^esub> (get\<^bsub>l\<^esub> b::'a) c \ \ weak_lens l" by force then show ?thesis using a2 a1 by (metis lens_quotient_comp vwb_lens_wb wb_lens_def) qed lemma lens_put_of_quotient: "\ vwb_lens Y; X \\<^sub>L Y \ \ put\<^bsub>Y\<^esub> s (put\<^bsub>X /\<^sub>L Y\<^esub> v\<^sub>2 v\<^sub>1) = put\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> s v\<^sub>2) v\<^sub>1" proof - assume a1: "vwb_lens Y" assume a2: "X \\<^sub>L Y" have f3: "\l b. put\<^bsub>l\<^esub> (b::'b) (get\<^bsub>l\<^esub> b::'a) = b \ \ vwb_lens l" by force have f4: "\b c. put\<^bsub>X /\<^sub>L Y\<^esub> (get\<^bsub>Y\<^esub> b) c = get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> b c)" using a2 a1 by (simp add: lens_get_put_quasi_commute) have "\b c a. put\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> b c) a = put\<^bsub>Y\<^esub> b a" using a2 a1 by (simp add: sublens_put_put) then show ?thesis using f4 f3 a1 by (metis mwb_lens.put_put mwb_lens_def vwb_lens_mwb weak_lens.put_get) qed subsection \Bijective Lens Equivalences\ text \A bijective lens, like a bijective function, is its own inverse. Thus, if we compose its inverse with itself we get @{term "1\<^sub>L"}.\ lemma bij_lens_inv_left: "bij_lens X \ inv\<^sub>L X ;\<^sub>L X = 1\<^sub>L" by (auto simp add: lens_inv_def lens_comp_def lens_create_def comp_def id_lens_def, rule ext, auto) lemma bij_lens_inv_right: "bij_lens X \ X ;\<^sub>L inv\<^sub>L X = 1\<^sub>L" by (auto simp add: lens_inv_def lens_comp_def comp_def id_lens_def, rule ext, auto) text \The following important result shows that bijective lenses are precisely those that are equivalent to identity. In other words, a bijective lens views all of the source type.\ lemma bij_lens_equiv_id: "bij_lens X \ X \\<^sub>L 1\<^sub>L" apply (auto) apply (rule lens_equivI) apply (simp_all add: sublens_def) apply (rule_tac x="lens_inv X" in exI) apply (simp add: bij_lens_inv_left lens_inv_bij) apply (auto simp add: lens_equiv_def sublens_def id_lens_def lens_comp_def comp_def) apply (unfold_locales) apply (simp) apply (simp) apply (metis (no_types, lifting) vwb_lens_wb wb_lens_weak weak_lens.put_get) done text \For this reason, by transitivity, any two bijective lenses with the same source type must be equivalent.\ lemma bij_lens_equiv: "\ bij_lens X; X \\<^sub>L Y \ \ bij_lens Y" by (meson bij_lens_equiv_id lens_equiv_def sublens_trans) +lemma bij_lens_cong: + "X \\<^sub>L Y \ bij_lens X = bij_lens Y" + by (meson bij_lens_equiv lens_equiv_sym) + text \We can also show that the identity lens @{term "1\<^sub>L"} is unique.
That is to say, it is the only lens which, when composed with $Y$, will yield $Y$.\ lemma lens_id_unique: "weak_lens Y \ Y = X ;\<^sub>L Y \ X = 1\<^sub>L" apply (cases Y) apply (cases X) apply (auto simp add: lens_comp_def comp_def id_lens_def fun_eq_iff) apply (metis select_convs(1) weak_lens.create_get) apply (metis select_convs(1) select_convs(2) weak_lens.put_get) done -text \Consequently, if composition of two lenses $X$ and $Y$ yields \1\<^sub>L\, then both +text \Consequently, if composition of two lenses $X$ and $Y$ yields @{text "1\<^sub>L"}, then both of the composed lenses must be bijective.\ lemma bij_lens_via_comp_id_left: "\ wb_lens X; wb_lens Y; X ;\<^sub>L Y = 1\<^sub>L \ \ bij_lens X" apply (cases Y) apply (cases X) apply (auto simp add: lens_comp_def comp_def id_lens_def fun_eq_iff) apply (unfold_locales) apply (simp_all) using vwb_lens_wb wb_lens_weak weak_lens.put_get apply fastforce apply (metis select_convs(1) select_convs(2) wb_lens_weak weak_lens.put_get) done lemma bij_lens_via_comp_id_right: "\ wb_lens X; wb_lens Y; X ;\<^sub>L Y = 1\<^sub>L \ \ bij_lens Y" apply (cases Y) apply (cases X) apply (auto simp add: lens_comp_def comp_def id_lens_def fun_eq_iff) apply (unfold_locales) apply (simp_all) using vwb_lens_wb wb_lens_weak weak_lens.put_get apply fastforce apply (metis select_convs(1) select_convs(2) wb_lens_weak weak_lens.put_get) done text \Importantly, an equivalence between two lenses can be demonstrated by showing that one lens can be converted to the other by application of a suitable bijective lens $Z$. This $Z$ lens converts the view type of one to the view type of the other.\ lemma lens_equiv_via_bij: assumes "bij_lens Z" "X = Z ;\<^sub>L Y" shows "X \\<^sub>L Y" using assms apply (auto simp add: lens_equiv_def sublens_def) using bij_lens_vwb apply blast apply (rule_tac x="lens_inv Z" in exI) apply (auto simp add: lens_comp_assoc bij_lens_inv_left) using bij_lens_vwb lens_inv_bij apply blast - apply (simp add: bij_lens_inv_left lens_comp_assoc[THEN sym]) done text \Indeed, we actually have a stronger result than this -- the equivalent lenses are precisely those that can be converted to one another through a suitable bijective lens. Bijective lenses can thus be seen as a special class of "adapter" lens.\ lemma lens_equiv_iff_bij: assumes "weak_lens Y" shows "X \\<^sub>L Y \ (\ Z.
bij_lens Z \ X = Z ;\<^sub>L Y)" apply (rule iffI) apply (auto simp add: lens_equiv_def sublens_def lens_id_unique)[1] apply (rename_tac Z\<^sub>1 Z\<^sub>2) apply (rule_tac x="Z\<^sub>1" in exI) apply (simp) apply (subgoal_tac "Z\<^sub>2 ;\<^sub>L Z\<^sub>1 = 1\<^sub>L") apply (meson bij_lens_via_comp_id_right vwb_lens_wb) apply (metis assms lens_comp_assoc lens_id_unique) using lens_equiv_via_bij apply blast done +lemma pbij_plus_commute: + "\ a \ b; mwb_lens a; mwb_lens b; pbij_lens (b +\<^sub>L a) \ \ pbij_lens (a +\<^sub>L b)" + apply (unfold_locales, simp_all add:lens_defs lens_indep_sym prod.case_eq_if) + using lens_indep.lens_put_comm pbij_lens.put_det apply fastforce + done + subsection \Lens Override Laws\ text \The following laws are analogous to the equivalent laws for functions.\ lemma lens_override_id [simp]: "S\<^sub>1 \\<^sub>L S\<^sub>2 on 1\<^sub>L = S\<^sub>2" by (simp add: lens_override_def id_lens_def) lemma lens_override_unit [simp]: "S\<^sub>1 \\<^sub>L S\<^sub>2 on 0\<^sub>L = S\<^sub>1" by (simp add: lens_override_def zero_lens_def) lemma lens_override_overshadow: assumes "mwb_lens Y" "X \\<^sub>L Y" shows "(S\<^sub>1 \\<^sub>L S\<^sub>2 on X) \\<^sub>L S\<^sub>3 on Y = S\<^sub>1 \\<^sub>L S\<^sub>3 on Y" using assms by (simp add: lens_override_def sublens_put_put) +lemma lens_override_irr: + assumes "X \ Y" + shows "S\<^sub>1 \\<^sub>L (S\<^sub>2 \\<^sub>L S\<^sub>3 on Y) on X = S\<^sub>1 \\<^sub>L S\<^sub>2 on X" + using assms by (simp add: lens_override_def) + lemma lens_override_overshadow_left: assumes "mwb_lens X" shows "(S\<^sub>1 \\<^sub>L S\<^sub>2 on X) \\<^sub>L S\<^sub>3 on X = S\<^sub>1 \\<^sub>L S\<^sub>3 on X" by (simp add: assms lens_override_def) lemma lens_override_overshadow_right: assumes "mwb_lens X" shows "S\<^sub>1 \\<^sub>L (S\<^sub>2 \\<^sub>L S\<^sub>3 on X) on X = S\<^sub>1 \\<^sub>L S\<^sub>3 on X" by (simp add: assms lens_override_def) lemma lens_override_plus: "X \ Y \ S\<^sub>1 \\<^sub>L S\<^sub>2 on (X +\<^sub>L Y) = (S\<^sub>1 \\<^sub>L S\<^sub>2 on X) \\<^sub>L S\<^sub>2 on Y" by (simp add: lens_indep_comm lens_override_def lens_plus_def) lemma lens_override_idem [simp]: "vwb_lens X \ S \\<^sub>L S on X = S" by (simp add: lens_override_def) lemma lens_override_mwb_idem [simp]: "\ mwb_lens X; S \ \\<^bsub>X\<^esub> \ \ S \\<^sub>L S on X = S" by (simp add: lens_override_def) lemma lens_override_put_right_in: "\ vwb_lens A; X \\<^sub>L A \ \ S\<^sub>1 \\<^sub>L (put\<^bsub>X\<^esub> S\<^sub>2 v) on A = put\<^bsub>X\<^esub> (S\<^sub>1 \\<^sub>L S\<^sub>2 on A) v" by (simp add: lens_override_def lens_get_put_quasi_commute lens_put_of_quotient) lemma lens_override_put_right_out: "\ vwb_lens A; X \ A \ \ S\<^sub>1 \\<^sub>L (put\<^bsub>X\<^esub> S\<^sub>2 v) on A = (S\<^sub>1 \\<^sub>L S\<^sub>2 on A)" by (simp add: lens_override_def lens_indep.lens_put_irr2) lemma lens_indep_overrideI: assumes "vwb_lens X" "vwb_lens Y" "(\ s\<^sub>1 s\<^sub>2 s\<^sub>3. s\<^sub>1 \\<^sub>L s\<^sub>2 on X \\<^sub>L s\<^sub>3 on Y = s\<^sub>1 \\<^sub>L s\<^sub>3 on Y \\<^sub>L s\<^sub>2 on X)" shows "X \ Y" using assms apply (unfold_locales) apply (simp_all add: lens_override_def) apply (metis mwb_lens_def vwb_lens_mwb weak_lens.put_get) apply (metis lens_override_def lens_override_idem mwb_lens_def vwb_lens_mwb weak_lens.put_get) apply (metis mwb_lens_weak vwb_lens_mwb vwb_lens_wb wb_lens.get_put weak_lens.put_get) done lemma lens_indep_override_def: assumes "vwb_lens X" "vwb_lens Y" shows "X \ Y \ (\ s\<^sub>1 s\<^sub>2 s\<^sub>3.
s\<^sub>1 \\<^sub>L s\<^sub>2 on X \\<^sub>L s\<^sub>3 on Y = s\<^sub>1 \\<^sub>L s\<^sub>3 on Y \\<^sub>L s\<^sub>2 on X)" by (metis assms(1) assms(2) lens_indep_comm lens_indep_overrideI lens_override_def) text \ Alternative characterisation of very-well behaved lenses: override is idempotent. \ lemma override_idem_implies_vwb: "\ mwb_lens X; \ s. s \\<^sub>L s on X = s \ \ vwb_lens X" by (unfold_locales, simp_all add: lens_defs) subsection \ Alternative Sublens Characterisation \ text \ The following definition is equivalent to the above when the two lenses are very well behaved. \ definition sublens' :: "('a \ 'c) \ ('b \ 'c) \ bool" (infix "\\<^sub>L''" 55) where [lens_defs]: "sublens' X Y = (\ s\<^sub>1 s\<^sub>2 s\<^sub>3. s\<^sub>1 \\<^sub>L s\<^sub>2 on Y \\<^sub>L s\<^sub>3 on X = s\<^sub>1 \\<^sub>L s\<^sub>2 \\<^sub>L s\<^sub>3 on X on Y)" text \ We next prove some characteristic properties of our alternative definition of sublens. \ lemma sublens'_prop1: assumes "vwb_lens X" "X \\<^sub>L' Y" shows "put\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> s\<^sub>1 (get\<^bsub>Y\<^esub> s\<^sub>2)) s\<^sub>3 = put\<^bsub>Y\<^esub> s\<^sub>1 (get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> s\<^sub>2 s\<^sub>3))" using assms by (simp add: sublens'_def, metis lens_override_def mwb_lens_def vwb_lens_mwb weak_lens.put_get) lemma sublens'_prop2: assumes "vwb_lens X" "X \\<^sub>L' Y" shows "get\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> s\<^sub>1 (get\<^bsub>Y\<^esub> s\<^sub>2)) = get\<^bsub>X\<^esub> s\<^sub>2" using assms unfolding sublens'_def by (metis lens_override_def vwb_lens_wb wb_lens_axioms_def wb_lens_def weak_lens.put_get) lemma sublens'_prop3: assumes "vwb_lens X" "vwb_lens Y" "X \\<^sub>L' Y" shows "put\<^bsub>Y\<^esub> \ (get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> (put\<^bsub>Y\<^esub> \ (get\<^bsub>Y\<^esub> \)) v)) = put\<^bsub>X\<^esub> \ v" by (metis assms(1) assms(2) assms(3) mwb_lens_def sublens'_prop1 vwb_lens.put_eq vwb_lens_mwb weak_lens.put_get) text \ Finally we show our two definitions of sublens are equivalent, assuming very well behaved lenses. \ lemma sublens'_implies_sublens: assumes "vwb_lens X" "vwb_lens Y" "X \\<^sub>L' Y" shows "X \\<^sub>L Y" proof - have "vwb_lens (X /\<^sub>L Y)" by (unfold_locales ,auto simp add: assms lens_quotient_def lens_comp_def lens_create_def sublens'_prop1 sublens'_prop2) moreover have "X = X /\<^sub>L Y ;\<^sub>L Y" proof - have "get\<^bsub>X\<^esub> = (\\. get\<^bsub>X\<^esub> (create\<^bsub>Y\<^esub> \)) \ get\<^bsub>Y\<^esub>" by (rule ext, simp add: assms(1) assms(3) lens_create_def sublens'_prop2) moreover have "put\<^bsub>X\<^esub> = (\\ v. put\<^bsub>Y\<^esub> \ (get\<^bsub>Y\<^esub> (put\<^bsub>X\<^esub> (create\<^bsub>Y\<^esub> (get\<^bsub>Y\<^esub> \)) v)))" by (rule ext, rule ext, simp add: assms(1) assms(2) assms(3) lens_create_def sublens'_prop3) ultimately show ?thesis by (simp add: lens_quotient_def lens_comp_def) qed ultimately show ?thesis using sublens_def by blast qed lemma sublens_implies_sublens': assumes "vwb_lens Y" "X \\<^sub>L Y" shows "X \\<^sub>L' Y" by (metis assms lens_override_def lens_override_put_right_in sublens'_def) lemma sublens_iff_sublens': assumes "vwb_lens X" "vwb_lens Y" shows "X \\<^sub>L Y \ X \\<^sub>L' Y" using assms sublens'_implies_sublens sublens_implies_sublens' by blast +subsection \ Alternative Equivalence Characterisation \ + +definition lens_equiv' :: "('a \ 'c) \ ('b \ 'c) \ bool" (infix "\\<^sub>L''" 51) where +[lens_defs]: "lens_equiv' X Y = (\ s\<^sub>1 s\<^sub>2. 
(s\<^sub>1 \\<^sub>L s\<^sub>2 on X = s\<^sub>1 \\<^sub>L s\<^sub>2 on Y))" + +lemma lens_equiv_iff_lens_equiv': + assumes "vwb_lens X" "vwb_lens Y" + shows "X \\<^sub>L Y \ X \\<^sub>L' Y" + apply (simp add: lens_equiv_def sublens_iff_sublens' assms) + apply (auto simp add: lens_defs assms) + apply (metis assms(2) mwb_lens.put_put vwb_lens_mwb vwb_lens_wb wb_lens.get_put) + done + end \ No newline at end of file diff --git a/thys/Optics/Lens_Record.ML b/thys/Optics/Lens_Record.ML --- a/thys/Optics/Lens_Record.ML +++ b/thys/Optics/Lens_Record.ML @@ -1,254 +1,341 @@ signature LENS_UTILS = sig val add_alphabet_cmd : {overloaded: bool} -> (string * string option) list * binding -> string option -> (binding * string * mixfix) list -> theory -> theory end; structure Lens_Utils : LENS_UTILS = struct (* We set up the following syntactic entities that correspond to various parts of Isabelle syntax and names that we depend on. This code would need to be updated if the names of the Isabelle and lens theories and/or theorems change. *) val FLDLENS = "FLDLENS" -val child_lens_suffix = "_child_lens" +val BASELENS = "BASELENS" +val base_lensN = "base\<^sub>L" +val child_lensN = "more\<^sub>L" +val all_lensN = "all\<^sub>L" +val base_moreN = "base_more" +val bij_lens_suffix = "_bij_lens" val vwb_lens_suffix = "_vwb_lens" -val indeps_suffix = "_indeps" -val pl_indeps_suffix = "_pl_indeps" -val sublens_suffix = "_sublenses" -val Trueprop = "HOL.Trueprop" -val imp = "Pure.imp" -val lens_laws_thy = "Lens_Laws" -val lens_algebra_thy = "Lens_Algebra" -val sublens_thy = "Lens_Order" -val vwb_lens = lens_laws_thy ^ ".vwb_lens" -val lens_indep = lens_laws_thy ^ ".lens_indep" -val lens_comp = lens_algebra_thy ^ ".lens_comp" -val sublens = sublens_thy ^ ".sublens" +val sym_lens_suffix = "_sym_lens" +val indepsN = "indeps" +val sublensN = "sublenses" +val quotientN = "quotients" +val compositionN = "compositions" +val Trueprop = @{const_name Trueprop} +val HOLeq = @{const_name HOL.eq} +val bij_lens = @{const_name bij_lens} +val vwb_lens = @{const_name vwb_lens} +val sym_lens = @{const_name sym_lens} +val lens_indep = @{const_name lens_indep} +val lens_plus = @{const_name lens_plus} +val lens_quotient = @{const_name lens_quotient} +val lens_comp = @{const_name lens_comp} +val id_lens = @{const_name id_lens} +val sublens = @{const_name sublens} +val lens_equiv = @{const_name lens_equiv} val lens_suffix = "\<^sub>v" -val ext_suffix = "\<^sub>a" -val lens_defs = (Binding.empty, [Token.make_src ("lens_defs", Position.none) []]) -val alpha_splits = [Token.make_src ("alpha_splits", Position.none) []] +val lens_defsN = "lens_defs" +val lens_defs = (Binding.empty, [Token.make_src (lens_defsN, Position.none) []]) +val alpha_splitsN = "alpha_splits" +val alpha_splits = [Token.make_src (alpha_splitsN, Position.none) []] +val equivN = "equivs" val splits_suffix = ".splits" +val defs_suffix = ".defs" +val prod_case_eq_ifN = "prod.case_eq_if" +val slens_view = "view" +val slens_coview = "coview" (* The following code is adapted from the record package. We generate a record, but also create lenses for each field and prove properties about them.
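   As a rough sketch (the exact bindings are constructed in add_alphabet_cmd below): each declared
   field f gives rise to a lens definition of the form f = FLDLENS f\<^sub>v, together with
   base\<^sub>L = BASELENS t (for an alphabet named t) and more\<^sub>L = FLDLENS more; the corresponding
   vwb_lens, independence, sublens, quotient and composition theorems are then proved and
   registered as simp rules.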
*) fun read_parent NONE ctxt = (NONE, ctxt) | read_parent (SOME raw_T) ctxt = (case Proof_Context.read_typ_abbrev ctxt raw_T of Type (name, Ts) => (SOME (Ts, name), fold Variable.declare_typ Ts ctxt) | T => error ("Bad parent record specification: " ^ Syntax.string_of_typ ctxt T)); fun read_fields raw_fields ctxt = let val Ts = Syntax.read_typs ctxt (map (fn (_, raw_T, _) => raw_T) raw_fields); val fields = map2 (fn (x, _, mx) => fn T => (x, T, mx)) raw_fields Ts; val ctxt' = fold Variable.declare_typ Ts ctxt; in (fields, ctxt') end; fun add_record_cmd overloaded (raw_params, binding) raw_parent raw_fields thy = let val ctxt = Proof_Context.init_global thy; val params = map (apsnd (Typedecl.read_constraint ctxt)) raw_params; val ctxt1 = fold (Variable.declare_typ o TFree) params ctxt; val (parent, ctxt2) = read_parent raw_parent ctxt1; val (fields, ctxt3) = read_fields raw_fields ctxt2; val params' = map (Proof_Context.check_tfree ctxt3) params; in thy |> Record.add_record overloaded (params', binding) parent fields end; (* Construct a theorem and proof that a given field lens is very well-behaved *) -fun lens_proof x thy = +fun lens_proof tname x thy = + let open Simplifier; open Global_Theory in Goal.prove_global thy [] [] (hd (Type_Infer_Context.infer_types (Proof_Context.init_global thy) [ Const (Trueprop, dummyT) - $ (Const (vwb_lens, dummyT) $ Const (Context.theory_name thy ^ "." ^ x, dummyT))])) + $ (Const (vwb_lens, dummyT) $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT))])) (fn {context = context, prems = _} - => EVERY [ Locale.intro_locales_tac {strict = true, eager = true} context [] - , PARALLEL_ALLGOALS (asm_simp_tac (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) context))]) + => EVERY [ Locale.intro_locales_tac {strict = true, eager = true} context [] + , PARALLEL_ALLGOALS (asm_simp_tac + (fold add_simp (get_thm thy (x ^ "_def") :: get_thms thy (tname ^ ".defs")) + context))]) + end -(* Child lenses correspond to the field where a record extension may reside. This function constructs - a theorem that a given child lens field is very well-behaved *) - -fun lens_child_proof thy x = +fun lens_sym_proof tname thy = + let open Simplifier; open Global_Theory in Goal.prove_global thy [] [] (hd (Type_Infer_Context.infer_types (Proof_Context.init_global thy) [ Const (Trueprop, dummyT) - $ (Const (vwb_lens, dummyT) $ Const (Context.theory_name thy ^ "." ^ x, dummyT))])) - (fn {context = context, prems = _} => - EVERY [ PARALLEL_ALLGOALS - (asm_simp_tac (Simplifier.add_simp @{thm comp_vwb_lens} (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) context)))]) + $ (Const (sym_lens, dummyT) $ Const (Context.theory_name thy ^ "." ^ tname ^ "." 
^ all_lensN, dummyT))])) + (fn {context = context, prems = _} + => EVERY [ Classical.rule_tac context [@{thm sym_lens.intro}] [] 1 + , PARALLEL_ALLGOALS (asm_simp_tac + (fold add_simp (@{thms slens.defs} @ get_thms thy (tname ^ ".defs")) + context))]) + end + + +fun prove_lens_goal tname thy ctx = + let open Simplifier; open Global_Theory in + auto_tac (fold add_simp (get_thms thy lens_defsN @ + get_thms thy (tname ^ splits_suffix) @ + [get_thm thy prod_case_eq_ifN]) ctx) +end + +fun prove_indep tname thy = + let open Simplifier; open Global_Theory in + (fn {context, prems = _} => + EVERY [auto_tac (add_simp @{thm lens_indep_vwb_iff} context) + ,prove_lens_goal tname thy context]) +end + +fun prove_sublens tname thy = + let open Simplifier; open Global_Theory in + (fn {context, prems = _} => + EVERY [auto_tac (add_simp @{thm sublens_iff_sublens'} context) + ,prove_lens_goal tname thy context]) +end + +fun prove_quotient tname thy = + let open Simplifier; open Global_Theory in + (fn {context, prems = _} => + EVERY [prove_lens_goal tname thy context]) +end + + +fun prove_equiv tname thy = + let open Simplifier; open Global_Theory in + (fn {context, prems = _} => + EVERY [auto_tac (add_simp @{thm lens_equiv_iff_lens_equiv'} context) + ,prove_lens_goal tname thy context]) +end + +(* Construct a proof that base + more is a bijective lens *) + +fun lens_bij_proof tname thy = + let open Simplifier; open Global_Theory in + Goal.prove_global thy [] [] + (hd (Type_Infer_Context.infer_types + (Proof_Context.init_global thy) + [ Const (Trueprop, dummyT) + $ (Const (bij_lens, dummyT) $ + (Const (lens_plus, dummyT) $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ base_lensN, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ child_lensN, dummyT)))])) + (fn {context = context, prems = _} + => EVERY [ Locale.intro_locales_tac {strict = true, eager = true} context [] + , auto_tac (fold add_simp (get_thms thy lens_defsN @ [get_thm thy prod_case_eq_ifN]) + context)]) + end (* Construct a theorem and proof that two lenses, x and y, are independent. Since some lenses exist both with the source type as the record extension, and in the context of the extended record, we need two versions of this function. The first shows it for the lenses on the extension, and thus uses an "intro_locales" as a means to discharge the individual lens laws of the vwb_lens locale. *) -fun indep_proof thy (x, y) = - Goal.prove_global thy [] [] - (hd (Type_Infer_Context.infer_types - (Proof_Context.init_global thy) - [ Const (Trueprop, dummyT) - $ ( Const (lens_indep, dummyT) - $ Const (Context.theory_name thy ^ "." ^ x, dummyT) - $ Const (Context.theory_name thy ^ "." ^ y, dummyT) - )])) - (fn {context = context, prems = _} - => EVERY [ Locale.intro_locales_tac {strict = true, eager = true} context [] - , PARALLEL_ALLGOALS - (asm_simp_tac (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) - (Simplifier.add_simp (Global_Theory.get_thm thy (y ^ "_def")) - context)))]) - -(* The next function is similar, but does not unfold locales instead relying on existing independence - proofs. The reason for this is that we when invoking this we have already proved that the lenses - on the extension record are independent, here we just apply preservation of independence of - the lenses through composition.
*) - -fun pl_indep_proof thy (x, y) = +fun indep_proof tname thy (x, y) = + let open Simplifier; open Global_Theory in Goal.prove_global thy [] [] (hd (Type_Infer_Context.infer_types (Proof_Context.init_global thy) [ Const (Trueprop, dummyT) $ ( Const (lens_indep, dummyT) - $ Const (Context.theory_name thy ^ "." ^ x, dummyT) - $ Const (Context.theory_name thy ^ "." ^ y, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ y, dummyT) )])) - (fn {context = context, prems = _} - => EVERY [ PARALLEL_ALLGOALS - (asm_simp_tac (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) - (Simplifier.add_simp (Global_Theory.get_thm thy (y ^ "_def")) - context)))]) + (prove_indep tname thy) + end -(* The following two functions construct contigent independence proofs for the child lenses: if - two lenses are independent, then the composition of the two lenses with the child lens from - their parent must also be independent. *) - -fun parent_indep_proof_1 thy y x = +fun equiv_more_proof tname pname thy fs = + let open Simplifier; open Global_Theory; open Context; open Term in Goal.prove_global thy [] [] (hd (Type_Infer_Context.infer_types (Proof_Context.init_global thy) - [ Const ("Pure.all", dummyT) $ (Abs ("x", dummyT, ( - Const (imp, dummyT) $ - (Const (Trueprop, dummyT) - $ ( Const (lens_indep, dummyT) - $ Bound 0 - $ Const (y, dummyT) - ) - ) - $ - (Const (Trueprop, dummyT) - $ ( Const (lens_indep, dummyT) - $ Bound 0 - $ Const (Context.theory_name thy ^ "." ^ x, dummyT) - ) - )))) - ])) - (fn {context = context, prems = _} => - EVERY [(auto_tac (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) context))]) + [ Const (Trueprop, dummyT) + $ ( Const (lens_equiv, dummyT) + $ Const (pname ^ "." ^ child_lensN, dummyT) + $ foldr1 (fn (x, y) => Const (lens_plus, dummyT) $ x $ y) + (map (fn n => Const (theory_name thy ^ "." ^ tname ^ "." ^ n, dummyT)) (fs @ [child_lensN])) + )])) + (prove_equiv tname thy) + end -fun parent_indep_proof_2 thy y x = +fun equiv_base_proof tname parent thy fs = + let open Simplifier; open Global_Theory; open Context; open Term in Goal.prove_global thy [] [] (hd (Type_Infer_Context.infer_types (Proof_Context.init_global thy) - [ Const ("Pure.all", dummyT) $ (Abs ("x", dummyT, ( - Const (imp, dummyT) $ - (Const (Trueprop, dummyT) - $ ( Const (lens_indep, dummyT) - $ Const (y, dummyT) - $ Bound 0 - ) - ) - $ - (Const (Trueprop, dummyT) - $ ( Const (lens_indep, dummyT) - $ Const (Context.theory_name thy ^ "." ^ x, dummyT) - $ Bound 0 - ) - )))) - ])) - (fn {context, prems = _} => - EVERY [(auto_tac (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) context))]) + [ Const (Trueprop, dummyT) + $ ( Const (lens_equiv, dummyT) + $ Const (theory_name thy ^ "." ^ tname ^ "." ^ base_lensN, dummyT) + $ foldr1 (fn (x, y) => Const (lens_plus, dummyT) $ x $ y) + ((case parent of NONE => [] | SOME (_, pname) => [Const (pname ^ "." ^ base_lensN, dummyT)]) @ + map (fn n => Const (theory_name thy ^ "." ^ tname ^ "." ^ n, dummyT)) fs) + )])) + (prove_equiv tname thy) + end + +fun equiv_partition_proof tname thy = + let open Simplifier; open Global_Theory; open Context; open Term in + Goal.prove_global thy [] [] + (hd (Type_Infer_Context.infer_types + (Proof_Context.init_global thy) + [ Const (Trueprop, dummyT) + $ ( Const (lens_equiv, dummyT) + $ ( Const (lens_plus, dummyT) + $ Const (theory_name thy ^ "." ^ tname ^ "." ^ base_lensN, dummyT) + $ Const (theory_name thy ^ "." ^ tname ^ "." 
^ child_lensN, dummyT)) + $ Const (id_lens, dummyT) + )])) + (prove_equiv tname thy) + end (* Prove a theorem that every child lens is a sublens of the parent. *) -fun sublens_proof thy y x = +fun sublens_proof tname pname thy y x = + let open Simplifier; open Global_Theory in Goal.prove_global thy [] [] (hd (Type_Infer_Context.infer_types (Proof_Context.init_global thy) [ Const (Trueprop, dummyT) $ ( Const (sublens, dummyT) - $ Const (Context.theory_name thy ^ "." ^ x, dummyT) - $ Const (y, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT) + $ Const (pname ^ "." ^ y, dummyT) )])) - (fn {context, prems = _} => - EVERY [ PARALLEL_ALLGOALS - (asm_simp_tac (Simplifier.add_simp (Global_Theory.get_thm thy (x ^ "_def")) context))]) + (prove_sublens tname thy) +end + +fun quotient_proof tname thy x = + let open Simplifier; open Global_Theory in + Goal.prove_global thy [] [] + (hd (Type_Infer_Context.infer_types + (Proof_Context.init_global thy) + [ Const (Trueprop, dummyT) + $ ( Const (HOLeq, dummyT) + $ (Const (lens_quotient, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ base_lensN, dummyT) + ) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT) + )])) + (prove_quotient tname thy) +end + +fun composition_proof tname thy x = + let open Simplifier; open Global_Theory in + Goal.prove_global thy [] [] + (hd (Type_Infer_Context.infer_types + (Proof_Context.init_global thy) + [ Const (Trueprop, dummyT) + $ ( Const (HOLeq, dummyT) + $ (Const (lens_comp, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ base_lensN, dummyT) + ) + $ Const (Context.theory_name thy ^ "." ^ tname ^ "." ^ x, dummyT) + )])) + (prove_quotient tname thy) +end + fun pairWith _ [] = [] | pairWith x (y :: ys) = [(x, y), (y, x)] @ pairWith x ys; fun pairings [] = [] | pairings (x :: xs) = pairWith x xs @ pairings xs; (* Finally we have the function that actually constructs the record, lenses for each field, independence proofs, and also sublens proofs. 
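   As an illustrative example (see Lens_Record_Example.thy for the exact facts generated), a
   declaration such as

     alphabet mylens = x :: nat  y :: string

   is expected to yield field lenses x and y, the lenses base\<^sub>L and more\<^sub>L, and simp-set fact
   collections named indeps, equivs, sublenses, quotients and compositions.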
*) fun add_alphabet_cmd _ (raw_params, binding) raw_parent raw_fields thy = let + open Simplifier; open Global_Theory val tname = Binding.name_of binding val fields = map (fn (x, y, z) => (Binding.suffix_name lens_suffix x, y, z)) raw_fields val lnames = map (fn (x, _, _) => Binding.name_of x) raw_fields val (parent, _) = read_parent raw_parent (Proof_Context.init_global thy); - fun ldef_name x = if (raw_parent = NONE) then x else x ^ ext_suffix - val ldefs = map (fn x => (ldef_name x) ^ " = " ^ FLDLENS ^ " " ^ x ^ lens_suffix) lnames - val mpldef = - case parent of - SOME (_,r) => tname ^ child_lens_suffix ^ " = " ^ lens_comp ^ " " ^ tname ^ child_lens_suffix ^ ext_suffix ^ " " ^ r ^ child_lens_suffix | - NONE => "" + fun ldef x = (x, x ^ " = " ^ FLDLENS ^ " " ^ x ^ lens_suffix) + val pname = case parent of SOME (_,r) => r | NONE => ""; val plchild = case parent of - SOME (_, r) => r ^ child_lens_suffix | + SOME (_, _) => child_lensN | NONE => "" - val pldefs = - case parent of - SOME (_,r) => map (fn x => x ^ " = " ^ lens_comp ^ " " ^ x ^ ext_suffix ^ " " ^ r ^ child_lens_suffix) lnames @ [mpldef] | - NONE => []; - val mldef_name = if (raw_parent = NONE) then tname ^ child_lens_suffix else tname ^ child_lens_suffix ^ ext_suffix - val mldef = mldef_name ^ " = " ^ FLDLENS ^ " more"; - val plnames = if (raw_parent = NONE) then [] else lnames @ [tname ^ child_lens_suffix]; + val bldef = (base_lensN, base_lensN ^ " = " ^ BASELENS ^ " " ^ tname); + val mldef = (child_lensN, child_lensN ^ " = " ^ FLDLENS ^ " more"); + val sldef = (all_lensN, all_lensN ^ " \ \ " ^ slens_view ^ " = " ^ base_lensN ^ ", " ^ slens_coview ^ " = " ^ child_lensN ^ " \"); + val plnames = if (raw_parent = NONE) then [] else lnames @ [child_lensN]; + fun pindeps thy = map (fn thm => @{thm sublens_pres_indep} OF [thm]) (get_thms thy sublensN) + @ map (fn thm => @{thm sublens_pres_indep'} OF [thm]) (get_thms thy sublensN) in thy (* Add a new record for the new alphabet lenses *) - |> add_record_cmd {overloaded = false} (raw_params, binding) NONE fields + |> add_record_cmd {overloaded = false} (raw_params, binding) raw_parent fields + (* Add the record definition theorems to lens_defs *) + |> Named_Target.theory_map (snd o Specification.theorems_cmd "" [((Binding.empty, []), [(Facts.named (tname ^ defs_suffix), snd lens_defs)])] [] false) (* Add the record splitting theorems to the alpha_splits set for proof automation *) |> Named_Target.theory_map (snd o Specification.theorems_cmd "" [((Binding.empty, []), [(Facts.named (tname ^ splits_suffix), alpha_splits)])] [] false) (* Add definitions for each of the lenses corresponding to each record field in-situ *) - |> Named_Target.theory_map - (fold (fn x => snd o Specification.definition_cmd NONE [] [] (lens_defs, x) true) (ldefs @ [mldef])) - (* Add definitions for each of the lenses corresponding to each record field augmented with the parent's child lens *) + |> Sign.qualified_path false binding |> Named_Target.theory_map - (fold (fn x => snd o Specification.definition_cmd NONE [] [] (lens_defs, x) true) pldefs) - (* Add a vwb lens proof for each in-situ lens (i.e. 
on the record extension) *) - |> fold (fn x => fn thy => snd (Global_Theory.add_thm ((Binding.make (x ^ vwb_lens_suffix, Position.none), lens_proof x thy), [Simplifier.simp_add]) thy)) ((map ldef_name lnames) @ [mldef_name]) + (fold (fn (n, d) => snd o Specification.definition_cmd (SOME (Binding.make (n, Position.none), NONE, NoSyn)) [] [] (lens_defs, d) true) (map ldef lnames @ [bldef, mldef])) + (* Add definition of the underlying symmetric lens *) + |> Named_Target.theory_map + (fold (fn (n, d) => Specification.abbreviation_cmd Syntax.mode_default (SOME (Binding.make (n, Position.none), NONE, NoSyn)) [] d true) [sldef]) + (* Add a vwb lens proof for each field lens *) + |> fold (fn x => fn thy => snd (add_thm ((Binding.make (x ^ vwb_lens_suffix, Position.none), lens_proof tname x thy), [Simplifier.simp_add]) thy)) (lnames @ [base_lensN, child_lensN]) + (* Add a bij lens proof for the base and more lenses *) + |> (fn thy => snd (add_thm ((Binding.make (base_moreN ^ bij_lens_suffix, Position.none), lens_bij_proof tname thy), [Simplifier.simp_add]) thy)) + (* Add sublens proofs *) + |> (fn thy => snd (add_thmss [((Binding.make (sublensN, Position.none), map (sublens_proof tname pname thy plchild) plnames @ map (sublens_proof tname (Context.theory_name thy ^ "." ^ tname) thy base_lensN) lnames), [Simplifier.simp_add])] thy)) + (* Add quotient proofs *) + |> (fn thy => snd (add_thmss [((Binding.make (quotientN, Position.none), map (quotient_proof tname thy) lnames), [Simplifier.simp_add])] thy)) + (* Add composition proofs *) + |> (fn thy => snd (add_thmss [((Binding.make (compositionN, Position.none), map (composition_proof tname thy) lnames), [Simplifier.simp_add])] thy)) (* Add independence proofs for each pairing of lenses *) - |> (fn thy => snd (Global_Theory.add_thmss - [((Binding.make (Binding.name_of binding ^ indeps_suffix, Position.none), map (indep_proof thy) (pairings ((map ldef_name lnames) @ [mldef_name])) @ map (parent_indep_proof_1 thy plchild) plnames @ map (parent_indep_proof_2 thy plchild) plnames), [Simplifier.simp_add])] thy)) - |> fold (fn x => fn thy => snd (Global_Theory.add_thm ((Binding.make (x ^ vwb_lens_suffix, Position.none), lens_child_proof thy x), [Simplifier.simp_add]) thy)) plnames - |> (fn thy => snd (Global_Theory.add_thmss - [((Binding.make (Binding.name_of binding ^ pl_indeps_suffix, Position.none), map (pl_indep_proof thy) (pairings (plnames))), [Simplifier.simp_add])] thy)) - |> (fn thy => snd (Global_Theory.add_thmss [((Binding.make (Binding.name_of binding ^ sublens_suffix, Position.none), map (sublens_proof thy plchild) plnames), [Simplifier.simp_add])] thy)) + |> (fn thy => snd (add_thmss + [((Binding.make (indepsN, Position.none), map (indep_proof tname thy) (pairings (lnames @ [child_lensN]) @ pairings [base_lensN, child_lensN]) (*@ map (parent_indep_proof_1 tname pname thy plchild) plnames @ map (parent_indep_proof_2 tname pname thy plchild) plnames *) @ pindeps thy), [Simplifier.simp_add])] thy)) + (* Add equivalence properties *) + |> (fn thy => snd (add_thmss + [((Binding.make (equivN, Position.none), (if (raw_parent = NONE) then [] else [equiv_more_proof tname pname thy lnames]) @ [equiv_base_proof tname parent thy lnames, equiv_partition_proof tname thy]), [Simplifier.simp_add])] thy)) + (* Add a symmetric lens proof for the base and more lenses *) + |> (fn thy => snd (add_thm ((Binding.make (all_lensN ^ sym_lens_suffix, Position.none), lens_sym_proof tname thy), [Simplifier.simp_add]) thy)) + |> Sign.parent_path end; val _ = 
Outer_Syntax.command @{command_keyword alphabet} "define record with lenses" (Parse_Spec.overloaded -- (Parse.type_args_constrained -- Parse.binding) -- (@{keyword "="} |-- Scan.option (Parse.typ --| @{keyword "+"}) -- Scan.repeat1 Parse.const_binding) >> (fn ((overloaded, x), (y, z)) => Toplevel.theory (add_alphabet_cmd {overloaded = overloaded} x y z))); end \ No newline at end of file diff --git a/thys/Optics/Lens_Record_Example.thy b/thys/Optics/Lens_Record_Example.thy --- a/thys/Optics/Lens_Record_Example.thy +++ b/thys/Optics/Lens_Record_Example.thy @@ -1,23 +1,55 @@ theory Lens_Record_Example -imports Lens_Instances + imports Optics begin text \The alphabet command supports syntax illustrated in the following comments.\ alphabet mylens = x :: nat y :: string +thm base_more_bij_lens +thm indeps +thm equivs +thm sublenses +thm quotients +thm compositions + +lemma mylens_composition: + "x +\<^sub>L y +\<^sub>L more\<^sub>L \\<^sub>L 1\<^sub>L" (is "?P \\<^sub>L ?Q") +proof - + have "?Q \\<^sub>L base\<^sub>L +\<^sub>L more\<^sub>L" + by (simp add: lens_equiv_sym) + also have "... \\<^sub>L (x +\<^sub>L y) +\<^sub>L more\<^sub>L" + by (simp add: lens_plus_eq_left) + also have "... \\<^sub>L x +\<^sub>L y +\<^sub>L more\<^sub>L" + by (simp add: lens_plus_assoc) + finally show ?thesis + using lens_equiv_sym by auto +qed + lemma mylens_bij_lens: - "bij_lens (x +\<^sub>L y +\<^sub>L mylens_child_lens)" - by (unfold_locales, simp_all add: lens_plus_def x_def y_def mylens_child_lens_def id_lens_def sublens_def lens_comp_def prod.case_eq_if) + "bij_lens (x +\<^sub>L y +\<^sub>L more\<^sub>L)" + using bij_lens_equiv_id mylens_composition by auto alphabet mylens_2 = mylens + z :: int k :: "string list" +thm lens_defs + +thm base_more_bij_lens +thm indeps +thm equivs +thm sublenses + alphabet mylens_3 = mylens_2 + n :: real h :: nat +thm base_more_bij_lens +thm indeps +thm equivs +thm sublenses + end diff --git a/thys/Optics/Lens_State.thy b/thys/Optics/Lens_State.thy --- a/thys/Optics/Lens_State.thy +++ b/thys/Optics/Lens_State.thy @@ -1,79 +1,80 @@ section \State and Lens integration\ theory Lens_State imports "HOL-Library.State_Monad" Lens_Algebra begin + text \Inspired by Haskell's lens package\ definition zoom :: "('a \ 'b) \ ('a, 'c) state \ ('b, 'c) state" where "zoom l m = State (\b. case run_state m (lens_get l b) of (c, a) \ (c, lens_put l b a))" definition use :: "('a \ 'b) \ ('b, 'a) state" where "use l = zoom l State_Monad.get" definition modify :: "('a \ 'b) \ ('a \ 'a) \ ('b, unit) state" where "modify l f = zoom l (State_Monad.update f)" definition assign :: "('a \ 'b) \ 'a \ ('b, unit) state" where "assign l b = zoom l (State_Monad.set b)" context begin qualified abbreviation "add l n \ modify l (\x. x + n)" qualified abbreviation "sub l n \ modify l (\x. x - n)" qualified abbreviation "mul l n \ modify l (\x. 
x * n)" qualified abbreviation "inc l \ add l 1" qualified abbreviation "dec l \ sub l 1" end bundle lens_state_notation begin notation zoom (infixr "\" 80) notation modify (infix "%=" 80) notation assign (infix ".=" 80) notation Lens_State.add (infix "+=" 80) notation Lens_State.sub (infix "-=" 80) notation Lens_State.mul (infix "*=" 80) notation Lens_State.inc ("_ ++") notation Lens_State.dec ("_ --") end context includes lens_state_notation begin lemma zoom_comp1: "l1 \ l2 \ s = (l2 ;\<^sub>L l1) \ s" unfolding zoom_def lens_comp_def by (auto split: prod.splits) lemma zoom_zero[simp]: "zero_lens \ s = s" unfolding zoom_def zero_lens_def by simp lemma zoom_id[simp]: "id_lens \ s = s" unfolding zoom_def id_lens_def by simp end lemma (in mwb_lens) zoom_comp2[simp]: "zoom x m \ (\a. zoom x (n a)) = zoom x (m \ n)" unfolding zoom_def State_Monad.bind_def by (auto split: prod.splits simp: put_get put_put) lemma (in wb_lens) use_alt_def: "use x = map_state (lens_get x) State_Monad.get" unfolding State_Monad.get_def use_def zoom_def by (simp add: comp_def get_put) lemma (in wb_lens) modify_alt_def: "modify x f = State_Monad.update (update f)" -unfolding modify_def zoom_def update_def State_Monad.update_def State_Monad.get_def State_Monad.set_def State_Monad.bind_def -by auto - + unfolding modify_def zoom_def lens_update_def State_Monad.update_def State_Monad.get_def State_Monad.set_def State_Monad.bind_def + by (auto) + lemma (in wb_lens) modify_id[simp]: "modify x (\x. x) = State_Monad.return ()" -unfolding update_def modify_alt_def +unfolding lens_update_def modify_alt_def by (simp add: get_put) lemma (in mwb_lens) modify_comp[simp]: "bind (modify x f) (\_. modify x g) = modify x (g \ f)" unfolding modify_def by simp end diff --git a/thys/Optics/Lens_Symmetric.thy b/thys/Optics/Lens_Symmetric.thy new file mode 100644 --- /dev/null +++ b/thys/Optics/Lens_Symmetric.thy @@ -0,0 +1,99 @@ +section \ Symmetric Lenses \ + +theory Lens_Symmetric + imports Lens_Order +begin + +text \ A characterisation of Hofmann's ``Symmetric Lenses''~\cite{Hofmann2011}, where + a lens is accompanied by its complement. 
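+ Concretely, a symmetric lens is represented here by a pair of lenses into a common source, a view and a coview; the locales below additionally require the two lenses to be independent and their lens sum to be a (partial) bijection.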
\ + +record ('a, 'b, 's) slens = + view :: "'a \ 's" ("\\") \ \ The region characterised \ + coview :: "'b \ 's" ("\\") \ \ The complement of the region \ + +type_notation + slens ("<_, _> \ _" [0, 0, 0] 0) + +declare slens.defs [lens_defs] + +definition slens_compl :: "(<'a, 'c> \ 'b) \ <'c, 'a> \ 'b" ("-\<^sub>L _" [81] 80) where +[lens_defs]: "slens_compl a = \ view = coview a, coview = view a \" + +lemma view_slens_compl [simp]: "\\<^bsub>-\<^sub>L a\<^esub> = \\<^bsub>a\<^esub>" + by (simp add: slens_compl_def) + +lemma coview_slens_compl [simp]: "\\<^bsub>-\<^sub>L a\<^esub> = \\<^bsub>a\<^esub>" + by (simp add: slens_compl_def) + +subsection \ Partial Symmetric Lenses \ + +locale psym_lens = + fixes S :: "<'a, 'b> \ 's" (structure) + assumes + mwb_region [simp]: "mwb_lens \" and + mwb_coregion [simp]: "mwb_lens \" and + indep_region_coregion [simp]: "\ \ \" and + pbij_region_coregion [simp]: "pbij_lens (\ +\<^sub>L \)" + +declare psym_lens.mwb_region [simp] +declare psym_lens.mwb_coregion [simp] +declare psym_lens.indep_region_coregion [simp] + +lemma psym_lens_compl [simp]: "psym_lens a \ psym_lens (-\<^sub>L a)" + apply (simp add: slens_compl_def) + apply (rule psym_lens.intro) + apply (simp_all) + using lens_indep_sym psym_lens.indep_region_coregion apply blast + using lens_indep_sym pbij_plus_commute psym_lens_def apply blast + done + +subsection \ Symmetric Lenses \ + +locale sym_lens = + fixes S :: "<'a, 'b> \ 's" (structure) + assumes + vwb_region: "vwb_lens \" and + vwb_coregion: "vwb_lens \" and + indep_region_coregion: "\ \ \" and + bij_region_coregion: "bij_lens (\ +\<^sub>L \)" +begin + +sublocale psym_lens +proof (rule psym_lens.intro) + show "mwb_lens \" + by (simp add: vwb_region) + show "mwb_lens \" + by (simp add: vwb_coregion) + show "\ \ \" + using indep_region_coregion by blast + show "pbij_lens (\ +\<^sub>L \)" + by (simp add: bij_region_coregion) +qed + +lemma put_region_coregion_cover: + "put\<^bsub>\\<^esub> (put\<^bsub>\\<^esub> s\<^sub>1 (get\<^bsub>\\<^esub> s\<^sub>2)) (get\<^bsub>\\<^esub> s\<^sub>2) = s\<^sub>2" +proof - + have "put\<^bsub>\\<^esub> (put\<^bsub>\\<^esub> s\<^sub>1 (get\<^bsub>\\<^esub> s\<^sub>2)) (get\<^bsub>\\<^esub> s\<^sub>2) = put\<^bsub>\ +\<^sub>L \\<^esub> s\<^sub>1 (get\<^bsub>\ +\<^sub>L \\<^esub> s\<^sub>2)" + by (simp add: lens_defs) + also have "... = s\<^sub>2" + by (simp add: bij_region_coregion) + finally show ?thesis . 
+qed + +end + +declare sym_lens.vwb_region [simp] +declare sym_lens.vwb_coregion [simp] +declare sym_lens.indep_region_coregion [simp] + +lemma sym_lens_psym [simp]: "sym_lens x \ psym_lens x" + by (simp add: psym_lens_def sym_lens.bij_region_coregion) + +lemma sym_lens_compl [simp]: "sym_lens a \ sym_lens (-\<^sub>L a)" + apply (simp add: slens_compl_def) + apply (rule sym_lens.intro, simp_all) + using lens_indep_sym sym_lens.indep_region_coregion apply blast + using bij_lens_equiv lens_plus_comm sym_lens_def apply blast + done + +end \ No newline at end of file diff --git a/thys/Optics/Lenses.thy b/thys/Optics/Lenses.thy --- a/thys/Optics/Lenses.thy +++ b/thys/Optics/Lenses.thy @@ -1,9 +1,10 @@ section \Lenses\ theory Lenses imports Lens_Laws Lens_Algebra Lens_Order + Lens_Symmetric Lens_Instances begin end diff --git a/thys/Optics/ROOT b/thys/Optics/ROOT --- a/thys/Optics/ROOT +++ b/thys/Optics/ROOT @@ -1,38 +1,39 @@ (******************************************************************************) (* Project: The Optics Library *) (* File: ROOT *) (* Authors: Simon Foster (University of York, UK) *) (* Emails: simon.foster@york.ac.uk *) (******************************************************************************) (* Optics Library *) chapter AFP session "Optics" (AFP) = "HOL-Eisbach" + options [timeout = 600] sessions "HOL-Library" theories Interp Two Lens_Laws Lens_Algebra Lens_Order Lens_Instances + Lens_Symmetric Lenses Prisms Scenes Optics theories [document = false] Lens_Record_Example theories Lens_State document_files "root.bib" "root.tex" "document.sty" "figures/Lens.pdf" "figures/Composition.pdf" "figures/Sum.pdf" "figures/Independence.pdf" diff --git a/thys/Optics/Scenes.thy b/thys/Optics/Scenes.thy --- a/thys/Optics/Scenes.thy +++ b/thys/Optics/Scenes.thy @@ -1,342 +1,400 @@ section \ Scenes \ theory Scenes imports Lens_Instances begin text \ Like lenses, scenes characterise a region of a source type. However, unlike lenses, scenes do not explicitly assign a view type to this region, and consequently they have just one type parameter. This means they can be more flexibly composed, and in particular it is possible to show they form nice algebraic structures in Isabelle/HOL. They are mainly of use in characterising sets of variables, where, of course, we do not care about the types of those variables and therefore representing them as lenses is inconvenient. \ subsection \ Overriding Functions \ text \ Overriding functions provide an abstract way of replacing a region of an existing source with the corresponding region of another source. \ locale overrider = fixes F :: "'s \ 's \ 's" (infixl "\" 65) assumes ovr_overshadow_left: "x \ y \ z = x \ z" and ovr_overshadow_right: "x \ (y \ z) = x \ z" begin lemma ovr_assoc: "x \ (y \ z) = x \ y \ z" by (simp add: ovr_overshadow_left ovr_overshadow_right) end locale idem_overrider = overrider + assumes ovr_idem: "x \ x = x" declare overrider.ovr_overshadow_left [simp] declare overrider.ovr_overshadow_right [simp] declare idem_overrider.ovr_idem [simp] subsection \ Scene Type \ typedef 's scene = "{F :: 's \ 's \ 's. overrider F}" by (rule_tac x="\ x y. x" in exI, simp, unfold_locales, simp_all) setup_lifting type_definition_scene lift_definition idem_scene :: "'s scene \ bool" is idem_overrider . lift_definition region :: "'s scene \ 's rel" is "\ F. {(s\<^sub>1, s\<^sub>2). (\ s. F s s\<^sub>1 = F s s\<^sub>2)}" . lift_definition coregion :: "'s scene \ 's rel" is "\ F. {(s\<^sub>1, s\<^sub>2). (\ s. 
F s\<^sub>1 s = F s\<^sub>2 s)}" . lemma equiv_region: "equiv UNIV (region X)" apply (transfer) apply (rule equivI) apply (rule refl_onI) apply (auto) apply (rule symI) apply (auto) apply (rule transI) apply (auto) done lemma equiv_coregion: "equiv UNIV (coregion X)" apply (transfer) apply (rule equivI) apply (rule refl_onI) apply (auto) apply (rule symI) apply (auto) apply (rule transI) apply (auto) done lemma region_coregion_Id: "idem_scene X \ region X \ coregion X = Id" by (transfer, auto, metis idem_overrider.ovr_idem) lemma state_eq_iff: "idem_scene S \ x = y \ (x, y) \ region S \ (x, y) \ coregion S" by (metis IntE IntI pair_in_Id_conv region_coregion_Id) lift_definition scene_override :: "'a \ 'a \ ('a scene) \ 'a" ("_ \\<^sub>S _ on _" [95,0,96] 95) is "\ s\<^sub>1 s\<^sub>2 F. F s\<^sub>1 s\<^sub>2" . lemma scene_override_idem [simp]: "idem_scene X \ s \\<^sub>S s on X = s" by (transfer, simp) lemma scene_override_overshadow_left [simp]: "S\<^sub>1 \\<^sub>S S\<^sub>2 on X \\<^sub>S S\<^sub>3 on X = S\<^sub>1 \\<^sub>S S\<^sub>3 on X" by (transfer, simp) lemma scene_override_overshadow_right [simp]: "S\<^sub>1 \\<^sub>S (S\<^sub>2 \\<^sub>S S\<^sub>3 on X) on X = S\<^sub>1 \\<^sub>S S\<^sub>3 on X" by (transfer, simp) +definition scene_equiv :: "'a \ 'a \ ('a scene) \ bool" ("_ \\<^sub>S _ on _" [65,0,66] 65) where +[lens_defs]: "S\<^sub>1 \\<^sub>S S\<^sub>2 on X = (S\<^sub>1 \\<^sub>S S\<^sub>2 on X = S\<^sub>1)" + +lemma scene_equiv_region: "idem_scene X \ region X = {(S\<^sub>1, S\<^sub>2). S\<^sub>1 \\<^sub>S S\<^sub>2 on X}" + by (simp add: lens_defs, transfer, auto) + (metis idem_overrider.ovr_idem, metis overrider.ovr_overshadow_right) + lift_definition scene_indep :: "'a scene \ 'a scene \ bool" (infix "\\<^sub>S" 50) is "\ F G. (\ s\<^sub>1 s\<^sub>2 s\<^sub>3. G (F s\<^sub>1 s\<^sub>2) s\<^sub>3 = F (G s\<^sub>1 s\<^sub>3) s\<^sub>2)" . lemma scene_indep_override: "X \\<^sub>S Y = (\ s\<^sub>1 s\<^sub>2 s\<^sub>3. s\<^sub>1 \\<^sub>S s\<^sub>2 on X \\<^sub>S s\<^sub>3 on Y = s\<^sub>1 \\<^sub>S s\<^sub>3 on Y \\<^sub>S s\<^sub>2 on X)" by (transfer, auto) lemma scene_indep_sym: "X \\<^sub>S Y \ Y \\<^sub>S X" by (transfer, auto) text \ Compatibility is a weaker notion than independence; the scenes can overlap but they must agree when they do. \ - + lift_definition scene_compat :: "'a scene \ 'a scene \ bool" (infix "##\<^sub>S" 50) is "\ F G. (\ s\<^sub>1 s\<^sub>2. G (F s\<^sub>1 s\<^sub>2) s\<^sub>2 = F (G s\<^sub>1 s\<^sub>2) s\<^sub>2)" . lemma scene_indep_compat [simp]: "X \\<^sub>S Y \ X ##\<^sub>S Y" by (transfer, auto) lemma scene_compat_refl: "X ##\<^sub>S X" by (transfer, simp) lemma scene_compat_sym: "X ##\<^sub>S Y \ Y ##\<^sub>S X" by (transfer, simp) lemma scene_override_commute_indep: assumes "X \\<^sub>S Y" shows "S\<^sub>1 \\<^sub>S S\<^sub>2 on X \\<^sub>S S\<^sub>3 on Y = S\<^sub>1 \\<^sub>S S\<^sub>3 on Y \\<^sub>S S\<^sub>2 on X" using assms by (transfer, auto) instantiation scene :: (type) "{bot, top, uminus, sup, inf}" begin lift_definition bot_scene :: "'s scene" is "\ x y. x" by (unfold_locales, simp_all) lift_definition top_scene :: "'s scene" is "\ x y. y" by (unfold_locales, simp_all) lift_definition uminus_scene :: "'s scene \ 's scene" is "\ F x y. F y x" by (unfold_locales, simp_all) text \ Scene union requires that the two scenes are at least compatible. If they are not, the result is the bottom scene. \ lift_definition sup_scene :: "'s scene \ 's scene \ 's scene" is "\ F G. if (\ s\<^sub>1 s\<^sub>2. 
G (F s\<^sub>1 s\<^sub>2) s\<^sub>2 = F (G s\<^sub>1 s\<^sub>2) s\<^sub>2) then (\ s\<^sub>1 s\<^sub>2. G (F s\<^sub>1 s\<^sub>2) s\<^sub>2) else (\ s\<^sub>1 s\<^sub>2. s\<^sub>1)" by (unfold_locales, auto, metis overrider.ovr_overshadow_right) definition inf_scene :: "'s scene \ 's scene \ 's scene" where - "inf_scene X Y = - (sup (- X) (- Y))" + [lens_defs]: "inf_scene X Y = - (sup (- X) (- Y))" instance .. end abbreviation union_scene :: "'s scene \ 's scene \ 's scene" (infixl "\\<^sub>S" 65) where "union_scene \ sup" abbreviation inter_scene :: "'s scene \ 's scene \ 's scene" (infixl "\\<^sub>S" 70) where "inter_scene \ inf" abbreviation top_scene :: "'s scene" ("\\<^sub>S") where "top_scene \ top" abbreviation bot_scene :: "'s scene" ("\\<^sub>S") where "bot_scene \ bot" lemma uminus_scene_twice: "- (- (X :: 's scene)) = X" by (transfer, simp) lemma scene_override_id [simp]: "S\<^sub>1 \\<^sub>S S\<^sub>2 on \\<^sub>S = S\<^sub>2" by (transfer, simp) lemma scene_override_unit [simp]: "S\<^sub>1 \\<^sub>S S\<^sub>2 on \\<^sub>S = S\<^sub>1" by (transfer, simp) lemma scene_override_commute: "S\<^sub>2 \\<^sub>S S\<^sub>1 on (- X) = S\<^sub>1 \\<^sub>S S\<^sub>2 on X" by (transfer, simp) lemma scene_union_incompat: "\ X ##\<^sub>S Y \ X \\<^sub>S Y = \\<^sub>S" by (transfer, auto) lemma scene_override_union: "X ##\<^sub>S Y \ S\<^sub>1 \\<^sub>S S\<^sub>2 on (X \\<^sub>S Y) = (S\<^sub>1 \\<^sub>S S\<^sub>2 on X) \\<^sub>S S\<^sub>2 on Y" by (transfer, auto) lemma scene_union_unit: "X \\<^sub>S \\<^sub>S = X" by (transfer, simp) lemma scene_union_annhil: "idem_scene X \ X \\<^sub>S \\<^sub>S = \\<^sub>S" by (transfer, simp) +lemma scene_union_pres_compat: "\ A ##\<^sub>S B; A ##\<^sub>S C \ \ A ##\<^sub>S (B \\<^sub>S C)" + by (transfer, auto) + +lemma scene_indep_self_compl: "A \\<^sub>S -A" + by (transfer, simp) + lemma scene_union_assoc: assumes "X ##\<^sub>S Y" "X ##\<^sub>S Z" "Y ##\<^sub>S Z" shows "X \\<^sub>S (Y \\<^sub>S Z) = (X \\<^sub>S Y) \\<^sub>S Z" + using assms by (transfer, auto) + +lemma scene_inter_indep: + assumes "idem_scene X" "idem_scene Y" "X \\<^sub>S Y" + shows "X \\<^sub>S Y = \\<^sub>S" using assms - by (transfer, auto) + unfolding lens_defs + apply (transfer, auto) + apply (metis (no_types, hide_lams) idem_overrider.ovr_idem overrider.ovr_assoc overrider.ovr_overshadow_right) + apply (metis (no_types, hide_lams) idem_overrider.ovr_idem overrider.ovr_overshadow_right) + done lemma scene_union_idem: "X \\<^sub>S X = X" by (transfer, simp) lemma scene_union_compl: "idem_scene X \ X \\<^sub>S - X = \\<^sub>S" by (transfer, auto) lemma scene_inter_idem: "X \\<^sub>S X = X" by (simp add: inf_scene_def, transfer, auto) lemma scene_union_commute: "X \\<^sub>S Y = Y \\<^sub>S X" by (transfer, auto) lemma scene_inter_compl: "idem_scene X \ X \\<^sub>S - X = \\<^sub>S" by (simp add: inf_scene_def, transfer, auto) lemma scene_demorgan1: "-(X \\<^sub>S Y) = -X \\<^sub>S -Y" by (simp add: inf_scene_def, transfer, auto) lemma scene_demorgan2: "-(X \\<^sub>S Y) = -X \\<^sub>S -Y" by (simp add: inf_scene_def, transfer, auto) +lemma scene_compat_top: "idem_scene X \ X ##\<^sub>S \\<^sub>S" + by (transfer, simp) + instantiation scene :: (type) ord begin text \ $X$ is a subscene of $Y$ provided that overriding with first $Y$ and then $X$ can be rewritten using the complement of $X$. \ definition less_eq_scene :: "'a scene \ 'a scene \ bool" where - "less_eq_scene X Y = (\ s\<^sub>1 s\<^sub>2 s\<^sub>3. 
s\<^sub>1 \\<^sub>S s\<^sub>2 on Y \\<^sub>S s\<^sub>3 on X = s\<^sub>1 \\<^sub>S (s\<^sub>2 \\<^sub>S s\<^sub>3 on X) on Y)" + [lens_defs]: "less_eq_scene X Y = (\ s\<^sub>1 s\<^sub>2 s\<^sub>3. s\<^sub>1 \\<^sub>S s\<^sub>2 on Y \\<^sub>S s\<^sub>3 on X = s\<^sub>1 \\<^sub>S (s\<^sub>2 \\<^sub>S s\<^sub>3 on X) on Y)" definition less_scene :: "'a scene \ 'a scene \ bool" where - "less_scene x y = (x \ y \ \ y \ x)" + [lens_defs]: "less_scene x y = (x \ y \ \ y \ x)" instance .. end abbreviation subscene :: "'a scene \ 'a scene \ bool" (infix "\\<^sub>S" 55) where "subscene X Y \ X \ Y" lemma subscene_refl: "X \\<^sub>S X" by (simp add: less_eq_scene_def) lemma subscene_trans: "\ idem_scene Y; X \\<^sub>S Y; Y \\<^sub>S Z \ \ X \\<^sub>S Z" by (simp add: less_eq_scene_def, transfer, auto, metis (no_types, hide_lams) idem_overrider.ovr_idem) lemma subscene_antisym: "\ idem_scene Y; X \\<^sub>S Y; Y \\<^sub>S X \ \ X = Y" apply (simp add: less_eq_scene_def, transfer, auto) apply (rule ext) apply (rule ext) apply (metis (full_types) idem_overrider.ovr_idem overrider.ovr_overshadow_left) done lemma subscene_eliminate: "\ idem_scene Y; X \ Y \ \ s\<^sub>1 \\<^sub>S s\<^sub>2 on X \\<^sub>S s\<^sub>3 on Y = s\<^sub>1 \\<^sub>S s\<^sub>3 on Y" by (metis less_eq_scene_def scene_override_overshadow_left scene_override_idem) lemma scene_bot_least: "\\<^sub>S \ X" unfolding less_eq_scene_def by (transfer, auto) lemma scene_top_greatest: "X \ \\<^sub>S" unfolding less_eq_scene_def by (transfer, auto) lemma scene_union_ub: "\ idem_scene Y; X \\<^sub>S Y \ \ X \ (X \\<^sub>S Y)" by (simp add: less_eq_scene_def, transfer, auto) (metis (no_types, hide_lams) idem_overrider.ovr_idem overrider.ovr_overshadow_right) +lemma scene_le_then_compat: "\ idem_scene X; idem_scene Y; X \ Y \ \ X ##\<^sub>S Y" + unfolding less_eq_scene_def + by (transfer, auto, metis (no_types, lifting) idem_overrider.ovr_idem overrider_def) + +lemma indep_then_compl_in: "A \\<^sub>S B \ A \ -B" + unfolding less_eq_scene_def by (transfer, simp) + subsection \ Linking Scenes and Lenses \ text \ The following function extracts a scene from a very well behaved lens \ lift_definition lens_scene :: "('v \ 's) \ 's scene" ("\_\\<^sub>\") is "\ X s\<^sub>1 s\<^sub>2. 
if (mwb_lens X) then s\<^sub>1 \\<^sub>L s\<^sub>2 on X else s\<^sub>1" by (unfold_locales, auto simp add: lens_override_def) lemma vwb_impl_idem_scene [simp]: "vwb_lens X \ idem_scene \X\\<^sub>\" by (transfer, unfold_locales, auto simp add: lens_override_overshadow_left lens_override_overshadow_right) lemma idem_scene_impl_vwb: "\ mwb_lens X; idem_scene \X\\<^sub>\ \ \ vwb_lens X" apply (cases "mwb_lens X") apply (transfer, unfold idem_overrider_def overrider_def, auto) apply (simp add: idem_overrider_axioms_def override_idem_implies_vwb) done +lemma lens_compat_scene: "\ mwb_lens X; mwb_lens Y \ \ X ##\<^sub>L Y \ \X\\<^sub>\ ##\<^sub>S \Y\\<^sub>\" + by (auto simp add: lens_scene.rep_eq scene_compat.rep_eq lens_defs) + text \ Next we show some important congruence properties \ lemma zero_lens_scene: "\0\<^sub>L\\<^sub>\ = \\<^sub>S" by (transfer, simp) lemma one_lens_scene: "\1\<^sub>L\\<^sub>\ = \\<^sub>S" by (transfer, simp) lemma lens_scene_override: "mwb_lens X \ s\<^sub>1 \\<^sub>S s\<^sub>2 on \X\\<^sub>\ = s\<^sub>1 \\<^sub>L s\<^sub>2 on X" by (transfer, simp) lemma lens_indep_scene: assumes "vwb_lens X" "vwb_lens Y" shows "(X \ Y) \ \X\\<^sub>\ \\<^sub>S \Y\\<^sub>\" using assms by (auto, (simp add: scene_indep_override, transfer, simp add: lens_indep_override_def)+) lemma lens_indep_impl_scene_indep [simp]: "(X \ Y) \ \X\\<^sub>\ \\<^sub>S \Y\\<^sub>\" by (transfer, auto simp add: lens_indep_comm lens_override_def) lemma lens_plus_scene: "\ vwb_lens X; vwb_lens Y; X \ Y \ \ \X +\<^sub>L Y\\<^sub>\ = \X\\<^sub>\ \\<^sub>S \Y\\<^sub>\" by (transfer, auto simp add: lens_override_plus lens_indep_override_def lens_indep_overrideI plus_vwb_lens) lemma subscene_implies_sublens': "\ vwb_lens X; vwb_lens Y \ \ \X\\<^sub>\ \ \Y\\<^sub>\ \ X \\<^sub>L' Y" by (simp add: lens_defs less_eq_scene_def, transfer, simp add: lens_override_def) lemma sublens'_implies_subscene: "\ vwb_lens X; vwb_lens Y; X \\<^sub>L' Y \ \ \X\\<^sub>\ \ \Y\\<^sub>\" by (simp add: lens_defs less_eq_scene_def, auto simp add: lens_override_def lens_scene_override) lemma sublens_iff_subscene: assumes "vwb_lens X" "vwb_lens Y" shows "X \\<^sub>L Y \ \X\\<^sub>\ \ \Y\\<^sub>\" by (simp add: assms sublens_iff_sublens' subscene_implies_sublens') text \ Equality on scenes is sound and complete with respect to lens equivalence. \ lemma lens_equiv_scene: assumes "vwb_lens X" "vwb_lens Y" shows "X \\<^sub>L Y \ \X\\<^sub>\ = \Y\\<^sub>\" proof assume a: "X \\<^sub>L Y" show "\X\\<^sub>\ = \Y\\<^sub>\" by (meson a assms lens_equiv_def sublens_iff_subscene subscene_antisym vwb_impl_idem_scene) next assume b: "\X\\<^sub>\ = \Y\\<^sub>\" show "X \\<^sub>L Y" by (simp add: assms b lens_equiv_def sublens_iff_subscene subscene_refl) qed text \ Membership operations. These have slightly hacky definitions at the moment in order to cater for @{term mwb_lens}. See if they can be generalised? \ definition lens_member :: "('a \ 'b) \ 'b scene \ bool" (infix "\\<^sub>S" 50) where [lens_defs]: "lens_member x A = ((\ s\<^sub>1 s\<^sub>2 s\<^sub>3. s\<^sub>1 \\<^sub>S s\<^sub>2 on A \\<^sub>L s\<^sub>3 on x = s\<^sub>1 \\<^sub>S (s\<^sub>2 \\<^sub>L s\<^sub>3 on x) on A) \ (\ b b'. 
get\<^bsub>x\<^esub> (b \\<^sub>S b' on A) = get\<^bsub>x\<^esub> b'))" lemma lens_member_top: "x \\<^sub>S \\<^sub>S" by (auto simp add: lens_member_def) abbreviation lens_not_member :: "('a \ 'b) \ 'b scene \ bool" (infix "\\<^sub>S" 50) where "x \\<^sub>S A \ (x \\<^sub>S - A)" lemma lens_member_get_override [simp]: "x \\<^sub>S a \ get\<^bsub>x\<^esub> (b \\<^sub>S b' on a) = get\<^bsub>x\<^esub> b'" by (simp add: lens_member_def) lemma lens_not_member_get_override [simp]: "x \\<^sub>S a \ get\<^bsub>x\<^esub> (b \\<^sub>S b' on a) = get\<^bsub>x\<^esub> b" by (simp add: lens_member_def scene_override_commute) -text \ Hide implementation details for scenes \ +subsection \ Function Domain Scene \ + +lift_definition fun_dom_scene :: "'a set \ ('a \ 'b) scene" ("fds") is +"\ A f g. override_on f g A" by (unfold_locales, simp_all add: override_on_def fun_eq_iff) + +lemma fun_dom_scene_empty: "fds({}) = \\<^sub>S" + by (transfer, simp) + +lemma fun_dom_scene_union: "fds(A \ B) = fds(A) \\<^sub>S fds(B)" + by (transfer, auto simp add: fun_eq_iff override_on_def) + +lemma fun_dom_scene_compl: "fds(- A) = - fds(A)" + by (transfer, auto simp add: fun_eq_iff override_on_def) + +lemma fun_dom_scene_inter: "fds(A \ B) = fds(A) \\<^sub>S fds(B)" + by (simp add: inf_scene_def fun_dom_scene_union[THEN sym] fun_dom_scene_compl[THEN sym]) + +lemma fun_dom_scene_UNIV: "fds(UNIV) = \\<^sub>S" + by (transfer, auto simp add: fun_eq_iff override_on_def) + +lemma fun_dom_scene_always_compat [simp]: "fds(A) ##\<^sub>S fds(B)" + by (transfer, simp add: override_on_def fun_eq_iff) + +text \ Hide implementation details for scenes \ lifting_update scene.lifting lifting_forget scene.lifting end \ No newline at end of file diff --git a/thys/Optics/Two.thy b/thys/Optics/Two.thy --- a/thys/Optics/Two.thy +++ b/thys/Optics/Two.thy @@ -1,50 +1,50 @@ -section \Types of Cardinality 2 or Greater\ +section {* Types of Cardinality 2 or Greater *} theory Two imports HOL.Real begin -text \The two class states that a type's carrier is either infinite, or else it has a finite +text {* The two class states that a type's carrier is either infinite, or else it has a finite cardinality of at least 2. It is needed when we depend on having at least two distinguishable - elements.\ + elements. *} class two = assumes card_two: "infinite (UNIV :: 'a set) \ card (UNIV :: 'a set) \ 2" begin lemma two_diff: "\ x y :: 'a. x \ y" proof - obtain A where "finite A" "card A = 2" "A \ (UNIV :: 'a set)" proof (cases "infinite (UNIV :: 'a set)") case True with infinite_arbitrarily_large[of "UNIV :: 'a set" 2] that show ?thesis by auto next case False with card_two that show ?thesis by (metis UNIV_bool card_UNIV_bool card_image card_le_inj finite.intros(1) finite_insert finite_subset) qed thus ?thesis by (metis (full_types) One_nat_def Suc_1 UNIV_eq_I card.empty card.insert finite.intros(1) insertCI nat.inject nat.simps(3)) qed end instance bool :: two by (intro_classes, auto) instance nat :: two by (intro_classes, auto) instance int :: two by (intro_classes, auto simp add: infinite_UNIV_int) instance rat :: two by (intro_classes, auto simp add: infinite_UNIV_char_0) instance real :: two by (intro_classes, auto simp add: infinite_UNIV_char_0) instance list :: (type) two by (intro_classes, auto simp add: infinite_UNIV_listI) end diff --git a/thys/Optics/document/root.bib b/thys/Optics/document/root.bib --- a/thys/Optics/document/root.bib +++ b/thys/Optics/document/root.bib @@ -1,361 +1,376 @@ @InProceedings{Alkassar2008, author="Alkassar, E. 
and Hillebrand, M. and Leinenbach, D. and Schirmer, N. and Starostin, A.", title="The {Verisoft} Approach to Systems Verification", booktitle="VSTTE 2008", year="2008", publisher="Springer", pages="209--224", series="LNCS", volume="5295" } @InProceedings{Armstrong2012, Title = {Automated Reasoning in Higher-Order Regular Algebra}, Author = {Armstrong, A. and Struth, G.}, Booktitle = {RAMiCS 2012}, Year = {2012}, Month = {September}, Publisher = {Springer}, Series = {LNCS}, Volume = {7560} } @InProceedings{Armstrong2013, Title = {Program Analysis and Verification Based on {Kleene Algebra} in {Isabelle/HOL}}, Author = {Armstrong, A. and Struth, G. and Weber, T.}, Booktitle = {ITP}, Year = 2013, Publisher = {Springer}, Series = {LNCS}, Volume = 7998 } @Article{Armstrong2015, author="Armstrong, A. and Gomes, V. and Struth, G.", title="Building program construction and verification tools from algebraic principles", journal="Formal Aspects of Computing", year="2015", volume="28", number="2", pages="265--293" } +@Book{Back1998, + author = {Back, R.-J. and von Wright, J.}, + title = {Refinement Calculus: A Systematic Introduction}, + publisher = {Springer}, + year = 1998} + @inproceedings{Back2003, author = {Back, R.-J. and Preoteasa, V.}, title = {Reasoning About Recursive Procedures with Parameters}, booktitle = {Proc. Workshop on Mechanized Reasoning About Languages with Variable Binding}, series = {MERLIN '03}, year = {2003}, location = {Uppsala, Sweden}, pages = {1--7}, publisher = {ACM} } @InProceedings{Blanchette2011, Title = {Automatic Proof and Disproof in {Isabelle/HOL}}, Author = {Blanchette, J. C. and Bulwahn, L. and Nipkow, T.}, Booktitle = {FroCoS}, Year = {2011}, Pages = {12--27}, Publisher = {Springer}, Series = {LNCS}, Volume = {6989} } @INPROCEEDINGS{Calcagno2007, author={Calcagno, C. and O'Hearn, P. and Yang, H.}, booktitle={LICS}, title={Local Action and Abstract Separation Logic}, year={2007}, pages={366--378}, month={July}, publisher={IEEE}} @INCOLLECTION{Cavalcanti&06, KEY = "Cavalcanti\&06", AUTHOR = {Cavalcanti, A. and Woodcock, J.}, TITLE = {A Tutorial Introduction to {CSP} in Unifying Theories of Programming}, BOOKTITLE = {Refinement Techniques in Software Engineering}, SERIES = {LNCS}, PUBLISHER = {Springer}, ISBN = {978-3-540-46253-8}, PAGES = {220--268}, VOLUME = {3167}, YEAR = {2006}, COMMENT = "BIB PGL"} @inproceedings{Dongol15, author = {Dongol, B. and Gomes, V. and Struth, G.}, title = {A Program Construction and Verification Tool for Separation Logic}, booktitle = {{MPC} 2015}, series = {LNCS}, volume = {9129}, pages = {137--158}, publisher = {Springer}, year = {2015} } @InProceedings{Feliachi2010, author = {Feliachi, A. and Gaudel, M.-C. and Wolff, B.}, title = {Unifying Theories in {Isabelle/HOL}}, booktitle = {UTP 2010}, pages = {188--206}, year = 2010, volume = 6445, series = {LNCS}, publisher = {Springer}} @InProceedings{Feliachi2012, author = {Feliachi, A. and Gaudel, M.-C. and Wolff, B.}, title = {{Isabelle/Circus}: a Process Specification and Verification Environment}, booktitle = {VSTTE 2012}, pages = {243--260}, year = 2012, volume = 7152, series = {LNCS}, publisher = {Springer}} @InProceedings{Fischer2015, author="Fischer, S. and Hu, Z. and Pacheco, H.", title="A Clear Picture of Lens Laws", booktitle="MPC 2015", year="2015", publisher="Springer", pages="215--223", } @PhdThesis{Foster09, author = {Foster, J.}, title = {Bidirectional programming languages}, school = {University of Pennsylvania}, year = 2009} @article{Foster07, author = {Foster, J. 
and Greenwald, M. and Moore, J. and Pierce, B. and Schmitt, A.}, title = {Combinators for Bidirectional Tree Transformations: A Linguistic Approach to the View-update Problem}, journal = {ACM Trans. Program. Lang. Syst.}, issue_date = {May 2007}, volume = {29}, number = {3}, month = may, year = {2007}, issn = {0164-0925}, articleno = {17}, url = {http://doi.acm.org/10.1145/1232420.1232424}, doi = {10.1145/1232420.1232424}, acmid = {1232424}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {Bidirectional programming, Harmony, XML, lenses, view update problem}, } @InProceedings{Foster11, Title = {Automated Engineering of Relational and Algebraic Methods in {Isabelle/HOL}}, Author = {Foster, S. and Struth, G. and Weber, T.}, Booktitle = {RAMICS 2011}, Year = {2011}, Pages = {52--67}, Publisher = {Springer}, Series = {LNCS}, Volume = {6663}, ISBN = {978-3-642-21069-3} } @InProceedings{Foster14, Title = {{Isabelle/UTP}: A Mechanised Theory Engineering Framework}, Author = {Foster, S. and Zeyda, F. and Woodcock, J.}, Booktitle = {UTP}, Year = {2014}, Pages = {21--41}, Publisher = {Springer}, Series = {LNCS}, Volume = {8963} } @InProceedings{Foster16a, author = {Foster, S. and Zeyda, F. and Woodcock, J.}, title = {Unifying heterogeneous state-spaces with lenses}, booktitle = {Proc. 13th Intl. Conf. on Theoretical Aspects of Computing (ICTAC)}, year = 2016, volume = 9965, series = {LNCS}, publisher = {Springer}} @InProceedings{Foster16b, author = {Foster, S. and Thiele, B. and Cavalcanti, A. and Woodcock, J.}, title = {Towards a {UTP} semantics for {Modelica}}, booktitle = {Proc. 6th Intl. Symp. on Unifying Theories of Programming}, month = {June}, year = 2016, series = {LNCS}, volume = {10134}, publisher = {Springer} } @article{Gibbons17, title = "Profunctor Optics: Modular Data Accessors", author = "Matthew Pickering and Jeremy Gibbons and Nicolas Wu", year = "2017", journal = "The Art, Science, and Engineering of Programming", number = "2", publisher = "AOSA", url = "http://www.cs.ox.ac.uk/people/jeremy.gibbons/publications/poptics.pdf", volume = "1", } @Book{Hehner93, Title = {A Practical Theory of Programming}, Author = {E. C. R. Hehner}, Publisher = {Springer}, Year = {1993} } @Book{Hoare85, Title = {{Communicating Sequential Processes}}, Author = {Hoare, T.}, Publisher = {Prentice-Hall}, Year = {1985}, Size = {256} } @ARTICLE{Hoare87, AUTHOR = "Hoare, T. and Hayes, I. and He, J. and Morgan, C. and Roscoe, A. and Sanders, J. and S{\o}rensen, I. and Spivey, J. and Sufrin, B.", TITLE = "The Laws of Programming", JOURNAL = "Communications of the ACM", VOLUME = "30", NUMBER = "8", PAGES = "672--687", MONTH = "August", YEAR = "1987"} @Book{Hoare&98, Title = {Unifying {Theories} of {Programming}}, Author = {Hoare, T. and He, J.}, Publisher = {Prentice-Hall}, Year = {1998}, ISBN = {ISBN-10: 0134587618}, Comment = {NA PGL}, Key = {Hoare\&98} } +@InProceedings{Hofmann2011, + Title = {Symmetric lenses}, + Author = {Hofmann, M. and Pierce, B. and Wagner, D.}, + Booktitle = {POPL}, + Year = {2011}, + Pages = {371--384}, + Publisher = {IEEE} +} + @InProceedings{Huffman13, Title = {Lifting and Transfer: A Modular Design for Quotients in {Isabelle/HOL}}, Author = {Huffman, B. and Kun\v{c}ar, O.}, Booktitle = {CPP}, Year = {2013}, Pages = {131--146}, Publisher = {Springer}, Series = {LNCS}, Volume = {8307} } @Book{Isabelle, Title = {{Isabelle/HOL: A Proof Assistant for Higher-Order Logic}}, Author = {Nipkow, T. and Wenzel, M. and Paulson, L. 
C.}, Publisher = {Springer}, Year = {2002}, Series = {LNCS}, Volume = {2283} } @inproceedings{Klein2009, author = {Klein, G. and others}, title = {{seL4}: Formal Verification of an {OS} Kernel}, booktitle = {Proc. 22nd Symp. on Operating Systems Principles (SOSP)}, year = {2009}, pages = {207--220}, publisher = {ACM} } @INPROCEEDINGS{Oliveira07, author = {Oliveira, M. and Cavalcanti, A. and Woodcock, J.}, title = {{Unifying theories in ProofPower-Z}}, booktitle = {UTP 2006}, pages = {123--140}, year = {2007}, volume = {4010}, series = {LNCS}, publisher = {Springer} } @ARTICLE{Oliveira&09, AUTHOR = {Oliveira, M. and Cavalcanti, A. and Woodcock, J.}, TITLE = "{A UTP semantics for {C}ircus}", JOURNAL = {Formal Aspects of Computing}, VOLUME = {21}, ISSUE = {1-2}, YEAR = {2009}, PAGES = {3--32}, PUBLISHER = {Springer}} @inproceedings{Reynolds2002, author = {Reynolds, J.}, title = {Separation Logic: A Logic for Shared Mutable Data Structures}, booktitle = {LICS 2012}, year = {2002}, pages = {55--74}, publisher = {IEEE Computer Society} } @InProceedings{Schirmer2009, title = "State Spaces -- The Locale Way ", series = "ENTCS", volume = "254", pages = "161--179", year = "2009", booktitle = "SSV 2009", author = "Schirmer, N. and Wenzel, M." } @Article{Tarski41, author = {Tarski, A.}, title = {On the calculus of relations}, journal = {J. Symbolic Logic}, year = 1941, volume = 6, number = 3, pages = {73--89}} @Book{Tarski71, author = {Henkin, L. and Monk, J. and Tarski, A.}, title = {Cylindric Algebras, Part I.}, publisher = {North-Holland}, year = 1971} @InProceedings{Woodcock2016, author = {Woodcock, J. and Foster, S. and Butterfield, A.}, title = {Heterogeneous Semantics and Unifying Theories}, OPTcrossref = {}, OPTkey = {}, booktitle = {7th Intl. Symp. on Leveraging Applications of Formal Methods, Verification, and Validation (ISoLA)}, year = {2016}, OPTeditor = {}, OPTvolume = {}, OPTnumber = {}, OPTseries = {}, OPTpages = {}, OPTmonth = {}, OPTaddress = {}, OPTorganization = {}, OPTpublisher = {}, note = {To appear}, OPTannote = {} } @InProceedings{Zeyda16, author = {Zeyda, F. and Foster, S. and Freitas, L.}, title = {An Axiomatic Value Model for {Isabelle/UTP}}, booktitle = {Proc. 6th Intl. Symp. on Unifying Theories of Programming}, year = 2016, note = {To appear}} diff --git a/thys/UTP/utp/utp_alphabet.thy b/thys/UTP/utp/utp_alphabet.thy --- a/thys/UTP/utp/utp_alphabet.thy +++ b/thys/UTP/utp/utp_alphabet.thy @@ -1,347 +1,347 @@ section \ Alphabet Manipulation \ theory utp_alphabet imports utp_pred utp_usedby begin subsection \ Preliminaries \ text \ Alphabets are simply types that characterise the state-space of an expression. Thus the Isabelle type system ensures that predicates cannot refer to variables not in the alphabet as this would be a type error. Often one would like to add or remove additional variables, for example if we wish to have a predicate which ranges only a smaller state-space, and then lift it into a predicate over a larger one. This is useful, for example, when dealing with relations which refer only to undashed variables (conditions) since we can use the type system to ensure well-formedness. In this theory we will set up operators for extending and contracting and alphabet. We first set up a theorem attribute for alphabet laws and a tactic. \ named_theorems alpha method alpha_tac = (simp add: alpha unrest)? 
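Before the formal definitions, the following standalone Haskell sketch (illustrative only, not part of the Isabelle development; all names such as aextS, arestrS and xL are ad hoc) models the idea behind the extrusion and restriction operators defined in the next two subsections: an expression over an alphabet is a function from the state-space, extension reads the small state through a lens get, and restriction evaluates in a state created from the small alphabet alone, here approximated by filling the remaining components with a default value.

-- Minimal executable model (assumptions: a lens is represented by its
-- get/put/create functions; an expression over state-space s with value
-- type a is a function s -> a).
data Lens v s = Lens
  { lget    :: s -> v        -- extract the view from the source
  , lput    :: s -> v -> s   -- overwrite the view inside the source
  , lcreate :: v -> s        -- build a source from a view alone
  }

type Expr a s = s -> a

-- Alphabet extension (cf. aext below): read the small state through the lens.
aextS :: Expr a v -> Lens v s -> Expr a s
aextS p x s = p (lget x s)

-- Alphabet restriction (cf. arestr below): evaluate in a source created
-- from the small state; anything outside the view is lost.
arestrS :: Expr a s -> Lens v s -> Expr a v
arestrS p x v = p (lcreate x v)

-- Example lens onto the first component of a pair state-space; create
-- fills the remaining component with an arbitrary default.
xL :: Lens Int (Int, Bool)
xL = Lens fst (\(_, b) a -> (a, b)) (\a -> (a, False))

extEx :: Int
extEx = aextS (+ 1) xL (41, True)                      -- == 42

resEx :: Int
resEx = arestrS (\(n, b) -> if b then n else 0) xL 5   -- == 0: the Bool part was lost

The restriction example illustrates the information loss that the theory notes for its restriction operator: the large-alphabet expression may depend on state outside the view of the lens.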
subsection \ Alphabet Extrusion \ text \ Alter an alphabet by application of a lens that demonstrates how the smaller alphabet ($\beta$) injects into the larger alphabet ($\alpha$). This changes the type of the expression so it is parametrised over the large alphabet. We do this by using the lens \emph{get} function to extract the smaller state binding, and then apply this to the expression. We call this "extrusion" rather than "extension" because if the extension lens is bijective then it does not extend the alphabet. Nevertheless, it does have an effect because the type will be different which can be useful when converting predicates with equivalent alphabets. \ lift_definition aext :: "('a, '\) uexpr \ ('\, '\) lens \ ('a, '\) uexpr" (infixr "\\<^sub>p" 95) is "\ P x b. P (get\<^bsub>x\<^esub> b)" . update_uexpr_rep_eq_thms text \ Next we prove some of the key laws. Extending an alphabet twice is equivalent to extending by the composition of the two lenses. \ lemma aext_twice: "(P \\<^sub>p a) \\<^sub>p b = P \\<^sub>p (a ;\<^sub>L b)" by (pred_auto) text \ The bijective @{term "1\<^sub>L"} lens identifies the source and view types. Thus an alphabet extension using this has no effect. \ lemma aext_id [simp]: "P \\<^sub>p 1\<^sub>L = P" by (pred_auto) text \ Literals do not depend on any variables, and thus applying an alphabet extension only alters the predicate's type, and not its valuation .\ lemma aext_lit [simp]: "\v\ \\<^sub>p a = \v\" by (pred_auto) lemma aext_zero [simp]: "0 \\<^sub>p a = 0" by (pred_auto) lemma aext_one [simp]: "1 \\<^sub>p a = 1" by (pred_auto) lemma aext_numeral [simp]: "numeral n \\<^sub>p a = numeral n" by (pred_auto) lemma aext_true [simp]: "true \\<^sub>p a = true" by (pred_auto) lemma aext_false [simp]: "false \\<^sub>p a = false" by (pred_auto) lemma aext_not [alpha]: "(\ P) \\<^sub>p x = (\ (P \\<^sub>p x))" by (pred_auto) lemma aext_and [alpha]: "(P \ Q) \\<^sub>p x = (P \\<^sub>p x \ Q \\<^sub>p x)" by (pred_auto) lemma aext_or [alpha]: "(P \ Q) \\<^sub>p x = (P \\<^sub>p x \ Q \\<^sub>p x)" by (pred_auto) lemma aext_imp [alpha]: "(P \ Q) \\<^sub>p x = (P \\<^sub>p x \ Q \\<^sub>p x)" by (pred_auto) lemma aext_iff [alpha]: "(P \ Q) \\<^sub>p x = (P \\<^sub>p x \ Q \\<^sub>p x)" by (pred_auto) lemma aext_shAll [alpha]: "(\<^bold>\ x \ P(x)) \\<^sub>p a = (\<^bold>\ x \ P(x) \\<^sub>p a)" by (pred_auto) lemma aext_UINF_ind [alpha]: "(\ x \ P x) \\<^sub>p a =(\ x \ (P x \\<^sub>p a))" by (pred_auto) lemma aext_UINF_mem [alpha]: "(\ x\A \ P x) \\<^sub>p a =(\ x\A \ (P x \\<^sub>p a))" by (pred_auto) text \ Alphabet extension distributes through the function liftings. 
\ lemma aext_uop [alpha]: "uop f u \\<^sub>p a = uop f (u \\<^sub>p a)" by (pred_auto) lemma aext_bop [alpha]: "bop f u v \\<^sub>p a = bop f (u \\<^sub>p a) (v \\<^sub>p a)" by (pred_auto) lemma aext_trop [alpha]: "trop f u v w \\<^sub>p a = trop f (u \\<^sub>p a) (v \\<^sub>p a) (w \\<^sub>p a)" by (pred_auto) lemma aext_qtop [alpha]: "qtop f u v w x \\<^sub>p a = qtop f (u \\<^sub>p a) (v \\<^sub>p a) (w \\<^sub>p a) (x \\<^sub>p a)" by (pred_auto) lemma aext_plus [alpha]: "(x + y) \\<^sub>p a = (x \\<^sub>p a) + (y \\<^sub>p a)" by (pred_auto) lemma aext_minus [alpha]: "(x - y) \\<^sub>p a = (x \\<^sub>p a) - (y \\<^sub>p a)" by (pred_auto) lemma aext_uminus [simp]: "(- x) \\<^sub>p a = - (x \\<^sub>p a)" by (pred_auto) lemma aext_times [alpha]: "(x * y) \\<^sub>p a = (x \\<^sub>p a) * (y \\<^sub>p a)" by (pred_auto) lemma aext_divide [alpha]: "(x / y) \\<^sub>p a = (x \\<^sub>p a) / (y \\<^sub>p a)" by (pred_auto) text \ Extending a variable expression over $x$ is equivalent to composing $x$ with the alphabet, thus effectively yielding a variable whose source is the large alphabet. \ lemma aext_var [alpha]: "var x \\<^sub>p a = var (x ;\<^sub>L a)" by (pred_auto) lemma aext_ulambda [alpha]: "((\ x \ P(x)) \\<^sub>p a) = (\ x \ P(x) \\<^sub>p a)" by (pred_auto) text \ Alphabet extension is monotonic and continuous. \ lemma aext_mono: "P \ Q \ P \\<^sub>p a \ Q \\<^sub>p a" by (pred_auto) lemma aext_cont [alpha]: "vwb_lens a \ (\ A) \\<^sub>p a = (\ P\A. P \\<^sub>p a)" by (pred_simp) text \ If a variable is unrestricted in a predicate, then the extended variable is unrestricted in the predicate with an alphabet extension. \ lemma unrest_aext [unrest]: "\ mwb_lens a; x \ p \ \ unrest (x ;\<^sub>L a) (p \\<^sub>p a)" by (transfer, simp add: lens_comp_def) text \ If a given variable (or alphabet) $b$ is independent of the extension lens $a$, that is, it is outside the original state-space of $p$, then it follows that once $p$ is extended by $a$ then $b$ cannot be restricted. \ lemma unrest_aext_indep [unrest]: "a \ b \ b \ (p \\<^sub>p a)" by pred_auto subsection \ Expression Alphabet Restriction \ text \ Restrict an alphabet by application of a lens that demonstrates how the smaller alphabet ($\beta$) injects into the larger alphabet ($\alpha$). Unlike extension, this operation can lose information if the expressions refers to variables in the larger alphabet. \ lift_definition arestr :: "('a, '\) uexpr \ ('\, '\) lens \ ('a, '\) uexpr" (infixr "\\<^sub>e" 90) is "\ P x b. P (create\<^bsub>x\<^esub> b)" . update_uexpr_rep_eq_thms lemma arestr_id [simp]: "P \\<^sub>e 1\<^sub>L = P" by (pred_auto) lemma arestr_aext [simp]: "mwb_lens a \ (P \\<^sub>p a) \\<^sub>e a = P" by (pred_auto) text \ If an expression's alphabet can be divided into two disjoint sections and the expression does not depend on the second half then restricting the expression to the first half is loss-less. \ lemma aext_arestr [alpha]: assumes "mwb_lens a" "bij_lens (a +\<^sub>L b)" "a \ b" "b \ P" shows "(P \\<^sub>e a) \\<^sub>p a = P" proof - from assms(2) have "1\<^sub>L \\<^sub>L a +\<^sub>L b" by (simp add: bij_lens_equiv_id lens_equiv_def) with assms(1,3,4) show ?thesis apply (auto simp add: id_lens_def lens_plus_def sublens_def lens_comp_def prod.case_eq_if) apply (pred_simp) apply (metis lens_indep_comm mwb_lens_weak weak_lens.put_get) done qed text \ Alternative formulation of the above law using used-by instead of unrestriction. 
\ lemma aext_arestr' [alpha]: assumes "a \ P" shows "(P \\<^sub>e a) \\<^sub>p a = P" by (rel_simp, metis assms lens_override_def usedBy_uexpr.rep_eq) lemma arestr_lit [simp]: "\v\ \\<^sub>e a = \v\" by (pred_auto) lemma arestr_zero [simp]: "0 \\<^sub>e a = 0" by (pred_auto) lemma arestr_one [simp]: "1 \\<^sub>e a = 1" by (pred_auto) lemma arestr_numeral [simp]: "numeral n \\<^sub>e a = numeral n" by (pred_auto) lemma arestr_var [alpha]: "var x \\<^sub>e a = var (x /\<^sub>L a)" by (pred_auto) lemma arestr_true [simp]: "true \\<^sub>e a = true" by (pred_auto) lemma arestr_false [simp]: "false \\<^sub>e a = false" by (pred_auto) lemma arestr_not [alpha]: "(\ P)\\<^sub>ea = (\ (P\\<^sub>ea))" by (pred_auto) lemma arestr_and [alpha]: "(P \ Q)\\<^sub>ex = (P\\<^sub>ex \ Q\\<^sub>ex)" by (pred_auto) lemma arestr_or [alpha]: "(P \ Q)\\<^sub>ex = (P\\<^sub>ex \ Q\\<^sub>ex)" by (pred_auto) lemma arestr_imp [alpha]: "(P \ Q)\\<^sub>ex = (P\\<^sub>ex \ Q\\<^sub>ex)" by (pred_auto) subsection \ Predicate Alphabet Restriction \ text \ In order to restrict the variables of a predicate, we also need to existentially quantify away the other variables. We can't do this at the level of expressions, as quantifiers are not applicable here. Consequently, we need a specialised version of alphabet restriction for predicates. It both restricts the variables using quantification and then removes them from the alphabet type using expression restriction. \ definition upred_ares :: "'\ upred \ ('\ \ '\) \ '\ upred" where [upred_defs]: "upred_ares P a = (P \\<^sub>v a) \\<^sub>e a" syntax "_upred_ares" :: "logic \ salpha \ logic" (infixl "\\<^sub>p" 90) translations "_upred_ares P a" == "CONST upred_ares P a" lemma upred_aext_ares [alpha]: "vwb_lens a \ P \\<^sub>p a \\<^sub>p a = P" by (pred_auto) lemma upred_ares_aext [alpha]: "a \ P \ (P \\<^sub>p a) \\<^sub>p a = P" by (pred_auto) lemma upred_arestr_lit [simp]: "\v\ \\<^sub>p a = \v\" by (pred_auto) lemma upred_arestr_true [simp]: "true \\<^sub>p a = true" by (pred_auto) lemma upred_arestr_false [simp]: "false \\<^sub>p a = false" by (pred_auto) lemma upred_arestr_or [alpha]: "(P \ Q)\\<^sub>px = (P\\<^sub>px \ Q\\<^sub>px)" by (pred_auto) subsection \ Alphabet Lens Laws \ lemma alpha_in_var [alpha]: "x ;\<^sub>L fst\<^sub>L = in_var x" by (simp add: in_var_def) lemma alpha_out_var [alpha]: "x ;\<^sub>L snd\<^sub>L = out_var x" by (simp add: out_var_def) lemma in_var_prod_lens [alpha]: "wb_lens Y \ in_var x ;\<^sub>L (X \\<^sub>L Y) = in_var (x ;\<^sub>L X)" - by (simp add: in_var_def prod_as_plus lens_comp_assoc fst_lens_plus) + by (simp add: in_var_def prod_as_plus lens_comp_assoc[THEN sym] fst_lens_plus) lemma out_var_prod_lens [alpha]: "wb_lens X \ out_var x ;\<^sub>L (X \\<^sub>L Y) = out_var (x ;\<^sub>L Y)" - apply (simp add: out_var_def prod_as_plus lens_comp_assoc) + apply (simp add: out_var_def prod_as_plus lens_comp_assoc[THEN sym]) apply (subst snd_lens_plus) using comp_wb_lens fst_vwb_lens vwb_lens_wb apply blast apply (simp add: alpha_in_var alpha_out_var) apply (simp) done subsection \ Substitution Alphabet Extension \ text \ This allows us to extend the alphabet of a substitution, in a similar way to expressions. \ definition subst_ext :: "'\ usubst \ ('\ \ '\) \ '\ usubst" (infix "\\<^sub>s" 65) where [upred_defs]: "\ \\<^sub>s x = (\ s. 
put\<^bsub>x\<^esub> s (\ (get\<^bsub>x\<^esub> s)))" lemma id_subst_ext [usubst]: "wb_lens x \ id \\<^sub>s x = id" by pred_auto lemma upd_subst_ext [alpha]: "vwb_lens x \ \(y \\<^sub>s v) \\<^sub>s x = (\ \\<^sub>s x)(&x:y \\<^sub>s v \\<^sub>p x)" by pred_auto lemma apply_subst_ext [alpha]: "vwb_lens x \ (\ \ e) \\<^sub>p x = (\ \\<^sub>s x) \ (e \\<^sub>p x)" by (pred_auto) lemma aext_upred_eq [alpha]: "((e =\<^sub>u f) \\<^sub>p a) = ((e \\<^sub>p a) =\<^sub>u (f \\<^sub>p a))" by (pred_auto) lemma subst_aext_comp [usubst]: "vwb_lens a \ (\ \\<^sub>s a) \ (\ \\<^sub>s a) = (\ \ \) \\<^sub>s a" by pred_auto subsection \ Substitution Alphabet Restriction \ text \ This allows us to reduce the alphabet of a substitution, in a similar way to expressions. \ definition subst_res :: "'\ usubst \ ('\ \ '\) \ '\ usubst" (infix "\\<^sub>s" 65) where [upred_defs]: "\ \\<^sub>s x = (\ s. get\<^bsub>x\<^esub> (\ (create\<^bsub>x\<^esub> s)))" lemma id_subst_res [usubst]: "mwb_lens x \ id \\<^sub>s x = id" by pred_auto lemma upd_subst_res [alpha]: "mwb_lens x \ \(&x:y \\<^sub>s v) \\<^sub>s x = (\ \\<^sub>s x)(&y \\<^sub>s v \\<^sub>e x)" by (pred_auto) lemma subst_ext_res [usubst]: "mwb_lens x \ (\ \\<^sub>s x) \\<^sub>s x = \" by (pred_auto) lemma unrest_subst_alpha_ext [unrest]: "x \ y \ x \ (P \\<^sub>s y)" by (pred_simp robust, metis lens_indep_def) end \ No newline at end of file diff --git a/thys/UTP/utp/utp_expr.thy b/thys/UTP/utp/utp_expr.thy --- a/thys/UTP/utp/utp_expr.thy +++ b/thys/UTP/utp/utp_expr.thy @@ -1,338 +1,336 @@ section \ UTP Expressions \ theory utp_expr imports utp_var begin subsection \ Expression type \ purge_notation BNF_Def.convol ("\(_,/ _)\") text \ Before building the predicate model, we will build a model of expressions that generalise alphabetised predicates. Expressions are represented semantically as mapping from the alphabet @{typ "'\"} to the expression's type @{typ "'a"}. This general model will allow us to unify all constructions under one type. The majority definitions in the file are given using the \emph{lifting} package~\cite{Huffman13}, which allows us to reuse much of the existing library of HOL functions. \ typedef ('t, '\) uexpr = "UNIV :: ('\ \ 't) set" .. setup_lifting type_definition_uexpr notation Rep_uexpr ("\_\\<^sub>e") notation Abs_uexpr ("mk\<^sub>e") lemma uexpr_eq_iff: "e = f \ (\ b. \e\\<^sub>e b = \f\\<^sub>e b)" using Rep_uexpr_inject[of e f, THEN sym] by (auto) text \ The term @{term "\e\\<^sub>e b"} effectively refers to the semantic interpretation of the expression under the state-space valuation (or variables binding) @{term b}. It can be used, in concert with the lifting package, to interpret UTP constructs to their HOL equivalents. We create some theorem sets to store such transfer theorems. \ named_theorems uexpr_defs and ueval and lit_simps and lit_norm subsection \ Core expression constructs \ text \ A variable expression corresponds to the lens $get$ function associated with a variable. Specifically, given a lens the expression always returns that portion of the state-space referred to by the lens. \ lift_definition var :: "('t \ '\) \ ('t, '\) uexpr" is lens_get . text \ A literal is simply a constant function expression, always returning the same value for any binding. \ lift_definition lit :: "'t \ ('t, '\) uexpr" ("\_\") is "\ v b. v" . 
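As an informal counterpart to the two definitions just given, the following self-contained Haskell sketch (illustrative only, not the theory's own code; names such as varE, litE and xVar are ad hoc) renders the expression model concretely: expressions are functions from the state-space, a variable expression is a lens get, a literal is a constant function, and the lifting constructors introduced next apply a function pointwise to the results of their argument expressions.

-- Expressions over state-space s with value type a are functions s -> a.
data Lens v s = Lens { lget :: s -> v, lput :: s -> v -> s }

type UExpr a s = s -> a

-- A variable expression is the get function of a lens.
varE :: Lens v s -> UExpr v s
varE = lget

-- A literal ignores the state and always returns the same value.
litE :: a -> UExpr a s
litE v = \_ -> v

-- Pointwise lifting in the style of the uop/bop constructors defined next.
uopE :: (a -> b) -> UExpr a s -> UExpr b s
uopE f e = \b -> f (e b)

bopE :: (a -> b -> c) -> UExpr a s -> UExpr b s -> UExpr c s
bopE f u v = \b -> f (u b) (v b)

-- Example over the state-space (Int, Bool): the expression x + 1, where x
-- is the first-component variable.
xVar :: Lens Int (Int, Bool)
xVar = Lens fst (\(_, b) a -> (a, b))

exprEx :: UExpr Int (Int, Bool)
exprEx = bopE (+) (varE xVar) (litE 1)   -- exprEx (41, True) == 42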
text \ We define lifting for unary, binary, ternary, and quaternary expression constructs, that simply take a HOL function with correct number of arguments and apply it function to all possible results of the expressions. \ lift_definition uop :: "('a \ 'b) \ ('a, '\) uexpr \ ('b, '\) uexpr" is "\ f e b. f (e b)" . lift_definition bop :: "('a \ 'b \ 'c) \ ('a, '\) uexpr \ ('b, '\) uexpr \ ('c, '\) uexpr" is "\ f u v b. f (u b) (v b)" . lift_definition trop :: "('a \ 'b \ 'c \ 'd) \ ('a, '\) uexpr \ ('b, '\) uexpr \ ('c, '\) uexpr \ ('d, '\) uexpr" is "\ f u v w b. f (u b) (v b) (w b)" . lift_definition qtop :: "('a \ 'b \ 'c \ 'd \ 'e) \ ('a, '\) uexpr \ ('b, '\) uexpr \ ('c, '\) uexpr \ ('d, '\) uexpr \ ('e, '\) uexpr" is "\ f u v w x b. f (u b) (v b) (w b) (x b)" . text \ We also define a UTP expression version of function ($\lambda$) abstraction, that takes a function producing an expression and produces an expression producing a function. \ lift_definition ulambda :: "('a \ ('b, '\) uexpr) \ ('a \ 'b, '\) uexpr" is "\ f A x. f x A" . text \ We set up syntax for the conditional. This is effectively an infix version of if-then-else where the condition is in the middle. \ definition uIf :: "bool \ 'a \ 'a \ 'a" where [uexpr_defs]: "uIf = If" abbreviation cond :: "('a,'\) uexpr \ (bool, '\) uexpr \ ('a,'\) uexpr \ ('a,'\) uexpr" ("(3_ \ _ \/ _)" [52,0,53] 52) where "P \ b \ Q \ trop uIf b P Q" text \ UTP expression is equality is simply HOL equality lifted using the @{term bop} binary expression constructor. \ definition eq_upred :: "('a, '\) uexpr \ ('a, '\) uexpr \ (bool, '\) uexpr" (infixl "=\<^sub>u" 50) where [uexpr_defs]: "eq_upred x y = bop HOL.eq x y" text \ A literal is the expression @{term "\v\"}, where @{term v} is any HOL term. Actually, the literal construct is very versatile and also allows us to refer to HOL variables within UTP expressions, and has a variety of other uses. It can therefore also be considered as a kind of quotation mechanism. We also set up syntax for UTP variable expressions. \ syntax "_uuvar" :: "svar \ logic" ("_") translations "_uuvar x" == "CONST var x" text \ Since we already have a parser for variables, we can directly reuse it and simply apply the @{term var} expression construct to lift the resulting variable to an expression. \ subsection \ Type class instantiations \ text \ Isabelle/HOL of course provides a large hierarchy of type classes that provide constructs such as numerals and the arithmetic operators. Fortunately we can directly make use of these for UTP expressions, and thus we now perform a long list of appropriate instantiations. We first lift the core arithemtic constants and operators using a mixture of literals, unary, and binary expression constructors. \ instantiation uexpr :: (zero, type) zero begin definition zero_uexpr_def [uexpr_defs]: "0 = lit 0" instance .. end instantiation uexpr :: (one, type) one begin definition one_uexpr_def [uexpr_defs]: "1 = lit 1" instance .. end instantiation uexpr :: (plus, type) plus begin definition plus_uexpr_def [uexpr_defs]: "u + v = bop (+) u v" instance .. end instance uexpr :: (semigroup_add, type) semigroup_add by (intro_classes) (simp add: plus_uexpr_def zero_uexpr_def, transfer, simp add: add.assoc)+ text \ The following instantiation sets up numerals. This will allow us to have Isabelle number representations (i.e. 3,7,42,198 etc.) to UTP expressions directly. 
\ instance uexpr :: (numeral, type) numeral by (intro_classes, simp add: plus_uexpr_def, transfer, simp add: add.assoc) text \ We can also define the order relation on expressions. Now, unlike the previous group and ring constructs, the order relations @{term "(\)"} and @{term "(\)"} return a @{type bool} type. This order is not therefore the lifted order which allows us to compare the valuation of two expressions, but rather the order on expressions themselves. Notably, this instantiation will later allow us to talk about predicate refinements and complete lattices. \ instantiation uexpr :: (ord, type) ord begin lift_definition less_eq_uexpr :: "('a, 'b) uexpr \ ('a, 'b) uexpr \ bool" is "\ P Q. (\ A. P A \ Q A)" . definition less_uexpr :: "('a, 'b) uexpr \ ('a, 'b) uexpr \ bool" where [uexpr_defs]: "less_uexpr P Q = (P \ Q \ \ Q \ P)" instance .. end text \ UTP expressions whose return type is a partial ordered type, are also partially ordered as the following instantiation demonstrates. \ instance uexpr :: (order, type) order proof fix x y z :: "('a, 'b) uexpr" show "(x < y) = (x \ y \ \ y \ x)" by (simp add: less_uexpr_def) show "x \ x" by (transfer, auto) show "x \ y \ y \ z \ x \ z" by (transfer, blast intro:order.trans) show "x \ y \ y \ x \ x = y" by (transfer, rule ext, simp add: eq_iff) qed subsection \ Syntax translations \ text \ The follows a large number of translations that lift HOL functions to UTP expressions using the various expression constructors defined above. Much of the time we try to keep the HOL syntax but add a "u" subscript. \ - -abbreviation (input) "ulens_override x f g \ lens_override f g x" text \ This operator allows us to get the characteristic set of a type. Essentially this is @{term "UNIV"}, but it retains the type syntactically for pretty printing. \ definition set_of :: "'a itself \ 'a set" where [uexpr_defs]: "set_of t = UNIV" text \ We add new non-terminals for UTP tuples and maplets. \ nonterminal utuple_args and umaplet and umaplets syntax \ \ Core expression constructs \ "_ucoerce" :: "logic \ type \ logic" (infix ":\<^sub>u" 50) "_ulambda" :: "pttrn \ logic \ logic" ("\ _ \ _" [0, 10] 10) "_ulens_ovrd" :: "logic \ logic \ salpha \ logic" ("_ \ _ on _" [85, 0, 86] 86) "_ulens_get" :: "logic \ svar \ logic" ("_:_" [900,901] 901) "_umem" :: "('a, '\) uexpr \ ('a set, '\) uexpr \ (bool, '\) uexpr" (infix "\\<^sub>u" 50) translations "\ x \ p" == "CONST ulambda (\ x. p)" "x :\<^sub>u 'a" == "x :: ('a, _) uexpr" - "_ulens_ovrd f g a" => "CONST bop (CONST ulens_override a) f g" + "_ulens_ovrd f g a" => "CONST bop (CONST lens_override a) f g" "_ulens_ovrd f g a" <= "CONST bop (\x y. 
CONST lens_override x1 y1 a) f g" "_ulens_get x y" == "CONST uop (CONST lens_get y) x" "x \\<^sub>u A" == "CONST bop (\) x A" syntax \ \ Tuples \ "_utuple" :: "('a, '\) uexpr \ utuple_args \ ('a * 'b, '\) uexpr" ("(1'(_,/ _')\<^sub>u)") "_utuple_arg" :: "('a, '\) uexpr \ utuple_args" ("_") "_utuple_args" :: "('a, '\) uexpr => utuple_args \ utuple_args" ("_,/ _") "_uunit" :: "('a, '\) uexpr" ("'(')\<^sub>u") "_ufst" :: "('a \ 'b, '\) uexpr \ ('a, '\) uexpr" ("\\<^sub>1'(_')") "_usnd" :: "('a \ 'b, '\) uexpr \ ('b, '\) uexpr" ("\\<^sub>2'(_')") translations "()\<^sub>u" == "\()\" "(x, y)\<^sub>u" == "CONST bop (CONST Pair) x y" "_utuple x (_utuple_args y z)" == "_utuple x (_utuple_arg (_utuple y z))" "\\<^sub>1(x)" == "CONST uop CONST fst x" "\\<^sub>2(x)" == "CONST uop CONST snd x" syntax \ \ Orders \ "_uless" :: "logic \ logic \ logic" (infix "<\<^sub>u" 50) "_uleq" :: "logic \ logic \ logic" (infix "\\<^sub>u" 50) "_ugreat" :: "logic \ logic \ logic" (infix ">\<^sub>u" 50) "_ugeq" :: "logic \ logic \ logic" (infix "\\<^sub>u" 50) translations "x <\<^sub>u y" == "CONST bop (<) x y" "x \\<^sub>u y" == "CONST bop (\) x y" "x >\<^sub>u y" => "y <\<^sub>u x" "x \\<^sub>u y" => "y \\<^sub>u x" subsection \ Evaluation laws for expressions \ text \ The following laws show how to evaluate the core expressions constructs in terms of which the above definitions are defined. Thus, using these theorems together, we can convert any UTP expression into a pure HOL expression. All these theorems are marked as \emph{ueval} theorems which can be used for evaluation. \ lemma lit_ueval [ueval]: "\\x\\\<^sub>eb = x" by (transfer, simp) lemma var_ueval [ueval]: "\var x\\<^sub>eb = get\<^bsub>x\<^esub> b" by (transfer, simp) lemma uop_ueval [ueval]: "\uop f x\\<^sub>eb = f (\x\\<^sub>eb)" by (transfer, simp) lemma bop_ueval [ueval]: "\bop f x y\\<^sub>eb = f (\x\\<^sub>eb) (\y\\<^sub>eb)" by (transfer, simp) lemma trop_ueval [ueval]: "\trop f x y z\\<^sub>eb = f (\x\\<^sub>eb) (\y\\<^sub>eb) (\z\\<^sub>eb)" by (transfer, simp) lemma qtop_ueval [ueval]: "\qtop f x y z w\\<^sub>eb = f (\x\\<^sub>eb) (\y\\<^sub>eb) (\z\\<^sub>eb) (\w\\<^sub>eb)" by (transfer, simp) subsection \ Misc laws \ text \ We also prove a few useful algebraic and expansion laws for expressions. \ lemma uop_const [simp]: "uop id u = u" by (transfer, simp) lemma bop_const_1 [simp]: "bop (\x y. y) u v = v" by (transfer, simp) lemma bop_const_2 [simp]: "bop (\x y. x) u v = u" by (transfer, simp) lemma uexpr_fst [simp]: "\\<^sub>1((e, f)\<^sub>u) = e" by (transfer, simp) lemma uexpr_snd [simp]: "\\<^sub>2((e, f)\<^sub>u) = f" by (transfer, simp) subsection \ Literalise tactics \ text \ The following tactic converts literal HOL expressions to UTP expressions and vice-versa via a collection of simplification rules. The two tactics are called "literalise", which converts UTP to expressions to HOL expressions -- i.e. it pushes them into literals -- and unliteralise that reverses this. We collect the equations in a theorem attribute called "lit\_simps". \ lemma lit_fun_simps [lit_simps]: "\i x y z u\ = qtop i \x\ \y\ \z\ \u\" "\h x y z\ = trop h \x\ \y\ \z\" "\g x y\ = bop g \x\ \y\" "\f x\ = uop f \x\" by (transfer, simp)+ text \ The following two theorems also set up interpretation of numerals, meaning a UTP numeral can always be converted to a HOL numeral. 
\ lemma numeral_uexpr_rep_eq [ueval]: "\numeral x\\<^sub>e b = numeral x" apply (induct x) apply (simp add: lit.rep_eq one_uexpr_def) apply (simp add: bop.rep_eq numeral_Bit0 plus_uexpr_def) apply (simp add: bop.rep_eq lit.rep_eq numeral_code(3) one_uexpr_def plus_uexpr_def) done lemma numeral_uexpr_simp: "numeral x = \numeral x\" by (simp add: uexpr_eq_iff numeral_uexpr_rep_eq lit.rep_eq) lemma lit_zero [lit_simps]: "\0\ = 0" by (simp add:uexpr_defs) lemma lit_one [lit_simps]: "\1\ = 1" by (simp add: uexpr_defs) lemma lit_plus [lit_simps]: "\x + y\ = \x\ + \y\" by (simp add: uexpr_defs, transfer, simp) lemma lit_numeral [lit_simps]: "\numeral n\ = numeral n" by (simp add: numeral_uexpr_simp) text \ In general, unliteralising converts function applications to the corresponding expression liftings. Since some operators, like + and *, have dedicated operators at the expression level, we also have to use @{thm uexpr_defs} in reverse to correctly interpret these. Moreover, numerals must be handled separately by first simplifying them and then converting them into UTP expression numerals; hence the following two simplification rules. \ lemma lit_numeral_1: "uop numeral x = Abs_uexpr (\b. numeral (\x\\<^sub>e b))" by (simp add: uop_def) lemma lit_numeral_2: "Abs_uexpr (\ b. numeral v) = numeral v" by (metis lit.abs_eq lit_numeral) method literalise = (unfold lit_simps[THEN sym]) method unliteralise = (unfold lit_simps uexpr_defs[THEN sym]; (unfold lit_numeral_1 ; (unfold uexpr_defs ueval); (unfold lit_numeral_2))?)+ text \ The following tactic can be used to evaluate literal expressions. It first literalises UTP expressions, that is, it pushes as many operators into literals as possible. It then tries to simplify, and finally unliteralises at the end. \ method uexpr_simp uses simps = ((literalise)?, simp add: lit_norm simps, (unliteralise)?) (* Example *) lemma "(1::(int, '\) uexpr) + \2\ = 4 \ \3\ = 4" apply (literalise) apply (uexpr_simp) oops end \ No newline at end of file
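To close, the following self-contained Haskell sketch (illustrative only, not part of the Isabelle development; the names litE, uopE, bopE and checkE are ad hoc) mirrors the numeric type-class lifting used above: operations are lifted pointwise, numerals denote constant expressions, and evaluating a numeral expression in any binding returns the underlying numeral, in the spirit of numeral_uexpr_rep_eq.

-- Pointwise numeric lifting for a function-space expression model.
newtype UExpr a s = UExpr { evalE :: s -> a }

litE :: a -> UExpr a s
litE v = UExpr (const v)

uopE :: (a -> b) -> UExpr a s -> UExpr b s
uopE f (UExpr e) = UExpr (f . e)

bopE :: (a -> b -> c) -> UExpr a s -> UExpr b s -> UExpr c s
bopE f (UExpr u) (UExpr v) = UExpr (\b -> f (u b) (v b))

instance Num a => Num (UExpr a s) where
  fromInteger = litE . fromInteger   -- numerals become literal expressions
  (+)    = bopE (+)
  (*)    = bopE (*)
  (-)    = bopE (-)
  abs    = uopE abs
  signum = uopE signum
  negate = uopE negate

-- Evaluating the expression 1 + 2 in any binding yields the number 3.
checkE :: Bool
checkE = evalE (1 + 2 :: UExpr Int ()) () == 3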