diff --git a/src/Doc/ROOT b/src/Doc/ROOT --- a/src/Doc/ROOT +++ b/src/Doc/ROOT @@ -1,515 +1,515 @@ chapter Doc session Classes (doc) in "Classes" = HOL + options [document_variants = "classes", quick_and_dirty] theories [document = false] Setup theories Classes document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "manual.bib" document_files "build" "root.tex" "style.sty" session Codegen (doc) in "Codegen" = HOL + options [document_variants = "codegen", print_mode = "no_brackets,iff"] sessions "HOL-Library" theories [document = false] Setup theories Introduction Foundations Refinement Inductive_Predicate Evaluation Computations Adaptation Further document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "manual.bib" document_files "build" "root.tex" "style.sty" session Corec (doc) in "Corec" = Datatypes + options [document_variants = "corec"] theories Corec document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "manual.bib" document_files "build" "root.tex" "style.sty" session Datatypes (doc) in "Datatypes" = HOL + options [document_variants = "datatypes"] sessions "HOL-Library" theories [document = false] Setup theories Datatypes document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "manual.bib" document_files "build" "root.tex" "style.sty" session Eisbach (doc) in "Eisbach" = HOL + options [document_variants = "eisbach", quick_and_dirty, print_mode = "no_brackets,iff", show_question_marks = false] sessions "HOL-Eisbach" theories [document = false] Base theories Preface Manual document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "ttbox.sty" "underscore.sty" "manual.bib" document_files "build" "root.tex" "style.sty" session Functions (doc) in "Functions" = HOL + options [document_variants = "functions", skip_proofs = false, quick_and_dirty] theories Functions document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "manual.bib" document_files "build" "conclusion.tex" "intro.tex" "root.tex" "style.sty" session How_to_Prove_it (no_doc) in "How_to_Prove_it" = HOL + options [document_variants = "how_to_prove_it", show_question_marks = false] theories How_to_Prove_it document_files "root.tex" "root.bib" "prelude.tex" session Intro (doc) in "Intro" = Pure + options [document_variants = "intro"] document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "ttbox.sty" "manual.bib" document_files "advanced.tex" "build" "foundations.tex" "getting.tex" "root.tex" session Implementation (doc) in "Implementation" = HOL + options [document_variants = "implementation", quick_and_dirty] theories Eq Integration Isar Local_Theory "ML" Prelim Proof Syntax Tactic theories [parallel_proofs = 0] Logic document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "ttbox.sty" "underscore.sty" "manual.bib" document_files "build" "root.tex" "style.sty" session Isar_Ref (doc) in "Isar_Ref" = HOL + options [document_variants = "isar-ref", quick_and_dirty, thy_output_source] sessions "HOL-Library" theories Preface Synopsis Framework First_Order_Logic Outer_Syntax Document_Preparation Spec Proof Proof_Script Inner_Syntax Generic HOL_Specific Quick_Reference Symbols document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "ttbox.sty" "underscore.sty" "manual.bib" document_files "build" "isar-vm.pdf" 
"isar-vm.svg" "root.tex" "showsymbols" "style.sty" session JEdit (doc) in "JEdit" = HOL + options [document_variants = "jedit", thy_output_source] theories JEdit document_files (in "..") "extra.sty" "iman.sty" "isar.sty" "manual.bib" "pdfsetup.sty" "prepare_document" "ttbox.sty" "underscore.sty" document_files (in "../Isar_Ref/document") "style.sty" document_files "auto-tools.png" "bibtex-mode.png" "build" "cite-completion.png" "isabelle-jedit-hdpi.png" "isabelle-jedit.png" "markdown-document.png" "ml-debugger.png" "output-and-state.png" "output-including-state.png" "output.png" "popup1.png" "popup2.png" "query.png" "root.tex" "scope1.png" "scope2.png" "sidekick-document.png" "sidekick.png" "sledgehammer.png" "theories.png" session Sugar (doc) in "Sugar" = HOL + options [document_variants = "sugar"] sessions "HOL-Library" theories Sugar document_files (in "..") "prepare_document" "pdfsetup.sty" document_files "build" "root.bib" "root.tex" session Locales (doc) in "Locales" = HOL + options [document_variants = "locales", thy_output_margin = 65, skip_proofs = false] theories Examples1 Examples2 Examples3 document_files (in "..") "prepare_document" "pdfsetup.sty" document_files "build" "root.bib" "root.tex" session Logics (doc) in "Logics" = Pure + options [document_variants = "logics"] document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "ttbox.sty" "manual.bib" document_files "CTT.tex" "HOL.tex" "LK.tex" "Sequents.tex" "build" "preface.tex" "root.tex" "syntax.tex" session Logics_ZF (doc) in "Logics_ZF" = ZF + options [document_variants = "logics-ZF", print_mode = "brackets", thy_output_source] sessions FOL theories IFOL_examples FOL_examples ZF_examples If ZF_Isar document_files (in "..") "prepare_document" "pdfsetup.sty" "isar.sty" "ttbox.sty" "manual.bib" document_files (in "../Logics/document") "syntax.tex" document_files "FOL.tex" "ZF.tex" "build" "logics.sty" "root.tex" session Main (doc) in "Main" = HOL + options [document_variants = "main"] theories Main_Doc document_files (in "..") "prepare_document" "pdfsetup.sty" document_files "build" "root.tex" session Nitpick (doc) in "Nitpick" = Pure + options [document_variants = "nitpick"] document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "manual.bib" document_files "build" "root.tex" session Prog_Prove (doc) in "Prog_Prove" = HOL + options [document_variants = "prog-prove", show_question_marks = false] theories Basics Bool_nat_list MyList Types_and_funs Logic Isar document_files (in ".") "MyList.thy" document_files (in "..") "prepare_document" "pdfsetup.sty" document_files "bang.pdf" "build" "intro-isabelle.tex" "prelude.tex" "root.bib" "root.tex" "svmono.cls" session Sledgehammer (doc) in "Sledgehammer" = Pure + options [document_variants = "sledgehammer"] document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "manual.bib" document_files "build" "root.tex" session System (doc) in "System" = Pure + options [document_variants = "system", thy_output_source] sessions "HOL-Library" theories Environment Sessions Presentation Server Scala Phabricator Misc document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "ttbox.sty" "underscore.sty" "manual.bib" document_files (in "../Isar_Ref/document") "style.sty" document_files "build" "root.tex" session Tutorial (doc) in "Tutorial" = HOL + options [document_variants = "tutorial", print_mode = "brackets", skip_proofs = false] directories "Advanced" "CTL" "CodeGen" "Datatype" "Documents" "Fun" "Ifexpr" 
"Inductive" "Misc" "Protocol" "Rules" "Sets" "ToyList" "Trie" "Types" + theories [document = false] + Base theories [threads = 1] "ToyList/ToyList_Test" theories [thy_output_indent = 5] "ToyList/ToyList" "Ifexpr/Ifexpr" "CodeGen/CodeGen" "Trie/Trie" "Datatype/ABexpr" "Datatype/unfoldnested" "Datatype/Nested" "Datatype/Fundata" "Fun/fun0" "Advanced/simp2" "CTL/PDL" "CTL/CTL" "CTL/CTLind" "Inductive/Even" "Inductive/Mutual" "Inductive/Star" "Inductive/AB" "Inductive/Advanced" "Misc/Tree" "Misc/Tree2" "Misc/Plus" "Misc/case_exprs" "Misc/fakenat" "Misc/natsum" "Misc/pairs2" "Misc/Option2" "Misc/types" "Misc/prime_def" "Misc/simp" "Misc/Itrev" "Misc/AdvancedInd" "Misc/appendix" theories "Protocol/NS_Public" "Documents/Documents" - theories [document = false] - "Types/Setup" theories [thy_output_margin = 64, thy_output_indent = 0] "Types/Numbers" "Types/Pairs" "Types/Records" "Types/Typedefs" "Types/Overloading" "Types/Axioms" "Rules/Basic" "Rules/Blast" "Rules/Force" theories [thy_output_margin = 64, thy_output_indent = 5] "Rules/TPrimes" "Rules/Forward" "Rules/Tacticals" "Rules/find2" "Sets/Examples" "Sets/Functions" "Sets/Relations" "Sets/Recur" document_files (in "ToyList") "ToyList1.txt" "ToyList2.txt" document_files (in "..") "pdfsetup.sty" "ttbox.sty" "manual.bib" document_files "advanced0.tex" "appendix0.tex" "basics.tex" "build" "cl2emono-modified.sty" "ctl0.tex" "documents0.tex" "fp.tex" "inductive0.tex" "isa-index" "Isa-logics.pdf" "numerics.tex" "pghead.pdf" "preface.tex" "protocol.tex" "root.tex" "rules.tex" "sets.tex" "tutorial.sty" "typedef.pdf" "types0.tex" session Typeclass_Hierarchy (doc) in "Typeclass_Hierarchy" = HOL + options [document_variants = "typeclass_hierarchy"] sessions "HOL-Library" theories [document = false] Setup theories Typeclass_Hierarchy document_files (in "..") "prepare_document" "pdfsetup.sty" "iman.sty" "extra.sty" "isar.sty" "manual.bib" document_files "build" "root.tex" "style.sty" diff --git a/src/Doc/Tutorial/Inductive/Advanced.thy b/src/Doc/Tutorial/Inductive/Advanced.thy --- a/src/Doc/Tutorial/Inductive/Advanced.thy +++ b/src/Doc/Tutorial/Inductive/Advanced.thy @@ -1,406 +1,404 @@ -(*<*)theory Advanced imports Even begin -ML_file \../../antiquote_setup.ML\ -(*>*) +(*<*)theory Advanced imports Even begin(*>*) text \ The premises of introduction rules may contain universal quantifiers and monotone functions. A universal quantifier lets the rule refer to any number of instances of the inductively defined set. A monotone function lets the rule refer to existing constructions (such as ``list of'') over the inductively defined set. The examples below show how to use the additional expressiveness and how to reason from the resulting definitions. \ subsection\Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype}\ text \ \index{ground terms example|(}% \index{quantifiers!and inductive definitions|(}% As a running example, this section develops the theory of \textbf{ground terms}: terms constructed from constant and function symbols but not variables. To simplify matters further, we regard a constant as a function applied to the null argument list. Let us declare a datatype \gterm\ for the type of ground terms. It is a type constructor whose argument is a type of function symbols. \ datatype 'f gterm = Apply 'f "'f gterm list" text \ To try it out, we declare a datatype of some integer operations: integer constants, the unary minus operator and the addition operator. 
\ datatype integer_op = Number int | UnaryMinus | Plus text \ Now the type \<^typ>\integer_op gterm\ denotes the ground terms built over those symbols. The type constructor \gterm\ can be generalized to a function over sets. It returns the set of ground terms that can be formed over a set \F\ of function symbols. For example, we could consider the set of ground terms formed from the finite set \{Number 2, UnaryMinus, Plus}\. This concept is inductive. If we have a list \args\ of ground terms over~\F\ and a function symbol \f\ in \F\, then we can apply \f\ to \args\ to obtain another ground term. The only difficulty is that the argument list may be of any length. Hitherto, each rule in an inductive definition referred to the inductively defined set a fixed number of times, typically once or twice. A universal quantifier in the premise of the introduction rule expresses that every element of \args\ belongs to our inductively defined set: is a ground term over~\F\. The function \<^term>\set\ denotes the set of elements in a given list. \ inductive_set gterms :: "'f set \ 'f gterm set" for F :: "'f set" where step[intro!]: "\\t \ set args. t \ gterms F; f \ F\ \ (Apply f args) \ gterms F" text \ To demonstrate a proof from this definition, let us show that the function \<^term>\gterms\ is \textbf{monotone}. We shall need this concept shortly. \ lemma gterms_mono: "F\G \ gterms F \ gterms G" apply clarify apply (erule gterms.induct) apply blast done (*<*) lemma gterms_mono: "F\G \ gterms F \ gterms G" apply clarify apply (erule gterms.induct) (*>*) txt\ Intuitively, this theorem says that enlarging the set of function symbols enlarges the set of ground terms. The proof is a trivial rule induction. First we use the \clarify\ method to assume the existence of an element of \<^term>\gterms F\. (We could have used \intro subsetI\.) We then apply rule induction. Here is the resulting subgoal: @{subgoals[display,indent=0]} The assumptions state that \f\ belongs to~\F\, which is included in~\G\, and that every element of the list \args\ is a ground term over~\G\. The \blast\ method finds this chain of reasoning easily. \ (*<*)oops(*>*) text \ \begin{warn} Why do we call this function \gterms\ instead of \gterm\? A constant may have the same name as a type. However, name clashes could arise in the theorems that Isabelle generates. Our choice of names keeps \gterms.induct\ separate from \gterm.induct\. \end{warn} Call a term \textbf{well-formed} if each symbol occurring in it is applied to the correct number of arguments. (This number is called the symbol's \textbf{arity}.) We can express well-formedness by generalizing the inductive definition of \isa{gterms}. Suppose we are given a function called \arity\, specifying the arities of all symbols. In the inductive step, we have a list \args\ of such terms and a function symbol~\f\. If the length of the list matches the function's arity then applying \f\ to \args\ yields a well-formed term. \ inductive_set well_formed_gterm :: "('f \ nat) \ 'f gterm set" for arity :: "'f \ nat" where step[intro!]: "\\t \ set args. t \ well_formed_gterm arity; length args = arity f\ \ (Apply f args) \ well_formed_gterm arity" text \ The inductive definition neatly captures the reasoning above. 
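For example, under an \arity\ that maps \Plus\ to~2, the term \Apply Plus [t, u]\ is well-formed provided the argument terms \t\ and \u\ are.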
The universal quantification over the \set\ of arguments expresses that all of them are well-formed.% \index{quantifiers!and inductive definitions|)} \ subsection\Alternative Definition Using a Monotone Function\ text \ \index{monotone functions!and inductive definitions|(}% An inductive definition may refer to the inductively defined set through an arbitrary monotone function. To demonstrate this powerful feature, let us change the inductive definition above, replacing the quantifier by a use of the function \<^term>\lists\. This function, from the Isabelle theory of lists, is analogous to the function \<^term>\gterms\ declared above: if \A\ is a set then \<^term>\lists A\ is the set of lists whose elements belong to \<^term>\A\. In the inductive definition of well-formed terms, examine the one introduction rule. The first premise states that \args\ belongs to the \lists\ of well-formed terms. This formulation is more direct, if more obscure, than using a universal quantifier. \ inductive_set well_formed_gterm' :: "('f \ nat) \ 'f gterm set" for arity :: "'f \ nat" where step[intro!]: "\args \ lists (well_formed_gterm' arity); length args = arity f\ \ (Apply f args) \ well_formed_gterm' arity" monos lists_mono text \ We cite the theorem \lists_mono\ to justify using the function \<^term>\lists\.% \footnote{This particular theorem is installed by default already, but we include the \isakeyword{monos} declaration in order to illustrate its syntax.} @{named_thms [display,indent=0] lists_mono [no_vars] (lists_mono)} Why must the function be monotone? An inductive definition describes an iterative construction: each element of the set is constructed by a finite number of introduction rule applications. For example, the elements of \isa{even} are constructed by finitely many applications of the rules @{thm [display,indent=0] even.intros [no_vars]} All references to a set in its inductive definition must be positive. Applications of an introduction rule cannot invalidate previous applications, allowing the construction process to converge. The following pair of rules do not constitute an inductive definition: \begin{trivlist} \item \<^term>\0 \ even\ \item \<^term>\n \ even \ (Suc n) \ even\ \end{trivlist} Showing that 4 is even using these rules requires showing that 3 is not even. It is far from trivial to show that this set of rules characterizes the even numbers. Even with its use of the function \isa{lists}, the premise of our introduction rule is positive: @{thm [display,indent=0] (prem 1) step [no_vars]} To apply the rule we construct a list \<^term>\args\ of previously constructed well-formed terms. We obtain a new term, \<^term>\Apply f args\. Because \<^term>\lists\ is monotone, applications of the rule remain valid as new terms are constructed. Further lists of well-formed terms become available and none are taken away.% \index{monotone functions!and inductive definitions|)} \ subsection\A Proof of Equivalence\ text \ We naturally hope that these two inductive definitions of ``well-formed'' coincide. The equality can be proved by separate inclusions in each direction. Each is a trivial rule induction. \ lemma "well_formed_gterm arity \ well_formed_gterm' arity" apply clarify apply (erule well_formed_gterm.induct) apply auto done (*<*) lemma "well_formed_gterm arity \ well_formed_gterm' arity" apply clarify apply (erule well_formed_gterm.induct) (*>*) txt \ The \clarify\ method gives us an element of \<^term>\well_formed_gterm arity\ on which to perform induction. 
The resulting subgoal can be proved automatically: @{subgoals[display,indent=0]} This proof resembles the one given in {\S}\ref{sec:gterm-datatype} above, especially in the form of the induction hypothesis. Next, we consider the opposite inclusion: \ (*<*)oops(*>*) lemma "well_formed_gterm' arity \ well_formed_gterm arity" apply clarify apply (erule well_formed_gterm'.induct) apply auto done (*<*) lemma "well_formed_gterm' arity \ well_formed_gterm arity" apply clarify apply (erule well_formed_gterm'.induct) (*>*) txt \ The proof script is virtually identical, but the subgoal after applying induction may be surprising: @{subgoals[display,indent=0,margin=65]} The induction hypothesis contains an application of \<^term>\lists\. Using a monotone function in the inductive definition always has this effect. The subgoal may look uninviting, but fortunately \<^term>\lists\ distributes over intersection: @{named_thms [display,indent=0] lists_Int_eq [no_vars] (lists_Int_eq)} Thanks to this default simplification rule, the induction hypothesis is quickly replaced by its two parts: \begin{trivlist} \item \<^term>\args \ lists (well_formed_gterm' arity)\ \item \<^term>\args \ lists (well_formed_gterm arity)\ \end{trivlist} Invoking the rule \well_formed_gterm.step\ completes the proof. The call to \auto\ does all this work. This example is typical of how monotone functions \index{monotone functions} can be used. In particular, many of them distribute over intersection. Monotonicity implies one direction of this set equality; we have this theorem: @{named_thms [display,indent=0] mono_Int [no_vars] (mono_Int)} \ (*<*)oops(*>*) subsection\Another Example of Rule Inversion\ text \ \index{rule inversion|(}% Does \<^term>\gterms\ distribute over intersection? We have proved that this function is monotone, so \mono_Int\ gives one of the inclusions. The opposite inclusion asserts that if \<^term>\t\ is a ground term over both of the sets \<^term>\F\ and~\<^term>\G\ then it is also a ground term over their intersection, \<^term>\F \ G\. \ lemma gterms_IntI: "t \ gterms F \ t \ gterms G \ t \ gterms (F\G)" (*<*)oops(*>*) text \ Attempting this proof, we get the assumption \<^term>\Apply f args \ gterms G\, which cannot be broken down. It looks like a job for rule inversion:\cmmdx{inductive\protect\_cases} \ inductive_cases gterm_Apply_elim [elim!]: "Apply f args \ gterms F" text \ Here is the result. @{named_thms [display,indent=0,margin=50] gterm_Apply_elim [no_vars] (gterm_Apply_elim)} This rule replaces an assumption about \<^term>\Apply f args\ by assumptions about \<^term>\f\ and~\<^term>\args\. No cases are discarded (there was only one to begin with) but the rule applies specifically to the pattern \<^term>\Apply f args\. It can be applied repeatedly as an elimination rule without looping, so we have given the \elim!\ attribute. Now we can prove the other half of that distributive law. \ lemma gterms_IntI [rule_format, intro!]: "t \ gterms F \ t \ gterms G \ t \ gterms (F\G)" apply (erule gterms.induct) apply blast done (*<*) lemma "t \ gterms F \ t \ gterms G \ t \ gterms (F\G)" apply (erule gterms.induct) (*>*) txt \ The proof begins with rule induction over the definition of \<^term>\gterms\, which leaves a single subgoal: @{subgoals[display,indent=0,margin=65]} To prove this, we assume \<^term>\Apply f args \ gterms G\. 
Rule inversion, in the form of \gterm_Apply_elim\, infers that every element of \<^term>\args\ belongs to \<^term>\gterms G\; hence (by the induction hypothesis) it belongs to \<^term>\gterms (F \ G)\. Rule inversion also yields \<^term>\f \ G\ and hence \<^term>\f \ F \ G\. All of this reasoning is done by \blast\. \smallskip Our distributive law is a trivial consequence of previously-proved results: \ (*<*)oops(*>*) lemma gterms_Int_eq [simp]: "gterms (F \ G) = gterms F \ gterms G" by (blast intro!: mono_Int monoI gterms_mono) text_raw \ \index{rule inversion|)}% \index{ground terms example|)} \begin{isamarkuptext} \begin{exercise} A function mapping function symbols to their types is called a \textbf{signature}. Given a type ranging over type symbols, we can represent a function's type by a list of argument types paired with the result type. Complete this inductive definition: \begin{isabelle} \ inductive_set well_typed_gterm :: "('f \ 't list * 't) \ ('f gterm * 't)set" for sig :: "'f \ 't list * 't" (*<*) where step[intro!]: "\\pair \ set args. pair \ well_typed_gterm sig; sig f = (map snd args, rtype)\ \ (Apply f (map fst args), rtype) \ well_typed_gterm sig" (*>*) text_raw \ \end{isabelle} \end{exercise} \end{isamarkuptext} \ (*<*) text\the following declaration isn't actually used\ primrec integer_arity :: "integer_op \ nat" where "integer_arity (Number n) = 0" | "integer_arity UnaryMinus = 1" | "integer_arity Plus = 2" text\the rest isn't used: too complicated. OK for an exercise though.\ inductive_set integer_signature :: "(integer_op * (unit list * unit)) set" where Number: "(Number n, ([], ())) \ integer_signature" | UnaryMinus: "(UnaryMinus, ([()], ())) \ integer_signature" | Plus: "(Plus, ([(),()], ())) \ integer_signature" inductive_set well_typed_gterm' :: "('f \ 't list * 't) \ ('f gterm * 't)set" for sig :: "'f \ 't list * 't" where step[intro!]: "\args \ lists(well_typed_gterm' sig); sig f = (map snd args, rtype)\ \ (Apply f (map fst args), rtype) \ well_typed_gterm' sig" monos lists_mono lemma "well_typed_gterm sig \ well_typed_gterm' sig" apply clarify apply (erule well_typed_gterm.induct) apply auto done lemma "well_typed_gterm' sig \ well_typed_gterm sig" apply clarify apply (erule well_typed_gterm'.induct) apply auto done end (*>*) diff --git a/src/Doc/Tutorial/Inductive/Even.thy b/src/Doc/Tutorial/Inductive/Even.thy --- a/src/Doc/Tutorial/Inductive/Even.thy +++ b/src/Doc/Tutorial/Inductive/Even.thy @@ -1,290 +1,288 @@ -(*<*)theory Even imports Main begin -ML_file \../../antiquote_setup.ML\ -(*>*) +(*<*)theory Even imports "../Setup" begin(*>*) section\The Set of Even Numbers\ text \ \index{even numbers!defining inductively|(}% The set of even numbers can be inductively defined as the least set containing 0 and closed under the operation $+2$. Obviously, \emph{even} can also be expressed using the divides relation (\dvd\). We shall prove below that the two formulations coincide. On the way we shall examine the primary means of reasoning about inductively defined sets: rule induction. \ subsection\Making an Inductive Definition\ text \ Using \commdx{inductive\protect\_set}, we declare the constant \even\ to be a set of natural numbers with the desired properties. \ inductive_set even :: "nat set" where zero[intro!]: "0 \ even" | step[intro!]: "n \ even \ (Suc (Suc n)) \ even" text \ An inductive definition consists of introduction rules. The first one above states that 0 is even; the second states that if $n$ is even, then so is~$n+2$. 
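For instance (a quick check), the two rules already suffice to show that 4 is even:
\

lemma "Suc (Suc (Suc (Suc 0))) \<in> even"
  by (intro even.step even.zero)

text \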
Given this declaration, Isabelle generates a fixed point definition for \<^term>\even\ and proves theorems about it, thus following the definitional approach (see {\S}\ref{sec:definitional}). These theorems include the introduction rules specified in the declaration, an elimination rule for case analysis and an induction rule. We can refer to these theorems by automatically-generated names. Here are two examples: @{named_thms[display,indent=0] even.zero[no_vars] (even.zero) even.step[no_vars] (even.step)} The introduction rules can be given attributes. Here both rules are specified as \isa{intro!},% \index{intro"!@\isa {intro"!} (attribute)} directing the classical reasoner to apply them aggressively. Obviously, regarding 0 as even is safe. The \step\ rule is also safe because $n+2$ is even if and only if $n$ is even. We prove this equivalence later. \ subsection\Using Introduction Rules\ text \ Our first lemma states that numbers of the form $2\times k$ are even. Introduction rules are used to show that specific values belong to the inductive set. Such proofs typically involve induction, perhaps over some other inductive set. \ lemma two_times_even[intro!]: "2*k \ even" apply (induct_tac k) apply auto done (*<*) lemma "2*k \ even" apply (induct_tac k) (*>*) txt \ \noindent The first step is induction on the natural number \k\, which leaves two subgoals: @{subgoals[display,indent=0,margin=65]} Here \auto\ simplifies both subgoals so that they match the introduction rules, which are then applied automatically. Our ultimate goal is to prove the equivalence between the traditional definition of \even\ (using the divides relation) and our inductive definition. One direction of this equivalence is immediate by the lemma just proved, whose \intro!\ attribute ensures it is applied automatically. \ (*<*)oops(*>*) lemma dvd_imp_even: "2 dvd n \ n \ even" by (auto simp add: dvd_def) subsection\Rule Induction \label{sec:rule-induction}\ text \ \index{rule induction|(}% From the definition of the set \<^term>\even\, Isabelle has generated an induction rule: @{named_thms [display,indent=0,margin=40] even.induct [no_vars] (even.induct)} A property \<^term>\P\ holds for every even number provided it holds for~\0\ and is closed under the operation \isa{Suc(Suc \(\cdot\))}. Then \<^term>\P\ is closed under the introduction rules for \<^term>\even\, which is the least set closed under those rules. This type of inductive argument is called \textbf{rule induction}. Apart from the double application of \<^term>\Suc\, the induction rule above resembles the familiar mathematical induction, which indeed is an instance of rule induction; the natural numbers can be defined inductively to be the least set containing \0\ and closed under~\<^term>\Suc\. Induction is the usual way of proving a property of the elements of an inductively defined set. Let us prove that all members of the set \<^term>\even\ are multiples of two. \ lemma even_imp_dvd: "n \ even \ 2 dvd n" txt \ We begin by applying induction. Note that \even.induct\ has the form of an elimination rule, so we use the method \erule\. We get two subgoals: \ apply (erule even.induct) txt \ @{subgoals[display,indent=0]} We unfold the definition of \dvd\ in both subgoals, proving the first one and simplifying the second: \ apply (simp_all add: dvd_def) txt \ @{subgoals[display,indent=0]} The next command eliminates the existential quantifier from the assumption and replaces \n\ by \2 * k\. 
\ apply clarify txt \ @{subgoals[display,indent=0]} To conclude, we tell Isabelle that the desired value is \<^term>\Suc k\. With this hint, the subgoal falls to \simp\. \ apply (rule_tac x = "Suc k" in exI, simp) (*<*)done(*>*) text \ Combining the previous two results yields our objective, the equivalence relating \<^term>\even\ and \dvd\. % %we don't want [iff]: discuss? \ theorem even_iff_dvd: "(n \ even) = (2 dvd n)" by (blast intro: dvd_imp_even even_imp_dvd) subsection\Generalization and Rule Induction \label{sec:gen-rule-induction}\ text \ \index{generalizing for induction}% Before applying induction, we typically must generalize the induction formula. With rule induction, the required generalization can be hard to find and sometimes requires a complete reformulation of the problem. In this example, our first attempt uses the obvious statement of the result. It fails: \ lemma "Suc (Suc n) \ even \ n \ even" apply (erule even.induct) oops (*<*) lemma "Suc (Suc n) \ even \ n \ even" apply (erule even.induct) (*>*) txt \ Rule induction finds no occurrences of \<^term>\Suc(Suc n)\ in the conclusion, which it therefore leaves unchanged. (Look at \even.induct\ to see why this happens.) We have these subgoals: @{subgoals[display,indent=0]} The first one is hopeless. Rule induction on a non-variable term discards information, and usually fails. How to deal with such situations in general is described in {\S}\ref{sec:ind-var-in-prems} below. In the current case the solution is easy because we have the necessary inverse, subtraction: \ (*<*)oops(*>*) lemma even_imp_even_minus_2: "n \ even \ n - 2 \ even" apply (erule even.induct) apply auto done (*<*) lemma "n \ even \ n - 2 \ even" apply (erule even.induct) (*>*) txt \ This lemma is trivially inductive. Here are the subgoals: @{subgoals[display,indent=0]} The first is trivial because \0 - 2\ simplifies to \0\, which is even. The second is trivial too: \<^term>\Suc (Suc n) - 2\ simplifies to \<^term>\n\, matching the assumption.% \index{rule induction|)} %the sequel isn't really about induction \medskip Using our lemma, we can easily prove the result we originally wanted: \ (*<*)oops(*>*) lemma Suc_Suc_even_imp_even: "Suc (Suc n) \ even \ n \ even" by (drule even_imp_even_minus_2, simp) text \ We have just proved the converse of the introduction rule \even.step\. This suggests proving the following equivalence. We give it the \attrdx{iff} attribute because of its obvious value for simplification. \ lemma [iff]: "((Suc (Suc n)) \ even) = (n \ even)" by (blast dest: Suc_Suc_even_imp_even) subsection\Rule Inversion \label{sec:rule-inversion}\ text \ \index{rule inversion|(}% Case analysis on an inductive definition is called \textbf{rule inversion}. It is frequently used in proofs about operational semantics. It can be highly effective when it is applied automatically. Let us look at how rule inversion is done in Isabelle/HOL\@. Recall that \<^term>\even\ is the minimal set closed under these two rules: @{thm [display,indent=0] even.intros [no_vars]} Minimality means that \<^term>\even\ contains only the elements that these rules force it to contain. If we are told that \<^term>\a\ belongs to \<^term>\even\ then there are only two possibilities. Either \<^term>\a\ is \0\ or else \<^term>\a\ has the form \<^term>\Suc(Suc n)\, for some suitable \<^term>\n\ that belongs to \<^term>\even\. 
That is the gist of the \<^term>\cases\ rule, which Isabelle proves for us when it accepts an inductive definition: @{named_thms [display,indent=0,margin=40] even.cases [no_vars] (even.cases)} This general rule is less useful than instances of it for specific patterns. For example, if \<^term>\a\ has the form \<^term>\Suc(Suc n)\ then the first case becomes irrelevant, while the second case tells us that \<^term>\n\ belongs to \<^term>\even\. Isabelle will generate this instance for us: \ inductive_cases Suc_Suc_cases [elim!]: "Suc(Suc n) \ even" text \ The \commdx{inductive\protect\_cases} command generates an instance of the \cases\ rule for the supplied pattern and gives it the supplied name: @{named_thms [display,indent=0] Suc_Suc_cases [no_vars] (Suc_Suc_cases)} Applying this as an elimination rule yields one case where \even.cases\ would yield two. Rule inversion works well when the conclusions of the introduction rules involve datatype constructors like \<^term>\Suc\ and \#\ (list ``cons''); freeness reasoning discards all but one or two cases. In the \isacommand{inductive\_cases} command we supplied an attribute, \elim!\, \index{elim"!@\isa {elim"!} (attribute)}% indicating that this elimination rule can be applied aggressively. The original \<^term>\cases\ rule would loop if used in that manner because the pattern~\<^term>\a\ matches everything. The rule \Suc_Suc_cases\ is equivalent to the following implication: @{term [display,indent=0] "Suc (Suc n) \ even \ n \ even"} Just above we devoted some effort to reaching precisely this result. Yet we could have obtained it by a one-line declaration, dispensing with the lemma \even_imp_even_minus_2\. This example also justifies the terminology \textbf{rule inversion}: the new rule inverts the introduction rule \even.step\. In general, a rule can be inverted when the set of elements it introduces is disjoint from those of the other introduction rules. For one-off applications of rule inversion, use the \methdx{ind_cases} method. Here is an example: \ (*<*)lemma "Suc(Suc n) \ even \ P"(*>*) apply (ind_cases "Suc(Suc n) \ even") (*<*)oops(*>*) text \ The specified instance of the \cases\ rule is generated, then applied as an elimination rule. To summarize, every inductive definition produces a \cases\ rule. The \commdx{inductive\protect\_cases} command stores an instance of the \cases\ rule for a given pattern. Within a proof, the \ind_cases\ method applies an instance of the \cases\ rule. The even numbers example has shown how inductive definitions can be used. Later examples will show that they are actually worth using.% \index{rule inversion|)}% \index{even numbers!defining inductively|)} \ (*<*)end(*>*) diff --git a/src/Doc/Tutorial/Protocol/Message.thy b/src/Doc/Tutorial/Protocol/Message.thy --- a/src/Doc/Tutorial/Protocol/Message.thy +++ b/src/Doc/Tutorial/Protocol/Message.thy @@ -1,917 +1,916 @@ (* Author: Lawrence C Paulson, Cambridge University Computer Laboratory Copyright 1996 University of Cambridge Datatypes of agents and messages; Inductive relations "parts", "analz" and "synth" *)(*<*) section\Theory of Agents and Messages for Security Protocols\ -theory Message imports Main begin -ML_file \../../antiquote_setup.ML\ +theory Message imports "../Setup" begin (*Needed occasionally with spy_analz_tac, e.g. in analz_insert_Key_newK*) lemma [simp] : "A \ (B \ A) = B \ A" by blast (*>*) section\Agents and Messages\ text \ All protocol specifications refer to a syntactic theory of messages. 
Datatype \agent\ introduces the constant \Server\ (a trusted central machine, needed for some protocols), an infinite population of friendly agents, and the~\Spy\: \ datatype agent = Server | Friend nat | Spy text \ Keys are just natural numbers. Function \invKey\ maps a public key to the matching private key, and vice versa: \ type_synonym key = nat consts invKey :: "key \ key" (*<*) consts all_symmetric :: bool \ \true if all keys are symmetric\ specification (invKey) invKey [simp]: "invKey (invKey K) = K" invKey_symmetric: "all_symmetric \ invKey = id" by (rule exI [of _ id], auto) text\The inverse of a symmetric key is itself; that of a public key is the private key and vice versa\ definition symKeys :: "key set" where "symKeys == {K. invKey K = K}" (*>*) text \ Datatype \msg\ introduces the message forms, which include agent names, nonces, keys, compound messages, and encryptions. \ datatype msg = Agent agent | Nonce nat | Key key | MPair msg msg | Crypt key msg text \ \noindent The notation $\comp{X\sb 1,\ldots X\sb{n-1},X\sb n}$ abbreviates $\isa{MPair}\,X\sb 1\,\ldots\allowbreak(\isa{MPair}\,X\sb{n-1}\,X\sb n)$. Since datatype constructors are injective, we have the theorem @{thm [display,indent=0] msg.inject(5) [THEN iffD1, of K X K' X']} A ciphertext can be decrypted using only one key and can yield only one plaintext. In the real world, decryption with the wrong key succeeds but yields garbage. Our model of encryption is realistic if encryption adds some redundancy to the plaintext, such as a checksum, so that garbage can be detected. \ (*<*) text\Concrete syntax: messages appear as \\A,B,NA\\, etc...\ syntax "_MTuple" :: "['a, args] \ 'a * 'b" ("(2\_,/ _\)") translations "\x, y, z\" == "\x, \y, z\\" "\x, y\" == "CONST MPair x y" definition keysFor :: "msg set \ key set" where \ \Keys useful to decrypt elements of a message set\ "keysFor H == invKey ` {K. \X. Crypt K X \ H}" subsubsection\Inductive Definition of All Parts" of a Message\ inductive_set parts :: "msg set \ msg set" for H :: "msg set" where Inj [intro]: "X \ H \ X \ parts H" | Fst: "\X,Y\ \ parts H \ X \ parts H" | Snd: "\X,Y\ \ parts H \ Y \ parts H" | Body: "Crypt K X \ parts H \ X \ parts H" text\Monotonicity\ lemma parts_mono: "G \ H \ parts(G) \ parts(H)" apply auto apply (erule parts.induct) apply (blast dest: parts.Fst parts.Snd parts.Body)+ done text\Equations hold because constructors are injective.\ lemma Friend_image_eq [simp]: "(Friend x \ Friend`A) = (x\A)" by auto lemma Key_image_eq [simp]: "(Key x \ Key`A) = (x\A)" by auto lemma Nonce_Key_image_eq [simp]: "(Nonce x \ Key`A)" by auto subsubsection\Inverse of keys\ lemma invKey_eq [simp]: "(invKey K = invKey K') = (K=K')" apply safe apply (drule_tac f = invKey in arg_cong, simp) done subsection\keysFor operator\ lemma keysFor_empty [simp]: "keysFor {} = {}" by (unfold keysFor_def, blast) lemma keysFor_Un [simp]: "keysFor (H \ H') = keysFor H \ keysFor H'" by (unfold keysFor_def, blast) lemma keysFor_UN [simp]: "keysFor (\i\A. H i) = (\i\A. 
keysFor (H i))" by (unfold keysFor_def, blast) text\Monotonicity\ lemma keysFor_mono: "G \ H \ keysFor(G) \ keysFor(H)" by (unfold keysFor_def, blast) lemma keysFor_insert_Agent [simp]: "keysFor (insert (Agent A) H) = keysFor H" by (unfold keysFor_def, auto) lemma keysFor_insert_Nonce [simp]: "keysFor (insert (Nonce N) H) = keysFor H" by (unfold keysFor_def, auto) lemma keysFor_insert_Key [simp]: "keysFor (insert (Key K) H) = keysFor H" by (unfold keysFor_def, auto) lemma keysFor_insert_MPair [simp]: "keysFor (insert \X,Y\ H) = keysFor H" by (unfold keysFor_def, auto) lemma keysFor_insert_Crypt [simp]: "keysFor (insert (Crypt K X) H) = insert (invKey K) (keysFor H)" by (unfold keysFor_def, auto) lemma keysFor_image_Key [simp]: "keysFor (Key`E) = {}" by (unfold keysFor_def, auto) lemma Crypt_imp_invKey_keysFor: "Crypt K X \ H \ invKey K \ keysFor H" by (unfold keysFor_def, blast) subsection\Inductive relation "parts"\ lemma MPair_parts: "[| \X,Y\ \ parts H; [| X \ parts H; Y \ parts H |] ==> P |] ==> P" by (blast dest: parts.Fst parts.Snd) declare MPair_parts [elim!] parts.Body [dest!] text\NB These two rules are UNSAFE in the formal sense, as they discard the compound message. They work well on THIS FILE. \MPair_parts\ is left as SAFE because it speeds up proofs. The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.\ lemma parts_increasing: "H \ parts(H)" by blast lemmas parts_insertI = subset_insertI [THEN parts_mono, THEN subsetD] lemma parts_empty [simp]: "parts{} = {}" apply safe apply (erule parts.induct, blast+) done lemma parts_emptyE [elim!]: "X\ parts{} \ P" by simp text\WARNING: loops if H = {Y}, therefore must not be repeated!\ lemma parts_singleton: "X\ parts H \ \Y\H. X\ parts {Y}" by (erule parts.induct, fast+) subsubsection\Unions\ lemma parts_Un_subset1: "parts(G) \ parts(H) \ parts(G \ H)" by (intro Un_least parts_mono Un_upper1 Un_upper2) lemma parts_Un_subset2: "parts(G \ H) \ parts(G) \ parts(H)" apply (rule subsetI) apply (erule parts.induct, blast+) done lemma parts_Un [simp]: "parts(G \ H) = parts(G) \ parts(H)" by (intro equalityI parts_Un_subset1 parts_Un_subset2) lemma parts_insert: "parts (insert X H) = parts {X} \ parts H" apply (subst insert_is_Un [of _ H]) apply (simp only: parts_Un) done text\TWO inserts to avoid looping. This rewrite is better than nothing. Not suitable for Addsimps: its behaviour can be strange.\ lemma parts_insert2: "parts (insert X (insert Y H)) = parts {X} \ parts {Y} \ parts H" apply (simp add: Un_assoc) apply (simp add: parts_insert [symmetric]) done lemma parts_UN_subset1: "(\x\A. parts(H x)) \ parts(\x\A. H x)" by (intro UN_least parts_mono UN_upper) lemma parts_UN_subset2: "parts(\x\A. H x) \ (\x\A. parts(H x))" apply (rule subsetI) apply (erule parts.induct, blast+) done lemma parts_UN [simp]: "parts(\x\A. H x) = (\x\A. parts(H x))" by (intro equalityI parts_UN_subset1 parts_UN_subset2) text\Added to simplify arguments to parts, analz and synth. NOTE: the UN versions are no longer used!\ text\This allows \blast\ to simplify occurrences of \<^term>\parts(G\H)\ in the assumption.\ lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE] declare in_parts_UnE [elim!] 
lemma parts_insert_subset: "insert X (parts H) \ parts(insert X H)" by (blast intro: parts_mono [THEN [2] rev_subsetD]) subsubsection\Idempotence and transitivity\ lemma parts_partsD [dest!]: "X \ parts (parts H) \ X\ parts H" by (erule parts.induct, blast+) lemma parts_idem [simp]: "parts (parts H) = parts H" by blast lemma parts_subset_iff [simp]: "(parts G \ parts H) = (G \ parts H)" apply (rule iffI) apply (iprover intro: subset_trans parts_increasing) apply (frule parts_mono, simp) done lemma parts_trans: "[| X\ parts G; G \ parts H |] ==> X\ parts H" by (drule parts_mono, blast) text\Cut\ lemma parts_cut: "[| Y\ parts (insert X G); X\ parts H |] ==> Y\ parts (G \ H)" by (blast intro: parts_trans) lemma parts_cut_eq [simp]: "X\ parts H ==> parts (insert X H) = parts H" by (force dest!: parts_cut intro: parts_insertI) subsubsection\Rewrite rules for pulling out atomic messages\ lemmas parts_insert_eq_I = equalityI [OF subsetI parts_insert_subset] lemma parts_insert_Agent [simp]: "parts (insert (Agent agt) H) = insert (Agent agt) (parts H)" apply (rule parts_insert_eq_I) apply (erule parts.induct, auto) done lemma parts_insert_Nonce [simp]: "parts (insert (Nonce N) H) = insert (Nonce N) (parts H)" apply (rule parts_insert_eq_I) apply (erule parts.induct, auto) done lemma parts_insert_Key [simp]: "parts (insert (Key K) H) = insert (Key K) (parts H)" apply (rule parts_insert_eq_I) apply (erule parts.induct, auto) done lemma parts_insert_Crypt [simp]: "parts (insert (Crypt K X) H) = insert (Crypt K X) (parts (insert X H))" apply (rule equalityI) apply (rule subsetI) apply (erule parts.induct, auto) apply (blast intro: parts.Body) done lemma parts_insert_MPair [simp]: "parts (insert \X,Y\ H) = insert \X,Y\ (parts (insert X (insert Y H)))" apply (rule equalityI) apply (rule subsetI) apply (erule parts.induct, auto) apply (blast intro: parts.Fst parts.Snd)+ done lemma parts_image_Key [simp]: "parts (Key`N) = Key`N" apply auto apply (erule parts.induct, auto) done text\In any message, there is an upper bound N on its greatest nonce.\ lemma msg_Nonce_supply: "\N. \n. N\n \ Nonce n \ parts {msg}" apply (induct_tac "msg") apply (simp_all (no_asm_simp) add: exI parts_insert2) txt\MPair case: blast works out the necessary sum itself!\ prefer 2 apply auto apply (blast elim!: add_leE) txt\Nonce case\ apply (rename_tac nat) apply (rule_tac x = "N + Suc nat" in exI, auto) done (*>*) section\Modelling the Adversary\ text \ The spy is part of the system and must be built into the model. He is a malicious user who does not have to follow the protocol. He watches the network and uses any keys he knows to decrypt messages. Thus he accumulates additional keys and nonces. These he can use to compose new messages, which he may send to anybody. Two functions enable us to formalize this behaviour: \analz\ and \synth\. Each function maps a set of messages to another set of messages. The set \analz H\ formalizes what the adversary can learn from the set of messages~$H$. The closure properties of this set are defined inductively.
\ inductive_set analz :: "msg set \ msg set" for H :: "msg set" where Inj [intro,simp] : "X \ H \ X \ analz H" | Fst: "\X,Y\ \ analz H \ X \ analz H" | Snd: "\X,Y\ \ analz H \ Y \ analz H" | Decrypt [dest]: "\Crypt K X \ analz H; Key(invKey K) \ analz H\ \ X \ analz H" (*<*) text\Monotonicity; Lemma 1 of Lowe's paper\ lemma analz_mono: "G\H \ analz(G) \ analz(H)" apply auto apply (erule analz.induct) apply (auto dest: analz.Fst analz.Snd) done text\Making it safe speeds up proofs\ lemma MPair_analz [elim!]: "[| \X,Y\ \ analz H; [| X \ analz H; Y \ analz H |] ==> P |] ==> P" by (blast dest: analz.Fst analz.Snd) lemma analz_increasing: "H \ analz(H)" by blast lemma analz_subset_parts: "analz H \ parts H" apply (rule subsetI) apply (erule analz.induct, blast+) done lemmas analz_into_parts = analz_subset_parts [THEN subsetD] lemmas not_parts_not_analz = analz_subset_parts [THEN contra_subsetD] lemma parts_analz [simp]: "parts (analz H) = parts H" apply (rule equalityI) apply (rule analz_subset_parts [THEN parts_mono, THEN subset_trans], simp) apply (blast intro: analz_increasing [THEN parts_mono, THEN subsetD]) done lemma analz_parts [simp]: "analz (parts H) = parts H" apply auto apply (erule analz.induct, auto) done lemmas analz_insertI = subset_insertI [THEN analz_mono, THEN [2] rev_subsetD] subsubsection\General equational properties\ lemma analz_empty [simp]: "analz{} = {}" apply safe apply (erule analz.induct, blast+) done text\Converse fails: we can analz more from the union than from the separate parts, as a key in one might decrypt a message in the other\ lemma analz_Un: "analz(G) \ analz(H) \ analz(G \ H)" by (intro Un_least analz_mono Un_upper1 Un_upper2) lemma analz_insert: "insert X (analz H) \ analz(insert X H)" by (blast intro: analz_mono [THEN [2] rev_subsetD]) subsubsection\Rewrite rules for pulling out atomic messages\ lemmas analz_insert_eq_I = equalityI [OF subsetI analz_insert] lemma analz_insert_Agent [simp]: "analz (insert (Agent agt) H) = insert (Agent agt) (analz H)" apply (rule analz_insert_eq_I) apply (erule analz.induct, auto) done lemma analz_insert_Nonce [simp]: "analz (insert (Nonce N) H) = insert (Nonce N) (analz H)" apply (rule analz_insert_eq_I) apply (erule analz.induct, auto) done text\Can only pull out Keys if they are not needed to decrypt the rest\ lemma analz_insert_Key [simp]: "K \ keysFor (analz H) \ analz (insert (Key K) H) = insert (Key K) (analz H)" apply (unfold keysFor_def) apply (rule analz_insert_eq_I) apply (erule analz.induct, auto) done lemma analz_insert_MPair [simp]: "analz (insert \X,Y\ H) = insert \X,Y\ (analz (insert X (insert Y H)))" apply (rule equalityI) apply (rule subsetI) apply (erule analz.induct, auto) apply (erule analz.induct) apply (blast intro: analz.Fst analz.Snd)+ done text\Can pull out enCrypted message if the Key is not known\ lemma analz_insert_Crypt: "Key (invKey K) \ analz H \ analz (insert (Crypt K X) H) = insert (Crypt K X) (analz H)" apply (rule analz_insert_eq_I) apply (erule analz.induct, auto) done lemma lemma1: "Key (invKey K) \ analz H \ analz (insert (Crypt K X) H) \ insert (Crypt K X) (analz (insert X H))" apply (rule subsetI) apply (erule_tac x = x in analz.induct, auto) done lemma lemma2: "Key (invKey K) \ analz H \ insert (Crypt K X) (analz (insert X H)) \ analz (insert (Crypt K X) H)" apply auto apply (erule_tac x = x in analz.induct, auto) apply (blast intro: analz_insertI analz.Decrypt) done lemma analz_insert_Decrypt: "Key (invKey K) \ analz H \ analz (insert (Crypt K X) H) = insert (Crypt K X) (analz 
(insert X H))" by (intro equalityI lemma1 lemma2) text\Case analysis: either the message is secure, or it is not! Effective, but can cause subgoals to blow up! Use with \if_split\; apparently \split_tac\ does not cope with patterns such as \<^term>\analz (insert (Crypt K X) H)\\ lemma analz_Crypt_if [simp]: "analz (insert (Crypt K X) H) = (if (Key (invKey K) \ analz H) then insert (Crypt K X) (analz (insert X H)) else insert (Crypt K X) (analz H))" by (simp add: analz_insert_Crypt analz_insert_Decrypt) text\This rule supposes "for the sake of argument" that we have the key.\ lemma analz_insert_Crypt_subset: "analz (insert (Crypt K X) H) \ insert (Crypt K X) (analz (insert X H))" apply (rule subsetI) apply (erule analz.induct, auto) done lemma analz_image_Key [simp]: "analz (Key`N) = Key`N" apply auto apply (erule analz.induct, auto) done subsubsection\Idempotence and transitivity\ lemma analz_analzD [dest!]: "X\ analz (analz H) \ X\ analz H" by (erule analz.induct, blast+) lemma analz_idem [simp]: "analz (analz H) = analz H" by blast lemma analz_subset_iff [simp]: "(analz G \ analz H) = (G \ analz H)" apply (rule iffI) apply (iprover intro: subset_trans analz_increasing) apply (frule analz_mono, simp) done lemma analz_trans: "[| X\ analz G; G \ analz H |] ==> X\ analz H" by (drule analz_mono, blast) text\Cut; Lemma 2 of Lowe\ lemma analz_cut: "[| Y\ analz (insert X H); X\ analz H |] ==> Y\ analz H" by (erule analz_trans, blast) (*Cut can be proved easily by induction on "Y \ analz (insert X H) \ X \ analz H \ Y \ analz H" *) text\This rewrite rule helps in the simplification of messages that involve the forwarding of unknown components (X). Without it, removing occurrences of X can be very complicated.\ lemma analz_insert_eq: "X\ analz H \ analz (insert X H) = analz H" by (blast intro: analz_cut analz_insertI) text\A congruence rule for "analz"\ lemma analz_subset_cong: "[| analz G \ analz G'; analz H \ analz H' |] ==> analz (G \ H) \ analz (G' \ H')" apply simp apply (iprover intro: conjI subset_trans analz_mono Un_upper1 Un_upper2) done lemma analz_cong: "[| analz G = analz G'; analz H = analz H' |] ==> analz (G \ H) = analz (G' \ H')" by (intro equalityI analz_subset_cong, simp_all) lemma analz_insert_cong: "analz H = analz H' \ analz(insert X H) = analz(insert X H')" by (force simp only: insert_def intro!: analz_cong) text\If there are no pairs or encryptions then analz does nothing\ lemma analz_trivial: "[| \X Y. \X,Y\ \ H; \X K. Crypt K X \ H |] ==> analz H = H" apply safe apply (erule analz.induct, blast+) done text\These two are obsolete (with a single Spy) but cost little to prove...\ lemma analz_UN_analz_lemma: "X\ analz (\i\A. analz (H i)) \ X\ analz (\i\A. H i)" apply (erule analz.induct) apply (blast intro: analz_mono [THEN [2] rev_subsetD])+ done lemma analz_UN_analz [simp]: "analz (\i\A. analz (H i)) = analz (\i\A. H i)" by (blast intro: analz_UN_analz_lemma analz_mono [THEN [2] rev_subsetD]) (*>*) text \ Note the \Decrypt\ rule: the spy can decrypt a message encrypted with key~$K$ if he has the matching key,~$K^{-1}$. Properties proved by rule induction include the following: @{named_thms [display,indent=0] analz_mono [no_vars] (analz_mono) analz_idem [no_vars] (analz_idem)} The set of fake messages that an intruder could invent starting from~\H\ is \synth(analz H)\, where \synth H\ formalizes what the adversary can build from the set of messages~$H$. 
\ inductive_set synth :: "msg set \ msg set" for H :: "msg set" where Inj [intro]: "X \ H \ X \ synth H" | Agent [intro]: "Agent agt \ synth H" | MPair [intro]: "\X \ synth H; Y \ synth H\ \ \X,Y\ \ synth H" | Crypt [intro]: "\X \ synth H; Key K \ H\ \ Crypt K X \ synth H" (*<*) lemma synth_mono: "G\H \ synth(G) \ synth(H)" by (auto, erule synth.induct, auto) inductive_cases Key_synth [elim!]: "Key K \ synth H" inductive_cases MPair_synth [elim!]: "\X,Y\ \ synth H" inductive_cases Crypt_synth [elim!]: "Crypt K X \ synth H" lemma analz_synth_Un [simp]: "analz (synth G \ H) = analz (G \ H) \ synth G" apply (rule equalityI) apply (rule subsetI) apply (erule analz.induct) prefer 5 apply (blast intro: analz_mono [THEN [2] rev_subsetD]) apply (blast intro: analz.Fst analz.Snd analz.Decrypt)+ done lemma analz_synth [simp]: "analz (synth H) = analz H \ synth H" apply (cut_tac H = "{}" in analz_synth_Un) apply (simp (no_asm_use)) done (*>*) text \ The set includes all agent names. Nonces and keys are assumed to be unguessable, so none are included beyond those already in~$H$. Two elements of \<^term>\synth H\ can be combined, and an element can be encrypted using a key present in~$H$. Like \analz\, this set operator is monotone and idempotent. It also satisfies an interesting equation involving \analz\: @{named_thms [display,indent=0] analz_synth [no_vars] (analz_synth)} Rule inversion plays a major role in reasoning about \synth\, through declarations such as this one: \ inductive_cases Nonce_synth [elim!]: "Nonce n \ synth H" text \ \noindent The resulting elimination rule replaces every assumption of the form \<^term>\Nonce n \ synth H\ by \<^term>\Nonce n \ H\, expressing that a nonce cannot be guessed. A third operator, \parts\, is useful for stating correctness properties. The set \<^term>\parts H\ consists of the components of elements of~$H$. This set includes~\H\ and is closed under the projections from a compound message to its immediate parts. Its definition resembles that of \analz\ except in the rule corresponding to the constructor \Crypt\: @{thm [display,indent=5] parts.Body [no_vars]} The body of an encrypted message is always regarded as part of it. We can use \parts\ to express general well-formedness properties of a protocol, for example, that an uncompromised agent's private key will never be included as a component of any message. 
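For instance, the nonce inside a single encrypted message counts among its \parts\, but \analz\ cannot extract it without the matching key; both facts are provable by \simp\:
\

lemma "Nonce N \<in> parts {Crypt K (Nonce N)}"
  by simp

lemma "Nonce N \<notin> analz {Crypt K (Nonce N)}"
  by simp

text \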
\ (*<*) lemma synth_increasing: "H \ synth(H)" by blast subsubsection\Unions\ text\Converse fails: we can synth more from the union than from the separate parts, building a compound message using elements of each.\ lemma synth_Un: "synth(G) \ synth(H) \ synth(G \ H)" by (intro Un_least synth_mono Un_upper1 Un_upper2) lemma synth_insert: "insert X (synth H) \ synth(insert X H)" by (blast intro: synth_mono [THEN [2] rev_subsetD]) subsubsection\Idempotence and transitivity\ lemma synth_synthD [dest!]: "X\ synth (synth H) \ X\ synth H" by (erule synth.induct, blast+) lemma synth_idem: "synth (synth H) = synth H" by blast lemma synth_subset_iff [simp]: "(synth G \ synth H) = (G \ synth H)" apply (rule iffI) apply (iprover intro: subset_trans synth_increasing) apply (frule synth_mono, simp add: synth_idem) done lemma synth_trans: "[| X\ synth G; G \ synth H |] ==> X\ synth H" by (drule synth_mono, blast) text\Cut; Lemma 2 of Lowe\ lemma synth_cut: "[| Y\ synth (insert X H); X\ synth H |] ==> Y\ synth H" by (erule synth_trans, blast) lemma Agent_synth [simp]: "Agent A \ synth H" by blast lemma Nonce_synth_eq [simp]: "(Nonce N \ synth H) = (Nonce N \ H)" by blast lemma Key_synth_eq [simp]: "(Key K \ synth H) = (Key K \ H)" by blast lemma Crypt_synth_eq [simp]: "Key K \ H \ (Crypt K X \ synth H) = (Crypt K X \ H)" by blast lemma keysFor_synth [simp]: "keysFor (synth H) = keysFor H \ invKey`{K. Key K \ H}" by (unfold keysFor_def, blast) subsubsection\Combinations of parts, analz and synth\ lemma parts_synth [simp]: "parts (synth H) = parts H \ synth H" apply (rule equalityI) apply (rule subsetI) apply (erule parts.induct) apply (blast intro: synth_increasing [THEN parts_mono, THEN subsetD] parts.Fst parts.Snd parts.Body)+ done lemma analz_analz_Un [simp]: "analz (analz G \ H) = analz (G \ H)" apply (intro equalityI analz_subset_cong)+ apply simp_all done subsubsection\For reasoning about the Fake rule in traces\ lemma parts_insert_subset_Un: "X \ G \ parts(insert X H) \ parts G \ parts H" by (rule subset_trans [OF parts_mono parts_Un_subset2], blast) text\More specifically for Fake. 
Very occasionally we could do with a version of the form \<^term>\parts{X} \ synth (analz H) \ parts H\\ lemma Fake_parts_insert: "X \ synth (analz H) \ parts (insert X H) \ synth (analz H) \ parts H" apply (drule parts_insert_subset_Un) apply (simp (no_asm_use)) apply blast done lemma Fake_parts_insert_in_Un: "[|Z \ parts (insert X H); X \ synth (analz H)|] ==> Z \ synth (analz H) \ parts H" by (blast dest: Fake_parts_insert [THEN subsetD, dest]) text\\<^term>\H\ is sometimes \<^term>\Key ` KK \ spies evs\, so can't put \<^term>\G=H\.\ lemma Fake_analz_insert: "X \ synth (analz G) \ analz (insert X H) \ synth (analz G) \ analz (G \ H)" apply (rule subsetI) apply (subgoal_tac "x \ analz (synth (analz G) \ H) ") prefer 2 apply (blast intro: analz_mono [THEN [2] rev_subsetD] analz_mono [THEN synth_mono, THEN [2] rev_subsetD]) apply (simp (no_asm_use)) apply blast done lemma analz_conj_parts [simp]: "(X \ analz H & X \ parts H) = (X \ analz H)" by (blast intro: analz_subset_parts [THEN subsetD]) lemma analz_disj_parts [simp]: "(X \ analz H | X \ parts H) = (X \ parts H)" by (blast intro: analz_subset_parts [THEN subsetD]) text\Without this equation, other rules for synth and analz would yield redundant cases\ lemma MPair_synth_analz [iff]: "(\X,Y\ \ synth (analz H)) = (X \ synth (analz H) & Y \ synth (analz H))" by blast lemma Crypt_synth_analz: "[| Key K \ analz H; Key (invKey K) \ analz H |] ==> (Crypt K X \ synth (analz H)) = (X \ synth (analz H))" by blast text\We do NOT want Crypt... messages broken up in protocols!!\ declare parts.Body [rule del] text\Rewrites to push in Key and Crypt messages, so that other messages can be pulled out using the \analz_insert\ rules\ lemmas pushKeys = insert_commute [of "Key K" "Agent C"] insert_commute [of "Key K" "Nonce N"] insert_commute [of "Key K" "MPair X Y"] insert_commute [of "Key K" "Crypt X K'"] for K C N X Y K' lemmas pushCrypts = insert_commute [of "Crypt X K" "Agent C"] insert_commute [of "Crypt X K" "Agent C"] insert_commute [of "Crypt X K" "Nonce N"] insert_commute [of "Crypt X K" "MPair X' Y"] for X K C N X' Y text\Cannot be added with \[simp]\ -- messages should not always be re-ordered.\ lemmas pushes = pushKeys pushCrypts subsection\Tactics useful for many protocol proofs\ ML \ val invKey = @{thm invKey}; val keysFor_def = @{thm keysFor_def}; val symKeys_def = @{thm symKeys_def}; val parts_mono = @{thm parts_mono}; val analz_mono = @{thm analz_mono}; val synth_mono = @{thm synth_mono}; val analz_increasing = @{thm analz_increasing}; val analz_insertI = @{thm analz_insertI}; val analz_subset_parts = @{thm analz_subset_parts}; val Fake_parts_insert = @{thm Fake_parts_insert}; val Fake_analz_insert = @{thm Fake_analz_insert}; val pushes = @{thms pushes}; (*Analysis of Fake cases. Also works for messages that forward unknown parts, but this application is no longer necessary if analz_insert_eq is used. 
subsection\<open>Tactics useful for many protocol proofs\<close>
ML \<open>
val invKey = @{thm invKey};
val keysFor_def = @{thm keysFor_def};
val symKeys_def = @{thm symKeys_def};
val parts_mono = @{thm parts_mono};
val analz_mono = @{thm analz_mono};
val synth_mono = @{thm synth_mono};
val analz_increasing = @{thm analz_increasing};
val analz_insertI = @{thm analz_insertI};
val analz_subset_parts = @{thm analz_subset_parts};
val Fake_parts_insert = @{thm Fake_parts_insert};
val Fake_analz_insert = @{thm Fake_analz_insert};
val pushes = @{thms pushes};


(*Analysis of Fake cases.  Also works for messages that forward unknown
  parts, but this application is no longer necessary if analz_insert_eq is
  used.  Abstraction over i is ESSENTIAL: it delays the dereferencing of
  the claset.  DEPENDS UPON "X" REFERRING TO THE FRAUDULENT MESSAGE.*)

(*Apply rules to break down assumptions of the form
  Y \<in> parts(insert X H)  and  Y \<in> analz(insert X H)
*)
fun impOfSubs th = th RSN (2, @{thm rev_subsetD})

fun Fake_insert_tac ctxt =
    dresolve_tac ctxt [impOfSubs Fake_analz_insert,
                       impOfSubs Fake_parts_insert] THEN'
    eresolve_tac ctxt [asm_rl, @{thm synth.Inj}];

fun Fake_insert_simp_tac ctxt i =
  REPEAT (Fake_insert_tac ctxt i) THEN asm_full_simp_tac ctxt i;

fun atomic_spy_analz_tac ctxt =
  SELECT_GOAL
   (Fake_insert_simp_tac ctxt 1 THEN
    IF_UNSOLVED
      (Blast.depth_tac
        (ctxt addIs [analz_insertI, impOfSubs analz_subset_parts]) 4 1));

fun spy_analz_tac ctxt i =
  DETERM
   (SELECT_GOAL
     (EVERY
      [  (*push in occurrences of X...*)
       (REPEAT o CHANGED)
         (Rule_Insts.res_inst_tac ctxt [((("x", 1), Position.none), "X")] []
          (insert_commute RS ssubst) 1),
         (*...allowing further simplifications*)
       simp_tac ctxt 1,
       REPEAT (FIRSTGOAL (resolve_tac ctxt [allI, impI, notI, conjI, iffI])),
       DEPTH_SOLVE (atomic_spy_analz_tac ctxt 1)]) i);
\<close>

text\<open>By default only \<open>o_apply\<close> is built-in.  But in the presence of
eta-expansion this means that some terms displayed as \<^term>\<open>f o g\<close>
will be rewritten, and others will not!\<close>
declare o_def [simp]


lemma Crypt_notin_image_Key [simp]: "Crypt K X \<notin> Key ` A"
by auto

lemma synth_analz_mono: "G \<subseteq> H \<Longrightarrow> synth (analz(G)) \<subseteq> synth (analz(H))"
by (iprover intro: synth_mono analz_mono)

lemma Fake_analz_eq [simp]:
     "X \<in> synth(analz H) \<Longrightarrow> synth (analz (insert X H)) = synth (analz H)"
apply (drule Fake_analz_insert [of _ _ "H"])
apply (simp add: synth_increasing [THEN Un_absorb2])
apply (drule synth_mono)
apply (simp add: synth_idem)
apply (rule equalityI)
 apply simp
apply (rule synth_analz_mono, blast)
done

text\<open>Two generalizations of \<open>analz_insert_eq\<close>\<close>
lemma gen_analz_insert_eq [rule_format]:
     "X \<in> analz H \<Longrightarrow> \<forall>G. H \<subseteq> G \<longrightarrow> analz (insert X G) = analz G"
by (blast intro: analz_cut analz_insertI analz_mono [THEN [2] rev_subsetD])

lemma synth_analz_insert_eq [rule_format]:
     "X \<in> synth (analz H)
      \<Longrightarrow> \<forall>G. H \<subseteq> G \<longrightarrow> (Key K \<in> analz (insert X G)) = (Key K \<in> analz G)"
apply (erule synth.induct)
apply (simp_all add: gen_analz_insert_eq subset_trans [OF _ subset_insertI])
done

lemma Fake_parts_sing:
     "X \<in> synth (analz H) \<Longrightarrow> parts{X} \<subseteq> synth (analz H) \<union> parts H"
apply (rule subset_trans)
 apply (erule_tac [2] Fake_parts_insert)
apply (rule parts_mono, blast)
done

lemmas Fake_parts_sing_imp_Un = Fake_parts_sing [THEN [2] rev_subsetD]

method_setup spy_analz = \<open>
    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac)\<close>
    "for proving the Fake case when analz is involved"

method_setup atomic_spy_analz = \<open>
    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac)\<close>
    "for debugging spy_analz"

method_setup Fake_insert_simp = \<open>
    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac)\<close>
    "for debugging spy_analz"
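(*Usage sketch, hedged: in a protocol theory built on top of this one, the
  Fake case of a trace induction typically yields a goal whose premise has
  the form  X \<in> synth (analz (spies evs))  -- here "spies" and "evs" are
  names from such later theories, not from this one -- and is usually
  discharged by  "apply spy_analz";  the methods atomic_spy_analz and
  Fake_insert_simp above help diagnose failures of that method.*)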
end
(*>*)
diff --git a/src/Doc/Tutorial/Setup.thy b/src/Doc/Tutorial/Setup.thy
new file mode 100644
--- /dev/null
+++ b/src/Doc/Tutorial/Setup.thy
@@ -0,0 +1,10 @@
+(*:maxLineLen=78:*)
+
+theory Setup
+imports Main
+begin
+
+ML_file \<open>../antiquote_setup.ML\<close>
+ML_file \<open>../more_antiquote.ML\<close>
+
+end
diff --git a/src/Doc/Tutorial/Types/Axioms.thy b/src/Doc/Tutorial/Types/Axioms.thy
--- a/src/Doc/Tutorial/Types/Axioms.thy
+++ b/src/Doc/Tutorial/Types/Axioms.thy
@@ -1,258 +1,258 @@
-(*<*)theory Axioms imports Overloading Setup begin(*>*)
+(*<*)theory Axioms imports Overloading "../Setup" begin(*>*)

subsection \<open>Axioms\<close>

text \<open>Attaching axioms to our classes lets us reason on the level of
classes.  The results will be applicable to all types in a class, just
as in axiomatic mathematics.

\begin{warn}
Proofs in this section use structured \emph{Isar} proofs, which are not
covered in this tutorial; but see @{cite "Nipkow-TYPES02"}.%
\end{warn}\<close>

subsubsection \<open>Semigroups\<close>

text\<open>We specify \emph{semigroups} as a subclass of \<^class>\<open>plus\<close>:\<close>

class semigroup = plus +
  assumes assoc: "(x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"

text \<open>\noindent This @{command class} specification requires that all
instances of \<^class>\<open>semigroup\<close> obey @{fact "assoc:"}~@{prop [source]
"\<And>x y z :: 'a::semigroup. (x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"}.

We can use this class axiom to derive further abstract theorems
relative to class \<^class>\<open>semigroup\<close>:\<close>

lemma assoc_left:
  fixes x y z :: "'a::semigroup"
  shows "x \<oplus> (y \<oplus> z) = (x \<oplus> y) \<oplus> z"
  using assoc by (rule sym)

text \<open>\noindent The \<^class>\<open>semigroup\<close> constraint on type \<^typ>\<open>'a\<close>
restricts instantiations of \<^typ>\<open>'a\<close> to types of class
\<^class>\<open>semigroup\<close> and during the proof enables us to use the fact
@{fact assoc} whose type parameter is itself constrained to class
\<^class>\<open>semigroup\<close>.  The main advantage of classes is that theorems
can be proved in the abstract and freely reused for each instance.

On instantiation, we have to give a proof that the given operations
obey the class axioms:\<close>

instantiation nat :: semigroup
begin

instance proof

txt \<open>\noindent The proof opens with a default proof step, which for
instance judgements invokes method @{method intro_classes}.\<close>

  fix m n q :: nat
  show "(m \<oplus> n) \<oplus> q = m \<oplus> (n \<oplus> q)"
    by (induct m) simp_all
qed

end

text \<open>\noindent Again, the interesting things enter the stage with
parametric types:\<close>

instantiation prod :: (semigroup, semigroup) semigroup
begin

instance proof
  fix p\<^sub>1 p\<^sub>2 p\<^sub>3 :: "'a::semigroup \<times> 'b::semigroup"
  show "p\<^sub>1 \<oplus> p\<^sub>2 \<oplus> p\<^sub>3 = p\<^sub>1 \<oplus> (p\<^sub>2 \<oplus> p\<^sub>3)"
    by (cases p\<^sub>1, cases p\<^sub>2, cases p\<^sub>3) (simp add: assoc)

txt \<open>\noindent Associativity of product semigroups is established
using the hypothetical associativity @{fact assoc} of the type
components, which holds due to the \<^class>\<open>semigroup\<close> constraints
imposed on the type components by the @{command instance} proposition.
Indeed, this pattern often occurs with parametric types and type
classes.\<close>

qed

end

subsubsection \<open>Monoids\<close>

text \<open>We define a subclass \<open>monoidl\<close> (a semigroup with a left-hand
neutral) by extending \<^class>\<open>semigroup\<close> with one additional parameter
\<open>neutral\<close> together with its property:\<close>

class monoidl = semigroup +
  fixes neutral :: "'a" ("\<zero>")
  assumes neutl: "\<zero> \<oplus> x = x"

text \<open>\noindent Again, we prove some instances by providing suitable
parameter definitions and proofs for the additional specifications.\<close>

instantiation nat :: monoidl
begin

definition
  neutral_nat_def: "\<zero> = (0::nat)"

instance proof
  fix n :: nat
  show "\<zero> \<oplus> n = n"
    unfolding neutral_nat_def by simp
qed

end
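(*A quick check -- an editorial addition, not part of the original text:
  the abstract fact neutl now applies to the concrete instance nat.*)
lemma "\<zero> \<oplus> (n::nat) = n"
by (rule neutl)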
text \<open>\noindent In contrast to the examples above, we here have both
specification of class operations and a non-trivial instance proof.

This covers products as well:
\<close>

instantiation prod :: (monoidl, monoidl) monoidl
begin

definition
  neutral_prod_def: "\<zero> = (\<zero>, \<zero>)"

instance proof
  fix p :: "'a::monoidl \<times> 'b::monoidl"
  show "\<zero> \<oplus> p = p"
    by (cases p) (simp add: neutral_prod_def neutl)
qed

end

text \<open>\noindent Fully-fledged monoids are modelled by another subclass,
which does not add new parameters but tightens the specification:\<close>

class monoid = monoidl +
  assumes neutr: "x \<oplus> \<zero> = x"

text \<open>\noindent Corresponding instances for \<^typ>\<open>nat\<close> and products are
left as an exercise for the reader.\<close>

subsubsection \<open>Groups\<close>

text \<open>\noindent To finish our small algebra example, we add a \<open>group\<close>
class:\<close>

class group = monoidl +
  fixes inv :: "'a \<Rightarrow> 'a" ("\<div> _" [81] 80)
  assumes invl: "\<div> x \<oplus> x = \<zero>"

text \<open>\noindent We continue with a further example for abstract proofs
relative to type classes:\<close>

lemma left_cancel:
  fixes x y z :: "'a::group"
  shows "x \<oplus> y = x \<oplus> z \<longleftrightarrow> y = z"
proof
  assume "x \<oplus> y = x \<oplus> z"
  then have "\<div> x \<oplus> (x \<oplus> y) = \<div> x \<oplus> (x \<oplus> z)" by simp
  then have "(\<div> x \<oplus> x) \<oplus> y = (\<div> x \<oplus> x) \<oplus> z" by (simp add: assoc)
  then show "y = z" by (simp add: invl neutl)
next
  assume "y = z"
  then show "x \<oplus> y = x \<oplus> z" by simp
qed
x \ x = \" . then have "\
x \ (x \ \) = \
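(*Editorial illustration of the propagation just performed: after the
  subclass proof, the monoid fact neutr is available at any group type.*)
lemma "x \<oplus> \<zero> = (x::'a::group)"
by (rule neutr)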
x \ x" by (simp add: neutl invl assoc [symmetric]) then show "x \ \ = x" by (simp add: left_cancel) qed text \\noindent The proof result is propagated to the type system, making \group\ an instance of \monoid\ by adding an additional edge to the graph of subclass relation; see also Figure~\ref{fig:subclass}. \begin{figure}[htbp] \begin{center} \small \unitlength 0.6mm \begin{picture}(40,60)(0,0) \put(20,60){\makebox(0,0){\semigroup\}} \put(20,40){\makebox(0,0){\monoidl\}} \put(00,20){\makebox(0,0){\monoid\}} \put(40,00){\makebox(0,0){\group\}} \put(20,55){\vector(0,-1){10}} \put(15,35){\vector(-1,-1){10}} \put(25,35){\vector(1,-3){10}} \end{picture} \hspace{8em} \begin{picture}(40,60)(0,0) \put(20,60){\makebox(0,0){\semigroup\}} \put(20,40){\makebox(0,0){\monoidl\}} \put(00,20){\makebox(0,0){\monoid\}} \put(40,00){\makebox(0,0){\group\}} \put(20,55){\vector(0,-1){10}} \put(15,35){\vector(-1,-1){10}} \put(05,15){\vector(3,-1){30}} \end{picture} \caption{Subclass relationship of monoids and groups: before and after establishing the relationship \group \ monoid\; transitive edges are left out.} \label{fig:subclass} \end{center} \end{figure} \ subsubsection \Inconsistencies\ text \The reader may be wondering what happens if we attach an inconsistent set of axioms to a class. So far we have always avoided to add new axioms to HOL for fear of inconsistencies and suddenly it seems that we are throwing all caution to the wind. So why is there no problem? The point is that by construction, all type variables in the axioms of a \isacommand{class} are automatically constrained with the class being defined (as shown for axiom @{thm[source]refl} above). These constraints are always carried around and Isabelle takes care that they are never lost, unless the type variable is instantiated with a type that has been shown to belong to that class. Thus you may be able to prove \<^prop>\False\ from your axioms, but Isabelle will remind you that this theorem has the hidden hypothesis that the class is non-empty. Even if each individual class is consistent, intersections of (unrelated) classes readily become inconsistent in practice. Now we know this need not worry us.\ subsubsection\Syntactic Classes and Predefined Overloading\ text \In our algebra example, we have started with a \emph{syntactic class} \<^class>\plus\ which only specifies operations but no axioms; it would have been also possible to start immediately with class \<^class>\semigroup\, specifying the \\\ operation and associativity at the same time. Which approach is more appropriate depends. Usually it is more convenient to introduce operations and axioms in the same class: then the type checker will automatically insert the corresponding class constraints whenever the operations occur, reducing the need of manual annotations. However, when operations are decorated with popular syntax, syntactic classes can be an option to re-use the syntax in different contexts; this is indeed the way most overloaded constants in HOL are introduced, of which the most important are listed in Table~\ref{tab:overloading} in the appendix. Section \ref{sec:numeric-classes} covers a range of corresponding classes \emph{with} axioms. Further note that classes may contain axioms but \emph{no} operations. 
An example is class \<^class>\<open>finite\<close> from theory \<^theory>\<open>HOL.Finite_Set\<close>,
which specifies a type to be finite:
@{lemma [source] "finite (UNIV :: 'a::finite set)" by (fact finite_UNIV)}.\<close>

(*<*)end(*>*)
diff --git a/src/Doc/Tutorial/Types/Overloading.thy b/src/Doc/Tutorial/Types/Overloading.thy
--- a/src/Doc/Tutorial/Types/Overloading.thy
+++ b/src/Doc/Tutorial/Types/Overloading.thy
@@ -1,77 +1,77 @@
-(*<*)theory Overloading imports Main Setup begin
+(*<*)theory Overloading imports "../Setup" begin

hide_class (open) plus (*>*)

text \<open>Type classes allow \emph{overloading}; thus a constant may have
multiple definitions at non-overlapping types.\<close>

subsubsection \<open>Overloading\<close>

text \<open>We can introduce a binary infix addition operator \<open>\<oplus>\<close> for
arbitrary types by means of a type class:\<close>

class plus =
  fixes plus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<oplus>" 70)

text \<open>\noindent This introduces a new class @{class [source] plus},
along with a constant @{const [source] plus} with nice infix syntax.
@{const [source] plus} is also called a \emph{class operation}.  The
type of @{const [source] plus} carries a class constraint
@{typ [source] "'a :: plus"} on its type variable, meaning that only
types of class @{class [source] plus} can be instantiated for
@{typ [source] "'a"}.  To breathe life into @{class [source] plus} we
need to declare a type to be an \bfindex{instance} of
@{class [source] plus}:\<close>

instantiation nat :: plus
begin

text \<open>\noindent Command \isacommand{instantiation} opens a local
theory context.  Here we can now instantiate @{const [source] plus} on
\<^typ>\<open>nat\<close>:\<close>

primrec plus_nat :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  "(0::nat) \<oplus> n = n"
| "Suc m \<oplus> n = Suc (m \<oplus> n)"

text \<open>\noindent Note that the name @{const [source] plus} carries a
suffix \<open>_nat\<close>; by default, the local name of a class operation \<open>f\<close> to
be instantiated on type constructor \<open>\<kappa>\<close> is mangled as \<open>f_\<kappa>\<close>.  In case
of uncertainty, these names may be inspected using the
@{command "print_context"} command.

Although class @{class [source] plus} has no axioms, the instantiation
must be formally concluded by a (trivial) instantiation proof ``..'':\<close>

instance ..

text \<open>\noindent More interesting \isacommand{instance} proofs will
arise below.

The instantiation is finished by an explicit\<close>

end

text \<open>\noindent From now on, terms like \<^term>\<open>Suc (m \<oplus> 2)\<close> are
legal.\<close>

instantiation prod :: (plus, plus) plus
begin

text \<open>\noindent Here we instantiate the product type \<^type>\<open>prod\<close> to
class @{class [source] plus}, given that its type arguments are of
class @{class [source] plus}:\<close>

fun plus_prod :: "'a \<times> 'b \<Rightarrow> 'a \<times> 'b \<Rightarrow> 'a \<times> 'b" where
  "(x, y) \<oplus> (w, z) = (x \<oplus> w, y \<oplus> z)"

text \<open>\noindent Obviously, overloaded specifications may include
recursion over the syntactic structure of types.\<close>

instance ..

end

text \<open>\noindent This way we have encoded the canonical lifting of
binary operations to products by means of type classes.\<close>

(*<*)end(*>*)
diff --git a/src/Doc/Tutorial/Types/Setup.thy b/src/Doc/Tutorial/Types/Setup.thy
deleted file mode 100644
--- a/src/Doc/Tutorial/Types/Setup.thy
+++ /dev/null
@@ -1,8 +0,0 @@
-theory Setup
-imports Main
-begin
-
-ML_file \<open>../../antiquote_setup.ML\<close>
-ML_file \<open>../../more_antiquote.ML\<close>
-
-end