diff --git a/etc/settings b/etc/settings --- a/etc/settings +++ b/etc/settings @@ -1,171 +1,171 @@ # -*- shell-script -*- :mode=shellscript: # # Isabelle system settings. # # Important notes: # * See the "system" manual for explanations on Isabelle settings # * User settings go into $ISABELLE_HOME_USER/etc/settings # * DO NOT EDIT the repository copy of this file! # * DO NOT COPY this file into the $ISABELLE_HOME_USER directory! ### ### Isabelle/Scala ### ISABELLE_JAVA_SYSTEM_OPTIONS="-server -Dfile.encoding=UTF-8 -Disabelle.threads=0" ISABELLE_TOOL_JAVA_OPTIONS="-Djava.awt.headless=true -Xms512m -Xmx4g -Xss16m" ISABELLE_JAVAC_OPTIONS="-encoding UTF-8 -Xlint:-options -deprecation -source 11 -target 11" -ISABELLE_SCALAC_OPTIONS="-encoding UTF-8 -Wconf:cat=other-match-analysis:silent -feature -deprecation -target:11 -Xsource:3 -J-Xms512m -J-Xmx4g -J-Xss16m" +ISABELLE_SCALAC_OPTIONS="-encoding UTF-8 -feature -deprecation -target:11 -Xsource:3 -J-Xms512m -J-Xmx4g -J-Xss16m" ISABELLE_SCALA_JAR="$ISABELLE_HOME/lib/classes/isabelle.jar" #paranoia settings -- avoid intrusion of alien options unset "_JAVA_OPTIONS" unset "JAVA_TOOL_OPTIONS" #paranoia settings -- avoid problems of Java/Swing versus XIM/IBus etc. unset XMODIFIERS ### ### Interactive sessions (cf. isabelle console) ### ISABELLE_LINE_EDITOR="rlwrap" ### ### Batch sessions (cf. isabelle build) ### ISABELLE_BUILD_OPTIONS="" ### ### Document preparation (cf. isabelle latex) ### if [ "$ISABELLE_PLATFORM_FAMILY" = "windows" ]; then ISABELLE_PDFLATEX="pdflatex -interaction=nonstopmode -c-style-errors" else ISABELLE_PDFLATEX="pdflatex -interaction=nonstopmode -file-line-error" fi ISABELLE_LUALATEX="lualatex --interaction=nonstopmode --file-line-error" ISABELLE_BIBTEX="bibtex" ISABELLE_MAKEINDEX="makeindex -c -q" ISABELLE_EPSTOPDF="epstopdf" ### ### Misc path settings ### isabelle_directory '~' isabelle_directory '$ISABELLE_HOME_USER' isabelle_directory '~~' ISABELLE_COMPONENT_REPOSITORY="https://isabelle.sketis.net/components" ISABELLE_COMPONENTS_BASE="$USER_HOME/.isabelle/contrib" # The place for user configuration, heap files, etc. if [ -z "$ISABELLE_IDENTIFIER" ]; then ISABELLE_HOME_USER="$USER_HOME/.isabelle" else ISABELLE_HOME_USER="$USER_HOME/.isabelle/$ISABELLE_IDENTIFIER" fi # Where to look for isabelle tools (multiple dirs separated by ':'). ISABELLE_TOOLS="$ISABELLE_HOME/lib/Tools" # Location for temporary files (should be on a local file system). ISABELLE_TMP_PREFIX="${TMPDIR:-/tmp}/isabelle-$USER" # Heap locations. ISABELLE_HEAPS="$ISABELLE_HOME_USER/heaps" ISABELLE_HEAPS_SYSTEM="$ISABELLE_HOME/heaps" # HTML browser info. ISABELLE_BROWSER_INFO="$ISABELLE_HOME_USER/browser_info" ISABELLE_BROWSER_INFO_SYSTEM="$ISABELLE_HOME/browser_info" # Site settings check -- just to make it a little bit harder to copy this file verbatim! [ -n "$ISABELLE_SITE_SETTINGS_PRESENT" ] && \ { echo >&2 "### Isabelle site settings already present! Maybe copied etc/settings in full?"; } ISABELLE_SITE_SETTINGS_PRESENT=true ### ### Default logic ### ISABELLE_LOGIC=HOL ### ### Docs and external files ### # Where to look for docs (multiple dirs separated by ':'). 
ISABELLE_DOCS="$ISABELLE_HOME/doc" ISABELLE_DOCS_RELEASE_NOTES="~~/ANNOUNCE:~~/README:~~/NEWS:~~/COPYRIGHT:~~/CONTRIBUTORS:~~/contrib/README:~~/src/Tools/jEdit/README:~~/README_REPOSITORY" ISABELLE_DOCS_EXAMPLES="~~/src/HOL/Examples/Seq.thy:~~/src/HOL/Examples/Drinker.thy:~~/src/HOL/Examples/ML.thy:~~/src/HOL/Unix/Unix.thy:~~/src/Tools/SML/Examples.thy:~~/src/Pure/ROOT.ML" # "open" within desktop environment (potentially asynchronous) case "$ISABELLE_PLATFORM_FAMILY" in linux) ISABELLE_OPEN="xdg-open" ;; macos) ISABELLE_OPEN="open" ;; windows) ISABELLE_OPEN="cygstart" ;; esac PDF_VIEWER="$ISABELLE_OPEN" ISABELLE_EXTERNAL_FILES="bmp:eps:gif:jpeg:jpg:pdf:png:xmp" ### ### Symbol rendering ### ISABELLE_SYMBOLS="$ISABELLE_HOME/etc/symbols:$ISABELLE_HOME_USER/etc/symbols" ### ### OCaml ### ISABELLE_OPAM_ROOT="$USER_HOME/.opam" ISABELLE_OCAML_VERSION="ocaml-base-compiler.4.12.0" ### ### Haskell ### ISABELLE_STACK_ROOT="$USER_HOME/.stack" ISABELLE_STACK_RESOLVER="lts-18.12" ISABELLE_GHC_VERSION="ghc-8.10.7" ### ### Misc settings ### ISABELLE_GNUPLOT="gnuplot" ISABELLE_FONTFORGE="fontforge" #ISABELLE_MLTON="/usr/bin/mlton" #ISABELLE_SMLNJ="/usr/bin/sml" #ISABELLE_SWIPL="/usr/bin/swipl" diff --git a/src/Doc/Isar_Ref/HOL_Specific.thy b/src/Doc/Isar_Ref/HOL_Specific.thy --- a/src/Doc/Isar_Ref/HOL_Specific.thy +++ b/src/Doc/Isar_Ref/HOL_Specific.thy @@ -1,2507 +1,2518 @@ (*:maxLineLen=78:*) theory HOL_Specific imports Main "HOL-Library.Old_Datatype" "HOL-Library.Old_Recdef" "HOL-Library.Adhoc_Overloading" "HOL-Library.Dlist" "HOL-Library.FSet" Base begin chapter \Higher-Order Logic\ text \ Isabelle/HOL is based on Higher-Order Logic, a polymorphic version of Church's Simple Theory of Types. HOL can be best understood as a simply-typed version of classical set theory. The logic was first implemented in Gordon's HOL system @{cite "mgordon-hol"}. It extends Church's original logic @{cite "church40"} by explicit type variables (naive polymorphism) and a sound axiomatization scheme for new types based on subsets of existing types. Andrews's book @{cite andrews86} is a full description of the original Church-style higher-order logic, with proofs of correctness and completeness wrt.\ certain set-theoretic interpretations. The particular extensions of Gordon-style HOL are explained semantically in two chapters of the 1993 HOL book @{cite pitts93}. Experience with HOL over decades has demonstrated that higher-order logic is widely applicable in many areas of mathematics and computer science. In a sense, Higher-Order Logic is simpler than First-Order Logic, because there are fewer restrictions and special cases. Note that HOL is \<^emph>\weaker\ than FOL with axioms for ZF set theory, which is traditionally considered the standard foundation of regular mathematics, but for most applications this does not matter. If you prefer ML to Lisp, you will probably prefer HOL to ZF. \<^medskip> The syntax of HOL follows \\\-calculus and functional programming. Function application is curried. To apply the function \f\ of type \\\<^sub>1 \ \\<^sub>2 \ \\<^sub>3\ to the arguments \a\ and \b\ in HOL, you simply write \f a b\ (as in ML or Haskell). There is no ``apply'' operator; the existing application of the Pure \\\-calculus is re-used. Note that in HOL \f (a, b)\ means ``\f\ applied to the pair \(a, b)\ (which is notation for \Pair a b\). The latter typically introduces extra formal efforts that can be avoided by currying functions by default. 
Explicit tuples are as infrequent in HOL formalizations as in good ML or Haskell programs. \<^medskip> Isabelle/HOL has a distinct feel, compared to other object-logics like Isabelle/ZF. It identifies object-level types with meta-level types, taking advantage of the default type-inference mechanism of Isabelle/Pure. HOL fully identifies object-level functions with meta-level functions, with native abstraction and application. These identifications allow Isabelle to support HOL particularly nicely, but they also mean that HOL requires some sophistication from the user. In particular, this requires an understanding of Hindley-Milner type-inference with type-classes, both of which are used extensively in the standard libraries and applications. \ chapter \Derived specification elements\ section \Inductive and coinductive definitions \label{sec:hol-inductive}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "inductive"} & : & \local_theory \ local_theory\ \\ @{command_def (HOL) "inductive_set"} & : & \local_theory \ local_theory\ \\ @{command_def (HOL) "coinductive"} & : & \local_theory \ local_theory\ \\ @{command_def (HOL) "coinductive_set"} & : & \local_theory \ local_theory\ \\ @{command_def "print_inductives"}\\<^sup>*\ & : & \context \\ \\ @{attribute_def (HOL) mono} & : & \attribute\ \\ \end{matharray} An \<^emph>\inductive definition\ specifies the least predicate or set \R\ closed under given rules: applying a rule to elements of \R\ yields a result within \R\. For example, a structural operational semantics is an inductive definition of an evaluation relation. Dually, a \<^emph>\coinductive definition\ specifies the greatest predicate or set \R\ that is consistent with given rules: every element of \R\ can be seen as arising by applying a rule to elements of \R\. An important example is using bisimulation relations to formalise equivalence of processes and infinite data structures. Both inductive and coinductive definitions are based on the Knaster-Tarski fixed-point theorem for complete lattices. The collection of introduction rules given by the user determines a functor on subsets of set-theoretic relations. The required monotonicity of the recursion scheme is proven as a prerequisite to the fixed-point definition and the resulting consequences. This works by pushing inclusion through logical connectives and any other operator that might be wrapped around recursive occurrences of the defined relation: there must be a monotonicity theorem of the form \A \ B \ \ A \ \ B\, for each premise \\ R t\ in an introduction rule. The default rule declarations of Isabelle/HOL already take care of most common situations. \<^rail>\ (@@{command (HOL) inductive} | @@{command (HOL) inductive_set} | @@{command (HOL) coinductive} | @@{command (HOL) coinductive_set}) @{syntax vars} @{syntax for_fixes} \ (@'where' @{syntax multi_specs})? (@'monos' @{syntax thms})? ; @@{command print_inductives} ('!'?) ; @@{attribute (HOL) mono} (() | 'add' | 'del') \ \<^descr> @{command (HOL) "inductive"} and @{command (HOL) "coinductive"} define (co)inductive predicates from the introduction rules. The propositions given as \clauses\ in the @{keyword "where"} part are either rules of the usual \\/\\ format (with arbitrary nesting), or equalities using \\\. The latter specifies extra-logical abbreviations in the sense of @{command_ref abbreviation}. Introducing abstract syntax simultaneously with the actual introduction rules is occasionally useful for complex specifications.
The optional @{keyword "for"} part contains a list of parameters of the (co)inductive predicates that remain fixed throughout the definition, in contrast to arguments of the relation that may vary in each occurrence within the given \clauses\. The optional @{keyword "monos"} declaration contains additional \<^emph>\monotonicity theorems\, which are required for each operator applied to a recursive set in the introduction rules. \<^descr> @{command (HOL) "inductive_set"} and @{command (HOL) "coinductive_set"} are wrappers for to the previous commands for native HOL predicates. This allows to define (co)inductive sets, where multiple arguments are simulated via tuples. \<^descr> @{command "print_inductives"} prints (co)inductive definitions and monotonicity rules; the ``\!\'' option indicates extra verbosity. \<^descr> @{attribute (HOL) mono} declares monotonicity rules in the context. These rule are involved in the automated monotonicity proof of the above inductive and coinductive definitions. \ subsection \Derived rules\ text \ A (co)inductive definition of \R\ provides the following main theorems: \<^descr> \R.intros\ is the list of introduction rules as proven theorems, for the recursive predicates (or sets). The rules are also available individually, using the names given them in the theory file; \<^descr> \R.cases\ is the case analysis (or elimination) rule; \<^descr> \R.induct\ or \R.coinduct\ is the (co)induction rule; \<^descr> \R.simps\ is the equation unrolling the fixpoint of the predicate one step. When several predicates \R\<^sub>1, \, R\<^sub>n\ are defined simultaneously, the list of introduction rules is called \R\<^sub>1_\_R\<^sub>n.intros\, the case analysis rules are called \R\<^sub>1.cases, \, R\<^sub>n.cases\, and the list of mutual induction rules is called \R\<^sub>1_\_R\<^sub>n.inducts\. \ subsection \Monotonicity theorems\ text \ The context maintains a default set of theorems that are used in monotonicity proofs. New rules can be declared via the @{attribute (HOL) mono} attribute. See the main Isabelle/HOL sources for some examples. The general format of such monotonicity theorems is as follows: \<^item> Theorems of the form \A \ B \ \ A \ \ B\, for proving monotonicity of inductive definitions whose introduction rules have premises involving terms such as \\ R t\. \<^item> Monotonicity theorems for logical operators, which are of the general form \(\ \ \) \ \ (\ \ \) \ \ \ \\. For example, in the case of the operator \\\, the corresponding theorem is \[ \infer{\P\<^sub>1 \ P\<^sub>2 \ Q\<^sub>1 \ Q\<^sub>2\}{\P\<^sub>1 \ Q\<^sub>1\ & \P\<^sub>2 \ Q\<^sub>2\} \] \<^item> De Morgan style equations for reasoning about the ``polarity'' of expressions, e.g. \[ \<^prop>\\ \ P \ P\ \qquad\qquad \<^prop>\\ (P \ Q) \ \ P \ \ Q\ \] \<^item> Equations for reducing complex operators to more primitive ones whose monotonicity can easily be proved, e.g. \[ \<^prop>\(P \ Q) \ \ P \ Q\ \qquad\qquad \<^prop>\Ball A P \ \x. x \ A \ P x\ \] \ subsubsection \Examples\ text \The finite powerset operator can be defined inductively like this:\ (*<*)experiment begin(*>*) inductive_set Fin :: "'a set \ 'a set set" for A :: "'a set" where empty: "{} \ Fin A" | insert: "a \ A \ B \ Fin A \ insert a B \ Fin A" text \The accessible part of a relation is defined as follows:\ inductive acc :: "('a \ 'a \ bool) \ 'a \ bool" for r :: "'a \ 'a \ bool" (infix "\" 50) where acc: "(\y. 
y \ x \ acc r y) \ acc r x" (*<*)end(*>*) text \ Common logical connectives can be easily characterized as non-recursive inductive definitions with parameters, but without arguments. \ (*<*)experiment begin(*>*) inductive AND for A B :: bool where "A \ B \ AND A B" inductive OR for A B :: bool where "A \ OR A B" | "B \ OR A B" inductive EXISTS for B :: "'a \ bool" where "B a \ EXISTS B" (*<*)end(*>*) text \ Here the \cases\ or \induct\ rules produced by the @{command inductive} package coincide with the expected elimination rules for Natural Deduction. Already in the original article by Gerhard Gentzen @{cite "Gentzen:1935"} there is a hint that each connective can be characterized by its introductions, and the elimination can be constructed systematically. \ section \Recursive functions \label{sec:recursion}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "primrec"} & : & \local_theory \ local_theory\ \\ @{command_def (HOL) "fun"} & : & \local_theory \ local_theory\ \\ @{command_def (HOL) "function"} & : & \local_theory \ proof(prove)\ \\ @{command_def (HOL) "termination"} & : & \local_theory \ proof(prove)\ \\ @{command_def (HOL) "fun_cases"} & : & \local_theory \ local_theory\ \\ \end{matharray} \<^rail>\ @@{command (HOL) primrec} @{syntax specification} ; (@@{command (HOL) fun} | @@{command (HOL) function}) opts? @{syntax specification} ; opts: '(' (('sequential' | 'domintros') + ',') ')' ; @@{command (HOL) termination} @{syntax term}? ; @@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and') \ \<^descr> @{command (HOL) "primrec"} defines primitive recursive functions over datatypes (see also @{command_ref (HOL) datatype}). The given \equations\ specify reduction rules that are produced by instantiating the generic combinator for primitive recursion that is available for each datatype. Each equation needs to be of the form: @{text [display] "f x\<^sub>1 \ x\<^sub>m (C y\<^sub>1 \ y\<^sub>k) z\<^sub>1 \ z\<^sub>n = rhs"} such that \C\ is a datatype constructor, \rhs\ contains only the free variables on the left-hand side (or from the context), and all recursive occurrences of \f\ in \rhs\ are of the form \f \ y\<^sub>i \\ for some \i\. At most one reduction rule for each constructor can be given. The order does not matter. For missing constructors, the function is defined to return a default value, but this equation is made difficult to access for users. The reduction rules are declared as @{attribute simp} by default, which enables standard proof methods like @{method simp} and @{method auto} to normalize expressions of \f\ applied to datatype constructions, by simulating symbolic computation via rewriting. \<^descr> @{command (HOL) "function"} defines functions by general wellfounded recursion. A detailed description with examples can be found in @{cite "isabelle-function"}. The function is specified by a set of (possibly conditional) recursive equations with arbitrary pattern matching. The command generates proof obligations for the completeness and the compatibility of patterns. The defined function is considered partial, and the resulting simplification rules (named \f.psimps\) and induction rule (named \f.pinduct\) are guarded by a generated domain predicate \f_dom\. The @{command (HOL) "termination"} command can then be used to establish that the function is total. \<^descr> @{command (HOL) "fun"} is a shorthand notation for ``@{command (HOL) "function"}~\(sequential)\'', followed by automated proof attempts regarding pattern matching and termination. 
See @{cite "isabelle-function"} for further details. \<^descr> @{command (HOL) "termination"}~\f\ commences a termination proof for the previously defined function \f\. If this is omitted, the command refers to the most recent function definition. After the proof is closed, the recursive equations and the induction principle is established. \<^descr> @{command (HOL) "fun_cases"} generates specialized elimination rules for function equations. It expects one or more function equations and produces rules that eliminate the given equalities, following the cases given in the function definition. Recursive definitions introduced by the @{command (HOL) "function"} command accommodate reasoning by induction (cf.\ @{method induct}): rule \f.induct\ refers to a specific induction rule, with parameters named according to the user-specified equations. Cases are numbered starting from 1. For @{command (HOL) "primrec"}, the induction principle coincides with structural recursion on the datatype where the recursion is carried out. The equations provided by these packages may be referred later as theorem list \f.simps\, where \f\ is the (collective) name of the functions defined. Individual equations may be named explicitly as well. The @{command (HOL) "function"} command accepts the following options. \<^descr> \sequential\ enables a preprocessor which disambiguates overlapping patterns by making them mutually disjoint. Earlier equations take precedence over later ones. This allows to give the specification in a format very similar to functional programming. Note that the resulting simplification and induction rules correspond to the transformed specification, not the one given originally. This usually means that each equation given by the user may result in several theorems. Also note that this automatic transformation only works for ML-style datatype patterns. \<^descr> \domintros\ enables the automated generation of introduction rules for the domain predicate. While mostly not needed, they can be helpful in some proofs about partial functions. \ subsubsection \Example: evaluation of expressions\ text \ Subsequently, we define mutual datatypes for arithmetic and boolean expressions, and use @{command primrec} for evaluation functions that follow the same recursive structure. \ (*<*)experiment begin(*>*) datatype 'a aexp = IF "'a bexp" "'a aexp" "'a aexp" | Sum "'a aexp" "'a aexp" | Diff "'a aexp" "'a aexp" | Var 'a | Num nat and 'a bexp = Less "'a aexp" "'a aexp" | And "'a bexp" "'a bexp" | Neg "'a bexp" text \\<^medskip> Evaluation of arithmetic and boolean expressions\ primrec evala :: "('a \ nat) \ 'a aexp \ nat" and evalb :: "('a \ nat) \ 'a bexp \ bool" where "evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)" | "evala env (Sum a1 a2) = evala env a1 + evala env a2" | "evala env (Diff a1 a2) = evala env a1 - evala env a2" | "evala env (Var v) = env v" | "evala env (Num n) = n" | "evalb env (Less a1 a2) = (evala env a1 < evala env a2)" | "evalb env (And b1 b2) = (evalb env b1 \ evalb env b2)" | "evalb env (Neg b) = (\ evalb env b)" text \ Since the value of an expression depends on the value of its variables, the functions \<^const>\evala\ and \<^const>\evalb\ take an additional parameter, an \<^emph>\environment\ that maps variables to their values. \<^medskip> Substitution on expressions can be defined similarly. The mapping \f\ of type \<^typ>\'a \ 'a aexp\ given as a parameter is lifted canonically on the types \<^typ>\'a aexp\ and \<^typ>\'a bexp\, respectively. 
\ primrec substa :: "('a \ 'b aexp) \ 'a aexp \ 'b aexp" and substb :: "('a \ 'b aexp) \ 'a bexp \ 'b bexp" where "substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)" | "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)" | "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)" | "substa f (Var v) = f v" | "substa f (Num n) = Num n" | "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)" | "substb f (And b1 b2) = And (substb f b1) (substb f b2)" | "substb f (Neg b) = Neg (substb f b)" text \ In textbooks about semantics one often finds substitution theorems, which express the relationship between substitution and evaluation. For \<^typ>\'a aexp\ and \<^typ>\'a bexp\, we can prove such a theorem by mutual induction, followed by simplification. \ lemma subst_one: "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a" "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b" by (induct a and b) simp_all lemma subst_all: "evala env (substa s a) = evala (\x. evala env (s x)) a" "evalb env (substb s b) = evalb (\x. evala env (s x)) b" by (induct a and b) simp_all (*<*)end(*>*) subsubsection \Example: a substitution function for terms\ text \Functions on datatypes with nested recursion are also defined by mutual primitive recursion.\ (*<*)experiment begin(*>*) datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list" text \ A substitution function on type \<^typ>\('a, 'b) term\ can be defined as follows, by working simultaneously on \<^typ>\('a, 'b) term list\: \ primrec subst_term :: "('a \ ('a, 'b) term) \ ('a, 'b) term \ ('a, 'b) term" and subst_term_list :: "('a \ ('a, 'b) term) \ ('a, 'b) term list \ ('a, 'b) term list" where "subst_term f (Var a) = f a" | "subst_term f (App b ts) = App b (subst_term_list f ts)" | "subst_term_list f [] = []" | "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts" text \ The recursion scheme follows the structure of the unfolded definition of type \<^typ>\('a, 'b) term\. To prove properties of this substitution function, mutual induction is needed: \ lemma "subst_term (subst_term f1 \ f2) t = subst_term f1 (subst_term f2 t)" and "subst_term_list (subst_term f1 \ f2) ts = subst_term_list f1 (subst_term_list f2 ts)" by (induct t and ts rule: subst_term.induct subst_term_list.induct) simp_all (*<*)end(*>*) subsubsection \Example: a map function for infinitely branching trees\ text \Defining functions on infinitely branching datatypes by primitive recursion is just as easy.\ (*<*)experiment begin(*>*) datatype 'a tree = Atom 'a | Branch "nat \ 'a tree" primrec map_tree :: "('a \ 'b) \ 'a tree \ 'b tree" where "map_tree f (Atom a) = Atom (f a)" | "map_tree f (Branch ts) = Branch (\x. map_tree f (ts x))" text \ Note that all occurrences of functions such as \ts\ above must be applied to an argument. In particular, \<^term>\map_tree f \ ts\ is not allowed here. 
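A further function in the same style can quantify over the atoms of a tree. The following is a minimal sketch (the constant \exists_tree\ is ad-hoc, introduced here only for illustration, written in plain ASCII syntax):
\

primrec exists_tree :: "('a => bool) => 'a tree => bool"
  where
    "exists_tree P (Atom a) = P a"
  | "exists_tree P (Branch ts) = (\<exists>x. exists_tree P (ts x))"

lemma "exists_tree P (map_tree f t) = exists_tree (P o f) t"
  by (induct t) simp_all

text \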
\<^medskip> Here is a simple composition lemma for \<^term>\map_tree\: \ lemma "map_tree g (map_tree f t) = map_tree (g \ f) t" by (induct t) simp_all (*<*)end(*>*) subsection \Proof methods related to recursive definitions\ text \ \begin{matharray}{rcl} @{method_def (HOL) pat_completeness} & : & \method\ \\ @{method_def (HOL) relation} & : & \method\ \\ @{method_def (HOL) lexicographic_order} & : & \method\ \\ @{method_def (HOL) size_change} & : & \method\ \\ @{attribute_def (HOL) termination_simp} & : & \attribute\ \\ @{method_def (HOL) induction_schema} & : & \method\ \\ \end{matharray} \<^rail>\ @@{method (HOL) relation} @{syntax term} ; @@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * ) ; @@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) ) ; @@{method (HOL) induction_schema} ; orders: ( 'max' | 'min' | 'ms' ) * \ \<^descr> @{method (HOL) pat_completeness} is a specialized method to solve goals regarding the completeness of pattern matching, as required by the @{command (HOL) "function"} package (cf.\ @{cite "isabelle-function"}). \<^descr> @{method (HOL) relation}~\R\ introduces a termination proof using the relation \R\. The resulting proof state will contain goals expressing that \R\ is wellfounded, and that the arguments of recursive calls decrease with respect to \R\. Usually, this method is used as the initial proof step of manual termination proofs. \<^descr> @{method (HOL) "lexicographic_order"} attempts a fully automated termination proof by searching for a lexicographic combination of size measures on the arguments of the function. The method accepts the same arguments as the @{method auto} method, which it uses internally to prove local descents. The @{syntax clasimpmod} modifiers are accepted (as for @{method auto}). In case of failure, extensive information is printed, which can help to analyse the situation (cf.\ @{cite "isabelle-function"}). \<^descr> @{method (HOL) "size_change"} also works on termination goals, using a variation of the size-change principle, together with a graph decomposition technique (see @{cite krauss_phd} for details). Three kinds of orders are used internally: \max\, \min\, and \ms\ (multiset), which is only available when the theory \Multiset\ is loaded. When no order kinds are given, they are tried in order. The search for a termination proof uses SAT solving internally. For local descent proofs, the @{syntax clasimpmod} modifiers are accepted (as for @{method auto}). \<^descr> @{attribute (HOL) termination_simp} declares extra rules for the simplifier, when invoked in termination proofs. This can be useful, e.g., for special rules involving size estimations. \<^descr> @{method (HOL) induction_schema} derives user-specified induction rules from well-founded induction and completeness of patterns. This factors out some operations that are done internally by the function package and makes them available separately. See \<^file>\~~/src/HOL/Examples/Induction_Schema.thy\ for examples. \ subsection \Functions with explicit partiality\ text \ \begin{matharray}{rcl} @{command_def (HOL) "partial_function"} & : & \local_theory \ local_theory\ \\ @{attribute_def (HOL) "partial_function_mono"} & : & \attribute\ \\ \end{matharray} \<^rail>\ @@{command (HOL) partial_function} '(' @{syntax name} ')' @{syntax specification} \ \<^descr> @{command (HOL) "partial_function"}~\(mode)\ defines recursive functions based on fixpoints in complete partial orders. No termination proof is required from the user or constructed internally. 
Instead, the possibility of non-termination is modelled explicitly in the result type, which contains an explicit bottom element. Pattern matching and mutual recursion are currently not supported. Thus, the specification consists of a single function described by a single recursive equation. There are no fixed syntactic restrictions on the body of the function, but the induced functional must be provably monotonic wrt.\ the underlying order. The monotonicity proof is performed internally, and the definition is rejected when it fails. The proof can be influenced by declaring hints using the @{attribute (HOL) partial_function_mono} attribute. The mandatory \mode\ argument specifies the mode of operation of the command, which directly corresponds to a complete partial order on the result type. By default, the following modes are defined: \<^descr> \option\ defines functions that map into the \<^type>\option\ type. Here, the value \<^term>\None\ is used to model a non-terminating computation. Monotonicity requires that if \<^term>\None\ is returned by a recursive call, then the overall result must also be \<^term>\None\. This is best achieved through the use of the monadic operator \<^const>\Option.bind\. \<^descr> \tailrec\ defines functions with an arbitrary result type and uses the slightly degenerate partial order where \<^term>\undefined\ is the bottom element. Now, monotonicity requires that if \<^term>\undefined\ is returned by a recursive call, then the overall result must also be \<^term>\undefined\. In practice, this is only satisfied when each recursive call is a tail call, whose result is directly returned. Thus, this mode of operation allows the definition of arbitrary tail-recursive functions. Experienced users may define new modes by instantiating the locale \<^const>\partial_function_definitions\ appropriately. \<^descr> @{attribute (HOL) partial_function_mono} declares rules for use in the internal monotonicity proofs of partial function definitions. \ subsection \Old-style recursive function definitions (TFL)\ text \ \begin{matharray}{rcl} @{command_def (HOL) "recdef"} & : & \theory \ theory\ \\ \end{matharray} The old TFL command @{command (HOL) "recdef"} for defining recursive functions is mostly obsolete; @{command (HOL) "function"} or @{command (HOL) "fun"} should be used instead. \<^rail>\ @@{command (HOL) recdef} ('(' @'permissive' ')')? \ @{syntax name} @{syntax term} (@{syntax prop} +) hints? ; hints: '(' @'hints' ( recdefmod * ) ')' ; recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf') (() | 'add' | 'del') ':' @{syntax thms}) | @{syntax clasimpmod} \ \<^descr> @{command (HOL) "recdef"} defines general well-founded recursive functions (using the TFL package). The ``\(permissive)\'' option tells TFL to recover from failed proof attempts, returning unfinished results. The \recdef_simp\, \recdef_cong\, and \recdef_wf\ hints refer to auxiliary rules to be used in the internal automated proof process of TFL. Additional @{syntax clasimpmod} declarations may be given to tune the context of the Simplifier (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\ \secref{sec:classical}). \<^medskip> Hints for @{command (HOL) "recdef"} may also be declared globally, using the following attributes.
\begin{matharray}{rcl} @{attribute_def (HOL) recdef_simp} & : & \attribute\ \\ @{attribute_def (HOL) recdef_cong} & : & \attribute\ \\ @{attribute_def (HOL) recdef_wf} & : & \attribute\ \\ \end{matharray} \<^rail>\ (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} | @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del') \ \ section \Adhoc overloading of constants\ text \ \begin{tabular}{rcll} @{command_def "adhoc_overloading"} & : & \local_theory \ local_theory\ \\ @{command_def "no_adhoc_overloading"} & : & \local_theory \ local_theory\ \\ @{attribute_def "show_variants"} & : & \attribute\ & default \false\ \\ \end{tabular} \<^medskip> Adhoc overloading allows a constant to be overloaded depending on its type. Typically this involves the introduction of an uninterpreted constant (used for input and output) and the addition of some variants (used internally). For examples see \<^file>\~~/src/HOL/Examples/Adhoc_Overloading_Examples.thy\ and \<^file>\~~/src/HOL/Library/Monad_Syntax.thy\. \<^rail>\ (@@{command adhoc_overloading} | @@{command no_adhoc_overloading}) (@{syntax name} (@{syntax term} + ) + @'and') \ \<^descr> @{command "adhoc_overloading"}~\c v\<^sub>1 ... v\<^sub>n\ associates variants with an existing constant. \<^descr> @{command "no_adhoc_overloading"} is similar to @{command "adhoc_overloading"}, but removes the specified variants from the present context. \<^descr> @{attribute "show_variants"} controls printing of variants of overloaded constants. If enabled, the internally used variants are printed instead of their respective overloaded constants. This is occasionally useful to check whether the system agrees with a user's expectations about derived variants. \ section \Definition by specification \label{sec:hol-specification}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "specification"} & : & \theory \ proof(prove)\ \\ \end{matharray} \<^rail>\ @@{command (HOL) specification} '(' (decl +) ')' \ (@{syntax thmdecl}? @{syntax prop} +) ; decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')? \ \<^descr> @{command (HOL) "specification"}~\decls \\ sets up a goal stating the existence of terms with the properties specified to hold for the constants given in \decls\. After finishing the proof, the theory will be augmented with definitions for the given constants, as well as with theorems stating the properties for these constants. \decl\ declares a constant to be defined by the specification given. The definition for the constant \c\ is bound to the name \c_def\ unless a theorem name is given in the declaration. Overloaded constants should be declared as such. \ section \Old-style datatypes \label{sec:hol-datatype}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "old_rep_datatype"} & : & \theory \ proof(prove)\ \\ \end{matharray} \<^rail>\ @@{command (HOL) old_rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +) ; spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|') ; cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}? \ \<^descr> @{command (HOL) "old_rep_datatype"} represents existing types as old-style datatypes. This command is mostly obsolete; @{command (HOL) "datatype"} should be used instead. See @{cite "isabelle-datatypes"} for more details on datatypes.
Apart from proper proof methods for case analysis and induction, there are also emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL) induct_tac} available, see \secref{sec:hol-induct-tac}; these allow referring directly to the internal structure of subgoals (including internally bound parameters). \ subsubsection \Examples\ text \ We define a type of finite sequences, with slightly different names than the existing \<^typ>\'a list\ that is already in \<^theory>\Main\: \ (*<*)experiment begin(*>*) datatype 'a seq = Empty | Seq 'a "'a seq" text \We can now prove a simple lemma by structural induction:\ lemma "Seq x xs \ xs" proof (induct xs arbitrary: x) case Empty txt \This case can be proved using the simplifier: the freeness properties of the datatype are already declared as @{attribute simp} rules.\ show "Seq x Empty \ Empty" by simp next case (Seq y ys) txt \The step case is proved similarly.\ show "Seq x (Seq y ys) \ Seq y ys" using \Seq y ys \ ys\ by simp qed text \Here is a more succinct version of the same proof:\ lemma "Seq x xs \ xs" by (induct xs arbitrary: x) simp_all (*<*)end(*>*) section \Records \label{sec:hol-record}\ text \ In principle, records merely generalize the concept of tuples, where components may be addressed by labels instead of just position. The logical infrastructure of records in Isabelle/HOL is slightly more advanced, though, supporting truly extensible record schemes. This admits operations that are polymorphic with respect to record extension, yielding ``object-oriented'' effects like (single) inheritance. See also @{cite "NaraschewskiW-TPHOLs98"} for more details on object-oriented verification and record subtyping in HOL. \ subsection \Basic concepts\ text \ Isabelle/HOL supports both \<^emph>\fixed\ and \<^emph>\schematic\ records at the level of terms and types. The notation is as follows: \begin{center} \begin{tabular}{l|l|l} & record terms & record types \\ \hline fixed & \\x = a, y = b\\ & \\x :: A, y :: B\\ \\ schematic & \\x = a, y = b, \ = m\\ & \\x :: A, y :: B, \ :: M\\ \\ \end{tabular} \end{center} The ASCII representation of \\x = a\\ is \(| x = a |)\. A fixed record \\x = a, y = b\\ has field \x\ of value \a\ and field \y\ of value \b\. The corresponding type is \\x :: A, y :: B\\, assuming that \a :: A\ and \b :: B\. A record scheme like \\x = a, y = b, \ = m\\ contains fields \x\ and \y\ as before, but also possibly further fields as indicated by the ``\\\'' notation (which is actually part of the syntax). The improper field ``\\\'' of a record scheme is called the \<^emph>\more part\. Logically it is just a free variable, which is occasionally referred to as ``row variable'' in the literature. The more part of a record scheme may be instantiated by zero or more further components. For example, the previous scheme may get instantiated to \\x = a, y = b, z = c, \ = m'\\, where \m'\ refers to a different more part. Fixed records are special instances of record schemes, where ``\\\'' is properly terminated by the \() :: unit\ element. In fact, \\x = a, y = b\\ is just an abbreviation for \\x = a, y = b, \ = ()\\. \<^medskip> Two key observations make extensible records in a simply typed language like HOL work out: \<^enum> the more part is internalized, as a free term or type variable, \<^enum> field names are externalized: they cannot be accessed within the logic as first-class values.
\<^medskip> In Isabelle/HOL, record types have to be defined explicitly, fixing their field names and types, and their (optional) parent record. Afterwards, records may be formed using the above syntax, while obeying the canonical order of fields as given by their declaration. The record package provides several standard operations like selectors and updates. The common setup for various generic proof tools enables succinct reasoning patterns. See also the Isabelle/HOL tutorial @{cite "isabelle-hol-book"} for further instructions on using records in practice. \ subsection \Record specifications\ text \ \begin{matharray}{rcl} @{command_def (HOL) "record"} & : & \theory \ theory\ \\ @{command_def (HOL) "print_record"} & : & \context \\ \\ \end{matharray} \<^rail>\ @@{command (HOL) record} @{syntax "overloaded"}? @{syntax typespec_sorts} '=' \ (@{syntax type} '+')? (constdecl +) ; constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}? ; @@{command (HOL) print_record} modes? @{syntax typespec_sorts} ; modes: '(' (@{syntax name} +) ')' \ \<^descr> @{command (HOL) "record"}~\(\\<^sub>1, \, \\<^sub>m) t = \ + c\<^sub>1 :: \\<^sub>1 \ c\<^sub>n :: \\<^sub>n\ defines extensible record type \(\\<^sub>1, \, \\<^sub>m) t\, derived from the optional parent record \\\ by adding new field components \c\<^sub>i :: \\<^sub>i\ etc. The type variables of \\\ and \\\<^sub>i\ need to be covered by the (distinct) parameters \\\<^sub>1, \, \\<^sub>m\. Type constructor \t\ has to be new, while \\\ needs to specify an instance of an existing record type. At least one new field \c\<^sub>i\ has to be specified. Basically, field names need to belong to a unique record. This is not a real restriction in practice, since fields are qualified by the record name internally. The parent record specification \\\ is optional; if omitted \t\ becomes a root record. The hierarchy of all records declared within a theory context forms a forest structure, i.e.\ a set of trees starting with a root record each. There is no way to merge multiple parent records! For convenience, \(\\<^sub>1, \, \\<^sub>m) t\ is made a type abbreviation for the fixed record type \\c\<^sub>1 :: \\<^sub>1, \, c\<^sub>n :: \\<^sub>n\\; likewise, \(\\<^sub>1, \, \\<^sub>m, \) t_scheme\ is made an abbreviation for \\c\<^sub>1 :: \\<^sub>1, \, c\<^sub>n :: \\<^sub>n, \ :: \\\. \<^descr> @{command (HOL) "print_record"}~\(\\<^sub>1, \, \\<^sub>m) t\ prints the definition of record \(\\<^sub>1, \, \\<^sub>m) t\. Optionally \modes\ can be specified, which are appended to the current print mode; see \secref{sec:print-modes}. \ subsection \Record operations\ text \ Any record definition of the form presented above produces certain standard operations. Selectors and updates are provided for any field, including the improper one ``\more\''. There are also cumulative record constructor functions. To simplify the presentation below, we assume for now that \(\\<^sub>1, \, \\<^sub>m) t\ is a root record with fields \c\<^sub>1 :: \\<^sub>1, \, c\<^sub>n :: \\<^sub>n\. \<^medskip> \<^bold>\Selectors\ and \<^bold>\updates\ are available for any field (including ``\more\''): \begin{matharray}{lll} \c\<^sub>i\ & \::\ & \\\<^vec>c :: \<^vec>\, \ :: \\ \ \\<^sub>i\ \\ \c\<^sub>i_update\ & \::\ & \\\<^sub>i \ \\<^vec>c :: \<^vec>\, \ :: \\ \ \\<^vec>c :: \<^vec>\, \ :: \\\ \\ \end{matharray} There is special syntax for application of updates: \r\x := a\\ abbreviates term \x_update a r\.
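For example, here is a minimal sketch of a two-field record together with basic selector/update reasoning by @{method simp} (the record \point\ is ad-hoc, introduced only for illustration, stated in the ASCII record syntax shown above; since @{command (HOL) "record"} is a \theory \ theory\ command, it is given at the theory level):
\

record point =
  x :: nat
  y :: nat

lemma "x (| x = a, y = b |) = a" and "y (r (| x := a |)) = y r"
  by simp_all

text \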
Further notation for repeated updates is also available: \r\x := a\\y := b\\z := c\\ may be written \r\x := a, y := b, z := c\\. Note that, because of postfix notation, the order of fields shown here is reversed compared with the actual term. Since repeated updates are just function applications, fields may be freely permuted in \\x := a, y := b, z := c\\, as far as logical equality is concerned. Thus commutativity of independent updates can be proven within the logic for any two fields, but not as a general theorem. \<^medskip> The \<^bold>\make\ operation provides a cumulative record constructor function: \begin{matharray}{lll} \t.make\ & \::\ & \\\<^sub>1 \ \ \\<^sub>n \ \\<^vec>c :: \<^vec>\\\ \\ \end{matharray} \<^medskip> We now reconsider the case of non-root records, which are derived from some parent. In general, the latter may depend on another parent as well, resulting in a list of \<^emph>\ancestor records\. Appending the lists of fields of all ancestors results in a certain field prefix. The record package automatically takes care of this by lifting operations over this context of ancestor fields. Assuming that \(\\<^sub>1, \, \\<^sub>m) t\ has ancestor fields \b\<^sub>1 :: \\<^sub>1, \, b\<^sub>k :: \\<^sub>k\, the above record operations will get the following types: \<^medskip> \begin{tabular}{lll} \c\<^sub>i\ & \::\ & \\\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\, \ :: \\ \ \\<^sub>i\ \\ \c\<^sub>i_update\ & \::\ & \\\<^sub>i \ \\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\, \ :: \\ \ \\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\, \ :: \\\ \\ \t.make\ & \::\ & \\\<^sub>1 \ \ \\<^sub>k \ \\<^sub>1 \ \ \\<^sub>n \ \\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\\\ \\ \end{tabular} \<^medskip> Some further operations address the extension aspect of a derived record scheme specifically: \t.fields\ produces a record fragment consisting of exactly the new fields introduced here (the result may serve as a more part elsewhere); \t.extend\ takes a fixed record and adds a given more part; \t.truncate\ restricts a record scheme to a fixed record. \<^medskip> \begin{tabular}{lll} \t.fields\ & \::\ & \\\<^sub>1 \ \ \\<^sub>n \ \\<^vec>c :: \<^vec>\\\ \\ \t.extend\ & \::\ & \\\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\\ \ \ \ \\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\, \ :: \\\ \\ \t.truncate\ & \::\ & \\\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\, \ :: \\ \ \\<^vec>b :: \<^vec>\, \<^vec>c :: \<^vec>\\\ \\ \end{tabular} \<^medskip> Note that \t.make\ and \t.fields\ coincide for root records. \ subsection \Derived rules and proof tools\ text \ The record package proves several results internally, declaring these facts to appropriate proof tools. This enables users to reason about record structures quite conveniently. Assume that \t\ is a record type as specified above. \<^enum> Standard conversions for selectors or updates applied to record constructor terms are made part of the default Simplifier context; thus proofs by reduction of basic operations merely require the @{method simp} method without further arguments. These rules are available as \t.simps\, too. \<^enum> Selectors applied to updated records are automatically reduced by an internal simplification procedure, which is also part of the standard Simplifier setup. \<^enum> Inject equations of a form analogous to \<^prop>\(x, y) = (x', y') \ x = x' \ y = y'\ are declared to the Simplifier and Classical Reasoner as @{attribute iff} rules. These rules are available as \t.iffs\.
\<^enum> The introduction rule for record equality analogous to \x r = x r' \ y r = y r' \ \ r = r'\ is declared to the Simplifier, and to the basic rule context as ``@{attribute intro}\?\''. The rule is called \t.equality\. \<^enum> Representations of arbitrary record expressions as canonical constructor terms are provided both in @{method cases} and @{method induct} format (cf.\ the generic proof methods of the same name, \secref{sec:cases-induct}). Several variations are available, for fixed records, record schemes, more parts etc. The generic proof methods are sufficiently smart to pick the most sensible rule according to the type of the indicated record expression: users just need to apply something like ``\(cases r)\'' to a certain proof problem. \<^enum> The derived record operations \t.make\, \t.fields\, \t.extend\, \t.truncate\ are \<^emph>\not\ treated automatically, but usually need to be expanded by hand, using the collective fact \t.defs\. \ subsubsection \Examples\ text \See \<^file>\~~/src/HOL/Examples/Records.thy\, for example.\ section \Semantic subtype definitions \label{sec:hol-typedef}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "typedef"} & : & \local_theory \ proof(prove)\ \\ \end{matharray} A type definition identifies a new type with a non-empty subset of an existing type. More precisely, the new type is defined by exhibiting an existing type \\\, a set \A :: \ set\, and proving \<^prop>\\x. x \ A\. Thus \A\ is a non-empty subset of \\\, and the new type denotes this subset. New functions are postulated that establish an isomorphism between the new type and the subset. In general, the type \\\ may involve type variables \\\<^sub>1, \, \\<^sub>n\ which means that the type definition produces a type constructor \(\\<^sub>1, \, \\<^sub>n) t\ depending on those type arguments. \<^rail>\ @@{command (HOL) typedef} @{syntax "overloaded"}? abs_type '=' rep_set ; @{syntax_def "overloaded"}: ('(' @'overloaded' ')') ; abs_type: @{syntax typespec_sorts} @{syntax mixfix}? ; rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})? \ To understand the concept of type definition better, we need to recount its somewhat complex history. The HOL logic goes back to the ``Simple Theory of Types'' (STT) of A. Church @{cite "church40"}, which is further explained in the book by P. Andrews @{cite "andrews86"}. The overview article by W. Farmer @{cite "Farmer:2008"} points out the ``seven virtues'' of this relatively simple family of logics. STT has only ground types, without polymorphism and without type definitions. \<^medskip> M. Gordon @{cite "Gordon:1985:HOL"} augmented Church's STT by adding schematic polymorphism (type variables and type constructors) and a facility to introduce new types as semantic subtypes from existing types. This genuine extension of the logic was explained semantically by A. Pitts in the book of the original Cambridge HOL88 system @{cite "pitts93"}. Type definitions work in this setting, because the general model-theory of STT is restricted to models that ensure that the universe of type interpretations is closed by forming subsets (via predicates taken from the logic). \<^medskip> Isabelle/HOL goes beyond Gordon-style HOL by admitting overloaded constant definitions @{cite "Wenzel:1997:TPHOL" and "Haftmann-Wenzel:2006:classes"}, which are actually a concept of Isabelle/Pure and do not depend on particular set-theoretic semantics of HOL.
Over many years, there was no formal checking of semantic type definitions in Isabelle/HOL versus syntactic constant definitions in Isabelle/Pure. So the @{command typedef} command was described as ``axiomatic'' in the sense of \secref{sec:axiomatizations}, only with some local checks of the given type and its representing set. Recent clarification of overloading in the HOL logic proper @{cite "Kuncar-Popescu:2015"} demonstrates how the dissimilar concepts of constant definitions versus type definitions may be understood uniformly. This requires an interpretation of Isabelle/HOL that substantially reforms the set-theoretic model of A. Pitts @{cite "pitts93"}, by taking a schematic view on polymorphism and interpreting only ground types in the set-theoretic sense of HOL88. Moreover, type-constructors may be explicitly overloaded, e.g.\ by making the subset depend on type-class parameters (cf.\ \secref{sec:class}). This is semantically like a dependent type: the meaning relies on the operations provided by different type-class instances. \<^descr> @{command (HOL) "typedef"}~\(\\<^sub>1, \, \\<^sub>n) t = A\ defines a new type \(\\<^sub>1, \, \\<^sub>n) t\ from the set \A\ over an existing type. The set \A\ may contain type variables \\\<^sub>1, \, \\<^sub>n\ as specified on the LHS, but no term variables. Non-emptiness of \A\ needs to be proven on the spot, in order to turn the internal conditional characterization into usable theorems. The ``\(overloaded)\'' option allows the @{command "typedef"} specification to depend on constants that are not (yet) specified and thus left open as parameters, e.g.\ type-class parameters. Within a local theory specification, the newly introduced type constructor cannot depend on parameters or assumptions of the context: this is syntactically impossible in HOL. The non-emptiness proof may formally depend on local assumptions, but this has little practical relevance. For @{command (HOL) "typedef"}~\t = A\ the newly introduced type \t\ is accompanied by a pair of morphisms to relate it to the representing set over the old type. By default, the injection from type to set is called \Rep_t\ and its inverse \Abs_t\. An explicit @{keyword (HOL) "morphisms"} specification allows alternative names to be provided. The logical characterization of @{command typedef} uses the predicate of locale \<^const>\type_definition\ that is defined in Isabelle/HOL. Various basic consequences of that are instantiated accordingly, re-using the locale facts with names derived from the new type constructor. Thus the generic theorem @{thm type_definition.Rep} is turned into the specific \Rep_t\, for example. Theorems @{thm type_definition.Rep}, @{thm type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse} provide the most basic characterization as a corresponding injection/surjection pair (in both directions). The derived rules @{thm type_definition.Rep_inject} and @{thm type_definition.Abs_inject} provide a more convenient version of injectivity, suitable for automated proof tools (e.g.\ in declarations involving @{attribute simp} or @{attribute iff}). Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/ @{thm type_definition.Abs_induct} provide alternative views on surjectivity. These rules are already declared as set or type rules for the generic @{method cases} and @{method induct} methods, respectively.
\ subsubsection \Examples\ text \ The following trivial example pulls a three-element type into existence within the formal logical environment of Isabelle/HOL.\ (*<*)experiment begin(*>*) typedef three = "{(True, True), (True, False), (False, True)}" by blast definition "One = Abs_three (True, True)" definition "Two = Abs_three (True, False)" definition "Three = Abs_three (False, True)" lemma three_distinct: "One \ Two" "One \ Three" "Two \ Three" by (simp_all add: One_def Two_def Three_def Abs_three_inject) lemma three_cases: fixes x :: three obtains "x = One" | "x = Two" | "x = Three" by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject) (*<*)end(*>*) text \Note that such trivial constructions are better done with derived specification mechanisms such as @{command datatype}:\ (*<*)experiment begin(*>*) datatype three = One | Two | Three (*<*)end(*>*) text \This avoids re-doing basic definitions and proofs from the primitive @{command typedef} above.\ section \Functorial structure of types\ text \ \begin{matharray}{rcl} @{command_def (HOL) "functor"} & : & \local_theory \ proof(prove)\ \end{matharray} \<^rail>\ @@{command (HOL) functor} (@{syntax name} ':')? @{syntax term} \ \<^descr> @{command (HOL) "functor"}~\prefix: m\ allows proving and registering properties about the functorial structure of type constructors. These properties can then be used by other packages to deal with those type constructors in certain type constructions. Characteristic theorems are noted in the current local theory. By default, they are prefixed with the base name of the type constructor; alternatively, an explicit prefix can be given. The given term \m\ is considered as the \<^emph>\mapper\ for the corresponding type constructor and must conform to the following type pattern: \begin{matharray}{lll} \m\ & \::\ & \\\<^sub>1 \ \ \\<^sub>k \ (\<^vec>\\<^sub>n) t \ (\<^vec>\\<^sub>n) t\ \\ \end{matharray} where \t\ is the type constructor, \\<^vec>\\<^sub>n\ and \\<^vec>\\<^sub>n\ are distinct type variables free in the local theory and \\\<^sub>1\, \ldots, \\\<^sub>k\ is a subsequence of \\\<^sub>1 \ \\<^sub>1\, \\\<^sub>1 \ \\<^sub>1\, \ldots, \\\<^sub>n \ \\<^sub>n\, \\\<^sub>n \ \\<^sub>n\. \ section \Quotient types with lifting and transfer\ text \ The quotient package defines a new quotient type given a raw type and a partial equivalence relation (\secref{sec:quotient-type}). The package also historically includes automation for transporting definitions and theorems (\secref{sec:old-quotient}), but most of this automation was superseded by the Lifting (\secref{sec:lifting}) and Transfer (\secref{sec:transfer}) packages. \ subsection \Quotient type definition \label{sec:quotient-type}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "quotient_type"} & : & \local_theory \ proof(prove)\\\ \end{matharray} \<^rail>\ @@{command (HOL) quotient_type} @{syntax "overloaded"}? \ @{syntax typespec} @{syntax mixfix}? '=' quot_type \ quot_morphisms? quot_parametric? ; quot_type: @{syntax type} '/' ('partial' ':')? @{syntax term} ; quot_morphisms: @'morphisms' @{syntax name} @{syntax name} ; quot_parametric: @'parametric' @{syntax thm} \ \<^descr> @{command (HOL) "quotient_type"} defines a new quotient type \\\. The injection from a quotient type to a raw type is called \rep_\\ and its inverse \abs_\\, unless an explicit @{keyword (HOL) "morphisms"} specification provides alternative names.
@{command (HOL) "quotient_type"} requires the user to prove that the relation is an equivalence relation (predicate \equivp\), unless the user specifies explicitly \partial\ in which case the obligation is \part_equivp\. A quotient defined with \partial\ is weaker in the sense that less things can be proved automatically. The command internally proves a Quotient theorem and sets up the Lifting package by the command @{command (HOL) setup_lifting}. Thus the Lifting and Transfer packages can be used also with quotient types defined by @{command (HOL) "quotient_type"} without any extra set-up. The parametricity theorem for the equivalence relation R can be provided as an extra argument of the command and is passed to the corresponding internal call of @{command (HOL) setup_lifting}. This theorem allows the Lifting package to generate a stronger transfer rule for equality. \ subsection \Lifting package \label{sec:lifting}\ text \ The Lifting package allows users to lift terms of the raw type to the abstract type, which is a necessary step in building a library for an abstract type. Lifting defines a new constant by combining coercion functions (\<^term>\Abs\ and \<^term>\Rep\) with the raw term. It also proves an appropriate transfer rule for the Transfer (\secref{sec:transfer}) package and, if possible, an equation for the code generator. The Lifting package provides two main commands: @{command (HOL) "setup_lifting"} for initializing the package to work with a new type, and @{command (HOL) "lift_definition"} for lifting constants. The Lifting package works with all four kinds of type abstraction: type copies, subtypes, total quotients and partial quotients. Theoretical background can be found in @{cite "Huffman-Kuncar:2013:lifting_transfer"}. \begin{matharray}{rcl} @{command_def (HOL) "setup_lifting"} & : & \local_theory \ local_theory\\\ @{command_def (HOL) "lift_definition"} & : & \local_theory \ proof(prove)\\\ @{command_def (HOL) "lifting_forget"} & : & \local_theory \ local_theory\\\ @{command_def (HOL) "lifting_update"} & : & \local_theory \ local_theory\\\ @{command_def (HOL) "print_quot_maps"} & : & \context \\\\ @{command_def (HOL) "print_quotients"} & : & \context \\\\ @{attribute_def (HOL) "quot_map"} & : & \attribute\ \\ @{attribute_def (HOL) "relator_eq_onp"} & : & \attribute\ \\ @{attribute_def (HOL) "relator_mono"} & : & \attribute\ \\ @{attribute_def (HOL) "relator_distr"} & : & \attribute\ \\ @{attribute_def (HOL) "quot_del"} & : & \attribute\ \\ @{attribute_def (HOL) "lifting_restore"} & : & \attribute\ \\ \end{matharray} \<^rail>\ @@{command (HOL) setup_lifting} @{syntax thm} @{syntax thm}? \ (@'parametric' @{syntax thm})? ; @@{command (HOL) lift_definition} ('(' 'code_dt' ')')? \ @{syntax name} '::' @{syntax type} @{syntax mixfix}? 'is' @{syntax term} \ (@'parametric' (@{syntax thm}+))? ; @@{command (HOL) lifting_forget} @{syntax name} ; @@{command (HOL) lifting_update} @{syntax name} ; @@{attribute (HOL) lifting_restore} @{syntax thm} (@{syntax thm} @{syntax thm})? \ \<^descr> @{command (HOL) "setup_lifting"} Sets up the Lifting package to work with a user-defined type. The command supports two modes. \<^enum> The first one is a low-level mode when the user must provide as a first argument of @{command (HOL) "setup_lifting"} a quotient theorem \<^term>\Quotient R Abs Rep T\. The package configures a transfer rule for equality, a domain transfer rules and sets up the @{command_def (HOL) "lift_definition"} command to work with the abstract type. 
An optional theorem \<^term>\reflp R\, which certifies that the equivalence relation R is total, can be provided as a second argument. This allows the package to generate stronger transfer rules. Finally, the parametricity theorem for \<^term>\R\ can be provided as a third argument. This allows the package to generate a stronger transfer rule for equality. Users generally will not prove the \Quotient\ theorem manually for new types, as special commands exist to automate the process. \<^enum> When a new subtype is defined by @{command (HOL) typedef}, @{command (HOL) "setup_lifting"} can be used in its second mode, where only the \<^term>\type_definition\ theorem \<^term>\type_definition Rep Abs A\ is used as an argument of the command. The command internally proves the corresponding \<^term>\Quotient\ theorem and registers it with @{command (HOL) setup_lifting} using its first mode. For quotients, the command @{command (HOL) quotient_type} can be used. The command defines a new quotient type and similarly to the previous case, the corresponding Quotient theorem is proved and registered by @{command (HOL) setup_lifting}. \<^medskip> The command @{command (HOL) "setup_lifting"} also sets up the code generator for the new type. Later on, when a new constant is defined by @{command (HOL) "lift_definition"}, the Lifting package proves and registers a code equation (if there is one) for the new constant. \<^descr> @{command (HOL) "lift_definition"} \f :: \\ @{keyword (HOL) "is"} \t\ defines a new function \f\ with an abstract type \\\ in terms of a corresponding operation \t\ on a representation type. More formally, if \t :: \\, then the command builds a term \F\ as a corresponding combination of abstraction and representation functions such that \F :: \ \ \\ and defines \f \ F t\. The term \t\ does not necessarily have to be a constant; it can be any term. The command opens a proof and the user must discharge a respectfulness proof obligation. For a type copy, i.e.\ a typedef with \UNIV\, the obligation is discharged automatically. The proof goal is presented in a user-friendly, readable form. A respectfulness theorem in the standard format \f.rsp\ and a transfer rule \f.transfer\ for the Transfer package are generated by the package. The user can specify parametricity theorems for \t\ after the keyword @{keyword "parametric"}, which allows the command to generate parametric transfer rules for \f\. For each constant defined through trivial quotients (type copies or subtypes), \f.rep_eq\ is generated. The equation is a code certificate that defines \f\ using the representation function. For each constant, \f.abs_eq\ is generated. The equation is unconditional for total quotients. The equation defines \f\ using the abstraction function. \<^medskip> Integration with [@{attribute code} abstract]: For subtypes (e.g.\ corresponding to a datatype invariant, such as \<^typ>\'a dlist\), @{command (HOL) "lift_definition"} uses a code certificate theorem \f.rep_eq\ as a code equation. Because of a limitation of the code generator, \f.rep_eq\ cannot be used as a code equation if the subtype occurs inside the result type rather than at the top level (e.g.\ a function returning \<^typ>\'a dlist option\ vs. \<^typ>\'a dlist\). In this case, an extension of @{command (HOL) "lift_definition"} can be invoked by specifying the flag \code_dt\.
This extension enables code execution through a series of internal type and lifting definitions if the return type \\\ meets the following inductive conditions: \<^descr> \\\ is a type variable \<^descr> \\ = \\<^sub>1 \ \\<^sub>n \\, where \\\ is an abstract type constructor and \\\<^sub>1 \ \\<^sub>n\ do not contain abstract types (i.e.\ \<^typ>\int dlist\ is allowed whereas \<^typ>\int dlist dlist\ is not) \<^descr> \\ = \\<^sub>1 \ \\<^sub>n \\, where \\\ is a type constructor that was defined as a (co)datatype whose constructor argument types do not contain either non-free datatypes or the function type. Integration with [@{attribute code} equation]: For total quotients, @{command (HOL) "lift_definition"} uses \f.abs_eq\ as a code equation. \<^descr> @{command (HOL) lifting_forget} and @{command (HOL) lifting_update} These two commands serve for storing and deleting the set-up of the Lifting package and corresponding transfer rules defined by this package. This is useful for hiding type construction details of an abstract type once the construction is finished, while still allowing additions to this construction when this is necessary later. Whenever the Lifting package is set up with a new abstract type \\\ by @{command (HOL) "setup_lifting"}, the package defines a new bundle that is called \\.lifting\. This bundle already includes set-up for the Lifting package. The new transfer rules introduced by @{command (HOL) "lift_definition"} can be stored in the bundle by the command @{command (HOL) "lifting_update"} \\.lifting\. The command @{command (HOL) "lifting_forget"} \\.lifting\ deletes the set-up of the Lifting package for \\\ and deletes all the transfer rules that were introduced by @{command (HOL) "lift_definition"} using \\\ as an abstract type. The set-up stored in a bundle can be reintroduced by the Isar commands for including a bundle (@{command "include"}, @{keyword "includes"} and @{command "including"}). \<^descr> @{command (HOL) "print_quot_maps"} prints stored quotient map theorems. \<^descr> @{command (HOL) "print_quotients"} prints stored quotient theorems. \<^descr> @{attribute (HOL) quot_map} registers a quotient map theorem, a theorem showing how to ``lift'' quotients over type constructors. E.g.\ \<^term>\Quotient R Abs Rep T \ Quotient (rel_set R) (image Abs) (image Rep) (rel_set T)\. For examples see \<^file>\~~/src/HOL/Lifting_Set.thy\ or \<^file>\~~/src/HOL/Lifting.thy\. This property is proved automatically if the involved type is a BNF without dead variables. \<^descr> @{attribute (HOL) relator_eq_onp} registers a theorem that shows that a relator applied to an equality restricted by a predicate \<^term>\P\ (i.e.\ \<^term>\eq_onp P\) is equal to a predicator applied to \<^term>\P\. The combinator \<^const>\eq_onp\ is used for internal encoding of proper subtypes. Such theorems allow the package to hide \eq_onp\ from the user in the user-readable form of a respectfulness theorem. For examples see \<^file>\~~/src/HOL/Lifting_Set.thy\ or \<^file>\~~/src/HOL/Lifting.thy\. This property is proved automatically if the involved type is a BNF without dead variables. \<^descr> @{attribute (HOL) "relator_mono"} registers a property describing the monotonicity of a relator. E.g.\ \<^prop>\A \ B \ rel_set A \ rel_set B\. This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"} when a parametricity theorem for the raw term is specified and also for the reflexivity prover.
For examples see \<^file>\~~/src/HOL/Lifting_Set.thy\ or \<^file>\~~/src/HOL/Lifting.thy\. This property is proved automatically if the involved type is a BNF without dead variables. \<^descr> @{attribute (HOL) "relator_distr"} registers a property describing distributivity of relation composition over a relator. E.g.\ \rel_set R \\ rel_set S = rel_set (R \\ S)\. This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"} when a parametricity theorem for the raw term is specified. When this equality does not hold unconditionally (e.g.\ for the function type), the user can specify each direction separately and also register multiple theorems with different sets of assumptions. This attribute can be used only after the monotonicity property has been registered by @{attribute (HOL) "relator_mono"}. For examples see \<^file>\~~/src/HOL/Lifting_Set.thy\ or \<^file>\~~/src/HOL/Lifting.thy\. This property is proved automatically if the involved type is a BNF without dead variables. \<^descr> @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem from the Lifting infrastructure and thus de-registers the corresponding quotient. This effectively prevents @{command (HOL) lift_definition} from doing any lifting for the corresponding type. This attribute is meant for low-level manipulation of the set-up of the Lifting package; @{command (HOL) lifting_forget} is preferred for normal usage. \<^descr> @{attribute (HOL) lifting_restore} \Quotient_thm pcr_def pcr_cr_eq_thm\ registers the Quotient theorem \Quotient_thm\ in the Lifting infrastructure and thus sets up lifting for an abstract type \\\ (that is defined by \Quotient_thm\). Optional theorems \pcr_def\ and \pcr_cr_eq_thm\ can be specified to register the parametrized correspondence relation for \\\. E.g.\ for \<^typ>\'a dlist\, \pcr_def\ is \pcr_dlist A \ list_all2 A \\ cr_dlist\ and \pcr_cr_eq_thm\ is \pcr_dlist (=) = (=)\. This attribute is meant for low-level manipulation of the set-up of the Lifting package; using the bundle \\.lifting\ together with the commands @{command (HOL) lifting_forget} and @{command (HOL) lifting_update} is preferred for normal usage. \<^descr> Integration with the BNF package @{cite "isabelle-datatypes"}: As already mentioned, the theorems that are registered by the following attributes are proved and registered automatically if the involved type is a BNF without dead variables: @{attribute (HOL) quot_map}, @{attribute (HOL) relator_eq_onp}, @{attribute (HOL) "relator_mono"}, @{attribute (HOL) "relator_distr"}. The definition of a relator and predicator is also provided automatically. Moreover, if the BNF represents a datatype, simplification rules for a predicator are again proved automatically.
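\

subsubsection \<open>Example\<close>

text \<open>
  The following sketch illustrates the typical workflow of the packages
  described above. The names (\<open>my_int\<close>, \<open>pos\<close>, \<open>pos_plus\<close>) are ad-hoc, and
  the proofs are merely indicative of the arising proof obligations; the
  quotient construction follows the construction of the integers in theory
  \<^theory>\<open>HOL.Int\<close>. First, a quotient type is introduced by
  @{command (HOL) "quotient_type"}, which requires an equivalence proof:
\<close>

(*<*)experiment begin(*>*)

definition intrel :: "(nat \<times> nat) \<Rightarrow> (nat \<times> nat) \<Rightarrow> bool"
  where "intrel = (\<lambda>(x, y) (u, v). x + v = u + y)"

lemma intrel_iff [simp]: "intrel (x, y) (u, v) \<longleftrightarrow> x + v = u + y"
  by (simp add: intrel_def)

quotient_type my_int = "nat \<times> nat> " / "intrel"
  morphisms Rep_my_int Abs_my_int
proof (rule equivpI)
  show "reflp intrel" by (auto simp: reflp_def)
  show "symp intrel" by (auto simp: symp_def)
  show "transp intrel" by (auto simp: transp_def)
qed

(*<*)end(*>*)

text \<open>
  Second, for a subtype introduced by @{command (HOL) typedef}, the Lifting
  package is initialized with @{command (HOL) setup_lifting}; a constant is
  then lifted by @{command (HOL) "lift_definition"} (discharging the
  respectfulness obligation), and a property is moved between the raw and the
  abstract level by the @{method (HOL) transfer} method of
  \secref{sec:transfer}:
\<close>

(*<*)experiment begin(*>*)

typedef pos = "{n::nat. 0 < n}"
  by (rule exI [of _ 1]) simp

setup_lifting type_definition_pos

lift_definition pos_plus :: "pos \<Rightarrow> pos \<Rightarrow> pos"
  is "(+) :: nat \<Rightarrow> nat \<Rightarrow> nat"
  by auto  \<comment> \<open>respectfulness: the sum of two positive numbers is positive\<close>

lemma "pos_plus p q = pos_plus q p"
  by transfer (simp add: ac_simps)

(*<*)end(*>*)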
subsection \Transfer package \label{sec:transfer}\ text \ \begin{matharray}{rcl} @{method_def (HOL) "transfer"} & : & \method\ \\ @{method_def (HOL) "transfer'"} & : & \method\ \\ @{method_def (HOL) "transfer_prover"} & : & \method\ \\ @{attribute_def (HOL) "Transfer.transferred"} & : & \attribute\ \\ @{attribute_def (HOL) "untransferred"} & : & \attribute\ \\ @{method_def (HOL) "transfer_start"} & : & \method\ \\ @{method_def (HOL) "transfer_prover_start"} & : & \method\ \\ @{method_def (HOL) "transfer_step"} & : & \method\ \\ @{method_def (HOL) "transfer_end"} & : & \method\ \\ @{method_def (HOL) "transfer_prover_end"} & : & \method\ \\ @{attribute_def (HOL) "transfer_rule"} & : & \attribute\ \\ @{attribute_def (HOL) "transfer_domain_rule"} & : & \attribute\ \\ @{attribute_def (HOL) "relator_eq"} & : & \attribute\ \\ @{attribute_def (HOL) "relator_domain"} & : & \attribute\ \\ \end{matharray} \<^descr> @{method (HOL) "transfer"} method replaces the current subgoal with a logically equivalent one that uses different types and constants. The replacement of types and constants is guided by the database of transfer rules. Goals are generalized over all free variables by default; this is necessary for variables whose types change, but can be overridden for specific variables with e.g.\ \transfer fixing: x y z\. \<^descr> @{method (HOL) "transfer'"} is a variant of @{method (HOL) transfer} that allows replacing a subgoal with one that is logically stronger (rather than equivalent). For example, a subgoal involving equality on a quotient type could be replaced with a subgoal involving equality (instead of the corresponding equivalence relation) on the underlying raw type. \<^descr> @{method (HOL) "transfer_prover"} method assists with proving a transfer rule for a new constant, provided the constant is defined in terms of other constants that already have transfer rules. It should be applied after unfolding the constant definitions. \<^descr> @{method (HOL) "transfer_start"}, @{method (HOL) "transfer_step"}, @{method (HOL) "transfer_end"}, @{method (HOL) "transfer_prover_start"} and @{method (HOL) "transfer_prover_end"} methods are meant to be used for debugging of @{method (HOL) "transfer"} and @{method (HOL) "transfer_prover"}, which we can decompose as follows: @{method (HOL) "transfer"} = (@{method (HOL) "transfer_start"}, @{method (HOL) "transfer_step"}+, @{method (HOL) "transfer_end"}) and @{method (HOL) "transfer_prover"} = (@{method (HOL) "transfer_prover_start"}, @{method (HOL) "transfer_step"}+, @{method (HOL) "transfer_prover_end"}). For usage examples see \<^file>\~~/src/HOL/ex/Transfer_Debug.thy\. \<^descr> @{attribute (HOL) "untransferred"} proves the same equivalent theorem as @{method (HOL) "transfer"} internally does. \<^descr> @{attribute (HOL) Transfer.transferred} works in the opposite direction to @{method (HOL) "transfer'"}. E.g.\ given the transfer relation \ZN x n \ (x = int n)\, corresponding transfer rules and the theorem \\x::int \ {0..}. x < x + 1\, the attribute would prove \\n::nat. n < n + 1\. The attribute is still in an experimental phase of development. \<^descr> @{attribute (HOL) "transfer_rule"} attribute maintains a collection of transfer rules, which relate constants at two different types. Typical transfer rules may relate different type instances of the same polymorphic constant, or they may relate an operation on a raw type to a corresponding operation on an abstract type (quotient or subtype).
For example: \((A ===> B) ===> list_all2 A ===> list_all2 B) map map\ \\ \(cr_int ===> cr_int ===> cr_int) (\(x,y) (u,v). (x+u, y+v)) plus\ Lemmas involving predicates on relations can also be registered using the same attribute. For example: \bi_unique A \ (list_all2 A ===> (=)) distinct distinct\ \\ \\bi_unique A; bi_unique B\ \ bi_unique (rel_prod A B)\ Preservation of predicates on relations (\bi_unique, bi_total, right_unique, right_total, left_unique, left_total\) with respect to a relator is proved automatically if the involved type is a BNF @{cite "isabelle-datatypes"} without dead variables. \<^descr> @{attribute (HOL) "transfer_domain_rule"} attribute maintains a collection of rules, which specify a domain of a transfer relation by a predicate. E.g.\ given the transfer relation \ZN x n \ (x = int n)\, one can register the following transfer domain rule: \Domainp ZN = (\x. x \ 0)\. The rules allow the package to produce more readable transferred goals, e.g.\ when quantifiers are transferred. \<^descr> @{attribute (HOL) relator_eq} attribute collects identity laws for relators of various type constructors, e.g.\ \<^term>\rel_set (=) = (=)\. The @{method (HOL) transfer} method uses these lemmas to infer transfer rules for non-polymorphic constants on the fly. For examples see \<^file>\~~/src/HOL/Lifting_Set.thy\ or \<^file>\~~/src/HOL/Lifting.thy\. This property is proved automatically if the involved type is a BNF without dead variables. \<^descr> @{attribute_def (HOL) "relator_domain"} attribute collects rules describing domains of relators by predicators. E.g.\ \<^term>\Domainp (rel_set T) = (\A. Ball A (Domainp T))\. This allows the package to lift transfer domain rules through type constructors. For examples see \<^file>\~~/src/HOL/Lifting_Set.thy\ or \<^file>\~~/src/HOL/Lifting.thy\. This property is proved automatically if the involved type is a BNF without dead variables. Theoretical background can be found in @{cite "Huffman-Kuncar:2013:lifting_transfer"}. \ subsection \Old-style definitions for quotient types \label{sec:old-quotient}\ text \ \begin{matharray}{rcl} @{command_def (HOL) "quotient_definition"} & : & \local_theory \ proof(prove)\\\ @{command_def (HOL) "print_quotmapsQ3"} & : & \context \\\\ @{command_def (HOL) "print_quotientsQ3"} & : & \context \\\\ @{command_def (HOL) "print_quotconsts"} & : & \context \\\\ @{method_def (HOL) "lifting"} & : & \method\ \\ @{method_def (HOL) "lifting_setup"} & : & \method\ \\ @{method_def (HOL) "descending"} & : & \method\ \\ @{method_def (HOL) "descending_setup"} & : & \method\ \\ @{method_def (HOL) "partiality_descending"} & : & \method\ \\ @{method_def (HOL) "partiality_descending_setup"} & : & \method\ \\ @{method_def (HOL) "regularize"} & : & \method\ \\ @{method_def (HOL) "injection"} & : & \method\ \\ @{method_def (HOL) "cleaning"} & : & \method\ \\ @{attribute_def (HOL) "quot_thm"} & : & \attribute\ \\ @{attribute_def (HOL) "quot_lifted"} & : & \attribute\ \\ @{attribute_def (HOL) "quot_respect"} & : & \attribute\ \\ @{attribute_def (HOL) "quot_preserve"} & : & \attribute\ \\ \end{matharray} \<^rail>\ @@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \ @{syntax term} 'is' @{syntax term} ; constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}? ; @@{method (HOL) lifting} @{syntax thms}? ; @@{method (HOL) lifting_setup} @{syntax thms}? \ \<^descr> @{command (HOL) "quotient_definition"} defines a constant on the quotient type.
\<^descr> @{command (HOL) "print_quotmapsQ3"} prints quotient map functions. \<^descr> @{command (HOL) "print_quotientsQ3"} prints quotients. \<^descr> @{command (HOL) "print_quotconsts"} prints quotient constants. \<^descr> @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"} methods match the current goal with the given raw theorem to be lifted producing three new subgoals: regularization, injection and cleaning subgoals. @{method (HOL) "lifting"} tries to apply the heuristics for automatically solving these three subgoals and leaves only the subgoals unsolved by the heuristics to the user as opposed to @{method (HOL) "lifting_setup"} which leaves the three subgoals unsolved. \<^descr> @{method (HOL) "descending"} and @{method (HOL) "descending_setup"} try to guess a raw statement that would lift to the current subgoal. Such statement is assumed as a new subgoal and @{method (HOL) "descending"} continues in the same way as @{method (HOL) "lifting"} does. @{method (HOL) "descending"} tries to solve the arising regularization, injection and cleaning subgoals with the analogous method @{method (HOL) "descending_setup"} which leaves the four unsolved subgoals. \<^descr> @{method (HOL) "partiality_descending"} finds the regularized theorem that would lift to the current subgoal, lifts it and leaves as a subgoal. This method can be used with partial equivalence quotients where the non regularized statements would not be true. @{method (HOL) "partiality_descending_setup"} leaves the injection and cleaning subgoals unchanged. \<^descr> @{method (HOL) "regularize"} applies the regularization heuristics to the current subgoal. \<^descr> @{method (HOL) "injection"} applies the injection heuristics to the current goal using the stored quotient respectfulness theorems. \<^descr> @{method (HOL) "cleaning"} applies the injection cleaning heuristics to the current subgoal using the stored quotient preservation theorems. \<^descr> @{attribute (HOL) quot_lifted} attribute tries to automatically transport the theorem to the quotient type. The attribute uses all the defined quotients types and quotient constants often producing undesired results or theorems that cannot be lifted. \<^descr> @{attribute (HOL) quot_respect} and @{attribute (HOL) quot_preserve} attributes declare a theorem as a respectfulness and preservation theorem respectively. These are stored in the local theory store and used by the @{method (HOL) "injection"} and @{method (HOL) "cleaning"} methods respectively. \<^descr> @{attribute (HOL) quot_thm} declares that a certain theorem is a quotient extension theorem. Quotient extension theorems allow for quotienting inside container types. Given a polymorphic type that serves as a container, a map function defined for this container using @{command (HOL) "functor"} and a relation map defined for for the container type, the quotient extension theorem should be \<^term>\Quotient3 R Abs Rep \ Quotient3 (rel_map R) (map Abs) (map Rep)\. Quotient extension theorems are stored in a database and are used all the steps of lifting theorems. \ chapter \Proof tools\ section \Proving propositions\ text \ In addition to the standard proof methods, a number of diagnosis tools search for proofs and provide an Isar proof snippet on success. These tools are available via the following commands. 
\begin{matharray}{rcl} @{command_def (HOL) "solve_direct"}\\<^sup>*\ & : & \proof \\ \\ @{command_def (HOL) "try"}\\<^sup>*\ & : & \proof \\ \\ @{command_def (HOL) "try0"}\\<^sup>*\ & : & \proof \\ \\ @{command_def (HOL) "sledgehammer"}\\<^sup>*\ & : & \proof \\ \\ @{command_def (HOL) "sledgehammer_params"} & : & \theory \ theory\ \end{matharray} \<^rail>\ @@{command (HOL) try} ; @@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thms} ) + ) ? @{syntax nat}? ; @@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}? ; @@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? ) ; args: ( @{syntax name} '=' value + ',' ) ; facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thms} ) + ) ? ')' \ % FIXME check args "value" \<^descr> @{command (HOL) "solve_direct"} checks whether the current subgoals can be solved directly by an existing theorem. Duplicate lemmas can be detected in this way. \<^descr> @{command (HOL) "try0"} attempts to prove a subgoal using a combination of standard proof methods (@{method auto}, @{method simp}, @{method blast}, etc.). Additional facts supplied via \simp:\, \intro:\, \elim:\, and \dest:\ are passed to the appropriate proof methods. \<^descr> @{command (HOL) "try"} attempts to prove or disprove a subgoal using a combination of provers and disprovers (@{command (HOL) "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL) "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL) "nitpick"}). \<^descr> @{command (HOL) "sledgehammer"} attempts to prove a subgoal using external automatic provers (resolution provers and SMT solvers). See the Sledgehammer manual @{cite "isabelle-sledgehammer"} for details. \<^descr> @{command (HOL) "sledgehammer_params"} changes @{command (HOL) "sledgehammer"} configuration options persistently. \ section \Checking and refuting propositions\ text \ Identifying incorrect propositions usually involves evaluation of particular assignments and systematic counterexample search. This is supported by the following commands. \begin{matharray}{rcl} @{command_def (HOL) "value"}\\<^sup>*\ & : & \context \\ \\ @{command_def (HOL) "values"}\\<^sup>*\ & : & \context \\ \\ @{command_def (HOL) "quickcheck"}\\<^sup>*\ & : & \proof \\ \\ @{command_def (HOL) "nitpick"}\\<^sup>*\ & : & \proof \\ \\ @{command_def (HOL) "quickcheck_params"} & : & \theory \ theory\ \\ @{command_def (HOL) "nitpick_params"} & : & \theory \ theory\ \\ @{command_def (HOL) "quickcheck_generator"} & : & \theory \ theory\ \\ @{command_def (HOL) "find_unused_assms"} & : & \context \\ \end{matharray} \<^rail>\ @@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term} ; @@{command (HOL) values} modes? @{syntax nat}? @{syntax term} ; (@@{command (HOL) quickcheck} | @@{command (HOL) nitpick}) ( '[' args ']' )? @{syntax nat}? ; (@@{command (HOL) quickcheck_params} | @@{command (HOL) nitpick_params}) ( '[' args ']' )? ; @@{command (HOL) quickcheck_generator} @{syntax name} \ 'operations:' ( @{syntax term} +) ; @@{command (HOL) find_unused_assms} @{syntax name}? ; modes: '(' (@{syntax name} +) ')' ; args: ( @{syntax name} '=' value + ',' ) \ % FIXME check "value" \<^descr> @{command (HOL) "value"}~\t\ evaluates and prints a term; optionally \modes\ can be specified, which are appended to the current print mode; see \secref{sec:print-modes}. Evaluation is tried first using ML, falling back to normalization by evaluation if this fails. 
Alternatively, a specific evaluator can be selected using square brackets; typical evaluators use the current set of code equations to normalize and include \simp\ for fully symbolic evaluation using the simplifier, \nbe\ for \<^emph>\normalization by evaluation\ and \code\ for code generation in SML. \<^descr> @{command (HOL) "values"}~\t\ enumerates a set comprehension by evaluation and prints its values up to the given number of solutions; optionally \modes\ can be specified, which are appended to the current print mode; see \secref{sec:print-modes}. \<^descr> @{command (HOL) "quickcheck"} tests the current goal for counterexamples using a series of assignments for its free variables; by default the first subgoal is tested, another one can be selected explicitly using an optional goal index. Assignments can be chosen by exhausting the search space up to a given size, by using a fixed number of random assignments in the search space, or by exploring the search space symbolically using narrowing. By default, quickcheck uses exhaustive testing. A number of configuration options are supported for @{command (HOL) "quickcheck"}, notably: \<^descr>[\tester\] specifies which testing approach to apply. There are three testers, \exhaustive\, \random\, and \narrowing\. An unknown configuration option is treated as an argument to tester, making \tester =\ optional. When multiple testers are given, these are applied in parallel. If no tester is specified, quickcheck uses the testers that are set active, i.e.\ configurations @{attribute quickcheck_exhaustive_active}, @{attribute quickcheck_random_active}, @{attribute quickcheck_narrowing_active} are set to true. \<^descr>[\size\] specifies the maximum size of the search space for assignment values. \<^descr>[\genuine_only\] sets quickcheck only to return genuine counterexamples, but no potentially spurious counterexamples due to underspecified functions. \<^descr>[\abort_potential\] sets quickcheck to abort once it has found a potentially spurious counterexample and not to continue to search for a further genuine counterexample. For this option to be effective, the \genuine_only\ option must be set to false. \<^descr>[\eval\] takes a term or a list of terms and evaluates these terms under the variable assignment found by quickcheck. This option is currently only supported by the default (exhaustive) tester. \<^descr>[\iterations\] sets how many sets of assignments are generated for each particular size. \<^descr>[\no_assms\] specifies whether assumptions in structured proofs should be ignored. \<^descr>[\locale\] specifies how to process conjectures in a locale context, i.e.\ they can be interpreted or expanded. The option is a whitespace-separated list of the two words \interpret\ and \expand\. The list determines the order in which they are employed. The default setting is to first use interpretations and then test the expanded conjecture. The option is only provided as an attribute declaration, but not as a parameter to the command. \<^descr>[\timeout\] sets the time limit in seconds. \<^descr>[\default_type\] sets the type(s) generally used to instantiate type variables. \<^descr>[\report\] if set, quickcheck reports how many tests fulfilled the preconditions. \<^descr>[\use_subtype\] if set, quickcheck automatically lifts conjectures to registered subtypes if possible, and tests the lifted conjecture. \<^descr>[\quiet\] if set, quickcheck does not output anything while testing.
\<^descr>[\verbose\] if set, quickcheck informs about the current size and cardinality while testing. \<^descr>[\expect\] can be used to check if the user's expectation was met (\no_expectation\, \no_counterexample\, or \counterexample\). These options can be given within square brackets. Using the following type classes, the testers generate values and convert them back into Isabelle terms for displaying counterexamples. \<^descr>[\exhaustive\] The parameters of the type classes \<^class>\exhaustive\ and \<^class>\full_exhaustive\ implement the testing. They take a testing function as a parameter, which takes a value of type \<^typ>\'a\ and optionally produces a counterexample, and a size parameter for the test values. In \<^class>\full_exhaustive\, the testing function parameter additionally expects a lazy term reconstruction in the type \<^typ>\Code_Evaluation.term\ of the tested value. The canonical implementation for \exhaustive\ testers calls the given testing function on all values up to the given size and stops as soon as a counterexample is found. \<^descr>[\random\] The operation \<^const>\Quickcheck_Random.random\ of the type class \<^class>\random\ generates a pseudo-random value of the given size and a lazy term reconstruction of the value in the type \<^typ>\Code_Evaluation.term\. A pseudo-randomness generator is defined in theory \<^theory>\HOL.Random\. \<^descr>[\narrowing\] implements Haskell's Lazy Smallcheck @{cite "runciman-naylor-lindblad"} using the type classes \<^class>\narrowing\ and \<^class>\partial_term_of\. Variables in the current goal are initially represented as symbolic variables. If the execution of the goal tries to evaluate one of them, the test engine replaces it with refinements provided by \<^const>\narrowing\. Narrowing views every value as a sum-of-products which is expressed using the operations \<^const>\Quickcheck_Narrowing.cons\ (embedding a value), \<^const>\Quickcheck_Narrowing.apply\ (product) and \<^const>\Quickcheck_Narrowing.sum\ (sum). The refinement should enable further evaluation of the goal. For example, \<^const>\narrowing\ for the list type \<^typ>\'a :: narrowing list\ can be recursively defined as \<^term>\Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons []) (Quickcheck_Narrowing.apply (Quickcheck_Narrowing.apply (Quickcheck_Narrowing.cons (#)) narrowing) narrowing)\. If a symbolic variable of type \<^typ>\_ list\ is evaluated, it is replaced by (i)~the empty list \<^term>\[]\ and (ii)~a non-empty list whose head and tail can then be recursively refined if needed. To reconstruct counterexamples, the operation \<^const>\partial_term_of\ transforms \narrowing\'s deep representation of terms to the type \<^typ>\Code_Evaluation.term\. The deep representation models symbolic variables as \<^const>\Quickcheck_Narrowing.Narrowing_variable\, which are normally converted to \<^const>\Code_Evaluation.Free\, and refined values as \<^term>\Quickcheck_Narrowing.Narrowing_constructor i args\, where \<^term>\i :: integer\ denotes the index in the sum of refinements. In the above example for lists, \<^term>\0\ corresponds to \<^term>\[]\ and \<^term>\1\ to \<^term>\(#)\. The command @{command (HOL) "code_datatype"} sets up \<^const>\partial_term_of\ such that the \<^term>\i\-th refinement is interpreted as the \<^term>\i\-th constructor, but it does not ensure consistency with \<^const>\narrowing\. \<^descr> @{command (HOL) "quickcheck_params"} changes @{command (HOL) "quickcheck"} configuration options persistently.
\<^descr> @{command (HOL) "quickcheck_generator"} creates random and exhaustive value generators for a given type and operations. It generates values by using the operations as if they were constructors of that type. \<^descr> @{command (HOL) "nitpick"} tests the current goal for counterexamples using a reduction to first-order relational logic. See the Nitpick manual @{cite "isabelle-nitpick"} for details. \<^descr> @{command (HOL) "nitpick_params"} changes @{command (HOL) "nitpick"} configuration options persistently. \<^descr> @{command (HOL) "find_unused_assms"} finds potentially superfluous assumptions in theorems using quickcheck. It takes the theory name to be checked for superfluous assumptions as optional argument. If not provided, it checks the current theory. Options to the internal quickcheck invocations can be changed with common configuration declarations. \ section \Coercive subtyping\ text \ \begin{matharray}{rcl} @{attribute_def (HOL) coercion} & : & \attribute\ \\ @{attribute_def (HOL) coercion_delete} & : & \attribute\ \\ @{attribute_def (HOL) coercion_enabled} & : & \attribute\ \\ @{attribute_def (HOL) coercion_map} & : & \attribute\ \\ @{attribute_def (HOL) coercion_args} & : & \attribute\ \\ \end{matharray} Coercive subtyping allows the user to omit explicit type conversions, also called \<^emph>\coercions\. Type inference will add them as necessary when parsing a term. See @{cite "traytel-berghofer-nipkow-2011"} for details. \<^rail>\ @@{attribute (HOL) coercion} (@{syntax term}) ; @@{attribute (HOL) coercion_delete} (@{syntax term}) ; @@{attribute (HOL) coercion_map} (@{syntax term}) ; @@{attribute (HOL) coercion_args} (@{syntax const}) (('+' | '0' | '-')+) \ \<^descr> @{attribute (HOL) "coercion"}~\f\ registers a new coercion function \f :: \\<^sub>1 \ \\<^sub>2\ where \\\<^sub>1\ and \\\<^sub>2\ are type constructors without arguments. Coercions are composed by the inference algorithm if needed. Note that the type inference algorithm is complete only if the registered coercions form a lattice. \<^descr> @{attribute (HOL) "coercion_delete"}~\f\ deletes a preceding declaration (using @{attribute (HOL) "coercion"}) of the function \f :: \\<^sub>1 \ \\<^sub>2\ as a coercion. \<^descr> @{attribute (HOL) "coercion_map"}~\map\ registers a new map function to lift coercions through type constructors. The function \map\ must conform to the following type pattern \begin{matharray}{lll} \map\ & \::\ & \f\<^sub>1 \ \ \ f\<^sub>n \ (\\<^sub>1, \, \\<^sub>n) t \ (\\<^sub>1, \, \\<^sub>n) t\ \\ \end{matharray} where \t\ is a type constructor and \f\<^sub>i\ is of type \\\<^sub>i \ \\<^sub>i\ or \\\<^sub>i \ \\<^sub>i\. Registering a map function overwrites any existing map function for this particular type constructor. \<^descr> @{attribute (HOL) "coercion_args"} can be used to disallow coercions to be inserted in certain positions in a term. For example, given the constant \c :: \\<^sub>1 \ \\<^sub>2 \ \\<^sub>3 \ \\<^sub>4\ and the list of policies \- + 0\ as arguments, coercions will not be inserted in the first argument of \c\ (policy \-\); they may be inserted in the second argument (policy \+\) even if the constant \c\ itself is in a position where coercions are disallowed; the third argument inherits the allowance of coercsion insertion from the position of the constant \c\ (policy \0\). 
The standard usage of policies is the definition of syntactic constructs (usually extralogical, i.e., processed and stripped during type inference) that should not be destroyed by the insertion of coercions (see, for example, the setup for the case syntax in \<^theory>\HOL.Ctr_Sugar\). \<^descr> @{attribute (HOL) "coercion_enabled"} enables the coercion inference algorithm. \ section \Arithmetic proof support\ text \ \begin{matharray}{rcl} @{method_def (HOL) arith} & : & \method\ \\ @{attribute_def (HOL) arith} & : & \attribute\ \\ @{attribute_def (HOL) arith_split} & : & \attribute\ \\ \end{matharray} \<^descr> @{method (HOL) arith} decides linear arithmetic problems (on types \nat\, \int\, \real\). Any current facts are inserted into the goal before running the procedure. \<^descr> @{attribute (HOL) arith} declares facts that are supplied to the arithmetic provers implicitly. \<^descr> @{attribute (HOL) arith_split} attribute declares case split rules to be expanded before @{method (HOL) arith} is invoked. Note that a simpler (but faster) arithmetic prover is already invoked by the Simplifier. \ section \Intuitionistic proof search\ text \ \begin{matharray}{rcl} @{method_def (HOL) iprover} & : & \method\ \\ \end{matharray} \<^rail>\ @@{method (HOL) iprover} (@{syntax rulemod} *) \ \<^descr> @{method (HOL) iprover} performs intuitionistic proof search, depending on specifically declared rules from the context, or given as explicit arguments. Chained facts are inserted into the goal before commencing proof search. Rules need to be classified as @{attribute (Pure) intro}, @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the ``\!\'' indicator refers to ``safe'' rules, which may be applied aggressively (without considering back-tracking later). Rules declared with ``\?\'' are ignored in proof search (the single-step @{method (Pure) rule} method still observes these). An explicit weight annotation may be given as well; otherwise the number of rule premises will be taken into account here. \ section \Model Elimination and Resolution\ text \ \begin{matharray}{rcl} @{method_def (HOL) "meson"} & : & \method\ \\ @{method_def (HOL) "metis"} & : & \method\ \\ \end{matharray} \<^rail>\ @@{method (HOL) meson} @{syntax thms}? ; @@{method (HOL) metis} ('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')? @{syntax thms}? \ \<^descr> @{method (HOL) meson} implements Loveland's model elimination procedure @{cite "loveland-78"}. See \<^file>\~~/src/HOL/ex/Meson_Test.thy\ for examples. \<^descr> @{method (HOL) metis} combines ordered resolution and ordered paramodulation to find first-order (or mildly higher-order) proofs. The first optional argument specifies a type encoding; see the Sledgehammer manual @{cite "isabelle-sledgehammer"} for details. The directory \<^dir>\~~/src/HOL/Metis_Examples\ contains several small theories developed to a large extent using @{method (HOL) metis}. \ section \Algebraic reasoning via Gr\"obner bases\ text \ \begin{matharray}{rcl} @{method_def (HOL) "algebra"} & : & \method\ \\ @{attribute_def (HOL) algebra} & : & \attribute\ \\ \end{matharray} \<^rail>\ @@{method (HOL) algebra} ('add' ':' @{syntax thms})? ('del' ':' @{syntax thms})? ; @@{attribute (HOL) algebra} (() | 'add' | 'del') \ \<^descr> @{method (HOL) algebra} performs algebraic reasoning via Gr\"obner bases, see also @{cite "Chaieb-Wenzel:2007"} and @{cite \\S3.2\ "Chaieb-thesis"}.
The method deals with two main classes of problems: \<^enum> Universal problems over multivariate polynomials in a (semi)-ring/field/idom; the capabilities of the method are augmented according to properties of these structures. For this problem class the method is only complete for algebraically closed fields, since the underlying method is based on Hilbert's Nullstellensatz, where the equivalence only holds for algebraically closed fields. The problems can contain equations \p = 0\ or inequations \q \ 0\ anywhere within a universal problem statement. \<^enum> All-exists problems of the following restricted (but useful) form: @{text [display] "\x\<^sub>1 \ x\<^sub>n. e\<^sub>1(x\<^sub>1, \, x\<^sub>n) = 0 \ \ \ e\<^sub>m(x\<^sub>1, \, x\<^sub>n) = 0 \ (\y\<^sub>1 \ y\<^sub>k. p\<^sub>1\<^sub>1(x\<^sub>1, \, x\<^sub>n) * y\<^sub>1 + \ + p\<^sub>1\<^sub>k(x\<^sub>1, \, x\<^sub>n) * y\<^sub>k = 0 \ \ \ p\<^sub>t\<^sub>1(x\<^sub>1, \, x\<^sub>n) * y\<^sub>1 + \ + p\<^sub>t\<^sub>k(x\<^sub>1, \, x\<^sub>n) * y\<^sub>k = 0)"} Here \e\<^sub>1, \, e\<^sub>n\ and the \p\<^sub>i\<^sub>j\ are multivariate polynomials only in the variables mentioned as arguments. The proof method is preceded by a simplification step, which may be modified by using the form \(algebra add: ths\<^sub>1 del: ths\<^sub>2)\. This acts like declarations for the Simplifier (\secref{sec:simplifier}) on a private simpset for this tool. \<^descr> @{attribute algebra} (as attribute) manages the default collection of pre-simplification rules of the above proof method. \ subsubsection \Example\ text \ The subsequent example is from geometry: collinearity is invariant under rotation. \ (*<*)experiment begin(*>*) type_synonym point = "int \ int" fun collinear :: "point \ point \ point \ bool" where "collinear (Ax, Ay) (Bx, By) (Cx, Cy) \ (Ax - Bx) * (By - Cy) = (Ay - By) * (Bx - Cx)" lemma collinear_inv_rotation: assumes "collinear (Ax, Ay) (Bx, By) (Cx, Cy)" and "c\<^sup>2 + s\<^sup>2 = 1" shows "collinear (Ax * c - Ay * s, Ay * c + Ax * s) (Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)" using assms by (algebra add: collinear.simps) (*<*)end(*>*) text \ See also \<^file>\~~/src/HOL/Examples/Groebner_Examples.thy\. \ section \Coherent Logic\ text \ \begin{matharray}{rcl} @{method_def (HOL) "coherent"} & : & \method\ \\ \end{matharray} \<^rail>\ @@{method (HOL) coherent} @{syntax thms}? \ \<^descr> @{method (HOL) coherent} solves problems of \<^emph>\Coherent Logic\ @{cite "Bezem-Coquand:2005"}, which covers applications in confluence theory, lattice theory and projective geometry. See \<^file>\~~/src/HOL/Examples/Coherent.thy\ for some examples. \ section \Unstructured case analysis and induction \label{sec:hol-induct-tac}\ text \ The following tools of Isabelle/HOL support case analysis and induction in unstructured tactic scripts; see also \secref{sec:cases-induct} for proper Isar versions of similar ideas. \begin{matharray}{rcl} @{method_def (HOL) case_tac}\\<^sup>*\ & : & \method\ \\ @{method_def (HOL) induct_tac}\\<^sup>*\ & : & \method\ \\ @{method_def (HOL) ind_cases}\\<^sup>*\ & : & \method\ \\ @{command_def (HOL) "inductive_cases"}\\<^sup>*\ & : & \local_theory \ local_theory\ \\ \end{matharray} \<^rail>\ @@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule? ; @@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule? ; @@{method (HOL) ind_cases} (@{syntax prop}+) @{syntax for_fixes} ; @@{command (HOL) inductive_cases} (@{syntax thmdecl}?
(@{syntax prop}+) + @'and') ; rule: 'rule' ':' @{syntax thm} \ \<^descr> @{method (HOL) case_tac} and @{method (HOL) induct_tac} make it possible to reason about inductive types. Rules are selected according to the declarations by the @{attribute cases} and @{attribute induct} attributes, cf.\ \secref{sec:cases-induct}. The @{command (HOL) datatype} package already takes care of this. These unstructured tactics feature both goal addressing and dynamic instantiation. Note that named rule cases are \<^emph>\not\ provided as would be by the proper @{method cases} and @{method induct} proof methods (see \secref{sec:cases-induct}). Unlike the @{method induct} method, @{method induct_tac} does not handle structured rule statements, only the compact object-logic conclusion of the subgoal being addressed. \<^descr> @{method (HOL) ind_cases} and @{command (HOL) "inductive_cases"} provide an interface to the internal \<^ML_text>\mk_cases\ operation. Rules are simplified in an unrestricted forward manner. While @{method (HOL) ind_cases} is a proof method to apply the result immediately as elimination rules, @{command (HOL) "inductive_cases"} provides case split theorems at the theory level for later use. The @{keyword "for"} argument of the @{method (HOL) ind_cases} method makes it possible to specify a list of variables that should be generalized before applying the resulting rule. \ section \Adhoc tuples\ text \ \begin{matharray}{rcl} @{attribute_def (HOL) split_format}\\<^sup>*\ & : & \attribute\ \\ \end{matharray} \<^rail>\ @@{attribute (HOL) split_format} ('(' 'complete' ')')? \ \<^descr> @{attribute (HOL) split_format}\ \(complete)\ causes arguments in function applications to be represented canonically according to their tuple type structure. Note that this operation tends to invent funny names for new local parameters introduced. \ chapter \Executable code \label{ch:export-code}\ text \ For validation purposes, it is often useful to \<^emph>\execute\ specifications. In principle, execution could be simulated by Isabelle's inference kernel, i.e.\ by a combination of resolution and simplification. Unfortunately, this approach is rather inefficient. A more efficient way of executing specifications is to translate them into a functional programming language such as ML. Isabelle provides a generic framework to support code generation from executable specifications. Isabelle/HOL instantiates these mechanisms in a way that is amenable to end-user applications. Code can be generated for functional programs (including overloading using type classes) targeting SML @{cite SML}, OCaml @{cite OCaml}, Haskell @{cite "haskell-revised-report"} and Scala @{cite "scala-overview-tech-report"}. Conceptually, code generation is split up into three steps: \<^emph>\selection\ of code theorems, \<^emph>\translation\ into an abstract executable view and \<^emph>\serialization\ to a specific \<^emph>\target language\. Inductive specifications can be executed using the predicate compiler which operates within HOL. See @{cite "isabelle-codegen"} for an introduction.
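\

text \<open>
  As a first impression, the following minimal sketch exports code for a
  simple executable function; the constant \<open>double\<close> and the module name
  \<open>Example\<close> are ad-hoc. The @{command (HOL) "export_code"} command and its
  options are described in detail below.
\<close>

(*<*)experiment begin(*>*)

fun double :: "nat \<Rightarrow> nat"
  where "double n = n + n"

export_code double in SML module_name Example

(*<*)end(*>*)

text \<open>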
\begin{matharray}{rcl} @{command_def (HOL) "export_code"}\\<^sup>*\ & : & \local_theory \ local_theory\ \\ @{attribute_def (HOL) code} & : & \attribute\ \\ @{command_def (HOL) "code_datatype"} & : & \theory \ theory\ \\ @{command_def (HOL) "print_codesetup"}\\<^sup>*\ & : & \context \\ \\ @{attribute_def (HOL) code_unfold} & : & \attribute\ \\ @{attribute_def (HOL) code_post} & : & \attribute\ \\ @{attribute_def (HOL) code_abbrev} & : & \attribute\ \\ @{command_def (HOL) "print_codeproc"}\\<^sup>*\ & : & \context \\ \\ @{command_def (HOL) "code_thms"}\\<^sup>*\ & : & \context \\ \\ @{command_def (HOL) "code_deps"}\\<^sup>*\ & : & \context \\ \\ @{command_def (HOL) "code_reserved"} & : & \theory \ theory\ \\ @{command_def (HOL) "code_printing"} & : & \theory \ theory\ \\ @{command_def (HOL) "code_identifier"} & : & \theory \ theory\ \\ @{command_def (HOL) "code_monad"} & : & \theory \ theory\ \\ @{command_def (HOL) "code_reflect"} & : & \theory \ theory\ \\ - @{command_def (HOL) "code_pred"} & : & \theory \ proof(prove)\ + @{command_def (HOL) "code_pred"} & : & \theory \ proof(prove)\ \\ + @{attribute_def (HOL) code_timing} & : & \attribute\ \\ + @{attribute_def (HOL) code_simp_trace} & : & \attribute\ \\ + @{attribute_def (HOL) code_runtime_trace} & : & \attribute\ \end{matharray} \<^rail>\ @@{command (HOL) export_code} @'open'? \ (const_expr+) (export_target*) ; export_target: @'in' target (@'module_name' @{syntax name})? \ (@'file_prefix' @{syntax path})? ('(' args ')')? ; target: 'SML' | 'OCaml' | 'Haskell' | 'Scala' | 'Eval' ; const_expr: (const | 'name._' | '_') ; const: @{syntax term} ; type_constructor: @{syntax name} ; class: @{syntax name} ; path: @{syntax embedded} ; @@{attribute (HOL) code} ('equation' | 'nbe' | 'abstype' | 'abstract' | 'del' | 'drop:' (const+) | 'abort:' (const+))? ; @@{command (HOL) code_datatype} (const+) ; @@{attribute (HOL) code_unfold} 'del'? ; @@{attribute (HOL) code_post} 'del'? ; @@{attribute (HOL) code_abbrev} 'del'? ; @@{command (HOL) code_thms} (const_expr+) ; @@{command (HOL) code_deps} (const_expr+) ; @@{command (HOL) code_reserved} target (@{syntax string}+) ; symbol_const: @'constant' const ; symbol_type_constructor: @'type_constructor' type_constructor ; symbol_class: @'type_class' class ; symbol_class_relation: @'class_relation' class ('<' | '\') class ; symbol_class_instance: @'class_instance' type_constructor @'::' class ; symbol_module: @'code_module' name ; syntax: @{syntax string} | (@'infix' | @'infixl' | @'infixr') @{syntax nat} @{syntax string} ; printing_const: symbol_const ('\' | '=>') \ ('(' target ')' syntax ? + @'and') ; printing_type_constructor: symbol_type_constructor ('\' | '=>') \ ('(' target ')' syntax ? + @'and') ; printing_class: symbol_class ('\' | '=>') \ ('(' target ')' @{syntax string} ? + @'and') ; printing_class_relation: symbol_class_relation ('\' | '=>') \ ('(' target ')' @{syntax string} ? + @'and') ; printing_class_instance: symbol_class_instance ('\'| '=>') \ ('(' target ')' '-' ? + @'and') ; printing_module: symbol_module ('\' | '=>') \ ('(' target ')' (@{syntax string} for_symbol?)? 
+ @'and') ; for_symbol: @'for' ((symbol_const | symbol_type_constructor | symbol_class | symbol_class_relation | symbol_class_instance)+) ; @@{command (HOL) code_printing} ((printing_const | printing_type_constructor | printing_class | printing_class_relation | printing_class_instance | printing_module) + '|') ; @@{command (HOL) code_identifier} ((symbol_const | symbol_type_constructor | symbol_class | symbol_class_relation | symbol_class_instance | symbol_module ) ('\' | '=>') \ ('(' target ')' @{syntax string} ? + @'and') + '|') ; @@{command (HOL) code_monad} const const target ; @@{command (HOL) code_reflect} @{syntax string} \ (@'datatypes' (@{syntax string} '=' ('_' | (@{syntax string} + '|') + @'and')))? \ (@'functions' (@{syntax string} +))? (@'file_prefix' @{syntax path})? ; @@{command (HOL) code_pred} \ ('(' @'modes' ':' modedecl ')')? \ const ; modedecl: (modes | ((const ':' modes) \ (@'and' ((const ':' modes @'and')+))?)) ; modes: mode @'as' const \ \<^descr> @{command (HOL) "export_code"} generates code for a given list of constants in the specified target language(s). If no serialization instruction is given, only abstract code is generated internally. Constants may be specified by giving them literally, referring to all executable constants within a certain theory by giving \name._\, or referring to \<^emph>\all\ executable constants currently available by giving \_\. By default, exported identifiers are minimized per module. This can be suppressed by prepending @{keyword "open"} to the list of constants. By default, for each involved theory one corresponding name space module is generated. Alternatively, a module name may be specified after the @{keyword "module_name"} keyword; then \<^emph>\all\ code is placed in this module. Generated code is output as logical files within the theory context, as well as session exports that can be retrieved using @{tool_ref export}, or @{tool build} with option \<^verbatim>\-e\ and suitable \isakeyword{export\_files} specifications in the session \<^verbatim>\ROOT\ entry. All files have a common directory prefix: the long theory name plus ``\<^verbatim>\code\''. The actual file name is determined by the target language together with an optional \<^theory_text>\file_prefix\ (the default is ``\<^verbatim>\export\'' with a consecutive number within the current theory). For \SML\, \OCaml\ and \Scala\, the file prefix becomes a plain file with extension (e.g.\ ``\<^verbatim>\.ML\'' for SML). For \Haskell\ the file prefix becomes a directory that is populated with a separate file for each module (with extension ``\<^verbatim>\.hs\''). Serializers take an optional list of arguments in parentheses. \<^item> For \<^emph>\Haskell\ a module name prefix may be given using the ``\root:\'' argument; ``\string_classes\'' adds a ``\<^verbatim>\deriving (Read, Show)\'' clause to each appropriate datatype declaration. \<^item> For \<^emph>\Scala\, ``\case_insensitive\'' avoids name clashes on case-insensitive file systems. \<^descr> @{attribute (HOL) code} declares code equations for code generation. Variant \code equation\ declares a conventional equation as a code equation. Variants \code abstype\ and \code abstract\ declare abstract datatype certificates or code equations on abstract datatype representations respectively. Vanilla \code\ falls back to \code equation\ or \code abstract\ depending on the syntactic shape of the underlying equation. Variant \code del\ deselects a code equation for code generation.
Variant \code nbe\ also accepts non-left-linear equations for \<^emph>\normalization by evaluation\ only. Variants \code drop:\ and \code abort:\ take a list of constants as arguments and drop all code equations declared for them. In the case of \abort\, - these constants then do not require to a specification by means of - code equations; if needed these are implemented by program abort (exception) - instead. + these constants, if needed, are implemented by program abort + (exception). Packages declaring code equations usually provide a reasonable default setup. \<^descr> @{command (HOL) "code_datatype"} specifies a constructor set for a logical type. \<^descr> @{command (HOL) "print_codesetup"} gives an overview on selected code equations and code generator datatypes. \<^descr> @{attribute (HOL) code_unfold} declares (or with option ``\del\'' removes) theorems which during preprocessing are applied as rewrite rules to any code equation or evaluation input. \<^descr> @{attribute (HOL) code_post} declares (or with option ``\del\'' removes) theorems which are applied as rewrite rules to any result of an evaluation. \<^descr> @{attribute (HOL) code_abbrev} declares (or with option ``\del\'' removes) equations which are applied as rewrite rules to any result of an evaluation and symmetrically during preprocessing to any code equation or evaluation input. \<^descr> @{command (HOL) "print_codeproc"} prints the setup of the code generator preprocessor. \<^descr> @{command (HOL) "code_thms"} prints a list of theorems representing the corresponding program containing all given constants after preprocessing. \<^descr> @{command (HOL) "code_deps"} visualizes dependencies of theorems representing the corresponding program containing all given constants after preprocessing. \<^descr> @{command (HOL) "code_reserved"} declares a list of names as reserved for a given target, preventing them from being shadowed by any generated code. \<^descr> @{command (HOL) "code_printing"} associates a series of symbols (constants, type constructors, classes, class relations, instances, module names) with target-specific serializations; omitting a serialization deletes an existing serialization. \<^descr> @{command (HOL) "code_monad"} provides an auxiliary mechanism to generate monadic code for Haskell. \<^descr> @{command (HOL) "code_identifier"} associates a series of symbols (constants, type constructors, classes, class relations, instances, module names) with target-specific hints on how these symbols shall be named. These hints gain precedence over names for symbols with no hints at all. Conflicting hints are subject to name disambiguation. \<^emph>\Warning:\ It is at the discretion of the user to ensure that name prefixes of identifiers in compound statements like type classes or datatypes are still the same. \<^descr> @{command (HOL) "code_reflect"} without a ``\<^theory_text>\file_prefix\'' argument compiles code into the system runtime environment and modifies the code generator setup such that future invocations of system runtime code generation referring to one of the ``\datatypes\'' or ``\functions\'' entities use these precompiled entities. With a ``\<^theory_text>\file_prefix\'' argument, the corresponding code is generated/exported to the specified file (as for \<^theory_text>\export_code\) without modifying the code generator setup. \<^descr> @{command (HOL) "code_pred"} creates code equations for a predicate given a set of introduction rules.
Optional mode annotations determine which arguments are supposed to be input or output. If alternative introduction rules are declared, one must prove a corresponding elimination rule. + + \<^descr> @{attribute (HOL) "code_timing"} scrapes timing samples from different + stages of the code generator. + + \<^descr> @{attribute (HOL) "code_simp_trace"} traces the simplifier when it is + used with code equations. + + \<^descr> @{attribute (HOL) "code_runtime_trace"} traces ML code generated + dynamically for execution. \ end diff --git a/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML @@ -1,579 +1,580 @@ (* Title: HOL/Tools/ATP/atp_systems.ML Author: Fabian Immler, TU Muenchen Author: Jasmin Blanchette, TU Muenchen Setup for supported ATPs. *) signature SLEDGEHAMMER_ATP_SYSTEMS = sig type atp_format = ATP_Problem.atp_format type atp_formula_role = ATP_Problem.atp_formula_role type atp_failure = ATP_Proof.atp_failure type base_slice = int * int * string type atp_slice = atp_format * string * string * bool * string type atp_config = {exec : string list * string list, arguments : Proof.context -> bool -> string -> Time.time -> Path.T -> string list, proof_delims : (string * string) list, known_failures : (atp_failure * string) list, prem_role : atp_formula_role, good_slices : Proof.context -> (base_slice * atp_slice) list, good_max_mono_iters : int, good_max_new_mono_instances : int} val default_max_mono_iters : int val default_max_new_mono_instances : int val spass_H1SOS : string val spass_H2 : string val spass_H2LR0LT0 : string val spass_H2NuVS0 : string val spass_H2NuVS0Red2 : string val spass_H2SOS : string val isabelle_scala_function: string list * string list val remote_atp : string -> string -> string list -> (string * string) list -> (atp_failure * string) list -> atp_formula_role -> (Proof.context -> base_slice * atp_slice) -> string * (unit -> atp_config) val add_atp : string * (unit -> atp_config) -> theory -> theory val get_atp : theory -> string -> (unit -> atp_config) val is_atp_installed : theory -> string -> bool val refresh_systems_on_tptp : unit -> unit val local_atps : string list val remote_atps : string list val atps : string list end; structure Sledgehammer_ATP_Systems : SLEDGEHAMMER_ATP_SYSTEMS = struct open ATP_Problem open ATP_Proof open ATP_Problem_Generate (* ATP configuration *) val TF0 = TFF (Monomorphic, Without_FOOL) val TF1 = TFF (Polymorphic, Without_FOOL) val TX0 = TFF (Monomorphic, With_FOOL {with_ite = true, with_let = true}) val TX1 = TFF (Polymorphic, With_FOOL {with_ite = true, with_let = true}) val TH0 = THF (Monomorphic, {with_ite = true, with_let = true}, THF_With_Choice) val TH1 = THF (Polymorphic, {with_ite = true, with_let = true}, THF_With_Choice) val default_max_mono_iters = 3 (* FUDGE *) val default_max_new_mono_instances = 100 (* FUDGE *) (* desired slice size, desired number of facts, fact filter *) type base_slice = int * int * string (* problem file format, type encoding, lambda translation scheme, uncurried aliases?, prover-specific extra information *) type atp_slice = atp_format * string * string * bool * string type atp_config = {exec : string list * string list, arguments : Proof.context -> bool -> string -> Time.time -> Path.T -> string list, proof_delims : (string * string) list, known_failures : (atp_failure * string) list, prem_role : atp_formula_role, 
good_slices : Proof.context -> (base_slice * atp_slice) list, good_max_mono_iters : int, good_max_new_mono_instances : int} (* "good_slices" must be found empirically, ideally taking a holistic approach since the ATPs are run in parallel. *) val mepoN = "mepo" val mashN = "mash" val meshN = "mesh" val tstp_proof_delims = [("% SZS output start CNFRefutation", "% SZS output end CNFRefutation"), ("% SZS output start Refutation", "% SZS output end Refutation"), ("% SZS output start Proof", "% SZS output end Proof")] fun known_szs_failures wrap = [(Unprovable, wrap "CounterSatisfiable"), (Unprovable, wrap "Satisfiable"), (GaveUp, wrap "GaveUp"), (GaveUp, wrap "Unknown"), (GaveUp, wrap "Incomplete"), (ProofMissing, wrap "Theorem"), (ProofMissing, wrap "Unsatisfiable"), (TimedOut, wrap "Timeout"), (Inappropriate, wrap "Inappropriate"), (OutOfResources, wrap "ResourceOut"), (OutOfResources, wrap "MemoryOut"), (Interrupted, wrap "Forced"), (Interrupted, wrap "User")] val known_szs_status_failures = known_szs_failures (prefix "SZS status ") val known_says_failures = known_szs_failures (prefix " says ") structure Data = Theory_Data ( type T = ((unit -> atp_config) * stamp) Symtab.table val empty = Symtab.empty fun merge data : T = Symtab.merge (eq_snd (op =)) data handle Symtab.DUP name => error ("Duplicate ATP: " ^ quote name) ) fun to_secs min time = Int.max (min, (Time.toMilliseconds time + 999) div 1000) val sosN = "sos" val no_sosN = "no_sos" (* agsyHOL *) val agsyhol_config : atp_config = {exec = (["AGSYHOL_HOME"], ["agsyHOL"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => ["--proof --time-out " ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = known_szs_status_failures, prem_role = Hypothesis, good_slices = (* FUDGE *) K [((1, 60, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} val agsyhol = (agsyholN, fn () => agsyhol_config) (* Alt-Ergo *) val alt_ergo_config : atp_config = {exec = (["WHY3_HOME"], ["why3"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => ["--format tptp --prover 'Alt-Ergo,0.95.2,' --timelimit " ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem], proof_delims = [], known_failures = [(ProofMissing, ": Valid"), (TimedOut, ": Timeout"), (GaveUp, ": Unknown")], prem_role = Hypothesis, good_slices = (* FUDGE *) K [((1000 (* infinity *), 100, meshN), (TF1, "poly_native", liftingN, false, ""))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} val alt_ergo = (alt_ergoN, fn () => alt_ergo_config) (* E *) val e_config : atp_config = {exec = (["E_HOME"], ["eprover-ho", "eprover"]), arguments = fn _ => fn _ => fn extra_options => fn timeout => fn problem => - ["--tstp-in --tstp-out --silent " ^ extra_options ^ " --cpu-limit=" ^ - string_of_int (to_secs 2 timeout) ^ " --proof-object=1 " ^ File.bash_path problem], + ["--tstp-in --tstp-out --silent " ^ extra_options ^ + " --demod-under-lambda=true --cpu-limit=" ^ string_of_int (to_secs 2 timeout) ^ + " --proof-object=1 " ^ File.bash_path problem], proof_delims = [("# SZS output start CNFRefutation", "# SZS output end CNFRefutation")] @ tstp_proof_delims, known_failures = [(TimedOut, "Failure: Resource limit exceeded (time)"), (TimedOut, "time limit 
exceeded")] @ known_szs_status_failures, prem_role = Conjecture, good_slices = let val (format, type_enc, lam_trans, extra_options) = if string_ord (getenv "E_VERSION", "2.7") <> LESS then (THF (Monomorphic, {with_ite = true, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, "--auto-schedule=4 --serialize-schedule=true") else if string_ord (getenv "E_VERSION", "2.6") <> LESS then (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, "--auto-schedule") else (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Lambda_Free), "mono_native_higher", combsN, "--auto-schedule") in (* FUDGE *) K [((1000 (* infinity *), 512, meshN), (format, type_enc, lam_trans, false, extra_options)), ((1000 (* infinity *), 1024, meshN), (format, type_enc, lam_trans, false, extra_options)), ((1000 (* infinity *), 128, mepoN), (format, type_enc, lam_trans, false, extra_options)), ((1000 (* infinity *), 724, meshN), (TF0, "poly_guards??", lam_trans, false, extra_options)), ((1000 (* infinity *), 256, mepoN), (format, type_enc, liftingN, false, extra_options)), ((1000 (* infinity *), 64, mashN), (format, type_enc, combsN, false, extra_options))] end, good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} val e = (eN, fn () => e_config) (* iProver *) val iprover_config : atp_config = {exec = (["IPROVER_HOME"], ["iproveropt", "iprover"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => ["--clausifier \"$VAMPIRE_HOME\"/vampire " ^ "--clausifier_options \"--mode clausify\" " ^ "--time_out_real " ^ string_of_real (Time.toReal timeout) ^ " " ^ File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = [(ProofIncomplete, "% SZS output start CNFRefutation")] @ known_szs_status_failures, prem_role = Hypothesis, good_slices = (* FUDGE *) K [((1, 32, meshN), (TF0, "mono_native", liftingN, false, "")), ((1, 512, meshN), (TX0, "mono_native", liftingN, false, "")), ((1, 128, mashN), (TF0, "mono_native", combsN, false, "")), ((1, 1024, meshN), (TF0, "mono_native", liftingN, false, "")), ((1, 256, mepoN), (TF0, "mono_native", combsN, false, ""))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} val iprover = (iproverN, fn () => iprover_config) (* LEO-II *) val leo2_config : atp_config = {exec = (["LEO2_HOME"], ["leo.opt", "leo"]), arguments = fn _ => fn full_proofs => fn _ => fn timeout => fn problem => ["--foatp e --atp e=\"$E_HOME\"/eprover \ \--atp epclextract=\"$E_HOME\"/epclextract \ \--proofoutput 1 --timeout " ^ string_of_int (to_secs 1 timeout) ^ " " ^ (if full_proofs then "--notReplLeibnizEQ --notReplAndrewsEQ --notUseExtCnfCmbd " else "") ^ File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = [(TimedOut, "CPU time limit exceeded, terminating"), (GaveUp, "No.of.Axioms")] @ known_szs_status_failures, prem_role = Hypothesis, good_slices = (* FUDGE *) K [((1, 40, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} val leo2 = (leo2N, fn () => leo2_config) (* Leo-III *) (* Include choice? Disabled now since it's disabled for Satallax as well. 
*) val leo3_config : atp_config = {exec = (["LEO3_HOME"], ["leo3"]), arguments = fn _ => fn full_proofs => fn _ => fn timeout => fn problem => [File.bash_path problem ^ " " ^ "--atp cvc=\"$CVC4_SOLVER\" --atp e=\"$E_HOME\"/eprover \ \-p -t " ^ string_of_int (to_secs 1 timeout) ^ " " ^ (if full_proofs then "--nleq --naeq " else "")], proof_delims = tstp_proof_delims, known_failures = known_szs_status_failures, prem_role = Hypothesis, good_slices = (* FUDGE *) K [((3, 512, meshN), (TH0, "mono_native_higher", keep_lamsN, false, "")), ((3, 512, meshN), (TF0, "mono_native", liftingN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} val leo3 = (leo3N, fn () => leo3_config) (* Satallax *) (* Choice is disabled until there is proper reconstruction for it. *) val satallax_config : atp_config = {exec = (["SATALLAX_HOME"], ["satallax.opt", "satallax"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => [(case getenv "E_HOME" of "" => "" | home => "-E " ^ home ^ "/eprover ") ^ "-p tstp -t " ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem], proof_delims = [("% SZS output start Proof", "% SZS output end Proof")], known_failures = known_szs_status_failures, prem_role = Hypothesis, good_slices = (* FUDGE *) K [((6, 256, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, ""))], good_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), good_max_new_mono_instances = default_max_new_mono_instances} val satallax = (satallaxN, fn () => satallax_config) (* SPASS *) val spass_H1SOS = "-Heuristic=1 -SOS" val spass_H2 = "-Heuristic=2" val spass_H2LR0LT0 = "-Heuristic=2 -LR=0 -LT=0" val spass_H2NuVS0 = "-Heuristic=2 -RNuV=1 -Sorts=0" val spass_H2NuVS0Red2 = "-Heuristic=2 -RNuV=1 -Sorts=0 -RFRew=2 -RBRew=2 -RTaut=2" val spass_H2SOS = "-Heuristic=2 -SOS" val spass_config : atp_config = let val format = DFG Monomorphic in {exec = (["SPASS_HOME"], ["SPASS"]), arguments = fn _ => fn full_proofs => fn extra_options => fn timeout => fn problem => ["-Isabelle=1 " ^ (if full_proofs then "-CNFRenaming=0 -Splits=0 " else "") ^ "-TimeLimit=" ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem |> extra_options <> "" ? 
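(* "b ? f" is the Isabelle/ML conditional combinator: apply f when b holds, identity otherwise; here it prepends extra_options when present *)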
prefix (extra_options ^ " ")], proof_delims = [("Here is a proof", "Formulae used in the proof")], known_failures = [(GaveUp, "SPASS beiseite: Completion found"), (TimedOut, "SPASS beiseite: Ran out of time"), (OutOfResources, "SPASS beiseite: Maximal number of loops exceeded"), (MalformedInput, "Undefined symbol"), (MalformedInput, "Free Variable"), (Unprovable, "No formulae and clauses found in input file"), (InternalError, "Please report this error")], prem_role = Conjecture, good_slices = (* FUDGE *) K [((1, 150, meshN), (format, "mono_native", combsN, true, "")), ((1, 500, meshN), (format, "mono_native", liftingN, true, spass_H2SOS)), ((1, 50, meshN), (format, "mono_native", liftingN, true, spass_H2LR0LT0)), ((1, 250, meshN), (format, "mono_native", combsN, true, spass_H2NuVS0)), ((1, 1000, mepoN), (format, "mono_native", liftingN, true, spass_H1SOS)), ((1, 150, meshN), (format, "poly_guards??", liftingN, false, spass_H2NuVS0Red2)), ((1, 300, meshN), (format, "mono_native", combsN, true, spass_H2SOS)), ((1, 100, meshN), (format, "mono_native", combs_and_liftingN, true, spass_H2))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} end val spass = (spassN, fn () => spass_config) (* Vampire *) val vampire_basic_options = "--proof tptp --output_axiom_names on" ^ (if ML_System.platform_is_windows then "" (*time slicing is not supported in the Windows version of Vampire*) else " --mode casc") val vampire_full_proof_options = " --proof_extra free --forced_options avatar=off:equality_proxy=off:general_splitting=off:inequality_splitting=0:naming=0" val vampire_config : atp_config = {exec = (["VAMPIRE_HOME"], ["vampire"]), arguments = fn _ => fn full_proofs => fn sos => fn timeout => fn problem => [vampire_basic_options ^ (if full_proofs then " " ^ vampire_full_proof_options else "") ^ " -t " ^ string_of_int (to_secs 1 timeout) ^ " --input_file " ^ File.bash_path problem |> sos = sosN ?
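(* for slices tagged sosN, prepend "--sos on" to enable Vampire's set-of-support strategy *)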
prefix "--sos on "], proof_delims = [("=========== Refutation ==========", "======= End of refutation =======")] @ tstp_proof_delims, known_failures = [(GaveUp, "UNPROVABLE"), (GaveUp, "CANNOT PROVE"), (Unprovable, "Satisfiability detected"), (Unprovable, "Termination reason: Satisfiable"), (Interrupted, "Aborted by signal SIGINT")] @ known_szs_status_failures, prem_role = Hypothesis, good_slices = (* FUDGE *) K [((1, 512, meshN), (TX1, "mono_native_fool", combsN, false, sosN)), ((1, 1024, meshN), (TX1, "mono_native_fool", liftingN, false, sosN)), ((1, 256, mashN), (TX1, "mono_native_fool", liftingN, false, no_sosN)), ((1, 512, mepoN), (TF1, "poly_native", liftingN, false, no_sosN)), ((1, 16, meshN), (TX1, "mono_native_fool", liftingN, false, no_sosN)), ((1, 32, meshN), (TX1, "mono_native_fool", combsN, false, no_sosN)), ((1, 64, meshN), (TX1, "mono_native_fool", combs_or_liftingN, false, no_sosN)), ((1, 128, meshN), (TX1, "mono_native_fool", liftingN, false, no_sosN))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)} val vampire = (vampireN, fn () => vampire_config) (* Zipperposition *) val zipperposition_config : atp_config = let val format = THF (Polymorphic, {with_ite = true, with_let = false}, THF_Without_Choice) in {exec = (["ZIPPERPOSITION_HOME"], ["zipperposition"]), arguments = fn _ => fn _ => fn extra_options => fn timeout => fn problem => ["--input tptp", "--output tptp", "--timeout " ^ Time.toString timeout, extra_options, File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = [(TimedOut, "SZS status ResourceOut")] @ (* odd way of timing out *) known_szs_status_failures, prem_role = Hypothesis, good_slices = K [((1, 1024, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --max-inferences=1 --ho-unif-max-depth=1 --ho-max-elims=0 --ho-max-app-projections=0 --ho-max-rigid-imitations=1 --ho-max-identifications=0 --boolean-reasoning=bool-hoist --bool-hoist-simpl=true --bool-select=LI --recognize-injectivity=true --ext-rules=ext-family --ext-rules-max-depth=1 --ho-choice-inst=true --ho-prim-enum=none --ho-elim-leibniz=0 --interpret-bool-funs=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --ho-unif-level=pragmatic-framework --select=bb+e-selection2 --post-cnf-lambda-lifting=true -q \"4|prefer-sos|pnrefined(2,1,1,1,2,2,2)\" -q \"6|prefer-processed|conjecture-relative-struct(1.5,3.5,2,3)\" -q \"1|const|fifo\" -q \"4|prefer-ground|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-struct(1,5,2,3)\" --avatar=off --recognize-injectivity=true --ho-neg-ext=true --e-timeout=2 --ho-pattern-decider=true --ho-fixpoint-decider=true --e-max-derived=50 --ignore-orphans=true --e-auto=true --presaturate=true --e-call-point=0.1")), ((1, 128, mepoN), (format, "mono_native_higher_fool", keep_lamsN, false, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=off --ho-prim-enum=full --ho-prim-max=1 --avatar=off --recognize-injectivity=true --ho-elim-leibniz=4 --ho-unif-level=full-framework --no-max-vars -q \"2|prefer-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1)\" -q \"4|const|conjecture-relative-e(0.1,1,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1.5)\" -q \"4|prefer-sos|pnrefined(1,1,1,1,2,1.5,2)\" --select=ho-selection5 
--ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=5 --e-call-point=0.25 --e-auto=true --sine=50 --sine-tolerance=2 --sine-depth-max=4 --sine-depth-min=1 --e-max-derived=96 --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=arity0 --prec-gen-fun=invfreq_conj")), ((1, 512, mashN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=2 --ho-unif-max-depth=1 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection16 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --sine=50 --sine-tolerance=1.0 --sine-depth-max=3 --sine-depth-min=1 --sine-trim-implications=true --ho-unif-level=pragmatic-framework --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=lambda-def-invfreqrank --e-call-point=0.1")), ((1, 32, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic -nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=2 --ho-max-identifications=0 --ho-unif-max-depth=3 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection7 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=7 --sine=50 --sine-tolerance=1 --sine-depth-max=2 --sine-depth-min=1 --e-max-derived=64 --sine-ignore-k-most-common-syms=2 --sine-trim-implications=true --e-encode-lambdas=lift --scan-clause-ac=false --lambdasup=0 --kbo-weight-fun=lambda-def-invfreqrank --demod-in-var-args=true --bool-demod=true --lambda-demod=true --e-call-point=0.1 --lazy-cnf-kind=simp")), ((1, 64, meshN), (format, "mono_native_higher_fool", keep_lamsN, false, "--mode=ho-comb-complete --boolean-reasoning=simpl-only --ext-rules=off --kbo-weight-fun=lambda-def-sqarity --ho-prim-enum=none --tptp-def-as-rewrite -q \"4|prefer-sos|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-var(1,s,f)\" -q \"3|const|default\" -q \"1|prefer-processed|fifo\" --ho-elim-leibniz=1 --select=NoSelection --solve-formulas=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=8 --sine=60 --sine-tolerance=2 --sine-depth-max=5 --sine-depth-min=1 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --e-max-derived=50 --e-encode-lambdas=ignore --scan-clause-ac=false --presaturate=true --comb-b-penalty=3 --comb-c-penalty=3 --comb-k-penalty=1 --comb-s-penalty=5 --subvarsup=false --e-call-point=0.15 --lazy-cnf-kind=simp --trigger-bool-ind=1")), ((1, 256, meshN), (format, 
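(* each slice: (slice size, number of facts, fact filter) paired with (format, type encoding, lambda translation, uncurried aliases?, prover options), cf. base_slice and atp_slice above *)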
"mono_native_higher_fool", keep_lamsN, false, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --ho-unif-max-depth=0 --ho-prim-enum=none -q \"2|prefer-ho-steps|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"2|prefer-ground|default\" -q \"2|prefer-empty-trail|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" --select=bb+e-selection7 --ho-pattern-decider=false --ho-fixpoint-decider=true --ho-solid-decider=false --sine=150 --sine-tolerance=2 --sine-depth-max=3 --sine-depth-min=1 --prec-gen-fun=invfreqhack --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=2 --fluid-log-hoist=false --tptp-def-as-rewrite --rewrite-before-cnf=true --ho-prim-enum=eq --ho-prim-enum-add-var=true --ho-prim-max=1 --ho-prim-enum-early-bird=true -o tptp --avatar=eager --split-only-ground=true"))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} end val zipperposition = (zipperpositionN, fn () => zipperposition_config) (* Remote ATP invocation via SystemOnTPTP *) val no_remote_systems = {url = "", systems = [] : string list} val remote_systems = Synchronized.var "atp_remote_systems" no_remote_systems fun get_remote_systems () = Timeout.apply (seconds 10.0) SystemOnTPTP.list_systems () handle ERROR msg => (warning msg; no_remote_systems) | Timeout.TIMEOUT _ => no_remote_systems fun find_remote_system name [] systems = find_first (String.isPrefix (name ^ "---")) systems | find_remote_system name (version :: versions) systems = case find_first (String.isPrefix (name ^ "---" ^ version)) systems of NONE => find_remote_system name versions systems | res => res fun get_remote_system name versions = Synchronized.change_result remote_systems (fn remote => (if #url remote <> SystemOnTPTP.get_url () orelse null (#systems remote) then get_remote_systems () else remote) |> ` #systems) |> `(find_remote_system name versions) fun the_remote_system name versions = (case get_remote_system name versions of (SOME sys, _) => sys | (NONE, []) => error "SystemOnTPTP is currently not available" | (NONE, syss) => (case syss |> filter_out (String.isPrefix "%") |> filter_out (curry (op =) "") of [] => error "SystemOnTPTP is currently not available" | [msg] => error ("SystemOnTPTP is currently not available: " ^ msg) | syss => error ("System " ^ quote name ^ " is not available at SystemOnTPTP.\n(Available systems: " ^ commas_quote syss ^ ".)"))) val max_remote_secs = 1000 (* give Geoff Sutcliffe's servers a break *) val isabelle_scala_function = (["SCALA_HOME"], ["bin/scala"]) fun remote_config system_name system_versions proof_delims known_failures prem_role good_slice = {exec = isabelle_scala_function, arguments = fn _ => fn _ => fn command => fn timeout => fn problem => [the_remote_system system_name system_versions, Isabelle_System.absolute_path problem, command, string_of_int (Int.min (max_remote_secs, to_secs 1 timeout) * 1000)], proof_delims = union (op =) tstp_proof_delims proof_delims, known_failures = known_failures @ known_says_failures, prem_role = prem_role, good_slices = fn ctxt => [good_slice ctxt], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} : atp_config fun remotify_config system_name system_versions good_slice ({proof_delims, known_failures, prem_role, ...} : atp_config) = remote_config system_name system_versions proof_delims known_failures prem_role good_slice fun remote_atp name 
system_name system_versions proof_delims known_failures prem_role good_slice = (remote_prefix ^ name, fn () => remote_config system_name system_versions proof_delims known_failures prem_role good_slice) fun remotify_atp (name, config) system_name system_versions good_slice = (remote_prefix ^ name, remotify_config system_name system_versions good_slice o config) fun gen_remote_waldmeister name type_enc = remote_atp name "Waldmeister" ["710"] tstp_proof_delims ([(OutOfResources, "Too many function symbols"), (Inappropriate, "**** Unexpected end of file."), (Crashed, "Unrecoverable Segmentation Fault")] @ known_szs_status_failures) Hypothesis (K ((1000 (* infinity *), 50, meshN), (CNF_UEQ, type_enc, combsN, false, "")) (* FUDGE *)) val remote_agsyhol = remotify_atp agsyhol "agsyHOL" ["1.0", "1"] (K ((1000 (* infinity *), 60, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, "")) (* FUDGE *)) val remote_alt_ergo = remotify_atp alt_ergo "Alt-Ergo" ["0.95.2"] (K ((1000 (* infinity *), 250, meshN), (TF1, "poly_native", keep_lamsN, false, "")) (* FUDGE *)) val remote_e = remotify_atp e "E" ["2.0", "1.9.1", "1.8"] (K ((1000 (* infinity *), 750, meshN), (TF0, "mono_native", combsN, false, "")) (* FUDGE *)) val remote_iprover = remotify_atp iprover "iProver" ["0.99"] (K ((1000 (* infinity *), 150, meshN), (FOF, "mono_guards??", liftingN, false, "")) (* FUDGE *)) val remote_leo2 = remotify_atp leo2 "LEO-II" ["1.5.0", "1.4", "1.3", "1.2", "1"] (K ((1000 (* infinity *), 40, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", liftingN, false, "")) (* FUDGE *)) val remote_leo3 = remotify_atp leo3 "Leo-III" ["1.1"] (K ((1000 (* infinity *), 150, meshN), (THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "poly_native_higher", keep_lamsN, false, "")) (* FUDGE *)) val remote_waldmeister = gen_remote_waldmeister waldmeisterN "raw_mono_tags??" val remote_zipperposition = remotify_atp zipperposition "Zipperpin" ["2.1", "2.0"] (K ((1000 (* infinity *), 512, meshN), (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false, "")) (* FUDGE *)) (* Dummy prover *) fun dummy_config prem_role format type_enc uncurried_aliases : atp_config = {exec = (["ISABELLE_ATP"], ["scripts/dummy_atp"]), arguments = K (K (K (K (K [])))), proof_delims = [], known_failures = known_szs_status_failures, prem_role = prem_role, good_slices = K [((1, 256, "mepo"), (format, type_enc, if is_format_higher_order format then keep_lamsN else combsN, uncurried_aliases, ""))], good_max_mono_iters = default_max_mono_iters, good_max_new_mono_instances = default_max_new_mono_instances} val dummy_fof = (dummy_fofN, fn () => dummy_config Hypothesis FOF "mono_guards??" 
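(* last argument: uncurried aliases off *)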
false) val dummy_tfx = (dummy_tfxN, fn () => dummy_config Hypothesis TX1 "poly_native_fool" false) val dummy_thf = (dummy_thfN, fn () => dummy_config Hypothesis TH1 "poly_native_higher" false) val dummy_thf_reduced = let val format = THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice) val config = dummy_config Hypothesis format "poly_native_higher" false in (dummy_thfN ^ "_reduced", fn () => config) end (* Setup *) fun add_atp (name, config) thy = Data.map (Symtab.update_new (name, (config, stamp ()))) thy handle Symtab.DUP name => error ("Duplicate ATP: " ^ quote name) fun get_atp thy name = fst (the (Symtab.lookup (Data.get thy) name)) handle Option.Option => error ("Unknown ATP: " ^ name) fun is_atp_installed thy name = let val {exec, ...} = get_atp thy name () in exists (fn var => getenv var <> "") (fst exec) end fun refresh_systems_on_tptp () = Synchronized.change remote_systems (fn _ => get_remote_systems ()) val local_atps = [agsyhol, alt_ergo, e, iprover, leo2, leo3, satallax, spass, vampire, zipperposition] val remote_atps = [remote_agsyhol, remote_alt_ergo, remote_e, remote_iprover, remote_leo2, remote_leo3, remote_waldmeister, remote_zipperposition, dummy_fof, dummy_tfx, dummy_thf, dummy_thf_reduced] val atps = local_atps @ remote_atps val _ = Theory.setup (fold add_atp atps) val local_atps = map fst local_atps val remote_atps = map fst remote_atps val atps = map fst atps end; diff --git a/src/Pure/Admin/build_scala.scala b/src/Pure/Admin/build_scala.scala --- a/src/Pure/Admin/build_scala.scala +++ b/src/Pure/Admin/build_scala.scala @@ -1,163 +1,163 @@ /* Title: Pure/Admin/build_scala.scala Author: Makarius Build Isabelle Scala component from official downloads. */ package isabelle object Build_Scala { /* downloads */ sealed case class Download( name: String, version: String, url: String, physical_url: String = "", base_version: String = "3" ) { def make_url(template: String): String = template.replace("{V}", version).replace("{B}", base_version) def proper_url: String = make_url(proper_string(physical_url).getOrElse(url)) def artifact: String = Library.take_suffix[Char](_ != '/', proper_url.toList)._2.mkString def get(path: Path, progress: Progress = new Progress): Unit = Isabelle_System.download_file(proper_url, path, progress = progress) def get_unpacked(dir: Path, strip: Int = 0, progress: Progress = new Progress): Unit = Isabelle_System.with_tmp_file("archive"){ archive_path => get(archive_path, progress = progress) progress.echo("Unpacking " + artifact) Isabelle_System.gnutar( "-xzf " + File.bash_path(archive_path), dir = dir, strip = strip).check } def print: String = " * " + name + " " + version + (if (base_version.nonEmpty) " for Scala " + base_version else "") + ":\n " + make_url(url) } val main_download: Download = - Download("scala", "3.0.2", base_version = "", + Download("scala", "3.1.1", base_version = "", url = "https://github.com/lampepfl/dotty/releases/download/{V}/scala3-{V}.tar.gz") val lib_downloads: List[Download] = List( Download("scala-parallel-collections", "1.0.4", "https://mvnrepository.com/artifact/org.scala-lang.modules/scala-parallel-collections_{B}/{V}", physical_url = "https://repo1.maven.org/maven2/org/scala-lang/modules/scala-parallel-collections_{B}/{V}/scala-parallel-collections_{B}-{V}.jar"), - Download("scala-parser-combinators", "2.1.0", + Download("scala-parser-combinators", "2.1.1", "https://mvnrepository.com/artifact/org.scala-lang.modules/scala-parser-combinators_{B}/{V}", physical_url = 
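// concrete artifact location on Maven Central; make_url substitutes {V} (version) and {B} (base Scala version)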
"https://repo1.maven.org/maven2/org/scala-lang/modules/scala-parser-combinators_{B}/{V}/scala-parser-combinators_{B}-{V}.jar"), Download("scala-swing", "3.0.0", "https://mvnrepository.com/artifact/org.scala-lang.modules/scala-swing_{B}/{V}", physical_url = "https://repo1.maven.org/maven2/org/scala-lang/modules/scala-swing_{B}/{V}/scala-swing_{B}-{V}.jar"), - Download("scala-xml", "2.0.1", + Download("scala-xml", "2.1.0", "https://mvnrepository.com/artifact/org.scala-lang.modules/scala-xml_{B}/{V}", - physical_url = "https://repo1.maven.org/maven2/org/scala-lang/modules/scala-xml_3/2.0.1/scala-xml_{B}-{V}.jar") + physical_url = "https://repo1.maven.org/maven2/org/scala-lang/modules/scala-xml_{B}/{V}/scala-xml_{B}-{V}.jar") ) /* build Scala component */ def build_scala( target_dir: Path = Path.current, progress: Progress = new Progress ): Unit = { /* component */ val component_name = main_download.name + "-" + main_download.version val component_dir = Isabelle_System.new_directory(target_dir + Path.basic(component_name)) progress.echo("Component " + component_dir) /* download */ main_download.get_unpacked(component_dir, strip = 1, progress = progress) val lib_dir = component_dir + Path.explode("lib") lib_downloads.foreach(download => download.get(lib_dir + Path.basic(download.artifact), progress = progress)) File.write(component_dir + Path.basic("LICENSE"), Url.read(Url("https://www.apache.org/licenses/LICENSE-2.0.txt"))) /* classpath */ val classpath: List[String] = { def no_function(name: String): String = "function " + name + "() {\n:\n}" val script = cat_lines(List( no_function("stty"), no_function("tput"), "PROG_HOME=" + File.bash_path(component_dir), File.read(component_dir + Path.explode("bin/common")) .replace("scala_exit_status=127", "scala_exit_status=0"), "compilerJavaClasspathArgs", "echo \"$jvm_cp_args\"")) val main_classpath = Path.split(Isabelle_System.bash(script).check.out).map(_.file_name) val lib_classpath = lib_downloads.map(_.artifact) main_classpath ::: lib_classpath } val interfaces = classpath.find(_.startsWith("scala3-interfaces")) .getOrElse(error("Missing jar for scala3-interfaces")) /* settings */ val etc_dir = Isabelle_System.make_directory(component_dir + Path.basic("etc")) File.write(etc_dir + Path.basic("settings"), """# -*- shell-script -*- :mode=shellscript: SCALA_HOME="$COMPONENT" SCALA_INTERFACES="$SCALA_HOME/lib/""" + interfaces + """" """ + terminate_lines(classpath.map(jar => "classpath \"$SCALA_HOME/lib/" + jar + "\""))) /* README */ File.write(component_dir + Path.basic("README"), "This distribution of Scala integrates the following parts:\n\n" + (main_download :: lib_downloads).map(_.print).mkString("\n\n") + """ Makarius """ + Date.Format.date(Date.now()) + "\n") } /* Isabelle tool wrapper */ val isabelle_tool = Isabelle_Tool("build_scala", "build Isabelle Scala component from official downloads", Scala_Project.here, { args => var target_dir = Path.current val getopts = Getopts(""" Usage: isabelle build_scala [OPTIONS] Options are: -D DIR target directory (default ".") Build Isabelle Scala component from official downloads. 
""", "D:" -> (arg => target_dir = Path.explode(arg))) val more_args = getopts(args) if (more_args.nonEmpty) getopts.usage() val progress = new Console_Progress() build_scala(target_dir = target_dir, progress = progress) }) } diff --git a/src/Pure/General/antiquote.scala b/src/Pure/General/antiquote.scala --- a/src/Pure/General/antiquote.scala +++ b/src/Pure/General/antiquote.scala @@ -1,61 +1,62 @@ /* Title: Pure/General/antiquote.scala Author: Makarius Antiquotations within plain text. */ package isabelle object Antiquote { sealed abstract class Antiquote { def source: String } case class Text(source: String) extends Antiquote case class Control(source: String) extends Antiquote case class Antiq(source: String) extends Antiquote /* parsers */ object Parsers extends Parsers trait Parsers extends Scan.Parsers { private val txt: Parser[String] = rep1(many1(s => !Symbol.is_control(s) && !Symbol.is_open(s) && s != "@") | "@" <~ guard(opt_term(one(s => s != "{")))) ^^ (x => x.mkString) val control: Parser[String] = opt(one(Symbol.is_control)) ~ cartouche ^^ { case Some(x) ~ y => x + y case None ~ x => x } | one(Symbol.is_control) val antiq_other: Parser[String] = many1(s => s != "\"" && s != "`" && s != "}" && !Symbol.is_open(s) && !Symbol.is_close(s)) private val antiq_body: Parser[String] = quoted("\"") | (quoted("`") | (cartouche | antiq_other)) val antiq: Parser[String] = "@{" ~ rep(antiq_body) ~ "}" ^^ { case x ~ y ~ z => x + y.mkString + z } val antiquote: Parser[Antiquote] = txt ^^ (x => Text(x)) | (control ^^ (x => Control(x)) | antiq ^^ (x => Antiq(x))) } /* read */ def read(input: CharSequence): List[Antiquote] = { - Parsers.parseAll(Parsers.rep(Parsers.antiquote), Scan.char_reader(input)) match { + val result = Parsers.parseAll(Parsers.rep(Parsers.antiquote), Scan.char_reader(input)) + (result : @unchecked) match { case Parsers.Success(xs, _) => xs case Parsers.NoSuccess(_, next) => error("Malformed quotation/antiquotation source" + Position.here(Position.Line(next.pos.line))) } } def read_antiq_body(input: CharSequence): Option[String] = { read(input) match { case List(Antiq(source)) => Some(source.substring(2, source.length - 1)) case _ => None } } } diff --git a/src/Pure/General/json.scala b/src/Pure/General/json.scala --- a/src/Pure/General/json.scala +++ b/src/Pure/General/json.scala @@ -1,393 +1,394 @@ /* Title: Pure/General/json.scala Author: Makarius Support for JSON: https://www.json.org/. See also http://seriot.ch/parsing_json.php "Parsing JSON is a Minefield". 
*/ package isabelle import scala.util.matching.Regex import scala.util.parsing.combinator import scala.util.parsing.combinator.lexical.Scanners import scala.util.parsing.input.CharArrayReader.EofCh object JSON { type T = Any type S = String object Object { type Entry = (String, JSON.T) type T = Map[String, JSON.T] val empty: Object.T = Map.empty def apply(entries: Entry*): Object.T = Map(entries:_*) def unapply(obj: Any): Option[Object.T] = obj match { case m: Map[_, _] if m.keySet.forall(_.isInstanceOf[String]) => Some(m.asInstanceOf[Object.T]) case _ => None } } /* lexer */ object Kind extends Enumeration { val KEYWORD, STRING, NUMBER, ERROR = this.Value } sealed case class Token(kind: Kind.Value, text: String) { def is_keyword: Boolean = kind == Kind.KEYWORD def is_keyword(name: String): Boolean = kind == Kind.KEYWORD && text == name def is_string: Boolean = kind == Kind.STRING def is_number: Boolean = kind == Kind.NUMBER def is_error: Boolean = kind == Kind.ERROR } object Lexer extends Scanners with Scan.Parsers { override type Elem = Char type Token = JSON.Token def errorToken(msg: String): Token = Token(Kind.ERROR, msg) val white_space: String = " \t\n\r" override val whiteSpace: Regex = ("[" + white_space + "]+").r def whitespace: Parser[Any] = many(character(white_space.contains(_))) val letter: Parser[String] = one(character(Symbol.is_ascii_letter)) val letters1: Parser[String] = many1(character(Symbol.is_ascii_letter)) def digits: Parser[String] = many(character(Symbol.is_ascii_digit)) def digits1: Parser[String] = many1(character(Symbol.is_ascii_digit)) /* keyword */ def keyword: Parser[Token] = (letters1 | one(character("{}[],:".contains(_)))) ^^ (s => Token(Kind.KEYWORD, s)) /* string */ def string: Parser[Token] = '\"' ~> rep(string_body) <~ '\"' ^^ (cs => Token(Kind.STRING, cs.mkString)) def string_body: Parser[Char] = elem("", c => c > '\u001f' && c != '\"' && c != '\\' && c != EofCh) | '\\' ~> string_escape def string_escape: Parser[Char] = elem("", "\"\\/".contains(_)) | elem("", "bfnrt".contains(_)) ^^ { case 'b' => '\b' case 'f' => '\f' case 'n' => '\n' case 'r' => '\r' case 't' => '\t' } | 'u' ~> repeated(character("0123456789abcdefABCDEF".contains(_)), 4, 4) ^^ (s => Integer.parseInt(s, 16).toChar) def string_failure: Parser[Token] = '\"' ~> failure("Unterminated string") /* number */ def number: Parser[Token] = opt("-") ~ number_body ~ opt(letter) ^^ { case a ~ b ~ None => Token(Kind.NUMBER, a.getOrElse("") + b) case a ~ b ~ Some(c) => errorToken("Invalid number format: " + quote(a.getOrElse("") + b + c)) } def number_body: Parser[String] = (zero | positive) ~ opt(number_fract) ~ opt(number_exp) ^^ { case a ~ b ~ c => a + b.getOrElse("") + c.getOrElse("") } def number_fract: Parser[String] = "." 
~ digits1 ^^ { case a ~ b => a + b } def number_exp: Parser[String] = one(character("eE".contains(_))) ~ maybe(character("-+".contains(_))) ~ digits1 ^^ { case a ~ b ~ c => a + b + c } def zero: Parser[String] = one(character(c => c == '0')) def nonzero: Parser[String] = one(character(c => c != '0' && Symbol.is_ascii_digit(c))) def positive: Parser[String] = nonzero ~ digits ^^ { case a ~ b => a + b } /* token */ def token: Parser[Token] = keyword | (string | (string_failure | (number | failure("Illegal character")))) } /* parser */ trait Parsers extends combinator.Parsers { type Elem = Token def $$$(name: String): Parser[Token] = elem(name, _.is_keyword(name)) def string: Parser[String] = elem("string", _.is_string) ^^ (_.text) def number: Parser[Double] = elem("number", _.is_number) ^^ (tok => tok.text.toDouble) def json_object: Parser[Object.T] = $$$("{") ~> repsep(string ~ ($$$(":") ~> json_value) ^^ { case a ~ b => (a, b) }, $$$(",")) <~ $$$("}") ^^ (_.toMap) def json_array: Parser[List[T]] = $$$("[") ~> repsep(json_value, $$$(",")) <~ $$$("]") def json_value: Parser[T] = json_object | (json_array | (number | (string | ($$$("true") ^^^ true | ($$$("false") ^^^ false | ($$$("null") ^^^ null)))))) def parse(input: CharSequence, strict: Boolean): T = { val scanner = new Lexer.Scanner(Scan.char_reader(input)) - phrase(if (strict) json_object | json_array else json_value)(scanner) match { + val result = phrase(if (strict) json_object | json_array else json_value)(scanner) + (result : @unchecked) match { case Success(json, _) => json case NoSuccess(_, next) => error("Malformed JSON input at " + next.pos) } } } object Parsers extends Parsers /* standard format */ def parse(s: S, strict: Boolean = true): T = Parsers.parse(s, strict) object Format { def unapply(s: S): Option[T] = try { Some(parse(s, strict = false)) } catch { case ERROR(_) => None } def apply_lines(json: List[T]): S = json.map(apply).mkString("[", ",\n", "]") def apply(json: T): S = { val result = new StringBuilder def string(s: String): Unit = { result += '"' result ++= s.iterator.map { case '"' => "\\\"" case '\\' => "\\\\" case '\b' => "\\b" case '\f' => "\\f" case '\n' => "\\n" case '\r' => "\\r" case '\t' => "\\t" case c => if (c <= '\u001f' || c >= '\u007f' && c <= '\u009f') "\\u%04x".format(c.toInt) else c }.mkString result += '"' } def array(list: List[T]): Unit = { result += '[' Library.separate(None, list.map(Some(_))).foreach({ case None => result += ',' case Some(x) => json_format(x) }) result += ']' } def object_(obj: Object.T): Unit = { result += '{' Library.separate(None, obj.toList.map(Some(_))).foreach({ case None => result += ',' case Some((x, y)) => string(x) result += ':' json_format(y) }) result += '}' } def json_format(x: T): Unit = { x match { case null => result ++= "null" case _: Int | _: Long | _: Boolean => result ++= x.toString case n: Double => val i = n.toLong result ++= (if (i.toDouble == n) i.toString else n.toString) case s: String => string(s) case Object(m) => object_(m) case list: List[T] => array(list) case _ => error("Bad JSON value: " + x.toString) } } json_format(json) result.toString } } /* typed values */ object Value { object UUID { def unapply(json: T): Option[isabelle.UUID.T] = json match { case x: java.lang.String => isabelle.UUID.unapply(x) case _ => None } } object String { def unapply(json: T): Option[java.lang.String] = json match { case x: java.lang.String => Some(x) case _ => None } } object String0 { def unapply(json: T): Option[java.lang.String] = json match { case null => 
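// JSON null is accepted and read as the empty string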
Some("") case x: java.lang.String => Some(x) case _ => None } } object Double { def unapply(json: T): Option[scala.Double] = json match { case x: scala.Double => Some(x) case x: scala.Long => Some(x.toDouble) case x: scala.Int => Some(x.toDouble) case _ => None } } object Long { def unapply(json: T): Option[scala.Long] = json match { case x: scala.Double if x.toLong.toDouble == x => Some(x.toLong) case x: scala.Long => Some(x) case x: scala.Int => Some(x.toLong) case _ => None } } object Int { def unapply(json: T): Option[scala.Int] = json match { case x: scala.Double if x.toInt.toDouble == x => Some(x.toInt) case x: scala.Long if x.toInt.toLong == x => Some(x.toInt) case x: scala.Int => Some(x) case _ => None } } object Boolean { def unapply(json: T): Option[scala.Boolean] = json match { case x: scala.Boolean => Some(x) case _ => None } } object List { def unapply[A](json: T, unapply: T => Option[A]): Option[List[A]] = json match { case xs: List[T] => val ys = xs.map(unapply) if (ys.forall(_.isDefined)) Some(ys.map(_.get)) else None case _ => None } } object Seconds { def unapply(json: T): Option[Time] = Double.unapply(json).map(Time.seconds) } } /* object values */ def optional(entry: (String, Option[T])): Object.T = entry match { case (name, Some(x)) => Object(name -> x) case (_, None) => Object.empty } def value(obj: T, name: String): Option[T] = obj match { case Object(m) => m.get(name) case _ => None } def value[A](obj: T, name: String, unapply: T => Option[A]): Option[A] = for { json <- value(obj, name) x <- unapply(json) } yield x def array(obj: T, name: String): Option[List[T]] = value(obj, name) match { case Some(a: List[T]) => Some(a) case _ => None } def value_default[A](obj: T, name: String, unapply: T => Option[A], default: => A): Option[A] = value(obj, name) match { case None => Some(default) case Some(json) => unapply(json) } def uuid(obj: T, name: String): Option[UUID.T] = value(obj, name, Value.UUID.unapply) def string(obj: T, name: String): Option[String] = value(obj, name, Value.String.unapply) def string_default(obj: T, name: String, default: => String = ""): Option[String] = value_default(obj, name, Value.String.unapply, default) def string0(obj: T, name: String): Option[String] = value(obj, name, Value.String0.unapply) def string0_default(obj: T, name: String, default: => String = ""): Option[String] = value_default(obj, name, Value.String0.unapply, default) def double(obj: T, name: String): Option[Double] = value(obj, name, Value.Double.unapply) def double_default(obj: T, name: String, default: => Double = 0.0): Option[Double] = value_default(obj, name, Value.Double.unapply, default) def long(obj: T, name: String): Option[Long] = value(obj, name, Value.Long.unapply) def long_default(obj: T, name: String, default: => Long = 0): Option[Long] = value_default(obj, name, Value.Long.unapply, default) def int(obj: T, name: String): Option[Int] = value(obj, name, Value.Int.unapply) def int_default(obj: T, name: String, default: => Int = 0): Option[Int] = value_default(obj, name, Value.Int.unapply, default) def bool(obj: T, name: String): Option[Boolean] = value(obj, name, Value.Boolean.unapply) def bool_default(obj: T, name: String, default: => Boolean = false): Option[Boolean] = value_default(obj, name, Value.Boolean.unapply, default) def list[A](obj: T, name: String, unapply: T => Option[A]): Option[List[A]] = value(obj, name, Value.List.unapply(_, unapply)) def list_default[A](obj: T, name: String, unapply: T => Option[A], default: => List[A] = Nil) : Option[List[A]] = 
value_default(obj, name, Value.List.unapply(_, unapply), default) def strings(obj: T, name: String): Option[List[String]] = list(obj, name, JSON.Value.String.unapply) def strings_default(obj: T, name: String, default: => List[String] = Nil): Option[List[String]] = list_default(obj, name, JSON.Value.String.unapply, default) def seconds(obj: T, name: String): Option[Time] = value(obj, name, Value.Seconds.unapply) def seconds_default(obj: T, name: String, default: => Time = Time.zero): Option[Time] = value_default(obj, name, Value.Seconds.unapply, default) } diff --git a/src/Pure/Isar/token.scala b/src/Pure/Isar/token.scala --- a/src/Pure/Isar/token.scala +++ b/src/Pure/Isar/token.scala @@ -1,320 +1,321 @@ /* Title: Pure/Isar/token.scala Author: Makarius Outer token syntax for Isabelle/Isar. */ package isabelle import scala.collection.mutable import scala.util.parsing.input object Token { /* tokens */ object Kind extends Enumeration { /*immediate source*/ val COMMAND = Value("command") val KEYWORD = Value("keyword") val IDENT = Value("identifier") val LONG_IDENT = Value("long identifier") val SYM_IDENT = Value("symbolic identifier") val VAR = Value("schematic variable") val TYPE_IDENT = Value("type variable") val TYPE_VAR = Value("schematic type variable") val NAT = Value("natural number") val FLOAT = Value("floating-point number") val SPACE = Value("white space") /*delimited content*/ val STRING = Value("string") val ALT_STRING = Value("back-quoted string") val CARTOUCHE = Value("text cartouche") val CONTROL = Value("control cartouche") val INFORMAL_COMMENT = Value("informal comment") val FORMAL_COMMENT = Value("formal comment") /*special content*/ val ERROR = Value("bad input") val UNPARSED = Value("unparsed input") } /* parsers */ object Parsers extends Parsers trait Parsers extends Scan.Parsers with Comment.Parsers { private def delimited_token: Parser[Token] = { val string = quoted("\"") ^^ (x => Token(Token.Kind.STRING, x)) val alt_string = quoted("`") ^^ (x => Token(Token.Kind.ALT_STRING, x)) val cmt = comment ^^ (x => Token(Token.Kind.INFORMAL_COMMENT, x)) val formal_cmt = comment_cartouche ^^ (x => Token(Token.Kind.FORMAL_COMMENT, x)) val cart = cartouche ^^ (x => Token(Token.Kind.CARTOUCHE, x)) val ctrl = control_cartouche ^^ (x => Token(Token.Kind.CONTROL, x)) string | (alt_string | (cmt | (formal_cmt | (cart | ctrl)))) } private def other_token(keywords: Keyword.Keywords): Parser[Token] = { val letdigs1 = many1(Symbol.is_letdig) val sub = one(s => s == Symbol.sub_decoded || s == Symbol.sub) val id = one(Symbol.is_letter) ~ (rep(letdigs1 | (sub ~ letdigs1 ^^ { case x ~ y => x + y })) ^^ (_.mkString)) ^^ { case x ~ y => x + y } val nat = many1(Symbol.is_digit) val natdot = nat ~ "." ~ nat ^^ { case x ~ y ~ z => x + y + z } val id_nat = id ~ opt("." ~ nat) ^^ { case x ~ Some(y ~ z) => x + y + z case x ~ None => x } val ident = id ~ rep("." ~> id) ^^ { case x ~ Nil => Token(Token.Kind.IDENT, x) case x ~ ys => Token(Token.Kind.LONG_IDENT, (x :: ys).mkString(".")) } val var_ = "?" 
~ id_nat ^^ { case x ~ y => Token(Token.Kind.VAR, x + y) } val type_ident = "'" ~ id ^^ { case x ~ y => Token(Token.Kind.TYPE_IDENT, x + y) } val type_var = "?'" ~ id_nat ^^ { case x ~ y => Token(Token.Kind.TYPE_VAR, x + y) } val nat_ = nat ^^ (x => Token(Token.Kind.NAT, x)) val float = ("-" ~ natdot ^^ { case x ~ y => x + y } | natdot) ^^ (x => Token(Token.Kind.FLOAT, x)) val sym_ident = (many1(Symbol.is_symbolic_char) | one(sym => Symbol.is_symbolic(sym))) ^^ (x => Token(Token.Kind.SYM_IDENT, x)) val keyword = literal(keywords.major) ^^ (x => Token(Token.Kind.COMMAND, x)) ||| literal(keywords.minor) ^^ (x => Token(Token.Kind.KEYWORD, x)) val space = many1(Symbol.is_blank) ^^ (x => Token(Token.Kind.SPACE, x)) val recover_delimited = (recover_quoted("\"") | (recover_quoted("`") | (recover_cartouche | recover_comment))) ^^ (x => Token(Token.Kind.ERROR, x)) val bad = one(_ => true) ^^ (x => Token(Token.Kind.ERROR, x)) space | (recover_delimited | ((keyword ||| (ident | (var_ | (type_ident | (type_var | (float | (nat_ | sym_ident))))))) | bad)) } def token(keywords: Keyword.Keywords): Parser[Token] = delimited_token | other_token(keywords) def token_line( keywords: Keyword.Keywords, ctxt: Scan.Line_Context ) : Parser[(Token, Scan.Line_Context)] = { val string = quoted_line("\"", ctxt) ^^ { case (x, c) => (Token(Token.Kind.STRING, x), c) } val alt_string = quoted_line("`", ctxt) ^^ { case (x, c) => (Token(Token.Kind.ALT_STRING, x), c) } val cart = cartouche_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.CARTOUCHE, x), c) } val cmt = comment_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.INFORMAL_COMMENT, x), c) } val formal_cmt = comment_cartouche_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.FORMAL_COMMENT, x), c) } val other = other_token(keywords) ^^ { case x => (x, Scan.Finished) } string | (alt_string | (cart | (cmt | (formal_cmt | other)))) } } /* explode */ def explode(keywords: Keyword.Keywords, inp: CharSequence): List[Token] = Parsers.parseAll(Parsers.rep(Parsers.token(keywords)), Scan.char_reader(inp)) match { case Parsers.Success(tokens, _) => tokens case _ => error("Unexpected failure of tokenizing input:\n" + inp.toString) } def explode_line( keywords: Keyword.Keywords, inp: CharSequence, context: Scan.Line_Context ) : (List[Token], Scan.Line_Context) = { var in: input.Reader[Char] = Scan.char_reader(inp) val toks = new mutable.ListBuffer[Token] var ctxt = context while (!in.atEnd) { - Parsers.parse(Parsers.token_line(keywords, ctxt), in) match { + val result = Parsers.parse(Parsers.token_line(keywords, ctxt), in) + (result : @unchecked) match { case Parsers.Success((x, c), rest) => toks += x; ctxt = c; in = rest case Parsers.NoSuccess(_, rest) => error("Unexpected failure of tokenizing input:\n" + rest.source.toString) } } (toks.toList, ctxt) } val newline: Token = explode(Keyword.Keywords.empty, "\n").head /* embedded */ def read_embedded(keywords: Keyword.Keywords, inp: CharSequence): Option[Token] = explode(keywords, inp) match { case List(tok) if tok.is_embedded => Some(tok) case _ => None } /* names */ def read_name(keywords: Keyword.Keywords, inp: CharSequence): Option[Token] = explode(keywords, inp) match { case List(tok) if tok.is_name => Some(tok) case _ => None } def quote_name(keywords: Keyword.Keywords, name: String): String = if (read_name(keywords, name).isDefined) name else quote(name.replace("\"", "\\\"")) /* plain antiquotation (0 or 1 args) */ def read_antiq_arg(keywords: Keyword.Keywords, inp: CharSequence): Option[(String, Option[String])] = 
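// a single name token, optionally followed by one embedded argument; white space and comments are ignored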
explode(keywords, inp).filter(_.is_proper) match { case List(t) if t.is_name => Some(t.content, None) case List(t1, t2) if t1.is_name && t2.is_embedded => Some(t1.content, Some(t2.content)) case _ => None } /* implode */ def implode(toks: List[Token]): String = toks match { case List(tok) => tok.source case _ => toks.map(_.source).mkString } /* token reader */ object Pos { val none: Pos = new Pos(0, 0, "", "") val start: Pos = new Pos(1, 1, "", "") def file(file: String): Pos = new Pos(1, 1, file, "") def id(id: String): Pos = new Pos(0, 1, "", id) val command: Pos = id(Markup.COMMAND) } final class Pos private[Token]( val line: Int, val offset: Symbol.Offset, val file: String, val id: String) extends input.Position { def column = 0 def lineContents = "" def advance(token: Token): Pos = advance(token.source) def advance(source: String): Pos = { var line1 = line var offset1 = offset for (s <- Symbol.iterator(source)) { if (line1 > 0 && Symbol.is_newline(s)) line1 += 1 if (offset1 > 0) offset1 += 1 } if (line1 == line && offset1 == offset) this else new Pos(line1, offset1, file, id) } private def position(end_offset: Symbol.Offset): Position.T = (if (line > 0) Position.Line(line) else Nil) ::: (if (offset > 0) Position.Offset(offset) else Nil) ::: (if (end_offset > 0) Position.End_Offset(end_offset) else Nil) ::: (if (file != "") Position.File(file) else Nil) ::: (if (id != "") Position.Id_String(id) else Nil) def position(): Position.T = position(0) def position(token: Token): Position.T = position(advance(token).offset) def position(source: String): Position.T = position(advance(source).offset) override def toString: String = Position.here(position(), delimited = false) } abstract class Reader extends input.Reader[Token] private class Token_Reader(tokens: List[Token], val pos: Pos) extends Reader { def first: Token = tokens.head def rest: Token_Reader = new Token_Reader(tokens.tail, pos.advance(first)) def atEnd: Boolean = tokens.isEmpty } def reader(tokens: List[Token], start: Token.Pos): Reader = new Token_Reader(tokens, start) } sealed case class Token(kind: Token.Kind.Value, source: String) { def is_command: Boolean = kind == Token.Kind.COMMAND def is_command(name: String): Boolean = kind == Token.Kind.COMMAND && source == name def is_keyword: Boolean = kind == Token.Kind.KEYWORD def is_keyword(name: String): Boolean = kind == Token.Kind.KEYWORD && source == name def is_keyword(name: Char): Boolean = kind == Token.Kind.KEYWORD && source.length == 1 && source(0) == name def is_delimiter: Boolean = is_keyword && !Symbol.is_ascii_identifier(source) def is_ident: Boolean = kind == Token.Kind.IDENT def is_sym_ident: Boolean = kind == Token.Kind.SYM_IDENT def is_string: Boolean = kind == Token.Kind.STRING def is_nat: Boolean = kind == Token.Kind.NAT def is_float: Boolean = kind == Token.Kind.FLOAT def is_name: Boolean = kind == Token.Kind.IDENT || kind == Token.Kind.LONG_IDENT || kind == Token.Kind.SYM_IDENT || kind == Token.Kind.STRING || kind == Token.Kind.NAT def is_embedded: Boolean = is_name || kind == Token.Kind.CARTOUCHE || kind == Token.Kind.VAR || kind == Token.Kind.TYPE_IDENT || kind == Token.Kind.TYPE_VAR def is_space: Boolean = kind == Token.Kind.SPACE def is_informal_comment: Boolean = kind == Token.Kind.INFORMAL_COMMENT def is_formal_comment: Boolean = kind == Token.Kind.FORMAL_COMMENT def is_marker: Boolean = kind == Token.Kind.FORMAL_COMMENT && (source.startsWith(Symbol.marker) || source.startsWith(Symbol.marker_decoded)) def is_comment: Boolean = is_informal_comment || 
is_formal_comment def is_ignored: Boolean = is_space || is_informal_comment def is_proper: Boolean = !is_space && !is_comment def is_error: Boolean = kind == Token.Kind.ERROR def is_unparsed: Boolean = kind == Token.Kind.UNPARSED def is_unfinished: Boolean = is_error && (source.startsWith("\"") || source.startsWith("`") || source.startsWith("(*") || source.startsWith(Symbol.open) || source.startsWith(Symbol.open_decoded)) def is_open_bracket: Boolean = is_keyword && Word.open_brackets.exists(is_keyword) def is_close_bracket: Boolean = is_keyword && Word.close_brackets.exists(is_keyword) def is_begin: Boolean = is_keyword("begin") def is_end: Boolean = is_command("end") def is_begin_or_command: Boolean = is_begin || is_command def symbol_length: Symbol.Offset = Symbol.iterator(source).length def content: String = if (kind == Token.Kind.STRING) Scan.Parsers.quoted_content("\"", source) else if (kind == Token.Kind.ALT_STRING) Scan.Parsers.quoted_content("`", source) else if (kind == Token.Kind.CARTOUCHE) Scan.Parsers.cartouche_content(source) else if (kind == Token.Kind.INFORMAL_COMMENT) Scan.Parsers.comment_content(source) else if (kind == Token.Kind.FORMAL_COMMENT) Comment.content(source) else source def is_system_name: Boolean = { val s = content is_name && Path.is_wellformed(s) && !s.exists(Symbol.is_ascii_blank) && !Path.is_reserved(s) } } diff --git a/src/Pure/ML/ml_lex.scala b/src/Pure/ML/ml_lex.scala --- a/src/Pure/ML/ml_lex.scala +++ b/src/Pure/ML/ml_lex.scala @@ -1,292 +1,293 @@ /* Title: Pure/ML/ml_lex.scala Author: Makarius Lexical syntax for Isabelle/ML and Standard ML. */ package isabelle import scala.collection.mutable import scala.util.parsing.input.Reader object ML_Lex { /** keywords **/ val keywords: Set[String] = Set("#", "(", ")", ",", "->", "...", ":", ":>", ";", "=", "=>", "[", "]", "_", "{", "|", "}", "abstype", "and", "andalso", "as", "case", "datatype", "do", "else", "end", "eqtype", "exception", "fn", "fun", "functor", "handle", "if", "in", "include", "infix", "infixr", "let", "local", "nonfix", "of", "op", "open", "orelse", "raise", "rec", "sharing", "sig", "signature", "struct", "structure", "then", "type", "val", "where", "while", "with", "withtype") val keywords2: Set[String] = Set("and", "case", "do", "else", "end", "if", "in", "let", "local", "of", "sig", "struct", "then", "while", "with") val keywords3: Set[String] = Set("handle", "open", "raise") private val lexicon: Scan.Lexicon = Scan.Lexicon(keywords.toList: _*) /** tokens **/ object Kind extends Enumeration { val KEYWORD = Value("keyword") val IDENT = Value("identifier") val LONG_IDENT = Value("long identifier") val TYPE_VAR = Value("type variable") val WORD = Value("word") val INT = Value("integer") val REAL = Value("real") val CHAR = Value("character") val STRING = Value("quoted string") val SPACE = Value("white space") val INFORMAL_COMMENT = Value("informal comment") val FORMAL_COMMENT = Value("formal comment") val CONTROL = Value("control symbol antiquotation") val ANTIQ = Value("antiquotation") val ANTIQ_START = Value("antiquotation: start") val ANTIQ_STOP = Value("antiquotation: stop") val ANTIQ_OTHER = Value("antiquotation: other") val ANTIQ_STRING = Value("antiquotation: quoted string") val ANTIQ_ALT_STRING = Value("antiquotation: back-quoted string") val ANTIQ_CARTOUCHE = Value("antiquotation: text cartouche") val ERROR = Value("bad input") } sealed case class Token(kind: Kind.Value, source: String) { def is_keyword: Boolean = kind == Kind.KEYWORD def is_delimiter: Boolean = is_keyword && 
!Symbol.is_ascii_identifier(source) def is_space: Boolean = kind == Kind.SPACE def is_comment: Boolean = kind == Kind.INFORMAL_COMMENT || kind == Kind.FORMAL_COMMENT } /** parsers **/ case object ML_String extends Scan.Line_Context case class Antiq(ctxt: Scan.Line_Context) extends Scan.Line_Context private object Parsers extends Scan.Parsers with Antiquote.Parsers with Comment.Parsers { /* string material */ private val blanks = many(character(Symbol.is_ascii_blank)) private val blanks1 = many1(character(Symbol.is_ascii_blank)) private val gap = "\\" ~ blanks1 ~ "\\" ^^ { case x ~ y ~ z => x + y + z } private val gap_start = "\\" ~ blanks ~ """\z""".r ^^ { case x ~ y ~ _ => x + y } private val escape = one(character("\"\\abtnvfr".contains(_))) | "^" ~ one(character(c => '@' <= c && c <= '_')) ^^ { case x ~ y => x + y } | repeated(character(Symbol.is_ascii_digit), 3, 3) private val str = one(character(c => c != '"' && c != '\\' && ' ' <= c && c <= '~')) | one(s => Symbol.is_open(s) || Symbol.is_close(s) || Symbol.is_symbolic(s) || Symbol.is_control(s)) | "\\" ~ escape ^^ { case x ~ y => x + y } /* ML char -- without gaps */ private val ml_char: Parser[Token] = "#\"" ~ str ~ "\"" ^^ { case x ~ y ~ z => Token(Kind.CHAR, x + y + z) } private val recover_ml_char: Parser[String] = "#\"" ~ opt(str) ^^ { case x ~ Some(y) => x + y case x ~ None => x } /* ML string */ private val ml_string_body: Parser[String] = rep(gap | str) ^^ (_.mkString) private val recover_ml_string: Parser[String] = "\"" ~ ml_string_body ^^ { case x ~ y => x + y } private val ml_string: Parser[Token] = "\"" ~ ml_string_body ~ "\"" ^^ { case x ~ y ~ z => Token(Kind.STRING, x + y + z) } private def ml_string_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = { def result(x: String, c: Scan.Line_Context) = (Token(Kind.STRING, x), c) ctxt match { case Scan.Finished => "\"" ~ ml_string_body ~ ("\"" | gap_start) ^^ { case x ~ y ~ z => result(x + y + z, if (z == "\"") Scan.Finished else ML_String) } case ML_String => blanks ~ opt_term("\\" ~ ml_string_body ~ ("\"" | gap_start)) ^^ { case x ~ Some(y ~ z ~ w) => result(x + y + z + w, if (w == "\"") Scan.Finished else ML_String) case x ~ None => result(x, ML_String) } case _ => failure("") } } /* ML comment */ private val ml_comment: Parser[Token] = comment ^^ (x => Token(Kind.INFORMAL_COMMENT, x)) private def ml_comment_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = comment_line(ctxt) ^^ { case (x, c) => (Token(Kind.INFORMAL_COMMENT, x), c) } private val ml_formal_comment: Parser[Token] = comment_cartouche ^^ (x => Token(Kind.FORMAL_COMMENT, x)) private def ml_formal_comment_line(ctxt: Scan.Line_Context) : Parser[(Token, Scan.Line_Context)] = comment_cartouche_line(ctxt) ^^ { case (x, c) => (Token(Kind.FORMAL_COMMENT, x), c) } /* antiquotations (line-oriented) */ def ml_cartouche_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = cartouche_line(ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_CARTOUCHE, x), c) } def ml_antiq_start(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = ctxt match { case Scan.Finished => "@{" ^^ (x => (Token(Kind.ANTIQ_START, x), Antiq(Scan.Finished))) case _ => failure("") } def ml_antiq_stop(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = ctxt match { case Antiq(Scan.Finished) => "}" ^^ (x => (Token(Kind.ANTIQ_STOP, x), Scan.Finished)) case _ => failure("") } def ml_antiq_body(context: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = context match { case Antiq(ctxt) => 
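// within @{...}: arbitrary "other" material at top level, while quoted strings, back-quoted strings, and cartouches maintain their own nested line context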
(if (ctxt == Scan.Finished) antiq_other ^^ (x => (Token(Kind.ANTIQ_OTHER, x), context)) else failure("")) | quoted_line("\"", ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_STRING, x), Antiq(c)) } | quoted_line("`", ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_ALT_STRING, x), Antiq(c)) } | cartouche_line(ctxt) ^^ { case (x, c) => (Token(Kind.ANTIQ_CARTOUCHE, x), Antiq(c)) } case _ => failure("") } /* token */ private def other_token: Parser[Token] = { /* identifiers */ val letdigs = many(character(Symbol.is_ascii_letdig)) val alphanumeric = one(character(Symbol.is_ascii_letter)) ~ letdigs ^^ { case x ~ y => x + y } val symbolic = many1(character("!#$%&*+-/:<=>?@\\^`|~".contains(_))) val ident = (alphanumeric | symbolic) ^^ (x => Token(Kind.IDENT, x)) val long_ident = rep1(alphanumeric ~ "." ^^ { case x ~ y => x + y }) ~ (alphanumeric | (symbolic | "=")) ^^ { case x ~ y => Token(Kind.LONG_IDENT, x.mkString + y) } val type_var = "'" ~ letdigs ^^ { case x ~ y => Token(Kind.TYPE_VAR, x + y) } /* numerals */ val dec = many1(character(Symbol.is_ascii_digit)) val hex = many1(character(Symbol.is_ascii_hex)) val sign = opt("~") ^^ { case Some(x) => x case None => "" } val decint = sign ~ dec ^^ { case x ~ y => x + y } val exp = ("E" | "e") ~ decint ^^ { case x ~ y => x + y } val word = ("0wx" ~ hex ^^ { case x ~ y => x + y } | "0w" ~ dec ^^ { case x ~ y => x + y }) ^^ (x => Token(Kind.WORD, x)) val int = sign ~ ("0x" ~ hex ^^ { case x ~ y => x + y } | dec) ^^ { case x ~ y => Token(Kind.INT, x + y) } val rat = decint ~ opt("/" ~ dec) ^^ { case x ~ None => x case x ~ Some(y ~ z) => x + y + z } val real = (decint ~ "." ~ dec ~ (opt(exp) ^^ { case Some(x) => x case None => "" }) ^^ { case x ~ y ~ z ~ w => x + y + z + w } | decint ~ exp ^^ { case x ~ y => x + y }) ^^ (x => Token(Kind.REAL, x)) /* main */ val space = blanks1 ^^ (x => Token(Kind.SPACE, x)) val keyword = literal(lexicon) ^^ (x => Token(Kind.KEYWORD, x)) val ml_control = control ^^ (x => Token(Kind.CONTROL, x)) val ml_antiq = "@" ~ rat ^^ { case x ~ y => Token(Kind.ANTIQ, x + y) } | antiq ^^ (x => Token(Kind.ANTIQ, x)) val bad = one(_ => true) ^^ (x => Token(Kind.ERROR, x)) val recover = (recover_ml_char | (recover_ml_string | (recover_cartouche | recover_comment))) ^^ (x => Token(Kind.ERROR, x)) space | (ml_control | (recover | (ml_antiq | ((keyword ||| (word | (real | (int | (long_ident | (ident | type_var)))))) | bad)))) } def token: Parser[Token] = ml_char | (ml_string | (ml_comment | (ml_formal_comment | other_token))) def token_line(SML: Boolean, ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] = { val other = (ml_char | other_token) ^^ (x => (x, Scan.Finished)) if (SML) ml_string_line(ctxt) | (ml_comment_line(ctxt) | other) else { ml_string_line(ctxt) | (ml_comment_line(ctxt) | (ml_formal_comment_line(ctxt) | (ml_cartouche_line(ctxt) | (ml_antiq_start(ctxt) | (ml_antiq_stop(ctxt) | (ml_antiq_body(ctxt) | other)))))) } } } /* tokenize */ def tokenize(input: CharSequence): List[Token] = Parsers.parseAll(Parsers.rep(Parsers.token), Scan.char_reader(input)) match { case Parsers.Success(tokens, _) => tokens case _ => error("Unexpected failure of tokenizing input:\n" + input.toString) } def tokenize_line( SML: Boolean, input: CharSequence, context: Scan.Line_Context ) : (List[Token], Scan.Line_Context) = { var in: Reader[Char] = Scan.char_reader(input) val toks = new mutable.ListBuffer[Token] var ctxt = context while (!in.atEnd) { - Parsers.parse(Parsers.token_line(SML, ctxt), in) match { + val result = 
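// Success and NoSuccess are the only results produced here, so the match is @unchecked to suppress the exhaustiveness warning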
Parsers.parse(Parsers.token_line(SML, ctxt), in) + (result : @unchecked) match { case Parsers.Success((x, c), rest) => toks += x; ctxt = c; in = rest case Parsers.NoSuccess(_, rest) => error("Unexpected failure of tokenizing input:\n" + rest.source.toString) } } (toks.toList, ctxt) } } diff --git a/src/Pure/PIDE/rendering.scala b/src/Pure/PIDE/rendering.scala --- a/src/Pure/PIDE/rendering.scala +++ b/src/Pure/PIDE/rendering.scala @@ -1,773 +1,772 @@ /* Title: Pure/PIDE/rendering.scala Author: Makarius Isabelle-specific implementation of quasi-abstract rendering and markup interpretation. */ package isabelle import java.io.{File => JFile} import java.nio.file.FileSystems import scala.collection.immutable.SortedMap object Rendering { /* color */ object Color extends Enumeration { // background val unprocessed1, running1, canceled, bad, intensify, entity, active, active_result, markdown_bullet1, markdown_bullet2, markdown_bullet3, markdown_bullet4 = Value val background_colors: ValueSet = values // foreground val quoted, antiquoted = Value val foreground_colors: ValueSet = values -- background_colors // message underline val writeln, information, warning, legacy, error = Value val message_underline_colors: ValueSet = values -- background_colors -- foreground_colors // message background val writeln_message, information_message, tracing_message, warning_message, legacy_message, error_message = Value val message_background_colors: ValueSet = values -- background_colors -- foreground_colors -- message_underline_colors // text val main, keyword1, keyword2, keyword3, quasi_keyword, improper, operator, tfree, tvar, free, skolem, bound, `var`, inner_numeral, inner_quoted, inner_cartouche, comment1, comment2, comment3, dynamic, class_parameter, antiquote, raw_text, plain_text = Value val text_colors: ValueSet = values -- background_colors -- foreground_colors -- message_underline_colors -- message_background_colors // text overview val unprocessed, running = Value val text_overview_colors = Set(unprocessed, running, error, warning) } /* output messages */ val state_pri = 1 val writeln_pri = 2 val information_pri = 3 val tracing_pri = 4 val warning_pri = 5 val legacy_pri = 6 val error_pri = 7 val message_pri = Map( Markup.STATE -> state_pri, Markup.STATE_MESSAGE -> state_pri, Markup.WRITELN -> writeln_pri, Markup.WRITELN_MESSAGE -> writeln_pri, Markup.INFORMATION -> information_pri, Markup.INFORMATION_MESSAGE -> information_pri, Markup.TRACING -> tracing_pri, Markup.TRACING_MESSAGE -> tracing_pri, Markup.WARNING -> warning_pri, Markup.WARNING_MESSAGE -> warning_pri, Markup.LEGACY -> legacy_pri, Markup.LEGACY_MESSAGE -> legacy_pri, Markup.ERROR -> error_pri, Markup.ERROR_MESSAGE -> error_pri ).withDefaultValue(0) val message_underline_color = Map( writeln_pri -> Color.writeln, information_pri -> Color.information, warning_pri -> Color.warning, legacy_pri -> Color.legacy, error_pri -> Color.error) val message_background_color = Map( writeln_pri -> Color.writeln_message, information_pri -> Color.information_message, tracing_pri -> Color.tracing_message, warning_pri -> Color.warning_message, legacy_pri -> Color.legacy_message, error_pri -> Color.error_message) def output_messages(results: Command.Results): List[XML.Elem] = { val (states, other) = results.iterator.map(_._2).filterNot(Protocol.is_result).toList .partition(Protocol.is_state) states ::: other } /* text color */ val text_color = Map( Markup.KEYWORD1 -> Color.keyword1, Markup.KEYWORD2 -> Color.keyword2, Markup.KEYWORD3 -> Color.keyword3, 
Markup.QUASI_KEYWORD -> Color.quasi_keyword, Markup.IMPROPER -> Color.improper, Markup.OPERATOR -> Color.operator, Markup.STRING -> Color.main, Markup.ALT_STRING -> Color.main, Markup.CARTOUCHE -> Color.main, Markup.LITERAL -> Color.keyword1, Markup.DELIMITER -> Color.main, Markup.TFREE -> Color.tfree, Markup.TVAR -> Color.tvar, Markup.FREE -> Color.free, Markup.SKOLEM -> Color.skolem, Markup.BOUND -> Color.bound, Markup.VAR -> Color.`var`, Markup.INNER_STRING -> Color.inner_quoted, Markup.INNER_CARTOUCHE -> Color.inner_cartouche, Markup.DYNAMIC_FACT -> Color.dynamic, Markup.CLASS_PARAMETER -> Color.class_parameter, Markup.ANTIQUOTE -> Color.antiquote, Markup.RAW_TEXT -> Color.raw_text, Markup.PLAIN_TEXT -> Color.plain_text, Markup.ML_KEYWORD1 -> Color.keyword1, Markup.ML_KEYWORD2 -> Color.keyword2, Markup.ML_KEYWORD3 -> Color.keyword3, Markup.ML_DELIMITER -> Color.main, Markup.ML_NUMERAL -> Color.inner_numeral, Markup.ML_CHAR -> Color.inner_quoted, Markup.ML_STRING -> Color.inner_quoted, Markup.ML_COMMENT -> Color.comment1, Markup.COMMENT -> Color.comment1, Markup.COMMENT1 -> Color.comment1, Markup.COMMENT2 -> Color.comment2, Markup.COMMENT3 -> Color.comment3) val foreground = Map( Markup.STRING -> Color.quoted, Markup.ALT_STRING -> Color.quoted, Markup.CARTOUCHE -> Color.quoted, Markup.ANTIQUOTED -> Color.antiquoted) /* tooltips */ val tooltip_descriptions = Map( Markup.CITATION -> "citation", Markup.TOKEN_RANGE -> "inner syntax token", Markup.FREE -> "free variable", Markup.SKOLEM -> "skolem variable", Markup.BOUND -> "bound variable", Markup.VAR -> "schematic variable", Markup.TFREE -> "free type variable", Markup.TVAR -> "schematic type variable") /* entity focus */ object Focus { def apply(ids: Set[Long]): Focus = new Focus(ids) val empty: Focus = apply(Set.empty) def make(args: List[Text.Info[Focus]]): Focus = args.foldLeft(empty) { case (focus1, Text.Info(_, focus2)) => focus1 ++ focus2 } val full: Focus = new Focus(Set.empty) { override def apply(id: Long): Boolean = true override def toString: String = "Focus.full" } } sealed class Focus private[Rendering](protected val rep: Set[Long]) { def defined: Boolean = rep.nonEmpty def apply(id: Long): Boolean = rep.contains(id) def + (id: Long): Focus = if (rep.contains(id)) this else new Focus(rep + id) def ++ (other: Focus): Focus = if (this eq other) this else if (rep.isEmpty) other else other.rep.iterator.foldLeft(this)(_ + _) override def toString: String = rep.mkString("Focus(", ",", ")") } /* markup elements */ val position_elements = Markup.Elements(Markup.BINDING, Markup.ENTITY, Markup.REPORT, Markup.POSITION) val semantic_completion_elements = Markup.Elements(Markup.COMPLETION, Markup.NO_COMPLETION) val language_context_elements = Markup.Elements(Markup.STRING, Markup.ALT_STRING, Markup.CARTOUCHE, Markup.COMMENT, Markup.LANGUAGE, Markup.ML_STRING, Markup.ML_COMMENT) val language_elements = Markup.Elements(Markup.LANGUAGE) val citation_elements = Markup.Elements(Markup.CITATION) val active_elements = Markup.Elements(Markup.DIALOG, Markup.BROWSER, Markup.GRAPHVIEW, Markup.THEORY_EXPORTS, Markup.SENDBACK, Markup.JEDIT_ACTION, Markup.SIMP_TRACE_PANEL) val background_elements = Document_Status.Command_Status.proper_elements + Markup.WRITELN_MESSAGE + Markup.STATE_MESSAGE + Markup.INFORMATION_MESSAGE + Markup.TRACING_MESSAGE + Markup.WARNING_MESSAGE + Markup.LEGACY_MESSAGE + Markup.ERROR_MESSAGE + Markup.BAD + Markup.INTENSIFY + Markup.ENTITY + Markup.Markdown_Bullet.name ++ active_elements val foreground_elements = 
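// element filters derived from the color tables above, keeping both in sync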
Markup.Elements(foreground.keySet) val text_color_elements = Markup.Elements(text_color.keySet) val tooltip_elements = Markup.Elements(Markup.LANGUAGE, Markup.EXPRESSION, Markup.TIMING, Markup.ENTITY, Markup.SORTING, Markup.TYPING, Markup.CLASS_PARAMETER, Markup.ML_TYPING, Markup.ML_BREAKPOINT, Markup.PATH, Markup.DOC, Markup.URL, Markup.MARKDOWN_PARAGRAPH, Markup.MARKDOWN_ITEM, Markup.Markdown_List.name) ++ Markup.Elements(tooltip_descriptions.keySet) val tooltip_message_elements = Markup.Elements(Markup.WRITELN, Markup.INFORMATION, Markup.WARNING, Markup.LEGACY, Markup.ERROR, Markup.BAD) val message_elements = Markup.Elements(message_pri.keySet) val warning_elements = Markup.Elements(Markup.WARNING, Markup.LEGACY) val error_elements = Markup.Elements(Markup.ERROR) val entity_elements = Markup.Elements(Markup.ENTITY) val antiquoted_elements = Markup.Elements(Markup.ANTIQUOTED) val meta_data_elements = Markup.Elements(Markup.META_TITLE, Markup.META_CREATOR, Markup.META_CONTRIBUTOR, Markup.META_DATE, Markup.META_DESCRIPTION, Markup.META_LICENSE) val document_tag_elements = Markup.Elements(Markup.Document_Tag.name) val markdown_elements = Markup.Elements(Markup.MARKDOWN_PARAGRAPH, Markup.MARKDOWN_ITEM, Markup.Markdown_List.name, Markup.Markdown_Bullet.name) } class Rendering( val snapshot: Document.Snapshot, val options: Options, val session: Session ) { override def toString: String = "Rendering(" + snapshot.toString + ")" def get_text(range: Text.Range): Option[String] = None /* caret */ def before_caret_range(caret: Text.Offset): Text.Range = { val former_caret = snapshot.revert(caret) snapshot.convert(Text.Range(former_caret - 1, former_caret)) } /* completion */ def semantic_completion(completed_range: Option[Text.Range], caret_range: Text.Range) : Option[Text.Info[Completion.Semantic]] = if (snapshot.is_outdated) None else { snapshot.select(caret_range, Rendering.semantic_completion_elements, _ => { case Completion.Semantic.Info(info) => completed_range match { case Some(range0) if range0.contains(info.range) && range0 != info.range => None case _ => Some(info) } case _ => None }).headOption.map(_.info) } def semantic_completion_result( history: Completion.History, unicode: Boolean, completed_range: Option[Text.Range], caret_range: Text.Range ): (Boolean, Option[Completion.Result]) = { semantic_completion(completed_range, caret_range) match { case Some(Text.Info(_, Completion.No_Completion)) => (true, None) case Some(Text.Info(range, names: Completion.Names)) => get_text(range) match { case Some(original) => (false, names.complete(range, history, unicode, original)) case None => (false, None) } case None => (false, None) } } def language_context(range: Text.Range): Option[Completion.Language_Context] = snapshot.select(range, Rendering.language_context_elements, _ => { case Text.Info(_, XML.Elem(Markup.Language(language, symbols, antiquotes, delimited), _)) => if (delimited) Some(Completion.Language_Context(language, symbols, antiquotes)) else None case Text.Info(_, elem) if elem.name == Markup.ML_STRING || elem.name == Markup.ML_COMMENT => Some(Completion.Language_Context.ML_inner) case Text.Info(_, _) => Some(Completion.Language_Context.inner) }).headOption.map(_.info) def citations(range: Text.Range): List[Text.Info[String]] = snapshot.select(range, Rendering.citation_elements, _ => { case Text.Info(info_range, XML.Elem(Markup.Citation(name), _)) => Some(Text.Info(snapshot.convert(info_range), name)) case _ => None }).map(_.info) /* file-system path completion */ def 
language_path(range: Text.Range): Option[Text.Info[Boolean]] = snapshot.select(range, Rendering.language_elements, _ => { case Text.Info(info_range, XML.Elem(Markup.Language.Path(delimited), _)) => Some((delimited, snapshot.convert(info_range))) case _ => None }).headOption.map({ case Text.Info(_, (delimited, range)) => Text.Info(range, delimited) }) def path_completion(caret: Text.Offset): Option[Completion.Result] = { def complete(text: String): List[(String, List[String])] = { try { val path = Path.explode(text) val (dir, base_name) = if (text == "" || text.endsWith("/")) (path, "") else (path.dir, path.file_name) val directory = new JFile(session.resources.append(snapshot.node_name, dir)) val files = directory.listFiles if (files == null) Nil else { val ignore = space_explode(':', options.string("completion_path_ignore")). map(s => FileSystems.getDefault.getPathMatcher("glob:" + s)) (for { file <- files.iterator name = file.getName if name.startsWith(base_name) path_name = new JFile(name).toPath if !ignore.exists(matcher => matcher.matches(path_name)) text1 = (dir + Path.basic(name)).implode_short if text != text1 is_dir = new JFile(directory, name).isDirectory replacement = text1 + (if (is_dir) "/" else "") descr = List(text1, if (is_dir) "(directory)" else "(file)") } yield (replacement, descr)).take(options.int("completion_limit")).toList } } catch { case ERROR(_) => Nil } } def is_wrapped(s: String): Boolean = s.startsWith("\"") && s.endsWith("\"") || s.startsWith(Symbol.open_decoded) && s.endsWith(Symbol.close_decoded) for { Text.Info(r1, delimited) <- language_path(before_caret_range(caret)) s1 <- get_text(r1) (r2, s2) <- if (is_wrapped(s1)) { Some((Text.Range(r1.start + 1, r1.stop - 1), s1.substring(1, s1.length - 1))) } else if (delimited) Some((r1, s1)) else None if Path.is_valid(s2) paths = complete(s2) if paths.nonEmpty items = paths.map(p => Completion.Item(r2, s2, "", p._2, p._1, 0, false)) } yield Completion.Result(r2, s2, false, items) } /* spell checker */ private lazy val spell_checker_include = Markup.Elements(space_explode(',', options.string("spell_checker_include")): _*) private lazy val spell_checker_elements = spell_checker_include ++ Markup.Elements(space_explode(',', options.string("spell_checker_exclude")): _*) def spell_checker(range: Text.Range): List[Text.Info[Text.Range]] = { val result = snapshot.select(range, spell_checker_elements, _ => { case info => Some( if (spell_checker_include(info.info.name)) Some(snapshot.convert(info.range)) else None) }) for (Text.Info(range, Some(range1)) <- result) yield Text.Info(range, range1) } def spell_checker_point(range: Text.Range): Option[Text.Range] = spell_checker(range).headOption.map(_.info) /* text background */ def background( elements: Markup.Elements, range: Text.Range, focus: Rendering.Focus ) : List[Text.Info[Rendering.Color.Value]] = { for { Text.Info(r, result) <- snapshot.cumulate[(List[Markup], Option[Rendering.Color.Value])]( range, (List(Markup.Empty), None), elements, command_states => { case ((markups, color), Text.Info(_, XML.Elem(markup, _))) if markups.nonEmpty && Document_Status.Command_Status.proper_elements(markup.name) => Some((markup :: markups, color)) case (_, Text.Info(_, XML.Elem(Markup(Markup.BAD, _), _))) => Some((Nil, Some(Rendering.Color.bad))) case (_, Text.Info(_, XML.Elem(Markup(Markup.INTENSIFY, _), _))) => Some((Nil, Some(Rendering.Color.intensify))) case (_, Text.Info(_, XML.Elem(Markup.Entity.Occ(i), _))) if focus(i) => Some((Nil, Some(Rendering.Color.entity))) case (_, 
Text.Info(_, XML.Elem(Markup.Markdown_Bullet(depth), _))) => val color = depth % 4 match { case 1 => Rendering.Color.markdown_bullet1 case 2 => Rendering.Color.markdown_bullet2 case 3 => Rendering.Color.markdown_bullet3 case _ => Rendering.Color.markdown_bullet4 } Some((Nil, Some(color))) case (_, Text.Info(_, Protocol.Dialog(_, serial, result))) => command_states.collectFirst( { case st if st.results.defined(serial) => st.results.get(serial).get }) match { case Some(Protocol.Dialog_Result(res)) if res == result => Some((Nil, Some(Rendering.Color.active_result))) case _ => Some((Nil, Some(Rendering.Color.active))) } case (_, Text.Info(_, elem)) => if (Rendering.active_elements(elem.name)) Some((Nil, Some(Rendering.Color.active))) else None }) color <- result match { case (markups, opt_color) if markups.nonEmpty => val status = Document_Status.Command_Status.make(markups.iterator) if (status.is_unprocessed) Some(Rendering.Color.unprocessed1) else if (status.is_running) Some(Rendering.Color.running1) else if (status.is_canceled) Some(Rendering.Color.canceled) else opt_color case (_, opt_color) => opt_color } } yield Text.Info(r, color) } /* text foreground */ def foreground(range: Text.Range): List[Text.Info[Rendering.Color.Value]] = snapshot.select(range, Rendering.foreground_elements, _ => { case info => Some(Rendering.foreground(info.info.name)) }) /* entity focus */ def entity_focus_defs(range: Text.Range): Rendering.Focus = Rendering.Focus.make( snapshot.cumulate(range, Rendering.Focus.empty, Rendering.entity_elements, _ => { case (focus, Text.Info(_, XML.Elem(Markup.Entity.Def(i), _))) => Some(focus + i) case _ => None })) def entity_focus(range: Text.Range, defs_focus: Rendering.Focus): Rendering.Focus = Rendering.Focus.make( snapshot.cumulate(range, Rendering.Focus.empty, Rendering.entity_elements, _ => { case (focus, Text.Info(_, XML.Elem(Markup.Entity.Ref(i), _))) if defs_focus(i) => Some(focus + i) case _ => None })) /* caret focus */ def caret_focus(caret_range: Text.Range, defs_range: Text.Range): Rendering.Focus = { val focus = entity_focus_defs(caret_range) if (focus.defined) focus else if (defs_range == Text.Range.offside) Rendering.Focus.empty else { val defs_focus = if (defs_range == Text.Range.full) Rendering.Focus.full else entity_focus_defs(defs_range) entity_focus(caret_range, defs_focus) } } def caret_focus_ranges(caret_range: Text.Range, defs_range: Text.Range): List[Text.Range] = { val focus = caret_focus(caret_range, defs_range) if (focus.defined) { snapshot.cumulate[Boolean](defs_range, false, Rendering.entity_elements, _ => { case (_, Text.Info(_, XML.Elem(Markup.Entity.Occ(i), _))) if focus(i) => Some(true) case _ => None }).flatMap(info => if (info.info) Some(info.range) else None) } else Nil } /* messages */ def message_underline_color( elements: Markup.Elements, range: Text.Range ) : List[Text.Info[Rendering.Color.Value]] = { val results = snapshot.cumulate[Int](range, 0, elements, _ => { case (pri, Text.Info(_, elem)) => Some(pri max Rendering.message_pri(elem.name)) }) for { Text.Info(r, pri) <- results color <- Rendering.message_underline_color.get(pri) } yield Text.Info(r, color) } def text_messages(range: Text.Range): List[Text.Info[XML.Elem]] = { val results = snapshot.cumulate[Vector[Command.Results.Entry]]( range, Vector.empty, Rendering.message_elements, command_states => { case (res, Text.Info(_, elem)) => Command.State.get_result_proper(command_states, elem.markup.properties) .map(res :+ _) - case _ => None }) var seen_serials = Set.empty[Long] def 
seen(i: Long): Boolean = { val b = seen_serials(i) seen_serials += i b } for { Text.Info(range, entries) <- results (i, elem) <- entries if !seen(i) } yield Text.Info(range, elem) } /* tooltips */ def timing_threshold: Double = 0.0 private sealed case class Tooltip_Info( range: Text.Range, timing: Timing = Timing.zero, messages: List[(Long, XML.Tree)] = Nil, rev_infos: List[(Boolean, XML.Tree)] = Nil ) { def + (t: Timing): Tooltip_Info = copy(timing = timing + t) def + (r0: Text.Range, serial: Long, tree: XML.Tree): Tooltip_Info = { val r = snapshot.convert(r0) if (range == r) copy(messages = (serial -> tree) :: messages) else copy(range = r, messages = List(serial -> tree)) } def + (r0: Text.Range, important: Boolean, tree: XML.Tree): Tooltip_Info = { val r = snapshot.convert(r0) if (range == r) copy(rev_infos = (important -> tree) :: rev_infos) else copy (range = r, rev_infos = List(important -> tree)) } def timing_info(tree: XML.Tree): Option[XML.Tree] = tree match { case XML.Elem(Markup(Markup.TIMING, _), _) => if (timing.elapsed.seconds >= timing_threshold) Some(XML.Text(timing.message)) else None case _ => Some(tree) } def infos(important: Boolean): List[XML.Tree] = for { (is_important, tree) <- rev_infos.reverse if is_important == important tree1 <- timing_info(tree) } yield tree1 } def perhaps_append_file(node_name: Document.Node.Name, name: String): String = if (Path.is_valid(name)) session.resources.append(node_name, Path.explode(name)) else name def tooltips(elements: Markup.Elements, range: Text.Range): Option[Text.Info[List[XML.Tree]]] = { val results = snapshot.cumulate[Tooltip_Info](range, Tooltip_Info(range), elements, command_states => { case (info, Text.Info(_, XML.Elem(Markup.Timing(t), _))) => Some(info + t) case (info, Text.Info(r0, msg @ XML.Elem(Markup(Markup.BAD, Markup.Serial(i)), body))) if body.nonEmpty => Some(info + (r0, i, msg)) case (info, Text.Info(r0, XML.Elem(Markup(name, props), _))) if Rendering.tooltip_message_elements(name) => for ((i, tree) <- Command.State.get_result_proper(command_states, props)) yield (info + (r0, i, tree)) case (info, Text.Info(r0, XML.Elem(Markup.Entity(kind, name), _))) if kind != "" && kind != Markup.ML_DEF => val kind1 = Word.implode(Word.explode('_', kind)) val txt1 = if (name == "") kind1 else if (kind1 == "") quote(name) else kind1 + " " + quote(name) val info1 = info + (r0, true, XML.Text(txt1)) Some(if (kind == Markup.COMMAND) info1 + (r0, true, XML.elem(Markup.TIMING)) else info1) case (info, Text.Info(r0, XML.Elem(Markup.Path(name), _))) => val file = perhaps_append_file(snapshot.node_name, name) val text = if (name == file) "file " + quote(file) else "path " + quote(name) + "\nfile " + quote(file) Some(info + (r0, true, XML.Text(text))) case (info, Text.Info(r0, XML.Elem(Markup.Doc(name), _))) => val text = "doc " + quote(name) Some(info + (r0, true, XML.Text(text))) case (info, Text.Info(r0, XML.Elem(Markup.Url(name), _))) => Some(info + (r0, true, XML.Text("URL " + quote(name)))) case (info, Text.Info(r0, XML.Elem(Markup(name, _), body))) if name == Markup.SORTING || name == Markup.TYPING => Some(info + (r0, true, Pretty.block(XML.Text("::") :: Pretty.brk(1) :: body))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.CLASS_PARAMETER, _), body))) => Some(info + (r0, true, Pretty.block(0, body))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.ML_TYPING, _), body))) => Some(info + (r0, false, Pretty.block(XML.Text("ML:") :: Pretty.brk(1) :: body))) case (info, Text.Info(r0, Protocol.ML_Breakpoint(breakpoint))) => val 
text = if (session.debugger.breakpoint_state(breakpoint)) "breakpoint (enabled)" else "breakpoint (disabled)" Some(info + (r0, true, XML.Text(text))) case (info, Text.Info(r0, XML.Elem(Markup.Language(language, _, _, _), _))) => val lang = Word.implode(Word.explode('_', language)) Some(info + (r0, true, XML.Text("language: " + lang))) case (info, Text.Info(r0, XML.Elem(Markup.Expression(kind), _))) => val descr = if (kind == "") "expression" else "expression: " + kind Some(info + (r0, true, XML.Text(descr))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.MARKDOWN_PARAGRAPH, _), _))) => Some(info + (r0, true, XML.Text("Markdown: paragraph"))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.MARKDOWN_ITEM, _), _))) => Some(info + (r0, true, XML.Text("Markdown: item"))) case (info, Text.Info(r0, XML.Elem(Markup.Markdown_List(kind), _))) => Some(info + (r0, true, XML.Text("Markdown: " + kind))) case (info, Text.Info(r0, XML.Elem(Markup(name, _), _))) => Rendering.tooltip_descriptions.get(name).map(desc => info + (r0, true, XML.Text(desc))) }).map(_.info) if (results.isEmpty) None else { val r = Text.Range(results.head.range.start, results.last.range.stop) val all_tips = results.flatMap(_.messages).foldLeft(SortedMap.empty[Long, XML.Tree])(_ + _) .iterator.map(_._2).toList ::: results.flatMap(res => res.infos(true)) ::: results.flatMap(res => res.infos(false)).lastOption.toList if (all_tips.isEmpty) None else Some(Text.Info(r, all_tips)) } } /* messages */ def warnings(range: Text.Range): List[Text.Markup] = snapshot.select(range, Rendering.warning_elements, _ => Some(_)).map(_.info) def errors(range: Text.Range): List[Text.Markup] = snapshot.select(range, Rendering.error_elements, _ => Some(_)).map(_.info) /* command status overview */ def overview_color(range: Text.Range): Option[Rendering.Color.Value] = { if (snapshot.is_outdated) None else { val results = snapshot.cumulate[List[Markup]](range, Nil, Document_Status.Command_Status.liberal_elements, _ => { case (status, Text.Info(_, elem)) => Some(elem.markup :: status) }, status = true) if (results.isEmpty) None else { val status = Document_Status.Command_Status.make(results.iterator.flatMap(_.info)) if (status.is_running) Some(Rendering.Color.running) else if (status.is_failed) Some(Rendering.Color.error) else if (status.is_warned) Some(Rendering.Color.warning) else if (status.is_unprocessed) Some(Rendering.Color.unprocessed) else None } } } /* antiquoted text */ def antiquoted(range: Text.Range): Option[Text.Range] = snapshot.cumulate[Option[Text.Range]](range, None, Rendering.antiquoted_elements, _ => { case (res, info) => if (res.isEmpty) Some(Some(info.range)) else None }).headOption.flatMap(_.info) /* meta data */ def meta_data(range: Text.Range): Properties.T = { val results = snapshot.cumulate[Properties.T](range, Nil, Rendering.meta_data_elements, _ => { case (res, Text.Info(_, elem)) => val plain_text = XML.content(elem) Some((elem.name -> plain_text) :: res) }) Library.distinct(results.flatMap(_.info.reverse)) } /* document tags */ def document_tags(range: Text.Range): List[String] = { val results = snapshot.cumulate[List[String]](range, Nil, Rendering.document_tag_elements, _ => { case (res, Text.Info(_, XML.Elem(Markup.Document_Tag(name), _))) => Some(name :: res) case _ => None }) Library.distinct(results.flatMap(_.info.reverse)) } } diff --git a/src/Pure/System/isabelle_system.scala b/src/Pure/System/isabelle_system.scala --- a/src/Pure/System/isabelle_system.scala +++ b/src/Pure/System/isabelle_system.scala @@ -1,499 +1,499 
@@ /* Title: Pure/System/isabelle_system.scala Author: Makarius Miscellaneous Isabelle system operations. */ package isabelle import java.util.{Map => JMap, HashMap} import java.io.{File => JFile, IOException} import java.nio.file.{Path => JPath, Files, SimpleFileVisitor, FileVisitResult, StandardCopyOption, FileSystemException} import java.nio.file.attribute.BasicFileAttributes import scala.jdk.CollectionConverters._ object Isabelle_System { /* settings environment */ def settings(putenv: List[(String, String)] = Nil): JMap[String, String] = { val env0 = isabelle.setup.Environment.settings() if (putenv.isEmpty) env0 else { val env = new HashMap(env0) for ((a, b) <- putenv) env.put(a, b) env } } def getenv(name: String, env: JMap[String, String] = settings()): String = Option(env.get(name)).getOrElse("") def getenv_strict(name: String, env: JMap[String, String] = settings()): String = proper_string(getenv(name, env)) getOrElse error("Undefined Isabelle environment variable: " + quote(name)) /* services */ abstract class Service @volatile private var _services: Option[List[Class[Service]]] = None def services(): List[Class[Service]] = { if (_services.isEmpty) init() // unsynchronized check _services.get } def make_services[C](c: Class[C]): List[C] = for { c1 <- services() if Library.is_subclass(c1, c) } yield c1.getDeclaredConstructor().newInstance().asInstanceOf[C] /* init settings + services */ def make_services(): List[Class[Service]] = { def make(where: String, names: List[String]): List[Class[Service]] = { for (name <- names) yield { def err(msg: String): Nothing = error("Bad Isabelle/Scala service " + quote(name) + " in " + where + "\n" + msg) try { Class.forName(name).asInstanceOf[Class[Service]] } catch { case _: ClassNotFoundException => err("Class not found") case exn: Throwable => err(Exn.message(exn)) } } } def from_env(variable: String): List[Class[Service]] = make(quote(variable), space_explode(':', getenv_strict(variable))) def from_jar(platform_jar: String): List[Class[Service]] = make(quote(platform_jar), isabelle.setup.Build.get_services(JPath.of(platform_jar)).asScala.toList) from_env("ISABELLE_SCALA_SERVICES") ::: Scala.class_path().flatMap(from_jar) } def init(isabelle_root: String = "", cygwin_root: String = ""): Unit = { isabelle.setup.Environment.init(isabelle_root, cygwin_root) synchronized { if (_services.isEmpty) { _services = Some(make_services()) } } } /* getetc -- static distribution parameters */ def getetc(name: String, root: Path = Path.ISABELLE_HOME): Option[String] = { val path = root + Path.basic("etc") + Path.basic(name) if (path.is_file) { Library.trim_split_lines(File.read(path)) match { case Nil => None case List(s) => Some(s) case _ => error("Single line expected in " + path.absolute) } } else None } /* Isabelle distribution identification */ def isabelle_id(root: Path = Path.ISABELLE_HOME): String = getetc("ISABELLE_ID", root = root) orElse Mercurial.archive_id(root) getOrElse { if (Mercurial.is_repository(root)) Mercurial.repository(root).parent() else error("Failed to identify Isabelle distribution " + root.expand) } object Isabelle_Id extends Scala.Fun_String("isabelle_id") { val here = Scala_Project.here def apply(arg: String): String = isabelle_id() } def isabelle_tags(root: Path = Path.ISABELLE_HOME): String = getetc("ISABELLE_TAGS", root = root) orElse Mercurial.archive_tags(root) getOrElse { if (Mercurial.is_repository(root)) { val hg = Mercurial.repository(root) hg.tags(rev = hg.parent()) } else "" } def 
export_isabelle_identifier(isabelle_identifier: String): String = if (isabelle_identifier == "") "" else "export ISABELLE_IDENTIFIER=" + Bash.string(isabelle_identifier) + "\n" def isabelle_identifier(): Option[String] = proper_string(getenv("ISABELLE_IDENTIFIER")) def isabelle_heading(): String = isabelle_identifier() match { case None => "" case Some(version) => " (" + version + ")" } def isabelle_name(): String = getenv_strict("ISABELLE_NAME") def identification(): String = "Isabelle" + (try { "/" + isabelle_id () } catch { case ERROR(_) => "" }) + isabelle_heading() /** file-system operations **/ /* scala functions */ private def apply_paths(args: List[String], fun: List[Path] => Unit): List[String] = { fun(args.map(Path.explode)) Nil } private def apply_paths1(args: List[String], fun: Path => Unit): List[String] = - apply_paths(args, { case List(path) => fun(path) }) + apply_paths(args, { case List(path) => fun(path) case _ => ??? }) private def apply_paths2(args: List[String], fun: (Path, Path) => Unit): List[String] = - apply_paths(args, { case List(path1, path2) => fun(path1, path2) }) + apply_paths(args, { case List(path1, path2) => fun(path1, path2) case _ => ??? }) private def apply_paths3(args: List[String], fun: (Path, Path, Path) => Unit): List[String] = - apply_paths(args, { case List(path1, path2, path3) => fun(path1, path2, path3) }) + apply_paths(args, { case List(path1, path2, path3) => fun(path1, path2, path3) case _ => ??? }) /* permissions */ def chmod(arg: String, path: Path): Unit = bash("chmod " + arg + " " + File.bash_path(path)).check def chown(arg: String, path: Path): Unit = bash("chown " + arg + " " + File.bash_path(path)).check /* directories */ def make_directory(path: Path): Path = { if (!path.is_dir) { try { Files.createDirectories(path.java_path) } catch { case ERROR(_) => error("Failed to create directory: " + path.absolute) } } path } def new_directory(path: Path): Path = if (path.is_dir) error("Directory already exists: " + path.absolute) else make_directory(path) def copy_dir(dir1: Path, dir2: Path): Unit = { val res = bash("cp -a " + File.bash_path(dir1) + " " + File.bash_path(dir2)) if (!res.ok) { cat_error("Failed to copy directory " + dir1.absolute + " to " + dir2.absolute, res.err) } } def with_copy_dir[A](dir1: Path, dir2: Path)(body: => A): A = { if (dir2.is_file || dir2.is_dir) error("Directory already exists: " + dir2.absolute) else { try { copy_dir(dir1, dir2); body } finally { rm_tree(dir2 ) } } } object Make_Directory extends Scala.Fun_Strings("make_directory") { val here = Scala_Project.here def apply(args: List[String]): List[String] = apply_paths1(args, make_directory) } object Copy_Dir extends Scala.Fun_Strings("copy_dir") { val here = Scala_Project.here def apply(args: List[String]): List[String] = apply_paths2(args, copy_dir) } /* copy files */ def copy_file(src: JFile, dst: JFile): Unit = { val target = if (dst.isDirectory) new JFile(dst, src.getName) else dst if (!File.eq(src, target)) { try { Files.copy(src.toPath, target.toPath, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING) } catch { case ERROR(msg) => cat_error("Failed to copy file " + File.path(src).absolute + " to " + File.path(dst).absolute, msg) } } } def copy_file(src: Path, dst: Path): Unit = copy_file(src.file, dst.file) def copy_file_base(base_dir: Path, src: Path, target_dir: Path): Unit = { val src1 = src.expand val src1_dir = src1.dir if (!src1.starts_basic) error("Illegal path specification " + src1 + " beyond base directory") 
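// replicate the relative directory hierarchy under target_dir before copying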
copy_file(base_dir + src1, Isabelle_System.make_directory(target_dir + src1_dir)) } object Copy_File extends Scala.Fun_Strings("copy_file") { val here = Scala_Project.here def apply(args: List[String]): List[String] = apply_paths2(args, copy_file) } object Copy_File_Base extends Scala.Fun_Strings("copy_file_base") { val here = Scala_Project.here def apply(args: List[String]): List[String] = apply_paths3(args, copy_file_base) } /* move files */ def move_file(src: JFile, dst: JFile): Unit = { val target = if (dst.isDirectory) new JFile(dst, src.getName) else dst if (!File.eq(src, target)) Files.move(src.toPath, target.toPath, StandardCopyOption.REPLACE_EXISTING) } def move_file(src: Path, dst: Path): Unit = move_file(src.file, dst.file) /* symbolic link */ def symlink(src: Path, dst: Path, force: Boolean = false, native: Boolean = false): Unit = { val src_file = src.file val dst_file = dst.file val target = if (dst_file.isDirectory) new JFile(dst_file, src_file.getName) else dst_file if (force) target.delete def cygwin_link(): Unit = { if (native) { error("Failed to create native symlink on Windows: " + quote(src_file.toString) + "\n(but it could work as Administrator)") } else isabelle.setup.Environment.cygwin_link(File.standard_path(src), target) } try { Files.createSymbolicLink(target.toPath, src_file.toPath) } catch { case _: UnsupportedOperationException if Platform.is_windows => cygwin_link() case _: FileSystemException if Platform.is_windows => cygwin_link() } } /* tmp files */ def isabelle_tmp_prefix(): JFile = { val path = Path.explode("$ISABELLE_TMP_PREFIX") path.file.mkdirs // low-level mkdirs to avoid recursion via Isabelle environment File.platform_file(path) } def tmp_file(name: String, ext: String = "", base_dir: JFile = isabelle_tmp_prefix()): JFile = { val suffix = if (ext == "") "" else "." 
+ ext val file = Files.createTempFile(base_dir.toPath, name, suffix).toFile file.deleteOnExit() file } def with_tmp_file[A](name: String, ext: String = "")(body: Path => A): A = { val file = tmp_file(name, ext) try { body(File.path(file)) } finally { file.delete } } /* tmp dirs */ def rm_tree(root: JFile): Unit = { root.delete if (root.isDirectory) { Files.walkFileTree(root.toPath, new SimpleFileVisitor[JPath] { override def visitFile(file: JPath, attrs: BasicFileAttributes): FileVisitResult = { try { Files.deleteIfExists(file) } catch { case _: IOException => } FileVisitResult.CONTINUE } override def postVisitDirectory(dir: JPath, e: IOException): FileVisitResult = { if (e == null) { try { Files.deleteIfExists(dir) } catch { case _: IOException => } FileVisitResult.CONTINUE } else throw e } } ) } } def rm_tree(root: Path): Unit = rm_tree(root.file) object Rm_Tree extends Scala.Fun_Strings("rm_tree") { val here = Scala_Project.here def apply(args: List[String]): List[String] = apply_paths1(args, rm_tree) } def tmp_dir(name: String, base_dir: JFile = isabelle_tmp_prefix()): JFile = { val dir = Files.createTempDirectory(base_dir.toPath, name).toFile dir.deleteOnExit() dir } def with_tmp_dir[A](name: String)(body: Path => A): A = { val dir = tmp_dir(name) try { body(File.path(dir)) } finally { rm_tree(dir) } } /* quasi-atomic update of directory */ def update_directory(dir: Path, f: Path => Unit): Unit = { val new_dir = dir.ext("new") val old_dir = dir.ext("old") rm_tree(new_dir) rm_tree(old_dir) f(new_dir) if (dir.is_dir) move_file(dir, old_dir) move_file(new_dir, dir) rm_tree(old_dir) } /** external processes **/ /* GNU bash */ def bash(script: String, description: String = "", cwd: JFile = null, env: JMap[String, String] = settings(), redirect: Boolean = false, input: String = "", progress_stdout: String => Unit = (_: String) => (), progress_stderr: String => Unit = (_: String) => (), watchdog: Option[Bash.Watchdog] = None, strict: Boolean = true, cleanup: () => Unit = () => () ): Process_Result = { Bash.process(script, description = description, cwd = cwd, env = env, redirect = redirect, cleanup = cleanup). 
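// run the process and collect its Process_Result, streaming output to the progress callbacks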
result(input = input, progress_stdout = progress_stdout, progress_stderr = progress_stderr, watchdog = watchdog, strict = strict) } /* command-line tools */ def require_command(cmd: String, test: String = "--version"): Unit = { if (!bash(Bash.string(cmd) + " " + test).ok) error("Missing system command: " + quote(cmd)) } private lazy val gnutar_check: Boolean = try { bash("tar --version").check.out.containsSlice("GNU tar") || error("") } catch { case ERROR(_) => false } def gnutar( args: String, dir: Path = Path.current, original_owner: Boolean = false, strip: Int = 0, redirect: Boolean = false ): Process_Result = { val options = (if (dir.is_current) "" else "-C " + File.bash_path(dir) + " ") + (if (original_owner) "" else "--owner=root --group=staff ") + (if (strip <= 0) "" else "--strip-components=" + strip + " ") if (gnutar_check) bash("tar " + options + args, redirect = redirect) else error("Expected to find GNU tar executable") } def make_patch(base_dir: Path, src: Path, dst: Path, diff_options: String = ""): String = { with_tmp_file("patch") { patch => Isabelle_System.bash( "diff -ru " + diff_options + " -- " + File.bash_path(src) + " " + File.bash_path(dst) + " > " + File.bash_path(patch), cwd = base_dir.file).check_rc(_ <= 1) File.read(patch) } } def hostname(): String = bash("hostname -s").check.out def open(arg: String): Unit = bash("exec \"$ISABELLE_OPEN\" " + Bash.string(arg) + " >/dev/null 2>/dev/null &") def pdf_viewer(arg: Path): Unit = bash("exec \"$PDF_VIEWER\" " + File.bash_path(arg) + " >/dev/null 2>/dev/null &") def open_external_file(name: String): Boolean = { val ext = Library.take_suffix((c: Char) => c != '.', name.toList)._2.mkString val external = ext.nonEmpty && Library.space_explode(':', getenv("ISABELLE_EXTERNAL_FILES")).contains(ext) if (external) { if (ext == "pdf" && Path.is_wellformed(name)) pdf_viewer(Path.explode(name)) else open(name) } external } /** Isabelle resources **/ /* repository clone with Admin */ def admin(): Boolean = Path.explode("~~/Admin").is_dir /* default logic */ def default_logic(args: String*): String = { args.find(_ != "") match { case Some(logic) => logic case None => getenv_strict("ISABELLE_LOGIC") } } /* download file */ def download(url_name: String, progress: Progress = new Progress): HTTP.Content = { val url = Url(url_name) progress.echo("Getting " + quote(url_name)) try { HTTP.Client.get(url) } catch { case ERROR(msg) => cat_error("Failed to download " + quote(url_name), msg) } } def download_file(url_name: String, file: Path, progress: Progress = new Progress): Unit = Bytes.write(file, download(url_name, progress = progress).bytes) object Download extends Scala.Fun("download", thread = true) { val here = Scala_Project.here override def invoke(args: List[Bytes]): List[Bytes] = - args match { case List(url) => List(download(url.text).bytes) } + args match { case List(url) => List(download(url.text).bytes) case _ => ??? } } /* repositories */ val isabelle_repository: Mercurial.Server = Mercurial.Server("https://isabelle.sketis.net/repos/isabelle") val afp_repository: Mercurial.Server = Mercurial.Server("https://isabelle.sketis.net/repos/afp-devel") def official_releases(): List[String] = Library.trim_split_lines( isabelle_repository.read_file(Path.explode("Admin/Release/official"))) } diff --git a/src/Pure/Thy/bibtex.scala b/src/Pure/Thy/bibtex.scala --- a/src/Pure/Thy/bibtex.scala +++ b/src/Pure/Thy/bibtex.scala @@ -1,687 +1,688 @@ /* Title: Pure/Thy/bibtex.scala Author: Makarius BibTeX support. 
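This covers parsing of .bib files, database checking via the bibtex executable, and citation completion.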
*/ package isabelle import java.io.{File => JFile} import scala.collection.mutable import scala.util.parsing.combinator.RegexParsers import scala.util.parsing.input.Reader object Bibtex { /** file format **/ def is_bibtex(name: String): Boolean = name.endsWith(".bib") class File_Format extends isabelle.File_Format { val format_name: String = "bibtex" val file_ext: String = "bib" override def theory_suffix: String = "bibtex_file" override def theory_content(name: String): String = """theory "bib" imports Pure begin bibtex_file """ + Outer_Syntax.quote_string(name) + """ end""" override def html_document(snapshot: Document.Snapshot): Option[Presentation.HTML_Document] = { val name = snapshot.node_name if (detect(name.node)) { val title = "Bibliography " + quote(snapshot.node_name.path.file_name) val content = Isabelle_System.with_tmp_file("bib", "bib") { bib => File.write(bib, snapshot.node.source) Bibtex.html_output(List(bib), style = "unsort", title = title) } Some(Presentation.HTML_Document(title, content)) } else None } } /** bibtex errors **/ def bibtex_errors(dir: Path, root_name: String): List[String] = { val log_path = dir + Path.explode(root_name).ext("blg") if (log_path.is_file) { val Error1 = """^(I couldn't open database file .+)$""".r val Error2 = """^(I found no .+)$""".r val Error3 = """^(.+)---line (\d+) of file (.+)""".r Line.logical_lines(File.read(log_path)).flatMap(line => line match { case Error1(msg) => Some("Bibtex error: " + msg) case Error2(msg) => Some("Bibtex error: " + msg) case Error3(msg, Value.Int(l), file) => val path = File.standard_path(file) if (Path.is_wellformed(path)) { val pos = Position.Line_File(l, (dir + Path.explode(path)).canonical.implode) Some("Bibtex error" + Position.here(pos) + ":\n " + msg) } else None case _ => None }) } else Nil } /** check database **/ def check_database(database: String): (List[(String, Position.T)], List[(String, Position.T)]) = { val chunks = parse(Line.normalize(database)) var chunk_pos = Map.empty[String, Position.T] val tokens = new mutable.ListBuffer[(Token, Position.T)] var line = 1 var offset = 1 def make_pos(length: Int): Position.T = Position.Offset(offset) ::: Position.End_Offset(offset + length) ::: Position.Line(line) def advance_pos(tok: Token): Unit = { for (s <- Symbol.iterator(tok.source)) { if (Symbol.is_newline(s)) line += 1 offset += 1 } } def get_line_pos(l: Int): Position.T = if (0 < l && l <= tokens.length) tokens(l - 1)._2 else Position.none for (chunk <- chunks) { val name = chunk.name if (name != "" && !chunk_pos.isDefinedAt(name)) { chunk_pos += (name -> make_pos(chunk.heading_length)) } for (tok <- chunk.tokens) { tokens += (tok.copy(source = tok.source.replace("\n", " ")) -> make_pos(tok.source.length)) advance_pos(tok) } } Isabelle_System.with_tmp_dir("bibtex") { tmp_dir => File.write(tmp_dir + Path.explode("root.bib"), tokens.iterator.map(p => p._1.source).mkString("", "\n", "\n")) File.write(tmp_dir + Path.explode("root.aux"), "\\bibstyle{plain}\n\\bibdata{root}\n\\citation{*}") Isabelle_System.bash("\"$ISABELLE_BIBTEX\" root", cwd = tmp_dir.file) val Error = """^(.*)---line (\d+) of file root.bib$""".r val Warning = """^Warning--(.+)$""".r val Warning_Line = """--line (\d+) of file root.bib$""".r val Warning_in_Chunk = """^Warning--(.+) in (.+)$""".r val log_file = tmp_dir + Path.explode("root.blg") val lines = if (log_file.is_file) Line.logical_lines(File.read(log_file)) else Nil val (errors, warnings) = if (lines.isEmpty) (Nil, Nil) else { lines.zip(lines.tail ::: List("")).flatMap( { 
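// pair each log line with its successor: a warning may continue on a separate "--line N" line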
case (Error(msg, Value.Int(l)), _) => Some((true, (msg, get_line_pos(l)))) case (Warning_in_Chunk(msg, name), _) if chunk_pos.isDefinedAt(name) => Some((false, (Word.capitalize(msg + " in entry " + quote(name)), chunk_pos(name)))) case (Warning(msg), Warning_Line(Value.Int(l))) => Some((false, (Word.capitalize(msg), get_line_pos(l)))) case (Warning(msg), _) => Some((false, (Word.capitalize(msg), Position.none))) case _ => None } ).partition(_._1) } (errors.map(_._2), warnings.map(_._2)) } } object Check_Database extends Scala.Fun_String("bibtex_check_database") { val here = Scala_Project.here def apply(database: String): String = { import XML.Encode._ YXML.string_of_body(pair(list(pair(string, properties)), list(pair(string, properties)))( check_database(database))) } } /** document model **/ /* entries */ def entries(text: String): List[Text.Info[String]] = { val result = new mutable.ListBuffer[Text.Info[String]] var offset = 0 for (chunk <- Bibtex.parse(text)) { val end_offset = offset + chunk.source.length if (chunk.name != "" && !chunk.is_command) result += Text.Info(Text.Range(offset, end_offset), chunk.name) offset = end_offset } result.toList } def entries_iterator[A, B <: Document.Model]( models: Map[A, B] ): Iterator[Text.Info[(String, B)]] = { for { (_, model) <- models.iterator info <- model.bibtex_entries.iterator } yield info.map((_, model)) } /* completion */ def completion[A, B <: Document.Model]( history: Completion.History, rendering: Rendering, caret: Text.Offset, models: Map[A, B] ): Option[Completion.Result] = { for { Text.Info(r, name) <- rendering.citations(rendering.before_caret_range(caret)).headOption name1 <- Completion.clean_name(name) original <- rendering.get_text(r) original1 <- Completion.clean_name(Library.perhaps_unquote(original)) entries = (for { Text.Info(_, (entry, _)) <- entries_iterator(models) if entry.toLowerCase.containsSlice(name1.toLowerCase) && entry != original1 } yield entry).toList if entries.nonEmpty items = entries.sorted.map({ case entry => val full_name = Long_Name.qualify(Markup.CITATION, entry) val description = List(entry, "(BibTeX entry)") val replacement = quote(entry) Completion.Item(r, original, full_name, description, replacement, 0, false) }).sorted(history.ordering).take(rendering.options.int("completion_limit")) } yield Completion.Result(r, original, false, items) } /** content **/ private val months = List( "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") def is_month(s: String): Boolean = months.contains(s.toLowerCase) private val commands = List("preamble", "string") def is_command(s: String): Boolean = commands.contains(s.toLowerCase) sealed case class Entry( kind: String, required: List[String], optional_crossref: List[String], optional_other: List[String] ) { val optional_standard: List[String] = List("url", "doi", "ee") def is_required(s: String): Boolean = required.contains(s.toLowerCase) def is_optional(s: String): Boolean = optional_crossref.contains(s.toLowerCase) || optional_other.contains(s.toLowerCase) || optional_standard.contains(s.toLowerCase) def fields: List[String] = required ::: optional_crossref ::: optional_other ::: optional_standard def template: String = "@" + kind + "{,\n" + fields.map(x => " " + x + " = {},\n").mkString + "}\n" } val known_entries: List[Entry] = List( Entry("Article", List("author", "title"), List("journal", "year"), List("volume", "number", "pages", "month", "note")), Entry("InProceedings", List("author", "title"), List("booktitle", "year"), 
List("editor", "volume", "number", "series", "pages", "month", "address", "organization", "publisher", "note")), Entry("InCollection", List("author", "title", "booktitle"), List("publisher", "year"), List("editor", "volume", "number", "series", "type", "chapter", "pages", "edition", "month", "address", "note")), Entry("InBook", List("author", "editor", "title", "chapter"), List("publisher", "year"), List("volume", "number", "series", "type", "address", "edition", "month", "pages", "note")), Entry("Proceedings", List("title", "year"), List(), List("booktitle", "editor", "volume", "number", "series", "address", "month", "organization", "publisher", "note")), Entry("Book", List("author", "editor", "title"), List("publisher", "year"), List("volume", "number", "series", "address", "edition", "month", "note")), Entry("Booklet", List("title"), List(), List("author", "howpublished", "address", "month", "year", "note")), Entry("PhdThesis", List("author", "title", "school", "year"), List(), List("type", "address", "month", "note")), Entry("MastersThesis", List("author", "title", "school", "year"), List(), List("type", "address", "month", "note")), Entry("TechReport", List("author", "title", "institution", "year"), List(), List("type", "number", "address", "month", "note")), Entry("Manual", List("title"), List(), List("author", "organization", "address", "edition", "month", "year", "note")), Entry("Unpublished", List("author", "title", "note"), List(), List("month", "year")), Entry("Misc", List(), List(), List("author", "title", "howpublished", "month", "year", "note"))) def get_entry(kind: String): Option[Entry] = known_entries.find(entry => entry.kind.toLowerCase == kind.toLowerCase) def is_entry(kind: String): Boolean = get_entry(kind).isDefined /** tokens and chunks **/ object Token { object Kind extends Enumeration { val COMMAND = Value("command") val ENTRY = Value("entry") val KEYWORD = Value("keyword") val NAT = Value("natural number") val STRING = Value("string") val NAME = Value("name") val IDENT = Value("identifier") val SPACE = Value("white space") val COMMENT = Value("ignored text") val ERROR = Value("bad input") } } sealed case class Token(kind: Token.Kind.Value, source: String) { def is_kind: Boolean = kind == Token.Kind.COMMAND || kind == Token.Kind.ENTRY || kind == Token.Kind.IDENT def is_name: Boolean = kind == Token.Kind.NAME || kind == Token.Kind.IDENT def is_ignored: Boolean = kind == Token.Kind.SPACE || kind == Token.Kind.COMMENT def is_malformed: Boolean = kind == Token.Kind.ERROR def is_open: Boolean = kind == Token.Kind.KEYWORD && (source == "{" || source == "(") } case class Chunk(kind: String, tokens: List[Token]) { val source = tokens.map(_.source).mkString private val content: Option[List[Token]] = tokens match { case Token(Token.Kind.KEYWORD, "@") :: body if body.nonEmpty => (body.init.filterNot(_.is_ignored), body.last) match { case (tok :: Token(Token.Kind.KEYWORD, "{") :: toks, Token(Token.Kind.KEYWORD, "}")) if tok.is_kind => Some(toks) case (tok :: Token(Token.Kind.KEYWORD, "(") :: toks, Token(Token.Kind.KEYWORD, ")")) if tok.is_kind => Some(toks) case _ => None } case _ => None } def name: String = content match { case Some(tok :: _) if tok.is_name => tok.source case _ => "" } def heading_length: Int = if (name == "") 1 else { tokens.takeWhile(tok => !tok.is_open).foldLeft(0) { case (n, tok) => n + tok.source.length } } def is_ignored: Boolean = kind == "" && tokens.forall(_.is_ignored) def is_malformed: Boolean = kind == "" || tokens.exists(_.is_malformed) def 
  /** parsing **/

  // context of partial line-oriented scans
  abstract class Line_Context
  case object Ignored extends Line_Context
  case object At extends Line_Context
  case class Item_Start(kind: String) extends Line_Context
  case class Item_Open(kind: String, end: String) extends Line_Context
  case class Item(kind: String, end: String, delim: Delimited) extends Line_Context

  case class Delimited(quoted: Boolean, depth: Int)
  val Closed = Delimited(false, 0)

  private def token(kind: Token.Kind.Value)(source: String): Token = Token(kind, source)
  private def keyword(source: String): Token = Token(Token.Kind.KEYWORD, source)


  // See also https://ctan.org/tex-archive/biblio/bibtex/base/bibtex.web
  // module @<Scan for and process a \.{.bib} command or database entry@>.

  object Parsers extends RegexParsers {
    /* white space and comments */

    override val whiteSpace = "".r

    private val space = """[ \t\n\r]+""".r ^^ token(Token.Kind.SPACE)
    private val spaces = rep(space)


    /* ignored text */

    private val ignored: Parser[Chunk] =
      rep1("""(?i)([^@]+|@[ \t]*comment)""".r) ^^ {
        case ss => Chunk("", List(Token(Token.Kind.COMMENT, ss.mkString))) }

    private def ignored_line: Parser[(Chunk, Line_Context)] =
      ignored ^^ { case a => (a, Ignored) }


    /* delimited string: outermost "..." or {...} and body with balanced {...} */

    // see also bibtex.web: scan_a_field_token_and_eat_white, scan_balanced_braces
    private def delimited_depth(delim: Delimited): Parser[(String, Delimited)] =
      new Parser[(String, Delimited)] {
        require(if (delim.quoted) delim.depth > 0 else delim.depth >= 0, "bad delimiter depth")

        def apply(in: Input) = {
          val start = in.offset
          val end = in.source.length

          var i = start
          var q = delim.quoted
          var d = delim.depth
          var finished = false
          while (!finished && i < end) {
            val c = in.source.charAt(i)

            if (c == '"' && d == 0) { i += 1; d = 1; q = true }
            else if (c == '"' && d == 1 && q) { i += 1; d = 0; q = false; finished = true }
            else if (c == '{') { i += 1; d += 1 }
            else if (c == '}') {
              if (d == 1 && !q || d > 1) { i += 1; d -= 1; if (d == 0) finished = true }
              else { i = start; finished = true }
            }
            else if (d > 0) i += 1
            else finished = true
          }
          if (i == start) Failure("bad input", in)
          else {
            val s = in.source.subSequence(start, i).toString
            Success((s, Delimited(q, d)), in.drop(i - start))
          }
        }
      }.named("delimited_depth")

    private def delimited: Parser[Token] =
      delimited_depth(Closed) ^?
        { case (s, delim) if delim == Closed => Token(Token.Kind.STRING, s) }
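    // Illustrative trace, not part of the original sources: starting from
    // Closed = Delimited(false, 0), scanning the field value {a {b} c} takes
    // the depth to 1 at the outer brace, 2 inside {b}, back to 1, and to 0 at
    // the final brace, so delimited succeeds with "{a {b} c}" as one STRING
    // token. If the input ends while the depth is still positive, the pending
    // Delimited state is carried in the Item line context, and the scan
    // resumes on the next line via delimited_line.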
    private def recover_delimited: Parser[Token] =
      """["{][^@]*""".r ^^ token(Token.Kind.ERROR)

    def delimited_line(ctxt: Item): Parser[(Chunk, Line_Context)] =
      delimited_depth(ctxt.delim) ^^ { case (s, delim1) =>
        (Chunk(ctxt.kind, List(Token(Token.Kind.STRING, s))), ctxt.copy(delim = delim1)) } |
      recover_delimited ^^ { case a => (Chunk(ctxt.kind, List(a)), Ignored) }


    /* other tokens */

    private val at = "@" ^^ keyword

    private val nat = "[0-9]+".r ^^ token(Token.Kind.NAT)

    private val name = """[\x21-\x7f&&[^"#%'(),={}]]+""".r ^^ token(Token.Kind.NAME)

    private val identifier =
      """[\x21-\x7f&&[^"#%'(),={}0-9]][\x21-\x7f&&[^"#%'(),={}]]*""".r

    private val ident = identifier ^^ token(Token.Kind.IDENT)

    val other_token = "[=#,]".r ^^ keyword | (nat | (ident | space))


    /* body */

    private val body =
      delimited | (recover_delimited | other_token)

    private def body_line(ctxt: Item) =
      if (ctxt.delim.depth > 0)
        delimited_line(ctxt)
      else
        delimited_line(ctxt) |
        other_token ^^ { case a => (Chunk(ctxt.kind, List(a)), ctxt) } |
        ctxt.end ^^ { case a => (Chunk(ctxt.kind, List(keyword(a))), Ignored) }


    /* items: command or entry */

    private val item_kind =
      identifier ^^ { case a =>
        val kind =
          if (is_command(a)) Token.Kind.COMMAND
          else if (is_entry(a)) Token.Kind.ENTRY
          else Token.Kind.IDENT
        Token(kind, a)
      }

    private val item_begin =
      "{" ^^ { case a => ("}", keyword(a)) } |
      "(" ^^ { case a => (")", keyword(a)) }

    private def item_name(kind: String) =
      kind.toLowerCase match {
        case "preamble" => failure("")
        case "string" => identifier ^^ token(Token.Kind.NAME)
        case _ => name
      }

    private val item_start =
      at ~ spaces ~ item_kind ~ spaces ^^
        { case a ~ b ~ c ~ d => (c.source, List(a) ::: b ::: List(c) ::: d) }

    private val item: Parser[Chunk] =
      (item_start ~ item_begin ~ spaces) into
        { case (kind, a) ~ ((end, b)) ~ c =>
            opt(item_name(kind)) ~ rep(body) ~ opt(end ^^ keyword) ^^ {
              case d ~ e ~ f => Chunk(kind, a ::: List(b) ::: c ::: d.toList ::: e ::: f.toList) } }

    private val recover_item: Parser[Chunk] =
      at ~ "[^@]*".r ^^ { case a ~ b => Chunk("", List(a, Token(Token.Kind.ERROR, b))) }


    /* chunks */

    val chunk: Parser[Chunk] = ignored | (item | recover_item)

    def chunk_line(ctxt: Line_Context): Parser[(Chunk, Line_Context)] = {
      ctxt match {
        case Ignored =>
          ignored_line |
          at ^^ { case a => (Chunk("", List(a)), At) }

        case At =>
          space ^^ { case a => (Chunk("", List(a)), ctxt) } |
          item_kind ^^ { case a => (Chunk(a.source, List(a)), Item_Start(a.source)) } |
          recover_item ^^ { case a => (a, Ignored) } |
          ignored_line

        case Item_Start(kind) =>
          space ^^ { case a => (Chunk(kind, List(a)), ctxt) } |
          item_begin ^^ { case (end, a) => (Chunk(kind, List(a)), Item_Open(kind, end)) } |
          recover_item ^^ { case a => (a, Ignored) } |
          ignored_line

        case Item_Open(kind, end) =>
          space ^^ { case a => (Chunk(kind, List(a)), ctxt) } |
          item_name(kind) ^^ { case a => (Chunk(kind, List(a)), Item(kind, end, Closed)) } |
          body_line(Item(kind, end, Closed)) |
          ignored_line

        case item_ctxt: Item =>
          body_line(item_ctxt) |
          ignored_line

        case _ => failure("")
      }
    }
  }


  /* parse */

  def parse(input: CharSequence): List[Chunk] =
    Parsers.parseAll(Parsers.rep(Parsers.chunk), Scan.char_reader(input)) match {
      case Parsers.Success(result, _) => result
      case _ => error("Unexpected failure to parse input:\n" + input.toString)
    }
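  // Usage sketch, not part of the original sources (assumes the enclosing
  // isabelle.Bibtex object is on the classpath):
  //
  //   val chunks = Bibtex.parse("@Article{smith2020, author = {Smith}}")
  //   val names = chunks.filter(_.is_entry).map(_.name)  // List("smith2020")
  //
  // parse_line below is the incremental variant: feed one editor line at a
  // time and thread the returned Line_Context into the next call.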
  def parse_line(input: CharSequence, context: Line_Context): (List[Chunk], Line_Context) = {
    var in: Reader[Char] = Scan.char_reader(input)
    val chunks = new mutable.ListBuffer[Chunk]
    var ctxt = context
    while (!in.atEnd) {
-      Parsers.parse(Parsers.chunk_line(ctxt), in) match {
+      val result = Parsers.parse(Parsers.chunk_line(ctxt), in)
+      (result : @unchecked) match {
        case Parsers.Success((x, c), rest) => chunks += x; ctxt = c; in = rest
        case Parsers.NoSuccess(_, rest) =>
          error("Unexpected failure to parse input:\n" + rest.source.toString)
      }
    }
    (chunks.toList, ctxt)
  }



  /** HTML output **/

  private val output_styles =
    List(
      "" -> "html-n",
      "plain" -> "html-n",
      "alpha" -> "html-a",
      "named" -> "html-n",
      "paragraph" -> "html-n",
      "unsort" -> "html-u",
      "unsortlist" -> "html-u")

  def html_output(bib: List[Path],
    title: String = "Bibliography",
    body: Boolean = false,
    citations: List[String] = List("*"),
    style: String = "",
    chronological: Boolean = false
  ): String = {
    Isabelle_System.with_tmp_dir("bibtex") { tmp_dir =>
      /* database files */

      val bib_files = bib.map(_.drop_ext)
      val bib_names = {
        val names0 = bib_files.map(_.file_name)
        if (Library.duplicates(names0).isEmpty) names0
        else names0.zipWithIndex.map({ case (name, i) => (i + 1).toString + "-" + name })
      }

      for ((a, b) <- bib_files zip bib_names) {
        Isabelle_System.copy_file(a.ext("bib"), tmp_dir + Path.basic(b).ext("bib"))
      }


      /* style file */

      val bst =
        output_styles.toMap.get(style) match {
          case Some(base) => base + (if (chronological) "c" else "") + ".bst"
          case None =>
            error("Bad style for bibtex HTML output: " + quote(style) +
              "\n(expected: " + commas_quote(output_styles.map(_._1)) + ")")
        }
      Isabelle_System.copy_file(Path.explode("$BIB2XHTML_HOME/bst") + Path.explode(bst), tmp_dir)


      /* result */

      val in_file = Path.explode("bib.aux")
      val out_file = Path.explode("bib.html")

      File.write(tmp_dir + in_file,
        bib_names.mkString("\\bibdata{", ",", "}\n") +
        citations.map(cite => "\\citation{" + cite + "}\n").mkString)

      Isabelle_System.bash(
        "\"$BIB2XHTML_HOME/main/bib2xhtml.pl\" -B \"$ISABELLE_BIBTEX\"" +
        " -u -s " + Bash.string(proper_string(style) getOrElse "empty") +
        (if (chronological) " -c" else "") +
        (if (title != "") " -h " + Bash.string(title) + " " else "") +
        " " + File.bash_path(in_file) + " " + File.bash_path(out_file),
        cwd = tmp_dir.file).check

      val html = File.read(tmp_dir + out_file)

      if (body) {
        cat_lines(
          split_lines(html).
            dropWhile(line => !line.startsWith("