diff --git a/src/Doc/Sledgehammer/document/root.tex b/src/Doc/Sledgehammer/document/root.tex --- a/src/Doc/Sledgehammer/document/root.tex +++ b/src/Doc/Sledgehammer/document/root.tex @@ -1,1395 +1,1395 @@ \documentclass[a4paper,12pt]{article} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{amssymb} \usepackage{color} \usepackage{footmisc} \usepackage{graphicx} %\usepackage{mathpazo} \usepackage{multicol} \usepackage{stmaryrd} %\usepackage[scaled=.85]{beramono} \usepackage{isabelle,iman,pdfsetup} \newcommand\download{\url{https://isabelle.in.tum.de/components/}} \let\oldS=\S \def\S{\oldS\,} \def\qty#1{\ensuremath{\left<\mathit{#1\/}\right>}} \def\qtybf#1{$\mathbf{\left<\textbf{\textit{#1\/}}\right>}$} \newcommand\const[1]{\textsf{#1}} %\oddsidemargin=4.6mm %\evensidemargin=4.6mm %\textwidth=150mm %\topmargin=4.6mm %\headheight=0mm %\headsep=0mm %\textheight=234mm \def\Colon{\mathord{:\mkern-1.5mu:}} %\def\lbrakk{\mathopen{\lbrack\mkern-3.25mu\lbrack}} %\def\rbrakk{\mathclose{\rbrack\mkern-3.255mu\rbrack}} \def\lparr{\mathopen{(\mkern-4mu\mid}} \def\rparr{\mathclose{\mid\mkern-4mu)}} \def\unk{{?}} \def\undef{(\lambda x.\; \unk)} %\def\unr{\textit{others}} \def\unr{\ldots} \def\Abs#1{\hbox{\rm{\guillemetleft}}{\,#1\,}\hbox{\rm{\guillemetright}}} \def\Q{{\smash{\lower.2ex\hbox{$\scriptstyle?$}}}} \urlstyle{tt} \renewcommand\_{\hbox{\textunderscore\kern-.05ex}} \hyphenation{Isa-belle super-posi-tion zipper-posi-tion} \begin{document} %%% TYPESETTING %\renewcommand\labelitemi{$\bullet$} \renewcommand\labelitemi{\raise.065ex\hbox{\small\textbullet}} \title{\includegraphics[scale=0.5]{isabelle_logo} \\[4ex] Hammering Away \\[\smallskipamount] \Large A User's Guide to Sledgehammer for Isabelle/HOL} \author{\hbox{} \\ Jasmin Blanchette \\ {\normalsize Institut f\"ur Informatik, Technische Universit\"at M\"unchen} \\[4\smallskipamount] {\normalsize with contributions from} \\[4\smallskipamount] Martin Desharnais \\ {\normalsize Forschungsinstitut CODE, Universit\"at der Bundeswehr M\"unchen} \\[4\smallskipamount] Lawrence C. Paulson \\ {\normalsize Computer Laboratory, University of Cambridge} \\ \hbox{}} \maketitle \tableofcontents \setlength{\parskip}{.7em plus .2em minus .1em} \setlength{\parindent}{0pt} \setlength{\abovedisplayskip}{\parskip} \setlength{\abovedisplayshortskip}{.9\parskip} \setlength{\belowdisplayskip}{\parskip} \setlength{\belowdisplayshortskip}{.9\parskip} % general-purpose enum environment with correct spacing \newenvironment{enum}% {\begin{list}{}{% \setlength{\topsep}{.1\parskip}% \setlength{\partopsep}{.1\parskip}% \setlength{\itemsep}{\parskip}% \advance\itemsep by-\parsep}} {\end{list}} \def\pre{\begingroup\vskip0pt plus1ex\advance\leftskip by\leftmargin \advance\rightskip by\leftmargin} \def\post{\vskip0pt plus1ex\endgroup} \def\prew{\pre\advance\rightskip by-\leftmargin} \def\postw{\post} \section{Introduction} \label{introduction} Sledgehammer is a tool that applies automatic theorem provers (ATPs) and satisfiability-modulo-theories (SMT) solvers on the current goal, mostly to find proofs but also to find inconsistencies.% \footnote{The distinction between ATPs and SMT solvers is mostly historical but convenient.} % The supported ATPs include agsyHOL \cite{agsyHOL}, Alt-Ergo \cite{alt-ergo}, E \cite{schulz-2019}, iProver \cite{korovin-2009}, LEO-II \cite{leo2}, Leo-III \cite{leo3}, Satallax \cite{satallax}, SPASS \cite{weidenbach-et-al-2009}, Vampire \cite{riazanov-voronkov-2002}, Waldmeister \cite{waldmeister}, and Zipperposition \cite{cruanes-2014}. 
The ATPs are run either locally or remotely via the System\-On\-TPTP web service \cite{sutcliffe-2000}. The supported SMT solvers are CVC4 \cite{cvc4}, cvc5 \cite{barbosa-et-al-cvc5}, veriT \cite{bouton-et-al-2009}, and Z3 \cite{de-moura-2008}. These are always run locally. The problem passed to the automatic provers (or solvers) consists of your current goal together with a heuristic selection of hundreds of facts (theorems) from the current theory context, filtered by relevance. The result of a successful proof search is some source text that typically reconstructs the proof within Isabelle. For ATPs, the reconstructed proof typically relies on the general-purpose \textit{metis} proof method, which integrates the Metis ATP in Isabelle/HOL with explicit inferences going through the kernel. Thus its results are correct by construction. Sometimes the automatic provers might detect that the goal is inconsistent with the background facts---or even that the background facts are inconsistent regardless of the goal. For Isabelle/jEdit users, Sledgehammer provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > Isabelle > General.'' In this mode, a reduced version of Sledgehammer is run on every newly entered theorem for a few seconds. \newbox\boxA \setbox\boxA=\hbox{\texttt{NOSPAM}} \newcommand\authoremail{\texttt{jasmin.blan{\color{white}NOSPAM}\kern-\wd\boxA{}chette@\allowbreak gmail.\allowbreak com}} To run Sledgehammer, you must make sure that the theory \textit{Sledgehammer} is imported---this is rarely a problem in practice since it is part of \textit{Main}. Examples of Sledgehammer use can be found in the \texttt{src/HOL/Metis\_Examples} directory. Comments and bug reports concerning Sledgehammer or this manual should be directed to the author at \authoremail. \section{Installation} \label{installation} Sledgehammer is part of Isabelle, so you do not need to install it. However, it relies on third-party automatic provers (ATPs and SMT solvers). Among the ATPs, agsyHOL, Alt-Ergo, E, LEO-II, Leo-III, Satallax, SPASS, Vampire, and Zipperposition can be run locally; in addition, agsyHOL, Alt-Ergo, E, iProver, LEO-II, Leo-III, Satallax, Vampire, Waldmeister, and Zipperposition are available remotely via System\-On\-TPTP \cite{sutcliffe-2000}. The SMT solvers CVC4, cvc5, veriT, and Z3 can be run locally. There are three main ways to install automatic provers on your machine: \begin{sloppy} \begin{enum} \item[\labelitemi] If you installed an official Isabelle package, it should already include properly set up executables for CVC4, cvc5, E, SPASS, Vampire, veriT, Z3, and Zipperposition, ready to use. \item[\labelitemi] Alternatively, you can download the Isabelle-aware CVC4, cvc5, E, SPASS, Vampire, veriT, Z3, and Zipperposition binary packages from \download. Extract the archives, then add a line to your \texttt{\$ISABELLE\_HOME\_USER\slash etc\slash components}%
\footnote{The variable \texttt{\$ISABELLE\_HOME\_USER} is set by Isabelle at startup. Its value can be retrieved by executing \texttt{isabelle} \texttt{getenv} \texttt{ISABELLE\_HOME\_USER} on the command line.} file with the absolute path to the prover. For example, if the \texttt{components} file does not exist yet and you extracted SPASS to \texttt{/usr/local/spass-3.8ds}, create it with the single line \prew \texttt{/usr/local/spass-3.8ds} \postw in it. 
\item[\labelitemi] If you prefer to build agsyHOL, Alt-Ergo, E, LEO-II, Leo-III, Satallax, or Zipperposition manually, set the environment variable \texttt{AGSYHOL\_HOME}, \texttt{E\_HOME}, \texttt{LEO2\_HOME}, \texttt{LEO3\_HOME}, \texttt{SATALLAX\_HOME}, or \texttt{ZIPPERPOSITION\_HOME} to the directory that contains the \texttt{agsyHOL}, \texttt{eprover} (or \texttt{eprover-ho}), \texttt{leo}, \texttt{leo3}, \texttt{satallax}, or \texttt{zipperposition} executable; for Alt-Ergo, set the environment variable \texttt{WHY3\_HOME} to the directory that contains the \texttt{why3} executable. Ideally, you should also set \texttt{E\_VERSION}, \texttt{LEO2\_VERSION}, \texttt{LEO3\_VERSION}, \texttt{SATALLAX\_VERSION}, or \texttt{ZIPPERPOSITION\_VERSION} to the prover's version number (e.g., ``3.6''). Similarly, if you want to install CVC4, cvc5, veriT, or Z3, set the environment variable \texttt{CVC4\_\allowbreak SOLVER}, \texttt{CVC5\_\allowbreak SOLVER}, \texttt{ISABELLE\_\allowbreak VERIT}, or \texttt{Z3\_SOLVER} to the complete path of the executable, \emph{including the file name}. Ideally, also set \texttt{CVC4\_VERSION}, \texttt{CVC5\_VERSION}, \texttt{VERIT\_VERSION}, or \texttt{Z3\_VERSION} to the solver's version number (e.g., ``4.4.0''). \end{enum} \end{sloppy} To check whether the provers are successfully installed, try out the example in \S\ref{first-steps}. If the remote versions of any of these provers are used (identified by the prefix ``\textit{remote\_\/}''), or if the local versions fail to solve the easy goal presented there, something must be wrong with the installation. \section{First Steps} \label{first-steps} To illustrate Sledgehammer in context, let us start a theory file and attempt to prove a simple lemma: \prew \textbf{theory}~\textit{Scratch} \\ \noindent\hbox{}\quad \textbf{imports}~\textit{Main} \\ \textbf{begin} \\[2\smallskipamount]
%
\textbf{lemma} ``$[a] = [b] \,\Longrightarrow\, a = b$'' \\ \textbf{sledgehammer} \postw Instead of issuing the \textbf{sledgehammer} command, you can also use the Sledgehammer panel in Isabelle/jEdit. Either way, Sledgehammer will produce something like the following output after a few seconds: \prew \slshape e found a proof\ldots \\ cvc4 found a proof\ldots \\ z3 found a proof\ldots \\ vampire found a proof\ldots \\ e: Try this: \textbf{by} \textit{simp} (0.3 ms) \\ cvc4: Try this: \textbf{by} \textit{simp} (0.4 ms) \\ z3: Try this: \textbf{by} \textit{simp} (0.5 ms) \\ vampire: Try this: \textbf{by} \textit{simp} (0.3 ms) \postw Sledgehammer ran CVC4, E, Vampire, Z3, and possibly other provers in parallel. The list may vary depending on which provers are installed and how many processor cores are available. For each successful prover, Sledgehammer gives a one-line Isabelle proof. Rough timings are shown in parentheses, indicating how fast the call is. You can click the proof to insert it into the theory text. In addition, you can ask Sledgehammer for an Isar text proof by enabling the \textit{isar\_proofs} option (\S\ref{output-format}): \prew \textbf{sledgehammer} [\textit{isar\_proofs}] \postw When Isar proof construction is successful, it can yield proofs that are more readable and also faster than \textit{metis} or \textit{smt} one-line proofs. This feature is experimental. 
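The shape of the generated Isar proof depends on the prover, the goal, and the \textit{compress} option (\S\ref{output-format}). For the simple lemma above, a suggested Isar proof might look roughly like the following sketch, shown for illustration only---it is not the literal output of any particular prover:
\prew
\textbf{proof}~- \\
\noindent\hbox{}\quad \textbf{assume} ``$[a] = [b]$'' \\
\noindent\hbox{}\quad \textbf{then show} ``$a = b$'' \\
\noindent\hbox{}\qquad \textbf{by} \textit{simp} \\
\textbf{qed}
\postw
Such a proof can be inserted into the theory text and then edited like any other Isar proof.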
For goals that are inconsistent with the background theory (and hence unprovable unless the theory is itself inconsistent), such as \prew \textbf{lemma} ``$\mathit{length}\; (\mathit{zip}\; \mathit{xs}\; \mathit{ys}) = \mathit{length}\; \mathit{xs}$'' \\ \textbf{sledgehammer} \postw Sledgehammer's output might look as follows: \prew \slshape vampire found an inconsistency\ldots \\ vampire: The goal is inconsistent with these facts: append\_Cons, nth\_append\_length, self\_append\_conv2, zip\_eq\_Nil\_iff \postw Sometimes Sledgehammer will helpfully suggest a missing assumption: \prew \slshape
-e: Candidate for missing hypothesis: \\
+e: Candidate missing assumption: \\
length ys = length xs \postw \section{Hints} \label{hints} This section presents a few hints that should help you get the most out of Sledgehammer. Frequently asked questions are answered in \S\ref{frequently-asked-questions}.
%\newcommand\point[1]{\medskip\par{\sl\bfseries#1}\par\nopagebreak}
\newcommand\point[1]{\subsection{\slshape #1}}
\point{Presimplify the goal} For best results, first simplify your problem by calling \textit{auto} or at least \textit{safe} followed by \textit{simp\_all}. The SMT solvers provide arithmetic decision procedures, but the ATPs typically do not (or if they do, Sledgehammer does not use them yet). Apart from Waldmeister, they are not particularly good at heavy rewriting, but because they regard equations as undirected, they often prove theorems that require the reverse orientation of a \textit{simp} rule. Higher-order problems can be tackled, but the success rate is better for first-order problems. \point{Familiarize yourself with the main options} Sledgehammer's options are fully documented in \S\ref{command-syntax}. Many of the options are very specialized, but serious users of the tool should at least familiarize themselves with the following options: \begin{enum} \item[\labelitemi] \textbf{\textit{provers}} (\S\ref{mode-of-operation}) specifies the automatic provers (ATPs and SMT solvers) that should be run whenever Sledgehammer is invoked (e.g., ``\textit{provers}~= \textit{cvc4 e vampire zipperposition\/}''). \item[\labelitemi] \textbf{\textit{max\_facts}} (\S\ref{relevance-filter}) specifies the maximum number of facts that should be passed to the provers. By default, the value is prover-dependent but varies between about 50 and 1000. If the provers time out, you can try lowering this value to, say, 25 or 50 and see if that helps. \item[\labelitemi] \textbf{\textit{isar\_proofs}} (\S\ref{output-format}) specifies that Isar proofs should be generated, in addition to one-line proofs. The length of the Isar proofs can be controlled by setting \textit{compress} (\S\ref{output-format}). \item[\labelitemi] \textbf{\textit{timeout}} (\S\ref{timeouts}) controls the provers' time limit. It is set to 30 seconds by default. \end{enum} Options can be set globally using \textbf{sledgehammer\_params} (\S\ref{command-syntax}). The command also prints the list of all available options with their current value. Fact selection can be influenced by specifying ``$(\textit{add}{:}~\textit{my\_facts})$'' after the \textbf{sledgehammer} call to ensure that certain facts are included, or simply ``$(\textit{my\_facts})$'' to force Sledgehammer to run only with $\textit{my\_facts}$ (and any facts chained into the goal). \section{Frequently Asked Questions} \label{frequently-asked-questions} This section answers frequently (and infrequently) asked questions about Sledgehammer. 
It is a good idea to skim over it now even if you do not have any questions at this stage. \point{Which facts are passed to the automatic provers?} Sledgehammer heuristically selects a subset of lemmas from the currently loaded libraries. The component that performs this selection is called \emph{relevance filter} (\S\ref{relevance-filter}). \begin{enum} \item[\labelitemi] The traditional relevance filter, \emph{MePo} (\underline{Me}ng--\underline{Pau}lson), assigns a score to every available fact (lemma, theorem, definition, or axiom) based upon how many constants that fact shares with the goal. This process iterates to include facts relevant to those just accepted. The constants are weighted to give unusual ones greater significance. MePo copes best when the goal contains some unusual constants; if all the constants are common, it is unable to discriminate among the hundreds of facts that are picked up. The filter is also memoryless: It has no information about how many times a particular fact has been used in a proof, and it cannot learn. \item[\labelitemi] An alternative to MePo is \emph{MaSh} (\underline{Ma}chine Learner for \underline{S}ledge\underline{h}ammer). It applies machine learning to the problem of finding relevant facts. \item[\labelitemi] The \emph{MeSh} filter combines MePo and MaSh. This is the default. \end{enum} The number of facts included in a problem varies from prover to prover, since some provers get overwhelmed more easily than others. You can show the number of facts given using the \textit{verbose} option (\S\ref{output-format}) and the actual facts using \textit{debug} (\S\ref{output-format}). Sledgehammer is good at finding short proofs combining a handful of existing lemmas. If you are looking for longer proofs, you must typically restrict the number of facts, by setting the \textit{max\_facts} option (\S\ref{relevance-filter}) to, say, 25 or 50. You can also influence which facts are actually selected in a number of ways. If you simply want to ensure that a fact is included, you can specify it using the syntax ``$(\textit{add}{:}~\textit{my\_facts})$''. For example: % \prew \textbf{sledgehammer} (\textit{add}: \textit{hd.simps} \textit{tl.simps}) \postw % The specified facts then replace the least relevant facts that would otherwise be included; the other selected facts remain the same. If you want to direct the selection in a particular direction, you can specify the facts via \textbf{using}: % \prew \textbf{using} \textit{hd.simps} \textit{tl.simps} \\ \textbf{sledgehammer} \postw % The facts are then more likely to be selected than otherwise, and if they are selected at a given iteration of MePo, they also influence which facts are selected at subsequent iterations. \point{Why does Metis fail to reconstruct the proof?} There are many reasons. If Metis runs seemingly forever, that is a sign that the proof is too difficult for it. Metis's search is complete for first-order logic with equality, so if the proof was found by a superposition-based ATP such as E, SPASS, or Vampire, Metis should \emph{eventually} find it---in principle. In some rare cases, \textit{metis} fails fairly quickly, and you get the error message ``One-line proof reconstruction failed.'' This indicates that Sledgehammer determined that the goal is provable, but the proof is, for technical reasons, beyond \textit{metis}'s power. You can then try again with the \textit{strict} option (\S\ref{problem-encoding}). 
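Concretely, this amounts to reinvoking Sledgehammer with the option enabled, for example:
\prew
\textbf{sledgehammer} [\textit{strict}]
\postw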
\point{What are the \textit{full\_types}, \textit{no\_types}, and \\ \textit{mono\_tags} arguments to Metis?} The \textit{metis}~(\textit{full\_types}) proof method and its cousin \textit{metis}~(\textit{mono\_tags}) are fully-typed versions of Metis. They are somewhat slower than \textit{metis}, but the proof search is fully typed, and they also include more powerful rules such as the axiom ``$x = \const{True} \mathrel{\lor} x = \const{False}$'' for reasoning in higher-order positions (e.g., in set comprehensions). These methods are tried as a fallback when \textit{metis} fails, and they are sometimes generated by Sledgehammer instead of \textit{metis} if the proof obviously requires type information or if \textit{metis} failed when Sledgehammer preplayed the proof.
%
At the other end of the soundness spectrum, \textit{metis} (\textit{no\_types}) uses no type information at all during the proof search, which is more efficient but often fails. Calls to \textit{metis} (\textit{no\_types}) are occasionally generated by Sledgehammer.
%
See the \textit{type\_enc} option (\S\ref{problem-encoding}) for details. Incidentally, if you ever see warnings such as \prew \slshape Metis: Falling back on ``\textit{metis} (\textit{full\_types})'' \postw for a successful \textit{metis} proof, you can advantageously pass the \textit{full\_types} option to \textit{metis} directly. \point{And what are the \textit{lifting} and \textit{opaque\_lifting} \\ arguments to Metis?} Orthogonally to the encoding of types, it is important to choose an appropriate translation of $\lambda$-abstractions. Metis supports three translation schemes, in decreasing order of power: Curry combinators (the default), $\lambda$-lifting, and a ``hiding'' scheme that disables all reasoning under $\lambda$-abstractions. See the \textit{lam\_trans} option (\S\ref{problem-encoding}) for details. \point{Are the generated proofs minimal?} Automatic provers frequently use many more facts than are necessary. Sledgehammer includes a proof minimization tool that takes a set of facts returned by a given prover and repeatedly calls a prover or proof method with subsets of those facts to find a minimal set. Reducing the number of facts typically helps reconstruction and declutters the proof documents. \point{A strange error occurred---what should I do?} Sledgehammer tries to give informative error messages. Please report any strange error to the author at \authoremail. \point{Auto can solve it---why not Sledgehammer?} Problems can be easy for \textit{auto} and difficult for automatic provers, but the reverse is also true, so do not be discouraged if your first attempts fail. Because the system refers to all theorems known to Isabelle, it is particularly suitable when your goal has a short proof but requires lemmas that you do not know about. \point{Why are there so many options?} Sledgehammer's philosophy is that it should work out of the box, without user guidance. Most of the options are meant to be used by the Sledgehammer developers for experiments. \section{Command Syntax} \label{command-syntax} \subsection{Sledgehammer} \label{sledgehammer} Sledgehammer can be invoked at any point when there is an open goal by entering the \textbf{sledgehammer} command in the theory file. 
Its general syntax is as follows: \prew \textbf{sledgehammer} \qty{subcommand}$^?$ \qty{options}$^?$ \qty{facts\_override}$^?$ \qty{num}$^?$ \postw In the general syntax, the \qty{subcommand} may be any of the following: \begin{enum} \item[\labelitemi] \textbf{\textit{run} (the default):} Runs Sledgehammer on subgoal number \qty{num} (1 by default), with the given options and facts. \item[\labelitemi] \textbf{\textit{supported\_provers}:} Prints the list of automatic provers supported by Sledgehammer. See \S\ref{installation} and \S\ref{mode-of-operation} for more information on how to install automatic provers. \item[\labelitemi] \textbf{\textit{refresh\_tptp}:} Refreshes the list of remote ATPs available at System\-On\-TPTP \cite{sutcliffe-2000}. \end{enum} In addition, the following subcommands provide finer control over machine learning with MaSh: \begin{enum} \item[\labelitemi] \textbf{\textit{unlearn}:} Resets MaSh, erasing any persistent state. \item[\labelitemi] \textbf{\textit{learn\_isar}:} Invokes MaSh on the current theory to process all the available facts, learning from their Isabelle/Isar proofs. This happens automatically at Sledgehammer invocations if the \textit{learn} option (\S\ref{relevance-filter}) is enabled. \item[\labelitemi] \textbf{\textit{learn\_prover}:} Invokes MaSh on the current theory to process all the available facts, learning from proofs generated by automatic provers. The prover to use and its timeout can be set using the \textit{prover} (\S\ref{mode-of-operation}) and \textit{timeout} (\S\ref{timeouts}) options. It is recommended to perform learning using a first-order ATP (such as E, SPASS, and Vampire) as opposed to a higher-order ATP or an SMT solver. \item[\labelitemi] \textbf{\textit{relearn\_isar}:} Same as \textit{unlearn} followed by \textit{learn\_isar}. \item[\labelitemi] \textbf{\textit{relearn\_prover}:} Same as \textit{unlearn} followed by \textit{learn\_prover}. \end{enum} Sledgehammer's behavior can be influenced by various \qty{options}, which can be specified in brackets after the \textbf{sledgehammer} command. The \qty{options} are a list of key--value pairs of the form ``[$k_1 = v_1, \ldots, k_n = v_n$]''. For Boolean options, ``= \textit{true\/}'' is optional. For example: \prew \textbf{sledgehammer} [\textit{isar\_proofs}, \,\textit{timeout} = 120] \postw Default values can be set using \textbf{sledgehammer\_\allowbreak params}: \prew \textbf{sledgehammer\_params} \qty{options} \postw The supported options are described in \S\ref{option-reference}. The \qty{facts\_override} argument lets you alter the set of facts that go through the relevance filter. It may be of the form ``(\qty{facts})'', where \qty{facts} is a space-separated list of Isabelle facts (theorems, local assumptions, etc.), in which case the relevance filter is bypassed and the given facts are used. It may also be of the form ``(\textit{add}:\ \qty{facts\/_{\mathrm{1}}})'', ``(\textit{del}:\ \qty{facts\/_{\mathrm{2}}})'', or ``(\textit{add}:\ \qty{facts\/_{\mathrm{1}}}\ \textit{del}:\ \qty{facts\/_{\mathrm{2}}})'', where the relevance filter is instructed to proceed as usual except that it should consider \qty{facts\/_{\mathrm{1}}} highly-relevant and \qty{facts\/_{\mathrm{2}}} fully irrelevant. 
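For example, the following invocation asks the relevance filter to treat \textit{hd.simps} as highly relevant and to ignore \textit{tl.simps} (the fact names are purely illustrative):
\prew
\textbf{sledgehammer} (\textit{add}: \textit{hd.simps} \textit{del}: \textit{tl.simps})
\postw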
If you use Isabelle/jEdit, Sledgehammer also provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > Isabelle > General.'' For automatic runs, only the first prover set using \textit{provers} (\S\ref{mode-of-operation}) is considered, \textit{dont\_slice} (\S\ref{timeouts}) is set, fewer facts are passed to the prover, \textit{fact\_filter} (\S\ref{relevance-filter}) is set to \textit{mepo}, \textit{strict} (\S\ref{problem-encoding}) is enabled, \textit{verbose} (\S\ref{output-format}) and \textit{debug} (\S\ref{output-format}) are disabled, and \textit{timeout} (\S\ref{timeouts}) is superseded by the ``Auto Time Limit'' option in jEdit. Sledgehammer's output is also more concise. \subsection{Metis} \label{metis} The \textit{metis} proof method has the syntax \prew \textbf{\textit{metis}}~(\qty{options})${}^?$~\qty{facts}${}^?$ \postw where \qty{facts} is a list of arbitrary facts and \qty{options} is a comma-separated list consisting of at most one $\lambda$ translation scheme specification with the same semantics as Sledgehammer's \textit{lam\_trans} option (\S\ref{problem-encoding}) and at most one type encoding specification with the same semantics as Sledgehammer's \textit{type\_enc} option (\S\ref{problem-encoding}). % The supported $\lambda$ translation schemes are \textit{opaque\_lifting}, \textit{lifting}, and \textit{combs} (the default). % All the untyped type encodings listed in \S\ref{problem-encoding} are supported. For convenience, the following aliases are provided: \begin{enum} \item[\labelitemi] \textbf{\textit{full\_types}:} Alias for \textit{poly\_guards\_query}. \item[\labelitemi] \textbf{\textit{partial\_types}:} Alias for \textit{poly\_args}. \item[\labelitemi] \textbf{\textit{no\_types}:} Alias for \textit{erased}. 
\end{enum} \section{Option Reference} \label{option-reference} \def\defl{\{} \def\defr{\}} \def\flushitem#1{\item[]\noindent\kern-\leftmargin \textbf{#1}} \def\optrueonly#1{\flushitem{\textit{#1} $\bigl[$= \textit{true}$\bigr]$\enskip}\nopagebreak\\[\parskip]} \def\optrue#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{bool}$\bigr]$\enskip \defl\textit{true}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opfalse#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{bool}$\bigr]$\enskip \defl\textit{false}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opsmart#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\enskip \defl\textit{smart}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opsmartx#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\enskip \defl\textit{smart}\defr\\\hbox{}\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opnodefault#1#2{\flushitem{\textit{#1} = \qtybf{#2}} \nopagebreak\\[\parskip]} \def\opnodefaultbrk#1#2{\flushitem{$\bigl[$\textit{#1} =$\bigr]$ \qtybf{#2}} \nopagebreak\\[\parskip]} \def\opdefault#1#2#3{\flushitem{\textit{#1} = \qtybf{#2}\enskip \defl\textit{#3}\defr} \nopagebreak\\[\parskip]} \def\oparg#1#2#3{\flushitem{\textit{#1} \qtybf{#2} = \qtybf{#3}} \nopagebreak\\[\parskip]} \def\opargbool#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{bool}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]} \def\opargboolorsmart#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]} Sledgehammer's options are categorized as follows:\ mode of operation (\S\ref{mode-of-operation}), problem encoding (\S\ref{problem-encoding}), relevance filter (\S\ref{relevance-filter}), output format (\S\ref{output-format}), regression testing (\S\ref{regression-testing}), and timeouts (\S\ref{timeouts}). The descriptions below refer to the following syntactic quantities: \begin{enum} \item[\labelitemi] \qtybf{string}: A string. \item[\labelitemi] \qtybf{bool\/}: \textit{true} or \textit{false}. \item[\labelitemi] \qtybf{smart\_bool\/}: \textit{true}, \textit{false}, or \textit{smart}. \item[\labelitemi] \qtybf{int\/}: An integer. \item[\labelitemi] \qtybf{float}: A floating-point number (e.g., 2.5 or 60) expressing a number of seconds. \item[\labelitemi] \qtybf{float\_pair\/}: A pair of floating-point numbers (e.g., 0.6 0.95). \item[\labelitemi] \qtybf{smart\_int\/}: An integer or \textit{smart}. \end{enum} Default values are indicated in curly brackets (\textrm{\{\}}). Boolean options have a negative counterpart (e.g., \textit{minimize} vs.\ \textit{dont\_minimize}). When setting Boolean options or their negative counterparts, ``= \textit{true\/}'' may be omitted. \subsection{Mode of Operation} \label{mode-of-operation} \begin{enum} \opnodefaultbrk{provers}{string} Specifies the automatic provers to use as a space-separated list (e.g., ``\textit{cvc4}~\textit{e}~\textit{spass}~\textit{vampire\/}''). Provers can be run locally or remotely; see \S\ref{installation} for installation instructions. By default, \textit{provers} is set to the list of all installed local provers. The following local provers are supported: \begin{sloppy} \begin{enum} \item[\labelitemi] \textbf{\textit{agsyhol}:} agsyHOL is an automatic higher-order prover developed by Fredrik Lindblad \cite{agsyHOL}. To use agsyHOL, set the environment variable \texttt{AGSYHOL\_HOME} to the directory that contains the \texttt{agsyHOL} executable. 
\item[\labelitemi] \textbf{\textit{alt\_ergo}:} Alt-Ergo is a polymorphic ATP developed by Bobot et al.\ \cite{alt-ergo}. It supports the TPTP polymorphic typed first-order format (TF1) via Why3 \cite{why3}. To use Alt-Ergo, set the environment variable \texttt{WHY3\_HOME} to the directory that contains the \texttt{why3} executable. Sledgehammer requires Alt-Ergo 0.95.2 and Why3 0.83. \item[\labelitemi] \textbf{\textit{cvc4}:} CVC4 is an SMT solver developed by Barrett et al.\ \cite{cvc4}. To use CVC4, set the environment variable \texttt{CVC4\_SOLVER} to the complete path of the executable, including the file name, or install the prebuilt CVC4 package from \download. \item[\labelitemi] \textbf{\textit{cvc5}:} cvc5 is an SMT solver developed by Barbosa et al.\ \cite{barbosa-et-al-cvc5}. To use cvc5, set the environment variable \texttt{CVC5\_SOLVER} to the complete path of the executable, including the file name. \item[\labelitemi] \textbf{\textit{e}:} E is a first-order resolution prover developed by Stephan Schulz \cite{schulz-2019}. To use E, set the environment variable \texttt{E\_HOME} to the directory that contains the \texttt{eproof} executable and \texttt{E\_VERSION} to the version number (e.g., ``1.8''), or install the prebuilt E package from \download. \item[\labelitemi] \textbf{\textit{iprover}:} iProver is a pure instantiation-based prover developed by Konstantin Korovin \cite{korovin-2009}. To use iProver, set the environment variable \texttt{IPROVER\_HOME} to the directory that contains the \texttt{iproveropt} executable. iProver depends on Vampire to clausify problems, so make sure that Vampire is installed as well. \item[\labelitemi] \textbf{\textit{leo2}:} LEO-II is an automatic higher-order prover developed by Christoph Benzm\"uller et al.\ \cite{leo2}, with support for the TPTP typed higher-order syntax (TH0). To use LEO-II, set the environment variable \texttt{LEO2\_HOME} to the directory that contains the \texttt{leo} executable. \item[\labelitemi] \textbf{\textit{leo3}:} Leo-III is an automatic higher-order prover developed by Alexander Steen, Christoph Benzm\"uller, et al.\ \cite{leo3}, with support for the TPTP typed higher-order syntax (TH0). To use Leo-III, set the environment variable \texttt{LEO3\_HOME} to the directory that contains the \texttt{leo3} executable. \item[\labelitemi] \textbf{\textit{satallax}:} Satallax is an automatic higher-order prover developed by Chad Brown et al.\ \cite{satallax}, with support for the TPTP typed higher-order syntax (TH0). To use Satallax, set the environment variable \texttt{SATALLAX\_HOME} to the directory that contains the \texttt{satallax} executable. \item[\labelitemi] \textbf{\textit{spass}:} SPASS is a first-order resolution prover developed by Christoph Weidenbach et al.\ \cite{weidenbach-et-al-2009}. To use SPASS, set the environment variable \texttt{SPASS\_HOME} to the directory that contains the \texttt{SPASS} executable and \texttt{SPASS\_VERSION} to the version number (e.g., ``3.8ds''), or install the prebuilt SPASS package from \download. \item[\labelitemi] \textbf{\textit{vampire}:} Vampire is a first-order resolution prover developed by Andrei Voronkov and his colleagues \cite{riazanov-voronkov-2002}. To use Vampire, set the environment variable \texttt{VAMPIRE\_HOME} to the directory that contains the \texttt{vampire} executable and \texttt{VAMPIRE\_VERSION} to the version number (e.g., ``4.2.2''). 
\item[\labelitemi] \textbf{\textit{verit}:} veriT \cite{bouton-et-al-2009} is an SMT solver developed by David D\'eharbe, Pascal Fontaine, and their colleagues. It is designed to produce detailed proofs for reconstruction in proof assistants. To use veriT, set the environment variable \texttt{ISABELLE\_VERIT} to the complete path of the executable, including the file name. \item[\labelitemi] \textbf{\textit{z3}:} Z3 is an SMT solver developed at Microsoft Research \cite{de-moura-2008}. To use Z3, set the environment variable \texttt{Z3\_SOLVER} to the complete path of the executable, including the file name. \item[\labelitemi] \textbf{\textit{zipperposition}:} Zipperposition \cite{cruanes-2014} is a higher-order superposition prover developed by Simon Cruanes, Petar Vukmirovi\'c, and colleagues. To use Zipperposition, set the environment variable \texttt{ZIPPERPOSITION\_HOME} to the directory that contains the \texttt{zipperposition} executable and \texttt{ZIPPERPOSITION\_VERSION} to the version number (e.g., ``2.0.1''). \end{enum} \end{sloppy} Moreover, the following remote provers are supported: \begin{enum} \item[\labelitemi] \textbf{\textit{remote\_agsyhol}:} The remote version of agsyHOL runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_alt\_ergo}:} The remote version of Alt-Ergo runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_e}:} The remote version of E runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_iprover}:} The remote version of iProver runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_leo2}:} The remote version of LEO-II runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_leo3}:} The remote version of Leo-III runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_waldmeister}:} Waldmeister is a unit equality prover developed by Hillenbrand et al.\ \cite{waldmeister}. It can be used to prove universally quantified equations using unconditional equations, corresponding to the TPTP CNF UEQ division. The remote version of Waldmeister runs on Geoff Sutcliffe's Miami servers. \item[\labelitemi] \textbf{\textit{remote\_zipperposition}:} The remote version of Zipperposition runs on Geoff Sutcliffe's Miami servers. \end{enum} By default, Sledgehammer runs a subset of CVC4, E, SPASS, Vampire, veriT, Z3, and Zipperposition in parallel, either locally or remotely---depending on the number of processor cores available and on which provers are actually installed. It is generally beneficial to run several provers in parallel. \opnodefault{prover}{string} Alias for \textit{provers}. \opsmartx{check\_inconsistent}{dont\_check\_inconsistent} Specifies whether Sledgehammer should look for inconsistencies or for proofs. By default, it looks for both proofs and inconsistencies. An inconsistency indicates that the goal, taken as an axiom, would be inconsistent with some specific background facts if it were added as a lemma, indicating a likely issue with the goal. Sometimes the inconsistency involves only the background theory; this might happen, for example, if a flawed axiom is used or if a flawed lemma was introduced with \textbf{sorry}. 
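For example, using the usual Boolean option syntax (\S\ref{command-syntax}), the inconsistency check can be enabled explicitly for a single invocation:
\prew
\textbf{sledgehammer} [\textit{check\_inconsistent}]
\postw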
\opdefault{abduce}{smart\_int}{smart}
-Specifies the maximum number of candidate missing hypotheses that may be
+Specifies the maximum number of candidate missing assumptions that may be
displayed. These assumptions are discovered heuristically by a process called abduction (which stands in contrast to deduction)---that is, they are guessed and found to be sufficient to prove the goal. Abduction is currently supported only by E. If the option is set to \textit{smart} (the default), abduction is enabled only in some of the E time
-slices, and only one candidate missing hypothesis is displayed. You can disable
-abduction altogether by setting the option to 0 or enable it in all time slices
-by setting it to a nonzero value.
+slices, and at most one candidate missing assumption is displayed. You can
+disable abduction altogether by setting the option to 0 or enable it in all
+time slices by setting it to a nonzero value.
\optrueonly{dont\_abduce} Alias for ``\textit{abduce} = 0''. \optrue{minimize}{dont\_minimize} Specifies whether the proof minimization tool should be invoked automatically after proof search. \nopagebreak {\small See also \textit{preplay\_timeout} (\S\ref{timeouts}) and \textit{dont\_preplay} (\S\ref{timeouts}).} \opfalse{spy}{dont\_spy} Specifies whether Sledgehammer should record statistics in \texttt{\$ISA\-BELLE\_\allowbreak HOME\_\allowbreak USER/\allowbreak spy\_\allowbreak sledgehammer}. These statistics can be useful to the developers of Sledgehammer. If you are willing to have your interactions recorded in the name of science, please enable this feature and send the statistics file every now and then to the author of this manual (\authoremail). To change the default value of this option globally, set the environment variable \texttt{SLEDGEHAMMER\_SPY} to \textit{yes}. \nopagebreak {\small See also \textit{debug} (\S\ref{output-format}).} \opfalse{overlord}{no\_overlord} Specifies whether Sledgehammer should put its temporary files in \texttt{\$ISA\-BELLE\_\allowbreak HOME\_\allowbreak USER}, which is useful for debugging Sledgehammer but also unsafe if several instances of the tool are run simultaneously. The files are identified by the prefixes \texttt{prob\_} and \texttt{mash\_}; you may safely remove them after Sledgehammer has run. \textbf{Warning:} This option is not thread-safe. Use at your own risk. \nopagebreak {\small See also \textit{debug} (\S\ref{output-format}).} \end{enum} \subsection{Relevance Filter} \label{relevance-filter} \begin{enum} \opdefault{fact\_filter}{string}{smart} Specifies the relevance filter to use. The following filters are available: \begin{enum} \item[\labelitemi] \textbf{\textit{mepo}:} The traditional memoryless MePo relevance filter. \item[\labelitemi] \textbf{\textit{mash}:} The MaSh machine learner. Three learning algorithms are provided: \begin{enum} \item[\labelitemi] \textbf{\textit{nb}} is an implementation of naive Bayes. \item[\labelitemi] \textbf{\textit{knn}} is an implementation of $k$-nearest neighbors. \item[\labelitemi] \textbf{\textit{nb\_knn}} (also called \textbf{\textit{yes}} and \textbf{\textit{sml}}) is a combination of naive Bayes and $k$-nearest neighbors. \end{enum} In addition, the special value \textit{none} is used to disable machine learning by default (cf.\ \textit{smart} below). The default algorithm is \textit{nb\_knn}. The algorithm can be selected by setting the ``MaSh'' option under ``Plugins > Plugin Options > Isabelle > General'' in Isabelle/jEdit. 
Persistent data for both algorithms is stored in the directory \texttt{\$ISABELLE\_\allowbreak HOME\_\allowbreak USER/\allowbreak mash}. \item[\labelitemi] \textbf{\textit{mesh}:} The MeSh filter, which combines the rankings from MePo and MaSh. \item[\labelitemi] \textbf{\textit{smart}:} A combination of MePo, MaSh, and MeSh. If the learning algorithm is set to be \textit{none}, \textit{smart} behaves like MePo. \end{enum} \opdefault{max\_facts}{smart\_int}{smart} Specifies the maximum number of facts that may be returned by the relevance filter. If the option is set to \textit{smart} (the default), it effectively takes a value that was empirically found to be appropriate for the prover. Typical values lie between 50 and 1000. \opdefault{fact\_thresholds}{float\_pair}{\upshape 0.45~0.85} Specifies the thresholds above which facts are considered relevant by the relevance filter. The first threshold is used for the first iteration of the relevance filter and the second threshold is used for the last iteration (if it is reached). The effective threshold is quadratically interpolated for the other iterations. Each threshold ranges from 0 to 1, where 0 means that all theorems are relevant and 1 only theorems that refer to previously seen constants. \optrue{learn}{dont\_learn} Specifies whether Sledgehammer invocations should run MaSh to learn the available theories (and hence provide more accurate results). Learning takes place only if MaSh is enabled. \opdefault{max\_new\_mono\_instances}{int}{smart} Specifies the maximum number of monomorphic instances to generate beyond \textit{max\_facts}. The higher this limit is, the more monomorphic instances are potentially generated. Whether monomorphization takes place depends on the type encoding used. If the option is set to \textit{smart} (the default), it takes a value that was empirically found to be appropriate for the prover. For most provers, this value is 100. \nopagebreak {\small See also \textit{type\_enc} (\S\ref{problem-encoding}).} \opdefault{max\_mono\_iters}{int}{smart} Specifies the maximum number of iterations for the monomorphization fixpoint construction. The higher this limit is, the more monomorphic instances are potentially generated. Whether monomorphization takes place depends on the type encoding used. If the option is set to \textit{smart} (the default), it takes a value that was empirically found to be appropriate for the prover. For most provers, this value is 3. \nopagebreak {\small See also \textit{type\_enc} (\S\ref{problem-encoding}).} \opdefault{induction\_rules}{string}{exclude} Specifies whether induction rules should be considered as relevant facts. The following behaviors are available: \begin{enum} \item[\labelitemi] \textbf{\textit{exclude}:} Induction rules are ignored by the relevance filter. \item[\labelitemi] \textbf{\textit{instantiate}:} Induction rules are instantiated based on the goal and then considered by the relevance filter. \item[\labelitemi] \textbf{\textit{include}:} Induction rules are considered by the relevance filter. \end{enum} \end{enum} \subsection{Problem Encoding} \label{problem-encoding} \newcommand\comb[1]{\const{#1}} \begin{enum} \opdefault{lam\_trans}{string}{smart} Specifies the $\lambda$ translation scheme to use in ATP problems. 
The supported translation schemes are listed below: \begin{enum} \item[\labelitemi] \textbf{\textit{lifting}:} Introduce a new supercombinator \const{c} for each cluster of $n$~$\lambda$-abstractions, defined using an equation $\const{c}~x_1~\ldots~x_n = t$ ($\lambda$-lifting). \item[\labelitemi] \textbf{\textit{opaque\_lifting}:} Same as \textit{lifting}, except that the supercombinators are kept opaque, i.e. they are unspecified fresh constants. This effectively disables all reasoning under $\lambda$-abstractions. \item[\labelitemi] \textbf{\textit{combs}:} Rewrite lambdas to the Curry combinators (\comb{I}, \comb{K}, \comb{S}, \comb{B}, \comb{C}). Combinators enable the ATPs to synthesize $\lambda$-terms but tend to yield bulkier formulas than $\lambda$-lifting: The translation is quadratic in the worst case, and the equational definitions of the combinators are very prolific in the context of resolution. \item[\labelitemi] \textbf{\textit{opaque\_combs}:} Same as \textit{combs}, except that the combinators are kept opaque, i.e. without equational definitions. \item[\labelitemi] \textbf{\textit{combs\_and\_lifting}:} Introduce a new supercombinator \const{c} for each cluster of $\lambda$-abstractions and characterize it both using a lifted equation $\const{c}~x_1~\ldots~x_n = t$ and via Curry combinators. \item[\labelitemi] \textbf{\textit{combs\_or\_lifting}:} For each cluster of $\lambda$-abstractions, heuristically choose between $\lambda$-lifting and Curry combinators. \item[\labelitemi] \textbf{\textit{keep\_lams}:} Keep the $\lambda$-abstractions in the generated problems. This is available only with provers that support $\lambda$s. \item[\labelitemi] \textbf{\textit{smart}:} The actual translation scheme used depends on the ATP and should be the most efficient scheme for that ATP. \end{enum} For SMT solvers, the $\lambda$ translation scheme is always \textit{lifting}, irrespective of the value of this option. \opsmartx{uncurried\_aliases}{no\_uncurried\_aliases} Specifies whether fresh function symbols should be generated as aliases for applications of curried functions in ATP problems. \opdefault{type\_enc}{string}{smart} Specifies the type encoding to use in ATP problems. Some of the type encodings are unsound, meaning that they can give rise to spurious proofs (unreconstructible using \textit{metis}). The type encodings are listed below, with an indication of their soundness in parentheses. An asterisk (*) indicates that the encoding is slightly incomplete for reconstruction with \textit{metis}, unless the \textit{strict} option (described below) is enabled. \begin{enum} \item[\labelitemi] \textbf{\textit{erased} (unsound):} No type information is supplied to the ATP, not even to resolve overloading. Types are simply erased. \item[\labelitemi] \textbf{\textit{poly\_guards} (sound):} Types are encoded using a predicate \const{g}$(\tau, t)$ that guards bound variables. Constants are annotated with their types, supplied as extra arguments, to resolve overloading. \item[\labelitemi] \textbf{\textit{poly\_tags} (sound):} Each term and subterm is tagged with its type using a function $\const{t\/}(\tau, t)$. \item[\labelitemi] \textbf{\textit{poly\_args} (unsound):} Like for \textit{poly\_guards} constants are annotated with their types to resolve overloading, but otherwise no type information is encoded. This is the default encoding used by the \textit{metis} proof method. 
\item[\labelitemi] \textbf{% \textit{raw\_mono\_guards}, \textit{raw\_mono\_tags} (sound); \\ \textit{raw\_mono\_args} (unsound):} \\ Similar to \textit{poly\_guards}, \textit{poly\_tags}, and \textit{poly\_args}, respectively, but the problem is additionally monomorphized, meaning that type variables are instantiated with heuristically chosen ground types. Monomorphization can simplify reasoning but also leads to larger fact bases, which can slow down the ATPs. \item[\labelitemi] \textbf{% \textit{mono\_guards}, \textit{mono\_tags} (sound); \textit{mono\_args} \\ (unsound):} \\ Similar to \textit{raw\_mono\_guards}, \textit{raw\_mono\_tags}, and \textit{raw\_mono\_\allowbreak args}, respectively but types are mangled in constant names instead of being supplied as ground term arguments. The binary predicate $\const{g}(\tau, t)$ becomes a unary predicate $\const{g\_}\tau(t)$, and the binary function $\const{t}(\tau, t)$ becomes a unary function $\const{t\_}\tau(t)$. \item[\labelitemi] \textbf{\textit{mono\_native} (sound):} Exploits native first-order types if the prover supports the TF0, TF1, TH0, or TH1 syntax; otherwise, falls back on \textit{mono\_guards}. The problem is monomorphized. \item[\labelitemi] \textbf{\textit{mono\_native\_fool} (sound):} Exploits native first-order types, including Booleans, if the prover supports the TFX0, TFX1, TH0, or TH1 syntax; otherwise, falls back on \textit{mono\_native}. The problem is monomorphized. \item[\labelitemi] \textbf{\textit{mono\_native\_higher}, \textit{mono\_native\_higher\_fool} \\ (sound):} Exploits native higher-order types, including Booleans if ending with ``\textit{\_fool}'', if the prover supports the TH0 syntax; otherwise, falls back on \textit{mono\_native} or \textit{mono\_native\_fool}. The problem is monomorphized. \item[\labelitemi] \textbf{\textit{poly\_native}, \textit{poly\_native\_fool}, \textit{poly\_native\_higher}, \\ \textit{poly\_native\_higher\_fool} (sound):} Exploits native first-order polymorphic types if the prover supports the TF1, TFX1, or TH1 syntax; otherwise, falls back on \textit{mono\_native}, \textit{mono\_native\_fool}, \textit{mono\_native\_higher}, or \textit{mono\_native\_higher\_fool}. \item[\labelitemi] \textbf{% \textit{poly\_guards}?, \textit{poly\_tags}?, \textit{raw\_mono\_guards}?, \\ \textit{raw\_mono\_tags}?, \textit{mono\_guards}?, \textit{mono\_tags}?, \\ \textit{mono\_native}? (sound*):} \\ The type encodings \textit{poly\_guards}, \textit{poly\_tags}, \textit{raw\_mono\_guards}, \textit{raw\_mono\_tags}, \textit{mono\_guards}, \textit{mono\_tags}, and \textit{mono\_native} are fully typed and sound. For each of these, Sledgehammer also provides a lighter variant identified by a question mark (`\hbox{?}')\ that detects and erases monotonic types, notably infinite types. (For \textit{mono\_native}, the types are not actually erased but rather replaced by a shared uniform type of individuals.) As argument to the \textit{metis} proof method, the question mark is replaced by a \hbox{``\textit{\_query\/}''} suffix. \item[\labelitemi] \textbf{% \textit{poly\_guards}??, \textit{poly\_tags}??, \textit{raw\_mono\_guards}??, \\ \textit{raw\_mono\_tags}??, \textit{mono\_guards}??, \textit{mono\_tags}?? \\ (sound*):} \\ Even lighter versions of the `\hbox{?}' encodings. As argument to the \textit{metis} proof method, the `\hbox{??}' suffix is replaced by \hbox{``\textit{\_query\_query\/}''}. 
\item[\labelitemi] \textbf{%
\textit{poly\_guards}@, \textit{poly\_tags}@, \textit{raw\_mono\_guards}@, \\ \textit{raw\_mono\_tags}@ (sound*):} \\ Alternative versions of the `\hbox{??}' encodings. As argument to the \textit{metis} proof method, the `\hbox{@}' suffix is replaced by \hbox{``\textit{\_at\/}''}. \item[\labelitemi] \textbf{\textit{poly\_args}?, \textit{raw\_mono\_args}? (unsound):} \\ Lighter versions of \textit{poly\_args} and \textit{raw\_mono\_args}. \item[\labelitemi] \textbf{\textit{smart}:} The actual encoding used depends on the ATP and should be the most efficient sound encoding for that ATP. \end{enum} For SMT solvers, the type encoding is always \textit{mono\_native}, irrespective of the value of this option. \nopagebreak {\small See also \textit{max\_new\_mono\_instances} (\S\ref{relevance-filter}) and \textit{max\_mono\_iters} (\S\ref{relevance-filter}).} \opfalse{strict}{non\_strict} Specifies whether Sledgehammer should run in its strict mode. In that mode, sound type encodings marked with an asterisk (*) above are made complete for reconstruction with \textit{metis}, at the cost of some clutter in the generated problems. This option has no effect if \textit{type\_enc} is deliberately set to an unsound encoding. \end{enum} \subsection{Output Format} \label{output-format} \begin{enum} \opfalse{verbose}{quiet} Specifies whether the \textbf{sledgehammer} command should explain what it does. \opfalse{debug}{no\_debug} Specifies whether Sledgehammer should display additional debugging information beyond what \textit{verbose} already displays. Enabling \textit{debug} also enables \textit{verbose} behind the scenes. \nopagebreak {\small See also \textit{spy} (\S\ref{mode-of-operation}) and \textit{overlord} (\S\ref{mode-of-operation}).} \opdefault{max\_proofs}{int}{\upshape 4} Specifies the maximum number of proofs to display before stopping. This is a soft limit. \opsmart{isar\_proofs}{no\_isar\_proofs} Specifies whether Isar proofs should be output in addition to one-line proofs. The construction of Isar proofs is still experimental and may sometimes fail; however, when it succeeds, the resulting proofs can be faster and sometimes more intelligible than one-line proofs. If the option is set to \textit{smart} (the default), Isar proofs are generated only when no working one-line proof is available. \opdefault{compress}{int}{smart} Specifies the granularity of the generated Isar proofs if \textit{isar\_proofs} is explicitly enabled. A value of $n$ indicates that each Isar proof step should correspond to a group of up to $n$ consecutive proof steps in the ATP proof. If the option is set to \textit{smart} (the default), the compression factor is 10 if the \textit{isar\_proofs} option is explicitly enabled; otherwise, it is $\infty$. \optrueonly{dont\_compress} Alias for ``\textit{compress} = 1''. \optrue{try0}{dont\_try0} Specifies whether standard proof methods such as \textit{auto} and \textit{blast} should be tried as alternatives to \textit{metis} in Isar proofs. The collection of methods is roughly the same as for the \textbf{try0} command. \optrue{smt\_proofs}{no\_smt\_proofs} Specifies whether the \textit{smt} proof method should be tried in addition to Isabelle's built-in proof methods. \end{enum} \subsection{Regression Testing} \label{regression-testing} \begin{enum} \opnodefault{expect}{string} Specifies the expected outcome, which must be one of the following: \begin{enum} \item[\labelitemi] \textbf{\textit{some}:} Sledgehammer found a proof. 
\item[\labelitemi] \textbf{\textit{some\_preplayed}:} Sledgehammer found a proof that was successfully preplayed. \item[\labelitemi] \textbf{\textit{none}:} Sledgehammer found no proof. \item[\labelitemi] \textbf{\textit{timeout}:} Sledgehammer timed out. \item[\labelitemi] \textbf{\textit{resources\_out}:} Sledgehammer ran out of resources. \item[\labelitemi] \textbf{\textit{unknown}:} Sledgehammer encountered some problem. \end{enum} Sledgehammer emits an error if the actual outcome differs from the expected outcome. This option is useful for regression testing. The expected outcomes are not mutually exclusive. More specifically, \textit{some} is accepted whenever \textit{some\_preplayed} is accepted, as the former has strictly fewer requirements than the latter. \nopagebreak {\small See also \textit{timeout} (\S\ref{timeouts}).} \end{enum} \subsection{Timeouts} \label{timeouts} \begin{enum} \opdefault{timeout}{float}{\upshape 30} Specifies the maximum number of seconds that the automatic provers should spend searching for a proof. This excludes problem preparation and is a soft limit. \opdefault{slices}{int}{\upshape 12 times the number of cores detected} Specifies the number of time slices. Time slices are the basic unit for prover invocations. They are divided among the available provers. A single prover invocation can occupy a single slice, two slices, or more, depending on the prover. Slicing (and thereby parallelism) can be disabled by setting \textit{slices} to 1. Since slicing is a valuable optimization, you should probably leave it enabled unless you are conducting experiments. \nopagebreak {\small See also \textit{verbose} (\S\ref{output-format}).} \optrueonly{dont\_slice} Alias for ``\textit{slices} = 1''. \opdefault{preplay\_timeout}{float}{\upshape 1} Specifies the maximum number of seconds that \textit{metis} or other proof methods should spend trying to ``preplay'' the found proof. If this option is set to 0, no preplaying takes place, and no timing information is displayed next to the suggested proof method calls. \nopagebreak {\small See also \textit{minimize} (\S\ref{mode-of-operation}).} \optrueonly{dont\_preplay} Alias for ``\textit{preplay\_timeout} = 0''. \end{enum} \section{Mirabelle Testing Tool} \label{mirabelle} The \texttt{isabelle mirabelle} tool executes Sledgehammer or other advisory tools (e.g., Nitpick) or tactics (e.g., \textit{auto}) on all subgoals emerging in a theory. It is typically used to measure the success rate of a proof tool on some benchmark. Its command-line usage is as follows: {\small
\begin{verbatim}
Usage: isabelle mirabelle [OPTIONS] [SESSIONS ...]

  Options are:
    -A ACTION    add to list of actions
    -O DIR       output directory for log files (default: "mirabelle")
    -T THEORY    theory restriction: NAME or NAME[FIRST_LINE:LAST_LINE]
    -m INT       max. no. of calls to each action (0: unbounded) (default 0)
    -s INT       run actions on every nth goal (0: uniform distribution) (default 1)
    -t SECONDS   timeout in seconds for each action (default 30)
    ...

  Apply the given ACTIONs at all theories and proof steps of the
  specified sessions. The absence of theory restrictions (option -T)
  means to check all theories fully. Otherwise only the named theories
  are checked.
\end{verbatim}
} Option \texttt{-A ACTION} specifies an action to run on all subgoals. When specified multiple times, all actions are performed in parallel on all selected subgoals. 
Available actions are \texttt{arith}, \texttt{metis}, \texttt{quickcheck}, \texttt{sledgehammer}, \texttt{sledgehammer\_filter}, and \texttt{try0}. Option \texttt{-O DIR} specifies the output directory, which is created if needed. In this directory, a log file named \texttt{mirabelle.log} records the position of each tested subgoal and the result of executing the actions. Option \texttt{-T THEORY} restricts the subgoals to those emerging from this theory. When not provided, all subgoals from all theories are selected. When provided multiple times, the union of all specified theories' subgoals is selected. Option \texttt{-m INT} specifies a maximum number of goals on which the actions are run. Option \texttt{-s INT} specifies a stride, effectively running the actions on every $n$th goal. Option \texttt{-t SECONDS} specifies a generic timeout that the actions may interpret differently. More specific documentation about low-level options, the \texttt{ACTION} parameter, and its corresponding options can be found in the Isabelle tool usage by entering \texttt{isabelle mirabelle -?} on the command line. The following subsections assume that the environment variable \texttt{AFP} is defined and points to a release of the Archive of Formal Proofs. \subsection{Example of Benchmarking Sledgehammer}
\begin{verbatim}
isabelle mirabelle -d '$AFP' -O output \
  -A "sledgehammer[provers = e, timeout = 30]" \
  VeriComp
\end{verbatim}
This command runs the Sledgehammer action, using the E prover with a timeout of 30 seconds, on all subgoals emerging from all theories in the AFP session VeriComp. The results are written to \texttt{output/mirabelle.log}.
\begin{verbatim}
isabelle mirabelle -d '$AFP' -O output \
  -T Semantics -T Compiler \
  -A "sledgehammer[provers = e, timeout = 30]" \
  VeriComp
\end{verbatim}
This command also runs the Sledgehammer action, but this time only on subgoals emerging from the theories Semantics and Compiler. \subsection{Example of Benchmarking Multiple Tools}
\begin{verbatim}
isabelle mirabelle -d '$AFP' -O output -t 10 \
  -A "try0" -A "metis" \
  VeriComp
\end{verbatim}
This command specifies two actions, running the \textbf{try0} and \textbf{metis} commands, respectively, each with a timeout of 10 seconds. The results are written to \texttt{output/mirabelle.log}. \subsection{Example of Generating TPTP Files}
\begin{verbatim}
isabelle mirabelle -d '$AFP' -O output \
  -A "sledgehammer[provers = e, timeout = 5, keep_probs = true]" \
  VeriComp
\end{verbatim}
This command generates TPTP files using Sledgehammer. Since the file is generated at the very beginning of every Sledgehammer invocation, a short timeout of five seconds, which makes the prover fail faster, speeds up processing the subgoals. The results are written to an action-specific subdirectory of the specified output directory (\texttt{output}). A TPTP file is generated for each subgoal. \let\em=\sl \bibliography{manual}{} \bibliographystyle{abbrv} \end{document} diff --git a/src/HOL/Tools/ATP/atp_proof_reconstruct.ML b/src/HOL/Tools/ATP/atp_proof_reconstruct.ML --- a/src/HOL/Tools/ATP/atp_proof_reconstruct.ML +++ b/src/HOL/Tools/ATP/atp_proof_reconstruct.ML @@ -1,854 +1,854 @@ (* Title: HOL/Tools/ATP/atp_proof_reconstruct.ML Author: Lawrence C. Paulson, Cambridge University Computer Laboratory Author: Claire Quigley, Cambridge University Computer Laboratory Author: Jasmin Blanchette, TU Muenchen Basic proof reconstruction from ATP proofs. 
*) signature ATP_PROOF_RECONSTRUCT = sig type 'a atp_type = 'a ATP_Problem.atp_type type ('a, 'b) atp_term = ('a, 'b) ATP_Problem.atp_term type ('a, 'b, 'c, 'd) atp_formula = ('a, 'b, 'c, 'd) ATP_Problem.atp_formula type stature = ATP_Problem_Generate.stature type atp_step_name = ATP_Proof.atp_step_name type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step type 'a atp_proof = 'a ATP_Proof.atp_proof val metisN : string val full_typesN : string val partial_typesN : string val no_typesN : string val really_full_type_enc : string val full_type_enc : string val partial_type_enc : string val no_type_enc : string val full_type_encs : string list val partial_type_encs : string list val default_metis_lam_trans : string val leo2_extcnf_equal_neg_rule : string val satallax_tab_rule_prefix : string val forall_of : term -> term -> term val exists_of : term -> term -> term val simplify_bool : term -> term val var_name_of_typ : typ -> string val rename_bound_vars : term -> term val type_enc_aliases : (string * string list) list val unalias_type_enc : string -> string list val term_of_atp : Proof.context -> ATP_Problem.atp_format -> ATP_Problem_Generate.type_enc -> bool -> int Symtab.table -> typ option -> (string, string atp_type) atp_term -> term val prop_of_atp : Proof.context -> ATP_Problem.atp_format -> ATP_Problem_Generate.type_enc -> bool -> int Symtab.table -> (string, string, (string, string atp_type) atp_term, string) atp_formula -> term val used_facts_in_atp_proof : Proof.context -> (string * stature) list -> string atp_proof -> (string * stature) list val atp_proof_prefers_lifting : string atp_proof -> bool val is_typed_helper_used_in_atp_proof : string atp_proof -> bool val replace_dependencies_in_line : atp_step_name * atp_step_name list -> ('a, 'b) atp_step -> ('a, 'b) atp_step val termify_atp_proof : Proof.context -> string -> ATP_Problem.atp_format -> ATP_Problem_Generate.type_enc -> string Symtab.table -> (string * term) list -> int Symtab.table -> string atp_proof -> (term, string) atp_step list val repair_waldmeister_endgame : (term, 'a) atp_step list -> (term, 'a) atp_step list val infer_formulas_types : Proof.context -> term list -> term list val introduce_spass_skolems : (term, string) atp_step list -> (term, string) atp_step list val factify_atp_proof : (string * 'a) list -> term list -> term -> (term, string) atp_step list -> (term, string) atp_step list val termify_atp_abduce_candidate : Proof.context -> string -> ATP_Problem.atp_format -> ATP_Problem_Generate.type_enc -> string Symtab.table -> (string * term) list -> int Symtab.table -> (string, string, (string, string atp_type) atp_term, string) atp_formula -> term val top_abduce_candidates : int -> term list -> term list end; structure ATP_Proof_Reconstruct : ATP_PROOF_RECONSTRUCT = struct open ATP_Util open ATP_Problem open ATP_Proof open ATP_Problem_Generate val metisN = "metis" val full_typesN = "full_types" val partial_typesN = "partial_types" val no_typesN = "no_types" val really_full_type_enc = "mono_tags" val full_type_enc = "poly_guards_query" val partial_type_enc = "poly_args" val no_type_enc = "erased" val full_type_encs = [full_type_enc, really_full_type_enc] val partial_type_encs = partial_type_enc :: full_type_encs val type_enc_aliases = [(full_typesN, full_type_encs), (partial_typesN, partial_type_encs), (no_typesN, [no_type_enc])] fun unalias_type_enc s = AList.lookup (op =) type_enc_aliases s |> the_default [s] val default_metis_lam_trans = combsN val leo2_extcnf_equal_neg_rule = "extcnf_equal_neg" val 
satallax_tab_rule_prefix = "tab_" fun term_name' (Var ((s, _), _)) = perhaps (try Name.dest_skolem) s | term_name' _ = "" fun lambda' v = Term.lambda_name (term_name' v, v) fun forall_of v t = HOLogic.all_const (fastype_of v) $ lambda' v t fun exists_of v t = HOLogic.exists_const (fastype_of v) $ lambda' v t fun make_tfree ctxt w = let val ww = "'" ^ w in TFree (ww, the_default \<^sort>\type\ (Variable.def_sort ctxt (ww, ~1))) end fun simplify_bool ((all as Const (\<^const_name>\All\, _)) $ Abs (s, T, t)) = let val t' = simplify_bool t in if loose_bvar1 (t', 0) then all $ Abs (s, T, t') else t' end | simplify_bool (Const (\<^const_name>\Not\, _) $ t) = s_not (simplify_bool t) | simplify_bool (Const (\<^const_name>\conj\, _) $ t $ u) = s_conj (simplify_bool t, simplify_bool u) | simplify_bool (Const (\<^const_name>\disj\, _) $ t $ u) = s_disj (simplify_bool t, simplify_bool u) | simplify_bool (Const (\<^const_name>\implies\, _) $ t $ u) = s_imp (simplify_bool t, simplify_bool u) | simplify_bool ((t as Const (\<^const_name>\HOL.eq\, _)) $ u $ v) = (case (u, v) of (Const (\<^const_name>\True\, _), _) => v | (u, Const (\<^const_name>\True\, _)) => u | (Const (\<^const_name>\False\, _), v) => s_not v | (u, Const (\<^const_name>\False\, _)) => s_not u | _ => if u aconv v then \<^Const>\True\ else t $ simplify_bool u $ simplify_bool v) | simplify_bool (t $ u) = simplify_bool t $ simplify_bool u | simplify_bool (Abs (s, T, t)) = Abs (s, T, simplify_bool t) | simplify_bool t = t fun single_letter upper s = let val s' = if String.isPrefix "o" s orelse String.isPrefix "O" s then "z" else s in String.extract (Name.desymbolize (SOME upper) (Long_Name.base_name s'), 0, SOME 1) end fun var_name_of_typ (Type (\<^type_name>\fun\, [_, T])) = if body_type T = HOLogic.boolT then "p" else "f" | var_name_of_typ (Type (\<^type_name>\set\, [T])) = let fun default () = single_letter true (var_name_of_typ T) in (case T of Type (s, [T1, T2]) => if String.isSuffix "prod" s andalso T1 = T2 then "r" else default () | _ => default ()) end | var_name_of_typ (Type (s, Ts)) = if String.isSuffix "list" s then var_name_of_typ (the_single Ts) ^ "s" else single_letter false (Long_Name.base_name s) | var_name_of_typ (TFree (s, _)) = single_letter false (perhaps (try (unprefix "'")) s) | var_name_of_typ (TVar ((s, _), T)) = var_name_of_typ (TFree (s, T)) fun rename_bound_vars (t $ u) = rename_bound_vars t $ rename_bound_vars u | rename_bound_vars (Abs (_, T, t)) = Abs (var_name_of_typ T, T, rename_bound_vars t) | rename_bound_vars t = t exception ATP_CLASS of string list exception ATP_TYPE of string atp_type list exception ATP_TERM of (string, string atp_type) atp_term list exception ATP_FORMULA of (string, string, (string, string atp_type) atp_term, string) atp_formula list exception SAME of unit fun class_of_atp_class cls = (case unprefix_and_unascii class_prefix cls of SOME s => s | NONE => raise ATP_CLASS [cls]) fun atp_type_of_atp_term (ATerm ((s, _), us)) = let val tys = map atp_type_of_atp_term us in if s = tptp_fun_type then (case tys of [ty1, ty2] => AFun (ty1, ty2) | _ => raise ATP_TYPE tys) else AType ((s, []), tys) end (* Type variables are given the basic sort "HOL.type". Some will later be constrained by information from type literals, or by type inference. 
*) fun typ_of_atp_type ctxt (ty as AType ((a, clss), tys)) = let val Ts = map (typ_of_atp_type ctxt) tys in (case unprefix_and_unascii native_type_prefix a of SOME b => typ_of_atp_type ctxt (atp_type_of_atp_term (unmangled_type b)) | NONE => (case unprefix_and_unascii type_const_prefix a of SOME b => Type (invert_const b, Ts) | NONE => if not (null tys) then raise ATP_TYPE [ty] (* only "tconst"s have type arguments *) else (case unprefix_and_unascii tfree_prefix a of SOME b => make_tfree ctxt b | NONE => (* The term could be an Isabelle variable or a variable from the ATP, say "X1" or "_5018". Sometimes variables from the ATP are indistinguishable from Isabelle variables, which forces us to use a type parameter in all cases. *) Type_Infer.param 0 ("'" ^ perhaps (unprefix_and_unascii tvar_prefix) a, (if null clss then \<^sort>\type\ else map class_of_atp_class clss))))) end | typ_of_atp_type ctxt (AFun (ty1, ty2)) = typ_of_atp_type ctxt ty1 --> typ_of_atp_type ctxt ty2 fun typ_of_atp_term ctxt = typ_of_atp_type ctxt o atp_type_of_atp_term (* Type class literal applied to a type. Returns triple of polarity, class, type. *) fun type_constraint_of_term ctxt (u as ATerm ((a, _), us)) = (case (unprefix_and_unascii class_prefix a, map (typ_of_atp_term ctxt) us) of (SOME b, [T]) => (b, T) | _ => raise ATP_TERM [u]) (* Accumulate type constraints in a formula: negative type literals. *) fun add_var (key, z) = Vartab.map_default (key, []) (cons z) fun add_type_constraint false (cl, TFree (a ,_)) = add_var ((a, ~1), cl) | add_type_constraint false (cl, TVar (ix, _)) = add_var (ix, cl) | add_type_constraint _ _ = I fun repair_var_name s = (case unprefix_and_unascii schematic_var_prefix s of SOME s' => s' | NONE => s) (* The number of type arguments of a constant, zero if it's monomorphic. For (instances of) Skolem pseudoconstants, this information is encoded in the constant name. *) fun robust_const_num_type_args thy s = if String.isPrefix skolem_const_prefix s then s |> Long_Name.explode |> List.last |> Int.fromString |> the else if String.isPrefix lam_lifted_prefix s then if String.isPrefix lam_lifted_poly_prefix s then 2 else 0 else (s, Sign.the_const_type thy s) |> Sign.const_typargs thy |> length fun slack_fastype_of t = fastype_of t handle TERM _ => Type_Infer.anyT \<^sort>\type\ val spass_skolem_prefix = "sk" (* "skc" or "skf" *) val vampire_skolem_prefix = "sK" fun var_index_of_textual textual = if textual then 0 else 1 fun quantify_over_var textual quant_of var_s var_T t = let val vars = ((var_s, var_index_of_textual textual), var_T) :: filter (fn ((s, _), _) => s = var_s) (Term.add_vars t []) val normTs = vars |> AList.group (op =) |> map (apsnd hd) fun norm_var_types (Var (x, T)) = Var (x, the_default T (AList.lookup (op =) normTs x)) | norm_var_types t = t in t |> map_aterms norm_var_types |> fold_rev quant_of (map Var normTs) end (* This assumes that distinct names are mapped to distinct names by "Variable.variant_frees". This does not hold in general but should hold for ATP-generated Skolem function names, since these end with a digit and "variant_frees" appends letters. *) fun fresh_up ctxt s = fst (hd (Variable.variant_frees ctxt [] [(s, ())])) (* Higher-order translation. Variables are typed (although we don't use that information). Lambdas are typed. The code is similar to "term_of_atp_fo". 
*) fun term_of_atp_ho ctxt sym_tab = let val thy = Proof_Context.theory_of ctxt val var_index = var_index_of_textual true fun do_term opt_T u = (case u of AAbs (((var, ty), term), []) => let val typ = typ_of_atp_type ctxt ty val var_name = repair_var_name var val tm = do_term NONE term in quantify_over_var true lambda' var_name typ tm end | ATerm ((s, tys), us) => if s = "" (* special marker generated on parse error *) then error "Isar proof reconstruction failed because the ATP proof contains unparsable \ \material" else if s = tptp_equal then list_comb (Const (\<^const_name>\HOL.eq\, Type_Infer.anyT \<^sort>\type\), map (do_term NONE) us) else if s = tptp_not_equal andalso length us = 2 then \<^const>\HOL.Not\ $ list_comb (Const (\<^const_name>\HOL.eq\, Type_Infer.anyT \<^sort>\type\), map (do_term NONE) us) else if not (null us) then let val args = map (do_term NONE) us val opt_T' = SOME (map slack_fastype_of args ---> the_default dummyT opt_T) val func = do_term opt_T' (ATerm ((s, tys), [])) in foldl1 (op $) (func :: args) end else if s = tptp_or then HOLogic.disj else if s = tptp_and then HOLogic.conj else if s = tptp_implies then HOLogic.imp else if s = tptp_iff orelse s = tptp_equal then HOLogic.eq_const dummyT else if s = tptp_not_iff orelse s = tptp_not_equal then \<^term>\\x y. x \ y\ else if s = tptp_if then \<^term>\\P Q. Q \ P\ else if s = tptp_not_and then \<^term>\\P Q. \ (P \ Q)\ else if s = tptp_not_or then \<^term>\\P Q. \ (P \ Q)\ else if s = tptp_not then HOLogic.Not else if s = tptp_ho_forall then HOLogic.all_const dummyT else if s = tptp_ho_exists then HOLogic.exists_const dummyT else if s = tptp_hilbert_choice then HOLogic.choice_const dummyT else if s = tptp_hilbert_the then \<^term>\The\ else (case unprefix_and_unascii const_prefix s of SOME s' => let val ((s', _), mangled_us) = s' |> unmangled_const |>> `invert_const val num_ty_args = length us - the_default 0 (Symtab.lookup sym_tab s) val (type_us, term_us) = chop num_ty_args us |>> append mangled_us val term_ts = map (do_term NONE) term_us val Ts = map (typ_of_atp_type ctxt) tys @ map (typ_of_atp_term ctxt) type_us val T = (if not (null Ts) andalso robust_const_num_type_args thy s' = length Ts then try (Sign.const_instance thy) (s', Ts) else NONE) |> (fn SOME T => T | NONE => map slack_fastype_of term_ts ---> the_default (Type_Infer.anyT \<^sort>\type\) opt_T) val t = Const (unproxify_const s', T) in list_comb (t, term_ts) end | NONE => (* a free or schematic variable *) let val ts = map (do_term NONE) us val T = (case tys of [ty] => typ_of_atp_type ctxt ty | _ => map slack_fastype_of ts ---> (case opt_T of SOME T => T | NONE => Type_Infer.anyT \<^sort>\type\)) val t = (case unprefix_and_unascii fixed_var_prefix s of SOME s => Free (s, T) | NONE => if not (is_tptp_variable s) then Free (fresh_up ctxt s, T) else Var ((repair_var_name s, var_index), T)) in list_comb (t, ts) end)) in do_term end (* First-order translation. No types are known for variables. "Type_Infer.anyT @{sort type}" should allow them to be inferred. *) fun term_of_atp_fo ctxt textual sym_tab = let val thy = Proof_Context.theory_of ctxt (* For Metis, we use 1 rather than 0 because variable references in clauses may otherwise conflict with variable constraints in the goal. At least, type inference often fails otherwise. See also "axiom_inference" in "Metis_Reconstruct". 
*) val var_index = var_index_of_textual textual fun do_term extra_ts opt_T u = (case u of ATerm ((s, tys), us) => if s = "" (* special marker generated on parse error *) then error "Isar proof reconstruction failed because the ATP proof contains unparsable \ \material" else if String.isPrefix native_type_prefix s then \<^Const>\True\ (* ignore TPTP type information (needed?) *) else if s = tptp_equal then list_comb (Const (\<^const_name>\HOL.eq\, Type_Infer.anyT \<^sort>\type\), map (do_term [] NONE) us) else (case unprefix_and_unascii const_prefix s of SOME s' => let val ((s', s''), mangled_us) = s' |> unmangled_const |>> `invert_const in if s' = type_tag_name then (case mangled_us @ us of [typ_u, term_u] => do_term extra_ts (SOME (typ_of_atp_term ctxt typ_u)) term_u | _ => raise ATP_TERM us) else if s' = predicator_name then do_term [] (SOME \<^typ>\bool\) (hd us) else if s' = app_op_name then let val extra_t = do_term [] NONE (List.last us) in do_term (extra_t :: extra_ts) (case opt_T of SOME T => SOME (slack_fastype_of extra_t --> T) | NONE => NONE) (nth us (length us - 2)) end else if s' = type_guard_name then \<^Const>\True\ (* ignore type predicates *) else let val new_skolem = String.isPrefix new_skolem_const_prefix s'' val num_ty_args = length us - the_default 0 (Symtab.lookup sym_tab s) val (type_us, term_us) = chop num_ty_args us |>> append mangled_us val term_ts = map (do_term [] NONE) term_us val Ts = map (typ_of_atp_type ctxt) tys @ map (typ_of_atp_term ctxt) type_us val T = (if not (null Ts) andalso robust_const_num_type_args thy s' = length Ts then if new_skolem then SOME (Type_Infer.paramify_vars (tl Ts ---> hd Ts)) else if textual then try (Sign.const_instance thy) (s', Ts) else NONE else NONE) |> (fn SOME T => T | NONE => map slack_fastype_of term_ts ---> the_default (Type_Infer.anyT \<^sort>\type\) opt_T) val t = if new_skolem then Var ((new_skolem_var_name_of_const s'', var_index), T) else Const (unproxify_const s', T) in list_comb (t, term_ts @ extra_ts) end end | NONE => (* a free or schematic variable *) let val term_ts = map (do_term [] NONE) us (* SPASS (3.8ds) and Vampire (2.6) pass arguments to Skolem functions in reverse order, which is incompatible with "metis"'s new skolemizer. *) |> exists (fn pre => String.isPrefix pre s) [spass_skolem_prefix, vampire_skolem_prefix] ? rev val ts = term_ts @ extra_ts val T = (case tys of [ty] => typ_of_atp_type ctxt ty | _ => (case opt_T of SOME T => map slack_fastype_of term_ts ---> T | NONE => map slack_fastype_of ts ---> Type_Infer.anyT \<^sort>\type\)) val t = (case unprefix_and_unascii fixed_var_prefix s of SOME s => Free (s, T) | NONE => if textual andalso not (is_tptp_variable s) then Free (s |> textual ? fresh_up ctxt, T) else Var ((repair_var_name s, var_index), T)) in list_comb (t, ts) end)) in do_term [] end fun term_of_atp ctxt (ATP_Problem.THF _) type_enc = if ATP_Problem_Generate.is_type_enc_higher_order type_enc then K (term_of_atp_ho ctxt) else error "Unsupported Isar reconstruction" | term_of_atp ctxt _ type_enc = if not (ATP_Problem_Generate.is_type_enc_higher_order type_enc) then term_of_atp_fo ctxt else error "Unsupported Isar reconstruction" fun term_of_atom ctxt format type_enc textual sym_tab pos (u as ATerm ((s, _), _)) = if String.isPrefix class_prefix s then add_type_constraint pos (type_constraint_of_term ctxt u) #> pair \<^Const>\True\ else pair (term_of_atp ctxt format type_enc textual sym_tab (SOME \<^typ>\bool\) u) (* Update schematic type variables with detected sort constraints. 
It's not totally clear whether this code is necessary. *) fun repair_tvar_sorts (t, tvar_tab) = let fun do_type (Type (a, Ts)) = Type (a, map do_type Ts) | do_type (TVar (xi, s)) = TVar (xi, the_default s (Vartab.lookup tvar_tab xi)) | do_type (TFree z) = TFree z fun do_term (Const (a, T)) = Const (a, do_type T) | do_term (Free (a, T)) = Free (a, do_type T) | do_term (Var (xi, T)) = Var (xi, do_type T) | do_term (t as Bound _) = t | do_term (Abs (a, T, t)) = Abs (a, do_type T, do_term t) | do_term (t1 $ t2) = do_term t1 $ do_term t2 in t |> not (Vartab.is_empty tvar_tab) ? do_term end (* Interpret an ATP formula as a HOL term, extracting sort constraints as they appear in the formula. *) fun prop_of_atp ctxt format type_enc textual sym_tab phi = let fun do_formula pos phi = (case phi of AQuant (_, [], phi) => do_formula pos phi | AQuant (q, (s, _) :: xs, phi') => do_formula pos (AQuant (q, xs, phi')) (* FIXME: TFF *) #>> quantify_over_var textual (case q of AForall => forall_of | AExists => exists_of) (repair_var_name s) dummyT | AConn (ANot, [phi']) => do_formula (not pos) phi' #>> s_not | AConn (c, [phi1, phi2]) => do_formula (pos |> c = AImplies ? not) phi1 ##>> do_formula pos phi2 #>> (case c of AAnd => s_conj | AOr => s_disj | AImplies => s_imp | AIff => s_iff | ANot => raise Fail "impossible connective") | AAtom tm => term_of_atom ctxt format type_enc textual sym_tab pos tm | _ => raise ATP_FORMULA [phi]) in repair_tvar_sorts (do_formula true phi Vartab.empty) end val unprefix_fact_number = space_implode "_" o tl o space_explode "_" fun resolve_fact facts s = (case try (unprefix fact_prefix) s of SOME s' => let val s' = s' |> unprefix_fact_number |> unascii_of in AList.lookup (op =) facts s' |> Option.map (pair s') end | NONE => NONE) fun resolve_conjecture s = (case try (unprefix conjecture_prefix) s of SOME s' => Int.fromString s' | NONE => NONE) fun resolve_facts facts = map_filter (resolve_fact facts) val resolve_conjectures = map_filter resolve_conjecture fun is_axiom_used_in_proof pred = exists (fn ((_, ss), _, _, _, []) => exists pred ss | _ => false) fun add_fact ctxt facts ((num, ss), _, _, rule, deps) = (if member (op =) [agsyhol_core_rule, leo2_extcnf_equal_neg_rule] rule orelse String.isPrefix satallax_tab_rule_prefix rule then insert (op =) (short_thm_name ctxt ext, (Global, General)) else I) #> (if null deps then union (op =) (resolve_facts facts (num :: ss)) else I) fun used_facts_in_atp_proof ctxt facts atp_proof = if null atp_proof then facts else fold (add_fact ctxt facts) atp_proof [] val ascii_of_lam_fact_prefix = ascii_of lam_fact_prefix (* overapproximation (good enough) *) fun is_lam_lifted s = String.isPrefix fact_prefix s andalso String.isSubstring ascii_of_lam_fact_prefix s val is_combinator_def = String.isPrefix (helper_prefix ^ combinator_prefix) fun atp_proof_prefers_lifting atp_proof = (is_axiom_used_in_proof is_combinator_def atp_proof, is_axiom_used_in_proof is_lam_lifted atp_proof) = (false, true) val is_typed_helper_name = String.isPrefix helper_prefix andf String.isSuffix typed_helper_suffix fun is_typed_helper_used_in_atp_proof atp_proof = is_axiom_used_in_proof is_typed_helper_name atp_proof fun replace_one_dependency (old, new) dep = if is_same_atp_step dep old then new else [dep] fun replace_dependencies_in_line old_new (name, role, t, rule, deps) = (name, role, t, rule, fold (union (op =) o replace_one_dependency old_new) deps []) fun repair_name "$true" = "c_True" | repair_name "$false" = "c_False" | repair_name "$$e" = tptp_equal (* seen in 
Vampire proofs *) | repair_name s = if is_tptp_equal s orelse (* seen in Vampire proofs *) (String.isPrefix "sQ" s andalso String.isSuffix "_eqProxy" s) then tptp_equal else s fun set_var_index j = map_aterms (fn Var ((s, 0), T) => Var ((s, j), T) | t => t) fun infer_formulas_types ctxt = map_index (uncurry (fn j => set_var_index j #> Type.constraint HOLogic.boolT)) #> Syntax.check_terms (Proof_Context.set_mode Proof_Context.mode_schematic ctxt) val combinator_table = [(\<^const_name>\Meson.COMBI\, @{thm Meson.COMBI_def [abs_def]}), (\<^const_name>\Meson.COMBK\, @{thm Meson.COMBK_def [abs_def]}), (\<^const_name>\Meson.COMBB\, @{thm Meson.COMBB_def [abs_def]}), (\<^const_name>\Meson.COMBC\, @{thm Meson.COMBC_def [abs_def]}), (\<^const_name>\Meson.COMBS\, @{thm Meson.COMBS_def [abs_def]})] fun uncombine_term thy = let fun uncomb (t1 $ t2) = betapply (uncomb t1, uncomb t2) | uncomb (Abs (s, T, t)) = Abs (s, T, uncomb t) | uncomb (t as Const (x as (s, _))) = (case AList.lookup (op =) combinator_table s of SOME thm => thm |> Thm.prop_of |> specialize_type thy x |> Logic.dest_equals |> snd | NONE => t) | uncomb t = t in uncomb end fun unlift_aterm lifted (t as Const (s, _)) = if String.isPrefix lam_lifted_prefix s then (* FIXME: do something about the types *) (case AList.lookup (op =) lifted s of SOME t' => unlift_term lifted t' | NONE => t) else t | unlift_aterm _ t = t and unlift_term lifted = map_aterms (unlift_aterm lifted) fun termify_line _ _ _ _ _ (_, Type_Role, _, _, _) = NONE | termify_line ctxt format type_enc lifted sym_tab (name, role, u, rule, deps) = let val thy = Proof_Context.theory_of ctxt val t = u |> prop_of_atp ctxt format type_enc true sym_tab |> unlift_term lifted |> uncombine_term thy |> simplify_bool in SOME (name, role, t, rule, deps) end val waldmeister_conjecture_num = "1.0.0.0" fun repair_waldmeister_endgame proof = let fun repair_tail (name, _, \<^Const>\Trueprop for t\, rule, deps) = (name, Negated_Conjecture, \<^Const>\Trueprop for \s_not t\\, rule, deps) fun repair_body [] = [] | repair_body ((line as ((num, _), _, _, _, _)) :: lines) = if num = waldmeister_conjecture_num then map repair_tail (line :: lines) else line :: repair_body lines in repair_body proof end fun map_proof_terms f (lines : ('a * 'b * 'c * 'd * 'e) list) = map2 (fn c => fn (a, b, _, d, e) => (a, b, c, d, e)) (f (map #3 lines)) lines fun termify_atp_proof ctxt local_prover format type_enc pool lifted sym_tab = nasty_atp_proof pool #> map_term_names_in_atp_proof repair_name #> map_filter (termify_line ctxt format type_enc lifted sym_tab) #> map_proof_terms (infer_formulas_types ctxt #> map HOLogic.mk_Trueprop) #> local_prover = waldmeisterN ? 
repair_waldmeister_endgame fun unskolemize_spass_term skos = let val is_skolem_name = member (op =) skos fun find_argless_skolem (Free _ $ Var _) = NONE | find_argless_skolem (Free (x as (s, _))) = if is_skolem_name s then SOME x else NONE | find_argless_skolem (t $ u) = (case find_argless_skolem t of NONE => find_argless_skolem u | sk => sk) | find_argless_skolem (Abs (_, _, t)) = find_argless_skolem t | find_argless_skolem _ = NONE fun find_skolem_arg (Free (s, _) $ Var v) = if is_skolem_name s then SOME v else NONE | find_skolem_arg (t $ u) = (case find_skolem_arg t of NONE => find_skolem_arg u | v => v) | find_skolem_arg (Abs (_, _, t)) = find_skolem_arg t | find_skolem_arg _ = NONE fun kill_skolem_arg (t as Free (s, T) $ Var _) = if is_skolem_name s then Free (s, range_type T) else t | kill_skolem_arg (t $ u) = kill_skolem_arg t $ kill_skolem_arg u | kill_skolem_arg (Abs (s, T, t)) = Abs (s, T, kill_skolem_arg t) | kill_skolem_arg t = t fun find_var (Var v) = SOME v | find_var (t $ u) = (case find_var t of NONE => find_var u | v => v) | find_var (Abs (_, _, t)) = find_var t | find_var _ = NONE val safe_abstract_over = abstract_over o apsnd (incr_boundvars 1) fun unskolem_inner t = (case find_argless_skolem t of SOME (x as (s, T)) => HOLogic.exists_const T $ Abs (s, T, unskolem_inner (safe_abstract_over (Free x, t))) | NONE => (case find_skolem_arg t of SOME (v as ((s, _), T)) => let val (haves, have_nots) = HOLogic.disjuncts t |> List.partition (exists_subterm (curry (op =) (Var v))) |> apply2 (fn lits => fold (curry s_disj) lits \<^term>\False\) in s_disj (HOLogic.all_const T $ Abs (s, T, unskolem_inner (safe_abstract_over (Var v, kill_skolem_arg haves))), unskolem_inner have_nots) end | NONE => (case find_var t of SOME (v as ((s, _), T)) => HOLogic.all_const T $ Abs (s, T, unskolem_inner (safe_abstract_over (Var v, t))) | NONE => t))) fun unskolem_outer (@{const Trueprop} $ t) = @{const Trueprop} $ unskolem_outer t | unskolem_outer t = unskolem_inner t in unskolem_outer end fun rename_skolem_args t = let fun add_skolem_args (Abs (_, _, t)) = add_skolem_args t | add_skolem_args t = (case strip_comb t of (Free (s, _), args as _ :: _) => if String.isPrefix spass_skolem_prefix s then insert (op =) (s, take_prefix is_Var args) else fold add_skolem_args args | (u, args as _ :: _) => fold add_skolem_args (u :: args) | _ => I) fun subst_of_skolem (sk, args) = map_index (fn (j, Var (z, T)) => (z, Var ((sk ^ "_" ^ string_of_int j, 0), T))) args val subst = maps subst_of_skolem (add_skolem_args t []) in subst_vars ([], subst) t end fun introduce_spass_skolems proof = let fun add_skolem (Free (s, _)) = String.isPrefix spass_skolem_prefix s ? insert (op =) s | add_skolem _ = I (* union-find would be faster *) fun add_cycle [] = I | add_cycle ss = fold (fn s => Graph.default_node (s, ())) ss #> fold Graph.add_edge (ss ~~ tl ss @ [hd ss]) val (input_steps, other_steps) = List.partition (null o #5) proof (* The names of the formulas are added to the Skolem constants, to ensure that formulas giving rise to several clauses are skolemized together. 
*) val skoXss = map (fn ((_, ss), _, t, _, _) => Term.fold_aterms add_skolem t ss) input_steps val groups0 = Graph.strong_conn (fold add_cycle skoXss Graph.empty) val groups = filter (exists (String.isPrefix spass_skolem_prefix)) groups0 val skoXss_input_steps = skoXss ~~ input_steps fun step_name_of_group skoXs = (implode skoXs, []) fun in_group group = member (op =) group o hd fun group_of skoX = find_first (fn group => in_group group skoX) groups fun new_steps (skoXss_steps : (string list * (term, 'a) atp_step) list) group = let val name = step_name_of_group group val name0 = name |>> prefix "0" val t = (case map (snd #> #3) skoXss_steps of [t] => t | ts => ts |> map (HOLogic.dest_Trueprop #> rename_skolem_args) |> Library.foldr1 s_conj |> HOLogic.mk_Trueprop) val skos = fold (union (op =) o filter (String.isPrefix spass_skolem_prefix) o fst) skoXss_steps [] val deps = map (snd #> #1) skoXss_steps in [(name0, Unknown, unskolemize_spass_term skos t, spass_pre_skolemize_rule, deps), (name, Unknown, t, spass_skolemize_rule, [name0])] end val sko_steps = maps (fn group => new_steps (filter (in_group group o fst) skoXss_input_steps) group) groups val old_news = map_filter (fn (skoXs, (name, _, _, _, _)) => Option.map (pair name o single o step_name_of_group) (group_of skoXs)) skoXss_input_steps val repair_deps = fold replace_dependencies_in_line old_news in input_steps @ sko_steps @ map repair_deps other_steps end fun factify_atp_proof facts hyp_ts concl_t atp_proof = let fun factify_step ((num, ss), role, t, rule, deps) = let val (ss', role', t') = (case resolve_conjectures ss of [j] => if j = length hyp_ts then ([], Conjecture, concl_t) else ([], Hypothesis, close_form (nth hyp_ts j)) | _ => (case resolve_facts facts (num :: ss) of [] => (ss, if member (op =) [Definition, Lemma] role then role else Plain, t) | facts => (map fst facts, Axiom, t))) in ((num, ss'), role', t', rule, deps) end val atp_proof = map factify_step atp_proof val names = map #1 atp_proof fun repair_dep (num, ss) = (num, the_default ss (AList.lookup (op =) names num)) fun repair_deps (name, role, t, rule, deps) = (name, role, t, rule, map repair_dep deps) in map repair_deps atp_proof end fun termify_atp_abduce_candidate ctxt local_prover format type_enc pool lifted sym_tab phi = let val proof = [(("", []), Conjecture, mk_anot phi, "", [])] val new_proof = termify_atp_proof ctxt local_prover format type_enc pool lifted sym_tab proof in (case new_proof of [(_, _, phi', _, _)] => phi' | _ => error "Impossible case in termify_atp_abduce_candidate") end fun sort_top n scored_items = if n <= 0 orelse null scored_items then [] else let fun split_min accum [] (_, best_item) = (best_item, List.rev accum) | split_min accum ((score, item) :: scored_items) (best_score, best_item) = if score < best_score then split_min ((best_score, best_item) :: accum) scored_items (score, item) else split_min ((score, item) :: accum) scored_items (best_score, best_item) val (min_item, other_scored_items) = split_min [] (tl scored_items) (hd scored_items) in min_item :: sort_top (n - 1) (filter_out (equal min_item o snd) other_scored_items) |> distinct (op aconv) end fun top_abduce_candidates max_candidates candidates = let (* We prefer free variables to other constructs, so that e.g. "x \ y" is prioritized over "x \ 0". *) fun score t = - Term.fold_aterms (fn t => fn score => score + (case t of Free _ => ~1 | _ => 4)) t 0 + Term.fold_aterms (fn t => fn score => score + (case t of Free _ => 1 | _ => 2)) t 0 (* Equations of the form "x = ..." 
or "... = x" or similar are too specific to be useful. Quantified formulas are also filtered out. As for "True", it may seem an odd choice for abduction, but it sometimes arises in conjunction with type class constraints, which are removed by the termifier. *) fun maybe_score t = (case t of @{prop True} => NONE | @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ Free _ $ _) => NONE | @{const Trueprop} $ (Const (@{const_name HOL.eq}, _) $ _ $ Free _) => NONE | @{const Trueprop} $ (@{const less(nat)} $ _ $ @{const zero_class.zero(nat)}) => NONE | @{const Trueprop} $ (@{const less_eq(nat)} $ _ $ @{const zero_class.zero(nat)}) => NONE | @{const Trueprop} $ (@{const less(nat)} $ _ $ @{const one_class.one(nat)}) => NONE | @{const Trueprop} $ (@{const Not} $ (@{const less(nat)} $ @{const zero_class.zero(nat)} $ _)) => NONE | @{const Trueprop} $ (@{const Not} $ (@{const less_eq(nat)} $ @{const zero_class.zero(nat)} $ _)) => NONE | @{const Trueprop} $ (@{const Not} $ - (@{const less(nat)} $ @{const one_class.one(nat)} $ _)) => NONE + (@{const less_eq(nat)} $ @{const one_class.one(nat)} $ _)) => NONE | @{const Trueprop} $ (Const (@{const_name All}, _) $ _) => NONE | @{const Trueprop} $ (Const (@{const_name Ex}, _) $ _) => NONE | _ => SOME (score t, t)) in sort_top max_candidates (map_filter maybe_score candidates) end end; diff --git a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML @@ -1,536 +1,536 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_isar.ML Author: Jasmin Blanchette, TU Muenchen Author: Steffen Juilf Smolka, TU Muenchen Isar proof reconstruction from ATP proofs. *) signature SLEDGEHAMMER_ISAR = sig type atp_step_name = ATP_Proof.atp_step_name type ('a, 'b) atp_step = ('a, 'b) ATP_Proof.atp_step type 'a atp_proof = 'a ATP_Proof.atp_proof type stature = ATP_Problem_Generate.stature type one_line_params = Sledgehammer_Proof_Methods.one_line_params val trace : bool Config.T type isar_params = bool * (string option * string option) * Time.time * real option * bool * bool * (term, string) atp_step list * thm val proof_text : Proof.context -> bool -> bool option -> bool -> (unit -> isar_params) -> int -> one_line_params -> string val abduce_text : Proof.context -> term list -> string end; structure Sledgehammer_Isar : SLEDGEHAMMER_ISAR = struct open ATP_Util open ATP_Problem open ATP_Problem_Generate open ATP_Proof open ATP_Proof_Reconstruct open Sledgehammer_Util open Sledgehammer_Proof_Methods open Sledgehammer_Isar_Proof open Sledgehammer_Isar_Preplay open Sledgehammer_Isar_Compress open Sledgehammer_Isar_Minimize structure String_Redirect = ATP_Proof_Redirect( type key = atp_step_name val ord = fn ((s, _ : string list), (s', _)) => fast_string_ord (s, s') val string_of = fst) open String_Redirect val trace = Attrib.setup_config_bool \<^binding>\sledgehammer_isar_trace\ (K false) val e_definition_rule = "definition" val e_skolemize_rule = "skolemize" val leo2_extcnf_forall_neg_rule = "extcnf_forall_neg" val satallax_skolemize_rule = "tab_ex" val vampire_skolemisation_rule = "skolemisation" val veriT_la_generic_rule = "la_generic" val veriT_simp_arith_rule = "simp_arith" val veriT_skolemize_rules = Lethe_Proof.skolemization_steps val z3_skolemize_rule = Z3_Proof.string_of_rule Z3_Proof.Skolemize val z3_th_lemma_rule_prefix = Z3_Proof.string_of_rule (Z3_Proof.Th_Lemma "") val zipperposition_cnf_rule = "cnf" val 
symbol_introduction_rules = [e_definition_rule, e_skolemize_rule, leo2_extcnf_forall_neg_rule, satallax_skolemize_rule, spass_skolemize_rule, vampire_skolemisation_rule, z3_skolemize_rule, zipperposition_cnf_rule, zipperposition_define_rule] @ veriT_skolemize_rules fun is_ext_rule rule = (rule = leo2_extcnf_equal_neg_rule) val is_maybe_ext_rule = is_ext_rule orf String.isPrefix satallax_tab_rule_prefix val is_symbol_introduction_rule = member (op =) symbol_introduction_rules fun is_arith_rule rule = String.isPrefix z3_th_lemma_rule_prefix rule orelse rule = veriT_simp_arith_rule orelse rule = veriT_la_generic_rule fun raw_label_of_num num = (num, 0) fun label_of_clause [(num, _)] = raw_label_of_num num | label_of_clause c = (space_implode "___" (map (fst o raw_label_of_num o fst) c), 0) fun add_global_fact ss = apsnd (union (op =) ss) fun add_fact_of_dependency [(_, ss as _ :: _)] = add_global_fact ss | add_fact_of_dependency names = apfst (insert (op =) (label_of_clause names)) fun add_line_pass1 (line as (name, role, t, rule, [])) lines = (* No dependencies: lemma (for Z3), fact, conjecture, or (for Vampire) internal facts or definitions. *) if role = Conjecture orelse role = Negated_Conjecture then line :: lines else if t aconv \<^prop>\True\ then map (replace_dependencies_in_line (name, [])) lines else if role = Definition orelse role = Lemma orelse role = Hypothesis orelse is_arith_rule rule then line :: lines else if role = Axiom then lines (* axioms (facts) need no proof lines *) else map (replace_dependencies_in_line (name, [])) lines | add_line_pass1 line lines = line :: lines fun add_lines_pass2 res [] = rev res | add_lines_pass2 res ((line as (name, role, t, rule, deps)) :: lines) = let fun normalize role = role = Conjecture ? (HOLogic.dest_Trueprop #> s_not #> HOLogic.mk_Trueprop) val norm_t = normalize role t val is_duplicate = exists (fn (prev_name, prev_role, prev_t, _, _) => (prev_role = Hypothesis andalso prev_t aconv t) orelse (member (op =) deps prev_name andalso Term.aconv_untyped (normalize prev_role prev_t, norm_t))) res fun looks_boring () = t aconv \<^prop>\False\ orelse length deps < 2 fun is_symbol_introduction_line (_, _, _, rule', deps') = is_symbol_introduction_rule rule' andalso member (op =) deps' name fun is_before_symbol_introduction_rule () = exists is_symbol_introduction_line lines in if is_duplicate orelse (role = Plain andalso not (is_symbol_introduction_rule rule) andalso not (is_ext_rule rule) andalso not (is_arith_rule rule) andalso not (null lines) andalso looks_boring () andalso not (is_before_symbol_introduction_rule ())) then add_lines_pass2 res (map (replace_dependencies_in_line (name, deps)) lines) else add_lines_pass2 (line :: res) lines end type isar_params = bool * (string option * string option) * Time.time * real option * bool * bool * (term, string) atp_step list * thm val basic_systematic_methods = [Metis_Method (NONE, NONE), Meson_Method, Blast_Method, SATx_Method] val basic_simp_based_methods = [Auto_Method, Simp_Method, Fastforce_Method, Force_Method] val basic_arith_methods = [Linarith_Method, Presburger_Method, Algebra_Method] val arith_methods = basic_arith_methods @ basic_simp_based_methods @ basic_systematic_methods val systematic_methods = basic_systematic_methods @ basic_arith_methods @ basic_simp_based_methods @ [Metis_Method (SOME full_typesN, NONE), Metis_Method (SOME no_typesN, NONE)] val rewrite_methods = basic_simp_based_methods @ basic_systematic_methods @ basic_arith_methods val skolem_methods = Moura_Method :: 
systematic_methods fun isar_proof_text ctxt debug num_chained isar_proofs smt_proofs isar_params (one_line_params as ((used_facts, (_, one_line_play)), banner, subgoal, subgoal_count)) = let val _ = if debug then writeln "Constructing Isar proof..." else () fun generate_proof_text () = let val (verbose, alt_metis_args, preplay_timeout, compress, try0, minimize, atp_proof0, goal) = isar_params () in if null atp_proof0 then one_line_proof_text ctxt 0 one_line_params else let val systematic_methods' = insert (op =) (Metis_Method alt_metis_args) systematic_methods fun massage_methods (meths as meth :: _) = if not try0 then [meth] else if smt_proofs then insert (op =) (SMT_Method SMT_Z3) meths else meths val (params, _, concl_t) = strip_subgoal goal subgoal ctxt val fixes = map (fn (s, T) => (Binding.name s, SOME T, NoSyn)) params val ctxt = ctxt |> Variable.set_body false |> Proof_Context.add_fixes fixes |> snd val do_preplay = preplay_timeout <> Time.zeroTime val compress = (case compress of NONE => if isar_proofs = NONE andalso do_preplay then 1000.0 else 10.0 | SOME n => n) fun is_fixed ctxt = Variable.is_declared ctxt orf Name.is_skolem fun introduced_symbols_of ctxt t = Term.add_frees t [] |> filter_out (is_fixed ctxt o fst) |> rev fun get_role keep_role ((num, _), role, t, rule, _) = if keep_role role then SOME ((raw_label_of_num num, t), rule) else NONE val trace = Config.get ctxt trace val string_of_atp_steps = let val to_string = ATP_Proof.string_of_atp_step (Syntax.string_of_term ctxt) I in enclose "[\n" "\n]" o cat_lines o map (enclose " " "," o to_string) end val atp_proof = atp_proof0 |> trace ? tap (tracing o prefix "atp_proof0 = " o string_of_atp_steps) |> distinct (op =) (* Zipperposition generates duplicate lines *) |> (fn lines => fold_rev add_line_pass1 lines []) |> add_lines_pass2 [] |> trace ? tap (tracing o prefix "atp_proof = " o string_of_atp_steps) val conjs = map_filter (fn (name, role, _, _, _) => if member (op =) [Conjecture, Negated_Conjecture] role then SOME name else NONE) atp_proof val assms = map_filter (Option.map fst o get_role (curry (op =) Hypothesis)) atp_proof fun add_lemma ((label, goal), rule) ctxt = let val (obtains, proof_methods) = (if is_symbol_introduction_rule rule then (introduced_symbols_of ctxt goal, skolem_methods) else if is_arith_rule rule then ([], arith_methods) else ([], rewrite_methods)) ||> massage_methods val prove = Prove { qualifiers = [], obtains = obtains, label = label, goal = goal, subproofs = [], facts = ([], []), proof_methods = proof_methods, comment = ""} in (prove, ctxt |> not (null obtains) ? (Variable.add_fixes (map fst obtains) #> snd)) end val (lems, _) = fold_map add_lemma (map_filter (get_role (member (op =) [Definition, Lemma])) atp_proof) ctxt val bot = #1 (List.last atp_proof) val refute_graph = atp_proof |> map (fn (name, _, _, _, from) => (from, name)) |> make_refute_graph bot |> fold (Atom_Graph.default_node o rpair ()) conjs val axioms = axioms_of_refute_graph refute_graph conjs val tainted = tainted_atoms_of_refute_graph refute_graph conjs val is_clause_tainted = exists (member (op =) tainted) val steps = Symtab.empty |> fold (fn (name as (s, _), role, t, rule, _) => Symtab.update_new (s, (rule, t |> (if is_clause_tainted [name] then HOLogic.dest_Trueprop #> role <> Conjecture ? 
s_not #> fold exists_of (map Var (Term.add_vars t [])) #> HOLogic.mk_Trueprop else I)))) atp_proof fun is_referenced_in_step _ (Let _) = false | is_referenced_in_step l (Prove {subproofs, facts = (ls, _), ...}) = member (op =) ls l orelse exists (is_referenced_in_proof l) subproofs and is_referenced_in_proof l (Proof {steps, ...}) = exists (is_referenced_in_step l) steps fun insert_lemma_in_step lem (step as Prove {qualifiers, obtains, label, goal, subproofs, facts = (ls, gs), proof_methods, comment}) = let val l' = the (label_of_isar_step lem) in if member (op =) ls l' then [lem, step] else let val refs = map (is_referenced_in_proof l') subproofs in if length (filter I refs) = 1 then [Prove { qualifiers = qualifiers, obtains = obtains, label = label, goal = goal, subproofs = map2 (fn false => I | true => insert_lemma_in_proof lem) refs subproofs, facts = (ls, gs), proof_methods = proof_methods, comment = comment}] else [lem, step] end end and insert_lemma_in_steps lem [] = [lem] | insert_lemma_in_steps lem (step :: steps) = if is_referenced_in_step (the (label_of_isar_step lem)) step then insert_lemma_in_step lem step @ steps else step :: insert_lemma_in_steps lem steps and insert_lemma_in_proof lem (proof as Proof {steps, ...}) = isar_proof_with_steps proof (insert_lemma_in_steps lem steps) val rule_of_clause_id = fst o the o Symtab.lookup steps o fst val finish_off = close_form #> rename_bound_vars fun prop_of_clause [(num, _)] = Symtab.lookup steps num |> the |> snd |> finish_off | prop_of_clause names = let val lits = map (HOLogic.dest_Trueprop o snd) (map_filter (Symtab.lookup steps o fst) names) in (case List.partition (can HOLogic.dest_not) lits of (negs as _ :: _, pos as _ :: _) => s_imp (Library.foldr1 s_conj (map HOLogic.dest_not negs), Library.foldr1 s_disj pos) | _ => fold (curry s_disj) lits \<^term>\False\) end |> HOLogic.mk_Trueprop |> finish_off fun maybe_show outer c = if outer andalso eq_set (op =) (c, conjs) then [Show] else [] fun isar_steps outer predecessor accum [] = accum |> (if tainted = [] then (* e.g., trivial, empty proof by Z3 *) cons (Prove { qualifiers = if outer then [Show] else [], obtains = [], label = no_label, goal = concl_t, subproofs = [], facts = sort_facts (the_list predecessor, []), proof_methods = massage_methods systematic_methods', comment = ""}) else I) |> rev | isar_steps outer _ accum (Have (id, (gamma, c)) :: infs) = let val l = label_of_clause c val t = prop_of_clause c val rule = rule_of_clause_id id val introduces_symbols = is_symbol_introduction_rule rule val deps = ([], []) |> fold add_fact_of_dependency gamma |> is_maybe_ext_rule rule ? 
add_global_fact [short_thm_name ctxt ext] |> sort_facts val meths = (if introduces_symbols then skolem_methods else if is_arith_rule rule then arith_methods else systematic_methods') |> massage_methods fun prove subproofs facts = Prove { qualifiers = maybe_show outer c, obtains = [], label = l, goal = t, subproofs = subproofs, facts = facts, proof_methods = meths, comment = ""} fun steps_of_rest step = isar_steps outer (SOME l) (step :: accum) infs in if is_clause_tainted c then (case gamma of [g] => if introduces_symbols andalso is_clause_tainted g andalso not (null accum) then let val fixes = introduced_symbols_of ctxt (prop_of_clause g) val subproof = Proof {fixes = fixes, assumptions = [], steps = rev accum} in isar_steps outer (SOME l) [prove [subproof] ([], [])] infs end else steps_of_rest (prove [] deps) | _ => steps_of_rest (prove [] deps)) else steps_of_rest (if introduces_symbols then (case introduced_symbols_of ctxt t of [] => prove [] deps | skos => Prove { qualifiers = [], obtains = skos, label = l, goal = t, subproofs = [], facts = deps, proof_methods = meths, comment = ""}) else prove [] deps) end | isar_steps outer predecessor accum (Cases cases :: infs) = let fun isar_case (c, subinfs) = isar_proof false [] [(label_of_clause c, prop_of_clause c)] [] subinfs val c = succedent_of_cases cases val l = label_of_clause c val t = prop_of_clause c val step = Prove { qualifiers = maybe_show outer c, obtains = [], label = l, goal = t, subproofs = map isar_case (filter_out (null o snd) cases), facts = sort_facts (the_list predecessor, []), proof_methods = massage_methods systematic_methods', comment = ""} in isar_steps outer (SOME l) (step :: accum) infs end and isar_proof outer fixes assumptions lems infs = let val steps = fold_rev insert_lemma_in_steps lems (isar_steps outer NONE [] infs) in Proof {fixes = fixes, assumptions = assumptions, steps = steps} end val canonical_isar_proof = refute_graph |> trace ? tap (tracing o prefix "Refute graph:\n" o string_of_refute_graph) |> redirect_graph axioms tainted bot |> trace ? tap (tracing o prefix "Direct proof:\n" o string_of_direct_proof) |> isar_proof true params assms lems |> postprocess_isar_proof_remove_show_stuttering |> postprocess_isar_proof_remove_unreferenced_steps I |> relabel_isar_proof_canonically val ctxt = ctxt |> enrich_context_with_local_facts canonical_isar_proof val preplay_data = Unsynchronized.ref Canonical_Label_Tab.empty val _ = fold_isar_steps (fn meth => K (set_preplay_outcomes_of_isar_step ctxt preplay_timeout preplay_data meth [])) (steps_of_isar_proof canonical_isar_proof) () fun str_of_preplay_outcome outcome = if Lazy.is_finished outcome then string_of_play_outcome (Lazy.force outcome) else "?" 
fun str_of_meth l meth = string_of_proof_method [] meth ^ " " ^ str_of_preplay_outcome (preplay_outcome_of_isar_step_for_method (!preplay_data) l meth) fun comment_of l = map (str_of_meth l) #> commas fun trace_isar_proof label proof = if trace then tracing (timestamp () ^ "\n" ^ label ^ ":\n\n" ^ string_of_isar_proof ctxt subgoal subgoal_count (comment_isar_proof comment_of proof) ^ "\n") else () fun comment_of l (meth :: _) = (case (verbose, Lazy.force (preplay_outcome_of_isar_step_for_method (!preplay_data) l meth)) of (false, Played _) => "" | (_, outcome) => string_of_play_outcome outcome) val (play_outcome, isar_proof) = canonical_isar_proof |> tap (trace_isar_proof "Original") |> compress_isar_proof ctxt compress preplay_timeout preplay_data |> tap (trace_isar_proof "Compressed") |> postprocess_isar_proof_remove_unreferenced_steps (do_preplay ? keep_fastest_method_of_isar_step (!preplay_data) #> minimize ? minimize_isar_step_dependencies ctxt preplay_data) |> tap (trace_isar_proof "Minimized") |> `(if do_preplay then preplay_outcome_of_isar_proof (!preplay_data) else K (Play_Timed_Out Time.zeroTime)) ||> (comment_isar_proof comment_of #> chain_isar_proof #> kill_useless_labels_in_isar_proof #> relabel_isar_proof_nicely #> rationalize_obtains_in_isar_proofs ctxt) in (case (num_chained, add_isar_steps (steps_of_isar_proof isar_proof) 0) of (0, 1) => one_line_proof_text ctxt 0 (if is_less (play_outcome_ord (play_outcome, one_line_play)) then (case isar_proof of Proof {steps = [Prove {facts = (_, gfs), proof_methods = meth :: _, ...}], ...} => let val used_facts' = map_filter (fn s => if exists (fn (s', (sc, _)) => s' = s andalso sc = Chained) used_facts then NONE else SOME (s, (Global, General))) gfs in ((used_facts', (meth, play_outcome)), banner, subgoal, subgoal_count) end) else one_line_params) ^ (if isar_proofs = SOME true then "\n(No Isar proof available)" else "") | (_, num_steps) => let val msg = (if verbose then [string_of_int num_steps ^ " step" ^ plural_s num_steps] else []) @ (if do_preplay then [string_of_play_outcome play_outcome] else []) in one_line_proof_text ctxt 0 one_line_params ^ (if isar_proofs <> NONE orelse (case play_outcome of Played _ => true | _ => false) then "\n\nIsar proof" ^ (commas msg |> not (null msg) ? 
enclose " (" ")") ^ ":\n" ^ Active.sendback_markup_command (string_of_isar_proof ctxt subgoal subgoal_count isar_proof) else "") end) end end in if debug then generate_proof_text () else (case try generate_proof_text () of SOME s => s | NONE => one_line_proof_text ctxt 0 one_line_params ^ (if isar_proofs = SOME true then "\nWarning: Isar proof construction failed" else "")) end fun isar_proof_would_be_a_good_idea (_, play) = (case play of Played _ => false | Play_Timed_Out time => time > Time.zeroTime | Play_Failed => true) fun proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained (one_line_params as ((_, preplay), _, _, _)) = (if isar_proofs = SOME true orelse (isar_proofs = NONE andalso isar_proof_would_be_a_good_idea preplay) then isar_proof_text ctxt debug num_chained isar_proofs smt_proofs isar_params else one_line_proof_text ctxt num_chained) one_line_params fun abduce_text ctxt tms = - "Candidate" ^ plural_s (length tms) ^ " for missing hypothesis:\n" ^ + "Candidate missing assumption" ^ plural_s (length tms) ^ ":\n" ^ cat_lines (map (Syntax.string_of_term ctxt) tms) end; diff --git a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML @@ -1,364 +1,364 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML Author: Fabian Immler, TU Muenchen Author: Makarius Author: Jasmin Blanchette, TU Muenchen ATPs as Sledgehammer provers. *) signature SLEDGEHAMMER_PROVER_ATP = sig type mode = Sledgehammer_Prover.mode type prover = Sledgehammer_Prover.prover val atp_problem_dest_dir : string Config.T val atp_proof_dest_dir : string Config.T val atp_problem_prefix : string Config.T val atp_completish : int Config.T val atp_full_names : bool Config.T val run_atp : mode -> string -> prover end; structure Sledgehammer_Prover_ATP : SLEDGEHAMMER_PROVER_ATP = struct open ATP_Util open ATP_Problem open ATP_Problem_Generate open ATP_Proof open ATP_Proof_Reconstruct open Sledgehammer_Util open Sledgehammer_Proof_Methods open Sledgehammer_Isar open Sledgehammer_ATP_Systems open Sledgehammer_Prover (* Empty string means create files in Isabelle's temporary files directory. *) val atp_problem_dest_dir = Attrib.setup_config_string \<^binding>\sledgehammer_atp_problem_dest_dir\ (K "") val atp_proof_dest_dir = Attrib.setup_config_string \<^binding>\sledgehammer_atp_proof_dest_dir\ (K "") val atp_problem_prefix = Attrib.setup_config_string \<^binding>\sledgehammer_atp_problem_prefix\ (K "prob") val atp_completish = Attrib.setup_config_int \<^binding>\sledgehammer_atp_completish\ (K 0) (* In addition to being easier to read, readable names are often much shorter, especially if types are mangled in names. This makes a difference for some provers (e.g., E). For these reason, short names are enabled by default. *) val atp_full_names = Attrib.setup_config_bool \<^binding>\sledgehammer_atp_full_names\ (K false) fun choose_type_enc strictness format good_type_enc = type_enc_of_string strictness good_type_enc |> adjust_type_enc format fun has_bound_or_var_of_type pred = exists_subterm (fn Var (_, T as Type _) => pred T | Abs (_, T as Type _, _) => pred T | _ => false) (* Unwanted equalities are those between a (bound or schematic) variable that does not properly occur in the second operand. 
*) val is_exhaustive_finite = let fun is_bad_equal (Var z) t = not (exists_subterm (fn Var z' => z = z' | _ => false) t) | is_bad_equal (Bound j) t = not (loose_bvar1 (t, j)) | is_bad_equal _ _ = false fun do_equals t1 t2 = is_bad_equal t1 t2 orelse is_bad_equal t2 t1 fun do_formula pos t = (case (pos, t) of (_, \<^Const_>\Trueprop for t1\) => do_formula pos t1 | (true, Const (\<^const_name>\Pure.all\, _) $ Abs (_, _, t')) => do_formula pos t' | (true, Const (\<^const_name>\All\, _) $ Abs (_, _, t')) => do_formula pos t' | (false, Const (\<^const_name>\Ex\, _) $ Abs (_, _, t')) => do_formula pos t' | (_, \<^Const_>\Pure.imp for t1 t2\) => do_formula (not pos) t1 andalso (t2 = \<^prop>\False\ orelse do_formula pos t2) | (_, \<^Const_>\implies for t1 t2\) => do_formula (not pos) t1 andalso (t2 = \<^Const>\False\ orelse do_formula pos t2) | (_, \<^Const_>\Not for t1\) => do_formula (not pos) t1 | (true, \<^Const_>\disj for t1 t2\) => forall (do_formula pos) [t1, t2] | (false, \<^Const_>\conj for t1 t2\) => forall (do_formula pos) [t1, t2] | (true, Const (\<^const_name>\HOL.eq\, _) $ t1 $ t2) => do_equals t1 t2 | (true, Const (\<^const_name>\Pure.eq\, _) $ t1 $ t2) => do_equals t1 t2 | _ => false) in do_formula true end (* Facts containing variables of finite types such as "unit" or "bool" or of the form "ALL x. x = A | x = B | x = C" are likely to lead to untypable proofs for unsound type encodings. *) fun is_dangerous_prop ctxt = transform_elim_prop #> (has_bound_or_var_of_type (is_type_surely_finite ctxt) orf is_exhaustive_finite) val mono_max_privileged_facts = 10 fun suffix_of_mode Auto_Try = "_try" | suffix_of_mode Try = "_try" | suffix_of_mode Normal = "" | suffix_of_mode MaSh = "" | suffix_of_mode Minimize = "_min" (* Important messages are important but not so important that users want to see them each time. 
*) val important_message_keep_quotient = 25 fun run_atp mode name ({debug, verbose, overlord, abduce = abduce_max_candidates, strict, max_mono_iters, max_new_mono_instances, isar_proofs, compress, try0, smt_proofs, minimize, slices, timeout, preplay_timeout, spy, ...} : params) ({comment, state, goal, subgoal, subgoal_count, factss, has_already_found_something, found_something} : prover_problem) slice = let val (basic_slice as (slice_size, abduce, _, _, _), ATP_Slice (good_format, good_type_enc, good_lam_trans, good_uncurried_aliases, extra)) = slice val facts = facts_of_basic_slice basic_slice factss val thy = Proof.theory_of state val ctxt = Proof.context_of state val {exec, arguments, proof_delims, known_failures, prem_role, generate_isabelle_info, good_max_mono_iters, good_max_new_mono_instances, ...} = get_atp thy name () val full_proofs = isar_proofs |> the_default (mode = Minimize) val local_name = perhaps (try (unprefix remote_prefix)) name val completish = Config.get ctxt atp_completish val atp_mode = if completish > 0 then Sledgehammer_Completish completish else Sledgehammer val (_, hyp_ts, concl_t) = strip_subgoal goal subgoal ctxt val (problem_dest_dir, proof_dest_dir, problem_prefix) = if overlord then overlord_file_location_of_prover name |> (fn (dir, prefix) => (dir, dir, prefix)) else (Config.get ctxt atp_problem_dest_dir, Config.get ctxt atp_proof_dest_dir, Config.get ctxt atp_problem_prefix) val problem_file_name = Path.basic (problem_prefix ^ (if overlord then "" else serial_string ()) ^ suffix_of_mode mode ^ "_" ^ string_of_int subgoal) |> Path.ext "p" val prob_path = if problem_dest_dir = "" then File.tmp_path problem_file_name else if File.exists (Path.explode problem_dest_dir) then Path.explode problem_dest_dir + problem_file_name else error ("No such directory: " ^ quote problem_dest_dir) val executable = (case find_first (fn var => getenv var <> "") (fst exec) of SOME var => let val pref = getenv var ^ "/" val paths = map (Path.explode o prefix pref) (if ML_System.platform_is_windows then map (suffix ".exe") (snd exec) @ snd exec else snd exec); in (case find_first File.exists paths of SOME path => path | NONE => error ("Bad executable: " ^ Path.print (hd paths))) end | NONE => error ("The environment variable " ^ quote (List.last (fst exec)) ^ " is not set")) fun run () = let fun monomorphize_facts facts = let val ctxt = ctxt |> repair_monomorph_context max_mono_iters good_max_mono_iters max_new_mono_instances good_max_new_mono_instances (* pseudo-theorem involving the same constants as the subgoal *) val subgoal_th = Logic.list_implies (hyp_ts, concl_t) |> Skip_Proof.make_thm thy val rths = facts |> chop mono_max_privileged_facts |>> map (pair 1 o snd) ||> map (pair 2 o snd) |> op @ |> cons (0, subgoal_th) in Monomorph.monomorph atp_schematic_consts_of ctxt rths |> tl |> curry ListPair.zip (map fst facts) |> maps (fn (name, rths) => map (pair name o zero_var_indexes o snd) rths) end val strictness = if strict then Strict else Non_Strict val type_enc = choose_type_enc strictness good_format good_type_enc val run_timeout = slice_timeout slice_size slices timeout val generous_run_timeout = if mode = MaSh then one_day else if abduce then run_timeout + seconds 5.0 else run_timeout val ({elapsed, ...}, atp_problem_data as (atp_problem, _, _, _)) = Timing.timing (fn () => let val readable_names = not (Config.get ctxt atp_full_names) in facts |> not (is_type_enc_sound type_enc) ? 
filter_out (is_dangerous_prop ctxt o Thm.prop_of o snd) |> not (is_type_enc_polymorphic type_enc) ? monomorphize_facts |> map (apsnd Thm.prop_of) |> generate_atp_problem ctxt generate_isabelle_info good_format prem_role type_enc atp_mode good_lam_trans good_uncurried_aliases readable_names true hyp_ts concl_t end) () val () = spying spy (fn () => (state, subgoal, name, "Generating ATP problem in " ^ string_of_int (Time.toMilliseconds elapsed) ^ " ms")) val args = arguments abduce full_proofs extra run_timeout prob_path val command = space_implode " " (File.bash_path executable :: args) fun run_command () = if exec = isabelle_scala_function then let val {output, timing} = SystemOnTPTP.run_system_encoded args in (output, timing) end else let val res = Isabelle_System.bash_process (Bash.script command |> Bash.redirect) in (Process_Result.out res, Process_Result.timing_elapsed res) end val _ = atp_problem |> lines_of_atp_problem good_format (fn () => atp_problem_term_order_info atp_problem) |> (exec <> isabelle_scala_function) ? cons ("% " ^ command ^ "\n" ^ (if comment = "" then "" else "% " ^ comment ^ "\n")) |> File.write_list prob_path val local_name = name |> perhaps (try (unprefix remote_prefix)) val ((output, run_time), ((atp_proof, tstplike_proof), outcome)) = Timeout.apply generous_run_timeout run_command () |>> overlord ? (fn output => prefix ("% " ^ command ^ "\n% " ^ timestamp () ^ "\n") output) |> (fn accum as (output, _) => (accum, extract_tstplike_proof_and_outcome verbose proof_delims known_failures output |>> `(atp_proof_of_tstplike_proof false local_name atp_problem) handle UNRECOGNIZED_ATP_PROOF () => (([], ""), SOME ProofUnparsable))) handle Timeout.TIMEOUT _ => (("", run_timeout), (([], ""), SOME TimedOut)) | ERROR msg => (("", Time.zeroTime), (([], ""), SOME (UnknownError msg))) val atp_abduce_candidates = if abduce andalso outcome <> NONE andalso not (has_already_found_something ()) then atp_abduce_candidates_of_output local_name atp_problem output else [] val () = spying spy (fn () => (state, subgoal, name, "Running command in " ^ string_of_int (Time.toMilliseconds run_time) ^ " ms")) val _ = (case outcome of NONE => found_something name | _ => ()) in (atp_problem_data, (output, run_time, facts, atp_problem, tstplike_proof, atp_proof, atp_abduce_candidates, outcome), (good_format, type_enc)) end (* If the problem file has not been exported, remove it; otherwise, export the proof file too. 
*) fun clean_up () = if problem_dest_dir = "" then (try File.rm prob_path; ()) else () fun export (_, (output, _, _, _, _, _, _, _), _) = let val proof_dest_dir_path = Path.explode proof_dest_dir val make_export_file_name = Path.split_ext #> apfst (Path.explode o suffix "_proof" o Path.implode) #> swap #> uncurry Path.ext in if proof_dest_dir = "" then Output.system_message "don't export proof" else if File.exists proof_dest_dir_path then File.write (proof_dest_dir_path + make_export_file_name problem_file_name) output else error ("No such directory: " ^ quote proof_dest_dir) end val ((_, pool, lifted, sym_tab), (output, run_time, used_from, atp_problem, tstplike_proof, atp_proof, atp_abduce_candidates, outcome), (format, type_enc)) = with_cleanup clean_up run () |> tap export val important_message = if mode = Normal andalso Random.random_range 0 (important_message_keep_quotient - 1) = 0 then extract_important_message output else "" val (outcome, used_facts, preferred_methss, message) = (case outcome of NONE => let val used_facts = sort_by fst (used_facts_in_atp_proof ctxt (map fst used_from) atp_proof) val needs_full_types = is_typed_helper_used_in_atp_proof atp_proof val preferred = Metis_Method (NONE, NONE) val preferred_methss = (preferred, if try0 then bunches_of_proof_methods ctxt smt_proofs needs_full_types (if atp_proof_prefers_lifting atp_proof then liftingN else opaque_liftingN) else [[preferred]]) in (NONE, used_facts, preferred_methss, fn preplay => let val _ = if verbose then writeln "Generating proof text..." else () fun isar_params () = let val full_atp_proof = atp_proof_of_tstplike_proof true (perhaps (try (unprefix remote_prefix)) name) atp_problem tstplike_proof val metis_type_enc = if is_typed_helper_used_in_atp_proof full_atp_proof then SOME full_typesN else NONE val metis_lam_trans = if atp_proof_prefers_lifting full_atp_proof then SOME liftingN else NONE val full_atp_proof = full_atp_proof |> termify_atp_proof ctxt name format type_enc pool lifted sym_tab |> local_name = spassN ? introduce_spass_skolems |> factify_atp_proof (map fst used_from) hyp_ts concl_t in (verbose, (metis_type_enc, metis_lam_trans), preplay_timeout, compress, try0, minimize, full_atp_proof, goal) end val one_line_params = (preplay (), proof_banner mode name, subgoal, subgoal_count) val num_chained = length (#facts (Proof.goal state)) in proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained one_line_params ^ (if important_message <> "" then "\n\nImportant message from Dr. Geoff Sutcliffe:\n" ^ important_message else "") end) end | SOME failure => let val max_abduce_candidates = the_default default_abduce_max_candidates abduce_max_candidates - val abduce_candidates = - map (termify_atp_abduce_candidate ctxt name format type_enc pool lifted sym_tab) - atp_abduce_candidates + val abduce_candidates = atp_abduce_candidates + |> map (termify_atp_abduce_candidate ctxt name format type_enc pool lifted sym_tab) + |> top_abduce_candidates max_abduce_candidates in if null abduce_candidates then (SOME failure, [], (Auto_Method (* dummy *), []), fn _ => string_of_atp_failure failure) else (NONE, [], (Auto_Method (* dummy *), []), fn _ => - abduce_text ctxt (top_abduce_candidates max_abduce_candidates abduce_candidates)) + abduce_text ctxt abduce_candidates) end) in {outcome = outcome, used_facts = used_facts, used_from = used_from, preferred_methss = preferred_methss, run_time = run_time, message = message} end end;