diff --git a/src/Doc/Corec/document/root.tex b/src/Doc/Corec/document/root.tex --- a/src/Doc/Corec/document/root.tex +++ b/src/Doc/Corec/document/root.tex @@ -1,84 +1,83 @@ \documentclass[12pt,a4paper]{article} % fleqn -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{amsfonts} \usepackage{amsmath} \usepackage{cite} \usepackage{enumitem} \usepackage{footmisc} \usepackage{latexsym} \usepackage{graphicx} \usepackage{iman} \usepackage{extra} \usepackage{isar} \usepackage{isabelle} \usepackage{isabellesym} \usepackage{style} \usepackage{pdfsetup} \usepackage{railsetup} \usepackage{framed} \setcounter{secnumdepth}{3} \setcounter{tocdepth}{3} \renewcommand\_{\hbox{\textunderscore\kern-.05ex}} \newbox\boxA \setbox\boxA=\hbox{\ } \parindent=4\wd\boxA \newcommand\blankline{\vskip-.25\baselineskip} \newenvironment{indentblock}{\list{}{\setlength{\leftmargin}{\parindent}}\item[]}{\endlist} \newcommand{\keyw}[1]{\textbf{#1}} \newcommand{\synt}[1]{\textit{#1}} \newcommand{\hthm}[1]{\textbf{\textit{#1}}} %\renewcommand{\isactrlsub}[1]{\/$\sb{\mathrm{#1}}$} \renewcommand\isactrlsub[1]{\/$\sb{#1}$} \renewcommand\isadigit[1]{\/\ensuremath{\mathrm{#1}}} \renewcommand\isacharprime{\isamath{{'}\mskip-2mu}} \renewcommand\isacharunderscore{\mbox{\_}} \renewcommand\isacharunderscorekeyword{\mbox{\_}} \renewcommand\isachardoublequote{\mbox{\upshape{``}}} \renewcommand\isachardoublequoteopen{\mbox{\upshape{``}\kern.1ex}} \renewcommand\isachardoublequoteclose{\/\kern.15ex\mbox{\upshape{''}}} \renewcommand{\isasyminverse}{\isamath{{}^{-}}} \hyphenation{isa-belle} \isadroptag{theory} \title{%\includegraphics[scale=0.5]{isabelle_hol} \\[4ex] Defining Nonprimitively (Co)recursive Functions in Isabelle/HOL} \author{Jasmin Christian Blanchette, Aymeric Bouzy, \\ Andreas Lochbihler, Andrei Popescu, and \\ Dmitriy Traytel} \urlstyle{tt} \begin{document} \maketitle \begin{sloppy} \begin{abstract} \noindent This tutorial describes the definitional package for nonprimitively corecursive functions in Isabelle/HOL. The following commands are provided: \keyw{corec}, \keyw{corecursive}, \keyw{friend\_of\_corec}, and \keyw{coinduction\_\allowbreak upto}. They supplement \keyw{codatatype}, \keyw{primcorec}, and \keyw{primco\-rec\-ur\-sive}, which define codatatypes and primitively corecursive functions. 
\end{abstract} \end{sloppy} \tableofcontents \input{Corec.tex} \let\em=\sl \bibliography{manual}{} \bibliographystyle{abbrv} \end{document} diff --git a/src/Doc/Datatypes/document/root.tex b/src/Doc/Datatypes/document/root.tex --- a/src/Doc/Datatypes/document/root.tex +++ b/src/Doc/Datatypes/document/root.tex @@ -1,86 +1,85 @@ \documentclass[12pt,a4paper]{article} % fleqn -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{amsfonts} \usepackage{amsmath} \usepackage{cite} \usepackage{enumitem} \usepackage{footmisc} \usepackage{latexsym} \usepackage{graphicx} \usepackage{iman} \usepackage{extra} \usepackage{isar} \usepackage{isabelle} \usepackage{isabellesym} \usepackage{style} \usepackage{pdfsetup} \usepackage{railsetup} \usepackage{framed} \setcounter{secnumdepth}{3} \setcounter{tocdepth}{3} \renewcommand\_{\hbox{\textunderscore\kern-.05ex}} \newbox\boxA \setbox\boxA=\hbox{\ } \parindent=4\wd\boxA \newcommand\blankline{\vskip-.25\baselineskip} \newenvironment{indentblock}{\list{}{\setlength{\leftmargin}{\parindent}}\item[]}{\endlist} \newcommand{\keyw}[1]{\textbf{#1}} \newcommand{\synt}[1]{\textit{#1}} \newcommand{\hthm}[1]{\textbf{\textit{#1}}} %\renewcommand{\isactrlsub}[1]{\/$\sb{\mathrm{#1}}$} \renewcommand\isactrlsub[1]{\/$\sb{#1}$} \renewcommand\isadigit[1]{\/\ensuremath{\mathrm{#1}}} \renewcommand\isacharprime{\isamath{{'}\mskip-2mu}} \renewcommand\isacharunderscore{\mbox{\_}} \renewcommand\isacharunderscorekeyword{\mbox{\_}} \renewcommand\isachardoublequote{\mbox{\upshape{``}}} \renewcommand\isachardoublequoteopen{\mbox{\upshape{``}\kern.1ex}} \renewcommand\isachardoublequoteclose{\/\kern.15ex\mbox{\upshape{''}}} \renewcommand{\isasyminverse}{\isamath{{}^{-}}} \hyphenation{isa-belle} \isadroptag{theory} \title{%\includegraphics[scale=0.5]{isabelle_hol} \\[4ex] Defining (Co)datatypes and Primitively (Co)recursive Functions in Isabelle/HOL} \author{Julian Biendarra, Jasmin Blanchette, \\ Martin Desharnais, Lorenz Panny, \\ Andrei Popescu, and Dmitriy Traytel} \urlstyle{tt} \begin{document} \maketitle \begin{sloppy} \begin{abstract} \noindent This tutorial describes the definitional package for datatypes and codatatypes, and for primitively recursive and corecursive functions, in Isabelle/HOL. The following commands are provided: \keyw{datatype}, \keyw{datatype\_\allowbreak compat}, \keyw{prim\-rec}, \keyw{co\-data\-type}, \keyw{prim\-co\-rec}, \keyw{prim\-co\-recursive}, \keyw{bnf}, \keyw{lift\_\allowbreak bnf}, \keyw{copy\_\allowbreak bnf}, \keyw{bnf\_\allowbreak axiom\-atization}, \keyw{print\_\allowbreak bnfs}, and \keyw{free\_\allowbreak constructors}. \end{abstract} \end{sloppy} \tableofcontents \input{Datatypes.tex} \let\em=\sl \bibliography{manual}{} \bibliographystyle{abbrv} \end{document} diff --git a/src/Doc/Eisbach/document/root.tex b/src/Doc/Eisbach/document/root.tex --- a/src/Doc/Eisbach/document/root.tex +++ b/src/Doc/Eisbach/document/root.tex @@ -1,88 +1,87 @@ \documentclass[12pt,a4paper,fleqn]{report} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{latexsym,graphicx} \usepackage[refpage]{nomencl} \usepackage{iman,extra,isar,proof} \usepackage[nohyphen,strings]{underscore} \usepackage{isabelle} \usepackage{isabellesym} \usepackage{railsetup} \usepackage{supertabular} \usepackage{style} \usepackage{pdfsetup} \hyphenation{Isabelle} \hyphenation{Eisbach} \isadroptag{theory} \title{\includegraphics[scale=0.5]{isabelle_eisbach} \\[4ex] The Eisbach User Manual} \author{Daniel Matichuk \\ Makarius Wenzel \\ Toby Murray } % Control fixmes etc.
\newif\ifDraft \newif\ifFinal %\Drafttrue\Finalfalse \Draftfalse\Finaltrue \ifDraft \usepackage{draftcopy} \newcommand{\Comment}[1]{\textbf{\textsl{#1}}} \newenvironment{LongComment}[1] % multi-paragraph comment, argument is owner {\begingroup\par\noindent\slshape \textbf{Begin Comment[#1]}\par} {\par\noindent\textbf{End Comment}\endgroup\par} \newcommand{\FIXME}[1]{\textbf{\textsl{FIXME: #1}}} \newcommand{\TODO}[1]{\textbf{\textsl{TODO: #1}}} \else \newcommand{\Comment}[1]{\relax} \newenvironment{LongComment}[1]{\expandafter\comment}{\expandafter\endcomment} \newcommand{\FIXME}[1]{\relax} \newcommand{\TODO}[1]{\relax} \fi % This sort of command for each active author can be convenient \newcommand{\dan}[1]{\Comment{#1 [dan]}} \newcommand{\toby}[1]{\Comment{#1 [toby]}} \newcommand{\makarius}[1]{\Comment{#1 [makarius]}} \makeindex \chardef\charbackquote=`\` \newcommand{\backquote}{\mbox{\tt\charbackquote}} \begin{document} \maketitle \pagenumbering{roman} \chapter*{Preface} \input{Preface.tex} \tableofcontents \clearfirst \input{Manual.tex} \begingroup \tocentry{\bibname} \bibliographystyle{abbrv} \small\raggedright\frenchspacing \bibliography{manual} \endgroup \tocentry{\indexname} \printindex \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: diff --git a/src/Doc/How_to_Prove_it/document/prelude.tex b/src/Doc/How_to_Prove_it/document/prelude.tex --- a/src/Doc/How_to_Prove_it/document/prelude.tex +++ b/src/Doc/How_to_Prove_it/document/prelude.tex @@ -1,68 +1,67 @@ \usepackage{makeidx} % allows index generation \usepackage{graphicx} % standard LaTeX graphics tool % when including figure files \usepackage{multicol} % used for the two-column index \usepackage[bottom]{footmisc}% places footnotes at page bottom -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{isabelle,isabellesym} %\usepackage{amssymb} \renewcommand*\descriptionlabel[1]{\hspace\labelsep \textbf{#1}\hfil} % this should be the last package used \usepackage{xcolor} \definecolor{linkcolor}{rgb}{0,0,0.4} \usepackage[colorlinks=true,linkcolor=linkcolor,citecolor=linkcolor, filecolor=linkcolor,pagecolor=linkcolor, urlcolor=linkcolor]{hyperref} % urls in roman style, theory text in math-similar italics \urlstyle{tt} \isabellestyle{it} \renewcommand{\isadigit}[1]{\ensuremath{#1}} % font size \renewcommand{\isastyle}{\isastyleminor} % indexing \usepackage{ifthen} \newcommand{\indexdef}[3]% {\ifthenelse{\equal{}{#1}}{\index{#3 (#2)|bold}}{\index{#3 (#1\ #2)|bold}}} \renewcommand{\isamarkupsection}[1]{{\rmfamily\subsection{#1}}} \renewcommand{\isamarkupsubsection}[1]{{\rmfamily\subsubsection{#1}}} % isabelle in-text command font \newcommand{\isacom}[1]{\isa{\isacommand{#1}}} \protected\def\isacharunderscore{\raisebox{2pt}{\_\kern-1.7pt}} \protected\def\isacharunderscorekeyword{\raisebox{2pt}{\_}} % isabelle keyword, adapted from isabelle.sty \renewcommand{\isakeyword}[1] {\emph{\def\isachardot{.}\def\isacharunderscore{\isacharunderscorekeyword}% \def\isacharbraceleft{\{}\def\isacharbraceright{\}}\textsf{\textbf{#1}}}} % add \noindent to text blocks automatically \renewenvironment{isamarkuptext}{\par\isastyletext\begin{isapar}\noindent}{\end{isapar}} \renewenvironment{isamarkuptxt}{\par\isastyletext\begin{isapar}\noindent}{\end{isapar}} \newcommand{\noquotes}[1]{{\renewcommand{\isachardoublequote}{}\renewcommand{\isachardoublequoteopen}{}\renewcommand{\isachardoublequoteclose}{}#1}} \newcommand{\xsymbol}[1]{\texttt{\char`\\\char`<#1>}} %index \newcommand{\conceptnoidx}[1]{\textbf{#1}} 
\newcommand{\concept}[1]{\conceptnoidx{#1}\index{#1}} \newcommand{\conceptidx}[2]{\conceptnoidx{#1}\index{#2}} \newcommand{\indexed}[2]{#1\index{#2@\protect#1}} \chardef\isachardoublequote=`\"% \chardef\isachardoublequoteopen=`\"% \chardef\isachardoublequoteclose=`\"% \renewcommand{\isacharbackquoteopen}{\isacharbackquote} \renewcommand{\isacharbackquoteclose}{\isacharbackquote} \hyphenation{Isa-belle} diff --git a/src/Doc/Implementation/document/root.tex b/src/Doc/Implementation/document/root.tex --- a/src/Doc/Implementation/document/root.tex +++ b/src/Doc/Implementation/document/root.tex @@ -1,125 +1,124 @@ \documentclass[12pt,a4paper,fleqn]{report} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{latexsym,graphicx} \usepackage[refpage]{nomencl} \usepackage{iman,extra,isar,proof} \usepackage[nohyphen,strings]{underscore} \usepackage{isabelle} \usepackage{isabellesym} \usepackage{railsetup} \usepackage{supertabular} \usepackage{style} \usepackage{pdfsetup} \hyphenation{Isabelle} \hyphenation{Isar} \isadroptag{theory} \title{\includegraphics[scale=0.5]{isabelle_isar} \\[4ex] The Isabelle/Isar Implementation} \author{\emph{Makarius Wenzel} \\[3ex] With Contributions by Stefan Berghofer, \\ Florian Haftmann and Larry Paulson } \makeindex \begin{document} \maketitle \begin{abstract} We describe the key concepts underlying the Isabelle/Isar implementation, including ML references for the most important functions. The aim is to give some insight into the overall system architecture, and provide clues on implementing applications within this framework. \end{abstract} \vspace*{2.5cm} \begin{quote} {\small\em Isabelle was not designed; it evolved. Not everyone likes this idea. Specification experts rightly abhor trial-and-error programming. They suggest that no one should write a program without first writing a complete formal specification. But university departments are not software houses. Programs like Isabelle are not products: when they have served their purpose, they are discarded.} Lawrence C. Paulson, ``Isabelle: The Next 700 Theorem Provers'' \vspace*{1cm} {\small\em As I did 20 years ago, I still fervently believe that the only way to make software secure, reliable, and fast is to make it small. Fight features.} Andrew S. Tanenbaum \vspace*{1cm} {\small\em One thing that UNIX does not need is more features. It is successful in part because it has a small number of good ideas that work well together. Merely adding features does not make it easier for users to do things --- it just makes the manual thicker. The right solution in the right place is always more effective than haphazard hacking.} Rob Pike and Brian W. Kernighan \vspace*{1cm} {\small\em If you look at software today, through the lens of the history of engineering, it's certainly engineering of a sort--but it's the kind of engineering that people without the concept of the arch did. 
Most software today is very much like an Egyptian pyramid with millions of bricks piled on top of each other, with no structural integrity, but just done by brute force and thousands of slaves.} Alan Kay \end{quote} \thispagestyle{empty}\clearpage \pagenumbering{roman} \tableofcontents \listoffigures \clearfirst \setcounter{chapter}{-1} \input{ML.tex} \input{Prelim.tex} \input{Logic.tex} \input{Syntax.tex} \input{Tactic.tex} \input{Eq.tex} \input{Proof.tex} \input{Isar.tex} \input{Local_Theory.tex} \input{Integration.tex} \begingroup \tocentry{\bibname} \bibliographystyle{abbrv} \small\raggedright\frenchspacing \bibliography{manual} \endgroup \tocentry{\indexname} \printindex \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: diff --git a/src/Doc/Isar_Ref/document/root.tex b/src/Doc/Isar_Ref/document/root.tex --- a/src/Doc/Isar_Ref/document/root.tex +++ b/src/Doc/Isar_Ref/document/root.tex @@ -1,101 +1,100 @@ \documentclass[12pt,a4paper,fleqn]{report} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{amssymb} \usepackage{wasysym} \usepackage{eurosym} \usepackage{pifont} \usepackage[english]{babel} \usepackage[only,bigsqcap]{stmaryrd} \usepackage{textcomp} \usepackage{latexsym} \usepackage{graphicx} \let\intorig=\int %iman.sty redefines \int \usepackage{iman,extra,isar,proof} \usepackage[nohyphen,strings]{underscore} \usepackage{isabelle} \usepackage{isabellesym} \usepackage{railsetup} \usepackage{supertabular} \usepackage{style} \usepackage{pdfsetup} \hyphenation{Isabelle} \hyphenation{Isar} \isadroptag{theory} \title{\includegraphics[scale=0.5]{isabelle_isar} \\[4ex] The Isabelle/Isar Reference Manual} \author{\emph{Makarius Wenzel} \\[3ex] With Contributions by Clemens Ballarin, Stefan Berghofer, \\ Jasmin Blanchette, Timothy Bourke, Lukas Bulwahn, \\ Amine Chaieb, Lucas Dixon, Florian Haftmann, \\ Brian Huffman, Lars Hupel, Gerwin Klein, \\ Alexander Krauss, Ond\v{r}ej Kun\v{c}ar, Andreas Lochbihler, \\ Tobias Nipkow, Lars Noschinski, David von Oheimb, \\ Larry Paulson, Sebastian Skalberg, \\ Christian Sternagel, Dmitriy Traytel } \makeindex \chardef\charbackquote=`\` \newcommand{\backquote}{\mbox{\tt\charbackquote}} \begin{document} \maketitle \pagenumbering{roman} \chapter*{Preface} \input{Preface.tex} \tableofcontents \listoffigures \clearfirst \part{Basic Concepts} \input{Synopsis.tex} \input{Framework.tex} \input{First_Order_Logic.tex} \part{General Language Elements} \input{Outer_Syntax.tex} \input{Document_Preparation.tex} \input{Spec.tex} \input{Proof.tex} \input{Proof_Script.tex} \input{Inner_Syntax.tex} \input{Generic.tex} \part{Isabelle/HOL}\label{part:hol} \input{HOL_Specific.tex} \part{Appendix} \appendix \input{Quick_Reference.tex} \let\int\intorig \input{Symbols.tex} \begingroup \tocentry{\bibname} \bibliographystyle{abbrv} \small\raggedright\frenchspacing \bibliography{manual} \endgroup \tocentry{\indexname} \printindex \end{document} diff --git a/src/Doc/JEdit/document/root.tex b/src/Doc/JEdit/document/root.tex --- a/src/Doc/JEdit/document/root.tex +++ b/src/Doc/JEdit/document/root.tex @@ -1,90 +1,89 @@ \documentclass[12pt,a4paper]{report} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{supertabular} \usepackage{rotating} \usepackage{graphicx} \usepackage{iman,extra,isar} \usepackage[nohyphen,strings]{underscore} \usepackage{amssymb} \usepackage{isabelle,isabellesym} \usepackage{railsetup} \usepackage{style} \usepackage{pdfsetup} \hyphenation{Edinburgh} \hyphenation{Isabelle} \hyphenation{Isar} 
\isadroptag{theory} \isabellestyle{literal} \def\isastylett{\footnotesize\tt} \title{\includegraphics[scale=0.5]{isabelle_jedit} \\[4ex] Isabelle/jEdit} \author{\emph{Makarius Wenzel}} \makeindex \begin{document} \maketitle \begin{abstract} Isabelle/jEdit is a fully-featured Prover IDE, based on Isabelle/Scala and the jEdit text editor. This document provides an overview of its general principles and main IDE functionality. \end{abstract} \vspace*{2.5cm} \begin{quote} {\small\em Isabelle's user interface is no advance over LCF's, which is widely condemned as ``user-unfriendly'': hard to use, bewildering to beginners. Hence the interest in proof editors, where a proof can be constructed and modified rule-by-rule using windows, mouse, and menus. But Edinburgh LCF was invented because real proofs require millions of inferences. Sophisticated tools --- rules, tactics and tacticals, the language ML, the logics themselves --- are hard to learn, yet they are essential. We may demand a mouse, but we need better education and training.} Lawrence C. Paulson, ``Isabelle: The Next 700 Theorem Provers'' \end{quote} \vspace*{2.5cm} \subsubsection*{Acknowledgements} Research and implementation of concepts around PIDE and Isabelle/jEdit started in 2008 and was kindly supported by: \begin{itemize} \item TU M\"unchen \url{https://www.in.tum.de} \item BMBF \url{https://www.bmbf.de} \item Universit\'e Paris-Sud \url{https://www.u-psud.fr} \item Digiteo \url{https://www.digiteo.fr} \item ANR \url{https://www.agence-nationale-recherche.fr} \end{itemize} \pagenumbering{roman} \tableofcontents \listoffigures \clearfirst \input{JEdit.tex} \begingroup \tocentry{\bibname} \bibliographystyle{abbrv} \small\raggedright\frenchspacing \bibliography{manual} \endgroup \tocentry{\indexname} \printindex \end{document} diff --git a/src/Doc/Locales/document/root.tex b/src/Doc/Locales/document/root.tex --- a/src/Doc/Locales/document/root.tex +++ b/src/Doc/Locales/document/root.tex @@ -1,62 +1,61 @@ \documentclass[11pt,a4paper]{article} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{tikz} \usepackage{subfigure} \usepackage[nohyphen,strings]{underscore} \usepackage{amsmath} \usepackage{isabelle,isabellesym} \usepackage{verbatim} \usepackage{alltt} \usepackage{array} \usepackage{amssymb} \usepackage{pdfsetup} \isadroptag{theory} \isafoldtag{proof} % urls in roman style, theory text in typewriter \urlstyle{rm} \isabellestyle{tt} \begin{document} \title{Tutorial to Locales and Locale Interpretation% \thanks{Published in L.~Lamb\'an, A.~Romero, J.~Rubio, editors, {\em Contribuciones Cient\'{\i}ficas en honor de Mirian Andr\'es.} Servicio de Publicaciones de la Universidad de La Rioja, Logro\~no, Spain, 2010. Reproduced by permission.}} \author{Clemens Ballarin} \date{} \maketitle \begin{abstract} Locales are Isabelle's approach to dealing with parametric theories. They have been designed as a module system for a theorem prover that can adequately represent the complex inter-dependencies between structures found in abstract algebra, but have also proven fruitful in other applications --- for example, software verification. Both design and implementation of locales have evolved considerably since Kamm\"uller did his initial experiments. Today, locales are a simple yet powerful extension of the Isar proof language. The present tutorial covers all major facilities of locales. It is intended for locale novices; familiarity with Isabelle and Isar is presumed.
\end{abstract} \parindent 0pt\parskip 0.5ex \input{session} \bibliographystyle{abbrv} \bibliography{root} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: diff --git a/src/Doc/Main/document/root.tex b/src/Doc/Main/document/root.tex --- a/src/Doc/Main/document/root.tex +++ b/src/Doc/Main/document/root.tex @@ -1,41 +1,40 @@ \documentclass[12pt,a4paper]{article} -\usepackage{lmodern} \usepackage[T1]{fontenc} %shortens document but can cause odd page breaks %\oddsidemargin=4.6mm %\evensidemargin=4.6mm %\textwidth=150mm %\topmargin=4.6mm %\headheight=0mm %\headsep=0mm %\textheight=234mm \usepackage{isabelle,isabellesym} \usepackage{amssymb} \usepackage[only,bigsqcap]{stmaryrd} % this should be the last package used \usepackage{pdfsetup} % urls in roman style, theory text in math-similar italics, with literal underscore \urlstyle{rm} \isabellestyle{literal} % for uniform font size \renewcommand{\isastyle}{\isastyleminor} \parindent 0pt\parskip 0.5ex \usepackage{supertabular} \begin{document} \title{What's in Main} \author{Tobias Nipkow} \date{\today} \maketitle \input{Main_Doc.tex} \end{document} diff --git a/src/Doc/Nitpick/document/root.tex b/src/Doc/Nitpick/document/root.tex --- a/src/Doc/Nitpick/document/root.tex +++ b/src/Doc/Nitpick/document/root.tex @@ -1,2760 +1,2759 @@ \documentclass[a4paper,12pt]{article} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{amssymb} \usepackage[english]{babel} \usepackage{color} \usepackage{footmisc} \usepackage{graphicx} %\usepackage{mathpazo} \usepackage{multicol} \usepackage{stmaryrd} %\usepackage[scaled=.85]{beramono} \usepackage{isabelle,iman,pdfsetup} %\oddsidemargin=4.6mm %\evensidemargin=4.6mm %\textwidth=150mm %\topmargin=4.6mm %\headheight=0mm %\headsep=0mm %\textheight=234mm \def\Colon{\mathord{:\mkern-1.5mu:}} %\def\lbrakk{\mathopen{\lbrack\mkern-3.25mu\lbrack}} %\def\rbrakk{\mathclose{\rbrack\mkern-3.255mu\rbrack}} \def\lparr{\mathopen{(\mkern-4mu\mid}} \def\rparr{\mathclose{\mid\mkern-4mu)}} %\def\unk{{?}} \def\unk{{\_}} \def\unkef{(\lambda x.\; \unk)} \def\undef{(\lambda x.\; \_)} %\def\unr{\textit{others}} \def\unr{\ldots} \def\Abs#1{\hbox{\rm{\flqq}}{\,#1\,}\hbox{\rm{\frqq}}} \def\Q{{\smash{\lower.2ex\hbox{$\scriptstyle?$}}}} \hyphenation{Mini-Sat size-change First-Steps grand-parent nit-pick counter-example counter-examples data-type data-types co-data-type co-data-types in-duc-tive co-in-duc-tive} \urlstyle{tt} \renewcommand\_{\hbox{\textunderscore\kern-.05ex}} \begin{document} %%% TYPESETTING %\renewcommand\labelitemi{$\bullet$} \renewcommand\labelitemi{\raise.065ex\hbox{\small\textbullet}} \title{\includegraphics[scale=0.5]{isabelle_nitpick} \\[4ex] Picking Nits \\[\smallskipamount] \Large A User's Guide to Nitpick for Isabelle/HOL} \author{\hbox{} \\ Jasmin Blanchette \\ {\normalsize Institut f\"ur Informatik, Technische Universit\"at M\"unchen} \\ \hbox{}} \maketitle \tableofcontents \setlength{\parskip}{.7em plus .2em minus .1em} \setlength{\parindent}{0pt} \setlength{\abovedisplayskip}{\parskip} \setlength{\abovedisplayshortskip}{.9\parskip} \setlength{\belowdisplayskip}{\parskip} \setlength{\belowdisplayshortskip}{.9\parskip} % General-purpose enum environment with correct spacing \newenvironment{enum}% {\begin{list}{}{% \setlength{\topsep}{.1\parskip}% \setlength{\partopsep}{.1\parskip}% \setlength{\itemsep}{\parskip}% \advance\itemsep by-\parsep}} {\end{list}} \def\pre{\begingroup\vskip0pt plus1ex\advance\leftskip by\leftmargin \advance\rightskip by\leftmargin} 
\def\post{\vskip0pt plus1ex\endgroup} \def\prew{\pre\advance\rightskip by-\leftmargin} \def\postw{\post} \section{Introduction} \label{introduction} Nitpick \cite{blanchette-nipkow-2010} is a counterexample generator for Isabelle/HOL \cite{isa-tutorial} that is designed to handle formulas combining (co)in\-duc\-tive datatypes, (co)in\-duc\-tively defined predicates, and quantifiers. It builds on Kodkod \cite{torlak-jackson-2007}, a highly optimized first-order relational model finder developed by the Software Design Group at MIT. It is conceptually similar to Refute \cite{weber-2008}, from which it borrows many ideas and code fragments, but it benefits from Kodkod's optimizations and a new encoding scheme. The name Nitpick is shamelessly appropriated from a now retired Alloy precursor. Nitpick is easy to use---you simply enter \textbf{nitpick} after a putative theorem and wait a few seconds. Nonetheless, there are situations where knowing how it works under the hood and how it reacts to various options helps increase the test coverage. This manual also explains how to install the tool on your workstation. Should the motivation fail you, think of the many hours of hard work Nitpick will save you. Proving non-theorems is \textsl{hard work}. Another common use of Nitpick is to find out whether the axioms of a locale are satisfiable, while the locale is being developed. To check this, it suffices to write \prew \textbf{lemma}~``$\textit{False\/}$'' \\ \textbf{nitpick}~[\textit{show\_all}] \postw after the locale's \textbf{begin} keyword. To falsify \textit{False}, Nitpick must find a model for the axioms. If it finds no model, we have an indication that the axioms might be unsatisfiable. Nitpick provides an automatic mode that can be enabled via the ``Auto Nitpick'' option under ``Plugins > Plugin Options > Isabelle > General'' in Isabelle/jEdit. In this mode, Nitpick is run on every newly entered theorem. \newbox\boxA \setbox\boxA=\hbox{\texttt{nospam}} \newcommand\authoremail{\texttt{jasmin.blan{\color{white}nospam}\kern-\wd\boxA{}chette@\allowbreak inria\allowbreak .\allowbreak fr}} To run Nitpick, you must also make sure that the theory \textit{Nitpick} is imported---this is rarely a problem in practice since it is part of \textit{Main}. The examples presented in this manual can be found in Isabelle's \texttt{src/HOL/\allowbreak Nitpick\_\allowbreak Examples/\allowbreak Manual\_Nits.thy} theory. The known bugs and limitations at the time of writing are listed in \S\ref{known-bugs-and-limitations}. Comments and bug reports concerning the tool or the manual should be directed to the author at \authoremail. \vskip2.5\smallskipamount \textbf{Acknowledgment.} The author would like to thank Mark Summerfield for suggesting several textual improvements. % and Perry James for reporting a typo. \section{Installation} \label{installation} Nitpick is part of Isabelle, so you do not need to install it. It relies on a third-party Kodkod front-end called Kodkodi, which in turn requires a Java virtual machine. Both are provided as official Isabelle components. %There are two main ways of installing Kodkodi: % %\begin{enum} %\item[\labelitemi] If you installed an official Isabelle package, %it should already include a properly setup version of Kodkodi. % %\item[\labelitemi] If you use a repository or snapshot version of Isabelle, %you can download the Isabelle-aware Kodkodi package %from \url{http://www21.in.tum.de/~blanchet/\#software}.
Extract the archive, then add a %line to your \texttt{\$ISABELLE\_HOME\_USER\slash etc\slash components}% %\footnote{The variable \texttt{\$ISABELLE\_HOME\_USER} is set by Isabelle at %startup. Its value can be retrieved by executing \texttt{isabelle} %\texttt{getenv} \texttt{ISABELLE\_HOME\_USER} on the command line.} %file with the absolute path to Kodkodi. For example, if the %\texttt{components} file does not exist yet and you extracted Kodkodi to %\texttt{/usr/local/kodkodi-1.5.2}, create it with the single line % %\prew %\texttt{/usr/local/kodkodi-1.5.2} %\postw % %(including an invisible newline character) in it. %\end{enum} To check whether Kodkodi is successfully installed, you can try out the example in \S\ref{propositional-logic}. \section{First Steps} \label{first-steps} This section introduces Nitpick by presenting small examples. If possible, you should try out the examples on your workstation. Your theory file should start as follows: \prew \textbf{theory}~\textit{Scratch} \\ \textbf{imports}~\textit{Main~Quotient\_Product~RealDef} \\ \textbf{begin} \postw The results presented here were obtained using the JNI (Java Native Interface) version of MiniSat and with multithreading disabled to reduce nondeterminism. This was done by adding the line \prew \textbf{nitpick\_params} [\textit{sat\_solver}~= \textit{MiniSat\_JNI}, \,\textit{max\_threads}~= 1] \postw after the \textbf{begin} keyword. The JNI version of MiniSat is bundled with Kodkodi and is precompiled for Linux, Mac~OS~X, and Windows (Cygwin). Other SAT solvers can also be used, as explained in \S\ref{optimizations}. If you have already configured SAT solvers in Isabelle (e.g., for Refute), these will also be available to Nitpick. \subsection{Propositional Logic} \label{propositional-logic} Let's start with a trivial example from propositional logic: \prew \textbf{lemma}~``$P \longleftrightarrow Q$'' \\ \textbf{nitpick} \postw You should get the following output: \prew \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $P = \textit{True}$ \\ \hbox{}\qquad\qquad $Q = \textit{False}$ \postw Nitpick can also be invoked on individual subgoals, as in the example below: \prew \textbf{apply}~\textit{auto} \\[2\smallskipamount] {\slshape goal (2 subgoals): \\ \phantom{0}1. $P\,\Longrightarrow\, Q$ \\ \phantom{0}2. $Q\,\Longrightarrow\, P$} \\[2\smallskipamount] \textbf{nitpick}~1 \\[2\smallskipamount] {\slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $P = \textit{True}$ \\ \hbox{}\qquad\qquad $Q = \textit{False}$} \\[2\smallskipamount] \textbf{nitpick}~2 \\[2\smallskipamount] {\slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $P = \textit{False}$ \\ \hbox{}\qquad\qquad $Q = \textit{True}$} \\[2\smallskipamount] \textbf{oops} \postw \subsection{Type Variables} \label{type-variables} If you are left unimpressed by the previous example, don't worry. The next one is more mind- and computer-boggling: \prew \textbf{lemma} ``$x \in A\,\Longrightarrow\, (\textrm{THE}~y.\;y \in A) \in A$'' \postw \pagebreak[2] %% TYPESETTING The putative lemma involves the definite description operator, {THE}, presented in section 5.10.1 of the Isabelle tutorial \cite{isa-tutorial}. The operator is defined by the axiom $(\textrm{THE}~x.\; x = a) = a$. 
The putative lemma is merely asserting the indefinite description operator axiom with {THE} substituted for {SOME}. The free variable $x$ and the bound variable $y$ have type $'a$. For formulas containing type variables, Nitpick enumerates the possible domains for each type variable, up to a given cardinality (10 by default), looking for a finite countermodel: \prew \textbf{nitpick} [\textit{verbose}] \\[2\smallskipamount] \slshape Trying 10 scopes: \nopagebreak \\ \hbox{}\qquad \textit{card}~$'a$~= 1; \\ \hbox{}\qquad \textit{card}~$'a$~= 2; \\ \hbox{}\qquad $\qquad\vdots$ \\[.5\smallskipamount] \hbox{}\qquad \textit{card}~$'a$~= 10 \\[2\smallskipamount] Nitpick found a counterexample for \textit{card} $'a$~= 3: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $A = \{a_2,\, a_3\}$ \\ \hbox{}\qquad\qquad $x = a_3$ \\[2\smallskipamount] Total time: 963 ms \postw Nitpick found a counterexample in which $'a$ has cardinality 3. (For cardinalities 1 and 2, the formula holds.) In the counterexample, the three values of type $'a$ are written $a_1$, $a_2$, and $a_3$. The message ``Trying $n$ scopes: {\ldots}''\ is shown only if the option \textit{verbose} is enabled. You can specify \textit{verbose} each time you invoke \textbf{nitpick}, or you can set it globally using the command \prew \textbf{nitpick\_params} [\textit{verbose}] \postw This command also displays the current default values for all of the options supported by Nitpick. The options are listed in \S\ref{option-reference}. \subsection{Constants} \label{constants} By just looking at Nitpick's output, it might not be clear why the counterexample in \S\ref{type-variables} is genuine. Let's invoke Nitpick again, this time telling it to show the values of the constants that occur in the formula: \prew \textbf{lemma} ``$x \in A\,\Longrightarrow\, (\textrm{THE}~y.\;y \in A) \in A$'' \\ \textbf{nitpick}~[\textit{show\_consts}] \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$~= 3: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $A = \{a_2,\, a_3\}$ \\ \hbox{}\qquad\qquad $x = a_3$ \\ \hbox{}\qquad Constant: \nopagebreak \\ \hbox{}\qquad\qquad $\hbox{\slshape THE}~y.\;y \in A = a_1$ \postw As the result of an optimization, Nitpick directly assigned a value to the subterm $\textrm{THE}~y.\;y \in A$, rather than to the \textit{The} constant. We can disable this optimization by using the command \prew \textbf{nitpick}~[\textit{dont\_specialize},\, \textit{show\_consts}] \postw Our misadventures with THE suggest adding `$\exists!x{.}$' (``there exists a unique $x$ such that'') at the front of our putative lemma's assumption: \prew \textbf{lemma} ``$\exists {!}x.\; x \in A\,\Longrightarrow\, (\textrm{THE}~y.\;y \in A) \in A$'' \postw The fix appears to work: \prew \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found no counterexample \postw We can further increase our confidence in the formula by exhausting all cardinalities up to 50: \prew \textbf{nitpick} [\textit{card} $'a$~= 1--50]\footnote{The symbol `--' is entered as \texttt{-} (hyphen).} \\[2\smallskipamount] \slshape Nitpick found no counterexample. \postw Let's see if Sledgehammer can find a proof: \prew \textbf{sledgehammer} \\[2\smallskipamount] {\slshape Sledgehammer: ``$e$'' on goal \\ Try this: \textbf{by}~(\textit{metis~theI}) (42 ms)} \\ \hbox{}\qquad\vdots \\[2\smallskipamount] \textbf{by}~(\textit{metis~theI\/}) \postw This must be our lucky day. 
\subsection{Skolemization} \label{skolemization} Are all invertible functions onto? Let's find out: \prew \textbf{lemma} ``$\exists g.\; \forall x.~g~(f~x) = x \,\Longrightarrow\, \forall y.\; \exists x.~y = f~x$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$~= 2 and \textit{card} $'b$~=~1: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $f = \undef{}(b_1 := a_1)$ \\ \hbox{}\qquad Skolem constants: \nopagebreak \\ \hbox{}\qquad\qquad $g = \undef{}(a_1 := b_1,\> a_2 := b_1)$ \\ \hbox{}\qquad\qquad $y = a_2$ \postw (The Isabelle/HOL notation $f(x := y)$ denotes the function that maps $x$ to $y$ and that otherwise behaves like $f$.) Although $f$ is the only free variable occurring in the formula, Nitpick also displays values for the bound variables $g$ and $y$. These values are available to Nitpick because it performs skolemization as a preprocessing step. In the previous example, skolemization only affected the outermost quantifiers. This is not always the case, as illustrated below: \prew \textbf{lemma} ``$\exists x.\; \forall f.\; f~x = x$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$~= 2: \\[2\smallskipamount] \hbox{}\qquad Skolem constant: \nopagebreak \\ \hbox{}\qquad\qquad $\lambda x.\; f = \undef{}(\!\begin{aligned}[t] & a_1 := \undef{}(a_1 := a_2,\> a_2 := a_1), \\[-2pt] & a_2 := \undef{}(a_1 := a_1,\> a_2 := a_1))\end{aligned}$ \postw The variable $f$ is bound within the scope of $x$; therefore, $f$ depends on $x$, as suggested by the notation $\lambda x.\,f$. If $x = a_1$, then $f$ is the function that maps $a_1$ to $a_2$ and vice versa; otherwise, $x = a_2$ and $f$ maps both $a_1$ and $a_2$ to $a_1$. In both cases, $f~x \not= x$. The source of the Skolem constants is sometimes more obscure: \prew \textbf{lemma} ``$\mathit{refl}~r\,\Longrightarrow\, \mathit{sym}~r$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$~= 2: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $r = \{(a_1, a_1),\, (a_2, a_1),\, (a_2, a_2)\}$ \\ \hbox{}\qquad Skolem constants: \nopagebreak \\ \hbox{}\qquad\qquad $\mathit{sym}.x = a_2$ \\ \hbox{}\qquad\qquad $\mathit{sym}.y = a_1$ \postw What happened here is that Nitpick expanded \textit{sym} to its definition: \prew $\mathit{sym}~r \,\equiv\, \forall x\> y.\,\> (x, y) \in r \longrightarrow (y, x) \in r.$ \postw As their names suggest, the Skolem constants $\mathit{sym}.x$ and $\mathit{sym}.y$ are simply the bound variables $x$ and $y$ from \textit{sym}'s definition. \subsection{Natural Numbers and Integers} \label{natural-numbers-and-integers} Because of the axiom of infinity, the type \textit{nat} does not admit any finite models. To deal with this, Nitpick considers finite subsets $N$ of \textit{nat} and maps all numbers $\notin N$ to the undefined value (displayed as `$\unk$'). The type \textit{int} is handled similarly. Internally, undefined values lead to a three-valued logic.
Here is an example involving \textit{int\/}: \prew \textbf{lemma} ``$\lbrakk i \le j;\> n \le (m{\Colon}\mathit{int})\rbrakk \,\Longrightarrow\, i * n + j * m \le i * m + j * n$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $i = 0$ \\ \hbox{}\qquad\qquad $j = 1$ \\ \hbox{}\qquad\qquad $m = 1$ \\ \hbox{}\qquad\qquad $n = 0$ \postw Internally, Nitpick uses either a unary or a binary representation of numbers. The unary representation is more efficient but only suitable for numbers very close to zero. By default, Nitpick attempts to choose the more appropriate encoding by inspecting the formula at hand. This behavior can be overridden by passing either \textit{unary\_ints} or \textit{binary\_ints} as an option. For binary notation, the number of bits to use can be specified using the \textit{bits} option. For example: \prew \textbf{nitpick} [\textit{binary\_ints}, \textit{bits}${} = 16$] \postw With infinite types, we don't always have the luxury of a genuine counterexample and must often content ourselves with a potentially spurious one. For example: \prew \textbf{lemma} ``$\forall n.\; \textit{Suc}~n \mathbin{\not=} n \,\Longrightarrow\, P$'' \\ \textbf{nitpick} [\textit{card~nat}~= 50] \\[2\smallskipamount] \slshape Warning: The conjecture either trivially holds for the given scopes or lies outside Nitpick's supported fragment; only potentially spurious counterexamples may be found \\[2\smallskipamount] Nitpick found a potentially spurious counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $P = \textit{False}$ \postw The issue is that the bound variable in $\forall n.\; \textit{Suc}~n \mathbin{\not=} n$ ranges over an infinite type. If Nitpick finds an $n$ such that $\textit{Suc}~n \mathbin{=} n$, it evaluates the assumption to \textit{False}; but otherwise, it does not know anything about values of $n \ge \textit{card~nat}$ and must therefore evaluate the assumption to~$\unk$, not \textit{True}. Since the assumption can never be fully satisfied by Nitpick, the putative lemma can never be falsified. Some conjectures involving elementary number theory make Nitpick look like a giant with feet of clay: \prew \textbf{lemma} ``$P~\textit{Suc\/}$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found no counterexample \postw On any finite set $N$, \textit{Suc} is a partial function; for example, if $N = \{0, 1, \ldots, k\}$, then \textit{Suc} is $\{0 \mapsto 1,\, 1 \mapsto 2,\, \ldots,\, k \mapsto \unk\}$, which evaluates to $\unk$ when passed as an argument to $P$. As a result, $P~\textit{Suc}$ is always $\unk$. The next example is similar: \prew \textbf{lemma} ``$P~(\textit{op}~{+}\Colon \textit{nat}\mathbin{\Rightarrow}\textit{nat}\mathbin{\Rightarrow}\textit{nat})$'' \\ \textbf{nitpick} [\textit{card nat} = 1] \\[2\smallskipamount] {\slshape Nitpick found a counterexample:} \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $P = \unkef(\unkef(0 := \unkef(0 := 0)) := \mathit{False})$ \\[2\smallskipamount] \textbf{nitpick} [\textit{card nat} = 2] \\[2\smallskipamount] {\slshape Nitpick found no counterexample.} \postw The problem here is that \textit{op}~+ is total when \textit{nat} is taken to be $\{0\}$ but becomes partial as soon as we add $1$, because $1 + 1 \notin \{0, 1\}$.
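This partiality is harmless for ordinary arithmetic goals. As a small sanity check of our own (the following lemma is not taken from \texttt{Manual\_Nits.thy}), commutativity of addition survives the three-valued treatment: \prew \textbf{lemma} ``$(x{\Colon}\textit{nat}) + y = y + x$'' \\ \textbf{nitpick} \postw Wherever the approximated addition is defined, both sides agree, and elsewhere both evaluate to~$\unk$; either way, we would expect Nitpick to report no counterexample.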
Because numbers are infinite and are approximated using a three-valued logic, there is usually no need to systematically enumerate domain sizes. If Nitpick cannot find a genuine counterexample for \textit{card~nat}~= $k$, it is very unlikely that one could be found for smaller domains. (The $P~(\textit{op}~{+})$ example above is an exception to this principle.) Nitpick nonetheless enumerates all cardinalities from 1 to 10 for \textit{nat}, mainly because smaller cardinalities are fast to handle and give rise to simpler counterexamples. This is explained in more detail in \S\ref{scope-monotonicity}. \subsection{Inductive Datatypes} \label{inductive-datatypes} Like natural numbers and integers, inductive datatypes with recursive constructors admit no finite models and must be approximated by a subterm-closed subset. For example, using a cardinality of 10 for ${'}a~\textit{list}$, Nitpick looks for all counterexamples that can be built using at most 10 different lists. Let's look at an example involving \textit{hd} (which returns the first element of a list) and $@$ (which concatenates two lists): \prew \textbf{lemma} ``$\textit{hd}~(\textit{xs} \mathbin{@} [y, y]) = \textit{hd}~\textit{xs\/}$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$~= 3: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $\textit{xs} = []$ \\ \hbox{}\qquad\qquad $\textit{y} = a_1$ \postw To see why the counterexample is genuine, we enable \textit{show\_consts} and \textit{show\_\allowbreak datatypes}: \prew {\slshape Type:} \\ \hbox{}\qquad $'a$~\textit{list}~= $\{[],\, [a_1],\, [a_1, a_1],\, \unr\}$ \\ {\slshape Constants:} \\ \hbox{}\qquad $\lambda x_1.\; x_1 \mathbin{@} [y, y] = \unkef([] := [a_1, a_1])$ \\ \hbox{}\qquad $\textit{hd} = \unkef([] := a_2,\> [a_1] := a_1,\> [a_1, a_1] := a_1)$ \postw Since $\mathit{hd}~[]$ is undefined in the logic, it may be given any value, including $a_2$. The second constant, $\lambda x_1.\; x_1 \mathbin{@} [y, y]$, is simply the append operator whose second argument is fixed to be $[y, y]$. Appending $[a_1, a_1]$ to $[a_1]$ would normally give $[a_1, a_1, a_1]$, but this value is not representable in the subset of $'a$~\textit{list} considered by Nitpick, which is shown under the ``Type'' heading; hence the result is $\unk$. Similarly, appending $[a_1, a_1]$ to itself gives $\unk$. Given \textit{card}~$'a = 3$ and \textit{card}~$'a~\textit{list} = 3$, Nitpick considers the following subsets: \kern-.5\smallskipamount %% TYPESETTING \prew \begin{multicols}{3} $\{[],\, [a_1],\, [a_2]\}$; \\ $\{[],\, [a_1],\, [a_3]\}$; \\ $\{[],\, [a_2],\, [a_3]\}$; \\ $\{[],\, [a_1],\, [a_1, a_1]\}$; \\ $\{[],\, [a_1],\, [a_2, a_1]\}$; \\ $\{[],\, [a_1],\, [a_3, a_1]\}$; \\ $\{[],\, [a_2],\, [a_1, a_2]\}$; \\ $\{[],\, [a_2],\, [a_2, a_2]\}$; \\ $\{[],\, [a_2],\, [a_3, a_2]\}$; \\ $\{[],\, [a_3],\, [a_1, a_3]\}$; \\ $\{[],\, [a_3],\, [a_2, a_3]\}$; \\ $\{[],\, [a_3],\, [a_3, a_3]\}$. \end{multicols} \postw \kern-2\smallskipamount %% TYPESETTING These are all the subterm-closed subsets of $'a~\textit{list}$ consisting of three values, and only those. As an example of a non-subterm-closed subset, consider $\mathcal{S} = \{[],\, [a_1],\,\allowbreak [a_1, a_2]\}$, and observe that $[a_1, a_2]$ (i.e., $a_1 \mathbin{\#} [a_2]$) has $[a_2] \notin \mathcal{S}$ as a subterm.
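The two cardinalities can be constrained independently using the \textit{card} option. For instance, the scope discussed above corresponds to an invocation along the following lines (our own illustration; the lemma to check is as before): \prew \textbf{nitpick} [\textit{card}~$'a$~= 3,\, \textit{card}~``\kern1pt$'a~\textit{list\/}$''~= 3] \postw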
Here's another wannabe lemma that Nitpick can refute without a blink: \prew \textbf{lemma} ``$\lbrakk \textit{length}~\textit{xs} = 1;\> \textit{length}~\textit{ys} = 1 \rbrakk \,\Longrightarrow\, \textit{xs} = \textit{ys\/}$'' \\ \textbf{nitpick} [\textit{show\_types}] \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$~= 3: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $\textit{xs} = [a_2]$ \\ \hbox{}\qquad\qquad $\textit{ys} = [a_1]$ \\ \hbox{}\qquad Types: \\ \hbox{}\qquad\qquad $\textit{nat} = \{0,\, 1,\, 2,\, \unr\}$ \\ \hbox{}\qquad\qquad $'a$~\textit{list} = $\{[],\, [a_1],\, [a_2],\, \unr\}$ \postw Because datatypes are approximated using a three-valued logic, there is usually no need to systematically enumerate cardinalities: If Nitpick cannot find a genuine counterexample for \textit{card}~$'a~\textit{list}$~= 10, it is very unlikely that one could be found for smaller cardinalities. \subsection{Typedefs, Quotient Types, Records, Rationals, and Reals} \label{typedefs-quotient-types-records-rationals-and-reals} Nitpick generally treats types declared using \textbf{typedef} as datatypes whose single constructor is the corresponding \textit{Abs\_\kern.1ex} function. For example: \prew \textbf{typedef}~\textit{three} = ``$\{0\Colon\textit{nat},\, 1,\, 2\}$'' \\ \textbf{by}~\textit{blast} \\[2\smallskipamount] \textbf{definition}~$A \mathbin{\Colon} \textit{three}$ \textbf{where} ``\kern-.1em$A \,\equiv\, \textit{Abs\_\allowbreak three}~0$'' \\ \textbf{definition}~$B \mathbin{\Colon} \textit{three}$ \textbf{where} ``$B \,\equiv\, \textit{Abs\_three}~1$'' \\ \textbf{definition}~$C \mathbin{\Colon} \textit{three}$ \textbf{where} ``$C \,\equiv\, \textit{Abs\_three}~2$'' \\[2\smallskipamount] \textbf{lemma} ``$\lbrakk A \in X;\> B \in X\rbrakk \,\Longrightarrow\, c \in X$'' \\ \textbf{nitpick} [\textit{show\_types}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $X = \{\Abs{0},\, \Abs{1}\}$ \\ \hbox{}\qquad\qquad $c = \Abs{2}$ \\ \hbox{}\qquad Types: \\ \hbox{}\qquad\qquad $\textit{nat} = \{0,\, 1,\, 2,\, \unr\}$ \\ \hbox{}\qquad\qquad $\textit{three} = \{\Abs{0},\, \Abs{1},\, \Abs{2},\, \unr\}$ \postw In the output above, $\Abs{n}$ abbreviates $\textit{Abs\_three}~n$. Quotient types are handled in much the same way.
The following fragment defines the integer type \textit{my\_int} by encoding the integer $x$ as a pair of natural numbers $(m, n)$ such that $x + n = m$: \prew \textbf{fun} \textit{my\_int\_rel} \textbf{where} \\ ``$\textit{my\_int\_rel}~(x,\, y)~(u,\, v) = (x + v = u + y)$'' \\[2\smallskipamount] % \textbf{quotient\_type}~\textit{my\_int} = ``$\textit{nat} \times \textit{nat\/}$''$\;{/}\;$\textit{my\_int\_rel} \\ \textbf{by}~(\textit{auto simp add\/}:\ \textit{equivp\_def fun\_eq\_iff}) \\[2\smallskipamount] % \textbf{definition}~\textit{add\_raw}~\textbf{where} \\ ``$\textit{add\_raw} \,\equiv\, \lambda(x,\, y)~(u,\, v).\; (x + (u\Colon\textit{nat}), y + (v\Colon\textit{nat}))$'' \\[2\smallskipamount] % \textbf{quotient\_definition} ``$\textit{add\/}\Colon\textit{my\_int} \Rightarrow \textit{my\_int} \Rightarrow \textit{my\_int\/}$'' \textbf{is} \textit{add\_raw} \\[2\smallskipamount] % \textbf{lemma} ``$\textit{add}~x~y = \textit{add}~x~x$'' \\ \textbf{nitpick} [\textit{show\_types}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $x = \Abs{(0,\, 0)}$ \\ \hbox{}\qquad\qquad $y = \Abs{(0,\, 1)}$ \\ \hbox{}\qquad Types: \\ \hbox{}\qquad\qquad $\textit{nat} = \{0,\, 1,\, \unr\}$ \\ \hbox{}\qquad\qquad $\textit{nat} \times \textit{nat}~[\textsl{boxed\/}] = \{(0,\, 0),\> (1,\, 0),\> \unr\}$ \\ \hbox{}\qquad\qquad $\textit{my\_int} = \{\Abs{(0,\, 0)},\> \Abs{(0,\, 1)},\> \unr\}$ \postw The values $\Abs{(0,\, 0)}$ and $\Abs{(0,\, 1)}$ represent the integers $0$ and $-1$, respectively. Other representatives would have been possible---e.g., $\Abs{(5,\, 5)}$ and $\Abs{(11,\, 12)}$. If we are going to use \textit{my\_int} extensively, it pays off to install a term postprocessor that converts the pair notation to the standard mathematical notation: \prew $\textbf{ML}~\,\{{*} \\ \!\begin{aligned}[t] %& ({*}~\,\textit{Proof.context} \rightarrow \textit{string} \rightarrow (\textit{typ} \rightarrow \textit{term~list\/}) \rightarrow \textit{typ} \rightarrow \textit{term} \\[-2pt] %& \phantom{(*}~\,{\rightarrow}\;\textit{term}~\,{*}) \\[-2pt] & \textbf{fun}\,~\textit{my\_int\_postproc}~\_~\_~\_~T~(\textit{Const}~\_~\$~(\textit{Const}~\_~\$~\textit{t1}~\$~\textit{t2\/})) = {} \\[-2pt] & \phantom{fun}\,~\textit{HOLogic.mk\_number}~T~(\textit{snd}~(\textit{HOLogic.dest\_number~t1}) \\[-2pt] & \phantom{fun\,~\textit{HOLogic.mk\_number}~T~(}{-}~\textit{snd}~(\textit{HOLogic.dest\_number~t2\/})) \\[-2pt] & \phantom{fun}\!{\mid}\,~\textit{my\_int\_postproc}~\_~\_~\_~\_~t = t \\[-2pt] {*}\}\end{aligned}$ \\[2\smallskipamount] $\textbf{declaration}~\,\{{*} \\ \!\begin{aligned}[t] & \textit{Nitpick\_Model.register\_term\_postprocessor}~\!\begin{aligned}[t] & @\{\textrm{typ}~\textit{my\_int}\} \\[-2pt] & \textit{my\_int\_postproc}\end{aligned} \\[-2pt] {*}\}\end{aligned}$ \postw Records are handled as datatypes with a single constructor: \prew \textbf{record} \textit{point} = \\ \hbox{}\quad $\textit{Xcoord} \mathbin{\Colon} \textit{int}$ \\ \hbox{}\quad $\textit{Ycoord} \mathbin{\Colon} \textit{int}$ \\[2\smallskipamount] \textbf{lemma} ``$\textit{Xcoord}~(p\Colon\textit{point}) = \textit{Xcoord}~(q\Colon\textit{point})$'' \\ \textbf{nitpick} [\textit{show\_types}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $p = \lparr\textit{Xcoord} = 1,\> \textit{Ycoord} = 1\rparr$ \\ \hbox{}\qquad\qquad $q =
\lparr\textit{Xcoord} = 0,\> \textit{Ycoord} = 0\rparr$ \\ \hbox{}\qquad Types: \\ \hbox{}\qquad\qquad $\textit{int} = \{0,\, 1,\, \unr\}$ \\ \hbox{}\qquad\qquad $\textit{point} = \{\!\begin{aligned}[t] & \lparr\textit{Xcoord} = 0,\> \textit{Ycoord} = 0\rparr, \\[-2pt] %% TYPESETTING & \lparr\textit{Xcoord} = 1,\> \textit{Ycoord} = 1\rparr,\, \unr\}\end{aligned}$ \postw Finally, Nitpick provides rudimentary support for rationals and reals using a similar approach: \prew \textbf{lemma} ``$4 * x + 3 * (y\Colon\textit{real}) \not= 1/2$'' \\ \textbf{nitpick} [\textit{show\_types}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $x = 1/2$ \\ \hbox{}\qquad\qquad $y = -1/2$ \\ \hbox{}\qquad Types: \\ \hbox{}\qquad\qquad $\textit{nat} = \{0,\, 1,\, 2,\, 3,\, 4,\, 5,\, 6,\, 7,\, \unr\}$ \\ \hbox{}\qquad\qquad $\textit{int} = \{-3,\, -2,\, -1,\, 0,\, 1,\, 2,\, 3,\, 4,\, \unr\}$ \\ \hbox{}\qquad\qquad $\textit{real} = \{-3/2,\, -1/2,\, 0,\, 1/2,\, 1,\, 2,\, 3,\, 4,\, \unr\}$ \postw \subsection{Inductive and Coinductive Predicates} \label{inductive-and-coinductive-predicates} Inductively defined predicates (and sets) are particularly problematic for counterexample generators. They can make Quickcheck~\cite{berghofer-nipkow-2004} loop forever and Refute~\cite{weber-2008} run out of resources. The crux of the problem is that they are defined using a least fixed-point construction. Nitpick's philosophy is that not all inductive predicates are equal. Consider the \textit{even} predicate below: \prew \textbf{inductive}~\textit{even}~\textbf{where} \\ ``\textit{even}~0'' $\,\mid$ \\ ``\textit{even}~$n\,\Longrightarrow\, \textit{even}~(\textit{Suc}~(\textit{Suc}~n))$'' \postw This predicate enjoys the desirable property of being well-founded, which means that the introduction rules don't give rise to infinite chains of the form \prew $\cdots\,\Longrightarrow\, \textit{even}~k'' \,\Longrightarrow\, \textit{even}~k' \,\Longrightarrow\, \textit{even}~k.$ \postw For \textit{even}, this is obvious: Any chain ending at $k$ will be of length $k/2 + 1$: \prew $\textit{even}~0\,\Longrightarrow\, \textit{even}~2\,\Longrightarrow\, \cdots \,\Longrightarrow\, \textit{even}~(k - 2) \,\Longrightarrow\, \textit{even}~k.$ \postw Wellfoundedness is desirable because it enables Nitpick to use a very efficient fixed-point computation.% \footnote{If an inductive predicate is well-founded, then it has exactly one fixed point, which is simultaneously the least and the greatest fixed point. In these circumstances, the computation of the least fixed point amounts to the computation of an arbitrary fixed point, which can be performed using a straightforward recursive equation.} Moreover, Nitpick can prove wellfoundedness of most well-founded predicates, just as Isabelle's \textbf{function} package usually discharges termination proof obligations automatically. Let's try an example: \prew \textbf{lemma} ``$\exists n.\; \textit{even}~n \mathrel{\land} \textit{even}~(\textit{Suc}~n)$'' \\ \textbf{nitpick}~[\textit{card nat}~= 50, \textit{unary\_ints}, \textit{verbose}] \\[2\smallskipamount] \slshape The inductive predicate ``\textit{even}'' was proved well-founded; Nitpick can compute it efficiently \\[2\smallskipamount] Trying 1 scope: \\ \hbox{}\qquad \textit{card nat}~= 50. 
\\[2\smallskipamount] Warning: The conjecture either trivially holds for the given scopes or lies outside Nitpick's supported fragment; only potentially spurious counterexamples may be found \\[2\smallskipamount] Nitpick found a potentially spurious counterexample for \textit{card nat}~= 50: \\[2\smallskipamount] \hbox{}\qquad Empty assignment \\[2\smallskipamount] Nitpick could not find a better counterexample. It checked 1 of 1 scope \\[2\smallskipamount] Total time: 1.62 s. \postw No genuine counterexample is possible because Nitpick cannot rule out the existence of a natural number $n \ge 50$ such that both $\textit{even}~n$ and $\textit{even}~(\textit{Suc}~n)$ are true. To help Nitpick, we can bound the existential quantifier: \prew \textbf{lemma} ``$\exists n \mathbin{\le} 49.\; \textit{even}~n \mathrel{\land} \textit{even}~(\textit{Suc}~n)$'' \\ \textbf{nitpick}~[\textit{card nat}~= 50, \textit{unary\_ints}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Empty assignment \postw So far, we have been blessed by the wellfoundedness of \textit{even}. What happens if we use the following definition instead? \prew \textbf{inductive} $\textit{even}'$ \textbf{where} \\ ``$\textit{even}'~(0{\Colon}\textit{nat})$'' $\,\mid$ \\ ``$\textit{even}'~2$'' $\,\mid$ \\ ``$\lbrakk\textit{even}'~m;\> \textit{even}'~n\rbrakk \,\Longrightarrow\, \textit{even}'~(m + n)$'' \postw This definition is not well-founded: From $\textit{even}'~0$ and $\textit{even}'~0$, we can derive $\textit{even}'~0$. Nonetheless, the predicates $\textit{even}$ and $\textit{even}'$ are equivalent. Let's check a property involving $\textit{even}'$. To make up for the foreseeable computational hurdles entailed by non-wellfoundedness, we decrease \textit{nat}'s cardinality to a mere 10: \prew \textbf{lemma}~``$\exists n \in \{0, 2, 4, 6, 8\}.\; \lnot\;\textit{even}'~n$'' \\ \textbf{nitpick}~[\textit{card nat}~= 10,\, \textit{verbose},\, \textit{show\_consts}] \\[2\smallskipamount] \slshape The inductive predicate ``$\textit{even}'\!$'' could not be proved well-founded; Nitpick might need to unroll it \\[2\smallskipamount] Trying 6 scopes: \\ \hbox{}\qquad \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 0; \\ \hbox{}\qquad \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 1; \\ \hbox{}\qquad \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 2; \\ \hbox{}\qquad \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 4; \\ \hbox{}\qquad \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 8; \\ \hbox{}\qquad \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 9 \\[2\smallskipamount] Nitpick found a counterexample for \textit{card nat}~= 10 and \textit{iter} $\textit{even}'$~= 2: \\[2\smallskipamount] \hbox{}\qquad Constant: \nopagebreak \\ \hbox{}\qquad\qquad $\lambda i.\; \textit{even}'$ = $\unkef(\!\begin{aligned}[t] & 0 := \unkef(0 := \textit{True},\, 2 := \textit{True}),\, \\[-2pt] & 1 := \unkef(0 := \textit{True},\, 2 := \textit{True},\, 4 := \textit{True}),\, \\[-2pt] & 2 := \unkef(0 := \textit{True},\, 2 := \textit{True},\, 4 := \textit{True},\, \\[-2pt] & \phantom{2 := \unkef(}6 := \textit{True},\, 8 := \textit{True}))\end{aligned}$ \\[2\smallskipamount] Total time: 1.87 s. \postw Nitpick's output is very instructive. First, it tells us that the predicate is unrolled, meaning that it is computed iteratively from the empty set.
Then it lists six scopes specifying different bounds on the number of iterations:\ 0, 1, 2, 4, 8, and~9. The output also shows how each iteration contributes to $\textit{even}'$. The notation $\lambda i.\; \textit{even}'$ indicates that the value of the predicate depends on an iteration counter. Iteration 0 provides the basis elements, $0$ and $2$. Iteration 1 contributes $4$ ($= 2 + 2$). Iteration 2 throws $6$ ($= 2 + 4 = 4 + 2$) and $8$ ($= 4 + 4$) into the mix. Further iterations would not contribute any new elements. The predicate $\textit{even}'$ evaluates to either \textit{True} or $\unk$, never \textit{False}. %Some values are marked with superscripted question %marks~(`\lower.2ex\hbox{$^\Q$}'). These are the elements for which the %predicate evaluates to $\unk$. When unrolling a predicate, Nitpick tries 0, 1, 2, 4, 8, 12, 16, 20, 24, and 28 iterations. However, these numbers are bounded by the cardinality of the predicate's domain. With \textit{card~nat}~= 10, no more than 9 iterations are ever needed to compute the value of a \textit{nat} predicate. You can specify the number of iterations using the \textit{iter} option, as explained in \S\ref{scope-of-search}. In the next formula, $\textit{even}'$ occurs both positively and negatively: \prew \textbf{lemma} ``$\textit{even}'~(n - 2) \,\Longrightarrow\, \textit{even}'~n$'' \\ \textbf{nitpick} [\textit{card nat} = 10, \textit{show\_consts}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $n = 1$ \\ \hbox{}\qquad Constants: \nopagebreak \\ \hbox{}\qquad\qquad $\lambda i.\; \textit{even}'$ = $\unkef(\!\begin{aligned}[t] & 0 := \unkef(0 := \mathit{True},\, 2 := \mathit{True}))\end{aligned}$ \\ \hbox{}\qquad\qquad $\textit{even}' \leq \unkef(\!\begin{aligned}[t] & 0 := \mathit{True},\, 1 := \mathit{False},\, 2 := \mathit{True},\, \\[-2pt] & 4 := \mathit{True},\, 6 := \mathit{True},\, 8 := \mathit{True})\end{aligned}$ \postw Notice the special constraint $\textit{even}' \leq \ldots$ in the output, whose right-hand side represents an arbitrary fixed point (not necessarily the least one). It is used to falsify $\textit{even}'~n$. In contrast, the unrolled predicate is used to satisfy $\textit{even}'~(n - 2)$. Coinductive predicates are handled dually. For example: \prew \textbf{coinductive} \textit{nats} \textbf{where} \\ ``$\textit{nats}~(x\Colon\textit{nat}) \,\Longrightarrow\, \textit{nats}~x$'' \\[2\smallskipamount] \textbf{lemma} ``$\textit{nats} = (\lambda n.\; n \mathbin\in \{0, 1, 2, 3, 4\})$'' \\ \textbf{nitpick}~[\textit{card nat} = 10,\, \textit{show\_consts}] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Constants: \nopagebreak \\ \hbox{}\qquad\qquad $\lambda i.\; \textit{nats} = \unkef(0 := \unkef,\, 1 := \unkef,\, 2 := \unkef)$ \\ \hbox{}\qquad\qquad $\textit{nats} \geq \unkef(3 := \textit{True},\, 4 := \textit{False},\, 5 := \textit{True})$ \postw As a special case, Nitpick uses Kodkod's transitive closure operator to encode negative occurrences of non-well-founded ``linear inductive predicates,'' i.e., inductive predicates for which the predicate occurs in at most one assumption of each introduction rule.
For example:
\prew
\textbf{inductive} \textit{odd} \textbf{where} \\
``$\textit{odd}~1$'' $\,\mid$ \\
``$\lbrakk \textit{odd}~m;\>\, \textit{even}~n\rbrakk \,\Longrightarrow\, \textit{odd}~(m + n)$'' \\[2\smallskipamount]
\textbf{lemma}~``$\textit{odd}~n \,\Longrightarrow\, \textit{odd}~(n - 2)$'' \\
\textbf{nitpick}~[\textit{card nat} = 4,\, \textit{show\_consts}] \\[2\smallskipamount]
\slshape Nitpick found a counterexample: \\[2\smallskipamount]
\hbox{}\qquad Free variable: \nopagebreak \\
\hbox{}\qquad\qquad $n = 1$ \\
\hbox{}\qquad Constants: \nopagebreak \\
\hbox{}\qquad\qquad $\textit{even} = \unkef(0 := \textit{True},\, 1 := \textit{False},\, 2 := \textit{True},\, 3 := \textit{False})$ \\
\hbox{}\qquad\qquad $\textit{odd}_{\textsl{base}} = {}$ \\
\hbox{}\qquad\qquad\quad $\unkef(0 := \textit{False},\, 1 := \textit{True},\, 2 := \textit{False},\, 3 := \textit{False})$ \\
\hbox{}\qquad\qquad $\textit{odd}_{\textsl{step}} = \unkef$\\
\hbox{}\qquad\qquad\quad $( \!\begin{aligned}[t] & 0 := \unkef(0 := \textit{True},\, 1 := \textit{False},\, 2 := \textit{True},\, 3 := \textit{False}), \\[-2pt] & 1 := \unkef(0 := \textit{False},\, 1 := \textit{True},\, 2 := \textit{False},\, 3 := \textit{True}), \\[-2pt] & 2 := \unkef(0 := \textit{False},\, 1 := \textit{False},\, 2 := \textit{True},\, 3 := \textit{False}), \\[-2pt] & 3 := \unkef(0 := \textit{False},\, 1 := \textit{False},\, 2 := \textit{False},\, 3 := \textit{True})) \end{aligned}$ \\
\hbox{}\qquad\qquad $\textit{odd} \leq \unkef(0 := \textit{False},\, 1 := \textit{True},\, 2 := \textit{False},\, 3 := \textit{True})$
\postw
\noindent In the output, $\textit{odd}_{\textrm{base}}$ represents the base elements and $\textit{odd}_{\textrm{step}}$ is a transition relation that computes new elements from known ones. The set $\textit{odd}$ consists of all the values reachable through the reflexive transitive closure of $\textit{odd}_{\textrm{step}}$ starting with any element from $\textit{odd}_{\textrm{base}}$, namely 1 and 3. Using Kodkod's transitive closure to encode linear predicates is normally either more thorough or more efficient than unrolling (depending on the value of \textit{iter}), but you can disable it by passing the \textit{dont\_star\_linear\_preds} option.
\subsection{Coinductive Datatypes}
\label{coinductive-datatypes}
A coinductive datatype is similar to an inductive datatype but allows infinite objects. Thus, the infinite lists $\textit{ps}$ $=$ $[a, a, a, \ldots]$, $\textit{qs}$ $=$ $[a, b, a, b, \ldots]$, and $\textit{rs}$ $=$ $[0, 1, 2, 3, \ldots]$ can be defined as coinductive lists, or ``lazy lists,'' using the $\textit{LNil}\mathbin{\Colon}{'}a~\textit{llist}$ and $\textit{LCons}\mathbin{\Colon}{'}a \mathbin{\Rightarrow} {'}a~\textit{llist} \mathbin{\Rightarrow} {'}a~\textit{llist}$ constructors.
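To make this concrete, here is one way these three lists could be constructed. The definitions below are a sketch: the parameterized functions and their names are ours, and they assume the $'a~\textit{llist}$ codatatype declared just below. Each corecursive call is guarded by a single \textit{LCons}, as \textbf{primcorec} requires:
\prew
\textbf{primcorec}~\textit{ps} \textbf{where} ``$\textit{ps}~a = \textit{LCons}~a~(\textit{ps}~a)$'' \\[2\smallskipamount]
\textbf{primcorec}~\textit{qs} \textbf{where} ``$\textit{qs}~a~b = \textit{LCons}~a~(\textit{qs}~b~a)$'' \\[2\smallskipamount]
\textbf{primcorec}~\textit{rs} \textbf{where} ``$\textit{rs}~n = \textit{LCons}~n~(\textit{rs}~(\textit{Suc}~n))$''
\postw
Unfolding the equations, $\textit{ps}~a$ yields $[a, a, a, \ldots]$, $\textit{qs}~a~b$ yields $[a, b, a, b, \ldots]$, and $\textit{rs}~0$ yields $[0, 1, 2, 3, \ldots]$.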
Although it is otherwise no friend of infinity, Nitpick can find counterexamples involving cyclic lists such as \textit{ps} and \textit{qs} above as well as finite lists:
\prew
\textbf{codatatype} $'a$ \textit{llist} = \textit{LNil}~$\mid$~\textit{LCons}~$'a$~``$'a\;\textit{llist}$'' \\[2\smallskipamount]
\textbf{lemma} ``$\textit{xs} \not= \textit{LCons}~a~\textit{xs\/}$'' \\
\textbf{nitpick} \\[2\smallskipamount]
\slshape Nitpick found a counterexample for {\itshape card}~$'a$ = 1: \\[2\smallskipamount]
\hbox{}\qquad Free variables: \nopagebreak \\
\hbox{}\qquad\qquad $\textit{a} = a_1$ \\
\hbox{}\qquad\qquad $\textit{xs} = \textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega$
\postw
The notation $\textrm{THE}~\omega.\; \omega = t(\omega)$ stands for the infinite term $t(t(t(\ldots)))$. Hence, \textit{xs} is simply the infinite list $[a_1, a_1, a_1, \ldots]$.
The next example is more interesting:
\prew
\textbf{primcorec}~$\textit{iterates}$~\textbf{where} \\
``$\textit{iterates}~f\>a = \textit{LCons}~a~(\textit{iterates}~f\>(f\>a))$'' \\[2\smallskipamount]
\textbf{lemma}~``$\lbrakk\textit{xs} = \textit{LCons}~a~\textit{xs};\>\, \textit{ys} = \textit{iterates}~(\lambda b.\> a)~b\rbrakk \,\Longrightarrow\, \textit{xs} = \textit{ys\/}$'' \\
\textbf{nitpick} [\textit{verbose}] \\[2\smallskipamount]
\slshape The type $'a$ passed the monotonicity test; Nitpick might be able to skip some scopes \\[2\smallskipamount]
Trying 10 scopes: \\
\hbox{}\qquad \textit{card} $'a$~= 1, \textit{card} ``\kern1pt$'a~\textit{llist\/}$''~= 1, and \textit{bisim\_depth}~= 0; \\
\hbox{}\qquad $\qquad\vdots$ \\[.5\smallskipamount]
\hbox{}\qquad \textit{card} $'a$~= 10, \textit{card} ``\kern1pt$'a~\textit{llist\/}$''~= 10, and \textit{bisim\_depth}~= 9 \\[2\smallskipamount]
Nitpick found a counterexample for {\itshape card}~$'a$ = 2, \textit{card}~``\kern1pt$'a~\textit{llist\/}$''~= 2, and \textit{bisim\_\allowbreak depth}~= 1: \\[2\smallskipamount]
\hbox{}\qquad Free variables: \nopagebreak \\
\hbox{}\qquad\qquad $\textit{a} = a_1$ \\
\hbox{}\qquad\qquad $\textit{b} = a_2$ \\
\hbox{}\qquad\qquad $\textit{xs} = \textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega$ \\
\hbox{}\qquad\qquad $\textit{ys} = \textit{LCons}~a_2~(\textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega)$ \\[2\smallskipamount]
Total time: 1.11 s
\postw
The lazy list $\textit{xs}$ is simply $[a_1, a_1, a_1, \ldots]$, whereas $\textit{ys}$ is $[a_2, a_1, a_1, a_1, \ldots]$, i.e., a lasso-shaped list with $[a_2]$ as its stem and $[a_1]$ as its cycle. In general, the list segment within the scope of the $\textrm{THE}$ binder corresponds to the lasso's cycle, whereas the segment leading to the binder is the stem.
A salient property of coinductive datatypes is that two objects are considered equal if and only if they lead to the same observations. For example, the two lazy lists
%
\begin{gather*}
\textrm{THE}~\omega.\; \omega = \textit{LCons}~a~(\textit{LCons}~b~\omega) \\
\textit{LCons}~a~(\textrm{THE}~\omega.\; \omega = \textit{LCons}~b~(\textit{LCons}~a~\omega))
\end{gather*}
%
are identical, because both lead to the sequence of observations $a$, $b$, $a$, $b$, \hbox{\ldots} (or, equivalently, both encode the infinite list $[a, b, a, b, \ldots]$). This concept of equality for coinductive datatypes is called bisimulation and is defined coinductively. Internally, Nitpick encodes the coinductive bisimilarity predicate as part of the Kodkod problem to ensure that distinct objects lead to different observations.
This precaution is somewhat expensive and often unnecessary, so it can be disabled by setting the \textit{bisim\_depth} option to $-1$. The bisimilarity check is then performed \textsl{after} the counterexample has been found to ensure correctness. If this after-the-fact check fails, the counterexample is tagged as ``quasi genuine'' and Nitpick recommends trying again with \textit{bisim\_depth} set to a nonnegative integer.
The next formula illustrates the need for bisimilarity (either as a Kodkod predicate or as an after-the-fact check) to prevent spurious counterexamples:
\prew
\textbf{lemma} ``$\lbrakk xs = \textit{LCons}~a~\textit{xs};\>\, \textit{ys} = \textit{LCons}~a~\textit{ys}\rbrakk \,\Longrightarrow\, \textit{xs} = \textit{ys\/}$'' \\
\textbf{nitpick} [\textit{bisim\_depth} = $-1$, \textit{show\_types}] \\[2\smallskipamount]
\slshape Nitpick found a quasi genuine counterexample for $\textit{card}~'a$ = 2: \\[2\smallskipamount]
\hbox{}\qquad Free variables: \nopagebreak \\
\hbox{}\qquad\qquad $a = a_1$ \\
\hbox{}\qquad\qquad $\textit{xs} = \textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega$ \\
\hbox{}\qquad\qquad $\textit{ys} = \textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega$ \\
\hbox{}\qquad Type:\strut \nopagebreak \\
\hbox{}\qquad\qquad $'a~\textit{llist} = \{\!\begin{aligned}[t] & \textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega, \\[-2pt] & \textsl{THE}~\omega.\; \omega = \textit{LCons}~a_1~\omega,\> \unr\}\end{aligned}$ \\[2\smallskipamount]
Try again with ``\textit{bisim\_depth}'' set to a nonnegative value to confirm that the counterexample is genuine \\[2\smallskipamount]
{\upshape\textbf{nitpick}} \\[2\smallskipamount]
\slshape Nitpick found no counterexample
\postw
In the first \textbf{nitpick} invocation, the after-the-fact check discovered that the two known elements of type $'a~\textit{llist}$ are bisimilar, prompting Nitpick to label the counterexample as only ``quasi genuine.''
A compromise between leaving out the bisimilarity predicate from the Kodkod problem and performing the after-the-fact check is to specify a low nonnegative \textit{bisim\_depth} value. In general, a value of $K$ means that Nitpick will require all lists to be distinguished from each other by their prefixes of length $K$. However, setting $K$ too low can overconstrain Nitpick, preventing it from finding any counterexamples.
\subsection{Boxing}
\label{boxing}
Nitpick normally maps function and product types directly to the corresponding Kodkod concepts. As a consequence, if $'a$ has cardinality 3 and $'b$ has cardinality 4, then $'a \times {'}b$ has cardinality 12 ($= 4 \times 3$) and $'a \Rightarrow {'}b$ has cardinality 64 ($= 4^3$). In some circumstances, it pays off to treat these types in the same way as plain datatypes, by approximating them by a subset of a given cardinality. This technique is called ``boxing'' and is particularly useful for functions passed as arguments to other functions, for high-arity functions, and for large tuples. Under the hood, boxing involves wrapping occurrences of the types $'a \times {'}b$ and $'a \Rightarrow {'}b$ in isomorphic datatypes, as can be seen by enabling the \textit{debug} option.
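Boxing can also be requested or refused explicitly for individual types. As a sketch of the per-type syntax (the \textit{box} option is documented in \S\ref{scope-of-search}; the choice of function type here is arbitrary), the following invocation forces boxing of one function type, whereas \textit{dont\_box} disables boxing altogether:
\prew
\textbf{nitpick}~[\textit{box}~``$'a \Rightarrow {'}b$''~= \textit{true}]
\postw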
To illustrate boxing, we consider a formalization of $\lambda$-terms represented using de Bruijn's notation:
\prew
\textbf{datatype} \textit{tm} = \textit{Var}~\textit{nat}~$\mid$~\textit{Lam}~\textit{tm} $\mid$ \textit{App~tm~tm}
\postw
The $\textit{lift}~t~k$ function increments all variables with indices greater than or equal to $k$ by one:
\prew
\textbf{primrec} \textit{lift} \textbf{where} \\
``$\textit{lift}~(\textit{Var}~j)~k = \textit{Var}~(\textrm{if}~j < k~\textrm{then}~j~\textrm{else}~j + 1)$'' $\mid$ \\
``$\textit{lift}~(\textit{Lam}~t)~k = \textit{Lam}~(\textit{lift}~t~(k + 1))$'' $\mid$ \\
``$\textit{lift}~(\textit{App}~t~u)~k = \textit{App}~(\textit{lift}~t~k)~(\textit{lift}~u~k)$''
\postw
The $\textit{loose}~t~k$ predicate returns \textit{True} if and only if term $t$ has a loose variable with index $k$ or more:
\prew
\textbf{primrec}~\textit{loose} \textbf{where} \\
``$\textit{loose}~(\textit{Var}~j)~k = (j \ge k)$'' $\mid$ \\
``$\textit{loose}~(\textit{Lam}~t)~k = \textit{loose}~t~(\textit{Suc}~k)$'' $\mid$ \\
``$\textit{loose}~(\textit{App}~t~u)~k = (\textit{loose}~t~k \mathrel{\lor} \textit{loose}~u~k)$''
\postw
Next, the $\textit{subst}~\sigma~t$ function applies the substitution $\sigma$ to $t$:
\prew
\textbf{primrec}~\textit{subst} \textbf{where} \\
``$\textit{subst}~\sigma~(\textit{Var}~j) = \sigma~j$'' $\mid$ \\
``$\textit{subst}~\sigma~(\textit{Lam}~t) = {}$\phantom{''} \\
\phantom{``}$\textit{Lam}~(\textit{subst}~(\lambda n.\> \textrm{case}~n~\textrm{of}~0 \Rightarrow \textit{Var}~0 \mid \textit{Suc}~m \Rightarrow \textit{lift}~(\sigma~m)~1)~t)$'' $\mid$ \\
``$\textit{subst}~\sigma~(\textit{App}~t~u) = \textit{App}~(\textit{subst}~\sigma~t)~(\textit{subst}~\sigma~u)$''
\postw
A substitution is a function that maps variable indices to terms. Observe that $\sigma$ is a function passed as an argument and that Nitpick can't optimize it away, because the recursive call for the \textit{Lam} case involves an altered version. Also notice the \textit{lift} call, which increments the variable indices when moving under a \textit{Lam}. A reasonable property to expect of substitution is that it should leave closed terms unchanged.
Alas, even this simple property does not hold:
\prew
\textbf{lemma}~``$\lnot\,\textit{loose}~t~0 \,\Longrightarrow\, \textit{subst}~\sigma~t = t$'' \\
\textbf{nitpick} [\textit{verbose}] \\[2\smallskipamount]
\slshape Trying 10 scopes: \nopagebreak \\
\hbox{}\qquad \textit{card~nat}~= 1, \textit{card tm}~= 1, and \textit{card} ``$\textit{nat} \Rightarrow \textit{tm\/}$'' = 1; \\
\hbox{}\qquad \textit{card~nat}~= 2, \textit{card tm}~= 2, and \textit{card} ``$\textit{nat} \Rightarrow \textit{tm\/}$'' = 2; \\
\hbox{}\qquad $\qquad\vdots$ \\[.5\smallskipamount]
\hbox{}\qquad \textit{card~nat}~= 10, \textit{card tm}~= 10, and \textit{card} ``$\textit{nat} \Rightarrow \textit{tm\/}$'' = 10 \\[2\smallskipamount]
Nitpick found a counterexample for \textit{card~nat}~= 6, \textit{card~tm}~= 6, and \textit{card}~``$\textit{nat} \Rightarrow \textit{tm\/}$''~= 6: \\[2\smallskipamount]
\hbox{}\qquad Free variables: \nopagebreak \\
\hbox{}\qquad\qquad $\sigma = \unkef(\!\begin{aligned}[t] & 0 := \textit{Var}~0,\> 1 := \textit{Var}~0,\> 2 := \textit{Var}~0, \\[-2pt] & 3 := \textit{Var}~0,\> 4 := \textit{Var}~0,\> 5 := \textit{Lam}~(\textit{Lam}~(\textit{Var}~0)))\end{aligned}$ \\
\hbox{}\qquad\qquad $t = \textit{Lam}~(\textit{Lam}~(\textit{Var}~1))$ \\[2\smallskipamount]
Total time: 3.08 s
\postw
Using \textit{eval}, we find out that $\textit{subst}~\sigma~t = \textit{Lam}~(\textit{Lam}~(\textit{Var}~0))$. Using the traditional $\lambda$-calculus notation, $t$ is $\lambda x\, y.\> x$ whereas $\textit{subst}~\sigma~t$ is (wrongly) $\lambda x\, y.\> y$. The bug is in \textit{subst\/}: The $\textit{lift}~(\sigma~m)~1$ call should be replaced with $\textit{lift}~(\sigma~m)~0$.
An interesting aspect of Nitpick's verbose output is that it assigned increasing cardinalities from 1 to 10 to the type $\textit{nat} \Rightarrow \textit{tm}$ of the higher-order argument $\sigma$ of \textit{subst}. For the formula of interest, knowing 6 values of that type was enough to find the counterexample. Without boxing, $6^6 = 46\,656$ values must be considered, a hopeless undertaking:
\prew
\textbf{nitpick} [\textit{dont\_box}] \\[2\smallskipamount]
{\slshape Nitpick ran out of time after checking 3 of 10 scopes}
\postw
Boxing can be enabled or disabled globally or on a per-type basis using the \textit{box} option. Nitpick usually makes reasonable choices about which types should be boxed, but option tweaking sometimes helps.
%A related optimization,
%``finitization,'' attempts to wrap functions that are constant at all but finitely
%many points (e.g., finite sets); see the documentation for the \textit{finitize}
%option in \S\ref{scope-of-search} for details.
\subsection{Scope Monotonicity}
\label{scope-monotonicity}
The \textit{card} option (together with \textit{iter}, \textit{bisim\_depth}, and \textit{max}) controls which scopes are actually tested. In general, to exhaust all models below a certain cardinality bound, the number of scopes that Nitpick must consider increases exponentially with the number of type variables (and \textbf{typedecl}'d types) occurring in the formula. Given the default cardinality specification of 1--10, no fewer than $10^4 = 10\,000$ scopes must be considered for a formula involving $'a$, $'b$, $'c$, and $'d$. Fortunately, many formulas exhibit a property called \textsl{scope monotonicity}, meaning that if the formula is falsifiable for a given scope, it is also falsifiable for all larger scopes \cite[p.~165]{jackson-2006}.
Consider the formula
\prew
\textbf{lemma}~``$\textit{length~xs} = \textit{length~ys} \,\Longrightarrow\, \textit{rev}~(\textit{zip~xs~ys}) = \textit{zip~xs}~(\textit{rev~ys})$''
\postw
where \textit{xs} is of type $'a~\textit{list}$ and \textit{ys} is of type $'b~\textit{list}$. A priori, Nitpick would need to consider $1\,000$ scopes to exhaust the specification \textit{card}~= 1--10 (10 cardinalities for $'a$ $\times$ 10 cardinalities for $'b$ $\times$ 10 cardinalities for the datatypes). However, our intuition tells us that any counterexample found with a small scope would still be a counterexample in a larger scope---by simply ignoring the fresh $'a$ and $'b$ values provided by the larger scope. Nitpick comes to the same conclusion after a careful inspection of the formula and the relevant definitions:
\prew
\textbf{nitpick}~[\textit{verbose}] \\[2\smallskipamount]
\slshape The types $'a$ and $'b$ passed the monotonicity test; Nitpick might be able to skip some scopes. \\[2\smallskipamount]
Trying 10 scopes: \\
\hbox{}\qquad \textit{card} $'a$~= 1, \textit{card} $'b$~= 1, \textit{card} \textit{nat}~= 1, \textit{card} ``$('a \times {'}b)$ \textit{list\/}''~= 1, \\
\hbox{}\qquad\quad \textit{card} ``\kern1pt$'a$ \textit{list\/}''~= 1, and \textit{card} ``\kern1pt$'b$ \textit{list\/}''~= 1; \\
\hbox{}\qquad \textit{card} $'a$~= 2, \textit{card} $'b$~= 2, \textit{card} \textit{nat}~= 2, \textit{card} ``$('a \times {'}b)$ \textit{list\/}''~= 2, \\
\hbox{}\qquad\quad \textit{card} ``\kern1pt$'a$ \textit{list\/}''~= 2, and \textit{card} ``\kern1pt$'b$ \textit{list\/}''~= 2; \\
\hbox{}\qquad $\qquad\vdots$ \\[.5\smallskipamount]
\hbox{}\qquad \textit{card} $'a$~= 10, \textit{card} $'b$~= 10, \textit{card} \textit{nat}~= 10, \textit{card} ``$('a \times {'}b)$ \textit{list\/}''~= 10, \\
\hbox{}\qquad\quad \textit{card} ``\kern1pt$'a$ \textit{list\/}''~= 10, and \textit{card} ``\kern1pt$'b$ \textit{list\/}''~= 10 \\[2\smallskipamount]
Nitpick found a counterexample for \textit{card} $'a$~= 5, \textit{card} $'b$~= 5, \textit{card} \textit{nat}~= 5, \textit{card} ``$('a \times {'}b)$ \textit{list\/}''~= 5, \textit{card} ``\kern1pt$'a$ \textit{list\/}''~= 5, and \textit{card} ``\kern1pt$'b$ \textit{list\/}''~= 5: \\[2\smallskipamount]
\hbox{}\qquad Free variables: \nopagebreak \\
\hbox{}\qquad\qquad $\textit{xs} = [a_1, a_2]$ \\
\hbox{}\qquad\qquad $\textit{ys} = [b_1, b_1]$ \\[2\smallskipamount]
Total time: 1.63 s.
\postw
In theory, it should be sufficient to test a single scope:
\prew
\textbf{nitpick}~[\textit{card}~= 10]
\postw
However, this is often less efficient in practice and may lead to overly complex counterexamples. If the monotonicity check fails but we believe that the formula is monotonic (or we don't mind missing some counterexamples), we can pass the \textit{mono} option. To convince yourself that this option is risky, simply consider this example from \S\ref{skolemization}:
\prew
\textbf{lemma} ``$\exists g.\; \forall x\Colon 'b.~g~(f~x) = x \,\Longrightarrow\, \forall y\Colon {'}a.\; \exists x.~y = f~x$'' \\
\textbf{nitpick} [\textit{mono}] \\[2\smallskipamount]
{\slshape Nitpick found no counterexample} \\[2\smallskipamount]
\textbf{nitpick} \\[2\smallskipamount]
\slshape Nitpick found a counterexample for \textit{card} $'a$~= 2 and \textit{card} $'b$~=~1: \\
\hbox{}\qquad $\vdots$
\postw
(It turns out the formula holds if and only if $\textit{card}~'a \le \textit{card}~'b$.)
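If we trust the monotonicity of one type but not of the others, the option can also be passed on a per-type basis. The following invocation is a sketch of that syntax, which is documented in \S\ref{scope-of-search}; the same caveat about missed counterexamples applies:
\prew
\textbf{nitpick}~[\textit{mono}~${'}a$]
\postw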
Although this is rarely advisable, the automatic monotonicity checks can be disabled by passing \textit{non\_mono} (\S\ref{optimizations}).
As insinuated in \S\ref{natural-numbers-and-integers} and \S\ref{inductive-datatypes}, \textit{nat}, \textit{int}, and inductive datatypes are normally monotonic and treated as such. The same is true for record types, \textit{rat}, and \textit{real}. Thus, given the cardinality specification 1--10, a formula involving \textit{nat}, \textit{int}, \textit{int~list}, \textit{rat}, and \textit{rat~list} will lead Nitpick to consider only 10~scopes instead of $10^4 = 10\,000$. On the other hand, \textbf{typedef}s and quotient types are generally nonmonotonic.
\subsection{Inductive Properties}
\label{inductive-properties}
Inductive properties are a particular pain to prove, because the failure to establish an induction step can mean several things:
%
\begin{enumerate}
\item The property is invalid.
\item The property is valid but is too weak to support the induction step.
\item The property is valid and strong enough; it's just that we haven't found the proof yet.
\end{enumerate}
%
Depending on which scenario applies, we would take the appropriate course of action:
%
\begin{enumerate}
\item Repair the statement of the property so that it becomes valid.
\item Generalize the property and/or prove auxiliary properties.
\item Work harder on a proof.
\end{enumerate}
%
How can we distinguish between the three scenarios? Nitpick's normal mode of operation can often detect scenario 1, and Isabelle's automatic tactics help with scenario 3. Using appropriate techniques, it is also often possible to use Nitpick to identify scenario 2. Consider the following transition system, in which natural numbers represent states:
\prew
\textbf{inductive\_set}~\textit{reach}~\textbf{where} \\
``$(4\Colon\textit{nat}) \in \textit{reach\/}$'' $\mid$ \\
``$\lbrakk n < 4;\> n \in \textit{reach\/}\rbrakk \,\Longrightarrow\, 3 * n + 1 \in \textit{reach\/}$'' $\mid$ \\
``$n \in \textit{reach} \,\Longrightarrow\, n + 2 \in \textit{reach\/}$''
\postw
We will try to prove that only even numbers are reachable:
\prew
\textbf{lemma}~``$n \in \textit{reach} \,\Longrightarrow\, 2~\textrm{dvd}~n$''
\postw
Does this property hold? Nitpick cannot find a counterexample within 30 seconds, so let's attempt a proof by induction:
\prew
\textbf{apply}~(\textit{induct~set}{:}~\textit{reach\/}) \\
\textbf{apply}~\textit{auto}
\postw
This leaves us in the following proof state:
\prew
{\slshape goal (2 subgoals): \\
\phantom{0}1. ${\bigwedge}n.\;\, \lbrakk n \in \textit{reach\/};\, n < 4;\, 2~\textsl{dvd}~n\rbrakk \,\Longrightarrow\, 2~\textsl{dvd}~\textit{Suc}~(3 * n)$ \\
\phantom{0}2. ${\bigwedge}n.\;\, \lbrakk n \in \textit{reach\/};\, 2~\textsl{dvd}~n\rbrakk \,\Longrightarrow\, 2~\textsl{dvd}~\textit{Suc}~(\textit{Suc}~n)$ }
\postw
If we run Nitpick on the first subgoal, it still won't find any counterexample; and yet, \textit{auto} fails to go further, and \textit{arith} is helpless. However, notice the $n \in \textit{reach}$ assumption, which strengthens the induction hypothesis but is not immediately usable in the proof. If we remove it and invoke Nitpick, this time we get a counterexample:
\prew
\textbf{apply}~(\textit{thin\_tac}~``$n \in \textit{reach\/}$'') \\
\textbf{nitpick} \\[2\smallskipamount]
\slshape Nitpick found a counterexample: \\[2\smallskipamount]
\hbox{}\qquad Skolem constant: \nopagebreak \\
\hbox{}\qquad\qquad $n = 0$
\postw
Indeed, $0 < 4$ and 2 divides 0, but 2 does not divide $\textit{Suc}~(3 * 0) = 1$.
We can use this information to strengthen the lemma:
\prew
\textbf{lemma}~``$n \in \textit{reach} \,\Longrightarrow\, 2~\textrm{dvd}~n \mathrel{\lor} n \not= 0$''
\postw
Unfortunately, the proof by induction still gets stuck, except that Nitpick now finds the counterexample $n = 2$. We generalize the lemma further to
\prew
\textbf{lemma}~``$n \in \textit{reach} \,\Longrightarrow\, 2~\textrm{dvd}~n \mathrel{\lor} n \ge 4$''
\postw
and this time \textit{arith} can finish off the subgoals.
\section{Case Studies}
\label{case-studies}
As a didactic device, the previous section focused mostly on toy formulas whose validity can easily be assessed just by looking at the formula. We will now review two somewhat more realistic case studies that are within Nitpick's reach:\ a context-free grammar modeled by mutually inductive sets and a functional implementation of AA trees. The results presented in this section were produced with the following settings:
\prew
\textbf{nitpick\_params} [\textit{max\_potential}~= 0]
\postw
\subsection{A Context-Free Grammar}
\label{a-context-free-grammar}
Our first case study is taken from section 7.4 in the Isabelle tutorial \cite{isa-tutorial}. The following grammar, originally due to Hopcroft and Ullman, produces all strings with an equal number of $a$'s and $b$'s:
\prew
\begin{tabular}{@{}r@{$\;\,$}c@{$\;\,$}l@{}}
$S$ & $::=$ & $\epsilon \mid bA \mid aB$ \\
$A$ & $::=$ & $aS \mid bAA$ \\
$B$ & $::=$ & $bS \mid aBB$
\end{tabular}
\postw
The intuition behind the grammar is that $A$ generates all strings with one more $a$ than $b$'s and $B$ generates all strings with one more $b$ than $a$'s. The alphabet consists exclusively of $a$'s and $b$'s:
\prew
\textbf{datatype} \textit{alphabet}~= $a$ $\mid$ $b$
\postw
Strings over the alphabet are represented by \textit{alphabet list}s. Nonterminals in the grammar become sets of strings. The production rules presented above can be expressed as a mutually inductive definition:
\prew
\textbf{inductive\_set} $S$ \textbf{and} $A$ \textbf{and} $B$ \textbf{where} \\
\textit{R1}:\kern.4em ``$[] \in S$'' $\,\mid$ \\
\textit{R2}:\kern.4em ``$w \in A\,\Longrightarrow\, b \mathbin{\#} w \in S$'' $\,\mid$ \\
\textit{R3}:\kern.4em ``$w \in B\,\Longrightarrow\, a \mathbin{\#} w \in S$'' $\,\mid$ \\
\textit{R4}:\kern.4em ``$w \in S\,\Longrightarrow\, a \mathbin{\#} w \in A$'' $\,\mid$ \\
\textit{R5}:\kern.4em ``$w \in S\,\Longrightarrow\, b \mathbin{\#} w \in S$'' $\,\mid$ \\
\textit{R6}:\kern.4em ``$\lbrakk v \in B;\> v \in B\rbrakk \,\Longrightarrow\, a \mathbin{\#} v \mathbin{@} w \in B$''
\postw
The conversion of the grammar into the inductive definition was done manually by Joe Blow, an underpaid undergraduate student. As a result, some errors might have sneaked in.
Debugging faulty specifications is at the heart of Nitpick's \textsl{raison d'\^etre}. A good approach is to state desirable properties of the specification (here, that $S$ is exactly the set of strings over $\{a, b\}$ with as many $a$'s as $b$'s) and check them with Nitpick. If the properties are correctly stated, counterexamples will point to bugs in the specification. For our grammar example, we will proceed in two steps, separating the soundness and the completeness of the set $S$.
First, soundness: \prew \textbf{theorem}~\textit{S\_sound\/}: \\ ``$w \in S \longrightarrow \textit{length}~[x\mathbin{\leftarrow} w.\; x = a] = \textit{length}~[x\mathbin{\leftarrow} w.\; x = b]$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $w = [b]$ \postw It would seem that $[b] \in S$. How could this be? An inspection of the introduction rules reveals that the only rule with a right-hand side of the form $b \mathbin{\#} {\ldots} \in S$ that could have introduced $[b]$ into $S$ is \textit{R5}: \prew ``$w \in S\,\Longrightarrow\, b \mathbin{\#} w \in S$'' \postw On closer inspection, we can see that this rule is wrong. To match the production $B ::= bS$, the second $S$ should be a $B$. We fix the typo and try again: \prew \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $w = [a, a, b]$ \postw Some detective work is necessary to find out what went wrong here. To get $[a, a, b] \in S$, we need $[a, b] \in B$ by \textit{R3}, which in turn can only come from \textit{R6}: \prew ``$\lbrakk v \in B;\> v \in B\rbrakk \,\Longrightarrow\, a \mathbin{\#} v \mathbin{@} w \in B$'' \postw Now, this formula must be wrong: The same assumption occurs twice, and the variable $w$ is unconstrained. Clearly, one of the two occurrences of $v$ in the assumptions should have been a $w$. With the correction made, we don't get any counterexample from Nitpick. Let's move on and check completeness: \prew \textbf{theorem}~\textit{S\_complete}: \\ ``$\textit{length}~[x\mathbin{\leftarrow} w.\; x = a] = \textit{length}~[x\mathbin{\leftarrow} w.\; x = b] \longrightarrow w \in S$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variable: \nopagebreak \\ \hbox{}\qquad\qquad $w = [b, b, a, a]$ \postw Apparently, $[b, b, a, a] \notin S$, even though it has the same numbers of $a$'s and $b$'s. But since our inductive definition passed the soundness check, the introduction rules we have are probably correct. Perhaps we simply lack an introduction rule. Comparing the grammar with the inductive definition, our suspicion is confirmed: Joe Blow simply forgot the production $A ::= bAA$, without which the grammar cannot generate two or more $b$'s in a row. So we add the rule \prew ``$\lbrakk v \in A;\> w \in A\rbrakk \,\Longrightarrow\, b \mathbin{\#} v \mathbin{@} w \in A$'' \postw With this last change, we don't get any counterexamples from Nitpick for either soundness or completeness. 
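For reference, here is the fully repaired definition, assembled from the three fixes above (the name \textit{R7} for the new rule is ours):
\prew
\textbf{inductive\_set} $S$ \textbf{and} $A$ \textbf{and} $B$ \textbf{where} \\
\textit{R1}:\kern.4em ``$[] \in S$'' $\,\mid$ \\
\textit{R2}:\kern.4em ``$w \in A\,\Longrightarrow\, b \mathbin{\#} w \in S$'' $\,\mid$ \\
\textit{R3}:\kern.4em ``$w \in B\,\Longrightarrow\, a \mathbin{\#} w \in S$'' $\,\mid$ \\
\textit{R4}:\kern.4em ``$w \in S\,\Longrightarrow\, a \mathbin{\#} w \in A$'' $\,\mid$ \\
\textit{R5}:\kern.4em ``$w \in S\,\Longrightarrow\, b \mathbin{\#} w \in B$'' $\,\mid$ \\
\textit{R6}:\kern.4em ``$\lbrakk v \in B;\> w \in B\rbrakk \,\Longrightarrow\, a \mathbin{\#} v \mathbin{@} w \in B$'' $\,\mid$ \\
\textit{R7}:\kern.4em ``$\lbrakk v \in A;\> w \in A\rbrakk \,\Longrightarrow\, b \mathbin{\#} v \mathbin{@} w \in A$''
\postw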
We can even generalize our result to cover $A$ and $B$ as well:
\prew
\textbf{theorem} \textit{S\_A\_B\_sound\_and\_complete}: \\
``$w \in S \longleftrightarrow \textit{length}~[x \mathbin{\leftarrow} w.\; x = a] = \textit{length}~[x \mathbin{\leftarrow} w.\; x = b]$'' \\
``$w \in A \longleftrightarrow \textit{length}~[x \mathbin{\leftarrow} w.\; x = a] = \textit{length}~[x \mathbin{\leftarrow} w.\; x = b] + 1$'' \\
``$w \in B \longleftrightarrow \textit{length}~[x \mathbin{\leftarrow} w.\; x = b] = \textit{length}~[x \mathbin{\leftarrow} w.\; x = a] + 1$'' \\
\textbf{nitpick} \\[2\smallskipamount]
\slshape Nitpick found no counterexample
\postw
\subsection{AA Trees}
\label{aa-trees}
AA trees are a kind of balanced tree discovered by Arne Andersson that provides similar performance to red-black trees, but with a simpler implementation \cite{andersson-1993}. They can be used to store sets of elements equipped with a total order $<$. We start by defining the datatype and some basic extractor functions:
\prew
\textbf{datatype} $'a$~\textit{aa\_tree} = \\
\hbox{}\quad $\Lambda$ $\mid$ $N$ ``\kern1pt$'a\Colon \textit{linorder\/}$'' \textit{nat} ``\kern1pt$'a$ \textit{aa\_tree}'' ``\kern1pt$'a$ \textit{aa\_tree}'' \\[2\smallskipamount]
\textbf{primrec} \textit{data} \textbf{where} \\
``$\textit{data}~\Lambda = \unkef$'' $\,\mid$ \\
``$\textit{data}~(N~x~\_~\_~\_) = x$'' \\[2\smallskipamount]
\textbf{primrec} \textit{dataset} \textbf{where} \\
``$\textit{dataset}~\Lambda = \{\}$'' $\,\mid$ \\
``$\textit{dataset}~(N~x~\_~t~u) = \{x\} \cup \textit{dataset}~t \mathrel{\cup} \textit{dataset}~u$'' \\[2\smallskipamount]
\textbf{primrec} \textit{level} \textbf{where} \\
``$\textit{level}~\Lambda = 0$'' $\,\mid$ \\
``$\textit{level}~(N~\_~k~\_~\_) = k$'' \\[2\smallskipamount]
\textbf{primrec} \textit{left} \textbf{where} \\
``$\textit{left}~\Lambda = \Lambda$'' $\,\mid$ \\
``$\textit{left}~(N~\_~\_~t~\_) = t$'' \\[2\smallskipamount]
\textbf{primrec} \textit{right} \textbf{where} \\
``$\textit{right}~\Lambda = \Lambda$'' $\,\mid$ \\
``$\textit{right}~(N~\_~\_~\_~u) = u$''
\postw
The wellformedness criterion for AA trees is fairly complex. Wikipedia states it as follows \cite{wikipedia-2009-aa-trees}:
\kern.2\parskip %% TYPESETTING
\pre
Each node has a level field, and the following invariants must remain true for the tree to be valid:
\raggedright
\kern-.4\parskip %% TYPESETTING
\begin{enum}
\item[]
\begin{enum}
\item[1.] The level of a leaf node is one.
\item[2.] The level of a left child is strictly less than that of its parent.
\item[3.] The level of a right child is less than or equal to that of its parent.
\item[4.] The level of a right grandchild is strictly less than that of its grandparent.
\item[5.] Every node of level greater than one must have two children.
\end{enum}
\end{enum}
\post
\kern.4\parskip %% TYPESETTING
The \textit{wf} predicate formalizes this description:
\prew
\textbf{primrec} \textit{wf} \textbf{where} \\
``$\textit{wf}~\Lambda = \textit{True\/}$'' $\,\mid$ \\
``$\textit{wf}~(N~\_~k~t~u) =$ \\
\phantom{``}$(\textrm{if}~t = \Lambda~\textrm{then}$ \\
\phantom{``$(\quad$}$k = 1 \mathrel{\land} (u = \Lambda \mathrel{\lor} (\textit{level}~u = 1 \mathrel{\land} \textit{left}~u = \Lambda \mathrel{\land} \textit{right}~u = \Lambda))$ \\
\phantom{``$($}$\textrm{else}$ \\
\hbox{}\phantom{``$(\quad$}$\textit{wf}~t \mathrel{\land} \textit{wf}~u \mathrel{\land} u \not= \Lambda \mathrel{\land} \textit{level}~t < k \mathrel{\land} \textit{level}~u \le k$ \\
\hbox{}\phantom{``$(\quad$}${\land}\; \textit{level}~(\textit{right}~u) < k)$''
\postw
Rebalancing the tree upon insertion and removal of elements is performed by two auxiliary functions called \textit{skew} and \textit{split}, defined below:
\prew
\textbf{primrec} \textit{skew} \textbf{where} \\
``$\textit{skew}~\Lambda = \Lambda$'' $\,\mid$ \\
``$\textit{skew}~(N~x~k~t~u) = {}$ \\
\phantom{``}$(\textrm{if}~t \not= \Lambda \mathrel{\land} k = \textit{level}~t~\textrm{then}$ \\
\phantom{``(\quad}$N~(\textit{data}~t)~k~(\textit{left}~t)~(N~x~k~ (\textit{right}~t)~u)$ \\
\phantom{``(}$\textrm{else}$ \\
\phantom{``(\quad}$N~x~k~t~u)$''
\postw
\prew
\textbf{primrec} \textit{split} \textbf{where} \\
``$\textit{split}~\Lambda = \Lambda$'' $\,\mid$ \\
``$\textit{split}~(N~x~k~t~u) = {}$ \\
\phantom{``}$(\textrm{if}~u \not= \Lambda \mathrel{\land} k = \textit{level}~(\textit{right}~u)~\textrm{then}$ \\
\phantom{``(\quad}$N~(\textit{data}~u)~(\textit{Suc}~k)~ (N~x~k~t~(\textit{left}~u))~(\textit{right}~u)$ \\
\phantom{``(}$\textrm{else}$ \\
\phantom{``(\quad}$N~x~k~t~u)$''
\postw
Performing a \textit{skew} or a \textit{split} should have no impact on the set of elements stored in the tree:
\prew
\textbf{theorem}~\textit{dataset\_skew\_split\/}:\\
``$\textit{dataset}~(\textit{skew}~t) = \textit{dataset}~t$'' \\
``$\textit{dataset}~(\textit{split}~t) = \textit{dataset}~t$'' \\
\textbf{nitpick} \\[2\smallskipamount]
{\slshape Nitpick ran out of time after checking 9 of 10 scopes}
\postw
Furthermore, applying \textit{skew} or \textit{split} to a well-formed tree should not alter the tree:
\prew
\textbf{theorem}~\textit{wf\_skew\_split\/}:\\
``$\textit{wf}~t\,\Longrightarrow\, \textit{skew}~t = t$'' \\
``$\textit{wf}~t\,\Longrightarrow\, \textit{split}~t = t$'' \\
\textbf{nitpick} \\[2\smallskipamount]
{\slshape Nitpick found no counterexample}
\postw
Insertion is implemented recursively. It preserves the sort order:
\prew
\textbf{primrec}~\textit{insort} \textbf{where} \\
``$\textit{insort}~\Lambda~x = N~x~1~\Lambda~\Lambda$'' $\,\mid$ \\
``$\textit{insort}~(N~y~k~t~u)~x =$ \\
\phantom{``}$({*}~(\textit{split} \circ \textit{skew})~{*})~(N~y~k~(\textrm{if}~x < y~\textrm{then}~\textit{insort}~t~x~\textrm{else}~t)$ \\
\phantom{``$({*}~(\textit{split} \circ \textit{skew})~{*})~(N~y~k~$}$(\textrm{if}~x > y~\textrm{then}~\textit{insort}~u~x~\textrm{else}~u))$''
\postw
Notice that we deliberately commented out the application of \textit{skew} and \textit{split}.
Let's see if this causes any problems: \prew \textbf{theorem}~\textit{wf\_insort\/}:\kern.4em ``$\textit{wf}~t\,\Longrightarrow\, \textit{wf}~(\textit{insort}~t~x)$'' \\ \textbf{nitpick} \\[2\smallskipamount] \slshape Nitpick found a counterexample for \textit{card} $'a$ = 4: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $t = N~a_1~1~\Lambda~\Lambda$ \\ \hbox{}\qquad\qquad $x = a_2$ \postw It's hard to see why this is a counterexample. To improve readability, we will restrict the theorem to \textit{nat}, so that we don't need to look up the value of the $\textit{op}~{<}$ constant to find out which element is smaller than the other. In addition, we will tell Nitpick to display the value of $\textit{insort}~t~x$ using the \textit{eval} option. This gives \prew \textbf{theorem} \textit{wf\_insort\_nat\/}:\kern.4em ``$\textit{wf}~t\,\Longrightarrow\, \textit{wf}~(\textit{insort}~t~(x\Colon\textit{nat}))$'' \\ \textbf{nitpick} [\textit{eval} = ``$\textit{insort}~t~x$''] \\[2\smallskipamount] \slshape Nitpick found a counterexample: \\[2\smallskipamount] \hbox{}\qquad Free variables: \nopagebreak \\ \hbox{}\qquad\qquad $t = N~1~1~\Lambda~\Lambda$ \\ \hbox{}\qquad\qquad $x = 0$ \\ \hbox{}\qquad Evaluated term: \\ \hbox{}\qquad\qquad $\textit{insort}~t~x = N~1~1~(N~0~1~\Lambda~\Lambda)~\Lambda$ \postw Nitpick's output reveals that the element $0$ was added as a left child of $1$, where both nodes have a level of 1. This violates the second AA tree invariant, which states that a left child's level must be less than its parent's. This shouldn't come as a surprise, considering that we commented out the tree rebalancing code. Reintroducing the code seems to solve the problem: \prew \textbf{theorem}~\textit{wf\_insort\/}:\kern.4em ``$\textit{wf}~t\,\Longrightarrow\, \textit{wf}~(\textit{insort}~t~x)$'' \\ \textbf{nitpick} \\[2\smallskipamount] {\slshape Nitpick ran out of time after checking 8 of 10 scopes} \postw Insertion should transform the set of elements represented by the tree in the obvious way: \prew \textbf{theorem} \textit{dataset\_insort\/}:\kern.4em ``$\textit{dataset}~(\textit{insort}~t~x) = \{x\} \cup \textit{dataset}~t$'' \\ \textbf{nitpick} \\[2\smallskipamount] {\slshape Nitpick ran out of time after checking 7 of 10 scopes} \postw We could continue like this and sketch a full-blown theory of AA trees. Once the definitions and main theorems are in place and have been thoroughly tested using Nitpick, we could start working on the proofs. Developing theories this way usually saves time, because faulty theorems and definitions are discovered much earlier in the process. 
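When Nitpick runs out of time, as it did for several of the checks above, it can also be worth retrying with a longer time limit before drawing any conclusions. The invocation below is a sketch, with an arbitrary value of 120 seconds; the \textit{timeout} option is described in \S\ref{timeouts}:
\prew
\textbf{nitpick}~[\textit{timeout} = 120]
\postw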
\section{Option Reference} \label{option-reference} \def\defl{\{} \def\defr{\}} \def\flushitem#1{\item[]\noindent\kern-\leftmargin \textbf{#1}} \def\qty#1{$\left<\textit{#1}\right>$} \def\qtybf#1{$\mathbf{\left<\textbf{\textit{#1}}\right>}$} \def\optrue#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{bool}$\bigr]$\enskip \defl\textit{true}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opfalse#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{bool}$\bigr]$\enskip \defl\textit{false}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opsmart#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\enskip \defl\textit{smart}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opnodefault#1#2{\flushitem{\textit{#1} = \qtybf{#2}} \nopagebreak\\[\parskip]} \def\opdefault#1#2#3{\flushitem{\textit{#1} = \qtybf{#2}\enskip \defl\textit{#3}\defr} \nopagebreak\\[\parskip]} \def\oparg#1#2#3{\flushitem{\textit{#1} \qtybf{#2} = \qtybf{#3}} \nopagebreak\\[\parskip]} \def\opargbool#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{bool}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]} \def\opargboolorsmart#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]} Nitpick's behavior can be influenced by various options, which can be specified in brackets after the \textbf{nitpick} command. Default values can be set using \textbf{nitpick\_\allowbreak params}. For example: \prew \textbf{nitpick\_params} [\textit{verbose}, \,\textit{timeout} = 60] \postw The options are categorized as follows:\ mode of operation (\S\ref{mode-of-operation}), scope of search (\S\ref{scope-of-search}), output format (\S\ref{output-format}), regression testing (\S\ref{regression-testing}), optimizations (\S\ref{optimizations}), and timeouts (\S\ref{timeouts}). Nitpick also provides an automatic mode that can be enabled via the ``Auto Nitpick'' option under ``Plugins > Plugin Options > Isabelle > General'' in Isabelle/jEdit. For automatic runs, \textit{user\_axioms} (\S\ref{mode-of-operation}), \textit{assms} (\S\ref{mode-of-operation}), and \textit{mono} (\S\ref{scope-of-search}) are implicitly enabled, \textit{verbose} (\S\ref{output-format}) and \textit{debug} (\S\ref{output-format}) are disabled, \textit{max\_potential} (\S\ref{output-format}) is taken to be 0, \textit{max\_threads} (\S\ref{optimizations}) is taken to be 1, and \textit{timeout} (\S\ref{timeouts}) is superseded by the ``Auto Time Limit'' in Isabelle/jEdit. Nitpick's output is also more concise. The number of options can be overwhelming at first glance. Do not let that worry you: Nitpick's defaults have been chosen so that it almost always does the right thing, and the most important options have been covered in context in \S\ref{first-steps}. The descriptions below refer to the following syntactic quantities: \begin{enum} \item[\labelitemi] \qtybf{string}: A string. \item[\labelitemi] \qtybf{string\_list\/}: A space-separated list of strings (e.g., ``\textit{ichi ni san}''). \item[\labelitemi] \qtybf{bool\/}: \textit{true} or \textit{false}. \item[\labelitemi] \qtybf{smart\_bool\/}: \textit{true}, \textit{false}, or \textit{smart}. \item[\labelitemi] \qtybf{int\/}: An integer. Negative integers are prefixed with a hyphen. \item[\labelitemi] \qtybf{smart\_int\/}: An integer or \textit{smart}. \item[\labelitemi] \qtybf{int\_range}: An integer (e.g., 3) or a range of nonnegative integers (e.g., $1$--$4$). 
The range symbol `--' is entered as \texttt{-} (hyphen).
\item[\labelitemi] \qtybf{int\_seq}: A comma-separated sequence of ranges of integers (e.g.,~1{,}3{,}\allowbreak6--8).
\item[\labelitemi] \qtybf{float}: A floating-point number (e.g., 0.5 or 60) expressing a number of seconds.
\item[\labelitemi] \qtybf{const\/}: The name of a HOL constant.
\item[\labelitemi] \qtybf{term}: A HOL term (e.g., ``$f~x$'').
\item[\labelitemi] \qtybf{term\_list\/}: A space-separated list of HOL terms (e.g., ``$f~x$''~``$g~y$'').
\item[\labelitemi] \qtybf{type}: A HOL type.
\end{enum}
Default values are indicated in curly brackets (\textrm{\{\}}). Boolean options have a negated counterpart (e.g., \textit{mono} vs.\ \textit{non\_mono}). When setting them, ``= \textit{true}'' may be omitted.
\subsection{Mode of Operation}
\label{mode-of-operation}
\begin{enum}
\optrue{falsify}{satisfy}
Specifies whether Nitpick should look for falsifying examples (countermodels) or satisfying examples (models). This manual assumes throughout that \textit{falsify} is enabled.
\opsmart{user\_axioms}{no\_user\_axioms}
Specifies whether the user-defined axioms (specified using \textbf{axiomatization} and \textbf{axioms}) should be considered. If the option is set to \textit{smart}, Nitpick performs an ad hoc axiom selection based on the constants that occur in the formula to falsify. The option is implicitly set to \textit{true} for automatic runs.
\textbf{Warning:} If the option is set to \textit{true}, Nitpick might nonetheless ignore some polymorphic axioms. Counterexamples generated under these conditions are tagged as ``quasi genuine.'' The \textit{debug} (\S\ref{output-format}) option can be used to find out which axioms were considered.
\nopagebreak
{\small See also \textit{assms} (\S\ref{mode-of-operation}) and \textit{debug} (\S\ref{output-format}).}
\optrue{assms}{no\_assms}
Specifies whether the relevant assumptions in structured proofs should be considered. The option is implicitly enabled for automatic runs.
\nopagebreak
{\small See also \textit{user\_axioms} (\S\ref{mode-of-operation}).}
\opfalse{spy}{dont\_spy}
Specifies whether Nitpick should record statistics in \texttt{\$ISA\-BELLE\_\allowbreak HOME\_\allowbreak USER/\allowbreak spy\_\allowbreak nitpick}. These statistics can be useful to the developer of Nitpick. If you are willing to have your interactions recorded in the name of science, please enable this feature and send the statistics file every now and then to the author of this manual (\authoremail). To change the default value of this option globally, set the environment variable \texttt{NITPICK\_SPY} to \texttt{yes}.
\nopagebreak
{\small See also \textit{debug} (\S\ref{output-format}).}
\opfalse{overlord}{no\_overlord}
Specifies whether Nitpick should put its temporary files in \texttt{\$ISABELLE\_\allowbreak HOME\_\allowbreak USER}, which is useful for debugging Nitpick but also unsafe if several instances of the tool are run simultaneously. The files are identified by the extensions \texttt{.kki}, \texttt{.cnf}, \texttt{.out}, and \texttt{.err}; you may safely remove them after Nitpick has run.
\textbf{Warning:} This option is not thread-safe. Use at your own risk.
\nopagebreak
{\small See also \textit{debug} (\S\ref{output-format}).}
\end{enum}
\subsection{Scope of Search}
\label{scope-of-search}
\begin{enum}
\oparg{card}{type}{int\_seq}
Specifies the sequence of cardinalities to use for a given type.
For free types, and often also for \textbf{typedecl}'d types, it usually makes sense to specify cardinalities as a range of the form \textit{$1$--$n$}.
\nopagebreak
{\small See also \textit{box} (\S\ref{scope-of-search}) and \textit{mono} (\S\ref{scope-of-search}).}
\opdefault{card}{int\_seq}{\upshape 1--10}
Specifies the default sequence of cardinalities to use. This can be overridden on a per-type basis using the \textit{card}~\qty{type} option described above.
\oparg{max}{const}{int\_seq}
Specifies the sequence of maximum multiplicities to use for a given (co)in\-duc\-tive datatype constructor. A constructor's multiplicity is the number of distinct values that it can construct. Nonsensical values (e.g., \textit{max}~[]~$=$~2) are silently repaired. This option is only available for datatypes equipped with several constructors.
\opnodefault{max}{int\_seq}
Specifies the default sequence of maximum multiplicities to use for (co)in\-duc\-tive datatype constructors. This can be overridden on a per-constructor basis using the \textit{max}~\qty{const} option described above.
\opsmart{binary\_ints}{unary\_ints}
Specifies whether natural numbers and integers should be encoded using a unary or binary notation. In unary mode, the cardinality fully specifies the subset used to approximate the type. For example:
%
$$\hbox{\begin{tabular}{@{}rll@{}}%
\textit{card nat} = 4 & induces & $\{0,\, 1,\, 2,\, 3\}$ \\
\textit{card int} = 4 & induces & $\{-1,\, 0,\, +1,\, +2\}$ \\
\textit{card int} = 5 & induces & $\{-2,\, -1,\, 0,\, +1,\, +2\}.$%
\end{tabular}}$$
%
In general:
%
$$\hbox{\begin{tabular}{@{}rll@{}}%
\textit{card nat} = $K$ & induces & $\{0,\, \ldots,\, K - 1\}$ \\
\textit{card int} = $K$ & induces & $\{-\lceil K/2 \rceil + 1,\, \ldots,\, +\lfloor K/2 \rfloor\}.$%
\end{tabular}}$$
%
In binary mode, the cardinality specifies the number of distinct values that can be constructed. Each of these values is represented by a bit pattern whose length is specified by the \textit{bits} (\S\ref{scope-of-search}) option. By default, Nitpick attempts to choose the more appropriate encoding by inspecting the formula at hand, preferring the binary notation for problems involving multiplicative operators or large constants.
\textbf{Warning:} For technical reasons, Nitpick always reverts to unary for problems that refer to the types \textit{rat} or \textit{real} or the constants \textit{Suc}, \textit{gcd}, or \textit{lcm}.
{\small See also \textit{bits} (\S\ref{scope-of-search}) and \textit{show\_types} (\S\ref{output-format}).}
\opdefault{bits}{int\_seq}{\upshape 1--10}
Specifies the number of bits to use to represent natural numbers and integers in binary, excluding the sign bit. The minimum is 1 and the maximum is 31.
{\small See also \textit{binary\_ints} (\S\ref{scope-of-search}).}
\opargboolorsmart{wf}{const}{non\_wf}
Specifies whether the specified (co)in\-duc\-tively defined predicate is well-founded. The option can take the following values:
\begin{enum}
\item[\labelitemi] \textbf{\textit{true}:} Tentatively treat the (co)in\-duc\-tive predicate as if it were well-founded. Since this is generally not sound when the predicate is not well-founded, the counterexamples are tagged as ``quasi genuine.''
\item[\labelitemi] \textbf{\textit{false}:} Treat the (co)in\-duc\-tive predicate as if it were not well-founded. The predicate is then unrolled as prescribed by the \textit{star\_linear\_preds}, \textit{iter}~\qty{const}, and \textit{iter} options.
\item[\labelitemi] \textbf{\textit{smart}:} Try to prove that the inductive predicate is well-founded using Isabelle's \textit{lexicographic\_order} and \textit{size\_change} tactics. If this succeeds (or the predicate occurs with an appropriate polarity in the formula to falsify), use an efficient fixed-point equation as the specification of the predicate; otherwise, unroll the predicate according to the \textit{iter}~\qty{const} and \textit{iter} options.
\end{enum}
\nopagebreak
{\small See also \textit{iter} (\S\ref{scope-of-search}), \textit{star\_linear\_preds} (\S\ref{optimizations}), and \textit{tac\_timeout} (\S\ref{timeouts}).}
\opsmart{wf}{non\_wf}
Specifies the default wellfoundedness setting to use. This can be overridden on a per-predicate basis using the \textit{wf}~\qty{const} option above.
\oparg{iter}{const}{int\_seq}
Specifies the sequence of iteration counts to use when unrolling a given (co)in\-duc\-tive predicate. By default, unrolling is applied for inductive predicates that occur negatively and coinductive predicates that occur positively in the formula to falsify and that cannot be proved to be well-founded, but this behavior is influenced by the \textit{wf} option. The iteration counts are automatically bounded by the cardinality of the predicate's domain.
{\small See also \textit{wf} (\S\ref{scope-of-search}) and \textit{star\_linear\_preds} (\S\ref{optimizations}).}
\opdefault{iter}{int\_seq}{\upshape 0{,}1{,}2{,}4{,}8{,}12{,}16{,}20{,}24{,}28}
Specifies the sequence of iteration counts to use when unrolling (co)in\-duc\-tive predicates. This can be overridden on a per-predicate basis using the \textit{iter} \qty{const} option above.
\opdefault{bisim\_depth}{int\_seq}{\upshape 9}
Specifies the sequence of iteration counts to use when unrolling the bisimilarity predicate generated by Nitpick for coinductive datatypes. A value of $-1$ means that no predicate is generated, in which case Nitpick performs an after-the-fact check to see if the known coinductive datatype values are bidissimilar. If two values are found to be bisimilar, the counterexample is tagged as ``quasi genuine.'' The iteration counts are automatically bounded by the sum of the cardinalities of the coinductive datatypes occurring in the formula to falsify.
\opargboolorsmart{box}{type}{dont\_box}
Specifies whether Nitpick should attempt to wrap (``box'') a given function or product type in an isomorphic datatype internally. Boxing is an effective means to reduce the search space and speed up Nitpick, because the isomorphic datatype is approximated by a subset of the possible function or pair values. Like other drastic optimizations, it can also prevent the discovery of counterexamples. The option can take the following values:
\begin{enum}
\item[\labelitemi] \textbf{\textit{true}:} Box the specified type whenever practicable.
\item[\labelitemi] \textbf{\textit{false}:} Never box the type.
\item[\labelitemi] \textbf{\textit{smart}:} Box the type only in contexts where it is likely to help. For example, $n$-tuples where $n > 2$ and arguments to higher-order functions are good candidates for boxing.
\end{enum}
\nopagebreak
{\small See also \textit{finitize} (\S\ref{scope-of-search}), \textit{verbose} (\S\ref{output-format}), and \textit{debug} (\S\ref{output-format}).}
\opsmart{box}{dont\_box}
Specifies the default boxing setting to use. This can be overridden on a per-type basis using the \textit{box}~\qty{type} option described above.
\opargboolorsmart{finitize}{type}{dont\_finitize}
Specifies whether Nitpick should attempt to finitize an infinite datatype. The option can take the following values:
\begin{enum}
\item[\labelitemi] \textbf{\textit{true}:} Finitize the datatype. Since this is unsound, counterexamples generated under these conditions are tagged as ``quasi genuine.''
\item[\labelitemi] \textbf{\textit{false}:} Don't attempt to finitize the datatype.
\item[\labelitemi] \textbf{\textit{smart}:} If the datatype's constructors don't appear in the problem, perform a monotonicity analysis to detect whether the datatype can be soundly finitized; otherwise, don't finitize it.
\end{enum}
\nopagebreak
{\small See also \textit{box} (\S\ref{scope-of-search}), \textit{mono} (\S\ref{scope-of-search}), \textit{verbose} (\S\ref{output-format}), and \textit{debug} (\S\ref{output-format}).}
\opsmart{finitize}{dont\_finitize}
Specifies the default finitization setting to use. This can be overridden on a per-type basis using the \textit{finitize}~\qty{type} option described above.
\opargboolorsmart{mono}{type}{non\_mono}
Specifies whether the given type should be considered monotonic when enumerating scopes and finitizing types. If the option is set to \textit{smart}, Nitpick performs a monotonicity check on the type. Setting this option to \textit{true} can reduce the number of scopes tried, but it can also diminish the chance of finding a counterexample, as demonstrated in \S\ref{scope-monotonicity}. The option is implicitly set to \textit{true} for automatic runs.
\nopagebreak
{\small See also \textit{card} (\S\ref{scope-of-search}), \textit{finitize} (\S\ref{scope-of-search}), \textit{merge\_type\_vars} (\S\ref{scope-of-search}), and \textit{verbose} (\S\ref{output-format}).}
\opsmart{mono}{non\_mono}
Specifies the default monotonicity setting to use. This can be overridden on a per-type basis using the \textit{mono}~\qty{type} option described above.
\opfalse{merge\_type\_vars}{dont\_merge\_type\_vars}
Specifies whether type variables with the same sort constraints should be merged. Setting this option to \textit{true} can reduce the number of scopes tried and the size of the generated Kodkod formulas, but it also diminishes the theoretical chance of finding a counterexample.
{\small See also \textit{mono} (\S\ref{scope-of-search}).}
\end{enum}
\subsection{Output Format}
\label{output-format}
\begin{enum}
\opfalse{verbose}{quiet}
Specifies whether the \textbf{nitpick} command should explain what it does. This option is useful to determine which scopes are tried or which SAT solver is used. This option is implicitly disabled for automatic runs.
\opfalse{debug}{no\_debug}
Specifies whether Nitpick should display additional debugging information beyond what \textit{verbose} already displays. Enabling \textit{debug} also enables \textit{verbose} and \textit{show\_all} behind the scenes. The \textit{debug} option is implicitly disabled for automatic runs.
\nopagebreak
{\small See also \textit{spy} (\S\ref{mode-of-operation}), \textit{overlord} (\S\ref{mode-of-operation}), and \textit{batch\_size} (\S\ref{optimizations}).}
\opfalse{show\_types}{hide\_types}
Specifies whether the subsets used to approximate (co)in\-duc\-tive data\-types should be displayed as part of counterexamples. Such subsets are sometimes helpful when investigating whether a potentially spurious counterexample is genuine, but their potential for clutter is real.
\optrue{show\_skolems}{hide\_skolems}
Specifies whether the values of Skolem constants should be displayed as part of counterexamples. Skolem constants correspond to bound variables in the original formula and usually help us to understand why the counterexample falsifies the formula.
\opfalse{show\_consts}{hide\_consts}
Specifies whether the values of constants occurring in the formula (including its axioms) should be displayed along with any counterexample. These values are sometimes helpful when investigating why a counterexample is genuine, but they can clutter the output.
\opnodefault{show\_all}{bool}
Abbreviation for \textit{show\_types}, \textit{show\_skolems}, and \textit{show\_consts}.
\opdefault{max\_potential}{int}{\upshape 1}
Specifies the maximum number of potentially spurious counterexamples to display. Setting this option to 0 speeds up the search for a genuine counterexample. This option is implicitly set to 0 for automatic runs. If you set this option to a value greater than 1, you will need an incremental SAT solver, such as \textit{MiniSat\_JNI} (recommended) or \textit{SAT4J}. Be aware that many of the counterexamples may be identical.
\nopagebreak
{\small See also \textit{sat\_solver} (\S\ref{optimizations}).}
\opdefault{max\_genuine}{int}{\upshape 1}
Specifies the maximum number of genuine counterexamples to display. If you set this option to a value greater than 1, you will need an incremental SAT solver, such as \textit{MiniSat\_JNI} (recommended) or \textit{SAT4J}. Be aware that many of the counterexamples may be identical.
\nopagebreak
{\small See also \textit{sat\_solver} (\S\ref{optimizations}).}
\opnodefault{eval}{term\_list}
Specifies the list of terms whose values should be displayed along with counterexamples. This option suffers from an ``observer effect'': Nitpick might find different counterexamples for different values of this option.
\oparg{atoms}{type}{string\_list}
Specifies the names to use to refer to the atoms of the given type. By default, Nitpick generates names of the form $a_1, \ldots, a_n$, where $a$ is the first letter of the type's name.
\opnodefault{atoms}{string\_list}
Specifies the default names to use to refer to atoms of any type. For example, to call the three atoms of type ${'}a$ \textit{ichi}, \textit{ni}, and \textit{san} instead of $a_1$, $a_2$, $a_3$, specify the option ``\textit{atoms}~${'}a$ = \textit{ichi~ni~san}''. The default names can be overridden on a per-type basis using the \textit{atoms}~\qty{type} option described above.
\oparg{format}{term}{int\_seq}
Specifies how to uncurry the value displayed for a variable or constant. Uncurrying sometimes increases the readability of the output for high-arity functions. For example, given the variable $y \mathbin{\Colon} {'a}\Rightarrow {'b}\Rightarrow {'c}\Rightarrow {'d}\Rightarrow {'e}\Rightarrow {'f}\Rightarrow {'g}$, setting \textit{format}~$y$ = 3 tells Nitpick to group the last three arguments, as if the type had been ${'a}\Rightarrow {'b}\Rightarrow {'c}\Rightarrow {'d}\times {'e}\times {'f}\Rightarrow {'g}$. In general, a list of values $n_1,\ldots,n_k$ tells Nitpick to show the last $n_k$ arguments as an $n_k$-tuple, the previous $n_{k-1}$ arguments as an $n_{k-1}$-tuple, and so on; arguments that are not accounted for are left alone, as if the specification had been $1,\ldots,1,n_1,\ldots,n_k$.
\opdefault{format}{int\_seq}{\upshape 1}
Specifies the default format to use.
Irrespective of the default format, the extra arguments to a Skolem constant corresponding to the outer bound variables are kept separated from the remaining arguments, the \textbf{for} arguments of an inductive definition are kept separated from the remaining arguments, and the iteration counter of an unrolled inductive definition is shown alone. The default format can be overridden on a per-variable or per-constant basis using the \textit{format}~\qty{term} option described above. \end{enum}

\subsection{Regression Testing} \label{regression-testing}

\begin{enum} \opnodefault{expect}{string} Specifies the expected outcome, which must be one of the following: \begin{enum} \item[\labelitemi] \textbf{\textit{genuine}:} Nitpick found a genuine counterexample. \item[\labelitemi] \textbf{\textit{quasi\_genuine}:} Nitpick found a ``quasi genuine'' counterexample (i.e., a counterexample that is genuine unless it contradicts a missing axiom or a dangerous option was used inappropriately). \item[\labelitemi] \textbf{\textit{potential}:} Nitpick found a potentially spurious counterexample. \item[\labelitemi] \textbf{\textit{none}:} Nitpick found no counterexample. \item[\labelitemi] \textbf{\textit{unknown}:} Nitpick encountered some problem (e.g., Kodkod ran out of memory). \end{enum} Nitpick emits an error if the actual outcome differs from the expected outcome. This option is useful for regression testing. \end{enum}

\subsection{Optimizations} \label{optimizations}

\def\cpp{C\nobreak\raisebox{.1ex}{+}\nobreak\raisebox{.1ex}{+}}

\sloppy

\begin{enum} \opdefault{sat\_solver}{string}{smart} Specifies which SAT solver to use. SAT solvers implemented in C or \cpp{} tend to be faster than their Java counterparts, but they can be more difficult to install. Also, if you set the \textit{max\_potential} (\S\ref{output-format}) or \textit{max\_genuine} (\S\ref{output-format}) option to a value greater than 1, you will need an incremental SAT solver, such as \textit{MiniSat\_JNI} (recommended) or \textit{SAT4J}. The supported solvers are listed below:

\begin{enum} \item[\labelitemi] \textbf{\textit{Lingeling\_JNI}:} Lingeling is an efficient solver written in C. The JNI (Java Native Interface) version of Lingeling is bundled with Kodkodi and is precompiled for Linux and Mac~OS~X. It is also available from the Kodkod web site \cite{kodkod-2009}.

\item[\labelitemi] \textbf{\textit{CryptoMiniSat}:} CryptoMiniSat is the winner of the 2010 SAT Race. To use CryptoMiniSat, set the environment variable \texttt{CRYPTO\-MINISAT\_}\discretionary{}{}{}\texttt{HOME} to the directory that contains the \texttt{crypto\-minisat} executable.%
\footnote{Important note for Cygwin users: The path must be specified using native Windows syntax. Make sure to escape backslashes properly.%
\label{cygwin-paths}} The \cpp{} sources and executables for Crypto\-Mini\-Sat are available at \url{http://planete.inrialpes.fr/~soos/}\allowbreak\url{CryptoMiniSat2/index.php}. Nitpick has been tested with version 2.51.

\item[\labelitemi] \textbf{\textit{CryptoMiniSat\_JNI}:} The JNI (Java Native Interface) version of CryptoMiniSat is bundled with Kodkodi and is precompiled for Linux and Mac~OS~X. It is also available from the Kodkod web site \cite{kodkod-2009}.

\item[\labelitemi] \textbf{\textit{MiniSat}:} MiniSat is an efficient solver written in \cpp{}.
To use MiniSat, set the environment variable \texttt{MINISAT\_HOME} to the directory that contains the \texttt{minisat} executable.% \footref{cygwin-paths} The \cpp{} sources and executables for MiniSat are available at \url{http://minisat.se/MiniSat.html}. Nitpick has been tested with versions 1.14 and 2.2. \item[\labelitemi] \textbf{\textit{MiniSat\_JNI}:} The JNI version of MiniSat is bundled with Kodkodi and is precompiled for Linux, Mac~OS~X, and Windows (Cygwin). It is also available from the Kodkod web site \cite{kodkod-2009}. Unlike the standard version of MiniSat, the JNI version can be used incrementally. \item[\labelitemi] \textbf{\textit{Riss3g}:} Riss3g is an efficient solver written in \cpp{}. To use Riss3g, set the environment variable \texttt{RISS3G\_HOME} to the directory that contains the \texttt{riss3g} executable.% \footref{cygwin-paths} The \cpp{} sources for Riss3g are available at \url{http://tools.computational-logic.org/content/riss3g.php}. Nitpick has been tested with the SAT Competition 2013 version. \item[\labelitemi] \textbf{\textit{zChaff}:} zChaff is an older solver written in \cpp{}. To use zChaff, set the environment variable \texttt{ZCHAFF\_HOME} to the directory that contains the \texttt{zchaff} executable.% \footref{cygwin-paths} The \cpp{} sources and executables for zChaff are available at \url{http://www.princeton.edu/~chaff/zchaff.html}. Nitpick has been tested with versions 2004-05-13, 2004-11-15, and 2007-03-12. \item[\labelitemi] \textbf{\textit{RSat}:} RSat is an efficient solver written in \cpp{}. To use RSat, set the environment variable \texttt{RSAT\_HOME} to the directory that contains the \texttt{rsat} executable.% \footref{cygwin-paths} The \cpp{} sources for RSat are available at \url{http://reasoning.cs.ucla.edu/rsat/}. Nitpick has been tested with version 2.01. \item[\labelitemi] \textbf{\textit{BerkMin}:} BerkMin561 is an efficient solver written in C. To use BerkMin, set the environment variable \texttt{BERKMIN\_HOME} to the directory that contains the \texttt{BerkMin561} executable.\footref{cygwin-paths} The BerkMin executables are available at \url{http://eigold.tripod.com/BerkMin.html}. \item[\labelitemi] \textbf{\textit{BerkMin\_Alloy}:} Variant of BerkMin that is included with Alloy 4 and calls itself ``sat56'' in its banner text. To use this version of BerkMin, set the environment variable \texttt{BERKMINALLOY\_HOME} to the directory that contains the \texttt{berkmin} executable.% \footref{cygwin-paths} \item[\labelitemi] \textbf{\textit{SAT4J}:} SAT4J is a reasonably efficient solver written in Java that can be used incrementally. It is bundled with Kodkodi and requires no further installation or configuration steps. Do not attempt to install the official SAT4J packages, because their API is incompatible with Kodkod. \item[\labelitemi] \textbf{\textit{SAT4J\_Light}:} Variant of SAT4J that is optimized for small problems. It can also be used incrementally. \item[\labelitemi] \textbf{\textit{smart}:} If \textit{sat\_solver} is set to \textit{smart}, Nitpick selects the first solver among the above that is recognized by Isabelle. If \textit{verbose} (\S\ref{output-format}) is enabled, Nitpick displays which SAT solver was chosen. \end{enum} \fussy \opdefault{batch\_size}{smart\_int}{smart} Specifies the maximum number of Kodkod problems that should be lumped together when invoking Kodkodi. Each problem corresponds to one scope. 
Lumping problems together ensures that Kodkodi is launched less often, but it makes the verbose output less readable and is sometimes detrimental to performance. If \textit{batch\_size} is set to \textit{smart}, the actual value used is 1 if \textit{debug} (\S\ref{output-format}) is set and 50 otherwise.

\optrue{destroy\_constrs}{dont\_destroy\_constrs} Specifies whether formulas involving (co)in\-duc\-tive datatype constructors should be rewritten to use (automatically generated) discriminators and destructors. This optimization can drastically reduce the size of the Boolean formulas given to the SAT solver. \nopagebreak {\small See also \textit{debug} (\S\ref{output-format}).}

\optrue{specialize}{dont\_specialize} Specifies whether functions invoked with static arguments should be specialized. This optimization can drastically reduce the search space, especially for higher-order functions. \nopagebreak {\small See also \textit{debug} (\S\ref{output-format}) and \textit{show\_consts} (\S\ref{output-format}).}

\optrue{star\_linear\_preds}{dont\_star\_linear\_preds} Specifies whether Nitpick should use Kodkod's transitive closure operator to encode non-well-founded ``linear inductive predicates,'' i.e., inductive predicates for which the predicate occurs in at most one assumption of each introduction rule. Using the reflexive transitive closure is in principle equivalent to setting \textit{iter} to the cardinality of the predicate's domain, but it is usually more efficient. {\small See also \textit{wf} (\S\ref{scope-of-search}), \textit{debug} (\S\ref{output-format}), and \textit{iter} (\S\ref{scope-of-search}).}

\opnodefault{whack}{term\_list} Specifies a list of atomic terms (usually constants, but also free and schematic variables) that should be taken as being $\unk$ (unknown). This can be useful to reduce the size of the Kodkod problem if you can guess in advance that a constant might not be needed to find a countermodel. {\small See also \textit{debug} (\S\ref{output-format}).}

\opnodefault{need}{term\_list} Specifies a list of datatype values (normally ground constructor terms) that should be part of the subterm-closed subsets used to approximate datatypes. If you know that a value must necessarily belong to the subset of representable values that approximates a datatype, specifying it can speed up the search, especially for high cardinalities.
%By default, Nitpick inspects the conjecture to infer needed datatype values.

\opsmart{total\_consts}{partial\_consts} Specifies whether constants occurring in the problem other than constructors can be assumed to be total for the representable values that approximate a datatype. This option is highly incomplete; it should be used only for problems that do not construct datatype values explicitly. Since this option is (in rare cases) unsound, counterexamples generated under these conditions are tagged as ``quasi genuine.''

\opdefault{datatype\_sym\_break}{int}{\upshape 5} Specifies an upper bound on the number of datatypes for which Nitpick generates symmetry breaking predicates. Symmetry breaking can speed up the SAT solver considerably, especially for unsatisfiable problems, but too much of it can slow it down.

\opdefault{kodkod\_sym\_break}{int}{\upshape 15} Specifies an upper bound on the number of relations for which Kodkod generates symmetry breaking predicates. Symmetry breaking can speed up the SAT solver considerably, especially for unsatisfiable problems, but too much of it can slow it down.
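For example, to rule out symmetry breaking as a source of slowdown, a hypothetical invocation might disable both forms of it (the option names are the ones documented above):

\prew
\textbf{nitpick}~[\textit{datatype\_sym\_break} = 0, \,\textit{kodkod\_sym\_break} = 0]
\postw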
\optrue{peephole\_optim}{no\_peephole\_optim} Specifies whether Nitpick should simplify the generated Kodkod formulas using a peephole optimizer. These optimizations can make a significant difference. Unless you are tracking down a bug in Nitpick or distrust the peephole optimizer, you should leave this option enabled.

\opdefault{max\_threads}{int}{\upshape 0} Specifies the maximum number of threads to use in Kodkod. If this option is set to 0, Kodkod will compute an appropriate value based on the number of processor cores available. The option is implicitly set to 1 for automatic runs. \nopagebreak {\small See also \textit{batch\_size} (\S\ref{optimizations}) and \textit{timeout} (\S\ref{timeouts}).} \end{enum}

\subsection{Timeouts} \label{timeouts}

\begin{enum} \opdefault{timeout}{float}{\upshape 30} Specifies the maximum number of seconds that the \textbf{nitpick} command should spend looking for a counterexample. Nitpick tries to honor this constraint as well as it can but offers no guarantees. For automatic runs, the ``Auto Time Limit'' option under ``Plugins > Plugin Options > Isabelle > General'' is used instead. \nopagebreak {\small See also \textit{max\_threads} (\S\ref{optimizations}).}

\opdefault{tac\_timeout}{float}{\upshape 0.5} Specifies the maximum number of seconds that should be used by internal tactics (\textit{lexicographic\_order} and \textit{size\_change}, which check whether a (co)in\-duc\-tive predicate is well-founded) and by the monotonicity inference. Nitpick tries to honor this constraint but offers no guarantees. \nopagebreak {\small See also \textit{wf} (\S\ref{scope-of-search}) and \textit{mono} (\S\ref{scope-of-search}).} \end{enum}

\section{Attribute Reference} \label{attribute-reference}

Nitpick needs to consider the definitions of all constants occurring in a formula in order to falsify it. For constants introduced using the \textbf{definition} command, the definition is simply the associated \textit{\_def} axiom. In contrast, instead of using the internal representation of functions synthesized by Isabelle's \textbf{primrec}, \textbf{function}, and \textbf{nominal\_primrec} packages, Nitpick relies on the more natural equational specification entered by the user.

Behind the scenes, Isabelle's built-in packages and theories rely on the following attributes to affect Nitpick's behavior:

\begin{enum} \flushitem{\textit{nitpick\_unfold}} \nopagebreak This attribute specifies an equation that Nitpick should use to expand a constant. The equation should be logically equivalent to the constant's actual definition and should be of the form \qquad $c~{?}x_1~\ldots~{?}x_n \,=\, t$, or \qquad $c~{?}x_1~\ldots~{?}x_n \,\equiv\, t$, where ${?}x_1, \ldots, {?}x_n$ are distinct variables and $c$ does not occur in $t$. Each occurrence of $c$ in the problem is expanded to $\lambda x_1\,\ldots x_n.\; t$.

\flushitem{\textit{nitpick\_simp}} \nopagebreak This attribute specifies the equations that constitute the specification of a constant. The \textbf{primrec}, \textbf{function}, and \textbf{nominal\_\allowbreak primrec} packages automatically attach this attribute to their \textit{simps} rules. The equations must be of the form \qquad $c~t_1~\ldots\ t_n \;\bigl[{=}\; u\bigr]$ or \qquad $c~t_1~\ldots\ t_n \,\equiv\, u.$

\flushitem{\textit{nitpick\_psimp}} \nopagebreak This attribute specifies the equations that constitute the partial specification of a constant. The \textbf{function} package automatically attaches this attribute to its \textit{psimps} rules.
The conditional equations must be of the form \qquad $\lbrakk P_1;\> \ldots;\> P_m\rbrakk \,\Longrightarrow\, c\ t_1\ \ldots\ t_n \;\bigl[{=}\; u\bigr]$ or \qquad $\lbrakk P_1;\> \ldots;\> P_m\rbrakk \,\Longrightarrow\, c\ t_1\ \ldots\ t_n \,\equiv\, u$.

\flushitem{\textit{nitpick\_choice\_spec}} \nopagebreak This attribute specifies the (free-form) specification of a constant defined using the \textbf{specification} command. \end{enum}

When faced with a constant, Nitpick proceeds as follows:

\begin{enum} \item[1.] If the \textit{nitpick\_simp} set associated with the constant is not empty, Nitpick uses these rules as the specification of the constant.

\item[2.] Otherwise, if the \textit{nitpick\_psimp} set associated with the constant is not empty, it uses these rules as the specification of the constant.

\item[3.] Otherwise, if the constant was defined using the \allowbreak\textbf{specification} command and the \textit{nitpick\_choice\_spec} set associated with the constant is not empty, it uses these theorems as the specification of the constant.

\item[4.] Otherwise, it looks up the definition of the constant. If the \textit{nitpick\_unfold} set associated with the constant is not empty, it uses the latest rule added to the set as the definition of the constant; otherwise it uses the actual definition axiom.

\begin{enum} \item[1.] If the definition is of the form \qquad $c~{?}x_1~\ldots~{?}x_m \,\equiv\, \lambda y_1~\ldots~y_n.\; \textit{lfp}~(\lambda f.\; t)$ or \qquad $c~{?}x_1~\ldots~{?}x_m \,\equiv\, \lambda y_1~\ldots~y_n.\; \textit{gfp}~(\lambda f.\; t)$, then Nitpick assumes that the definition was made using a (co)inductive package based on the user-specified introduction rules registered in Isabelle's internal \textit{Spec\_Rules} table. The tool uses the introduction rules to ascertain whether the definition is well-founded and the definition to generate a fixed-point equation or an unrolled equation.

\item[2.] If the definition is compact enough, the constant is \textsl{unfolded} wherever it appears; otherwise, it is defined equationally, as with the \textit{nitpick\_simp} attribute. \end{enum} \end{enum}

As an illustration, consider the inductive definition

\prew \textbf{inductive}~\textit{odd}~\textbf{where} \\ ``\textit{odd}~1'' $\,\mid$ \\ ``\textit{odd}~$n\,\Longrightarrow\, \textit{odd}~(\textit{Suc}~(\textit{Suc}~n))$'' \postw

By default, Nitpick uses the \textit{lfp}-based definition in conjunction with the introduction rules. To override this, you can specify an alternative definition as follows:

\prew \textbf{lemma} $\mathit{odd\_alt\_unfold}$ [\textit{nitpick\_unfold}]:\kern.4em ``$\textit{odd}~n \,\equiv\, n~\textrm{mod}~2 = 1$'' \postw

Nitpick then expands all occurrences of $\mathit{odd}~n$ to $n~\textrm{mod}~2 = 1$. Alternatively, you can specify an equational specification of the constant:

\prew \textbf{lemma} $\mathit{odd\_simp}$ [\textit{nitpick\_simp}]:\kern.4em ``$\textit{odd}~n = (n~\textrm{mod}~2 = 1)$'' \postw

Such tweaks should be done with great care, because Nitpick will assume that the constant is completely defined by its equational specification. For example, if you make ``$\textit{odd}~(2 * k + 1)$'' a \textit{nitpick\_simp} rule and neglect to provide rules to handle the $2 * k$ case, Nitpick will define $\textit{odd}~n$ arbitrarily for even values of $n$. The \textit{debug} (\S\ref{output-format}) option is extremely useful to understand what is going on when experimenting with \textit{nitpick\_} attributes.
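For example, a hypothetical invocation that traces how Nitpick processes these attributes for the goal at hand:

\prew
\textbf{nitpick}~[\textit{debug}]
\postw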
Because of its internal three-valued logic, Nitpick tends to lose a lot of precision in the presence of partially specified constants. For example,

\prew \textbf{lemma} \textit{odd\_simp} [\textit{nitpick\_simp}]:\kern.4em ``$\textit{odd~x} = \lnot\, \textit{even}~x$'' \postw

is superior to

\prew \textbf{lemma} \textit{odd\_psimps} [\textit{nitpick\_psimp}]: \\ ``$\textit{even~x} \,\Longrightarrow\, \textit{odd~x} = \textit{False\/}$'' \\ ``$\lnot\, \textit{even~x} \,\Longrightarrow\, \textit{odd~x} = \textit{True\/}$'' \postw

Because Nitpick sometimes unfolds definitions but never simplification rules, you can ensure that a constant is defined explicitly using the \textit{nitpick\_simp} attribute. For example:

\prew \textbf{definition}~\textit{optimum} \textbf{where} [\textit{nitpick\_simp}]: \\ ``$\textit{optimum}~t = (\forall u.\; \textit{consistent}~u \mathrel{\land} \textit{alphabet}~t = \textit{alphabet}~u$ \\ \phantom{``$\textit{optimum}~t = (\forall u.\;$}${\mathrel{\land}}\; \textit{freq}~t = \textit{freq}~u \longrightarrow \textit{cost}~t \le \textit{cost}~u)$'' \postw

On some rare occasions, you might want to provide an inductive or coinductive view on top of an existing constant $c$. The easiest way to achieve this is to define a new constant $c'$ (co)inductively. Then prove that $c$ equals $c'$ and let Nitpick know about it:

\prew \textbf{lemma} \textit{c\_alt\_unfold} [\textit{nitpick\_unfold}]:\kern.4em ``$c \equiv c'$\kern2pt '' \postw

This ensures that Nitpick will substitute $c'$ for $c$ and use the (co)inductive definition.

\section{Standard ML Interface} \label{standard-ml-interface}

Nitpick provides a rich Standard ML interface used mainly for internal purposes and debugging. Among the most interesting functions exported by Nitpick are those that let you invoke the tool programmatically and those that let you register and unregister custom term postprocessors as well as coinductive datatypes.

\subsection{Invoking Nitpick} \label{invoking-nitpick}

The \textit{Nitpick} structure offers the following functions for invoking your favorite counterexample generator:

\prew $\textbf{val}\,~\textit{pick\_nits\_in\_term} : \\ \hbox{}\quad\textit{Proof.state} \rightarrow \textit{params} \rightarrow \textit{mode} \rightarrow \textit{int} \rightarrow \textit{int} \rightarrow \textit{int}$ \\ $\hbox{}\quad{\rightarrow}\; (\textit{term} * \textit{term})~\textit{list} \rightarrow \textit{term~list} \rightarrow \textit{term} \rightarrow \textit{string} * \textit{Proof.state}$ \\ $\textbf{val}\,~\textit{pick\_nits\_in\_subgoal} : \\ \hbox{}\quad\textit{Proof.state} \rightarrow \textit{params} \rightarrow \textit{mode} \rightarrow \textit{int} \rightarrow \textit{int} \rightarrow \textit{string} * \textit{Proof.state}$ \postw

The return value is a new proof state paired with an outcome string (``genuine'', ``quasi\_genuine'', ``potential'', ``none'', or ``unknown''). The \textit{params} type is a large record that lets you set Nitpick's options. The current default options can be retrieved by calling the following function defined in the \textit{Nitpick\_Isar} structure:

\prew $\textbf{val}\,~\textit{default\_params} :\, \textit{theory} \rightarrow (\textit{string} * \textit{string})~\textit{list} \rightarrow \textit{params}$ \postw

The second argument lets you override option values before they are parsed and put into a \textit{params} record.
Here is an example where Nitpick is invoked on subgoal $i$ of $n$ with no time limit: \prew $\textbf{val}\,~\textit{params} = \textit{Nitpick\_Isar.default\_params}~\textit{thy}~[(\textrm{``}\textrm{timeout\/}\textrm{''},\, \textrm{``}\textrm{none}\textrm{''})]$ \\ $\textbf{val}\,~(\textit{outcome},\, \textit{state}') = {}$ \\ $\hbox{}\quad\textit{Nitpick.pick\_nits\_in\_subgoal}~\textit{state}~\textit{params}~\textit{Nitpick.Normal}~\textit{i}~\textit{n}$ \postw \let\antiq=\textrm \subsection{Registering Term Postprocessors} \label{registering-term-postprocessors} It is possible to change the output of any term that Nitpick considers a datatype by registering a term postprocessor. The interface for registering and unregistering postprocessors consists of the following pair of functions defined in the \textit{Nitpick\_Model} structure: \prew $\textbf{type}\,~\textit{term\_postprocessor}\,~{=} {}$ \\ $\hbox{}\quad\textit{Proof.context} \rightarrow \textit{string} \rightarrow (\textit{typ} \rightarrow \textit{term~list\/}) \rightarrow \textit{typ} \rightarrow \textit{term} \rightarrow \textit{term}$ \\ $\textbf{val}\,~\textit{register\_term\_postprocessor} : {}$ \\ $\hbox{}\quad\textit{typ} \rightarrow \textit{term\_postprocessor} \rightarrow \textit{morphism} \rightarrow \textit{Context.generic}$ \\ $\hbox{}\quad{\rightarrow}\; \textit{Context.generic}$ \\ $\textbf{val}\,~\textit{unregister\_term\_postprocessor} : {}$ \\ $\hbox{}\quad\textit{typ} \rightarrow \textit{morphism} \rightarrow \textit{Context.generic} \rightarrow \textit{Context.generic}$ \postw \S\ref{typedefs-quotient-types-records-rationals-and-reals} and \texttt{src/HOL/Library/Multiset.thy} illustrate this feature in context. \subsection{Registering Coinductive Datatypes} \label{registering-coinductive-datatypes} Coinductive datatypes defined using the \textbf{codatatype} command that do not involve nested recursion through non-codatatypes are supported by Nitpick. If you have defined a custom coinductive datatype, you can tell Nitpick about it, so that it can use an efficient Kodkod axiomatization. The interface for registering and unregistering coinductive datatypes consists of the following pair of functions defined in the \textit{Nitpick\_HOL} structure: \prew $\textbf{val}\,~\textit{register\_codatatype\/} : {}$ \\ $\hbox{}\quad\textit{morphism} \rightarrow \textit{typ} \rightarrow \textit{string} \rightarrow (\textit{string} \times \textit{typ})\;\textit{list} \rightarrow \textit{Context.generic} {}$ \\ $\hbox{}\quad{\rightarrow}\; \textit{Context.generic}$ \\ $\textbf{val}\,~\textit{unregister\_codatatype\/} : {}$ \\ $\hbox{}\quad\textit{morphism} \rightarrow \textit{typ} \rightarrow \textit{Context.generic} \rightarrow \textit{Context.generic} {}$ \postw The type $'a~\textit{llist}$ of lazy lists is already registered; had it not been, you could have told Nitpick about it by adding the following line to your theory file: \prew $\textbf{declaration}~\,\{{*}$ \\ $\hbox{}\quad\textit{Nitpick\_HOL.register\_codatatype}~@\{\antiq{typ}~``\kern1pt'a~\textit{llist\/}\textrm{''}\}$ \\ $\hbox{}\qquad\quad @\{\antiq{const\_name}~ \textit{llist\_case}\}$ \\ $\hbox{}\qquad\quad (\textit{map}~\textit{dest\_Const}~[@\{\antiq{term}~\textit{LNil}\},\, @\{\antiq{term}~\textit{LCons}\}])$ \\ ${*}\}$ \postw The \textit{register\_codatatype} function takes a coinductive datatype, its case function, and the list of its constructors (in addition to the current morphism and generic proof context). 
The case function must take its arguments in the order that the constructors are listed. If no case function with the correct signature is available, simply pass the empty string.

On the other hand, if your goal is to cripple Nitpick, add the following line to your theory file and try to check a few conjectures about lazy lists:

\prew $\textbf{declaration}~\,\{{*}$ \\ $\hbox{}\quad\textit{Nitpick\_HOL.unregister\_codatatype}~@\{\antiq{typ}~``\kern1pt'a~\textit{llist\/}\textrm{''}\}$ \\ ${*}\}$ \postw

Inductive datatypes can be registered as coinductive datatypes, given appropriate coinductive constructors. However, doing so precludes the use of the inductive constructors---Nitpick will generate an error if they are needed.

\section{Known Bugs and Limitations} \label{known-bugs-and-limitations}

Here are the known bugs and limitations in Nitpick at the time of writing:

\begin{enum} \item[\labelitemi] Underspecified functions defined using the \textbf{primrec}, \textbf{function}, or \textbf{nominal\_\allowbreak primrec} packages can lead Nitpick to generate spurious counterexamples for theorems that refer to values for which the function is not defined. For example:

\prew \textbf{primrec} \textit{prec} \textbf{where} \\ ``$\textit{prec}~(\textit{Suc}~n) = n$'' \\[2\smallskipamount] \textbf{lemma} ``$\textit{prec}~0 = \textit{undefined\/}$'' \\ \textbf{nitpick} \\[2\smallskipamount] \quad{\slshape Nitpick found a counterexample for \textit{card nat}~= 2: \nopagebreak \\[2\smallskipamount] \hbox{}\qquad Empty assignment} \nopagebreak\\[2\smallskipamount] \textbf{by}~(\textit{auto simp}:~\textit{prec\_def}) \postw

Such theorems are generally considered bad style because they rely on the internal representation of functions synthesized by Isabelle, an implementation detail.

\item[\labelitemi] Similarly, Nitpick might find spurious counterexamples for theorems that rely on the internal use of the indefinite description operator by \textbf{specification} and \textbf{quot\_type}.

\item[\labelitemi] Axioms or definitions that restrict the possible values of the \textit{undefined} constant or other partially specified built-in Isabelle constants (e.g., \textit{Abs\_} and \textit{Rep\_} constants) are in general ignored. Again, such nonconservative extensions are generally considered bad style.

\item[\labelitemi] Nitpick produces spurious counterexamples when invoked after a \textbf{guess} command in a structured proof.

\item[\labelitemi] Datatypes defined using \textbf{datatype} and codatatypes defined using \textbf{codatatype} that involve nested (co)recursion through non-(co)datatypes are not properly supported and may result in spurious counterexamples.

\item[\labelitemi] Types that are registered with several distinct sets of constructors, including \textit{enat} if the \textit{Coinductive} entry of the \textit{Archive of Formal Proofs} is loaded, can confuse Nitpick.

\item[\labelitemi] The \textit{nitpick\_xxx} attributes and the \textit{Nitpick\_xxx.register\_yyy} functions can cause havoc if used improperly.

\item[\labelitemi] Although this has never been observed, arbitrary theorem morphisms could possibly confuse Nitpick, resulting in spurious counterexamples.

\item[\labelitemi] All constants, types, free variables, and schematic variables whose names start with \textit{Nitpick}{.} are reserved for internal use.

\item[\labelitemi] Some users report technical issues with the default SAT solver on Windows.
Setting the \textit{sat\_solver} option (\S\ref{optimizations}) to \textit{MiniSat\_JNI} should solve this. \end{enum} \let\em=\sl \bibliography{manual}{} \bibliographystyle{abbrv} \end{document} diff --git a/src/Doc/Sledgehammer/document/root.tex b/src/Doc/Sledgehammer/document/root.tex --- a/src/Doc/Sledgehammer/document/root.tex +++ b/src/Doc/Sledgehammer/document/root.tex @@ -1,1294 +1,1293 @@ \documentclass[a4paper,12pt]{article} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{amssymb} \usepackage[english]{babel} \usepackage{color} \usepackage{footmisc} \usepackage{graphicx} %\usepackage{mathpazo} \usepackage{multicol} \usepackage{stmaryrd} %\usepackage[scaled=.85]{beramono} \usepackage{isabelle,iman,pdfsetup} \newcommand\download{\url{https://isabelle.in.tum.de/components/}} \let\oldS=\S \def\S{\oldS\,} \def\qty#1{\ensuremath{\left<\mathit{#1\/}\right>}} \def\qtybf#1{$\mathbf{\left<\textbf{\textit{#1\/}}\right>}$} \newcommand\const[1]{\textsf{#1}} %\oddsidemargin=4.6mm %\evensidemargin=4.6mm %\textwidth=150mm %\topmargin=4.6mm %\headheight=0mm %\headsep=0mm %\textheight=234mm \def\Colon{\mathord{:\mkern-1.5mu:}} %\def\lbrakk{\mathopen{\lbrack\mkern-3.25mu\lbrack}} %\def\rbrakk{\mathclose{\rbrack\mkern-3.255mu\rbrack}} \def\lparr{\mathopen{(\mkern-4mu\mid}} \def\rparr{\mathclose{\mid\mkern-4mu)}} \def\unk{{?}} \def\undef{(\lambda x.\; \unk)} %\def\unr{\textit{others}} \def\unr{\ldots} \def\Abs#1{\hbox{\rm{\flqq}}{\,#1\,}\hbox{\rm{\frqq}}} \def\Q{{\smash{\lower.2ex\hbox{$\scriptstyle?$}}}} \urlstyle{tt} \renewcommand\_{\hbox{\textunderscore\kern-.05ex}} \begin{document} %%% TYPESETTING %\renewcommand\labelitemi{$\bullet$} \renewcommand\labelitemi{\raise.065ex\hbox{\small\textbullet}} \title{\includegraphics[scale=0.5]{isabelle_sledgehammer} \\[4ex] Hammering Away \\[\smallskipamount] \Large A User's Guide to Sledgehammer for Isabelle/HOL} \author{\hbox{} \\ Jasmin Blanchette \\ {\normalsize Institut f\"ur Informatik, Technische Universit\"at M\"unchen} \\[4\smallskipamount] {\normalsize with contributions from} \\[4\smallskipamount] Lawrence C. 
Paulson \\ {\normalsize Computer Laboratory, University of Cambridge} \\ \hbox{}} \maketitle \tableofcontents \setlength{\parskip}{.7em plus .2em minus .1em} \setlength{\parindent}{0pt} \setlength{\abovedisplayskip}{\parskip} \setlength{\abovedisplayshortskip}{.9\parskip} \setlength{\belowdisplayskip}{\parskip} \setlength{\belowdisplayshortskip}{.9\parskip} % general-purpose enum environment with correct spacing \newenvironment{enum}% {\begin{list}{}{% \setlength{\topsep}{.1\parskip}% \setlength{\partopsep}{.1\parskip}% \setlength{\itemsep}{\parskip}% \advance\itemsep by-\parsep}} {\end{list}} \def\pre{\begingroup\vskip0pt plus1ex\advance\leftskip by\leftmargin \advance\rightskip by\leftmargin} \def\post{\vskip0pt plus1ex\endgroup} \def\prew{\pre\advance\rightskip by-\leftmargin} \def\postw{\post} \section{Introduction} \label{introduction} Sledgehammer is a tool that applies automatic theorem provers (ATPs) and satisfiability-modulo-theories (SMT) solvers on the current goal.% \footnote{The distinction between ATPs and SMT solvers is convenient but mostly historical.} % The supported ATPs are agsyHOL \cite{agsyHOL}, Alt-Ergo \cite{alt-ergo}, E \cite{schulz-2002}, iProver \cite{korovin-2009}, LEO-II \cite{leo2}, Leo-III \cite{leo3}, Satallax \cite{satallax}, SNARK \cite{snark}, SPASS \cite{weidenbach-et-al-2009}, Vampire \cite{riazanov-voronkov-2002}, Waldmeister \cite{waldmeister}, and Zipperposition \cite{cruanes-2014}. The ATPs are run either locally or remotely via the System\-On\-TPTP web service \cite{sutcliffe-2000}. The supported SMT solvers are CVC3 \cite{cvc3}, CVC4 \cite{cvc4}, veriT \cite{bouton-et-al-2009}, and Z3 \cite{z3}. These are always run locally. The problem passed to the external provers (or solvers) consists of your current goal together with a heuristic selection of hundreds of facts (theorems) from the current theory context, filtered by relevance. The result of a successful proof search is some source text that usually (but not always) reconstructs the proof within Isabelle. For ATPs, the reconstructed proof typically relies on the general-purpose \textit{metis} proof method, which integrates the Metis ATP in Isabelle/HOL with explicit inferences going through the kernel. Thus its results are correct by construction. For Isabelle/jEdit users, Sledgehammer provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > Isabelle > General.'' In this mode, a reduced version of Sledgehammer is run on every newly entered theorem for a few seconds. \newbox\boxA \setbox\boxA=\hbox{\texttt{NOSPAM}} \newcommand\authoremail{\texttt{blan{\color{white}NOSPAM}\kern-\wd\boxA{}chette@\allowbreak in.\allowbreak tum.\allowbreak de}} To run Sledgehammer, you must make sure that the theory \textit{Sledgehammer} is imported---this is rarely a problem in practice since it is part of \textit{Main}. Examples of Sledgehammer use can be found in Isabelle's \texttt{src/HOL/Metis\_Examples} directory. Comments and bug reports concerning Sledgehammer or this manual should be directed to the author at \authoremail. \section{Installation} \label{installation} Sledgehammer is part of Isabelle, so you do not need to install it. However, it relies on third-party automatic provers (ATPs and SMT solvers). 
Among the ATPs, agsyHOL, Alt-Ergo, E, LEO-II, Leo-III, Satallax, SPASS, Vampire, and Zipperposition can be run locally; in addition, agsyHOL, Alt-Ergo, E, iProver, LEO-II, Leo-III, Satallax, SNARK, Vampire, Waldmeister, and Zipperposition are available remotely via System\-On\-TPTP \cite{sutcliffe-2000}. The SMT solvers CVC3, CVC4, veriT, and Z3 can be run locally.

There are three main ways to install automatic provers on your machine:

\begin{sloppy} \begin{enum} \item[\labelitemi] If you installed an official Isabelle package, it should already include properly set up executables for CVC4, E, SPASS, Vampire, and Z3, ready to use. To use Vampire, you must confirm that you are a noncommercial user, as indicated by the message that is displayed when Sledgehammer is invoked the first time.

\item[\labelitemi] Alternatively, you can download the Isabelle-aware CVC3, CVC4, E, SPASS, Vampire, and Z3 binary packages from \download. Extract the archives, then add a line to your \texttt{\$ISABELLE\_HOME\_USER\slash etc\slash components}%
\footnote{The variable \texttt{\$ISABELLE\_HOME\_USER} is set by Isabelle at startup. Its value can be retrieved by executing \texttt{isabelle} \texttt{getenv} \texttt{ISABELLE\_HOME\_USER} on the command line.} file with the absolute path to CVC3, CVC4, E, SPASS, Vampire, or Z3. For example, if the \texttt{components} file does not exist yet and you extracted SPASS to \texttt{/usr/local/spass-3.8ds}, create it with the single line

\prew \texttt{/usr/local/spass-3.8ds} \postw

in it.

\item[\labelitemi] If you prefer to build agsyHOL, Alt-Ergo, E, LEO-II, Leo-III, or Satallax manually, set the environment variable \texttt{AGSYHOL\_HOME}, \texttt{E\_HOME}, \texttt{LEO2\_HOME}, \texttt{LEO3\_HOME}, or \texttt{SATALLAX\_HOME} to the directory that contains the \texttt{agsyHOL}, \texttt{eprover} (and/or \texttt{eproof} or \texttt{eproof\_ram}), \texttt{leo}, \texttt{leo3}, or \texttt{satallax} executable; for Alt-Ergo, set the environment variable \texttt{WHY3\_HOME} to the directory that contains the \texttt{why3} executable. Sledgehammer has been tested with agsyHOL 1.0, Alt-Ergo 0.95.2, E 1.6 to 2.0, LEO-II 1.3.4, Leo-III 1.1, and Satallax 2.7. Since the ATPs' output formats are neither documented nor stable, other versions might not work well with Sledgehammer. Ideally, you should also set \texttt{E\_VERSION}, \texttt{LEO2\_VERSION}, \texttt{LEO3\_VERSION}, or \texttt{SATALLAX\_VERSION} to the prover's version number (e.g., ``2.7''); this might help Sledgehammer invoke the prover optimally.

Similarly, if you want to install CVC3, CVC4, veriT, or Z3, set the environment variable \texttt{CVC3\_\allowbreak SOLVER}, \texttt{CVC4\_\allowbreak SOLVER}, \texttt{VERIT\_\allowbreak SOLVER}, or \texttt{Z3\_SOLVER} to the complete path of the executable, \emph{including the file name}. Sledgehammer has been tested with CVC3 2.2 and 2.4.1, CVC4 1.5-prerelease, veriT smtcomp2019, and Z3 4.3.2. Since Z3's output format is somewhat unstable, other versions of the solver might not work well with Sledgehammer. Ideally, also set \texttt{CVC3\_VERSION}, \texttt{CVC4\_VERSION}, \texttt{VERIT\_VERSION}, or \texttt{Z3\_VERSION} to the solver's version number (e.g., ``4.4.0''). \end{enum} \end{sloppy}

To check whether the provers are successfully installed, try out the example in \S\ref{first-steps}.
If the remote version of any of these provers is used (identified by the prefix ``\textit{remote\_\/}''), or if the local versions fail to solve the easy goal presented there, something must be wrong with the installation.

Remote prover invocation requires Perl with the World Wide Web Library (\texttt{libwww-perl}) installed. If you must use a proxy server to access the Internet, set the \texttt{http\_proxy} environment variable to the proxy, either in the environment in which Isabelle is launched or in your \texttt{\$ISABELLE\_HOME\_USER/etc/settings} file. Here are a few examples:

\prew \texttt{http\_proxy=http://proxy.example.org} \\ \texttt{http\_proxy=http://proxy.example.org:8080} \\ \texttt{http\_proxy=http://joeblow:pAsSwRd@proxy.example.org} \postw

\section{First Steps} \label{first-steps}

To illustrate Sledgehammer in context, let us start a theory file and attempt to prove a simple lemma:

\prew \textbf{theory}~\textit{Scratch} \\ \textbf{imports}~\textit{Main} \\ \textbf{begin} \\[2\smallskipamount]
%
\textbf{lemma} ``$[a] = [b] \,\Longrightarrow\, a = b$'' \\ \textbf{sledgehammer} \postw

Instead of issuing the \textbf{sledgehammer} command, you can also use the Sledgehammer panel in Isabelle/jEdit. Sledgehammer produces the following output after a few seconds:

\prew \slshape Proof found\ldots \\ ``\textit{e\/}'': Try this: \textbf{by} \textit{simp} (0.3 ms) \\
%
``\textit{cvc4\/}'': Try this: \textbf{by} \textit{simp} (0.4 ms) \\
%
``\textit{z3\/}'': Try this: \textbf{by} \textit{simp} (0.5 ms) \\
%
``\textit{spass\/}'': Try this: \textbf{by} \textit{simp} (0.3 ms)
%
\postw

Sledgehammer ran CVC4, E, SPASS, and Z3 in parallel. Depending on which provers are installed and how many processor cores are available, some of the provers might be missing or present with a \textit{remote\_} prefix. For each successful prover, Sledgehammer gives a one-line \textit{metis} or \textit{smt} method call. Rough timings are shown in parentheses, indicating how fast the call is. You can click the proof to insert it into the theory text.

In addition, you can ask Sledgehammer for an Isar text proof by enabling the \textit{isar\_proofs} option (\S\ref{output-format}):

\prew \textbf{sledgehammer} [\textit{isar\_proofs}] \postw

When Isar proof construction is successful, it can yield proofs that are more readable and also faster than the \textit{metis} or \textit{smt} one-line proofs. This feature is experimental and is only available for ATPs.

\section{Hints} \label{hints}

This section presents a few hints that should help you get the most out of Sledgehammer. Frequently asked questions are answered in \S\ref{frequently-asked-questions}.

%\newcommand\point[1]{\medskip\par{\sl\bfseries#1}\par\nopagebreak}
\newcommand\point[1]{\subsection{\emph{#1}}}

\point{Presimplify the goal} For best results, first simplify your problem by calling \textit{auto} or at least \textit{safe} followed by \textit{simp\_all}. The SMT solvers provide arithmetic decision procedures, but the ATPs typically do not (or if they do, Sledgehammer does not use them yet). Apart from Waldmeister, they are not particularly good at heavy rewriting, but because they regard equations as undirected, they often prove theorems that require the reverse orientation of a \textit{simp} rule. Higher-order problems can be tackled, but the success rate is better for first-order problems. Hence, you may get better results if you first simplify the problem to remove higher-order features.
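For example, a hypothetical proof attempt that presimplifies the goal before invoking Sledgehammer (the goal itself is left unspecified):

\prew \textbf{apply}~\textit{auto} \\ \textbf{sledgehammer} \postw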
\point{Familiarize yourself with the main options} Sledgehammer's options are fully documented in \S\ref{option-reference}. Many of the options are very specialized, but serious users of the tool should at least familiarize themselves with the following options:

\begin{enum} \item[\labelitemi] \textbf{\textit{provers}} (\S\ref{mode-of-operation}) specifies the automatic provers (ATPs and SMT solvers) that should be run whenever Sledgehammer is invoked (e.g., ``\textit{provers}~= \textit{cvc4 e spass vampire\/}''). For convenience, you can omit ``\textit{provers}~='' and simply write the prover names as a space-separated list (e.g., ``\textit{cvc4 e spass vampire\/}'').

\item[\labelitemi] \textbf{\textit{max\_facts}} (\S\ref{relevance-filter}) specifies the maximum number of facts that should be passed to the provers. By default, the value is prover-dependent but varies between about 50 and 1000. If the provers time out, you can try lowering this value to, say, 25 or 50 and see if that helps.

\item[\labelitemi] \textbf{\textit{isar\_proofs}} (\S\ref{output-format}) specifies that Isar proofs should be generated, in addition to one-line \textit{metis} or \textit{smt} proofs. The length of the Isar proofs can be controlled by setting \textit{compress} (\S\ref{output-format}).

\item[\labelitemi] \textbf{\textit{timeout}} (\S\ref{timeouts}) controls the provers' time limit. It is set to 30 seconds by default. \end{enum}

Options can be set globally using \textbf{sledgehammer\_params} (\S\ref{command-syntax}). The command also prints the list of all available options with their current value. Fact selection can be influenced by specifying ``$(\textit{add}{:}~\textit{my\_facts})$'' after the \textbf{sledgehammer} call to ensure that certain facts are included, or simply ``$(\textit{my\_facts})$'' to force Sledgehammer to run only with $\textit{my\_facts}$ (and any facts chained into the goal).

\section{Frequently Asked Questions} \label{frequently-asked-questions}

This section answers frequently (and infrequently) asked questions about Sledgehammer. It is a good idea to skim over it now even if you do not have any questions at this stage. And if you have any further questions not listed here, send them to the author at \authoremail.

\point{Which facts are passed to the automatic provers?} Sledgehammer heuristically selects a few hundred relevant lemmas from the currently loaded libraries. The component that performs this selection is called the \emph{relevance filter} (\S\ref{relevance-filter}).

\begin{enum} \item[\labelitemi] The traditional relevance filter, called \emph{MePo} (\underline{Me}ng--\underline{Pau}lson), assigns a score to every available fact (lemma, theorem, definition, or axiom) based upon how many constants that fact shares with the conjecture. This process iterates to include facts relevant to those just accepted. The constants are weighted to give unusual ones greater significance. MePo copes best when the conjecture contains some unusual constants; if all the constants are common, it is unable to discriminate among the hundreds of facts that are picked up. The filter is also memoryless: It has no information about how many times a particular fact has been used in a proof, and it cannot learn.

\item[\labelitemi] An alternative to MePo is \emph{MaSh} (\underline{Ma}chine Learner for \underline{S}ledge\underline{h}ammer). It applies machine learning to the problem of finding relevant facts.

\item[\labelitemi] The \emph{MeSh} filter combines MePo and MaSh. This is the default.
\end{enum}

The number of facts included in a problem varies from prover to prover, since some provers get overwhelmed more easily than others. You can show the number of facts given using the \textit{verbose} option (\S\ref{output-format}) and the actual facts using \textit{debug} (\S\ref{output-format}).

Sledgehammer is good at finding short proofs combining a handful of existing lemmas. If you are looking for longer proofs, you must typically restrict the number of facts by setting the \textit{max\_facts} option (\S\ref{relevance-filter}) to, say, 25 or 50.

You can also influence which facts are actually selected in a number of ways. If you simply want to ensure that a fact is included, you can specify it using the ``$(\textit{add}{:}~\textit{my\_facts})$'' syntax. For example:
%
\prew \textbf{sledgehammer} (\textit{add}: \textit{hd.simps} \textit{tl.simps}) \postw
%
The specified facts then replace the least relevant facts that would otherwise be included; the other selected facts remain the same. If you want to direct the selection in a particular direction, you can specify the facts via \textbf{using}:
%
\prew \textbf{using} \textit{hd.simps} \textit{tl.simps} \\ \textbf{sledgehammer} \postw
%
The facts are then more likely to be selected than otherwise, and if they are selected at iteration $j$ they also influence which facts are selected at iterations $j + 1$, $j + 2$, etc. To give them even more weight, try
%
\prew \textbf{using} \textit{hd.simps} \textit{tl.simps} \\ \textbf{apply}~\textbf{--} \\ \textbf{sledgehammer} \postw

\point{Why does Metis fail to reconstruct the proof?} There are many reasons. If Metis runs seemingly forever, that is a sign that the proof is too difficult for it. Metis's search is complete for first-order logic with equality, so if the proof was found by a superposition-based ATP such as E, SPASS, or Vampire, Metis should eventually find it, but that is little consolation.

In some rare cases, \textit{metis} fails fairly quickly, and you get the error message ``One-line proof reconstruction failed.'' This indicates that Sledgehammer determined that the goal is provable, but the proof is, for technical reasons, beyond \textit{metis}'s power. You can then try again with the \textit{strict} option (\S\ref{problem-encoding}).

If the goal is actually unprovable and you did not specify an unsound encoding using \textit{type\_enc} (\S\ref{problem-encoding}), this is a bug, and you are strongly encouraged to report this to the author at \authoremail.

\point{How can I tell whether a suggested proof is sound?} Earlier versions of Sledgehammer often suggested unsound proofs---either proofs of nontheorems or simply proofs that rely on type-unsound inferences. This is a thing of the past, unless you explicitly specify an unsound encoding using \textit{type\_enc} (\S\ref{problem-encoding}).
%
Officially, the only form of ``unsoundness'' that lurks in the sound encodings is related to missing characteristic theorems of datatypes. For example,

\prew \textbf{lemma}~``$\exists \mathit{xs}.\; \mathit{xs} \neq []$'' \\ \textbf{sledgehammer} () \postw

suggests an argumentless \textit{metis} call that fails. However, the conjecture does actually hold, and the \textit{metis} call can be repaired by adding \textit{list.distinct}.
%
We hope to address this problem in a future version of Isabelle. In the meantime, you can avoid it by passing the \textit{strict} option (\S\ref{problem-encoding}).
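For example, a hypothetical invocation with this safeguard enabled:

\prew \textbf{sledgehammer} [\textit{strict}] \postw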
\point{What are the \textit{full\_types}, \textit{no\_types}, and \textit{mono\_tags} arguments to Metis?} The \textit{metis}~(\textit{full\_types}) proof method and its cousin \textit{metis}~(\textit{mono\_tags}) are fully-typed versions of Metis. They are somewhat slower than \textit{metis}, but the proof search is fully typed, and they also include more powerful rules such as the axiom ``$x = \const{True} \mathrel{\lor} x = \const{False}$'' for reasoning in higher-order places (e.g., in set comprehensions). These methods are automatically tried as a fallback when \textit{metis} fails, and they are sometimes generated by Sledgehammer instead of \textit{metis} if the proof obviously requires type information or if \textit{metis} failed when Sledgehammer preplayed the proof. (By default, Sledgehammer tries to run \textit{metis} with various sets of options for up to 1~second each time to ensure that the generated one-line proofs actually work and to display timing information. This can be configured using the \textit{preplay\_timeout} and \textit{dont\_preplay} options (\S\ref{timeouts}).)
%
At the other end of the soundness spectrum, \textit{metis} (\textit{no\_types}) uses no type information at all during the proof search, which is more efficient but often fails. Calls to \textit{metis} (\textit{no\_types}) are occasionally generated by Sledgehammer.
%
See the \textit{type\_enc} option (\S\ref{problem-encoding}) for details.

Incidentally, if you ever see warnings such as

\prew \slshape Metis: Falling back on ``\textit{metis} (\textit{full\_types})'' \postw

for a successful \textit{metis} proof, you can advantageously pass the \textit{full\_types} option to \textit{metis} directly.

\point{And what are the \textit{lifting} and \textit{hide\_lams} arguments to Metis?} Orthogonally to the encoding of types, it is important to choose an appropriate translation of $\lambda$-abstractions. Metis supports three translation schemes, in decreasing order of power: Curry combinators (the default), $\lambda$-lifting, and a ``hiding'' scheme that disables all reasoning under $\lambda$-abstractions. The more powerful schemes also give the automatic provers more rope to hang themselves. See the \textit{lam\_trans} option (\S\ref{problem-encoding}) for details.

\point{Are the generated proofs minimal?} Automatic provers frequently use many more facts than are necessary. Sledgehammer includes a minimization tool that takes a set of facts returned by a given prover and repeatedly calls a prover or proof method with subsets of those facts to find a minimal set. Reducing the number of facts typically helps reconstruction, while also removing superfluous clutter from the proof scripts.

In earlier versions of Sledgehammer, generated proofs were systematically accompanied by a suggestion to invoke the minimization tool. This step is now performed by default but can be disabled using the \textit{minimize} option (\S\ref{mode-of-operation}).

\point{A strange error occurred---what should I do?} Sledgehammer tries to give informative error messages. Please report any strange error to the author at \authoremail.

\point{Auto can solve it---why not Sledgehammer?} Problems can be easy for \textit{auto} and difficult for automatic provers, but the reverse is also true, so do not be discouraged if your first attempts fail. Because the system refers to all theorems known to Isabelle, it is particularly suitable when your goal has a short proof but requires lemmas that you do not know about.
\point{Why are there so many options?} Sledgehammer's philosophy is that it should work out of the box, without user guidance. Many of the options are meant to be used mostly by the Sledgehammer developers for experiments. Of course, feel free to try them out if you are so inclined.

\section{Command Syntax} \label{command-syntax}

\subsection{Sledgehammer} \label{sledgehammer}

Sledgehammer can be invoked at any point when there is an open goal by entering the \textbf{sledgehammer} command in the theory file. Its general syntax is as follows:

\prew \textbf{sledgehammer} \qty{subcommand}$^?$ \qty{options}$^?$ \qty{facts\_override}$^?$ \qty{num}$^?$ \postw

In the general syntax, the \qty{subcommand} may be any of the following:

\begin{enum} \item[\labelitemi] \textbf{\textit{run} (the default):} Runs Sledgehammer on subgoal number \qty{num} (1 by default), with the given options and facts.

\item[\labelitemi] \textbf{\textit{supported\_provers}:} Prints the list of automatic provers supported by Sledgehammer. See \S\ref{installation} and \S\ref{mode-of-operation} for more information on how to install automatic provers.

\item[\labelitemi] \textbf{\textit{refresh\_tptp}:} Refreshes the list of remote ATPs available at System\-On\-TPTP \cite{sutcliffe-2000}. \end{enum}

In addition, the following subcommands provide finer control over machine learning with MaSh:

\begin{enum} \item[\labelitemi] \textbf{\textit{unlearn}:} Resets MaSh, erasing any persistent state.

\item[\labelitemi] \textbf{\textit{learn\_isar}:} Invokes MaSh on the current theory to process all the available facts, learning from their Isabelle/Isar proofs. This happens automatically at Sledgehammer invocations if the \textit{learn} option (\S\ref{relevance-filter}) is enabled.

\item[\labelitemi] \textbf{\textit{learn\_prover}:} Invokes MaSh on the current theory to process all the available facts, learning from proofs generated by automatic provers. The prover to use and its timeout can be set using the \textit{prover} (\S\ref{mode-of-operation}) and \textit{timeout} (\S\ref{timeouts}) options. It is recommended to perform learning using a first-order ATP (such as E, SPASS, or Vampire) as opposed to a higher-order ATP or an SMT solver.

\item[\labelitemi] \textbf{\textit{relearn\_isar}:} Same as \textit{unlearn} followed by \textit{learn\_isar}.

\item[\labelitemi] \textbf{\textit{relearn\_prover}:} Same as \textit{unlearn} followed by \textit{learn\_prover}. \end{enum}

Sledgehammer's behavior can be influenced by various \qty{options}, which can be specified in brackets after the \textbf{sledgehammer} command. The \qty{options} are a list of key--value pairs of the form ``[$k_1 = v_1, \ldots, k_n = v_n$]''. For Boolean options, ``= \textit{true\/}'' is optional. For example:

\prew \textbf{sledgehammer} [\textit{isar\_proofs}, \,\textit{timeout} = 120] \postw

Default values can be set using \textbf{sledgehammer\_\allowbreak params}:

\prew \textbf{sledgehammer\_params} \qty{options} \postw

The supported options are described in \S\ref{option-reference}.

The \qty{facts\_override} argument lets you alter the set of facts that go through the relevance filter. It may be of the form ``(\qty{facts})'', where \qty{facts} is a space-separated list of Isabelle facts (theorems, local assumptions, etc.), in which case the relevance filter is bypassed and the given facts are used.
It may also be of the form ``(\textit{add}:\ \qty{facts\/_{\mathrm{1}}})'', ``(\textit{del}:\ \qty{facts\/_{\mathrm{2}}})'', or ``(\textit{add}:\ \qty{facts\/_{\mathrm{1}}}\ \textit{del}:\ \qty{facts\/_{\mathrm{2}}})'', where the relevance filter is instructed to proceed as usual except that it should consider \qty{facts\/_{\mathrm{1}}} highly relevant and \qty{facts\/_{\mathrm{2}}} fully irrelevant.

If you use Isabelle/jEdit, Sledgehammer also provides an automatic mode that can be enabled via the ``Auto Sledgehammer'' option under ``Plugins > Plugin Options > Isabelle > General.'' For automatic runs, only the first prover set using \textit{provers} (\S\ref{mode-of-operation}) is considered (typically E), \textit{slice} (\S\ref{mode-of-operation}) is disabled, fewer facts are passed to the prover, \textit{fact\_filter} (\S\ref{relevance-filter}) is set to \textit{mepo}, \textit{strict} (\S\ref{problem-encoding}) is enabled, \textit{verbose} (\S\ref{output-format}) and \textit{debug} (\S\ref{output-format}) are disabled, and \textit{timeout} (\S\ref{timeouts}) is superseded by the ``Auto Time Limit'' option in jEdit. Sledgehammer's output is also more concise.

\subsection{Metis} \label{metis}

The \textit{metis} proof method has the syntax

\prew \textbf{\textit{metis}}~(\qty{options})${}^?$~\qty{facts}${}^?$ \postw

where \qty{facts} is a list of arbitrary facts and \qty{options} is a comma-separated list consisting of at most one $\lambda$ translation scheme specification with the same semantics as Sledgehammer's \textit{lam\_trans} option (\S\ref{problem-encoding}) and at most one type encoding specification with the same semantics as Sledgehammer's \textit{type\_enc} option (\S\ref{problem-encoding}).
%
The supported $\lambda$ translation schemes are \textit{hide\_lams}, \textit{lifting}, and \textit{combs} (the default).
%
All the untyped type encodings listed in \S\ref{problem-encoding} are supported. For convenience, the following aliases are provided:

\begin{enum} \item[\labelitemi] \textbf{\textit{full\_types}:} Alias for \textit{poly\_guards\_query}. \item[\labelitemi] \textbf{\textit{partial\_types}:} Alias for \textit{poly\_args}. \item[\labelitemi] \textbf{\textit{no\_types}:} Alias for \textit{erased}.
\end{enum} \section{Option Reference} \label{option-reference} \def\defl{\{} \def\defr{\}} \def\flushitem#1{\item[]\noindent\kern-\leftmargin \textbf{#1}} \def\optrueonly#1{\flushitem{\textit{#1} $\bigl[$= \textit{true}$\bigr]$\enskip}\nopagebreak\\[\parskip]} \def\optrue#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{bool}$\bigr]$\enskip \defl\textit{true}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opfalse#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{bool}$\bigr]$\enskip \defl\textit{false}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opsmart#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\enskip \defl\textit{smart}\defr\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opsmartx#1#2{\flushitem{\textit{#1} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\enskip \defl\textit{smart}\defr\\\hbox{}\hfill (neg.: \textit{#2})}\nopagebreak\\[\parskip]} \def\opnodefault#1#2{\flushitem{\textit{#1} = \qtybf{#2}} \nopagebreak\\[\parskip]} \def\opnodefaultbrk#1#2{\flushitem{$\bigl[$\textit{#1} =$\bigr]$ \qtybf{#2}} \nopagebreak\\[\parskip]} \def\opdefault#1#2#3{\flushitem{\textit{#1} = \qtybf{#2}\enskip \defl\textit{#3}\defr} \nopagebreak\\[\parskip]} \def\oparg#1#2#3{\flushitem{\textit{#1} \qtybf{#2} = \qtybf{#3}} \nopagebreak\\[\parskip]} \def\opargbool#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{bool}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]} \def\opargboolorsmart#1#2#3{\flushitem{\textit{#1} \qtybf{#2} $\bigl[$= \qtybf{smart\_bool}$\bigr]$\hfill (neg.: \textit{#3})}\nopagebreak\\[\parskip]} Sledgehammer's options are categorized as follows:\ mode of operation (\S\ref{mode-of-operation}), problem encoding (\S\ref{problem-encoding}), relevance filter (\S\ref{relevance-filter}), output format (\S\ref{output-format}), regression testing (\S\ref{regression-testing}), and timeouts (\S\ref{timeouts}). The descriptions below refer to the following syntactic quantities: \begin{enum} \item[\labelitemi] \qtybf{string}: A string. \item[\labelitemi] \qtybf{bool\/}: \textit{true} or \textit{false}. \item[\labelitemi] \qtybf{smart\_bool\/}: \textit{true}, \textit{false}, or \textit{smart}. \item[\labelitemi] \qtybf{int\/}: An integer. \item[\labelitemi] \qtybf{float}: A floating-point number (e.g., 2.5 or 60) expressing a number of seconds. \item[\labelitemi] \qtybf{float\_pair\/}: A pair of floating-point numbers (e.g., 0.6 0.95). \item[\labelitemi] \qtybf{smart\_int\/}: An integer or \textit{smart}. \end{enum} Default values are indicated in curly brackets (\textrm{\{\}}). Boolean options have a negative counterpart (e.g., \textit{minimize} vs.\ \textit{dont\_minimize}). When setting Boolean options or their negative counterparts, ``= \textit{true\/}'' may be omitted. \subsection{Mode of Operation} \label{mode-of-operation} \begin{enum} \opnodefaultbrk{provers}{string} Specifies the automatic provers to use as a space-separated list (e.g., ``\textit{cvc4}~\textit{e}~\textit{spass}~\textit{vampire\/}''). Provers can be run locally or remotely; see \S\ref{installation} for installation instructions. The following local provers are supported: \begin{sloppy} \begin{enum} \item[\labelitemi] \textbf{\textit{agsyhol}:} agsyHOL is an automatic higher-order prover developed by Fredrik Lindblad \cite{agsyHOL}. To use agsyHOL, set the environment variable \texttt{AGSYHOL\_HOME} to the directory that contains the \texttt{agsyHOL} executable. Sledgehammer has been tested with version 1.0. 
\subsection{Mode of Operation}
\label{mode-of-operation}

\begin{enum}
\opnodefaultbrk{provers}{string}
Specifies the automatic provers to use as a space-separated list (e.g., ``\textit{cvc4}~\textit{e}~\textit{spass}~\textit{vampire\/}''). Provers can be run locally or remotely; see \S\ref{installation} for installation instructions.

The following local provers are supported:

\begin{sloppy}
\begin{enum}
\item[\labelitemi] \textbf{\textit{agsyhol}:} agsyHOL is an automatic higher-order prover developed by Fredrik Lindblad \cite{agsyHOL}. To use agsyHOL, set the environment variable \texttt{AGSYHOL\_HOME} to the directory that contains the \texttt{agsyHOL} executable. Sledgehammer has been tested with version 1.0.

\item[\labelitemi] \textbf{\textit{alt\_ergo}:} Alt-Ergo is a polymorphic ATP developed by Bobot et al.\ \cite{alt-ergo}. It supports the TPTP polymorphic typed first-order format (TF1) via Why3 \cite{why3}. To use Alt-Ergo, set the environment variable \texttt{WHY3\_HOME} to the directory that contains the \texttt{why3} executable. Sledgehammer requires Alt-Ergo 0.95.2 and Why3 0.83.

\item[\labelitemi] \textbf{\textit{cvc3}:} CVC3 is an SMT solver developed by Clark Barrett, Cesare Tinelli, and their colleagues \cite{cvc3}. To use CVC3, set the environment variable \texttt{CVC3\_SOLVER} to the complete path of the executable, including the file name, or install the prebuilt CVC3 package from \download. Sledgehammer has been tested with versions 2.2 and 2.4.1.

\item[\labelitemi] \textbf{\textit{cvc4}:} CVC4 \cite{cvc4} is the successor to CVC3. To use CVC4, set the environment variable \texttt{CVC4\_SOLVER} to the complete path of the executable, including the file name, or install the prebuilt CVC4 package from \download. Sledgehammer has been tested with version 1.5-prerelease.

\item[\labelitemi] \textbf{\textit{e}:} E is a first-order resolution prover developed by Stephan Schulz \cite{schulz-2002}. To use E, set the environment variable \texttt{E\_HOME} to the directory that contains the \texttt{eproof} executable and \texttt{E\_VERSION} to the version number (e.g., ``1.8''), or install the prebuilt E package from \download. Sledgehammer has been tested with versions 1.6 to 1.8.

\item[\labelitemi] \textbf{\textit{e\_par}:} E-Par is an experimental metaprover developed by Josef Urban that implements strategy scheduling on top of E. To use E-Par, set the environment variable \texttt{E\_HOME} to the directory that contains the \texttt{runepar.pl} script and the \texttt{eprover} and \texttt{epclextract} executables, or use the prebuilt E package from \download. Be aware that E-Par is experimental software. It has been known to generate zombie processes. Use at your own risk.

\item[\labelitemi] \textbf{\textit{ehoh}:} Ehoh is an experimental version of E that supports a $\lambda$-free fragment of higher-order logic. Use at your own risk.

\item[\labelitemi] \textbf{\textit{iprover}:} iProver is a pure instantiation-based prover developed by Konstantin Korovin \cite{korovin-2009}. To use iProver, set the environment variable \texttt{IPROVER\_HOME} to the directory that contains the \texttt{iproveropt} executable. Sledgehammer has been tested with version 2.8. iProver depends on E to clausify problems, so make sure that E is installed as well.

\item[\labelitemi] \textbf{\textit{leo2}:} LEO-II is an automatic higher-order prover developed by Christoph Benzm\"uller et al.\ \cite{leo2}, with support for the TPTP typed higher-order syntax (TH0). To use LEO-II, set the environment variable \texttt{LEO2\_HOME} to the directory that contains the \texttt{leo} executable. Sledgehammer requires version 1.3.4 or above.

\item[\labelitemi] \textbf{\textit{leo3}:} Leo-III is an automatic higher-order prover developed by Alexander Steen, Max Wisniewski, Christoph Benzm\"uller et al.\ \cite{leo3}, with support for the TPTP typed higher-order syntax (TH0). To use Leo-III, set the environment variable \texttt{LEO3\_HOME} to the directory that contains the \texttt{leo3} executable. Sledgehammer requires version 1.1 or above.
\item[\labelitemi] \textbf{\textit{satallax}:} Satallax is an automatic higher-order prover developed by Chad Brown et al.\ \cite{satallax}, with support for the TPTP typed higher-order syntax (TH0). To use Satallax, set the environment variable \texttt{SATALLAX\_HOME} to the directory that contains the \texttt{satallax} executable. Sledgehammer requires version 2.2 or above. \item[\labelitemi] \textbf{\textit{spass}:} SPASS is a first-order resolution prover developed by Christoph Weidenbach et al.\ \cite{weidenbach-et-al-2009}. To use SPASS, set the environment variable \texttt{SPASS\_HOME} to the directory that contains the \texttt{SPASS} executable and \texttt{SPASS\_VERSION} to the version number (e.g., ``3.8ds''), or install the prebuilt SPASS package from \download. Sledgehammer requires version 3.8ds or above. \item[\labelitemi] \textbf{\textit{vampire}:} Vampire is a first-order resolution prover developed by Andrei Voronkov and his colleagues \cite{riazanov-voronkov-2002}. To use Vampire, set the environment variable \texttt{VAMPIRE\_HOME} to the directory that contains the \texttt{vampire} executable and \texttt{VAMPIRE\_VERSION} to the version number (e.g., ``4.2.2''). Sledgehammer has been tested with versions 1.8 to 4.2.2 (in the post-2010 numbering scheme). \item[\labelitemi] \textbf{\textit{verit}:} veriT \cite{bouton-et-al-2009} is an SMT solver developed by David D\'eharbe, Pascal Fontaine, and their colleagues. It is specifically designed to produce detailed proofs for reconstruction in proof assistants. To use veriT, set the environment variable \texttt{VERIT\_SOLVER} to the complete path of the executable, including the file name. Sledgehammer has been tested with version smtcomp2019. \item[\labelitemi] \textbf{\textit{z3}:} Z3 is an SMT solver developed at Microsoft Research \cite{z3}. To use Z3, set the environment variable \texttt{Z3\_SOLVER} to the complete path of the executable, including the file name. Sledgehammer has been tested with a pre-release version of 4.4.0. \item[\labelitemi] \textbf{\textit{z3\_tptp}:} This version of Z3 pretends to be an ATP, exploiting Z3's support for the TPTP typed first-order format (TF0). It is included for experimental purposes. It requires version 4.3.1 of Z3 or above. To use it, set the environment variable \texttt{Z3\_TPTP\_HOME} to the directory that contains the \texttt{z3\_tptp} executable. \item[\labelitemi] \textbf{\textit{zipperposition}:} Zipperposition \cite{cruanes-2014} is a higher-order superposition prover developed by Simon Cruanes and colleagues. To use Zipperposition, set the environment variable \texttt{ZIPPERPOSITION\_HOME} to the directory that contains the \texttt{zipperposition} executable and \texttt{ZIPPERPOSITION\_VERSION} to the version number (e.g., ``2.0.1''). Sledgehammer has been tested with version 2.0.1. \end{enum} \end{sloppy} Moreover, the following remote provers are supported: \begin{enum} \item[\labelitemi] \textbf{\textit{remote\_agsyhol}:} The remote version of agsyHOL runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_alt\_ergo}:} The remote version of Alt-Ergo runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_e}:} The remote version of E runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. \item[\labelitemi] \textbf{\textit{remote\_iprover}:} The remote version of iProver runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}. 
\item[\labelitemi] \textbf{\textit{remote\_leo2}:} The remote version of LEO-II runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}.

\item[\labelitemi] \textbf{\textit{remote\_leo3}:} The remote version of Leo-III runs on Geoff Sutcliffe's Miami servers \cite{sutcliffe-2000}.

\item[\labelitemi] \textbf{\textit{remote\_pirate}:} Pirate is a highly experimental first-order resolution prover developed by Daniel Wand. The remote version of Pirate runs on a private server that he generously set up.

\item[\labelitemi] \textbf{\textit{remote\_snark}:} SNARK is a first-order resolution prover developed by Stickel et al.\ \cite{snark}. The remote version of SNARK runs on Geoff Sutcliffe's Miami servers.

\item[\labelitemi] \textbf{\textit{remote\_vampire}:} The remote version of Vampire runs on Geoff Sutcliffe's Miami servers.

\item[\labelitemi] \textbf{\textit{remote\_waldmeister}:} Waldmeister is a unit equality prover developed by Hillenbrand et al.\ \cite{waldmeister}. It can be used to prove universally quantified equations using unconditional equations, corresponding to the TPTP CNF UEQ division. The remote version of Waldmeister runs on Geoff Sutcliffe's Miami servers.

\item[\labelitemi] \textbf{\textit{remote\_zipperposition}:} The remote version of Zipperposition runs on Geoff Sutcliffe's Miami servers.
\end{enum}

By default, Sledgehammer runs a subset of CVC4, E, SPASS, Vampire, veriT, and Z3 in parallel, either locally or remotely---depending on the number of processor cores available and on which provers are actually installed. It is generally desirable to run several provers in parallel.

\opnodefault{prover}{string}
Alias for \textit{provers}.

\optrue{slice}{dont\_slice}
Specifies whether the time allocated to a prover should be sliced into several segments, each of which has its own set of possibly prover-dependent options. For SPASS and Vampire, the first slice tries the fast but incomplete set-of-support (SOS) strategy, whereas the second slice runs without it. For E, up to three slices are tried, with different weighted search strategies and numbers of facts. For SMT solvers, several slices are tried with the same options each time but fewer and fewer facts. According to benchmarks with a timeout of 30 seconds, slicing is a valuable optimization, and you should probably leave it enabled unless you are conducting experiments.

\nopagebreak
{\small See also \textit{verbose} (\S\ref{output-format}).}

\optrue{minimize}{dont\_minimize}
Specifies whether the minimization tool should be invoked automatically after proof search.

\nopagebreak
{\small See also \textit{preplay\_timeout} (\S\ref{timeouts}) and \textit{dont\_preplay} (\S\ref{timeouts}).}

\opfalse{spy}{dont\_spy}
Specifies whether Sledgehammer should record statistics in \texttt{\$ISA\-BELLE\_\allowbreak HOME\_\allowbreak USER/\allowbreak spy\_\allowbreak sledgehammer}. These statistics can be useful to the developers of Sledgehammer. If you are willing to have your interactions recorded in the name of science, please enable this feature and send the statistics file every now and then to the author of this manual (\authoremail). To change the default value of this option globally, set the environment variable \texttt{SLEDGEHAMMER\_SPY} to \textit{yes}.
\nopagebreak
{\small See also \textit{debug} (\S\ref{output-format}).}

\opfalse{overlord}{no\_overlord}
Specifies whether Sledgehammer should put its temporary files in \texttt{\$ISA\-BELLE\_\allowbreak HOME\_\allowbreak USER}, which is useful for debugging Sledgehammer but also unsafe if several instances of the tool are run simultaneously. The files are identified by the prefixes \texttt{prob\_} and \texttt{mash\_}; you may safely remove them after Sledgehammer has run.

\textbf{Warning:} This option is not thread-safe. Use at your own risk.

\nopagebreak
{\small See also \textit{debug} (\S\ref{output-format}).}
\end{enum}

\subsection{Relevance Filter}
\label{relevance-filter}

\begin{enum}
\opdefault{fact\_filter}{string}{smart}
Specifies the relevance filter to use. The following filters are available:

\begin{enum}
\item[\labelitemi] \textbf{\textit{mepo}:} The traditional memoryless MePo relevance filter.

\item[\labelitemi] \textbf{\textit{mash}:} The MaSh machine learner. Three learning algorithms are provided:

\begin{enum}
\item[\labelitemi] \textbf{\textit{nb}} is an implementation of naive Bayes.

\item[\labelitemi] \textbf{\textit{knn}} is an implementation of $k$-nearest neighbors.

\item[\labelitemi] \textbf{\textit{nb\_knn}} (also called \textbf{\textit{yes}} and \textbf{\textit{sml}}) is a combination of naive Bayes and $k$-nearest neighbors.
\end{enum}

In addition, the special value \textit{none} is used to disable machine learning by default (cf.\ \textit{smart} below). The default algorithm is \textit{nb\_knn}. The algorithm can be selected by setting the ``MaSh'' option under ``Plugins > Plugin Options > Isabelle > General'' in Isabelle/jEdit. Persistent data for these algorithms is stored in the directory \texttt{\$ISABELLE\_\allowbreak HOME\_\allowbreak USER/\allowbreak mash}.

\item[\labelitemi] \textbf{\textit{mesh}:} The MeSh filter, which combines the rankings from MePo and MaSh.

\item[\labelitemi] \textbf{\textit{smart}:} A combination of MePo, MaSh, and MeSh. If the learning algorithm is set to \textit{none}, \textit{smart} behaves like MePo.
\end{enum}

\opdefault{max\_facts}{smart\_int}{smart}
Specifies the maximum number of facts that may be returned by the relevance filter. If the option is set to \textit{smart} (the default), it effectively takes a value that was empirically found to be appropriate for the prover. Typical values lie between 50 and 1000.

\opdefault{fact\_thresholds}{float\_pair}{\upshape 0.45~0.85}
Specifies the thresholds above which facts are considered relevant by the relevance filter. The first threshold is used for the first iteration of the relevance filter and the second threshold is used for the last iteration (if it is reached). The effective threshold is quadratically interpolated for the other iterations. Each threshold ranges from 0 to 1, where 0 means that all theorems are relevant and 1 means that only theorems that refer to previously seen constants are relevant.

\optrue{learn}{dont\_learn}
Specifies whether MaSh should be run automatically by Sledgehammer to learn the available theories (and hence provide more accurate results). Learning takes place only if MaSh is enabled.
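The relevance-filter options can be combined in a single \textbf{sledgehammer} invocation. For instance, the following call (with illustrative values) selects the memoryless MePo filter and caps the number of returned facts at 200:

\prew
\textbf{sledgehammer} [\textit{fact\_filter} = \textit{mepo}, \textit{max\_facts} = 200]
\postw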
\opdefault{max\_new\_mono\_instances}{int}{smart}
Specifies the maximum number of monomorphic instances to generate beyond \textit{max\_facts}. The higher this limit is, the more monomorphic instances are potentially generated. Whether monomorphization takes place depends on the type encoding used. If the option is set to \textit{smart} (the default), it takes a value that was empirically found to be appropriate for the prover. For most provers, this value is 100.

\nopagebreak
{\small See also \textit{type\_enc} (\S\ref{problem-encoding}).}

\opdefault{max\_mono\_iters}{int}{smart}
Specifies the maximum number of iterations for the monomorphization fixpoint construction. The higher this limit is, the more monomorphic instances are potentially generated. Whether monomorphization takes place depends on the type encoding used. If the option is set to \textit{smart} (the default), it takes a value that was empirically found to be appropriate for the prover. For most provers, this value is 3.

\nopagebreak
{\small See also \textit{type\_enc} (\S\ref{problem-encoding}).}
\end{enum}

\subsection{Problem Encoding}
\label{problem-encoding}

\newcommand\comb[1]{\const{#1}}

\begin{enum}
\opdefault{lam\_trans}{string}{smart}
Specifies the $\lambda$ translation scheme to use in ATP problems. The supported translation schemes are listed below:

\begin{enum}
\item[\labelitemi] \textbf{\textit{hide\_lams}:} Hide the $\lambda$-abstractions by replacing them by unspecified fresh constants, effectively disabling all reasoning under $\lambda$-abstractions.

\item[\labelitemi] \textbf{\textit{lifting}:} Introduce a new supercombinator \const{c} for each cluster of $n$~$\lambda$-abstractions, defined using an equation $\const{c}~x_1~\ldots~x_n = t$ ($\lambda$-lifting).

\item[\labelitemi] \textbf{\textit{combs}:} Rewrite lambdas to the Curry combinators (\comb{I}, \comb{K}, \comb{S}, \comb{B}, \comb{C}). Combinators enable the ATPs to synthesize $\lambda$-terms but tend to yield bulkier formulas than $\lambda$-lifting: The translation is quadratic in the worst case, and the equational definitions of the combinators are very prolific in the context of resolution.

\item[\labelitemi] \textbf{\textit{combs\_and\_lifting}:} Introduce a new supercombinator \const{c} for each cluster of $\lambda$-abstractions and characterize it both using a lifted equation $\const{c}~x_1~\ldots~x_n = t$ and via Curry combinators.

\item[\labelitemi] \textbf{\textit{combs\_or\_lifting}:} For each cluster of $\lambda$-abstractions, heuristically choose between $\lambda$-lifting and Curry combinators.

\item[\labelitemi] \textbf{\textit{keep\_lams}:} Keep the $\lambda$-abstractions in the generated problems. This is available only with provers that support the TH0 syntax.

\item[\labelitemi] \textbf{\textit{smart}:} The actual translation scheme used depends on the ATP and should be the most efficient scheme for that ATP.
\end{enum}

For SMT solvers, the $\lambda$ translation scheme is always \textit{lifting}, irrespective of the value of this option.

\opsmartx{uncurried\_aliases}{no\_uncurried\_aliases}
Specifies whether fresh function symbols should be generated as aliases for applications of curried functions in ATP problems.

\opdefault{type\_enc}{string}{smart}
Specifies the type encoding to use in ATP problems. Some of the type encodings are unsound, meaning that they can give rise to spurious proofs (unreconstructible using \textit{metis}). The type encodings are listed below, with an indication of their soundness in parentheses. An asterisk (*) indicates that the encoding is slightly incomplete for reconstruction with \textit{metis}, unless the \textit{strict} option (described below) is enabled.
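An encoding can also be selected explicitly. For example, the following invocation (with an illustrative choice of encoding) requests the lighter variant of \textit{poly\_guards} described below:

\prew
\textbf{sledgehammer} [\textit{type\_enc} = \textit{poly\_guards}??]
\postw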
\begin{enum}
\item[\labelitemi] \textbf{\textit{erased} (unsound):} No type information is supplied to the ATP, not even to resolve overloading. Types are simply erased.

\item[\labelitemi] \textbf{\textit{poly\_guards} (sound):} Types are encoded using a predicate \const{g}$(\tau, t)$ that guards bound variables. Constants are annotated with their types, supplied as extra arguments, to resolve overloading.

\item[\labelitemi] \textbf{\textit{poly\_tags} (sound):} Each term and subterm is tagged with its type using a function $\const{t\/}(\tau, t)$.

\item[\labelitemi] \textbf{\textit{poly\_args} (unsound):} As for \textit{poly\_guards}, constants are annotated with their types to resolve overloading, but otherwise no type information is encoded. This is the default encoding used by the \textit{metis} proof method.

\item[\labelitemi]
\textbf{%
\textit{raw\_mono\_guards}, \textit{raw\_mono\_tags} (sound); \\
\textit{raw\_mono\_args} (unsound):} \\
Similar to \textit{poly\_guards}, \textit{poly\_tags}, and \textit{poly\_args}, respectively, but the problem is additionally monomorphized, meaning that type variables are instantiated with heuristically chosen ground types. Monomorphization can simplify reasoning but also leads to larger fact bases, which can slow down the ATPs.

\item[\labelitemi]
\textbf{%
\textit{mono\_guards}, \textit{mono\_tags} (sound);
\textit{mono\_args} (unsound):} \\
Similar to \textit{raw\_mono\_guards}, \textit{raw\_mono\_tags}, and \textit{raw\_mono\_args}, respectively, but types are mangled in constant names instead of being supplied as ground term arguments. The binary predicate $\const{g}(\tau, t)$ becomes a unary predicate $\const{g\_}\tau(t)$, and the binary function $\const{t}(\tau, t)$ becomes a unary function $\const{t\_}\tau(t)$.

\item[\labelitemi] \textbf{\textit{mono\_native} (sound):} Exploits native first-order types if the prover supports the TF0, TF1, TH0, or TH1 syntax; otherwise, falls back on \textit{mono\_guards}. The problem is monomorphized.

\item[\labelitemi] \textbf{\textit{mono\_native\_higher} (sound):} Exploits native higher-order types if the prover supports the TH0 syntax; otherwise, falls back on \textit{mono\_native} or \textit{mono\_guards}. The problem is monomorphized.

\item[\labelitemi] \textbf{\textit{poly\_native} (sound):} Exploits native first-order polymorphic types if the prover supports the TF1 or TH1 syntax; otherwise, falls back on \textit{mono\_native}.

\item[\labelitemi]
\textbf{%
\textit{poly\_guards}?, \textit{poly\_tags}?, \textit{raw\_mono\_guards}?, \\
\textit{raw\_mono\_tags}?, \textit{mono\_guards}?, \textit{mono\_tags}?, \\
\textit{mono\_native}? (sound*):} \\
The type encodings \textit{poly\_guards}, \textit{poly\_tags}, \textit{raw\_mono\_guards}, \textit{raw\_mono\_tags}, \textit{mono\_guards}, \textit{mono\_tags}, and \textit{mono\_native} are fully typed and sound. For each of these, Sledgehammer also provides a lighter variant identified by a question mark (`\hbox{?}')\ that detects and erases monotonic types, notably infinite types. (For \textit{mono\_native}, the types are not actually erased but rather replaced by a shared uniform type of individuals.) As argument to the \textit{metis} proof method, the question mark is replaced by a \hbox{``\textit{\_query\/}''} suffix.

\item[\labelitemi]
\textbf{%
\textit{poly\_guards}??, \textit{poly\_tags}??, \textit{raw\_mono\_guards}??, \\
\textit{raw\_mono\_tags}??, \textit{mono\_guards}??, \textit{mono\_tags}??
\\ (sound*):} \\
Even lighter versions of the `\hbox{?}' encodings. As argument to the \textit{metis} proof method, the `\hbox{??}' suffix is replaced by \hbox{``\textit{\_query\_query\/}''}.

\item[\labelitemi]
\textbf{%
\textit{poly\_guards}@, \textit{poly\_tags}@, \textit{raw\_mono\_guards}@, \\
\textit{raw\_mono\_tags}@ (sound*):} \\
Alternative versions of the `\hbox{??}' encodings. As argument to the \textit{metis} proof method, the `\hbox{@}' suffix is replaced by \hbox{``\textit{\_at\/}''}.

\item[\labelitemi] \textbf{\textit{poly\_args}?, \textit{raw\_mono\_args}? (unsound):} \\
Lighter versions of \textit{poly\_args} and \textit{raw\_mono\_args}.

\item[\labelitemi] \textbf{\textit{smart}:} The actual encoding used depends on the ATP and should be the most efficient sound encoding for that ATP.
\end{enum}

For SMT solvers, the type encoding is always \textit{mono\_native}, irrespective of the value of this option.

\nopagebreak
{\small See also \textit{max\_new\_mono\_instances} (\S\ref{relevance-filter}) and \textit{max\_mono\_iters} (\S\ref{relevance-filter}).}

\opfalse{strict}{non\_strict}
Specifies whether Sledgehammer should run in its strict mode. In that mode, sound type encodings marked with an asterisk (*) above are made complete for reconstruction with \textit{metis}, at the cost of some clutter in the generated problems. This option has no effect if \textit{type\_enc} is deliberately set to an unsound encoding.
\end{enum}

\subsection{Output Format}
\label{output-format}

\begin{enum}

\opfalse{verbose}{quiet}
Specifies whether the \textbf{sledgehammer} command should explain what it does.

\opfalse{debug}{no\_debug}
Specifies whether Sledgehammer should display additional debugging information beyond what \textit{verbose} already displays. Enabling \textit{debug} also enables \textit{verbose} behind the scenes.

\nopagebreak
{\small See also \textit{spy} (\S\ref{mode-of-operation}) and \textit{overlord} (\S\ref{mode-of-operation}).}

\opsmart{isar\_proofs}{no\_isar\_proofs}
Specifies whether Isar proofs should be output in addition to one-line proofs. The construction of Isar proofs is still experimental and may sometimes fail; however, when it succeeds, the resulting proofs are usually faster and more intelligible than one-line proofs. If the option is set to \textit{smart} (the default), Isar proofs are only generated when no working one-line proof is available.

\opdefault{compress}{int}{smart}
Specifies the granularity of the generated Isar proofs if \textit{isar\_proofs} is explicitly enabled. A value of $n$ indicates that each Isar proof step should correspond to a group of up to $n$ consecutive proof steps in the ATP proof. If the option is set to \textit{smart} (the default), the compression factor is 10 if the \textit{isar\_proofs} option is explicitly enabled; otherwise, it is $\infty$.

\optrueonly{dont\_compress}
Alias for ``\textit{compress} = 1''.

\optrue{try0}{dont\_try0}
Specifies whether standard proof methods such as \textit{auto} and \textit{blast} should be tried as alternatives to \textit{metis} in Isar proofs. The collection of methods is roughly the same as for the \textbf{try0} command.

\optrue{smt\_proofs}{no\_smt\_proofs}
Specifies whether the \textit{smt} proof method should be tried in addition to Isabelle's built-in proof methods.
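For instance, the following invocation (with illustrative values) requests a structured Isar proof whose steps may each combine up to two ATP proof steps:

\prew
\textbf{sledgehammer} [\textit{isar\_proofs}, \textit{compress} = 2]
\postw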
\end{enum} \subsection{Regression Testing} \label{regression-testing} \begin{enum} \opnodefault{expect}{string} Specifies the expected outcome, which must be one of the following: \begin{enum} \item[\labelitemi] \textbf{\textit{some}:} Sledgehammer found a proof. \item[\labelitemi] \textbf{\textit{none}:} Sledgehammer found no proof. \item[\labelitemi] \textbf{\textit{timeout}:} Sledgehammer timed out. \item[\labelitemi] \textbf{\textit{unknown}:} Sledgehammer encountered some problem. \end{enum} Sledgehammer emits an error if the actual outcome differs from the expected outcome. This option is useful for regression testing. \nopagebreak {\small See also \textit{timeout} (\S\ref{timeouts}).} \end{enum} \subsection{Timeouts} \label{timeouts} \begin{enum} \opdefault{timeout}{float}{\upshape 30} Specifies the maximum number of seconds that the automatic provers should spend searching for a proof. This excludes problem preparation and is a soft limit. \opdefault{preplay\_timeout}{float}{\upshape 1} Specifies the maximum number of seconds that \textit{metis} or other proof methods should spend trying to ``preplay'' the found proof. If this option is set to 0, no preplaying takes place, and no timing information is displayed next to the suggested proof method calls. \nopagebreak {\small See also \textit{minimize} (\S\ref{mode-of-operation}).} \optrueonly{dont\_preplay} Alias for ``\textit{preplay\_timeout} = 0''. \end{enum} \let\em=\sl \bibliography{manual}{} \bibliographystyle{abbrv} \end{document} diff --git a/src/Doc/System/document/root.tex b/src/Doc/System/document/root.tex --- a/src/Doc/System/document/root.tex +++ b/src/Doc/System/document/root.tex @@ -1,50 +1,49 @@ \documentclass[12pt,a4paper]{report} -\usepackage{lmodern} \usepackage[T1]{fontenc} \usepackage{supertabular} \usepackage{graphicx} \usepackage{iman,extra,isar} \usepackage[nohyphen,strings]{underscore} \usepackage{isabelle,isabellesym} \usepackage{railsetup} \usepackage{style} \usepackage{pdfsetup} \hyphenation{Isabelle} \hyphenation{Isar} \isadroptag{theory} \isabellestyle{literal} \def\isastylett{\footnotesize\tt} \title{\includegraphics[scale=0.5]{isabelle} \\[4ex] The Isabelle System Manual} \author{\emph{Makarius Wenzel}} \makeindex \begin{document} \maketitle \pagenumbering{roman} \tableofcontents \clearfirst \input{Environment.tex} \input{Sessions.tex} \input{Presentation.tex} \input{Server.tex} \input{Scala.tex} \input{Phabricator.tex} \input{Misc.tex} \begingroup \tocentry{\bibname} \bibliographystyle{abbrv} \small\raggedright\frenchspacing \bibliography{manual} \endgroup \tocentry{\indexname} \printindex \end{document}