diff options
Diffstat (limited to 'doc')
64 files changed, 23744 insertions, 23373 deletions
diff --git a/doc/refman/CanonicalStructures.tex b/doc/refman/CanonicalStructures.tex deleted file mode 100644 index 8961b00964..0000000000 --- a/doc/refman/CanonicalStructures.tex +++ /dev/null @@ -1,383 +0,0 @@ -\achapter{Canonical Structures} -%HEVEA\cutname{canonical-structures.html} -\aauthor{Assia Mahboubi and Enrico Tassi} - -\label{CS-full} -\index{Canonical Structures!presentation} - -\noindent This chapter explains the basics of Canonical Structure and how they can be used -to overload notations and build a hierarchy of algebraic structures. -The examples are taken from~\cite{CSwcu}. We invite the interested reader -to refer to this paper for all the details that are omitted here for brevity. -The interested reader shall also find in~\cite{CSlessadhoc} a detailed -description of another, complementary, use of Canonical Structures: -advanced proof search. This latter papers also presents many techniques one -can employ to tune the inference of Canonical Structures. - -\section{Notation overloading} - -We build an infix notation $==$ for a comparison predicate. Such notation -will be overloaded, and its meaning will depend on the types of the terms -that are compared. - -\begin{coq_eval} -Require Import Arith. -\end{coq_eval} - -\begin{coq_example} -Module EQ. - Record class (T : Type) := Class { cmp : T -> T -> Prop }. - Structure type := Pack { obj : Type; class_of : class obj }. - Definition op (e : type) : obj e -> obj e -> Prop := - let 'Pack _ (Class _ the_cmp) := e in the_cmp. - Check op. - Arguments op {e} x y : simpl never. - Arguments Class {T} cmp. - Module theory. - Notation "x == y" := (op x y) (at level 70). - End theory. -End EQ. -\end{coq_example} - -We use Coq modules as name spaces. This allows us to follow the same pattern -and naming convention for the rest of the chapter. The base name space -contains the definitions of the algebraic structure. 
To keep the example -small, the algebraic structure \texttt{EQ.type} we are defining is very simplistic, -and characterizes terms on which a binary relation is defined, without -requiring such relation to validate any property. -The inner \texttt{theory} module contains the overloaded notation \texttt{==} and -will eventually contain lemmas holding on all the instances of the -algebraic structure (in this case there are no lemmas). - -Note that in practice the user may want to declare \texttt{EQ.obj} as a coercion, -but we will not do that here. - -The following line tests that, when we assume a type \texttt{e} that is in the -\texttt{EQ} class, then we can relates two of its objects with \texttt{==}. - -\begin{coq_example} -Import EQ.theory. -Check forall (e : EQ.type) (a b : EQ.obj e), a == b. -\end{coq_example} - -Still, no concrete type is in the \texttt{EQ} class. We amend that by equipping \texttt{nat} -with a comparison relation. - -\begin{coq_example} -Fail Check 3 == 3. -Definition nat_eq (x y : nat) := nat_compare x y = Eq. -Definition nat_EQcl : EQ.class nat := EQ.Class nat_eq. -Canonical Structure nat_EQty : EQ.type := EQ.Pack nat nat_EQcl. -Check 3 == 3. -Eval compute in 3 == 4. -\end{coq_example} - -This last test shows that Coq is now not only able to typecheck \texttt{3==3}, but -also that the infix relation was bound to the \texttt{nat\_eq} relation. This -relation is selected whenever \texttt{==} is used on terms of type \texttt{nat}. This -can be read in the line declaring the canonical structure \texttt{nat\_EQty}, -where the first argument to \texttt{Pack} is the key and its second argument -a group of canonical values associated to the key. In this case we associate -to \texttt{nat} only one canonical value (since its class, \texttt{nat\_EQcl} has just one -member). The use of the projection \texttt{op} requires its argument to be in -the class \texttt{EQ}, and uses such a member (function) to actually compare -its arguments. 
- -Similarly, we could equip any other type with a comparison relation, and -use the \texttt{==} notation on terms of this type. - -\subsection{Derived Canonical Structures} - -We know how to use \texttt{==} on base types, like \texttt{nat}, \texttt{bool}, \texttt{Z}. -Here we show how to deal with type constructors, i.e. how to make the -following example work: - -\begin{coq_example} -Fail Check forall (e : EQ.type) (a b : EQ.obj e), (a,b) == (a,b). -\end{coq_example} - -The error message is telling that Coq has no idea on how to compare -pairs of objects. The following construction is telling Coq exactly how to do -that. - -\begin{coq_example} -Definition pair_eq (e1 e2 : EQ.type) (x y : EQ.obj e1 * EQ.obj e2) := - fst x == fst y /\ snd x == snd y. -Definition pair_EQcl e1 e2 := EQ.Class (pair_eq e1 e2). -Canonical Structure pair_EQty (e1 e2 : EQ.type) : EQ.type := - EQ.Pack (EQ.obj e1 * EQ.obj e2) (pair_EQcl e1 e2). -Check forall (e : EQ.type) (a b : EQ.obj e), (a,b) == (a,b). -Check forall n m : nat, (3,4) == (n,m). -\end{coq_example} - -Thanks to the \texttt{pair\_EQty} declaration, Coq is able to build a comparison -relation for pairs whenever it is able to build a comparison relation -for each component of the pair. The declaration associates to the key -\texttt{*} (the type constructor of pairs) the canonical comparison relation -\texttt{pair\_eq} whenever the type constructor \texttt{*} is applied to two types -being themselves in the \texttt{EQ} class. - -\section{Hierarchy of structures} - -To get to an interesting example we need another base class to be available. -We choose the class of types that are equipped with an order relation, -to which we associate the infix \texttt{<=} notation. - -\begin{coq_example} -Module LE. - Record class T := Class { cmp : T -> T -> Prop }. - Structure type := Pack { obj : Type; class_of : class obj }. - Definition op (e : type) : obj e -> obj e -> Prop := - let 'Pack _ (Class _ f) := e in f. 
- Arguments op {_} x y : simpl never. - Arguments Class {T} cmp. - Module theory. - Notation "x <= y" := (op x y) (at level 70). - End theory. -End LE. -\end{coq_example} - -As before we register a canonical \texttt{LE} class for \texttt{nat}. - -\begin{coq_example} -Import LE.theory. -Definition nat_le x y := nat_compare x y <> Gt. -Definition nat_LEcl : LE.class nat := LE.Class nat_le. -Canonical Structure nat_LEty : LE.type := LE.Pack nat nat_LEcl. -\end{coq_example} - -And we enable Coq to relate pair of terms with \texttt{<=}. - -\begin{coq_example} -Definition pair_le e1 e2 (x y : LE.obj e1 * LE.obj e2) := - fst x <= fst y /\ snd x <= snd y. -Definition pair_LEcl e1 e2 := LE.Class (pair_le e1 e2). -Canonical Structure pair_LEty (e1 e2 : LE.type) : LE.type := - LE.Pack (LE.obj e1 * LE.obj e2) (pair_LEcl e1 e2). -Check (3,4,5) <= (3,4,5). -\end{coq_example} - -At the current stage we can use \texttt{==} and \texttt{<=} on concrete types, -like tuples of natural numbers, but we can't develop an algebraic -theory over the types that are equipped with both relations. - -\begin{coq_example} -Check 2 <= 3 /\ 2 == 2. -Fail Check forall (e : EQ.type) (x y : EQ.obj e), x <= y -> y <= x -> x == y. -Fail Check forall (e : LE.type) (x y : LE.obj e), x <= y -> y <= x -> x == y. -\end{coq_example} - -We need to define a new class that inherits from both \texttt{EQ} and \texttt{LE}. - -\begin{coq_example} -Module LEQ. - Record mixin (e : EQ.type) (le : EQ.obj e -> EQ.obj e -> Prop) := - Mixin { compat : forall x y : EQ.obj e, le x y /\ le y x <-> x == y }. - Record class T := Class { - EQ_class : EQ.class T; - LE_class : LE.class T; - extra : mixin (EQ.Pack T EQ_class) (LE.cmp T LE_class) }. - Structure type := _Pack { obj : Type; class_of : class obj }. - Arguments Mixin {e le} _. - Arguments Class {T} _ _ _. -\end{coq_example} - -The \texttt{mixin} component of the \texttt{LEQ} class contains all the extra content -we are adding to \texttt{EQ} and \texttt{LE}. 
In particular it contains the requirement -that the two relations we are combining are compatible. - -Unfortunately there is still an obstacle to developing the algebraic theory -of this new class. - -\begin{coq_example} - Module theory. - Fail Check forall (le : type) (n m : obj le), n <= m -> n <= m -> n == m. -\end{coq_example} - -The problem is that the two classes \texttt{LE} and \texttt{LEQ} are not yet related by -a subclass relation. In other words Coq does not see that an object -of the \texttt{LEQ} class is also an object of the \texttt{LE} class. - -The following two constructions tell Coq how to canonically build -the \texttt{LE.type} and \texttt{EQ.type} structure given an \texttt{LEQ.type} structure -on the same type. - -\begin{coq_example} - Definition to_EQ (e : type) : EQ.type := - EQ.Pack (obj e) (EQ_class _ (class_of e)). - Canonical Structure to_EQ. - Definition to_LE (e : type) : LE.type := - LE.Pack (obj e) (LE_class _ (class_of e)). - Canonical Structure to_LE. -\end{coq_example} -We can now formulate out first theorem on the objects of the \texttt{LEQ} structure. -\begin{coq_example} - Lemma lele_eq (e : type) (x y : obj e) : x <= y -> y <= x -> x == y. - now intros; apply (compat _ _ (extra _ (class_of e)) x y); split. Qed. - Arguments lele_eq {e} x y _ _. - End theory. -End LEQ. -Import LEQ.theory. -Check lele_eq. -\end{coq_example} - -Of course one would like to apply results proved in the algebraic -setting to any concrete instate of the algebraic structure. - -\begin{coq_example} -Example test_algebraic (n m : nat) : n <= m -> m <= n -> n == m. - Fail apply (lele_eq n m). Abort. -Example test_algebraic2 (l1 l2 : LEQ.type) (n m : LEQ.obj l1 * LEQ.obj l2) : - n <= m -> m <= n -> n == m. - Fail apply (lele_eq n m). Abort. -\end{coq_example} - -Again one has to tell Coq that the type \texttt{nat} is in the \texttt{LEQ} class, and how -the type constructor \texttt{*} interacts with the \texttt{LEQ} class. 
In the following, -proofs are omitted for brevity.
- Canonical Structure pair_LEQty (l1 l2 : LEQ.type) : LEQ.type := - LEQ._Pack (LEQ.obj l1 * LEQ.obj l2) - (LEQ.Class - (EQ.class_of (pair_EQty (to_EQ l1) (to_EQ l2))) - (LE.class_of (pair_LEty (to_LE l1) (to_LE l2))) - (pair_LEQmx l1 l2)). - Example test_algebraic (n m : nat) : n <= m -> m <= n -> n == m. - now apply (lele_eq n m). Qed. - Example test_algebraic2 (n m : nat * nat) : n <= m -> m <= n -> n == m. - now apply (lele_eq n m). Qed. -End Add_instance_attempt. -\end{coq_example} - -Note that no direct proof of \texttt{n <= m -> m <= n -> n == m} is provided by the -user for \texttt{n} and \texttt{m} of type \texttt{nat * nat}. What the user provides is a proof of -this statement for \texttt{n} and \texttt{m} of type \texttt{nat} and a proof that the pair -constructor preserves this property. The combination of these two facts is a -simple form of proof search that Coq performs automatically while inferring -canonical structures. - -\subsection{Compact declaration of Canonical Structures} - -We need some infrastructure for that. - -\begin{coq_example*} -Require Import Strings.String. -\end{coq_example*} -\begin{coq_example} -Module infrastructure. - Inductive phantom {T : Type} (t : T) : Type := Phantom. - Definition unify {T1 T2} (t1 : T1) (t2 : T2) (s : option string) := - phantom t1 -> phantom t2. - Definition id {T} {t : T} (x : phantom t) := x. - Notation "[find v | t1 ~ t2 ] p" := (fun v (_ : unify t1 t2 None) => p) - (at level 50, v ident, only parsing). - Notation "[find v | t1 ~ t2 | s ] p" := (fun v (_ : unify t1 t2 (Some s)) => p) - (at level 50, v ident, only parsing). - Notation "'Error : t : s" := (unify _ t (Some s)) - (at level 50, format "''Error' : t : s"). - Open Scope string_scope. -End infrastructure. -\end{coq_example} - -To explain the notation \texttt{[find v | t1 \textasciitilde t2]} let us pick one -of its instances: \texttt{[find e | EQ.obj e \textasciitilde T | "is not an EQ.type" ]}. 
-It should be read as: ``find a class e such that its objects have type T -or fail with message "T is not an EQ.type"''. - -The other utilities are used to ask Coq to solve a specific unification -problem, that will in turn require the inference of some canonical -structures. They are explained in mode details in~\cite{CSwcu}. - -We now have all we need to create a compact ``packager'' to declare -instances of the \texttt{LEQ} class. - -\begin{coq_example} -Import infrastructure. -Definition packager T e0 le0 (m0 : LEQ.mixin e0 le0) := - [find e | EQ.obj e ~ T | "is not an EQ.type" ] - [find o | LE.obj o ~ T | "is not an LE.type" ] - [find ce | EQ.class_of e ~ ce ] - [find co | LE.class_of o ~ co ] - [find m | m ~ m0 | "is not the right mixin" ] - LEQ._Pack T (LEQ.Class ce co m). -Notation Pack T m := (packager T _ _ m _ id _ id _ id _ id _ id). -\end{coq_example} - -The object \texttt{Pack} takes a type \texttt{T} (the key) and a mixin \texttt{m}. It infers all -the other pieces of the class \texttt{LEQ} and declares them as canonical values -associated to the \texttt{T} key. All in all, the only new piece of information -we add in the \texttt{LEQ} class is the mixin, all the rest is already canonical -for \texttt{T} and hence can be inferred by Coq. - -\texttt{Pack} is a notation, hence it is not type checked at the time of its -declaration. It will be type checked when it is used, an in that case -\texttt{T} is going to be a concrete type. The odd arguments \texttt{\_} and \texttt{id} we -pass to the -packager represent respectively the classes to be inferred (like \texttt{e}, \texttt{o}, etc) and a token (\texttt{id}) to force their inference. Again, for all the details the -reader can refer to~\cite{CSwcu}. - -The declaration of canonical instances can now be way more compact: - -\begin{coq_example} -Canonical Structure nat_LEQty := Eval hnf in Pack nat nat_LEQmx. 
-Canonical Structure pair_LEQty (l1 l2 : LEQ.type) := - Eval hnf in Pack (LEQ.obj l1 * LEQ.obj l2) (pair_LEQmx l1 l2). -\end{coq_example} - -Error messages are also quite intelligible (if one skips to the end of -the message). - -\begin{coq_example} -Fail Canonical Structure err := Eval hnf in Pack bool nat_LEQmx. -\end{coq_example} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/Cases.tex b/doc/refman/Cases.tex deleted file mode 100644 index 376ef031db..0000000000 --- a/doc/refman/Cases.tex +++ /dev/null @@ -1,843 +0,0 @@ -\achapter{Extended pattern-matching} -%HEVEA\cutname{cases.html} -%BEGIN LATEX -\defaultheaders -%END LATEX -\aauthor{Cristina Cornes and Hugo Herbelin} - -\label{Mult-match-full} -\ttindex{Cases} -\index{ML-like patterns} - -This section describes the full form of pattern-matching in {\Coq} terms. - -\asection{Patterns}\label{implementation} The full syntax of {\tt -match} is presented in Figures~\ref{term-syntax} -and~\ref{term-syntax-aux}. Identifiers in patterns are either -constructor names or variables. Any identifier that is not the -constructor of an inductive or co-inductive type is considered to be a -variable. A variable name cannot occur more than once in a given -pattern. It is recommended to start variable names by a lowercase -letter. - -If a pattern has the form $(c~\vec{x})$ where $c$ is a constructor -symbol and $\vec{x}$ is a linear vector of (distinct) variables, it is -called {\em simple}: it is the kind of pattern recognized by the basic -version of {\tt match}. On the opposite, if it is a variable $x$ or -has the form $(c~\vec{p})$ with $p$ not only made of variables, the -pattern is called {\em nested}. - -A variable pattern matches any value, and the identifier is bound to -that value. The pattern ``\texttt{\_}'' (called ``don't care'' or -``wildcard'' symbol) also matches any value, but does not bind -anything. 
It may occur an arbitrary number of times in a -pattern. Alias patterns written \texttt{(}{\sl pattern} \texttt{as} -{\sl identifier}\texttt{)} are also accepted. This pattern matches the -same values as {\sl pattern} does and {\sl identifier} is bound to the -matched value. -A pattern of the form {\pattern}{\tt |}{\pattern} is called -disjunctive. A list of patterns separated with commas is also -considered as a pattern and is called {\em multiple pattern}. However -multiple patterns can only occur at the root of pattern-matching -equations. Disjunctions of {\em multiple pattern} are allowed though. - -Since extended {\tt match} expressions are compiled into the primitive -ones, the expressiveness of the theory remains the same. Once the -stage of parsing has finished only simple patterns remain. Re-nesting -of pattern is performed at printing time. An easy way to see the -result of the expansion is to toggle off the nesting performed at -printing (use here {\tt Set Printing Matching}), then by printing the term -with \texttt{Print} if the term is a constant, or using the command -\texttt{Check}. - -The extended \texttt{match} still accepts an optional {\em elimination -predicate} given after the keyword \texttt{return}. Given a pattern -matching expression, if all the right-hand-sides of \texttt{=>} ({\em -rhs} in short) have the same type, then this type can be sometimes -synthesized, and so we can omit the \texttt{return} part. Otherwise -the predicate after \texttt{return} has to be provided, like for the basic -\texttt{match}. - -Let us illustrate through examples the different aspects of extended -pattern matching. Consider for example the function that computes the -maximum of two natural numbers. We can write it in primitive syntax -by: - -\begin{coq_example} -Fixpoint max (n m:nat) {struct m} : nat := - match n with - | O => m - | S n' => match m with - | O => S n' - | S m' => S (max n' m') - end - end. 
-\end{coq_example} - -\paragraph{Multiple patterns} - -Using multiple patterns in the definition of {\tt max} lets us write: - -\begin{coq_eval} -Reset max. -\end{coq_eval} -\begin{coq_example} -Fixpoint max (n m:nat) {struct m} : nat := - match n, m with - | O, _ => m - | S n', O => S n' - | S n', S m' => S (max n' m') - end. -\end{coq_example} - -which will be compiled into the previous form. - -The pattern-matching compilation strategy examines patterns from left -to right. A \texttt{match} expression is generated {\bf only} when -there is at least one constructor in the column of patterns. E.g. the -following example does not build a \texttt{match} expression. - -\begin{coq_example} -Check (fun x:nat => match x return nat with - | y => y - end). -\end{coq_example} - -\paragraph{Aliasing subpatterns} - -We can also use ``\texttt{as} {\ident}'' to associate a name to a -sub-pattern: - -\begin{coq_eval} -Reset max. -\end{coq_eval} -\begin{coq_example} -Fixpoint max (n m:nat) {struct n} : nat := - match n, m with - | O, _ => m - | S n' as p, O => p - | S n', S m' => S (max n' m') - end. -\end{coq_example} - -\paragraph{Nested patterns} - -Here is now an example of nested patterns: - -\begin{coq_example} -Fixpoint even (n:nat) : bool := - match n with - | O => true - | S O => false - | S (S n') => even n' - end. -\end{coq_example} - -This is compiled into: - -\begin{coq_example} -Unset Printing Matching. -Print even. -\end{coq_example} -\begin{coq_eval} -Set Printing Matching. -\end{coq_eval} - -In the previous examples patterns do not conflict with, but -sometimes it is comfortable to write patterns that admit a non -trivial superposition. Consider -the boolean function \texttt{lef} that given two natural numbers -yields \texttt{true} if the first one is less or equal than the second -one and \texttt{false} otherwise. 
We can write it as follows: - -\begin{coq_example} -Fixpoint lef (n m:nat) {struct m} : bool := - match n, m with - | O, x => true - | x, O => false - | S n, S m => lef n m - end. -\end{coq_example} - -Note that the first and the second multiple pattern superpose because -the couple of values \texttt{O O} matches both. Thus, what is the result -of the function on those values? To eliminate ambiguity we use the -{\em textual priority rule}: we consider patterns ordered from top to -bottom, then a value is matched by the pattern at the $ith$ row if and -only if it is not matched by some pattern of a previous row. Thus in the -example, -\texttt{O O} is matched by the first pattern, and so \texttt{(lef O O)} -yields \texttt{true}. - -Another way to write this function is: - -\begin{coq_eval} -Reset lef. -\end{coq_eval} -\begin{coq_example} -Fixpoint lef (n m:nat) {struct m} : bool := - match n, m with - | O, x => true - | S n, S m => lef n m - | _, _ => false - end. -\end{coq_example} - -Here the last pattern superposes with the first two. Because -of the priority rule, the last pattern -will be used only for values that do not match neither the first nor -the second one. - -Terms with useless patterns are not accepted by the -system. Here is an example: -% Test failure: "This clause is redundant." -\begin{coq_eval} -Set Printing Depth 50. -\end{coq_eval} -\begin{coq_example} -Fail Check (fun x:nat => - match x with - | O => true - | S _ => false - | x => true - end). -\end{coq_example} - -\paragraph{Disjunctive patterns} - -Multiple patterns that share the same right-hand-side can be -factorized using the notation \nelist{\multpattern}{\tt |}. For instance, -{\tt max} can be rewritten as follows: - -\begin{coq_eval} -Reset max. -\end{coq_eval} -\begin{coq_example} -Fixpoint max (n m:nat) {struct m} : nat := - match n, m with - | S n', S m' => S (max n' m') - | 0, p | p, 0 => p - end. 
-\end{coq_example} - -Similarly, factorization of (non necessary multiple) patterns -that share the same variables is possible by using the notation -\nelist{\pattern}{\tt |}. Here is an example: - -\begin{coq_example} -Definition filter_2_4 (n:nat) : nat := - match n with - | 2 as m | 4 as m => m - | _ => 0 - end. -\end{coq_example} - -Here is another example using disjunctive subpatterns. - -\begin{coq_example} -Definition filter_some_square_corners (p:nat*nat) : nat*nat := - match p with - | ((2 as m | 4 as m), (3 as n | 5 as n)) => (m,n) - | _ => (0,0) - end. -\end{coq_example} - -\asection{About patterns of parametric types} -\paragraph{Parameters in patterns} -When matching objects of a parametric type, parameters do not bind in patterns. -They must be substituted by ``\_''. -Consider for example the type of polymorphic lists: - -\begin{coq_example} -Inductive List (A:Set) : Set := - | nil : List A - | cons : A -> List A -> List A. -\end{coq_example} - -We can check the function {\em tail}: - -\begin{coq_example} -Check - (fun l:List nat => - match l with - | nil _ => nil nat - | cons _ _ l' => l' - end). -\end{coq_example} - - -When we use parameters in patterns there is an error message: -% Test failure: "The parameters do not bind in patterns." -\begin{coq_eval} -Set Printing Depth 50. -\end{coq_eval} -\begin{coq_example} -Fail Check - (fun l:List nat => - match l with - | nil A => nil nat - | cons A _ l' => l' - end). -\end{coq_example} - -The option {\tt Set Asymmetric Patterns} \optindex{Asymmetric Patterns} -(off by default) removes parameters from constructors in patterns: -\begin{coq_example} - Set Asymmetric Patterns. - Check (fun l:List nat => - match l with - | nil => nil - | cons _ l' => l' - end) - Unset Asymmetric Patterns. -\end{coq_example} - -\paragraph{Implicit arguments in patterns} -By default, implicit arguments are omitted in patterns. So we write: - -\begin{coq_example} -Arguments nil [A]. -Arguments cons [A] _ _. 
-Check - (fun l:List nat => - match l with - | nil => nil - | cons _ l' => l' - end). -\end{coq_example} - -But the possibility to use all the arguments is given by ``{\tt @}'' implicit -explicitations (as for terms~\ref{Implicits-explicitation}). - -\begin{coq_example} -Check - (fun l:List nat => - match l with - | @nil _ => @nil nat - | @cons _ _ l' => l' - end). -\end{coq_example} - -\asection{Matching objects of dependent types} -The previous examples illustrate pattern matching on objects of -non-dependent types, but we can also -use the expansion strategy to destructure objects of dependent type. -Consider the type \texttt{listn} of lists of a certain length: -\label{listn} - -\begin{coq_example} -Inductive listn : nat -> Set := - | niln : listn 0 - | consn : forall n:nat, nat -> listn n -> listn (S n). -\end{coq_example} - -\asubsection{Understanding dependencies in patterns} -We can define the function \texttt{length} over \texttt{listn} by: - -\begin{coq_example} -Definition length (n:nat) (l:listn n) := n. -\end{coq_example} - -Just for illustrating pattern matching, -we can define it by case analysis: - -\begin{coq_eval} -Reset length. -\end{coq_eval} -\begin{coq_example} -Definition length (n:nat) (l:listn n) := - match l with - | niln => 0 - | consn n _ _ => S n - end. -\end{coq_example} - -We can understand the meaning of this definition using the -same notions of usual pattern matching. - -% -% Constraining of dependencies is not longer valid in V7 -% -\iffalse -Now suppose we split the second pattern of \texttt{length} into two -cases so to give an -alternative definition using nested patterns: -\begin{coq_example} -Definition length1 (n:nat) (l:listn n) := - match l with - | niln => 0 - | consn n _ niln => S n - | consn n _ (consn _ _ _) => S n - end. -\end{coq_example} - -It is obvious that \texttt{length1} is another version of -\texttt{length}. 
We can also give the following definition: -\begin{coq_example} -Definition length2 (n:nat) (l:listn n) := - match l with - | niln => 0 - | consn n _ niln => 1 - | consn n _ (consn m _ _) => S (S m) - end. -\end{coq_example} - -If we forget that \texttt{listn} is a dependent type and we read these -definitions using the usual semantics of pattern matching, we can conclude -that \texttt{length1} -and \texttt{length2} are different functions. -In fact, they are equivalent -because the pattern \texttt{niln} implies that \texttt{n} can only match -the value $0$ and analogously the pattern \texttt{consn} determines that \texttt{n} can -only match values of the form $(S~v)$ where $v$ is the value matched by -\texttt{m}. - -The converse is also true. If -we destructure the length value with the pattern \texttt{O} then the list -value should be $niln$. -Thus, the following term \texttt{length3} corresponds to the function -\texttt{length} but this time defined by case analysis on the dependencies instead of on the list: - -\begin{coq_example} -Definition length3 (n:nat) (l:listn n) := - match l with - | niln => 0 - | consn O _ _ => 1 - | consn (S n) _ _ => S (S n) - end. -\end{coq_example} - -When we have nested patterns of dependent types, the semantics of -pattern matching becomes a little more difficult because -the set of values that are matched by a sub-pattern may be conditioned by the -values matched by another sub-pattern. Dependent nested patterns are -somehow constrained patterns. -In the examples, the expansion of -\texttt{length1} and \texttt{length2} yields exactly the same term - but the -expansion of \texttt{length3} is completely different. \texttt{length1} and -\texttt{length2} are expanded into two nested case analysis on -\texttt{listn} while \texttt{length3} is expanded into a case analysis on -\texttt{listn} containing a case analysis on natural numbers inside. 
- - -In practice the user can think about the patterns as independent and -it is the expansion algorithm that cares to relate them. \\ -\fi -% -% -% - -\asubsection{When the elimination predicate must be provided} -\paragraph{Dependent pattern matching} -The examples given so far do not need an explicit elimination predicate - because all the rhs have the same type and the -strategy succeeds to synthesize it. -Unfortunately when dealing with dependent patterns it often happens -that we need to write cases where the type of the rhs are -different instances of the elimination predicate. -The function \texttt{concat} for \texttt{listn} -is an example where the branches have different type -and we need to provide the elimination predicate: - -\begin{coq_example} -Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : - listn (n + m) := - match l in listn n return listn (n + m) with - | niln => l' - | consn n' a y => consn (n' + m) a (concat n' y m l') - end. -\end{coq_example} -The elimination predicate is {\tt fun (n:nat) (l:listn n) => listn~(n+m)}. -In general if $m$ has type {\tt (}$I$ $q_1$ {\ldots} $q_r$ $t_1$ {\ldots} $t_s${\tt )} where -$q_1$, {\ldots}, $q_r$ are parameters, the elimination predicate should be of -the form~: -{\tt fun} $y_1$ {\ldots} $y_s$ $x${\tt :}($I$~$q_1$ {\ldots} $q_r$ $y_1$ {\ldots} - $y_s${\tt ) =>} $Q$. - -In the concrete syntax, it should be written~: -\[ \kw{match}~m~\kw{as}~x~\kw{in}~(I~\_~\mbox{\ldots}~\_~y_1~\mbox{\ldots}~y_s)~\kw{return}~Q~\kw{with}~\mbox{\ldots}~\kw{end}\] - -The variables which appear in the \kw{in} and \kw{as} clause are new -and bounded in the property $Q$ in the \kw{return} clause. The -parameters of the inductive definitions should not be mentioned and -are replaced by \kw{\_}. - -\paragraph{Multiple dependent pattern matching} -Recall that a list of patterns is also a pattern. 
So, when we destructure several -terms at the same time and the branches have different types we need to provide the -elimination predicate for this multiple pattern. It is done using the same -scheme, each term may be associated to an \kw{as} and \kw{in} clause in order to -introduce a dependent product. - -For example, an equivalent definition for \texttt{concat} (even though the -matching on the second term is trivial) would have been: - -\begin{coq_eval} -Reset concat. -\end{coq_eval} -\begin{coq_example} -Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : - listn (n + m) := - match l in listn n, l' return listn (n + m) with - | niln, x => x - | consn n' a y, x => consn (n' + m) a (concat n' y m x) - end. -\end{coq_example} - -Even without real matching over the second term, this construction can be used to -keep types linked. If {\tt a} and {\tt b} are two {\tt listn} of the same length, -by writing -\begin{coq_eval} - Unset Printing Matching. -\end{coq_eval} -\begin{coq_example} -Check (fun n (a b: listn n) => match a,b with - |niln,b0 => tt - |consn n' a y, bS => tt -end). -\end{coq_example} -\begin{coq_eval} - Set Printing Matching. -\end{coq_eval} - -I have a copy of {\tt b} in type {\tt listn 0} resp {\tt listn (S n')}. - -% Notice that this time, the predicate \texttt{[n,\_:nat](listn (plus n -% m))} is binary because we -% destructure both \texttt{l} and \texttt{l'} whose types have arity one. -% In general, if we destructure the terms $e_1\ldots e_n$ -% the predicate will be of arity $m$ where $m$ is the sum of the -% number of dependencies of the type of $e_1, e_2,\ldots e_n$ -% (the $\lambda$-abstractions -% should correspond from left to right to each dependent argument of the -% type of $e_1\ldots e_n$). -% When the arity of the predicate (i.e. number of abstractions) is not -% correct Coq raises an error message. For example: - -% % Test failure -% \begin{coq_eval} -% Reset concat. -% Set Printing Depth 50. 
-% (********** The following is not correct and should produce ***********) -% (** Error: the term l' has type listn m while it is expected to have **) -% (** type listn (?31 + ?32) **) -% \end{coq_eval} -% \begin{coq_example} -% Fixpoint concat -% (n:nat) (l:listn n) (m:nat) -% (l':listn m) {struct l} : listn (n + m) := -% match l, l' with -% | niln, x => x -% | consn n' a y, x => consn (n' + m) a (concat n' y m x) -% end. -% \end{coq_example} - -\paragraph{Patterns in {\tt in}} -\label{match-in-patterns} - -If the type of the matched term is more precise than an inductive applied to -variables, arguments of the inductive in the {\tt in} branch can be more -complicated patterns than a variable. - -Moreover, constructors whose type do not follow the same pattern will -become impossible branches. In an impossible branch, you can answer -anything but {\tt False\_rect unit} has the advantage to be subterm of -anything. % ??? - -To be concrete: the {\tt tail} function can be written: -\begin{coq_example} -Definition tail n (v: listn (S n)) := - match v in listn (S m) return listn m with - | niln => False_rect unit - | consn n' a y => y - end. -\end{coq_example} -and {\tt tail n v} will be subterm of {\tt v}. - -\asection{Using pattern matching to write proofs} -In all the previous examples the elimination predicate does not depend -on the object(s) matched. But it may depend and the typical case -is when we write a proof by induction or a function that yields an -object of dependent type. An example of proof using \texttt{match} in -given in Section~\ref{refine-example}. - -For example, we can write -the function \texttt{buildlist} that given a natural number -$n$ builds a list of length $n$ containing zeros as follows: - -\begin{coq_example} -Fixpoint buildlist (n:nat) : listn n := - match n return listn n with - | O => niln - | S n => consn n 0 (buildlist n) - end. -\end{coq_example} - -We can also use multiple patterns. 
-Consider the following definition of the predicate less-equal -\texttt{Le}: - -\begin{coq_example} -Inductive LE : nat -> nat -> Prop := - | LEO : forall n:nat, LE 0 n - | LES : forall n m:nat, LE n m -> LE (S n) (S m). -\end{coq_example} - -We can use multiple patterns to write the proof of the lemma - \texttt{forall (n m:nat), (LE n m)}\verb=\/=\texttt{(LE m n)}: - -\begin{coq_example} -Fixpoint dec (n m:nat) {struct n} : LE n m \/ LE m n := - match n, m return LE n m \/ LE m n with - | O, x => or_introl (LE x 0) (LEO x) - | x, O => or_intror (LE x 0) (LEO x) - | S n as n', S m as m' => - match dec n m with - | or_introl h => or_introl (LE m' n') (LES n m h) - | or_intror h => or_intror (LE n' m') (LES m n h) - end - end. -\end{coq_example} -In the example of \texttt{dec}, -the first \texttt{match} is dependent while -the second is not. - -% In general, consider the terms $e_1\ldots e_n$, -% where the type of $e_i$ is an instance of a family type -% $\lb (\vec{d_i}:\vec{D_i}) \mto T_i$ ($1\leq i -% \leq n$). Then, in expression \texttt{match} $e_1,\ldots, -% e_n$ \texttt{of} \ldots \texttt{end}, the -% elimination predicate ${\cal P}$ should be of the form: -% $[\vec{d_1}:\vec{D_1}][x_1:T_1]\ldots [\vec{d_n}:\vec{D_n}][x_n:T_n]Q.$ - -The user can also use \texttt{match} in combination with the tactic -\texttt{refine} (see Section~\ref{refine}) to build incomplete proofs -beginning with a \texttt{match} construction. - -\asection{Pattern-matching on inductive objects involving local -definitions} - -If local definitions occur in the type of a constructor, then there are two ways -to match on this constructor. Either the local definitions are skipped and -matching is done only on the true arguments of the constructors, or the bindings -for local definitions can also be caught in the matching. - -Example. - -\begin{coq_eval} -Reset Initial. -Require Import Arith. 
-\end{coq_eval} - -\begin{coq_example*} -Inductive list : nat -> Set := - | nil : list 0 - | cons : forall n:nat, let m := (2 * n) in list m -> list (S (S m)). -\end{coq_example*} - -In the next example, the local definition is not caught. - -\begin{coq_example} -Fixpoint length n (l:list n) {struct l} : nat := - match l with - | nil => 0 - | cons n l0 => S (length (2 * n) l0) - end. -\end{coq_example} - -But in this example, it is. - -\begin{coq_example} -Fixpoint length' n (l:list n) {struct l} : nat := - match l with - | nil => 0 - | @cons _ m l0 => S (length' m l0) - end. -\end{coq_example} - -\Rem for a given matching clause, either none of the local definitions or all of -them can be caught. - -\Rem you can only catch {\tt let} bindings in mode where you bind all variables and so you -have to use @ syntax. - -\Rem this feature is incoherent with the fact that parameters cannot be caught and -consequently is somehow hidden. For example, there is no mention of it in error messages. - -\asection{Pattern-matching and coercions} - -If a mismatch occurs between the expected type of a pattern and its -actual type, a coercion made from constructors is sought. If such a -coercion can be found, it is automatically inserted around the -pattern. - -Example: - -\begin{coq_example} -Inductive I : Set := - | C1 : nat -> I - | C2 : I -> I. -Coercion C1 : nat >-> I. -Check (fun x => match x with - | C2 O => 0 - | _ => 0 - end). -\end{coq_example} - - -\asection{When does the expansion strategy fail ?}\label{limitations} -The strategy works very like in ML languages when treating -patterns of non-dependent type. -But there are new cases of failure that are due to the presence of -dependencies. - -The error messages of the current implementation may be sometimes -confusing. When the tactic fails because patterns are somehow -incorrect then error messages refer to the initial expression. 
But the -strategy may succeed to build an expression whose sub-expressions are -well typed when the whole expression is not. In this situation the -message makes reference to the expanded expression. We encourage -users, when they have patterns with the same outer constructor in -different equations, to name the variable patterns in the same -positions with the same name. -E.g. to write {\small\texttt{(cons n O x) => e1}} -and {\small\texttt{(cons n \_ x) => e2}} instead of -{\small\texttt{(cons n O x) => e1}} and -{\small\texttt{(cons n' \_ x') => e2}}. -This helps to maintain certain name correspondence between the -generated expression and the original. - -Here is a summary of the error messages corresponding to each situation: - -\begin{ErrMsgs} -\item \sverb{The constructor } {\sl - ident} \sverb{ expects } {\sl num} \sverb{ arguments} - - \sverb{The variable } {\sl ident} \sverb{ is bound several times - in pattern } {\sl term} - - \sverb{Found a constructor of inductive type } {\term} - \sverb{ while a constructor of } {\term} \sverb{ is expected} - - Patterns are incorrect (because constructors are not applied to - the correct number of the arguments, because they are not linear or - they are wrongly typed). - -\item \errindex{Non exhaustive pattern-matching} - -The pattern matching is not exhaustive. - -\item \sverb{The elimination predicate } {\sl term} \sverb{ should be - of arity } {\sl num} \sverb{ (for non dependent case) or } {\sl - num} \sverb{ (for dependent case)} - -The elimination predicate provided to \texttt{match} has not the - expected arity. - - -%\item the whole expression is wrongly typed - -% CADUC ? -% , or the synthesis of -% implicit arguments fails (for example to find the elimination -% predicate or to resolve implicit arguments in the rhs). 
- -% There are {\em nested patterns of dependent type}, the elimination -% predicate corresponds to non-dependent case and has the form -% $[x_1:T_1]...[x_n:T_n]T$ and {\bf some} $x_i$ occurs {\bf free} in -% $T$. Then, the strategy may fail to find out a correct elimination -% predicate during some step of compilation. In this situation we -% recommend the user to rewrite the nested dependent patterns into -% several \texttt{match} with {\em simple patterns}. - -\item {\tt Unable to infer a match predicate\\ - Either there is a type incompatibility or the problem involves\\ - dependencies} - - There is a type mismatch between the different branches. - The user should provide an elimination predicate. - -% Obsolete ? -% \item because of nested patterns, it may happen that even though all -% the rhs have the same type, the strategy needs dependent elimination -% and so an elimination predicate must be provided. The system warns -% about this situation, trying to compile anyway with the -% non-dependent strategy. The risen message is: - -% \begin{itemize} -% \item {\tt Warning: This pattern matching may need dependent -% elimination to be compiled. I will try, but if fails try again -% giving dependent elimination predicate.} -% \end{itemize} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% % LA PROPAGATION DES CONTRAINTES ARRIERE N'EST PAS FAITE DANS LA V7 -% TODO -% \item there are {\em nested patterns of dependent type} and the -% strategy builds a term that is well typed but recursive calls in fix -% point are reported as illegal: -% \begin{itemize} -% \item {\tt Error: Recursive call applied to an illegal term ...} -% \end{itemize} - -% This is because the strategy generates a term that is correct w.r.t. -% the initial term but which does not pass the guard condition. In -% this situation we recommend the user to transform the nested dependent -% patterns into {\em several \texttt{match} of simple patterns}. 
Let us -% explain this with an example. Consider the following definition of a -% function that yields the last element of a list and \texttt{O} if it is -% empty: - -% \begin{coq_example} -% Fixpoint last [n:nat; l:(listn n)] : nat := -% match l of -% (consn _ a niln) => a -% | (consn m _ x) => (last m x) | niln => O -% end. -% \end{coq_example} - -% It fails because of the priority between patterns, we know that this -% definition is equivalent to the following more explicit one (which -% fails too): - -% \begin{coq_example*} -% Fixpoint last [n:nat; l:(listn n)] : nat := -% match l of -% (consn _ a niln) => a -% | (consn n _ (consn m b x)) => (last n (consn m b x)) -% | niln => O -% end. -% \end{coq_example*} - -% Note that the recursive call {\tt (last n (consn m b x))} is not -% guarded. When treating with patterns of dependent types the strategy -% interprets the first definition of \texttt{last} as the second -% one\footnote{In languages of the ML family the first definition would -% be translated into a term where the variable \texttt{x} is shared in -% the expression. When patterns are of non-dependent types, Coq -% compiles as in ML languages using sharing. When patterns are of -% dependent types the compilation reconstructs the term as in the -% second definition of \texttt{last} so to ensure the result of -% expansion is well typed.}. Thus it generates a term where the -% recursive call is rejected by the guard condition. - -% You can get rid of this problem by writing the definition with -% \emph{simple patterns}: - -% \begin{coq_example} -% Fixpoint last [n:nat; l:(listn n)] : nat := -% <[_:nat]nat>match l of -% (consn m a x) => Cases x of niln => a | _ => (last m x) end -% | niln => O -% end. 
-% \end{coq_example} - -\end{ErrMsgs} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/Coercion.tex b/doc/refman/Coercion.tex index 53b6b7827a..9862a9b30d 100644 --- a/doc/refman/Coercion.tex +++ b/doc/refman/Coercion.tex @@ -33,7 +33,8 @@ classes: \begin{itemize} \item {\tt Sortclass}, the class of sorts; - its objects are the terms whose type is a sort (e.g., \ssrC{Prop} or \ssrC{Type}). + its objects are the terms whose type is a sort (e.g., \texttt{Prop} + or \texttt{Type}). \item {\tt Funclass}, the class of functions; its objects are all the terms with a functional type, i.e. of form $forall~ x:A, B$. diff --git a/doc/refman/Micromega.tex b/doc/refman/Micromega.tex deleted file mode 100644 index 2617142f5a..0000000000 --- a/doc/refman/Micromega.tex +++ /dev/null @@ -1,256 +0,0 @@ -\achapter{Micromega: tactics for solving arithmetic goals over ordered rings} -%HEVEA\cutname{micromega.html} -\aauthor{Frédéric Besson and Evgeny Makarov} -\newtheorem{theorem}{Theorem} - - -\asection{Short description of the tactics} -\tacindex{psatz} \tacindex{lra} \tacindex{lia} \tacindex{nia} \tacindex{nra} -\label{sec:psatz-hurry} -The {\tt Psatz} module ({\tt Require Import Psatz.}) gives access to -several tactics for solving arithmetic goals over {\tt Z}, {\tt Q}, and -{\tt R}:\footnote{Support for {\tt nat} and {\tt N} is obtained by - pre-processing the goal with the {\tt zify} tactic.}. -It also possible to get the tactics for integers by a {\tt Require Import Lia}, rationals {\tt Require Import Lqa} -and reals {\tt Require Import Lra}. 
-\begin{itemize} -\item {\tt lia} is a decision procedure for linear integer arithmetic (see Section~\ref{sec:lia}); -\item {\tt nia} is an incomplete proof procedure for integer non-linear arithmetic (see Section~\ref{sec:nia}); -\item {\tt lra} is a decision procedure for linear (real or rational) arithmetic (see Section~\ref{sec:lra}); -\item {\tt nra} is an incomplete proof procedure for non-linear (real or rational) arithmetic (see Section~\ref{sec:nra}); -\item {\tt psatz D n} where {\tt D} is {\tt Z} or {\tt Q} or {\tt R}, and - {\tt n} is an optional integer limiting the proof search depth is is an - incomplete proof procedure for non-linear arithmetic. It is based on - John Harrison's HOL Light driver to the external prover {\tt - csdp}\footnote{Sources and binaries can be found at - \url{https://projects.coin-or.org/Csdp}}. Note that the {\tt csdp} - driver is generating a \emph{proof cache} which makes it possible to - rerun scripts even without {\tt csdp} (see Section~\ref{sec:psatz}). -\end{itemize} - -The tactics solve propositional formulas parameterized by atomic arithmetic expressions -interpreted over a domain $D \in \{\mathbb{Z}, \mathbb{Q}, \mathbb{R} \}$. -The syntax of the formulas is the following: -\[ -\begin{array}{lcl} - F &::=& A \mid P \mid \mathit{True} \mid \mathit{False} \mid F_1 \land F_2 \mid F_1 \lor F_2 \mid F_1 \leftrightarrow F_2 \mid F_1 \to F_2 \mid \neg F\\ - A &::=& p_1 = p_2 \mid p_1 > p_2 \mid p_1 < p_2 \mid p_1 \ge p_2 \mid p_1 \le p_2 \\ - p &::=& c \mid x \mid {-}p \mid p_1 - p_2 \mid p_1 + p_2 \mid p_1 \times p_2 \mid p \verb!^! n -\end{array} -\] -where $c$ is a numeric constant, $x\in D$ is a numeric variable, the -operators $-$, $+$, $\times$ are respectively subtraction, addition, -product, $p \verb!^!n $ is exponentiation by a constant $n$, $P$ is an -arbitrary proposition. - % - For {\tt Q}, equality is not Leibniz equality {\tt =} but the equality of rationals {\tt ==}. - -For {\tt Z} (resp. 
{\tt Q} ), $c$ ranges over integer constants (resp. rational constants). -%% The following table details for each domain $D \in \{\mathbb{Z},\mathbb{Q},\mathbb{R}\}$ the range of constants $c$ and exponent $n$. -%% \[ -%% \begin{array}{|c|c|c|c|} -%% \hline -%% &\mathbb{Z} & \mathbb{Q} & \mathbb{R} \\ -%% \hline -%% c &\mathtt{Z} & \mathtt{Q} & (see below) \\ -%% \hline -%% n &\mathtt{Z} & \mathtt{Z} & \mathtt{nat}\\ -%% \hline -%% \end{array} -%% \] -For {\tt R}, the tactic recognizes as real constants the following expressions: -\begin{verbatim} -c ::= R0 | R1 | Rmul(c,c) | Rplus(c,c) | Rminus(c,c) | IZR z | IQR q - | Rdiv(c,c) | Rinv c -\end{verbatim} -where {\tt z} is a constant in {\tt Z} and {\tt q} is a constant in {\tt Q}. -This includes integer constants written using the decimal notation \emph{i.e.,} {\tt c\%R}. - -\asection{\emph{Positivstellensatz} refutations} -\label{sec:psatz-back} - -The name {\tt psatz} is an abbreviation for \emph{positivstellensatz} -- literally positivity theorem -- which -generalizes Hilbert's \emph{nullstellensatz}. -% -It relies on the notion of $\mathit{Cone}$. Given a (finite) set of -polynomials $S$, $\mathit{Cone}(S)$ is inductively defined as the -smallest set of polynomials closed under the following rules: -\[ -\begin{array}{l} -\dfrac{p \in S}{p \in \mathit{Cone}(S)} \quad -\dfrac{}{p^2 \in \mathit{Cone}(S)} \quad -\dfrac{p_1 \in \mathit{Cone}(S) \quad p_2 \in \mathit{Cone}(S) \quad -\Join \in \{+,*\}} {p_1 \Join p_2 \in \mathit{Cone}(S)}\\ -\end{array} -\] -The following theorem provides a proof principle for checking that a set -of polynomial inequalities does not have solutions.\footnote{Variants - deal with equalities and strict inequalities.} -\begin{theorem} - \label{thm:psatz} - Let $S$ be a set of polynomials.\\ - If ${-}1$ belongs to $\mathit{Cone}(S)$ then the conjunction - $\bigwedge_{p \in S} p\ge 0$ is unsatisfiable. 
-\end{theorem} -A proof based on this theorem is called a \emph{positivstellensatz} refutation. -% -The tactics work as follows. Formulas are normalized into conjunctive normal form $\bigwedge_i C_i$ where -$C_i$ has the general form $(\bigwedge_{j\in S_i} p_j \Join 0) \to \mathit{False})$ and $\Join \in \{>,\ge,=\}$ for $D\in -\{\mathbb{Q},\mathbb{R}\}$ and $\Join \in \{\ge, =\}$ for $\mathbb{Z}$. -% -For each conjunct $C_i$, the tactic calls a oracle which searches for $-1$ within the cone. -% -Upon success, the oracle returns a \emph{cone expression} that is normalized by the {\tt ring} tactic (see chapter~\ref{ring}) and checked to be -$-1$. - - -\asection{{\tt lra}: a decision procedure for linear real and rational arithmetic} -\label{sec:lra} -The {\tt lra} tactic is searching for \emph{linear} refutations using -Fourier elimination.\footnote{More efficient linear programming - techniques could equally be employed.} As a result, this tactic -explores a subset of the $\mathit{Cone}$ defined as -\[ -\mathit{LinCone}(S) =\left\{ \left. \sum_{p \in S} \alpha_p \times p~\right| -~\alpha_p \mbox{ are positive constants} \right\}. -\] -The deductive power of {\tt lra} is the combined deductive power of {\tt ring\_simplify} and {\tt fourier}. -% -There is also an overlap with the {\tt field} tactic {\emph e.g.}, {\tt x = 10 * x / 10} is solved by {\tt lra}. - - -\asection{{\tt lia}: a tactic for linear integer arithmetic} -\tacindex{lia} -\label{sec:lia} - -The tactic {\tt lia} offers an alternative to the {\tt omega} and {\tt - romega} tactic (see Chapter~\ref{OmegaChapter}). -% -Roughly speaking, the deductive power of {\tt lia} is the combined deductive power of {\tt ring\_simplify} and {\tt omega}. -% -However, it solves linear goals that {\tt omega} and {\tt romega} do not solve, such as the -following so-called \emph{omega nightmare}~\cite{TheOmegaPaper}. 
-\begin{coq_example*} -Goal forall x y, - 27 <= 11 * x + 13 * y <= 45 -> - -10 <= 7 * x - 9 * y <= 4 -> False. -\end{coq_example*} -\begin{coq_eval} -intros x y; lia. -\end{coq_eval} -The estimation of the relative efficiency of {\tt lia} \emph{vs} {\tt omega} -and {\tt romega} is under evaluation. - -\paragraph{High level view of {\tt lia}.} -Over $\mathbb{R}$, \emph{positivstellensatz} refutations are a complete -proof principle.\footnote{In practice, the oracle might fail to produce - such a refutation.} -% -However, this is not the case over $\mathbb{Z}$. -% -Actually, \emph{positivstellensatz} refutations are not even sufficient -to decide linear \emph{integer} arithmetic. -% -The canonical example is {\tt 2 * x = 1 -> False} which is a theorem of $\mathbb{Z}$ but not a theorem of $\mathbb{R}$. -% -To remedy this weakness, the {\tt lia} tactic is using recursively a combination of: -% -\begin{itemize} -\item linear \emph{positivstellensatz} refutations; -\item cutting plane proofs; -\item case split. -\end{itemize} - -\paragraph{Cutting plane proofs} are a way to take into account the discreetness of $\mathbb{Z}$ by rounding up -(rational) constants up-to the closest integer. -% -\begin{theorem} - Let $p$ be an integer and $c$ a rational constant. - \[ - p \ge c \Rightarrow p \ge \lceil c \rceil - \] -\end{theorem} -For instance, from $2 x = 1$ we can deduce -\begin{itemize} -\item $x \ge 1/2$ which cut plane is $ x \ge \lceil 1/2 \rceil = 1$; -\item $ x \le 1/2$ which cut plane is $ x \le \lfloor 1/2 \rfloor = 0$. -\end{itemize} -By combining these two facts (in normal form) $x - 1 \ge 0$ and $-x \ge -0$, we conclude by exhibiting a \emph{positivstellensatz} refutation: $-1 -\equiv \mathbf{x-1} + \mathbf{-x} \in \mathit{Cone}(\{x-1,x\})$. - -Cutting plane proofs and linear \emph{positivstellensatz} refutations are a complete proof principle for integer linear arithmetic. - -\paragraph{Case split} enumerates over the possible values of an expression. 
-\begin{theorem} - Let $p$ be an integer and $c_1$ and $c_2$ integer constants. - \[ - c_1 \le p \le c_2 \Rightarrow \bigvee_{x \in [c_1,c_2]} p = x - \] -\end{theorem} -Our current oracle tries to find an expression $e$ with a small range $[c_1,c_2]$. -% -We generate $c_2 - c_1$ subgoals which contexts are enriched with an equation $e = i$ for $i \in [c_1,c_2]$ and -recursively search for a proof. - - -\asection{{\tt nra}: a proof procedure for non-linear arithmetic} -\tacindex{nra} -\label{sec:nra} -The {\tt nra} tactic is an {\emph experimental} proof procedure for non-linear arithmetic. -% -The tactic performs a limited amount of non-linear reasoning before running the -linear prover of {\tt lra}. -This pre-processing does the following: -\begin{itemize} -\item If the context contains an arithmetic expression of the form $e[x^2]$ where $x$ is a - monomial, the context is enriched with $x^2\ge 0$; -\item For all pairs of hypotheses $e_1\ge 0$, $e_2 \ge 0$, the context is enriched with $e_1 \times e_2 \ge 0$. -\end{itemize} -After this pre-processing, the linear prover of {\tt lra} searches for a proof -by abstracting monomials by variables. - -\asection{{\tt nia}: a proof procedure for non-linear integer arithmetic} -\tacindex{nia} -\label{sec:nia} -The {\tt nia} tactic is a proof procedure for non-linear integer arithmetic. -% -It performs a pre-processing similar to {\tt nra}. The obtained goal is solved using the linear integer prover {\tt lia}. - -\asection{{\tt psatz}: a proof procedure for non-linear arithmetic} -\label{sec:psatz} -The {\tt psatz} tactic explores the $\mathit{Cone}$ by increasing degrees -- hence the depth parameter $n$. -In theory, such a proof search is complete -- if the goal is provable the search eventually stops. -Unfortunately, the external oracle is using numeric (approximate) optimization techniques that might miss a -refutation. - -To illustrate the working of the tactic, consider we wish to prove the following Coq goal. 
-\begin{coq_eval} -Require Import ZArith Psatz. -Open Scope Z_scope. -\end{coq_eval} -\begin{coq_example*} -Goal forall x, -x^2 >= 0 -> x - 1 >= 0 -> False. -\end{coq_example*} -\begin{coq_eval} -intro x; psatz Z 2. -\end{coq_eval} -Such a goal is solved by {\tt intro x; psatz Z 2}. The oracle returns the -cone expression $2 \times (\mathbf{x-1}) + (\mathbf{x-1}) \times -(\mathbf{x-1}) + \mathbf{-x^2}$ (polynomial hypotheses are printed in -bold). By construction, this expression belongs to $\mathit{Cone}(\{-x^2, -x -1\})$. Moreover, by running {\tt ring} we obtain $-1$. By -Theorem~\ref{thm:psatz}, the goal is valid. -% - -%% \paragraph{The {\tt sos} tactic} -- where {\tt sos} stands for \emph{sum of squares} -- tries to prove that a -%% single polynomial $p$ is positive by expressing it as a sum of squares \emph{i.e.,} $\sum_{i\in S} p_i^2$. -%% This amounts to searching for $p$ in the cone without generators \emph{i.e.}, $Cone(\{\})$. -% - - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/Omega.tex b/doc/refman/Omega.tex deleted file mode 100644 index 82765da6ed..0000000000 --- a/doc/refman/Omega.tex +++ /dev/null @@ -1,249 +0,0 @@ -\achapter{Omega: a solver of quantifier-free problems in -Presburger Arithmetic} -%HEVEA\cutname{omega.html} -\aauthor{Pierre Crégut} -\label{OmegaChapter} - -\asection{Description of {\tt omega}} -\tacindex{omega} -\label{description} - -{\tt omega} solves a goal in Presburger arithmetic, i.e. a universally -quantified formula made of equations and inequations. Equations may -be specified either on the type \verb=nat= of natural numbers or on -the type \verb=Z= of binary-encoded integer numbers. Formulas on -\verb=nat= are automatically injected into \verb=Z=. The procedure -may use any hypothesis of the current proof session to solve the goal. 
- -Multiplication is handled by {\tt omega} but only goals where at -least one of the two multiplicands of products is a constant are -solvable. This is the restriction meant by ``Presburger arithmetic''. - -If the tactic cannot solve the goal, it fails with an error message. -In any case, the computation eventually stops. - -\asubsection{Arithmetical goals recognized by {\tt omega}} - -{\tt omega} applied only to quantifier-free formulas built from the -connectors - -\begin{quote} -\verb=/\, \/, ~, ->= -\end{quote} - -on atomic formulas. Atomic formulas are built from the predicates - -\begin{quote} -\verb!=, le, lt, gt, ge! -\end{quote} - - on \verb=nat= or from the predicates - -\begin{quote} -\verb!=, <, <=, >, >=! -\end{quote} - - on \verb=Z=. In expressions of type \verb=nat=, {\tt omega} recognizes - -\begin{quote} -\verb!plus, minus, mult, pred, S, O! -\end{quote} - -and in expressions of type \verb=Z=, {\tt omega} recognizes - -\begin{quote} -\verb!+, -, *, Z.succ!, and constants. -\end{quote} - -All expressions of type \verb=nat= or \verb=Z= not built on these -operators are considered abstractly as if they -were arbitrary variables of type \verb=nat= or \verb=Z=. - -\asubsection{Messages from {\tt omega}} -\label{errors} - -When {\tt omega} does not solve the goal, one of the following errors -is generated: - -\begin{ErrMsgs} - -\item \errindex{omega can't solve this system} - - This may happen if your goal is not quantifier-free (if it is - universally quantified, try {\tt intros} first; if it contains - existentials quantifiers too, {\tt omega} is not strong enough to solve your - goal). This may happen also if your goal contains arithmetical - operators unknown from {\tt omega}. Finally, your goal may be really - wrong! - -\item \errindex{omega: Not a quantifier-free goal} - - If your goal is universally quantified, you should first apply {\tt - intro} as many time as needed. 
- -\item \errindex{omega: Unrecognized predicate or connective: {\sl ident}} - -\item \errindex{omega: Unrecognized atomic proposition: {\sl prop}} - -\item \errindex{omega: Can't solve a goal with proposition variables} - -\item \errindex{omega: Unrecognized proposition} - -\item \errindex{omega: Can't solve a goal with non-linear products} - -\item \errindex{omega: Can't solve a goal with equality on {\sl type}} - -\end{ErrMsgs} - -%% This code is currently unplugged -%% -% \asubsection{Control over the output} -% There are some flags that can be set to get more information on the procedure - -% \begin{itemize} -% \item \verb=Time= to get the time used by the procedure -% \item \verb=System= to visualize the normalized systems. -% \item \verb=Action= to visualize the actions performed by the OMEGA -% procedure (see \ref{technical}). -% \end{itemize} - -% \comindex{Set omega Time} -% \comindex{UnSet omega Time} -% \comindex{Switch omega Time} -% \comindex{Set omega System} -% \comindex{UnSet omega System} -% \comindex{Switch omega System} -% \comindex{Set omega Action} -% \comindex{UnSet omega Action} -% \comindex{Switch omega Action} - -% Use {\tt Set omega {\rm\sl flag}} to set the flag -% {\rm\sl flag}. Use {\tt Unset omega {\rm\sl flag}} to unset it and -% {\tt Switch omega {\rm\sl flag}} to toggle it. - -\section{Using {\tt omega}} - -The {\tt omega} tactic does not belong to the core system. It should be -loaded by -\begin{coq_example*} -Require Import Omega. -Open Scope Z_scope. -\end{coq_example*} - -\example{} - -\begin{coq_example} -Goal forall m n:Z, 1 + 2 * m <> 2 * n. -intros; omega. -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} - -\example{} - -\begin{coq_example} -Goal forall z:Z, z > 0 -> 2 * z + 1 > z. -intro; omega. -\end{coq_example} - -% Other examples can be found in \verb+$COQLIB/theories/DEMOS/OMEGA+. 
- -\section{Options} - -\begin{quote} - \optindex{Stable Omega} - {\tt Unset Stable Omega} -\end{quote} -This deprecated option (on by default) is for compatibility with Coq -pre 8.5. It resets internal name counters to make executions of -{\tt omega} independent. - -\begin{quote} - \optindex{Omega UseLocalDefs} - {\tt Unset Omega UseLocalDefs} -\end{quote} -This option (on by default) allows {\tt omega} to use the bodies of -local variables. - -\begin{quote} - \optindex{Omega System} - {\tt Set Omega System} - \optindex{Omega Action} - {\tt Set Omega Action} -\end{quote} -These two options (off by default) activate the printing of debug -information. - -\asection{Technical data} -\label{technical} - -\asubsection{Overview of the tactic} -\begin{itemize} - -\item The goal is negated twice and the first negation is introduced as an - hypothesis. -\item Hypothesis are decomposed in simple equations or inequations. Multiple - goals may result from this phase. -\item Equations and inequations over \verb=nat= are translated over - \verb=Z=, multiple goals may result from the translation of - substraction. -\item Equations and inequations are normalized. -\item Goals are solved by the {\it OMEGA} decision procedure. -\item The script of the solution is replayed. - -\end{itemize} - -\asubsection{Overview of the {\it OMEGA} decision procedure} - -The {\it OMEGA} decision procedure involved in the {\tt omega} tactic uses -a small subset of the decision procedure presented in - -\begin{quote} - "The Omega Test: a fast and practical integer programming -algorithm for dependence analysis", William Pugh, Communication of the -ACM , 1992, p 102-114. -\end{quote} - -Here is an overview, look at the original paper for more information. - -\begin{itemize} - -\item Equations and inequations are normalized by division by the GCD of their - coefficients. -\item Equations are eliminated, using the Banerjee test to get a coefficient - equal to one. 
-\item Note that each inequation defines a half space in the space of real value - of the variables. - \item Inequations are solved by projecting on the hyperspace - defined by cancelling one of the variable. They are partitioned - according to the sign of the coefficient of the eliminated - variable. Pairs of inequations from different classes define a - new edge in the projection. - \item Redundant inequations are eliminated or merged in new - equations that can be eliminated by the Banerjee test. -\item The last two steps are iterated until a contradiction is reached - (success) or there is no more variable to eliminate (failure). - -\end{itemize} - -It may happen that there is a real solution and no integer one. The last -steps of the Omega procedure (dark shadow) are not implemented, so the -decision procedure is only partial. - -\asection{Bugs} - -\begin{itemize} -\item The simplification procedure is very dumb and this results in - many redundant cases to explore. - -\item Much too slow. - -\item Certainly other bugs! You can report them to \url{https://coq.inria.fr/bugs/}. - -\end{itemize} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-cic.tex b/doc/refman/RefMan-cic.tex deleted file mode 100644 index 2695c5eee4..0000000000 --- a/doc/refman/RefMan-cic.tex +++ /dev/null @@ -1,1881 +0,0 @@ -\chapter[Calculus of Inductive Constructions]{Calculus of Inductive Constructions -\label{Cic} -\index{Cic@\textsc{CIC}} -\index{Calculus of Inductive Constructions}} -%HEVEA\cutname{cic.html} - -The underlying formal language of {\Coq} is a {\em Calculus of -Inductive Constructions} (\CIC) whose inference rules are presented in -this chapter. The history of this formalism as well as pointers to related work -are provided in a separate chapter; see {\em Credits}. - -\section[The terms]{The terms\label{Terms}} - -The expressions of the {\CIC} are {\em terms} and all terms have a {\em type}. 
-There are types for functions (or -programs), there are atomic types (especially datatypes)... but also -types for proofs and types for the types themselves. -Especially, any object handled in the formalism must belong to a -type. For instance, universal quantification is relative to a type and -takes the form {\it ``for all x -of type T, P''}. The expression {\it ``x of type T''} is -written {\it ``x:T''}. Informally, {\it ``x:T''} can be thought as -{\it ``x belongs to T''}. - -The types of types are {\em sorts}. Types and sorts are themselves -terms so that terms, types and sorts are all components of a common -syntactic language of terms which is described in -Section~\ref{cic:terms} but, first, we describe sorts. - -\subsection[Sorts]{Sorts\label{Sorts} -\index{Sorts}} -All sorts have a type and there is an infinite well-founded -typing hierarchy of sorts whose base sorts are {\Prop} and {\Set}. - -The sort {\Prop} intends to be the type of logical propositions. If -$M$ is a logical proposition then it denotes the class of terms -representing proofs of $M$. An object $m$ belonging to $M$ witnesses -the fact that $M$ is provable. An object of type {\Prop} is called a -proposition. - -The sort {\Set} intends to be the type of small sets. This includes data -types such as booleans and naturals, but also products, subsets, and -function types over these data types. - -{\Prop} and {\Set} themselves can be manipulated as ordinary -terms. Consequently they also have a type. Because assuming simply -that {\Set} has type {\Set} leads to an inconsistent theory~\cite{Coq86}, the -language of {\CIC} has infinitely many sorts. There are, in addition -to {\Set} and {\Prop} a hierarchy of universes {\Type$(i)$} for any -integer $i$. - -Like {\Set}, all of the sorts {\Type$(i)$} contain small sets such as -booleans, natural numbers, as well as products, subsets and function -types over small sets. 
But, unlike {\Set}, they also contain large -sets, namely the sorts {\Set} and {\Type$(j)$} for $j<i$, and all -products, subsets and function types over these sorts. - -Formally, we call {\Sort} the set of sorts which is defined by: -\index{Type@{\Type}}% -\index{Prop@{\Prop}}% -\index{Set@{\Set}}% -\[\Sort \equiv \{\Prop,\Set,\Type(i)\;|\; i \in \NN\} \] -Their properties, such as: -{\Prop:\Type$(1)$}, {\Set:\Type$(1)$}, and {\Type$(i)$:\Type$(i+1)$}, -are defined in Section~\ref{subtyping-rules}. - -The user does not have to mention explicitly the index $i$ when referring to -the universe \Type$(i)$. One only writes \Type. The -system itself generates for each instance of \Type\ a new -index for the universe and checks that the constraints between these -indexes can be solved. From the user point of view we consequently -have {\Type}:{\Type}. -We shall make precise in the typing rules the constraints between the -indexes. - -\paragraph{Implementation issues} -In practice, the {\Type} hierarchy is implemented using -{\em algebraic universes}\index{algebraic universe}. -An algebraic universe $u$ is either a variable (a qualified -identifier with a number) or a successor of an algebraic universe (an -expression $u+1$), or an upper bound of algebraic universes (an -expression $max(u_1,...,u_n)$), or the base universe (the expression -$0$) which corresponds, in the arity of template polymorphic inductive -types (see Section \ref{Template-polymorphism}), -to the predicative sort {\Set}. A graph of constraints between -the universe variables is maintained globally. To ensure the existence -of a mapping of the universes to the positive integers, the graph of -constraints must remain acyclic. Typing expressions that violate the -acyclicity of the graph of constraints results in a \errindex{Universe -inconsistency} error (see also Section~\ref{PrintingUniverses}). 
- -%% HH: This looks to me more like source of confusion than helpful - -%% \subsection{Constants} - -%% Constants refers to -%% objects in the global environment. These constants may denote previously -%% defined objects, but also objects related to inductive definitions -%% (either the type itself or one of its constructors or destructors). - -%% \medskip\noindent {\bf Remark. } In other presentations of \CIC, -%% the inductive objects are not seen as -%% external declarations but as first-class terms. Usually the -%% definitions are also completely ignored. This is a nice theoretical -%% point of view but not so practical. An inductive definition is -%% specified by a possibly huge set of declarations, clearly we want to -%% share this specification among the various inductive objects and not -%% to duplicate it. So the specification should exist somewhere and the -%% various objects should refer to it. We choose one more level of -%% indirection where the objects are just represented as constants and -%% the environment gives the information on the kind of object the -%% constant refers to. - -%% \medskip -%% Our inductive objects will be manipulated as constants declared in the -%% environment. This roughly corresponds to the way they are actually -%% implemented in the \Coq\ system. It is simple to map this presentation -%% in a theory where inductive objects are represented by terms. - -\subsection{Terms} -\label{cic:terms} - -Terms are built from sorts, variables, constants, -%constructors, inductive types, -abstractions, applications, local definitions, -%case analysis, fixpoints, cofixpoints -and products. -From a syntactic point of view, types cannot be distinguished from terms, -except that they cannot start by an abstraction or a constructor. -More precisely the language of the {\em Calculus of Inductive - Constructions} is built from the following rules. -% -\begin{enumerate} -\item the sorts {\Set}, {\Prop}, ${\Type(i)}$ are terms. 
-\item variables, hereafter ranged over by letters $x$, $y$, etc., are terms -\item constants, hereafter ranged over by letters $c$, $d$, etc., are terms. -%\item constructors, hereafter ranged over by letter $C$, are terms. -%\item inductive types, hereafter ranged over by letter $I$, are terms. -\item\index{products} if $x$ is a variable and $T$, $U$ are terms then $\forall~x:T,U$ - ($\kw{forall}~x:T,~U$ in \Coq{} concrete syntax) is a term. If $x$ - occurs in $U$, $\forall~x:T,U$ reads as {\it ``for all x of type T, - U''}. As $U$ depends on $x$, one says that $\forall~x:T,U$ is a - {\em dependent product}. If $x$ does not occur in $U$ then - $\forall~x:T,U$ reads as {\it ``if T then U''}. A {\em non dependent - product} can be written: $T \ra U$. -\item if $x$ is a variable and $T$, $u$ are terms then $\lb x:T \mto u$ - ($\kw{fun}~x:T~ {\tt =>}~ u$ in \Coq{} concrete syntax) is a term. This is a - notation for the $\lambda$-abstraction of - $\lambda$-calculus\index{lambda-calculus@$\lambda$-calculus} - \cite{Bar81}. The term $\lb x:T \mto u$ is a function which maps - elements of $T$ to the expression $u$. -\item if $t$ and $u$ are terms then $(t\ u)$ is a term - ($t~u$ in \Coq{} concrete syntax). The term $(t\ - u)$ reads as {\it ``t applied to u''}. -\item if $x$ is a variable, and $t$, $T$ and $u$ are terms then - $\kw{let}~x:=t:T~\kw{in}~u$ is a - term which denotes the term $u$ where the variable $x$ is locally - bound to $t$ of type $T$. This stands for the common ``let-in'' - construction of functional programs such as ML or Scheme. -%\item case ... -%\item fixpoint ... -%\item cofixpoint ... -\end{enumerate} - -\paragraph{Free variables.} -The notion of free variables is defined as usual. In the expressions -$\lb x:T\mto U$ and $\forall x:T, U$ the occurrences of $x$ in $U$ -are bound. 
- -\paragraph[Substitution.]{Substitution.\index{Substitution}} -The notion of substituting a term $t$ to free occurrences of a -variable $x$ in a term $u$ is defined as usual. The resulting term -is written $\subst{u}{x}{t}$. - -\paragraph[The logical vs programming readings.]{The logical vs programming readings.} - -The constructions of the {\CIC} can be used to express both logical -and programming notions, accordingly to the Curry-Howard -correspondence between proofs and programs, and between propositions -and types~\cite{Cur58,How80,Bru72}. - -For instance, let us assume that \nat\ is the type of natural numbers -with zero element written $0$ and that ${\tt True}$ is the always true -proposition. Then $\ra$ is used both to denote $\nat\ra\nat$ which is -the type of functions from \nat\ to \nat, to denote ${\tt True}\ra{\tt - True}$ which is an implicative proposition, to denote $\nat \ra -\Prop$ which is the type of unary predicates over the natural numbers, -etc. - -Let us assume that ${\tt mult}$ is a function of type $\nat\ra\nat\ra -\nat$ and ${\tt eqnat}$ a predicate of type $\nat\ra\nat\ra \Prop$. -The $\lambda$-abstraction can serve to build ``ordinary'' functions as -in $\lambda x:\nat.({\tt mult}~x~x)$ (i.e. $\kw{fun}~x:\nat ~{\tt =>}~ -{\tt mult} ~x~x$ in {\Coq} notation) but may build also predicates -over the natural numbers. For instance $\lambda x:\nat.({\tt eqnat}~ -x~0)$ (i.e. $\kw{fun}~x:\nat ~{\tt =>}~ {\tt eqnat}~ x~0$ in {\Coq} -notation) will represent the predicate of one variable $x$ which -asserts the equality of $x$ with $0$. This predicate has type $\nat -\ra \Prop$ and it can be applied to any expression of type ${\nat}$, -say $t$, to give an object $P~t$ of type \Prop, namely a proposition. - -Furthermore $\kw{forall}~x:\nat,\,P\;x$ will represent the type of -functions which associate to each natural number $n$ an object of type -$(P~n)$ and consequently represent the type of proofs of the formula -``$\forall x.\,P(x)$''. 
- -\section[Typing rules]{Typing rules\label{Typed-terms}} - -As objects of type theory, terms are subjected to {\em type -discipline}. The well typing of a term depends on -a global environment and a local context. - -\paragraph{Local context.\index{Local context}} -A {\em local context} is an ordered list of -{\em local declarations\index{declaration!local}} of names which we call {\em variables\index{variable}}. -The declaration of some variable $x$ is -either a {\em local assumption\index{assumption!local}}, written $x:T$ ($T$ is a type) or a {\em local definition\index{definition!local}}, -written $x:=t:T$. We use brackets to write local contexts. A -typical example is $[x:T;y:=u:U;z:V]$. Notice that the variables -declared in a local context must be distinct. If $\Gamma$ declares some $x$, -we write $x \in \Gamma$. By writing $(x:T) \in \Gamma$ we mean that -either $x:T$ is an assumption in $\Gamma$ or that there exists some $t$ such -that $x:=t:T$ is a definition in $\Gamma$. If $\Gamma$ defines some -$x:=t:T$, we also write $(x:=t:T) \in \Gamma$. -For the rest of the chapter, the $\Gamma::(y:T)$ denotes the local context -$\Gamma$ enriched with the local assumption $y:T$. -Similarly, $\Gamma::(y:=t:T)$ denotes the local context -$\Gamma$ enriched with the local definition $(y:=t:T)$. -The notation $[]$ denotes the empty local context. -By $\Gamma_1; \Gamma_2$ we mean concatenation of the local context $\Gamma_1$ -and the local context $\Gamma_2$. - -% Does not seem to be used further... -% Si dans l'explication WF(E)[Gamma] concernant les constantes -% definies ds un contexte - -%We define the inclusion of two local contexts $\Gamma$ and $\Delta$ (written -%as $\Gamma \subset \Delta$) as the property, for all variable $x$, -%type $T$ and term $t$, if $(x:T) \in \Gamma$ then $(x:T) \in \Delta$ -%and if $(x:=t:T) \in \Gamma$ then $(x:=t:T) \in \Delta$. 
-%We write -% $|\Delta|$ for the length of the context $\Delta$, that is for the number -% of declarations (assumptions or definitions) in $\Delta$. - -\paragraph[Global environment.]{Global environment.\index{Global environment}} -%Because we are manipulating global declarations (global constants and global -%assumptions), we also need to consider a global environment $E$. - -A {\em global environment} is an ordered list of {\em global declarations\index{declaration!global}}. -Global declarations are either {\em global assumptions\index{assumption!global}} or {\em global -definitions\index{definition!global}}, but also declarations of inductive objects. Inductive objects themselves declare both inductive or coinductive types and constructors -(see Section~\ref{Cic-inductive-definitions}). - -A {\em global assumption} will be represented in the global environment as -$(c:T)$ which assumes the name $c$ to be of some type $T$. -A {\em global definition} will -be represented in the global environment as $c:=t:T$ which defines -the name $c$ to have value $t$ and type $T$. -We shall call such names {\em constants}. -For the rest of the chapter, the $E;c:T$ denotes the global environment -$E$ enriched with the global assumption $c:T$. -Similarly, $E;c:=t:T$ denotes the global environment -$E$ enriched with the global definition $(c:=t:T)$. - -The rules for inductive definitions (see Section -\ref{Cic-inductive-definitions}) have to be considered as assumption -rules to which the following definitions apply: if the name $c$ is -declared in $E$, we write $c \in E$ and if $c:T$ or $c:=t:T$ is -declared in $E$, we write $(c : T) \in E$. - -\paragraph[Typing rules.]{Typing rules.\label{Typing-rules}\index{Typing rules}} -In the following, we define simultaneously two -judgments. The first one \WTEG{t}{T} means the term $t$ is well-typed -and has type $T$ in the global environment $E$ and local context $\Gamma$. 
The -second judgment \WFE{\Gamma} means that the global environment $E$ is -well-formed and the local context $\Gamma$ is a valid local context in this -global environment. -% HH: This looks to me complicated. I think it would be better to talk -% about ``discharge'' as a transformation of global environments, -% rather than as keeping a local context next to global constants. -% -%% It also means a third property which makes sure that any -%%constant in $E$ was defined in an environment which is included in -%%$\Gamma$ -%%\footnote{This requirement could be relaxed if we instead introduced -%% an explicit mechanism for instantiating constants. At the external -%% level, the Coq engine works accordingly to this view that all the -%% definitions in the environment were built in a local sub-context of the -%% current local context.}. - -A term $t$ is well typed in a global environment $E$ iff there exists a -local context $\Gamma$ and a term $T$ such that the judgment \WTEG{t}{T} can -be derived from the following rules. 
-\begin{description} -\item[W-Empty] \inference{\WF{[]}{}} -\item[W-Local-Assum] % Ce n'est pas vrai : x peut apparaitre plusieurs fois dans Gamma -\inference{\frac{\WTEG{T}{s}~~~~s \in \Sort~~~~x \not\in \Gamma % \cup E - }{\WFE{\Gamma::(x:T)}}} -\item[W-Local-Def] -\inference{\frac{\WTEG{t}{T}~~~~x \not\in \Gamma % \cup E - }{\WFE{\Gamma::(x:=t:T)}}} -\item[W-Global-Assum] \inference{\frac{\WTE{}{T}{s}~~~~s \in \Sort~~~~c \notin E} - {\WF{E;c:T}{}}} -\item[W-Global-Def] \inference{\frac{\WTE{}{t}{T}~~~c \notin E} - {\WF{E;c:=t:T}{}}} -\item[Ax-Prop] \index{Typing rules!Ax-Prop} -\inference{\frac{\WFE{\Gamma}}{\WTEG{\Prop}{\Type(1)}}} -\item[Ax-Set] \index{Typing rules!Ax-Set} -\inference{\frac{\WFE{\Gamma}}{\WTEG{\Set}{\Type(1)}}} -\item[Ax-Type] \index{Typing rules!Ax-Type} -\inference{\frac{\WFE{\Gamma}}{\WTEG{\Type(i)}{\Type(i+1)}}} -\item[Var]\index{Typing rules!Var} - \inference{\frac{ \WFE{\Gamma}~~~~~(x:T) \in \Gamma~~\mbox{or}~~(x:=t:T) \in \Gamma~\mbox{for some $t$}}{\WTEG{x}{T}}} -\item[Const] \index{Typing rules!Const} -\inference{\frac{\WFE{\Gamma}~~~~(c:T) \in E~~\mbox{or}~~(c:=t:T) \in E~\mbox{for some $t$} }{\WTEG{c}{T}}} -\item[Prod-Prop] \index{Typing rules!Prod-Prop} -\inference{\frac{\WTEG{T}{s}~~~~s \in \Sort~~~ - \WTE{\Gamma::(x:T)}{U}{\Prop}} - { \WTEG{\forall~x:T,U}{\Prop}}} -\item[Prod-Set] \index{Typing rules!Prod-Set} -\inference{\frac{\WTEG{T}{s}~~~~s \in\{\Prop, \Set\}~~~~~~ - \WTE{\Gamma::(x:T)}{U}{\Set}} - { \WTEG{\forall~x:T,U}{\Set}}} -\item[Prod-Type] \index{Typing rules!Prod-Type} -\inference{\frac{\WTEG{T}{\Type(i)}~~~~ - \WTE{\Gamma::(x:T)}{U}{\Type(i)}} - {\WTEG{\forall~x:T,U}{\Type(i)}}} -\item[Lam]\index{Typing rules!Lam} -\inference{\frac{\WTEG{\forall~x:T,U}{s}~~~~ \WTE{\Gamma::(x:T)}{t}{U}} - {\WTEG{\lb x:T\mto t}{\forall x:T, U}}} -\item[App]\index{Typing rules!App} - \inference{\frac{\WTEG{t}{\forall~x:U,T}~~~~\WTEG{u}{U}} - {\WTEG{(t\ u)}{\subst{T}{x}{u}}}} -\item[Let]\index{Typing rules!Let} 
-\inference{\frac{\WTEG{t}{T}~~~~ \WTE{\Gamma::(x:=t:T)}{u}{U}} - {\WTEG{\letin{x}{t:T}{u}}{\subst{U}{x}{t}}}} -\end{description} - -\Rem Prod$_1$ and Prod$_2$ typing-rules make sense if we consider the semantic -difference between {\Prop} and {\Set}: -\begin{itemize} - \item All values of a type that has a sort {\Set} are extractable. - \item No values of a type that has a sort {\Prop} are extractable. -\end{itemize} - -\Rem We may have $\kw{let}~x:=t:T~\kw{in}~u$ -well-typed without having $((\lb x:T\mto u)~t)$ well-typed (where -$T$ is a type of $t$). This is because the value $t$ associated to $x$ -may be used in a conversion rule (see Section~\ref{conv-rules}). - -\section[Conversion rules]{Conversion rules\index{Conversion rules} -\label{conv-rules}} - -In \CIC, there is an internal reduction mechanism. In particular, it -can decide if two programs are {\em intentionally} equal (one -says {\em convertible}). Convertibility is described in this section. - -\paragraph[$\beta$-reduction.]{$\beta$-reduction.\label{beta}\index{beta-reduction@$\beta$-reduction}} - -We want to be able to identify some terms as we can identify the -application of a function to a given argument with its result. For -instance the identity function over a given type $T$ can be written -$\lb x:T\mto x$. In any global environment $E$ and local context $\Gamma$, we want to identify any object $a$ (of type $T$) with the -application $((\lb x:T\mto x)~a)$. We define for this a {\em reduction} (or a -{\em conversion}) rule we call $\beta$: -\[ \WTEGRED{((\lb x:T\mto - t)~u)}{\triangleright_{\beta}}{\subst{t}{x}{u}} \] -We say that $\subst{t}{x}{u}$ is the {\em $\beta$-contraction} of -$((\lb x:T\mto t)~u)$ and, conversely, that $((\lb x:T\mto t)~u)$ -is the {\em $\beta$-expansion} of $\subst{t}{x}{u}$. - -According to $\beta$-reduction, terms of the {\em Calculus of - Inductive Constructions} enjoy some fundamental properties such as -confluence, strong normalization, subject reduction. 
These results are -theoretically of great importance but we will not detail them here and -refer the interested reader to \cite{Coq85}. - -\paragraph[$\iota$-reduction.]{$\iota$-reduction.\label{iota}\index{iota-reduction@$\iota$-reduction}} -A specific conversion rule is associated to the inductive objects in -the global environment. We shall give later on (see Section~\ref{iotared}) the -precise rules but it just says that a destructor applied to an object -built from a constructor behaves as expected. This reduction is -called $\iota$-reduction and is more precisely studied in -\cite{Moh93,Wer94}. - - -\paragraph[$\delta$-reduction.]{$\delta$-reduction.\label{delta}\index{delta-reduction@$\delta$-reduction}} - -We may have variables defined in local contexts or constants defined in the global -environment. It is legal to identify such a reference with its value, -that is to expand (or unfold) it into its value. This -reduction is called $\delta$-reduction and shows as follows. - -$$\WTEGRED{x}{\triangleright_{\delta}}{t}~~~~~\mbox{if $(x:=t:T) \in \Gamma$}~~~~~~~~~\WTEGRED{c}{\triangleright_{\delta}}{t}~~~~~\mbox{if $(c:=t:T) \in E$}$$ - - -\paragraph[$\zeta$-reduction.]{$\zeta$-reduction.\label{zeta}\index{zeta-reduction@$\zeta$-reduction}} - -{\Coq} allows also to remove local definitions occurring in terms by -replacing the defined variable by its value. The declaration being -destroyed, this reduction differs from $\delta$-reduction. It is -called $\zeta$-reduction and shows as follows. - -$$\WTEGRED{\kw{let}~x:=u~\kw{in}~t}{\triangleright_{\zeta}}{\subst{t}{x}{u}}$$ - -\paragraph{$\eta$-expansion.% -\label{eta}% -\index{eta-expansion@$\eta$-expansion}% -%\index{eta-reduction@$\eta$-reduction} -}% -Another important concept is $\eta$-expansion. It is legal to identify any -term $t$ of functional type $\forall x:T, U$ with its so-called -$\eta$-expansion $\lb x:T\mto (t\ x)$ for $x$ an arbitrary variable -name fresh in $t$. 
- -\Rem We deliberately do not define $\eta$-reduction: -\begin{latexonly}% - $$\lb x:T\mto (t\ x)\not\triangleright_\eta\hskip.3em t$$ -\end{latexonly}% -\begin{htmlonly} - $$\lb x:T\mto (t\ x)~\not\triangleright_\eta~t$$ -\end{htmlonly} -This is because, in general, the type of $t$ need not to be convertible to the type of $\lb x:T\mto (t\ x)$. -E.g., if we take $f$ such that: -\begin{latexonly}% - $$f\hskip.5em:\hskip.5em\forall x:Type(2),Type(1)$$ -\end{latexonly}% -\begin{htmlonly} - $$f~:~\forall x:Type(2),Type(1)$$ -\end{htmlonly} -then -\begin{latexonly}% - $$\lb x:Type(1),(f\, x)\hskip.5em:\hskip.5em\forall x:Type(1),Type(1)$$ -\end{latexonly}% -\begin{htmlonly} - $$\lb x:Type(1),(f\, x)~:~\forall x:Type(1),Type(1)$$ -\end{htmlonly} -We could not allow -\begin{latexonly}% - $$\lb x:Type(1),(f\,x)\hskip.4em\not\triangleright_\eta\hskip.6em f$$ -\end{latexonly}% -\begin{htmlonly} - $$\lb x:Type(1),(f\,x)~\not\triangleright_\eta~f$$ -\end{htmlonly} -because the type of the reduced term $\forall x:Type(2),Type(1)$ -would not be convertible to the type of the original term $\forall x:Type(1),Type(1)$. - -\paragraph[Convertibility.]{Convertibility.\label{convertibility} -\index{beta-reduction@$\beta$-reduction}\index{iota-reduction@$\iota$-reduction}\index{delta-reduction@$\delta$-reduction}\index{zeta-reduction@$\zeta$-reduction}} - -Let us write $\WTEGRED{t}{\triangleright}{u}$ for the contextual closure of the relation $t$ reduces to $u$ in the global environment $E$ and local context $\Gamma$ with one of the previous reduction $\beta$, $\iota$, $\delta$ or $\zeta$. 
- -We say that two terms $t_1$ and $t_2$ are {\em - $\beta\iota\delta\zeta\eta$-convertible}, or simply {\em - convertible}, or {\em equivalent}, in the global environment $E$ and -local context $\Gamma$ iff there exist terms $u_1$ and $u_2$ such that -$\WTEGRED{t_1}{\triangleright \ldots \triangleright}{u_1}$ and -$\WTEGRED{t_2}{\triangleright \ldots \triangleright}{u_2}$ and either -$u_1$ and $u_2$ are identical, or they are convertible up to -$\eta$-expansion, i.e. $u_1$ is $\lb x:T\mto u'_1$ and $u_2\,x$ is -recursively convertible to $u'_1$, or, symmetrically, $u_2$ is $\lb -x:T\mto u'_2$ and $u_1\,x$ is recursively convertible to $u'_2$. We -then write $\WTEGCONV{t_1}{t_2}$. - -Apart from this we consider two instances of polymorphic and cumulative (see Chapter~\ref{Universes-full}) inductive types (see below) -convertible $\WTEGCONV{t\ w_1 \dots w_m}{t\ w_1' \dots w_m'}$ if we have subtypings (see below) in both directions, i.e., -$\WTEGLECONV{t\ w_1 \dots w_m}{t\ w_1' \dots w_m'}$ and $\WTEGLECONV{t\ w_1' \dots w_m'}{t\ w_1 \dots w_m}$. -Furthermore, we consider $\WTEGCONV{c\ v_1 \dots v_m}{c'\ v_1' \dots v_m'}$ convertible if $\WTEGCONV{v_i}{v_i'}$ -and we have that $c$ and $c'$ are the same constructors of different instances the same inductive types (differing only in universe levels) -such that $\WTEG{c\ v_1 \dots v_m}{t\ w_1 \dots w_m}$ and $\WTEG{c'\ v_1' \dots v_m'}{t'\ w_1' \dots w_m'}$ and we have $\WTEGCONV{t\ w_1 \dots w_m}{t\ w_1' \dots w_m'}$. - -The convertibility relation allows introducing a new typing rule -which says that two convertible well-formed types have the same -inhabitants. - -\section[Subtyping rules]{Subtyping rules\index{Subtyping rules} -\label{subtyping-rules}} - -At the moment, we did not take into account one rule between universes -which says that any term in a universe of index $i$ is also a term in -the universe of index $i+1$ (this is the {\em cumulativity} rule of -{\CIC}). 
This property extends the equivalence relation of -convertibility into a {\em subtyping} relation inductively defined by: -\begin{enumerate} -\item if $\WTEGCONV{t}{u}$ then $\WTEGLECONV{t}{u}$, -\item if $i \leq j$ then $\WTEGLECONV{\Type(i)}{\Type(j)}$, -\item for any $i$, $\WTEGLECONV{\Set}{\Type(i)}$, -\item $\WTEGLECONV{\Prop}{\Set}$, hence, by transitivity, - $\WTEGLECONV{\Prop}{\Type(i)}$, for any $i$ -\item if $\WTEGCONV{T}{U}$ and $\WTELECONV{\Gamma::(x:T)}{T'}{U'}$ then $\WTEGLECONV{\forall~x:T, T'}{\forall~x:U, U'}$. -\item if $\Ind{}{p}{\Gamma_I}{\Gamma_C}$ is a universe polymorphic and cumulative (see Chapter~\ref{Universes-full}) - inductive type (see below) and $(t : \forall\Gamma_P,\forall\Gamma_{\mathit{Arr}(t)}, \Sort)\in\Gamma_I$ - and $(t' : \forall\Gamma_P',\forall\Gamma_{\mathit{Arr}(t)}', \Sort')\in\Gamma_I$ - are two different instances of \emph{the same} inductive type (differing only in universe levels) with constructors - \[[c_1: \forall\Gamma_P,\forall T_{1,1} \dots T_{1,n_1},t\ v_{1,1} \dots v_{1,m}; \dots; c_k: \forall\Gamma_P,\forall T_{k, 1} \dots T_{k,n_k},t\ v_{n,1}\dots v_{n,m}]\] - and - \[[c_1: \forall\Gamma_P',\forall T_{1,1}' \dots T_{1,n_1}',t'\ v_{1,1}' \dots v_{1,m}'; \dots; c_k: \forall\Gamma_P',\forall T_{k, 1}' \dots T_{k,n_k}',t\ v_{n,1}'\dots v_{n,m}']\] - respectively then $\WTEGLECONV{t\ w_1 \dots w_m}{t\ w_1' \dots w_m'}$ (notice that $t$ and $t'$ are both fully applied, i.e., they have a sort as a type) - if $\WTEGCONV{w_i}{w_i'}$ for $1 \le i \le m$ and we have - \[ \WTEGLECONV{T_{i,j}}{T_{i,j}'} \text{ and } \WTEGLECONV{A_i}{A_i'}\] - where $\Gamma_{\mathit{Arr}(t)} = [a_1 : A_1; a_1 : A_l]$ and $\Gamma_{\mathit{Arr}(t)} = [a_1 : A_1'; a_1 : A_l']$. 
-\end{enumerate} - -The conversion rule up to subtyping is now exactly: - -\begin{description}\label{Conv} -\item[Conv]\index{Typing rules!Conv} - \inference{ - \frac{\WTEG{U}{s}~~~~\WTEG{t}{T}~~~~\WTEGLECONV{T}{U}}{\WTEG{t}{U}}} - \end{description} - - -\paragraph[Normal form.]{Normal form.\index{Normal form}\label{Normal-form}\label{Head-normal-form}\index{Head normal form}} -A term which cannot be any more reduced is said to be in {\em normal - form}. There are several ways (or strategies) to apply the reduction -rules. Among them, we have to mention the {\em head reduction} which -will play an important role (see Chapter~\ref{Tactics}). Any term can -be written as $\lb x_1:T_1\mto \ldots \lb x_k:T_k \mto -(t_0\ t_1\ldots t_n)$ where -$t_0$ is not an application. We say then that $t_0$ is the {\em head - of $t$}. If we assume that $t_0$ is $\lb x:T\mto u_0$ then one step of -$\beta$-head reduction of $t$ is: -\[\lb x_1:T_1\mto \ldots \lb x_k:T_k\mto (\lb x:T\mto u_0\ t_1\ldots t_n) -~\triangleright ~ \lb (x_1:T_1)\ldots(x_k:T_k)\mto -(\subst{u_0}{x}{t_1}\ t_2 \ldots t_n)\] -Iterating the process of head reduction until the head of the reduced -term is no more an abstraction leads to the {\em $\beta$-head normal - form} of $t$: -\[ t \triangleright \ldots \triangleright -\lb x_1:T_1\mto \ldots\lb x_k:T_k\mto (v\ u_1 -\ldots u_m)\] -where $v$ is not an abstraction (nor an application). Note that the -head normal form must not be confused with the normal form since some -$u_i$ can be reducible. -% -Similar notions of head-normal forms involving $\delta$, $\iota$ and $\zeta$ -reductions or any combination of those can also be defined. - -\section[Inductive definitions]{Inductive Definitions\label{Cic-inductive-definitions}} - -% Here we assume that the reader knows what is an inductive definition. 
- -Formally, we can represent any {\em inductive definition\index{definition!inductive}} as \Ind{}{p}{\Gamma_I}{\Gamma_C} where: -\begin{itemize} - \item $\Gamma_I$ determines the names and types of inductive types; - \item $\Gamma_C$ determines the names and types of constructors of these inductive types; - \item $p$ determines the number of parameters of these inductive types. -\end{itemize} -These inductive definitions, together with global assumptions and global definitions, then form the global environment. -% -Additionally, for any $p$ there always exists $\Gamma_P=[a_1:A_1;\dots;a_p:A_p]$ -such that each $T$ in $(t:T)\in\Gamma_I\cup\Gamma_C$ can be written as: -$\forall\Gamma_P, T^\prime$ where $\Gamma_P$ is called the {\em context of parameters\index{context of parameters}}. -Furthermore, we must have that each $T$ in $(t:T)\in\Gamma_I$ can be written as: -$\forall\Gamma_P,\forall\Gamma_{\mathit{Arr}(t)}, \Sort$ where $\Gamma_{\mathit{Arr}(t)}$ is called the -{\em Arity} of the inductive type\index{arity of inductive type} $t$ and -$\Sort$ is called the sort of the inductive type $t$. 
- -\paragraph{Examples} - - \newcommand\ind[3]{$\mathsf{Ind}~[#1]\left(\hskip-.4em - \begin{array}{r@{\mathrm{~:=~}}l} - #2 & #3 \\ - \end{array} - \hskip-.4em - \right)$} - \def\colon{@{\hskip.5em:\hskip.5em}} - -The declaration for parameterized lists is: -\begin{latexonly} - \vskip.5em - - \ind{1}{[\List:\Set\ra\Set]}{\left[\begin{array}{r@{:}l} - \Nil & \forall A:\Set,\List~A \\ - \cons & \forall A:\Set, A \ra \List~A \ra \List~A - \end{array} - \right]} - \vskip.5em -\end{latexonly} -\begin{rawhtml}<pre><table style="border-spacing:0"> - <tr style="vertical-align:middle"> - <td style="width:10pt;text-align:center;font-family:sans-serif;font-style:italic">Ind</td> - <td style="width:20pt;text-align:center">[1]</td> - <td style="width:5pt;text-align:center">⎛<br>⎝</td> - <td style="width:120pt;text-align:center">[ <span style="font-family:monospace">list : Set → Set</span> ]</td> - <td style="width:20pt;text-align:center;font-family:monospace">:=</td> - <td style="width:10pt;text-align:center">⎡<br>⎣</td> - <td> - <table style="border-spacing:0"> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">nil</td> - <td style="width:20pt;text-align:center;font-family:monospace">:=</td> - <td style="text-align:left;font-family:monospace">∀A : Set, list A</td> - </tr> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">cons</td> - <td style="width:20pt;text-align:center;font-family:monospace">:=</td> - <td style="text-align:left;font-family:monospace">∀A : Set, A → list A → list A</td> - </tr> - </table> - </td> - <td style="width:10pt;text-align:center">⎤<br>⎦</td> - <td style="width:5pt;text-align:center">⎞<br>⎠</td> - </tr> -</table></pre> -\end{rawhtml} -\noindent which corresponds to the result of the \Coq\ declaration: -\begin{coq_example*} -Inductive list (A:Set) : Set := - | nil : list A - | cons : A -> list A -> list A. 
-\end{coq_example*} - -\noindent The declaration for a mutual inductive definition of {\tree} and {\forest} is: -\begin{latexonly} - \vskip.5em -\ind{~}{\left[\begin{array}{r@{:}l}\tree&\Set\\\forest&\Set\end{array}\right]} - {\left[\begin{array}{r@{:}l} - \node & \forest \ra \tree\\ - \emptyf & \forest\\ - \consf & \tree \ra \forest \ra \forest\\ - \end{array}\right]} - \vskip.5em -\end{latexonly} -\begin{rawhtml}<pre><table style="border-spacing:0"> - <tr style="vertical-align:middle"> - <td style="width:10pt;text-align:center;font-family:sans-serif;font-style:italic">Ind</td> - <td style="width:20pt;text-align:center">[1]</td> - <td style="width:5pt;text-align:center">⎛<br>⎜<br>⎝</td> - <td style="width:10pt;text-align:center">⎡<br>⎣</td> - <td> - <table style="border-spacing:0"> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">tree</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">Set</td> - </tr> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">forest</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">Set</td> - </tr> - </table> - </td> - <td style="width:10pt;text-align:center">⎤<br>⎦</td> - <td style="width:20pt;text-align:center;font-family:monospace">:=</td> - <td style="width:10pt;text-align:center">⎡<br>⎢<br>⎣</td> - <td> - <table style="border-spacing:0"> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">node</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">forest → tree</td> - </tr> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">emptyf</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">forest</td> - </tr> - <tr> - <td 
style="width:20pt;text-align:right;font-family:monospace">consf</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">tree → forest → forest</td> - </tr> - </table> - </td> - <td style="width:10pt;text-align:center">⎤<br>⎥<br>⎦</td> - <td style="width:5pt;text-align:center">⎞<br>⎟<br>⎠</td> - </tr> -</table></pre> -\end{rawhtml} -\noindent which corresponds to the result of the \Coq\ -declaration: -\begin{coq_example*} -Inductive tree : Set := - node : forest -> tree -with forest : Set := - | emptyf : forest - | consf : tree -> forest -> forest. -\end{coq_example*} - -\noindent The declaration for a mutual inductive definition of {\even} and {\odd} is: -\begin{latexonly} - \newcommand\GammaI{\left[\begin{array}{r@{:}l} - \even & \nat\ra\Prop \\ - \odd & \nat\ra\Prop - \end{array} - \right]} - \newcommand\GammaC{\left[\begin{array}{r@{:}l} - \evenO & \even~\nO \\ - \evenS & \forall n : \nat, \odd~n \ra \even~(\nS~n)\\ - \oddS & \forall n : \nat, \even~n \ra \odd~(\nS~n) - \end{array} - \right]} - \vskip.5em - \ind{1}{\GammaI}{\GammaC} - \vskip.5em -\end{latexonly} -\begin{rawhtml}<pre><table style="border-spacing:0"> - <tr style="vertical-align:middle"> - <td style="width:10pt;text-align:center;font-family:sans-serif;font-style:italic">Ind</td> - <td style="width:20pt;text-align:center">[1]</td> - <td style="width:5pt;text-align:center">⎛<br>⎜<br>⎝</td> - <td style="width:10pt;text-align:center">⎡<br>⎣</td> - <td> - <table style="border-spacing:0"> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">even</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">nat → Prop</td> - </tr> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">odd</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">nat → Prop</td> - 
</tr> - </table> - </td> - <td style="width:10pt;text-align:center">⎤<br>⎦</td> - <td style="width:20pt;text-align:center;font-family:monospace">:=</td> - <td style="width:10pt;text-align:center">⎡<br>⎢<br>⎣</td> - <td> - <table style="border-spacing:0"> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">even_O</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">even O</td> - </tr> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">even_S</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">∀n : nat, odd n → even (S n)</td> - </tr> - <tr> - <td style="width:20pt;text-align:right;font-family:monospace">odd_S</td> - <td style="width:20pt;text-align:center;font-family:monospace">:</td> - <td style="text-align:left;font-family:monospace">∀n : nat, even n → odd (S n)</td> - </tr> - </table> - </td> - <td style="width:10pt;text-align:center">⎤<br>⎥<br>⎦</td> - <td style="width:5pt;text-align:center">⎞<br>⎟<br>⎠</td> - </tr> -</table></pre> -\end{rawhtml} -\noindent which corresponds to the result of the \Coq\ -declaration: -\begin{coq_example*} -Inductive even : nat -> Prop := - | even_O : even 0 - | even_S : forall n, odd n -> even (S n) -with odd : nat -> Prop := - | odd_S : forall n, even n -> odd (S n). -\end{coq_example*} - -\subsection{Types of inductive objects} -We have to give the type of constants in a global environment $E$ which -contains an inductive declaration. 
- -\begin{description} -\item[Ind] \index{Typing rules!Ind} - \inference{\frac{\WFE{\Gamma}~~~~~~~~\Ind{}{p}{\Gamma_I}{\Gamma_C} \in E~~~~~~~~(a:A)\in\Gamma_I}{\WTEG{a}{A}}} -\item[Constr] \index{Typing rules!Constr} - \inference{\frac{\WFE{\Gamma}~~~~~~~~\Ind{}{p}{\Gamma_I}{\Gamma_C} \in E~~~~~~~~(c:C)\in\Gamma_C}{\WTEG{c}{C}}} -\end{description} - -\begin{latexonly}% -\paragraph{Example.} -Provided that our environment $E$ contains inductive definitions we showed before, -these two inference rules above enable us to conclude that: -\vskip.5em -\newcommand\prefix{E[\Gamma]\vdash\hskip.25em} -$\begin{array}{@{}l} - \prefix\even : \nat\ra\Prop\\ - \prefix\odd : \nat\ra\Prop\\ - \prefix\evenO : \even~\nO\\ - \prefix\evenS : \forall~n:\nat, \odd~n \ra \even~(\nS~n)\\ - \prefix\oddS : \forall~n:\nat, \even~n \ra \odd~(\nS~n) - \end{array}$ -\end{latexonly}% - -%\paragraph{Parameters.} -%%The parameters introduce a distortion between the inside specification -%%of the inductive declaration where parameters are supposed to be -%%instantiated (this representation is appropriate for checking the -%%correctness or deriving the destructor principle) and the outside -%%typing rules where the inductive objects are seen as objects -%%abstracted with respect to the parameters. - -%In the definition of \List\ or \haslength\, $A$ is a parameter because -%what is effectively inductively defined is $\ListA$ or $\haslengthA$ for -%a given $A$ which is constant in the type of constructors. But when -%we define $(\haslengthA~l~n)$, $l$ and $n$ are not parameters because the -%constructors manipulate different instances of this family. - -\subsection{Well-formed inductive definitions} -We cannot accept any inductive declaration because some of them lead -to inconsistent systems. -We restrict ourselves to definitions which -satisfy a syntactic criterion of positivity. 
Before giving the formal -rules, we need a few definitions: - -\paragraph[Definition]{Definition\index{Arity}\label{Arity}} -A type $T$ is an {\em arity of sort $s$} if it converts -to the sort $s$ or to a product $\forall~x:T,U$ with $U$ an arity -of sort $s$. - -\paragraph[Examples]{Examples} -$A\ra \Set$ is an arity of sort $\Set$. -$\forall~A:\Prop,A\ra \Prop$ is an arity of sort \Prop. - -\paragraph[Definition]{Definition} -A type $T$ is an {\em arity} if there is a $s\in\Sort$ -such that $T$ is an arity of sort $s$. - -\paragraph[Examples]{Examples} -$A\ra \Set$ and $\forall~A:\Prop,A\ra \Prop$ are arities. - -\paragraph[Definition]{Definition\index{type of constructor}} -We say that $T$ is a {\em type of constructor of $I$\index{type of constructor}} -in one of the following two cases: -\begin{itemize} - \item $T$ is $(I~t_1\ldots ~t_n)$ - \item $T$ is $\forall x:U,T^\prime$ where $T^\prime$ is also a type of constructor of $I$ -\end{itemize} - -\paragraph[Examples]{Examples} -$\nat$ and $\nat\ra\nat$ are types of constructors of $\nat$.\\ -$\forall A:\Type,\List~A$ and $\forall A:\Type,A\ra\List~A\ra\List~A$ are types of constructors of $\List$. 
- -\paragraph[Definition]{Definition\index{Positivity}\label{Positivity}} -The type of constructor $T$ will be said to {\em satisfy the positivity -condition} for a constant $X$ in the following cases: - -\begin{itemize} -\item $T=(X~t_1\ldots ~t_n)$ and $X$ does not occur free in -any $t_i$ -\item $T=\forall~x:U,V$ and $X$ occurs only strictly positively in $U$ and -the type $V$ satisfies the positivity condition for $X$ -\end{itemize} -% -The constant $X$ {\em occurs strictly positively} in $T$ in the -following cases: -% -\begin{itemize} -\item $X$ does not occur in $T$ -\item $T$ converts to $(X~t_1 \ldots ~t_n)$ and $X$ does not occur in - any of $t_i$ -\item $T$ converts to $\forall~x:U,V$ and $X$ does not occur in - type $U$ but occurs strictly positively in type $V$ -\item $T$ converts to $(I~a_1 \ldots ~a_m ~ t_1 \ldots ~t_p)$ where - $I$ is the name of an inductive declaration of the form - $\Ind{\Gamma}{m}{I:A}{c_1:\forall p_1:P_1,\ldots \forall - p_m:P_m,C_1;\ldots;c_n:\forall p_1:P_1,\ldots \forall - p_m:P_m,C_n}$ - (in particular, it is not mutually defined and it has $m$ - parameters) and $X$ does not occur in any of the $t_i$, and the - (instantiated) types of constructor $C_i\{p_j/a_j\}_{j=1\ldots m}$ - of $I$ satisfy - the nested positivity condition for $X$ -%\item more generally, when $T$ is not a type, $X$ occurs strictly -%positively in $T[x:U]u$ if $X$ does not occur in $U$ but occurs -%strictly positively in $u$ -\end{itemize} -% -The type of constructor $T$ of $I$ {\em satisfies the nested -positivity condition} for a constant $X$ in the following -cases: - -\begin{itemize} -\item $T=(I~b_1\ldots b_m~u_1\ldots ~u_{p})$, $I$ is an inductive - definition with $m$ parameters and $X$ does not occur in -any $u_i$ -\item $T=\forall~x:U,V$ and $X$ occurs only strictly positively in $U$ and -the type $V$ satisfies the nested positivity condition for $X$ -\end{itemize} - -\newcommand\vv{\textSFxi} % │ -\newcommand\hh{\textSFx} % ─ 
-\newcommand\vh{\textSFviii} % ├ -\newcommand\hv{\textSFii} % └ -\newlength\framecharacterwidth -\settowidth\framecharacterwidth{\hh} -\newcommand\ws{\hbox{}\hskip\the\framecharacterwidth} -\newcommand\ruleref[1]{\hskip.25em\dots\hskip.2em{\em (bullet #1)}} -\newcommand{\NatTree}{\mbox{\textsf{nattree}}} -\newcommand{\NatTreeA}{\mbox{\textsf{nattree}}~\ensuremath{A}} -\newcommand{\cnode}{\mbox{\textsf{node}}} -\newcommand{\cleaf}{\mbox{\textsf{leaf}}} - -\noindent For instance, if one considers the following variant of a tree type branching over the natural numbers - -\begin{verbatim} -Inductive nattree (A:Type) : Type := - | leaf : nattree A - | node : A -> (nat -> nattree A) -> nattree A -\end{verbatim} - -\begin{latexonly} -\noindent Then every instantiated constructor of $\NatTreeA$ satisfies the nested positivity condition for $\NatTree$\\ -\noindent -\ws\ws\vv\\ -\ws\ws\vh\hh\ws concerning type $\NatTreeA$ of constructor $\cleaf$:\\ -\ws\ws\vv\ws\ws\ws\ws Type $\NatTreeA$ of constructor $\cleaf$ satisfies the positivity condition for $\NatTree$\\ -\ws\ws\vv\ws\ws\ws\ws because $\NatTree$ does not appear in any (real) arguments of the type of that constructor\\ -\ws\ws\vv\ws\ws\ws\ws (primarily because $\NatTree$ does not have any (real) arguments)\ruleref1\\ -\ws\ws\vv\\ -\ws\ws\hv\hh\ws concerning type $\forall~A\ra(\NN\ra\NatTreeA)\ra\NatTreeA$ of constructor $\cnode$:\\ - \ws\ws\ws\ws\ws\ws\ws Type $\forall~A:\Type,A\ra(\NN\ra\NatTreeA)\ra\NatTreeA$ of constructor $\cnode$\\ -\ws\ws\ws\ws\ws\ws\ws satisfies the positivity condition for $\NatTree$ because:\\ -\ws\ws\ws\ws\ws\ws\ws\vv\\ -\ws\ws\ws\ws\ws\ws\ws\vh\hh\ws $\NatTree$ occurs only strictly positively in $\Type$\ruleref1\\ -\ws\ws\ws\ws\ws\ws\ws\vv\\ -\ws\ws\ws\ws\ws\ws\ws\vh\hh\ws $\NatTree$ occurs only strictly positively in $A$\ruleref1\\ -\ws\ws\ws\ws\ws\ws\ws\vv\\ - \ws\ws\ws\ws\ws\ws\ws\vh\hh\ws $\NatTree$ occurs only strictly positively in $\NN\ra\NatTreeA$\ruleref{3+2}\\ 
-\ws\ws\ws\ws\ws\ws\ws\vv\\ -\ws\ws\ws\ws\ws\ws\ws\hv\hh\ws $\NatTree$ satisfies the positivity condition for $\NatTreeA$\ruleref1 -\end{latexonly} -\begin{rawhtml} -<pre> -<span style="font-family:serif">Then every instantiated constructor of <span style="font-family:monospace">nattree A</span> satisfies the nested positivity condition for <span style="font-family:monospace">nattree</span></span> - │ - ├─ <span style="font-family:serif">concerning type <span style="font-family:monospace">nattree A</span> of constructor <span style="font-family:monospace">nil</span>:</span> - │ <span style="font-family:serif">Type <span style="font-family:monospace">nattree A</span> of constructor <span style="font-family:monospace">nil</span> satisfies the positivity condition for <span style="font-family:monospace">nattree</span></span> - │ <span style="font-family:serif">because <span style="font-family:monospace">nattree</span> does not appear in any (real) arguments of the type of that constructor</span> - │ <span style="font-family:serif">(primarily because nattree does not have any (real) arguments) ... <span style="font-style:italic">(bullet 1)</span></span> - │ - ╰─ <span style="font-family:serif">concerning type <span style="font-family:monospace">∀ A → (nat → nattree A) → nattree A</span> of constructor <span style="font-family:monospace">cons</span>:</span> - <span style="font-family:serif">Type <span style="font-family:monospace">∀ A : Type, A → (nat → nattree A) → nattree A</span> of constructor <span style="font-family:monospace">cons</span></span> - <span style="font-family:serif">satisfies the positivity condition for <span style="font-family:monospace">nattree</span> because:</span> - │ - ├─ <span style="font-family:serif"><span style="font-family:monospace">nattree</span> occurs only strictly positively in <span style="font-family:monospace">Type</span> ... 
<span style="font-style:italic">(bullet 1)</span></span> - │ - ├─ <span style="font-family:serif"><span style="font-family:monospace">nattree</span> occurs only strictly positively in <span style="font-family:monospace">A</span> ... <span style="font-style:italic">(bullet 1)</span></span> - │ - ├─ <span style="font-family:serif"><span style="font-family:monospace">nattree</span> occurs only strictly positively in <span style="font-family:monospace">nat → nattree A</span> ... <span style="font-style:italic">(bullet 3+2)</span></span> - │ - ╰─ <span style="font-family:serif"><span style="font-family:monospace">nattree</span> satisfies the positivity condition for <span style="font-family:monospace">nattree A</span> ... <span style="font-style:italic">(bullet 1)</span></span> -</pre> -\end{rawhtml} - -\paragraph{Correctness rules.} -We shall now describe the rules allowing the introduction of a new -inductive definition. - -\begin{description} -\item[W-Ind] Let $E$ be a global environment and - $\Gamma_P,\Gamma_I,\Gamma_C$ are contexts such that - $\Gamma_I$ is $[I_1:\forall \Gamma_P,A_1;\ldots;I_k:\forall - \Gamma_P,A_k]$ and $\Gamma_C$ is - $[c_1:\forall \Gamma_P,C_1;\ldots;c_n:\forall \Gamma_P,C_n]$. -\inference{ - \frac{ - (\WTE{\Gamma_P}{A_j}{s'_j})_{j=1\ldots k} - ~~~~~~~~ (\WTE{\Gamma_I;\Gamma_P}{C_i}{s_{q_i}})_{i=1\ldots n} -} - {\WF{E;\Ind{}{p}{\Gamma_I}{\Gamma_C}}{\Gamma}}} -provided that the following side conditions hold: -\begin{itemize} -\item $k>0$ and all of $I_j$ and $c_i$ are distinct names for $j=1\ldots k$ and $i=1\ldots n$, -\item $p$ is the number of parameters of \NInd{}{\Gamma_I}{\Gamma_C} - and $\Gamma_P$ is the context of parameters, -\item for $j=1\ldots k$ we have that $A_j$ is an arity of sort $s_j$ and $I_j - \notin E$, -\item for $i=1\ldots n$ we have that $C_i$ is a type of constructor of - $I_{q_i}$ which satisfies the positivity condition for $I_1 \ldots I_k$ - and $c_i \notin \Gamma \cup E$. 
-\end{itemize} -\end{description} -One can remark that there is a constraint between the sort of the -arity of the inductive type and the sort of the type of its -constructors which will always be satisfied for the impredicative sort -{\Prop} but may fail to define inductive definition -on sort \Set{} and generate constraints between universes for -inductive definitions in the {\Type} hierarchy. - -\paragraph{Examples.} -It is well known that existential quantifier can be encoded as an -inductive definition. -The following declaration introduces the second-order existential -quantifier $\exists X.P(X)$. -\begin{coq_example*} -Inductive exProp (P:Prop->Prop) : Prop := - exP_intro : forall X:Prop, P X -> exProp P. -\end{coq_example*} -The same definition on \Set{} is not allowed and fails: -% (********** The following is not correct and should produce **********) -% (*** Error: Large non-propositional inductive types must be in Type***) -\begin{coq_example} -Fail Inductive exSet (P:Set->Prop) : Set := - exS_intro : forall X:Set, P X -> exSet P. -\end{coq_example} -It is possible to declare the same inductive definition in the -universe \Type. -The \texttt{exType} inductive definition has type $(\Type_i \ra\Prop)\ra -\Type_j$ with the constraint that the parameter \texttt{X} of \texttt{exT\_intro} has type $\Type_k$ with $k<j$ and $k\leq i$. -\begin{coq_example*} -Inductive exType (P:Type->Prop) : Type := - exT_intro : forall X:Type, P X -> exType P. -\end{coq_example*} -%We shall assume for the following definitions that, if necessary, we -%annotated the type of constructors such that we know if the argument -%is recursive or not. We shall write the type $(x:_R T)C$ if it is -%a recursive argument and $(x:_P T)C$ if the argument is not recursive. - -\paragraph[Template polymorphism.]{Template polymorphism.\index{Template polymorphism}} -\label{Template-polymorphism} - -Inductive types declared in {\Type} are -polymorphic over their arguments in {\Type}. 
-If $A$ is an arity of some sort and $s$ is a sort, we write $A_{/s}$ for the arity -obtained from $A$ by replacing its sort with $s$. Especially, if $A$ -is well-typed in some global environment and local context, then $A_{/s}$ is typable -by typability of all products in the Calculus of Inductive Constructions. -The following typing rule is added to the theory. - -\begin{description} -\item[Ind-Family] Let $\Ind{}{p}{\Gamma_I}{\Gamma_C}$ be an - inductive definition. Let $\Gamma_P = [p_1:P_1;\ldots;p_{p}:P_{p}]$ - be its context of parameters, $\Gamma_I = [I_1:\forall - \Gamma_P,A_1;\ldots;I_k:\forall \Gamma_P,A_k]$ its context of - definitions and $\Gamma_C = [c_1:\forall - \Gamma_P,C_1;\ldots;c_n:\forall \Gamma_P,C_n]$ its context of - constructors, with $c_i$ a constructor of $I_{q_i}$. - - Let $m \leq p$ be the length of the longest prefix of parameters - such that the $m$ first arguments of all occurrences of all $I_j$ in - all $C_k$ (even the occurrences in the hypotheses of $C_k$) are - exactly applied to $p_1~\ldots~p_m$ ($m$ is the number of {\em - recursively uniform parameters} and the $p-m$ remaining parameters - are the {\em recursively non-uniform parameters}). Let $q_1$, - \ldots, $q_r$, with $0\leq r\leq m$, be a (possibly) partial - instantiation of the recursively uniform parameters of - $\Gamma_P$. 
We have: - -\inference{\frac -{\left\{\begin{array}{l} -\Ind{}{p}{\Gamma_I}{\Gamma_C} \in E\\ -(E[] \vdash q_l : P'_l)_{l=1\ldots r}\\ -(\WTELECONV{}{P'_l}{\subst{P_l}{p_u}{q_u}_{u=1\ldots l-1}})_{l=1\ldots r}\\ -1 \leq j \leq k -\end{array} -\right.} -{E[] \vdash I_j\,q_1\,\ldots\,q_r:\forall [p_{r+1}:P_{r+1};\ldots;p_{p}:P_{p}], (A_j)_{/s_j}} -} - -provided that the following side conditions hold: - -\begin{itemize} -\item $\Gamma_{P'}$ is the context obtained from $\Gamma_P$ by -replacing each $P_l$ that is an arity with $P'_l$ for $1\leq l \leq r$ (notice that -$P_l$ arity implies $P'_l$ arity since $\WTELECONV{}{P'_l}{ \subst{P_l}{p_u}{q_u}_{u=1\ldots l-1}}$); -\item there are sorts $s_i$, for $1 \leq i \leq k$ such that, for - $\Gamma_{I'} = [I_1:\forall - \Gamma_{P'},(A_1)_{/s_1};\ldots;I_k:\forall \Gamma_{P'},(A_k)_{/s_k}]$ -we have $(\WTE{\Gamma_{I'};\Gamma_{P'}}{C_i}{s_{q_i}})_{i=1\ldots n}$; -\item the sorts $s_i$ are such that all eliminations, to {\Prop}, {\Set} and - $\Type(j)$, are allowed (see Section~\ref{allowedeleminationofsorts}). -\end{itemize} -\end{description} -% -Notice that if $I_j\,q_1\,\ldots\,q_r$ is typable using the rules {\bf -Ind-Const} and {\bf App}, then it is typable using the rule {\bf -Ind-Family}. Conversely, the extended theory is not stronger than the -theory without {\bf Ind-Family}. We get an equiconsistency result by -mapping each $\Ind{}{p}{\Gamma_I}{\Gamma_C}$ occurring into a -given derivation into as many different inductive types and constructors -as the number of different (partial) replacements of sorts, needed for -this derivation, in the parameters that are arities (this is possible -because $\Ind{}{p}{\Gamma_I}{\Gamma_C}$ well-formed implies -that $\Ind{}{p}{\Gamma_{I'}}{\Gamma_{C'}}$ is well-formed and -has the same allowed eliminations, where -$\Gamma_{I'}$ is defined as above and $\Gamma_{C'} = [c_1:\forall -\Gamma_{P'},C_1;\ldots;c_n:\forall \Gamma_{P'},C_n]$). 
That is, -the changes in the types of each partial instance -$q_1\,\ldots\,q_r$ can be characterized by the ordered sets of arity -sorts among the types of parameters, and to each signature is -associated a new inductive definition with fresh names. Conversion is -preserved as any (partial) instance $I_j\,q_1\,\ldots\,q_r$ or -$C_i\,q_1\,\ldots\,q_r$ is mapped to the names chosen in the specific -instance of $\Ind{}{p}{\Gamma_I}{\Gamma_C}$. - -\newcommand{\Single}{\mbox{\textsf{Set}}} - -In practice, the rule {\bf Ind-Family} is used by {\Coq} only when all the -inductive types of the inductive definition are declared with an arity whose -sort is in the $\Type$ -hierarchy. Then, the polymorphism is over the parameters whose -type is an arity of sort in the {\Type} hierarchy. -The sort $s_j$ are -chosen canonically so that each $s_j$ is minimal with respect to the -hierarchy ${\Prop}\subset{\Set_p}\subset\Type$ where $\Set_p$ is -predicative {\Set}. -%and ${\Prop_u}$ is the sort of small singleton -%inductive types (i.e. of inductive types with one single constructor -%and that contains either proofs or inhabitants of singleton types -%only). -More precisely, an empty or small singleton inductive definition -(i.e. an inductive definition of which all inductive types are -singleton -- see paragraph~\ref{singleton}) is set in -{\Prop}, a small non-singleton inductive type is set in {\Set} (even -in case {\Set} is impredicative -- see Section~\ref{impredicativity}), -and otherwise in the {\Type} hierarchy. - -Note that the side-condition about allowed elimination sorts in the -rule~{\bf Ind-Family} is just to avoid to recompute the allowed -elimination sorts at each instance of a pattern-matching (see -section~\ref{elimdep}). -As an example, let us consider the following definition: -\begin{coq_example*} -Inductive option (A:Type) : Type := -| None : option A -| Some : A -> option A. 
-
-\end{coq_example*}
-%
-As the definition is set in the {\Type} hierarchy, it is used
-polymorphically over its parameters whose types are arities of a sort
-in the {\Type} hierarchy. Here, the parameter $A$ has this property,
-hence, if \texttt{option} is applied to a type in {\Set}, the result is
-in {\Set}. Note that if \texttt{option} is applied to a type in {\Prop},
-then, the result is not set in \texttt{Prop} but in \texttt{Set}
-still. This is because \texttt{option} is not a singleton type (see
-section~\ref{singleton}) and it would lose the elimination to {\Set} and
-{\Type} if set in {\Prop}.
-
-\begin{coq_example}
-Check (fun A:Set => option A).
-Check (fun A:Prop => option A).
-\end{coq_example}
-%
-Here is another example.
-%
-\begin{coq_example*}
-Inductive prod (A B:Type) : Type := pair : A -> B -> prod A B.
-\end{coq_example*}
-%
-As \texttt{prod} is a singleton type, it will be in {\Prop} if applied
-twice to propositions, in {\Set} if applied twice to at least one type
-in {\Set} and none in {\Type}, and in {\Type} otherwise. In all cases,
-the three kinds of elimination schemes are allowed.
-
-\begin{coq_example}
-Check (fun A:Set => prod A).
-Check (fun A:Prop => prod A A).
-Check (fun (A:Prop) (B:Set) => prod A B).
-Check (fun (A:Type) (B:Prop) => prod A B).
-\end{coq_example}
-
-\Rem Template polymorphism used to be called ``sort-polymorphism of
-inductive types'' before universe polymorphism (see
-Chapter~\ref{Universes-full}) was introduced.
-
-\subsection{Destructors}
-The specification of inductive definitions with arities and
-constructors is quite natural. But we still have to say how to use an
-object in an inductive type.
-
-This problem is rather delicate. There are actually several different
-ways to do that. Some of them are logically equivalent but not always
-equivalent from the computational point of view or from the user point
-of view. 
- -From the computational point of view, we want to be able to define a -function whose domain is an inductively defined type by using a -combination of case analysis over the possible constructors of the -object and recursion. - -Because we need to keep a consistent theory and also we prefer to keep -a strongly normalizing reduction, we cannot accept any sort of -recursion (even terminating). So the basic idea is to restrict -ourselves to primitive recursive functions and functionals. - -For instance, assuming a parameter $A:\Set$ exists in the local context, we -want to build a function \length\ of type $\ListA\ra \nat$ which -computes the length of the list, so such that $(\length~(\Nil~A)) = \nO$ -and $(\length~(\cons~A~a~l)) = (\nS~(\length~l))$. We want these -equalities to be recognized implicitly and taken into account in the -conversion rule. - -From the logical point of view, we have built a type family by giving -a set of constructors. We want to capture the fact that we do not -have any other way to build an object in this type. So when trying to -prove a property about an object $m$ in an inductive definition it is -enough to enumerate all the cases where $m$ starts with a different -constructor. - -In case the inductive definition is effectively a recursive one, we -want to capture the extra property that we have built the smallest -fixed point of this recursive equation. This says that we are only -manipulating finite objects. This analysis provides induction -principles. 
-For instance, in order to prove $\forall l:\ListA,(\haslengthA~l~(\length~l))$ -it is enough to prove: -% -\begin{itemize} - \item $(\haslengthA~(\Nil~A)~(\length~(\Nil~A)))$ - \item $\forall a:A, \forall l:\ListA, (\haslengthA~l~(\length~l)) \ra\\ - \ra (\haslengthA~(\cons~A~a~l)~(\length~(\cons~A~a~l)))$ -\end{itemize} -% -which given the conversion equalities satisfied by \length\ is the -same as proving: -% -\begin{itemize} - \item $(\haslengthA~(\Nil~A)~\nO)$ - \item $\forall a:A, \forall l:\ListA, (\haslengthA~l~(\length~l)) \ra\\ - \ra (\haslengthA~(\cons~A~a~l)~(\nS~(\length~l)))$ -\end{itemize} -% -One conceptually simple way to do that, following the basic scheme -proposed by Martin-L\"of in his Intuitionistic Type Theory, is to -introduce for each inductive definition an elimination operator. At -the logical level it is a proof of the usual induction principle and -at the computational level it implements a generic operator for doing -primitive recursion over the structure. - -But this operator is rather tedious to implement and use. We choose in -this version of {\Coq} to factorize the operator for primitive recursion -into two more primitive operations as was first suggested by Th. Coquand -in~\cite{Coq92}. One is the definition by pattern-matching. The second one is a definition by guarded fixpoints. - -\subsubsection[The {\tt match\ldots with \ldots end} construction.]{The {\tt match\ldots with \ldots end} construction.\label{Caseexpr} -\index{match@{\tt match\ldots with\ldots end}}} - -The basic idea of this operator is that we have an object -$m$ in an inductive type $I$ and we want to prove a property -which possibly depends on $m$. For this, it is enough to prove the -property for $m = (c_i~u_1\ldots u_{p_i})$ for each constructor of $I$. 
-The \Coq{} term for this proof will be written: -\[\kw{match}~m~\kw{with}~ (c_1~x_{11}~...~x_{1p_1}) \Ra f_1 ~|~\ldots~|~ - (c_n~x_{n1}~...~x_{np_n}) \Ra f_n~ \kw{end}\] -In this expression, if -$m$ eventually happens to evaluate to $(c_i~u_1\ldots u_{p_i})$ then -the expression will behave as specified in its $i$-th branch and -it will reduce to $f_i$ where the $x_{i1}$\ldots $x_{ip_i}$ are replaced -by the $u_1\ldots u_{p_i}$ according to the $\iota$-reduction. - -Actually, for type-checking a \kw{match\ldots with\ldots end} -expression we also need to know the predicate $P$ to be proved by case -analysis. In the general case where $I$ is an inductively defined -$n$-ary relation, $P$ is a predicate over $n+1$ arguments: the $n$ first ones -correspond to the arguments of $I$ (parameters excluded), and the last -one corresponds to object $m$. \Coq{} can sometimes infer this -predicate but sometimes not. The concrete syntax for describing this -predicate uses the \kw{as\ldots in\ldots return} construction. For -instance, let us assume that $I$ is an unary predicate with one -parameter and one argument. The predicate is made explicit using the syntax: -\[\kw{match}~m~\kw{as}~ x~ \kw{in}~ I~\verb!_!~a~ \kw{return}~ P - ~\kw{with}~ (c_1~x_{11}~...~x_{1p_1}) \Ra f_1 ~|~\ldots~|~ - (c_n~x_{n1}~...~x_{np_n}) \Ra f_n \kw{end}\] -The \kw{as} part can be omitted if either the result type does not -depend on $m$ (non-dependent elimination) or $m$ is a variable (in -this case, $m$ can occur in $P$ where it is considered a bound variable). -The \kw{in} part can be -omitted if the result type does not depend on the arguments of -$I$. Note that the arguments of $I$ corresponding to parameters -\emph{must} be \verb!_!, because the result type is not generalized to -all possible values of the parameters. -The other arguments of $I$ -(sometimes called indices in the literature) -% NOTE: e.g. 
http://www.qatar.cmu.edu/~sacchini/papers/types08.pdf -have to be variables -($a$ above) and these variables can occur in $P$. -The expression after \kw{in} -must be seen as an \emph{inductive type pattern}. Notice that -expansion of implicit arguments and notations apply to this pattern. -% -For the purpose of presenting the inference rules, we use a more -compact notation: -\[ \Case{(\lb a x \mto P)}{m}{ \lb x_{11}~...~x_{1p_1} \mto f_1 ~|~\ldots~|~ - \lb x_{n1}...x_{np_n} \mto f_n}\] - -%% CP 06/06 Obsolete avec la nouvelle syntaxe et incompatible avec la -%% presentation theorique qui suit -% \paragraph{Non-dependent elimination.} -% -% When defining a function of codomain $C$ by case analysis over an -% object in an inductive type $I$, we build an object of type $I -% \ra C$. The minimality principle on an inductively defined logical -% predicate $I$ of type $A \ra \Prop$ is often used to prove a property -% $\forall x:A,(I~x)\ra (C~x)$. These are particular cases of the dependent -% principle that we stated before with a predicate which does not depend -% explicitly on the object in the inductive definition. - -% For instance, a function testing whether a list is empty -% can be -% defined as: -% \[\kw{fun} l:\ListA \Ra \kw{match}~l~\kw{with}~ \Nil \Ra \true~ -% |~(\cons~a~m) \Ra \false \kw{end}\] -% represented by -% \[\lb l:\ListA \mto\Case{\bool}{l}{\true~ |~ \lb a~m,~\false}\] -%\noindent {\bf Remark. } - -% In the system \Coq\ the expression above, can be -% written without mentioning -% the dummy abstraction: -% \Case{\bool}{l}{\Nil~ \mbox{\tt =>}~\true~ |~ (\cons~a~m)~ -% \mbox{\tt =>}~ \false} - -\paragraph[Allowed elimination sorts.]{Allowed elimination sorts.\index{Elimination sorts}} -\label{allowedeleminationofsorts} - -An important question for building the typing rule for \kw{match} is -what can be the type of $\lb a x \mto P$ with respect to the type of $m$. 
If -$m:I$ and -$I:A$ and -$\lb a x \mto P : B$ -then by \compat{I:A}{B} we mean that one can use $\lb a x \mto P$ with $m$ in the above -match-construct. - -\paragraph{Notations.} -The \compat{I:A}{B} is defined as the smallest relation satisfying the -following rules: -We write \compat{I}{B} for \compat{I:A}{B} where $A$ is the type of -$I$. - -The case of inductive definitions in sorts \Set\ or \Type{} is simple. -There is no restriction on the sort of the predicate to be -eliminated. -% -\begin{description} -\item[Prod] \inference{\frac{\compat{(I~x):A'}{B'}} - {\compat{I:\forall x:A, A'}{\forall x:A, B'}}} -\item[{\Set} \& \Type] \inference{\frac{ - s_1 \in \{\Set,\Type(j)\}~~~~~~~~s_2 \in \Sort}{\compat{I:s_1}{I\ra s_2}}} -\end{description} -% -The case of Inductive definitions of sort \Prop{} is a bit more -complicated, because of our interpretation of this sort. The only -harmless allowed elimination, is the one when predicate $P$ is also of -sort \Prop. -\begin{description} -\item[\Prop] \inference{\compat{I:\Prop}{I\ra\Prop}} -\end{description} -\Prop{} is the type of logical propositions, the proofs of properties -$P$ in \Prop{} could not be used for computation and are consequently -ignored by the extraction mechanism. -Assume $A$ and $B$ are two propositions, and the logical disjunction -$A\vee B$ is defined inductively by: -\begin{coq_example*} -Inductive or (A B:Prop) : Prop := - or_introl : A -> or A B | or_intror : B -> or A B. -\end{coq_example*} -The following definition which computes a boolean value by case over -the proof of \texttt{or A B} is not accepted: -% (***************************************************************) -% (*** This example should fail with ``Incorrect elimination'' ***) -\begin{coq_example} -Fail Definition choice (A B: Prop) (x:or A B) := - match x with or_introl _ _ a => true | or_intror _ _ b => false end. 
-\end{coq_example} -From the computational point of view, the structure of the proof of -\texttt{(or A B)} in this term is needed for computing the boolean -value. - -In general, if $I$ has type \Prop\ then $P$ cannot have type $I\ra -\Set$, because it will mean to build an informative proof of type -$(P~m)$ doing a case analysis over a non-computational object that -will disappear in the extracted program. But the other way is safe -with respect to our interpretation we can have $I$ a computational -object and $P$ a non-computational one, it just corresponds to proving -a logical property of a computational object. - -% Also if $I$ is in one of the sorts \{\Prop, \Set\}, one cannot in -% general allow an elimination over a bigger sort such as \Type. But -% this operation is safe whenever $I$ is a {\em small inductive} type, -% which means that all the types of constructors of -% $I$ are small with the following definition:\\ -% $(I~t_1\ldots t_s)$ is a {\em small type of constructor} and -% $\forall~x:T,C$ is a small type of constructor if $C$ is and if $T$ -% has type \Prop\ or \Set. \index{Small inductive type} - -% We call this particular elimination which gives the possibility to -% compute a type by induction on the structure of a term, a {\em strong -% elimination}\index{Strong elimination}. - -In the same spirit, elimination on $P$ of type $I\ra -\Type$ cannot be allowed because it trivially implies the elimination -on $P$ of type $I\ra \Set$ by cumulativity. It also implies that there -are two proofs of the same property which are provably different, -contradicting the proof-irrelevance property which is sometimes a -useful axiom: -\begin{coq_example} -Axiom proof_irrelevance : forall (P : Prop) (x y : P), x=y. -\end{coq_example} -\begin{coq_eval} -Reset proof_irrelevance. 
-\end{coq_eval} -The elimination of an inductive definition of type \Prop\ on a -predicate $P$ of type $I\ra \Type$ leads to a paradox when applied to -impredicative inductive definition like the second-order existential -quantifier \texttt{exProp} defined above, because it give access to -the two projections on this type. - -%\paragraph{Warning: strong elimination} -%\index{Elimination!Strong elimination} -%In previous versions of Coq, for a small inductive definition, only the -%non-informative strong elimination on \Type\ was allowed, because -%strong elimination on \Typeset\ was not compatible with the current -%extraction procedure. In this version, strong elimination on \Typeset\ -%is accepted but a dummy element is extracted from it and may generate -%problems if extracted terms are explicitly used such as in the -%{\tt Program} tactic or when extracting ML programs. - -\paragraph[Empty and singleton elimination]{Empty and singleton elimination\label{singleton} -\index{Elimination!Singleton elimination} -\index{Elimination!Empty elimination}} - -There are special inductive definitions in \Prop\ for which more -eliminations are allowed. -\begin{description} -\item[\Prop-extended] -\inference{ - \frac{I \mbox{~is an empty or singleton - definition}~~~s \in \Sort}{\compat{I:\Prop}{I\ra s}} -} -\end{description} -% -% A {\em singleton definition} has always an informative content, -% even if it is a proposition. -% -A {\em singleton -definition} has only one constructor and all the arguments of this -constructor have type \Prop. In that case, there is a canonical -way to interpret the informative extraction on an object in that type, -such that the elimination on any sort $s$ is legal. Typical examples are -the conjunction of non-informative propositions and the equality. -If there is an hypothesis $h:a=b$ in the local context, it can be used for -rewriting not only in logical propositions but also in any type. -% In that case, the term \verb!eq_rec! 
which was defined as an axiom, is -% now a term of the calculus. -\begin{coq_eval} -Require Extraction. -\end{coq_eval} -\begin{coq_example} -Print eq_rec. -Extraction eq_rec. -\end{coq_example} -An empty definition has no constructors, in that case also, -elimination on any sort is allowed. - -\paragraph{Type of branches.} -Let $c$ be a term of type $C$, we assume $C$ is a type of constructor -for an inductive type $I$. Let $P$ be a term that represents the -property to be proved. -We assume $r$ is the number of parameters and $p$ is the number of arguments. - -We define a new type \CI{c:C}{P} which represents the type of the -branch corresponding to the $c:C$ constructor. -\[ -\begin{array}{ll} -\CI{c:(I~p_1\ldots p_r\ t_1 \ldots t_p)}{P} &\equiv (P~t_1\ldots ~t_p~c) \\[2mm] -\CI{c:\forall~x:T,C}{P} &\equiv \forall~x:T,\CI{(c~x):C}{P} -\end{array} -\] -We write \CI{c}{P} for \CI{c:C}{P} with $C$ the type of $c$. - -\paragraph{Example.} -The following term in concrete syntax: -\begin{verbatim} -match t as l return P' with -| nil _ => t1 -| cons _ hd tl => t2 -end -\end{verbatim} -can be represented in abstract syntax as $$\Case{P}{t}{f_1\,|\,f_2}$$ -where -\begin{eqnarray*} - P & = & \lambda~l~.~P^\prime\\ - f_1 & = & t_1\\ - f_2 & = & \lambda~(hd:\nat)~.~\lambda~(tl:\List~\nat)~.~t_2 -\end{eqnarray*} -According to the definition: -\begin{latexonly}\vskip.5em\noindent\end{latexonly}% -\begin{htmlonly} - -\end{htmlonly} -$ \CI{(\Nil~\nat)}{P} \equiv \CI{(\Nil~\nat) : (\List~\nat)}{P} \equiv (P~(\Nil~\nat))$ -\begin{latexonly}\vskip.5em\noindent\end{latexonly}% -\begin{htmlonly} - -\end{htmlonly} -$ \CI{(\cons~\nat)}{P} - \equiv\CI{(\cons~\nat) : (\nat\ra\List~\nat\ra\List~\nat)}{P} \equiv\\ - \equiv\forall n:\nat, \CI{(\cons~\nat~n) : \List~\nat\ra\List~\nat)}{P} \equiv\\ - \equiv\forall n:\nat, \forall l:\List~\nat, \CI{(\cons~\nat~n~l) : \List~\nat)}{P} \equiv\\ -\equiv\forall n:\nat, \forall l:\List~\nat,(P~(\cons~\nat~n~l))$. 
-\begin{latexonly}\vskip.5em\noindent\end{latexonly}% -\begin{htmlonly} - -\end{htmlonly} -Given some $P$, then \CI{(\Nil~\nat)}{P} represents the expected type of $f_1$, and -\CI{(\cons~\nat)}{P} represents the expected type of $f_2$. - -\paragraph{Typing rule.} - -Our very general destructor for inductive definition enjoys the -following typing rule -% , where we write -% \[ -% \Case{P}{c}{[x_{11}:T_{11}]\ldots[x_{1p_1}:T_{1p_1}]g_1\ldots -% [x_{n1}:T_{n1}]\ldots[x_{np_n}:T_{np_n}]g_n} -% \] -% for -% \[ -% \Case{P}{c}{(c_1~x_{11}~...~x_{1p_1}) \Ra g_1 ~|~\ldots~|~ -% (c_n~x_{n1}...x_{np_n}) \Ra g_n } -% \] - -\begin{description} -\item[match] \label{elimdep} \index{Typing rules!match} -\inference{ -\frac{\WTEG{c}{(I~q_1\ldots q_r~t_1\ldots t_s)}~~ - \WTEG{P}{B}~~\compat{(I~q_1\ldots q_r)}{B} - ~~ -(\WTEG{f_i}{\CI{(c_{p_i}~q_1\ldots q_r)}{P}})_{i=1\ldots l}} -{\WTEG{\Case{P}{c}{f_1|\ldots |f_l}}{(P\ t_1\ldots t_s\ c)}}}%\\[3mm] - -provided $I$ is an inductive type in a definition -\Ind{}{r}{\Gamma_I}{\Gamma_C} with -$\Gamma_C = [c_1:C_1;\ldots;c_n:C_n]$ and $c_{p_1}\ldots c_{p_l}$ are the -only constructors of $I$. -\end{description} - -\paragraph{Example.} - -Below is a typing rule for the term shown in the previous example: -\inference{ - \frac{% - \WTEG{t}{(\List~\nat)}~~~~% - \WTEG{P}{B}~~~~% - \compat{(\List~\nat)}{B}~~~~% - \WTEG{f_1}{\CI{(\Nil~\nat)}{P}}~~~~% - \WTEG{f_2}{\CI{(\cons~\nat)}{P}}% - } -{\WTEG{\Case{P}{t}{f_1|f_2}}{(P~t)}}} - -\paragraph[Definition of $\iota$-reduction.]{Definition of $\iota$-reduction.\label{iotared} -\index{iota-reduction@$\iota$-reduction}} -We still have to define the $\iota$-reduction in the general case. - -A $\iota$-redex is a term of the following form: -\[\Case{P}{(c_{p_i}~q_1\ldots q_r~a_1\ldots a_m)}{f_1|\ldots | - f_l}\] -with $c_{p_i}$ the $i$-th constructor of the inductive type $I$ with $r$ -parameters. 
The $\iota$-contraction of this term is $(f_i~a_1\ldots a_m)$ leading
-to the general reduction rule:
-\[ \Case{P}{(c_{p_i}~q_1\ldots q_r~a_1\ldots a_m)}{f_1|\ldots |
-    f_l} \triangleright_{\iota} (f_i~a_1\ldots a_m) \]
-% -For instance in the case of natural numbers, a proof of the induction -principle of type -\[\forall P:\nat\ra\Prop, (P~\nO)\ra(\forall n:\nat, (P~n)\ra(P~(\nS~n)))\ra -\forall n:\nat, (P~n)\] -can be represented by the term: -\[\begin{array}{l} -\lb P:\nat\ra\Prop\mto\lb f:(P~\nO)\mto \lb g:(\forall n:\nat, -(P~n)\ra(P~(\nS~n))) \mto\\ -\Fix{h}{h:\forall n:\nat, (P~n):=\lb n:\nat\mto \Case{P}{n}{f~|~\lb - p:\nat\mto (g~p~(h~p))}} -\end{array} -\] -% -Before accepting a fixpoint definition as being correctly typed, we -check that the definition is ``guarded''. A precise analysis of this -notion can be found in~\cite{Gim94}. -% -The first stage is to precise on which argument the fixpoint will be -decreasing. The type of this argument should be an inductive -definition. -% -For doing this, the syntax of fixpoints is extended and becomes - \[\Fix{f_i}{f_1/k_1:A_1:=t_1 \ldots f_n/k_n:A_n:=t_n}\] -where $k_i$ are positive integers. -Each $k_i$ represents the index of pararameter of $f_i$, on which $f_i$ is decreasing. -Each $A_i$ should be a type (reducible to a term) starting with at least -$k_i$ products $\forall y_1:B_1,\ldots \forall y_{k_i}:B_{k_i}, A'_i$ -and $B_{k_i}$ an is unductive type. - -Now in the definition $t_i$, if $f_j$ occurs then it should be applied -to at least $k_j$ arguments and the $k_j$-th argument should be -syntactically recognized as structurally smaller than $y_{k_i}$ - - -The definition of being structurally smaller is a bit technical. -One needs first to define the notion of -{\em recursive arguments of a constructor}\index{Recursive arguments}. -For an inductive definition \Ind{}{r}{\Gamma_I}{\Gamma_C}, -if the type of a constructor $c$ has the form -$\forall p_1:P_1,\ldots \forall p_r:P_r, -\forall x_1:T_1, \ldots \forall x_r:T_r, (I_j~p_1\ldots -p_r~t_1\ldots t_s)$, then the recursive arguments will correspond to $T_i$ in -which one of the $I_l$ occurs. 
- -The main rules for being structurally smaller are the following:\\ -Given a variable $y$ of type an inductive -definition in a declaration -\Ind{}{r}{\Gamma_I}{\Gamma_C} -where $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is - $[c_1:C_1;\ldots;c_n:C_n]$. -The terms structurally smaller than $y$ are: -\begin{itemize} -\item $(t~u)$ and $\lb x:u \mto t$ when $t$ is structurally smaller than $y$. -\item \Case{P}{c}{f_1\ldots f_n} when each $f_i$ is structurally - smaller than $y$. \\ - If $c$ is $y$ or is structurally smaller than $y$, its type is an inductive - definition $I_p$ part of the inductive - declaration corresponding to $y$. - Each $f_i$ corresponds to a type of constructor $C_q \equiv - \forall p_1:P_1,\ldots,\forall p_r:P_r, \forall y_1:B_1, \ldots \forall y_k:B_k, (I~a_1\ldots a_k)$ - and can consequently be - written $\lb y_1:B'_1\mto \ldots \lb y_k:B'_k\mto g_i$. - ($B'_i$ is obtained from $B_i$ by substituting parameters variables) - the variables $y_j$ occurring - in $g_i$ corresponding to recursive arguments $B_i$ (the ones in - which one of the $I_l$ occurs) are structurally smaller than $y$. -\end{itemize} -The following definitions are correct, we enter them using the -{\tt Fixpoint} command as described in Section~\ref{Fixpoint} and show -the internal representation. -\begin{coq_example} -Fixpoint plus (n m:nat) {struct n} : nat := - match n with - | O => m - | S p => S (plus p m) - end. -Print plus. -Fixpoint lgth (A:Set) (l:list A) {struct l} : nat := - match l with - | nil _ => O - | cons _ a l' => S (lgth A l') - end. -Print lgth. -Fixpoint sizet (t:tree) : nat := let (f) := t in S (sizef f) - with sizef (f:forest) : nat := - match f with - | emptyf => O - | consf t f => plus (sizet t) (sizef f) - end. -Print sizet. -\end{coq_example} - - -\subsubsection[Reduction rule]{Reduction rule\index{iota-reduction@$\iota$-reduction}} -Let $F$ be the set of declarations: $f_1/k_1:A_1:=t_1 \ldots -f_n/k_n:A_n:=t_n$. 
-The reduction for fixpoints is: -\[ (\Fix{f_i}{F}~a_1\ldots -a_{k_i}) \triangleright_{\iota} \substs{t_i}{f_k}{\Fix{f_k}{F}}{k=1\ldots n} -~a_1\ldots a_{k_i}\] -when $a_{k_i}$ starts with a constructor. -This last restriction is needed in order to keep strong normalization -and corresponds to the reduction for primitive recursive operators. -% -The following reductions are now possible: -\def\plus{\mathsf{plus}} -\def\tri{\triangleright_\iota} -\begin{eqnarray*} - \plus~(\nS~(\nS~\nO))~(\nS~\nO) & \tri & \nS~(\plus~(\nS~\nO)~(\nS~\nO))\\ - & \tri & \nS~(\nS~(\plus~\nO~(\nS~\nO)))\\ - & \tri & \nS~(\nS~(\nS~\nO))\\ -\end{eqnarray*} - -% La disparition de Program devrait rendre la construction Match obsolete -% \subsubsection{The {\tt Match \ldots with \ldots end} expression} -% \label{Matchexpr} -% %\paragraph{A unary {\tt Match\ldots with \ldots end}.} -% \index{Match...with...end@{\tt Match \ldots with \ldots end}} -% The {\tt Match} operator which was a primitive notion in older -% presentations of the Calculus of Inductive Constructions is now just a -% macro definition which generates the good combination of {\tt Case} -% and {\tt Fix} operators in order to generate an operator for primitive -% recursive definitions. It always considers an inductive definition as -% a single inductive definition. - -% The following examples illustrates this feature. -% \begin{coq_example} -% Definition nat_pr : (C:Set)C->(nat->C->C)->nat->C -% :=[C,x,g,n]Match n with x g end. -% Print nat_pr. -% \end{coq_example} -% \begin{coq_example} -% Definition forest_pr -% : (C:Set)C->(tree->forest->C->C)->forest->C -% := [C,x,g,n]Match n with x g end. -% \end{coq_example} - -% Cet exemple faisait error (HH le 12/12/96), j'ai change pour une -% version plus simple -%\begin{coq_example} -%Definition forest_pr -% : (P:forest->Set)(P emptyf)->((t:tree)(f:forest)(P f)->(P (consf t f))) -% ->(f:forest)(P f) -% := [C,x,g,n]Match n with x g end. 
-%\end{coq_example} - -\subsubsection{Mutual induction} - -The principles of mutual induction can be automatically generated -using the {\tt Scheme} command described in Section~\ref{Scheme}. - -\section{Admissible rules for global environments} - -From the original rules of the type system, one can show the -admissibility of rules which change the local context of definition of -objects in the global environment. We show here the admissible rules -that are used used in the discharge mechanism at the end of a section. - -% This is obsolete: Abstraction over defined constants actually uses a -% let-in since there are let-ins in Coq - -%% \paragraph{Mechanism of substitution.} - -%% One rule which can be proved valid, is to replace a term $c$ by its -%% value in the global environment. As we defined the substitution of a term for -%% a variable in a term, one can define the substitution of a term for a -%% constant. One easily extends this substitution to local contexts and global -%% environments. - -%% \paragraph{Substitution Property:} -%% \inference{\frac{\WF{E;c:=t:T; E'}{\Gamma}} -%% {\WF{E; \subst{E'}{c}{t}}{\subst{\Gamma}{c}{t}}}} - -\paragraph{Abstraction.} - -One can modify a global declaration by generalizing it over a -previously assumed constant $c$. For doing that, we need to modify the -reference to the global declaration in the subsequent global -environment and local context by explicitly applying this constant to -the constant $c'$. - -Below, if $\Gamma$ is a context of the form -$[y_1:A_1;\ldots;y_n:A_n]$, we write $\forall -x:U,\subst{\Gamma}{c}{x}$ to mean -$[y_1:\forall~x:U,\subst{A_1}{c}{x};\ldots;y_n:\forall~x:U,\subst{A_n}{c}{x}]$ -and -$\subst{E}{|\Gamma|}{|\Gamma|c}$. -to mean the parallel substitution -$\subst{\subst{E}{y_1}{(y_1~c)}\ldots}{y_n}{(y_n~c)}$. 
- -\paragraph{First abstracting property:} - \inference{\frac{\WF{E;c:U;E';c':=t:T;E''}{\Gamma}} - {\WF{E;c:U;E';c':=\lb x:U\mto \subst{t}{c}{x}:\forall~x:U,\subst{T}{c}{x}; - \subst{E''}{c'}{(c'~c)}}{\subst{\Gamma}{c}{(c~c')}}}} - - \inference{\frac{\WF{E;c:U;E';c':T;E''}{\Gamma}} - {\WF{E;c:U;E';c':\forall~x:U,\subst{T}{c}{x}; - \subst{E''}{c'}{(c'~c)}}{\subst{\Gamma}{c}{(c~c')}}}} - - \inference{\frac{\WF{E;c:U;E';\Ind{}{p}{\Gamma_I}{\Gamma_C};E''}{\Gamma}} - {\WFTWOLINES{E;c:U;E';\Ind{}{p+1}{\forall x:U,\subst{\Gamma_I}{c}{x}}{\forall x:U,\subst{\Gamma_C}{c}{x}};\subst{E''}{|\Gamma_I,\Gamma_C|}{|\Gamma_I,\Gamma_C|~c}}{\subst{\Gamma}{|\Gamma_I,\Gamma_C|}{|\Gamma_I,\Gamma_C|~c}}}} -% -One can similarly modify a global declaration by generalizing it over -a previously defined constant~$c'$. Below, if $\Gamma$ is a context -of the form $[y_1:A_1;\ldots;y_n:A_n]$, we write $ -\subst{\Gamma}{c}{u}$ to mean -$[y_1:\subst{A_1}{c}{u};\ldots;y_n:\subst{A_n}{c}{u}]$. - -\paragraph{Second abstracting property:} - \inference{\frac{\WF{E;c:=u:U;E';c':=t:T;E''}{\Gamma}} - {\WF{E;c:=u:U;E';c':=(\letin{x}{u:U}{\subst{t}{c}{x}}):\subst{T}{c}{u};E''}{\Gamma}}} - - \inference{\frac{\WF{E;c:=u:U;E';c':T;E''}{\Gamma}} - {\WF{E;c:=u:U;E';c':\subst{T}{c}{u};E''}{\Gamma}}} - - \inference{\frac{\WF{E;c:=u:U;E';\Ind{}{p}{\Gamma_I}{\Gamma_C};E''}{\Gamma}} - {\WF{E;c:=u:U;E';\Ind{}{p}{\subst{\Gamma_I}{c}{u}}{\subst{\Gamma_C}{c}{u}};E''}{\Gamma}}} - -\paragraph{Pruning the local context.} -If one abstracts or substitutes constants with the above rules then it -may happen that some declared or defined constant does not occur any -more in the subsequent global environment and in the local context. One can -consequently derive the following property. 
- -\paragraph{First pruning property:} -\inference{\frac{\WF{E;c:U;E'}{\Gamma} \qquad c \mbox{ does not occur in $E'$ and $\Gamma$}} - {\WF{E;E'}{\Gamma}}} - -\paragraph{Second pruning property:} -\inference{\frac{\WF{E;c:=u:U;E'}{\Gamma} \qquad c \mbox{ does not occur in $E'$ and $\Gamma$}} - {\WF{E;E'}{\Gamma}}} - -\section{Co-inductive types} -The implementation contains also co-inductive definitions, which are -types inhabited by infinite objects. -More information on co-inductive definitions can be found -in~\cite{Gimenez95b,Gim98,GimCas05}. -%They are described in Chapter~\ref{Co-inductives}. - -\section[The Calculus of Inductive Construction with - impredicative \Set]{The Calculus of Inductive Construction with - impredicative \Set\label{impredicativity}} - -\Coq{} can be used as a type-checker for the -Calculus of Inductive Constructions with an impredicative sort \Set{} -by using the compiler option \texttt{-impredicative-set}. -% -For example, using the ordinary \texttt{coqtop} command, the following -is rejected. -% (** This example should fail ******************************* -% Error: The term forall X:Set, X -> X has type Type -% while it is expected to have type Set ***) -\begin{coq_example} -Fail Definition id: Set := forall X:Set,X->X. -\end{coq_example} -while it will type-check, if one uses instead the \texttt{coqtop - -impredicative-set} command. - -The major change in the theory concerns the rule for product formation -in the sort \Set, which is extended to a domain in any sort: -\begin{description} -\item [Prod] \index{Typing rules!Prod (impredicative Set)} -\inference{\frac{\WTEG{T}{s}~~~~s \in \Sort~~~~~~ - \WTE{\Gamma::(x:T)}{U}{\Set}} - { \WTEG{\forall~x:T,U}{\Set}}} -\end{description} -This extension has consequences on the inductive definitions which are -allowed. -In the impredicative system, one can build so-called {\em large inductive - definitions} like the example of second-order existential -quantifier (\texttt{exSet}). 
- -There should be restrictions on the eliminations which can be -performed on such definitions. The eliminations rules in the -impredicative system for sort \Set{} become: -\begin{description} -\item[\Set] \inference{\frac{s \in - \{\Prop, \Set\}}{\compat{I:\Set}{I\ra s}} -~~~~\frac{I \mbox{~is a small inductive definition}~~~~s \in - \{\Type(i)\}} - {\compat{I:\Set}{I\ra s}}} -\end{description} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: - - diff --git a/doc/refman/RefMan-com.tex b/doc/refman/RefMan-com.tex deleted file mode 100644 index 5b73ac00a6..0000000000 --- a/doc/refman/RefMan-com.tex +++ /dev/null @@ -1,402 +0,0 @@ -\chapter[The \Coq~commands]{The \Coq~commands\label{Addoc-coqc} -\ttindex{coqtop} -\ttindex{coqc} -\ttindex{coqchk}} -%HEVEA\cutname{commands.html} - -There are three \Coq~commands: -\begin{itemize} -\item {\tt coqtop}: the \Coq\ toplevel (interactive mode); -\item {\tt coqc}: the \Coq\ compiler (batch compilation); -\item {\tt coqchk}: the \Coq\ checker (validation of compiled libraries). -\end{itemize} -The options are (basically) the same for the first two commands, and -roughly described below. You can also look at the \verb!man! pages of -\verb!coqtop! and \verb!coqc! for more details. - - -\section{Interactive use ({\tt coqtop})} - -In the interactive mode, also known as the \Coq~toplevel, the user can -develop his theories and proofs step by step. The \Coq~toplevel is -run by the command {\tt coqtop}. - -\index{byte-code} -\index{native code} -\label{binary-images} -They are two different binary images of \Coq: the byte-code one and -the native-code one (if {\ocaml} provides a native-code compiler -for your platform, which is supposed in the following). By default, -\verb!coqtop! executes the native-code version; run \verb!coqtop.byte! to -get the byte-code version. - -The byte-code toplevel is based on an {\ocaml} -toplevel (to allow the dynamic link of tactics). 
You can switch to -the {\ocaml} toplevel with the command \verb!Drop.!, and come back to the -\Coq~toplevel with the command \verb!Coqloop.loop();;!. - -\section{Batch compilation ({\tt coqc})} -The {\tt coqc} command takes a name {\em file} as argument. Then it -looks for a vernacular file named {\em file}{\tt .v}, and tries to -compile it into a {\em file}{\tt .vo} file (See ~\ref{compiled}). - -\Warning The name {\em file} should be a regular {\Coq} identifier, as -defined in Section~\ref{lexical}. It should contain only letters, digits -or underscores (\_). For instance, \verb+/bar/foo/toto.v+ is valid, but -\verb+/bar/foo/to-to.v+ is invalid. - -\section[Customization]{Customization at launch time} - -\subsection{By resource file\index{Resource file}} - -When \Coq\ is launched, with either {\tt coqtop} or {\tt coqc}, the -resource file \verb:$XDG_CONFIG_HOME/coq/coqrc.xxx: is loaded, where -\verb:$XDG_CONFIG_HOME: is the configuration directory of the user (by -default its home directory \verb!/.config! and \verb:xxx: is the version -number (e.g. 8.3). If this file is not found, then the file -\verb:$XDG_CONFIG_HOME/coqrc: is searched. You can also specify an -arbitrary name for the resource file (see option \verb:-init-file: -below). - - -This file may contain, for instance, \verb:Add LoadPath: commands to add -directories to the load path of \Coq. -It is possible to skip the loading of the resource file with the -option \verb:-q:. - -\subsection{By environment variables\label{EnvVariables} -\index{Environment variables}\label{envars}} - -Load path can be specified to the \Coq\ system by setting up -\verb:$COQPATH: environment variable. It is a list of directories -separated by \verb|:| (\verb|;| on windows). {\Coq} will also honor -\verb:$XDG_DATA_HOME: and \verb:$XDG_DATA_DIRS: (see Section -\ref{loadpath}). - -Some {\Coq} commands call other {\Coq} commands. In this case, they -look for the commands in directory specified by \verb:$COQBIN:. 
If -this variable is not set, they look for the commands in the executable -path. - -The \verb:$COQ_COLORS: environment variable can be used to specify the set of -colors used by {\tt coqtop} to highlight its output. It uses the same syntax as -the \verb:$LS_COLORS: variable from GNU's {\tt ls}, that is, a colon-separated -list of assignments of the form \verb:name=attr1;...;attrn: where {\tt name} is -the name of the corresponding highlight tag and {\tt attri} is an ANSI escape -code. The list of highlight tags can be retrieved with the {\tt -list-tags} -command-line option of {\tt coqtop}. - -\subsection{By command line options\index{Options of the command line} -\label{coqoptions}} - -The following command-line options are recognized by the commands {\tt - coqc} and {\tt coqtop}, unless stated otherwise: - -\begin{description} -\item[{\tt -I} {\em directory}, {\tt -include} {\em directory}]\ % - - Add physical path {\em directory} to the {\ocaml} loadpath. - - \SeeAlso Section~\ref{Libraries} and the command {\tt Declare ML Module} Section \ref{compiled}. - -\item[{\tt -Q} {\em directory} {\dirpath}]\ % - - Add physical path \emph{directory} to the list of directories where - {\Coq} looks for a file and bind it to the the logical directory - \emph{dirpath}. The subdirectory structure of \emph{directory} is - recursively available from {\Coq} using absolute names (extending - the {\dirpath} prefix) (see Section~\ref{LongNames}). - - Note that only those subdirectories and files which obey the lexical - conventions of what is an {\ident} (see Section~\ref{lexical}) - are taken into account. Conversely, the underlying file systems or - operating systems may be more restrictive than {\Coq}. While Linux's - ext4 file system supports any {\Coq} recursive layout - (within the limit of 255 bytes per file name), the default on NTFS - (Windows) or HFS+ (MacOS X) file systems is on the contrary to - disallow two files differing only in the case in the same directory. 
- - \SeeAlso Section~\ref{Libraries}. - -\item[{\tt -R} {\em directory} {\dirpath}]\ % - - Do as \texttt{-Q} \emph{directory} {\dirpath} but make the - subdirectory structure of \emph{directory} recursively visible so - that the recursive contents of physical \emph{directory} is available - from {\Coq} using short or partially qualified names. - - \SeeAlso Section~\ref{Libraries}. - -\item[{\tt -top} {\dirpath}]\ % - - Set the toplevel module name to {\dirpath} instead of {\tt Top}. Not - valid for {\tt coqc} as the toplevel module name is inferred from the - name of the output file. - -\item[{\tt -exclude-dir} {\em directory}]\ % - - Exclude any subdirectory named {\em directory} while - processing options such as {\tt -R} and {\tt -Q}. By default, only the - conventional version control management directories named {\tt CVS} and - {\tt \_darcs} are excluded. - -\item[{\tt -nois}]\ % - - Start from an empty state instead of loading the {\tt Init.Prelude} - module. - -\item[{\tt -init-file} {\em file}]\ % - - Load {\em file} as the resource file instead of loading the default - resource file from the standard configuration directories. - -\item[{\tt -q}]\ % - - Do not to load the default resource file. - -\item[{\tt -load-ml-source} {\em file}]\ % - - Load the {\ocaml} source file {\em file}. - -\item[{\tt -load-ml-object} {\em file}]\ % - - Load the {\ocaml} object file {\em file}. - -\item[{\tt -l} {\em file}, {\tt -load-vernac-source} {\em file}]\ % - - Load and execute the {\Coq} script from {\em file.v}. - -\item[{\tt -lv} {\em file}, {\tt -load-vernac-source-verbose} {\em - file}]\ % - - Load and execute the {\Coq} script from {\em file.v}. - Output its content on the standard input as it is executed. - -\item[{\tt -load-vernac-object} {\dirpath}]\ % - - Load \Coq~compiled library {\dirpath}. This is equivalent to running - {\tt Require} {\dirpath}. - -\item[{\tt -require} {\dirpath}]\ % - - Load \Coq~compiled library {\dirpath} and import it. 
This is equivalent - to running {\tt Require Import} {\dirpath}. - -\item[{\tt -batch}]\ % - - Exit just after argument parsing. Available for {\tt coqtop} only. - -\item[{\tt -compile} {\em file.v}]\ % - - Compile file {\em file.v} into {\em file.vo}. This options imply {\tt - -batch} (exit just after argument parsing). It is available only - for {\tt coqtop}, as this behavior is the purpose of {\tt coqc}. - -\item[{\tt -compile-verbose} {\em file.v}]\ % - - Same as {\tt -compile} but also output the content of {\em file.v} as - it is compiled. - -\item[{\tt -verbose}]\ % - - Output the content of the input file as it is compiled. This option is - available for {\tt coqc} only; it is the counterpart of {\tt - -compile-verbose}. - - \item[{\tt -w} (all|none|w$_1$,\ldots,w$_n$)]\ % - - Configure the display of warnings. This option expects {\tt all}, {\tt none} - or a comma-separated list of warning names or categories (see - Section~\ref{SetWarnings}). - -%Mostly unused in the code -%\item[{\tt -debug}]\ % -% -% Switch on the debug flag. - -\item[{\tt -color} (on|off|auto)]\ % - - Enable or not the coloring of output of {\tt coqtop}. Default is auto, - meaning that {\tt coqtop} dynamically decides, depending on whether the - output channel supports ANSI escape sequences. - -\item[{\tt -beautify}]\ % - - Pretty-print each command to {\em file.beautified} when compiling {\em - file.v}, in order to get old-fashioned syntax/definitions/notations. - -\item[{\tt -emacs}, {\tt -ide-slave}]\ % - - Start a special toplevel to communicate with a specific IDE. - -\item[{\tt -impredicative-set}]\ % - - Change the logical theory of {\Coq} by declaring the sort {\tt Set} - impredicative. Warning: this is known to be inconsistent with - some standard axioms of classical mathematics such as the functional - axiom of choice or the principle of description. - -\item[{\tt -type-in-type}]\ % - - Collapse the universe hierarchy of {\Coq}. Warning: this makes the - logic inconsistent. 
- -\item[{\tt -mangle-names} {\em ident}]\ % - - Experimental: Do not depend on this option. - - Replace Coq's auto-generated name scheme with names of the form - {\tt ident0}, {\tt ident1}, \ldots etc. - The command {\tt Set Mangle Names}\optindex{Mangle Names} turns - the behavior on in a document, and {\tt Set Mangle Names Prefix "ident"} - \optindex{Mangle Names Prefix} changes the used prefix. - - This feature is intended to be used as a linter for developments that want - to be robust to changes in the auto-generated name scheme. The options are - provided to facilitate tracking down problems. - -\item[{\tt -compat} {\em version}]\ % - - Attempt to maintain some backward-compatibility with a previous version. - -\item[{\tt -dump-glob} {\em file}]\ % - - Dump references for global names in file {\em file} (to be used - by {\tt coqdoc}, see~\ref{coqdoc}). By default, if {\em file.v} is being - compiled, {\em file.glob} is used. - -\item[{\tt -no-glob}]\ % - - Disable the dumping of references for global names. - -%\item[{\tt -no-hash-consing}]\ % - -\item[{\tt -image} {\em file}]\ % - - Set the binary image to be used by {\tt coqc} to be {\em file} - instead of the standard one. Not of general use. - -\item[{\tt -bindir} {\em directory}]\ % - - Set the directory containing {\Coq} binaries to be used by {\tt coqc}. - It is equivalent to doing \texttt{export COQBIN=}{\em directory} before - launching {\tt coqc}. - -\item[{\tt -where}]\ % - - Print the location of \Coq's standard library and exit. - -\item[{\tt -config}]\ % - - Print the locations of \Coq's binaries, dependencies, and libraries, then exit. - -\item[{\tt -filteropts}]\ % - - Print the list of command line arguments that {\tt coqtop} has - recognized as options and exit. - -\item[{\tt -v}]\ % - - Print \Coq's version and exit. - -\item[{\tt -list-tags}]\ % - - Print the highlight tags known by {\Coq} as well as their currently associated - color and exit. 
- -\item[{\tt -h}, {\tt --help}]\ % - - Print a short usage and exit. - -\end{description} - - -\section{Compiled libraries checker ({\tt coqchk})} - -The {\tt coqchk} command takes a list of library paths as argument, described -either by their logical name or by their physical filename, which must end in -{\tt .vo}. The corresponding compiled libraries (.vo files) are searched in the -path, recursively processing the libraries they depend on. The content -of all these libraries is then type-checked. The effect of {\tt - coqchk} is only to return with normal exit code in case of success, -and with positive exit code if an error has been found. Error messages -are not deemed to help the user understand what is wrong. In the -current version, it does not modify the compiled libraries to mark -them as successfully checked. - -Note that non-logical information is not checked. By logical -information, we mean the type and optional body associated to names. -It excludes for instance anything related to the concrete syntax of -objects (customized syntax rules, association between short and long -names), implicit arguments, etc. - -This tool can be used for several purposes. One is to check that a -compiled library provided by a third-party has not been forged and -that loading it cannot introduce inconsistencies.\footnote{Ill-formed - non-logical information might for instance bind {\tt - Coq.Init.Logic.True} to short name {\tt False}, so apparently {\tt - False} is inhabited, but using fully qualified names, {\tt - Coq.Init.Logic.False} will always refer to the absurd proposition, - what we guarantee is that there is no proof of this latter - constant.} -Another point is to get an even higher level of security. Since {\tt - coqtop} can be extended with custom tactics, possibly ill-typed -code, it cannot be guaranteed that the produced compiled libraries are -correct. {\tt coqchk} is a standalone verifier, and thus it cannot be -tainted by such malicious code. 
- -Command-line options {\tt -Q}, {\tt -R}, {\tt -where} and -{\tt -impredicative-set} are supported by {\tt coqchk} and have the -same meaning as for {\tt coqtop}. As there is no notion of relative paths in -object files {\tt -Q} and {\tt -R} have exactly the same meaning. - -Extra options are: -\begin{description} -\item[{\tt -norec} {\em module}]\ % - - Check {\em module} but do not check its dependencies. - -\item[{\tt -admit} {\em module}]\ % - - Do not check {\em module} and any of its dependencies, unless - explicitly required. - -\item[{\tt -o}]\ % - - At exit, print a summary about the context. List the names of all - assumptions and variables (constants without body). - -\item[{\tt -silent}]\ % - - Do not write progress information in standard output. -\end{description} - -Environment variable \verb:$COQLIB: can be set to override the -location of the standard library. - -The algorithm for deciding which modules are checked or admitted is -the following: assuming that {\tt coqchk} is called with argument $M$, -option {\tt -norec} $N$, and {\tt -admit} $A$. Let us write -$\overline{S}$ the set of reflexive transitive dependencies of set -$S$. Then: -\begin{itemize} -\item Modules $C=\overline{M}\backslash\overline{A}\cup M\cup N$ are - loaded and type-checked before being added to the context. -\item And $\overline{M}\cup\overline{N}\backslash C$ is the set of - modules that are loaded and added to the context without - type-checking. Basic integrity checks (checksums) are nonetheless - performed. -\end{itemize} - -As a rule of thumb, the {\tt -admit} can be used to tell that some -libraries have already been checked. So {\tt coqchk A B} can be split -in {\tt coqchk A \&\& coqchk B -admit A} without type-checking any -definition twice. Of course, the latter is slightly slower since it -makes more disk access. 
It is also less secure since an attacker might -have replaced the compiled library $A$ after it has been read by the -first command, but before it has been read by the second command. - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-ext.tex b/doc/refman/RefMan-ext.tex deleted file mode 100644 index a1950d136e..0000000000 --- a/doc/refman/RefMan-ext.tex +++ /dev/null @@ -1,2152 +0,0 @@ -\chapter[Extensions of \Gallina{}]{Extensions of \Gallina{}\label{Gallina-extension}\index{Gallina}} -%HEVEA\cutname{gallina-ext.html} - -{\gallina} is the kernel language of {\Coq}. We describe here extensions of -the Gallina's syntax. - -\section{Record types -\comindex{Record} -\comindex{Inductive} -\comindex{CoInductive} -\label{Record}} - -The \verb+Record+ construction is a macro allowing the definition of -records as is done in many programming languages. Its syntax is -described on Figure~\ref{record-syntax}. In fact, the \verb+Record+ -macro is more general than the usual record types, since it allows -also for ``manifest'' expressions. In this sense, the \verb+Record+ -construction allows defining ``signatures''. - -\begin{figure}[h] -\begin{centerframe} -\begin{tabular}{lcl} -{\sentence} & ++= & {\record}\\ - & & \\ -{\record} & ::= & - {\recordkw} {\ident} \zeroone{\binders} \zeroone{{\tt :} {\sort}} \verb.:=. \\ -&& ~~~~\zeroone{\ident} - \verb!{! \zeroone{\nelist{\field}{;}} \verb!}! 
\verb:.:\\ - & & \\ -{\recordkw} & ::= & - {\tt Record} $|$ {\tt Inductive} $|$ {\tt CoInductive}\\ - & & \\ -{\field} & ::= & {\name} \zeroone{\binders} : {\type} \zeroone{{\tt where} {\it notation}} \\ - & $|$ & {\name} \zeroone{\binders} {\typecstrtype} := {\term}\\ -\end{tabular} -\end{centerframe} -\caption{Syntax for the definition of {\tt Record}} -\label{record-syntax} -\end{figure} - -\noindent In the expression -\begin{quote} -{\tt Record {\ident} {\params} : {\sort} := {\ident$_0$} \{ \\ - {\ident$_1$} \binders$_1$ : {\term$_1$} ; ... ; \\ - {\ident$_n$} \binders$_n$ : {\term$_n$} \}.} -\end{quote} -\noindent the identifier {\ident} is the name of the defined record -and {\sort} is its type. The identifier {\ident$_0$} is the name of -its constructor. If {\ident$_0$} is omitted, the default name {\tt -Build\_{\ident}} is used. -If {\sort} is omitted, the default sort is {\Type}. -The identifiers {\ident$_1$}, \dots, {\ident$_n$} are the names of -fields and {\tt forall {\binders$_1$}, {\term$_1$}}, \dots, -{\tt forall {\binders$_n$}, {\term$_n$}} -their respective types. Remark that the type of {\ident$_i$} may -depend on the previous {\ident$_j$} (for $j<i$). Thus the order of the -fields is important. Finally, {\params} are the parameters of the -record. - -More generally, a record may have explicitly defined (a.k.a. -manifest) fields. For instance, {\tt Record} {\ident} {\tt [} -{\params} {\tt ]} \texttt{:} {\sort} := \verb+{+ {\ident$_1$} -\texttt{:} {\type$_1$} \verb+;+ {\ident$_2$} \texttt{:=} {\term$_2$} -\verb+;+ {\ident$_3$} \texttt{:} {\type$_3$} \verb+}+ in which case -the correctness of {\type$_3$} may rely on the instance {\term$_2$} of -{\ident$_2$} and {\term$_2$} in turn may depend on {\ident$_1$}. - - -\Example -The set of rational numbers may be defined as: -\begin{coq_eval} -Reset Initial. 
-\end{coq_eval} -\begin{coq_example} -Record Rat : Set := mkRat - {sign : bool; - top : nat; - bottom : nat; - Rat_bottom_cond : 0 <> bottom; - Rat_irred_cond : - forall x y z:nat, (x * y) = top /\ (x * z) = bottom -> x = 1}. -\end{coq_example} - -Remark here that the field \verb+Rat_bottom_cond+ depends -on the field \verb+bottom+ and \verb+Rat_irred_cond+ depends -on both \verb+top+ and \verb+bottom+. - -Let us now see the work done by the {\tt Record} macro. First the -macro generates a variant type definition with just one constructor: -\begin{quote} -{\tt Variant {\ident} {\params} : {\sort} := \\ - {\ident$_0$} ({\ident$_1$} : {\term$_1$}) ... ({\ident$_n$} : {\term$_n$}).} -\end{quote} -To build an object of type {\ident}, one should provide the -constructor {\ident$_0$} with $n$ terms filling the fields of -the record. - -As an example, let us define the rational $1/2$: -\begin{coq_example*} -Theorem one_two_irred : - forall x y z:nat, x * y = 1 /\ x * z = 2 -> x = 1. -Admitted. -\end{coq_example*} -\begin{coq_example} -Definition half := mkRat true 1 2 (O_S 1) one_two_irred. -\end{coq_example} -\begin{coq_example} -Check half. -\end{coq_example} - -\begin{figure}[t] -\begin{centerframe} -\begin{tabular}{lcl} -{\term} & ++= & - \verb!{|! \zeroone{\nelist{\fielddef}{;}} \verb!|}! \\ - & & \\ -{\fielddef} & ::= & {\name} \zeroone{\binders} := {\term} \\ -\end{tabular} -\end{centerframe} -\caption{Syntax for constructing elements of a \texttt{Record} using named fields} -\label{fig:fieldsyntax} -\end{figure} - -Alternatively, the following syntax allows creating objects by using named fields, as -shown on Figure~\ref{fig:fieldsyntax}. The -fields do not have to be in any particular order, nor do they have to be all -present if the missing ones can be inferred or prompted for (see -Section~\ref{Program}). - -\begin{coq_example} -Definition half' := - {| sign := true; - Rat_bottom_cond := O_S 1; - Rat_irred_cond := one_two_irred |}. 
-\end{coq_example} - -This syntax can be disabled globally for printing by -\begin{quote} -{\tt Unset Printing Records.} -\optindex{Printing Records} -\end{quote} -For a given type, one can override this using either -\begin{quote} -{\tt Add Printing Record {\ident}.} -\end{quote} -to get record syntax or -\begin{quote} -{\tt Add Printing Constructor {\ident}.} -\end{quote} -to get constructor syntax. - -This syntax can also be used for pattern matching. - -\begin{coq_example} -Eval compute in ( - match half with - | {| sign := true; top := n |} => n - | _ => 0 - end). -\end{coq_example} - -The macro generates also, when it is possible, the projection -functions for destructuring an object of type {\ident}. These -projection functions are given the names of the corresponding -fields. If a field is named ``\verb=_='' then no projection is built -for it. In our example: - -\begin{coq_example} -Eval compute in top half. -Eval compute in bottom half. -Eval compute in Rat_bottom_cond half. -\end{coq_example} - -An alternative syntax for projections based on a dot notation is -available: - -\begin{coq_example} -Eval compute in half.(top). -\end{coq_example} - -It can be activated for printing with the command -\optindex{Printing Projections} -\begin{quote} -{\tt Set Printing Projections.} -\end{quote} - -\begin{coq_example} -Set Printing Projections. -Check top half. -\end{coq_example} - -The corresponding grammar rules are given in Figure~\ref{fig:projsyntax}. -When {\qualid} denotes a projection, the syntax {\tt - {\term}.({\qualid})} is equivalent to {\qualid~\term}, the syntax -{\term}{\tt .(}{\qualid}~{\termarg}$_1$ {\ldots} {\termarg}$_n${\tt )} to -{\qualid~{\termarg}$_1$ {\ldots} {\termarg}$_n$~\term}, and the syntax -{\term}{\tt .(@}{\qualid}~{\term}$_1$~\ldots~{\term}$_n${\tt )} to -{@\qualid~{\term}$_1$ {\ldots} {\term}$_n$~\term}. In each case, {\term} -is the object projected and the other arguments are the parameters of -the inductive type. 
- -\begin{figure}[t] -\begin{centerframe} -\begin{tabular}{lcl} -{\term} & ++= & {\term} {\tt .(} {\qualid} {\tt )}\\ - & $|$ & {\term} {\tt .(} {\qualid} \nelist{\termarg}{} {\tt )}\\ - & $|$ & {\term} {\tt .(} {@}{\qualid} \nelist{\term}{} {\tt )} -\end{tabular} -\end{centerframe} -\caption{Syntax for \texttt{Record} projections} -\label{fig:projsyntax} -\end{figure} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\begin{Remarks} - -\item Records defined with the {\tt Record} keyword are not allowed to be -recursive (references to the record's name in the type of its field -raises an error). To define recursive records, one can use the {\tt -Inductive} and {\tt CoInductive} keywords, resulting in an inductive -or co-inductive record. -A \emph{caveat}, however, is that records -cannot appear in mutually inductive (or co-inductive) definitions. - -\item Induction schemes are automatically generated for inductive records. -Automatic generation of induction schemes for non-recursive records -defined with the {\tt Record} keyword can be activated with the -{\tt Nonrecursive Elimination Schemes} option -(see~\ref{set-nonrecursive-elimination-schemes}). - -\item {\tt Structure} is a synonym of the keyword {\tt Record}. - -\end{Remarks} - -\begin{Warnings} -\item {\tt {\ident$_i$} cannot be defined.} - - It can happen that the definition of a projection is impossible. - This message is followed by an explanation of this impossibility. - There may be three reasons: - \begin{enumerate} - \item The name {\ident$_i$} already exists in the environment (see - Section~\ref{Axiom}). - \item The body of {\ident$_i$} uses an incorrect elimination for - {\ident} (see Sections~\ref{Fixpoint} and~\ref{Caseexpr}). - \item The type of the projections {\ident$_i$} depends on previous - projections which themselves could not be defined. 
- \end{enumerate} -\end{Warnings} - -\begin{ErrMsgs} - -\item \errindex{Records declared with the keyword Record or Structure cannot be recursive.} - - The record name {\ident} appears in the type of its fields, but uses - the keyword {\tt Record}. Use the keyword {\tt Inductive} or {\tt - CoInductive} instead. -\item \errindex{Cannot handle mutually (co)inductive records.} - - Records cannot be defined as part of mutually inductive (or - co-inductive) definitions, whether with records only or mixed with - standard definitions. -\item During the definition of the one-constructor inductive - definition, all the errors of inductive definitions, as described in - Section~\ref{gal-Inductive-Definitions}, may also occur. - -\end{ErrMsgs} - -\SeeAlso Coercions and records in Section~\ref{Coercions-and-records} -of the chapter devoted to coercions. - -\subsection{Primitive Projections} -\optindex{Primitive Projections} -\optindex{Printing Primitive Projection Parameters} -\optindex{Printing Primitive Projection Compatibility} -\index{Primitive projections} -\label{prim-proj} - -The option {\tt Set Primitive Projections} turns on the use of primitive -projections when defining subsequent records (even through the {\tt - Inductive} and {\tt CoInductive} commands). Primitive projections -extended the Calculus of Inductive Constructions with a new binary term -constructor {\tt r.(p)} representing a primitive projection p applied to -a record object {\tt r} (i.e., primitive projections are always -applied). Even if the record type has parameters, these do not appear at -applications of the projection, considerably reducing the sizes of terms -when manipulating parameterized records and typechecking time. On the -user level, primitive projections can be used as a replacement for the -usual defined ones, although there are a few notable differences. 
- -The internally omitted parameters can be reconstructed at printing time -even though they are absent in the actual AST manipulated by the kernel. This -can be obtained by setting the {\tt Printing Primitive Projection Parameters} -flag. Another compatibility printing can be activated thanks to the -{\tt Printing Primitive Projection Compatibility} option which governs the -printing of pattern-matching over primitive records. - -\subsubsection{Primitive Record Types} -When the {\tt Set Primitive Projections} option is on, definitions of -record types change meaning. When a type is declared with primitive -projections, its {\tt match} construct is disabled (see -\ref{primproj:compat} though). To eliminate the (co-)inductive type, one -must use its defined primitive projections. - -There are currently two ways to introduce primitive records types: -\begin{itemize} -\item Through the {\tt Record} command, in which case the type has to be - non-recursive. The defined type enjoys eta-conversion definitionally, - that is the generalized form of surjective pairing for records: - {\tt $r$ = Build\_R ($r$.($p_1$) .. $r$.($p_n$))}. Eta-conversion allows to define - dependent elimination for these types as well. -\item Through the {\tt Inductive} and {\tt CoInductive} commands, when - the body of the definition is a record declaration of the form {\tt - Build\_R \{ $p_1$ : $t_1$; .. ; $p_n$ : $t_n$ \}}. In this case the types can be - recursive and eta-conversion is disallowed. These kind of record types - differ from their traditional versions in the sense that dependent - elimination is not available for them and only non-dependent case analysis - can be defined. -\end{itemize} - -\subsubsection{Reduction} - -The basic reduction rule of a primitive projection is {\tt $p_i$ - (Build\_R $t_1$ .. $t_n$) $\rightarrow_{\iota}$ $t_i$}. However, to take the $\delta$ flag into -account, projections can be in two states: folded or unfolded. 
An -unfolded primitive projection application obeys the rule above, while -the folded version delta-reduces to the unfolded version. This allows to -precisely mimic the usual unfolding rules of constants. Projections -obey the usual {\tt simpl} flags of the {\tt Arguments} command in particular. - -There is currently no way to input unfolded primitive projections at the -user-level, and one must use the {\tt Printing Primitive Projection - Compatibility} to display unfolded primitive projections as matches -and distinguish them from folded ones. - -\subsubsection{Compatibility Projections and {\tt match}} -\label{primproj:compat} -To ease compatibility with ordinary record types, each primitive -projection is also defined as a ordinary constant taking parameters and -an object of the record type as arguments, and whose body is an -application of the unfolded primitive projection of the same name. These -constants are used when elaborating partial applications of the -projection. One can distinguish them from applications of the primitive -projection if the {\tt Printing Primitive Projection Parameters} option -is off: for a primitive projection application, parameters are printed -as underscores while for the compatibility projections they are printed -as usual. - -Additionally, user-written {\tt match} constructs on primitive records -are desugared into substitution of the projections, they cannot be -printed back as {\tt match} constructs. - - % - r.(p) and (p r) elaborate to native projection application, and - % the parameters cannot be mentioned. The following arguments are - % parsed according to the remaining implicit arguments declared for the - % projection (i.e. the implicit arguments after the record type - % argument). In dot notation, the record type argument is considered - % explicit no matter what its implicit status is. - % - r.(@p params) and @p args are parsed as regular applications of the - % projection with explicit parameters. 
- % - [simpl p] is forbidden, but [simpl @p] will simplify both the projection - % and its explicit [@p] version. - % - [unfold p] has no effect on projection applications unless it is applied - % to a constructor. If the explicit version appears it reduces to the - % projection application. - % - [pattern x at n], [rewrite x at n] and in general abstraction and selection - % of occurrences may fail due to the disappearance of parameters. - -\section{Variants and extensions of {\mbox{\tt match}} -\label{Extensions-of-match} -\index{match@{\tt match\ldots with\ldots end}}} - -\subsection{Multiple and nested pattern-matching -\index{ML-like patterns} -\label{Mult-match}} - -The basic version of \verb+match+ allows pattern-matching on simple -patterns. As an extension, multiple nested patterns or disjunction of -patterns are allowed, as in ML-like languages. - -The extension just acts as a macro that is expanded during parsing -into a sequence of {\tt match} on simple patterns. Especially, a -construction defined using the extended {\tt match} is generally -printed under its expanded form (see~\texttt{Set Printing Matching} in -section~\ref{SetPrintingMatching}). - -\SeeAlso Chapter~\ref{Mult-match-full}. - -\subsection{Pattern-matching on boolean values: the {\tt if} expression -\label{if-then-else} -\index{if@{\tt if ... then ... else}}} - -For inductive types with exactly two constructors and for -pattern-matchings expressions which do not depend on the arguments of -the constructors, it is possible to use a {\tt if ... then ... else} -notation. For instance, the definition - -\begin{coq_example} -Definition not (b:bool) := - match b with - | true => false - | false => true - end. -\end{coq_example} - -\noindent can be alternatively written - -\begin{coq_eval} -Reset not. -\end{coq_eval} -\begin{coq_example} -Definition not (b:bool) := if b then false else true. 
-\end{coq_example} - -More generally, for an inductive type with constructors {\tt C$_1$} -and {\tt C$_2$}, we have the following equivalence - -\smallskip - -{\tt if {\term} \zeroone{\ifitem} then {\term}$_1$ else {\term}$_2$} $\equiv$ -\begin{tabular}[c]{l} -{\tt match {\term} \zeroone{\ifitem} with}\\ -{\tt \verb!|! C$_1$ \_ {\ldots} \_ \verb!=>! {\term}$_1$} \\ -{\tt \verb!|! C$_2$ \_ {\ldots} \_ \verb!=>! {\term}$_2$} \\ -{\tt end} -\end{tabular} - -Here is an example. - -\begin{coq_example} -Check (fun x (H:{x=0}+{x<>0}) => - match H with - | left _ => true - | right _ => false - end). -\end{coq_example} - -Notice that the printing uses the {\tt if} syntax because {\tt sumbool} is -declared as such (see Section~\ref{printing-options}). - -\subsection{Irrefutable patterns: the destructuring {\tt let} variants -\index{let in@{\tt let ... in}} -\label{Letin}} - -Pattern-matching on terms inhabiting inductive type having only one -constructor can be alternatively written using {\tt let ... in ...} -constructions. There are two variants of them. - -\subsubsection{First destructuring {\tt let} syntax} -The expression {\tt let -(}~{\ident$_1$},\ldots,{\ident$_n$}~{\tt ) :=}~{\term$_0$}~{\tt -in}~{\term$_1$} performs case analysis on a {\term$_0$} which must be in -an inductive type with one constructor having itself $n$ arguments. Variables -{\ident$_1$}\ldots{\ident$_n$} are bound to the $n$ arguments of the -constructor in expression {\term$_1$}. For instance, the definition - -\begin{coq_example} -Definition fst (A B:Set) (H:A * B) := match H with - | pair x y => x - end. -\end{coq_example} - -can be alternatively written - -\begin{coq_eval} -Reset fst. -\end{coq_eval} -\begin{coq_example} -Definition fst (A B:Set) (p:A * B) := let (x, _) := p in x. -\end{coq_example} -Notice that reduction is different from regular {\tt let ... in ...} -construction since it happens only if {\term$_0$} is in constructor -form. Otherwise, the reduction is blocked. 
- -The pretty-printing of a definition by matching on a -irrefutable pattern can either be done using {\tt match} or the {\tt -let} construction (see Section~\ref{printing-options}). - -If {\term} inhabits an inductive type with one constructor {\tt C}, -we have an equivalence between - -{\tt let ({\ident}$_1$,\ldots,{\ident}$_n$) \zeroone{\ifitem} := {\term} in {\term}'} - -\noindent and - -{\tt match {\term} \zeroone{\ifitem} with C {\ident}$_1$ {\ldots} {\ident}$_n$ \verb!=>! {\term}' end} - - -\subsubsection{Second destructuring {\tt let} syntax\index{let '... in@\texttt{let '... in}}} - -Another destructuring {\tt let} syntax is available for inductive types with -one constructor by giving an arbitrary pattern instead of just a tuple -for all the arguments. For example, the preceding example can be written: -\begin{coq_eval} -Reset fst. -\end{coq_eval} -\begin{coq_example} -Definition fst (A B:Set) (p:A*B) := let 'pair x _ := p in x. -\end{coq_example} - -This is useful to match deeper inside tuples and also to use notations -for the pattern, as the syntax {\tt let 'p := t in b} allows arbitrary -patterns to do the deconstruction. For example: - -\begin{coq_example} -Definition deep_tuple (A:Set) (x:(A*A)*(A*A)) : A*A*A*A := - let '((a,b), (c, d)) := x in (a,b,c,d). -Notation " x 'With' p " := (exist _ x p) (at level 20). -Definition proj1_sig' (A:Set) (P:A->Prop) (t:{ x:A | P x }) : A := - let 'x With p := t in x. -\end{coq_example} - -When printing definitions which are written using this construct it -takes precedence over {\tt let} printing directives for the datatype -under consideration (see Section~\ref{printing-options}). - -\subsection{Controlling pretty-printing of {\tt match} expressions -\label{printing-options}} - -The following commands give some control over the pretty-printing of -{\tt match} expressions. 
- -\subsubsection{Printing nested patterns -\label{SetPrintingMatching} -\optindex{Printing Matching}} - -The Calculus of Inductive Constructions knows pattern-matching only -over simple patterns. It is however convenient to re-factorize nested -pattern-matching into a single pattern-matching over a nested pattern. -{\Coq}'s printer try to do such limited re-factorization. - -\begin{quote} -{\tt Set Printing Matching.} -\end{quote} -This tells {\Coq} to try to use nested patterns. This is the default -behavior. - -\begin{quote} -{\tt Unset Printing Matching.} -\end{quote} -This tells {\Coq} to print only simple pattern-matching problems in -the same way as the {\Coq} kernel handles them. - -\begin{quote} -{\tt Test Printing Matching.} -\end{quote} -This tells if the printing matching mode is on or off. The default is -on. - -\subsubsection{Factorization of clauses with same right-hand side} -\label{SetPrintingFactorizableMatchPatterns} -\optindex{Printing Factorizable Match Patterns} - -When several patterns share the same right-hand side, it is -additionally possible to share the clauses using disjunctive patterns. -Assuming that the printing matching mode is on, whether {\Coq}'s -printer shall try to do this kind of factorization is governed by the -following commands: - -\begin{quote} -{\tt Set Printing Factorizable Match Patterns.} -\end{quote} -This tells {\Coq}'s printer to try to use disjunctive patterns. This is the default -behavior. - -\begin{quote} -{\tt Unset Printing Factorizable Match Patterns.} -\end{quote} -This tells {\Coq}'s printer not to try to use disjunctive patterns. - -\begin{quote} -{\tt Test Printing Factorizable Match Patterns.} -\end{quote} -This tells if the factorization of clauses with same right-hand side is -on or off. 
- -\subsubsection{Use of a default clause} -\label{SetPrintingAllowDefaultClause} -\optindex{Printing Allow Default Clause} - -When several patterns share the same right-hand side which do not -depend on the arguments of the patterns, yet an extra factorization is -possible: the disjunction of patterns can be replaced with a ``{\tt - \_}'' default clause. Assuming that the printing matching mode and -the factorization mode are on, whether {\Coq}'s printer shall try to -use a default clause is governed by the following commands: - -\begin{quote} -{\tt Set Printing Allow Default Clause.} -\end{quote} -This tells {\Coq}'s printer to use a default clause when relevant. This is the default -behavior. - -\begin{quote} -{\tt Unset Printing Allow Default Clause.} -\end{quote} -This tells {\Coq}'s printer not to use a default clause. - -\begin{quote} -{\tt Test Printing Allow Default Clause.} -\end{quote} -This tells if the use of a default clause is allowed. - -\subsubsection{Printing of wildcard pattern -\optindex{Printing Wildcard}} - -Some variables in a pattern may not occur in the right-hand side of -the pattern-matching clause. There are options to control the -display of these variables. - -\begin{quote} -{\tt Set Printing Wildcard.} -\end{quote} -The variables having no occurrences in the right-hand side of the -pattern-matching clause are just printed using the wildcard symbol -``{\tt \_}''. - -\begin{quote} -{\tt Unset Printing Wildcard.} -\end{quote} -The variables, even useless, are printed using their usual name. But some -non dependent variables have no name. These ones are still printed -using a ``{\tt \_}''. - -\begin{quote} -{\tt Test Printing Wildcard.} -\end{quote} -This tells if the wildcard printing mode is on or off. The default is -to print wildcard for useless variables. - -\subsubsection{Printing of the elimination predicate -\optindex{Printing Synth}} - -In most of the cases, the type of the result of a matched term is -mechanically synthesizable. 
Especially, if the result type does not -depend of the matched term. - -\begin{quote} -{\tt Set Printing Synth.} -\end{quote} -The result type is not printed when {\Coq} knows that it can -re-synthesize it. - -\begin{quote} -{\tt Unset Printing Synth.} -\end{quote} -This forces the result type to be always printed. - -\begin{quote} -{\tt Test Printing Synth.} -\end{quote} -This tells if the non-printing of synthesizable types is on or off. -The default is to not print synthesizable types. - -\subsubsection{Printing matching on irrefutable pattern -\label{AddPrintingLet} -\comindex{Add Printing Let {\ident}} -\comindex{Remove Printing Let {\ident}} -\comindex{Test Printing Let for {\ident}} -\comindex{Print Table Printing Let}} - -If an inductive type has just one constructor, -pattern-matching can be written using the first destructuring let syntax. - -\begin{quote} -{\tt Add Printing Let {\ident}.} -\end{quote} -This adds {\ident} to the list of inductive types for which -pattern-matching is written using a {\tt let} expression. - -\begin{quote} -{\tt Remove Printing Let {\ident}.} -\end{quote} -This removes {\ident} from this list. Note that removing an inductive -type from this list has an impact only for pattern-matching written using -\texttt{match}. Pattern-matching explicitly written using a destructuring -let are not impacted. - -\begin{quote} -{\tt Test Printing Let for {\ident}.} -\end{quote} -This tells if {\ident} belongs to the list. - -\begin{quote} -{\tt Print Table Printing Let.} -\end{quote} -This prints the list of inductive types for which pattern-matching is -written using a {\tt let} expression. - -The list of inductive types for which pattern-matching is written -using a {\tt let} expression is managed synchronously. This means that -it is sensible to the command {\tt Reset}. 
- -\subsubsection{Printing matching on booleans -\comindex{Add Printing If {\ident}} -\comindex{Remove Printing If {\ident}} -\comindex{Test Printing If for {\ident}} -\comindex{Print Table Printing If}} - -If an inductive type is isomorphic to the boolean type, -pattern-matching can be written using {\tt if} ... {\tt then} ... {\tt - else} ... - -\begin{quote} -{\tt Add Printing If {\ident}.} -\end{quote} -This adds {\ident} to the list of inductive types for which -pattern-matching is written using an {\tt if} expression. - -\begin{quote} -{\tt Remove Printing If {\ident}.} -\end{quote} -This removes {\ident} from this list. - -\begin{quote} -{\tt Test Printing If for {\ident}.} -\end{quote} -This tells if {\ident} belongs to the list. - -\begin{quote} -{\tt Print Table Printing If.} -\end{quote} -This prints the list of inductive types for which pattern-matching is -written using an {\tt if} expression. - -The list of inductive types for which pattern-matching is written -using an {\tt if} expression is managed synchronously. This means that -it is sensible to the command {\tt Reset}. - -\subsubsection{Example} - -This example emphasizes what the printing options offer. - -\begin{coq_example} -Definition snd (A B:Set) (H:A * B) := match H with - | pair x y => y - end. -Test Printing Let for prod. -Print snd. -Remove Printing Let prod. -Unset Printing Synth. -Unset Printing Wildcard. -Print snd. -\end{coq_example} -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\subsection{Printing \mbox{\tt match} templates} - -The {\tt Show Match} vernacular command prints a {\tt match} template for -a given type. See Section~\ref{Show}. - -% \subsection{Still not dead old notations} - -% The following variant of {\tt match} is inherited from older version -% of {\Coq}. 
- -% \medskip -% \begin{tabular}{lcl} -% {\term} & ::= & {\annotation} {\tt Match} {\term} {\tt with} {\terms} {\tt end}\\ -% \end{tabular} -% \medskip - -% This syntax is a macro generating a combination of {\tt match} with {\tt -% Fix} implementing a combinator for primitive recursion equivalent to -% the {\tt Match} construction of \Coq\ V5.8. It is provided only for -% sake of compatibility with \Coq\ V5.8. It is recommended to avoid it. -% (see Section~\ref{Matchexpr}). - -% There is also a notation \texttt{Case} that is the -% ancestor of \texttt{match}. Again, it is still in the code for -% compatibility with old versions but the user should not use it. - -% Explained in RefMan-gal.tex -%% \section{Forced type} - -%% In some cases, one may wish to assign a particular type to a term. The -%% syntax to force the type of a term is the following: - -%% \medskip -%% \begin{tabular}{lcl} -%% {\term} & ++= & {\term} {\tt :} {\term}\\ -%% \end{tabular} -%% \medskip - -%% It forces the first term to be of type the second term. The -%% type must be compatible with -%% the term. More precisely it must be either a type convertible to -%% the automatically inferred type (see Chapter~\ref{Cic}) or a type -%% coercible to it, (see \ref{Coercions}). When the type of a -%% whole expression is forced, it is usually not necessary to give the types of -%% the variables involved in the term. - -%% Example: - -%% \begin{coq_example} -%% Definition ID := forall X:Set, X -> X. -%% Definition id := (fun X x => x):ID. -%% Check id. -%% \end{coq_example} - -\section{Advanced recursive functions} - -The following \emph{experimental} command is available -when the {\tt FunInd} library has been loaded via {\tt Require Import FunInd}: -\begin{center} - \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} - \{decrease\_annot\} : type$_0$ := \term$_0$} - \comindex{Function} - \label{Function} -\end{center} -This command can be seen as a generalization of {\tt Fixpoint}. 
It is actually -a wrapper for several ways of defining a function \emph{and other useful - related objects}, namely: an induction principle that reflects the -recursive structure of the function (see \ref{FunInduction}) and its -fixpoint equality. - The meaning of this -declaration is to define a function {\it ident}, similarly to {\tt - Fixpoint}. Like in {\tt Fixpoint}, the decreasing argument must be -given (unless the function is not recursive), but it might not -necessarily be \emph{structurally} decreasing. The point of the {\tt - \{\}} annotation is to name the decreasing argument \emph{and} to -describe which kind of decreasing criteria must be used to ensure -termination of recursive calls. - -The {\tt Function} construction also enjoys the {\tt with} extension -to define mutually recursive definitions. However, this feature does -not work for non structurally recursive functions. % VRAI?? - -See the documentation of {\tt functional induction} -(see Section~\ref{FunInduction}) and {\tt Functional Scheme} -(see Section~\ref{FunScheme} and \ref{FunScheme-examples}) for how to use the -induction principle to easily reason about the function. - -\noindent {\bf Remark: } To obtain the right principle, it is better -to put rigid parameters of the function as first arguments. For -example it is better to define plus like this: - -\begin{coq_example*} -Function plus (m n : nat) {struct n} : nat := - match n with - | 0 => m - | S p => S (plus m p) - end. -\end{coq_example*} -\noindent than like this: -\begin{coq_eval} -Reset plus. -\end{coq_eval} -\begin{coq_example*} -Function plus (n m : nat) {struct n} : nat := - match n with - | 0 => m - | S p => S (plus p m) - end. -\end{coq_example*} - -\paragraph[Limitations]{Limitations\label{sec:Function-limitations}} -\term$_0$ must be built as a \emph{pure pattern-matching tree} -(\texttt{match...with}) with applications only \emph{at the end} of -each branch. 
- -Function does not support partial application of the function being defined. Thus, the following example cannot be accepted due to the presence of partial application of \ident{wrong} into the body of \ident{wrong}~: -\begin{coq_eval} -Require List. -\end{coq_eval} -\begin{coq_example*} -Fail Function wrong (C:nat) : nat := - List.hd 0 (List.map wrong (C::nil)). -\end{coq_example*} - -For now dependent cases are not treated for non structurally terminating functions. - - - -\begin{ErrMsgs} -\item \errindex{The recursive argument must be specified} -\item \errindex{No argument name \ident} -\item \errindex{Cannot use mutual definition with well-founded - recursion or measure} - -\item \errindex{Cannot define graph for \ident\dots} (warning) - - The generation of the graph relation \texttt{(R\_\ident)} used to - compute the induction scheme of \ident\ raised a typing error. Only - the ident is defined; the induction scheme will not be generated. - - This error happens generally when: - - \begin{itemize} - \item the definition uses pattern matching on dependent types, which - \texttt{Function} cannot deal with yet. - \item the definition is not a \emph{pattern-matching tree} as - explained above. - \end{itemize} - -\item \errindex{Cannot define principle(s) for \ident\dots} (warning) - - The generation of the graph relation \texttt{(R\_\ident)} succeeded - but the induction principle could not be built. Only the ident is - defined. Please report. - -\item \errindex{Cannot build functional inversion principle} (warning) - - \texttt{functional inversion} will not be available for the - function. -\end{ErrMsgs} - - -\SeeAlso{\ref{FunScheme}, \ref{FunScheme-examples}, \ref{FunInduction}} - -Depending on the {\tt \{$\ldots$\}} annotation, different definition -mechanisms are used by {\tt Function}. More precise description -given below. 
- -\begin{Variants} -\item \texttt{ Function {\ident} {\binder$_1$}\ldots{\binder$_n$} - : type$_0$ := \term$_0$} - - Defines the not recursive function \ident\ as if declared with - \texttt{Definition}. Moreover the following are defined: - - \begin{itemize} - \item {\tt\ident\_rect}, {\tt\ident\_rec} and {\tt\ident\_ind}, - which reflect the pattern matching structure of \term$_0$ (see the - documentation of {\tt Inductive} \ref{Inductive}); - \item The inductive \texttt{R\_\ident} corresponding to the graph of - \ident\ (silently); - \item \texttt{\ident\_complete} and \texttt{\ident\_correct} which are - inversion information linking the function and its graph. - \end{itemize} -\item \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} - {\tt \{}{\tt struct} \ident$_0${\tt\}} : type$_0$ := \term$_0$} - - Defines the structural recursive function \ident\ as if declared - with \texttt{Fixpoint}. Moreover the following are defined: - - \begin{itemize} - \item The same objects as above; - \item The fixpoint equation of \ident: \texttt{\ident\_equation}. - \end{itemize} - -\item \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} {\tt - \{}{\tt measure \term$_1$} \ident$_0${\tt\}} : type$_0$ := - \term$_0$} -\item \texttt{Function {\ident} {\binder$_1$}\ldots{\binder$_n$} - {\tt \{}{\tt wf \term$_1$} \ident$_0${\tt\}} : type$_0$ := \term$_0$} - -Defines a recursive function by well founded recursion. \textbf{The -module \texttt{Recdef} of the standard library must be loaded for this -feature}. The {\tt \{\}} annotation is mandatory and must be one of -the following: -\begin{itemize} -\item {\tt \{measure} \term$_1$ \ident$_0${\tt\}} with \ident$_0$ - being the decreasing argument and \term$_1$ being a function - from type of \ident$_0$ to \texttt{nat} for which value on the - decreasing argument decreases (for the {\tt lt} order on {\tt - nat}) at each recursive call of \term$_0$. 
Parameters of the - function are bound in \term$_0$; -\item {\tt \{wf} \term$_1$ \ident$_0${\tt\}} with \ident$_0$ being - the decreasing argument and \term$_1$ an ordering relation on - the type of \ident$_0$ (i.e. of type T$_{\ident_0}$ - $\to$ T$_{\ident_0}$ $\to$ {\tt Prop}) for which - the decreasing argument decreases at each recursive call of - \term$_0$. The order must be well founded. Parameters of the - function are bound in \term$_0$. -\end{itemize} - -Depending on the annotation, the user is left with some proof -obligations that will be used to define the function. These proofs -are: proofs that each recursive call is actually decreasing with -respect to the given criteria, and (if the criteria is \texttt{wf}) a -proof that the ordering relation is well founded. - -%Completer sur measure et wf - -Once proof obligations are discharged, the following objects are -defined: - -\begin{itemize} -\item The same objects as with the \texttt{struct}; -\item The lemma \texttt{\ident\_tcc} which collects all proof - obligations in one property; -\item The lemmas \texttt{\ident\_terminate} and \texttt{\ident\_F} - which is needed to be inlined during extraction of \ident. -\end{itemize} - - - -%Complete!! -The way this recursive function is defined is the subject of several -papers by Yves Bertot and Antonia Balaa on the one hand, and Gilles Barthe, -Julien Forest, David Pichardie, and Vlad Rusu on the other hand. - -%Exemples ok ici - -\bigskip - -\noindent {\bf Remark: } Proof obligations are presented as several -subgoals belonging to a Lemma {\ident}{\tt\_tcc}. % These subgoals are independent which means that in order to -% abort them you will have to abort each separately. - - - -%The decreasing argument cannot be dependent of another?? - -%Exemples faux ici -\end{Variants} - - -\section{Section mechanism -\index{Sections} -\label{Section}} - -The sectioning mechanism can be used to to organize a proof in -structured sections. 
Then local declarations become available (see -Section~\ref{Basic-definitions}). - -\subsection{\tt Section {\ident}\comindex{Section}} - -This command is used to open a section named {\ident}. - -%% Discontinued ? -%% \begin{Variants} -%% \comindex{Chapter} -%% \item{\tt Chapter {\ident}}\\ -%% Same as {\tt Section {\ident}} -%% \end{Variants} - -\subsection{\tt End {\ident} -\comindex{End}} - -This command closes the section named {\ident}. After closing of the -section, the local declarations (variables and local definitions) get -{\em discharged}, meaning that they stop being visible and that all -global objects defined in the section are generalized with respect to -the variables and local definitions they each depended on in the -section. - - -Here is an example : -\begin{coq_example} -Section s1. -Variables x y : nat. -Let y' := y. -Definition x' := S x. -Definition x'' := x' + y'. -Print x'. -End s1. -Print x'. -Print x''. -\end{coq_example} -Notice the difference between the value of {\tt x'} and {\tt x''} -inside section {\tt s1} and outside. - -\begin{ErrMsgs} -\item \errindex{This is not the last opened section} -\end{ErrMsgs} - -\begin{Remarks} -\item Most commands, like {\tt Hint}, {\tt Notation}, option management, ... -which appear inside a section are canceled when the -section is closed. -% see Section~\ref{LongNames} -%\item Usually all identifiers must be distinct. -%However, a name already used in a closed section (see \ref{Section}) -%can be reused. In this case, the old name is no longer accessible. - -% Obsolète -%\item A module implicitly open a section. Be careful not to name a -%module with an identifier already used in the module (see \ref{compiled}). 
-\end{Remarks} - -\input{RefMan-mod.v} - -\section{Libraries and qualified names} - -\subsection{Names of libraries -\label{Libraries} -\index{Libraries}} - -The theories developed in {\Coq} are stored in {\em library files} -which are hierarchically classified into {\em libraries} and {\em - sublibraries}. To express this hierarchy, library names are -represented by qualified identifiers {\qualid}, i.e. as list of -identifiers separated by dots (see Section~\ref{qualid}). For -instance, the library file {\tt Mult} of the standard {\Coq} library -{\tt Arith} is named {\tt Coq.Arith.Mult}. The identifier that starts -the name of a library is called a {\em library root}. All library -files of the standard library of {\Coq} have the reserved root {\tt Coq} -but library file names based on other roots can be obtained by using -{\Coq} commands ({\tt coqc}, {\tt coqtop}, {\tt coqdep}, \dots) options -{\tt -Q} or {\tt -R} (see Section~\ref{coqoptions}). Also, when an -interactive {\Coq} session starts, a library of root {\tt Top} is -started, unless option {\tt -top} is set (see -Section~\ref{coqoptions}). - -\subsection{Qualified names -\label{LongNames} -\index{Qualified identifiers} -\index{Absolute names}} - -Library files are modules which possibly contain submodules which -eventually contain constructions (axioms, parameters, definitions, -lemmas, theorems, remarks or facts). The {\em absolute name}, or {\em -full name}, of a construction in some library file is a qualified -identifier starting with the logical name of the library file, -followed by the sequence of submodules names encapsulating the -construction and ended by the proper name of the construction. -Typically, the absolute name {\tt Coq.Init.Logic.eq} denotes Leibniz' -equality defined in the module {\tt Logic} in the sublibrary {\tt -Init} of the standard library of \Coq. 
- -The proper name that ends the name of a construction is the {\it short -name} (or sometimes {\it base name}) of the construction (for -instance, the short name of {\tt Coq.Init.Logic.eq} is {\tt eq}). Any -partial suffix of the absolute name is a {\em partially qualified name} -(e.g. {\tt Logic.eq} is a partially qualified name for {\tt -Coq.Init.Logic.eq}). Especially, the short name of a construction is -its shortest partially qualified name. - -{\Coq} does not accept two constructions (definition, theorem, ...) -with the same absolute name but different constructions can have the -same short name (or even same partially qualified names as soon as the -full names are different). - -Notice that the notion of absolute, partially qualified and -short names also applies to library file names. - -\paragraph{Visibility} - -{\Coq} maintains a table called {\it name table} which maps partially -qualified names of constructions to absolute names. This table is -updated by the commands {\tt Require} (see \ref{Require}), {\tt -Import} and {\tt Export} (see \ref{Import}) and also each time a new -declaration is added to the context. An absolute name is called {\it -visible} from a given short or partially qualified name when this -latter name is enough to denote it. This means that the short or -partially qualified name is mapped to the absolute name in {\Coq} name -table. Definitions flagged as {\tt Local} are only accessible with their -fully qualified name (see \ref{Definition}). - -It may happen that a visible name is hidden by the short name or a -qualified name of another construction. In this case, the name that -has been hidden must be referred to using one more level of -qualification. To ensure that a construction always remains -accessible, absolute names can never be hidden. - -Examples: -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Check 0. -Definition nat := bool. -Check 0. -Check Datatypes.nat. -Locate nat. 
-\end{coq_example} - -\SeeAlso Command {\tt Locate} in Section~\ref{Locate} and {\tt Locate -Library} in Section~\ref{Locate Library}. - -\subsection{Libraries and filesystem\label{loadpath}\index{Loadpath} -\index{Physical paths} \index{Logical paths}} - -Please note that the questions described here have been subject to -redesign in Coq v8.5. Former versions of Coq use the same terminology -to describe slightly different things. - -Compiled files (\texttt{.vo} and \texttt{.vio}) store sub-libraries. In -order to refer to them inside {\Coq}, a translation from file-system -names to {\Coq} names is needed. In this translation, names in the -file system are called {\em physical} paths while {\Coq} names are -contrastingly called {\em logical} names. - -A logical prefix {\tt Lib} can be associated to a physical path -\textrm{\textsl{path}} using the command line option {\tt -Q} -\textrm{\textsl{path}} {\tt Lib}. All subfolders of {\textsl{path}} are -recursively associated to the logical path {\tt Lib} extended with the -corresponding suffix coming from the physical path. For instance, the -folder {\tt path/fOO/Bar} maps to {\tt Lib.fOO.Bar}. Subdirectories -corresponding to invalid {\Coq} identifiers are skipped, and, by -convention, subdirectories named {\tt CVS} or {\tt \_darcs} are -skipped too. - -Thanks to this mechanism, {\texttt{.vo}} files are made available through the -logical name of the folder they are in, extended with their own basename. For -example, the name associated to the file {\tt path/fOO/Bar/File.vo} is -{\tt Lib.fOO.Bar.File}. The same caveat applies for invalid identifiers. -When compiling a source file, the {\texttt{.vo}} file stores its logical name, -so that an error is issued if it is loaded with the wrong loadpath afterwards. - -Some folders have a special status and are automatically put in the path. 
-{\Coq} commands associate automatically a logical path to files -in the repository trees rooted at the directory from where the command -is launched, \textit{coqlib}\texttt{/user-contrib/}, the directories -listed in the \verb:$COQPATH:, \verb:${XDG_DATA_HOME}/coq/: and -\verb:${XDG_DATA_DIRS}/coq/: environment variables (see -\url{http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html}) -with the same physical-to-logical translation and with an empty logical prefix. - -The command line option \texttt{-R} is a variant of \texttt{-Q} which has the -strictly same behavior regarding loadpaths, but which also makes the -corresponding \texttt{.vo} files available through their short names in a -way not unlike the {\tt Import} command (see~{\ref{Import}}). For instance, -\texttt{-R} \textrm{\textsl{path}} \texttt{Lib} associates to the file -\texttt{path/fOO/Bar/File.vo} the logical name \texttt{Lib.fOO.Bar.File}, but -allows this file to be accessed through the short names \texttt{fOO.Bar.File}, -\texttt{Bar.File} and \texttt{File}. If several files with identical base name -are present in different subdirectories of a recursive loadpath, which of -these files is found first may be system-dependent and explicit -qualification is recommended. The {\tt From} argument of the {\tt Require} -command can be used to bypass the implicit shortening by providing an absolute -root to the required file (see~\ref{Require}). - -There also exists another independent loadpath mechanism attached to {\ocaml} -object files (\texttt{.cmo} or \texttt{.cmxs}) rather than {\Coq} object files -as described above. The {\ocaml} loadpath is managed using the option -\texttt{-I path} (in the {\ocaml} world, there is neither a notion of logical -name prefix nor a way to access files in subdirectories of \texttt{path}). -See the command \texttt{Declare ML Module} in Section~\ref{compiled} to -understand the need of the {\ocaml} loadpath. 
- -See Section~\ref{coqoptions} for a more general view over the {\Coq} -command line options. - -%% \paragraph{The special case of remarks and facts} -%% -%% In contrast with definitions, lemmas, theorems, axioms and parameters, -%% the absolute name of remarks includes the segment of sections in which -%% it is defined. Concretely, if a remark {\tt R} is defined in -%% subsection {\tt S2} of section {\tt S1} in module {\tt M}, then its -%% absolute name is {\tt M.S1.S2.R}. The same for facts, except that the -%% name of the innermost section is dropped from the full name. Then, if -%% a fact {\tt F} is defined in subsection {\tt S2} of section {\tt S1} -%% in module {\tt M}, then its absolute name is {\tt M.S1.F}. - -\section{Implicit arguments -\index{Implicit arguments} -\label{Implicit Arguments}} - -An implicit argument of a function is an argument which can be -inferred from contextual knowledge. There are different kinds of -implicit arguments that can be considered implicit in different -ways. There are also various commands to control the setting or the -inference of implicit arguments. - -\subsection{The different kinds of implicit arguments} - -\subsubsection{Implicit arguments inferable from the knowledge of other -arguments of a function} - -The first kind of implicit arguments covers the arguments that are -inferable from the knowledge of the type of other arguments of the -function, or of the type of the surrounding context of the -application. Especially, such implicit arguments correspond to -parameters dependent in the type of the function. Typical implicit -arguments are the type arguments in polymorphic functions. -There are several kinds of such implicit arguments. - -\paragraph{Strict Implicit Arguments.} -An implicit argument can be either strict or non strict. An implicit -argument is said {\em strict} if, whatever the other arguments of the -function are, it is still inferable from the type of some other -argument. 
Technically, an implicit argument is strict if it -corresponds to a parameter which is not applied to a variable which -itself is another parameter of the function (since this parameter -may erase its arguments), not in the body of a {\tt match}, and not -itself applied or matched against patterns (since the original -form of the argument can be lost by reduction). - -For instance, the first argument of -\begin{quote} -\verb|cons: forall A:Set, A -> list A -> list A| -\end{quote} -in module {\tt List.v} is strict because {\tt list} is an inductive -type and {\tt A} will always be inferable from the type {\tt -list A} of the third argument of {\tt cons}. -On the contrary, the second argument of a term of type -\begin{quote} -\verb|forall P:nat->Prop, forall n:nat, P n -> ex nat P| -\end{quote} -is implicit but not strict, since it can only be inferred from the -type {\tt P n} of the third argument and if {\tt P} is, e.g., {\tt -fun \_ => True}, it reduces to an expression where {\tt n} does not -occur any longer. The first argument {\tt P} is implicit but not -strict either because it can only be inferred from {\tt P n} and {\tt -P} is not canonically inferable from an arbitrary {\tt n} and the -normal form of {\tt P n} (consider e.g. that {\tt n} is {\tt 0} and -the third argument has type {\tt True}, then any {\tt P} of the form -{\tt fun n => match n with 0 => True | \_ => \mbox{\em anything} end} would -be a solution of the inference problem). - -\paragraph{Contextual Implicit Arguments.} -An implicit argument can be {\em contextual} or not. An implicit -argument is said {\em contextual} if it can be inferred only from the -knowledge of the type of the context of the current expression. For -instance, the only argument of -\begin{quote} -\verb|nil : forall A:Set, list A| -\end{quote} -is contextual. 
Similarly, both arguments of a term of type -\begin{quote} -\verb|forall P:nat->Prop, forall n:nat, P n \/ n = 0| -\end{quote} -are contextual (moreover, {\tt n} is strict and {\tt P} is not). - -\paragraph{Reversible-Pattern Implicit Arguments.} -There is another class of implicit arguments that can be reinferred -unambiguously if all the types of the remaining arguments are -known. This is the class of implicit arguments occurring in the type -of another argument in position of reversible pattern, which means it -is at the head of an application but applied only to uninstantiated -distinct variables. Such an implicit argument is called {\em -reversible-pattern implicit argument}. A typical example is the -argument {\tt P} of {\tt nat\_rec} in -\begin{quote} -{\tt nat\_rec : forall P : nat -> Set, - P 0 -> (forall n : nat, P n -> P (S n)) -> forall x : nat, P x}. -\end{quote} -({\tt P} is reinferable by abstracting over {\tt n} in the type {\tt P n}). - -See Section~\ref{SetReversiblePatternImplicit} for the automatic declaration -of reversible-pattern implicit arguments. - -\subsubsection{Implicit arguments inferable by resolution} - -This corresponds to a class of non dependent implicit arguments that -are solved based on the structure of their type only. - -\subsection{Maximal or non maximal insertion of implicit arguments} - -In case a function is partially applied, and the next argument to be -applied is an implicit argument, two disciplines are applicable. In the -first case, the function is considered to have no arguments furtherly: -one says that the implicit argument is not maximally inserted. In -the second case, the function is considered to be implicitly applied -to the implicit arguments it is waiting for: one says that the -implicit argument is maximally inserted. - -Each implicit argument can be declared to have to be inserted -maximally or non maximally. 
This can be governed argument per argument -by the command {\tt Implicit Arguments} (see~\ref{ImplicitArguments}) -or globally by the command {\tt Set Maximal Implicit Insertion} -(see~\ref{SetMaximalImplicitInsertion}). See also -Section~\ref{PrintImplicit}. - -\subsection{Casual use of implicit arguments} - -In a given expression, if it is clear that some argument of a function -can be inferred from the type of the other arguments, the user can -force the given argument to be guessed by replacing it by ``{\tt \_}''. If -possible, the correct argument will be automatically generated. - -\begin{ErrMsgs} - -\item \errindex{Cannot infer a term for this placeholder} - - {\Coq} was not able to deduce an instantiation of a ``{\tt \_}''. - -\end{ErrMsgs} - -\subsection{Declaration of implicit arguments -\comindex{Arguments}} -\label{ImplicitArguments} - -In case one wants that some arguments of a given object (constant, -inductive types, constructors, assumptions, local or not) are always -inferred by Coq, one may declare once and for all which are the expected -implicit arguments of this object. There are two ways to do this, -a priori and a posteriori. - -\subsubsection{Implicit Argument Binders} - -In the first setting, one wants to explicitly give the implicit -arguments of a declared object as part of its definition. To do this, one has -to surround the bindings of implicit arguments by curly braces: -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Definition id {A : Type} (x : A) : A := x. -\end{coq_example} - -This automatically declares the argument {\tt A} of {\tt id} as a -maximally inserted implicit argument. One can then do as-if the argument -was absent in every situation but still be able to specify it if needed: -\begin{coq_example} -Definition compose {A B C} (g : B -> C) (f : A -> B) := - fun x => g (f x). -Goal forall A, compose id id = id (A:=A). 
-\end{coq_example} - -The syntax is supported in all top-level definitions: {\tt Definition}, -{\tt Fixpoint}, {\tt Lemma} and so on. For (co-)inductive datatype -declarations, the semantics are the following: an inductive parameter -declared as an implicit argument need not be repeated in the inductive -definition but will become implicit for the constructors of the -inductive only, not the inductive type itself. For example: - -\begin{coq_example} -Inductive list {A : Type} : Type := -| nil : list -| cons : A -> list -> list. -Print list. -\end{coq_example} - -One can always specify the parameter if it is not uniform using the -usual implicit arguments disambiguation syntax. - -\subsubsection{Declaring Implicit Arguments} - -To set implicit arguments a posteriori, one can use the -command: -\begin{quote} -\tt Arguments {\qualid} \nelist{\possiblybracketedident}{} -\end{quote} -where the list of {\possiblybracketedident} is a prefix of the list of arguments -of {\qualid} where the ones to be declared implicit are surrounded by square -brackets and the ones to be declared as maximally inserted implicits are -surrounded by curly braces. - -After the above declaration is issued, implicit arguments can just (and -have to) be skipped in any expression involving an application of -{\qualid}. - -Implicit arguments can be cleared with the following syntax: - -\begin{quote} -{\tt Arguments {\qualid} : clear implicits -\comindex{Arguments}} -\end{quote} - -\begin{Variants} -\item {\tt Global Arguments {\qualid} \nelist{\possiblybracketedident}{} -\comindex{Global Arguments}} - -Tell to recompute the implicit arguments of {\qualid} after ending of -the current section if any, enforcing the implicit arguments known -from inside the section to be the ones declared by the command. 
- -\item {\tt Local Arguments {\qualid} \nelist{\possiblybracketedident}{} -\comindex{Local Arguments}} - -When in a module, tell not to activate the implicit arguments of -{\qualid} declared by this command to contexts that require the -module. - -\item {\tt \zeroone{Global {\sl |} Local} Arguments {\qualid} \sequence{\nelist{\possiblybracketedident}{}}{,}} - -For names of constants, inductive types, constructors, lemmas which -can only be applied to a fixed number of arguments (this excludes for -instance constants whose type is polymorphic), multiple -implicit arguments decflarations can be given. -Depending on the number of arguments {\qualid} is applied -to in practice, the longest applicable list of implicit arguments is -used to select which implicit arguments are inserted. - -For printing, the omitted arguments are the ones of the longest list -of implicit arguments of the sequence. - -\end{Variants} - -\Example -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example*} -Inductive list (A:Type) : Type := - | nil : list A - | cons : A -> list A -> list A. -\end{coq_example*} -\begin{coq_example} -Check (cons nat 3 (nil nat)). -Arguments cons [A] _ _. -Arguments nil [A]. -Check (cons 3 nil). -Fixpoint map (A B:Type) (f:A->B) (l:list A) : list B := - match l with nil => nil | cons a t => cons (f a) (map A B f t) end. -Fixpoint length (A:Type) (l:list A) : nat := - match l with nil => 0 | cons _ m => S (length A m) end. -Arguments map [A B] f l. -Arguments length {A} l. (* A has to be maximally inserted *) -Check (fun l:list (list nat) => map length l). -Arguments map [A B] f l, [A] B f l, A B f l. -Check (fun l => map length l = map (list nat) nat length l). -\end{coq_example} - -\Rem To know which are the implicit arguments of an object, use the command -{\tt Print Implicit} (see \ref{PrintImplicit}). 
- -\subsection{Automatic declaration of implicit arguments} - -{\Coq} can also automatically detect what are the implicit arguments -of a defined object. The command is just -\begin{quote} -{\tt Arguments {\qualid} : default implicits -\comindex{Arguments}} -\end{quote} -The auto-detection is governed by options telling if strict, -contextual, or reversible-pattern implicit arguments must be -considered or not (see -Sections~\ref{SetStrictImplicit},~\ref{SetContextualImplicit},~\ref{SetReversiblePatternImplicit} -and also~\ref{SetMaximalImplicitInsertion}). - -\begin{Variants} -\item {\tt Global Arguments {\qualid} : default implicits -\comindex{Global Arguments}} - -Tell to recompute the implicit arguments of {\qualid} after ending of -the current section if any. - -\item {\tt Local Arguments {\qualid} : default implicits -\comindex{Local Arguments}} - -When in a module, tell not to activate the implicit arguments of -{\qualid} computed by this declaration to contexts that requires the -module. - -\end{Variants} - -\Example -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example*} -Inductive list (A:Set) : Set := - | nil : list A - | cons : A -> list A -> list A. -\end{coq_example*} -\begin{coq_example} -Arguments cons : default implicits. -Print Implicit cons. -Arguments nil : default implicits. -Print Implicit nil. -Set Contextual Implicit. -Arguments nil : default implicits. -Print Implicit nil. -\end{coq_example} - -The computation of implicit arguments takes account of the -unfolding of constants. For instance, the variable {\tt p} below has -type {\tt (Transitivity R)} which is reducible to {\tt forall x,y:U, R x -y -> forall z:U, R y z -> R x z}. As the variables {\tt x}, {\tt y} and -{\tt z} appear strictly in body of the type, they are implicit. - -\begin{coq_example*} -Variable X : Type. -Definition Relation := X -> X -> Prop. -Definition Transitivity (R:Relation) := - forall x y:X, R x y -> forall z:X, R y z -> R x z. 
-Variables (R : Relation) (p : Transitivity R). -Arguments p : default implicits. -\end{coq_example*} -\begin{coq_example} -Print p. -Print Implicit p. -\end{coq_example} -\begin{coq_example*} -Variables (a b c : X) (r1 : R a b) (r2 : R b c). -\end{coq_example*} -\begin{coq_example} -Check (p r1 r2). -\end{coq_example} - -\subsection{Mode for automatic declaration of implicit arguments -\label{Auto-implicit} -\optindex{Implicit Arguments}} - -In case one wants to systematically declare implicit the arguments -detectable as such, one may switch to the automatic declaration of -implicit arguments mode by using the command -\begin{quote} -\tt Set Implicit Arguments. -\end{quote} -Conversely, one may unset the mode by using {\tt Unset Implicit -Arguments}. The mode is off by default. Auto-detection of implicit -arguments is governed by options controlling whether strict and -contextual implicit arguments have to be considered or not. - -\subsection{Controlling strict implicit arguments -\optindex{Strict Implicit} -\label{SetStrictImplicit}} - -When the mode for automatic declaration of implicit arguments is on, -the default is to automatically set implicit only the strict implicit -arguments plus, for historical reasons, a small subset of the non -strict implicit arguments. To relax this constraint and to -set implicit all non strict implicit arguments by default, use the command -\begin{quote} -\tt Unset Strict Implicit. -\end{quote} -Conversely, use the command {\tt Set Strict Implicit} to -restore the original mode that declares implicit only the strict implicit arguments plus a small subset of the non strict implicit arguments. - -In the other way round, to capture exactly the strict implicit arguments and no more than the strict implicit arguments, use the command: -\optindex{Strongly Strict Implicit} -\begin{quote} -\tt Set Strongly Strict Implicit. 
-\end{quote} -Conversely, use the command {\tt Unset Strongly Strict Implicit} to -let the option ``{\tt Strict Implicit}'' decide what to do. - -\Rem In versions of {\Coq} prior to version 8.0, the default was to -declare the strict implicit arguments as implicit. - -\subsection{Controlling contextual implicit arguments -\optindex{Contextual Implicit} -\label{SetContextualImplicit}} - -By default, {\Coq} does not automatically set implicit the contextual -implicit arguments. To tell {\Coq} to infer also contextual implicit -argument, use command -\begin{quote} -\tt Set Contextual Implicit. -\end{quote} -Conversely, use command {\tt Unset Contextual Implicit} to -unset the contextual implicit mode. - -\subsection{Controlling reversible-pattern implicit arguments -\optindex{Reversible Pattern Implicit} -\label{SetReversiblePatternImplicit}} - -By default, {\Coq} does not automatically set implicit the reversible-pattern -implicit arguments. To tell {\Coq} to infer also reversible-pattern implicit -argument, use command -\begin{quote} -\tt Set Reversible Pattern Implicit. -\end{quote} -Conversely, use command {\tt Unset Reversible Pattern Implicit} to -unset the reversible-pattern implicit mode. - -\subsection{Controlling the insertion of implicit arguments not followed by explicit arguments -\optindex{Maximal Implicit Insertion} -\label{SetMaximalImplicitInsertion}} - -Implicit arguments can be declared to be automatically inserted when a -function is partially applied and the next argument of the function is -an implicit one. In case the implicit arguments are automatically -declared (with the command {\tt Set Implicit Arguments}), the command -\begin{quote} -\tt Set Maximal Implicit Insertion. -\end{quote} -is used to tell to declare the implicit arguments with a maximal -insertion status. By default, automatically declared implicit -arguments are not declared to be insertable maximally. 
To restore the -default mode for maximal insertion, use command {\tt Unset Maximal -Implicit Insertion}. - -\subsection{Explicit applications -\index{Explicitly given implicit arguments} -\label{Implicits-explicitation} -\index{qualid@{\qualid}} \index{\symbol{64}}} - -In presence of non strict or contextual argument, or in presence of -partial applications, the synthesis of implicit arguments may fail, so -one may have to give explicitly certain implicit arguments of an -application. The syntax for this is {\tt (\ident:=\term)} where {\ident} -is the name of the implicit argument and {\term} is its corresponding -explicit term. Alternatively, one can locally deactivate the hiding of -implicit arguments of a function by using the notation -{\tt @{\qualid}~{\term}$_1$..{\term}$_n$}. This syntax extension is -given Figure~\ref{fig:explicitations}. -\begin{figure} -\begin{centerframe} -\begin{tabular}{lcl} -{\term} & ++= & @ {\qualid} \nelist{\term}{}\\ -& $|$ & @ {\qualid}\\ -& $|$ & {\qualid} \nelist{\textrm{\textsl{argument}}}{}\\ -\\ -{\textrm{\textsl{argument}}} & ::= & {\term} \\ -& $|$ & {\tt ({\ident}:={\term})}\\ -\end{tabular} -\end{centerframe} -\caption{Syntax for explicitly giving implicit arguments} -\label{fig:explicitations} -\end{figure} - -\noindent {\bf Example (continued): } -\begin{coq_example} -Check (p r1 (z:=c)). -Check (p (x:=a) (y:=b) r1 (z:=c) r2). -\end{coq_example} - -\subsection{Renaming implicit arguments -\comindex{Arguments} -} - -Implicit arguments names can be redefined using the following syntax: -\begin{quote} -{\tt Arguments {\qualid} \nelist{\name}{} : rename} -\end{quote} - -With the {\tt assert} flag, {\tt Arguments} can be used to assert -that a given object has the expected number of arguments and that -these arguments are named as expected. - -\noindent {\bf Example (continued): } -\begin{coq_example} -Arguments p [s t] _ [u] _: rename. -Check (p r1 (u:=c)). -Check (p (s:=a) (t:=b) r1 (u:=c) r2). 
-Fail Arguments p [s t] _ [w] _ : assert. -\end{coq_example} - - -\subsection{Displaying what the implicit arguments are -\comindex{Print Implicit} -\label{PrintImplicit}} - -To display the implicit arguments associated to an object, and to know -if each of them is to be used maximally or not, use the command -\begin{quote} -\tt Print Implicit {\qualid}. -\end{quote} - -\subsection{Explicit displaying of implicit arguments for pretty-printing -\optindex{Printing Implicit} -\optindex{Printing Implicit Defensive}} - -By default the basic pretty-printing rules hide the inferable implicit -arguments of an application. To force printing all implicit arguments, -use command -\begin{quote} -{\tt Set Printing Implicit.} -\end{quote} -Conversely, to restore the hiding of implicit arguments, use command -\begin{quote} -{\tt Unset Printing Implicit.} -\end{quote} - -By default the basic pretty-printing rules display the implicit arguments that are not detected as strict implicit arguments. This ``defensive'' mode can quickly make the display cumbersome so this can be deactivated by using the command -\begin{quote} -{\tt Unset Printing Implicit Defensive.} -\end{quote} -Conversely, to force the display of non strict arguments, use command -\begin{quote} -{\tt Set Printing Implicit Defensive.} -\end{quote} - -\SeeAlso {\tt Set Printing All} in Section~\ref{SetPrintingAll}. - -\subsection{Interaction with subtyping} - -When an implicit argument can be inferred from the type of more than -one of the other arguments, then only the type of the first of these -arguments is taken into account, and not an upper type of all of -them. As a consequence, the inference of the implicit argument of -``='' fails in -\begin{coq_example*} -Fail Check nat = Prop. -\end{coq_example*} - -but succeeds in -\begin{coq_example*} -Check Prop = nat. 
-\end{coq_example*} - -\subsection{Deactivation of implicit arguments for parsing} -\optindex{Parsing Explicit} - -Use of implicit arguments can be deactivated by issuing the command: -\begin{quote} -{\tt Set Parsing Explicit.} -\end{quote} - -In this case, all arguments of constants, inductive types, -constructors, etc, including the arguments declared as implicit, have -to be given as if none arguments were implicit. By symmetry, this also -affects printing. To restore parsing and normal printing of implicit -arguments, use: -\begin{quote} -{\tt Unset Parsing Explicit.} -\end{quote} - -\subsection{Canonical structures -\comindex{Canonical Structure}} - -A canonical structure is an instance of a record/structure type that -can be used to solve unification problems involving a projection -applied to an unknown structure instance (an implicit argument) and -a value. The complete documentation of canonical structures can be found -in Chapter~\ref{CS-full}, here only a simple example is given. - -Assume that {\qualid} denotes an object $(Build\_struc~ c_1~ \ldots~ c_n)$ in -the -structure {\em struct} of which the fields are $x_1$, ..., -$x_n$. Assume that {\qualid} is declared as a canonical structure -using the command -\begin{quote} -{\tt Canonical Structure {\qualid}.} -\end{quote} -Then, each time an equation of the form $(x_i~ -\_)=_{\beta\delta\iota\zeta}c_i$ has to be solved during the -type-checking process, {\qualid} is used as a solution. Otherwise -said, {\qualid} is canonically used to extend the field $c_i$ into a -complete structure built on $c_i$. - -Canonical structures are particularly useful when mixed with -coercions and strict implicit arguments. Here is an example. -\begin{coq_example*} -Require Import Relations. -Require Import EqNat. -Set Implicit Arguments. -Unset Strict Implicit. -Structure Setoid : Type := - {Carrier :> Set; - Equal : relation Carrier; - Prf_equiv : equivalence Carrier Equal}. 
-Definition is_law (A B:Setoid) (f:A -> B) := - forall x y:A, Equal x y -> Equal (f x) (f y). -Axiom eq_nat_equiv : equivalence nat eq_nat. -Definition nat_setoid : Setoid := Build_Setoid eq_nat_equiv. -Canonical Structure nat_setoid. -\end{coq_example*} - -Thanks to \texttt{nat\_setoid} declared as canonical, the implicit -arguments {\tt A} and {\tt B} can be synthesized in the next statement. -\begin{coq_example} -Lemma is_law_S : is_law S. -\end{coq_example} - -\Rem If a same field occurs in several canonical structure, then -only the structure declared first as canonical is considered. - -\begin{Variants} -\item {\tt Canonical Structure {\ident} := {\term} : {\type}.}\\ - {\tt Canonical Structure {\ident} := {\term}.}\\ - {\tt Canonical Structure {\ident} : {\type} := {\term}.} - -These are equivalent to a regular definition of {\ident} followed by -the declaration - -{\tt Canonical Structure {\ident}}. -\end{Variants} - -\SeeAlso more examples in user contribution \texttt{category} -(\texttt{Rocq/ALGEBRA}). - -\subsubsection{Print Canonical Projections. -\comindex{Print Canonical Projections}} - -This displays the list of global names that are components of some -canonical structure. For each of them, the canonical structure of -which it is a projection is indicated. For instance, the above example -gives the following output: - -\begin{coq_example} -Print Canonical Projections. -\end{coq_example} - -\subsection{Implicit types of variables} -\comindex{Implicit Types} - -It is possible to bind variable names to a given type (e.g. in a -development using arithmetic, it may be convenient to bind the names -{\tt n} or {\tt m} to the type {\tt nat} of natural numbers). 
The -command for that is -\begin{quote} -\tt Implicit Types \nelist{\ident}{} : {\type} -\end{quote} -The effect of the command is to automatically set the type of bound -variables starting with {\ident} (either {\ident} itself or -{\ident} followed by one or more single quotes, underscore or digits) -to be {\type} (unless the bound variable is already declared with an -explicit type in which case, this latter type is considered). - -\Example -\begin{coq_example} -Require Import List. -Implicit Types m n : nat. -Lemma cons_inj_nat : forall m n l, n :: l = m :: l -> n = m. -intros m n. -Lemma cons_inj_bool : forall (m n:bool) l, n :: l = m :: l -> n = m. -\end{coq_example} - -\begin{Variants} -\item {\tt Implicit Type {\ident} : {\type}}\\ -This is useful for declaring the implicit type of a single variable. -\item - {\tt Implicit Types\,% -(\,{\ident$_{1,1}$}\ldots{\ident$_{1,k_1}$}\,{\tt :}\,{\term$_1$} {\tt )}\,% -\ldots\,{\tt (}\,{\ident$_{n,1}$}\ldots{\ident$_{n,k_n}$}\,{\tt :}\,% -{\term$_n$} {\tt )}.}\\ - Adds $n$ blocks of implicit types with different specifications. -\end{Variants} - - -\subsection{Implicit generalization -\label{implicit-generalization} -\comindex{Generalizable Variables}} -% \textquoteleft since \` doesn't do what we want -\index{0genimpl@{\textquoteleft\{\ldots\}}} -\index{0genexpl@{\textquoteleft(\ldots)}} - -Implicit generalization is an automatic elaboration of a statement with -free variables into a closed statement where these variables are -quantified explicitly. Implicit generalization is done inside binders -starting with a \texttt{\`{}} and terms delimited by \texttt{\`{}\{ \}} and -\texttt{\`{}( )}, always introducing maximally inserted implicit arguments for -the generalized variables. Inside implicit generalization -delimiters, free variables in the current context are automatically -quantified using a product or a lambda abstraction to generate a closed -term. 
In the following statement for example, the variables \texttt{n} -and \texttt{m} are automatically generalized and become explicit -arguments of the lemma as we are using \texttt{\`{}( )}: - -\begin{coq_example} -Generalizable All Variables. -Lemma nat_comm : `(n = n + 0). -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} -One can control the set of generalizable identifiers with the -\texttt{Generalizable} vernacular command to avoid unexpected -generalizations when mistyping identifiers. There are three variants of -the command: - -\begin{quote} -{\tt Generalizable (All|No) Variable(s)? ({\ident$_1$ \ident$_n$})?.} -\end{quote} - -\begin{Variants} -\item {\tt Generalizable All Variables.} All variables are candidate for - generalization if they appear free in the context under a - generalization delimiter. This may result in confusing errors in - case of typos. In such cases, the context will probably contain some - unexpected generalized variable. - -\item {\tt Generalizable No Variables.} Disable implicit generalization - entirely. This is the default behavior. - -\item {\tt Generalizable Variable(s)? {\ident$_1$ \ident$_n$}.} - Allow generalization of the given identifiers only. Calling this - command multiple times adds to the allowed identifiers. - -\item {\tt Global Generalizable} Allows to export the choice of - generalizable variables. -\end{Variants} - -One can also use implicit generalization for binders, in which case the -generalized variables are added as binders and set maximally implicit. -\begin{coq_example*} -Definition id `(x : A) : A := x. -\end{coq_example*} -\begin{coq_example} -Print id. -\end{coq_example} - -The generalizing binders \texttt{\`{}\{ \}} and \texttt{\`{}( )} work similarly to -their explicit counterparts, only binding the generalized variables -implicitly, as maximally-inserted arguments. In these binders, the -binding name for the bound object is optional, whereas the type is -mandatory, dually to regular binders. 
- -\section{Coercions -\label{Coercions} -\index{Coercions}} - -Coercions can be used to implicitly inject terms from one {\em class} in -which they reside into another one. A {\em class} is either a sort -(denoted by the keyword {\tt Sortclass}), a product type (denoted by the -keyword {\tt Funclass}), or a type constructor (denoted by its name), -e.g. an inductive type or any constant with a type of the form -\texttt{forall} $(x_1:A_1) .. (x_n:A_n),~s$ where $s$ is a sort. - -Then the user is able to apply an -object that is not a function, but can be coerced to a function, and -more generally to consider that a term of type A is of type B provided -that there is a declared coercion between A and B. The main command is -\comindex{Coercion} -\begin{quote} -\tt Coercion {\qualid} : {\class$_1$} >-> {\class$_2$}. -\end{quote} -which declares the construction denoted by {\qualid} as a -coercion between {\class$_1$} and {\class$_2$}. - -More details and examples, and a description of the commands related -to coercions are provided in Chapter~\ref{Coercions-full}. - -\section[Printing constructions in full]{Printing constructions in full\label{SetPrintingAll} -\optindex{Printing All}} - -Coercions, implicit arguments, the type of pattern-matching, but also -notations (see Chapter~\ref{Addoc-syntax}) can obfuscate the behavior -of some tactics (typically the tactics applying to occurrences of -subterms are sensitive to the implicit arguments). The command -\begin{quote} -{\tt Set Printing All.} -\end{quote} -deactivates all high-level printing features such as coercions, -implicit arguments, returned type of pattern-matching, notations and -various syntactic sugar for pattern-matching or record projections. -Otherwise said, {\tt Set Printing All} includes the effects -of the commands {\tt Set Printing Implicit}, {\tt Set Printing -Coercions}, {\tt Set Printing Synth}, {\tt Unset Printing Projections} -and {\tt Unset Printing Notations}. 
To reactivate the high-level -printing features, use the command -\begin{quote} -{\tt Unset Printing All.} -\end{quote} - -\section[Printing universes]{Printing universes\label{PrintingUniverses} -\optindex{Printing Universes}} - -The following command: -\begin{quote} -{\tt Set Printing Universes} -\end{quote} -activates the display of the actual level of each occurrence of -{\Type}. See Section~\ref{Sorts} for details. This wizard option, in -combination with \texttt{Set Printing All} (see -section~\ref{SetPrintingAll}) can help to diagnose failures to unify -terms apparently identical but internally different in the Calculus of -Inductive Constructions. To reactivate the display of the actual level -of the occurrences of {\Type}, use -\begin{quote} -{\tt Unset Printing Universes.} -\end{quote} - -\comindex{Print Universes} -\comindex{Print Sorted Universes} - -The constraints on the internal level of the occurrences of {\Type} -(see Section~\ref{Sorts}) can be printed using the command -\begin{quote} -{\tt Print \zeroone{Sorted} Universes.} -\end{quote} -If the optional {\tt Sorted} option is given, each universe will be -made equivalent to a numbered label reflecting its level (with a -linear ordering) in the universe hierarchy. - -This command also accepts an optional output filename: -\begin{quote} -\tt Print \zeroone{Sorted} Universes {\str}. -\end{quote} -If {\str} ends in \texttt{.dot} or \texttt{.gv}, the constraints are -printed in the DOT language, and can be processed by Graphviz -tools. The format is unspecified if {\str} doesn't end in -\texttt{.dot} or \texttt{.gv}. - -\section[Existential variables]{Existential variables\label{ExistentialVariables}} -\label{evars} - -Coq terms can include existential variables which -represents unknown subterms to eventually be replaced by actual -subterms. 
- -Existential variables are generated in place of unsolvable implicit -arguments or ``{\tt \_}'' placeholders when using commands such as -\texttt{Check} (see Section~\ref{Check}) or when using tactics such as -\texttt{refine}~(see Section~\ref{refine}), as well as in place of unsolvable -instances when using tactics such that \texttt{eapply} (see -Section~\ref{eapply}). An existential variable is defined in a -context, which is the context of variables of the placeholder which -generated the existential variable, and a type, which is the expected -type of the placeholder. - -As a consequence of typing constraints, existential variables can be -duplicated in such a way that they possibly appear in different -contexts than their defining context. Thus, any occurrence of a given -existential variable comes with an instance of its original context. In the -simple case, when an existential variable denotes the placeholder -which generated it, or is used in the same context as the one in which -it was generated, the context is not displayed and the existential -variable is represented by ``?'' followed by an identifier. - -\begin{coq_example} -Parameter identity : forall (X:Set), X -> X. -Check identity _ _. -Check identity _ (fun x => _). -\end{coq_example} - -In the general case, when an existential variable ?{\ident} -appears outside of its context of definition, its instance, written under -the form \verb!@{id1:=term1; ...; idn:=termn}!, is appending to its -name, indicating how the variables of its defining context are -instantiated. The variables of the context of the existential -variables which are instantiated by themselves are not written, unless -the flag {\tt Printing Existential Instances} is on (see -Section~\ref{SetPrintingExistentialInstances}), and this is why an -existential variable used in the same context as its context of -definition is written with no instance. - -\begin{coq_example} -Check (fun x y => _) 0 1. -Set Printing Existential Instances. 
-Check (fun x y => _) 0 1. -\end{coq_example} - -\begin{coq_eval} -Unset Printing Existential Instances. -\end{coq_eval} - -Existential variables can be named by the user upon creation using -the syntax {\tt ?[\ident]}. This is useful when the existential -variable needs to be explicitly handled later in the script (e.g. -with a named-goal selector, see~\ref{ltac:selector}). - -\subsection{Explicit displaying of existential instances for pretty-printing -\label{SetPrintingExistentialInstances} -\optindex{Printing Existential Instances}} - -The command: -\begin{quote} -{\tt Set Printing Existential Instances} -\end{quote} -activates the full display of how the context of an existential variable is -instantiated at each of the occurrences of the existential variable. - -To deactivate the full display of the instances of existential -variables, use -\begin{quote} -{\tt Unset Printing Existential Instances.} -\end{quote} - -\subsection{Solving existential variables using tactics} -\ttindex{ltac:( \ldots )} - -\def\expr{\textrm{\textsl{tacexpr}}} - -Instead of letting the unification engine try to solve an existential variable -by itself, one can also provide an explicit hole together with a tactic to solve -it. Using the syntax {\tt ltac:(\expr)}, the user can put a -tactic anywhere a term is expected. The order of resolution is not specified and -is implementation-dependent. The inner tactic may use any variable defined in -its scope, including repeated alternations between variables introduced by term -binding as well as those introduced by tactic binding. The expression {\expr} -can be any tactic expression as described at section~\ref{TacticLanguage}. - -\begin{coq_example*} -Definition foo (x : nat) : nat := ltac:(exact x). -\end{coq_example*} - -This construction is useful when one wants to define complicated terms using -highly automated tactics without resorting to writing the proof-term by means of -the interactive proof engine. 
- -This mechanism is comparable to the {\tt Declare Implicit Tactic} command -defined at~\ref{DeclareImplicit}, except that the used tactic is local to each -hole instead of being declared globally. - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-ide.tex b/doc/refman/RefMan-ide.tex deleted file mode 100644 index 2d98534307..0000000000 --- a/doc/refman/RefMan-ide.tex +++ /dev/null @@ -1,312 +0,0 @@ -\chapter[\Coq{} Integrated Development Environment]{\Coq{} Integrated Development Environment\label{Addoc-coqide} -\ttindex{coqide}} -%HEVEA\cutname{coqide.html} - -The \Coq{} Integrated Development Environment is a graphical tool, to -be used as a user-friendly replacement to \texttt{coqtop}. Its main -purpose is to allow the user to navigate forward and backward into a -\Coq{} vernacular file, executing corresponding commands or undoing -them respectively. % CREDITS ? Proof general, lablgtk, ... - -\CoqIDE{} is run by typing the command \verb|coqide| on the command -line. Without argument, the main screen is displayed with an ``unnamed -buffer'', and with a file name as argument, another buffer displaying -the contents of that file. Additionally, \verb|coqide| accepts the same -options as \verb|coqtop|, given in Chapter~\ref{Addoc-coqc}, the ones having -obviously no meaning for \CoqIDE{} being ignored. - -\begin{figure}[t] -\begin{center} -%HEVEA\imgsrc[alt="coqide main screen"]{coqide.png} -%BEGIN LATEX -\ifpdf % si on est en pdflatex -\includegraphics[width=1.0\textwidth]{coqide.png} -\else -\includegraphics[width=1.0\textwidth]{coqide.eps} -\fi -%END LATEX -\end{center} -\caption{\CoqIDE{} main screen} -\label{fig:coqide} -\end{figure} - -A sample \CoqIDE{} main screen, while navigating into a file -\verb|Fermat.v|, is shown on Figure~\ref{fig:coqide}. At -the top is a menu bar, and a tool bar below it. The large window on -the left is displaying the various \emph{script buffers}. 
The upper right -window is the \emph{goal window}, where goals to -prove are displayed. The lower right window is the \emph{message window}, -where various messages resulting from commands are displayed. At the -bottom is the status bar. - -\section{Managing files and buffers, basic edition} - -In the script window, you may open arbitrarily many buffers to -edit. The \emph{File} menu allows you to open files or create some, -save them, print or export them into various formats. Among all these -buffers, there is always one which is the current -\emph{running buffer}, whose name is displayed on a background in the -\emph{processed} color (green by default), which is the one where Coq commands -are currently executed. - -Buffers may be edited as in any text editor, and classical basic -editing commands (Copy/Paste, \ldots) are available in the \emph{Edit} -menu. \CoqIDE{} offers only basic editing commands, so if you need -more complex editing commands, you may launch your favorite text -editor on the current buffer, using the \emph{Edit/External Editor} -menu. - -\section{Interactive navigation into \Coq{} scripts} - -The running buffer is the one where navigation takes place. The -toolbar offers five basic navigation commands. The first one, -represented by a down arrow icon, is for going forward executing one -command. If that command is successful, the part of the script that -has been executed is displayed on a background with the -processed color. If that command fails, the error message is -displayed in the message window, and the location of the error is -emphasized by an underline in the error foreground color (red by default). - -On Figure~\ref{fig:coqide}, the running buffer is \verb|Fermat.v|, all -commands until the \verb|Theorem| have been already executed, and the -user tried to go forward executing \verb|Induction n|. That command -failed because no such tactic exist (tactics are now in -lowercase\ldots), and the wrong word is underlined. 
- -Notice that the processed part of the running buffer is not editable. If -you ever want to modify something you have to go backward using the up -arrow tool, or even better, put the cursor where you want to go back -and use the \textsf{goto} button. Unlike with \verb|coqtop|, you -should never use \verb|Undo| to go backward. - -There are two additional buttons for navigation within the running buffer. -The ``down'' button with a line goes directly to the end; the ``up'' button -with a line goes back to the beginning. The handling of errors when using the -go-to-the-end button depends on whether \Coq{} is running in asynchronous mode or not -(see Chapter~\ref{Asyncprocessing}). If it is not running in that mode, execution stops -as soon as an error is found. Otherwise, execution continues, and the -error is marked with an underline in the error foreground color, with a background in -the error background color (pink by default). The same characterization of -error-handling applies when running several commands using the \textsf{goto} button. - -If you ever try to execute a command which happens to run during a -long time, and would like to abort it before its -termination, you may use the interrupt button (the white cross on a red circle). - -There are other buttons on the \CoqIDE{} toolbar: a button to save the running -buffer; a button to close the current buffer (an ``X''); buttons to switch among -buffers (left and right arrows); an ``information'' button; and a ``gears'' button. - -The ``information'' button is described in Section~\ref{sec:trytactics}. - -The ``gears'' button submits proof terms to the \Coq{} kernel for type-checking. -When \Coq{} uses asynchronous processing (see Chapter~\ref{Asyncprocessing}), proofs may -have been completed without kernel-checking of generated proof terms. 
The presence of -unchecked proof terms is indicated by \texttt{Qed} statements -that have a subdued \emph{being-processed} color (light blue by default), -rather than the processed color, though their preceding proofs have the processed color. - -Notice that for all these buttons, except for the ``gears'' button, their operations -are also available in the menu, where their keyboard shortcuts are given. - -\section[Try tactics automatically]{Try tactics automatically\label{sec:trytactics}} - -The menu \texttt{Try Tactics} provides some features for automatically -trying to solve the current goal using simple tactics. If such a -tactic succeeds in solving the goal, then its text is automatically -inserted into the script. There is finally a combination of these -tactics, called the \emph{proof wizard} which will try each of them in -turn. This wizard is also available as a tool button (the ``information'' -button). The set of tactics tried by the wizard is customizable in -the preferences. - -These tactics are general ones, in particular they do not refer to -particular hypotheses. You may also try specific tactics related to -the goal or one of the hypotheses, by clicking with the right mouse -button on the goal or the considered hypothesis. This is the -``contextual menu on goals'' feature, that may be disabled in the -preferences if undesirable. - -\section{Proof folding} - -As your script grows bigger and bigger, it might be useful to hide the proofs -of your theorems and lemmas. - -This feature is toggled via the \texttt{Hide} entry of the \texttt{Navigation} -menu. The proof shall be enclosed between \texttt{Proof.} and \texttt{Qed.}, -both with their final dots. The proof that shall be hidden or revealed is the -first one whose beginning statement (such as \texttt{Theorem}) precedes the -insertion cursor. - -\section{Vernacular commands, templates} - -The \texttt{Templates} menu allows using shortcuts to insert -vernacular commands. 
This is a nice way to proceed if you are not sure -of the spelling of the command you want. - -Moreover, this menu offers some \emph{templates} which will automatic -insert a complex command like Fixpoint with a convenient shape for its -arguments. - -\section{Queries} - -\begin{figure}[t] -\begin{center} -%HEVEA\imgsrc[alt="coqide query"]{coqide-queries.png} -%BEGIN LATEX -\ifpdf % si on est en pdflatex -\includegraphics[width=1.0\textwidth]{coqide-queries.png} -\else -\includegraphics[width=1.0\textwidth]{coqide-queries.eps} -\fi -%END LATEX -\end{center} -\caption{\CoqIDE{}: a Print query on a selected phrase} -\label{fig:queryselected} -\end{figure} - -We call \emph{query} any vernacular command that does not change the -current state, such as \verb|Check|, \verb|Search|, etc. -To run such commands interactively, without writing them in scripts, -\CoqIDE{} offers a \emph{query pane}. -The query pane can be displayed on demand by using the \texttt{View} menu, -or using the shortcut \verb|F1|. Queries can also be performed by -selecting a particular phrase, then choosing an item from the -\texttt{Queries} menu. The response then appears in the message window. -Figure~\ref{fig:queryselected} shows the result after selecting -of the phrase \verb|Nat.mul| in the script window, and choosing \verb|Print| -from the \texttt{Queries} menu. - -\section{Compilation} - -The \verb|Compile| menu offers direct commands to: -\begin{itemize} -\item compile the current buffer -\item run a compilation using \verb|make| -\item go to the last compilation error -\item create a \verb|makefile| using \verb|coq_makefile|. -\end{itemize} - -\section{Customizations} - -You may customize your environment using menu -\texttt{Edit/Preferences}. A new window will be displayed, with -several customization sections presented as a notebook. - -The first section is for selecting the text font used for scripts, goal -and message windows. 
- -The second section is devoted to file management: you may -configure automatic saving of files, by periodically saving the -contents into files named \verb|#f#| for each opened file -\verb|f|. You may also activate the \emph{revert} feature: in case a -opened file is modified on the disk by a third party, \CoqIDE{} may read -it again for you. Note that in the case you edited that same file, you -will be prompt to choose to either discard your changes or not. The -\texttt{File charset encoding} choice is described below in -Section~\ref{sec:coqidecharencoding} - - -The \verb|Externals| section allows customizing the external commands -for compilation, printing, web browsing. In the browser command, you -may use \verb|%s| to denote the URL to open, for example: % -\verb|mozilla -remote "OpenURL(%s)"|. - -The \verb|Tactics Wizard| section allows defining the set of tactics -that should be tried, in sequence, to solve the current goal. - -The last section is for miscellaneous boolean settings, such as the -``contextual menu on goals'' feature presented in -Section~\ref{sec:trytactics}. - -Notice that these settings are saved in the file \verb|.coqiderc| of -your home directory. - -A gtk2 accelerator keymap is saved under the name \verb|.coqide.keys|. -It is not recommanded to edit this file manually: to modify a given menu -shortcut, go to the corresponding menu item without releasing the -mouse button, press the key you want for the new shortcut, and release -the mouse button afterwards. If your system does not allow it, you may still -edit this configuration file by hand, but this is more involved. - -\section{Using Unicode symbols} - -\CoqIDE{} is based on GTK+ and inherits from it support for Unicode in -its text windows. Consequently a large set of symbols is available for -notations. - -\subsection{Displaying Unicode symbols} - -You just need to define suitable notations as described in -Chapter~\ref{Addoc-syntax}. 
For example, to use the mathematical symbols -$\forall$ and $\exists$, you may define -\begin{quote}\tt -Notation "$\forall$ x : t, P" := \\ -\qquad (forall x:t, P) (at level 200, x ident).\\ -Notation "$\exists$ x : t, P" := \\ -\qquad (exists x:t, P) (at level 200, x ident). -\end{quote} -There exists a small set of such notations already defined, in the -file \verb|utf8.v| of \Coq{} library, so you may enable them just by -\verb|Require utf8| inside \CoqIDE{}, or equivalently, by starting -\CoqIDE{} with \verb|coqide -l utf8|. - -However, there are some issues when using such Unicode symbols: you of -course need to use a character font which supports them. In the Fonts -section of the preferences, the Preview line displays some Unicode symbols, so -you could figure out if the selected font is OK. Related to this, one -thing you may need to do is choose whether GTK+ should use antialiased -fonts or not, by setting the environment variable \verb|GDK_USE_XFT| -to 1 or 0 respectively. - -\subsection{Defining an input method for non ASCII symbols} - -To input a Unicode symbol, a general method provided by GTK+ -is to simultaneously press the -Control, Shift and ``u'' keys, release, then type the hexadecimal code of the -symbol required, for example \verb|2200| for the $\forall$ symbol. -A list of symbol codes is available at \url{http://www.unicode.org}. - -An alternative method which does not require to know the hexadecimal -code of the character is to use an Input Method Editor. On POSIX -systems (Linux distributions, BSD variants and MacOS X), you can use -\texttt{uim} version 1.6 or later which provides a \LaTeX{}-style -input method. - -To configure \texttt{uim}, execute \texttt{uim-pref-gtk} as your regular user. -In the "Global Settings" group set the default Input Method to "ELatin" (don't -forget to tick the checkbox "Specify default IM"). 
In the "ELatin" group set the -layout to "TeX", and remember the content of the "[ELatin] on" field (by default -Control-\textbackslash). You can now execute CoqIDE with the following commands (assuming -you use a Bourne-style shell): - -\begin{verbatim} -$ export GTK_IM_MODULE=uim -$ coqide -\end{verbatim} - -Activate the ELatin Input Method with Control-\textbackslash, then type the -sequence "\verb=\Gamma=". You will see the sequence being -replaced by $\Gamma$ as soon as you type the second "a". - -\subsection[Character encoding for saved files]{Character encoding for saved files\label{sec:coqidecharencoding}} - -In the \texttt{Files} section of the preferences, the encoding option -is related to the way files are saved. - -If you have no need to exchange files with non UTF-8 aware -applications, it is better to choose the UTF-8 encoding, since it -guarantees that your files will be read again without problems. (This -is because when \CoqIDE{} reads a file, it tries to automatically -detect its character encoding.) - -If you choose something else than UTF-8, then missing characters will -be written encoded by \verb|\x{....}| or \verb|\x{........}| where -each dot is an hexadecimal digit: the number between braces is the -hexadecimal Unicode index for the missing character. - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-int.tex b/doc/refman/RefMan-int.tex deleted file mode 100644 index f802a35950..0000000000 --- a/doc/refman/RefMan-int.tex +++ /dev/null @@ -1,143 +0,0 @@ -%BEGIN LATEX -\setheaders{Introduction} -%END LATEX -\chapter*{Introduction} -%HEVEA\cutname{introduction.html} - -This document is the Reference Manual of version \coqversion{} of the \Coq\ -proof assistant. A companion volume, the \Coq\ Tutorial, is provided -for the beginners. It is advised to read the Tutorial first. 
-A book~\cite{CoqArt} on practical uses of the \Coq{} system was published in 2004 and is a good support for both the beginner and -the advanced user. - -%The system \Coq\ is designed to develop mathematical proofs. It can be -%used by mathematicians to develop mathematical theories and by -%computer scientists to write formal specifications, -The \Coq{} system is designed to develop mathematical proofs, and -especially to write formal specifications, programs and to verify that -programs are correct with respect to their specification. It provides -a specification language named \gallina. Terms of \gallina\ can -represent programs as well as properties of these programs and proofs -of these properties. Using the so-called \textit{Curry-Howard - isomorphism}, programs, properties and proofs are formalized in the -same language called \textit{Calculus of Inductive Constructions}, -that is a $\lambda$-calculus with a rich type system. All logical -judgments in \Coq\ are typing judgments. The very heart of the Coq -system is the type-checking algorithm that checks the correctness of -proofs, in other words that checks that a program complies to its -specification. \Coq\ also provides an interactive proof assistant to -build proofs using specific programs called \textit{tactics}. - -All services of the \Coq\ proof assistant are accessible by -interpretation of a command language called \textit{the vernacular}. - -\Coq\ has an interactive mode in which commands are interpreted as the -user types them in from the keyboard and a compiled mode where -commands are processed from a file. - -\begin{itemize} -\item The interactive mode may be used as a debugging mode in which - the user can develop his theories and proofs step by step, - backtracking if needed and so on. The interactive mode is run with - the {\tt coqtop} command from the operating system (which we shall - assume to be some variety of UNIX in the rest of this document). 
- -\item The compiled mode acts as a proof checker taking a file - containing a whole development in order to ensure its correctness. - Moreover, \Coq's compiler provides an output file containing a - compact representation of its input. The compiled mode is run with - the {\tt coqc} command from the operating system. - -\end{itemize} -These two modes are documented in Chapter~\ref{Addoc-coqc}. - -Other modes of interaction with \Coq{} are possible: through an emacs -shell window, an emacs generic user-interface for proof assistants -({\ProofGeneral}~\cite{ProofGeneral}) or through a customized interface -(PCoq~\cite{Pcoq}). These facilities are not documented here. There -is also a \Coq{} Integrated Development Environment described in -Chapter~\ref{Addoc-coqide}. - -\section*{How to read this book} - -This is a Reference Manual, not a User Manual, so it is not made for -continuous reading. However, it has some structure that is explained -below. - -\begin{itemize} -\item The first part describes the specification language, - Gallina. Chapters~\ref{Gallina} and~\ref{Gallina-extension} - describe the concrete syntax as well as the meaning of programs, - theorems and proofs in the Calculus of Inductive - Constructions. Chapter~\ref{Theories} describes the standard library - of \Coq. Chapter~\ref{Cic} is a mathematical description of the - formalism. Chapter~\ref{chapter:Modules} describes the module system. - -\item The second part describes the proof engine. It is divided into - five chapters. Chapter~\ref{Vernacular-commands} presents all - commands (we call them \emph{vernacular commands}) that are not - directly related to interactive proving: requests to the - environment, complete or partial evaluation, loading and compiling - files. How to start and stop proofs, do multiple proofs in parallel - is explained in Chapter~\ref{Proof-handling}. 
In - Chapter~\ref{Tactics}, all commands that realize one or more steps - of the proof are presented: we call them \emph{tactics}. The - language to combine these tactics into complex proof strategies is - given in Chapter~\ref{TacticLanguage}. Examples of tactics are - described in Chapter~\ref{Tactics-examples}. - -%\item The third part describes how to extend the system in two ways: -% adding parsing and pretty-printing rules -% (Chapter~\ref{Addoc-syntax}) and writing new tactics -% (Chapter~\ref{TacticLanguage}). - -\item The third part describes how to extend the syntax of \Coq. It -corresponds to the Chapter~\ref{Addoc-syntax}. - -\item In the fourth part more practical tools are documented. First in - Chapter~\ref{Addoc-coqc}, the usage of \texttt{coqc} (batch mode) - and \texttt{coqtop} (interactive mode) with their options is - described. Then, in Chapter~\ref{Utilities}, - various utilities that come with the \Coq\ distribution are - presented. - Finally, Chapter~\ref{Addoc-coqide} describes the \Coq{} integrated - development environment. - -\item The fifth part documents a number of advanced features, including - coercions, canonical structures, typeclasses, program extraction, and - specialized solvers and tactics. See the table of contents for a complete - list. -\end{itemize} - -At the end of the document, after the global index, the user can find -specific indexes for tactics, vernacular commands, and error -messages. - -\section*{List of additional documentation} - -This manual does not contain all the documentation the user may need -about \Coq{}. Various informations can be found in the following -documents: -\begin{description} - -\item[Tutorial] - A companion volume to this reference manual, the \Coq{} Tutorial, is - aimed at gently introducing new users to developing proofs in \Coq{} - without assuming prior knowledge of type theory. In a second step, the - user can read also the tutorial on recursive types (document {\tt - RecTutorial.ps}). 
- -\item[Installation] A text file INSTALL that comes with the sources - explains how to install \Coq{}. - -\item[The \Coq{} standard library] -A commented version of sources of the \Coq{} standard library -(including only the specifications, the proofs are removed) -is given in the additional document {\tt Library.ps}. - -\end{description} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-lib.tex b/doc/refman/RefMan-lib.tex deleted file mode 100644 index 89f5be8438..0000000000 --- a/doc/refman/RefMan-lib.tex +++ /dev/null @@ -1,1112 +0,0 @@ -\chapter[The {\Coq} library]{The {\Coq} library\index{Theories}\label{Theories}} -%HEVEA\cutname{stdlib.html} - -The \Coq\ library is structured into two parts: - -\begin{description} -\item[The initial library:] it contains - elementary logical notions and data-types. It constitutes the - basic state of the system directly available when running - \Coq; - -\item[The standard library:] general-purpose libraries containing - various developments of \Coq\ axiomatizations about sets, lists, - sorting, arithmetic, etc. This library comes with the system and its - modules are directly accessible through the \verb!Require! command - (see Section~\ref{Require}); -\end{description} - -In addition, user-provided libraries or developments are provided by -\Coq\ users' community. These libraries and developments are available -for download at \url{http://coq.inria.fr} (see -Section~\ref{Contributions}). - -The chapter briefly reviews the \Coq\ libraries whose contents can -also be browsed at \url{http://coq.inria.fr/stdlib}. 
- -\section[The basic library]{The basic library\label{Prelude}} - -This section lists the basic notions and results which are directly -available in the standard \Coq\ system\footnote{Most -of these constructions are defined in the -{\tt Prelude} module in directory {\tt theories/Init} at the {\Coq} -root directory; this includes the modules -{\tt Notations}, -{\tt Logic}, -{\tt Datatypes}, -{\tt Specif}, -{\tt Peano}, -{\tt Wf} and -{\tt Tactics}. -Module {\tt Logic\_Type} also makes it in the initial state}. - -\subsection[Notations]{Notations\label{Notations}} - -This module defines the parsing and pretty-printing of many symbols -(infixes, prefixes, etc.). However, it does not assign a meaning to -these notations. The purpose of this is to define and fix once for all -the precedence and associativity of very common notations. The main -notations fixed in the initial state are listed on -Figure~\ref{init-notations}. - -\begin{figure} -\begin{center} -\begin{tabular}{|cll|} -\hline -Notation & Precedence & Associativity \\ -\hline -\verb!_ -> _! & 99 & right \\ -\verb!_ <-> _! & 95 & no \\ -\verb!_ \/ _! & 85 & right \\ -\verb!_ /\ _! & 80 & right \\ -\verb!~ _! & 75 & right \\ -\verb!_ = _! & 70 & no \\ -\verb!_ = _ = _! & 70 & no \\ -\verb!_ = _ :> _! & 70 & no \\ -\verb!_ <> _! & 70 & no \\ -\verb!_ <> _ :> _! & 70 & no \\ -\verb!_ < _! & 70 & no \\ -\verb!_ > _! & 70 & no \\ -\verb!_ <= _! & 70 & no \\ -\verb!_ >= _! & 70 & no \\ -\verb!_ < _ < _! & 70 & no \\ -\verb!_ < _ <= _! & 70 & no \\ -\verb!_ <= _ < _! & 70 & no \\ -\verb!_ <= _ <= _! & 70 & no \\ -\verb!_ + _! & 50 & left \\ -\verb!_ || _! & 50 & left \\ -\verb!_ - _! & 50 & left \\ -\verb!_ * _! & 40 & left \\ -\verb!_ && _! & 40 & left \\ -\verb!_ / _! & 40 & left \\ -\verb!- _! & 35 & right \\ -\verb!/ _! & 35 & right \\ -\verb!_ ^ _! 
& 30 & right \\ -\hline -\end{tabular} -\end{center} -\caption{Notations in the initial state} -\label{init-notations} -\end{figure} - -\subsection[Logic]{Logic\label{Logic}} - -\begin{figure} -\begin{centerframe} -\begin{tabular}{lclr} -{\form} & ::= & {\tt True} & ({\tt True})\\ - & $|$ & {\tt False} & ({\tt False})\\ - & $|$ & {\tt\char'176} {\form} & ({\tt not})\\ - & $|$ & {\form} {\tt /$\backslash$} {\form} & ({\tt and})\\ - & $|$ & {\form} {\tt $\backslash$/} {\form} & ({\tt or})\\ - & $|$ & {\form} {\tt ->} {\form} & (\em{primitive implication})\\ - & $|$ & {\form} {\tt <->} {\form} & ({\tt iff})\\ - & $|$ & {\tt forall} {\ident} {\tt :} {\type} {\tt ,} - {\form} & (\em{primitive for all})\\ - & $|$ & {\tt exists} {\ident} \zeroone{{\tt :} {\specif}} {\tt - ,} {\form} & ({\tt ex})\\ - & $|$ & {\tt exists2} {\ident} \zeroone{{\tt :} {\specif}} {\tt - ,} {\form} {\tt \&} {\form} & ({\tt ex2})\\ - & $|$ & {\term} {\tt =} {\term} & ({\tt eq})\\ - & $|$ & {\term} {\tt =} {\term} {\tt :>} {\specif} & ({\tt eq}) -\end{tabular} -\end{centerframe} -\caption{Syntax of formulas} -\label{formulas-syntax} -\end{figure} - -The basic library of {\Coq} comes with the definitions of standard -(intuitionistic) logical connectives (they are defined as inductive -constructions). They are equipped with an appealing syntax enriching the -(subclass {\form}) of the syntactic class {\term}. The syntax -extension is shown on Figure~\ref{formulas-syntax}. - -% The basic library of {\Coq} comes with the definitions of standard -% (intuitionistic) logical connectives (they are defined as inductive -% constructions). They are equipped with an appealing syntax enriching -% the (subclass {\form}) of the syntactic class {\term}. The syntax -% extension \footnote{This syntax is defined in module {\tt -% LogicSyntax}} is shown on Figure~\ref{formulas-syntax}. - -\Rem Implication is not defined but primitive (it is a non-dependent -product of a proposition over another proposition). 
There is also a -primitive universal quantification (it is a dependent product over a -proposition). The primitive universal quantification allows both -first-order and higher-order quantification. - -\subsubsection[Propositional Connectives]{Propositional Connectives\label{Connectives} -\index{Connectives}} - -First, we find propositional calculus connectives: -\ttindex{True} -\ttindex{I} -\ttindex{False} -\ttindex{not} -\ttindex{and} -\ttindex{conj} -\ttindex{proj1} -\ttindex{proj2} - -\begin{coq_eval} -Set Printing Depth 50. -\end{coq_eval} -\begin{coq_example*} -Inductive True : Prop := I. -Inductive False : Prop := . -Definition not (A: Prop) := A -> False. -Inductive and (A B:Prop) : Prop := conj (_:A) (_:B). -Section Projections. -Variables A B : Prop. -Theorem proj1 : A /\ B -> A. -Theorem proj2 : A /\ B -> B. -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} -\begin{coq_example*} -End Projections. -\end{coq_example*} -\ttindex{or} -\ttindex{or\_introl} -\ttindex{or\_intror} -\ttindex{iff} -\ttindex{IF\_then\_else} -\begin{coq_example*} -Inductive or (A B:Prop) : Prop := - | or_introl (_:A) - | or_intror (_:B). -Definition iff (P Q:Prop) := (P -> Q) /\ (Q -> P). -Definition IF_then_else (P Q R:Prop) := P /\ Q \/ ~ P /\ R. -\end{coq_example*} - -\subsubsection[Quantifiers]{Quantifiers\label{Quantifiers} -\index{Quantifiers}} - -Then we find first-order quantifiers: -\ttindex{all} -\ttindex{ex} -\ttindex{exists} -\ttindex{ex\_intro} -\ttindex{ex2} -\ttindex{exists2} -\ttindex{ex\_intro2} - -\begin{coq_example*} -Definition all (A:Set) (P:A -> Prop) := forall x:A, P x. -Inductive ex (A: Set) (P:A -> Prop) : Prop := - ex_intro (x:A) (_:P x). -Inductive ex2 (A:Set) (P Q:A -> Prop) : Prop := - ex_intro2 (x:A) (_:P x) (_:Q x). 
-\end{coq_example*} - -The following abbreviations are allowed: -\begin{center} - \begin{tabular}[h]{|l|l|} - \hline - \verb+exists x:A, P+ & \verb+ex A (fun x:A => P)+ \\ - \verb+exists x, P+ & \verb+ex _ (fun x => P)+ \\ - \verb+exists2 x:A, P & Q+ & \verb+ex2 A (fun x:A => P) (fun x:A => Q)+ \\ - \verb+exists2 x, P & Q+ & \verb+ex2 _ (fun x => P) (fun x => Q)+ \\ - \hline - \end{tabular} -\end{center} - -The type annotation ``\texttt{:A}'' can be omitted when \texttt{A} can be -synthesized by the system. - -\subsubsection[Equality]{Equality\label{Equality} -\index{Equality}} - -Then, we find equality, defined as an inductive relation. That is, -given a type \verb:A: and an \verb:x: of type \verb:A:, the -predicate \verb:(eq A x): is the smallest one which contains \verb:x:. -This definition, due to Christine Paulin-Mohring, is equivalent to -define \verb:eq: as the smallest reflexive relation, and it is also -equivalent to Leibniz' equality. - -\ttindex{eq} -\ttindex{eq\_refl} - -\begin{coq_example*} -Inductive eq (A:Type) (x:A) : A -> Prop := - eq_refl : eq A x x. -\end{coq_example*} - -\subsubsection[Lemmas]{Lemmas\label{PreludeLemmas}} - -Finally, a few easy lemmas are provided. - -\ttindex{absurd} - -\begin{coq_example*} -Theorem absurd : forall A C:Prop, A -> ~ A -> C. -\end{coq_example*} -\begin{coq_eval} -Abort. -\end{coq_eval} -\ttindex{eq\_sym} -\ttindex{eq\_trans} -\ttindex{f\_equal} -\ttindex{sym\_not\_eq} -\begin{coq_example*} -Section equality. -Variables A B : Type. -Variable f : A -> B. -Variables x y z : A. -Theorem eq_sym : x = y -> y = x. -Theorem eq_trans : x = y -> y = z -> x = z. -Theorem f_equal : x = y -> f x = f y. -Theorem not_eq_sym : x <> y -> y <> x. -\end{coq_example*} -\begin{coq_eval} -Abort. -Abort. -Abort. -Abort. -\end{coq_eval} -\ttindex{eq\_ind\_r} -\ttindex{eq\_rec\_r} -\ttindex{eq\_rect} -\ttindex{eq\_rect\_r} -%Definition eq_rect: (A:Set)(x:A)(P:A->Type)(P x)->(y:A)(x=y)->(P y). -\begin{coq_example*} -End equality. 
-Definition eq_ind_r : - forall (A:Type) (x:A) (P:A->Prop), P x -> forall y:A, y = x -> P y. -Definition eq_rec_r : - forall (A:Type) (x:A) (P:A->Set), P x -> forall y:A, y = x -> P y. -Definition eq_rect_r : - forall (A:Type) (x:A) (P:A->Type), P x -> forall y:A, y = x -> P y. -\end{coq_example*} -\begin{coq_eval} -Abort. -Abort. -Abort. -\end{coq_eval} -%Abort (for now predefined eq_rect) -\begin{coq_example*} -Hint Immediate eq_sym not_eq_sym : core. -\end{coq_example*} -\ttindex{f\_equal$i$} - -The theorem {\tt f\_equal} is extended to functions with two to five -arguments. The theorem are names {\tt f\_equal2}, {\tt f\_equal3}, -{\tt f\_equal4} and {\tt f\_equal5}. -For instance {\tt f\_equal3} is defined the following way. -\begin{coq_example*} -Theorem f_equal3 : - forall (A1 A2 A3 B:Type) (f:A1 -> A2 -> A3 -> B) - (x1 y1:A1) (x2 y2:A2) (x3 y3:A3), - x1 = y1 -> x2 = y2 -> x3 = y3 -> f x1 x2 x3 = f y1 y2 y3. -\end{coq_example*} -\begin{coq_eval} -Abort. -\end{coq_eval} - -\subsection[Datatypes]{Datatypes\label{Datatypes} -\index{Datatypes}} - -\begin{figure} -\begin{centerframe} -\begin{tabular}{rclr} -{\specif} & ::= & {\specif} {\tt *} {\specif} & ({\tt prod})\\ - & $|$ & {\specif} {\tt +} {\specif} & ({\tt sum})\\ - & $|$ & {\specif} {\tt + \{} {\specif} {\tt \}} & ({\tt sumor})\\ - & $|$ & {\tt \{} {\specif} {\tt \} + \{} {\specif} {\tt \}} & - ({\tt sumbool})\\ - & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt |} {\form} {\tt \}} - & ({\tt sig})\\ - & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt |} {\form} {\tt \&} - {\form} {\tt \}} & ({\tt sig2})\\ - & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt \&} {\specif} {\tt - \}} & ({\tt sigT})\\ - & $|$ & {\tt \{} {\ident} {\tt :} {\specif} {\tt \&} {\specif} {\tt - \&} {\specif} {\tt \}} & ({\tt sigT2})\\ - & & & \\ -{\term} & ::= & {\tt (} {\term} {\tt ,} {\term} {\tt )} & ({\tt pair}) -\end{tabular} -\end{centerframe} -\caption{Syntax of data-types and specifications} -\label{specif-syntax} 
-\end{figure} - - -In the basic library, we find the definition\footnote{They are in {\tt - Datatypes.v}} of the basic data-types of programming, again -defined as inductive constructions over the sort \verb:Set:. Some of -them come with a special syntax shown on Figure~\ref{specif-syntax}. - -\subsubsection[Programming]{Programming\label{Programming} -\index{Programming} -\label{libnats} -\ttindex{unit} -\ttindex{tt} -\ttindex{bool} -\ttindex{true} -\ttindex{false} -\ttindex{nat} -\ttindex{O} -\ttindex{S} -\ttindex{option} -\ttindex{Some} -\ttindex{None} -\ttindex{identity} -\ttindex{refl\_identity}} - -\begin{coq_example*} -Inductive unit : Set := tt. -Inductive bool : Set := true | false. -Inductive nat : Set := O | S (n:nat). -Inductive option (A:Set) : Set := Some (_:A) | None. -Inductive identity (A:Type) (a:A) : A -> Type := - refl_identity : identity A a a. -\end{coq_example*} - -Note that zero is the letter \verb:O:, and {\sl not} the numeral -\verb:0:. - -The predicate {\tt identity} is logically -equivalent to equality but it lives in sort {\tt - Type}. It is mainly maintained for compatibility. - -We then define the disjoint sum of \verb:A+B: of two sets \verb:A: and -\verb:B:, and their product \verb:A*B:. -\ttindex{sum} -\ttindex{A+B} -\ttindex{+} -\ttindex{inl} -\ttindex{inr} -\ttindex{prod} -\ttindex{A*B} -\ttindex{*} -\ttindex{pair} -\ttindex{fst} -\ttindex{snd} - -\begin{coq_example*} -Inductive sum (A B:Set) : Set := inl (_:A) | inr (_:B). -Inductive prod (A B:Set) : Set := pair (_:A) (_:B). -Section projections. -Variables A B : Set. -Definition fst (H: prod A B) := match H with - | pair _ _ x y => x - end. -Definition snd (H: prod A B) := match H with - | pair _ _ x y => y - end. -End projections. -\end{coq_example*} - -Some operations on {\tt bool} are also provided: {\tt andb} (with -infix notation {\tt \&\&}), {\tt orb} (with -infix notation {\tt ||}), {\tt xorb}, {\tt implb} and {\tt negb}. 
- -\subsection{Specification} - -The following notions\footnote{They are defined in module {\tt -Specif.v}} allow to build new data-types and specifications. -They are available with the syntax shown on -Figure~\ref{specif-syntax}. - -For instance, given \verb|A:Type| and \verb|P:A->Prop|, the construct -\verb+{x:A | P x}+ (in abstract syntax \verb+(sig A P)+) is a -\verb:Type:. We may build elements of this set as \verb:(exist x p): -whenever we have a witness \verb|x:A| with its justification -\verb|p:P x|. - -From such a \verb:(exist x p): we may in turn extract its witness -\verb|x:A| (using an elimination construct such as \verb:match:) but -{\sl not} its justification, which stays hidden, like in an abstract -data-type. In technical terms, one says that \verb:sig: is a ``weak -(dependent) sum''. A variant \verb:sig2: with two predicates is also -provided. - -\ttindex{\{x:A $\mid$ (P x)\}} -\ttindex{sig} -\ttindex{exist} -\ttindex{sig2} -\ttindex{exist2} - -\begin{coq_example*} -Inductive sig (A:Set) (P:A -> Prop) : Set := exist (x:A) (_:P x). -Inductive sig2 (A:Set) (P Q:A -> Prop) : Set := - exist2 (x:A) (_:P x) (_:Q x). -\end{coq_example*} - -A ``strong (dependent) sum'' \verb+{x:A & P x}+ may be also defined, -when the predicate \verb:P: is now defined as a -constructor of types in \verb:Type:. - -\ttindex{\{x:A \& (P x)\}} -\ttindex{\&} -\ttindex{sigT} -\ttindex{existT} -\ttindex{projT1} -\ttindex{projT2} -\ttindex{sigT2} -\ttindex{existT2} - -\begin{coq_example*} -Inductive sigT (A:Type) (P:A -> Type) : Type := existT (x:A) (_:P x). -Section Projections2. -Variable A : Type. -Variable P : A -> Type. -Definition projT1 (H:sigT A P) := let (x, h) := H in x. -Definition projT2 (H:sigT A P) := - match H return P (projT1 H) with - existT _ _ x h => h - end. -End Projections2. -Inductive sigT2 (A: Type) (P Q:A -> Type) : Type := - existT2 (x:A) (_:P x) (_:Q x). 
-\end{coq_example*} - -A related non-dependent construct is the constructive sum -\verb"{A}+{B}" of two propositions \verb:A: and \verb:B:. -\label{sumbool} -\ttindex{sumbool} -\ttindex{left} -\ttindex{right} -\ttindex{\{A\}+\{B\}} - -\begin{coq_example*} -Inductive sumbool (A B:Prop) : Set := left (_:A) | right (_:B). -\end{coq_example*} - -This \verb"sumbool" construct may be used as a kind of indexed boolean -data-type. An intermediate between \verb"sumbool" and \verb"sum" is -the mixed \verb"sumor" which combines \verb"A:Set" and \verb"B:Prop" -in the \verb"Set" \verb"A+{B}". -\ttindex{sumor} -\ttindex{inleft} -\ttindex{inright} -\ttindex{A+\{B\}} - -\begin{coq_example*} -Inductive sumor (A:Set) (B:Prop) : Set := -| inleft (_:A) -| inright (_:B). -\end{coq_example*} - -We may define variants of the axiom of choice, like in Martin-Löf's -Intuitionistic Type Theory. -\ttindex{Choice} -\ttindex{Choice2} -\ttindex{bool\_choice} - -\begin{coq_example*} -Lemma Choice : - forall (S S':Set) (R:S -> S' -> Prop), - (forall x:S, {y : S' | R x y}) -> - {f : S -> S' | forall z:S, R z (f z)}. -Lemma Choice2 : - forall (S S':Set) (R:S -> S' -> Set), - (forall x:S, {y : S' & R x y}) -> - {f : S -> S' & forall z:S, R z (f z)}. -Lemma bool_choice : - forall (S:Set) (R1 R2:S -> Prop), - (forall x:S, {R1 x} + {R2 x}) -> - {f : S -> bool | - forall x:S, f x = true /\ R1 x \/ f x = false /\ R2 x}. -\end{coq_example*} -\begin{coq_eval} -Abort. -Abort. -Abort. -\end{coq_eval} - -The next construct builds a sum between a data-type \verb|A:Type| and -an exceptional value encoding errors: - -\ttindex{Exc} -\ttindex{value} -\ttindex{error} - -\begin{coq_example*} -Definition Exc := option. -Definition value := Some. -Definition error := None. -\end{coq_example*} - - -This module ends with theorems, -relating the sorts \verb:Set: or \verb:Type: and -\verb:Prop: in a way which is consistent with the realizability -interpretation. 
-\ttindex{False\_rect} -\ttindex{False\_rec} -\ttindex{eq\_rect} -\ttindex{absurd\_set} -\ttindex{and\_rect} - -\begin{coq_example*} -Definition except := False_rec. -Theorem absurd_set : forall (A:Prop) (C:Set), A -> ~ A -> C. -Theorem and_rect2 : - forall (A B:Prop) (P:Type), (A -> B -> P) -> A /\ B -> P. -\end{coq_example*} -%\begin{coq_eval} -%Abort. -%Abort. -%\end{coq_eval} - -\subsection{Basic Arithmetics} - -The basic library includes a few elementary properties of natural -numbers, together with the definitions of predecessor, addition and -multiplication\footnote{This is in module {\tt Peano.v}}. It also -provides a scope {\tt nat\_scope} gathering standard notations for -common operations (+, *) and a decimal notation for numbers. That is he -can write \texttt{3} for \texttt{(S (S (S O)))}. This also works on -the left hand side of a \texttt{match} expression (see for example -section~\ref{refine-example}). This scope is opened by default. - -%Remove the redefinition of nat -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -The following example is not part of the standard library, but it -shows the usage of the notations: - -\begin{coq_example*} -Fixpoint even (n:nat) : bool := - match n with - | 0 => true - | 1 => false - | S (S n) => even n - end. -\end{coq_example*} - - -\ttindex{eq\_S} -\ttindex{pred} -\ttindex{pred\_Sn} -\ttindex{eq\_add\_S} -\ttindex{not\_eq\_S} -\ttindex{IsSucc} -\ttindex{O\_S} -\ttindex{n\_Sn} -\ttindex{plus} -\ttindex{plus\_n\_O} -\ttindex{plus\_n\_Sm} -\ttindex{mult} -\ttindex{mult\_n\_O} -\ttindex{mult\_n\_Sm} - -\begin{coq_example*} -Theorem eq_S : forall x y:nat, x = y -> S x = S y. -\end{coq_example*} -\begin{coq_eval} -Abort. -\end{coq_eval} -\begin{coq_example*} -Definition pred (n:nat) : nat := - match n with - | 0 => 0 - | S u => u - end. -Theorem pred_Sn : forall m:nat, m = pred (S m). -Theorem eq_add_S : forall n m:nat, S n = S m -> n = m. -Hint Immediate eq_add_S : core. 
-Theorem not_eq_S : forall n m:nat, n <> m -> S n <> S m. -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} -\begin{coq_example*} -Definition IsSucc (n:nat) : Prop := - match n with - | 0 => False - | S p => True - end. -Theorem O_S : forall n:nat, 0 <> S n. -Theorem n_Sn : forall n:nat, n <> S n. -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} -\begin{coq_example*} -Fixpoint plus (n m:nat) {struct n} : nat := - match n with - | 0 => m - | S p => S (p + m) - end -where "n + m" := (plus n m) : nat_scope. -Lemma plus_n_O : forall n:nat, n = n + 0. -Lemma plus_n_Sm : forall n m:nat, S (n + m) = n + S m. -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} -\begin{coq_example*} -Fixpoint mult (n m:nat) {struct n} : nat := - match n with - | 0 => 0 - | S p => m + p * m - end -where "n * m" := (mult n m) : nat_scope. -Lemma mult_n_O : forall n:nat, 0 = n * 0. -Lemma mult_n_Sm : forall n m:nat, n * m + n = n * (S m). -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} - -Finally, it gives the definition of the usual orderings \verb:le:, -\verb:lt:, \verb:ge:, and \verb:gt:. -\ttindex{le} -\ttindex{le\_n} -\ttindex{le\_S} -\ttindex{lt} -\ttindex{ge} -\ttindex{gt} - -\begin{coq_example*} -Inductive le (n:nat) : nat -> Prop := - | le_n : le n n - | le_S : forall m:nat, n <= m -> n <= (S m) -where "n <= m" := (le n m) : nat_scope. -Definition lt (n m:nat) := S n <= m. -Definition ge (n m:nat) := m <= n. -Definition gt (n m:nat) := m < n. -\end{coq_example*} - -Properties of these relations are not initially known, but may be -required by the user from modules \verb:Le: and \verb:Lt:. Finally, -\verb:Peano: gives some lemmas allowing pattern-matching, and a double -induction principle. - -\ttindex{nat\_case} -\ttindex{nat\_double\_ind} - -\begin{coq_example*} -Theorem nat_case : - forall (n:nat) (P:nat -> Prop), - P 0 -> (forall m:nat, P (S m)) -> P n. -\end{coq_example*} -\begin{coq_eval} -Abort All. 
-\end{coq_eval} -\begin{coq_example*} -Theorem nat_double_ind : - forall R:nat -> nat -> Prop, - (forall n:nat, R 0 n) -> - (forall n:nat, R (S n) 0) -> - (forall n m:nat, R n m -> R (S n) (S m)) -> forall n m:nat, R n m. -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} - -\subsection{Well-founded recursion} - -The basic library contains the basics of well-founded recursion and -well-founded induction\footnote{This is defined in module {\tt Wf.v}}. -\index{Well foundedness} -\index{Recursion} -\index{Well founded induction} -\ttindex{Acc} -\ttindex{Acc\_inv} -\ttindex{Acc\_rect} -\ttindex{well\_founded} - -\begin{coq_example*} -Section Well_founded. -Variable A : Type. -Variable R : A -> A -> Prop. -Inductive Acc (x:A) : Prop := - Acc_intro : (forall y:A, R y x -> Acc y) -> Acc x. -Lemma Acc_inv x : Acc x -> forall y:A, R y x -> Acc y. -\end{coq_example*} -\begin{coq_eval} -destruct 1; trivial. -Defined. -\end{coq_eval} -%% Acc_rect now primitively defined -%% Section AccRec. -%% Variable P : A -> Set. -%% Variable F : -%% forall x:A, -%% (forall y:A, R y x -> Acc y) -> (forall y:A, R y x -> P y) -> P x. -%% Fixpoint Acc_rec (x:A) (a:Acc x) {struct a} : P x := -%% F x (Acc_inv x a) -%% (fun (y:A) (h:R y x) => Acc_rec y (Acc_inv x a y h)). -%% End AccRec. -\begin{coq_example*} -Definition well_founded := forall a:A, Acc a. -Hypothesis Rwf : well_founded. -Theorem well_founded_induction : - forall P:A -> Set, - (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. -Theorem well_founded_ind : - forall P:A -> Prop, - (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} -The automatically generated scheme {\tt Acc\_rect} -can be used to define functions by fixpoints using -well-founded relations to justify termination. Assuming -extensionality of the functional used for the recursive call, the -fixpoint equation can be proved. 
-\ttindex{Fix\_F} -\ttindex{fix\_eq} -\ttindex{Fix\_F\_inv} -\ttindex{Fix\_F\_eq} -\begin{coq_example*} -Section FixPoint. -Variable P : A -> Type. -Variable F : forall x:A, (forall y:A, R y x -> P y) -> P x. -Fixpoint Fix_F (x:A) (r:Acc x) {struct r} : P x := - F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)). -Definition Fix (x:A) := Fix_F x (Rwf x). -Hypothesis F_ext : - forall (x:A) (f g:forall y:A, R y x -> P y), - (forall (y:A) (p:R y x), f y p = g y p) -> F x f = F x g. -Lemma Fix_F_eq : - forall (x:A) (r:Acc x), - F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)) = Fix_F x r. -Lemma Fix_F_inv : forall (x:A) (r s:Acc x), Fix_F x r = Fix_F x s. -Lemma fix_eq : forall x:A, Fix x = F x (fun (y:A) (p:R y x) => Fix y). -\end{coq_example*} -\begin{coq_eval} -Abort All. -\end{coq_eval} -\begin{coq_example*} -End FixPoint. -End Well_founded. -\end{coq_example*} - -\subsection{Accessing the {\Type} level} - -The basic library includes the definitions\footnote{This is in module -{\tt Logic\_Type.v}} of the counterparts of some data-types and logical -quantifiers at the \verb:Type: level: negation, pair, and properties -of {\tt identity}. - -\ttindex{notT} -\ttindex{prodT} -\ttindex{pairT} -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example*} -Definition notT (A:Type) := A -> False. -Inductive prodT (A B:Type) : Type := pairT (_:A) (_:B). -\end{coq_example*} - -At the end, it defines data-types at the {\Type} level. - -\subsection{Tactics} - -A few tactics defined at the user level are provided in the initial -state\footnote{This is in module {\tt Tactics.v}}. They are listed at -\url{http://coq.inria.fr/stdlib} (paragraph {\tt Init}, link {\tt - Tactics}). 
- -\section{The standard library} - -\subsection{Survey} - -The rest of the standard library is structured into the following -subdirectories: - -\begin{tabular}{lp{12cm}} - {\bf Logic} & Classical logic and dependent equality \\ - {\bf Arith} & Basic Peano arithmetic \\ - {\bf PArith} & Basic positive integer arithmetic \\ - {\bf NArith} & Basic binary natural number arithmetic \\ - {\bf ZArith} & Basic relative integer arithmetic \\ - {\bf Numbers} & Various approaches to natural, integer and cyclic numbers (currently axiomatically and on top of 2$^{31}$ binary words) \\ - {\bf Bool} & Booleans (basic functions and results) \\ - {\bf Lists} & Monomorphic and polymorphic lists (basic functions and - results), Streams (infinite sequences defined with co-inductive - types) \\ - {\bf Sets} & Sets (classical, constructive, finite, infinite, power set, - etc.) \\ - {\bf FSets} & Specification and implementations of finite sets and finite - maps (by lists and by AVL trees)\\ - {\bf Reals} & Axiomatization of real numbers (classical, basic functions, - integer part, fractional part, limit, derivative, Cauchy - series, power series and results,...)\\ - {\bf Relations} & Relations (definitions and basic results) \\ - {\bf Sorting} & Sorted list (basic definitions and heapsort correctness) \\ - {\bf Strings} & 8-bits characters and strings\\ - {\bf Wellfounded} & Well-founded relations (basic results) \\ - -\end{tabular} -\medskip - -These directories belong to the initial load path of the system, and -the modules they provide are compiled at installation time. So they -are directly accessible with the command \verb!Require! (see -Chapter~\ref{Other-commands}). - -The different modules of the \Coq\ standard library are described in the -additional document \verb!Library.dvi!. They are also accessible on the WWW -through the \Coq\ homepage -\footnote{\url{http://coq.inria.fr}}. 
- -\subsection[Notations for integer arithmetics]{Notations for integer arithmetics\index{Arithmetical notations}} - -On Figure~\ref{zarith-syntax} is described the syntax of expressions -for integer arithmetics. It is provided by requiring and opening the -module {\tt ZArith} and opening scope {\tt Z\_scope}. - -\ttindex{+} -\ttindex{*} -\ttindex{-} -\ttindex{/} -\ttindex{<=} -\ttindex{>=} -\ttindex{<} -\ttindex{>} -\ttindex{?=} -\ttindex{mod} - -\begin{figure} -\begin{center} -\begin{tabular}{l|l|l|l} -Notation & Interpretation & Precedence & Associativity\\ -\hline -\verb!_ < _! & {\tt Z.lt} &&\\ -\verb!x <= y! & {\tt Z.le} &&\\ -\verb!_ > _! & {\tt Z.gt} &&\\ -\verb!x >= y! & {\tt Z.ge} &&\\ -\verb!x < y < z! & {\tt x < y \verb!/\! y < z} &&\\ -\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} &&\\ -\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} &&\\ -\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} &&\\ -\verb!_ ?= _! & {\tt Z.compare} & 70 & no\\ -\verb!_ + _! & {\tt Z.add} &&\\ -\verb!_ - _! & {\tt Z.sub} &&\\ -\verb!_ * _! & {\tt Z.mul} &&\\ -\verb!_ / _! & {\tt Z.div} &&\\ -\verb!_ mod _! & {\tt Z.modulo} & 40 & no \\ -\verb!- _! & {\tt Z.opp} &&\\ -\verb!_ ^ _! & {\tt Z.pow} &&\\ -\end{tabular} -\end{center} -\caption{Definition of the scope for integer arithmetics ({\tt Z\_scope})} -\label{zarith-syntax} -\end{figure} - -Figure~\ref{zarith-syntax} shows the notations provided by {\tt -Z\_scope}. It specifies how notations are interpreted and, when not -already reserved, the precedence and associativity. - -\begin{coq_example*} -Require Import ZArith. -\end{coq_example*} -\begin{coq_example} -Check (2 + 3)%Z. -Open Scope Z_scope. -Check 2 + 3. -\end{coq_example} - -\subsection[Peano's arithmetic (\texttt{nat})]{Peano's arithmetic (\texttt{nat})\index{Peano's arithmetic} -\ttindex{nat\_scope}} - -While in the initial state, many operations and predicates of Peano's -arithmetic are defined, further operations and results belong to other -modules. 
For instance, the decidability of the basic predicates are -defined here. This is provided by requiring the module {\tt Arith}. - -Figure~\ref{nat-syntax} describes notation available in scope {\tt -nat\_scope}. - -\begin{figure} -\begin{center} -\begin{tabular}{l|l} -Notation & Interpretation \\ -\hline -\verb!_ < _! & {\tt lt} \\ -\verb!x <= y! & {\tt le} \\ -\verb!_ > _! & {\tt gt} \\ -\verb!x >= y! & {\tt ge} \\ -\verb!x < y < z! & {\tt x < y \verb!/\! y < z} \\ -\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} \\ -\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} \\ -\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} \\ -\verb!_ + _! & {\tt plus} \\ -\verb!_ - _! & {\tt minus} \\ -\verb!_ * _! & {\tt mult} \\ -\end{tabular} -\end{center} -\caption{Definition of the scope for natural numbers ({\tt nat\_scope})} -\label{nat-syntax} -\end{figure} - -\subsection{Real numbers library} - -\subsubsection[Notations for real numbers]{Notations for real numbers\index{Notations for real numbers}} - -This is provided by requiring and opening the module {\tt Reals} and -opening scope {\tt R\_scope}. This set of notations is very similar to -the notation for integer arithmetics. The inverse function was added. -\begin{figure} -\begin{center} -\begin{tabular}{l|l} -Notation & Interpretation \\ -\hline -\verb!_ < _! & {\tt Rlt} \\ -\verb!x <= y! & {\tt Rle} \\ -\verb!_ > _! & {\tt Rgt} \\ -\verb!x >= y! & {\tt Rge} \\ -\verb!x < y < z! & {\tt x < y \verb!/\! y < z} \\ -\verb!x < y <= z! & {\tt x < y \verb!/\! y <= z} \\ -\verb!x <= y < z! & {\tt x <= y \verb!/\! y < z} \\ -\verb!x <= y <= z! & {\tt x <= y \verb!/\! y <= z} \\ -\verb!_ + _! & {\tt Rplus} \\ -\verb!_ - _! & {\tt Rminus} \\ -\verb!_ * _! & {\tt Rmult} \\ -\verb!_ / _! & {\tt Rdiv} \\ -\verb!- _! & {\tt Ropp} \\ -\verb!/ _! & {\tt Rinv} \\ -\verb!_ ^ _! 
& {\tt pow} \\ -\end{tabular} -\end{center} -\label{reals-syntax} -\caption{Definition of the scope for real arithmetics ({\tt R\_scope})} -\end{figure} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example*} -Require Import Reals. -\end{coq_example*} -\begin{coq_example} -Check (2 + 3)%R. -Open Scope R_scope. -Check 2 + 3. -\end{coq_example} - -\subsubsection{Some tactics} - -In addition to the \verb|ring|, \verb|field| and \verb|fourier| -tactics (see Chapter~\ref{Tactics}) there are: -\begin{itemize} -\item {\tt discrR} \tacindex{discrR} - - Proves that a real integer constant $c_1$ is different from another - real integer constant $c_2$. - -\begin{coq_example*} -Require Import DiscrR. -Goal 5 <> 0. -\end{coq_example*} - -\begin{coq_example} -discrR. -\end{coq_example} - -\begin{coq_eval} -Abort. -\end{coq_eval} - -\item {\tt split\_Rabs} allows unfolding the {\tt Rabs} constant and splits -corresponding conjunctions. -\tacindex{split\_Rabs} - -\begin{coq_example*} -Require Import SplitAbsolu. -Goal forall x:R, x <= Rabs x. -\end{coq_example*} - -\begin{coq_example} -intro; split_Rabs. -\end{coq_example} - -\begin{coq_eval} -Abort. -\end{coq_eval} - -\item {\tt split\_Rmult} splits a condition that a product is - non null into subgoals corresponding to the condition on each - operand of the product. -\tacindex{split\_Rmult} - -\begin{coq_example*} -Require Import SplitRmult. -Goal forall x y z:R, x * y * z <> 0. -\end{coq_example*} - -\begin{coq_example} -intros; split_Rmult. -\end{coq_example} - -\end{itemize} - -These tactics has been written with the tactic language Ltac -described in Chapter~\ref{TacticLanguage}. - -\begin{coq_eval} -Reset Initial. 
-\end{coq_eval}
-
-\subsection[List library]{List library\index{Notations for lists}
-\ttindex{length}
-\ttindex{head}
-\ttindex{tail}
-\ttindex{app}
-\ttindex{rev}
-\ttindex{nth}
-\ttindex{map}
-\ttindex{flat\_map}
-\ttindex{fold\_left}
-\ttindex{fold\_right}}
-
-Some elementary operations on polymorphic lists are defined here. They
-can be accessed by requiring module {\tt List}.
-
-It defines the following notions:
-\begin{center}
-\begin{tabular}{l|l}
-\hline
-{\tt length} & length \\
-{\tt head} & first element (with default) \\
-{\tt tail} & all but first element \\
-{\tt app} & concatenation \\
-{\tt rev} & reverse \\
-{\tt nth} & accessing $n$-th element (with default) \\
-{\tt map} & applying a function \\
-{\tt flat\_map} & applying a function returning lists \\
-{\tt fold\_left} & iterator (from head to tail) \\
-{\tt fold\_right} & iterator (from tail to head) \\
-\hline
-\end{tabular}
-\end{center}
-
-The table below shows the notations available when opening scope {\tt list\_scope}.
-
-\begin{figure}
-\begin{center}
-\begin{tabular}{l|l|l|l}
-Notation & Interpretation & Precedence & Associativity\\
-\hline
-\verb!_ ++ _! & {\tt app} & 60 & right \\
-\verb!_ :: _! & {\tt cons} & 60 & right \\
-\end{tabular}
-\end{center}
-\label{list-syntax}
-\caption{Definition of the scope for lists ({\tt list\_scope})}
-\end{figure}
-
-
-\section[Users' contributions]{Users' contributions\index{Contributions}
-\label{Contributions}}
-
-Numerous users' contributions have been collected and are available at
-URL \url{http://coq.inria.fr/contribs/}. On this web page, you have a list
-of all contributions with information (author, institution, quick
-description, etc.) and the possibility to download them one by one.
-You will also find information on how to submit a new
-contribution.
- -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-mod.tex b/doc/refman/RefMan-mod.tex deleted file mode 100644 index b4e270e6c3..0000000000 --- a/doc/refman/RefMan-mod.tex +++ /dev/null @@ -1,428 +0,0 @@ -\section{Module system -\index{Modules} -\label{section:Modules}} - -The module system provides a way of packaging related elements -together, as well as a means of massive abstraction. - -\begin{figure}[t] -\begin{centerframe} -\begin{tabular}{rcl} -{\modtype} & ::= & {\qualid} \\ - & $|$ & {\modtype} \texttt{ with Definition }{\qualid} := {\term} \\ - & $|$ & {\modtype} \texttt{ with Module }{\qualid} := {\qualid} \\ - & $|$ & {\qualid} \nelist{\qualid}{}\\ - & $|$ & $!${\qualid} \nelist{\qualid}{}\\ - &&\\ - -{\onemodbinding} & ::= & {\tt ( [Import|Export] \nelist{\ident}{} : {\modtype} )}\\ - &&\\ - -{\modbindings} & ::= & \nelist{\onemodbinding}{}\\ - &&\\ - -{\modexpr} & ::= & \nelist{\qualid}{} \\ - & $|$ & $!$\nelist{\qualid}{} -\end{tabular} -\end{centerframe} -\caption{Syntax of modules} -\end{figure} - -In the syntax of module application, the $!$ prefix indicates that -any {\tt Inline} directive in the type of the functor arguments -will be ignored (see \ref{Inline} below). - -\subsection{\tt Module {\ident} -\comindex{Module}} - -This command is used to start an interactive module named {\ident}. - -\begin{Variants} - -\item{\tt Module {\ident} {\modbindings}} - - Starts an interactive functor with parameters given by {\modbindings}. - -\item{\tt Module {\ident} \verb.:. {\modtype}} - - Starts an interactive module specifying its module type. - -\item{\tt Module {\ident} {\modbindings} \verb.:. {\modtype}} - - Starts an interactive functor with parameters given by - {\modbindings}, and output module type {\modtype}. - -\item{\tt Module {\ident} \verb.<:. {\modtype$_1$} \verb.<:. $\ldots$ \verb.<:.{ \modtype$_n$}} - - Starts an interactive module satisfying each {\modtype$_i$}. 
- -\item{\tt Module {\ident} {\modbindings} \verb.<:. {\modtype$_1$} \verb.<:. $\ldots$ \verb.<:. {\modtype$_n$}} - - Starts an interactive functor with parameters given by - {\modbindings}. The output module type is verified against each - module type {\modtype$_i$}. - -\item\texttt{Module [Import|Export]} - - Behaves like \texttt{Module}, but automatically imports or exports - the module. - -\end{Variants} -\subsubsection{Reserved commands inside an interactive module: -\comindex{Include}} -\begin{enumerate} -\item {\tt Include {\module}} - - Includes the content of {\module} in the current interactive - module. Here {\module} can be a module expression or a module type - expression. If {\module} is a high-order module or module type - expression then the system tries to instantiate {\module} - by the current interactive module. - -\item {\tt Include {\module$_1$} \verb.<+. $\ldots$ \verb.<+. {\module$_n$}} - -is a shortcut for {\tt Include {\module$_1$}} $\ldots$ {\tt Include {\module$_n$}} -\end{enumerate} -\subsection{\tt End {\ident} -\comindex{End}} - -This command closes the interactive module {\ident}. If the module type -was given the content of the module is matched against it and an error -is signaled if the matching fails. If the module is basic (is not a -functor) its components (constants, inductive types, submodules etc) are -now available through the dot notation. - -\begin{ErrMsgs} -\item \errindex{No such label {\ident}} -\item \errindex{Signature components for label {\ident} do not match} -\item \errindex{This is not the last opened module} -\end{ErrMsgs} - - -\subsection{\tt Module {\ident} := {\modexpr} -\comindex{Module}} - -This command defines the module identifier {\ident} to be equal to -{\modexpr}. - -\begin{Variants} -\item{\tt Module {\ident} {\modbindings} := {\modexpr}} - - Defines a functor with parameters given by {\modbindings} and body {\modexpr}. - -% Particular cases of the next 2 items -%\item{\tt Module {\ident} \verb.:. 
{\modtype} := {\modexpr}} -% -% Defines a module with body {\modexpr} and interface {\modtype}. -%\item{\tt Module {\ident} \verb.<:. {\modtype} := {\modexpr}} -% -% Defines a module with body {\modexpr}, satisfying {\modtype}. - -\item{\tt Module {\ident} {\modbindings} \verb.:. {\modtype} := - {\modexpr}} - - Defines a functor with parameters given by {\modbindings} (possibly none), - and output module type {\modtype}, with body {\modexpr}. - -\item{\tt Module {\ident} {\modbindings} \verb.<:. {\modtype$_1$} \verb.<:. $\ldots$ \verb.<:. {\modtype$_n$}:= - {\modexpr}} - - Defines a functor with parameters given by {\modbindings} (possibly none) - with body {\modexpr}. The body is checked against each {\modtype$_i$}. - -\item{\tt Module {\ident} {\modbindings} := {\modexpr$_1$} \verb.<+. $\ldots$ \verb.<+. {\modexpr$_n$}} - - is equivalent to an interactive module where each {\modexpr$_i$} are included. - -\end{Variants} - -\subsection{\tt Module Type {\ident} -\comindex{Module Type}} - -This command is used to start an interactive module type {\ident}. - -\begin{Variants} - -\item{\tt Module Type {\ident} {\modbindings}} - - Starts an interactive functor type with parameters given by {\modbindings}. - -\end{Variants} -\subsubsection{Reserved commands inside an interactive module type: -\comindex{Include}\comindex{Inline}} -\label{Inline} -\begin{enumerate} -\item {\tt Include {\module}} - - Same as {\tt Include} inside a module. - -\item {\tt Include {\module$_1$} \verb.<+. $\ldots$ \verb.<+. {\module$_n$}} - -is a shortcut for {\tt Include {\module$_1$}} $\ldots$ {\tt Include {\module$_n$}} - -\item {\tt {\assumptionkeyword} Inline {\assums} } - - The instance of this assumption will be automatically expanded at functor - application, except when this functor application is prefixed by a $!$ annotation. -\end{enumerate} -\subsection{\tt End {\ident} -\comindex{End}} - -This command closes the interactive module type {\ident}. 
- -\begin{ErrMsgs} -\item \errindex{This is not the last opened module type} -\end{ErrMsgs} - -\subsection{\tt Module Type {\ident} := {\modtype}} - -Defines a module type {\ident} equal to {\modtype}. - -\begin{Variants} -\item {\tt Module Type {\ident} {\modbindings} := {\modtype}} - - Defines a functor type {\ident} specifying functors taking arguments - {\modbindings} and returning {\modtype}. - -\item{\tt Module Type {\ident} {\modbindings} := {\modtype$_1$} \verb.<+. $\ldots$ \verb.<+. {\modtype$_n$}} - - is equivalent to an interactive module type were each {\modtype$_i$} are included. - -\end{Variants} - -\subsection{\tt Declare Module {\ident} : {\modtype} -\comindex{Declare Module}} - -Declares a module {\ident} of type {\modtype}. - -\begin{Variants} - -\item{\tt Declare Module {\ident} {\modbindings} \verb.:. {\modtype}} - - Declares a functor with parameters {\modbindings} and output module - type {\modtype}. - - -\end{Variants} - - -\subsubsection{Example} - -Let us define a simple module. -\begin{coq_example} -Module M. - Definition T := nat. - Definition x := 0. - Definition y : bool. - exact true. - Defined. -End M. -\end{coq_example} -Inside a module one can define constants, prove theorems and do any -other things that can be done in the toplevel. Components of a closed -module can be accessed using the dot notation: -\begin{coq_example} -Print M.x. -\end{coq_example} -A simple module type: -\begin{coq_example} -Module Type SIG. - Parameter T : Set. - Parameter x : T. -End SIG. -\end{coq_example} - -Now we can create a new module from \texttt{M}, giving it a less -precise specification: the \texttt{y} component is dropped as well -as the body of \texttt{x}. - -\begin{coq_eval} -Set Printing Depth 50. -\end{coq_eval} -% (********** The following is not correct and should produce **********) -% (***************** Error: N.y not a defined object *******************) -\begin{coq_example} -Module N : SIG with Definition T := nat := M. -Print N.T. 
-Print N.x. -Fail Print N.y. -\end{coq_example} -\begin{coq_eval} -Reset N. -\end{coq_eval} - -\noindent -The definition of \texttt{N} using the module type expression -\texttt{SIG with Definition T:=nat} is equivalent to the following -one: - -\begin{coq_example*} -Module Type SIG'. - Definition T : Set := nat. - Parameter x : T. -End SIG'. -Module N : SIG' := M. -\end{coq_example*} -If we just want to be sure that the our implementation satisfies a -given module type without restricting the interface, we can use a -transparent constraint -\begin{coq_example} -Module P <: SIG := M. -Print P.y. -\end{coq_example} -Now let us create a functor, i.e. a parametric module -\begin{coq_example} -Module Two (X Y: SIG). -\end{coq_example} -\begin{coq_example*} - Definition T := (X.T * Y.T)%type. - Definition x := (X.x, Y.x). -\end{coq_example*} -\begin{coq_example} -End Two. -\end{coq_example} -and apply it to our modules and do some computations -\begin{coq_example} -Module Q := Two M N. -Eval compute in (fst Q.x + snd Q.x). -\end{coq_example} -In the end, let us define a module type with two sub-modules, sharing -some of the fields and give one of its possible implementations: -\begin{coq_example} -Module Type SIG2. - Declare Module M1 : SIG. - Module M2 <: SIG. - Definition T := M1.T. - Parameter x : T. - End M2. -End SIG2. -\end{coq_example} -\begin{coq_example*} -Module Mod <: SIG2. - Module M1. - Definition T := nat. - Definition x := 1. - End M1. - Module M2 := M. -\end{coq_example*} -\begin{coq_example} -End Mod. -\end{coq_example} -Notice that \texttt{M} is a correct body for the component \texttt{M2} -since its \texttt{T} component is equal \texttt{nat} and hence -\texttt{M1.T} as specified. -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\begin{Remarks} -\item Modules and module types can be nested components of each other. -\item One can have sections inside a module or a module type, but - not a module or a module type inside a section. 
-\item Commands like \texttt{Hint} or \texttt{Notation} can - also appear inside modules and module types. Note that in case of a - module definition like: - - \smallskip - \noindent - {\tt Module N : SIG := M.} - \smallskip - - or - - \smallskip - {\tt Module N : SIG.\\ - \ \ \dots\\ - End N.} - \smallskip - - hints and the like valid for \texttt{N} are not those defined in - \texttt{M} (or the module body) but the ones defined in - \texttt{SIG}. - -\end{Remarks} - -\subsection{\tt Import {\qualid} -\comindex{Import} -\label{Import}} - -If {\qualid} denotes a valid basic module (i.e. its module type is a -signature), makes its components available by their short names. - -Example: - -\begin{coq_example} -Module Mod. - Definition T:=nat. - Check T. -End Mod. -Check Mod.T. -Fail Check T. (* Incorrect! *) -Import Mod. -Check T. (* Now correct *) -\end{coq_example} -\begin{coq_eval} -Reset Mod. -\end{coq_eval} - -Some features defined in modules are activated only when a module is -imported. This is for instance the case of notations (see -Section~\ref{Notation}). - -Declarations made with the {\tt Local} flag are never imported by the -{\tt Import} command. Such declarations are only accessible through their -fully qualified name. - -Example: - -\begin{coq_example} -Module A. -Module B. -Local Definition T := nat. -End B. -End A. -Import A. -Fail Check B.T. -\end{coq_example} - -\begin{Variants} -\item{\tt Export {\qualid}}\comindex{Export} - - When the module containing the command {\tt Export {\qualid}} is - imported, {\qualid} is imported as well. -\end{Variants} - -\begin{ErrMsgs} - \item \errindexbis{{\qualid} is not a module}{is not a module} -% this error is impossible in the import command -% \item \errindex{Cannot mask the absolute name {\qualid} !} -\end{ErrMsgs} - -\begin{Warnings} - \item Trying to mask the absolute name {\qualid} ! 
-\end{Warnings} - -\subsection{\tt Print Module {\ident} -\comindex{Print Module} \optindex{Short Module Printing}} - -Prints the module type and (optionally) the body of the module {\ident}. - -For this command and {\tt Print Module Type}, the option {\tt Short - Module Printing} (off by default) disables the printing of the types of fields, -leaving only their names. - -\subsection{\tt Print Module Type {\ident} -\comindex{Print Module Type}} - -Prints the module type corresponding to {\ident}. - -\subsection{\tt Locate Module {\qualid} -\comindex{Locate Module}} - -Prints the full name of the module {\qualid}. - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-modr.tex b/doc/refman/RefMan-modr.tex deleted file mode 100644 index 7c672cf422..0000000000 --- a/doc/refman/RefMan-modr.tex +++ /dev/null @@ -1,564 +0,0 @@ -\chapter[The Module System]{The Module System\label{chapter:Modules}} -%HEVEA\cutname{modules.html} - -The module system extends the Calculus of Inductive Constructions -providing a convenient way to structure large developments as well as -a means of massive abstraction. -%It is described in details in Judicael's thesis and Jacek's thesis - -\section{Modules and module types} - -\paragraph{Access path.} It is denoted by $p$, it can be either a module -variable $X$ or, if $p'$ is an access path and $id$ an identifier, then -$p'.id$ is an access path. - -\paragraph{Structure element.} It is denoted by \elem\ and is either a -definition of a constant, an assumption, a definition of an inductive, - a definition of a module, an alias of module or a module type abbreviation. 
-
-\paragraph{Structure expression.} It is denoted by $S$ and can be:
-\begin{itemize}
-\item an access path $p$
-\item a plain structure $\struct{\nelist{\elem}{;}}$
-\item a functor $\functor{X}{S}{S'}$, where $X$ is a module variable,
-  $S$ and $S'$ are structure expressions
-\item an application $S\,p$, where $S$ is a structure expression and $p$
-an access path
-\item a refined structure $\with{S}{p}{p'}$ or $\with{S}{p}{t:T}$ where $S$
-is a structure expression, $p$ and $p'$ are access paths, $t$ is a term
-and $T$ is the type of $t$.
-\end{itemize}
-
-\paragraph{Module definition,} is written $\Mod{X}{S}{S'}$ and
- consists of a module variable $X$, a module type
-$S$ which can be any structure expression and optionally a module implementation $S'$
- which can be any structure expression except a refined structure.
-
-\paragraph{Module alias,} is written $\ModA{X}{p}$ and
- consists of a module variable $X$ and a module path $p$.
-
-\paragraph{Module type abbreviation,} is written $\ModType{Y}{S}$, where
-$Y$ is an identifier and $S$ is any structure expression.
-
-
-\section{Typing Modules}
-
-In order to introduce the typing system we first slightly extend
-the syntactic class of terms and environments given in
-section~\ref{Terms}. The environments, apart from definitions of
-constants and inductive types now also hold any other structure elements.
-Terms, apart from variables, constants and complex terms,
-include also access paths.
-
-We also need additional typing judgments:
-\begin{itemize}
-\item \WFT{E}{S}, denoting that a structure $S$ is well-formed,
-
-\item \WTM{E}{p}{S}, denoting that the module pointed to by $p$ has type $S$ in
-environment $E$.
-
-\item \WEV{E}{S}{\overline{S}}, denoting that a structure $S$ is evaluated to
-a structure $\overline{S}$ in weak head normal form.
-
-\item \WS{E}{S_1}{S_2}, denoting that a structure $S_1$ is a subtype of a
-structure $S_2$.
-
-\item \WS{E}{\elem_1}{\elem_2}, denoting that a structure element
-  $\elem_1$ is more precise than a structure element $\elem_2$.
-\end{itemize}
-The rules for forming structures are the following:
-\begin{description}
-\item[WF-STR]
-\inference{%
-  \frac{
-    \WF{E;E'}{}
-  }{%%%%%%%%%%%%%%%%%%%%%
-    \WFT{E}{\struct{E'}}
-  }
-}
-\item[WF-FUN]
-\inference{%
-  \frac{
-    \WFT{E;\ModS{X}{S}}{\overline{S'}}
-  }{%%%%%%%%%%%%%%%%%%%%%%%%%%
-    \WFT{E}{\functor{X}{S}{S'}}
-  }
-}
-\end{description}
-Evaluation of structures to weak head normal form:
-\begin{description}
-\item[WEVAL-APP]
-\inference{%
-  \frac{
-    \begin{array}{c}
-      \WEV{E}{S}{\functor{X}{S_1}{S_2}}~~~~~\WEV{E}{S_1}{\overline{S_1}}\\
-      \WTM{E}{p}{S_3}\qquad \WS{E}{S_3}{\overline{S_1}}
-    \end{array}
-  }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-    \WEV{E}{S\,p}{S_2\{p/X,t_1/p_1.c_1,\ldots,t_n/p_n.c_n\}}
-  }
-}
-\end{description}
-In the last rule, $\{t_1/p_1.c_1,\ldots,t_n/p_n.c_n\}$ is the resulting
- substitution from the inlining mechanism. We substitute in $S$ the
- inlined fields $p_i.c_i$ from $\ModS{X}{S_1}$ by the corresponding delta-reduced term $t_i$ in $p$.
-\begin{description} -\item[WEVAL-WITH-MOD] -\inference{% - \frac{ - \begin{array}{c} - \WEV{E}{S}{\structe{\ModS{X}{S_1}}}~~~~~\WEV{E;\elem_1;\ldots;\elem_i}{S_1}{\overline{S_1}}\\ - \WTM{E}{p}{S_2}\qquad \WS{E;\elem_1;\ldots;\elem_i}{S_2}{\overline{S_1}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \begin{array}{c} - \WEVT{E}{\with{S}{x}{p}}{\structes{\ModA{X}{p}}{p/X}} - \end{array} - } -} -\item[WEVAL-WITH-MOD-REC] -\inference{% - \frac{ - \begin{array}{c} - \WEV{E}{S}{\structe{\ModS{X_1}{S_1}}}\\ - \WEV{E;\elem_1;\ldots;\elem_i}{\with{S_1}{p}{p_1}}{\overline{S_2}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \begin{array}{c} - \WEVT{E}{\with{S}{X_1.p}{p_1}}{\structes{\ModS{X}{\overline{S_2}}}{p_1/X_1.p}} - \end{array} - } -} -\item[WEVAL-WITH-DEF] -\inference{% - \frac{ - \begin{array}{c} - \WEV{E}{S}{\structe{\Assum{}{c}{T_1}}}\\ - \WS{E;\elem_1;\ldots;\elem_i}{\Def{}{c}{t}{T}}{\Assum{}{c}{T_1}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \begin{array}{c} - \WEVT{E}{\with{S}{c}{t:T}}{\structe{\Def{}{c}{t}{T}}} - \end{array} - } -} -\item[WEVAL-WITH-DEF-REC] -\inference{% - \frac{ - \begin{array}{c} - \WEV{E}{S}{\structe{\ModS{X_1}{S_1}}}\\ - \WEV{E;\elem_1;\ldots;\elem_i}{\with{S_1}{p}{p_1}}{\overline{S_2}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \begin{array}{c} - \WEVT{E}{\with{S}{X_1.p}{t:T}}{\structe{\ModS{X}{\overline{S_2}}}} - \end{array} - } -} - -\item[WEVAL-PATH-MOD] -\inference{% - \frac{ - \begin{array}{c} - \WEV{E}{p}{\structe{ \Mod{X}{S}{S_1}}}\\ - \WEV{E;\elem_1;\ldots;\elem_i}{S}{\overline{S}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WEV{E}{p.X}{\overline{S}} - } -} -\inference{% - \frac{ - \begin{array}{c} - \WF{E}{}~~~~~~\Mod{X}{S}{S_1}\in E\\ - \WEV{E}{S}{\overline{S}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WEV{E}{X}{\overline{S}} - } -} -\item[WEVAL-PATH-ALIAS] -\inference{% - \frac{ - 
\begin{array}{c} - \WEV{E}{p}{\structe{\ModA{X}{p_1}}}\\ - \WEV{E;\elem_1;\ldots;\elem_i}{p_1}{\overline{S}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WEV{E}{p.X}{\overline{S}} - } -} -\inference{% - \frac{ - \begin{array}{c} - \WF{E}{}~~~~~~~\ModA{X}{p_1}\in E\\ - \WEV{E}{p_1}{\overline{S}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WEV{E}{X}{\overline{S}} - } -} -\item[WEVAL-PATH-TYPE] -\inference{% - \frac{ - \begin{array}{c} - \WEV{E}{p}{\structe{\ModType{Y}{S}}}\\ - \WEV{E;\elem_1;\ldots;\elem_i}{S}{\overline{S}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WEV{E}{p.Y}{\overline{S}} - } -} -\item[WEVAL-PATH-TYPE] -\inference{% - \frac{ - \begin{array}{c} - \WF{E}{}~~~~~~~\ModType{Y}{S}\in E\\ - \WEV{E}{S}{\overline{S}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WEV{E}{Y}{\overline{S}} - } -} -\end{description} - Rules for typing module: -\begin{description} -\item[MT-EVAL] -\inference{% - \frac{ - \WEV{E}{p}{\overline{S}} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WTM{E}{p}{\overline{S}} - } -} -\item[MT-STR] -\inference{% - \frac{ - \WTM{E}{p}{S} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WTM{E}{p}{S/p} - } -} -\end{description} -The last rule, called strengthening is used to make all module fields -manifestly equal to themselves. 
The notation $S/p$ has the following -meaning: -\begin{itemize} -\item if $S\lra\struct{\elem_1;\dots;\elem_n}$ then - $S/p=\struct{\elem_1/p;\dots;\elem_n/p}$ where $\elem/p$ is defined as - follows: - \begin{itemize} - \item $\Def{}{c}{t}{T}/p\footnote{Opaque definitions are processed as assumptions.} ~=~ \Def{}{c}{t}{T}$ - \item $\Assum{}{c}{U}/p ~=~ \Def{}{c}{p.c}{U}$ - \item $\ModS{X}{S}/p ~=~ \ModA{X}{p.X}$ - \item $\ModA{X}{p'}/p ~=~ \ModA{X}{p'}$ - \item $\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}/p ~=~ \Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ - \item $\Indpstr{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'}{p} ~=~ \Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'}$ - \end{itemize} -\item if $S\lra\functor{X}{S'}{S''}$ then $S/p=S$ -\end{itemize} -The notation $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ denotes an -inductive definition that is definitionally equal to the inductive -definition in the module denoted by the path $p$. All rules which have -$\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}$ as premises are also valid for -$\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$. We give the formation rule -for $\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}$ below as well as -the equality rules on inductive types and constructors. 
\\ - -The module subtyping rules: -\begin{description} -\item[MSUB-STR] -\inference{% - \frac{ - \begin{array}{c} - \WS{E;\elem_1;\dots;\elem_n}{\elem_{\sigma(i)}}{\elem'_i} - \textrm{ \ for } i=1..m \\ - \sigma : \{1\dots m\} \ra \{1\dots n\} \textrm{ \ injective} - \end{array} - }{ - \WS{E}{\struct{\elem_1;\dots;\elem_n}}{\struct{\elem'_1;\dots;\elem'_m}} - } -} -\item[MSUB-FUN] -\inference{% T_1 -> T_2 <: T_1' -> T_2' - \frac{ - \WS{E}{\overline{S_1'}}{\overline{S_1}}~~~~~~~~~~\WS{E;\ModS{X}{S_1'}}{\overline{S_2}}{\overline{S_2'}} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WS{E}{\functor{X}{S_1}{S_2}}{\functor{X}{S_1'}{S_2'}} - } -} -% these are derived rules -% \item[MSUB-EQ] -% \inference{% -% \frac{ -% \WS{E}{T_1}{T_2}~~~~~~~~~~\WTERED{}{T_1}{=}{T_1'}~~~~~~~~~~\WTERED{}{T_2}{=}{T_2'} -% }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% \WS{E}{T_1'}{T_2'} -% } -% } -% \item[MSUB-REFL] -% \inference{% -% \frac{ -% \WFT{E}{T} -% }{ -% \WS{E}{T}{T} -% } -% } -\end{description} -Structure element subtyping rules: -\begin{description} -\item[ASSUM-ASSUM] -\inference{% - \frac{ - \WTELECONV{}{T_1}{T_2} - }{ - \WSE{\Assum{}{c}{T_1}}{\Assum{}{c}{T_2}} - } -} -\item[DEF-ASSUM] -\inference{% - \frac{ - \WTELECONV{}{T_1}{T_2} - }{ - \WSE{\Def{}{c}{t}{T_1}}{\Assum{}{c}{T_2}} - } -} -\item[ASSUM-DEF] -\inference{% - \frac{ - \WTELECONV{}{T_1}{T_2}~~~~~~~~\WTECONV{}{c}{t_2} - }{ - \WSE{\Assum{}{c}{T_1}}{\Def{}{c}{t_2}{T_2}} - } -} -\item[DEF-DEF] -\inference{% - \frac{ - \WTELECONV{}{T_1}{T_2}~~~~~~~~\WTECONV{}{t_1}{t_2} - }{ - \WSE{\Def{}{c}{t_1}{T_1}}{\Def{}{c}{t_2}{T_2}} - } -} -\item[IND-IND] -\inference{% - \frac{ - \WTECONV{}{\Gamma_P}{\Gamma_P'}% - ~~~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% - ~~~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% - }{ - \WSE{\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}}% - {\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}} - } -} -\item[INDP-IND] -\inference{% - \frac{ - \WTECONV{}{\Gamma_P}{\Gamma_P'}% - 
~~~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% - ~~~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% - }{ - \WSE{\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}% - {\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}} - } -} -\item[INDP-INDP] -\inference{% - \frac{ - \WTECONV{}{\Gamma_P}{\Gamma_P'}% - ~~~~~~\WTECONV{\Gamma_P}{\Gamma_C}{\Gamma_C'}% - ~~~~~~\WTECONV{\Gamma_P;\Gamma_C}{\Gamma_I}{\Gamma_I'}% - ~~~~~~\WTECONV{}{p}{p'} - }{ - \WSE{\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}% - {\Indp{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}{p'}} - } -} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\item[MOD-MOD] -\inference{% - \frac{ - \WSE{S_1}{S_2} - }{ - \WSE{\ModS{X}{S_1}}{\ModS{X}{S_2}} - } -} -\item[ALIAS-MOD] -\inference{% - \frac{ - \WTM{E}{p}{S_1}~~~~~~~~\WSE{S_1}{S_2} - }{ - \WSE{\ModA{X}{p}}{\ModS{X}{S_2}} - } -} -\item[MOD-ALIAS] -\inference{% - \frac{ - \WTM{E}{p}{S_2}~~~~~~~~ - \WSE{S_1}{S_2}~~~~~~~~\WTECONV{}{X}{p} - }{ - \WSE{\ModS{X}{S_1}}{\ModA{X}{p}} - } -} -\item[ALIAS-ALIAS] -\inference{% - \frac{ - \WTECONV{}{p_1}{p_2} - }{ - \WSE{\ModA{X}{p_1}}{\ModA{X}{p_2}} - } -} -\item[MODTYPE-MODTYPE] -\inference{% - \frac{ - \WSE{S_1}{S_2}~~~~~~~~\WSE{S_2}{S_1} - }{ - \WSE{\ModType{Y}{S_1}}{\ModType{Y}{S_2}} - } -} -\end{description} -New environment formation rules -\begin{description} -\item[WF-MOD] -\inference{% - \frac{ - \WF{E}{}~~~~~~~~\WFT{E}{S} - }{ - \WF{E;\ModS{X}{S}}{} - } -} -\item[WF-MOD] -\inference{% - \frac{ -\begin{array}{c} - \WS{E}{S_2}{S_1}\\ - \WF{E}{}~~~~~\WFT{E}{S_1}~~~~~\WFT{E}{S_2} -\end{array} - }{ - \WF{E;\Mod{X}{S_1}{S_2}}{} - } -} - -\item[WF-ALIAS] -\inference{% - \frac{ - \WF{E}{}~~~~~~~~~~~\WTE{}{p}{S} - }{ - \WF{E,\ModA{X}{p}}{} - } -} -\item[WF-MODTYPE] -\inference{% - \frac{ - \WF{E}{}~~~~~~~~~~~\WFT{E}{S} - }{ - \WF{E,\ModType{Y}{S}}{} - } -} -\item[WF-IND] -\inference{% - \frac{ - \begin{array}{c} - \WF{E;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}}{}\\ - 
\WT{E}{}{p:\struct{\elem_1;\dots;\elem_n;\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'};\dots}}\\ - \WS{E}{\Ind{}{\Gamma_P'}{\Gamma_C'}{\Gamma_I'}}{\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I}} - \end{array} - }{%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - \WF{E;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p}}{} - } -} -\end{description} -Component access rules -\begin{description} -\item[ACC-TYPE] -\inference{% - \frac{ - \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Assum{}{c}{T};\dots}} - }{ - \WTEG{p.c}{T} - } -} -\\ -\inference{% - \frac{ - \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Def{}{c}{t}{T};\dots}} - }{ - \WTEG{p.c}{T} - } -} -\item[ACC-DELTA] -Notice that the following rule extends the delta rule defined in -section~\ref{delta} -\inference{% - \frac{ - \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Def{}{c}{t}{U};\dots}} - }{ - \WTEGRED{p.c}{\triangleright_\delta}{t} - } -} -\\ -In the rules below we assume $\Gamma_P$ is $[p_1:P_1;\ldots;p_r:P_r]$, - $\Gamma_I$ is $[I_1:A_1;\ldots;I_k:A_k]$, and $\Gamma_C$ is - $[c_1:C_1;\ldots;c_n:C_n]$ -\item[ACC-IND] -\inference{% - \frac{ - \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I};\dots}} - }{ - \WTEG{p.I_j}{(p_1:P_1)\ldots(p_r:P_r)A_j} - } -} -\inference{% - \frac{ - \WTEG{p}{\struct{\elem_1;\dots;\elem_i;\Ind{}{\Gamma_P}{\Gamma_C}{\Gamma_I};\dots}} - }{ - \WTEG{p.c_m}{(p_1:P_1)\ldots(p_r:P_r){C_m}{I_j}{(I_j~p_1\ldots - p_r)}_{j=1\ldots k}} - } -} -\item[ACC-INDP] -\inference{% - \frac{ - \WT{E}{}{p}{\struct{\elem_1;\dots;\elem_i;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'};\dots}} - }{ - \WTRED{E}{}{p.I_i}{\triangleright_\delta}{p'.I_i} - } -} -\inference{% - \frac{ - \WT{E}{}{p}{\struct{\elem_1;\dots;\elem_i;\Indp{}{\Gamma_P}{\Gamma_C}{\Gamma_I}{p'};\dots}} - }{ - \WTRED{E}{}{p.c_i}{\triangleright_\delta}{p'.c_i} - } -} - -\end{description} - -% %%% replaced by \triangle_\delta -% Module path equality is a transitive and reflexive closure of the -% relation generated by ACC-MODEQ and ENV-MODEQ. 
-% \begin{itemize} -% \item []MP-EQ-REFL -% \inference{% -% \frac{ -% \WTEG{p}{T} -% }{ -% \WTEG{p}{p} -% } -% } -% \item []MP-EQ-TRANS -% \inference{% -% \frac{ -% \WTEGRED{p}{=}{p'}~~~~~~\WTEGRED{p'}{=}{p''} -% }{ -% \WTEGRED{p'}{=}{p''} -% } -% } - -% \end{itemize} - - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: - diff --git a/doc/refman/RefMan-pre.tex b/doc/refman/RefMan-pre.tex deleted file mode 100644 index 05775bfbe5..0000000000 --- a/doc/refman/RefMan-pre.tex +++ /dev/null @@ -1,1351 +0,0 @@ -%BEGIN LATEX -\setheaders{Credits} -%END LATEX -\chapter*{Credits} -%HEVEA\cutname{credits.html} -%\addcontentsline{toc}{section}{Credits} - -\Coq{}~ is a proof assistant for higher-order logic, allowing the -development of computer programs consistent with their formal -specification. It is the result of about ten years of research of the -Coq project. We shall briefly survey here three main aspects: the -\emph{logical language} in which we write our axiomatizations and -specifications, the \emph{proof assistant} which allows the development -of verified mathematical proofs, and the \emph{program extractor} which -synthesizes computer programs obeying their formal specifications, -written as logical assertions in the language. - -The logical language used by {\Coq} is a variety of type theory, -called the \emph{Calculus of Inductive Constructions}. Without going -back to Leibniz and Boole, we can date the creation of what is now -called mathematical logic to the work of Frege and Peano at the turn -of the century. The discovery of antinomies in the free use of -predicates or comprehension principles prompted Russell to restrict -predicate calculus with a stratification of \emph{types}. This effort -culminated with \emph{Principia Mathematica}, the first systematic -attempt at a formal foundation of mathematics. 
A simplification of -this system along the lines of simply typed $\lambda$-calculus -occurred with Church's \emph{Simple Theory of Types}. The -$\lambda$-calculus notation, originally used for expressing -functionality, could also be used as an encoding of natural deduction -proofs. This Curry-Howard isomorphism was used by N. de Bruijn in the -\emph{Automath} project, the first full-scale attempt to develop and -mechanically verify mathematical proofs. This effort culminated with -Jutting's verification of Landau's \emph{Grundlagen} in the 1970's. -Exploiting this Curry-Howard isomorphism, notable achievements in -proof theory saw the emergence of two type-theoretic frameworks; the -first one, Martin-L\"of's \emph{Intuitionistic Theory of Types}, -attempts a new foundation of mathematics on constructive principles. -The second one, Girard's polymorphic $\lambda$-calculus $F_\omega$, is -a very strong functional system in which we may represent higher-order -logic proof structures. Combining both systems in a higher-order -extension of the Automath languages, T. Coquand presented in 1985 the -first version of the \emph{Calculus of Constructions}, CoC. This strong -logical system allowed powerful axiomatizations, but direct inductive -definitions were not possible, and inductive notions had to be defined -indirectly through functional encodings, which introduced -inefficiencies and awkwardness. The formalism was extended in 1989 by -T. Coquand and C. Paulin with primitive inductive definitions, leading -to the current \emph{Calculus of Inductive Constructions}. This -extended formalism is not rigorously defined here. Rather, numerous -concrete examples are discussed. We refer the interested reader to -relevant research papers for more information about the formalism, its -meta-theoretic properties, and semantics. However, it should not be -necessary to understand this theoretical material in order to write -specifications. 
It is possible to understand the Calculus of Inductive -Constructions at a higher level, as a mixture of predicate calculus, -inductive predicate definitions presented as typed PROLOG, and -recursive function definitions close to the language ML. - -Automated theorem-proving was pioneered in the 1960's by Davis and -Putnam in propositional calculus. A complete mechanization (in the -sense of a semi-decision procedure) of classical first-order logic was -proposed in 1965 by J.A. Robinson, with a single uniform inference -rule called \emph{resolution}. Resolution relies on solving equations -in free algebras (i.e. term structures), using the \emph{unification - algorithm}. Many refinements of resolution were studied in the -1970's, but few convincing implementations were realized, except of -course that PROLOG is in some sense issued from this effort. A less -ambitious approach to proof development is computer-aided -proof-checking. The most notable proof-checkers developed in the -1970's were LCF, designed by R. Milner and his colleagues at U. -Edinburgh, specialized in proving properties about denotational -semantics recursion equations, and the Boyer and Moore theorem-prover, -an automation of primitive recursion over inductive data types. While -the Boyer-Moore theorem-prover attempted to synthesize proofs by a -combination of automated methods, LCF constructed its proofs through -the programming of \emph{tactics}, written in a high-level functional -meta-language, ML. - -The salient feature which clearly distinguishes our proof assistant -from say LCF or Boyer and Moore's, is its possibility to extract -programs from the constructive contents of proofs. This computational -interpretation of proof objects, in the tradition of Bishop's -constructive mathematics, is based on a realizability interpretation, -in the sense of Kleene, due to C. Paulin. 
The user must just mark his -intention by separating in the logical statements the assertions -stating the existence of a computational object from the logical -assertions which specify its properties, but which may be considered -as just comments in the corresponding program. Given this information, -the system automatically extracts a functional term from a consistency -proof of its specifications. This functional term may be in turn -compiled into an actual computer program. This methodology of -extracting programs from proofs is a revolutionary paradigm for -software engineering. Program synthesis has long been a theme of -research in artificial intelligence, pioneered by R. Waldinger. The -Tablog system of Z. Manna and R. Waldinger allows the deductive -synthesis of functional programs from proofs in tableau form of their -specifications, written in a variety of first-order logic. Development -of a systematic \emph{programming logic}, based on extensions of -Martin-L\"of's type theory, was undertaken at Cornell U. by the Nuprl -team, headed by R. Constable. The first actual program extractor, PX, -was designed and implemented around 1985 by S. Hayashi from Kyoto -University. It allows the extraction of a LISP program from a proof -in a logical system inspired by the logical formalisms of S. Feferman. -Interest in this methodology is growing in the theoretical computer -science community. We can foresee the day when actual computer systems -used in applications will contain certified modules, automatically -generated from a consistency proof of their formal specifications. We -are however still far from being able to use this methodology in a -smooth interaction with the standard tools from software engineering, -i.e. compilers, linkers, run-time systems taking advantage of special -hardware, debuggers, and the like. We hope that {\Coq} can be of use -to researchers interested in experimenting with this new methodology. 
- -A first implementation of CoC was started in 1984 by G. Huet and T. -Coquand. Its implementation language was CAML, a functional -programming language from the ML family designed at INRIA in -Rocquencourt. The core of this system was a proof-checker for CoC seen -as a typed $\lambda$-calculus, called the \emph{Constructive Engine}. -This engine was operated through a high-level notation permitting the -declaration of axioms and parameters, the definition of mathematical -types and objects, and the explicit construction of proof objects -encoded as $\lambda$-terms. A section mechanism, designed and -implemented by G. Dowek, allowed hierarchical developments of -mathematical theories. This high-level language was called the -\emph{Mathematical Vernacular}. Furthermore, an interactive -\emph{Theorem Prover} permitted the incremental construction of proof -trees in a top-down manner, subgoaling recursively and backtracking -from dead-alleys. The theorem prover executed tactics written in CAML, -in the LCF fashion. A basic set of tactics was predefined, which the -user could extend by his own specific tactics. This system (Version -4.10) was released in 1989. Then, the system was extended to deal -with the new calculus with inductive types by C. Paulin, with -corresponding new tactics for proofs by induction. A new standard set -of tactics was streamlined, and the vernacular extended for tactics -execution. A package to compile programs extracted from proofs to -actual computer programs in CAML or some other functional language was -designed and implemented by B. Werner. A new user-interface, relying -on a CAML-X interface by D. de Rauglaudre, was designed and -implemented by A. Felty. It allowed operation of the theorem-prover -through the manipulation of windows, menus, mouse-sensitive buttons, -and other widgets. This system (Version 5.6) was released in 1991. - -\Coq{} was ported to the new implementation Caml-light of X. Leroy and -D. Doligez by D. 
de Rauglaudre (Version 5.7) in 1992. A new version -of \Coq{} was then coordinated by C. Murthy, with new tools designed -by C. Parent to prove properties of ML programs (this methodology is -dual to program extraction) and a new user-interaction loop. This -system (Version 5.8) was released in May 1993. A Centaur interface -\textsc{CTCoq} was then developed by Y. Bertot from the Croap project -from INRIA-Sophia-Antipolis. - -In parallel, G. Dowek and H. Herbelin developed a new proof engine, -allowing the general manipulation of existential variables -consistently with dependent types in an experimental version of \Coq{} -(V5.9). - -The version V5.10 of \Coq{} is based on a generic system for -manipulating terms with binding operators due to Chet Murthy. A new -proof engine allows the parallel development of partial proofs for -independent subgoals. The structure of these proof trees is a mixed -representation of derivation trees for the Calculus of Inductive -Constructions with abstract syntax trees for the tactics scripts, -allowing the navigation in a proof at various levels of details. The -proof engine allows generic environment items managed in an -object-oriented way. This new architecture, due to C. Murthy, -supports several new facilities which make the system easier to extend -and to scale up: - -\begin{itemize} -\item User-programmable tactics are allowed -\item It is possible to separately verify development modules, and to - load their compiled images without verifying them again - a quick - relocation process allows their fast loading -\item A generic parsing scheme allows user-definable notations, with a - symmetric table-driven pretty-printer -\item Syntactic definitions allow convenient abbreviations -\item A limited facility of meta-variables allows the automatic - synthesis of certain type expressions, allowing generic notations - for e.g. equality, pairing, and existential quantification. -\end{itemize} - -In the Fall of 1994, C. 
Paulin-Mohring replaced the structure of
-inductively defined types and families by a new structure, allowing
-mutually recursive definitions. P. Manoury implemented a
-translation of recursive definitions into the primitive recursive
-style imposed by the internal recursion operators, in the style of the
-ProPre system. C. Mu{\~n}oz implemented a decision procedure for
-intuitionistic propositional logic, based on results of R. Dyckhoff.
-J.C. Filli{\^a}tre implemented a decision procedure for first-order
-logic without contraction, based on results of J. Ketonen and R.
-Weyhrauch. Finally C. Murthy implemented a library of inversion
-tactics, relieving the user from tedious definitions of ``inversion
-predicates''.
-
-\begin{flushright}
-Rocquencourt, Feb. 1st 1995\\
-Gérard Huet
-\end{flushright}
-
-\section*{Credits: addendum for version 6.1}
-%\addcontentsline{toc}{section}{Credits: addendum for version V6.1}
-
-The present version 6.1 of \Coq{} is based on the V5.10 architecture. It
-was ported to the new language {\ocaml} by Bruno Barras. The
-underlying framework has slightly changed and allows more conversions
-between sorts.
-
-The new version provides powerful tools for easier developments.
-
-Cristina Cornes designed an extension of the \Coq{} syntax to allow
-definition of terms using a powerful pattern-matching analysis in the
-style of ML programs.
-
-Amokrane Saïbi wrote a mechanism to simulate
-inheritance between type families extending a proposal by Peter
-Aczel. He also developed a mechanism to automatically compute which
-arguments of a constant may be inferred by the system and consequently
-do not need to be explicitly written.
-
-Yann Coscoy designed a command which explains a proof term using
-natural language. Pierre Cr{\'e}gut built a new tactic which solves
-problems in quantifier-free Presburger Arithmetic. Both
-functionalities have been integrated to the \Coq{} system by Hugo
-Herbelin. 
- -Samuel Boutin designed a tactic for simplification of commutative -rings using a canonical set of rewriting rules and equality modulo -associativity and commutativity. - -Finally the organisation of the \Coq{} distribution has been supervised -by Jean-Christophe Filliâtre with the help of Judicaël Courant -and Bruno Barras. - -\begin{flushright} -Lyon, Nov. 18th 1996\\ -Christine Paulin -\end{flushright} - -\section*{Credits: addendum for version 6.2} -%\addcontentsline{toc}{section}{Credits: addendum for version V6.2} - -In version 6.2 of \Coq{}, the parsing is done using camlp4, a -preprocessor and pretty-printer for CAML designed by Daniel de -Rauglaudre at INRIA. Daniel de Rauglaudre made the first adaptation -of \Coq{} for camlp4, this work was continued by Bruno Barras who also -changed the structure of \Coq{} abstract syntax trees and the primitives -to manipulate them. The result of -these changes is a faster parsing procedure with greatly improved -syntax-error messages. The user-interface to introduce grammar or -pretty-printing rules has also changed. - -Eduardo Giménez redesigned the internal -tactic libraries, giving uniform names -to Caml functions corresponding to \Coq{} tactic names. - -Bruno Barras wrote new more efficient reductions functions. - -Hugo Herbelin introduced more uniform notations in the \Coq{} -specification language: the definitions by fixpoints and -pattern-matching have a more readable syntax. Patrick Loiseleur -introduced user-friendly notations for arithmetic expressions. - -New tactics were introduced: Eduardo Giménez improved a mechanism to -introduce macros for tactics, and designed special tactics for -(co)inductive definitions; Patrick Loiseleur designed a tactic to -simplify polynomial expressions in an arbitrary commutative ring which -generalizes the previous tactic implemented by Samuel Boutin. -Jean-Christophe Filli\^atre introduced a tactic for refining a goal, -using a proof term with holes as a proof scheme. 
- -David Delahaye designed the \textsf{SearchIsos} tool to search an -object in the library given its type (up to isomorphism). - -Henri Laulhère produced the \Coq{} distribution for the Windows environment. - -Finally, Hugo Herbelin was the main coordinator of the \Coq{} -documentation with principal contributions by Bruno Barras, David Delahaye, -Jean-Christophe Filli\^atre, Eduardo -Giménez, Hugo Herbelin and Patrick Loiseleur. - -\begin{flushright} -Orsay, May 4th 1998\\ -Christine Paulin -\end{flushright} - -\section*{Credits: addendum for version 6.3} -The main changes in version V6.3 was the introduction of a few new tactics -and the extension of the guard condition for fixpoint definitions. - - -B. Barras extended the unification algorithm to complete partial terms -and solved various tricky bugs related to universes.\\ -D. Delahaye developed the \texttt{AutoRewrite} tactic. He also designed the new -behavior of \texttt{Intro} and provided the tacticals \texttt{First} and -\texttt{Solve}.\\ -J.-C. Filli\^atre developed the \texttt{Correctness} tactic.\\ -E. Gim\'enez extended the guard condition in fixpoints.\\ -H. Herbelin designed the new syntax for definitions and extended the -\texttt{Induction} tactic.\\ -P. Loiseleur developed the \texttt{Quote} tactic and -the new design of the \texttt{Auto} -tactic, he also introduced the index of -errors in the documentation.\\ -C. Paulin wrote the \texttt{Focus} command and introduced -the reduction functions in definitions, this last feature -was proposed by J.-F. Monin from CNET Lannion. - -\begin{flushright} -Orsay, Dec. 1999\\ -Christine Paulin -\end{flushright} - -%\newpage - -\section*{Credits: versions 7} - -The version V7 is a new implementation started in September 1999 by -Jean-Christophe Filliâtre. This is a major revision with respect to -the internal architecture of the system. 
The \Coq{} version 7.0 was -distributed in March 2001, version 7.1 in September 2001, version -7.2 in January 2002, version 7.3 in May 2002 and version 7.4 in -February 2003. - -Jean-Christophe Filliâtre designed the architecture of the new system, he -introduced a new representation for environments and wrote a new kernel -for type-checking terms. His approach was to use functional -data-structures in order to get more sharing, to prepare the addition -of modules and also to get closer to a certified kernel. - -Hugo Herbelin introduced a new structure of terms with local -definitions. He introduced ``qualified'' names, wrote a new -pattern-matching compilation algorithm and designed a more compact -algorithm for checking the logical consistency of universes. He -contributed to the simplification of {\Coq} internal structures and the -optimisation of the system. He added basic tactics for forward -reasoning and coercions in patterns. - -David Delahaye introduced a new language for tactics. General tactics -using pattern-matching on goals and context can directly be written -from the {\Coq} toplevel. He also provided primitives for the design -of user-defined tactics in \textsc{Caml}. - -Micaela Mayero contributed the library on real numbers. -Olivier Desmettre extended this library with axiomatic -trigonometric functions, square, square roots, finite sums, Chasles -property and basic plane geometry. - -Jean-Christophe Filliâtre and Pierre Letouzey redesigned a new -extraction procedure from \Coq{} terms to \textsc{Caml} or -\textsc{Haskell} programs. This new -extraction procedure, unlike the one implemented in previous version -of \Coq{} is able to handle all terms in the Calculus of Inductive -Constructions, even involving universes and strong elimination. P. -Letouzey adapted user contributions to extract ML programs when it was -sensible. -Jean-Christophe Filliâtre wrote \verb=coqdoc=, a documentation -tool for {\Coq} libraries usable from version 7.2. 
- -Bruno Barras improved the reduction algorithms efficiency and -the confidence level in the correctness of {\Coq} critical type-checking -algorithm. - -Yves Bertot designed the \texttt{SearchPattern} and -\texttt{SearchRewrite} tools and the support for the \textsc{pcoq} interface -(\url{http://www-sop.inria.fr/lemme/pcoq/}). - -Micaela Mayero and David Delahaye introduced {\tt Field}, a decision tactic for commutative fields. - -Christine Paulin changed the elimination rules for empty and singleton -propositional inductive types. - -Loïc Pottier developed {\tt Fourier}, a tactic solving linear inequalities on real numbers. - -Pierre Crégut developed a new version based on reflexion of the {\tt Omega} -decision tactic. - -Claudio Sacerdoti Coen designed an XML output for the {\Coq} -modules to be used in the Hypertextual Electronic Library of -Mathematics (HELM cf \url{http://www.cs.unibo.it/helm}). - -A library for efficient representation of finite maps using binary trees -contributed by Jean Goubault was integrated in the basic theories. - -Pierre Courtieu developed a command and a tactic to reason on the -inductive structure of recursively defined functions. - -Jacek Chrz\k{a}szcz designed and implemented the module system of -{\Coq} whose foundations are in Judicaël Courant's PhD thesis. - -\bigskip - -The development was coordinated by C. Paulin. - -Many discussions within the Démons team and the LogiCal project -influenced significantly the design of {\Coq} especially with -%J. Chrz\k{a}szcz, P. Courtieu, -J. Courant, J. Duprat, J. Goubault, A. Miquel, -C. Marché, B. Monate and B. Werner. - -Intensive users suggested improvements of the system : -Y. Bertot, L. Pottier, L. Théry, P. Zimmerman from INRIA, -C. Alvarado, P. Crégut, J.-F. Monin from France Telecom R \& D. -\begin{flushright} -Orsay, May. 
2002\\
-Hugo Herbelin \& Christine Paulin
-\end{flushright}
-
-\section*{Credits: version 8.0}
-
-{\Coq} version 8 is a major revision of the {\Coq} proof assistant.
-First, the underlying logic is slightly different. The so-called {\em
-impredicativity} of the sort {\tt Set} has been dropped. The main
-reason is that it is inconsistent with the principle of description
-which is quite a useful principle for formalizing %classical
-mathematics within classical logic. Moreover, even in a constructive
-setting, the impredicativity of {\tt Set} does not add so much in
-practice and is even subject of criticism from a large part of the
-intuitionistic mathematician community. Nevertheless, the
-impredicativity of {\tt Set} remains optional for users interested in
-investigating mathematical developments which rely on it.
-
-Secondly, the concrete syntax of terms has been completely
-revised. The main motivations were
-
-\begin{itemize}
-\item a more uniform, purified style: all constructions are now lowercase,
-  with a functional programming perfume (e.g. abstraction is now
-  written {\tt fun}), and more directly accessible to the novice
-  (e.g. dependent product is now written {\tt forall} and allows
-  omission of types). Also, parentheses are no longer mandatory
-  for function application.
-\item extensibility: some standard notations (e.g. ``<'' and ``>'') were
-  incompatible with the previous syntax. Now all standard arithmetic
-  notations (=, +, *, /, <, <=, ... and more) are directly part of the
-  syntax.
-\end{itemize}
-
-Together with the revision of the concrete syntax, a new mechanism of
-{\em interpretation scopes} permits to reuse the same symbols
-(typically +, -, *, /, <, <=) in various mathematical theories without
-any ambiguities for {\Coq}, leading to a largely improved readability of
-{\Coq} scripts. New commands to easily add new symbols are also
-provided. 
- -Coming with the new syntax of terms, a slight reform of the tactic -language and of the language of commands has been carried out. The -purpose here is a better uniformity making the tactics and commands -easier to use and to remember. - -Thirdly, a restructuration and uniformisation of the standard library -of {\Coq} has been performed. There is now just one Leibniz' equality -usable for all the different kinds of {\Coq} objects. Also, the set of -real numbers now lies at the same level as the sets of natural and -integer numbers. Finally, the names of the standard properties of -numbers now follow a standard pattern and the symbolic -notations for the standard definitions as well. - -The fourth point is the release of \CoqIDE{}, a new graphical -gtk2-based interface fully integrated to {\Coq}. Close in style from -the Proof General Emacs interface, it is faster and its integration -with {\Coq} makes interactive developments more friendly. All -mathematical Unicode symbols are usable within \CoqIDE{}. - -Finally, the module system of {\Coq} completes the picture of {\Coq} -version 8.0. Though released with an experimental status in the previous -version 7.4, it should be considered as a salient feature of the new -version. - -Besides, {\Coq} comes with its load of novelties and improvements: new -or improved tactics (including a new tactic for solving first-order -statements), new management commands, extended libraries. - -\bigskip - -Bruno Barras and Hugo Herbelin have been the main contributors of the -reflexion and the implementation of the new syntax. The smart -automatic translator from old to new syntax released with {\Coq} is also -their work with contributions by Olivier Desmettre. - -Hugo Herbelin is the main designer and implementor of the notion of -interpretation scopes and of the commands for easily adding new notations. - -Hugo Herbelin is the main implementor of the restructuration of the -standard library. 
-
-Pierre Corbineau is the main designer and implementor of the new
-tactic for solving first-order statements in presence of inductive
-types. He is also the maintainer of the non-domain specific automation
-tactics.
-
-Benjamin Monate is the developer of the \CoqIDE{} graphical
-interface with contributions by Jean-Christophe Filliâtre, Pierre
-Letouzey, Claude Marché and Bruno Barras.
-
-Claude Marché coordinated the edition of the Reference Manual for
- \Coq{} V8.0.
-
-Pierre Letouzey and Jacek Chrz\k{a}szcz respectively maintained the
-extraction tool and module system of {\Coq}.
-
-Jean-Christophe Filliâtre, Pierre Letouzey, Hugo Herbelin and other
-contributors from Sophia-Antipolis and Nijmegen participated to the
-extension of the library.
-
-Julien Narboux built an NSIS-based automatic {\Coq} installation tool for
-the Windows platform.
-
-Hugo Herbelin and Christine Paulin coordinated the development which
-was under the responsibility of Christine Paulin.
-
-\begin{flushright}
-Palaiseau \& Orsay, Apr. 2004\\
-Hugo Herbelin \& Christine Paulin\\
-(updated Apr. 2006)
-\end{flushright}
-
-\section*{Credits: version 8.1}
-
-{\Coq} version 8.1 adds various new functionalities.
-
-Benjamin Grégoire implemented an alternative algorithm to check the
-convertibility of terms in the {\Coq} type-checker. This alternative
-algorithm works by compilation to an efficient bytecode that is
-interpreted in an abstract machine similar to Xavier Leroy's ZINC
-machine. Convertibility is performed by comparing the normal
-forms. This alternative algorithm is specifically interesting for
-proofs by reflection. More generally, it is convenient in case of
-intensive computations.
-
-Christine Paulin implemented an extension of inductive types allowing
-recursively non uniform parameters. Hugo Herbelin implemented
-sort-polymorphism for inductive types (now called template polymorphism). 
-
-Claudio Sacerdoti Coen improved the tactics for rewriting on arbitrary
-compatible equivalence relations. He also generalized rewriting to
-arbitrary transition systems.
-
-Claudio Sacerdoti Coen added new features to the module system.
-
-Benjamin Grégoire, Assia Mahboubi and Bruno Barras developed a new
-more efficient and more general simplification algorithm on rings and
-semi-rings.
-
-Laurent Théry and Bruno Barras developed a new significantly more efficient
-simplification algorithm on fields.
-
-Hugo Herbelin, Pierre Letouzey, Julien Forest, Julien Narboux and
-Claudio Sacerdoti Coen added new tactic features.
-
-Hugo Herbelin implemented matching on disjunctive patterns.
-
-New mechanisms made easier the communication between {\Coq} and external
-provers. Nicolas Ayache and Jean-Christophe Filliâtre implemented
-connections with the provers {\sc cvcl}, {\sc Simplify} and {\sc
-zenon}. Hugo Herbelin implemented an experimental protocol for calling
-external tools from the tactic language.
-
-Matthieu Sozeau developed \textsc{Russell}, an experimental language
-to specify the behavior of programs with subtypes.
-
-A mechanism to automatically use some specific tactic to solve
-unresolved implicit has been implemented by Hugo Herbelin.
-
-Laurent Théry's contribution on strings and Pierre Letouzey and
-Jean-Christophe Filliâtre's contribution on finite maps have been
-integrated to the {\Coq} standard library. Pierre Letouzey developed a
-library about finite sets ``à la {\ocaml}''. With Jean-Marc
-Notin, he extended the library on lists. Pierre Letouzey's
-contribution on rational numbers has been integrated and extended.
-
-Pierre Corbineau extended his tactic for solving first-order
-statements. He wrote a reflection-based intuitionistic tautology
-solver.
-
-Pierre Courtieu, Julien Forest and Yves Bertot added extra support to
-reason on the inductive structure of recursively defined functions.
- -Jean-Marc Notin significantly contributed to the general maintenance -of the system. He also took care of {\textsf{coqdoc}}. - -Pierre Castéran contributed to the documentation of (co-)inductive -types and suggested improvements to the libraries. - -Pierre Corbineau implemented a declarative mathematical proof -language, usable in combination with the tactic-based style of proof. - -Finally, many users suggested improvements of the system through the -Coq-Club mailing list and bug-tracker systems, especially user groups -from INRIA Rocquencourt, Radboud University, University of -Pennsylvania and Yale University. - -\enlargethispage{\baselineskip} -\begin{flushright} -Palaiseau, July 2006\\ -Hugo Herbelin -\end{flushright} - -\section*{Credits: version 8.2} - -{\Coq} version 8.2 adds new features, new libraries and -improves on many various aspects. - -Regarding the language of Coq, the main novelty is the introduction by -Matthieu Sozeau of a package of commands providing Haskell-style -type classes. Type classes, that come with a few convenient features -such as type-based resolution of implicit arguments, plays a new role -of landmark in the architecture of Coq with respect to automatization. -For instance, thanks to type classes support, Matthieu Sozeau could -implement a new resolution-based version of the tactics dedicated to -rewriting on arbitrary transitive relations. - -Another major improvement of Coq 8.2 is the evolution of the -arithmetic libraries and of the tools associated to them. Benjamin -Grégoire and Laurent Théry contributed a modular library for building -arbitrarily large integers from bounded integers while Evgeny Makarov -contributed a modular library of abstract natural and integer -arithmetics together with a few convenient tactics. 
On his side, -Pierre Letouzey made numerous extensions to the arithmetic libraries on -$\mathbb{Z}$ and $\mathbb{Q}$, including extra support for -automatization in presence of various number-theory concepts. - -Frédéric Besson contributed a reflexive tactic based on -Krivine-Stengle Positivstellensatz (the easy way) for validating -provability of systems of inequalities. The platform is flexible enough -to support the validation of any algorithm able to produce a -``certificate'' for the Positivstellensatz and this covers the case of -Fourier-Motzkin (for linear systems in $\mathbb{Q}$ and $\mathbb{R}$), -Fourier-Motzkin with cutting planes (for linear systems in -$\mathbb{Z}$) and sum-of-squares (for non-linear systems). Evgeny -Makarov made the platform generic over arbitrary ordered rings. - -Arnaud Spiwack developed a library of 31-bits machine integers and, -relying on Benjamin Grégoire and Laurent Théry's library, delivered a -library of unbounded integers in base $2^{31}$. As importantly, he -developed a notion of ``retro-knowledge'' so as to safely extend the -kernel-located bytecode-based efficient evaluation algorithm of Coq -version 8.1 to use 31-bits machine arithmetics for efficiently -computing with the library of integers he developed. - -Beside the libraries, various improvements contributed to provide a -more comfortable end-user language and more expressive tactic -language. Hugo Herbelin and Matthieu Sozeau improved the -pattern-matching compilation algorithm (detection of impossible -clauses in pattern-matching, automatic inference of the return -type). Hugo Herbelin, Pierre Letouzey and Matthieu Sozeau contributed -various new convenient syntactic constructs and new tactics or tactic -features: more inference of redundant information, better unification, -better support for proof or definition by fixpoint, more expressive -rewriting tactics, better support for meta-variables, more convenient -notations, ... 
-
-Élie Soubiran improved the module system, adding new features (such as
-an ``include'' command) and making it more flexible and more
-general. He and Pierre Letouzey improved the support for modules in
-the extraction mechanism.
-
-Matthieu Sozeau extended the \textsc{Russell} language, ending in a
-convenient way to write programs of given specifications, Pierre
-Corbineau extended the Mathematical Proof Language and the
-automatization tools that accompany it, Pierre Letouzey supervised and
-extended various parts of the standard library, Stéphane Glondu
-contributed a few tactics and improvements, Jean-Marc Notin provided
-help in debugging, general maintenance and {\tt coqdoc} support,
-Vincent Siles contributed extensions of the {\tt Scheme} command and
-of {\tt injection}.
-
-Bruno Barras implemented the {\tt coqchk} tool: this is a stand-alone
-type-checker that can be used to certify {\tt .vo} files. Especially,
-as this verifier runs in a separate process, it is guaranteed not to be
-``hijacked'' by virtually malicious extensions added to {\Coq}.
-
-Yves Bertot, Jean-Christophe Filliâtre, Pierre Courtieu and
-Julien Forest acted as maintainers of features they implemented in
-previous versions of Coq.
-
-Julien Narboux contributed to {\CoqIDE}.
-Nicolas Tabareau made the adaptation of the interface of the old
-``setoid rewrite'' tactic to the new version. Lionel Mamane worked on
-the interaction between Coq and its external interfaces. With Samuel
-Mimram, he also helped making Coq compatible with recent software
-tools. Russell O'Connor, Cezary Kaliszczyk, Milad Niqui contributed to
-improve the libraries of integers, rational, and real numbers.
We -also thank many users and partners for suggestions and feedback, in -particular Pierre Castéran and Arthur Charguéraud, the INRIA Marelle -team, Georges Gonthier and the INRIA-Microsoft Mathematical Components team, -the Foundations group at Radboud university in Nijmegen, reporters of bugs -and participants to the Coq-Club mailing list. - -\begin{flushright} -Palaiseau, June 2008\\ -Hugo Herbelin\\ -\end{flushright} - -\section*{Credits: version 8.3} - -{\Coq} version 8.3 is before all a transition version with refinements -or extensions of the existing features and libraries and a new tactic -{\tt nsatz} based on Hilbert's Nullstellensatz for deciding systems of -equations over rings. - -With respect to libraries, the main evolutions are due to Pierre -Letouzey with a rewriting of the library of finite sets {\tt FSets} -and a new round of evolutions in the modular development of arithmetic -(library {\tt Numbers}). The reason for making {\tt FSets} evolve is -that the computational and logical contents were quite intertwined in -the original implementation, leading in some cases to longer -computations than expected and this problem is solved in the new {\tt - MSets} implementation. As for the modular arithmetic library, it was -only dealing with the basic arithmetic operators in the former version -and its current extension adds the standard theory of the division, -min and max functions, all made available for free to any -implementation of $\mathbb{N}$, $\mathbb{Z}$ or -$\mathbb{Z}/n\mathbb{Z}$. - -The main other evolutions of the library are due to Hugo Herbelin who -made a revision of the sorting library (including a certified -merge-sort) and to Guillaume Melquiond who slightly revised and -cleaned up the library of reals. - -The module system evolved significantly. 
Besides the resolution of
-some efficiency issues and a more flexible construction of module
-types, Élie Soubiran brought a new model of name equivalence, the
-$\Delta$-equivalence, which respects as much as possible the names
-given by the users. He also designed with Pierre Letouzey a new
-convenient operator \verb!<+! for nesting functor application, that
-provides a light notation for inheriting the properties of cascading
-modules.
-
-The new tactic {\tt nsatz} is due to Loïc Pottier. It works by
-computing Gr\"obner bases. Regarding the existing tactics, various
-improvements have been done by Matthieu Sozeau, Hugo Herbelin and
-Pierre Letouzey.
-
-Matthieu Sozeau extended and refined the type classes and {\tt
- Program} features (the {\sc Russell} language). Pierre Letouzey
-maintained and improved the extraction mechanism. Bruno Barras and
-\'Elie Soubiran maintained the Coq checker, Julien Forest maintained
-the {\tt Function} mechanism for reasoning over recursively defined
-functions. Matthieu Sozeau, Hugo Herbelin and Jean-Marc Notin
-maintained {\tt coqdoc}. Frédéric Besson maintained the {\sc
- Micromega} platform for deciding systems of inequalities. Pierre
-Courtieu maintained the support for the Proof General Emacs
-interface. Claude Marché maintained the plugin for calling external
-provers ({\tt dp}). Yves Bertot made some improvements to the
-libraries of lists and integers. Matthias Puech improved the search
-functions. Guillaume Melquiond usefully contributed here and
-there. Yann Régis-Gianas grounded the support for Unicode on a more
-standard and more robust basis.
-
-Though invisible from outside, Arnaud Spiwack improved the general
-process of management of existential variables. Pierre Letouzey and
-Stéphane Glondu improved the compilation scheme of the Coq archive.
-Vincent Gross provided support to {\CoqIDE}. Jean-Marc Notin provided
-support for benchmarking and archiving.
- -Many users helped by reporting problems, providing patches, suggesting -improvements or making useful comments, either on the bug tracker or -on the Coq-club mailing list. This includes but not exhaustively -Cédric Auger, Arthur Charguéraud, François Garillot, Georges Gonthier, -Robin Green, Stéphane Lescuyer, Eelis van der Weegen,~... - -Though not directly related to the implementation, special thanks are -going to Yves Bertot, Pierre Castéran, Adam Chlipala, and Benjamin -Pierce for the excellent teaching materials they provided. - -\begin{flushright} -Paris, April 2010\\ -Hugo Herbelin\\ -\end{flushright} - -\section*{Credits: version 8.4} - -{\Coq} version 8.4 contains the result of three long-term projects: a -new modular library of arithmetic by Pierre Letouzey, a new proof -engine by Arnaud Spiwack and a new communication protocol for {\CoqIDE} -by Vincent Gross. - -The new modular library of arithmetic extends, generalizes and -unifies the existing libraries on Peano arithmetic (types {\tt nat}, -{\tt N} and {\tt BigN}), positive arithmetic (type {\tt positive}), -integer arithmetic ({\tt Z} and {\tt BigZ}) and machine word -arithmetic (type {\tt Int31}). It provides with unified notations -(e.g. systematic use of {\tt add} and {\tt mul} for denoting the -addition and multiplication operators), systematic and generic -development of operators and properties of these operators for all the -types mentioned above, including gcd, pcm, power, square root, base 2 -logarithm, division, modulo, bitwise operations, logical shifts, -comparisons, iterators, ... - -The most visible feature of the new proof engine is the support for -structured scripts (bullets and proof brackets) but, even if yet not -user-available, the new engine also provides the basis for refining -existential variables using tactics, for applying tactics to several -goals simultaneously, for reordering goals, all features which are -planned for the next release. 
The new proof engine forced to -reimplement {\tt info} and {\tt Show Script} differently, what was -done by Pierre Letouzey. - -Before version 8.4, {\CoqIDE} was linked to {\Coq} with the graphical -interface living in a separate thread. From version 8.4, {\CoqIDE} is a -separate process communicating with {\Coq} through a textual -channel. This allows for a more robust interfacing, the ability to -interrupt {\Coq} without interrupting the interface, and the ability to -manage several sessions in parallel. Relying on the infrastructure -work made by Vincent Gross, Pierre Letouzey, Pierre Boutillier and -Pierre-Marie P\'edrot contributed many various refinements of {\CoqIDE}. - -{\Coq} 8.4 also comes with a bunch of many various smaller-scale changes -and improvements regarding the different components of the system. - -The underlying logic has been extended with $\eta$-conversion thanks -to Hugo Herbelin, St\'ephane Glondu and Benjamin Gr\'egoire. The -addition of $\eta$-conversion is justified by the confidence that the -formulation of the Calculus of Inductive Constructions based on typed -equality (such as the one considered in Lee and Werner to build a -set-theoretic model of CIC~\cite{LeeWerner11}) is applicable to the -concrete implementation of {\Coq}. - -The underlying logic benefited also from a refinement of the guard -condition for fixpoints by Pierre Boutillier, the point being that it -is safe to propagate the information about structurally smaller -arguments through $\beta$-redexes that are blocked by the -``match'' construction (blocked commutative cuts). - -Relying on the added permissiveness of the guard condition, Hugo -Herbelin could extend the pattern-matching compilation algorithm -so that matching over a sequence of terms involving -dependencies of a term or of the indices of the type of a term in the -type of other terms is systematically supported. 
- -Regarding the high-level specification language, Pierre Boutillier -introduced the ability to give implicit arguments to anonymous -functions, Hugo Herbelin introduced the ability to define notations -with several binders (e.g. \verb=exists x y z, P=), Matthieu Sozeau -made the type classes inference mechanism more robust and predictable, -Enrico Tassi introduced a command {\tt Arguments} that generalizes -{\tt Implicit Arguments} and {\tt Arguments Scope} for assigning -various properties to arguments of constants. Various improvements in -the type inference algorithm were provided by Matthieu Sozeau and Hugo -Herbelin with contributions from Enrico Tassi. - -Regarding tactics, Hugo Herbelin introduced support for referring to -expressions occurring in the goal by pattern in tactics such as {\tt - set} or {\tt destruct}. Hugo Herbelin also relied on ideas from -Chung-Kil Hur's {\tt Heq} plugin to introduce automatic computation of -occurrences to generalize when using {\tt destruct} and {\tt - induction} on types with indices. St\'ephane Glondu introduced new -tactics {\tt constr\_eq}, {\tt is\_evar} and {\tt has\_evar} to be -used when writing complex tactics. Enrico Tassi added support to -fine-tuning the behavior of {\tt simpl}. Enrico Tassi added the -ability to specify over which variables of a section a lemma has -to be exactly generalized. Pierre Letouzey added a tactic {\tt - timeout} and the interruptibility of {\tt vm\_compute}. Bug fixes -and miscellaneous improvements of the tactic language came from Hugo -Herbelin, Pierre Letouzey and Matthieu Sozeau. - -Regarding decision tactics, Lo\"ic Pottier maintained {\tt Nsatz}, -moving in particular to a type-class based reification of goals while -Fr\'ed\'eric Besson maintained {\tt Micromega}, adding in particular -support for division. - -Regarding vernacular commands, St\'ephane Glondu provided new commands -to analyze the structure of type universes. 
- -Regarding libraries, a new library about lists of a given length -(called vectors) has been provided by Pierre Boutillier. A new -instance of finite sets based on Red-Black trees and provided by -Andrew Appel has been adapted for the standard library by Pierre -Letouzey. In the library of real analysis, Yves Bertot changed the -definition of $\pi$ and provided a proof of the long-standing fact yet -remaining unproved in this library, namely that $sin \frac{\pi}{2} = -1$. - -Pierre Corbineau maintained the Mathematical Proof Language (C-zar). - -Bruno Barras and Benjamin Gr\'egoire maintained the call-by-value -reduction machines. - -The extraction mechanism benefited from several improvements provided by -Pierre Letouzey. - -Pierre Letouzey maintained the module system, with contributions from -\'Elie Soubiran. - -Julien Forest maintained the {\tt Function} command. - -Matthieu Sozeau maintained the setoid rewriting mechanism. - -{\Coq} related tools have been upgraded too. In particular, {\tt - coq\_makefile} has been largely revised by Pierre Boutillier. Also, -patches from Adam Chlipala for {\tt coqdoc} have been integrated by -Pierre Boutillier. - -Bruno Barras and Pierre Letouzey maintained the {\tt coqchk} checker. - -Pierre Courtieu and Arnaud Spiwack contributed new features for using -{\Coq} through Proof General. - -The {\tt Dp} plugin has been removed. Use the plugin provided with -{\tt Why 3} instead (\url{http://why3.lri.fr}). - -Under the hood, the {\Coq} architecture benefited from improvements in -terms of efficiency and robustness, especially regarding universes -management and existential variables management, thanks to Pierre -Letouzey and Yann R\'egis-Gianas with contributions from St\'ephane -Glondu and Matthias Puech. The build system is maintained by Pierre -Letouzey with contributions from St\'ephane Glondu and Pierre -Boutillier. 
- -A new backtracking mechanism simplifying the task of external -interfaces has been designed by Pierre Letouzey. - -The general maintenance was done by Pierre Letouzey, Hugo Herbelin, -Pierre Boutillier, Matthieu Sozeau and St\'ephane Glondu with also -specific contributions from Guillaume Melquiond, Julien Narboux and -Pierre-Marie Pédrot. - -Packaging tools were provided by Pierre Letouzey (Windows), Pierre -Boutillier (MacOS), St\'ephane Glondu (Debian). Releasing, testing and -benchmarking support was provided by Jean-Marc Notin. - -Many suggestions for improvements were motivated by feedback from -users, on either the bug tracker or the coq-club mailing list. Special -thanks are going to the users who contributed patches, starting with -Tom Prince. Other patch contributors include C\'edric Auger, David -Baelde, Dan Grayson, Paolo Herms, Robbert Krebbers, Marc Lasson, -Hendrik Tews and Eelis van der Weegen. - -\begin{flushright} -Paris, December 2011\\ -Hugo Herbelin\\ -\end{flushright} - - -\section*{Credits: version 8.5} - -{\Coq} version 8.5 contains the result of five specific long-term -projects: -\begin{itemize} -\item A new asynchronous evaluation and compilation mode by Enrico - Tassi with help from Bruno Barras and Carst Tankink. -\item Full integration of the new proof engine by Arnaud Spiwack - helped by Pierre-Marie Pédrot, -\item Addition of conversion and reduction based on native compilation - by Maxime Dénès and Benjamin Grégoire. -\item Full universe polymorphism for definitions and inductive types by - Matthieu Sozeau. -\item An implementation of primitive projections with $\eta$-conversion - bringing significant performance improvements when using records by - Matthieu Sozeau. 
-\end{itemize} - -The full integration of the proof engine, by Arnaud Spiwack and -Pierre-Marie Pédrot, brings to primitive tactics and the user level -Ltac language dependent subgoals, deep backtracking and multiple goal -handling, along with miscellaneous features and an improved potential -for future modifications. Dependent subgoals allow statements in a -goal to mention the proof of another. Proofs of unsolved subgoals -appear as existential variables. Primitive backtracking makes it -possible to write a tactic with several possible outcomes which are -tried successively when subsequent tactics fail. Primitives are also -available to control the backtracking behavior of tactics. Multiple -goal handling paves the way for smarter automation tactics. It is -currently used for simple goal manipulation such as goal reordering. - -The way {\Coq} processes a document in batch and interactive mode has -been redesigned by Enrico Tassi with help from Bruno Barras. Opaque -proofs, the text between Proof and Qed, can be processed -asynchronously, decoupling the checking of definitions and statements -from the checking of proofs. It improves the responsiveness of -interactive development, since proofs can be processed in the -background. Similarly, compilation of a file can be split into two -phases: the first one checking only definitions and statements and the -second one checking proofs. A file resulting from the first -phase~--~with the .vio extension~--~can be already Required. All .vio -files can be turned into complete .vo files in parallel. The same -infrastructure also allows terminating tactics to be run in parallel -on a set of goals via the \verb=par:= goal selector. - -{\CoqIDE} was modified to cope with asynchronous checking of the -document. Its source code was also made separate from that of {\Coq}, so -that {\CoqIDE} no longer has a special status among user interfaces, -paving the way for decoupling its release cycle from that of {\Coq} in -the future. 
- -Carst Tankink developed a {\Coq} back-end for user interfaces built on -Makarius Wenzel's Prover IDE framework (PIDE), like PIDE/jEdit (with -help from Makarius Wenzel) or PIDE/Coqoon (with help from Alexander -Faithfull and Jesper Bengtson). The development of such features was -funded by the Paral-ITP French ANR project. - -The full universe polymorphism extension was designed by Matthieu -Sozeau. It conservatively extends the universes system and core calculus -with definitions and inductive declarations parameterized by universes -and constraints. It is based on a modification of the kernel architecture to -handle constraint checking only, leaving the generation of constraints -to the refinement/type inference engine. Accordingly, tactics are now -fully universe aware, resulting in more localized error messages in case -of inconsistencies and allowing higher-level algorithms like unification -to be entirely type safe. The internal representation of universes has -been modified but this is invisible to the user. - -The underlying logic has been extended with $\eta$-conversion for -records defined with primitive projections by Matthieu Sozeau. This -additional form of $\eta$-conversion is justified using the same -principle than the previously added $\eta$-conversion for function -types, based on formulations of the Calculus of Inductive Constructions -with typed equality. Primitive projections, which do not carry the -parameters of the record and are rigid names (not defined as a -pattern-matching construct), make working with nested records more -manageable in terms of time and space consumption. This extension and -universe polymorphism were carried out partly while Matthieu Sozeau was working -at the IAS in Princeton. - -The guard condition has been made compliant with extensional equality -principles such as propositional extensionality and univalence, thanks to -Maxime Dénès and Bruno Barras. 
To ensure compatibility with the -univalence axiom, a new flag ``-indices-matter'' has been implemented, -taking into account the universe levels of indices when computing the -levels of inductive types. This supports using {\Coq} as a tool to explore -the relations between homotopy theory and type theory. - -Maxime Dénès and Benjamin Grégoire developed an implementation of -conversion test and normal form computation using the OCaml native -compiler. It complements the virtual machine conversion offering much -faster computation for expensive functions. - -{\Coq} 8.5 also comes with a bunch of many various smaller-scale -changes and improvements regarding the different components of the -system. We shall only list a few of them. - -Pierre Boutillier developed an improved tactic for simplification of -expressions called {\tt cbn}. - -Maxime Dénès maintained the bytecode-based reduction machine. Pierre -Letouzey maintained the extraction mechanism. - -Pierre-Marie Pédrot has extended the syntax of terms to, -experimentally, allow holes in terms to be solved by a locally -specified tactic. - -Existential variables are referred to by identifiers rather than mere -numbers, thanks to Hugo Herbelin who also improved the tactic language -here and there. - -Error messages for universe inconsistencies have been improved by -Matthieu Sozeau. Error messages for unification and type inference -failures have been improved by Hugo Herbelin, Pierre-Marie Pédrot and -Arnaud Spiwack. - -Pierre Courtieu contributed new features for using {\Coq} through Proof -General and for better interactive experience (bullets, Search, etc). - -The efficiency of the whole system has been significantly improved -thanks to contributions from Pierre-Marie Pédrot. - -A distribution channel for {\Coq} packages using the OPAM tool has -been initiated by Thomas Braibant and developed by Guillaume Claret, -with contributions by Enrico Tassi and feedback from Hugo Herbelin. 
- -Packaging tools were provided by Pierre Letouzey and Enrico Tassi -(Windows), Pierre Boutillier, Matthieu Sozeau and Maxime Dénès (MacOS -X). Maxime Dénès improved significantly the testing and benchmarking -support. - -Many power users helped to improve the design of the new features via -the bug tracker, the coq development mailing list or the coq-club -mailing list. Special thanks are going to the users who contributed -patches and intensive brain-storming, starting with Jason Gross, -Jonathan Leivent, Greg Malecha, Clément Pit-Claudel, Marc Lasson, -Lionel Rieg. It would however be impossible to mention with precision -all names of people who to some extent influenced the development. - -Version 8.5 is one of the most important release of {\Coq}. Its -development spanned over about 3 years and a half with about one year -of beta-testing. General maintenance during part or whole of this -period has been done by Pierre Boutillier, Pierre Courtieu, Maxime -Dénès, Hugo Herbelin, Pierre Letouzey, Guillaume Melquiond, -Pierre-Marie Pédrot, Matthieu Sozeau, Arnaud Spiwack, Enrico Tassi as -well as Bruno Barras, Yves Bertot, Frédéric Besson, Xavier Clerc, -Pierre Corbineau, Jean-Christophe Filliâtre, Julien Forest, Sébastien -Hinderer, Assia Mahboubi, Jean-Marc Notin, Yann Régis-Gianas, François -Ripault, Carst Tankink. Maxime Dénès coordinated the release process. - -\begin{flushright} -Paris, January 2015, revised December 2015,\\ -Hugo Herbelin, Matthieu Sozeau and the {\Coq} development team\\ -\end{flushright} - -\section*{Credits: version 8.6} - -{\Coq} version 8.6 contains the result of refinements, stabilization of -8.5's features and cleanups of the internals of the system. Over the -year of (now time-based) development, about 450 bugs were resolved and -over 100 contributions integrated. The main user visible changes are: -\begin{itemize} -\item A new, faster state-of-the-art universe constraint checker, by - Jacques-Henri Jourdan. 
-\item In CoqIDE and other asynchronous interfaces, more fine-grained - asynchronous processing and error reporting by Enrico Tassi, making {\Coq} - capable of recovering from errors and continue processing the document. -\item More access to the proof engine features from Ltac: goal - management primitives, range selectors and a {\tt typeclasses - eauto} engine handling multiple goals and multiple successes, by - Cyprien Mangin, Matthieu Sozeau and Arnaud Spiwack. -\item Tactic behavior uniformization and specification, generalization - of intro-patterns by Hugo Herbelin and others. -\item A brand new warning system allowing to control warnings, turn them - into errors or ignore them selectively by Maxime Dénès, Guillaume - Melquiond, Pierre-Marie Pédrot and others. -\item Irrefutable patterns in abstractions, by Daniel de Rauglaudre. -\item The {\tt ssreflect} subterm selection algorithm by Georges Gonthier and - Enrico Tassi is now accessible to tactic writers through the {\tt ssrmatching} - plugin. -\item Integration of {\tt LtacProf}, a profiler for {\tt Ltac} by Jason - Gross, Paul Steckler, Enrico Tassi and Tobias Tebbi. -\end{itemize} - -{\Coq} 8.6 also comes with a bunch of smaller-scale changes and -improvements regarding the different components of the system. We shall -only list a few of them. - -The {\tt iota} reduction flag is now a shorthand for {\tt match}, {\tt - fix} and {\tt cofix} flags controlling the corresponding reduction -rules (by Hugo Herbelin and Maxime Dénès). - -Maxime Dénès maintained the native compilation machinery. - -Pierre-Marie Pédrot separated the Ltac code from general purpose -tactics, and generalized and rationalized the handling of generic -arguments, allowing to create new versions of Ltac more easily in the -future. - -In patterns and terms, {\tt @}, abbreviations and notations are now -interpreted the same way, by Hugo Herbelin. - -Name handling for universes has been improved by Pierre-Marie Pédrot and -Matthieu Sozeau. 
The minimization algorithm has been improved by -Matthieu Sozeau. - -The unifier has been improved by Hugo Herbelin and Matthieu Sozeau, -fixing some incompatibilities introduced in Coq 8.5. Unification -constraints can now be left floating around and be seen by the user -thanks to a new option. The {\tt Keyed Unification} mode has been -improved by Matthieu Sozeau. - -The typeclass resolution engine and associated proof-search tactic have -been reimplemented on top of the proof-engine monad, providing better -integration in tactics, and new options have been introduced to control -it, by Matthieu Sozeau with help from Théo Zimmermann. - -The efficiency of the whole system has been significantly improved -thanks to contributions from Pierre-Marie Pédrot, Maxime Dénès and -Matthieu Sozeau and performance issue tracking by Jason Gross and Paul -Steckler. - -Standard library improvements by Jason Gross, Sébastien Hinderer, Pierre -Letouzey and others. - -Emilio Jesús Gallego Arias contributed many cleanups and refactorings of -the pretty-printing and user interface communication components. - -Frédéric Besson maintained the micromega tactic. - -The OPAM repository for {\Coq} packages has been maintained by Guillaume -Claret, Guillaume Melquiond, Matthieu Sozeau, Enrico Tassi and others. A -list of packages is now available at \url{https://coq.inria.fr/opam/www/}. - -Packaging tools and software development kits were prepared by Michael -Soegtrop with the help of Maxime Dénès and Enrico Tassi for Windows, and -Maxime Dénès and Matthieu Sozeau for MacOS X. Packages are now regularly -built on the continuous integration server. {\Coq} now comes with a {\tt - META} file usable with {\tt ocamlfind}, contributed by Emilio Jesús -Gallego Arias, Gregory Malecha, and Matthieu Sozeau. - -Matej Košík maintained and greatly improved the continuous integration -setup and the testing of {\Coq} contributions. 
He also contributed many
-API improvements and code cleanups throughout the system.
-
-The contributors for this version are Bruno Barras, C.J. Bell, Yves
-Bertot, Frédéric Besson, Pierre Boutillier, Tej Chajed, Guillaume
-Claret, Xavier Clerc, Pierre Corbineau, Pierre Courtieu, Maxime Dénès,
-Ricky Elrod, Emilio Jesús Gallego Arias, Jason Gross, Hugo Herbelin,
-Sébastien Hinderer, Jacques-Henri Jourdan, Matej Kosik, Xavier Leroy,
-Pierre Letouzey, Gregory Malecha, Cyprien Mangin, Erik Martin-Dorel,
-Guillaume Melquiond, Clément Pit--Claudel, Pierre-Marie Pédrot, Daniel
-de Rauglaudre, Lionel Rieg, Gabriel Scherer, Thomas Sibut-Pinote,
-Matthieu Sozeau, Arnaud Spiwack, Paul Steckler, Enrico Tassi, Laurent
-Théry, Nickolai Zeldovich and Théo Zimmermann. The development process
-was coordinated by Hugo Herbelin and Matthieu Sozeau with the help of
-Maxime Dénès, who was also in charge of the release process.
-
-Many power users helped to improve the design of the new features via
-the bug tracker, the pull request system, the {\Coq} development mailing
-list or the coq-club mailing list. Special thanks to the users who
-contributed patches and intensive brain-storming and code reviews,
-starting with Cyril Cohen, Jason Gross, Robbert Krebbers, Jonathan
-Leivent, Xavier Leroy, Gregory Malecha, Clément Pit--Claudel, Gabriel
-Scherer and Beta Ziliani. It would however be impossible to mention
-exhaustively the names of everybody who to some extent influenced the
-development.
-
-Version 8.6 is the first release of {\Coq} developed on a time-based
-development cycle. Its development spanned 10 months from the release of
-{\Coq} 8.5 and was based on a public roadmap. To date, it contains more
-external contributions than any previous {\Coq} system. Code reviews
-were systematically done before integration of new features, with an
-important focus given to compatibility and performance issues, resulting
-in a hopefully more robust release than {\Coq} 8.5. 
- -Coq Enhancement Proposals (CEPs for short) were introduced by Enrico -Tassi to provide more visibility and a discussion period on new -features, they are publicly available \url{https://github.com/coq/ceps}. - -Started during this period, an effort is led by Yves Bertot and Maxime -Dénès to put together a {\Coq} consortium. - -\begin{flushright} -Paris, November 2016,\\ -Matthieu Sozeau and the {\Coq} development team\\ -\end{flushright} - -\section*{Credits: version 8.7} - -{\Coq} version 8.7 contains the result of refinements, stabilization of -features and cleanups of the internals of the system along with a few -new features. The main user visible changes are: -\begin{itemize} -\item New tactics: variants of tactics supporting existential variables - \texttt{eassert}, \texttt{eenough}, etc... by Hugo Herbelin. Tactics - \texttt{extensionality in H} and \texttt{inversion\_sigma} by Jason - Gross, \texttt{specialize with ...} accepting partial bindings by - Pierre Courtieu. -\item Cumulative Polymorphic Inductive Types, allowing cumulativity of - universes to go through applied inductive types, by Amin Timany and - Matthieu Sozeau. -\item Integration of the \texttt{SSReflect} plugin and its documentation in the - reference manual, by Enrico Tassi, Assia Mahboubi and Maxime Dénès. -\item The \texttt{coq\_makefile} tool was completely redesigned to improve its - maintainability and the extensibility of generated Makefiles, and to - make \texttt{\_CoqProject} files more palatable to IDEs by Enrico Tassi. -\end{itemize} - -{\Coq} 8.7 involved a large amount of work on cleaning and speeding up -the code base, notably the work of Pierre-Marie Pédrot on making the -tactic-level system insensitive to existential variable expansion, -providing a safer API to plugin writers and making the code more -robust. The \texttt{dev/doc/changes.txt} file documents the numerous -changes to the implementation and improvements of interfaces. 
An effort -to provide an official, streamlined API to plugin writers is in -progress, thanks to the work of Matej Košík. - -Version 8.7 also comes with a bunch of smaller-scale changes and improvements -regarding the different components of the system. We shall only list a -few of them. - -The efficiency of the whole system has been significantly improved -thanks to contributions from Pierre-Marie Pédrot, Maxime Dénès and -Matthieu Sozeau and performance issue tracking by Jason Gross and Paul -Steckler. - -Thomas Sibut-Pinote and Hugo Herbelin added support for side effects -hooks in \texttt{cbv}, \texttt{cbn} and \texttt{simpl}. The side -effects are provided via a plugin available at -\url{https://github.com/herbelin/reduction-effects/}. - -The \texttt{BigN}, \texttt{BigZ}, \texttt{BigQ} libraries are no longer -part of the {\Coq} standard library, they are now provided by a separate -repository \url{https://github.com/coq/bignums}, maintained by Pierre -Letouzey. - -In the \texttt{Reals} library, \texttt{IZR} has been changed to produce -a compact representation of integers and real constants are now -represented using \texttt{IZR} (work by Guillaume Melquiond). - -Standard library additions and improvements by Jason Gross, Pierre -Letouzey and others, documented in the CHANGES file. - -The mathematical proof language/declarative mode plugin was removed from -the archive. - -The OPAM repository for {\Coq} packages has been maintained by Guillaume -Melquiond, Matthieu Sozeau, Enrico Tassi with contributions from many -users. A list of packages is available at -\url{https://coq.inria.fr/opam/www/}. - -Packaging tools and software development kits were prepared by Michael -Soegtrop with the help of Maxime Dénès and Enrico Tassi for Windows, and -Maxime Dénès for MacOS X. Packages are regularly built on the -Travis continuous integration server. - -The contributors for this version are Abhishek Anand, C.J. 
Bell, Yves -Bertot, Frédéric Besson, Tej Chajed, Pierre Courtieu, Maxime Dénès, -Julien Forest, Gaëtan Gilbert, Jason Gross, Hugo Herbelin, Emilio Jesús -Gallego Arias, Ralf Jung, Matej Košík, Xavier Leroy, Pierre Letouzey, -Assia Mahboubi, Cyprien Mangin, Erik Martin-Dorel, Olivier Marty, -Guillaume Melquiond, Sam Pablo Kuper, Benjamin Pierce, Pierre-Marie -Pédrot, Lars Rasmusson, Lionel Rieg, Valentin Robert, Yann Régis-Gianas, -Thomas Sibut-Pinote, Michael Soegtrop, Matthieu Sozeau, Arnaud Spiwack, -Paul Steckler, George Stelle, Pierre-Yves Strub, Enrico Tassi, Hendrik -Tews, Amin Timany, Laurent Théry, Vadim Zaliva and Théo Zimmermann. - -The development process was coordinated by Matthieu Sozeau with the help -of Maxime Dénès, who was also in charge of the release process. Théo -Zimmermann is the maintainer of this release. - -Many power users helped to improve the design of the new features via -the bug tracker, the pull request system, the {\Coq} development mailing -list or the coq-club mailing list. Special thanks to the users who -contributed patches and intensive brain-storming and code reviews, -starting with Jason Gross, Ralf Jung, Robbert Krebbers, Xavier Leroy, -Clément Pit--Claudel and Gabriel Scherer. It would however be impossible -to mention exhaustively the names of everybody who to some extent -influenced the development. - -Version 8.7 is the second release of {\Coq} developed on a time-based -development cycle. Its development spanned 9 months from the release of -{\Coq} 8.6 and was based on a public road-map. It attracted many external -contributions. Code reviews and continuous integration testing were -systematically used before integration of new features, with an -important focus given to compatibility and performance issues, resulting -in a hopefully more robust release than {\Coq} 8.6 while maintaining -compatibility. 
- -Coq Enhancement Proposals (CEPs for short) and open pull-requests -discussions were used to discuss publicly the new features. - -The {\Coq} consortium, an organization directed towards users and -supporters of the system, is now upcoming and will rely on Inria's -newly created Foundation. - -\begin{flushright} -Paris, August 2017,\\ -Matthieu Sozeau and the {\Coq} development team\\ -\end{flushright} - - -%new Makefile - -%\newpage - -% Integration of ZArith lemmas from Sophia and Nijmegen. - - -% $Id$ - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-sch.tex b/doc/refman/RefMan-sch.tex deleted file mode 100644 index 6004711235..0000000000 --- a/doc/refman/RefMan-sch.tex +++ /dev/null @@ -1,444 +0,0 @@ -\chapter{Proof schemes} -%HEVEA\cutname{schemes.html} - -\section{Generation of induction principles with {\tt Scheme}} -\label{Scheme} -\index{Schemes} -\comindex{Scheme} - -The {\tt Scheme} command is a high-level tool for generating -automatically (possibly mutual) induction principles for given types -and sorts. Its syntax follows the schema: -\begin{quote} -{\tt Scheme {\ident$_1$} := Induction for \ident'$_1$ Sort {\sort$_1$} \\ - with\\ - \mbox{}\hspace{0.1cm} \dots\\ - with {\ident$_m$} := Induction for {\ident'$_m$} Sort - {\sort$_m$}} -\end{quote} -where \ident'$_1$ \dots\ \ident'$_m$ are different inductive type -identifiers belonging to the same package of mutual inductive -definitions. This command generates {\ident$_1$}\dots{} {\ident$_m$} -to be mutually recursive definitions. Each term {\ident$_i$} proves a -general principle of mutual induction for objects in type {\term$_i$}. 
- -\begin{Variants} -\item {\tt Scheme {\ident$_1$} := Minimality for \ident'$_1$ Sort {\sort$_1$} \\ - with\\ - \mbox{}\hspace{0.1cm} \dots\ \\ - with {\ident$_m$} := Minimality for {\ident'$_m$} Sort - {\sort$_m$}} - - Same as before but defines a non-dependent elimination principle more - natural in case of inductively defined relations. - -\item {\tt Scheme Equality for \ident$_1$\comindex{Scheme Equality}} - - Tries to generate a Boolean equality and a proof of the - decidability of the usual equality. If \ident$_i$ involves - some other inductive types, their equality has to be defined first. - -\item {\tt Scheme Induction for \ident$_1$ Sort {\sort$_1$} \\ - with\\ - \mbox{}\hspace{0.1cm} \dots\\ - with Induction for {\ident$_m$} Sort - {\sort$_m$}} - - If you do not provide the name of the schemes, they will be automatically - computed from the sorts involved (works also with Minimality). - -\end{Variants} -\label{Scheme-examples} - -\firstexample -\example{Induction scheme for \texttt{tree} and \texttt{forest}} - -The definition of principle of mutual induction for {\tt tree} and -{\tt forest} over the sort {\tt Set} is defined by the command: - -\begin{coq_eval} -Reset Initial. -Variables A B : Set. -\end{coq_eval} - -\begin{coq_example*} -Inductive tree : Set := - node : A -> forest -> tree -with forest : Set := - | leaf : B -> forest - | cons : tree -> forest -> forest. - -Scheme tree_forest_rec := Induction for tree Sort Set - with forest_tree_rec := Induction for forest Sort Set. -\end{coq_example*} - -You may now look at the type of {\tt tree\_forest\_rec}: - -\begin{coq_example} -Check tree_forest_rec. -\end{coq_example} - -This principle involves two different predicates for {\tt trees} and -{\tt forests}; it also has three premises each one corresponding to a -constructor of one of the inductive definitions. - -The principle {\tt forest\_tree\_rec} shares exactly the same -premises, only the conclusion now refers to the property of forests. 
- -\begin{coq_example} -Check forest_tree_rec. -\end{coq_example} - -\example{Predicates {\tt odd} and {\tt even} on naturals} - -Let {\tt odd} and {\tt even} be inductively defined as: - -% Reset Initial. -\begin{coq_eval} -Open Scope nat_scope. -\end{coq_eval} - -\begin{coq_example*} -Inductive odd : nat -> Prop := - oddS : forall n:nat, even n -> odd (S n) -with even : nat -> Prop := - | evenO : even 0 - | evenS : forall n:nat, odd n -> even (S n). -\end{coq_example*} - -The following command generates a powerful elimination -principle: - -\begin{coq_example} -Scheme odd_even := Minimality for odd Sort Prop - with even_odd := Minimality for even Sort Prop. -\end{coq_example} - -The type of {\tt odd\_even} for instance will be: - -\begin{coq_example} -Check odd_even. -\end{coq_example} - -The type of {\tt even\_odd} shares the same premises but the -conclusion is {\tt (n:nat)(even n)->(Q n)}. - -\subsection{Automatic declaration of schemes -\optindex{Boolean Equality Schemes} -\optindex{Elimination Schemes} -\optindex{Nonrecursive Elimination Schemes} -\optindex{Case Analysis Schemes} -\optindex{Decidable Equality Schemes} -\optindex{Rewriting Schemes} -\label{set-nonrecursive-elimination-schemes} -} - -It is possible to deactivate the automatic declaration of the induction - principles when defining a new inductive type with the - {\tt Unset Elimination Schemes} command. It may be -reactivated at any time with {\tt Set Elimination Schemes}. - -The types declared with the keywords {\tt Variant} (see~\ref{Variant}) -and {\tt Record} (see~\ref{Record}) do not have an automatic -declaration of the induction principles. It can be activated with the -command {\tt Set Nonrecursive Elimination Schemes}. It can be -deactivated again with {\tt Unset Nonrecursive Elimination Schemes}. - -In addition, the {\tt Case Analysis Schemes} flag governs the generation of -case analysis lemmas for inductive types, i.e. 
corresponding to the
-pattern-matching term alone and without fixpoint.
-\\
-
-You can also activate the automatic declaration of those Boolean equalities
-(see the second variant of {\tt Scheme})
-with respectively the commands {\tt Set Boolean Equality Schemes} and
-{\tt Set Decidable Equality Schemes}.
-However you have to be careful with this option since
-\Coq~ may now reject well-defined inductive types because it cannot compute
-a Boolean equality for them.
-
-The {\tt Rewriting Schemes} flag governs generation of equality
-related schemes such as congruence.
-
-\subsection{\tt Combined Scheme}
-\label{CombinedScheme}
-\comindex{Combined Scheme}
-
-The {\tt Combined Scheme} command is a tool for combining
-induction principles generated by the {\tt Scheme} command.
-Its syntax follows the schema :
-\begin{quote}
-{\tt Combined Scheme {\ident$_0$} from {\ident$_1$}, .., {\ident$_n$}}
-\end{quote}
-where
-\ident$_1$ \ldots \ident$_n$ are different inductive principles that must belong to
-the same package of mutual inductive principle definitions. This command
-generates {\ident$_0$} to be the conjunction of the principles: it is
-built from the common premises of the principles and concluded by the
-conjunction of their conclusions.
-
-\Example
-We can define the induction principles for trees and forests using:
-\begin{coq_example}
-Scheme tree_forest_ind := Induction for tree Sort Prop
- with forest_tree_ind := Induction for forest Sort Prop.
-\end{coq_example}
-
-Then we can build the combined induction principle which gives the
-conjunction of the conclusions of each individual principle:
-\begin{coq_example}
-Combined Scheme tree_forest_mutind from tree_forest_ind, forest_tree_ind.
-\end{coq_example}
-
-The type of {\tt tree\_forest\_mutind} will be:
-\begin{coq_example}
-Check tree_forest_mutind. 
-\end{coq_example} - -\section{Generation of induction principles with {\tt Functional Scheme}} -\label{FunScheme} -\comindex{Functional Scheme} - -The {\tt Functional Scheme} command is a high-level experimental -tool for generating automatically induction principles -corresponding to (possibly mutually recursive) functions. -First, it must be made available via {\tt Require Import FunInd}. - Its -syntax then follows the schema: -\begin{quote} -{\tt Functional Scheme {\ident$_1$} := Induction for \ident'$_1$ Sort {\sort$_1$} \\ - with\\ - \mbox{}\hspace{0.1cm} \dots\ \\ - with {\ident$_m$} := Induction for {\ident'$_m$} Sort - {\sort$_m$}} -\end{quote} -where \ident'$_1$ \dots\ \ident'$_m$ are different mutually defined function -names (they must be in the same order as when they were defined). -This command generates the induction principles -\ident$_1$\dots\ident$_m$, following the recursive structure and case -analyses of the functions \ident'$_1$ \dots\ \ident'$_m$. - -\Rem -There is a difference between obtaining an induction scheme by using -\texttt{Functional Scheme} on a function defined by \texttt{Function} -or not. Indeed \texttt{Function} generally produces smaller -principles, closer to the definition written by the user. - -\firstexample -\example{Induction scheme for \texttt{div2}} -\label{FunScheme-examples} - -We define the function \texttt{div2} as follows: - -\begin{coq_eval} -Reset Initial. -Require Import FunInd. -\end{coq_eval} - -\begin{coq_example*} -Require Import Arith. -Fixpoint div2 (n:nat) : nat := - match n with - | O => 0 - | S O => 0 - | S (S n') => S (div2 n') - end. -\end{coq_example*} - -The definition of a principle of induction corresponding to the -recursive structure of \texttt{div2} is defined by the command: - -\begin{coq_example} -Functional Scheme div2_ind := Induction for div2 Sort Prop. -\end{coq_example} - -You may now look at the type of {\tt div2\_ind}: - -\begin{coq_example} -Check div2_ind. 
-\end{coq_example} - -We can now prove the following lemma using this principle: - -\begin{coq_example*} -Lemma div2_le' : forall n:nat, div2 n <= n. -intro n. - pattern n , (div2 n). -\end{coq_example*} - -\begin{coq_example} -apply div2_ind; intros. -\end{coq_example} - -\begin{coq_example*} -auto with arith. -auto with arith. -simpl; auto with arith. -Qed. -\end{coq_example*} - -We can use directly the \texttt{functional induction} -(\ref{FunInduction}) tactic instead of the pattern/apply trick: -\tacindex{functional induction} - -\begin{coq_example*} -Reset div2_le'. -Lemma div2_le : forall n:nat, div2 n <= n. -intro n. -\end{coq_example*} - -\begin{coq_example} -functional induction (div2 n). -\end{coq_example} - -\begin{coq_example*} -auto with arith. -auto with arith. -auto with arith. -Qed. -\end{coq_example*} - -\Rem There is a difference between obtaining an induction scheme for a -function by using \texttt{Function} (see Section~\ref{Function}) and by -using \texttt{Functional Scheme} after a normal definition using -\texttt{Fixpoint} or \texttt{Definition}. See \ref{Function} for -details. - - -\example{Induction scheme for \texttt{tree\_size}} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -We define trees by the following mutual inductive type: - -\begin{coq_example*} -Variable A : Set. -Inductive tree : Set := - node : A -> forest -> tree -with forest : Set := - | empty : forest - | cons : tree -> forest -> forest. -\end{coq_example*} - -We define the function \texttt{tree\_size} that computes the size -of a tree or a forest. Note that we use \texttt{Function} which -generally produces better principles. - -\begin{coq_example*} -Require Import FunInd. -Function tree_size (t:tree) : nat := - match t with - | node A f => S (forest_size f) - end - with forest_size (f:forest) : nat := - match f with - | empty => 0 - | cons t f' => (tree_size t + forest_size f') - end. 
-\end{coq_example*} - -\Rem \texttt{Function} generates itself non mutual induction -principles {\tt tree\_size\_ind} and {\tt forest\_size\_ind}: - -\begin{coq_example} -Check tree_size_ind. -\end{coq_example} - -The definition of mutual induction principles following the recursive -structure of \texttt{tree\_size} and \texttt{forest\_size} is defined -by the command: - -\begin{coq_example*} -Functional Scheme tree_size_ind2 := Induction for tree_size Sort Prop -with forest_size_ind2 := Induction for forest_size Sort Prop. -\end{coq_example*} - -You may now look at the type of {\tt tree\_size\_ind2}: - -\begin{coq_example} -Check tree_size_ind2. -\end{coq_example} - -\section{Generation of inversion principles with \tt Derive Inversion} -\label{Derive-Inversion} -\comindex{Derive Inversion} - -The syntax of {\tt Derive Inversion} follows the schema: -\begin{quote} -{\tt Derive Inversion {\ident} with forall - $(\vec{x} : \vec{T})$, $I~\vec{t}$ Sort \sort} -\end{quote} - -This command generates an inversion principle for the -\texttt{inversion \dots\ using} tactic. -\tacindex{inversion \dots\ using} -Let $I$ be an inductive predicate and $\vec{x}$ the variables -occurring in $\vec{t}$. This command generates and stocks the -inversion lemma for the sort \sort~ corresponding to the instance -$\forall (\vec{x}:\vec{T}), I~\vec{t}$ with the name {\ident} in the {\bf -global} environment. When applied, it is equivalent to having inverted -the instance with the tactic {\tt inversion}. - -\begin{Variants} -\item \texttt{Derive Inversion\_clear {\ident} with forall - $(\vec{x}:\vec{T})$, $I~\vec{t}$ Sort \sort}\\ - \comindex{Derive Inversion\_clear} - When applied, it is equivalent to having - inverted the instance with the tactic \texttt{inversion} - replaced by the tactic \texttt{inversion\_clear}. 
-\item \texttt{Derive Dependent Inversion {\ident} with forall - $(\vec{x}:\vec{T})$, $I~\vec{t}$ Sort \sort}\\ - \comindex{Derive Dependent Inversion} - When applied, it is equivalent to having - inverted the instance with the tactic \texttt{dependent inversion}. -\item \texttt{Derive Dependent Inversion\_clear {\ident} with forall - $(\vec{x}:\vec{T})$, $I~\vec{t}$ Sort \sort}\\ - \comindex{Derive Dependent Inversion\_clear} - When applied, it is equivalent to having - inverted the instance with the tactic \texttt{dependent inversion\_clear}. -\end{Variants} - -\Example - -Let us consider the relation \texttt{Le} over natural numbers and the -following variable: - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\begin{coq_example*} -Inductive Le : nat -> nat -> Set := - | LeO : forall n:nat, Le 0 n - | LeS : forall n m:nat, Le n m -> Le (S n) (S m). -Variable P : nat -> nat -> Prop. -\end{coq_example*} - -To generate the inversion lemma for the instance -\texttt{(Le (S n) m)} and the sort \texttt{Prop}, we do: - -\begin{coq_example*} -Derive Inversion_clear leminv with (forall n m:nat, Le (S n) m) Sort Prop. -\end{coq_example*} - -\begin{coq_example} -Check leminv. -\end{coq_example} - -Then we can use the proven inversion lemma: - -\begin{coq_eval} -Lemma ex : forall n m:nat, Le (S n) m -> P n m. -intros. -\end{coq_eval} - -\begin{coq_example} -Show. -\end{coq_example} - -\begin{coq_example} -inversion H using leminv. 
-\end{coq_example} - diff --git a/doc/refman/RefMan-ssr.tex b/doc/refman/RefMan-ssr.tex deleted file mode 100644 index 31dabcdd4e..0000000000 --- a/doc/refman/RefMan-ssr.tex +++ /dev/null @@ -1,4933 +0,0 @@ -\achapter{The SSReflect proof language} -%HEVEA\cutname{ssreflect.html} -\aauthor{Georges Gonthier, Assia Mahboubi, Enrico Tassi} - -\newcommand{\ssr}{{\sc SSReflect}} - -% listing -\ifhevea\newcommand{\ssrC}[1]{\texttt{#1}}\else\newcommand{\ssrC}[1]{\text{\lstinline!#1!}}\fi -\ifhevea\renewenvironment{center}{\@open{div}{class="center"}\@open{div}{class="centered"}}{\@close{div}\@close{div}}\fi -% non-terminal -%\newcommand\ssrN[2][]{\ensuremath{\langle\mbox{\itshape\rmfamily\small #2}\rangle_{#1}}} -\newcommand\ssrN[2][]{{\textsl {#2}}\ensuremath{_{#1}}} -\ifhevea\newcommand{\underbar}[1]{\underline{#1}}\fi - -% TODO: only use \ssrC -\let\ssrL=\lstinline - -\newcommand{\iitem}{{\it i-item}} -\newcommand{\ditem}{{\it d-item}} -\newcommand{\optional}[1]{{\it[}#1{\it]}} -\newcommand{\optsep}{{\it|}} -\newcommand{\idx}[1]{\tacindex{#1 (ssreflect)}} -\newcommand{\idxC}[1]{\comindex{#1 (ssreflect)}} - -\newenvironment{new}% - {\begin{Sbox}\begin{minipage}{0.97\textwidth}% - \begin{flushright}\textcolor{red}{\fbox{Version 1.3}}% - \end{flushright}\noindent}% - {\end{minipage}\end{Sbox}\noindent\doublebox{\TheSbox}} -\section{Introduction}\label{sec:intro} - -This chapter describes a set of tactics known as \ssr{} -originally designed to provide support for the so-called \emph{small scale -reflection} proof methodology. Despite the original purpose this set of tactic -is of general interest and is available in Coq starting from version 8.7. - -\ssr{} was developed independently of the tactics described in -Chapter~\ref{Tactics}. Indeed the scope of the tactics part of -\ssr{} largely overlaps with the standard set of tactics. Eventually -the overlap will be reduced in future releases of Coq. 
- -Proofs written in \ssr{} typically look quite different from the -ones written using only tactics as per Chapter~\ref{Tactics}. -We try to summarise here the most ``visible'' ones in order to -help the reader already accustomed to the tactics described in -Chapter~\ref{Tactics} to read this chapter. - -The first difference between the tactics described in this -chapter and the tactics described in Chapter~\ref{Tactics} is the way -hypotheses are managed (we call this \emph{bookkeeping}). -In Chapter~\ref{Tactics} the most common -approach is to avoid moving explicitly hypotheses back and forth -between the context and the conclusion of the goal. On the contrary -in \ssr{} -all bookkeeping is performed on the conclusion of the goal, using for -that purpose a couple of syntactic constructions behaving similar to -tacticals (and often named as such in this chapter). -The \ssrC{:} tactical moves hypotheses from the context to the -conclusion, while \ssrC{=>} moves hypotheses from the -conclusion to the context, and \ssrC{in} moves back -and forth an hypothesis from the context to the conclusion for the -time of applying an action to it. - -While naming hypotheses is commonly done by means of an \ssrC{as} -clause in the basic model of Chapter~\ref{Tactics}, it is here to -\ssrC{=>} that this task is devoted. As tactics leave -new assumptions in the conclusion, and are often followed by -\ssrC{=>} to explicitly name them. -While generalizing the goal is normally -not explicitly needed in Chapter~\ref{Tactics}, it is an explicit -operation performed by \ssrC{:}. - -Beside the difference of bookkeeping model, this chapter includes -specific tactics which have no explicit counterpart in -Chapter~\ref{Tactics} such as tactics to mix forward steps and -generalizations as \ssrC{generally have} or \ssrC{without loss}. 
- -\ssr{} adopts the point of view that rewriting, definition -expansion and partial evaluation participate all to a same concept of -rewriting a goal in a larger sense. As such, all these functionalities are -provided by the \ssrC{rewrite} tactic. - -\ssr{} includes a little language of patterns to select subterms in tactics -or tacticals where it matters. Its most notable application -is in the \ssrC{rewrite} tactic, where patterns are used to specify -where the rewriting step has to take place. - -Finally, \ssr{} supports so-called reflection steps, typically -allowing to switch back and forth between the computational view and -logical view of a concept. - -To conclude it is worth mentioning that \ssr{} tactics -can be mixed with non \ssr{} tactics in the same proof, -or in the same Ltac expression. The few exceptions -to this statement are described in section~\ref{sec:compat}. - -\iffalse -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection*{How to read this documentation} - -The syntax of the tactics is presented as follows: -\begin{itemize} -\item \ssrC{terminals} are in typewriter font and \ssrN{non terminals} are - between angle brackets. -\item Optional parts of the grammar are surrounded by \optional{ } - brackets. These should not be confused with verbatim brackets - \ssrC{[ ]}, which are delimiters in the \ssr{} syntax. -\item A vertical rule {\optsep} indicates an alternative in the syntax, and - should not be confused with a - verbatim vertical rule between verbatim brackets \ssrC{[ | ]}. -\item A non empty list of non terminals (at least one item should be - present) is represented by \ssrN{non terminals}$^+$. A possibly empty - one is represented by \ssrN{non terminals}$^*$. -\item In a non empty list of non terminals, items are separated by blanks. 
-\end{itemize}
-\fi
-
-% Hevea has no colors
-\ifhevea \else
-\noindent We follow the default color scheme of the \ssr{} mode for
-ProofGeneral provided in the distribution:
-
-\centerline{
-\textcolor{dkblue}{\texttt{tactic}} or \textcolor{dkviolet}{\tt
- Command} or \textcolor{dkgreen}{\tt keyword} or
-\textcolor{dkpink}{\tt tactical}}
-
-\noindent Closing tactics/tacticals like \ssrC{exact} or \ssrC{by} (see section
-\ref{ssec:termin}) are in red.
-\fi
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection*{Acknowledgments}
-The authors would like to thank Frédéric Blanqui, François Pottier
-and Laurence Rideau for their comments and suggestions.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\newpage\section{Usage}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection{Getting started}\label{sec:files}
-To be available, the tactics presented in this manual need the
-following minimal set of libraries to be loaded: {\tt ssreflect.v}, {\tt
-ssrfun.v} and {\tt ssrbool.v}. Moreover, these tactics come with a
-methodology specific to the authors of Ssreflect and which requires a
-few options to be set in a different way than in their default
-way. All in all, this corresponds to working in the following context:
-
-\begin{lstlisting}
- From Coq Require Import ssreflect ssrfun ssrbool.
- Set Implicit Arguments.
- Unset Strict Implicit.
- Unset Printing Implicit Defensive.
-\end{lstlisting}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection{Compatibility issues}\label{sec:compat}
-Requiring the above modules creates an environment which
-is mostly compatible with the rest of \Coq{}, up to a few discrepancies:
-\begin{itemize}
-\item New keywords (\ssrC{is}) might clash with variable, constant,
-tactic or tactical names, or with quasi-keywords in tactic or
-vernacular notations. 
-\item New tactic(al)s names (\ssrC{last}, \ssrC{done}, \ssrC{have}, - \ssrC{suffices}, \ssrC{suff}, - \ssrC{without loss}, \ssrC{wlog}, \ssrC{congr}, \ssrC{unlock}) might clash - with user tactic names. -\item Identifiers with both leading and trailing \ssrC{_}, such as \ssrC{_x_}, -are reserved by \ssr{} and cannot appear in scripts. -\item The extensions to the \ssrC{rewrite} tactic are partly -incompatible with those available in current versions of \Coq{}; -in particular: -\ssrC{rewrite .. in (type of k)} or \\ \ssrC{rewrite .. in *} or any other -variant of \ssrC{rewrite} will not work, and the \ssr{} syntax and semantics for occurrence selection and -rule chaining is different. - -Use an explicit rewrite direction (\ssrC{rewrite <-} $\dots$ or \ssrC{rewrite ->} $\dots$) -to access the \Coq{} \ssrC{rewrite} tactic. -\item New symbols (\ssrC{//, /=, //=}) might clash with adjacent existing - symbols (e.g., '\ssrC{//}') instead of '\ssrC{/}''\ssrC{/}'). This can be avoided - by inserting white spaces. -\item New constant and theorem names might clash with the user -theory. This can be avoided by not importing all of \ssr{}: -\begin{lstlisting} - From Coq Require ssreflect. - Import ssreflect.SsrSyntax. -\end{lstlisting} -Note that the full syntax of \ssr{}'s {\tt rewrite} and reserved identifiers are -enabled only if the \ssrC{ssreflect} module has been required and if -\ssrC{SsrSyntax} has been imported. Thus a file that requires (without importing) - \ssrC{ssreflect} and imports \ssrC{SsrSyntax}, can be -required and imported without automatically enabling \ssr{}'s -extended rewrite syntax and reserved identifiers. -\item Some user notations (in particular, defining an infix ';') might -interfere with the "open term", parenthesis free, syntax of tactics -such as \ssrC{have}, \ssrC{set} and \ssrC{pose}. 
-\item The generalization of \ssrC{if} statements to non-Boolean -conditions is turned off by \ssr{}, because it is mostly subsumed by -\ssrC{Coercion} to \ssrC{bool} of the \ssrC{sum}XXX types (declared in -\ssrC{ssrfun.v}) -and the \ssrC{if} {\term} \ssrC{is} \ssrN{pattern} \ssrC{then} {\term} \ssrC{else} {\term} construct (see -\ref{ssec:patcond}). To use the generalized form, turn off the \ssr{} -Boolean \ssrC{if} notation using the command: -\begin{lstlisting} - Close Scope boolean_if_scope. -\end{lstlisting} -\item The following two options can be unset to disable the - incompatible \ssrC{rewrite} syntax and allow - reserved identifiers to appear in scripts. -\begin{lstlisting} - Unset SsrRewrite. - Unset SsrIdents. -\end{lstlisting} -\end{itemize} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Gallina extensions} - -Small-scale reflection makes an extensive use of the programming -subset of Gallina, \Coq{}'s logical specification language. This subset -is quite suited to the description of functions on representations, -because it closely follows the well-established design of the ML -programming language. The \ssr{} extension provides three additions -to Gallina, for pattern assignment, pattern testing, and polymorphism; -these mitigate minor but annoying discrepancies between Gallina and ML. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Pattern assignment}\label{ssec:patass} -The \ssr{} extension provides the following construct for -irrefutable pattern matching, that is, destructuring assignment: - -\ssrC{let: } \ssrN{pattern} \ssrC{:=} \ssrN[1]{term} \ssrC{in} \ssrN[2]{term} - -Note the colon `\ssrC{:}' after the \ssrC{let} keyword, which avoids any -ambiguity with a function -definition or \Coq{}'s basic destructuring \ssrC{let}. 
The \ssrC{let:} -construct differs from the latter in that -\begin{itemize} -\item The pattern can be nested (deep pattern matching), in - particular, this allows expression of the form: -\begin{lstlisting} - let: exist (x, y) p_xy := Hp in ... -\end{lstlisting} -\item The destructured constructor is explicitly given in the - pattern, and is used for type inference, e.g., -\begin{lstlisting} - Let f u := let: (m, n) := u in m + n. -\end{lstlisting} -using a colon \ssrC{let:}, infers \ssrC{f : nat * nat -> nat}, whereas -\begin{lstlisting} - Let f u := let (m, n) := u in m + n. -\end{lstlisting} -with a usual \ssrC{let}, requires an extra type annotation. -\end{itemize} -The \ssrC{let:} construct is just (more legible) notation for the primitive Gallina expression - -\begin{center} -\ssrC{match} \ssrN[1]{term} \ssrC{with} \ssrN{pattern} \ssrC{=>} \ssrN[2]{term} \ssrC{end} -\end{center} - -The \ssr{} destructuring assignment supports all the dependent match -annotations; the full syntax is - -\begin{center} -\ssrC{let:} \ssrN[1]{pattern} \ssrC{as} \ssrN{ident} \ssrC{in} \ssrN[2]{pattern} \ssrC{:=} \ssrN[1]{term} \ssrC{return} \ssrN[2]{term} \ssrC{in} \ssrN[3]{term} -\end{center} - -where \ssrN[2]{pattern} is a \emph{type} pattern and \ssrN[1]{term} and -\ssrN[2]{term} are types. - -When the \ssrC{as} and \ssrC{return} are both present, then \ssrN{ident} is bound -in both the type \ssrN[2]{term} and the expression \ssrN[3]{term}; -variables in the optional type pattern \ssrN[2]{pattern} are -bound only in the type \ssrN[2]{term}, and other variables in \ssrN[1]{pattern} are -bound only in the expression \ssrN[3]{term}, however. 
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Pattern conditional}\label{ssec:patcond} -The following construct can be used for a refutable pattern matching, -that is, pattern testing: - -\begin{center} -\ssrC{if}\ \ssrN[1]{term} \ssrC{is} \ssrN[1]{pattern} \ssrC{then} \ssrN[2]{term} \ssrC{else} \ssrN[3]{term} -\end{center} - -Although this construct is not strictly ML (it does exits in variants -such as the pattern calculus or the $\rho$-calculus), it turns out to be -very convenient for writing functions on representations, -because most such functions manipulate simple datatypes such as Peano -integers, options, -lists, or binary trees, and the pattern conditional above is almost -always the right construct -for analyzing such simple types. For example, the \ssrC{null} and -\ssrC{all} list function(al)s can be defined as follows: -\begin{lstlisting} - Variable d: Set. - Fixpoint |*null*| (s : list d) := if s is nil then true else false. - Variable a : d -> bool. - Fixpoint |*all*| (s : list d) : bool := - if s is cons x s' then a x && all s' else true. -\end{lstlisting} - -The pattern conditional also provides a notation for destructuring -assignment with a refutable pattern, adapted to the pure functional -setting of Gallina, which lacks a \\\texttt{Match\_Failure} exception. - -Like \ssrC{let:} above, the \ssrC{if}$\dots$\ssrC{is} construct is just (more legible) -notation for the primitive Gallina expression: - -\begin{center} -\ssrC{match} \ssrN[1]{term} \ssrC{with} \ssrN{pattern} \ssrC{=>} \ssrN[2]{term} \ssrC{| _ =>} \ssrN[2]{term} \ssrC{end} -\end{center} - -Similarly, it will always be displayed as the expansion of this form -in terms of primitive \ssrC{match} expressions (where the default -expression $\ssrN[3]{term}$ may be replicated). 
- - -Explicit pattern testing also largely subsumes the generalization of -the \ssrC{if} construct to all binary datatypes; compare: - -\begin{center} -\ssrC{if} {\term} \ssrC{is inl _ then} \ssrN[l]{term} \ssrC{else} \ssrN[r]{term} -\end{center} - -and: - -\begin{center} -\ssrC{if} {\term} \ssrC{then} \ssrN[l]{term} \ssrC{else} \ssrN[r]{term} -\end{center} - -The latter appears to be marginally shorter, but it is quite -ambiguous, and indeed often -requires an explicit annotation term : \ssrC{\{_\}+\{_\}} to type-check, -which evens the character count. - -Therefore, \ssr{} restricts by default the condition of a plain \ssrC{if} -construct to the standard \ssrC{bool} type; this avoids spurious type -annotations, e.g., in: -\begin{lstlisting} - Definition |*orb*| b1 b2 := if b1 then true else b2. -\end{lstlisting} -As pointed out in section~\ref{sec:compat}, this restriction can be removed with -the command: -\begin{lstlisting} - Close Scope boolean_if_scope. -\end{lstlisting} -Like \ssrC{let:} above, the \ssrC{if} {\term} \ssrC{is} \ssrN{pattern} -\ssrC{else} {\term} construct -supports the dependent \ssrC{match} annotations: - -\begin{center} -\ssrC{if} \ssrN[1]{term} \ssrC{is} \ssrN[1]{pattern} \ssrC{as} \ssrN{ident} \ssrC{in} \ssrN[2]{pattern} \ssrC{return} \ssrN[2]{term} \ssrC{then} \ssrN[3]{term} \ssrC{else} \ssrN[4]{term} -\end{center} - -As in \ssrC{let:} the variable \ssrN{ident} (and those in -the type pattern \ssrN[2]{pattern}) are bound in \ssrN[2]{term}; \ssrN{ident} is -also bound in \ssrN[3]{term} (but not in \ssrN[4]{term}), while the -variables in \ssrN[1]{pattern} are bound only in \ssrN[3]{term}. - -\noindent -Another variant allows to treat the else case first: - -\begin{center} -\ssrC{if} \ssrN[1]{term} \ssrC{isn't} \ssrN[1]{pattern} \ssrC{then} \ssrN[2]{term} \ssrC{else} \ssrN[3]{term} -\end{center} - -Note that \ssrN[1]{pattern} eventually binds variables in \ssrN[3]{term} -and not \ssrN[2]{term}. 
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Parametric polymorphism}\label{ssec:parampoly} - -Unlike ML, polymorphism in core Gallina is explicit: the type -parameters of polymorphic functions must be declared explicitly, and -supplied at each point of use. However, \Coq{} provides two features -to suppress redundant parameters: -\begin{itemize} -\item Sections are used to provide (possibly implicit) parameters for - a set of definitions. -\item Implicit arguments declarations are used to tell \Coq{} to use - type inference to deduce some parameters from the context at each - point of call. -\end{itemize} -The combination of these features provides a fairly good emulation of ML-style -polymorphism, but unfortunately this emulation breaks down for -higher-order programming. Implicit arguments are indeed not inferred -at all points of use, but only at -points of call, leading to expressions such as -\begin{lstlisting} - Definition |*all_null*| (s : list T) := all (@null T) s. -\end{lstlisting} -Unfortunately, such higher-order expressions are quite frequent in -representation functions, especially those which use \Coq{}'s -\ssrC{Structure}s to emulate Haskell type classes. - -Therefore, \ssr{} provides a variant of \Coq{}'s implicit argument -declaration, which causes \Coq{} to fill in some implicit parameters -at each point of use, e.g., the above definition can be written: -\begin{lstlisting} - Definition |*all_null*| (s : list d) := all null s. -\end{lstlisting} -Better yet, it can be omitted entirely, since \ssrC{all_null s} isn't -much of an improvement over \ssrC{all null s}. - -The syntax of the new declaration is - -\begin{center} -\ssrC{Prenex Implicits} \ssrN{ident}$^+$. -\end{center} - -Let us denote $_1 \dots c_n$ the list of identifiers given to a -\ssrC{Prenex Implicits} command. 
-The command checks that each $c_i$ is the name of a functional -constant, whose implicit arguments are prenex, i.e., the first $n_i > -0$ arguments of $c_i$ are implicit; then it assigns -\ssrC{Maximal Implicit} status to these arguments. - -As these prenex implicit arguments are ubiquitous and have often large -display strings, it is strongly recommended to change the default -display settings of \Coq{} so that they are not printed (except after a -\ssrC{Set Printing All} command). -All \ssr{} library files thus start with the incantation -\begin{lstlisting} - Set Implicit Arguments. - Unset Strict Implicit. - Unset Printing Implicit Defensive. -\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Anonymous arguments} - -When in a definition, the type of a certain argument is mandatory, but -not its name, one usually use ``arrow'' abstractions for prenex -arguments, or the \ssrC{(_ : }{\term}\ssrC{)} syntax for inner arguments. -In \ssr{}, the latter can be replaced by the open syntax `\ssrC{of\ }{\term}' -or (equivalently) `\ssrC{& }{\term}', which are both syntactically -equivalent to a \ssrC{(_ : }{\term}\ssrC{)} expression. - -For instance, the usual two-contrsuctor polymorphic type \ssrC{list}, -i.e. the one of the -standard {\tt List} library, can be defined by the following -declaration: -\begin{lstlisting} - Inductive list (A : Type) : Type := nil | cons of A & list A. -\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Wildcards}\label{ssec:wild} - -The terms passed as arguments -to \ssr{} tactics can contain \emph{holes}, materialized by wildcards -\ssrC{_}. -Since \ssr{} allows a more powerful form of type inference for these -arguments, it enhances the possibilities of using such wildcards. -These holes are in particular used as a convenient shorthand for -abstractions, especially in local definitions or type expressions. 
- -Wildcards may be interpreted as abstractions (see for example sections -\ref{ssec:pose} and \ref{ssec:struct}), or their content can be -inferred from the whole -context of the goal (see for example section \ref{ssec:set}). -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Definitions} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Definitions}\label{ssec:pose} -\idx{pose \dots{} := \dots{}} -\idx{pose fix \dots{} := \dots{}} -\idx{pose cofix \dots{} := \dots{}} - -The \ssrC{pose} tactic allows to add a defined constant to a -proof context. \ssr{} generalizes this tactic in several ways. -In particular, the \ssr{} \ssrC{pose} tactic supports \emph{open syntax}: -the body of -the definition does not need surrounding parentheses. For instance: -\begin{lstlisting} - pose t := x + y. -\end{lstlisting} -is a valid tactic expression. - -The \ssrC{pose} tactic is also improved for the -local definition of higher order terms. -Local definitions of functions can use the same syntax as -global ones. The tactic: -\begin{lstlisting} - pose f x y := x + y. -\end{lstlisting} -adds to the context the defined constant: -\begin{lstlisting} - f := fun x y : nat => x + y : nat -> nat -> nat -\end{lstlisting} - -The \ssr{} \ssrC{pose} tactic also supports (co)fixpoints, -by providing the local counterpart of the -\ssrC{Fixpoint f := $\dots$ } and \ssrC{CoFixpoint f := $\dots$ } constructs. -For instance, the following tactic: -\begin{lstlisting} - pose fix f (x y : nat) {struct x} : nat := - if x is S p then S (f p y) else 0. -\end{lstlisting} -defines a local fixpoint \ssrC{f}, which mimics the standard \ssrC{plus} -operation on natural numbers. - -Similarly, local cofixpoints can be defined by a tactic of the form: -\begin{lstlisting} - pose cofix f (arg : T) ... -\end{lstlisting} - -The possibility to include wildcards in the body of the definitions - offers a smooth -way of defining local abstractions. 
The type of ``holes'' is -guessed by type inference, and the holes are abstracted. -For instance the tactic: -\begin{lstlisting} - pose f := _ + 1. -\end{lstlisting} -is shorthand for: -\begin{lstlisting} - pose f n := n + 1. -\end{lstlisting} - -When the local definition of a function involves both arguments and -holes, hole abstractions appear first. For instance, the -tactic: -\begin{lstlisting} - pose f x := x + _. -\end{lstlisting} -is shorthand for: -\begin{lstlisting} - pose f n x := x + n. -\end{lstlisting} - - -The interaction of the \ssrC{pose} tactic with the interpretation of -implicit arguments results in a powerful and concise syntax for local -definitions involving dependent types. -For instance, the tactic: -\begin{lstlisting} - pose f x y := (x, y). -\end{lstlisting} -adds to the context the local definition: -\begin{lstlisting} - pose f (Tx Ty : Type) (x : Tx) (y : Ty) := (x, y). -\end{lstlisting} -The generalization of wildcards makes the use of the \ssrC{pose} tactic -resemble ML-like definitions of polymorphic functions. - -% The use of \ssrC{Prenex Implicits} declarations (see section -% \ref{ssec:parampoly}), makes this feature specially convenient. -% Note that this combines with the interpretation of wildcards, and that -% it is possible to define: -% \begin{lstlisting} -% pose g x y : prod _ nat := (x, y). -% \end{lstlisting} -% which is equivalent to: -% \begin{lstlisting} -% pose g x (y : nat) := (x, y). -% \end{lstlisting} - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Abbreviations}\label{ssec:set} -\idx{set \dots{} := \dots{}} - - -The \ssr{} \ssrC{set} tactic performs abbreviations: it introduces a -defined constant for a subterm appearing in the goal and/or in the -context. 
- -\ssr{} extends the \ssrC{set} tactic by supplying: -\begin{itemize} -\item an open syntax, similarly to the \ssrC{pose} tactic; -\item a more aggressive matching algorithm; -\item an improved interpretation of wildcards, taking advantage of the - matching algorithm; -\item an improved occurrence selection mechanism allowing to abstract only - selected occurrences of a term. -\end{itemize} - -The general syntax of this tactic is -\begin{center} -\ssrC{set} \ssrN{ident} \optional{\ssrC{:} \ssrN[1]{term}} \ssrC{:=} \optional{\ssrN{occ-switch}} \ssrN[2]{term} -\end{center} -\begin{center} -\ssrN{occ-switch} ::= \ssrC{\{}[\ssrC{+}|\ssrC{-}] {\naturalnumber}$^*$ \ssrC{\}} -\end{center} - - -where: - -\begin{itemize} -\item \ssrN{ident} is a fresh identifier chosen by the user. -\item \ssrN[1]{term} is -an optional type annotation. The type annotation \ssrN[1]{term} can be -given in open syntax (no surrounding parentheses). If no \ssrN{occ-switch} -(described hereafter) is present, it is also -the case for \ssrN[2]{term}. -On the other hand, in presence of \ssrN{occ-switch}, parentheses -surrounding \ssrN[2]{term} are mandatory. -\item In the occurrence switch \ssrN{occ-switch}, if the first element - of the list is a {\naturalnumber}, this element should be a number, and not - an Ltac variable. The empty list \ssrC{\{\}} is not interpreted as a - valid occurrence switch. -\end{itemize} -% For example, the script: -% \begin{lstlisting} -% Goal forall (f : nat -> nat)(x y : nat), f x + f x = f x. -% move=> f x y. -% \end{lstlisting} - -The tactic: -\begin{lstlisting} - set t := f _. -\end{lstlisting} -transforms the goal \ssrC{f x + f x = f x} into \ssrC{t + t = t}, adding -\ssrC{t := f x} to the context, and the tactic: -\begin{lstlisting} - set t := {2}(f _). -\end{lstlisting} -transforms it into \ssrC{f x + t = f x}, adding \ssrC{t := f x} to the context. 
- -The type annotation \ssrN[1]{term} may -contain wildcards, which will be filled with the appropriate value by -the matching process. - -The tactic first tries to find a subterm of the goal matching -\ssrN[2]{term} (and its type \ssrN[1]{term}), -and stops at the first subterm it finds. Then the occurrences -of this subterm selected by the optional \ssrN{occ-switch} are replaced -by \ssrN{ident} and a definition \ssrN{ident} \ssrC{:=} {\term} is added to -the context. If no \ssrN{occ-switch} is present, then all the -occurrences are abstracted. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Matching} - -The matching algorithm compares a pattern \textit{term} - with a subterm of the goal by comparing their heads -and then pairwise unifying their arguments (modulo conversion). Head -symbols match under the following conditions: - -\begin{itemize} -\item If the head of \textit{term} is a constant, then it - should be syntactically equal to the head symbol of the subterm. -\item If this head is a projection of a canonical structure, - then canonical structure equations are used for the matching. -\item If the head of \textit{term} is \emph{not} a constant, the - subterm should have the same structure ($\lambda$ abstraction, - \ssrC{let}$\dots$\ssrC{in} structure \dots). -\item If the head of \textit{term} is a hole, the subterm should have - at least as many arguments as \textit{term}. For instance the tactic: -\begin{lstlisting} - set t := _ x. -\end{lstlisting} -transforms the goal \ssrL-x + y = z- into \ssrC{t y = z} and adds -\ssrC{t := plus x : nat -> nat} to the context. - -\item In the special case where \textit{term} is of the form - \ssrC{(let f := }$t_0$ \ssrC{in f) }$t_1\dots t_n$, - then the pattern \textit{term} is treated -as \ssrC{(_ }$t_1\dots t_n$\ssrC{)}. 
For each subterm in -the goal having the form $(A\ u_1\dots u_{n'})$ with $n' \geq n$, the -matching algorithm successively tries to find the largest -partial application $(A\ u_1\dots u_{i'})$ convertible to the head -$t_0$ of \textit{term}. For instance the following tactic: -\begin{lstlisting} - set t := (let g y z := y.+1 + z in g) 2. -\end{lstlisting} -transforms the goal -\begin{lstlisting} - (let f x y z := x + y + z in f 1) 2 3 = 6. -\end{lstlisting} -into \ssrC{t 3 = 6} and adds the local definition of \ssrC{t} to the -context. -\end{itemize} - -Moreover: -\begin{itemize} -\item Multiple holes in \textit{term} are treated as independent - placeholders. For instance, the tactic: -\begin{lstlisting} - set t := _ + _. -\end{lstlisting} -transforms the goal \ssrC{x + y = z} into \ssrC{t = z} and pushes -\ssrC{t := x + y : nat} in the context. -\item The type of the subterm matched should fit the type - (possibly casted by some type annotations) of the pattern - \textit{term}. -\item The replacement of the subterm found by the instantiated pattern - should not capture variables, hence the following script: -\begin{lstlisting} - Goal forall x : nat, x + 1 = 0. - set u := _ + 1. -\end{lstlisting} -raises an error message, since \ssrC{x} is bound in the goal. -\item Typeclass inference should fill in any residual hole, but -matching should never assign a value to a global existential variable. - -\end{itemize} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Occurrence selection}\label{sssec:occselect} - -\ssr{} provides a generic syntax for the selection of occurrences by -their position indexes. These \emph{occurrence switches} are shared by -all -\ssr{} tactics which require control on subterm selection like rewriting, -generalization, \dots - -An \emph{occurrence switch} can be: -\begin{itemize} -\item A list \ssrC{\{} {\naturalnumber}$^*$ \ssrC{\}} of occurrences affected by the - tactic. 
-For instance, the tactic: -\begin{lstlisting} - set x := {1 3}(f 2). -\end{lstlisting} -transforms the goal \ssrC{f 2 + f 8 = f 2 + f 2} into -\ssrC{x + f 8 = f 2 + x}, and adds the abbreviation -\ssrC{x := f 2} in the -context. Notice that some occurrences of a -given term may be hidden to the user, for example because of a -notation. The vernacular \ssrC{$\texttt{\textcolor{dkviolet}{Set }}$ - Printing All} command displays all -these hidden occurrences and should be used to find the correct -coding of the occurrences to be selected\footnote{Unfortunately, -even after a call to the Set Printing All command, some occurrences are -still not displayed to the user, essentially the ones possibly hidden -in the predicate of a dependent match structure.}. For instance, the -following script: -\begin{lstlisting} - Notation "a < b":= (le (S a) b). - Goal forall x y, x < y -> S x < S y. - intros x y; set t := S x. -\end{lstlisting} -generates the goal -\ssrC{t <= y -> t < S y} since \ssrC{x < y} is now a notation for -\ssrC{S x <= y}. -\item A list \ssrC{\{}{\naturalnumber}$^+$\ssrC{\}}. This is equivalent to - \ssrC{\{} {\naturalnumber}$^+$ \ssrC{\}} but the list should start with a number, and - not with an Ltac variable. -\item A list \ssrC{\{}{\naturalnumber}$^*$\ssrC{\}} of occurrences \emph{not} to be - affected by the tactic. For instance, the tactic: -\begin{lstlisting} - set x := {-2}(f 2). -\end{lstlisting} -behaves like -\begin{lstlisting} - set x := {1 3}(f 2). -\end{lstlisting} -on the goal \ssrL-f 2 + f 8 = f 2 + f 2- which has three occurrences of -the the term \ssrC{f 2} -\item In particular, the switch \ssrC{\{+\}} selects \emph{all} the - occurrences. This switch is useful to turn - off the default behavior of a tactic which automatically clears - some assumptions (see section \ref{ssec:discharge} for instance). -\item The switch \ssrC{\{-\}} imposes that \emph{no} occurrences of the - term should be affected by the tactic. 
The tactic: -\begin{lstlisting} - set x := {-}(f 2). -\end{lstlisting} -leaves the goal unchanged and adds the definition \ssrC{x := f 2} to the -context. This kind of tactic may be used to take advantage of the -power of the matching algorithm in a local definition, instead of -copying large terms by hand. -\end{itemize} - - -It is important to remember that matching \emph{precedes} occurrence -selection, hence the tactic: -\begin{lstlisting} - set a := {2}(_ + _). -\end{lstlisting} -transforms the goal \ssrC{x + y = x + y + z} into \ssrC{x + y = a + z} -and fails on the goal \\ -\ssrC{(x + y) + (z + z) = z + z} with the error message: -\begin{lstlisting} - User error: only 1 < 2 occurrence of (x + y + (z + z)) -\end{lstlisting} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Localization}\label{ssec:loc} - - -It is possible to define an abbreviation for a term appearing in the -context of a goal thanks to the \ssrC{in} tactical. - -A tactic of the form: - -\begin{center} - \ssrC{set x :=} {\term} \ssrC{in} \ssrN[1]{fact}\ssrC{...}\ssrN[n]{fact}. -\end{center} - -introduces a defined constant called \ssrC{x} in the context, and folds -it in the facts \textit{fact$_1 \dots$ fact$_n$} -The body of \ssrC{x} is the first subterm matching \textit{term} in -\textit{fact$_1 \dots$ fact$_n$}. - -A tactic of the form: - -\begin{center} - \ssrC{set x :=} {\term} \ssrC{in} \ssrN[1]{fact}\ssrC{...}\ssrN[n]{fact} \ssrC{*.} -\end{center} - -matches {\term} and then folds \ssrC{x} similarly in -\textit{fact$_1 \dots$ fact$_n$}, but also folds \ssrC{x} in the goal. - -A goal \ssrL-x + t = 4-, whose context contains \ssrC{Hx : x = 3}, is left -unchanged by the tactic: -\begin{lstlisting} - set z := 3 in Hx. -\end{lstlisting} -but the context is extended with the definition \ssrC{z := 3} and \ssrC{Hx} becomes -\ssrC{Hx : x = z}. -On the same goal and context, the tactic: -\begin{lstlisting} - set z := 3 in Hx *. 
-\end{lstlisting} -will moreover change the goal into \ssrL-x + t = S z-. Indeed, remember -that \ssrC{4} is just a notation for \ssrC{(S 3)}. - -The use of the \ssrC{in} tactical is not limited to the localization of -abbreviations: for a complete description of the \ssrC{in} tactical, see -section \ref{ssec:profstack}. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Basic tactics}\label{sec:book} - - - -A sizable fraction of proof scripts consists of steps that do not -"prove" anything new, but instead perform menial bookkeeping tasks -such as selecting the names of constants and assumptions or splitting -conjuncts. Although they are logically trivial, bookkeeping steps are -extremely important because they define the structure of the data-flow -of a proof script. This is especially true for reflection-based -proofs, which often involve large numbers of constants and -assumptions. Good bookkeeping consists in always explicitly declaring -(i.e., naming) all new constants and assumptions in the script, and -systematically pruning irrelevant constants and assumptions in the -context. This is essential in the context of an interactive -development environment (IDE), because it facilitates navigating the -proof, allowing to instantly "jump back" to the point at which a -questionable assumption was added, and to find relevant assumptions by -browsing the pruned context. While novice or casual \Coq{} users may -find the automatic name selection feature convenient, the usage of -such a feature severely undermines the readability and maintainability -of proof scripts, much like automatic variable declaration in programming -languages. The \ssr{} tactics are therefore designed to support -precise bookkeeping and to eliminate name generation heuristics. -The bookkeeping features of \ssr{} are implemented as tacticals (or -pseudo-tacticals), shared across most \ssr{} tactics, and thus form -the foundation of the \ssr{} proof language. 
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Bookkeeping}\label{ssec:profstack} -\idx{move: \dots{}} -\idx{move=> \dots{}} -\idx{move: \dots{} => \dots{}} -\idx{\dots{} in \dots{}} - -During the course of a proof \Coq{} always present the user with -a \emph{sequent} whose general form is -\begin{displaymath}\begin{array}{l} -%\arrayrulecolor{dkviolet} -c_i\ \ssrC{:}\ T_i \\ -\dots\\ -d_j\ \ssrC{:=}\ e_j\ \ssrC{:}\ T_j \\ -\dots\\ -F_k\ \ssrC{:}\ P_k \\ -\dots \\[3pt] -\hline\hline\\[-8pt] -\ssrC{forall}\ \ssrC{(}x_\ell\ \ssrC{:}\ T_\ell\ssrC{)}\ \dots,\\ -\ssrC{let}\ y_m\ \ssrC{:=}\ b_m\ \ssrC{in}\ \dots\ \ssrC{in}\\ -P_n\ \ssrC{->}\ \dots\ \ssrC{->}\ C -\end{array}\end{displaymath} -The \emph{goal} to be proved appears below the double line; above the line is -the \emph{context} of the sequent, a set of declarations of -\emph{constants}~$c_i$, \emph{defined constants}~$d_i$, and -\emph{facts}~$F_k$ that can be used to prove the goal (usually, $T_i, -T_j\;:\;\ssrC{Type}$ and $P_k\;:\;\ssrC{Prop}$). The various kinds of -declarations can come in any order. The top part of the context -consists of declarations produced by the \ssrC{Section} commands -\ssrC{Variable}, \ssrC{Let}, and \ssrC{Hypothesis}. This \emph{section context} -is never affected by the \ssr{} tactics: they only operate on -the lower part --- the \emph{proof context}. -As in the figure above, the goal often decomposes into a series of -(universally) quantified \emph{variables} -$\ssrC{(}x_\ell\;\ssrC{:}\;T_\ell\ssrC{)}$, local \emph{definitions} -$\ssrC{let}\;y_m\:\ssrC{:=}\;b_m\;\ssrC{in}$, and \emph{assumptions} -$P_n\;\ssrC{->}$, and a \emph{conclusion}~$C$ (as in the context, variables, -definitions, and assumptions can appear in any order). The conclusion -is what actually needs to be proved --- the rest of the goal can be -seen as a part of the proof context that happens to be ``below the line''. 
- -However, although they are logically equivalent, there are fundamental -differences between constants and facts on the one hand, and variables -and assumptions on the others. Constants and facts are -\emph{unordered}, but \emph{named} explicitly in the proof text; -variables and assumptions are \emph{ordered}, but \emph{unnamed}: the -display names of variables may change at any time because of -$\alpha$-conversion. - -Similarly, basic deductive steps such as \ssrC{apply} can only operate on -the goal because the Gallina terms that control their action (e.g., -the type of the lemma used by \ssrC{apply}) only provide unnamed bound -variables.\footnote{Thus scripts that depend on bound variable names, e.g., -via \ssrC{intros} or \ssrC{with}, are inherently fragile.} Since the proof -script can only refer directly to the context, it must constantly -shift declarations from the goal to the context and conversely in -between deductive steps. - -In \ssr{} these moves are performed by two \emph{tacticals} `\ssrC{=>}' -and `\ssrC{:}', so that the bookkeeping required by a deductive step can -be directly associated to that step, and that tactics in an \ssr{} -script correspond to actual logical steps in the proof rather than -merely shuffle facts. Still, some isolated bookkeeping is unavoidable, -such as naming variables and assumptions at the beginning of a proof. -\ssr{} provides a specific \ssrC{move} tactic for this purpose. - -Now \ssrC{move} does essentially nothing: it is mostly a placeholder for -`\ssrC{=>}' and `\ssrC{:}'. The `\ssrC{=>}' tactical moves variables, local -definitions, and assumptions to the context, while the `\ssrC{:}' -tactical moves facts and constants to the goal. For example, the proof -of\footnote{The name \ssrC{subnK} reads as -``right cancellation rule for \ssrC{nat} subtraction''.} -\begin{lstlisting} - Lemma |*subnK*| : forall m n, n <= m -> m - n + n = m. 
-\end{lstlisting}\noindent -might start with -\begin{lstlisting} - move=> m n le_n_m. -\end{lstlisting} -where \ssrC{move} does nothing, but \ssrL|=> m n le_m_n| changes the -variables and assumption of the goal in the constants \ssrC{m n : nat} -and the fact \ssrL|le_n_m : n <= m|, thus exposing the conclusion\\ - \ssrC{m - n + n = m}. - -The `\ssrC{:}' tactical is the converse of `\ssrC{=>}': it removes facts -and constants from the context by turning them into variables and assumptions. -Thus -\begin{lstlisting} - move: m le_n_m. -\end{lstlisting} -turns back \ssrC{m} and \ssrL|le_m_n| into a variable and an assumption, removing -them from the proof context, and changing the goal to -\begin{lstlisting} - forall m, n <= m -> m - n + n = m. -\end{lstlisting} -which can be proved by induction on \ssrC{n} using \ssrC{elim: n}. - -\noindent -Because they are tacticals, `\ssrC{:}' and `\ssrC{=>}' can be combined, as in -\begin{lstlisting} - move: m le_n_m => p le_n_p. -\end{lstlisting} -simultaneously renames \ssrL|m| and \ssrL|le_m_n| into \ssrL|p| and \ssrL|le_n_p|, -respectively, by first turning them into unnamed variables, then -turning these variables back into constants and facts. - -Furthermore, \ssr{} redefines the basic \Coq{} tactics \ssrC{case}, -\ssrC{elim}, and \ssrC{apply} so that they can take better advantage of -'\ssrC{:}' and `\ssrC{=>}'. In there \ssr{} variants, these tactic operate -on the first variable or constant of the goal and they do not use or -change the proof context. The `\ssrC{:}' tactical is used to operate on -an element in the context. For instance the proof of \ssrC{subnK} could -continue with -\begin{lstlisting} - elim: n. -\end{lstlisting} -instead of \ssrC{elim n}; this has the advantage of -removing \ssrC{n} from the context. 
Better yet, this \ssrC{elim} can be combined -with previous \ssrC{move} and with the branching version of the \ssrC{=>} tactical -(described in~\ref{ssec:intro}), -to encapsulate the inductive step in a single command: -\begin{lstlisting} - elim: n m le_n_m => [|n IHn] m => [_ | lt_n_m]. -\end{lstlisting} -which breaks down the proof into two subgoals, -\begin{lstlisting} - m - 0 + 0 = m -\end{lstlisting} -given \ssrC{m : nat}, and -\begin{lstlisting} - m - S n + S n = m -\end{lstlisting} -given \ssrC{m n : nat}, \ssrL|lt_n_m : S n <= m|, and -\begin{lstlisting} - IHn : forall m, n <= m -> m - n + n = m. -\end{lstlisting} -The '\ssrC{:}' and `\ssrC{=>}' tacticals can be explained very simply -if one views the goal as a stack of variables and assumptions piled -on a conclusion: -\begin{itemize} -\item {\tac} \ssrC{:} $a$ $b$ $c$ pushes the context constants $a$, $b$, $c$ -as goal variables \emph{before} performing {\tac}. -\item {\tac} \ssrC{=>} $a$ $b$ $c$ pops the top three goal variables as -context constants $a$, $b$, $c$, \emph{after} {\tac} -has been performed. -\end{itemize} -These pushes and pops do not need to balance out as in the examples above, -so -\begin{lstlisting} - move: m le_n_m => p. -\end{lstlisting} -would rename \ssrC{m} into \ssrC{p}, but leave an extra assumption \ssrC{n <= p} -in the goal. - -Basic tactics like \ssrC{apply} and \ssrC{elim} can also be used without the -'\ssrC{:}' tactical: for example we can directly start a proof of \ssrC{subnK} -by induction on the top variable \ssrC{m} with -\begin{lstlisting} - elim=> [|m IHm] n le_n. 
-\end{lstlisting} - -\noindent -The general form of the localization tactical \ssrC{in} is also best -explained in terms of the goal stack: - -\begin{center} - {\tac} \ssrC{in a H1 H2 *.} -\end{center} - -is basically equivalent to - -\begin{center} - \ssrC{move: a H1 H2;} {\tac} \ssrC{=> a H1 H2.} -\end{center} - -with two differences: the \ssrC{in} tactical will preserve the body of \ssrC{a} if -\ssrC{a} is a defined constant, and if the `\ssrC{*}' is omitted it -will use a temporary abbreviation to hide the statement of the goal -from \ssrC{/*tactic*/}. - -The general form of the \ssrC{in} tactical can be used directly with -the \ssrC{move}, \ssrC{case} and \ssrC{elim} tactics, so that one can write -\begin{lstlisting} - elim: n => [|n IHn] in m le_n_m *. -\end{lstlisting} -instead of -\begin{lstlisting} - elim: n m le_n_m => [|n IHn] m le_n_m. -\end{lstlisting} -This is quite useful for inductive proofs that involve many facts. - -\noindent See section \ref{ssec:gloc} for the general syntax and presentation -of the \ssrC{in} tactical. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{The defective tactics}\label{ssec:basictac} - -In this section we briefly present the three basic tactics performing -context manipulations and the main backward chaining tool. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{move} tactic.}\label{sssec:move} -\idx{move} - -The \ssrC{move} tactic, in its -defective form, behaves like the primitive \ssrC{hnf} \Coq{} tactic. For -example, such a defective: -\begin{lstlisting} - move. -\end{lstlisting} -exposes the first assumption in the goal, i.e. its changes the goal -\ssrC{\~ False} into \ssrC{False -> False}. - -More precisely, the \ssrC{move} tactic inspects the goal and does nothing -(\ssrC{idtac}) if an introduction step is possible, i.e. if the -goal is a product or a \ssrC{let}$\dots$\ssrC{in}, and performs \ssrC{hnf} -otherwise. 
- -Of course this tactic is most often used in combination with the -bookkeeping tacticals (see section \ref{ssec:intro} and -\ref{ssec:discharge}). These combinations mostly subsume the \ssrC{intros}, -\ssrC{generalize}, \ssrC{revert}, \ssrC{rename}, \ssrC{clear} and -\textcolor{dkblue}{\texttt{pattern}} tactics. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{case} tactic.} -\idx{case: \dots{}} - -The \ssrC{case} tactic performs -\emph{primitive case analysis} on (co)inductive types; specifically, -it destructs the top variable or assumption of the goal, -exposing its constructor(s) and its arguments, as well as setting the value -of its type family indices if it belongs to a type family -(see section \ref{ssec:typefam}). - -The \ssr{} \ssrC{case} tactic has a special behavior on -equalities. -If the top assumption of the goal is an equality, the \ssrC{case} tactic -``destructs'' it as a set of equalities between the constructor -arguments of its left and right hand sides, as per the -tactic \ssrC{injection}. -For example, \ssrC{case} changes the goal -\begin{lstlisting} - (x, y) = (1, 2) -> G. -\end{lstlisting} -into -\begin{lstlisting} - x = 1 -> y = 2 -> G. -\end{lstlisting} - -Note also that the case of \ssr{} performs \ssrC{False} -elimination, even if no branch is generated by this case operation. -Hence the command: -\begin{lstlisting} - case. -\end{lstlisting} -on a goal of the form \ssrC{False -> G} will succeed and prove the goal. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{elim} tactic.} -\idx{elim: \dots{}} - -The \ssrC{elim} tactic performs -inductive elimination on inductive types. -The defective: -\begin{lstlisting} - elim. -\end{lstlisting} -tactic performs inductive elimination on a goal whose top assumption -has an inductive type. 
For example on goal of the form: -\begin{lstlisting} - forall n : nat, m <= n -\end{lstlisting} - in a context containing \ssrC{m : nat}, the -\begin{lstlisting} - elim. -\end{lstlisting} -tactic produces two goals, -\begin{lstlisting} - m <= 0 -\end{lstlisting} -on one hand and -\begin{lstlisting} - forall n : nat, m <= n -> m <= S n -\end{lstlisting} -on the other hand. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{apply} tactic.}\label{sssec:apply} -\idx{apply: \dots{}} - -The \ssrC{apply} tactic is the main -backward chaining tactic of the proof system. It takes as argument any -\ssrC{/*term*/} and applies it to the goal. -Assumptions in the type of \ssrC{/*term*/} that don't directly match the -goal may generate one or more subgoals. - -In fact the \ssr{} tactic: -\begin{lstlisting} - apply. -\end{lstlisting} -is a synonym for: -\begin{lstlisting} - intro top; first [refine top | refine (top _) | refine (top _ _) | ...]; clear top. -\end{lstlisting} -where \ssrC{top} is fresh name, and the sequence of \ssrC{refine} tactics -tries to catch the appropriate number of wildcards to be inserted. -Note that this use of the \ssrC{refine} tactic implies that the tactic -tries to match the goal up to expansion of -constants and evaluation of subterms. - -\ssr{}'s \ssrC{apply} has a special behaviour on goals containing -existential metavariables of sort \ssrC{Prop}. Consider the -following example: -\begin{lstlisting} -Goal (forall y, 1 < y -> y < 2 -> exists x : { n | n < 3 }, proj1_sig x > 0). -move=> y y_gt1 y_lt2; apply: (ex_intro _ (exist _ y _)). - by apply: gt_trans _ y_lt2. -by move=> y_lt3; apply: lt_trans y_gt1. -\end{lstlisting} -Note that the last \ssrC{_} of the tactic \ssrC{apply: (ex_intro _ (exist _ y _))} -represents a proof that \ssrC{y < 3}. 
Instead of generating the following -goal -\begin{lstlisting} - 0 < (n:=3) (m:=y) ?54 -\end{lstlisting} -\noindent the system tries to prove \ssrC{y < 3} calling the \ssrC{trivial} -tactic. If it succeeds, let's say because the context contains -\ssrC{H : y < 3}, then the system generates the following goal: -\begin{lstlisting} - 0 < proj1_sig (exist (fun n => n < 3) y H -\end{lstlisting} -\noindent Otherwise the missing proof is considered to be irrelevant, and -is thus discharged generating the following goals: -\begin{lstlisting} - y < 3 - forall H : y < 3, proj1_sig (exist (fun n => n < 3) y H) -\end{lstlisting} -Last, the user can replace the \ssrC{trivial} tactic by defining -an Ltac expression named \ssrC{ssrautoprop}. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Discharge}\label{ssec:discharge} -\idx{\dots{} : \dots{}} - -The general syntax of the discharging tactical `\ssrC{:}' is: -\begin{center} - {\tac} \optional{\ssrN{ident}} \ssrC{:} \ssrN[1]{d-item} $\dots$ \ssrN[n]{d-item} \optional{\ssrN{clear-switch}} -\end{center} -where $n > 0$, and \ssrN{d-item} and \ssrN{clear-switch} are defined as -\begin{longtable}{rcl} -\ssrN{d-item} & ::= & \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} {\term} \\ -\ssrN{clear-switch}& ::=& \ssrC{\{} \ssrN[1]{ident}\, \ldots\, \ssrN[m]{ident} \ssrC{\}} -\end{longtable} -with the following requirements: -\begin{itemize} -\item {\tac} must be one of the four basic tactics described - in~\ref{ssec:basictac}, i.e., \ssrC{move}, \ssrC{case}, \ssrC{elim} or \ssrC{apply}, - the \ssrC{exact} tactic (section \ref{ssec:termin}), - the \ssrC{congr} tactic (section \ref{ssec:congr}), or the application - of the \emph{view} tactical `\ssrC{/}' (section \ref{ssec:assumpinterp}) - to one of \ssrC{move}, \ssrC{case}, or \ssrC{elim}. 
-\item The optional \ssrN{ident} specifies \emph{equation generation} - (section \ref{ssec:equations}), and is only allowed if {\tac} - is \ssrC{move}, \ssrC{case} or \ssrC{elim}, or the application of the - view tactical `\ssrC{/}' (section \ref{ssec:assumpinterp}) - to \ssrC{case} or \ssrC{elim}. -\item An \ssrN{occ-switch} selects occurrences of {\term}, - as in \ref{sssec:occselect}; \ssrN{occ-switch} is not allowed if - {\tac} is \ssrC{apply} or \ssrC{exact}. -\item A clear item \ssrN{clear-switch} specifies facts and constants to be - deleted from the proof context (as per the \ssrC{clear} tactic). -\end{itemize} -The `\ssrC{:}' tactical first \emph{discharges} all the \ssrN{d-item}s, -right to left, and then performs {\tac}, i.e., for each \ssrN{d-item}, -starting with $\ssrN[n]{d-item}$: -\begin{enumerate} -\item The \ssr{} matching algorithm described in section~\ref{ssec:set} - is used to find occurrences of {\term} in the goal, - after filling any holes `\ssrC{_}' in {\term}; however if {\tac} - is \ssrC{apply} or \ssrC{exact} a different matching algorithm, - described below, is used - \footnote{Also, a slightly different variant may be used for the first - \ssrN{d-item} of \ssrC{case} and \ssrC{elim}; see section~\ref{ssec:typefam}.}. -\item~\label{enum:gen} These occurrences are replaced by a new - variable; in particular, - if {\term} is a fact, this adds an assumption to the goal. -\item~\label{enum:clr} If {\term} is \emph{exactly} the name of a constant - or fact in the proof context, it is deleted from the context, - unless there is an \ssrN{occ-switch}. -\end{enumerate} -Finally, {\tac} is performed just after $\ssrN[1]{d-item}$ has been -generalized --- -that is, between steps \ref{enum:gen} and \ref{enum:clr} for $\ssrN[1]{d-item}$. -The names listed in the final \ssrN{clear-switch} (if it is present) -are cleared first, before $\ssrN[n]{d-item}$ is discharged. 
- -\noindent -Switches affect the discharging of a \ssrN{d-item} as follows: -\begin{itemize} -\item An \ssrN{occ-switch} restricts generalization (step~\ref{enum:gen}) - to a specific subset of the occurrences of {\term}, as per - \ref{sssec:occselect}, and prevents clearing (step~\ref{enum:clr}). -\item All the names specified by a \ssrN{clear-switch} are deleted from the - context in step~\ref{enum:clr}, possibly in addition to {\term}. -\end{itemize} -For example, the tactic: -\begin{lstlisting} - move: n {2}n (refl_equal n). -\end{lstlisting} -\begin{itemize} -\item first generalizes \ssrC{(refl_equal n : n = n)}; -\item then generalizes the second occurrence of \ssrC{n}. -\item finally generalizes all the other occurrences of \ssrC{n}, - and clears \ssrC{n} from the proof context - (assuming \ssrC{n} is a proof constant). -\end{itemize} -Therefore this tactic changes any goal \ssrC{G} into -\begin{lstlisting} - forall n n0 : nat, n = n0 -> G. -\end{lstlisting} -where the name \ssrC{n0} is picked by the \Coq{} display function, -and assuming \ssrC{n} appeared only in~\ssrC{G}. - -Finally, note that a discharge operation generalizes defined constants -as variables, and not as local definitions. To override this behavior, -prefix the name of the local definition with a \ssrC{@}, -like in \ssrC{move: @n}. - -This is in contrast with the behavior of the \ssrC{in} tactical (see section -\ref{ssec:gloc}), which preserves local definitions by default. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Clear rules} - -The clear step will fail if {\term} is a proof constant that -appears in other facts; in that case either the facts should be -cleared explicitly with a \ssrN{clear-switch}, or the clear step should be -disabled. The latter can be done by adding an \ssrN{occ-switch} or simply by -putting parentheses around {\term}: both -\begin{lstlisting} - move: (n). -\end{lstlisting} -and -\begin{lstlisting} - move: {+}n. 
-\end{lstlisting} -generalize \ssrC{n} without clearing \ssrC{n} from the proof context. - -The clear step will also fail if the \ssrN{clear-switch} contains a -\ssrN{ident} that is not in the \emph{proof} context. -Note that \ssr{} never clears a section constant. - -If {\tac} is \ssrC{move} or \ssrC{case} and an equation \ssrN{ident} is given, -then clear (step~\ref{enum:clr}) for $\ssrN[1]{d-item}$ is suppressed -(see section \ref{ssec:equations}). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Matching for \ssrC{apply} and \ssrC{exact}}\label{sss:strongapply} - -The matching algorithm for \ssrN{d-item}s of the \ssr{} \ssrC{apply} and -\ssrC{exact} tactics -exploits the type of $\ssrN[1]{d-item}$ to interpret -wildcards in the other \ssrN{d-item} and to determine which occurrences of -these should be generalized. -Therefore, \ssrN{occur switch}es are not needed for \ssrC{apply} and \ssrC{exact}. - -Indeed, the \ssr{} tactic \ssrC{apply: H x} is equivalent to -\begin{lstlisting} - refine (@H _ ... _ x); clear H x -\end{lstlisting} -with an appropriate number of wildcards between \ssrC{H} and~\ssrC{x}. - -Note that this means that matching for \ssrC{apply} and \ssrC{exact} has -much more context to interpret wildcards; in particular it can accommodate -the `\ssrC{_}' \ssrN{d-item}, which would always be rejected after `\ssrC{move:}'. -For example, the tactic -\begin{lstlisting} - apply: trans_equal (Hfg _) _. -\end{lstlisting} -transforms the goal \ssrC{f a = g b}, whose context contains -\ssrC{(Hfg : forall x, f x = g x)}, into \ssrC{g a = g b}. -This tactic is equivalent (see section \ref{ssec:profstack}) to: -\begin{lstlisting} - refine (trans_equal (Hfg _) _). -\end{lstlisting} -and this is a common idiom for applying transitivity on the left hand side -of an equation. 
- -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{abstract} tactic}\label{ssec:abstract} -\idx{abstract: \dots{}} - -The \ssrC{abstract} tactic assigns an abstract constant previously introduced with -the \ssrC{[: name ]} intro pattern (see section~\ref{ssec:intro}, -page~\pageref{ssec:introabstract}). -In a goal like the following: -\begin{lstlisting} - m : nat - abs : <hidden> - n : nat - ============= - m < 5 + n -\end{lstlisting} -The tactic \ssrC{abstract: abs n} first generalizes the goal with respect to -\ssrC{n} (that is not visible to the abstract constant \ssrC{abs}) and then -assigns \ssrC{abs}. The resulting goal is: -\begin{lstlisting} - m : nat - n : nat - ============= - m < 5 + n -\end{lstlisting} -Once this subgoal is closed, all other goals having \ssrC{abs} in their context -see the type assigned to \ssrC{abs}. In this case: -\begin{lstlisting} - m : nat - abs : forall n, m < 5 + n -\end{lstlisting} - -For a more detailed example the user should refer to section~\ref{sssec:have}, -page~\pageref{sec:havetransparent}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Introduction}\label{ssec:intro} -\idx{\dots{} => \dots{}} - -The application of a tactic to a given goal can generate -(quantified) variables, assumptions, or definitions, which the user may want to -\emph{introduce} as new facts, constants or defined constants, respectively. -If the tactic splits the goal into several subgoals, -each of them may require the introduction of different constants and facts. -Furthermore it is very common to immediately decompose -or rewrite with an assumption instead of adding it to the context, -as the goal can often be simplified and even -proved after this. 
- -All these operations are performed by the introduction tactical -`\ssrC{=>}', whose general syntax is -\begin{center} - {\tac} \ssrC{=>} \ssrN[1]{i-item} $\dots$ \ssrN[n]{i-item} -\end{center} -where {\tac} can be any tactic, $n > 0$ and -\begin{longtable}{rcl} - \ssrN{i-item}& ::=& \ssrN{i-pattern} {\optsep} \ssrN{s-item} {\optsep} \ssrN{clear-switch} {\optsep} \ssrC{/}{\term} \\ - \ssrN{s-item}& ::=& \ssrC{/=} {\optsep} \ssrC{//} {\optsep} \ssrC{//=} \\ - \ssrN{i-pattern}& ::=& \ssrN{ident} {\optsep} \ssrC{_} {\optsep} \ssrC{?} {\optsep} \ssrC{*} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{->} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{<-} {\optsep} \\ - && \ssrC{[} \ssrN[1]{i-item}$^*$ \ssrC{|} $\dots$ \ssrC{|} \ssrN[m]{i-item}$^*$ \ssrC{]} {\optsep} \ssrC{-} {\optsep} \ssrC{[:} \ssrN{ident}$^+$ \ssrC{]} -\end{longtable} - -The `\ssrC{=>}' tactical first executes {\tac}, then the -\ssrN{i-item}s, left to right, i.e., starting from $\ssrN[1]{i-item}$. An -\ssrN{s-item} specifies a simplification operation; a $\ssrN{clear -switch}$ specifies context pruning as in~\ref{ssec:discharge}. The -\ssrN{i-pattern}s can be seen as a variant of \emph{intro patterns}~\ref{intros-pattern}: -each performs an introduction operation, i.e., pops some variables or -assumptions from the goal. - -An \ssrN{s-item} can simplify the set of subgoals or the subgoal themselves: -\begin{itemize} -\item \ssrC{//} removes all the ``trivial'' subgoals that can be resolved by - the \ssr{} tactic \ssrC{done} described in~\ref{ssec:termin}, i.e., it - executes \ssrC{try done}. -\item \ssrC{/=} simplifies the goal by performing partial evaluation, as - per the tactic \ssrC{simpl}.\footnote{Except \ssrC{/=} does not - expand the local definitions created by the \ssr{} \ssrC{in} tactical.} -\item \ssrC{//=} combines both kinds of simplification; it is equivalent - to \ssrC{/= //}, i.e., \ssrC{simpl; try done}. 
-\end{itemize} -When an \ssrN{s-item} bears a \ssrN{clear-switch}, then the \ssrN{clear-switch} is -executed \emph{after} the \ssrN{s-item}, e.g., \ssrL|{IHn}//| will solve -some subgoals, possibly using the fact \ssrL|IHn|, and will erase \ssrL|IHn| -from the context of the remaining subgoals. - -The last entry in the \ssrN{i-item} grammar rule, \ssrC{/}{\term}, -represents a view (see section~\ref{sec:views}). If $\ssrN[k+1]{i-item}$ -is a view \ssrN{i-item}, the view is applied to the assumption in top -position once $\ssrN[1]{i-item} \dots \ssrN[k]{i-item}$ have been performed. - -The view is applied to the top assumption. - -\ssr{} supports the following \ssrN{i-pattern}s: -\begin{itemize} -\item \ssrN{ident} pops the top variable, assumption, or local definition into - a new constant, fact, or defined constant \ssrN{ident}, respectively. - Note that defined constants cannot be introduced when - $\delta$-expansion is required to expose the top variable or assumption. -\item \ssrC{?} pops the top variable into an anonymous constant or fact, - whose name is picked by the tactic interpreter. - \ssr{} only generates names that - cannot appear later in the user script.\footnote{\ssr{} reserves - all identifiers of the form ``\ssrC{_x_}'', which is used for such - generated names.} -\item \ssrC{_} pops the top variable into an anonymous constant that will be - deleted from - the proof context of all the subgoals produced by the \ssrC{=>} tactical. - They should thus never be displayed, except in an error message - if the constant is still actually used in the goal or context after - the last \ssrN{i-item} has been executed (\ssrN{s-item}s can erase goals - or terms where the constant appears). -\item \ssrC{*} pops all the remaining apparent variables/assumptions - as anonymous constants/facts. 
Unlike \ssrC{?} and \ssrC{move} the \ssrC{*} - \ssrN{i-item} does not expand definitions in the goal to expose - quantifiers, so it may be useful to repeat a \ssrC{move=> *} tactic, - e.g., on the goal -\begin{lstlisting} - forall a b : bool, a <> b -\end{lstlisting} -a first \ssrC{move=> *} adds only \ssrC{_a_ : bool} and \ssrC{_b_ : bool} to -the context; it takes a second \ssrC{move=> *} to add -\ssrC{_Hyp_ : _a_ = _b_}. -\item $[\ssrN{occ-switch}]$\ssrC{->} (resp. $[\ssrN{occ-switch}]$\ssrC{<-}) - pops the top assumption - (which should be a rewritable proposition) into an anonymous fact, - rewrites (resp. rewrites right to left) the goal with this fact - (using the \ssr{} \ssrC{rewrite} tactic described in section~\ref{sec:rw}, - and honoring the optional occurrence selector), - and finally deletes the anonymous fact from the context. -\item\ssrC{[ $\ssrN[1]{i-item}^*$ | $\dots$ | $\ssrN[m]{i-item}^*$ ]}, - when it is the very \emph{first} \ssrN{i-pattern} after ${\tac}\;\ssrC{=>}$ - tactical \emph{and} {\tac} is not a \ssrC{move}, is a \emph{branching} - \ssrN{i-pattern}. It executes - the sequence $\ssrN[i]{i-item}^*$ on the $i^{\rm th}$ - subgoal produced by {\tac}. The execution of {\tac} - should thus generate exactly $m$ - subgoals, unless the \ssrC{[$\dots$]} \ssrN{i-pattern} comes after an initial - \ssrC{//} or \ssrC{//=} \ssrN{s-item} that closes some of the goals produced by - {\tac}, in which case exactly $m$ subgoals should remain after the - \ssrN{s-item}, or we have the trivial branching \ssrN{i-pattern} \ssrC{[]}, - which always does nothing, regardless of the number of remaining subgoals. -\item\ssrC{[ $\ssrN[1]{i-item}^*$ | $\dots$ | $\ssrN[m]{i-item}^*$ ]}, when it is - \emph{not} the first \ssrN{i-pattern} or when {\tac} is a - \ssrC{move}, is a \emph{destructing} \ssrN{i-pattern}. It starts by - destructing the top variable, using the \ssr{} \ssrC{case} tactic - described in~\ref{ssec:basictac}. 
It then behaves as the - corresponding branching \ssrN{i-pattern}, executing the sequence - $\ssrN[i]{i-item}^*$ in the $i^{\rm th}$ subgoal generated by the case - analysis; unless we have the trivial destructing \ssrN{i-pattern} - \ssrC{[]}, the latter should generate exactly $m$ subgoals, i.e., the - top variable should have an inductive type with exactly $m$ - constructors.\footnote{More precisely, it should have a quantified - inductive type with $a$ assumptions and $m - a$ constructors.} - While it is good style to use the $\ssrN[i]{i-item}^*$ - to pop the variables and assumptions corresponding to each constructor, - this is not enforced by \ssr{}. -\item\ssrC{-} does nothing, but counts as an intro pattern. It can also - be used to force the interpretation of - \ssrC{[ $\ssrN[1]{i-item}^*$ | $\dots$ | $\ssrN[m]{i-item}^*$ ]} - as a case analysis like in \ssrC{move=> -[H1 H2]}. It can also be used - to indicate explicitly the link between a view and a name like in - \ssrC{move=> /eqP-H1}. Last, it can serve as a separator between - views. Section~\ref{ssec:multiview} explains in which respect - the tactic \ssrC{move=> /v1/v2} differs from the tactic - \ssrC{move=> /v1-/v2}. -\item\ssrC{[: $\ssrN{ident}^+$ ]} introduces in the context an abstract constant - for each \ssrN{ident}. Its type has to be fixed later on by using - the \ssrC{abstract} tactic (see page~\pageref{ssec:abstract}). Before then - the type displayed is \ssrC{<hidden>}.\label{ssec:introabstract} -\end{itemize} -Note that \ssr{} does not support the syntax -$\ssrC{(}\ssrN{ipat}\ssrC{,}\dots\ssrC{,}\ssrN{ipat}\ssrC{)}$ for destructing -intro-patterns. - -Clears are deferred until the end of the intro pattern. For -example, given the goal: -\begin{lstlisting} -x, y : nat -================== -0 < x = true -> (0 < x) && (y < 2) = true -\end{lstlisting} -the tactic \ssrC{move=> \{x\} ->} successfully rewrites the goal and -deletes \ssrC{x} and the anonymous equation. 
The goal is thus turned into: -\begin{lstlisting} -y : nat -================== -true && (y < 2) = true -\end{lstlisting} -If the cleared names are reused in the same intro pattern, a renaming -is performed behind the scenes. - -Facts mentioned in a clear switch must be valid -names in the proof context (excluding the section context). - -The rules for interpreting branching and destructing \ssrN{i-pattern} -are motivated by the fact that it would be pointless to have a branching -pattern if {\tac} is a \ssrC{move}, and in most of the remaining cases -{\tac} is \ssrC{case} or \ssrC{elim}, which implies destruction. -The rules above imply that -\begin{lstlisting} - move=> [a b]. - case=> [a b]. - case=> a b. -\end{lstlisting} -are all equivalent, so which one to use is a matter of style; -\ssrC{move} should be used for casual decomposition, -such as splitting a pair, and \ssrC{case} should be used for actual decompositions, -in particular for type families (see~\ref{ssec:typefam}) -and proof by contradiction. - -The trivial branching \ssrN{i-pattern} can be used to force the branching -interpretation, e.g., -\begin{lstlisting} - case=> [] [a b] c. - move=> [[a b] c]. - case; case=> a b c. -\end{lstlisting} -are all equivalent. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Generation of equations}\label{ssec:equations} -\idx{move eq : \dots{}} - -The generation of named equations option stores the definition of a -new constant as an equation. The tactic: -\begin{lstlisting} - move En: (size l) => n. -\end{lstlisting} -where \ssrC{l} is a list, replaces \ssrC{size l} by \ssrC{n} in the goal and -adds the fact \ssrC{En : size l = n} to the context. - This is quite different from: -\begin{lstlisting} - pose n := (size l). -\end{lstlisting} -which generates a definition \ssrC{n := (size l)}. 
It is not possible to -generalize or -rewrite such a definition; on the other hand, it is automatically -expanded during -computation, whereas expanding the equation \ssrC{En} requires explicit -rewriting. - -The use of this equation name generation option with a \ssrC{case} or an -\ssrC{elim} tactic changes the status of the first \iitem{}, in order to -deal with the possible parameters of the constants introduced. - -On the -goal \ssrC{a <> b} where \ssrC{a, b} are natural numbers, the tactic: -\begin{lstlisting} - case E : a => [|n]. -\end{lstlisting} -generates two subgoals. The equation \ssrC{E : a = 0} (resp. \ssrC{E : a = - S n}, and the constant \ssrC{n : nat}) has been added to -the context of the goal \ssrC{0 <> b} (resp. \ssrC{S n <> b}). - -If the user does not provide a branching \iitem{} as first \iitem{}, -or if the \iitem{} does not provide enough names for the arguments of -a constructor, -then the constants generated are introduced under fresh \ssr{} names. -For instance, on the goal \ssrC{a <> b}, the tactic: -\begin{lstlisting} - case E : a => H. -\end{lstlisting} -also generates two subgoals, both requiring a proof of \ssrC{False}. - The hypotheses \ssrC{E : a = 0} and -\ssrC{H : 0 = b} (resp. \ssrC{E : a = S _n_} and -\ssrC{H : S _n_ = b}) have been added to the context of the first -subgoal (resp. the second subgoal). - -Combining the generation of named equations mechanism with the -\ssrC{case} tactic strengthens the power of a case analysis. On the other -hand, when combined with the \ssrC{elim} tactic, this feature is mostly -useful for -debug purposes, to trace the values of decomposed parameters and -pinpoint failing branches. 
- -% This feature is also useful -% to analyse and debug generate-and-test style scripts that prove program -% properties by generating a large set of input patterns and uniformly -% solving the corresponding subgoals by computation and rewriting, e.g, - -% \begin{lstlisting} -% case: et => [|e' et]; first by case: s. -% case: e => //; case: b; case: w. -% \end{lstlisting} -% If the above sequence fails, then it's easy enough to replace the line -% above with -% \begin{lstlisting} -% case: et => [|e' et]. -% case Ds: s; case De: e => //; case Db: b; case Dw: w=> [|s' w'] //=. -% \end{lstlisting} -% Then the first subgoal that appears will be the failing one, and the -% equations \ssrC{Ds}, \ssrC{De}, \ssrC{Db} -% and \ssrC{Dw} will pinpoint its branch. When the constructors of -% the decomposed type have arguments (like \ssrC{w : (seq nat)} -% above) these need to be -% introduced in order to generate the equation, so there should -% always be an explicit \iitem{} (\ssrC{\[|s' w'\]} above) that -% assigns names to these arguments. If this \iitem{} -% is omitted the arguments are introduced with default -% name; this -% feature should be -% avoided except for quick debugging runs (it has some uses in complex tactic -% sequences, however). - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Type families}\label{ssec:typefam} -\idx{case: \dots{} / \dots{}} - -When the top assumption of a goal has an inductive type, two -specific operations are possible: the case analysis performed by the -\ssrC{case} tactic, and the application of an induction principle, -performed by the \ssrC{elim} tactic. When this top assumption has an -inductive type, which is moreover an instance of a type family, \Coq{} -may need help from the user to specify which occurrences of the parameters -of the type should be substituted. 
- -A specific \ssrC{/} switch indicates the type family parameters of the -type of a \ditem{} immediately following this \ssrC{/} switch, using the -syntax: - -\begin{center} - \ssrC{[} \ssrC{case} {\optsep} \ssrC{elim} \ssrC{]:} \ssrN{d-item}$^+$ \ssrC{/} \ssrN{d-item}$^*$ -\end{center} - -The \ssrN{d-item}s on the right side of the \ssrC{/} switch are discharged -as described in section \ref{ssec:discharge}. The case analysis or -elimination will be done on the type of the top assumption after these -discharge operations. - -Every \ssrN{d-item} preceding the \ssrC{/} is interpreted as arguments of this -type, which should be an instance of an inductive type family. These terms are -not actually generalized, but rather selected for substitution. Occurrence -switches can be used to restrict the substitution. If a {\term} is left -completely implicit (e.g. writing just $\ssrC{\_}$), then a pattern is inferred -looking at the type of the top assumption. This allows for the compact syntax -\ssrC{case: \{2\}\_ / eqP}, were \ssrC{\_} is interpreted as \ssrC{(\_ == \_)}. Moreover -if the \ssrN{d-item}s list is too short, it is padded with an initial -sequence of $\ssrC{\_}$ of the right length. - -Here is a small example on lists. We define first a function which -adds an element at the end of a given list. -\begin{lstlisting} - Require Import List. - - Section LastCases. - Variable A : Type. - - Fixpoint |*add_last*|(a : A)(l : list A): list A := - match l with - |nil => a :: nil - |hd :: tl => hd :: (add_last a tl) - end. -\end{lstlisting} -Then we define an inductive predicate for -case analysis on lists according to their last element: -\begin{lstlisting} - Inductive |*last_spec*| : list A -> Type := - | LastSeq0 : last_spec nil - | LastAdd s x : last_spec (add_last x s). - - Theorem |*lastP*| : forall l : list A, last_spec l. -\end{lstlisting} -Applied to the goal: -\begin{lstlisting} - Goal forall l : list A, (length l) * 2 = length (app l l). 
-\end{lstlisting} -the command: -\begin{lstlisting} - move=> l; case: (lastP l). -\end{lstlisting} -generates two subgoals: -\begin{lstlisting} - length nil * 2 = length (nil ++ nil) -\end{lstlisting} -and -\begin{lstlisting} - forall (s : list A) (x : A), - length (add_last x s) * 2 = length (add_last x s ++ add_last x s) -\end{lstlisting} -both having \ssrC{l : list A} in their context. - -Applied to the same goal, the command: -\begin{lstlisting} - move=> l; case: l / (lastP l). -\end{lstlisting} -generates the same subgoals but \ssrC{l} has been cleared from both -contexts. - -Again applied to the same goal, the command: -\begin{lstlisting} - move=> l; case: {1 3}l / (lastP l). -\end{lstlisting} -generates the subgoals \ssrL-length l * 2 = length (nil ++ l)- and -\ssrL-forall (s : list A) (x : A), length l * 2 = length (add_last x s++l)- -where the selected occurrences on the left of the \ssrC{/} switch have -been substituted with \ssrC{l} instead of being affected by the case -analysis. - -The equation name generation feature combined with a type family \ssrC{/} - switch generates an equation for the \emph{first} dependent d-item -specified by the user. -Again starting with the above goal, the command: -\begin{lstlisting} - move=> l; case E: {1 3}l / (lastP l)=>[|s x]. -\end{lstlisting} -adds \ssrC{E : l = nil} and \ssrC{E : l = add_last x s}, -respectively, to the context of the two subgoals it generates. - -There must be at least one \emph{d-item} to the left of the \ssrC{/} -switch; this prevents any -confusion with the view feature. However, the \ditem{}s to the right of -the \ssrC{/} are optional, and if they are omitted the first assumption -provides the instance of the type family. - -The equation always refers to the first \emph{d-item} in the actual -tactic call, before any padding with initial $\ssrC{\_}$s. 
Thus, if an -inductive type has two family parameters, it is possible to have -\ssr{} generate an equation for the second one by omitting the pattern -for the first; note however that this will fail if the type of the -second parameter depends on the value of the first parameter. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Control flow} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Indentation and bullets}\label{ssec:indent} - -A linear development of \Coq{} scripts gives little information on -the structure of the proof. In addition, replaying a proof after some -changes in the statement to be proved will usually not display information to -distinguish between the various branches of case analysis for instance. - -To help the user in this organization of the proof script at -development time, \ssr{} provides some bullets to highlight the -structure of branching proofs. The available bullets are \ssrC{-}, -\ssrC{+} and \ssrC{*}. Combined with tabulation, this lets us highlight four -nested levels of branching; the most we have ever -needed is three. Indeed, the use of ``simpl and closing'' switches, of -terminators (see above section \ref{ssec:termin}) and selectors (see - section \ref{ssec:select}) is powerful enough -to avoid most of the time more than two levels of indentation. - -Here is a fragment of such a structured script: - -\begin{lstlisting} -case E1: (abezoutn _ _) => [[| k1] [| k2]]. -- rewrite !muln0 !gexpn0 mulg1 => H1. - move/eqP: (sym_equal F0); rewrite -H1 orderg1 eqn_mul1. - by case/andP; move/eqP. -- rewrite muln0 gexpn0 mulg1 => H1. - have F1: t %| t * S k2.+1 - 1. - apply: (@dvdn_trans (orderg x)); first by rewrite F0; exact: dvdn_mull. - rewrite orderg_dvd; apply/eqP; apply: (mulgI x). - rewrite -{1}(gexpn1 x) mulg1 gexpn_add leq_add_sub //. - by move: P1; case t. - rewrite dvdn_subr in F1; last by exact: dvdn_mulr. - + rewrite H1 F0 -{2}(muln1 (p ^ l)); congr (_ * _). 
- by apply/eqP; rewrite -dvdn1. - + by move: P1; case: (t) => [| [| s1]]. -- rewrite muln0 gexpn0 mul1g => H1. -... -\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Terminators}\label{ssec:termin} -\idx{by \dots{}} - -To further structure scripts, \ssr{} -supplies \emph{terminating} tacticals to explicitly close off tactics. -When replaying scripts, we then have the nice property that -an error immediately occurs when a closed tactic fails to prove its -subgoal. - -It is hence recommended practice that the proof of any subgoal should -end with a tactic which \emph{fails if it does not solve the current - goal}, like \ssrC{discriminate}, \ssrC{contradiction} or \ssrC{assumption}. - -In fact, \ssr{} provides a generic tactical which turns any tactic into -a closing one (similar to \ssrC{now}). Its general syntax is: - -\begin{center} - \ssrC{by} {\tac}\ssrC{.} -\end{center} - -The Ltac expression: - -\begin{center} - \ssrC{by [}\ssrN[1]{tactic} \ssrC{| [}\ssrN[2]{tactic} \ssrC{| ...].} -\end{center} - -is equivalent to: - -\begin{center} - \ssrC{[by} \ssrN[1]{tactic} \ssrC{| by} \ssrN[2]{tactic} \ssrC{| ...].} -\end{center} - -and this form should be preferred to the former. - -In the script provided as example in section \ref{ssec:indent}, the -paragraph corresponding to each sub-case ends with a tactic line prefixed -with a \ssrC{by}, like in: - -\begin{center} - \ssrC{by apply/eqP; rewrite -dvdn1.} -\end{center} - - -The \ssrC{by} tactical is implemented using the user-defined, -and extensible \ssrC{done} tactic. This \ssrC{done} tactic tries to solve -the current goal by some trivial means and fails if it doesn't succeed. 
-Indeed, the tactic expression: - -\begin{center} - \ssrC{by} {\tac}\ssrC{.} -\end{center} - -is equivalent to: - -\begin{center} - {\tac}\ssrC{; done.} -\end{center} - -Conversely, the tactic - -\begin{center} - \ssrC{by [ ].} -\end{center} - -is equivalent to: - -\begin{center} - \ssrC{done.} -\end{center} - -The default implementation of the \ssrC{done} tactic, in the {\tt - ssreflect.v} file, is: - -\begin{lstlisting} -Ltac done := - trivial; hnf; intros; solve - [ do ![solve [trivial | apply: sym_equal; trivial] - | discriminate | contradiction | split] - | case not_locked_false_eq_true; assumption - | match goal with H : ~ _ |- _ => solve [case H; trivial] end ]. -\end{lstlisting} - -The lemma \ssrC{|*not_locked_false_eq_true*|} is needed to discriminate -\emph{locked} boolean predicates (see section \ref{ssec:lock}). -The iterator tactical \ssrC{do} is presented in section -\ref{ssec:iter}. -This tactic can be customized by the user, for instance to include an -\ssrC{auto} tactic. - -A natural and common way of closing a goal is to apply a lemma which -is the \ssrC{exact} one needed for the goal to be solved. The defective -form of the tactic: -\begin{lstlisting} - exact. -\end{lstlisting} -is equivalent to: -\begin{lstlisting} - do [done | by move=> top; apply top]. -\end{lstlisting} -where \ssrC{top} is a fresh name affected to the top assumption of the goal. -This applied form is supported by the \ssrC{:} discharge tactical, and -the tactic: -\begin{lstlisting} - exact: MyLemma. -\end{lstlisting} -is equivalent to: -\begin{lstlisting} - by apply: MyLemma. -\end{lstlisting} -(see section \ref{sss:strongapply} for the documentation of the \ssrC{apply:} -combination). - -\textit{Warning} The list of tactics, possibly chained by -semi-columns, that follows a \ssrC{by} keyword is considered as a -parenthesized block -applied to the current goal. Hence for example if the tactic: -\begin{lstlisting} - by rewrite my_lemma1. 
-\end{lstlisting} -succeeds, then the tactic: -\begin{lstlisting} - by rewrite my_lemma1; apply my_lemma2. -\end{lstlisting} -usually fails since it is equivalent to: -\begin{lstlisting} - by (rewrite my_lemma1; apply my_lemma2). -\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Selectors}\label{ssec:select} -\idx{last \dots{} first} -\idx{first \dots{} last} - -When composing tactics, the two tacticals \ssrC{first} and -\ssrC{last} let the user restrict the application of a tactic to only one -of the subgoals generated by the previous tactic. This -covers the frequent cases where a tactic generates two subgoals one of -which can be easily disposed of. - -This is another powerful way of linearizing scripts, since it -happens very often that a trivial subgoal can be solved by a tactic of -less than one line. For instance, the tactic: - -\begin{center} - \ssrN[1]{tactic}\ssrC{; last by} \ssrN[2]{tactic}\ssrC{.} -\end{center} - -tries to solve the last subgoal generated by \ssrN[1]{tactic} using the -\ssrN[2]{tactic}, and fails if it does not succeed. Its analogue - -\begin{center} - \ssrN[1]{tactic}\ssrC{; first by} \ssrN[2]{tactic}. -\end{center} - -tries to solve the first subgoal generated by \ssrN[1]{tactic} using the -tactic \ssrN[2]{tactic}, and fails if it does not succeed. - - -\ssr{} also offers an extension of this facility, by supplying -tactics to \emph{permute} the subgoals generated by a tactic. -The tactic: - -\begin{center} - {\tac}\ssrC{; last first.} -\end{center} - -inverts the order of the subgoals generated by {\tac}. It is -equivalent to: - -\begin{center} - {\tac}\ssrC{; first last.} -\end{center} - - -More generally, the tactic: - -\begin{center} - {\tac}\ssrC{; last }${\naturalnumber}$ \ssrC{first.} -\end{center} - -where ${\naturalnumber}$ is -a \Coq{} numeral, or an Ltac variable denoting -a \Coq{} numeral, having the value $k$. 
It -rotates the $n$ subgoals $G_1, -\dots, G_n$ generated by {\tac}. The first subgoal becomes -$G_{n + 1 - k}$ and the circular order of subgoals remains unchanged. - -Conversely, the tactic: - - {\tac}\ssrC{; first }${\naturalnumber}$ \ssrC{last.} - -rotates the $n$ subgoals $G_1, -\dots, G_n$ generated by \ssrC{tactic} in order that the first subgoal -becomes $G_{k}$. - -Finally, the tactics \ssrC{last} and \ssrC{first} combine with the -branching syntax of Ltac: -if the tactic $\ssrN[0]{tactic}$ generates $n$ -subgoals on a given goal, then the tactic - - $tactic_0$\ssrC{; last }${\naturalnumber}$ \ssrC{[}$tactic_1$\ssrC{|}$\dots$\ssrC{|}$tactic_m$\ssrC{] || }$tactic_{m+1}$\ssrC{.} - -where ${\naturalnumber}$ denotes the integer $k$ as above, applies $tactic_1$ to the -$n -k + 1$-th goal, $\dots tactic_m$ to the $n -k + 2 - m$-th -goal and $tactic_{m+1}$ to the others. - -For instance, the script: -\begin{lstlisting} - Inductive test : nat -> Prop := - C1 : forall n, test n | C2 : forall n, test n | - C3 : forall n, test n | C4 : forall n, test n. - - Goal forall n, test n -> True. - move=> n t; case: t; last 2 [move=> k| move=> l]; idtac. -\end{lstlisting} - -creates a goal with four subgoals, the first and the last being -\ssrC{nat -> True}, the second and the third being \ssrC{True} with -respectively \ssrC{k : nat} and \ssrC{l : nat} in their context. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Iteration}\label{ssec:iter} -\idx{do \dots{} [ \dots{} ]} - -\ssr{} offers an accurate control on the repetition of -tactics, thanks to the \ssrC{do} tactical, whose general syntax is: - -\begin{center} - \ssrC{do} \optional{\ssrN{mult}} \ssrC{[} \ssrN[1]{tactic} \ssrC{|} $\dots$ \ssrC{|} \ssrN[n]{tactic} \ssrC{]} -\end{center} -where \ssrN{mult} is a \emph{multiplier}. - -Brackets can only be omitted if a single tactic is given \emph{and} a -multiplier is present. 
- -A tactic of the form: - -\begin{center} - \ssrC{do [} \tac$_1$ \ssrC{|} $\dots$ \ssrC{|} \tac$_n$\ssrC{].} -\end{center} - -is equivalent to the standard Ltac expression: - -\begin{center} - \ssrC{first [} \tac$_1$ \ssrC{|} $\dots$ \ssrC{|} \tac$_n$\ssrC{].} -\end{center} - - -The optional multiplier \ssrN{mult} specifies how many times -the action of {\tac} should be repeated on the current subgoal. - -There are four kinds of multipliers: - \begin{itemize} - \item \ssrC{n!}: the step {\tac} is repeated exactly $n$ times - (where $n$ is a positive integer argument). - \item \ssrC{!}: the step {\tac} is repeated as many times as possible, - and done at least once. - \item \ssrC{?}: the step {\tac} is repeated as many times as possible, - optionally. - \item \ssrC{n?}: the step {\tac} is repeated up to $n$ times, - optionally. - \end{itemize} - -For instance, the tactic: - -\begin{center} - {\tac} \ssrL+; do 1?rewrite mult_comm.+ -\end{center} - -rewrites the lemma \ssrC{mult_comm} at most once in all the subgoals -generated by {\tac}, whereas the tactic: - -\begin{center} - {\tac} \ssrL+; do 2!rewrite mult_comm.+ -\end{center} - -rewrites the lemma \ssrC{mult_comm} exactly twice in all the subgoals -generated by {\tac}, and fails if this rewrite is not possible -in some subgoal. - -Note that the combination of multipliers and \ssrC{rewrite} is so often -used that multipliers are in fact integrated to the syntax of the \ssr{} -\ssrC{rewrite} tactic, see section \ref{sec:rw}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Localization}\label{ssec:gloc} -\idx{\dots{} in \dots{}} - -In sections \ref{ssec:loc} and \ref{ssec:profstack}, we have already -presented the \emph{localization} tactical \ssrC{in}, whose general -syntax is: -\begin{center} - {\tac} \ssrC{in} \ssrN{ident}$^+$ \optional{\ssrC{*}} -\end{center} - -where \ssrN{ident}$^+$ is a non-empty list of fact -names in the context. 
On the left side of \ssrC{in}, {\tac} can be -\ssrC{move}, \ssrC{case}, \ssrC{elim}, \ssrC{rewrite}, \ssrC{set}, - or any tactic formed with the general iteration tactical \ssrC{do} (see - section \ref{ssec:iter}). - -The operation described by {\tac} is performed in the facts -listed in \ssrN{ident}$^+$ and in the goal if a \ssrC{*} ends -the list. - -The \ssrC{in} tactical successively: -\begin{itemize} -\item generalizes the selected hypotheses, possibly ``protecting'' the - goal if \ssrC{*} is not present, -\item performs {\tac}, on the obtained goal, -\item reintroduces the generalized facts, under the same names. -\end{itemize} - -This defective form of the \ssrC{do} tactical is useful to avoid clashes -between standard Ltac \ssrC{in} and the \ssr{} tactical \ssrC{in}. -For example, in the following script: -\begin{lstlisting} - Ltac |*mytac*| H := rewrite H. - - Goal forall x y, x = y -> y = 3 -> x + y = 6. - move=> x y H1 H2. - do [mytac H2] in H1 *. -\end{lstlisting} -the last tactic rewrites the hypothesis \ssrC{H2 : y = 3} both in -\ssrC{H1 : x = y} and in the goal \ssrC{x + y = 6}. - -By default \ssrC{in} keeps the body of local definitions. To erase -the body of a local definition during the generalization phase, -the name of the local definition must be written between parentheses, -like in \ssrC{rewrite H in H1 (def_n) $\;\;$H2}. - -From \ssr{} 1.5 the grammar for the \ssrC{in} tactical has been extended -to the following one: - -\begin{center} - {\tac} \ssrC{in} \optional{ - \ssrN{clear-switch} {\optsep} - \optional{\ssrC{@}}\ssrN{ident} {\optsep} - \ssrC{(}\ssrN{ident}\ssrC{)} {\optsep} - \ssrC{(}\optional{\ssrC{@}}\ssrN{ident} \ssrC{:=} \ssrN{c-pattern}\ssrC{)} - }$^+$ \optional{\ssrC{*}} -\end{center} - -In its simplest form the last option lets one rename hypotheses that can't be -cleared (like section variables). 
For example \ssrC{(y := x)} generalizes -over \ssrC{x} and reintroduces the generalized -variable under the name \ssrC{y} (and does not clear \ssrC{x}).\\ -For a more precise description of the $\ssrC{(}[\ssrC{@}]\ssrN{ident}\ \ssrC{:=}\ \ssrN{c-pattern}\ssrC{)}$ -item, refer to the ``Advanced generalization'' paragraph at page~\pageref{par:advancedgen}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Structure}\label{ssec:struct} - -Forward reasoning structures the script by explicitly specifying some -assumptions to be added to the proof context. It is closely associated -with the declarative style of proof, since an extensive use of these -highlighted statements -makes the script closer to a (very detailed) textbook proof. - -Forward chaining tactics allow one to state an intermediate lemma and start a -piece of script dedicated to the proof of this statement. The use of -closing tactics (see section \ref{ssec:termin}) and of -indentation makes syntactically explicit the portion of the script -building the proof of the intermediate statement. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{have} tactic.} -\label{sssec:have} -\idx{have: \dots{}} -\idx{have: \dots{} := \dots{}} - -The main \ssr{} forward reasoning tactic is the \ssrC{have} tactic. It -can be used in two modes: one starts a new (sub)proof for an -intermediate result in the main proof, and the other -explicitly provides a proof term for this intermediate step. - -In the first mode, the syntax of \ssrC{have} in its defective form is: - - \ssrC{have: }{\term}\ssrC{.} - -This tactic supports open syntax for {\term}. -Applied to a goal \ssrC{G}, it generates a first subgoal requiring a -proof of {\term} in the context of \ssrC{G}. The second generated -subgoal is of the form {\term} \ssrC{-> G}, where {\term} becomes -the new top assumption, instead of being introduced with a fresh -name. 
At the proof-term level, the \ssrC{have} tactic creates a $\beta$ -redex, and introduces the lemma under a fresh name, automatically -chosen. - - -Like in the case of the \ssrC{pose} tactic (see section \ref{ssec:pose}), -the types of the holes are abstracted in {\term}. -For instance, the tactic: -\begin{lstlisting} - have: _ * 0 = 0. -\end{lstlisting} -is equivalent to: -\begin{lstlisting} - have: forall n : nat, n * 0 = 0. -\end{lstlisting} -The \ssrC{have} tactic also enjoys the same abstraction mechanism as the -\ssrC{pose} tactic for the non-inferred implicit arguments. For instance, -the tactic: -\begin{lstlisting} - have: forall x y, (x, y) = (x, y + 0). -\end{lstlisting} -opens a new subgoal to prove that: - -\noindent\ssrC{forall (T : Type) (x : T) (y : nat), (x, y) = (x, y + 0)} - - -The behavior of the defective \ssrC{have} tactic makes it possible to -generalize it in the -following general construction: -\begin{center} - \ssrC{have} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} - \optional{\ssrN{s-item} {\optsep} \ssrN{binder}$^+$} - \optional{\ssrC{:} \ssrN[1]{term}} - \optional{\ssrC{:=} \ssrN[2]{term} {\optsep} \ssrC{by} {\tac}} -\end{center} - -Open syntax is supported for $\ssrN[1]{term}$ and $\ssrN[2]{term}$. For the -description of -\iitem{}s and clear switches see section \ref{ssec:intro}. -The first mode of the \ssrC{have} tactic, which opens a sub-proof for an -intermediate result, uses tactics of the form: - -\begin{center} - \ssrC{have} \ssrN{clear-switch} \ssrN{i-item} \ssrC{:} {\term} \ssrC{by} {\tac}. -\end{center} - -which behave like:\\ - -\begin{center} - \ssrC{have:} {\term} \ssrC{; first by } {\tac}. -\end{center} -\begin{center} - \ssrC{ move=>} \ssrN{clear-switch} \ssrN{i-item}. -\end{center} - - -Note that the \ssrN{clear-switch} \emph{precedes} the -\ssrN{i-item}, which allows to reuse a name of the context, possibly used -by the proof of the assumption, to introduce the new assumption -itself. 
- -The \ssrC{by} feature is especially convenient when the proof script of the -statement is very short, basically when it fits in one line like in: -\begin{lstlisting} - have H : forall x y, x + y = y + x by move=> x y; rewrite addnC. -\end{lstlisting} - -The possibility of using \iitem{}s supplies a very concise -syntax for the further use of the intermediate step. For instance, -\begin{lstlisting} - have -> : forall x, x * a = a. -\end{lstlisting} -on a goal \ssrC{G}, opens a new subgoal asking for a proof of -\ssrC{forall x, x * a = a}, and a second subgoal in which the lemma - \ssrC{forall x, x * a = a} has been rewritten in the goal \ssrC{G}. Note - that in this last subgoal, the intermediate result does not appear in - the context. -Note that, thanks to the deferred execution of clears, the following -idiom is supported (assuming \ssrC{x} occurs in the goal only): -\begin{lstlisting} - have {x} -> : x = y -\end{lstlisting} - -An other frequent use of the intro patterns combined with \ssrC{have} is the -destruction of existential assumptions like in the tactic: -\begin{lstlisting} - have [x Px]: exists x : nat, x > 0. -\end{lstlisting} -which opens a new subgoal asking for a proof of \ssrC{exists x : nat, x > - 0} and a second subgoal in which the witness is introduced under -the name \ssrC{x : nat}, and its property under the name \ssrC{Px : x > 0}. - -An alternative use of the \ssrC{have} tactic is to provide the explicit proof -term for the intermediate lemma, using tactics of the form: - -\begin{center} - \ssrC{have} \optional{\ssrN{ident}} \ssrC{:=} {\term}. -\end{center} - -This tactic creates a new assumption of type the type of -{\term}. If the -optional \ssrN{ident} is present, this assumption is introduced under -the name \ssrN{ident}. Note that the body of the constant is lost for -the user. - -Again, non inferred implicit arguments and explicit holes are abstracted. 
For -instance, the tactic: -\begin{lstlisting} - have H := forall x, (x, x) = (x, x). -\end{lstlisting} -adds to the context \ssrC{H : Type -> Prop}. This is a schematic example but -the feature is specially useful when the proof term to give involves -for instance a lemma with some hidden implicit arguments. - -After the \ssrN{i-pattern}, a list of binders is allowed. -For example, if \ssrC{Pos_to_P} is a lemma that proves that -\ssrC{P} holds for any positive, the following command: -\begin{lstlisting} - have H x (y : nat) : 2 * x + y = x + x + y by auto. -\end{lstlisting} -will put in the context \ssrC{H : forall x, 2 * x = x + x}. A proof term -provided after \ssrC{:=} can mention these bound variables (that are -automatically introduced with the given names). -Since the \ssrN{i-pattern} can be omitted, to avoid ambiguity, bound variables -can be surrounded with parentheses even if no type is specified: -\begin{lstlisting} - have (x) : 2 * x = x + x by auto. -\end{lstlisting} - -The \ssrN{i-item}s and \ssrN{s-item} can be used to interpret the -asserted hypothesis with views (see section~\ref{sec:views}) or -simplify the resulting goals. - -The \ssrC{have} tactic also supports a \ssrC{suff} modifier which allows for -asserting that a given statement implies the current goal without -copying the goal itself. For example, given a goal \ssrC{G} the tactic -\ssrC{have suff H : P} results in the following two goals: -\begin{lstlisting} - |- P -> G - H : P -> G |- G -\end{lstlisting} -Note that \ssrC{H} is introduced in the second goal. The \ssrC{suff} -modifier is not compatible with the presence of a list of binders. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Generating \ssrC{let in} context entries with \ssrC{have}} -\label{sec:havetransparent} - -Since \ssr{} 1.5 the \ssrC{have} tactic supports a ``transparent'' modifier to -generate \ssrC{let in} context entries: the \ssrC{@} symbol in front of the context -entry name. 
For example: - -\begin{lstlisting} -have @i : 'I_n by apply: (Sub m); auto. -\end{lstlisting} -generates the following context entry: -\begin{lstlisting} -i := Sub m proof_produced_by_auto : 'I_n -\end{lstlisting} - -Note that the sub-term produced by \ssrC{auto} is in general huge and -uninteresting, and hence one may want to hide it. - -For this purpose the \ssrC{[: name ]} intro pattern and the tactic -\ssrC{abstract} (see page~\pageref{ssec:abstract}) are provided. -Example: -\begin{lstlisting} -have [:blurb] @i : 'I_n by apply: (Sub m); abstract: blurb; auto. -\end{lstlisting} -generates the following two context entries: -\begin{lstlisting} -blurb : (m < n) (*1*) -i := Sub m blurb : 'I_n -\end{lstlisting} -The type of \ssrC{blurb} can be cleaned up of its annotations by just simplifying -it. The annotations are there for technical reasons only. - -When intro patterns for abstract constants are used in conjunction -with \ssrC{have} and an explicit term, they must be used as follows: - -\begin{lstlisting} -have [:blurb] @i : 'I_n := Sub m blurb. - by auto. -\end{lstlisting} - -In this case the abstract constant \ssrC{blurb} is assigned by using it -in the term that follows \ssrC{:=} and its corresponding goal is left to -be solved. Goals corresponding to intro patterns for abstract constants -are opened in the order in which the abstract constants are declared (not -in the ``order'' in which they are used in the term). - -Note that abstract constants do respect scopes. Hence, if a variable -is declared after their introduction, it has to be properly generalized (i.e. -explicitly passed to the abstract constant when one makes use of it). -For example, either of the following two lines: -\begin{lstlisting} -have [:blurb] @i k : 'I_(n+k) by apply: (Sub m); abstract: blurb k; auto. -have [:blurb] @i k : 'I_(n+k) := apply: Sub m (blurb k); first by auto. 
-\end{lstlisting} -generates the following context: -\begin{lstlisting} -blurb : (forall k, m < n+k) (*1*) -i := fun k => Sub m (blurb k) : forall k, 'I_(n+k) -\end{lstlisting} - -Last, notice that the use of intro patterns for abstract constants is -orthogonal to the transparent flag \ssrC{@} for \ssrC{have}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{The \ssrC{have} tactic and type classes resolution} -\label{ssec:havetcresolution} - -Since \ssr{} 1.5 the \ssrC{have} tactic behaves as follows with respect to type -classes inference. - -\begin{itemize} -\item \ssrC{have foo : ty.} - Full inference for \ssrC{ty}. - The first subgoal demands a proof of such instantiated statement. -\item \ssrC{have foo : ty := .} - No inference for \ssrC{ty}. Unresolved instances are quantified in - \ssrC{ty}. The first subgoal demands a proof of such quantified - statement. Note that no proof term follows \ssrC{:=}, hence two - subgoals are generated. -\item \ssrC{have foo : ty := t.} - No inference for \ssrC{ty} and \ssrC{t}. -\item \ssrC{have foo := t.} - No inference for \ssrC{t}. Unresolved instances are quantified in the - (inferred) type of \ssrC{t} and abstracted in \ssrC{t}. -\end{itemize} - -The behavior of \ssr{} 1.4 and below (never resolve type classes) -can be restored with the option \ssrC{Set SsrHave NoTCResolution}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Variants: the \ssrC{suff} and \ssrC{wlog} tactics.} -\label{ssec:wlog} -\idx{suff: \dots{}} -\idx{suffices: \dots{}} -\idx{wlog: \dots{} / \dots{}} -\idx{without loss: \dots{} / \dots{}} - -As it is often the case in mathematical textbooks, forward -reasoning may be used in slightly different variants. -One of these variants is to show that the intermediate step $L$ -easily implies the initial goal $G$. By easily we mean here that -the proof of $L \Rightarrow G$ is shorter than the one of $L$ -itself. 
This kind of reasoning step usually starts with: -``It suffices to show that \dots''. - -This is such a frequent way of reasoning that \ssr{} has a variant of the -\ssrC{have} tactic called \ssrC{suffices} (whose abridged name is -\ssrC{suff}). The \ssrC{have} and \ssrC{suff} tactics are equivalent and -have the same syntax but: -\begin{itemize} -\item the order of the generated subgoals is reversed -\item but the optional clear item is still performed in the - \emph{second} branch. This means that the tactic: -\begin{lstlisting} - suff {H} H : forall x : nat, x >= 0. -\end{lstlisting} -fails if the context of the current goal indeed contains an -assumption named \ssrC{H}. -\end{itemize} -The rationale of this clearing policy is to make possible ``trivial'' -refinements of an assumption, without changing its name in the main -branch of the reasoning. - -The \ssrC{have} modifier can follow the \ssrC{suff} tactic. -For example, given a goal \ssrC{G} the tactic -\ssrC{suff have H : P} results in the following two goals: -\begin{lstlisting} - H : P |- G - |- (P -> G) -> G -\end{lstlisting} -Note that, in contrast with \ssrC{have suff}, the name \ssrC{H} has been introduced -in the first goal. - -Another useful construct is reduction, -showing that a particular case is in fact general enough to prove -a general property. This kind of reasoning step usually starts with: -``Without loss of generality, we can suppose that \dots''. -Formally, this corresponds to the proof of a goal \ssrC{G} by introducing -a cut \ssrN{wlog\_statement} \ssrC{-> G}. Hence the user shall provide a -proof for both \ssrC{(}\ssrN{wlog\_statement} \ssrC{-> G) -> G} and -\ssrN{wlog\_statement} \ssrC{-> G}. However, such cuts are usually rather -painful to perform by hand, because the statement -\ssrN{wlog\_statement} is tedious to write by hand, and sometimes even -to read. - -\ssr{} implements this kind of reasoning step through the \ssrC{without loss} -tactic, whose short name is \ssrC{wlog}. 
It offers support to describe -the shape of the cut statements, by providing the simplifying -hypothesis and by pointing at the elements of the initial goals which -should be generalized. The general syntax of \ssrC{without loss} is: - -\begin{center} - \ssrC{wlog} \optional{\ssrC{suff}} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-item}} \ssrC{:} \optional{\ssrN[1]{ident} $\dots$ \ssrN[n]{ident}} \ssrC{/} {\term} -\end{center} - -where \ssrN[1]{ident} $\dots$ \ssrN[n]{ident} are identifiers for constants -in the context of the goal. Open syntax is supported for {\term}. - -In its defective form: - -\begin{center} - \ssrC{wlog: /} {\term}. -\end{center} - -on a goal \ssrC{G}, it creates two subgoals: a first one to prove the formula -\ssrC{(}{\term} \ssrC{-> G) -> G} and a second one to prove the formula -{\term} \ssrC{-> G}. - -If the optional list \ssrN[1]{ident} $\dots$ \ssrN[n]{ident} is present on the left -side of \ssrC{/}, these constants are generalized in the premise -\ssrC{(}{\term} \ssrC{-> G)} of the first subgoal. By default the body of -local definitions is erased. This behavior can be inhibited by -prefixing the name of the local definition with the \ssrC{@} character. - -In the second subgoal, the tactic: - -\begin{center} - \ssrC{move=>} \ssrN{clear-switch}\ssrC{} \ssrN{i-item}\ssrC{.} -\end{center} - -is performed if at least one of these optional switches is present in -the \ssrC{wlog} tactic. - -The \ssrC{wlog} tactic is especially useful when a symmetry argument -simplifies a proof. Here is an example showing the beginning of the -proof that quotient and remainder of natural number Euclidean division -are unique. -\begin{lstlisting} - Lemma quo_rem_unicity: forall d q1 q2 r1 r2, - q1*d + r1 = q2*d + r2 -> r1 < d -> r2 < d -> (q1, r1) = (q2, r2). - move=> d q1 q2 r1 r2. - wlog: q1 q2 r1 r2 / q1 <= q2. - by case (le_gt_dec q1 q2)=> H; last symmetry; eauto with arith. 
-\end{lstlisting} - -The \ssrC{wlog suff} variant is simpler, since it cuts -\ssrN{wlog\_statement} instead of \ssrN{wlog\_statement} \ssrC{-> G}. It thus -opens the goals \ssrN{wlog\_statement} \ssrC{-> G} and \ssrN{wlog\_statement}. - -In its simplest form -the \ssrC{generally have :...} tactic -is equivalent to \ssrC{wlog suff :...} followed by \ssrC{last first}. -When the \ssrC{have} tactic -is used with the \ssrC{generally} (or \ssrC{gen}) modifier it accepts an -extra identifier followed by a comma before the usual intro pattern. -The identifier will name the new hypothesis in its more general form, -while the intro pattern will be used to process its instance. For example: -\begin{lstlisting} - Lemma simple n (ngt0 : 0 < n ) : P n. - gen have ltnV, /andP[nge0 neq0] : n ngt0 / (0 <= n) && (n != 0). -\end{lstlisting} -The first subgoal will be -\begin{lstlisting} - n : nat - ngt0 : 0 < n - ==================== - (0 <= n) && (n != 0) -\end{lstlisting} -while the second one will be -\begin{lstlisting} - n : nat - ltnV : forall n, 0 < n -> (0 <= n) && (n != 0) - nge0 : 0 <= n - neqn0 : n != 0 - ==================== - P n -\end{lstlisting} - -\paragraph{Advanced generalization}\label{par:advancedgen} -The complete syntax for the items on the left hand side of the \ssrC{/} -separator is the following one: -\begin{center} -\ssrN{clear-switch} {\optsep} \optional{\ssrC{@}} \ssrN{ident} {\optsep} \ssrC{(}\optional{\ssrC{@}}\ssrN{ident} \ssrC{:=} \ssrN{c-pattern}\ssrC{)} -\end{center} -Clear operations are intertwined with generalization operations. This -helps in particular avoiding dependency issues while generalizing some facts. - -\noindent -If an \ssrN{ident} is prefixed with the \ssrC{@} prefix mark, then a -let-in redex is created, which keeps track if its body (if any). The -syntax \ssrC{(}\ssrN{ident}\ssrC{:=}\ssrN{c-pattern}\ssrC{)} allows to -generalize an arbitrary term using a given name. 
Note that its simplest -form \ssrC{(x := y)} is just a renaming of \ssrC{y} into \ssrC{x}. In -particular, this can be useful in order to simulate the generalization -of a section variable, otherwise not allowed. Indeed renaming does not -require the original variable to be cleared. - - -\noindent -The syntax \ssrC{(@x := y)} generates a let-in abstraction but with the following -caveat: \ssrC{x} will not bind \ssrC{y}, but its body, whenever \ssrC{y} can be -unfolded. This covers the case of both local and global definitions, as -illustrated in the following example: - -\begin{lstlisting} -Section Test. -Variable x : nat. -Definition addx z := z + x. -Lemma test : x <= addx x. -wlog H : (y := x) (@twoy := addx x) / twoy = 2 * y. -\end{lstlisting} -\noindent -The first subgoal is: -\begin{lstlisting} - (forall y : nat, let twoy := y + y in twoy = 2 * y -> y <= twoy) -> - x <= addx x -\end{lstlisting} -\noindent -To avoid unfolding the term captured by the pattern \ssrC{addx x} one -can use the pattern \ssrC{id (addx x)}, that would produce the following first -subgoal: -\begin{lstlisting} - (forall y : nat, let twoy := addx y in twoy = 2 * y -> y <= twoy) -> - x <= addx x -\end{lstlisting} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Rewriting}\label{sec:rw} -\idx{rewrite \dots{}} - -The generalized use of reflection implies that most of the -intermediate results handled are properties of effectively computable -functions. The most efficient means of establishing such results is -computation and simplification of expressions involving such -functions, i.e., rewriting. \ssr{} therefore includes an extended -\ssrC{rewrite} tactic, that unifies and combines most of the rewriting -functionalities. 
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{An extended \ssrC{rewrite} tactic}\label{ssec:extrw} -The main features of the \ssrC{rewrite} tactic are: -\begin{itemize} -\item It can perform an entire series of such operations in any - subset of the goal and/or context; -\item It allows to perform rewriting, - simplifications, folding/unfolding of definitions, closing of goals; -\item Several rewriting operations can be chained in a single tactic; -\item Control over the occurrence at which rewriting is to be performed is - significantly enhanced. -\end{itemize} - - -The general form of an \ssr{} rewrite tactic is: - -\begin{center} - \ssrC{rewrite} \ssrN{rstep}$^+$\ssrC{.} -\end{center} - -The combination of a rewrite tactic with the \ssrC{in} tactical (see -section \ref{ssec:loc}) performs rewriting in both the context and the -goal. - -A rewrite step \ssrN{rstep} has the general form: - -\begin{center} - \optional{\ssrN{r-prefix}}\ssrN{r-item} -\end{center} - -where: - -\begin{longtable}{rcl} -\ssrN{r-prefix} & ::= & - \optional{\ssrC{-}} \optional{\ssrN{mult}} \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} \optional{\ssrC{[}\ssrN{r-pattern}\ssrC{]}}\\ -\ssrN{r-pattern} & ::= & -{\term} {\optsep} \ssrC{in} \optional{\ssrN{ident} \ssrC{in}} {\term} {\optsep} \optional{{\term} \ssrC{in} {\optsep} {\term} \ssrC{as} } \ssrN{ident} \ssrC{in} {\term}\\ -\ssrN{r-item} & ::= & -\optional{\ssrC{/}}{\term} {\optsep} \ssrN{s-item} \\ -\end{longtable} - - -% \begin{eqnarray*} -% \ssrN{r-prefix} & ::= & -% [\ssrC{-}]\ [\ssrN{mult}][\ssrN{occ-switch} | \ssrN{cl-item}][{\term}]\\ -% \ssrN{r-item} & ::= & -% [\ssrC{-}]{\term}\ |\ [\ssrC{-}]\ssrC{[}\ssrN[1]{term}\ssrC{]}\ssrC{/(}\ssrN[2]{term}\ssrC{)} \ |\ -% \ssrN{simpl switch} \ |\ \\ -% && \ssrN{eq-term} \ |\ \ssrC{(} \ssrN[1]{eq-term}\ssrC{,}\dots -% \ssrC{,}\ssrN[n]{eq-term} \ssrC{)} \ |\ \ssrC{(_ :}\ssrN{eq-term} \ssrC{)} -% \end{eqnarray*} - - -An \ssrN{r-prefix} contains annotations 
to qualify where and how the -rewrite operation should be performed: -\begin{itemize} -\item The optional initial \ssrC{-} indicates the direction of the rewriting - of \ssrN{r-item}: if present the direction is right-to-left and it is - left-to-right otherwise. -\item The multiplier \ssrN{mult} (see section \ref{ssec:iter}) - specifies if and how the rewrite operation should be repeated. -\item A rewrite operation matches the occurrences of a \emph{rewrite - pattern}, and replaces these occurrences by an other term, according - to the given \ssrN{r-item}. - The optional \emph{redex switch} $\ssrC{[}\ssrN{r-pattern}\ssrC{]}$, which - should always be surrounded by brackets, gives explicitly this - rewrite pattern. In its simplest form, it is a regular term. - If no explicit redex switch - is present the rewrite pattern to be matched is inferred from the - \ssrN{r-item}. -\item This optional {\term}, or - the \ssrN{r-item}, may be preceded by an - occurrence switch (see section \ref{ssec:select}) or a clear item - (see section \ref{ssec:discharge}), these two possibilities being - exclusive. An occurrence switch selects the occurrences of the - rewrite pattern which should be affected by the rewrite operation. -\end{itemize} - - -An \ssrN{r-item} can be: - - -\begin{itemize} -\item A \emph{simplification r-item}, represented by a - \ssrN{s-item} (see section \ref{ssec:intro}). -% In some cases, \ssrN{r-prefix}es are not supported. - Simplification operations are - intertwined with the possible other rewrite operations specified by - the list of r-items. -\item A \emph{folding/unfolding r-item}. The tactic: - - \ssrC{rewrite /}{\term} - -unfolds the head constant of \textit{term} in every occurrence of the -first matching of \textit{term} in the goal. In particular, if -\ssrC{my_def} is a (local or global) defined constant, the tactic: -\begin{lstlisting} - rewrite /my_def. -\end{lstlisting} -is analogous to: -\begin{lstlisting} - unfold my_def. 
-\end{lstlisting} -Conversely: -\begin{lstlisting} - rewrite -/my_def. -\end{lstlisting} -is equivalent to: -\begin{lstlisting} - fold my_def. -\end{lstlisting} -%\emph{Warning} The combination of redex switch with unfold -%\ssrN{r-item} is not yet implemented. - -When an unfold r-item is combined with a redex pattern, a conversion -operation is performed. A tactic of the form: - -\begin{center} - \ssrC{rewrite -[}\ssrN[1]{term}\ssrC{]/}\ssrN[2]{term}\ssrC{.} -\end{center} - -is equivalent to: - - -\begin{center} - \ssrC{change} \ssrN[1]{term} \ssrC{with} \ssrN[2]{term}\ssrC{.} -\end{center} - - -If \ssrN[2]{term} is a single constant and \ssrN[1]{term} head symbol -is not \ssrN[2]{term}, then the head symbol of \ssrN[1]{term} is -repeatedly unfolded until \ssrN[2]{term} appears. - -\begin{lstlisting} - Definition double x := x + x. - Definition ddouble x := double (double x). - Lemma ex1 x : ddouble x = 4 * x. - rewrite [ddouble _]/double. -\end{lstlisting} - -The resulting goal is: - -\begin{lstlisting} - double x + double x = 4 * x -\end{lstlisting} - -\emph{Warning} The \ssr{} terms containing holes are \emph{not} -typed as abstractions in this context. Hence the following script: -\begin{lstlisting} - Definition f := fun x y => x + y. - Goal forall x y, x + y = f y x. - move=> x y. - rewrite -[f y]/(y + _). -\end{lstlisting} -raises the error message -\begin{verbatim} - User error: fold pattern (y + _) does not match redex (f y) -\end{verbatim} -but the script obtained by replacing the last line with: -\begin{lstlisting} - rewrite -[f y x]/(y + _). -\end{lstlisting} -is valid. - - -\item A term, which can be: - \begin{itemize} - \item A term whose type has the form: - $$\ssrC{forall}\ (x_1\ :\ A_1)\dots(x_n\ :\ A_n),\ eq\ term_1\ term_2$$ - where $eq$ is the Leibniz equality or a registered setoid - equality. %In the case of setoid relations, the only supported - %r-prefix is the directional \ssrC{-}. 
 - \item A list of terms $(t_1,\dots,t_n)$, each $t_i$ having a type of the
 -   form: $$\ssrC{forall}\ (x_1\ :\ A_1)\dots(x_n\ :\ A_n),\ eq\ term_1\ term_2$$ where
 -   $eq$ is the Leibniz equality or a registered setoid
 -   equality. The tactic:
 -
 -  \centerline{\ssrC{rewrite} \ssrN{r-prefix}\ssrC{(}$t_1$\ssrC{,}$\dots$\ssrC{,}$t_n$\ssrC{).}}
 -
 -  is equivalent to:
 -
 -  \centerline{\ssrC{do [rewrite} \ssrN{r-prefix} $t_1$ \ssrC{|} $\dots$ \ssrC{| rewrite} \ssrN{r-prefix} $t_n$\ssrC{].}
 -
 - \item An anonymous rewrite lemma
 -   \ssrC{(_ :} {\term}), where \textit{term} has again the form:
 -   $$\ssrC{forall}\ (x_1\ :\ A_1)\dots(x_n\ :\ A_n),\ eq\ term_1\ term_2$$
 -   The tactic:
 -
 -   \centerline{\ssrC{rewrite (_ :} {\term}\ssrC{)}}
 -
 -   is in fact a synonym of:
 -
 -   \centerline{\ssrC{cutrewrite (}{\term}\ssrC{).}}
 -
 -
 - \end{itemize}
 -
 -\end{itemize}
 -
 -
 -
 -
 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 -\subsection{Remarks and examples}\label{ssec:rwex}
 -
 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 -\subsubsection*{Rewrite redex selection}
 -The general strategy of \ssr{}
 -is to grasp as many redexes as possible and to let the user select the
 -ones to be rewritten thanks to the improved syntax for the control of
 -rewriting.
 -
 -This may be a source of incompatibilities between the two \ssrC{rewrite}
 -tactics.
 -
 -In a rewrite tactic of the form:
 -
 -  \ssrC{rewrite} \ssrN{occ-switch}\ssrC{[}\ssrN[1]{term}\ssrC{]}\ssrN[2]{term}.
 -
 -\ssrN[1]{term} is the explicit rewrite redex and
 -\ssrN[2]{term} is the
 -rewrite rule. The execution of this tactic unfolds as follows:
 -
 -\begin{itemize}
 -\item First \ssrN[1]{term} and \ssrN[2]{term} are $\beta\iota$ normalized. Then
 -  \ssrN[2]{term} is put in head normal form if the Leibniz equality
 -  constructor \ssrC{eq} is not the head symbol. This may involve $\zeta$
 -  reductions.
 -\item Then, the matching algorithm (see section \ref{ssec:set})
 -  determines the first subterm of the goal matching the rewrite pattern. 
- The rewrite pattern is - given by \ssrN[1]{term}, if an explicit redex pattern switch is provided, or by - the type of \ssrN[2]{term} otherwise. However, matching skips over - matches that would lead to trivial rewrites. All the - occurrences of this subterm in the goal are candidates for rewriting. -\item Then only the occurrences coded by \ssrN{occ-switch} (see again - section \ref{ssec:set}) are finally selected for rewriting. -\item The left hand side of $\ssrN[2]{term}$ is unified with the subterm found - by the matching algorithm, and if this succeeds, all the selected - occurrences in the goal are replaced by the right hand side of - $\ssrN[2]{term}$. -\item Finally the goal is $\beta\iota$ normalized. -\end{itemize} - -In the case $\ssrN[2]{term}$ is a list of terms, the first top-down (in -the goal) left-to-right (in the list) matching rule gets selected. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Chained rewrite steps} - - -The possibility to chain rewrite operations in a single tactic makes -scripts more compact and gathers in a single command line a bunch -of surgical -operations which would be described by a one sentence in a pen and -paper proof. - -Performing rewrite and simplification operations in a single tactic -enhances significantly the concision of scripts. For instance the -tactic: -\begin{lstlisting} - rewrite /my_def {2}[f _]/= my_eq //=. -\end{lstlisting} -unfolds \ssrC{my_def} in the goal, simplifies the second occurrence of the -first subterm matching pattern \ssrC{[f _]}, rewrites \ssrC{my_eq}, -simplifies the whole goal and closes trivial goals. - -Here are some concrete examples of chained rewrite operations, in the -proof of basic results on natural numbers arithmetic: - -\begin{lstlisting} - Lemma |*addnS*| : forall m n, m + n.+1 = (m + n).+1. - Proof. by move=> m n; elim: m. Qed. - - Lemma |*addSnnS*| : forall m n, m.+1 + n = m + n.+1. - Proof. move=> *; rewrite addnS; apply addSn. Qed. 
- - Lemma |*addnCA*| : forall m n p, m + (n + p) = n + (m + p). - Proof. by move=> m n; elim: m => [|m Hrec] p; rewrite ?addSnnS -?addnS. Qed. - - Lemma |*addnC*| : forall m n, m + n = n + m. - Proof. by move=> m n; rewrite -{1}[n]addn0 addnCA addn0. Qed. -\end{lstlisting} - -Note the use of the \ssrC{?} switch for parallel rewrite operations in -the proof of \ssrC{|*addnCA*|}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Explicit redex switches are matched first} -If an \ssrN{r-prefix} involves a \emph{redex switch}, the first step is to -find a subterm matching this redex pattern, independently from the left hand -side \ssrC{t1} of the equality the user wants to rewrite. - -For instance, if \ssrL-H : forall t u, t + u = u + t- is in the context of a -goal \ssrL-x + y = y + x-, the tactic: -\begin{lstlisting} - rewrite [y + _]H. -\end{lstlisting} -transforms the goal into \ssrL-x + y = x + y-. - -Note that if this first pattern matching is not compatible with the -\emph{r-item}, the rewrite fails, even if the goal contains a correct -redex matching both the redex switch and the left hand side of the -equality. For instance, if \ssrL-H : forall t u, t + u * 0 = t- is -in the context of a goal \ssrL-x + y * 4 + 2 * 0 = x + 2 * 0-, then tactic: -\begin{lstlisting} - rewrite [x + _]H. -\end{lstlisting} -raises the error message: -\begin{verbatim} - User error: rewrite rule H doesn't match redex (x + y * 4) -\end{verbatim} -while the tactic: -\begin{lstlisting} - rewrite (H _ 2). -\end{lstlisting} -transforms the goal into \ssrL-x + y * 4 = x + 2 * 0-. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Occurrence switches and redex switches} -The tactic: -\begin{lstlisting} - rewrite {2}[_ + y + 0](_: forall z, z + 0 = z). 
-\end{lstlisting} -transforms the goal: -\begin{lstlisting} - x + y + 0 = x + y + y + 0 + 0 + (x + y + 0) -\end{lstlisting} -into: -\begin{lstlisting} - x + y + 0 = x + y + y + 0 + 0 + (x + y) -\end{lstlisting} -and generates a second subgoal: -\begin{lstlisting} - forall z : nat, z + 0 = z -\end{lstlisting} -The second subgoal is generated by the use of an anonymous lemma in -the rewrite tactic. The effect of the tactic on the initial goal is to -rewrite this lemma at the second occurrence of the first matching -\ssrL-x + y + 0- of the explicit rewrite redex \ssrL-_ + y + 0-. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Occurrence selection and repetition} -Occurrence selection has priority over repetition switches. This means -the repetition of a rewrite tactic specified by a multiplier -will perform matching each time an elementary rewrite operation is -performed. Repeated rewrite tactics apply to every subgoal generated -by the previous tactic, including the previous instances of the -repetition. For example: -\begin{lstlisting} - Goal forall x y z : nat, x + 1 = x + y + 1. - move=> x y z. -\end{lstlisting} -creates a goal \ssrC{ x + 1 = x + y + 1}, which is turned into \ssrC{z = z} -by the additional tactic: -\begin{lstlisting} - rewrite 2!(_ : _ + 1 = z). -\end{lstlisting} -In fact, this last tactic generates \emph{three} subgoals, -respectively -\ssrC{ x + y + 1 = z}, \ssrC{ z = z} and \ssrC{x + 1 = z}. Indeed, the second -rewrite operation specified with the \ssrC{2!} multiplier applies to -the two subgoals generated by the first rewrite. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Multi-rule rewriting} -The \ssrC{rewrite} tactic can be provided a \emph{tuple} of rewrite rules, -or more generally a tree of such rules, since this tuple can feature -arbitrary inner parentheses. We call \emph{multirule} such a -generalized rewrite rule. 
This feature is of special interest when it -is combined with multiplier switches, which makes the \ssrC{rewrite} -tactic iterates the rewrite operations prescribed by the rules on the -current goal. For instance, let us define two triples \ssrC{multi1} and -\ssrC{multi2} as: -\begin{lstlisting} - Variables (a b c : nat). - - Hypothesis eqab : a = b. - - Hypothesis eqac : a = c. -\end{lstlisting} - -Executing the tactic: -\begin{lstlisting} - rewrite (eqab, eqac) -\end{lstlisting} -on the goal: -\begin{lstlisting} - ========= - a = a -\end{lstlisting} -turns it into \ssrC{b = b}, as rule \ssrC{eqab} is the first to apply among -the ones gathered in the tuple passed to the \ssrC{rewrite} -tactic. This multirule \ssrC{(eqab, eqac)} is actually a \Coq{} term and we -can name it with a definition: -\begin{lstlisting} -Definition |*multi1*| := (eqab, eqac). -\end{lstlisting} -In this case, the tactic \ssrC{rewrite multi1} is a synonym for -\ssrC{(eqab, eqac)}. More precisely, a multirule rewrites -the first subterm to which one of the rules applies in a left-to-right -traversal of the goal, with the first rule from the multirule tree in -left-to-right order. Matching is performed according to the algorithm -described in Section~\ref{ssec:set}, but literal matches have -priority. For instance if we add a definition and a new multirule to -our context: - -\begin{lstlisting} - Definition |*d*| := a. - - Hypotheses eqd0 : d = 0. - - Definition |*multi2*| := (eqab, eqd0). -\end{lstlisting} -then executing the tactic: -\begin{lstlisting} - rewrite multi2. -\end{lstlisting} -on the goal: -\begin{lstlisting} - ========= - d = b -\end{lstlisting} -turns it into \ssrC{0 = b}, as rule \ssrC{eqd0} applies without unfolding -the definition of \ssrC{d}. For repeated rewrites the selection process -is repeated anew. For instance, if we define: - -\begin{lstlisting} - Hypothesis eq_adda_b : forall x, x + a = b. - - Hypothesis eq_adda_c : forall x, x + a = c. 
- - Hypothesis eqb0 : b = 0. - - Definition |*multi3*| := (eq_adda_b, eq_adda_c, eqb0). -\end{lstlisting} -then executing the tactic: -\begin{lstlisting} - rewrite 2!multi3. -\end{lstlisting} -on the goal: -\begin{lstlisting} - ========= - 1 + a = 12 + a -\end{lstlisting} -turns it into \ssrC{0 = 12 + a}: it uses \ssrC{eq_adda_b} then \ssrC{eqb0} on the -left-hand side only. Now executing the tactic \ssrC{rewrite !multi3} -turns the same goal into \ssrC{0 = 0}. - -The grouping of rules inside a multirule does not affect the selection -strategy but can make it easier to include one rule set in another or -to (universally) quantify over the parameters of a subset of rules (as -there is special code that will omit unnecessary quantifiers for rules that -can be syntactically extracted). It is also possible to -reverse the direction of a rule subset, using a special dedicated syntax: -the tactic \ssrC{rewrite (=~ multi1)} is equivalent to -\ssrC{rewrite multi1_rev} with: -\begin{lstlisting} - Hypothesis eqba : b = a. - - Hypothesis eqca : c = a. - - Definition |*multi1_rev*| := (eqba, eqca). -\end{lstlisting} -except that the constants \ssrC{eqba, eqab, mult1_rev} have not been created. - -Rewriting with multirules is useful to implement simplification or -transformation procedures, to be applied on terms of small to medium -size. For instance, the library \ssrL{ssrnat} --- available in the -external math-comp library --- provides two implementations for -arithmetic operations on natural numbers: an elementary one and a tail -recursive version, less inefficient but also less convenient for -reasoning purposes. The library also provides one lemma per such -operation, stating that both versions return the same values when -applied to the same arguments: - -\begin{lstlisting} - Lemma |*addE*| : add =2 addn. - Lemma |*doubleE*| : double =1 doublen. - Lemma |*add_mulE*| n m s : add_mul n m s = addn (muln n m) s. - Lemma |*mulE*| : mul =2 muln. 
- Lemma |*mul_expE*| m n p : mul_exp m n p = muln (expn m n) p. - Lemma |*expE*| : exp =2 expn. - Lemma |*oddE*| : odd =1 oddn. -\end{lstlisting} - -The operation on the left hand side of each lemma is the efficient -version, and the corresponding naive implementation is on the right -hand side. In order to reason conveniently on expressions involving -the efficient operations, we gather all these rules in the -definition \ssrC{|*trecE*|}: -\begin{lstlisting} - Definition |*trecE*| := (addE, (doubleE, oddE), (mulE, add_mulE, (expE, mul_expE))). -\end{lstlisting} -The tactic: -\begin{lstlisting} - rewrite !trecE. -\end{lstlisting} -restores the naive versions of each operation in a goal involving the -efficient ones, e.g. for the purpose of a correctness proof. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Wildcards vs abstractions} - The \ssrC{rewrite} tactic supports r-items containing holes. For example - in the tactic $(1)$: -\begin{lstlisting} - rewrite (_ : _ * 0 = 0). -\end{lstlisting} - the term \ssrC{_ * 0 = 0} is interpreted as \ssrC{forall n : nat, n * 0 = 0}. - Anyway this tactic is \emph{not} equivalent to the tactic $(2)$: -\begin{lstlisting} - rewrite (_ : forall x, x * 0 = 0). -\end{lstlisting} - The tactic $(1)$ transforms the goal - \ssrL-(y * 0) + y * (z * 0) = 0- into \ssrC{y * (z * 0) = 0} - and generates a new subgoal to prove the statement \ssrC{y * 0 = 0}, - which is the \emph{instance} of the\\ \ssrC{forall x, x * 0 = 0} - rewrite rule that - has been used to perform the rewriting. On the other hand, tactic - $(2)$ performs the same rewriting on the current goal but generates a - subgoal to prove \ssrC{forall x, x * 0 = 0}. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{When \ssr{} \ssrC{rewrite} fails on standard \Coq{} licit rewrite} -In a few cases, the \ssr{} \ssrC{rewrite} tactic fails -rewriting some redexes which standard \Coq{} successfully rewrites. 
-There are two main cases: - -\begin{itemize} -\item \ssr{} never accepts to rewrite indeterminate patterns like: -\begin{lstlisting} - Lemma |*foo*| : forall x : unit, x = tt. -\end{lstlisting} -\ssr{} will however accept the $\eta\zeta$ expansion of this rule: -\begin{lstlisting} - Lemma |*fubar*| : forall x : unit, (let u := x in u) = tt. -\end{lstlisting} -\item In standard \Coq{}, suppose that we work in the following context: -\begin{lstlisting} - Variable g : nat -> nat. - Definition |*f*| := g. -\end{lstlisting} -then rewriting \ssrC{H : forall x, f x = 0} in the goal -\ssrC{g 3 + g 3 = g 6} succeeds -and transforms the goal into \ssrC{0 + 0 = g 6}. - -This rewriting is not possible in \ssr{} because there is no -occurrence of the head symbol \ssrC{f} of the rewrite rule in the -goal. Rewriting with \ssrC{H} first requires unfolding the occurrences of -\ssrC{f} where the substitution is to be performed (here there is a single -such occurrence), using tactic \ssrC{rewrite /f} (for a global -replacement of \ssrC{f} by \ssrC{g}) or \ssrC{rewrite $\ \ssrN{pattern}$/f}, for a -finer selection. -\end{itemize} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Existential metavariables and rewriting} -\label{ssec:rewcaveats} -The \ssrC{rewrite} tactic will not instantiate existing existential -metavariables when matching a redex pattern. - -If a rewrite rule generates a goal -with new existential metavariables, these will be generalized as for \ssrC{apply} -(see page~\pageref{sssec:apply}) and corresponding new goals will be generated. -For example, consider the following script: - -\begin{lstlisting} - Lemma |*ex3*| (x : 'I_2) y (le_1 : y < 1) (E : val x = y) : Some x = insub y. - rewrite insubT ?(leq_trans le_1)// => le_2. 
-\end{lstlisting} - -Since \ssrC{insubT} has the following type: - -\begin{lstlisting} - forall T P (sT : subType P) (x : T) (Px : P x), insub x = Some (Sub x Px) -\end{lstlisting} - -and since the implicit argument corresponding to the \ssrC{Px} abstraction is not -supplied by the user, the resulting goal should be \ssrC{Some x = Some (Sub y -$\;\;?_{Px}$)}. Instead, \ssr{} \ssrC{rewrite} tactic generates the two following -goals: -\begin{lstlisting} - y < 2 - forall Hyp0 : y < 2, Some x = Some (Sub y Hyp0) -\end{lstlisting} -The script closes the former with \ssrC{?(leq_trans le_1)//}, then it introduces -the new generalization naming it \ssrC{le_2}. - -\begin{lstlisting} - x : 'I_2 - y : nat - le_1 : y < 1 - E : val x = y - le_2 : y < 2 - ============================ - Some x = Some (Sub y le_2) -\end{lstlisting} - -As a temporary limitation, this behavior is available only if the rewriting -rule is stated using Leibniz equality (as opposed to setoid relations). -It will be extended to other rewriting relations in the future. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Locking, unlocking} \label{ssec:lock} - -As program proofs tend to generate large goals, it is important to be -able to control the partial evaluation performed by the simplification -operations that are performed by the tactics. These evaluations can -for example come from a \ssrC{/=} simplification switch, or from rewrite steps -which may expand large terms while performing conversion. We definitely -want to avoid repeating large subterms of the goal in -the proof script. We do this by -``clamping down'' selected function symbols in the goal, which -prevents them from -being considered in simplification or rewriting steps. This clamping -is accomplished by using the occurrence switches (see section -\ref{sssec:occselect}) together with ``term tagging'' operations. - -\ssr{} provides two levels of tagging. 
- -The first one uses auxiliary definitions to introduce a provably equal -copy of any term \ssrC{t}. However this copy is (on purpose) -\emph{not convertible} to \ssrC{t} in the \Coq{} system\footnote{This is - an implementation feature: there is not such obstruction in the - metatheory}. The job is done by the following construction: - -\begin{lstlisting} - Lemma |*master_key*| : unit. Proof. exact tt. Qed. - Definition |*locked*| A := let: tt := master_key in fun x : A => x. - Lemma |*lock*| : forall A x, x = locked x :> A. -\end{lstlisting} -Note that the definition of \ssrC{|*master_key*|} is explicitly opaque. -The equation \ssrC{t = locked t} given by the \ssrC{lock} lemma can be used -for selective rewriting, blocking on the fly the reduction in the -term \ssrC{t}. -For example the script: -\begin{lstlisting} - Require Import List. - Variable A : Type. - - Fixpoint |*my_has*| (p : A -> bool)(l : list A){struct l} : bool:= - match l with - |nil => false - |cons x l => p x || (my_has p l) - end. - - Goal forall a x y l, a x = true -> my_has a ( x :: y :: l) = true. - move=> a x y l Hax. -\end{lstlisting} -where \ssrL{||} denotes the boolean disjunction, results in a goal -\ssrC{my_has a ( x :: y :: l) = true}. The tactic: -\begin{lstlisting} - rewrite {2}[cons]lock /= -lock. -\end{lstlisting} -turns it into \ssrC{a x || my_has a (y :: l) = true}. -Let us now start by reducing the initial goal without blocking reduction. -The script: -\begin{lstlisting} - Goal forall a x y l, a x = true -> my_has a ( x :: y :: l) = true. - move=> a x y l Hax /=. -\end{lstlisting} -creates a goal \ssrC{(a x) || (a y) || (my_has a l) = true}. Now the -tactic: -\begin{lstlisting} - rewrite {1}[orb]lock orbC -lock. -\end{lstlisting} -where \ssrC{orbC} states the commutativity of \ssrC{orb}, changes the -goal into\\ \ssrC{(a x) || (my_has a l) || (a y) = true}: only the -arguments of the second disjunction where permuted. 
- - -It is sometimes desirable to globally prevent a definition from being -expanded by simplification; this is done by adding \ssrC{locked} in the -definition. - -For instance, the function \ssrC{|*fgraph_of_fun*|} maps a function whose -domain and codomain are finite types to a concrete representation of -its (finite) graph. Whatever implementation of this transformation we -may use, we want it to be hidden to simplifications and tactics, to -avoid the collapse of the graph object: -\begin{lstlisting} - Definition |*fgraph_of_fun*| := - locked - (fun (d1 :finType) (d2 :eqType) (f : d1 -> d2) => Fgraph (size_maps f _)). -\end{lstlisting} - -We provide a special tactic \ssrC{unlock} for unfolding such definitions -while removing ``locks'', e.g., the tactic: - - \ssrC{unlock} \ssrN{occ-switch}\ssrC{fgraph_of_fun}. - -replaces the occurrence(s) of \ssrC{fgraph_of_fun} coded by the \ssrN{occ-switch} -with \ssrC{(Fgraph (size_maps _ _))} in the goal. - -We found that it was usually preferable to prevent the expansion of -some functions by the partial evaluation switch ``/='', unless -this allowed the evaluation of a condition. This is possible thanks to -an other mechanism of term tagging, resting on the following -\emph{Notation}: -\begin{lstlisting} - Notation "'nosimpl' t" := (let: tt := tt in t). -\end{lstlisting} - -The term \ssrC{(nosimpl t)} simplifies to t \emph{except} in a -definition. More precisely, -given: -\begin{lstlisting} - Definition |*foo*| := (nosimpl bar). -\end{lstlisting} -the term \ssrC{foo (or (foo t'))} will \emph{not} be expanded by the -\emph{simpl} tactic unless it is in a forcing context (e.g., in -\ssrC{match foo t' with $\dots$ end}, \ssrC{foo t'} will be reduced if this allows -\ssrC{match} to be reduced). Note that \ssrC{nosimpl bar} is simply notation -for a term that reduces to \ssrC{bar}; hence \ssrC{unfold foo} will replace - \ssrC{foo} by \ssrC{bar}, and \ssrC{fold foo} will replace \ssrC{bar} by - \ssrC{foo}. 
- -\emph{Warning} The \ssrC{nosimpl} trick only works if no reduction is -apparent in \ssrC{t}; in particular, the declaration: -\begin{lstlisting} - Definition |*foo*| x := nosimpl (bar x). -\end{lstlisting} -will usually not work. Anyway, the common practice is to tag only the -function, and to use the following definition, which blocks the -reduction as expected: -\begin{lstlisting} - Definition |*foo*| x := nosimpl bar x. -\end{lstlisting} - - -A standard example making this technique shine is the case of -arithmetic operations. We define for instance: -\begin{lstlisting} - Definition |*addn*| := nosimpl plus. -\end{lstlisting} -The operation \ssrC{addn} behaves exactly like plus, except that -\ssrC{(addn (S n) m)} will not -simplify spontaneously to \ssrC{(S (addn n m))} (the two terms, however, are -inter-convertible). In addition, the unfolding step: -\begin{lstlisting} -rewrite /addn -\end{lstlisting} -will replace \ssrC{addn} directly with \ssrC{plus}, so the \ssrC{nosimpl} form -is essentially invisible. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Congruence}\label{ssec:congr} - -Because of the way matching interferes with type families parameters, -the tactic: -\begin{lstlisting} - apply: my_congr_property. -\end{lstlisting} -will generally fail to perform congruence simplification, even on -rather simple cases. We therefore provide a -more robust alternative in which the function is supplied: -$$\ssrC{congr}\ [\ssrN{int}]\ {\term}$$ - -This tactic: -\begin{itemize} -\item checks that the goal is a Leibniz equality -\item matches both sides of this equality with ``{\term} applied to - some arguments'', inferring the right number of arguments from the goal - and the type of {\term}. This may - expand some definitions or fixpoints. -\item generates the subgoals corresponding to pairwise equalities of - the arguments present in the goal. -\end{itemize} - -The goal can be a non dependent product \ssrC{P -> Q}. 
-In that case, the system asserts the equation \ssrC{P = Q}, uses it to solve -the goal, and calls the \ssrC{congr} tactic on the remaining goal -\ssrC{P = Q}. This can be useful for instance to perform a transitivity -step, like in the following situation: -\begin{lstlisting} - x, y, z : nat - =============== - x = y -> x = z -\end{lstlisting} -the tactic \ssrC{congr (_ = _)} turns this goal into: - -\begin{lstlisting} - x, y, z : nat - =============== - y = z -\end{lstlisting} -which can also be obtained starting from: -\begin{lstlisting} - x, y, z : nat - h : x = y - =============== - x = z -\end{lstlisting} -and using the tactic \ssrC{congr (_ = _): h}. - -The optional \ssrN{int} forces the number of arguments for which the -tactic should generate equality proof obligations. - -This tactic supports equalities between applications with dependent -arguments. Yet dependent arguments should have exactly the same -parameters on both sides, and these parameters should appear as first -arguments. - -The following script: -\begin{lstlisting} - Definition f n := match n with 0 => plus | S _ => mult end. - Definition g (n m : nat) := plus. - - Goal forall x y, f 0 x y = g 1 1 x y. - by move=> x y; congr plus. - Qed. -\end{lstlisting} -shows that the \ssrC{congr} tactic matches \ssrC{plus} with \ssrC{f 0} on the -left hand side and \ssrC{g 1 1} on the right hand side, and solves the goal. - -The script: -\begin{lstlisting} - Goal forall n m, m <= n -> S m + (S n - S m) = S n. - move=> n m Hnm; congr S; rewrite -/plus. -\end{lstlisting} -generates the subgoal \ssrC{m + (S n - S m) = n}. The tactic -\ssrC{rewrite -/plus} folds back the expansion of \ssrC{plus} which was -necessary for matching both sides of the equality with an application -of \ssrC{S}. - -Like most \ssr{} arguments, {\term} can contain wildcards. -The script: -\begin{lstlisting} - Goal forall x y, x + (y * (y + x - x)) = x * 1 + (y + 0) * y. - move=> x y; congr ( _ + (_ * _)). 
-\end{lstlisting} -generates three subgoals, respectively \ssrC{x = x * 1}, \ssrC{y = y + 0} -and \ssrC{ y + x - x = y}. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Contextual patterns} -\label{ssec:rewp} - -The simple form of patterns used so far, ${\term}s$ possibly containing -wild cards, often require an additional \ssrN{occ-switch} to be specified. -While this may work pretty fine for small goals, the use of polymorphic -functions and dependent types may lead to an invisible duplication of functions -arguments. These copies usually end up in types hidden by the implicit -arguments machinery or by user defined notations. In these situations -computing the right occurrence numbers is very tedious because they must be -counted on the goal as printed after setting the \ssrC{Printing All} flag. -Moreover the resulting script is not really informative for the reader, since -it refers to occurrence numbers he cannot easily see. - -Contextual patterns mitigate these issues allowing to specify occurrences -according to the context they occur in. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Syntax} - -The following table summarizes the full syntax of -\ssrN{c-pattern} and the corresponding subterm(s) identified -by the pattern. -In the third column we use s.m.r. for -``the subterms matching the redex'' specified in the second column. - -\begin{center} -%\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.21\textwidth}|>{\arraybackslash}m{0.39\textwidth}} -\begin{tabular}{llp{10em}} -\ssrN{c-pattern} & redex & subterms affected \\ -\hline -{\term} & {\term} & all occurrences of {\term}\\ -\hline -$\ssrN{ident}\ \ssrC{in}\ {\term}$ & - subterm of {\term} selected by \ssrN{ident} & - all the subterms identified by \ssrN{ident} in all - the occurrences of {\term} \\ -\hline -$\ssrN[1]{term}\ \ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ & $\ssrN[1]{term}$ & - in all s.m.r. 
in all the subterms identified by \ssrN{ident} in all - the occurrences of $\ssrN[2]{term}$ \\ -\hline -$\ssrN[1]{term}\ \ssrC{as}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ & $\ssrN[1]{term}$ & - in all the subterms identified by \ssrN{ident} in all - the occurrences of $\ssrN[2]{term}[\ssrN[1]{term}/\ssrN{ident}]$\\ -\hline -%\end{tabularx} -\end{tabular} -\end{center} - -The \ssrC{rewrite} tactic supports two more patterns obtained -prefixing the first two with \ssrC{in}. The intended meaning is that the -pattern identifies all subterms of the specified context. The -\ssrC{rewrite} tactic will infer a pattern for the redex looking at the -rule used for rewriting. - -\begin{center} -\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.21\textwidth}|>{\arraybackslash}m{0.39\textwidth}} -\ssrN{r-pattern} & redex & subterms affected \\ -\hline -$\ssrC{in}\ {\term}$ & inferred from rule & - in all s.m.r. in all occurrences of {\term}\\ -\hline -$\ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ {\term}$ & inferred from rule & - in all s.m.r. in all the subterms identified by \ssrN{ident} in all - the occurrences of {\term} \\ -\hline -\end{tabularx} -\end{center} - -The first \ssrN{c-pattern} is the simplest form matching any -context but selecting a specific redex and has been described in the -previous sections. We have seen so far that the possibility of -selecting a redex using a term with holes is already a powerful mean of redex -selection. Similarly, any {\term}s provided by the -user in the more complex forms of \ssrN{c-pattern}s presented in the -tables above can contain holes. - -For a quick glance at what can be expressed with the last -\ssrN{r-pattern} consider the goal \ssrC{a = b} and the tactic -\begin{lstlisting} - rewrite [in X in _ = X]rule. -\end{lstlisting} -It rewrites all occurrences of the left hand side of \ssrC{rule} inside -\ssrC{b} only (\ssrC{a}, and the hidden type of the equality, are ignored). 
-Note that the variant \ssrC{rewrite [X in _ = X]rule} would have
-rewritten \ssrC{b} exactly (i.e., it would only work if \ssrC{b} and the
-left hand side of \ssrC{rule} can be unified).
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection{Matching contextual patterns}
-
-The \ssrN{c-pattern}s and \ssrN{r-pattern}s involving
-{\term}s with holes are matched
-against the goal in order to find a closed instantiation. This
-matching proceeds as follows:
-
-\begin{center}
-\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.65\textwidth}}
-\ssrN{c-pattern} & instantiation order and place for $\ssrN[i]{term}$ and redex\\
-\hline
-{\term} & {\term} is matched against the goal, redex is unified with
-  the instantiation of {\term}\\
-\hline
-$\ssrN{ident}\ \ssrC{in}\ {\term}$ &
-  {\term} is matched against the goal, redex is
-  unified with the subterm of the
-  instantiation of {\term} identified by \ssrN{ident}\\
-\hline
-$\ssrN[1]{term}\ \ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ &
-  $\ssrN[2]{term}$ is matched against the goal, $\ssrN[1]{term}$ is
-  matched against the subterm of the
-  instantiation of $\ssrN[2]{term}$ identified by \ssrN{ident},
-  redex is unified with the instantiation of $\ssrN[1]{term}$\\
-\hline
-$\ssrN[1]{term}\ \ssrC{as}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ &
-  $\ssrN[2]{term}[\ssrN[1]{term}/\ssrN{ident}]$
-  is matched against the goal,
-  redex is unified with the instantiation of $\ssrN[1]{term}$\\
-\hline
-\end{tabularx}
-\end{center}
-
-In the following patterns, the redex is intended to be inferred from the
-rewrite rule. 
-
-\begin{center}
-\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.65\textwidth}}
-\ssrN{r-pattern} & instantiation order and place for $\ssrN[i]{term}$ and redex\\
-\hline
-$\ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ {\term}$ &
- {\term} is matched against the goal, the redex is
- matched against the subterm of the
- instantiation of {\term} identified by \ssrN{ident}\\
-\hline
-$\ssrC{in}\ {\term}$ & {\term} is matched against the goal, redex is
- matched against the instantiation of {\term}\\
-\hline
-\end{tabularx}
-\end{center}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection{Examples}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsubsection{Contextual pattern in \ssrC{set} and the \ssrC{:} tactical}
-
-As already mentioned in section~\ref{ssec:set} the \ssrC{set} tactic
-takes as an argument a term in open syntax. This term is interpreted
-as the simplest form of \ssrN{c-pattern}. To avoid confusion in the grammar,
-open syntax is supported only for the simplest form of patterns, while
- parentheses are required around more complex patterns.
-
-\begin{lstlisting}
-set t := (X in _ = X).
-set t := (a + _ in X in _ = X).
-\end{lstlisting}
-
-Given the goal \ssrC{a + b + 1 = b + (a + 1)} the first tactic
-captures \ssrC{b + (a + 1)}, while the latter \ssrC{a + 1}.
-
-Since the user may define an infix notation for \ssrC{in} the former
-tactic may be ambiguous. The disambiguation rule implemented is
-to prefer patterns over simple terms, but to interpret a pattern with
-double parentheses as a simple term. For example
-the following tactic would capture any occurrence of the term `\ssrC{a in A}'.
-
-\begin{lstlisting}
-set t := ((a in A)).
-\end{lstlisting}
-
-Contextual patterns can also be used as arguments of the \ssrC{:} tactical.
-For example:
-\begin{lstlisting}
-elim: n (n in _ = n) (refl_equal n). 
-\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection{Contextual patterns in \ssrC{rewrite}} -As a more comprehensive example consider the following goal: -\begin{lstlisting} - (x.+1 + y) + f (x.+1 + y) (z + (x + y).+1) = 0 -\end{lstlisting} -The tactic \ssrC{rewrite [in f _ _]addSn} turns it into: -\begin{lstlisting} - (x.+1 + y) + f (x + y).+1 (z + (x + y).+1) = 0 -\end{lstlisting} -since the simplification rule \ssrC{addSn} is applied only under the \ssrC{f} symbol. -Then we simplify also the first addition and expand \ssrC{0} into \ssrC{0+0}. -\begin{lstlisting} - rewrite addSn -[X in _ = X]addn0. -\end{lstlisting} -obtaining: -\begin{lstlisting} - (x + y).+1 + f (x + y).+1 (z + (x + y).+1) = 0 + 0 -\end{lstlisting} -Note that the right hand side of \ssrC{addn0} is undetermined, but the -rewrite pattern specifies the redex explicitly. The right hand side of -\ssrC{addn0} is unified with the term identified by \ssrC{X}, \ssrC{0} here. - -The following pattern does not specify a redex, since it -identifies an entire region, hence the rewrite rule has to be instantiated -explicitly. Thus the tactic: -\begin{lstlisting} - rewrite -{2}[in X in _ = X](addn0 0). -\end{lstlisting} -changes the goal as follows: -\begin{lstlisting} - (x + y).+1 + f (x + y).+1 (z + (x + y).+1) = 0 + (0 + 0) -\end{lstlisting} -The following tactic is quite tricky: -\begin{lstlisting} - rewrite [_.+1 in X in f _ X](addnC x.+1). -\end{lstlisting} -and the resulting goals is: -\begin{lstlisting} - (x + y).+1 + f (x + y).+1 (z + (y + x.+1)) = 0 + (0 + 0) -\end{lstlisting} -The explicit redex \ssrC{_.+1} is important since its head -constant \ssrC{S} differs from the head constant inferred from -\ssrC{(addnC x.+1)} (that is \ssrC{addn}, denoted \ssrC{+} here). -Moreover, the pattern \ssrC{f _ X} is important to rule out the first occurrence -of \ssrC{(x + y).+1}. 
Last, only the subterms of \ssrC{f _ X} identified by \ssrC{X} are -rewritten, thus the first argument of \ssrC{f} is skipped too. -Also note the pattern \ssrC{_.+1} is interpreted in the context -identified by \ssrC{X}, thus it gets instantiated to \ssrC{(y + x).+1} and -not \ssrC{(x + y).+1}. - -The last rewrite pattern allows to specify exactly the shape of the term -identified by \ssrC{X}, that is thus unified with the left hand side of the -rewrite rule. -\begin{lstlisting} - rewrite [x.+1 + y as X in f X _]addnC. -\end{lstlisting} -The resulting goal is: -\begin{lstlisting} - (x + y).+1 + f (y + x.+1) (z + (y + x.+1)) = 0 + (0 + 0) -\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Patterns for recurrent contexts} - -The user can define shortcuts for recurrent contexts corresponding to the -\ssrN{ident} \ssrC{in} {\term} part. The notation scope identified -with \ssrC{\%pattern} provides a special notation `\ssrC{(X in t)}' the user -must adopt to define context shortcuts. - -The following example is taken from \ssrC{ssreflect.v} where the -\ssrC{LHS} and \ssrC{RHS} shortcuts are defined. - -\begin{lstlisting} -Notation RHS := (X in _ = X)%pattern. -Notation LHS := (X in X = _)%pattern. -\end{lstlisting} - -Shortcuts defined this way can be freely used in place of the -trailing \ssrN{ident} \ssrC{in} {\term} part of any contextual -pattern. -Some examples follow: - -\begin{lstlisting} -set rhs := RHS. -rewrite [in RHS]rule. -case: (a + _ in RHS). -\end{lstlisting} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Views and reflection}\label{sec:views} - -The bookkeeping facilities presented in section \ref{sec:book} are -crafted to ease simultaneous introductions and generalizations of facts and -casing, -naming $\dots$ operations. 
It also a common practice to make a stack -operation immediately followed by an \emph{interpretation} of the fact -being pushed, -that is, to apply a lemma to this fact before passing it -to a tactic for decomposition, application and so on. - - -% possibly - -% Small scale reflection consists in using a two levels -% approach locally when developing formal proofs. This means that a -% fact, which may be an assumption, or the goal itself, will often be -% \emph{interpreted} before being passed to a tactic -% for decomposition, application and so on. - -\ssr{} provides a convenient, unified syntax to combine these -interpretation operations with the proof stack operations. This -\emph{view mechanism} relies on the combination of the \ssrC{/} view -switch with bookkeeping tactics and tacticals. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Interpreting eliminations} -\idx{elim/\dots{}} - -The view syntax combined with the \ssrC{elim} tactic specifies an -elimination scheme to -be used instead of the default, generated, one. Hence the \ssr{} tactic: -\begin{lstlisting} - elim/V. -\end{lstlisting} -is a synonym for: -\begin{lstlisting} - intro top; elim top using V; clear top. -\end{lstlisting} -where \ssrC{top} is a fresh name and \ssrC{V} any second-order lemma. - -Since an elimination view supports the two bookkeeping tacticals of -discharge and introduction (see section \ref{sec:book}), the \ssr{} tactic: -\begin{lstlisting} - elim/V: x => y. -\end{lstlisting} -is a synonym for: -\begin{lstlisting} - elim x using V; clear x; intro y. -\end{lstlisting} -where \ssrC{x} is a variable in the context, \ssrC{y} a fresh name and \ssrC{V} -any second order lemma; \ssr{} relaxes the syntactic restrictions of -the \Coq{} \ssrC{elim}. 
The first pattern following \ssrC{:} can be a \ssrC{_} -wildcard if the conclusion of the view \ssrC{V} specifies a pattern for -its last argument (e.g., if \ssrC{V} is a functional induction lemma -generated by the \ssrC{Function} command). - -The elimination view mechanism is compatible with the equation name -generation (see section \ref{ssec:equations}). - -The following script illustrate a toy example of this feature. Let us -define a function adding an element at the end of a list: -\begin{lstlisting} - Require Import List. - - Variable d : Type. - - Fixpoint |*add_last*|(s : list d) (z : d) {struct s} : list d := - match s with - | nil => z :: nil - | cons x s' => cons x (add_last s' z) - end. -\end{lstlisting} - -One can define an alternative, reversed, induction principle on inductively -defined \ssrC{list}s, by proving the following lemma: - -\begin{lstlisting} - Lemma |*last_ind_list*| : forall (P : list d -> Type), - P nil -> - (forall (s : list d) (x : d), P s -> P (add_last s x)) -> forall s : list d, P s. -\end{lstlisting} - -Then the combination of elimination views with equation names result -in a concise syntax for reasoning inductively using the user -defined elimination scheme. The script: -\begin{lstlisting} - Goal forall (x : d)(l : list d), l = l. - move=> x l. - elim/last_ind_list E : l=> [| u v]; last first. -\end{lstlisting} -generates two subgoals: the first one to prove \ssrC{nil = nil} in a -context featuring \ssrC{E : l = nil} and the second to prove -\ssrC{add_last u v = add_last u v}, in a context containing -\ssrC{E : l = add_last u v}. - -User provided eliminators (potentially generated with the -\ssrC{Function} \Coq{}'s command) can be combined with the type family switches -described in section~\ref{ssec:typefam}. 
Consider an eliminator -\ssrC{foo_ind} of type: - - \ssrC{foo_ind : forall $\dots$, forall x : T, P p$_1$ $\dots$ p$_m$} - -and consider the tactic - - \ssrC{elim/foo_ind: e$_1$ $\dots$ / e$_n$} - -The \ssrC{elim/} tactic distinguishes two cases: -\begin{description} -\item[truncated eliminator] when \ssrC{x} does not occur in \ssrC{P p$_1 \dots$ p$_m$} - and the type of \ssrC{e$_n$} unifies with \ssrC{T} and \ssrC{e$_n$} is not \ssrC{_}. - In that case, \ssrC{e$_n$} is passed to the eliminator as the last argument - (\ssrC{x} in \ssrC{foo_ind}) and \ssrC{e$_{n-1} \dots$ e$_1$} are used as patterns - to select in the goal the occurrences that will be bound by the - predicate \ssrC{P}, thus it must be possible to unify the sub-term of - the goal matched by \ssrC{e$_{n-1}$} with \ssrC{p$_m$}, the one matched by - \ssrC{e$_{n-2}$} with \ssrC{p$_{m-1}$} and so on. -\item[regular eliminator] in all the other cases. Here it must be - possible to unify the term matched by - \ssrC{e$_n$} with \ssrC{p$_m$}, the one matched by - \ssrC{e$_{n-1}$} with \ssrC{p$_{m-1}$} and so on. Note that - standard eliminators have the shape \ssrC{$\dots$forall x, P $\dots$ x}, thus - \ssrC{e$_n$} is the pattern identifying the eliminated term, as expected. -\end{description} -As explained in section~\ref{ssec:typefam}, the initial prefix of -\ssrC{e$_i$} can be omitted. - -Here an example of a regular, but non trivial, eliminator: -\begin{lstlisting} - Function |*plus*| (m n : nat) {struct n} : nat := - match n with 0 => m | S p => S (plus m p) end. -\end{lstlisting} -The type of \ssrC{plus_ind} is -\begin{lstlisting} -plus_ind : forall (m : nat) (P : nat -> nat -> Prop), - (forall n : nat, n = 0 -> P 0 m) -> - (forall n p : nat, n = p.+1 -> P p (plus m p) -> P p.+1 (plus m p).+1) -> - forall n : nat, P n (plus m n) -\end{lstlisting} -Consider the following goal -\begin{lstlisting} - Lemma |*exF*| x y z: plus (plus x y) z = plus x (plus y z). 
-\end{lstlisting}
-The following tactics are all valid and perform the same elimination
-on that goal.
-\begin{lstlisting}
- elim/plus_ind: z / (plus _ z).
- elim/plus_ind: {z}(plus _ z).
- elim/plus_ind: {z}_.
- elim/plus_ind: z / _.
-\end{lstlisting}
-In the latter two examples, since the user-provided pattern is a wildcard, the
-pattern inferred from the type of the eliminator is used instead. For both
-cases it is \ssrC{(plus _ _)} and matches the subterm \ssrC{plus (plus x y)$\;$z} thus
-instantiating the latter \ssrC{_} with \ssrC{z}. Note that the tactic
-\ssrC{elim/plus_ind: y / _} would have resulted in an error, since \ssrC{y} and \ssrC{z}
-do not unify but the type of the eliminator requires the second argument of
-\ssrC{P} to be the same as the second argument of \ssrC{plus} in the second
-argument of \ssrC{P}.
-
-Here is an example of a truncated eliminator. Consider the goal
-\begin{lstlisting}
- p : nat_eqType
- n : nat
- n_gt0 : 0 < n
- pr_p : prime p
- =================
- p %| \prod_(i <- prime_decomp n | i \in prime_decomp n) i.1 ^ i.2 ->
- exists2 x : nat * nat, x \in prime_decomp n & p = x.1
-\end{lstlisting}
-and the tactic
-\begin{lstlisting}
-elim/big_prop: _ => [| u v IHu IHv | [q e] /=].
-\end{lstlisting}
-where the type of the eliminator is
-\begin{lstlisting}
-big_prop: forall (R : Type) (Pb : R -> Type) (idx : R) (op1 : R -> R -> R),
- Pb idx ->
- (forall x y : R, Pb x -> Pb y -> Pb (op1 x y)) ->
- forall (I : Type) (r : seq I) (P : pred I) (F : I -> R),
- (forall i : I, P i -> Pb (F i)) ->
- Pb (\big[op1/idx]_(i <- r | P i) F i)
-\end{lstlisting}
-Since the pattern for the argument of \ssrC{Pb} is not specified, the inferred one
-is used instead: \ssrC{(\\big[_/_]_(i <- _ | _ i) _ i)}, and after the
-introductions, the following goals are generated. 
-\begin{lstlisting} -subgoal 1 is: - p %| 1 -> exists2 x : nat * nat, x \in prime_decomp n & p = x.1 -subgoal 2 is: - p %| u * v -> exists2 x : nat * nat, x \in prime_decomp n & p = x.1 -subgoal 3 is: - (q, e) \in prime_decomp n -> p %| q ^ e -> - exists2 x : nat * nat, x \in prime_decomp n & p = x.1 -\end{lstlisting} -Note that the pattern matching algorithm instantiated all the variables -occurring in the pattern. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Interpreting assumptions}\label{ssec:assumpinterp} -\idx{move/\dots{}} - -Interpreting an assumption in the context of a proof is applying it a -correspondence lemma before generalizing, and/or decomposing it. -For instance, with the extensive use of boolean reflection (see -section \ref{ssec:boolrefl}), it is -quite frequent to need to decompose the logical interpretation of (the -boolean expression of) a -fact, rather than the fact itself. -This can be achieved by a combination of \ssrC{move : _ => _} -switches, like in the following script, where \ssrC{||} is a notation for -the boolean disjunction: -\begin{lstlisting} - Variables P Q : bool -> Prop. - Hypothesis |*P2Q*| : forall a b, P (a || b) -> Q a. - - Goal forall a, P (a || a) -> True. - move=> a HPa; move: {HPa}(P2Q _ _ HPa) => HQa. -\end{lstlisting} -which transforms the hypothesis \ssrC{HPn : P n} which has been -introduced from the initial statement into \ssrC{HQn : Q n}. -This operation is so common that the tactic shell has -specific syntax for it. -The following scripts: -\begin{lstlisting} - Goal forall a, P (a || a) -> True. - move=> a HPa; move/P2Q: HPa => HQa. -\end{lstlisting} -or more directly: -\begin{lstlisting} - Goal forall a, P (a || a) -> True. - move=> a; move/P2Q=> HQa. -\end{lstlisting} -are equivalent to the former one. 
The former script shows how to -interpret a fact (already in the context), thanks to the discharge -tactical (see section \ref{ssec:discharge}) and the latter, how to -interpret the top assumption of a goal. Note -that the number of wildcards to be inserted to find the correct -application of the view lemma to the hypothesis has been automatically -inferred. - -The view mechanism is compatible with the \ssrC{case} tactic and with the -equation name generation mechanism (see section \ref{ssec:equations}): -\begin{lstlisting} - Variables P Q: bool -> Prop. - Hypothesis |*Q2P*| : forall a b, Q (a || b) -> P a \/ P b. - - Goal forall a b, Q (a || b) -> True. - move=> a b; case/Q2P=> [HPa | HPb]. -\end{lstlisting} -creates two new subgoals whose contexts no more contain -\ssrC{HQ : Q (a || b)} but respectively \ssrC{HPa : P a} and -\ssrC{HPb : P b}. This view tactic -performs: -\begin{lstlisting} - move=> a b HQ; case: {HQ}(Q2P _ _ HQ) => [HPa | HPb]. -\end{lstlisting} - -The term on the right of the \ssrC{/} view switch is called a \emph{view - lemma}. Any \ssr{} term coercing to a product type can be used as a -view lemma. - - -The examples we have given so far explicitly provide the direction of the -translation to be performed. In fact, view lemmas need not to be -oriented. The view mechanism is able to detect which -application is relevant for the current goal. For instance, the -script: -\begin{lstlisting} - Variables P Q: bool -> Prop. - Hypothesis |*PQequiv*| : forall a b, P (a || b) <-> Q a. - - Goal forall a b, P (a || b) -> True. - move=> a b; move/PQequiv=> HQab. -\end{lstlisting} -has the same behavior as the first example above. - -The view mechanism can insert automatically a \emph{view hint} to -transform the double implication into the expected simple implication. -The last script is in fact equivalent to: -\begin{lstlisting} - Goal forall a b, P (a || b) -> True. - move=> a b; move/(iffLR (PQequiv _ _)). 
-\end{lstlisting} -where: -\begin{lstlisting} - Lemma |*iffLR*| : forall P Q, (P <-> Q) -> P -> Q. -\end{lstlisting} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Specializing assumptions} -\idx{move/\dots{}} - -The special case when the \emph{head symbol} of the view lemma is a -wildcard is used to interpret an assumption by \emph{specializing} -it. The view mechanism hence offers the possibility to -apply a higher-order assumption to some given arguments. - -For example, the script: -\begin{lstlisting} - Goal forall z, (forall x y, x + y = z -> z = x) -> z = 0. - move=> z; move/(_ 0 z). -\end{lstlisting} -changes the goal into: -\begin{lstlisting} - (0 + z = z -> z = 0) -> z = 0 -\end{lstlisting} - - - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Interpreting goals}\label{ssec:goalinterp} - -In a similar way, it is also often convenient to interpret a goal by changing -it into an equivalent proposition. The view mechanism of \ssr{} has a -special syntax \ssrC{apply/} for combining simultaneous goal -interpretation operations and -bookkeeping steps in a single tactic. - -With the hypotheses of section \ref{ssec:assumpinterp}, the following -script, where \ssrL+~~+ denotes the boolean negation: -\begin{lstlisting} - Goal forall a, P ((~~ a) || a). - move=> a; apply/PQequiv. -\end{lstlisting} -transforms the goal into \ssrC{Q (~~ a)}, and is equivalent to: -\begin{lstlisting} - Goal forall a, P ((~~ a) || a). - move=> a; apply: (iffRL (PQequiv _ _)). -\end{lstlisting} -where \ssrC{iffLR} is the analogous of \ssrC{iffRL} for the converse -implication. - -Any \ssr{} term whose type coerces to a double implication can be used -as a view for goal interpretation. - -Note that the goal interpretation view mechanism supports both -\ssrC{apply} and \ssrC{exact} tactics. As expected, a goal interpretation -view command \ssrC{exact/$term$} should solve the current goal or it will -fail. 
-
-
-\emph{Warning} Goal interpretation view tactics are \emph{not} compatible
-with the bookkeeping tactical \ssrC{=>} since this would be redundant with
-the \ssrC{apply:} {\term} \ssrC{=> _} construction.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection{Boolean reflection}\label{ssec:boolrefl}
-In the Calculus of Inductive Constructions, there is
-an obvious distinction between logical propositions and boolean values.
-On the one hand, logical propositions are objects
-of \emph{sort} \ssrC{Prop} which is the carrier of intuitionistic
-reasoning. Logical connectives in \ssrC{Prop} are \emph{types}, which give precise
-information on the structure of their proofs; this information is
-automatically exploited by \Coq{} tactics. For example, \Coq{} knows that a
-proof of \ssrL+A \/ B+ is either a proof of \ssrC{A} or a proof of \ssrC{B}.
-The tactics \ssrC{left} and \ssrC{right} change the goal \ssrL+A \/ B+
-to \ssrC{A} and \ssrC{B}, respectively; dually, the tactic \ssrC{case} reduces the goal
-\ssrL+A \/ B => G+ to two subgoals \ssrC{A => G} and \ssrC{B => G}.
-
-On the other hand, \ssrC{bool} is an inductive \emph{datatype}
-with two constructors \ssrC{true} and \ssrC{false}.
-Logical connectives on \ssrC{bool} are \emph{computable functions}, defined by
-their truth tables, using case analysis:
-\begin{lstlisting}
- Definition (b1 || b2) := if b1 then true else b2.
-\end{lstlisting}
-Properties of such connectives are also established using case
-analysis: the tactic \ssrC{by case: b} solves the goal
-\begin{lstlisting}
- b || ~~ b = true
-\end{lstlisting}
-by replacing \ssrC{b} first by \ssrC{true} and then by \ssrC{false}; in either case,
-the resulting subgoal reduces by computation to the trivial
-\ssrC{true = true}.
-
-Thus, \ssrC{Prop} and \ssrC{bool} are truly complementary: the former
-supports robust natural deduction, the latter allows brute-force
-evaluation. 
-\ssr{} supplies -a generic mechanism to have the best of the two worlds and move freely -from a propositional version of a -decidable predicate to its boolean version. - -First, booleans are injected into propositions -using the coercion mechanism: -\begin{lstlisting} - Coercion |*is_true*| (b : bool) := b = true. -\end{lstlisting} -This allows any boolean formula~\ssrC{b} to be used in a context -where \Coq{} would expect a proposition, e.g., after \ssrC{Lemma $\dots$ : }. -It is then interpreted as \ssrC{(is_true b)}, i.e., -the proposition \ssrC{b = true}. Coercions are elided by the pretty-printer, -so they are essentially transparent to the user. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{The \ssrC{reflect} predicate}\label{ssec:reflpred} - -To get all the benefits of the boolean reflection, it is in fact -convenient to introduce the following inductive predicate -\ssrC{reflect} to relate propositions and booleans: - -\begin{lstlisting} - Inductive |*reflect*| (P: Prop): bool -> Type := - | Reflect_true: P => reflect P true - | Reflect_false: ~P => reflect P false. -\end{lstlisting} - -The statement \ssrC{(reflect P b)} asserts that \ssrC{(is_true b)} -and \ssrC{P} are logically equivalent propositions. - -For instance, the following lemma: -\begin{lstlisting} - Lemma |*andP*|: forall b1 b2, reflect (b1 /\ b2) (b1 && b2). -\end{lstlisting} -relates the boolean conjunction \ssrC{&&} to -the logical one \ssrL+/\+. -Note that in \ssrC{andP}, \ssrC{b1} and \ssrC{b2} are two boolean variables and -the proposition \ssrL+b1 /\ b2+ hides two coercions. -The conjunction of \ssrC{b1} and \ssrC{b2} can then be viewed -as \ssrL+b1 /\ b2+ or as \ssrC{b1 && b2}. - - -Expressing logical equivalences through this family of inductive types -makes possible to take benefit from \emph{rewritable equations} -associated to the case analysis of \Coq{}'s inductive types. 
- -Since the equivalence predicate is defined in \Coq{} as: -\begin{lstlisting} - Definition |*iff*| (A B:Prop) := (A -> B) /\ (B -> A). -\end{lstlisting} -where \ssrC{/\\} is a notation for \ssrC{and}: -\begin{lstlisting} - Inductive |*and*| (A B:Prop) : Prop := - conj : A -> B -> and A B -\end{lstlisting} - -This make case analysis very different according to the way an -equivalence property has been defined. - - -For instance, if we have proved the lemma: -\begin{lstlisting} - Lemma |*andE*|: forall b1 b2, (b1 /\ b2) <-> (b1 && b2). -\end{lstlisting} -let us compare the respective behaviours of \ssrC{andE} and \ssrC{andP} on a -goal: -\begin{lstlisting} - Goal forall b1 b2, if (b1 && b2) then b1 else ~~(b1||b2). -\end{lstlisting} - -The command: -\begin{lstlisting} - move=> b1 b2; case (@andE b1 b2). -\end{lstlisting} -generates a single subgoal: -\begin{lstlisting} - (b1 && b2 -> b1 /\ b2) -> (b1 /\ b2 -> b1 && b2) -> - if b1 && b2 then b1 else ~~ (b1 || b2) -\end{lstlisting} - -while the command: -\begin{lstlisting} - move=> b1 b2; case (@andP b1 b2). -\end{lstlisting} -generates two subgoals, respectively \ssrL+b1 /\ b2 -> b1+ and -\ssrL+~ (b1 /\ b2) -> ~~ (b1 || b2)+. - - - -Expressing reflection relation through the \ssrC{reflect} predicate -is hence a very convenient way to deal with classical reasoning, by -case analysis. Using the \ssrC{reflect} predicate allows moreover to -program rich specifications inside -its two constructors, which will be automatically taken into account -during destruction. This formalisation style gives far more -efficient specifications than quantified (double) implications. - - -A naming convention in \ssr{} is to postfix the name of view lemmas with \ssrC{P}. -For example, \ssrC{orP} relates \ssrC{||} and \ssrL+\/+, \ssrC{negP} relates -\ssrL+~~+ and \ssrL+~+. - -The view mechanism is compatible with \ssrC{reflect} predicates. - -For example, the script -\begin{lstlisting} - Goal forall a b : bool, a -> b -> a /\\ b. 
- move=> a b Ha Hb; apply/andP. -\end{lstlisting} -changes the goal \ssrL+a /\ b+ to \ssrC{a && b} (see section \ref{ssec:goalinterp}). - -Conversely, the script -\begin{lstlisting} - Goal forall a b : bool, a /\ b -> a. - move=> a b; move/andP. -\end{lstlisting} -changes the goal \ssrL+a /\ b -> a+ into \ssrC{a && b -> a} (see section -\ref{ssec:assumpinterp}). - - -The same tactics can also be used to perform the converse -operation, changing a boolean conjunction into a logical one. The view -mechanism guesses the direction of the -transformation to be used i.e., the constructor of the \ssrC{reflect} -predicate which should be chosen. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{General mechanism for interpreting goals and assumptions} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Specializing assumptions} -\idx{move/\dots{}} - -The \ssr{} -tactic: - - \ssrC{move/(_} \ssrN[1]{term} $\dots$ \ssrN[n]{term}\ssrC{)} - -\noindent -is equivalent to the tactic: - - \ssrC{intro top; generalize (top} \ssrN[1]{term} $\dots$ \ssrN[n]{term}\ssrC{); clear top.} - -\noindent -where \ssrC{top} is a fresh name for introducing the top assumption of -the current goal. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Interpreting assumptions} -\label{sssec:hypview} -The general form of an assumption view tactic is: - -\begin{center} - \optional{\ssrC{move} {\optsep} \ssrC{case}} \ssrC{/} \ssrN[0]{term} -\end{center} - -The term \ssrN[0]{term}, called the \emph{view lemma} can be: -\begin{itemize} -\item a (term coercible to a) function; -\item a (possibly quantified) implication; -\item a (possibly quantified) double implication; -\item a (possibly quantified) instance of the \ssrC{reflect} predicate - (see section \ref{ssec:reflpred}). -\end{itemize} - -Let \ssrC{top} be the top assumption in the goal. 
- -There are three steps in the behaviour of an assumption view tactic: -\begin{itemize} -\item It first introduces \ssrL+top+. -\item If the type of \ssrN[0]{term} is neither a double implication nor - an instance of the \ssrC{reflect} predicate, then the tactic - automatically generalises a term of the form: - -\begin{center} - \ssrC{(}\ssrN[0]{term} \ssrN[1]{term} $\dots$ \ssrN[n]{term}\ssrC{)} -\end{center} - - where the terms \ssrN[1]{term} $\dots$ \ssrN[n]{term} instantiate the - possible quantified variables of \ssrN[0]{term}, in order for - \ssrC{(}\ssrN[0]{term} \ssrN[1]{term} $\dots$ \ssrN[n]{term} \ssrC{top)} to be well typed. -\item If the type of $\ssrN[0]{term}$ is an equivalence, or - an instance of the \ssrC{reflect} predicate, it generalises a term of - the form: - \begin{center} - (\ssrN[vh]{term} (\ssrN[0]{term} \ssrN[1]{term} $\dots$ \ssrN[n]{term})) - \end{center} - where the term \ssrN[vh]{term} inserted is called an - \emph{assumption interpretation view hint}. -\item It finally clears \ssrC{top}. -\end{itemize} -For a \ssrC{case/}\ssrN[0]{term} tactic, the generalisation step is -replaced by a case analysis step. - -\emph{View hints} are declared by the user (see section -\ref{ssec:vhints}) and are stored in the \ssrC{Hint View} database. -The proof engine automatically -detects from the shape of the top assumption \ssrC{top} and of the view -lemma $\ssrN[0]{term}$ provided to the tactic the appropriate view hint in -the database to be inserted. - -If $\ssrN[0]{term}$ is a double implication, then the view hint \ssrC{A} will -be one of the defined view hints for implication. These hints are by -default the ones present in the file {\tt ssreflect.v}: -\begin{lstlisting} - Lemma |*iffLR*| : forall P Q, (P <-> Q) -> P -> Q. -\end{lstlisting} -which transforms a double implication into the left-to-right one, or: -\begin{lstlisting} - Lemma |*iffRL*| : forall P Q, (P <-> Q) -> Q -> P. -\end{lstlisting} -which produces the converse implication. 
In both cases, the first two
-\ssrC{Prop} arguments are implicit.
-
-If $\ssrN[0]{term}$ is an instance of the \ssrC{reflect} predicate, then \ssrC{A}
-will be one of the defined view hints for the \ssrC{reflect}
-predicate, which are by
-default the ones present in the file {\tt ssrbool.v}.
-These hints are not only used for choosing the appropriate direction of
-the translation, but they also allow complex transformations, involving
-negations.
- For instance the hint:
-\begin{lstlisting}
- Lemma |*introN*| : forall (P : Prop) (b : bool), reflect P b -> ~ P -> ~~ b.
-\end{lstlisting}
-makes the following script:
-\begin{lstlisting}
- Goal forall a b : bool, a -> b -> ~~ (a && b).
- move=> a b Ha Hb. apply/andP.
-\end{lstlisting}
-transforms the goal into \ssrC{ \~ (a /\ b)}.
-In fact\footnote{The current state of the proof shall be displayed by
- the \ssrC{Show Proof} command of \Coq{} proof mode.}
-this last script does not exactly use the hint \ssrC{introN}, but the
-more general hint:
-\begin{lstlisting}
- Lemma |*introNTF*| : forall (P : Prop) (b c : bool),
- reflect P b -> (if c then ~ P else P) -> ~~ b = c
-\end{lstlisting}
-The lemma \ssrL+|*introN*|+ is an instantiation of \ssrC{introNTF} using
- \ssrC{c := true}.
-
-Note that views, being part of \ssrN{i-pattern}, can be used to interpret
-assertions too. For example the following script asserts \ssrC{a \&\& b}
-but actually uses its propositional interpretation.
-\begin{lstlisting}
- Lemma |*test*| (a b : bool) (pab : b && a) : b.
- have /andP [pa ->] : (a && b) by rewrite andbC. 
-\end{lstlisting} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsubsection*{Interpreting goals} -\idx{apply/\dots{}} - -A goal interpretation view tactic of the form: - -\begin{center} - \ssrC{apply/} \ssrN[0]{term} -\end{center} -applied to a goal \ssrC{top} is interpreted in the following way: -\begin{itemize} -\item If the type of $\ssrN[0]{term}$ is not an instance of the - \ssrC{reflect} predicate, nor an equivalence, - then the term $\ssrN[0]{term}$ is applied to the current goal \ssrC{top}, - possibly inserting implicit arguments. -\item If the type of $\ssrN[0]{term}$ is an instance of the \ssrC{reflect} - predicate or an equivalence, then -a \emph{goal interpretation view hint} can possibly be inserted, which -corresponds to the application of a term -\ssrC{($\ssrN[vh]{term}$ ($\ssrN[0]{term}$ _ $\dots$ _))} to the current -goal, possibly inserting implicit arguments. -\end{itemize} - -Like assumption interpretation view hints, goal interpretation ones -are user defined lemmas stored (see section \ref{ssec:vhints}) in the -\ssrC{Hint View} database bridging -the possible gap between the type of $\ssrN[0]{term}$ and the type of the -goal. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Interpreting equivalences} -\idx{apply/\dots{}/\dots{}} - -Equivalent boolean propositions are simply \emph{equal} boolean terms. -A special construction helps the user to prove boolean equalities by -considering them as logical double implications (between their coerced -versions), while -performing at the same time logical operations on both sides. - -The syntax of double views is: -\begin{center} - \ssrC{apply/} \ssrN[l]{term} \ssrC{/} \ssrN[r]{term} -\end{center} - -The term \ssrN[l]{term} is the view lemma applied to the left hand side of the -equality, \ssrN[r]{term} is the one applied to the right hand side. - -In this context, the identity view: -\begin{lstlisting} -Lemma |*idP*| : reflect b1 b1. 
-\end{lstlisting} -is useful, for example the tactic: -\begin{lstlisting} - apply/idP/idP. -\end{lstlisting} -transforms the goal -\ssrL+~~ (b1 || b2)= b3+ - into two subgoals, respectively - \ssrL+~~ (b1 || b2) -> b3+ and \\ -\ssrL+b3 -> ~~ (b1 || b2).+ - -The same goal can be decomposed in several ways, and the user may -choose the most convenient interpretation. For instance, the tactic: -\begin{lstlisting} - apply/norP/idP. -\end{lstlisting} -applied on the same goal \ssrL+~~ (b1 || b2) = b3+ generates the subgoals -\ssrL+~~ b1 /\ ~~ b2 -> b3+ and\\ -\ssrL+b3 -> ~~ b1 /\ ~~ b2+. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection{Declaring new \ssrC{Hint View}s}\label{ssec:vhints} -\idxC{Hint View} - -The database of hints for the view mechanism is extensible via a -dedicated vernacular command. As library {\tt ssrbool.v} already -declares a corpus of hints, this feature is probably useful only for -users who define their own logical connectives. Users can declare -their own hints following the syntax used in {\tt ssrbool.v}: - -\begin{center} - \ssrC{Hint View for} {\tac} \ssrC{/} \ssrN{ident} \optional{\ssrC{|}{\naturalnumber}} -\end{center} - - where {\tac}$\in \{$\ssrC{move, apply}$\}$, \ssrN{ident} is the -name of the lemma to be declared as a hint, and ${\naturalnumber}$ a natural -number. If \ssrL+move+ is used as {\tac}, the hint is declared for -assumption interpretation tactics, \ssrL+apply+ declares hints for goal -interpretations. -Goal interpretation view hints are declared for both simple views and -left hand side views. The optional natural number ${\naturalnumber}$ is the -number of implicit arguments to be considered for the declared hint -view lemma \ssrC{name_of_the_lemma}. - -The command: - -\begin{center} - \ssrC{Hint View for apply//} \ssrN{ident}\optional{\ssrC{|}{\naturalnumber}}. -\end{center} - -with a double slash \ssrL+//+, declares hint views for right hand sides of -double views. 
-
-
-\noindent See the files {\tt ssreflect.v} and {\tt ssrbool.v} for examples.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\subsection{Multiple views}\label{ssec:multiview}
-
-The hypotheses and the goal can be interpreted applying multiple views in
-sequence. Both \ssrC{move} and \ssrC{apply} can be followed by an arbitrary number
-of \ssrC{/}$\ssrN[i]{term}$. The main difference between the following two tactics
-\begin{lstlisting}
-  apply/v1/v2/v3.
-  apply/v1; apply/v2; apply/v3.
-\end{lstlisting}
-is that the former applies all the views to the principal goal.
-Applying a view with hypotheses generates new goals, and the second line
-would apply the view \ssrC{v2} to all the goals generated by \ssrC{apply/v1}.
-Note that the NO-OP intro pattern \ssrC{-} can be used to separate two
-views, making the two following examples equivalent:
-\begin{lstlisting}
-  move=> /v1; move=> /v2.
-  move=> /v1-/v2.
-\end{lstlisting}
-
-The tactic \ssrC{move} can be used together with the \ssrC{in}
-tactical to pass a given hypothesis to a lemma. For example, if
-\ssrC{P2Q : P -> Q } and \ssrC{Q2R : Q -> R}, the following
-tactic turns the hypothesis \ssrC{p : P} into \ssrC{p : R}.
-\begin{lstlisting}
-  move/P2Q/Q2R in p.
-\end{lstlisting}
-
-If the list of views is of length two, \ssrC{Hint View}s for interpreting
-equivalences are indeed taken into account, otherwise only single
-\ssrC{Hint View}s are used.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-\section{\ssr{} searching tool}
-\idxC{Search \dots{}}
-
-\ssr{} proposes an extension of the \ssrC{Search} command. 
Its syntax is:
-
-\begin{center}
- \ssrC{Search} \optional{\ssrN{pattern}} \optional{\optional{\ssrC{\-}} \optional{\ssrN{string}\optional{\ssrC{\%}\ssrN{key}} {\optsep} \ssrN{pattern}}}$^*$ \optional{\ssrC{in} \optional{\optional{\ssrC{\-}} \ssrN{name} }$^+$}
-\end{center}
-
-% \begin{lstlisting}
-%   Search [[\~]\ssrN{string}]$^*$ [\ssrN{pattern}] [[$\ssrN[1]{pattern} \dots $ $\ssrN[n]{pattern}$]] $[[$inside$|$outside$]$ $M_1 \dots M_n$].
-% \end{lstlisting}
-
-% This tactic returns the list of defined constants matching the
-% given criteria:
-% \begin{itemize}
-% \item \ssrL+[[-]\ssrN{string}]$^*$+ is an open sequence of strings, which should
-%   all appear in the name of the returned constants. The optional \ssrL+-+
-%   prefixes strings that are required \emph{not} to appear.
-% % \item  \ssrN{pattern} should be a subterm of the
-% %   \emph{conclusion} of the lemmas found by the command. If a lemma features
-% %   an occurrence
-% %   of this pattern only in one or several of its assumptions, it will not be
-% %   selected by the searching tool.
-% \item
-%  \ssrL=[$\ssrN{pattern}^+$]=
-%   is a list of \ssr{} terms, which may
-%   include types, that are required to appear in the returned constants.
-%   Terms with holes should be surrounded by parentheses.
-% \item $\ssrC{in}\ [[\ssrC{\-}]M]^+$ limits the search to the signature
-%   of open modules given in the list, but the ones preceded by the
-%   $\ssrC{\-}$ flag. The
-%   command:
-% \begin{lstlisting}
-%  Search in M.
-% \end{lstlisting}
-% is hence a way of obtaining the complete signature of the module \ssrL{M}.
-% \end{itemize}
-where \ssrN{name} is the name of an open module.
-This command returns the list of lemmas:
-\begin{itemize}
-\item whose \emph{conclusion} contains a subterm matching the optional
-  first \ssrN{pattern}. A $\ssrC{-}$ reverses the test, producing the list
-  of lemmas whose conclusion does not contain any subterm matching
-  the pattern;
-\item whose name contains the given string. 
A $\ssrC{-}$ prefix reverses - the test, producing the list of lemmas whose name does not contain the - string. A string that contains symbols or -is followed by a scope \ssrN{key}, is interpreted as the constant whose -notation involves that string (e.g., \ssrL=+= for \ssrL+addn+), if this is -unambiguous; otherwise the diagnostic includes the output of the -\ssrC{Locate} vernacular command. - -\item whose statement, including assumptions and types, contains a - subterm matching the next patterns. If a pattern is prefixed by - $\ssrC{-}$, the test is reversed; -\item contained in the given list of modules, except the ones in the - modules prefixed by a $\ssrC{-}$. -\end{itemize} - -Note that: -\begin{itemize} -\item As for regular terms, patterns can feature scope - indications. For instance, the command: -\begin{lstlisting} - Search _ (_ + _)%N. -\end{lstlisting} -lists all the lemmas whose statement (conclusion or hypotheses) -involve an application of the binary operation denoted by the infix -\ssrC{+} symbol in the \ssrC{N} scope (which is \ssr{} scope for natural numbers). -\item Patterns with holes should be surrounded by parentheses. -\item Search always volunteers the expansion of the notation, avoiding the - need to execute Locate independently. Moreover, a string fragment - looks for any notation that contains fragment as - a substring. If the \ssrL+ssrbool+ library is imported, the command: -\begin{lstlisting} - Search "~~". -\end{lstlisting} -answers : -\begin{lstlisting} -"~~" is part of notation (~~ _) -In bool_scope, (~~ b) denotes negb b -negbT forall b : bool, b = false -> ~~ b -contra forall c b : bool, (c -> b) -> ~~ b -> ~~ c -introN forall (P : Prop) (b : bool), reflect P b -> ~ P -> ~~ b -\end{lstlisting} - \item A diagnostic is issued if there are different matching notations; - it is an error if all matches are partial. -\item Similarly, a diagnostic warns about multiple interpretations, and - signals an error if there is no default one. 
-\item The command \ssrC{Search in M.} -is a way of obtaining the complete signature of the module \ssrL{M}. -\item Strings and pattern indications can be interleaved, but the - first indication has a special status if it is a pattern, and only - filters the conclusion of lemmas: -\begin{itemize} - \item The command : - \begin{lstlisting} - Search (_ =1 _) "bij". - \end{lstlisting} -lists all the lemmas whose conclusion features a '$\ssrC{=1}$' and whose -name contains the string \verb+bij+. -\item The command : - \begin{lstlisting} - Search "bij" (_ =1 _). - \end{lstlisting} -lists all the lemmas whose statement, including hypotheses, features a -'$\ssrC{=1}$' and whose name contains the string \verb+bij+. - -\end{itemize} - -\end{itemize} -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Synopsis and Index} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection*{Parameters} - -\begin{minipage}[c]{\textwidth}\renewcommand{\footnoterule}{} -\begin{longtable}{lcl} -\ssrN{d-tactic} && one of the - \ssrC{elim}, \ssrC{case}, \ssrC{congr}, \ssrC{apply}, \ssrC{exact} - and \ssrC{move} \ssr{} tactics \\ -\ssrN{fix-body} && standard \Coq{} \textit{fix\_body}\\ -\ssrN{ident} && standard \Coq{} identifier\\ -\ssrN{int} && integer literal \\ -\ssrN{key} && notation scope\\ -\ssrN{name} && module name\\ -${\naturalnumber}$ && \ssrN{int} or Ltac variable denoting a standard \Coq{} numeral\footnote{The name of this Ltac variable should not be the name of a tactic which can be followed by a bracket - \ssrL+[+, like \ssrL+do+, \ssrL+ have+,\dots}\\ -\ssrN{pattern} && synonym for {\term}\\ -\ssrN{string} && standard \Coq{} string\\ -{\tac} && standard \Coq{} tactic or \ssr{} tactic\\ -{\term} & \hspace{1cm} & Gallina term, possibly containing wildcards\\ -%\ssrN{view} && global constant\\ -\end{longtable} -\end{minipage} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection*{Items and switches} - -\begin{longtable}{lclr} 
-\ssrN{binder} & {\ident} {\optsep} \ssrC{(} {\ident} \optional{\ssrC{:} {\term} } \ssrC{)} & binder& p. \pageref{ssec:pose}\\ -\\ -\ssrN{clear-switch} & \ssrC{\{} {\ident}$^+$ \ssrC{\}} & clear switch & p. \pageref{ssec:discharge}\\ -\\ -\ssrN{c-pattern} & \optional{{\term} \ssrC{in} {\optsep} {\term} \ssrC{as}} {\ident} \ssrC{in} {\term} & context pattern & p. \pageref{ssec:rewp} \\ -\\ -\ssrN{d-item} & \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} \optional{{\term} {\optsep} \ssrC{(}\ssrN{c-pattern}\ssrC{)}} & discharge item & p. \pageref{ssec:discharge}\\ -\\ -\ssrN{gen-item} & \optional{\ssrC{@}}{\ident} {\optsep} \ssrC{(}{\ident}\ssrC{)} {\optsep} \ssrC{(}\optional{\ssrC{@}}{\ident} \ssrC{:=} \ssrN{c-pattern}\ssrC{)} & generalization item & p. \pageref{ssec:struct}\\ -\\ -\ssrN{i-pattern} & {\ident} {\optsep} \ssrC{_} {\optsep} \ssrC{?} {\optsep} \ssrC{*} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{->} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{<-} {\optsep} & intro pattern & p. \pageref{ssec:intro}\\ -& \ssrC{[} \ssrN{i-item}$^*$ \ssrC{|} $\dots$ \ssrC{|} \ssrN{i-item}$^*$ \ssrC{]} {\optsep} \ssrC{-} {\optsep} \ssrC{[:} {\ident}$^+$\ssrC{]} &\\ -\\ -\ssrN{i-item} & \ssrN{clear-switch} {\optsep} \ssrN{s-item} {\optsep} \ssrN{i-pattern} {\optsep} \ssrC{/}{\term} & intro item & p. \pageref{ssec:intro}\\ -\\ -\ssrN{int-mult} & \optional{{\naturalnumber}} \ssrN{mult-mark} & multiplier & p. \pageref{ssec:iter}\\ -\\ -\ssrN{occ-switch} & \ssrC{\{} \optional{\ssrC{+} {\optsep} \ssrC{-}} {\naturalnumber}$^*$\ssrC{\}} & occur. switch & p. \pageref{sssec:occselect}\\ -\\ -\ssrN{mult} & \optional{{\naturalnumber}} \ssrN{mult-mark} & multiplier & p. \pageref{ssec:iter}\\ -\\ -\ssrN{mult-mark} & \ssrC{?} {\optsep} \ssrC{!} & multiplier mark & p. \pageref{ssec:iter}\\ -\\ -\ssrN{r-item} & \optional{\ssrC{/}} {\term} {\optsep} \ssrN{s-item} & rewrite item & p. 
\pageref{ssec:extrw}\\ -\\ -\ssrN{r-prefix} & \optional{\ssrC{-}} \optional{\ssrN{int-mult}} \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} \optional{\ssrC{[}\ssrN{r-pattern}\ssrC{]}} & rewrite prefix & p. \pageref{ssec:extrw}\\ -\\ -\ssrN{r-pattern} & {\term} {\optsep} \ssrN{c-pattern} {\optsep} \ssrC{in} \optional{{\ident} \ssrC{in}} {\term} & rewrite pattern & p. \pageref{ssec:extrw}\\ -\\ -\ssrN{r-step} & \optional{\ssrN{r-prefix}}\ssrN{r-item} & rewrite step & p. \pageref{ssec:extrw}\\ -\\ -\ssrN{s-item} & \ssrC{/=} {\optsep} \ssrC{//} {\optsep} \ssrC{//=} & simplify switch & p. \pageref{ssec:intro}\\ -\\ -\end{longtable} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection*{Tactics} -\emph{Note}: \ssrC{without loss} and \ssrC{suffices} are synonyms for \ssrC{wlog} and -\ssrC{suff} respectively. - -\begin{longtable}{llr} -\ssrC{move} & \textcolor{dkblue}{\texttt{idtac}} or \ssrC{hnf}& p. \pageref{ssec:profstack} \\ -\ssrC{apply} & application & p. \pageref{ssec:basictac}\\ -\ssrC{exact} &&\\ -\ssrC{abstract} && p. \pageref{ssec:abstract}, \pageref{sec:havetransparent}\\ -\\ -\ssrC{elim} & induction & p. \pageref{ssec:basictac}\\ -\ssrC{case} & case analysis & p. \pageref{ssec:basictac}\\ -\\ -\ssrC{rewrite} \ssrN{rstep}$^+$ & rewrite& p. \pageref{ssec:extrw}\\ -\\ -\ssrC{have} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} \optional{\ssrN{s-item} {\optsep} \ssrN{binder}$^+$} \optional{\ssrC{:} {\term}} \ssrC{:=} {\term} & forward & p. 
\pageref{ssec:struct}\\ -\ssrC{have} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} \optional{\ssrN{s-item}{\optsep} \ssrN{binder}$^+$} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & chaining & \\ -\ssrC{have suff} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-pattern}} \optional{\ssrC{:} {\term}} \ssrC{:=} {\term} & & \\ -\ssrC{have suff} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-pattern}} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & & \\ -\ssrC{gen have} \optional{{\ident}\ssrC{,}} \optional{\ssrN{i-pattern}} \ssrC{:} \ssrN{gen-item}$^+$ \ssrC{/} {\term} \optional{\ssrC{by} {\tac}} & & \\ -\\ -\ssrC{wlog} \optional{\ssrC{suff}} \optional{\ssrN{i-item}} \ssrC{:} \optional{\ssrN{gen-item}{\optsep} \ssrN{clear-switch}}$^*$ \ssrC{/} {\term} & specializing & p. \pageref{ssec:struct} \\ -\\ -\ssrC{suff} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} \optional{\ssrN{binder}$^+$} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & backchaining & p. \pageref{ssec:struct}\\ -\ssrC{suff} \optional{\ssrC{have}} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-pattern}} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & & \\ -\\ -\ssrC{pose} {\ident} \ssrC{:=} {\term} & local definition& p. \pageref{ssec:pose}\\ -\ssrC{pose} {\ident} \ssrN{binder}$^+$ \ssrC{:=} {\term} & \rlap{local function definition}& \\ -\ssrC{pose fix} \ssrN{fix-body} & \rlap{local fix definition} & \\ -\ssrC{pose cofix} \ssrN{fix-body} & \rlap{local cofix definition} & \\ -\\ -\ssrC{set} {\ident} \optional{\ssrC{:} {\term}} \ssrC{:=} \optional{\ssrN{occ-switch}} \optional{{\term}{\optsep} \ssrC{(}\ssrN{c-pattern}\ssrC{)}} & abbreviation&p. \pageref{ssec:set}\\ -\\ -\ssrC{unlock} \optional{\ssrN{r-prefix}]{\ident}}$^*$ & unlock & p. \pageref{ssec:lock}\\ -\\ -\ssrC{congr} \optional{\naturalnumber} {\term} & congruence& p. 
\pageref{ssec:congr}\\ -\end{longtable} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection*{Tacticals} - -\begin{longtable}{lclr} -\ssrN{d-tactic} \optional{\ident} \ssrC{:} \ssrN{d-item}$^{+}$ \optional{\ssrN{clear-switch}} & & discharge & p. \pageref{ssec:discharge}\\ -\\ -{\tac} \ssrC{=>} \ssrN{i-item}$^+$ && introduction & p. \pageref{ssec:intro}\\ -\\ -{\tac} \ssrC{in} \optional{\ssrN{gen-item} {\optsep} \ssrN{clear-switch}}$^+$ \optional{\ssrC{*}} && localization & p. \pageref{ssec:gloc}\\ -\\ -\ssrC{do} \optional{\ssrN{mult}} \ssrC{[} \nelist{\tac}{|} \ssrC{]}&& iteration & p. \pageref{ssec:iter}\\ -\ssrC{do} \ssrN{mult} {\tac} &&& \\ -\\ -{\tac} \ssrC{ ; first} \optional{\naturalnumber} \ssrC{[}\nelist{\tac}{|}\ssrC{]} && selector & p. \pageref{ssec:select}\\ -{\tac} \ssrC{ ; last} \optional{\naturalnumber} \ssrC{[}\nelist{\tac}{|}\ssrC{]} \\ -{\tac} \ssrC{ ; first} \optional{\naturalnumber} \ssrC{last} && subgoals & p. \pageref{ssec:select}\\ -{\tac} \ssrC{; last} \optional{\naturalnumber} \ssrC{first} && rotation & \\ -\\ -\ssrC{by [} \nelist{\tac}{|} \ssrC{]} && closing & p. \pageref{ssec:termin}\\ -\ssrC{by []} \\ -\ssrC{by} {\tac} \\ -\end{longtable} - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\subsection*{Commands} -\begin{longtable}{lclr} -\ssrL+Hint View for+ \optional{\ssrL+move+ {\it |} \ssrL+apply+} {\tt /} {\ident} \optional{{\tt|} {\naturalnumber}} && view hint -declaration & p. \pageref{ssec:vhints}\\ -\\ -\ssrL+Hint View for apply//+ {\ident} \optional{{\tt|}{\naturalnumber}} && right hand side double - & p. \pageref{ssec:vhints}\\ -&& view hint declaration &\\ -\\ -%\ssrL+Import Prenex Implicits+ && enable prenex implicits & -%p. \pageref{ssec:parampoly}\\ -%\\ -\ssrL+Prenex Implicits+ {\ident}$^+$ & \hspace{.6cm} & prenex implicits decl. - & p. 
\pageref{ssec:parampoly}\\ - -\end{longtable} - -\iffalse - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section{Changes} - -\subsection{\ssr{} version 1.3} -All changes are retrocompatible extensions but for: -\begin{itemize} -\item Occurrences in the type family switch now refer only to the goal, while - before they used to refer also to the types in the abstractions of the - predicate used by the eliminator. This bug used to affect lemmas like - \ssrC{boolP}. See the relative comments in \ssrC{ssrbool.v}. -\item Clear switches can only mention existing hypothesis and - otherwise fail. This can in particular affect intro patterns - simultaneously applied to several goals. - % commit: 2686 -\item A bug in the \ssrC{rewrite} tactic allowed to - instantiate existential metavariables occurring in the goal. - This is not the case any longer (see section~\ref{ssec:rewcaveats}). -\item The \ssrC{fold} and \ssrC{unfold} \ssrN{r-items} for \ssrC{rewrite} used to - fail silently when used in combination with a \ssrN{r-pattern} matching no - goal subterm. They now fail. The old behavior can be obtained using - the \ssrC{?} multiplier (see section~\ref{ssec:extrw}). -\item \Coq{} 8.2 users with a statically linked toplevel must comment out the\\ - \ssrC{Declare ML Module "ssreflect".}\\ - line at the beginning of \ssrC{ssreflect.v} to compile the 1.3 library. -\end{itemize} -New features: -\begin{itemize} -\item Contextual \ssrC{rewrite} patterns. - The context surrounding the redex can now be used to specify which - redex occurrences should be rewritten (see section~\ref{ssec:rewp}).\\ - \ssrC{rewrite [in X in _ = X]addnC.} - % commit: 2690, 2689, 2718, 2733 -\item Proof irrelevant interpretation of goals with existential metavariables. 
- Goals containing an existential metavariable of sort \ssrC{Prop} are - generalized over it, and a new goal for the missing subproof is - generated (see page~\pageref{sssec:apply} and - section~\ref{ssec:rewcaveats}).\\ - \ssrC{apply: (ex_intro _ (@Ordinal _ y _)).}\\ - \ssrC{rewrite insubT.} - % commit: 2553, 2544, 2543, 2733 -\item Views are now part of \ssrN{i-pattern} and can thus be used - inside intro patterns (see section~\ref{ssec:intro}).\\ - \ssrC{move=> a b /andP [Ha Hb].} - % commit: 2720 -\item Multiple views for \ssrC{move}, \ssrC{move $\dots$ in} and \ssrC{apply} - (see section~\ref{ssec:multiview}).\\ - \ssrC{move/v1/v2/v3.}\\ - \ssrC{move/v1/v2/v3 in H.}\\ - \ssrC{apply/v1/v2/v3.} - % commit: 2720 -\item \ssrC{have} and \ssrC{suff} idiom with view (see section~\ref{sssec:hypview}). -\begin{lstlisting} - Lemma |*test*| (a b : bool) (pab : a && b) : b. - have {pab} /= /andP [pa ->] // : true && (a && b) := pab. -\end{lstlisting} - % commit: 2726 -\item \ssrC{have suff}, \ssrC{suff have} and \ssrC{wlog suff} forward reasoning - tactics (see section~\ref{ssec:struct}).\\ - \ssrC{have suff H : P.} - % commit: 2633 -\item Binders support in \ssrC{have} (see section~\ref{sssec:have}).\\ - \ssrC{have H x y (r : R x y) : P x -> Q y.} - % commit: 2633 -\item Deferred clear switches. Clears are deferred to the end of the - intro pattern. In the meanwhile, cleared variables are still - part of the context, thus the goal can mention them, but are - renamed to non accessible dummy names (see section~\ref{ssec:intro}).\\ - \ssrC{suff: G \\x H = K; first case/dprodP=> \{G H\} [[G H -> -> defK]].} - % commit: 2660 -\item Relaxed alternation condition in intro patterns. 
The - \ssrN{i-item} grammar rule is simplified (see section~\ref{ssec:intro}).\\ - \ssrC{move=> a \{H\} /= \{H1\} // b c /= \{H2\}.} - % commit: 2713 -\item Occurrence selection for \ssrC{->} and \ssrC{<-} intro pattern - (see section~\ref{ssec:intro}).\\ - \ssrC{move=> a b H \{2\}->.} - % commit: 2714 -\item Modifiers for the discharging '\ssrC{:}' and \ssrC{in} tactical to override - the default behavior when dealing with local definitions (let-in): - \ssrC{@f} forces the body of \ssrC{f} to be kept, \ssrC{(f)} forces the body of - \ssrC{f} to be dropped (see sections~\ref{ssec:discharge} - and~\ref{ssec:gloc}).\\ - \ssrC{move: x y @f z.}\\ - \ssrC{rewrite rule in (f) $\;\;$H.} - %commit: 2659, 2710 -\item Type family switch in \ssrC{elim} and \ssrC{case} - can contain patterns with occurrence switch - (see section~\ref{ssec:typefam}).\\ - \ssrC{case: \{2\}(_ == x) / eqP.} - % commit: 2593, 2598, 2539, 2538, 2527, 2529 -\item Generic second order predicate support for \ssrC{elim} - (see section~\ref{sec:views}).\\ - \ssrC{elim/big\_prop: _} - % commit: 2767 -\item The \ssrC{congr} tactic now also works on products (see - section~\ref{ssec:congr}). -\begin{lstlisting} - Lemma |*test*| x (H : P x) : P y. - congr (P _): H. -\end{lstlisting} - % commit: 2608 -\item Selectors now support Ltac variables - (see section~\ref{ssec:select}).\\ - \ssrC{let n := 3 in tac; first n last.} - % commit: 2725 -\item Deprecated use of \ssrC{Import Prenex Implicits} directive. - It must be replaced with the \Coq{} \ssrC{Unset Printing - Implicit Defensive} vernacular command. -\item New synonym \ssrC{Canonical} for \ssrC{Canonical Structure}. 
-\end{itemize} -\subsection{\ssr{} version 1.4} -New features: -\begin{itemize} -\item User definable recurrent contexts (see section~\ref{ssec:rewp}).\\ - \ssrC{Notation RHS := (X in _ = X)\%pattern} -\item Contextual patterns in - \ssrC{set} and `\ssrC{:}' (see section~\ref{ssec:rewp}).\\ - \ssrC{set t := (a + _ in RHS)} -\item NO-OP intro pattern (see section~\ref{ssec:intro}).\\ - \ssrC{move=> /eqP-H /fooP-/barP} -\item \ssrC{if $\ {\term}\ $ isn't $\ \ssrN{pattern}\ $ then $\ {\term}\ $ - else $\ {\term}\ $} notation (see section~\ref{ssec:patcond}).\\ - \ssrC{if x isn't Some y then simple else complex y} -\end{itemize} -\subsection{\ssr{} version 1.5} -Incompatibilities: -\begin{itemize} -\item The \ssrC{have} tactic now performs type classes resolution. The old - behavior can be restored with \ssrC{Set SsrHave NoTCResolution} -\end{itemize} -Fixes: -\begin{itemize} -\item The \ssrC{let foo := type of t in} syntax of standard \ssrC{Ltac} has - been made compatible with \ssr{} and can be freely used even if - the \ssr{} plugin is loaded -\end{itemize} -New features: -\begin{itemize} -\item Generalizations supported in have (see section~\ref{ssec:struct}).\\ - \ssrC{generally have hx2px, pa : a ha / P a.} -\item Renaming and patterns in wlog (see section~\ref{ssec:struct} and - page \pageref{par:advancedgen}).\\ - \ssrC{wlog H : (n := m)$\;$ (x := m + _)$\;$ / T x}.\\ - \ssrC{wlog H : (n := m)$\;$ (@ldef := secdef m)$\;$ / T x}. -\item Renaming, patterns and clear switches in \ssrC{in} - tactical (see section~\ref{ssec:gloc}).\\ - \ssrC{$\dots$ in H1 \{H2\} (n := m).} -\item Handling of type classes in \ssrC{have} - (see page~\pageref{ssec:havetcresolution}).\\ - \ssrC{have foo : ty. (* TC inference for ty *)}\\ - \ssrC{have foo : ty := . (* no TC inference for ty *)}\\ - \ssrC{have foo : ty := t. (* no TC inference for ty and t *)}\\ - \ssrC{have foo := t. 
(* no TC inference for t *)} -\item Transparent flag for \ssrC{have} to generate a \ssrC{let in} context entry - (see page~\pageref{sec:havetransparent}).\\ - \ssrC{have @i : 'I\_n by apply: (Sub m); auto.} -\item Intro pattern \ssrC{[: foo bar ]} to create abstract variables - (see page~\pageref{ssec:introabstract}). -\item Tactic \ssrC{abstract:} to assign an abstract variable - (see page~\pageref{ssec:abstract}).\\ - \ssrC{have [: blurb ] @i : 'I\_n by apply: (Sub m); abstract: blurb; auto.}\\ - \ssrC{have [: blurb ] i : 'I\_n := Sub m blurb; first by auto.} - -\end{itemize} - -\fi diff --git a/doc/refman/RefMan-syn.tex b/doc/refman/RefMan-syn.tex deleted file mode 100644 index 836753db16..0000000000 --- a/doc/refman/RefMan-syn.tex +++ /dev/null @@ -1,1431 +0,0 @@ -\chapter[Syntax extensions and interpretation scopes]{Syntax extensions and interpretation scopes\label{Addoc-syntax}} -%HEVEA\cutname{syntax-extensions.html} - -In this chapter, we introduce advanced commands to modify the way -{\Coq} parses and prints objects, i.e. the translations between the -concrete and internal representations of terms and commands. - -The main commands to provide custom symbolic notations for terms are -{\tt Notation} and {\tt Infix}. They are described in Section -\ref{Notation}. There is also a variant of {\tt Notation} which does -not modify the parser. This provides with a form of abbreviation and -it is described in Section~\ref{Abbreviations}. It is sometimes -expected that the same symbolic notation has different meanings in -different contexts. To achieve this form of overloading, {\Coq} offers -a notion of interpretation scope. This is described in -Section~\ref{scopes}. - -The main command to provide custom notations for tactics is {\tt - Tactic Notation}. It is described in Section~\ref{Tactic-Notation}. 
- -% No need any more to remind this -%% \Rem The commands {\tt Grammar}, {\tt Syntax} and {\tt Distfix} which -%% were present for a while in {\Coq} are no longer available from {\Coq} -%% version 8.0. The underlying AST structure is also no longer available. - -\section[Notations]{Notations\label{Notation} -\comindex{Notation}} - -\subsection{Basic notations} - -A {\em notation} is a symbolic expression denoting some term -or term pattern. - -A typical notation is the use of the infix symbol \verb=/\= to denote -the logical conjunction (\texttt{and}). Such a notation is declared -by - -\begin{coq_example*} -Notation "A /\ B" := (and A B). -\end{coq_example*} - -The expression \texttt{(and A B)} is the abbreviated term and the -string \verb="A /\ B"= (called a {\em notation}) tells how it is -symbolically written. - -A notation is always surrounded by double quotes (except when the -abbreviation has the form of an ordinary applicative expression; see \ref{Abbreviations}). The -notation is composed of {\em tokens} separated by spaces. Identifiers -in the string (such as \texttt{A} and \texttt{B}) are the {\em -parameters} of the notation. They must occur at least once each in the -denoted term. The other elements of the string (such as \verb=/\=) are -the {\em symbols}. - -An identifier can be used as a symbol but it must be surrounded by -simple quotes to avoid the confusion with a parameter. Similarly, -every symbol of at least 3 characters and starting with a simple quote -must be quoted (then it starts by two single quotes). Here is an example. - -\begin{coq_example*} -Notation "'IF' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3). -\end{coq_example*} - -%TODO quote the identifier when not in front, not a keyword, as in "x 'U' y" ? - -A notation binds a syntactic expression to a term. 
Unless the parser -and pretty-printer of {\Coq} already know how to deal with the -syntactic expression (see \ref{ReservedNotation}), explicit precedences and -associativity rules have to be given. - -\Rem The right-hand side of a notation is interpreted at the time the -notation is given. In particular, disambiguation of constants, implicit arguments (see -Section~\ref{Implicit Arguments}), coercions (see -Section~\ref{Coercions}), etc. are resolved at the time of the -declaration of the notation. - -\subsection[Precedences and associativity]{Precedences and associativity\index{Precedences} -\index{Associativity}} - -Mixing different symbolic notations in the same text may cause serious -parsing ambiguity. To deal with the ambiguity of notations, {\Coq} -uses precedence levels ranging from 0 to 100 (plus one extra level -numbered 200) and associativity rules. - -Consider for example the new notation - -\begin{coq_example*} -Notation "A \/ B" := (or A B). -\end{coq_example*} - -Clearly, an expression such as {\tt forall A:Prop, True \verb=/\= A \verb=\/= -A \verb=\/= False} is ambiguous. To tell the {\Coq} parser how to -interpret the expression, a priority between the symbols \verb=/\= and -\verb=\/= has to be given. Assume for instance that we want conjunction -to bind more than disjunction. This is expressed by assigning a -precedence level to each notation, knowing that a lower level binds -more than a higher level. Hence the level for disjunction must be -higher than the level for conjunction. - -Since connectives are not tight articulation points of a text, it -is reasonable to choose levels not so far from the highest level which -is 100, for example 85 for disjunction and 80 for -conjunction\footnote{which are the levels effectively chosen in the -current implementation of {\Coq}}. 
- -Similarly, an associativity is needed to decide whether {\tt True \verb=/\= -False \verb=/\= False} defaults to {\tt True \verb=/\= (False -\verb=/\= False)} (right associativity) or to {\tt (True -\verb=/\= False) \verb=/\= False} (left associativity). We may -even consider that the expression is not well-formed and that -parentheses are mandatory (this is a ``no associativity'')\footnote{ -{\Coq} accepts notations declared as no associative but the parser on -which {\Coq} is built, namely {\camlpppp}, currently does not implement the -no-associativity and replaces it by a left associativity; hence it is -the same for {\Coq}: no-associativity is in fact left associativity}. -We do not know of a special convention of the associativity of -disjunction and conjunction, so let us apply for instance a right -associativity (which is the choice of {\Coq}). - -Precedence levels and associativity rules of notations have to be -given between parentheses in a list of modifiers that the -\texttt{Notation} command understands. Here is how the previous -examples refine. - -\begin{coq_example*} -Notation "A /\ B" := (and A B) (at level 80, right associativity). -Notation "A \/ B" := (or A B) (at level 85, right associativity). -\end{coq_example*} - -By default, a notation is considered non associative, but the -precedence level is mandatory (except for special cases whose level is -canonical). The level is either a number or the phrase {\tt next -level} whose meaning is obvious. The list of levels already assigned -is on Figure~\ref{init-notations}. - -\subsection{Complex notations} - -Notations can be made from arbitrarily complex symbols. One can for -instance define prefix notations. - -\begin{coq_example*} -Notation "~ x" := (not x) (at level 75, right associativity). -\end{coq_example*} - -One can also define notations for incomplete terms, with the hole -expected to be inferred at typing time. 
- -\begin{coq_example*} -Notation "x = y" := (@eq _ x y) (at level 70, no associativity). -\end{coq_example*} - -One can define {\em closed} notations whose both sides are symbols. In -this case, the default precedence level for inner subexpression is -200, and the default level for the notation itself is 0. - -\begin{coq_eval} -Set Printing Depth 50. -(********** The following is correct but produces **********) -(**** an incompatibility with the reserved notation ********) -\end{coq_eval} -\begin{coq_example*} -Notation "( x , y )" := (@pair _ _ x y). -\end{coq_example*} - -One can also define notations for binders. - -\begin{coq_eval} -Set Printing Depth 50. -(********** The following is correct but produces **********) -(**** an incompatibility with the reserved notation ********) -\end{coq_eval} -\begin{coq_example*} -Notation "{ x : A | P }" := (sig A (fun x => P)). -\end{coq_example*} - -In the last case though, there is a conflict with the notation for -type casts. The notation for type casts, as shown by the command {\tt Print Grammar -constr} is at level 100. To avoid \verb=x : A= being parsed as a type cast, -it is necessary to put {\tt x} at a level below 100, typically 99. Hence, a -correct definition is the following. - -\begin{coq_example*} -Notation "{ x : A | P }" := (sig A (fun x => P)) (x at level 99). -\end{coq_example*} - -%This change has retrospectively an effect on the notation for notation -%{\tt "{ A } + { B }"}. For the sake of factorization, {\tt A} must be -%put at level 99 too, which gives -% -%\begin{coq_example*} -%Notation "{ A } + { B }" := (sumbool A B) (at level 0, A at level 99). -%\end{coq_example*} - -More generally, it is required that notations are explicitly -factorized on the left. See the next section for more about -factorization. 
-
-\subsection{Simple factorization rules}
-
-{\Coq} extensible parsing is performed by {\camlpppp} which is
-essentially an LL1 parser: it decides which notation to parse by
-looking at tokens from left to right. Hence, some care has to be taken
-not to hide already existing rules by new rules. Some simple left
-factorization work has to be done. Here is an example.
-
-\begin{coq_eval}
-(********** The next rule for notation _ < _ < _ produces **********)
-(*** Error: Notation _ < _ < _ is already defined at level 70 ... ***)
-\end{coq_eval}
-\begin{coq_example*}
-Notation "x < y" := (lt x y) (at level 70).
-Notation "x < y < z" := (x < y /\ y < z) (at level 70).
-\end{coq_example*}
-
-In order to factorize the left part of the rules, the subexpression
-referred to by {\tt y} has to be at the same level in both rules. However
-the default behavior puts {\tt y} at the next level below 70
-in the first rule (no associativity is the default), and at the level
-200 in the second rule (level 200 is the default for inner expressions).
-To fix this, we need to force the parsing level of {\tt y},
-as follows.
-
-\begin{coq_example*}
-Notation "x < y" := (lt x y) (at level 70).
-Notation "x < y < z" := (x < y /\ y < z) (at level 70, y at next level).
-\end{coq_example*}
-
-For the sake of factorization with {\Coq} predefined rules, simple
-rules have to be observed for notations starting with a symbol:
-e.g. rules starting with ``\{'' or ``('' should be put at level 0. The
-list of {\Coq} predefined notations can be found in Chapter~\ref{Theories}.
-
-The command to display the current state of the {\Coq} term parser is
-\comindex{Print Grammar constr}
-
-\begin{quote}
-\tt Print Grammar constr.
-\end{quote}
-
-\variant
-
-\comindex{Print Grammar pattern}
-{\tt Print Grammar pattern.}\\
-
-This displays the state of the subparser of patterns (the parser
-used in the grammar of the {\tt match} {\tt with} constructions).
- -\subsection{Displaying symbolic notations} - -The command \texttt{Notation} has an effect both on the {\Coq} parser and -on the {\Coq} printer. For example: - -\begin{coq_example} -Check (and True True). -\end{coq_example} - -However, printing, especially pretty-printing, also requires some -care. We may want specific indentations, line breaks, alignment if on -several lines, etc. For pretty-printing, {\Coq} relies on {\ocaml} -formatting library, which provides indentation and automatic line -breaks depending on page width by means of {\em formatting boxes}. - -The default printing of notations is rudimentary. For printing a -notation, a formatting box is opened in such a way that if the -notation and its arguments cannot fit on a single line, a line break -is inserted before the symbols of the notation and the arguments on -the next lines are aligned with the argument on the first line. - -A first simple control that a user can have on the printing of a -notation is the insertion of spaces at some places of the -notation. This is performed by adding extra spaces between the symbols -and parameters: each extra space (other than the single space needed -to separate the components) is interpreted as a space to be inserted -by the printer. Here is an example showing how to add spaces around -the bar of the notation. - -\begin{coq_example} -Notation "{{ x : A | P }}" := (sig (fun x : A => P)) - (at level 0, x at level 99). -Check (sig (fun x : nat => x=x)). -\end{coq_example} - -The second, more powerful control on printing is by using the {\tt -format} modifier. Here is an example - -\begin{small} -\begin{coq_example} -Notation "'If' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3) -(at level 200, right associativity, format -"'[v ' 'If' c1 '/' '[' 'then' c2 ']' '/' '[' 'else' c3 ']' ']'"). 
-\end{coq_example} -\end{small} - -\begin{coq_example} -Check - (IF_then_else (IF_then_else True False True) - (IF_then_else True False True) - (IF_then_else True False True)). -\end{coq_example} - -A {\em format} is an extension of the string denoting the notation with -the possible following elements delimited by single quotes: - -\begin{itemize} -\item extra spaces are translated into simple spaces -\item tokens of the form \verb='/ '= are translated into breaking point, - in case a line break occurs, an indentation of the number of spaces - after the ``\verb=/='' is applied (2 spaces in the given example) -\item token of the form \verb='//'= force writing on a new line -\item well-bracketed pairs of tokens of the form \verb='[ '= and \verb=']'= - are translated into printing boxes; in case a line break occurs, - an extra indentation of the number of spaces given after the ``\verb=[='' - is applied (4 spaces in the example) -\item well-bracketed pairs of tokens of the form \verb='[hv '= and \verb=']'= - are translated into horizontal-orelse-vertical printing boxes; - if the content of the box does not fit on a single line, then every breaking - point forces a newline and an extra indentation of the number of spaces - given after the ``\verb=[='' is applied at the beginning of each newline - (3 spaces in the example) -\item well-bracketed pairs of tokens of the form \verb='[v '= and - \verb=']'= are translated into vertical printing boxes; every - breaking point forces a newline, even if the line is large enough to - display the whole content of the box, and an extra indentation of the - number of spaces given after the ``\verb=[='' is applied at the beginning - of each newline -\end{itemize} - -%Thus, for the previous example, we get -%\footnote{The ``@'' is here to shunt -%the notation "'IF' A 'then' B 'else' C" which is defined in {\Coq} -%initial state}: - -Notations do not survive the end of sections. 
No typing of the denoted -expression is performed at definition time. Type-checking is done only -at the time of use of the notation. - -\Rem -Sometimes, a notation is expected only for the parser. -%(e.g. because -%the underlying parser of {\Coq}, namely {\camlpppp}, is LL1 and some extra -%rules are needed to circumvent the absence of factorization). -To do so, the option {\tt only parsing} is allowed in the list of modifiers of -\texttt{Notation}. - -Conversely, the {\tt only printing} can be used to declare -that a notation should only be used for printing and should not declare a -parsing rule. In particular, such notations do not modify the parser. - -\subsection{The \texttt{Infix} command -\comindex{Infix}} - -The \texttt{Infix} command is a shortening for declaring notations of -infix symbols. Its syntax is - -\begin{quote} -\noindent\texttt{Infix "{\symbolentry}" :=} {\term} {\tt (} \nelist{\em modifier}{,} {\tt )}. -\end{quote} - -and it is equivalent to - -\begin{quote} -\noindent\texttt{Notation "x {\symbolentry} y" := ({\term} x y) (} \nelist{\em modifier}{,} {\tt )}. -\end{quote} - -where {\tt x} and {\tt y} are fresh names. Here is an example. - -\begin{coq_example*} -Infix "/\" := and (at level 80, right associativity). -\end{coq_example*} - -\subsection{Reserving notations -\label{ReservedNotation} -\comindex{Reserved Notation}} - -A given notation may be used in different contexts. {\Coq} expects all -uses of the notation to be defined at the same precedence and with the -same associativity. To avoid giving the precedence and associativity -every time, it is possible to declare a parsing rule in advance -without giving its interpretation. Here is an example from the initial -state of {\Coq}. - -\begin{coq_example} -Reserved Notation "x = y" (at level 70, no associativity). -\end{coq_example} - -Reserving a notation is also useful for simultaneously defining an -inductive type or a recursive constant and a notation for it. 
-
-\Rem The notations mentioned on Figure~\ref{init-notations} are
-reserved. Hence their precedence and associativity cannot be changed.
-
-\subsection{Simultaneous definition of terms and notations
-\comindex{Fixpoint {\ldots} where {\ldots}}
-\comindex{CoFixpoint {\ldots} where {\ldots}}
-\comindex{Inductive {\ldots} where {\ldots}}}
-
-Thanks to reserved notations, the inductive, co-inductive, record,
-recursive and corecursive definitions can benefit from customized
-notations. To do this, insert a {\tt where} notation clause after the
-definition of the (co)inductive type or (co)recursive term (or after
-the definition of each of them in case of mutual definitions). The
-exact syntax is given on Figure~\ref{notation-syntax} for inductive,
-co-inductive, recursive and corecursive definitions and on
-Figure~\ref{record-syntax} for records. Here are examples:
-
-\begin{coq_eval}
-Set Printing Depth 50.
-(********** The following is correct but produces an error **********)
-(********** because the symbol /\ is already bound **********)
-(**** Error: The conclusion of A -> B -> A /\ B is not valid *****)
-\end{coq_eval}
-
-\begin{coq_example*}
-Inductive and (A B:Prop) : Prop := conj : A -> B -> A /\ B
-where "A /\ B" := (and A B).
-\end{coq_example*}
-
-\begin{coq_eval}
-Set Printing Depth 50.
-(********** The following is correct but produces an error **********)
-(********** because the symbol + is already bound **********)
-(**** Error: no recursive definition *****)
-\end{coq_eval}
-
-\begin{coq_example*}
-Fixpoint plus (n m:nat) {struct n} : nat :=
-  match n with
-  | O => m
-  | S p => S (p+m)
-  end
-where "n + m" := (plus n m).
-\end{coq_example*}
-
-\subsection{Displaying information about notations
-\optindex{Printing Notations}}
-
-To deactivate the printing of all notations, use the command
-\begin{quote}
-\tt Unset Printing Notations.
-\end{quote}
-To reactivate it, use the command
-\begin{quote}
-\tt Set Printing Notations.
-\end{quote} -The default is to use notations for printing terms wherever possible. - -\SeeAlso {\tt Set Printing All} in Section~\ref{SetPrintingAll}. - -\subsection{Locating notations -\comindex{Locate} -\label{LocateSymbol}} - -To know to which notations a given symbol belongs to, use the command -\begin{quote} -\tt Locate {\symbolentry} -\end{quote} -where symbol is any (composite) symbol surrounded by double quotes. To locate -a particular notation, use a string where the variables of the -notation are replaced by ``\_'' and where possible single quotes -inserted around identifiers or tokens starting with a single quote are -dropped. - -\Example -\begin{coq_example} -Locate "exists". -Locate "exists _ .. _ , _". -\end{coq_example} - -\SeeAlso Section \ref{Locate}. - -\begin{figure} -\begin{small} -\begin{centerframe} -\begin{tabular}{lcl} -{\sentence} & ::= & - \zeroone{\tt Local} \texttt{Notation} {\str} \texttt{:=} {\term} - \zeroone{\modifiers} \zeroone{:{\scope}} .\\ - & $|$ & - \zeroone{\tt Local} \texttt{Infix} {\str} \texttt{:=} {\qualid} - \zeroone{\modifiers} \zeroone{:{\scope}} .\\ - & $|$ & - \zeroone{\tt Local} \texttt{Reserved Notation} {\str} - \zeroone{\modifiers} .\\ - & $|$ & {\tt Inductive} - \nelist{{\inductivebody} \zeroone{\declnotation}}{with}{\tt .}\\ - & $|$ & {\tt CoInductive} - \nelist{{\inductivebody} \zeroone{\declnotation}}{with}{\tt .}\\ - & $|$ & {\tt Fixpoint} - \nelist{{\fixpointbody} \zeroone{\declnotation}}{with} {\tt .} \\ - & $|$ & {\tt CoFixpoint} - \nelist{{\cofixpointbody} \zeroone{\declnotation}}{with} {\tt .} \\ -\\ -{\declnotation} & ::= & - \zeroone{{\tt where} \nelist{{\str} {\tt :=} {\term} \zeroone{:{\scope}}}{\tt and}}. 
-\\ -\\ -{\modifiers} - & ::= & {\tt at level} {\naturalnumber} \\ - & $|$ & \nelist{\ident}{,} {\tt at level} {\naturalnumber} \zeroone{\binderinterp}\\ - & $|$ & \nelist{\ident}{,} {\tt at next level} \zeroone{\binderinterp}\\ - & $|$ & {\ident} {\binderinterp} \\ - & $|$ & {\ident} {\tt ident} \\ - & $|$ & {\ident} {\tt global} \\ - & $|$ & {\ident} {\tt bigint} \\ - & $|$ & {\ident} \zeroone{{\tt strict}} {\tt pattern} \zeroone{{\tt at level} {\naturalnumber}}\\ - & $|$ & {\ident} {\tt binder} \\ - & $|$ & {\ident} {\tt closed binder} \\ - & $|$ & {\tt left associativity} \\ - & $|$ & {\tt right associativity} \\ - & $|$ & {\tt no associativity} \\ - & $|$ & {\tt only parsing} \\ - & $|$ & {\tt only printing} \\ - & $|$ & {\tt format} {\str} \\ -\\ -\\ -{\binderinterp} - & ::= & {\tt as ident} \\ - & $|$ & {\tt as pattern} \\ - & $|$ & {\tt as strict pattern} \\ -\end{tabular} -\end{centerframe} -\end{small} -\caption{Syntax of the variants of {\tt Notation}} -\label{notation-syntax} -\end{figure} - -\subsection{Notations and binders} - -Notations can include binders. This section lists -different ways to deal with binders. For further examples, see also -Section~\ref{RecursiveNotationsWithBinders}. - -\subsubsection{Binders bound in the notation and parsed as identifiers} - -Here is the basic example of a notation using a binder: - -\begin{coq_example*} -Notation "'sigma' x : A , B" := (sigT (fun x : A => B)) - (at level 200, x ident, A at level 200, right associativity). -\end{coq_example*} - -The binding variables in the right-hand side that occur as a parameter -of the notation (here {\tt x}) dynamically bind all the occurrences -in their respective binding scope after instantiation of the -parameters of the notation. This means that the term bound to {\tt B} can -refer to the variable name bound to {\tt x} as shown in the following -application of the notation: - -\begin{coq_example} -Check sigma z : nat, z = 0. 
-\end{coq_example} - -Notice the modifier {\tt x ident} in the declaration of the -notation. It tells to parse {\tt x} as a single identifier. - -\subsubsection{Binders bound in the notation and parsed as patterns} - -In the same way as patterns can be used as binders, as in {\tt fun - '(x,y) => x+y} or {\tt fun '(existT \_ x \_) => x}, notations can be -defined so that any pattern (in the sense of the entry {\pattern} of -Figure~\ref{term-syntax-aux}) can be used in place of the -binder. Here is an example: - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\begin{coq_example*} -Notation "'subset' ' p , P " := (sig (fun p => P)) - (at level 200, p pattern, format "'subset' ' p , P"). -\end{coq_example*} - -\begin{coq_example} -Check subset '(x,y), x+y=0. -\end{coq_example} - -The modifier {\tt p pattern} in the declaration of the notation -tells to parse $p$ as a pattern. Note that a single -variable is both an identifier and a pattern, so, e.g., the following -also works: - -% Note: we rely on the notation of the standard library which does not -% print the expected output, so we hide the output. -\begin{coq_example} -Check subset 'x, x=0. -\end{coq_example} - -If one wants to prevent such a notation to be used for printing when the -pattern is reduced to a single identifier, one has to use instead -the modifier {\tt p strict pattern}. For parsing, however, a {\tt - strict pattern} will continue to include the case of a -variable. Here is an example showing the difference: - -\begin{coq_example*} -Notation "'subset_bis' ' p , P" := (sig (fun p => P)) - (at level 200, p strict pattern). -Notation "'subset_bis' p , P " := (sig (fun p => P)) - (at level 200, p ident). -\end{coq_example*} - -\begin{coq_example} -Check subset_bis 'x, x=0. -\end{coq_example} - -The default level for a {\tt pattern} is 0. One can use a different level by -using {\tt pattern at level} $n$ where the scale is the same as the one for -terms (Figure~\ref{init-notations}). 
- -\subsubsection{Binders bound in the notation and parsed as terms} - -Sometimes, for the sake of factorization of rules, a binder has to be -parsed as a term. This is typically the case for a notation such as -the following: - -\begin{coq_eval} -Set Printing Depth 50. -(********** The following is correct but produces **********) -(**** an incompatibility with the reserved notation ********) -\end{coq_eval} -\begin{coq_example*} -Notation "{ x : A | P }" := (sig (fun x : A => P)) - (at level 0, x at level 99 as ident). -\end{coq_example*} - -This is so because the grammar also contains rules starting with -{\tt \{} and followed by a term, such as the rule for the notation - {\tt \{ A \} + \{ B \}} for the constant {\tt - sumbool}~(see Section~\ref{sumbool}). - -Then, in the rule, {\tt x ident} is replaced by {\tt x at level 99 as - ident} meaning that {\tt x} is parsed as a term at level 99 (as done -in the notation for {\tt sumbool}), but that this term has actually to -be an identifier. - -The notation {\tt \{ x | P \}} is already defined in the standard -library with the {\tt as ident} modifier. We cannot redefine it but -one can define an alternative notation, say {\tt \{ p such that P }\}, -using instead {\tt as pattern}. - -% Note, this conflicts with the default rule in the standard library, so -% we don't show the -\begin{coq_example*} -Notation "{ p 'such' 'that' P }" := (sig (fun p => P)) - (at level 0, p at level 99 as pattern). -\end{coq_example*} - -Then, the following works: -\begin{coq_example} -Check {(x,y) such that x+y=0}. -\end{coq_example} - -To enforce that the pattern should not be used for printing when it -is just an identifier, one could have said {\tt p at level - 99 as strict pattern}. - -Note also that in the absence of a {\tt as ident}, {\tt as strict - pattern} or {\tt as pattern} modifiers, the default is to consider -subexpressions occurring in binding position and parsed as terms to be -{\tt as ident}. 
-
-\subsubsection{Binders not bound in the notation}
-\label{NotationsWithBinders}
-
-We can also have binders in the right-hand side of a notation which
-are not themselves bound in the notation. In this case, the binders
-are considered up to renaming of the internal binder. E.g., for the
-notation
-
-\begin{coq_example*}
-Notation "'exists_different' n" := (exists p:nat, p<>n) (at level 200).
-\end{coq_example*}
-the next command fails because {\tt p} does not bind in
-the instance of {\tt n}.
-\begin{coq_eval}
-Set Printing Depth 50.
-\end{coq_eval}
-% (********** The following produces **********)
-% (**** The reference p was not found in the current environment ********)
-\begin{coq_example}
-Fail Check (exists_different p).
-\end{coq_example}
-
-\subsection{Notations with recursive patterns}
-\label{RecursiveNotations}
-
-A mechanism is provided for declaring elementary notations with
-recursive patterns. The basic example is:
-
-\begin{coq_example*}
-Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) ..).
-\end{coq_example*}
-
-On the right-hand side, an extra construction of the form {\tt ..} $t$
-{\tt ..} can be used. Notice that {\tt ..} is part of the {\Coq}
-syntax and it must not be confused with the three-dots notation
-$\ldots$ used in this manual to denote a sequence of arbitrary size.
-
-On the left-hand side, the part ``$x$ $s$ {\tt ..} $s$ $y$'' of the
-notation parses any number of times (but at least once) a sequence
-of expressions separated by the sequence of tokens $s$ (in the
-example, $s$ is just ``{\tt ;}'').
-
-The right-hand side must contain a subterm of the form either
-$\phi(x,${\tt ..} $\phi(y,t)$ {\tt ..}$)$ or $\phi(y,${\tt ..}
-$\phi(x,t)$ {\tt ..}$)$ where $\phi([~]_E,[~]_I)$, called the {\em
-  iterator} of the recursive notation, is an arbitrary expression with
-distinguished placeholders and
-where $t$ is called the {\em terminating expression} of the recursive
-notation.
In the example, we choose the names $x$ and $y$ but in
-practice they can of course be chosen arbitrarily. Note that the
-placeholder $[~]_I$ has to occur only once but the $[~]_E$ can occur
-several times.
-
-Parsing the notation produces a list of expressions which are used to
-fill the first placeholder of the iterating pattern which itself is
-repeatedly nested as many times as the length of the list, the second
-placeholder being the nesting point. In the innermost occurrence of the
-nested iterating pattern, the second placeholder is finally filled with the
-terminating expression.
-
-In the example above, the iterator $\phi([~]_E,[~]_I)$ is {\tt cons
-  $[~]_E$ $[~]_I$} and the terminating expression is {\tt nil}. Here are
-other examples:
-\begin{coq_example*}
-Notation "( x , y , .. , z )" := (pair .. (pair x y) .. z) (at level 0).
-Notation "[| t * ( x , y , .. , z ) ; ( a , b , .. , c ) * u |]" :=
-  (pair (pair .. (pair (pair t x) (pair t y)) .. (pair t z))
-        (pair .. (pair (pair a u) (pair b u)) .. (pair c u)))
-  (t at level 39).
-\end{coq_example*}
-
-Recursive patterns can occur several times on the right-hand side.
-Here is an example:
-
-\begin{coq_example*}
-Notation "[> a , .. , b <]" :=
-  (cons a .. (cons b nil) .., cons b .. (cons a nil) ..).
-\end{coq_example*}
-
-Notations with recursive patterns can be reserved like standard
-notations; they can also be declared within interpretation scopes (see
-section \ref{scopes}).
-
-\subsection{Notations with recursive patterns involving binders}
-\label{RecursiveNotationsWithBinders}
-
-Recursive notations can also be used with binders. The basic example is:
-
-\begin{coq_example*}
-Notation "'exists' x .. y , p" :=
-  (ex (fun x => .. (ex (fun y => p)) ..))
-  (at level 200, x binder, y binder, right associativity).
-
-\end{coq_example*}
-
-The principle is the same as in Section~\ref{RecursiveNotations}
-except that in the iterator $\phi([~]_E,[~]_I)$, the placeholder $[~]_E$ can
-also occur in position of the binding variable of a {\tt
-  fun} or a {\tt forall}.
-
-To specify that the part ``$x$ {\tt ..} $y$'' of the notation
-parses a sequence of binders, $x$ and $y$ must be marked as {\tt
-  binder} in the list of modifiers of the notation. The
-binders of the parsed sequence are used to fill the occurrences of the first
-placeholder of the iterating pattern which is repeatedly nested as many times
-as the number of binders generated. If ever the generalization
-operator {\tt `} (see Section~\ref{implicit-generalization}) is used
-in the binding list, the added binders are taken into account too.
-
-Binder parsing exists in two flavors. If $x$ and $y$ are marked as
-{\tt binder}, then a sequence such as {\tt a b c : T} will be accepted
-and interpreted as the sequence of binders {\tt (a:T) (b:T)
-  (c:T)}. For instance, in the notation above, the syntax {\tt exists
-  a b : nat, a = b} is valid.
-
-The variables $x$ and $y$ can also be marked as {\tt closed binder} in
-which case only well-bracketed binders of the form {\tt (a b c:T)} or
-{\tt \{a b c:T\}} etc. are accepted.
-
-With closed binders, the recursive sequence in the left-hand side can
-be of the more general form $x$ $s$ {\tt ..} $s$ $y$ where $s$ is an
-arbitrary sequence of tokens. With open binders though, $s$ has to be
-empty. Here is an example of recursive notation with closed binders:
-
-\begin{coq_example*}
-Notation "'mylet' f x .. y := t 'in' u":=
-  (let f := fun x => .. (fun y => t) .. in u)
-  (at level 200, x closed binder, y closed binder, right associativity).
-\end{coq_example*}
-
-A recursive pattern for binders can be used in position of a recursive
-pattern for terms. Here is an example:
-
-\begin{coq_example*}
-Notation "'FUNAPP' x .. y , f" :=
-  (fun x => .. (fun y => (.. (f x) ..) y ) ..)
- (at level 200, x binder, y binder, right associativity). -\end{coq_example*} - -If an occurrence of the $[~]_E$ is not in position of a binding -variable but of a term, it is the name used in the binding which is -used. Here is an example: - -\begin{coq_example*} -Notation "'exists_non_null' x .. y , P" := - (ex (fun x => x <> 0 /\ .. (ex (fun y => y <> 0 /\ P)) ..)) - (at level 200, x binder). -\end{coq_example*} - -\subsection{Predefined entries} - -By default, sub-expressions are parsed as terms and the corresponding -grammar entry is called {\tt constr}. However, one may sometimes want -to restrict the syntax of terms in a notation. For instance, the -following notation will accept to parse only global reference in -position of {\tt x}: - -\begin{coq_example*} -Notation "'apply' f a1 .. an" := (.. (f a1) .. an) - (at level 10, f global, a1, an at level 9). -\end{coq_example*} - -In addition to {\tt global}, one can restrict the syntax of a -sub-expression by using the entry names {\tt ident} or {\tt pattern} -already seen in Section~\ref{NotationsWithBinders}, even when the -corresponding expression is not used as a binder in the right-hand -side. E.g.: - -\begin{coq_example*} -Notation "'apply_id' f a1 .. an" := (.. (f a1) .. an) - (at level 10, f ident, a1, an at level 9). -\end{coq_example*} - -\subsection{Summary} - -\paragraph{Syntax of notations} - -The different syntactic variants of the command \texttt{Notation} are -given on Figure~\ref{notation-syntax}. The optional {\tt :{\scope}} is -described in the Section~\ref{scopes}. - -\Rem No typing of the denoted expression is performed at definition -time. Type-checking is done only at the time of use of the notation. - -\Rem Many examples of {\tt Notation} may be found in the files -composing the initial state of {\Coq} (see directory {\tt -\$COQLIB/theories/Init}). 
- -\Rem The notation \verb="{ x }"= has a special status in such a way -that complex notations of the form \verb="x + { y }"= or -\verb="x * { y }"= can be nested with correct precedences. Especially, -every notation involving a pattern of the form \verb="{ x }"= is -parsed as a notation where the pattern \verb="{ x }"= has been simply -replaced by \verb="x"= and the curly brackets are parsed separately. -E.g. \verb="y + { z }"= is not parsed as a term of the given form but -as a term of the form \verb="y + z"= where \verb=z= has been parsed -using the rule parsing \verb="{ x }"=. Especially, level and -precedences for a rule including patterns of the form \verb="{ x }"= -are relative not to the textual notation but to the notation where the -curly brackets have been removed (e.g. the level and the associativity -given to some notation, say \verb="{ y } & { z }"= in fact applies to -the underlying \verb="{ x }"=-free rule which is \verb="y & z"=). - -\paragraph{Persistence of notations} - -Notations do not survive the end of sections. They survive modules -unless the command {\tt Local Notation} is used instead of {\tt -Notation}. - -\section[Interpretation scopes]{Interpretation scopes\index{Interpretation scopes} -\label{scopes}} -% Introduction - -An {\em interpretation scope} is a set of notations for terms with -their interpretation. Interpretation scopes provide a weak, -purely syntactical form of notation overloading: the same notation, for -instance the infix symbol \verb=+=, can be used to denote distinct -definitions of the additive operator. Depending on which interpretation -scope is currently open, the interpretation is different. -Interpretation scopes can include an interpretation for -numerals and strings. However, this is only made possible at the -{\ocaml} level. - -See Figure \ref{notation-syntax} for the syntax of notations including -the possibility to declare them in a given scope. 
Here is a typical
-example which declares the notation for conjunction in the scope {\tt
-type\_scope}.
-
-\begin{verbatim}
-Notation "A /\ B" := (and A B) : type_scope.
-\end{verbatim}
-
-\Rem A notation not defined in a scope is called a {\em lonely} notation.
-
-\subsection{Global interpretation rules for notations}
-
-At any time, the interpretation of a notation for a term is done within
-a {\em stack} of interpretation scopes and lonely notations. In case a
-notation has several interpretations, the actual interpretation is the
-one defined by (or in) the most recently declared (or open) lonely
-notation (or interpretation scope) which defines this notation.
-Typically if a given notation is defined in some scope {\scope} but
-has also an interpretation not assigned to a scope, then, if {\scope}
-is open before the lonely interpretation is declared, then the lonely
-interpretation is used (and this is the case even if the
-interpretation of the notation in {\scope} is given after the lonely
-interpretation: in other words, only the order of lonely
-interpretations and opening of scopes matters, and not the declaration
-of interpretations within a scope).
-
-The initial state of {\Coq} declares three interpretation scopes and
-no lonely notations. These scopes, in opening order, are {\tt
-core\_scope}, {\tt type\_scope} and {\tt nat\_scope}.
-
-The command to add a scope to the interpretation scope stack is
-\comindex{Open Scope}
-\comindex{Close Scope}
-\begin{quote}
-{\tt Open Scope} {\scope}.
-\end{quote}
-It is also possible to remove a scope from the interpretation scope
-stack by using the command
-\begin{quote}
-{\tt Close Scope} {\scope}.
-\end{quote}
-Notice that this command does not only cancel the last {\tt Open Scope
-{\scope}} but all the invocations of it.
-
-\Rem {\tt Open Scope} and {\tt Close Scope} do not survive the end of
-sections where they occur.
When defined outside of a section, they are -exported to the modules that import the module where they occur. - -\begin{Variants} - -\item {\tt Local Open Scope} {\scope}. - -\item {\tt Local Close Scope} {\scope}. - -These variants are not exported to the modules that import the module -where they occur, even if outside a section. - -\item {\tt Global Open Scope} {\scope}. - -\item {\tt Global Close Scope} {\scope}. - -These variants survive sections. They behave as if {\tt Global} were -absent when not inside a section. - -\end{Variants} - -\subsection{Local interpretation rules for notations} - -In addition to the global rules of interpretation of notations, some -ways to change the interpretation of subterms are available. - -\subsubsection{Local opening of an interpretation scope -\label{scopechange} -\index{\%} -\comindex{Delimit Scope} -\comindex{Undelimit Scope}} - -It is possible to locally extend the interpretation scope stack using -the syntax ({\term})\%{\delimkey} (or simply {\term}\%{\delimkey} -for atomic terms), where {\delimkey} is a special identifier called -{\em delimiting key} and bound to a given scope. - -In such a situation, the term {\term}, and all its subterms, are -interpreted in the scope stack extended with the scope bound to -{\delimkey}. - -To bind a delimiting key to a scope, use the command - -\begin{quote} -\texttt{Delimit Scope} {\scope} \texttt{with} {\ident} -\end{quote} - -To remove a delimiting key of a scope, use the command - -\begin{quote} -\texttt{Undelimit Scope} {\scope} -\end{quote} - -\subsubsection{Binding arguments of a constant to an interpretation scope -\comindex{Arguments}} - -It is possible to set in advance that some arguments of a given -constant have to be interpreted in a given scope. The command is -\begin{quote} -{\tt Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} -\end{quote} -where the list is a prefix of the list of the arguments of {\qualid} eventually -annotated with their {\scope}. 
Grouping round parentheses can be used to -decorate multiple arguments with the same scope. {\scope} can be either a scope -name or its delimiting key. For example the following command puts the first two -arguments of {\tt plus\_fct} in the scope delimited by the key {\tt F} ({\tt - Rfun\_scope}) and the last argument in the scope delimited by the key {\tt R} -({\tt R\_scope}). - -\begin{coq_example*} -Arguments plus_fct (f1 f2)%F x%R. -\end{coq_example*} - -The {\tt Arguments} command accepts scopes decoration to all grouping -parentheses. In the following example arguments {\tt A} and {\tt B} -are marked as maximally inserted implicit arguments and are -put into the {\tt type\_scope} scope. - -\begin{coq_example*} -Arguments respectful {A B}%type (R R')%signature _ _. -\end{coq_example*} - -When interpreting a term, if some of the arguments of {\qualid} are -built from a notation, then this notation is interpreted in the scope -stack extended by the scope bound (if any) to this argument. The -effect of the scope is limited to the argument itself. It does not propagate -to subterms but the subterms that, after interpretation of the -notation, turn to be themselves arguments of a reference are -interpreted accordingly to the arguments scopes bound to this reference. - -Arguments scopes can be cleared with the following command: - -\begin{quote} -{\tt Arguments {\qualid} : clear scopes} -\end{quote} - -Extra argument scopes, to be used in case of coercion to Funclass -(see Chapter~\ref{Coercions-full}) or with a computed type, -can be given with - -\begin{quote} -{\tt Arguments} {\qualid} \nelist{\textunderscore {\tt \%} \scope}{} {\tt : extra scopes.} -\end{quote} - -\begin{Variants} -\item {\tt Global Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} - -This behaves like {\tt Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} -but survives when a section is closed instead -of stopping working at section closing. 
Without the {\tt Global} modifier, -the effect of the command stops when the section it belongs to ends. - -\item {\tt Local Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} - -This behaves like {\tt Arguments} {\qualid} \nelist{\name {\tt \%}\scope}{} -but does not survive modules and files. -Without the {\tt Local} modifier, the effect of the command is -visible from within other modules or files. - -\end{Variants} - -\SeeAlso The command to show the scopes bound to the arguments of a -function is described in Section~\ref{About}. - -\subsubsection{Binding types of arguments to an interpretation scope} - -When an interpretation scope is naturally associated to a type -(e.g. the scope of operations on the natural numbers), it may be -convenient to bind it to this type. When a scope {\scope} is bound to -a type {\type}, any new function defined later on gets its arguments -of type {\type} interpreted by default in scope {\scope} (this default -behavior can however be overwritten by explicitly using the command -{\tt Arguments}). - -Whether the argument of a function has some type {\type} is determined -statically. For instance, if {\tt f} is a polymorphic function of type -{\tt forall X:Type, X -> X} and type {\tt t} is bound to a scope -{\scope}, then {\tt a} of type {\tt t} in {\tt f~t~a} is not -recognized as an argument to be interpreted in scope {\scope}. - -\comindex{Bind Scope} -\label{bindscope} -More generally, any coercion {\class} (see Chapter~\ref{Coercions-full}) can be -bound to an interpretation scope. The command to do it is -\begin{quote} -{\tt Bind Scope} {\scope} \texttt{with} {\class} -\end{quote} - -\Example -\begin{coq_example} -Parameter U : Set. -Bind Scope U_scope with U. -Parameter Uplus : U -> U -> U. -Parameter P : forall T:Set, T -> U -> Prop. -Parameter f : forall T:Set, T -> U. -Infix "+" := Uplus : U_scope. -Unset Printing Notations. -Open Scope nat_scope. 
(* Define + on the nat as the default for + *) -Check (fun x y1 y2 z t => P _ (x + t) ((f _ (y1 + y2) + z))). -\end{coq_example} - -\Rem The scopes {\tt type\_scope} and {\tt function\_scope} also have a local effect on -interpretation. See the next section. - -\SeeAlso The command to show the scopes bound to the arguments of a -function is described in Section~\ref{About}. - -\Rem In notations, the subterms matching the identifiers of the -notations are interpreted in the scope in which the identifiers -occurred at the time of the declaration of the notation. Here is an -example: - -\begin{coq_example} -Parameter g : bool -> bool. -Notation "@@" := true (only parsing) : bool_scope. -Notation "@@" := false (only parsing): mybool_scope. - -(* Defining a notation while the argument of g is bound to bool_scope *) -Bind Scope bool_scope with bool. -Notation "# x #" := (g x) (at level 40). -Check # @@ #. -(* Rebinding the argument of g to mybool_scope has no effect on the notation *) -Arguments g _%mybool_scope. -Check # @@ #. -(* But we can force the scope *) -Delimit Scope mybool_scope with mybool. -Check # @@%mybool #. -\end{coq_example} - -\subsection[The {\tt type\_scope} interpretation scope]{The {\tt type\_scope} interpretation scope\index{type\_scope@\texttt{type\_scope}}} - -The scope {\tt type\_scope} has a special status. It is a primitive -interpretation scope which is temporarily activated each time a -subterm of an expression is expected to be a type. It is delimited by -the key {\tt type}, and bound to the coercion class {\tt Sortclass}. 
It is also
-used in certain situations where an expression is statically known to
-be a type, including the conclusion and the type of hypotheses within
-an {\tt Ltac} goal match (see Section~\ref{ltac-match-goal}),
-the statement of a theorem, the type of
-a definition, the type of a binder, the domain and codomain of
-implication, the codomain of products, and more generally any type
-argument of a declared or defined constant.
-
-\subsection[The {\tt function\_scope} interpretation scope]{The {\tt function\_scope} interpretation scope\index{function\_scope@\texttt{function\_scope}}}
-
-The scope {\tt function\_scope} also has a special status.
-It is temporarily activated each time the argument of a global reference is
-recognized to be a {\tt Funclass} instance, i.e., of type {\tt forall x:A, B} or {\tt A -> B}.
-
-\subsection{Interpretation scopes used in the standard library of {\Coq}}
-
-We give an overview of the scopes used in the standard library of
-{\Coq}. For a complete list of notations in each scope, use the
-commands {\tt Print Scopes} or {\tt Print Scope {\scope}}.
-
-\subsubsection{\tt type\_scope}
-
-This scope includes infix {\tt *} for product types and infix {\tt +} for
-sum types. It is delimited by key {\tt type}, and bound to the coercion class
-{\tt Sortclass}, as described at \ref{bindscope}.
-
-\subsubsection{\tt nat\_scope}
-
-This scope includes the standard arithmetical operators and relations on
-type {\tt nat}. Positive numerals in this scope are mapped to their
-canonical representative built from {\tt O} and {\tt S}. The scope is
-delimited by key {\tt nat}, and bound to the type {\tt nat} (see \ref{bindscope}).
-
-\subsubsection{\tt N\_scope}
-
-This scope includes the standard arithmetical operators and relations on
-type {\tt N} (binary natural numbers). It is delimited by key {\tt N}
-and comes with an interpretation for numerals as closed terms of type {\tt N}. 
- -\subsubsection{\tt Z\_scope} - -This scope includes the standard arithmetical operators and relations on -type {\tt Z} (binary integer numbers). It is delimited by key {\tt Z} -and comes with an interpretation for numerals as closed term of type {\tt Z}. - -\subsubsection{\tt positive\_scope} - -This scope includes the standard arithmetical operators and relations on -type {\tt positive} (binary strictly positive numbers). It is -delimited by key {\tt positive} and comes with an interpretation for -numerals as closed term of type {\tt positive}. - -\subsubsection{\tt Q\_scope} - -This scope includes the standard arithmetical operators and relations on -type {\tt Q} (rational numbers defined as fractions of an integer and -a strictly positive integer modulo the equality of the -numerator-denominator cross-product). As for numerals, only $0$ and -$1$ have an interpretation in scope {\tt Q\_scope} (their -interpretations are $\frac{0}{1}$ and $\frac{1}{1}$ respectively). - -\subsubsection{\tt Qc\_scope} - -This scope includes the standard arithmetical operators and relations on the -type {\tt Qc} of rational numbers defined as the type of irreducible -fractions of an integer and a strictly positive integer. - -\subsubsection{\tt real\_scope} - -This scope includes the standard arithmetical operators and relations on -type {\tt R} (axiomatic real numbers). It is delimited by key {\tt R} -and comes with an interpretation for numerals using the {\tt IZR} -morphism from binary integer numbers to {\tt R}. - -\subsubsection{\tt bool\_scope} - -This scope includes notations for the boolean operators. It is -delimited by key {\tt bool}, and bound to the type {\tt bool} (see \ref{bindscope}). - -\subsubsection{\tt list\_scope} - -This scope includes notations for the list operators. It is -delimited by key {\tt list}, and bound to the type {\tt list} (see \ref{bindscope}). 
-
-\subsubsection{\tt function\_scope}
-
-This scope is delimited by the key {\tt function}, and bound to the coercion class {\tt Funclass},
-as described at \ref{bindscope}.
-
-\subsubsection{\tt core\_scope}
-
-This scope includes the notation for pairs. It is delimited by key {\tt core}.
-
-\subsubsection{\tt string\_scope}
-
-This scope includes notation for strings as elements of the type {\tt
-string}. Special characters and escaping follow {\Coq} conventions
-on strings (see Section~\ref{strings}). In particular, there is no
-convention to visualize non-printable characters of a string. The
-file {\tt String.v} shows an example that contains quotes, a newline
-and a beep (i.e. the ASCII character of code 7).
-
-\subsubsection{\tt char\_scope}
-
-This scope includes interpretation for all strings of the form
-\verb!"!$c$\verb!"! where $c$ is an ASCII character, or of the form
-\verb!"!$nnn$\verb!"! where $nnn$ is a three-digit number (possibly
-with leading 0's), or of the form \verb!""""!. Their respective
-denotations are the ASCII code of $c$, the decimal ASCII code $nnn$,
-or the ASCII code of the character \verb!"! (i.e. the ASCII code
-34), all of them being represented in the type {\tt ascii}.
-
-\subsection{Displaying information about scopes}
-
-\subsubsection{\tt Print Visibility\comindex{Print Visibility}}
-
-This displays the current stack of notations in scopes and lonely
-notations that is used to interpret a notation. The top of the stack
-is displayed last. Notations in scopes whose interpretation is hidden
-by the same notation in a more recently opened scope are not
-displayed. Hence each notation is displayed only once.
-
-\variant
-
-{\tt Print Visibility {\scope}}\\
-
-This displays the current stack of notations in scopes and lonely
-notations assuming that {\scope} is pushed on top of the stack. This
-is useful to know how a subterm locally occurring in the scope of
-{\scope} is interpreted. 
- -\subsubsection{\tt Print Scope {\scope}\comindex{Print Scope}} - -This displays all the notations defined in interpretation scope -{\scope}. It also displays the delimiting key if any and the class to -which the scope is bound, if any. - -\subsubsection{\tt Print Scopes\comindex{Print Scopes}} - -This displays all the notations, delimiting keys and corresponding -class of all the existing interpretation scopes. -It also displays the lonely notations. - -\section[Abbreviations]{Abbreviations\index{Abbreviations} -\label{Abbreviations} -\comindex{Notation}} - -An {\em abbreviation} is a name, possibly applied to arguments, that -denotes a (presumably) more complex expression. Here are examples: - -\begin{coq_eval} -Require Import List. -Require Import Relations. -Set Printing Notations. -\end{coq_eval} -\begin{coq_example} -Notation Nlist := (list nat). -Check 1 :: 2 :: 3 :: nil. -Notation reflexive R := (forall x, R x x). -Check forall A:Prop, A <-> A. -Check reflexive iff. -\end{coq_example} - -An abbreviation expects no precedence nor associativity, since it -is parsed as usual application. Abbreviations are used as -much as possible by the {\Coq} printers unless the modifier -\verb=(only parsing)= is given. - -Abbreviations are bound to an absolute name as an ordinary -definition is, and they can be referred by qualified names too. - -Abbreviations are syntactic in the sense that they are bound to -expressions which are not typed at the time of the definition of the -abbreviation but at the time it is used. Especially, abbreviations can -be bound to terms with holes (i.e. with ``\_''). The general syntax -for abbreviations is -\begin{quote} -\zeroone{{\tt Local}} \texttt{Notation} {\ident} \sequence{\ident}{} \texttt{:=} {\term} - \zeroone{{\tt (only parsing)}}~\verb=.= -\end{quote} - -\Example -\begin{coq_eval} -Set Strict Implicit. -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Definition explicit_id (A:Set) (a:A) := a. 
-Notation id := (explicit_id _). -Check (id 0). -\end{coq_example} - -Abbreviations do not survive the end of sections. No typing of the denoted -expression is performed at definition time. Type-checking is done only -at the time of use of the abbreviation. - -%\Rem \index{Syntactic Definition} % -%Abbreviations are similar to the {\em syntactic -%definitions} available in versions of {\Coq} prior to version 8.0, -%except that abbreviations are used for printing (unless the modifier -%\verb=(only parsing)= is given) while syntactic definitions were not. - -\section{Tactic Notations -\label{Tactic-Notation} -\comindex{Tactic Notation}} - -Tactic notations allow to customize the syntax of the tactics of the -tactic language. -%% \footnote{Tactic notations are just a simplification of -%% the {\tt Grammar tactic simple\_tactic} command that existed in -%% versions prior to version 8.0.} -Tactic notations obey the following syntax: -\medskip - -\noindent -\begin{tabular}{lcl} -{\sentence} & ::= & \zeroone{\tt Local} \texttt{Tactic Notation} \zeroone{\taclevel} \sequence{\proditem}{} \\ -& & \texttt{:= {\tac} .}\\ -{\proditem} & ::= & {\str} $|$ {\tacargtype}{\tt ({\ident})} \\ -{\taclevel} & ::= & {\tt (at level} {\naturalnumber}{\tt )} \\ -{\tacargtype}\!\! 
& ::= & -%{\tt preident} $|$ -{\tt ident} $|$ -{\tt simple\_intropattern} $|$ -{\tt reference} \\ & $|$ & -{\tt hyp} $|$ -{\tt hyp\_list} $|$ -{\tt ne\_hyp\_list} \\ & $|$ & -% {\tt quantified\_hypothesis} \\ & $|$ & -{\tt constr} $|$ {\tt uconstr} $|$ -{\tt constr\_list} $|$ -{\tt ne\_constr\_list} \\ & $|$ & -%{\tt castedopenconstr} $|$ -{\tt integer} $|$ -{\tt integer\_list} $|$ -{\tt ne\_integer\_list} \\ & $|$ & -{\tt int\_or\_var} $|$ -{\tt int\_or\_var\_list} $|$ -{\tt ne\_int\_or\_var\_list} \\ & $|$ & -{\tt tactic} $|$ {\tt tactic$n$} \qquad\mbox{(for $0\leq n\leq 5$)} - -\end{tabular} -\medskip - -A tactic notation {\tt Tactic Notation {\taclevel} -{\sequence{\proditem}{}} := {\tac}} extends the parser and -pretty-printer of tactics with a new rule made of the list of -production items. It then evaluates into the tactic expression -{\tac}. For simple tactics, it is recommended to use a terminal -symbol, i.e. a {\str}, for the first production item. The tactic -level indicates the parsing precedence of the tactic notation. This -information is particularly relevant for notations of tacticals. -Levels 0 to 5 are available (default is 0). -To know the parsing precedences of the -existing tacticals, use the command -\comindex{Print Grammar tactic} - {\tt Print Grammar tactic.} - -Each type of tactic argument has a specific semantic regarding how it -is parsed and how it is interpreted. The semantic is described in the -following table. The last command gives examples of tactics which -use the corresponding kind of argument. 
- -\medskip -\noindent -\begin{tabular}{l|l|l|l} -Tactic argument type & parsed as & interpreted as & as in tactic \\ -\hline & & & \\ -{\tt\small ident} & identifier & a user-given name & {\tt intro} \\ -{\tt\small simple\_intropattern} & intro\_pattern & an intro\_pattern & {\tt intros}\\ -{\tt\small hyp} & identifier & an hypothesis defined in context & {\tt clear}\\ -%% quantified_hypothesis actually not supported -%%{\tt\small quantified\_hypothesis} & identifier or integer & a named or non dep. hyp. of the goal & {\tt intros until}\\ -{\tt\small reference} & qualified identifier & a global reference of term & {\tt unfold}\\ -{\tt\small constr} & term & a term & {\tt exact} \\ -{\tt\small uconstr} & term & an untyped term & {\tt refine} \\ -%% castedopenconstr actually not supported -%%{\tt\small castedopenconstr} & term & a term with its sign. of exist. var. & {\tt refine}\\ -{\tt\small integer} & integer & an integer & \\ -{\tt\small int\_or\_var} & identifier or integer & an integer & {\tt do} \\ -{\tt\small tactic} & tactic at level 5 & a tactic & \\ -{\tt\small tactic$n$} & tactic at level $n$ & a tactic & \\ -{\tt\small {\nterm{entry}}\_list} & list of {\nterm{entry}} & a list of how {\nterm{entry}} is interpreted & \\ -{\tt\small ne\_{\nterm{entry}}\_list} & non-empty list of {\nterm{entry}} & a list of how {\nterm{entry}} is interpreted& \\ -\end{tabular} - -\Rem In order to be bound in tactic definitions, each syntactic entry -for argument type must include the case of simple {\ltac} identifier -as part of what it parses. This is naturally the case for {\tt ident}, -{\tt simple\_intropattern}, {\tt reference}, {\tt constr}, ... but not -for {\tt integer}. This is the reason for introducing a special entry -{\tt int\_or\_var} which evaluates to integers only but which -syntactically includes identifiers in order to be usable in tactic -definitions. 
- -\Rem The {\tt {\nterm{entry}}\_list} and {\tt ne\_{\nterm{entry}}\_list} -entries can be used in primitive tactics or in other notations at -places where a list of the underlying entry can be used: {\nterm{entry}} is -either {\tt\small constr}, {\tt\small hyp}, {\tt\small integer} or -{\tt\small int\_or\_var}. - -Tactic notations do not survive the end of sections. They survive -modules unless the command {\tt Local Tactic Notation} is used instead -of {\tt Tactic Notation}. - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/RefMan-tac.tex b/doc/refman/RefMan-tac.tex deleted file mode 100644 index 2597e3c37d..0000000000 --- a/doc/refman/RefMan-tac.tex +++ /dev/null @@ -1,5397 +0,0 @@ -% TODO: unify the use of \form and \type to mean a type -% or use \form specifically for a type of type Prop -\chapter{Tactics -\index{Tactics} -\label{Tactics}} -%HEVEA\cutname{tactics.html} - -A deduction rule is a link between some (unique) formula, that we call -the {\em conclusion} and (several) formulas that we call the {\em -premises}. A deduction rule can be read in two ways. The first -one says: {\it ``if I know this and this then I can deduce -this''}. For instance, if I have a proof of $A$ and a proof of $B$ -then I have a proof of $A \land B$. This is forward reasoning from -premises to conclusion. The other way says: {\it ``to prove this I -have to prove this and this''}. For instance, to prove $A \land B$, I -have to prove $A$ and I have to prove $B$. This is backward reasoning -from conclusion to premises. We say that the conclusion -is the {\em goal}\index{goal} to prove and premises are the {\em -subgoals}\index{subgoal}. The tactics implement {\em backward -reasoning}. When applied to a goal, a tactic replaces this goal with -the subgoals it generates. We say that a tactic reduces a goal to its -subgoal(s). - -Each (sub)goal is denoted with a number. The current goal is numbered -1. 
By default, a tactic is applied to the current goal, but one can
-address a particular goal in the list by writing {\sl n:\tac} which
-means {\it ``apply tactic {\tac} to goal number {\sl n}''}.
-We can show the list of subgoals by typing {\tt Show} (see
-Section~\ref{Show}).
-
-Since not every rule applies to a given statement, not every tactic can be
-used to reduce an arbitrary goal. In other words, before applying a tactic to a
-given goal, the system checks that some {\em preconditions} are
-satisfied. If this is not the case, the tactic raises an error message.
-
-Tactics are built from atomic tactics and tactic expressions (which
-extend the folklore notion of tactical) to combine those atomic
-tactics. This chapter is devoted to atomic tactics. The tactic
-language will be described in Chapter~\ref{TacticLanguage}.
-
-\section{Invocation of tactics
-\label{tactic-syntax}
-\index{tactic@{\tac}}}
-
-A tactic is applied as an ordinary command. It may be preceded by a
-goal selector (see Section \ref{ltac:selector}).
-If no selector is specified, the default
-selector (see Section \ref{default-selector}) is used.
-
-\newcommand{\toplevelselector}{\nterm{toplevel\_selector}}
-\begin{tabular}{lcl}
-{\commandtac} & ::= & {\toplevelselector} {\tt :} {\tac} {\tt .}\\
- & $|$ & {\tac} {\tt .}
-\end{tabular}
-\subsection[\tt Set Default Goal Selector ``\toplevelselector''.]
-  {\tt Set Default Goal Selector ``\toplevelselector''.
-  \optindex{Default Goal Selector}
-  \label{default-selector}}
-After using this command, the default selector -- used when no selector
-is specified when applying a tactic -- is set to the chosen value. The
-initial value is $1$, hence the tactics are, by default, applied to
-the first goal. Using {\tt Set Default Goal Selector ``all''} will
-make it so that tactics are, by default, applied to every goal
-simultaneously. Then, to apply a tactic {\tt tac} to the first goal
-only, you can write {\tt 1:tac}. 
Although more selectors are available, -only {\tt ``all''} or a single natural number are valid default -goal selectors. - -\subsection[\tt Test Default Goal Selector.] - {\tt Test Default Goal Selector.} -This command displays the current default selector. - -\subsection{Bindings list -\index{Binding list} -\label{Binding-list}} - -Tactics that take a term as argument may also support a bindings list, so -as to instantiate some parameters of the term by name or position. -The general form of a term equipped with a bindings list is {\tt -{\term} with {\bindinglist}} where {\bindinglist} may be of two -different forms: - -\begin{itemize} -\item In a bindings list of the form {\tt (\vref$_1$ := \term$_1$) - \dots\ (\vref$_n$ := \term$_n$)}, {\vref} is either an {\ident} or a - {\num}. The references are determined according to the type of - {\term}. If \vref$_i$ is an identifier, this identifier has to be - bound in the type of {\term} and the binding provides the tactic - with an instance for the parameter of this name. If \vref$_i$ is - some number $n$, this number denotes the $n$-th non dependent - premise of the {\term}, as determined by the type of {\term}. - - \ErrMsg \errindex{No such binder} - -\item A bindings list can also be a simple list of terms {\tt - \term$_1$ \dots\ \term$_n$}. In that case the references to - which these terms correspond are determined by the tactic. In case - of {\tt induction}, {\tt destruct}, {\tt elim} and {\tt case} (see - Section~\ref{elim}) the terms have to provide instances for all the - dependent products in the type of \term\ while in the case of {\tt - apply}, or of {\tt constructor} and its variants, only instances for - the dependent products that are not bound in the conclusion of the - type are required. 
- - \ErrMsg \errindex{Not the right number of missing arguments} -\end{itemize} - -\subsection{Occurrences sets and occurrences clauses} -\label{Occurrences_clauses} -\index{Occurrences clauses} - -An occurrences clause is a modifier to some tactics that obeys the -following syntax: - -\begin{tabular}{lcl} -{\occclause} & ::= & {\tt in} {\occgoalset} \\ -{\occgoalset} & ::= & - \zeroonelax{{\ident$_1$} \zeroone{\atoccurrences} {\tt ,} \\ -& & {\dots} {\tt ,}\\ -& & {\ident$_m$} \zeroone{\atoccurrences}}\\ -& & \zeroone{{\tt |-} \zeroone{{\tt *} \zeroone{\atoccurrences}}}\\ -& | & - {\tt *} {\tt |-} \zeroone{{\tt *} \zeroone{\atoccurrences}}\\ -& | & - {\tt *}\\ -{\atoccurrences} & ::= & {\tt at} {\occlist}\\ -{\occlist} & ::= & \zeroone{{\tt -}} {\num$_1$} \dots\ {\num$_n$} -\end{tabular} - -The role of an occurrence clause is to select a set of occurrences of -a {\term} in a goal. In the first case, the {{\ident$_i$} -\zeroone{{\tt at} {\num$_1^i$} \dots\ {\num$_{n_i}^i$}}} parts -indicate that occurrences have to be selected in the hypotheses named -{\ident$_i$}. If no numbers are given for hypothesis {\ident$_i$}, -then all the occurrences of {\term} in the hypothesis are selected. If -numbers are given, they refer to occurrences of {\term} when the term -is printed using option {\tt Set Printing All} (see -Section~\ref{SetPrintingAll}), counting from left to right. In -particular, occurrences of {\term} in implicit arguments (see -Section~\ref{Implicit Arguments}) or coercions (see -Section~\ref{Coercions}) are counted. - -If a minus sign is given between {\tt at} and the list of occurrences, -it negates the condition so that the clause denotes all the occurrences except -the ones explicitly mentioned after the minus sign. - -As an exception to the left-to-right order, the occurrences in the -{\tt return} subexpression of a {\tt match} are considered {\em -before} the occurrences in the matched term. 
-
-In the second case, the {\tt *} on the left of {\tt |-} means that
-all occurrences of {\term} are selected in every hypothesis.
-
-In the first and second case, if {\tt *} is mentioned on the right of
-{\tt |-}, the occurrences in the conclusion of the goal have to be
-selected. If some numbers are given, then only the occurrences denoted
-by these numbers are selected. If no numbers are given, all
-occurrences of {\term} in the goal are selected.
-
-Finally, the last notation is an abbreviation for {\tt * |- *}. Note
-also that {\tt |-} is optional in the first case when no {\tt *} is
-given.
-
-Here are some tactics that understand occurrences clauses:
-{\tt set}, {\tt remember}, {\tt induction}, {\tt destruct}.
-
-\SeeAlso~Sections~\ref{tactic:set}, \ref{Tac-induction}, \ref{SetPrintingAll}.
-
-\section{Applying theorems}
-
-\subsection{\tt exact \term}
-\tacindex{exact}
-\label{exact}
-
-This tactic applies to any goal. It gives directly the exact proof
-term of the goal. Let {\T} be our goal, let {\tt p} be a term of type
-{\tt U}; then {\tt exact p} succeeds iff {\tt T} and {\tt U} are
-convertible (see Section~\ref{conv-rules}).
-
-\begin{ErrMsgs}
-\item \errindex{Not an exact proof}
-\end{ErrMsgs}
-
-\begin{Variants}
-  \item \texttt{eexact \term}\tacindex{eexact}
-
-  This tactic behaves like \texttt{exact} but is able to handle terms
-  and goals with meta-variables.
-
-\end{Variants}
-
-\subsection{\tt assumption}
-\tacindex{assumption}
-
-This tactic looks in the local context for a
-hypothesis whose type is equal to the goal. If this is the case, the
-subgoal is proved. Otherwise, it fails.
-
-\begin{ErrMsgs}
-\item \errindex{No such assumption}
-\end{ErrMsgs}
-
-\begin{Variants}
-\tacindex{eassumption}
-  \item \texttt{eassumption}
-
-  This tactic behaves like \texttt{assumption} but is able to handle
-  goals with meta-variables. 
- -\end{Variants} - -\subsection{\tt refine \term} -\tacindex{refine} -\label{refine} -\label{refine-example} -\index{?@{\texttt{?}}} - -This tactic applies to any goal. It behaves like {\tt exact} with a big -difference: the user can leave some holes (denoted by \texttt{\_} or -{\tt (\_:\type)}) in the term. {\tt refine} will generate as -many subgoals as there are holes in the term. The type of holes must be -either synthesized by the system or declared by an -explicit cast like \verb|(_:nat->Prop)|. Any subgoal that occurs in other -subgoals is automatically shelved, as if calling {\tt shelve\_unifiable} -(see Section~\ref{shelve}). -This low-level tactic can be useful to advanced users. - -\Example - -\begin{coq_example*} -Inductive Option : Set := - | Fail : Option - | Ok : bool -> Option. -\end{coq_example} -\begin{coq_example} -Definition get : forall x:Option, x <> Fail -> bool. -refine - (fun x:Option => - match x return x <> Fail -> bool with - | Fail => _ - | Ok b => fun _ => b - end). -intros; absurd (Fail = Fail); trivial. -\end{coq_example} -\begin{coq_example*} -Defined. -\end{coq_example*} - -\begin{ErrMsgs} -\item \errindex{invalid argument}: - the tactic \texttt{refine} does not know what to do - with the term you gave. -\item \texttt{Refine passed ill-formed term}: the term you gave is not - a valid proof (not easy to debug in general). - This message may also occur in higher-level tactics that call - \texttt{refine} internally. -\item \errindex{Cannot infer a term for this placeholder}: - there is a hole in the term you gave - which type cannot be inferred. Put a cast around it. -\end{ErrMsgs} - -\begin{Variants} -\item {\tt simple refine \term}\tacindex{simple refine} - - This tactic behaves like {\tt refine}, but it does not shelve any - subgoal. It does not perform any beta-reduction either. 
-\item {\tt notypeclasses refine \term}\tacindex{notypeclasses refine} - - This tactic behaves like {\tt refine} except it performs typechecking - without resolution of typeclasses. - -\item {\tt simple notypeclasses refine \term}\tacindex{simple - notypeclasses refine} - - This tactic behaves like {\tt simple refine} except it performs typechecking - without resolution of typeclasses. -\end{Variants} - -\subsection{\tt apply \term} -\tacindex{apply} -\label{apply} -\label{eapply} - -This tactic applies to any goal. The argument {\term} is a term -well-formed in the local context. The tactic {\tt apply} tries to -match the current goal against the conclusion of the type of {\term}. -If it succeeds, then the tactic returns as many subgoals as the number -of non-dependent premises of the type of {\term}. If the conclusion of -the type of {\term} does not match the goal {\em and} the conclusion -is an inductive type isomorphic to a tuple type, then each component -of the tuple is recursively matched to the goal in the left-to-right -order. - -The tactic {\tt apply} relies on first-order unification with -dependent types unless the conclusion of the type of {\term} is of the -form {\tt ($P$ $t_1$ \dots\ $t_n$)} with $P$ to be instantiated. In -the latter case, the behavior depends on the form of the goal. If the -goal is of the form {\tt (fun $x$ => $Q$)~$u_1$~\ldots~$u_n$} and the -$t_i$ and $u_i$ unifies, then $P$ is taken to be {\tt (fun $x$ => $Q$)}. -Otherwise, {\tt apply} tries to define $P$ by abstracting over -$t_1$~\ldots ~$t_n$ in the goal. See {\tt pattern} in -Section~\ref{pattern} to transform the goal so that it gets the form -{\tt (fun $x$ => $Q$)~$u_1$~\ldots~$u_n$}. - -\begin{ErrMsgs} -\item \errindex{Unable to unify \dots\ with \dots} - - The {\tt apply} - tactic failed to match the conclusion of {\term} and the current goal. 
- You can help the {\tt apply} tactic by transforming your - goal with the {\tt change} or {\tt pattern} tactics (see - sections~\ref{pattern},~\ref{change}). - -\item \errindex{Unable to find an instance for the variables -{\ident} \dots\ {\ident}} - - This occurs when some instantiations of the premises of {\term} are not - deducible from the unification. This is the case, for instance, when - you want to apply a transitivity property. In this case, you have to - use one of the variants below: - -\end{ErrMsgs} - -\begin{Variants} - -\item{\tt apply {\term} with {\term$_1$} \dots\ {\term$_n$}} - \tacindex{apply \dots\ with} - - Provides {\tt apply} with explicit instantiations for all dependent - premises of the type of {\term} that do not occur in the conclusion - and consequently cannot be found by unification. Notice that - {\term$_1$} \mbox{\dots} {\term$_n$} must be given according to the order - of these dependent premises of the type of {\term}. - - \ErrMsg \errindex{Not the right number of missing arguments} - -\item{\tt apply {\term} with ({\vref$_1$} := {\term$_1$}) \dots\ ({\vref$_n$} - := {\term$_n$})} - - This also provides {\tt apply} with values for instantiating - premises. Here, variables are referred by names and non-dependent - products by increasing numbers (see syntax in Section~\ref{Binding-list}). - -\item {\tt apply \term$_1$ , \mbox{\dots} , \term$_n$} - - This is a shortcut for {\tt apply} {\term$_1$} {\tt ; [ ..~|} - \ldots~{\tt ; [ ..~| {\tt apply} {\term$_n$} ]} \ldots~{\tt ]}, i.e. for the - successive applications of {\term$_{i+1}$} on the last subgoal - generated by {\tt apply} {\term$_i$}, starting from the application - of {\term$_1$}. - -\item {\tt eapply \term}\tacindex{eapply} - - The tactic {\tt eapply} behaves like {\tt apply} but it does not fail - when no instantiations are deducible for some variables in the - premises. 
Rather, it turns these variables into
-  existential variables which are variables still to instantiate (see
-  Section~\ref{evars}). The instantiation is intended to be found
-  later in the proof.
-
-\item {\tt simple apply {\term}} \tacindex{simple apply}
-
-  This behaves like {\tt apply} but it reasons modulo conversion only
-  on subterms that contain no variables to instantiate. For instance,
-  the following example does not succeed because it would require the
-  conversion of {\tt id ?foo} and {\tt O}.
-
-\begin{coq_eval}
-Reset Initial.
-\end{coq_eval}
-\begin{coq_example*}
-Definition id (x : nat) := x.
-Hypothesis H : forall y, id y = y.
-Goal O = O.
-\end{coq_example*}
-\begin{coq_example}
-Fail simple apply H.
-\end{coq_example}
-
-  Because it reasons modulo a limited amount of conversion, {\tt
-  simple apply} fails more quickly than {\tt apply} and it is thus
-  well-suited for use in user-defined tactics that backtrack often.
-  Moreover, it does not traverse tuples as {\tt apply} does.
-
-\item \zeroone{{\tt simple}} {\tt apply} {\term$_1$} \zeroone{{\tt with}
-  {\bindinglist$_1$}} {\tt ,} \ldots {\tt ,} {\term$_n$} \zeroone{{\tt with}
-  {\bindinglist$_n$}}\\
-  \zeroone{{\tt simple}} {\tt eapply} {\term$_1$} \zeroone{{\tt with}
-  {\bindinglist$_1$}} {\tt ,} \ldots {\tt ,} {\term$_n$} \zeroone{{\tt with}
-  {\bindinglist$_n$}}
-
-  This summarizes the different syntaxes for {\tt apply} and {\tt eapply}.
-
-\item {\tt lapply {\term}} \tacindex{lapply}
-
-  This tactic applies to any goal, say {\tt G}. The argument {\term}
-  has to be well-formed in the current context, its type being
-  reducible to a non-dependent product {\tt A -> B} with {\tt B}
-  possibly containing products. Then it generates two subgoals {\tt
-  B->G} and {\tt A}. Applying {\tt lapply H} (where {\tt H} has type
-  {\tt A->B} and {\tt B} does not start with a product) does the same
-  as giving the sequence {\tt cut B. 2:apply H.} where {\tt cut} is
-  described below. 
- - \Warning When {\term} contains more than one non - dependent product the tactic {\tt lapply} only takes into account the - first product. - -\end{Variants} - -\Example -Assume we have a transitive relation {\tt R} on {\tt nat}: -\label{eapply-example} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example*} -Variable R : nat -> nat -> Prop. -Hypothesis Rtrans : forall x y z:nat, R x y -> R y z -> R x z. -Variables n m p : nat. -Hypothesis Rnm : R n m. -Hypothesis Rmp : R m p. -\end{coq_example*} - -Consider the goal {\tt (R n p)} provable using the transitivity of -{\tt R}: - -\begin{coq_example*} -Goal R n p. -\end{coq_example*} - -The direct application of {\tt Rtrans} with {\tt apply} fails because -no value for {\tt y} in {\tt Rtrans} is found by {\tt apply}: - -%\begin{coq_eval} -%Set Printing Depth 50. -%(********** The following is not correct and should produce **********) -%(**** Error: generated subgoal (R n ?17) has metavariables in it *****) -%\end{coq_eval} -\begin{coq_example} -Fail apply Rtrans. -\end{coq_example} - -A solution is to apply {\tt (Rtrans n m p)} or {\tt (Rtrans n m)}. - -\begin{coq_example} -apply (Rtrans n m p). -\end{coq_example} - -\begin{coq_eval} -Undo. -\end{coq_eval} - -Note that {\tt n} can be inferred from the goal, so the following would -work too. - -\begin{coq_example*} -apply (Rtrans _ m). -\end{coq_example*} -\begin{coq_eval} -Undo. -\end{coq_eval} - -More elegantly, {\tt apply Rtrans with (y:=m)} allows only mentioning -the unknown {\tt m}: - -\begin{coq_example*} -apply Rtrans with (y := m). -\end{coq_example*} -\begin{coq_eval} -Undo. -\end{coq_eval} - -Another solution is to mention the proof of {\tt (R x y)} in {\tt -Rtrans} \ldots - -\begin{coq_example} -apply Rtrans with (1 := Rnm). -\end{coq_example} -\begin{coq_eval} -Undo. -\end{coq_eval} - -\ldots or the proof of {\tt (R y z)}. - -\begin{coq_example} -apply Rtrans with (2 := Rmp). -\end{coq_example} -\begin{coq_eval} -Undo. 
-
-\end{coq_eval}
-
-By contrast, one can use {\tt eapply} which postpones the problem
-of finding {\tt m}. Then one can apply the hypotheses {\tt Rnm} and {\tt
-Rmp}. This instantiates the existential variable and completes the proof.
-
-\begin{coq_example}
-eapply Rtrans.
-apply Rnm.
-apply Rmp.
-\end{coq_example}
-
-\begin{coq_eval}
-Reset R.
-\end{coq_eval}
-
-\noindent {\bf Remark: } When the conclusion of the type of the term
-to apply is an inductive type isomorphic to a tuple type and {\em apply}
-looks recursively whether a component of the tuple matches the goal,
-it excludes components whose statement would result in applying a
-universal lemma of the form {\tt forall A, ... -> A}. Excluding this
-kind of lemma can be avoided by setting the following option:
-
-\begin{quote}
-\optindex{Universal Lemma Under Conjunction}
-{\tt Set Universal Lemma Under Conjunction}
-\end{quote}
-
-This option, which preserves compatibility with versions of {\Coq}
-prior to 8.4, is also available for {\tt apply {\term} in {\ident}}
-(see Section~\ref{apply-in}).
-
-\subsection{\tt apply {\term} in {\ident}}
-\label{apply-in}
-\tacindex{apply \dots\ in}
-
-This tactic applies to any goal. The argument {\term} is a term
-well-formed in the local context and the argument {\ident} is an
-hypothesis of the context. The tactic {\tt apply {\term} in {\ident}}
-tries to match the conclusion of the type of {\ident} against a
-non-dependent premise of the type of {\term}, trying them from right to
-left. If it succeeds, the statement of hypothesis {\ident} is
-replaced by the conclusion of the type of {\term}. The tactic also
-returns as many subgoals as the number of other non-dependent premises
-in the type of {\term} and of the non-dependent premises of the type
-of {\ident}. 
If the conclusion of the type of {\term} does not match
-the goal {\em and} the conclusion is an inductive type isomorphic to a
-tuple type, then the tuple is (recursively) decomposed and the first
-component of the tuple of which a non-dependent premise matches the
-conclusion of the type of {\ident} is used. Tuples are decomposed in a
-width-first left-to-right order (for instance if the type of {\tt H1}
-is a \verb=A <-> B= statement, and the type of {\tt H2} is \verb=A=
-then {\tt apply H1 in H2} transforms the type of {\tt H2} into {\tt
-  B}). The tactic {\tt apply} relies on first-order pattern-matching
-with dependent types.
-
-\begin{ErrMsgs}
-\item \errindex{Statement without assumptions}
-
-This happens if the type of {\term} has no non dependent premise.
-
-\item \errindex{Unable to apply}
-
-This happens if the conclusion of {\ident} does not match any of the
-non dependent premises of the type of {\term}.
-\end{ErrMsgs}
-
-\begin{Variants}
-\item {\tt apply \nelist{\term}{,} in {\ident}}
-
-This applies each of {\term} in sequence in {\ident}.
-
-\item {\tt apply \nelist{{\term} with {\bindinglist}}{,} in {\ident}}
-
-This does the same but uses the bindings in each {\bindinglist} to
-instantiate the parameters of the corresponding type of {\term}
-(see syntax of bindings in Section~\ref{Binding-list}).
-
-\item {\tt eapply \nelist{{\term} with {\bindinglist}}{,} in {\ident}}
-\tacindex{eapply \dots\ in}
-
-This works as {\tt apply \nelist{{\term} with {\bindinglist}}{,} in
-{\ident}} but turns unresolved bindings into existential variables, if
-any, instead of failing.
-
-\item {\tt apply \nelist{{\term} with {\bindinglist}}{,} in {\ident} as {\intropattern}}
-
-This works as {\tt apply \nelist{{\term} with {\bindinglist}}{,} in
-{\ident}} then applies the {\intropattern} to the hypothesis {\ident}. 
- -\item {\tt eapply \nelist{{\term} with {\bindinglist}}{,} in {\ident} as {\intropattern}} - -This works as {\tt apply \nelist{{\term} with {\bindinglist}}{,} in {\ident} as {\intropattern}} but using {\tt eapply}. - -\item {\tt simple apply {\term} in {\ident}} -\tacindex{simple apply \dots\ in} -\tacindex{simple eapply \dots\ in} - -This behaves like {\tt apply {\term} in {\ident}} but it reasons -modulo conversion only on subterms that contain no variables to -instantiate. For instance, if {\tt id := fun x:nat => x} and {\tt H : - forall y, id y = y -> True} and {\tt H0 :\ O = O} then {\tt simple - apply H in H0} does not succeed because it would require the -conversion of {\tt id ?1234} and {\tt O} where {\tt ?1234} is a variable to -instantiate. Tactic {\tt simple apply {\term} in {\ident}} does not -either traverse tuples as {\tt apply {\term} in {\ident}} does. - -\item {\tt \zeroone{simple} apply \nelist{{\term} \zeroone{with {\bindinglist}}}{,} in {\ident} \zeroone{as {\intropattern}}}\\ -{\tt \zeroone{simple} eapply \nelist{{\term} \zeroone{with {\bindinglist}}}{,} in {\ident} \zeroone{as {\intropattern}}} - -This summarizes the different syntactic variants of {\tt apply {\term} - in {\ident}} and {\tt eapply {\term} in {\ident}}. -\end{Variants} - -\subsection{\tt constructor \num} -\label{constructor} -\tacindex{constructor} - -This tactic applies to a goal such that its conclusion is -an inductive type (say {\tt I}). The argument {\num} must be less -or equal to the numbers of constructor(s) of {\tt I}. Let {\tt ci} be -the {\tt i}-th constructor of {\tt I}, then {\tt constructor i} is -equivalent to {\tt intros; apply ci}. 
- -\begin{ErrMsgs} -\item \errindex{Not an inductive product} -\item \errindex{Not enough constructors} -\end{ErrMsgs} - -\begin{Variants} -\item \texttt{constructor} - - This tries \texttt{constructor 1} then \texttt{constructor 2}, - \dots\ , then \texttt{constructor} \textit{n} where \textit{n} is - the number of constructors of the head of the goal. - -\item {\tt constructor \num~with} {\bindinglist} - - Let {\tt ci} be the {\tt i}-th constructor of {\tt I}, then {\tt - constructor i with \bindinglist} is equivalent to {\tt intros; - apply ci with \bindinglist}. - - \Warning the terms in the \bindinglist\ are checked - in the context where {\tt constructor} is executed and not in the - context where {\tt apply} is executed (the introductions are not - taken into account). - -% To document? -% \item {\tt constructor {\tactic}} - -\item {\tt split}\tacindex{split} - - This applies only if {\tt I} has a single constructor. It is then - equivalent to {\tt constructor 1}. It is typically used in the case - of a conjunction $A\land B$. - - \ErrMsg \errindex{Not an inductive goal with 1 constructor} - -\item {\tt exists {\bindinglist}}\tacindex{exists} - - This applies only if {\tt I} has a single constructor. It is then - equivalent to {\tt intros; constructor 1 with \bindinglist}. It is - typically used in the case of an existential quantification $\exists - x, P(x)$. - - \ErrMsg \errindex{Not an inductive goal with 1 constructor} - -\item {\tt exists \nelist{\bindinglist}{,}} - - This iteratively applies {\tt exists {\bindinglist}}. - -\item {\tt left}\tacindex{left}\\ - {\tt right}\tacindex{right} - - These tactics apply only if {\tt I} has two constructors, for instance - in the case of a - disjunction $A\lor B$. Then, they are respectively equivalent to {\tt - constructor 1} and {\tt constructor 2}. 
- - \ErrMsg \errindex{Not an inductive goal with 2 constructors} - -\item {\tt left with \bindinglist}\\ - {\tt right with \bindinglist}\\ - {\tt split with \bindinglist} - - As soon as the inductive type has the right number of constructors, - these expressions are equivalent to calling {\tt - constructor $i$ with \bindinglist} for the appropriate $i$. - -\item \texttt{econstructor}\tacindex{econstructor}\\ - \texttt{eexists}\tacindex{eexists}\\ - \texttt{esplit}\tacindex{esplit}\\ - \texttt{eleft}\tacindex{eleft}\\ - \texttt{eright}\tacindex{eright} - - These tactics and their variants behave like \texttt{constructor}, - \texttt{exists}, \texttt{split}, \texttt{left}, \texttt{right} and - their variants but they introduce existential variables instead of - failing when the instantiation of a variable cannot be found (cf - \texttt{eapply} and Section~\ref{eapply-example}). - -\end{Variants} - -\section{Managing the local context} - -\subsection{\tt intro} -\tacindex{intro} -\label{intro} - -This tactic applies to a goal that is either a product or starts with -a let binder. If the goal is a product, the tactic implements the -``Lam''\index{Typing rules!Lam} rule given in -Section~\ref{Typed-terms}\footnote{Actually, only the second subgoal will be -generated since the other one can be automatically checked.}. If the -goal starts with a let binder, then the tactic implements a mix of the -``Let''\index{Typing rules!Let} and ``Conv''\index{Typing rules!Conv}. - -If the current goal is a dependent product $\forall x:T,~U$ (resp {\tt -let $x$:=$t$ in $U$}) then {\tt intro} puts {\tt $x$:$T$} (resp {\tt $x$:=$t$}) - in the local context. -% Obsolete (quantified names already avoid hypotheses names): -% Otherwise, it puts -% {\tt x}{\it n}{\tt :T} where {\it n} is such that {\tt x}{\it n} is a -%fresh name. -The new subgoal is $U$. -% If the {\tt x} has been renamed {\tt x}{\it n} then it is replaced -% by {\tt x}{\it n} in {\tt U}. 
- -If the goal is a non-dependent product $T \to U$, then it puts -in the local context either {\tt H}{\it n}{\tt :$T$} (if $T$ is of -type {\tt Set} or {\tt Prop}) or {\tt X}{\it n}{\tt :$T$} (if the type -of $T$ is {\tt Type}). The optional index {\it n} is such that {\tt -H}{\it n} or {\tt X}{\it n} is a fresh identifier. -In both cases, the new subgoal is $U$. - -If the goal is neither a product nor starting with a let definition, -the tactic {\tt intro} applies the tactic {\tt hnf} until the tactic -{\tt intro} can be applied or the goal is not head-reducible. - -\begin{ErrMsgs} -\item \errindex{No product even after head-reduction} -\item \errindexbis{{\ident} is already used}{is already used} -\end{ErrMsgs} - -\begin{Variants} - -\item {\tt intros}\tacindex{intros} - - This repeats {\tt intro} until it meets the head-constant. It never reduces - head-constants and it never fails. - -\item {\tt intro {\ident}} - - This applies {\tt intro} but forces {\ident} to be the name of the - introduced hypothesis. - - \ErrMsg \errindex{name {\ident} is already used} - - \Rem If a name used by {\tt intro} hides the base name of a global - constant then the latter can still be referred to by a qualified name - (see \ref{LongNames}). - -\item {\tt intros \ident$_1$ \dots\ \ident$_n$} - - This is equivalent to the composed tactic {\tt intro \ident$_1$; \dots\ ; - intro \ident$_n$}. - - More generally, the \texttt{intros} tactic takes a pattern as - argument in order to introduce names for components of an inductive - definition or to clear introduced hypotheses. This is explained - in~\ref{intros-pattern}. - -\item {\tt intros until {\ident}} \tacindex{intros until} - - This repeats {\tt intro} until it meets a premise of the goal having form - {\tt (} {\ident}~{\tt :}~{\term} {\tt )} and discharges the variable - named {\ident} of the current goal. 
- - \ErrMsg \errindex{No such hypothesis in current goal} - -\item {\tt intros until {\num}} \tacindex{intros until} - - This repeats {\tt intro} until the {\num}-th non-dependent product. For - instance, on the subgoal % - \verb+forall x y:nat, x=y -> y=x+ the tactic \texttt{intros until 1} - is equivalent to \texttt{intros x y H}, as \verb+x=y -> y=x+ is the - first non-dependent product. And on the subgoal % - \verb+forall x y z:nat, x=y -> y=x+ the tactic \texttt{intros until 1} - is equivalent to \texttt{intros x y z} as the product on \texttt{z} - can be rewritten as a non-dependent product: % - \verb+forall x y:nat, nat -> x=y -> y=x+ - - - \ErrMsg \errindex{No such hypothesis in current goal} - - This happens when {\num} is 0 or is greater than the number of non-dependent - products of the goal. - -\item {\tt intro after \ident} \tacindex{intro after}\\ - {\tt intro before \ident} \tacindex{intro before}\\ - {\tt intro at top} \tacindex{intro at top}\\ - {\tt intro at bottom} \tacindex{intro at bottom} - - These tactics apply {\tt intro} and move the freshly introduced hypothesis - respectively after the hypothesis \ident{}, before the hypothesis - \ident{}, at the top of the local context, or at the bottom of the - local context. All hypotheses on which the new hypothesis depends - are moved too so as to respect the order of dependencies between - hypotheses. Note that {\tt intro at bottom} is a synonym for {\tt - intro} with no argument. - - \ErrMsg \errindex{No such hypothesis} : {\ident} - -\item {\tt intro \ident$_1$ after \ident$_2$}\\ - {\tt intro \ident$_1$ before \ident$_2$}\\ - {\tt intro \ident$_1$ at top}\\ - {\tt intro \ident$_1$ at bottom} - - These tactics behave as previously but naming the introduced hypothesis - \ident$_1$. It is equivalent to {\tt intro \ident$_1$} followed by - the appropriate call to {\tt move}~(see Section~\ref{move}). 
- -\end{Variants} - -\subsection{\tt intros {\intropatternlist}} -\label{intros-pattern} -\tacindex{intros \intropattern} -\index{Introduction patterns} -\index{Naming introduction patterns} -\index{Disjunctive/conjunctive introduction patterns} -\index{Disjunctive/conjunctive introduction patterns} -\index{Equality introduction patterns} - -This extension of the tactic {\tt intros} allows to apply tactics on -the fly on the variables or hypotheses which have been introduced. An -{\em introduction pattern list} {\intropatternlist} is a list of -introduction patterns possibly containing the filling introduction -patterns {\tt *} and {\tt **}. An {\em introduction pattern} is -either: -\begin{itemize} -\item a {\em naming introduction pattern}, i.e. either one of: - \begin{itemize} - \item the pattern \texttt{?} - \item the pattern \texttt{?\ident} - \item an identifier - \end{itemize} -\item an {\em action introduction pattern} which itself classifies into: - \begin{itemize} - \item a {\em disjunctive/conjunctive introduction pattern}, i.e. either one of: - \begin{itemize} - \item a disjunction of lists of patterns: - {\tt [$\intropatternlist_1$ | \dots\ | $\intropatternlist_n$]} - \item a conjunction of patterns: {\tt ($p_1$ , \dots\ , $p_n$)} - \item a list of patterns {\tt ($p_1$ \&\ \dots\ \&\ $p_n$)} - for sequence of right-associative binary constructs - \end{itemize} - \item an {\em equality introduction pattern}, i.e. 
either one of: - \begin{itemize} - \item a pattern for decomposing an equality: {\tt [= $p_1$ \dots\ $p_n$]} - \item the rewriting orientations: {\tt ->} or {\tt <-} - \end{itemize} - \item the on-the-fly application of lemmas: $p${\tt \%{\term$_1$}} - \ldots {\tt \%{\term$_n$}} where $p$ itself is not a pattern for - on-the-fly application of lemmas (note: syntax is in experimental stage) - \end{itemize} -\item the wildcard: {\tt \_} -\end{itemize} - -Assuming a goal of type $Q \to P$ (non-dependent product), or -of type $\forall x:T,~P$ (dependent product), the behavior of -{\tt intros $p$} is defined inductively over the structure of the -introduction pattern~$p$: -\begin{itemize} -\item introduction on \texttt{?} performs the introduction, and lets {\Coq} - choose a fresh name for the variable; -\item introduction on \texttt{?\ident} performs the introduction, and - lets {\Coq} choose a fresh name for the variable based on {\ident}; -\item introduction on \texttt{\ident} behaves as described in - Section~\ref{intro}; -\item introduction over a disjunction of list of patterns {\tt - [$\intropatternlist_{1}$ | \dots\ | $\intropatternlist_n$]} expects - the product to be over an inductive type whose number of - constructors is $n$ (or more generally over a type of conclusion an - inductive type built from $n$ constructors, e.g. {\tt C -> - A\textbackslash/B} with $n=2$ since {\tt A\textbackslash/B} has 2 - constructors): it destructs the introduced hypothesis as {\tt - destruct} (see Section~\ref{destruct}) would and applies on each - generated subgoal the corresponding tactic; - \texttt{intros}~$\intropatternlist_i$. The introduction patterns in - $\intropatternlist_i$ are expected to consume no more than the - number of arguments of the $i^{\mbox{\scriptsize th}}$ - constructor. 
If it consumes less, then {\Coq} completes the pattern - so that all the arguments of the constructors of the inductive type - are introduced (for instance, the list of patterns {\tt [$\;$|$\;$] - H} applied on goal {\tt forall x:nat, x=0 -> 0=x} behaves the same - as the list of patterns {\tt [$\,$|$\,$?$\,$] H}); -\item introduction over a conjunction of patterns {\tt ($p_1$, \ldots, - $p_n$)} expects the goal to be a product over an inductive type $I$ with a - single constructor that itself has at least $n$ arguments: it - performs a case analysis over the hypothesis, as {\tt destruct} - would, and applies the patterns $p_1$~\ldots~$p_n$ to the arguments - of the constructor of $I$ (observe that {\tt ($p_1$, {\ldots}, - $p_n$)} is an alternative notation for {\tt [$p_1$ {\ldots} - $p_n$]}); -\item introduction via {\tt ($p_1$ \& \dots\ \& $p_n$)} - is a shortcut for introduction via - {\tt ($p_1$,(\ldots,(\dots,$p_n$)\ldots))}; it expects the - hypothesis to be a sequence of right-associative binary inductive - constructors such as {\tt conj} or {\tt ex\_intro}; for instance, an - hypothesis with type {\tt A\verb|/\|(exists x, B\verb|/\|C\verb|/\|D)} can be - introduced via pattern {\tt (a \& x \& b \& c \& d)}; -\item if the product is over an equality type, then a pattern of the - form {\tt [= $p_{1}$ \dots\ $p_n$]} applies either {\tt injection} - (see Section~\ref{injection}) or {\tt discriminate} (see - Section~\ref{discriminate}) instead of {\tt destruct}; if {\tt - injection} is applicable, the patterns $p_1$, \ldots, $p_n$ are - used on the hypotheses generated by {\tt injection}; if the number - of patterns is smaller than the number of hypotheses generated, the - pattern \texttt{?} is used to complete the list; - %TODO! 
- %if {\tt discriminate} is applicable, the list of patterns $p_{1}$ - %\dots\ $p_n$ is supposed to be empty; -\item introduction over {\tt ->} (respectively {\tt <-}) expects the - hypothesis to be an equality and the right-hand-side (respectively - the left-hand-side) is replaced by the left-hand-side (respectively - the right-hand-side) in the conclusion of the goal; the hypothesis - itself is erased; if the term to substitute is a variable, it is - substituted also in the context of goal and the variable is removed - too; -\item introduction over a pattern $p${\tt \%{\term$_1$}} \ldots {\tt - \%{\term$_n$}} first applies {\term$_1$},\ldots, {\term$_n$} on the - hypothesis to be introduced (as in {\tt apply }{\term}$_1$, \ldots, - {\term}$_n$ {\tt in}) prior to the application of the introduction - pattern $p$; -\item introduction on the wildcard depends on whether the product is - dependent or not: in the non-dependent case, it erases the - corresponding hypothesis (i.e. it behaves as an {\tt intro} followed - by a {\tt clear}, cf Section~\ref{clear}) while in the dependent - case, it succeeds and erases the variable only if the wildcard is - part of a more complex list of introduction patterns that also - erases the hypotheses depending on this variable; -\item introduction over {\tt *} introduces all forthcoming quantified - variables appearing in a row; introduction over {\tt **} introduces - all forthcoming quantified variables or hypotheses until the goal is - not any more a quantification or an implication. -\end{itemize} - -\Example - -\begin{coq_example} -Goal forall A B C:Prop, A \/ B /\ C -> (A -> C) -> C. -intros * [a | (_,c)] f. -\end{coq_example} -\begin{coq_eval} -Abort. 
-
-\end{coq_eval}
-
-\Rem {\tt intros $p_1~\ldots~p_n$} is not equivalent to \texttt{intros
-  $p_1$;\ldots; intros $p_n$} for the following reason: If one of the
-$p_i$ is a wildcard pattern, it might succeed in the first case
-because the further hypotheses it depends on are eventually erased too
-while it might fail in the second case because of dependencies in
-hypotheses which are not yet introduced (and a fortiori not yet
-erased).
-
-\Rem In {\tt intros $\intropatternlist$}, if the last introduction
-pattern is a disjunctive or conjunctive pattern {\tt
-  [$\intropatternlist_1$ | \dots\ | $\intropatternlist_n$]}, the
-completion of $\intropatternlist_i$ so that all the arguments of the
-$i^{\mbox{\scriptsize th}}$ constructors of the corresponding
-inductive type are introduced can be controlled with the
-following option:
-\optindex{Bracketing Last Introduction Pattern}
-
-\begin{quote}
-{\tt Set Bracketing Last Introduction Pattern}
-\end{quote}
-
-Force completion, if needed, when the last introduction pattern is a
-disjunctive or conjunctive pattern (this is the default).
-
-\begin{quote}
-{\tt Unset Bracketing Last Introduction Pattern}
-\end{quote}
-
-Deactivate completion when the last introduction pattern is a disjunctive
-or conjunctive pattern.
-
-
-
-\subsection{\tt clear \ident}
-\tacindex{clear}
-\label{clear}
-
-This tactic erases the hypothesis named {\ident} in the local context
-of the current goal. As a consequence, {\ident} is no longer displayed
-and no longer usable in the proof development.
-
-\begin{ErrMsgs}
-\item \errindex{No such hypothesis}
-\item \errindexbis{{\ident} is used in the conclusion}{is used in the
-  conclusion}
-\item \errindexbis{{\ident} is used in the hypothesis {\ident'}}{is
-  used in the hypothesis}
-\end{ErrMsgs}
-
-\begin{Variants}
-
-\item {\tt clear {\ident$_1$} \dots\ {\ident$_n$}}
-
-  This is equivalent to {\tt clear {\ident$_1$}. 
{\ldots} clear - {\ident$_n$}.} - -\item {\tt clearbody {\ident}}\tacindex{clearbody} - - This tactic expects {\ident} to be a local definition then clears - its body. Otherwise said, this tactic turns a definition into an - assumption. - - \ErrMsg \errindexbis{{\ident} is not a local definition}{is not a local definition} - -\item \texttt{clear - {\ident$_1$} \dots\ {\ident$_n$}} - - This tactic clears all the hypotheses except the ones depending in - the hypotheses named {\ident$_1$} {\ldots} {\ident$_n$} and in the - goal. - -\item \texttt{clear} - - This tactic clears all the hypotheses except the ones the goal depends on. - -\item {\tt clear dependent \ident \tacindex{clear dependent}} - - This clears the hypothesis \ident\ and all the hypotheses - that depend on it. - -\end{Variants} - -\subsection{\tt revert \ident$_1$ \dots\ \ident$_n$} -\tacindex{revert} -\label{revert} - -This applies to any goal with variables \ident$_1$ \dots\ \ident$_n$. -It moves the hypotheses (possibly defined) to the goal, if this respects -dependencies. This tactic is the inverse of {\tt intro}. - -\begin{ErrMsgs} -\item \errindex{No such hypothesis} -\item \errindexbis{{\ident} is used in the hypothesis {\ident'}}{is - used in the hypothesis} -\end{ErrMsgs} - -\begin{Variants} -\item {\tt revert dependent \ident \tacindex{revert dependent}} - - This moves to the goal the hypothesis {\ident} and all the hypotheses - that depend on it. - -\end{Variants} - -\subsection{\tt move {\ident$_1$} after {\ident$_2$}} -\tacindex{move} -\label{move} - -This moves the hypothesis named {\ident$_1$} in the local context -after the hypothesis named {\ident$_2$}, where ``after'' is in -reference to the direction of the move. The proof term is not changed. 
- -If {\ident$_1$} comes before {\ident$_2$} in the order of -dependencies, then all the hypotheses between {\ident$_1$} and -{\ident$_2$} that (possibly indirectly) depend on {\ident$_1$} are -moved too, and all of them are thus moved after {\ident$_2$} in the -order of dependencies. - -If {\ident$_1$} comes after {\ident$_2$} in the order of dependencies, -then all the hypotheses between {\ident$_1$} and {\ident$_2$} that -(possibly indirectly) occur in the type of {\ident$_1$} are moved -too, and all of them are thus moved before {\ident$_2$} in the order -of dependencies. - -\begin{Variants} - -\item {\tt move {\ident$_1$} before {\ident$_2$}} - -This moves {\ident$_1$} towards and just before the hypothesis named -{\ident$_2$}. As for {\tt move {\ident$_1$} after {\ident$_2$}}, -dependencies over {\ident$_1$} (when {\ident$_1$} comes before -{\ident$_2$} in the order of dependencies) or in the type of -{\ident$_1$} (when {\ident$_1$} comes after {\ident$_2$} in the order -of dependencies) are moved too. - -\item {\tt move {\ident} at top} - -This moves {\ident} at the top of the local context (at the beginning of the context). - -\item {\tt move {\ident} at bottom} - -This moves {\ident} at the bottom of the local context (at the end of the context). - -\end{Variants} - -\begin{ErrMsgs} - -\item \errindex{No such hypothesis} - -\item \errindex{Cannot move {\ident$_1$} after {\ident$_2$}: - it occurs in the type of {\ident$_2$}} - -\item \errindex{Cannot move {\ident$_1$} after {\ident$_2$}: - it depends on {\ident$_2$}} - -\end{ErrMsgs} - -\Example - -\begin{coq_example} -Goal forall x :nat, x = 0 -> forall z y:nat, y=y-> 0=x. -intros x H z y H0. -move x after H0. -Undo. -move x before H0. -Undo. -move H0 after H. -Undo. -move H0 before H. -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} - -\subsection{\tt rename {\ident$_1$} into {\ident$_2$}} -\tacindex{rename} - -This renames hypothesis {\ident$_1$} into {\ident$_2$} in the current -context. 
The name of the hypothesis in the proof-term, however, is left
-unchanged.
-
-\begin{Variants}
-
-\item {\tt rename {\ident$_1$} into {\ident$_2$}, \ldots,
-  {\ident$_{2k-1}$} into {\ident$_{2k}$}}
-
-This renames the variables {\ident$_1$} \ldots {\ident$_{2k-1}$} respectively into
-{\ident$_2$} \ldots {\ident$_{2k}$} in parallel. In particular, the target
-identifiers may contain identifiers that exist in the source context, as long
-as the latter are also renamed by the same tactic.
-
-\end{Variants}
-
-\begin{ErrMsgs}
-\item \errindex{No such hypothesis}
-\item \errindexbis{{\ident$_2$} is already used}{is already used}
-\end{ErrMsgs}
-
-\subsection{\tt set ( {\ident} := {\term} )}
-\label{tactic:set}
-\tacindex{set}
-
-This replaces {\term} by {\ident} in the conclusion of the current goal
-and adds the new definition {\tt {\ident} := \term} to the local context.
-
-If {\term} has holes (i.e. subexpressions of the form ``\_''), the
-tactic first checks that all subterms matching the pattern are
-compatible before doing the replacement using the leftmost subterm
-matching the pattern.
-
-\begin{ErrMsgs}
-\item \errindex{The variable {\ident} is already defined}
-\end{ErrMsgs}
-
-\begin{Variants}
-
-\item {\tt set ( {\ident} := {\term} ) in {\occgoalset}}
-
-This notation allows specifying which occurrences of {\term} have to
-be substituted in the context. The {\tt in {\occgoalset}} clause is an
-occurrence clause whose syntax and behavior are described in
-Section~\ref{Occurrences_clauses}.
-
-\item {\tt set ( {\ident} \nelistnosep{\binder} := {\term} )}
-
-  This is equivalent to {\tt set ( {\ident} := fun
-  \nelistnosep{\binder} => {\term} )}.
-
-\item {\tt set \term}
-
-  This behaves as {\tt set (} {\ident} := {\term} {\tt )} but {\ident}
-  is generated by {\Coq}. This variant also supports an occurrence clause. 
- -\item {\tt set ( {\ident$_0$} \nelistnosep{\binder} := {\term} ) in {\occgoalset}}\\ - {\tt set {\term} in {\occgoalset}} - - These are the general forms that combine the previous possibilities. - -\item {\tt eset ( {\ident$_0$} \nelistnosep{\binder} := {\term} ) in {\occgoalset}}\tacindex{eset}\\ - {\tt eset {\term} in {\occgoalset}} - - While the different variants of \texttt{set} expect that no - existential variables are generated by the tactic, \texttt{eset} - removes this constraint. In practice, this is relevant only when - \texttt{eset} is used as a synonym of \texttt{epose}, i.e. when the - term does not occur in the goal. - -\item {\tt remember {\term} as {\ident}}\tacindex{remember} - - This behaves as {\tt set ( {\ident} := {\term} ) in *} and using a - logical (Leibniz's) equality instead of a local definition. - -\item {\tt remember {\term} as {\ident} eqn:{\ident}} - - This behaves as {\tt remember {\term} as {\ident}}, except - that the name of the generated equality is also given. - -\item {\tt remember {\term} as {\ident} in {\occgoalset}} - - This is a more general form of {\tt remember} that remembers the - occurrences of {\term} specified by an occurrences set. - -\item - {\tt eremember {\term} as {\ident}}\tacindex{eremember}\\ - {\tt eremember {\term} as {\ident} in {\occgoalset}}\\ - {\tt eremember {\term} as {\ident} eqn:{\ident}} - - While the different variants of \texttt{remember} expect that no - existential variables are generated by the tactic, \texttt{eremember} - removes this constraint. - -\item {\tt pose ( {\ident} := {\term} )}\tacindex{pose} - - This adds the local definition {\ident} := {\term} to the current - context without performing any replacement in the goal or in the - hypotheses. It is equivalent to {\tt set ( {\ident} {\tt :=} - {\term} {\tt ) in |-}}. 
- -\item {\tt pose ( {\ident} \nelistnosep{\binder} := {\term} )} - - This is equivalent to {\tt pose (} {\ident} {\tt :=} {\tt fun} - \nelistnosep{\binder} {\tt =>} {\term} {\tt )}. - -\item{\tt pose {\term}} - - This behaves as {\tt pose ( {\ident} := {\term} )} but - {\ident} is generated by {\Coq}. - -\item {\tt epose ( {\ident} := {\term} )}\tacindex{epose}\\ - {\tt epose ( {\ident} \nelistnosep{\binder} := {\term} )}\\ - {\tt epose {\term}} - - While the different variants of \texttt{pose} expect that no - existential variables are generated by the tactic, \texttt{epose} - removes this constraint. - -\end{Variants} - -\subsection{\tt decompose [ {\qualid$_1$} \dots\ {\qualid$_n$} ] \term} -\label{decompose} -\tacindex{decompose} - -This tactic recursively decomposes a -complex proposition in order to obtain atomic ones. - -\Example - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Goal forall A B C:Prop, A /\ B /\ C \/ B /\ C \/ C /\ A -> C. -intros A B C H; decompose [and or] H; assumption. -\end{coq_example} -\begin{coq_example*} -Qed. -\end{coq_example*} - -{\tt decompose} does not work on right-hand sides of implications or products. - -\begin{Variants} - -\item {\tt decompose sum \term}\tacindex{decompose sum} - - This decomposes sum types (like \texttt{or}). - -\item {\tt decompose record \term}\tacindex{decompose record} - - This decomposes record types (inductive types with one constructor, - like \texttt{and} and \texttt{exists} and those defined with the - \texttt{Record} macro, see Section~\ref{Record}). - -\end{Variants} - -\section{Controlling the proof flow} - -\subsection{\tt assert ( {\ident} :\ {\form} )} -\tacindex{assert} - -This tactic applies to any goal. {\tt assert (H : U)} adds a new -hypothesis of name \texttt{H} asserting \texttt{U} to the current goal -and opens a new subgoal \texttt{U}\footnote{This corresponds to the - cut rule of sequent calculus.}. 
The subgoal {\texttt U} comes first -in the list of subgoals remaining to prove. - -\begin{ErrMsgs} -\item \errindex{Not a proposition or a type} - - Arises when the argument {\form} is neither of type {\tt Prop}, {\tt - Set} nor {\tt Type}. - -\end{ErrMsgs} - -\begin{Variants} - -\item{\tt assert {\form}} - - This behaves as {\tt assert ( {\ident} :\ {\form} )} but - {\ident} is generated by {\Coq}. - -\item \texttt{assert {\form} by {\tac}}\tacindex{assert by} - - This tactic behaves like \texttt{assert} but applies {\tac} - to solve the subgoals generated by \texttt{assert}. - - \ErrMsg \errindex{Proof is not complete} - -\item \texttt{assert {\form} as {\intropattern}\tacindex{assert as}} - - If {\intropattern} is a naming introduction pattern (see - Section~\ref{intros-pattern}), the hypothesis is named after this - introduction pattern (in particular, if {\intropattern} is {\ident}, - the tactic behaves like \texttt{assert ({\ident} :\ {\form})}). - - If {\intropattern} is an action introduction pattern, the tactic - behaves like \texttt{assert {\form}} followed by the action done by - this introduction pattern. - -\item \texttt{assert {\form} as {\intropattern} by {\tac}} - - This combines the two previous variants of {\tt assert}. - -\item{\tt assert ( {\ident} := {\term} )} - - This behaves as {\tt assert ({\ident} :\ {\type}) by exact {\term}} - where {\type} is the type of {\term}. This is deprecated in favor of - {\tt pose proof}. - - If the head of {\term} is {\ident}, the tactic behaves as - {\tt specialize \term}. - - \ErrMsg \errindex{Variable {\ident} is already declared} - -\item \texttt{eassert {\form} as {\intropattern} by {\tac}}\tacindex{eassert}\tacindex{eassert as}\tacindex{eassert by}\\ - {\tt assert ( {\ident} := {\term} )} - - While the different variants of \texttt{assert} expect that no - existential variables are generated by the tactic, \texttt{eassert} - removes this constraint. 
This allows not to specify the asserted - statement completely before starting to prove it. - -\item \texttt{pose proof {\term} \zeroone{as {\intropattern}}\tacindex{pose proof}} - - This tactic behaves like \texttt{assert T \zeroone{as {\intropattern}} by - exact {\term}} where \texttt{T} is the type of {\term}. - - In particular, \texttt{pose proof {\term} as {\ident}} behaves as - \texttt{assert ({\ident} := {\term})} and \texttt{pose proof {\term} - as {\intropattern}} is the same as applying - the {\intropattern} to {\term}. - -\item \texttt{epose proof {\term} \zeroone{as {\intropattern}}\tacindex{epose proof}} - - While \texttt{pose proof} expects that no existential variables are generated by the tactic, - \texttt{epose proof} removes this constraint. - -\item \texttt{enough ({\ident} :\ {\form})}\tacindex{enough} - - This adds a new hypothesis of name {\ident} asserting {\form} to the - goal the tactic \texttt{enough} is applied to. A new subgoal stating - \texttt{\form} is inserted after the initial goal rather than before - it as \texttt{assert} would do. - -\item \texttt{enough {\form}}\tacindex{enough} - - This behaves like \texttt{enough ({\ident} :\ {\form})} with the name - {\ident} of the hypothesis generated by {\Coq}. - -\item \texttt{enough {\form} as {\intropattern}\tacindex{enough as}} - - This behaves like \texttt{enough} {\form} using {\intropattern} to - name or destruct the new hypothesis. - -\item \texttt{enough ({\ident} :\ {\form}) by {\tac}}\tacindex{enough by}\\ - \texttt{enough {\form} by {\tac}}\tacindex{enough by}\\ - \texttt{enough {\form} as {\intropattern} by {\tac}} - - This behaves as above but with {\tac} expected to solve the initial - goal after the extra assumption {\form} is added and possibly - destructed. If the \texttt{as} {\intropattern} clause generates more - than one subgoal, {\tac} is applied to all of them. 
- -\item \texttt{eenough ({\ident} :\ {\form}) by {\tac}}\tacindex{eenough}\tacindex{eenough as}\tacindex{eenough by}\\ - \texttt{eenough {\form} by {\tac}}\tacindex{enough by}\\ - \texttt{eenough {\form} as {\intropattern} by {\tac}} - - While the different variants of \texttt{enough} expect that no - existential variables are generated by the tactic, \texttt{eenough} - removes this constraint. - -\item {\tt cut {\form}}\tacindex{cut} - - This tactic applies to any goal. It implements the non-dependent - case of the ``App''\index{Typing rules!App} rule given in - Section~\ref{Typed-terms}. (This is Modus Ponens inference rule.) - {\tt cut U} transforms the current goal \texttt{T} into the two - following subgoals: {\tt U -> T} and \texttt{U}. The subgoal {\tt U - -> T} comes first in the list of remaining subgoal to prove. - -\item {\tt specialize ({\ident} \term$_1$ \dots\ \term$_n$)\tacindex{specialize} \zeroone{as \intropattern}}\\ - {\tt specialize {\ident} with {\bindinglist} \zeroone{as \intropattern}} - - The tactic {\tt specialize} works on local hypothesis \ident. - The premises of this hypothesis (either universal - quantifications or non-dependent implications) are instantiated - by concrete terms coming either from arguments \term$_1$ - $\ldots$ \term$_n$ or from a bindings list (see - Section~\ref{Binding-list} for more about bindings lists). - In the first form the application to \term$_1$ {\ldots} - \term$_n$ can be partial. The first form is equivalent to - {\tt assert ({\ident} := {\ident} {\term$_1$} \dots\ \term$_n$)}. - - In the second form, instantiation elements can also be partial. - In this case the uninstantiated arguments are inferred by - unification if possible or left quantified in the hypothesis - otherwise. - - With the {\tt as} clause, the local hypothesis {\ident} is left - unchanged and instead, the modified hypothesis is introduced as - specified by the {\intropattern}. 
- - The name {\ident} can also refer to a global lemma or - hypothesis. In this case, for compatibility reasons, the - behavior of {\tt specialize} is close to that of {\tt - generalize}: the instantiated statement becomes an additional - premise of the goal. The {\tt as} clause is especially useful - in this case to immediately introduce the instantiated statement - as a local hypothesis. - - \begin{ErrMsgs} - \item \errindexbis{{\ident} is used in hypothesis \ident'}{is used in hypothesis} - \item \errindexbis{{\ident} is used in conclusion}{is used in conclusion} - \end{ErrMsgs} - -%% Moreover, the old syntax allows the use of a number after {\tt specialize} -%% for controlling the number of premises to instantiate. Giving this -%% number should not be mandatory anymore (automatic detection of how -%% many premises can be eaten without leaving meta-variables). Hence -%% no documentation for this integer optional argument of specialize - -\end{Variants} - -\subsection{\tt generalize \term} -\tacindex{generalize} -\label{generalize} - -This tactic applies to any goal. It generalizes the conclusion with -respect to some term. - -\Example - -\begin{coq_eval} -Goal forall x y:nat, (0 <= x + y + y). -intros. -\end{coq_eval} -\begin{coq_example} -Show. -generalize (x + y + y). -\end{coq_example} - -\begin{coq_eval} -Abort. -\end{coq_eval} - -If the goal is $G$ and $t$ is a subterm of type $T$ in the goal, then -{\tt generalize} \textit{t} replaces the goal by {\tt forall (x:$T$), $G'$} -where $G'$ is obtained from $G$ by replacing all occurrences of $t$ by -{\tt x}. The name of the variable (here {\tt n}) is chosen based on $T$. - -\begin{Variants} -\item {\tt generalize {\term$_1$ , \dots\ , \term$_n$}} - - This is equivalent to {\tt generalize \term$_n$; \dots\ ; generalize - \term$_1$}. Note that the sequence of \term$_i$'s are processed - from $n$ to $1$. 
- -\item {\tt generalize {\term} at {\num$_1$ \dots\ \num$_i$}} - - This is equivalent to {\tt generalize \term} but it generalizes only over - the specified occurrences of {\term} (counting from left to right on the - expression printed using option {\tt Set Printing All}). - -\item {\tt generalize {\term} as {\ident}} - - This is equivalent to {\tt generalize \term} but it uses {\ident} to name the - generalized hypothesis. - -\item {\tt generalize {\term$_1$} at {\num$_{11}$ \dots\ \num$_{1i_1}$} - as {\ident$_1$} - , {\ldots} , - {\term$_n$} at {\num$_{n1}$ \mbox{\dots} \num$_{ni_n}$} - as {\ident$_2$}} - - This is the most general form of {\tt generalize} that combines the - previous behaviors. - -\item {\tt generalize dependent \term} \tacindex{generalize dependent} - - This generalizes {\term} but also {\em all} hypotheses that depend - on {\term}. It clears the generalized hypotheses. - -\end{Variants} - -\subsection{\tt evar ( {\ident} :\ {\term} )} -\tacindex{evar} -\label{evar} - -The {\tt evar} tactic creates a new local definition named \ident\ with -type \term\ in the context. The body of this binding is a fresh -existential variable. - -\subsection{\tt instantiate ( {\ident} := {\term} )} -\tacindex{instantiate} -\label{instantiate} - -The {\tt instantiate} tactic refines (see Section~\ref{refine}) -an existential variable {\ident} with the term {\term}. -It is equivalent to {\tt only [\ident]: refine \term} (preferred alternative). - -\begin{Remarks} -\item To be able to refer to an existential variable by name, the -user must have given the name explicitly (see~\ref{ExistentialVariables}). - -\item When you are referring to hypotheses which you did not name -explicitly, be aware that Coq may make a different decision on how to -name the variable in the current goal and in the context of the -existential variable. This can lead to surprising behaviors. 
-\end{Remarks} - -\begin{Variants} - - \item {\tt instantiate ( {\num} := {\term} )} - This variant allows to refer to an existential variable which was not - named by the user. The {\num} argument is the position of the - existential variable from right to left in the goal. - Because this variant is not robust to slight changes in the goal, - its use is strongly discouraged. - - \item {\tt instantiate ( {\num} := {\term} ) in \ident} - - \item {\tt instantiate ( {\num} := {\term} ) in ( Value of {\ident} )} - - \item {\tt instantiate ( {\num} := {\term} ) in ( Type of {\ident} )} - -These allow to refer respectively to existential variables occurring in -a hypothesis or in the body or the type of a local definition. - - \item {\tt instantiate} - - Without argument, the {\tt instantiate} tactic tries to solve as - many existential variables as possible, using information gathered - from other tactics in the same tactical. This is automatically - done after each complete tactic (i.e. after a dot in proof mode), - but not, for example, between each tactic when they are sequenced - by semicolons. - -\end{Variants} - -\subsection{\tt admit} -\tacindex{admit} -\tacindex{give\_up} -\label{admit} - -The {\tt admit} tactic allows temporarily skipping a subgoal so as to -progress further in the rest of the proof. A proof containing -admitted goals cannot be closed with {\tt Qed} but only with -{\tt Admitted}. - -\begin{Variants} - - \item {\tt give\_up} - - Synonym of {\tt admit}. - -\end{Variants} - -\subsection{\tt absurd \term} -\tacindex{absurd} -\label{absurd} - -This tactic applies to any goal. The argument {\term} is any -proposition {\tt P} of type {\tt Prop}. This tactic applies {\tt - False} elimination, that is it deduces the current goal from {\tt - False}, and generates as subgoals {\tt $\sim$P} and {\tt P}. It is -very useful in proofs by cases, where some cases are impossible. 
In -most cases, \texttt{P} or $\sim$\texttt{P} is one of the hypotheses of -the local context. - -\subsection{\tt contradiction} -\label{contradiction} -\tacindex{contradiction} - -This tactic applies to any goal. The {\tt contradiction} tactic -attempts to find in the current context (after all {\tt intros}) an -hypothesis that is equivalent to an empty inductive type (e.g. {\tt - False}), to the negation of a singleton inductive type (e.g. {\tt - True} or {\tt x=x}), or two contradictory hypotheses. - -\begin{ErrMsgs} -\item \errindex{No such assumption} -\end{ErrMsgs} - -\begin{Variants} -\item {\tt contradiction \ident} - -The proof of {\tt False} is searched in the hypothesis named \ident. -\end{Variants} - -\subsection{\tt contradict \ident} -\label{contradict} -\tacindex{contradict} - -This tactic allows manipulating negated hypothesis and goals. The -name \ident\ should correspond to a hypothesis. With -{\tt contradict H}, the current goal and context is transformed in -the following way: -\begin{itemize} -\item {\tt H:$\neg$A $\vd$ B} \ becomes \ {\tt $\vd$ A} -\item {\tt H:$\neg$A $\vd$ $\neg$B} \ becomes \ {\tt H: B $\vd$ A } -\item {\tt H: A $\vd$ B} \ becomes \ {\tt $\vd$ $\neg$A} -\item {\tt H: A $\vd$ $\neg$B} \ becomes \ {\tt H: B $\vd$ $\neg$A} -\end{itemize} - -\subsection{\tt exfalso} -\label{exfalso} -\tacindex{exfalso} - -This tactic implements the ``ex falso quodlibet'' logical principle: -an elimination of {\tt False} is performed on the current goal, and the -user is then required to prove that {\tt False} is indeed provable in -the current context. This tactic is a macro for {\tt elimtype False}. - -\section{Case analysis and induction} - -The tactics presented in this section implement induction or case -analysis on inductive or co-inductive objects (see -Section~\ref{Cic-inductive-definitions}). - -\subsection{\tt destruct \term} -\tacindex{destruct} -\label{destruct} - -This tactic applies to any goal. 
The argument {\term} must be of -inductive or co-inductive type and the tactic generates subgoals, one -for each possible form of {\term}, i.e. one for each constructor of -the inductive or co-inductive type. Unlike {\tt induction}, no -induction hypothesis is generated by {\tt destruct}. - -There are special cases: - -\begin{itemize} - -\item If {\term} is an identifier {\ident} denoting a quantified - variable of the conclusion of the goal, then {\tt destruct {\ident}} - behaves as {\tt intros until {\ident}; destruct {\ident}}. If - {\ident} is not anymore dependent in the goal after application of - {\tt destruct}, it is erased (to avoid erasure, use - parentheses, as in {\tt destruct ({\ident})}). - -\item If {\term} is a {\num}, then {\tt destruct {\num}} behaves as -{\tt intros until {\num}} followed by {\tt destruct} applied to the -last introduced hypothesis. Remark: For destruction of a numeral, use -syntax {\tt destruct ({\num})} (not very interesting anyway). - -\item In case {\term} is an hypothesis {\ident} of the context, - and {\ident} is not anymore dependent in the goal after - application of {\tt destruct}, it is erased (to avoid erasure, use - parentheses, as in {\tt destruct ({\ident})}). - -\item The argument {\term} can also be a pattern of which holes are - denoted by ``\_''. In this case, the tactic checks that all subterms - matching the pattern in the conclusion and the hypotheses are - compatible and performs case analysis using this subterm. - -\end{itemize} - -\begin{Variants} -\item{\tt destruct \term$_1$, \ldots, \term$_n$} - - This is a shortcut for {\tt destruct \term$_1$; \ldots; destruct \term$_n$}. - -\item{\tt destruct {\term} as {\disjconjintropattern}} - - This behaves as {\tt destruct {\term}} but uses the names in - {\intropattern} to name the variables introduced in the context. 
- The {\intropattern} must have the form {\tt [} $p_{11}$ \ldots - $p_{1n_1}$ {\tt |} {\ldots} {\tt |} $p_{m1}$ \ldots $p_{mn_m}$ - {\tt ]} with $m$ being the number of constructors of the type of - {\term}. Each variable introduced by {\tt destruct} in the context - of the $i^{th}$ goal gets its name from the list $p_{i1}$ \ldots - $p_{in_i}$ in order. If there are not enough names, {\tt destruct} - invents names for the remaining variables to introduce. More - generally, the $p_{ij}$ can be any introduction pattern (see - Section~\ref{intros-pattern}). This provides a concise notation for - chaining destruction of an hypothesis. - -% It is recommended to use this variant of {\tt destruct} for -% robust proof scripts. - -\item{\tt destruct {\term} eqn:{\namingintropattern}} - - This behaves as {\tt destruct {\term}} but adds an equation between - {\term} and the value that {\term} takes in each of the possible - cases. The name of the equation is specified by {\namingintropattern} - (see Section~\ref{intros-pattern}), in particular {\tt ?} can be - used to let Coq generate a fresh name. - -\item{\tt destruct {\term} with \bindinglist} - - This behaves like \texttt{destruct {\term}} providing explicit - instances for the dependent premises of the type of {\term} (see - syntax of bindings in Section~\ref{Binding-list}). - -\item{\tt edestruct {\term}\tacindex{edestruct}} - - This tactic behaves like \texttt{destruct {\term}} except that it - does not fail if the instance of a dependent premises of the type of - {\term} is not inferable. Instead, the unresolved instances are left - as existential variables to be inferred later, in the same way as - {\tt eapply} does (see Section~\ref{eapply-example}). - -\item{\tt destruct {\term$_1$} using {\term$_2$}}\\ - {\tt destruct {\term$_1$} using {\term$_2$} with {\bindinglist}} - - These are synonyms of {\tt induction {\term$_1$} using {\term$_2$}} and - {\tt induction {\term$_1$} using {\term$_2$} with {\bindinglist}}. 
- -\item \texttt{destruct {\term} in {\occgoalset}} - - This syntax is used for selecting which occurrences of {\term} the - case analysis has to be done on. The {\tt in {\occgoalset}} clause is an - occurrence clause whose syntax and behavior is described in - Section~\ref{Occurrences_clauses}. - -\item{\tt destruct {\term$_1$} with {\bindinglist$_1$} - as {\disjconjintropattern} eqn:{\namingintropattern} - using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}}\\ - {\tt edestruct {\term$_1$} with {\bindinglist$_1$} - as {\disjconjintropattern} eqn:{\namingintropattern} - using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}} - - These are the general forms of {\tt destruct} and {\tt edestruct}. - They combine the effects of the {\tt with}, {\tt as}, {\tt eqn:}, {\tt using}, - and {\tt in} clauses. - -\item{\tt case \term}\label{case}\tacindex{case} - - The tactic {\tt case} is a more basic tactic to perform case - analysis without recursion. It behaves as {\tt elim \term} but using - a case-analysis elimination principle and not a recursive one. - -\item {\tt case {\term} with {\bindinglist}} - - Analogous to {\tt elim {\term} with {\bindinglist}} above. - -\item{\tt ecase {\term}\tacindex{ecase}}\\ - {\tt ecase {\term} with {\bindinglist}} - - In case the type of {\term} has dependent premises, or dependent - premises whose values are not inferable from the {\tt with - {\bindinglist}} clause, {\tt ecase} turns them into existential - variables to be resolved later on. - -\item {\tt simple destruct \ident}\tacindex{simple destruct} - - This tactic behaves as {\tt intros until - {\ident}; case {\tt {\ident}}} when {\ident} is a quantified - variable of the goal. - -\item {\tt simple destruct {\num}} - - This tactic behaves as {\tt intros until - {\num}; case {\tt {\ident}}} where {\ident} is the name given by - {\tt intros until {\num}} to the {\num}-th non-dependent premise of - the goal. 
- -\item{\tt case\_eq \term}\label{case_eq}\tacindex{case\_eq} - - The tactic {\tt case\_eq} is a variant of the {\tt case} tactic that - allow to perform case analysis on a term without completely - forgetting its original form. This is done by generating equalities - between the original form of the term and the outcomes of the case - analysis. - -% The effect of this tactic is similar to the effect of {\tt -% destruct {\term} in |- *} with the exception that no new hypotheses -% are introduced in the context. - -\end{Variants} - -\subsection{\tt induction \term} -\tacindex{induction} -\label{Tac-induction} - -This tactic applies to any goal. The argument {\term} must be of -inductive type and the tactic {\tt induction} generates subgoals, -one for each possible form of {\term}, i.e. one for each constructor -of the inductive type. - -If the argument is dependent in either the conclusion or some -hypotheses of the goal, the argument is replaced by the appropriate -constructor form in each of the resulting subgoals and induction -hypotheses are added to the local context using names whose prefix is -{\tt IH}. - -There are particular cases: - -\begin{itemize} - -\item If {\term} is an identifier {\ident} denoting a quantified - variable of the conclusion of the goal, then {\tt induction - {\ident}} behaves as {\tt intros until {\ident}; induction - {\ident}}. If {\ident} is not anymore dependent in the goal - after application of {\tt induction}, it is erased (to avoid - erasure, use parentheses, as in {\tt induction ({\ident})}). - -\item If {\term} is a {\num}, then {\tt induction {\num}} behaves as -{\tt intros until {\num}} followed by {\tt induction} applied to the -last introduced hypothesis. Remark: For simple induction on a numeral, -use syntax {\tt induction ({\num})} (not very interesting anyway). 
- -\item In case {\term} is an hypothesis {\ident} of the context, - and {\ident} is not anymore dependent in the goal after - application of {\tt induction}, it is erased (to avoid erasure, use - parentheses, as in {\tt induction ({\ident})}). - -\item The argument {\term} can also be a pattern of which holes are - denoted by ``\_''. In this case, the tactic checks that all subterms - matching the pattern in the conclusion and the hypotheses are - compatible and performs induction using this subterm. - -\end{itemize} - -\Example - -\begin{coq_example} -Lemma induction_test : forall n:nat, n = n -> n <= n. -intros n H. -induction n. -\end{coq_example} - -\begin{ErrMsgs} -\item \errindex{Not an inductive product} -\item \errindex{Unable to find an instance for the variables -{\ident} \ldots {\ident}} - - Use in this case - the variant {\tt elim \dots\ with \dots} below. -\end{ErrMsgs} - -\begin{Variants} -\item{\tt induction {\term} as {\disjconjintropattern}} - - This behaves as {\tt induction {\term}} but uses the names in - {\disjconjintropattern} to name the variables introduced in the context. - The {\disjconjintropattern} must typically be of the form - {\tt [} $p_{11}$ {\ldots} - $p_{1n_1}$ {\tt |} {\ldots} {\tt |} $p_{m1}$ {\ldots} $p_{mn_m}$ {\tt - ]} with $m$ being the number of constructors of the type of - {\term}. Each variable introduced by {\tt induction} in the context - of the $i^{th}$ goal gets its name from the list $p_{i1}$ {\ldots} - $p_{in_i}$ in order. If there are not enough names, {\tt induction} - invents names for the remaining variables to introduce. More - generally, the $p_{ij}$ can be any disjunctive/conjunctive - introduction pattern (see Section~\ref{intros-pattern}). For instance, - for an inductive type with one constructor, the pattern notation - {\tt (}$p_{1}$ {\tt ,} {\ldots} {\tt ,} $p_{n}${\tt )} can be used instead of - {\tt [} $p_{1}$ {\ldots} $p_{n}$ {\tt ]}. 
- -%% \item{\tt induction {\term} eqn:{\namingintropattern}} - -%% This behaves as {\tt induction {\term}} but adds an equation between -%% {\term} and the value that {\term} takes in each of the induction -%% case. The name of the equation is built according to -%% {\namingintropattern} which can be an identifier, a ``?'', etc, as -%% indicated in Section~\ref{intros-pattern}. - -%% \item{\tt induction {\term} as {\disjconjintropattern} eqn:{\namingintropattern}} - -%% This combines the two previous forms. - -\item{\tt induction {\term} with \bindinglist} - - This behaves like \texttt{induction {\term}} providing explicit - instances for the premises of the type of {\term} (see the syntax of - bindings in Section~\ref{Binding-list}). - -\item{\tt einduction {\term}\tacindex{einduction}} - - This tactic behaves like \texttt{induction {\term}} excepts that it - does not fail if some dependent premise of the type of {\term} is - not inferable. Instead, the unresolved premises are posed as - existential variables to be inferred later, in the same way as {\tt - eapply} does (see Section~\ref{eapply-example}). - -\item {\tt induction {\term$_1$} using {\term$_2$}} - - This behaves as {\tt induction {\term$_1$}} but using {\term$_2$} as - induction scheme. It does not expect the conclusion of the type of - {\term$_1$} to be inductive. - -\item {\tt induction {\term$_1$} using {\term$_2$} with {\bindinglist}} - - This behaves as {\tt induction {\term$_1$} using {\term$_2$}} but - also providing instances for the premises of the type of {\term$_2$}. - -\item \texttt{induction {\term}$_1$, {\ldots}, {\term}$_n$ using {\qualid}} - - This syntax is used for the case {\qualid} denotes an induction principle - with complex predicates as the induction principles generated by - {\tt Function} or {\tt Functional Scheme} may be. 
- -\item \texttt{induction {\term} in {\occgoalset}} - - This syntax is used for selecting which occurrences of {\term} the - induction has to be carried on. The {\tt in \occgoalset} clause is - an occurrence clause whose syntax and behavior is described in - Section~\ref{Occurrences_clauses}. If variables or hypotheses not - mentioning {\term} in their type are listed in {\occgoalset}, those - are generalized as well in the statement to prove. - -\Example - -\begin{coq_example} -Lemma comm x y : x + y = y + x. -induction y in x |- *. -Show 2. -\end{coq_example} - -\item {\tt induction {\term$_1$} with {\bindinglist$_1$} - as {\disjconjintropattern} %% eqn:{\namingintropattern} - using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}}\\ - {\tt einduction {\term$_1$} with {\bindinglist$_1$} - as {\disjconjintropattern} %% eqn:{\namingintropattern} - using {\term$_2$} with {\bindinglist$_2$} in {\occgoalset}} - - These are the most general forms of {\tt induction} and {\tt - einduction}. It combines the effects of the {\tt with}, {\tt as}, %%{\tt eqn:}, - {\tt using}, and {\tt in} clauses. - -\item {\tt elim \term}\label{elim} - - This is a more basic induction tactic. Again, the type of the - argument {\term} must be an inductive type. Then, according to - the type of the goal, the tactic {\tt elim} chooses the appropriate - destructor and applies it as the tactic {\tt apply} - would do. For instance, if the proof context contains {\tt - n:nat} and the current goal is {\tt T} of type {\tt - Prop}, then {\tt elim n} is equivalent to {\tt apply nat\_ind with - (n:=n)}. The tactic {\tt elim} does not modify the context of - the goal, neither introduces the induction loading into the context - of hypotheses. - - More generally, {\tt elim \term} also works when the type of {\term} - is a statement with premises and whose conclusion is inductive. 
In - that case the tactic performs induction on the conclusion of the - type of {\term} and leaves the non-dependent premises of the type as - subgoals. In the case of dependent products, the tactic tries to - find an instance for which the elimination lemma applies and fails - otherwise. - -\item {\tt elim {\term} with {\bindinglist}} - - Allows to give explicit instances to the premises of the type - of {\term} (see Section~\ref{Binding-list}). - -\item{\tt eelim {\term}\tacindex{eelim}} - - In case the type of {\term} has dependent premises, this turns them into - existential variables to be resolved later on. - -\item{\tt elim {\term$_1$} using {\term$_2$}}\\ - {\tt elim {\term$_1$} using {\term$_2$} with {\bindinglist}\tacindex{elim \dots\ using}} - -Allows the user to give explicitly an elimination predicate -{\term$_2$} that is not the standard one for the underlying inductive -type of {\term$_1$}. The {\bindinglist} clause allows -instantiating premises of the type of {\term$_2$}. - -\item{\tt elim {\term$_1$} with {\bindinglist$_1$} using {\term$_2$} with {\bindinglist$_2$}}\\ - {\tt eelim {\term$_1$} with {\bindinglist$_1$} using {\term$_2$} with {\bindinglist$_2$}} - - These are the most general forms of {\tt elim} and {\tt eelim}. It - combines the effects of the {\tt using} clause and of the two uses - of the {\tt with} clause. - -\item {\tt elimtype \form}\tacindex{elimtype} - - The argument {\form} must be inductively defined. {\tt elimtype I} - is equivalent to {\tt cut I. intro H{\rm\sl n}; elim H{\rm\sl n}; - clear H{\rm\sl n}}. Therefore the hypothesis {\tt H{\rm\sl n}} will - not appear in the context(s) of the subgoal(s). 
Conversely, if {\tt - t} is a term of (inductive) type {\tt I} that does not occur - in the goal, then {\tt elim t} is equivalent to {\tt elimtype I; 2: - exact t.} - -\item {\tt simple induction \ident}\tacindex{simple induction} - - This tactic behaves as {\tt intros until - {\ident}; elim {\tt {\ident}}} when {\ident} is a quantified - variable of the goal. - -\item {\tt simple induction {\num}} - - This tactic behaves as {\tt intros until - {\num}; elim {\tt {\ident}}} where {\ident} is the name given by - {\tt intros until {\num}} to the {\num}-th non-dependent premise of - the goal. - -%% \item {\tt simple induction {\term}}\tacindex{simple induction} - -%% If {\term} is an {\ident} corresponding to a quantified variable of -%% the goal then the tactic behaves as {\tt intros until {\ident}; elim -%% {\tt {\ident}}}. If {\term} is a {\num} then the tactic behaves as -%% {\tt intros until {\ident}; elim {\tt {\ident}}}. Otherwise, it is -%% a synonym for {\tt elim {\term}}. - -%% \Rem For simple induction on a numeral, use syntax {\tt simple -%% induction ({\num})}. - -\end{Variants} - -%\subsection[\tt FixPoint \dots]{\tt FixPoint \dots\tacindex{Fixpoint}} -%Not yet documented. - -\subsection{\tt double induction \ident$_1$ \ident$_2$} -\tacindex{double induction} - -This tactic is deprecated and should be replaced by {\tt induction \ident$_1$; induction \ident$_2$} (or {\tt induction \ident$_1$; destruct \ident$_2$} depending on the exact needs). - -%% This tactic applies to any goal. If the variables {\ident$_1$} and -%% {\ident$_2$} of the goal have an inductive type, then this tactic -%% performs double induction on these variables. For instance, if the -%% current goal is \verb+forall n m:nat, P n m+ then, {\tt double induction n -%% m} yields the four cases with their respective inductive hypotheses. 
- -%% In particular, for proving \verb+(P (S n) (S m))+, the generated induction -%% hypotheses are \verb+(P (S n) m)+ and \verb+(m:nat)(P n m)+ (of the latter, -%% \verb+(P n m)+ and \verb+(P n (S m))+ are derivable). - -%% \Rem When the induction hypothesis \verb+(P (S n) m)+ is not -%% needed, {\tt induction \ident$_1$; destruct \ident$_2$} produces -%% more concise subgoals. - -\begin{Variant} - -\item {\tt double induction \num$_1$ \num$_2$} - -This tactic is deprecated and should be replaced by {\tt induction - \num$_1$; induction \num$_3$} where \num$_3$ is the result of -\num$_2$-\num$_1$. - -%% This applies double induction on the \num$_1^{th}$ and \num$_2^{th}$ {\it -%% non dependent} premises of the goal. More generally, any combination of an -%% {\ident} and a {\num} is valid. - -\end{Variant} - -\subsection{\tt dependent induction \ident} -\tacindex{dependent induction} -\label{DepInduction} - -The \emph{experimental} tactic \texttt{dependent induction} performs -induction-inversion on an instantiated inductive predicate. -One needs to first require the {\tt Coq.Program.Equality} module to use -this tactic. The tactic is based on the BasicElim tactic by Conor -McBride \cite{DBLP:conf/types/McBride00} and the work of Cristina Cornes -around inversion \cite{DBLP:conf/types/CornesT95}. From an instantiated -inductive predicate and a goal, it generates an equivalent goal where the -hypothesis has been generalized over its indexes which are then -constrained by equalities to be the right instances. This permits to -state lemmas without resorting to manually adding these equalities and -still get enough information in the proofs. - -\Example - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Lemma le_minus : forall n:nat, n < 1 -> n = 0. -intros n H ; induction H. -\end{coq_example} - -Here we did not get any information on the indexes to help fulfill this -proof. 
The problem is that, when we use the \texttt{induction} tactic, -we lose information on the hypothesis instance, notably that the second -argument is \texttt{1} here. Dependent induction solves this problem by -adding the corresponding equality to the context. - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Require Import Coq.Program.Equality. -Lemma le_minus : forall n:nat, n < 1 -> n = 0. -intros n H ; dependent induction H. -\end{coq_example} - -The subgoal is cleaned up as the tactic tries to automatically -simplify the subgoals with respect to the generated equalities. -In this enriched context, it becomes possible to solve this subgoal. -\begin{coq_example} -reflexivity. -\end{coq_example} - -Now we are in a contradictory context and the proof can be solved. -\begin{coq_example} -inversion H. -\end{coq_example} - -This technique works with any inductive predicate. -In fact, the \texttt{dependent induction} tactic is just a wrapper around -the \texttt{induction} tactic. One can make its own variant by just -writing a new tactic based on the definition found in -\texttt{Coq.Program.Equality}. - -\begin{Variants} -\item {\tt dependent induction {\ident} generalizing {\ident$_1$} \dots - {\ident$_n$}}\tacindex{dependent induction \dots\ generalizing} - - This performs dependent induction on the hypothesis {\ident} but first - generalizes the goal by the given variables so that they are - universally quantified in the goal. This is generally what one wants - to do with the variables that are inside some constructors in the - induction hypothesis. The other ones need not be further generalized. - -\item {\tt dependent destruction {\ident}}\tacindex{dependent destruction} - - This performs the generalization of the instance {\ident} but uses {\tt destruct} - instead of {\tt induction} on the generalized hypothesis. This gives - results equivalent to {\tt inversion} or {\tt dependent inversion} if - the hypothesis is dependent. 
-\end{Variants} - -\SeeAlso \ref{dependent-induction-example} for a larger example of -dependent induction and an explanation of the underlying technique. - -\subsection{\tt functional induction (\qualid\ \term$_1$ \dots\ \term$_n$)} -\tacindex{functional induction} -\label{FunInduction} - -The tactic \texttt{functional induction} performs -case analysis and induction following the definition of a function. It -makes use of a principle generated by \texttt{Function} -(see Section~\ref{Function}) or \texttt{Functional Scheme} -(see Section~\ref{FunScheme}). Note that this tactic is only available -after a {\tt Require Import FunInd}. - -\begin{coq_eval} -Reset Initial. -Import Nat. -\end{coq_eval} -\begin{coq_example} -Require Import FunInd. -Functional Scheme minus_ind := Induction for minus Sort Prop. -Check minus_ind. -Lemma le_minus (n m:nat) : n - m <= n. -functional induction (minus n m) using minus_ind; simpl; auto. -\end{coq_example} -\begin{coq_example*} -Qed. -\end{coq_example*} - -\Rem \texttt{(\qualid\ \term$_1$ \dots\ \term$_n$)} must be a correct -full application of \qualid. In particular, the rules for implicit -arguments are the same as usual. For example use \texttt{@\qualid} if -you want to write implicit arguments explicitly. - -\Rem Parentheses over \qualid \dots \term$_n$ are mandatory. - -\Rem \texttt{functional induction (f x1 x2 x3)} is actually a wrapper -for \texttt{induction x1, x2, x3, (f x1 x2 x3) using \qualid} followed by -a cleaning phase, where {\qualid} is the induction principle -registered for $f$ (by the \texttt{Function} (see Section~\ref{Function}) -or \texttt{Functional Scheme} (see Section~\ref{FunScheme}) command) -corresponding to the sort of the goal. Therefore \texttt{functional - induction} may fail if the induction scheme {\qualid} is -not defined. See also Section~\ref{Function} for the function terms -accepted by \texttt{Function}. 
- -\Rem There is a difference between obtaining an induction scheme for a -function by using \texttt{Function} (see Section~\ref{Function}) and by -using \texttt{Functional Scheme} after a normal definition using -\texttt{Fixpoint} or \texttt{Definition}. See \ref{Function} for -details. - -\SeeAlso{\ref{Function},\ref{FunScheme},\ref{FunScheme-examples}, - \ref{sec:functional-inversion}} - -\begin{ErrMsgs} -\item \errindex{Cannot find induction information on \qualid} -\item \errindex{Not the right number of induction arguments} -\end{ErrMsgs} - -\begin{Variants} -\item {\tt functional induction (\qualid\ \term$_1$ \dots\ \term$_n$) - as {\disjconjintropattern} using \term$_{m+1}$ with \bindinglist} - - Similarly to \texttt{Induction} and \texttt{elim} - (see Section~\ref{Tac-induction}), this allows giving explicitly the - name of the introduced variables, the - induction principle, and the values of dependent premises of the - elimination scheme, including \emph{predicates} for mutual induction - when {\qualid} is part of a mutually recursive definition. - -\end{Variants} - -\subsection{\tt discriminate \term} -\label{discriminate} -\tacindex{discriminate} - - -This tactic proves any goal from an assumption stating that two -structurally different terms of an inductive set are equal. For -example, from {\tt (S (S O))=(S O)} we can derive by absurdity any -proposition. - -The argument {\term} is assumed to be a proof of a statement -of conclusion {\tt{\term$_1$} = {\term$_2$}} with {\term$_1$} and -{\term$_2$} being elements of an inductive set. To build the proof, -the tactic traverses the normal forms\footnote{Reminder: opaque - constants will not be expanded by $\delta$ reductions.} of -{\term$_1$} and {\term$_2$} looking for a couple of subterms {\tt u} -and {\tt w} ({\tt u} subterm of the normal form of {\term$_1$} and -{\tt w} subterm of the normal form of {\term$_2$}), placed at the same -positions and whose head symbols are two different constructors. 
If -such a couple of subterms exists, then the proof of the current goal -is completed, otherwise the tactic fails. - -\Rem The syntax {\tt discriminate {\ident}} can be used to refer to a -hypothesis quantified in the goal. In this case, the quantified -hypothesis whose name is {\ident} is first introduced in the local -context using \texttt{intros until \ident}. - -\begin{ErrMsgs} -\item \errindex{No primitive equality found} -\item \errindex{Not a discriminable equality} -\end{ErrMsgs} - -\begin{Variants} -\item \texttt{discriminate \num} - - This does the same thing as \texttt{intros until \num} followed by - \texttt{discriminate \ident} where {\ident} is the identifier for - the last introduced hypothesis. - -\item \texttt{discriminate {\term} with \bindinglist} - - This does the same thing as \texttt{discriminate {\term}} but using -the given bindings to instantiate parameters or hypotheses of {\term}. - -\item \texttt{ediscriminate \num}\tacindex{ediscriminate}\\ - \texttt{ediscriminate {\term} \zeroone{with \bindinglist}} - - This works the same as {\tt discriminate} but if the type of {\term}, - or the type of the hypothesis referred to by {\num}, has uninstantiated - parameters, these parameters are left as existential variables. - -\item \texttt{discriminate} - - This behaves like {\tt discriminate {\ident}} if {\ident} is the - name of an hypothesis to which {\tt discriminate} is applicable; if - the current goal is of the form {\term$_1$} {\tt <>} {\term$_2$}, - this behaves as {\tt intro {\ident}; discriminate {\ident}}. - - \ErrMsg \errindex{No discriminable equalities} -\end{Variants} - -\subsection{\tt injection \term} -\label{injection} -\tacindex{injection} - -The {\tt injection} tactic exploits the property that constructors of -inductive types are injective, i.e. that if $c$ is a constructor -of an inductive type and $c~\vec{t_1}$ and $c~\vec{t_2}$ are equal -then $\vec{t_1}$ and $\vec{t_2}$ are equal too. 
-
-If {\term} is a proof of a statement of conclusion
- {\tt {\term$_1$} = {\term$_2$}},
-then {\tt injection} applies the injectivity of constructors as deep as possible to
-derive the equality of all the subterms of {\term$_1$} and {\term$_2$} at positions
-where {\term$_1$} and {\term$_2$} start to differ.
-For example, from {\tt (S p, S n) = (q, S (S m))} we may derive {\tt S
-  p = q} and {\tt n = S m}. For this tactic to work, {\term$_1$} and
-{\term$_2$} should be typed with an inductive
-type and they should be neither convertible, nor having a different
-head constructor. If these conditions are satisfied, the tactic
-derives the equality of all the subterms of {\term$_1$} and
-{\term$_2$} at positions where they differ and adds them as
-antecedents to the conclusion of the current goal.
-
-\Example Consider the following goal:
-
-\begin{coq_example*}
-Inductive list : Set :=
-  | nil : list
-  | cons : nat -> list -> list.
-Variable P : list -> Prop.
-\end{coq_example*}
-\begin{coq_eval}
-Lemma ex :
-  forall (l:list) (n:nat), P nil -> cons n l = cons 0 nil -> P l.
-intros l n H H0.
-\end{coq_eval}
-\begin{coq_example}
-Show.
-injection H0.
-\end{coq_example}
-\begin{coq_eval}
-Abort.
-\end{coq_eval}
-
-Beware that \texttt{injection} yields an equality in a sigma type
-whenever the injected object has a dependent type $P$ with its two
-instances in different types $(P~t_1~...~t_n)$ and
-$(P~u_1~...~u_n)$. If $t_1$ and $u_1$ are the same and have for type
-an inductive type for which a decidable equality has been declared
-using the command {\tt Scheme Equality} (see \ref{Scheme}), the use of
-a sigma type is avoided.
-
-\Rem If some quantified hypothesis of the goal is named {\ident}, then
-{\tt injection {\ident}} first introduces the hypothesis in the local
-context using \texttt{intros until \ident}. 
- -\begin{ErrMsgs} -\item \errindex{Not a projectable equality but a discriminable one} -\item \errindex{Nothing to do, it is an equality between convertible terms} -\item \errindex{Not a primitive equality} -\item \errindex{Nothing to inject} -\end{ErrMsgs} - -\begin{Variants} -\item \texttt{injection \num} - - This does the same thing as \texttt{intros until \num} followed by -\texttt{injection \ident} where {\ident} is the identifier for the last -introduced hypothesis. - -\item \texttt{injection {\term} with \bindinglist} - - This does the same as \texttt{injection {\term}} but using - the given bindings to instantiate parameters or hypotheses of {\term}. - -\item \texttt{einjection \num}\tacindex{einjection}\\ - \texttt{einjection {\term} \zeroone{with \bindinglist}} - - This works the same as {\tt injection} but if the type of {\term}, - or the type of the hypothesis referred to by {\num}, has uninstantiated - parameters, these parameters are left as existential variables. - -\item{\tt injection} - - If the current goal is of the form {\term$_1$} {\tt <>} {\term$_2$}, - this behaves as {\tt intro {\ident}; injection {\ident}}. - - \ErrMsg \errindex{goal does not satisfy the expected preconditions} - -\item \texttt{injection {\term} \zeroone{with \bindinglist} as \nelistnosep{\intropattern}}\\ -\texttt{injection {\num} as {\intropattern} \dots\ \intropattern}\\ -\texttt{injection as {\intropattern} \dots\ \intropattern}\\ -\texttt{einjection {\term} \zeroone{with \bindinglist} as \nelistnosep{\intropattern}}\\ -\texttt{einjection {\num} as {\intropattern} \dots\ \intropattern}\\ -\texttt{einjection as {\intropattern} \dots\ \intropattern} -\tacindex{injection \dots\ as} - -These variants apply \texttt{intros} \nelistnosep{\intropattern} after -the call to \texttt{injection} or \texttt{einjection} so that all -equalities generated are moved in the context of hypotheses. The -number of {\intropattern} must not exceed the number of equalities -newly generated. 
If it is smaller, fresh names are automatically -generated to adjust the list of {\intropattern} to the number of new -equalities. The original equality is erased if it corresponds to an -hypothesis. - -\end{Variants} - -\optindex{Structural Injection} - -It is possible to ensure that \texttt{injection {\term}} erases the -original hypothesis and leaves the generated equalities in the context -rather than putting them as antecedents of the current goal, as if -giving \texttt{injection {\term} as} (with an empty list of names). To -obtain this behavior, the option {\tt Set Structural Injection} must -be activated. This option is off by default. - -By default, \texttt{injection} only creates new equalities between -terms whose type is in sort \texttt{Type} or \texttt{Set}, thus -implementing a special behavior for objects that are proofs -of a statement in \texttt{Prop}. This behavior can be turned off -by setting the option \texttt{Set Keep Proof Equalities}. -\optindex{Keep Proof Equalities} -\subsection{\tt inversion \ident} -\tacindex{inversion} - -Let the type of {\ident} in the local context be $(I~\vec{t})$, -where $I$ is a (co)inductive predicate. Then, -\texttt{inversion} applied to \ident~ derives for each possible -constructor $c_i$ of $(I~\vec{t})$, {\bf all} the necessary -conditions that should hold for the instance $(I~\vec{t})$ to be -proved by $c_i$. - -\Rem If {\ident} does not denote a hypothesis in the local context -but refers to a hypothesis quantified in the goal, then the -latter is first introduced in the local context using -\texttt{intros until \ident}. - -\Rem As inversion proofs may be large in size, we recommend the user to -stock the lemmas whenever the same instance needs to be inverted -several times. See Section~\ref{Derive-Inversion}. - -\Rem Part of the behavior of the \texttt{inversion} tactic is to generate -equalities between expressions that appeared in the hypothesis that is -being processed. 
By default, no equalities are generated if they relate -two proofs (i.e. equalities between terms whose type is in -sort \texttt{Prop}). This behavior can be turned off by using the option -\texttt{Set Keep Proof Equalities.} -\optindex{Keep Proof Equalities} - -\begin{Variants} -\item \texttt{inversion \num} - - This does the same thing as \texttt{intros until \num} then - \texttt{inversion \ident} where {\ident} is the identifier for the - last introduced hypothesis. - -\item \tacindex{inversion\_clear} \texttt{inversion\_clear \ident} - - This behaves as \texttt{inversion} and then erases \ident~ from the - context. - -\item \tacindex{inversion \dots\ as} \texttt{inversion {\ident} as \intropattern} - - This generally behaves as \texttt{inversion} but using names in - {\intropattern} for naming hypotheses. The {\intropattern} must have - the form {\tt [} $p_{11} \ldots p_{1n_1}$ {\tt |} {\ldots} {\tt |} - $p_{m1} \ldots p_{mn_m}$ {\tt ]} with $m$ being the number of - constructors of the type of {\ident}. Be careful that the list must - be of length $m$ even if {\tt inversion} discards some cases (which - is precisely one of its roles): for the discarded cases, just use an - empty list (i.e. $n_i=0$). - - The arguments of the $i^{th}$ constructor and the - equalities that {\tt inversion} introduces in the context of the - goal corresponding to the $i^{th}$ constructor, if it exists, get - their names from the list $p_{i1}$ \ldots $p_{in_i}$ in order. If - there are not enough names, {\tt inversion} invents names for the - remaining variables to introduce. In case an equation splits into - several equations (because {\tt inversion} applies {\tt injection} - on the equalities it generates), the corresponding name $p_{ij}$ in - the list must be replaced by a sublist of the form {\tt [$p_{ij1}$ - \mbox{\dots} $p_{ijq}$]} (or, equivalently, {\tt ($p_{ij1}$, - \dots, $p_{ijq}$)}) where $q$ is the number of subequalities - obtained from splitting the original equation. 
Here is an example. - - The \texttt{inversion \dots\ as} variant of \texttt{inversion} - generally behaves in a slightly more expectable way than - \texttt{inversion} (no artificial duplication of some hypotheses - referring to other hypotheses) To take benefit of these - improvements, it is enough to use \texttt{inversion \dots\ as []}, - letting the names being finally chosen by {\Coq}. - -\begin{coq_eval} -Require Import List. -\end{coq_eval} - -\begin{coq_example} -Inductive contains0 : list nat -> Prop := - | in_hd : forall l, contains0 (0 :: l) - | in_tl : forall l b, contains0 l -> contains0 (b :: l). -Goal forall l:list nat, contains0 (1 :: l) -> contains0 l. -intros l H; inversion H as [ | l' p Hl' [Heqp Heql'] ]. -\end{coq_example} - -\begin{coq_eval} -Abort. -\end{coq_eval} - -\item \texttt{inversion {\num} as \intropattern} - - This allows naming the hypotheses introduced by - \texttt{inversion \num} in the context. - -\item \tacindex{inversion\_clear \dots\ as} \texttt{inversion\_clear - {\ident} as \intropattern} - - This allows naming the hypotheses introduced by - \texttt{inversion\_clear} in the context. Notice that hypothesis - names can be provided as if \texttt{inversion} were called, even - though the \texttt{inversion\_clear} will eventually erase the - hypotheses. - -\item \tacindex{inversion \dots\ in} \texttt{inversion {\ident} - in \ident$_1$ \dots\ \ident$_n$} - - Let \ident$_1$ \dots\ \ident$_n$, be identifiers in the local context. This - tactic behaves as generalizing \ident$_1$ \dots\ \ident$_n$, and - then performing \texttt{inversion}. - -\item \tacindex{inversion \dots\ as \dots\ in} \texttt{inversion - {\ident} as {\intropattern} in \ident$_1$ \dots\ - \ident$_n$} - - This allows naming the hypotheses introduced in the context by - \texttt{inversion {\ident} in \ident$_1$ \dots\ \ident$_n$}. 
- -\item \tacindex{inversion\_clear \dots\ in} \texttt{inversion\_clear - {\ident} in \ident$_1$ \dots\ \ident$_n$} - - Let \ident$_1$ \dots\ \ident$_n$, be identifiers in the local context. This - tactic behaves as generalizing \ident$_1$ \dots\ \ident$_n$, and - then performing {\tt inversion\_clear}. - -\item \tacindex{inversion\_clear \dots\ as \dots\ in} - \texttt{inversion\_clear {\ident} as {\intropattern} - in \ident$_1$ \dots\ \ident$_n$} - - This allows naming the hypotheses introduced in the context by - \texttt{inversion\_clear {\ident} in \ident$_1$ \dots\ \ident$_n$}. - -\item \tacindex{dependent inversion} \texttt{dependent inversion \ident} - - That must be used when \ident\ appears in the current goal. It acts - like \texttt{inversion} and then substitutes \ident\ for the - corresponding term in the goal. - -\item \tacindex{dependent inversion \dots\ as } \texttt{dependent - inversion {\ident} as \intropattern} - - This allows naming the hypotheses introduced in the context by - \texttt{dependent inversion} {\ident}. - -\item \tacindex{dependent inversion\_clear} \texttt{dependent - inversion\_clear \ident} - - Like \texttt{dependent inversion}, except that {\ident} is cleared - from the local context. - -\item \tacindex{dependent inversion\_clear \dots\ as} - \texttt{dependent inversion\_clear {\ident} as \intropattern} - - This allows naming the hypotheses introduced in the context by - \texttt{dependent inversion\_clear} {\ident}. - -\item \tacindex{dependent inversion \dots\ with} \texttt{dependent - inversion {\ident} with \term} - - This variant allows you to specify the generalization of the goal. It - is useful when the system fails to generalize the goal automatically. If - {\ident} has type $(I~\vec{t})$ and $I$ has type - $\forall (\vec{x}:\vec{T}), s$, then \term~ must be of type - $I:\forall (\vec{x}:\vec{T}), I~\vec{x}\to s'$ where $s'$ is the - type of the goal. 
- -\item \tacindex{dependent inversion \dots\ as \dots\ with} - \texttt{dependent inversion {\ident} as {\intropattern} - with \term} - - This allows naming the hypotheses introduced in the context by - \texttt{dependent inversion {\ident} with \term}. - -\item \tacindex{dependent inversion\_clear \dots\ with} - \texttt{dependent inversion\_clear {\ident} with \term} - - Like \texttt{dependent inversion \dots\ with} but clears {\ident} from - the local context. - -\item \tacindex{dependent inversion\_clear \dots\ as \dots\ with} - \texttt{dependent inversion\_clear {\ident} as - {\intropattern} with \term} - - This allows naming the hypotheses introduced in the context by - \texttt{dependent inversion\_clear {\ident} with \term}. - -\item \tacindex{simple inversion} \texttt{simple inversion \ident} - - It is a very primitive inversion tactic that derives all the necessary - equalities but it does not simplify the constraints as - \texttt{inversion} does. - -\item \tacindex{simple inversion \dots\ as} \texttt{simple inversion - {\ident} as \intropattern} - - This allows naming the hypotheses introduced in the context by - \texttt{simple inversion}. - -\item \tacindex{inversion \dots\ using} \texttt{inversion {\ident} - using \ident$'$} - - Let {\ident} have type $(I~\vec{t})$ ($I$ an inductive - predicate) in the local context, and \ident$'$ be a (dependent) inversion - lemma. Then, this tactic refines the current goal with the specified - lemma. - -\item \tacindex{inversion \dots\ using \dots\ in} \texttt{inversion - {\ident} using \ident$'$ in \ident$_1$\dots\ \ident$_n$} - - This tactic behaves as generalizing \ident$_1$\dots\ \ident$_n$, - then doing \texttt{inversion {\ident} using \ident$'$}. 
- -\item \tacindex{inversion\_sigma} \texttt{inversion\_sigma} - - This tactic turns equalities of dependent pairs (e.g., - \texttt{existT P x p = existT P y q}, frequently left over by - \texttt{inversion} on a dependent type family) into pairs of - equalities (e.g., a hypothesis \texttt{H : x = y} and a hypothesis - of type \texttt{rew H in p = q}); these hypotheses can subsequently - be simplified using \texttt{subst}, without ever invoking any kind - of axiom asserting uniqueness of identity proofs. If you want to - explicitly specify the hypothesis to be inverted, or name the - generated hypotheses, you can invoke \texttt{induction H as [H1 H2] - using eq\_sigT\_rect}. This tactic also works for \texttt{sig}, - \texttt{sigT2}, and \texttt{sig2}, and there are similar - \texttt{eq\_sig\emph{*}\_rect} induction lemmas. - -\end{Variants} - -\firstexample -\example{Non-dependent inversion} -\label{inversion-examples} - -Let us consider the relation \texttt{Le} over natural numbers and the -following variables: - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\begin{coq_example*} -Inductive Le : nat -> nat -> Set := - | LeO : forall n:nat, Le 0 n - | LeS : forall n m:nat, Le n m -> Le (S n) (S m). -Variable P : nat -> nat -> Prop. -Variable Q : forall n m:nat, Le n m -> Prop. -\end{coq_example*} - -Let us consider the following goal: - -\begin{coq_eval} -Lemma ex : forall n m:nat, Le (S n) m -> P n m. -intros. -\end{coq_eval} - -\begin{coq_example} -Show. -\end{coq_example} - -To prove the goal, we may need to reason by cases on \texttt{H} and to -derive that \texttt{m} is necessarily of -the form $(S~m_0)$ for certain $m_0$ and that $(Le~n~m_0)$. -Deriving these conditions corresponds to prove that the -only possible constructor of \texttt{(Le (S n) m)} is -\texttt{LeS} and that we can invert the -\texttt{->} in the type of \texttt{LeS}. 
-This inversion is possible because \texttt{Le} is the smallest set closed by -the constructors \texttt{LeO} and \texttt{LeS}. - -\begin{coq_example} -inversion_clear H. -\end{coq_example} - -Note that \texttt{m} has been substituted in the goal for \texttt{(S m0)} -and that the hypothesis \texttt{(Le n m0)} has been added to the -context. - -Sometimes it is -interesting to have the equality \texttt{m=(S m0)} in the -context to use it after. In that case we can use \texttt{inversion} that -does not clear the equalities: - -\begin{coq_eval} -Undo. -\end{coq_eval} - -\begin{coq_example} -inversion H. -\end{coq_example} - -\begin{coq_eval} -Abort. -\end{coq_eval} - -\example{Dependent inversion} - -Let us consider the following goal: - -\begin{coq_eval} -Lemma ex_dep : forall (n m:nat) (H:Le (S n) m), Q (S n) m H. -intros. -\end{coq_eval} - -\begin{coq_example} -Show. -\end{coq_example} - -As \texttt{H} occurs in the goal, we may want to reason by cases on its -structure and so, we would like inversion tactics to -substitute \texttt{H} by the corresponding term in constructor form. -Neither \texttt{Inversion} nor {\tt Inversion\_clear} make such a -substitution. -To have such a behavior we use the dependent inversion tactics: - -\begin{coq_example} -dependent inversion_clear H. -\end{coq_example} - -Note that \texttt{H} has been substituted by \texttt{(LeS n m0 l)} and -\texttt{m} by \texttt{(S m0)}. - -\example{Using \texorpdfstring{\texttt{inversion\_sigma}}{inversion\_sigma}} - -Let us consider the following inductive type of length-indexed lists, -and a lemma about inverting equality of \texttt{cons}: - -\begin{coq_eval} -Reset Initial. -Set Printing Compact Contexts. -\end{coq_eval} - -\begin{coq_example*} -Require Coq.Logic.Eqdep_dec. - -Inductive vec A : nat -> Type := -| nil : vec A O -| cons {n} (x : A) (xs : vec A n) : vec A (S n). - -Lemma invert_cons : forall A n x xs y ys, - @cons A n x xs = @cons A n y ys - -> xs = ys. -Proof. 
-
-\end{coq_example*}
-
-\begin{coq_example}
-intros A n x xs y ys H.
-\end{coq_example}
-
-After performing \texttt{inversion}, we are left with an equality of
-\texttt{existT}s:
-
-\begin{coq_example}
-inversion H.
-\end{coq_example}
-
-We can turn this equality into a usable form with
-\texttt{inversion\_sigma}:
-
-\begin{coq_example}
-inversion_sigma.
-\end{coq_example}
-
-To finish cleaning up the proof, we will need to use the fact
-that all proofs of \texttt{n = n} for \texttt{n} a \texttt{nat} are
-\texttt{eq\_refl}:
-
-\begin{coq_example}
-let H := match goal with H : n = n |- _ => H end in
-pose proof (Eqdep_dec.UIP_refl_nat _ H); subst H.
-simpl in *.
-\end{coq_example}
-
-Finally, we can finish the proof:
-
-\begin{coq_example}
-assumption.
-Qed.
-\end{coq_example}
-
-\subsection{\tt fix {\ident} {\num}}
-\tacindex{fix}
-\label{tactic:fix}
-
-This tactic is a primitive tactic to start a proof by induction. In
-general, it is easier to rely on higher-level induction tactics such
-as the ones described in Section~\ref{Tac-induction}.
-
-In the syntax of the tactic, the identifier {\ident} is the name given
-to the induction hypothesis. The natural number {\num} tells on which
-premise of the current goal the induction acts, starting
-from 1, counting both dependent and non dependent
-products, but skipping local definitions. Especially, the current
-lemma must be composed of at least {\num} products.
-
-Like in a {\tt fix} expression, the induction
-hypotheses have to be used on structurally smaller arguments.
-The verification that inductive proof arguments are correct is done
-only at the time of registering the lemma in the environment. To know
-if the use of induction hypotheses is correct at some
-time of the interactive development of a proof, use the command {\tt
-  Guarded} (see Section~\ref{Guarded}). 
-
-\begin{Variants}
-  \item {\tt fix \ident$_1$ {\num} with ( \ident$_2$
-    \nelistnosep{\binder$_2$} \zeroone{\{ struct \ident$'_2$
-      \}} :~\type$_2$ ) \dots\ ( \ident$_n$
-    \nelistnosep{\binder$_n$} \zeroone{\{ struct \ident$'_n$ \}} :~\type$_n$ )}
-
-This starts a proof by mutual induction. The statements to be
-simultaneously proved are respectively {\tt forall}
-  \nelistnosep{{\binder}$_2$}{\tt ,} {\type}$_2$, {\ldots}, {\tt forall}
-  \nelistnosep{{\binder}$_n$}{\tt ,} {\type}$_n$. The identifiers
-{\ident}$_1$ {\ldots} {\ident}$_n$ are the names of the induction
-hypotheses. The identifiers {\ident}$'_2$ {\ldots} {\ident}$'_n$ are the
-respective names of the premises on which the induction is performed
-in the statements to be simultaneously proved (if not given, the
-system tries to guess itself what they are).
-
-\end{Variants}
-
-\subsection{\tt cofix \ident}
-\tacindex{cofix}
-\label{tactic:cofix}
-
-This tactic starts a proof by coinduction. The identifier {\ident} is
-the name given to the coinduction hypothesis. Like in a {\tt cofix}
-expression, the use of coinduction hypotheses has to be guarded by a
-constructor. The verification that the use of co-inductive hypotheses
-is correct is done only at the time of registering the lemma in the
-environment. To know if the use of coinduction hypotheses is correct
-at some time of the interactive development of a proof, use the
-command {\tt Guarded} (see Section~\ref{Guarded}).
-
-
-\begin{Variants}
-  \item {\tt cofix \ident$_1$ with ( \ident$_2$
-    \nelistnosep{\binder$_2$} :~\type$_2$ ) \dots\ (
-    \ident$_n$ \nelistnosep{\binder$_n$} :~\type$_n$ )}
-
-This starts a proof by mutual coinduction. The statements to be
-simultaneously proved are respectively {\tt forall}
-\nelistnosep{{\binder}$_2$}{\tt ,} {\type}$_2$, {\ldots}, {\tt forall}
- \nelistnosep{{\binder}$_n$}{\tt ,} {\type}$_n$. The identifiers
-  {\ident}$_1$ {\ldots} {\ident}$_n$ are the names of the
-  coinduction hypotheses. 
-
-\end{Variants}
-
-\section{Rewriting expressions}
-
-
-These tactics use the equality {\tt eq:forall A:Type, A->A->Prop}
-defined in file {\tt Logic.v} (see Section~\ref{Equality}). The
-notation for {\tt eq}~$T~t~u$ is simply {\tt $t$=$u$} dropping the
-implicit type of $t$ and $u$.
-
-\subsection{\tt rewrite \term}
-\label{rewrite}
-\tacindex{rewrite}
-
-This tactic applies to any goal. The type of {\term}
-must have the form
-
-\texttt{forall (x$_1$:A$_1$) \dots\ (x$_n$:A$_n$),} \texttt{eq} \term$_1$ \term$_2$.
-
-\noindent where \texttt{eq} is the Leibniz equality or a registered
-setoid equality.
-
-\noindent Then {\tt rewrite \term} finds the first subterm matching
-\term$_1$ in the goal, resulting in instances \term$_1'$ and \term$_2'$
-and then replaces every occurrence of \term$_1'$ by \term$_2'$.
-Hence, some of the variables x$_i$ are
-solved by unification, and some of the types \texttt{A}$_1$, \dots,
-\texttt{A}$_n$ become new subgoals.
-
-% \Rem In case the type of
-% \term$_1$ contains occurrences of variables bound in the
-% type of \term, the tactic tries first to find a subterm of the goal
-% which matches this term in order to find a closed instance \term$'_1$
-% of \term$_1$, and then all instances of \term$'_1$ will be replaced.
-
-\begin{ErrMsgs}
-\item \errindex{The term provided does not end with an equation}
-
-\item \errindex{Tactic generated a subgoal identical to the original goal}
-
-This happens if \term$_1$ does not occur in the goal.
-\end{ErrMsgs}
-
-\begin{Variants}
-\item {\tt rewrite -> \term}\tacindex{rewrite ->}
-
-  Is equivalent to {\tt rewrite \term}
-
-\item {\tt rewrite <- \term}\tacindex{rewrite <-}
-
-  Uses the equality \term$_1${\tt=}\term$_2$ from right to left
-
-\item {\tt rewrite {\term} in \nterm{clause}}
-  \tacindex{rewrite \dots\ in}
-
-  Analogous to {\tt rewrite {\term}} but rewriting is done following
-  \nterm{clause} (similarly to \ref{Conversion-tactics}). 
For - instance: - \begin{itemize} - \item \texttt{rewrite H in H1} will rewrite \texttt{H} in the hypothesis - \texttt{H1} instead of the current goal. - \item \texttt{rewrite H in H1 at 1, H2 at - 2 |- *} means \texttt{rewrite H; rewrite H in H1 at 1; - rewrite H in H2 at - 2}. In particular a failure will happen if any of - these three simpler tactics fails. - \item \texttt{rewrite H in * |- } will do \texttt{rewrite H in - H$_i$} for all hypotheses \texttt{H$_i$} different from \texttt{H}. A success will happen - as soon as at least one of these simpler tactics succeeds. - \item \texttt{rewrite H in *} is a combination of \texttt{rewrite H} - and \texttt{rewrite H in * |-} that succeeds if at - least one of these two tactics succeeds. - \end{itemize} - Orientation {\tt ->} or {\tt <-} can be - inserted before the term to rewrite. - -\item {\tt rewrite {\term} at {\occlist}} - \tacindex{rewrite \dots\ at} - - Rewrite only the given occurrences of \term$_1'$. Occurrences are - specified from left to right as for \texttt{pattern} (\S - \ref{pattern}). The rewrite is always performed using setoid - rewriting, even for Leibniz's equality, so one has to - \texttt{Import Setoid} to use this variant. - -\item {\tt rewrite {\term} by {\tac}} - \tacindex{rewrite \dots\ by} - - Use {\tac} to completely solve the side-conditions arising from the - rewrite. - -\item {\tt rewrite \term$_1$ , \mbox{\dots} , \term$_n$} - - Is equivalent to the $n$ successive tactics {\tt rewrite $\term_1$} - up to {\tt rewrite $\term_n$}, each one working on the first subgoal - generated by the previous one. - Orientation {\tt ->} or {\tt <-} can be - inserted before each term to rewrite. One unique \nterm{clause} - can be added at the end after the keyword {\tt in}; it will - then affect all rewrite operations. 
- -\item In all forms of {\tt rewrite} described above, a term to rewrite - can be immediately prefixed by one of the following modifiers: - \begin{itemize} - \item {\tt ?} : the tactic {\tt rewrite ?$\term$} performs the - rewrite of $\term$ as many times as possible (perhaps zero time). - This form never fails. - \item {\tt $n$?} : works similarly, except that it will do at most - $n$ rewrites. - \item {\tt !} : works as {\tt ?}, except that at least one rewrite - should succeed, otherwise the tactic fails. - \item {\tt $n$!} (or simply {\tt $n$}) : precisely $n$ rewrites - of $\term$ will be done, leading to failure if these $n$ rewrites are not possible. - \end{itemize} - -\item {\tt erewrite {\term}\tacindex{erewrite}} - -This tactic works as {\tt rewrite {\term}} but turning unresolved -bindings into existential variables, if any, instead of failing. It has -the same variants as {\tt rewrite} has. - -\end{Variants} - -\subsection{\tt replace \term$_1$ with \term$_2$} -\label{tactic:replace} -\tacindex{replace \dots\ with} - -This tactic applies to any goal. It replaces all free occurrences of -{\term$_1$} in the current goal with {\term$_2$} and generates the -equality {\term$_2$}{\tt =}{\term$_1$} as a subgoal. This equality is -automatically solved if it occurs among the assumption, or if its -symmetric form occurs. It is equivalent to {\tt cut -\term$_2$=\term$_1$; [intro H{\sl n}; rewrite <- H{\sl n}; clear H{\sl -n}| assumption || symmetry; try assumption]}. - -\begin{ErrMsgs} -\item \errindex{terms do not have convertible types} -\end{ErrMsgs} - -\begin{Variants} - -\item {\tt replace \term$_1$ with \term$_2$ by \tac} - - This acts as {\tt replace \term$_1$ with \term$_2$} but applies {\tt \tac} - to solve the generated subgoal {\tt \term$_2$=\term$_1$}. - -\item {\tt replace {\term}} - - Replaces {\term} with {\term'} using the - first assumption whose type has the form {\tt \term=\term'} or {\tt - \term'=\term}. 
- -\item {\tt replace -> {\term}} - - Replaces {\term} with {\term'} using the - first assumption whose type has the form {\tt \term=\term'} - -\item {\tt replace <- {\term}} - - Replaces {\term} with {\term'} using the - first assumption whose type has the form {\tt \term'=\term} - -\item {\tt replace {\term$_1$} with {\term$_2$} in \nterm{clause} }\\ - {\tt replace {\term$_1$} with {\term$_2$} in \nterm{clause} by \tac }\\ - {\tt replace {\term} in \nterm{clause}}\\ - {\tt replace -> {\term} in \nterm{clause}}\\ - {\tt replace <- {\term} in \nterm{clause}} - - Acts as before but the replacements take place in - \nterm{clause}~(see Section~\ref{Conversion-tactics}) and not only - in the conclusion of the goal. - The \nterm{clause} argument must not contain any \texttt{type of} nor \texttt{value of}. - -\item {\tt cutrewrite <- (\term$_1$ = \term$_2$)} -%\label{cutrewrite} -\tacindex{cutrewrite} - -This tactic is deprecated. It acts like {\tt replace {\term$_2$} with - {\term$_1$}}, or, equivalently as {\tt enough} (\term$_1$ = -\term$_2$) {\tt as <-}. - -\item {\tt cutrewrite -> (\term$_1$ = \term$_2$)} -%\label{cutrewrite} -\tacindex{cutrewrite} - -This tactic is deprecated. It can be replaced by {\tt enough} -(\term$_1$ = \term$_2$) {\tt as ->}. - -\end{Variants} - -\subsection{\tt subst \ident} -\tacindex{subst} -\optindex{Regular Subst Tactic} - -This tactic applies to a goal that has \ident\ in its context and (at -least) one hypothesis, say $H$, of type {\tt \ident} = $t$ or $t$ -{\tt = \ident} with {\ident} not occurring in $t$. Then it replaces -{\ident} by $t$ everywhere in the goal (in the hypotheses and in the -conclusion) and clears {\ident} and $H$ from the context. - -If {\ident} is a local definition of the form {\ident} := $t$, it is -also unfolded and cleared. - -\Rem -When several hypotheses have the form {\tt \ident} = $t$ or {\tt - $t$ = \ident}, the first one is used. 
- -\Rem -If $H$ is itself dependent in the goal, it is replaced by the -proof of reflexivity of equality. - -\begin{Variants} - \item {\tt subst \ident$_1$ {\dots} \ident$_n$} - - This is equivalent to {\tt subst \ident$_1$; \dots; subst \ident$_n$}. - \item {\tt subst} - - This applies {\tt subst} repeatedly from top to bottom to all - identifiers of the context for which an equality of the form {\tt - \ident} = $t$ or $t$ {\tt = \ident} or {\tt \ident} := $t$ exists, with - {\ident} not occurring in $t$. - -\noindent {\bf Remark: } The behavior of {\tt subst} can be controlled -using option {\tt Set Regular Subst Tactic}. When this option is -activated, {\tt subst} also deals with the following corner cases: -\begin{itemize} -\item A context with ordered hypotheses {\tt \ident$_1$ = \ident$_2$} - and {\tt \ident$_1$ = $t$}, or {$t'$ = \ident$_1$} with $t'$ not a - variable, and no other hypotheses of the form {\tt \ident$_2$ = $u$} - or {\tt $u$ = \ident$_2$}; without the option, a second call to {\tt - subst} would be necessary to replace {\ident$_2$} by $t$ or $t'$ - respectively. - -\item The presence of a recursive equation which without the option - would be a cause of failure of {\tt subst}. - -\item A context with cyclic dependencies as with hypotheses {\tt - \ident$_1$ = f~\ident$_2$} and {\tt \ident$_2$ = g~\ident$_1$} which - without the option would be a cause of failure of {\tt subst}. -\end{itemize} -Additionally, it prevents a local definition such as {\tt \ident} := - $t$ to be unfolded which otherwise it would exceptionally unfold in -configurations containing hypotheses of the form {\tt {\ident} = $u$}, -or {\tt $u'$ = \ident} with $u'$ not a variable. - -Finally, it preserves the initial order of hypotheses, which without -the option it may break. - -The option is on by default. - -\end{Variants} - -\subsection{\tt stepl \term} -\tacindex{stepl} - -This tactic is for chaining rewriting steps. 
It assumes a goal of the -form ``$R$ {\term}$_1$ {\term}$_2$'' where $R$ is a binary relation -and relies on a database of lemmas of the form {\tt forall} $x$ $y$ -$z$, $R$ $x$ $y$ {\tt ->} $eq$ $x$ $z$ {\tt ->} $R$ $z$ $y$ where $eq$ -is typically a setoid equality. The application of {\tt stepl {\term}} -then replaces the goal by ``$R$ {\term} {\term}$_2$'' and adds a new -goal stating ``$eq$ {\term} {\term}$_1$''. - -Lemmas are added to the database using the command -\comindex{Declare Left Step} -\begin{quote} -{\tt Declare Left Step {\term}.} -\end{quote} - -The tactic is especially useful for parametric setoids which are not -accepted as regular setoids for {\tt rewrite} and {\tt - setoid\_replace} (see Chapter~\ref{setoids}). - -\begin{Variants} -\item{\tt stepl {\term} by {\tac}} - -This applies {\tt stepl {\term}} then applies {\tac} to the second goal. - -\item{\tt stepr {\term}}\\ - {\tt stepr {\term} by {\tac}}\tacindex{stepr} - -This behaves as {\tt stepl} but on the right-hand-side of the binary relation. -Lemmas are expected to be of the form -``{\tt forall} $x$ $y$ -$z$, $R$ $x$ $y$ {\tt ->} $eq$ $y$ $z$ {\tt ->} $R$ $x$ $z$'' -and are registered using the command -\comindex{Declare Right Step} -\begin{quote} -{\tt Declare Right Step {\term}.} -\end{quote} -\end{Variants} - -\subsection{\tt change \term} -\tacindex{change} -\label{change} - -This tactic applies to any goal. It implements the rule -``Conv''\index{Typing rules!Conv} given in Section~\ref{Conv}. {\tt - change U} replaces the current goal \T\ with \U\ providing that -\U\ is well-formed and that \T\ and \U\ are convertible. - -\begin{ErrMsgs} -\item \errindex{Not convertible} -\end{ErrMsgs} - -\tacindex{change \dots\ in} -\begin{Variants} -\item {\tt change \term$_1$ with \term$_2$} - - This replaces the occurrences of \term$_1$ by \term$_2$ in the - current goal. The terms \term$_1$ and \term$_2$ must be - convertible. 
- -\item {\tt change \term$_1$ at \num$_1$ \dots\ \num$_i$ with \term$_2$} - - This replaces the occurrences numbered \num$_1$ \dots\ \num$_i$ of - \term$_1$ by \term$_2$ in the current goal. - The terms \term$_1$ and \term$_2$ must be convertible. - - \ErrMsg {\tt Too few occurrences} - -\item {\tt change {\term} in {\ident}} - -\item {\tt change \term$_1$ with \term$_2$ in {\ident}} - -\item {\tt change \term$_1$ at \num$_1$ \dots\ \num$_i$ with \term$_2$ in - {\ident}} - - This applies the {\tt change} tactic not to the goal but to the - hypothesis {\ident}. - -\end{Variants} - -\SeeAlso \ref{Conversion-tactics} - - -\section{Performing computations -\index{Conversion tactics} -\label{Conversion-tactics}} - -This set of tactics implements different specialized usages of the -tactic \texttt{change}. - -All conversion tactics (including \texttt{change}) can be -parameterized by the parts of the goal where the conversion can -occur. This is done using \emph{goal clauses} which consists in a list -of hypotheses and, optionally, of a reference to the conclusion of the -goal. For defined hypothesis it is possible to specify if the -conversion should occur on the type part, the body part or both -(default). - -\index{Clauses} -\index{Goal clauses} -Goal clauses are written after a conversion tactic (tactics -\texttt{set}~\ref{tactic:set}, \texttt{rewrite}~\ref{rewrite}, -\texttt{replace}~\ref{tactic:replace} and -\texttt{autorewrite}~\ref{tactic:autorewrite} also use goal clauses) and -are introduced by the keyword \texttt{in}. If no goal clause is provided, -the default is to perform the conversion only in the conclusion. 
- -The syntax and description of the various goal clauses is the following: -\begin{description} -\item[]\texttt{in {\ident}$_1$ $\ldots$ {\ident}$_n$ |- } only in hypotheses {\ident}$_1$ - \ldots {\ident}$_n$ -\item[]\texttt{in {\ident}$_1$ $\ldots$ {\ident}$_n$ |- *} in hypotheses {\ident}$_1$ \ldots - {\ident}$_n$ and in the conclusion -\item[]\texttt{in * |-} in every hypothesis -\item[]\texttt{in *} (equivalent to \texttt{in * |- *}) everywhere -\item[]\texttt{in (type of {\ident}$_1$) (value of {\ident}$_2$) $\ldots$ |-} in - type part of {\ident}$_1$, in the value part of {\ident}$_2$, etc. -\end{description} - -For backward compatibility, the notation \texttt{in}~{\ident}$_1$\ldots {\ident}$_n$ -performs the conversion in hypotheses {\ident}$_1$\ldots {\ident}$_n$. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%voir reduction__conv_x : histoires d'univers. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -\subsection{{\tt cbv \flag$_1$ \mbox{\dots} \flag$_n$}, {\tt lazy \flag$_1$ -\mbox{\dots} \flag$_n$}, and \tt compute} -\tacindex{cbv} -\tacindex{lazy} -\tacindex{compute} -\tacindex{vm\_compute}\label{vmcompute} -\tacindex{native\_compute}\label{nativecompute} - -These parameterized reduction tactics apply to any goal and perform -the normalization of the goal according to the specified flags. In -correspondence with the kinds of reduction considered in \Coq\, namely -$\beta$ (reduction of functional application), $\delta$ (unfolding of -transparent constants, see \ref{Transparent}), $\iota$ (reduction of -pattern-matching over a constructed term, and unfolding of {\tt fix} -and {\tt cofix} expressions) and $\zeta$ (contraction of local -definitions), the flags are either {\tt beta}, {\tt delta}, -{\tt match}, {\tt fix}, {\tt cofix}, {\tt iota} or {\tt zeta}. -The {\tt iota} flag is a shorthand for {\tt match}, {\tt fix} and {\tt cofix}. 
-
-The {\tt delta} flag itself can be refined into {\tt
-delta [\qualid$_1$\ldots\qualid$_k$]} or {\tt delta
--[\qualid$_1$\ldots\qualid$_k$]}, restricting in the first case the
-constants to unfold to the constants listed, and restricting in the
-second case the constant to unfold to all but the ones explicitly
-mentioned. Notice that the {\tt delta} flag does not apply to
-variables bound by a let-in construction inside the term itself (use
-here the {\tt zeta} flag). In any case, opaque constants are not
-unfolded (see Section~\ref{Opaque}).
-
-Normalization according to the flags is done by first evaluating the
-head of the expression into a {\em weak-head} normal form, i.e. until
-the evaluation is blocked by a variable (or an opaque constant, or an
-axiom), as e.g. in {\tt x\;u$_1$\;...\;u$_n$}, or {\tt match x with
-  ... end}, or {\tt (fix f x \{struct x\} := ...) x}, or is a
-constructed form (a $\lambda$-expression, a constructor, a cofixpoint,
-an inductive type, a product type, a sort), or is a redex that the
-flags prevent from being reduced. Once a weak-head normal form is obtained,
-subterms are recursively reduced using the same strategy.
-
-Reduction to weak-head normal form can be done using two strategies:
-{\em lazy} ({\tt lazy} tactic), or {\em call-by-value} ({\tt cbv}
-tactic). The lazy strategy is a call-by-need strategy, with sharing of
-reductions: the arguments of a function call are weakly evaluated only
-when necessary, and if an argument is used several times then it is
-weakly computed only once. This reduction is efficient for reducing
-expressions with dead code. For instance, the proofs of a proposition
-{\tt exists~$x$. $P(x)$} reduce to a pair of a witness $t$, and a
-proof that $t$ satisfies the predicate $P$. Most of the time, $t$ may
-be computed without computing the proof of $P(t)$, thanks to the lazy
-strategy. 
-
-The call-by-value strategy is the one used in ML languages: the
-arguments of a function call are systematically weakly evaluated
-first. Although the lazy strategy always performs fewer reductions than
-the call-by-value strategy, the latter is generally more efficient for
-evaluating purely computational expressions (i.e. with little dead code).
-
-\begin{Variants}
-\item {\tt compute} \tacindex{compute}\\
-  {\tt cbv}
-
-  These are synonyms for {\tt cbv beta delta iota zeta}.
-
-\item {\tt lazy}
-
-  This is a synonym for {\tt lazy beta delta iota zeta}.
-
-\item {\tt compute [\qualid$_1$\ldots\qualid$_k$]}\\
-  {\tt cbv [\qualid$_1$\ldots\qualid$_k$]}
-
-  These are synonyms of {\tt cbv beta delta
-  [\qualid$_1$\ldots\qualid$_k$] iota zeta}.
-
-\item {\tt compute -[\qualid$_1$\ldots\qualid$_k$]}\\
-  {\tt cbv -[\qualid$_1$\ldots\qualid$_k$]}
-
-  These are synonyms of {\tt cbv beta delta
-  -[\qualid$_1$\ldots\qualid$_k$] iota zeta}.
-
-\item {\tt lazy [\qualid$_1$\ldots\qualid$_k$]}\\
-  {\tt lazy -[\qualid$_1$\ldots\qualid$_k$]}
-
-  These are respectively synonyms of {\tt lazy beta delta
-  [\qualid$_1$\ldots\qualid$_k$] iota zeta} and {\tt lazy beta delta
-  -[\qualid$_1$\ldots\qualid$_k$] iota zeta}.
-
-\item {\tt vm\_compute} \tacindex{vm\_compute}
-
-  This tactic evaluates the goal using the optimized call-by-value evaluation
-  bytecode-based virtual machine described in
-  \cite{CompiledStrongReduction}. This algorithm is dramatically more efficient
-  than the algorithm used for the {\tt cbv} tactic, but it cannot be
-  fine-tuned. It is especially interesting for full evaluation of algebraic
-  objects. This includes the case of reflection-based tactics.
-
-\item {\tt native\_compute} \tacindex{native\_compute} \optindex{NativeCompute Profiling}
-
-  This tactic evaluates the goal by compilation to \ocaml{} as described in
-  \cite{FullReduction}. If \Coq{} is running in native code, it can be typically
-  two to five times faster than {\tt vm\_compute}. 
Note however that the
-  compilation cost is higher, so it is worth using only for intensive
-  computations.
-
-  On Linux, if you have the {\tt perf} profiler installed, you can profile {\tt native\_compute} evaluations.
-  The command
-  \begin{quote}
-  {\tt Set NativeCompute Profiling}
-  \end{quote}
-  enables profiling. Use the command
-  \begin{quote}
-  {\tt Set NativeCompute Profile Filename \str}
-  \end{quote}
-  to specify the profile output; the default is {\tt native\_compute\_profile.data}. The actual filename used
-  will contain extra characters to avoid overwriting an existing file; that filename is reported to the user. That means
-  you can individually profile multiple uses of {\tt native\_compute} in a script. From the Linux command line, run {\tt perf report} on
-  the profile file to see the results. Consult the {\tt perf} documentation for more details.
-
-\end{Variants}
-
-\Rem The following option makes {\tt cbv} (and its derivative {\tt
-  compute}) print information about the constants it encounters and
-the unfolding decisions it makes.
-\begin{quote}
-  \optindex{Debug Cbv}
-  {\tt Set Debug Cbv}
-\end{quote}
-
-% Obsolete? Anyway not very important message
-%\begin{ErrMsgs}
-%\item \errindex{Delta must be specified before}
-%
-% A list of constants appeared before the {\tt delta} flag.
-%\end{ErrMsgs}
-
-
-\subsection{\tt red}
-\tacindex{red}
-
-This tactic applies to a goal that has the form {\tt
-  forall (x:T1)\dots(xk:Tk), t} with {\tt t}
-$\beta\iota\zeta$-reducing to {\tt c t1 \dots\ tn} and {\tt c} a
-constant. If
-{\tt c} is transparent then it replaces {\tt c} with its definition
-(say {\tt t}) and then reduces {\tt (t t1 \dots\ tn)} according to
-$\beta\iota\zeta$-reduction rules.
-
-\begin{ErrMsgs}
-\item \errindex{Not reducible}
-\end{ErrMsgs}
-
-\subsection{\tt hnf}
-\tacindex{hnf}
-
-This tactic applies to any goal. It replaces the current goal with its
-head normal form according to the $\beta\delta\iota\zeta$-reduction
-rules, i.e. 
it reduces the head of the goal until it becomes a -product or an irreducible term. All inner $\beta\iota$-redexes are also -reduced. - -\Example -The term \verb+forall n:nat, (plus (S n) (S n))+ is not reduced by {\tt hnf}. - -\Rem The $\delta$ rule only applies to transparent constants -(see Section~\ref{Opaque} on transparency and opacity). - -\subsection{\texorpdfstring{\texttt{cbn}}{cbn} and \texorpdfstring{\texttt{simpl}}{simpl}} -\tacindex{cbn} \tacindex{simpl} - -These tactics apply to any goal. They try to reduce a term to -something still readable instead of fully normalizing it. They perform -a sort of strong normalization with two key differences: -\begin{itemize} -\item They unfold a constant if and only if it leads to a - $\iota$-reduction, i.e. reducing a match or unfolding a fixpoint. -\item While reducing a constant unfolding to (co)fixpoints, - the tactics use the name of the - constant the (co)fixpoint comes from instead of the (co)fixpoint - definition in recursive calls. -\end{itemize} - -The \texttt{cbn} tactic is claimed to be a more principled, faster and more -predictable replacement for \texttt{simpl}. - -The \texttt{cbn} tactic accepts the same flags as \texttt{cbv} and -\texttt{lazy}. The behavior of both \texttt{simpl} and \texttt{cbn} -can be tuned using the \texttt{Arguments} vernacular command as -follows: \comindex{Arguments} -\begin{itemize} -\item -A constant can be marked to be never unfolded by \texttt{cbn} or -\texttt{simpl}: -\begin{coq_example*} -Arguments minus n m : simpl never. -\end{coq_example*} -After that command an expression like \texttt{(minus (S x) y)} is left -untouched by the tactics \texttt{cbn} and \texttt{simpl}. -\item -A constant can be marked to be unfolded only if applied to enough arguments. -The number of arguments required can be specified using -the {\tt /} symbol in the arguments list of the {\tt Arguments} vernacular -command. 
-\begin{coq_example*} -Definition fcomp A B C f (g : A -> B) (x : A) : C := f (g x). -Notation "f \o g" := (fcomp f g) (at level 50). -Arguments fcomp {A B C} f g x /. -\end{coq_example*} -After that command the expression {\tt (f \verb+\+o g)} is left untouched by -{\tt simpl} while {\tt ((f \verb+\+o g) t)} is reduced to {\tt (f (g t))}. -The same mechanism can be used to make a constant volatile, i.e. always -unfolded. -\begin{coq_example*} -Definition volatile := fun x : nat => x. -Arguments volatile / x. -\end{coq_example*} -\item -A constant can be marked to be unfolded only if an entire set of arguments -evaluates to a constructor. The {\tt !} symbol can be used to mark such -arguments. -\begin{coq_example*} -Arguments minus !n !m. -\end{coq_example*} -After that command, the expression {\tt (minus (S x) y)} is left untouched by -{\tt simpl}, while {\tt (minus (S x) (S y))} is reduced to {\tt (minus x y)}. -\item -A special heuristic to determine if a constant has to be unfolded can be -activated with the following command: -\begin{coq_example*} -Arguments minus n m : simpl nomatch. -\end{coq_example*} -The heuristic avoids to perform a simplification step that would -expose a {\tt match} construct in head position. For example the -expression {\tt (minus (S (S x)) (S y))} is simplified to -{\tt (minus (S x) y)} even if an extra simplification is possible. -\end{itemize} - -In detail, the tactic \texttt{simpl} first applies -$\beta\iota$-reduction. Then, it expands transparent constants and -tries to reduce further using $\beta\iota$-reduction. But, when no -$\iota$ rule is applied after unfolding then $\delta$-reductions are -not applied. For instance trying to use \texttt{simpl} on -\texttt{(plus n O)=n} changes nothing. - -Notice that only transparent constants whose name can be reused in the -recursive calls are possibly unfolded by \texttt{simpl}. 
For instance
-a constant defined by \texttt{plus' := plus} is possibly unfolded and
-reused in the recursive calls, but a constant such as \texttt{succ :=
-  plus (S O)} is never unfolded. This is the main difference between
-\texttt{simpl} and \texttt{cbn}. The tactic \texttt{cbn} reduces
-whether it will be able to reuse it or not: \texttt{succ t} is
-reduced to \texttt{S t}.
-
-\tacindex{simpl \dots\ in}
-\begin{Variants}
-\item {\tt cbn [\qualid$_1$\ldots\qualid$_k$]}\\
-  {\tt cbn -[\qualid$_1$\ldots\qualid$_k$]}
-
-  These are respectively synonyms of {\tt cbn beta delta
-  [\qualid$_1$\ldots\qualid$_k$] iota zeta} and {\tt cbn beta delta
-  -[\qualid$_1$\ldots\qualid$_k$] iota zeta} (see \ref{vmcompute}).
-
-\item {\tt simpl {\pattern}}
-
-  This applies {\tt simpl} only to the subterms matching {\pattern} in the
-  current goal.
-
-\item {\tt simpl {\pattern} at \num$_1$ \dots\ \num$_i$}
-
-  This applies {\tt simpl} only to the \num$_1$, \dots, \num$_i$
-  occurrences of the subterms matching {\pattern} in the current goal.
-
-  \ErrMsg {\tt Too few occurrences}
-
-\item {\tt simpl {\qualid}}\\
-  {\tt simpl {\qstring}}
-
-  This applies {\tt simpl} only to the applicative subterms whose head
-  occurrence is the unfoldable constant {\qualid} (the constant can be
-  referred to by its notation using {\qstring} if such a notation
-  exists).
-
-\item {\tt simpl {\qualid} at \num$_1$ \dots\ \num$_i$}\\
-  {\tt simpl {\qstring} at \num$_1$ \dots\ \num$_i$}\\
-
-  This applies {\tt simpl} only to the \num$_1$, \dots, \num$_i$
-  applicative subterms whose head occurrence is {\qualid} (or
-  {\qstring}).
-
-\end{Variants}
-
-\begin{quote}
-  \optindex{Debug RAKAM}
-  {\tt Set Debug RAKAM}
-\end{quote}
-This option makes {\tt cbn} print various debugging information.
-{\tt RAKAM} is the Refolding Algebraic Krivine Abstract Machine.
-
-\subsection{\tt unfold \qualid}
-\tacindex{unfold}
-\label{unfold}
-
-This tactic applies to any goal. 
The argument {\qualid} must denote a -defined transparent constant or local definition (see Sections~\ref{Basic-definitions} and~\ref{Transparent}). The tactic {\tt - unfold} applies the $\delta$ rule to each occurrence of the constant -to which {\qualid} refers in the current goal and then replaces it -with its $\beta\iota$-normal form. - -\begin{ErrMsgs} -\item {\qualid} \errindex{does not denote an evaluable constant} - -\end{ErrMsgs} - -\begin{Variants} -\item {\tt unfold {\qualid} in {\ident}} - \tacindex{unfold \dots in} - - Replaces {\qualid} in hypothesis {\ident} with its definition - and replaces the hypothesis with its $\beta\iota$ normal form. - -\item {\tt unfold {\qualid}$_1$, \dots, \qualid$_n$} - - Replaces {\em simultaneously} {\qualid}$_1$, \dots, {\qualid}$_n$ - with their definitions and replaces the current goal with its - $\beta\iota$ normal form. - -\item {\tt unfold {\qualid}$_1$ at \num$_1^1$, \dots, \num$_i^1$, -\dots,\ \qualid$_n$ at \num$_1^n$ \dots\ \num$_j^n$} - - The lists \num$_1^1$, \dots, \num$_i^1$ and \num$_1^n$, \dots, - \num$_j^n$ specify the occurrences of {\qualid}$_1$, \dots, - \qualid$_n$ to be unfolded. Occurrences are located from left to - right. - - \ErrMsg {\tt bad occurrence number of {\qualid}$_i$} - - \ErrMsg {\qualid}$_i$ {\tt does not occur} - -\item {\tt unfold {\qstring}} - - If {\qstring} denotes the discriminating symbol of a notation (e.g. {\tt - "+"}) or an expression defining a notation (e.g. \verb!"_ + _"!), and - this notation refers to an unfoldable constant, then the tactic - unfolds it. - -\item {\tt unfold {\qstring}\%{\delimkey}} - - This is variant of {\tt unfold {\qstring}} where {\qstring} gets its - interpretation from the scope bound to the delimiting key - {\delimkey} instead of its default interpretation (see - Section~\ref{scopechange}). 
- -\item {\tt unfold \qualidorstring$_1$ at \num$_1^1$, \dots, \num$_i^1$, -\dots,\ \qualidorstring$_n$ at \num$_1^n$ \dots\ \num$_j^n$} - - This is the most general form, where {\qualidorstring} is either a - {\qualid} or a {\qstring} referring to a notation. - -\end{Variants} - -\subsection{\tt fold \term} -\tacindex{fold} - -This tactic applies to any goal. The term \term\ is reduced using the {\tt red} -tactic. Every occurrence of the resulting term in the goal is then -replaced by \term. - -\begin{Variants} -\item {\tt fold} \term$_1$ \dots\ \term$_n$ - - Equivalent to {\tt fold} \term$_1${\tt;}\ldots{\tt; fold} \term$_n$. -\end{Variants} - -\subsection{\tt pattern \term} -\tacindex{pattern} -\label{pattern} - -This command applies to any goal. The argument {\term} must be a free -subterm of the current goal. The command {\tt pattern} performs -$\beta$-expansion (the inverse of $\bt$-reduction) of the current goal -(say \T) by -\begin{enumerate} -\item replacing all occurrences of {\term} in {\T} with a fresh variable -\item abstracting this variable -\item applying the abstracted goal to {\term} -\end{enumerate} - -For instance, if the current goal $T$ is expressible as $\phi(t)$ -where the notation captures all the instances of $t$ in $\phi(t)$, -then {\tt pattern $t$} transforms it into {\tt (fun x:$A$ => $\phi(${\tt -x}$)$) $t$}. This command can be used, for instance, when the tactic -{\tt apply} fails on matching. - -\begin{Variants} -\item {\tt pattern {\term} at {\num$_1$} \dots\ {\num$_n$}} - - Only the occurrences {\num$_1$} \dots\ {\num$_n$} of {\term} are - considered for $\beta$-expansion. Occurrences are located from left - to right. - -\item {\tt pattern {\term} at - {\num$_1$} \dots\ {\num$_n$}} - - All occurrences except the occurrences of indexes {\num$_1$} \dots\ - {\num$_n$} of {\term} are considered for - $\beta$-expansion. Occurrences are located from left to right. 
- -\item {\tt pattern {\term$_1$}, \dots, {\term$_m$}} - - Starting from a goal $\phi(t_1 \dots\ t_m)$, the tactic - {\tt pattern $t_1$, \dots,\ $t_m$} generates the equivalent goal {\tt - (fun (x$_1$:$A_1$) \dots\ (x$_m$:$A_m$) => $\phi(${\tt x$_1$\dots\ - x$_m$}$)$) $t_1$ \dots\ $t_m$}. If $t_i$ occurs in one of the - generated types $A_j$ these occurrences will also be considered and - possibly abstracted. - -\item {\tt pattern {\term$_1$} at {\num$_1^1$} \dots\ {\num$_{n_1}^1$}, \dots, - {\term$_m$} at {\num$_1^m$} \dots\ {\num$_{n_m}^m$}} - - This behaves as above but processing only the occurrences \num$_1^1$, - \dots, \num$_i^1$ of \term$_1$, \dots, \num$_1^m$, \dots, \num$_j^m$ - of \term$_m$ starting from \term$_m$. - -\item {\tt pattern} {\term$_1$} \zeroone{{\tt at \zeroone{-}} {\num$_1^1$} \dots\ {\num$_{n_1}^1$}} {\tt ,} \dots {\tt ,} - {\term$_m$} \zeroone{{\tt at \zeroone{-}} {\num$_1^m$} \dots\ {\num$_{n_m}^m$}} - - This is the most general syntax that combines the different variants. - -\end{Variants} - -\subsection{Conversion tactics applied to hypotheses} - -{\convtactic} {\tt in} \ident$_1$ \dots\ \ident$_n$ - -Applies the conversion tactic {\convtactic} to the -hypotheses \ident$_1$, \ldots, \ident$_n$. The tactic {\convtactic} is -any of the conversion tactics listed in this section. - -If \ident$_i$ is a local definition, then \ident$_i$ can be replaced -by (Type of \ident$_i$) to address not the body but the type of the -local definition. Example: {\tt unfold not in (Type of H1) (Type of H3).} - -\begin{ErrMsgs} -\item \errindex{No such hypothesis} : {\ident}. -\end{ErrMsgs} - - -\section{Automation} -\subsection{\tt auto} -\label{auto} -\tacindex{auto} - -This tactic implements a Prolog-like resolution procedure to solve the -current goal. It first tries to solve the goal using the {\tt - assumption} tactic, then it reduces the goal to an atomic one using -{\tt intros} and introduces the newly generated hypotheses as hints. 
-Then it looks at the list of tactics associated to the head symbol of -the goal and tries to apply one of them (starting from the tactics -with lower cost). This process is recursively applied to the generated -subgoals. - -By default, \texttt{auto} only uses the hypotheses of the current goal and the -hints of the database named {\tt core}. - -\begin{Variants} - -\item {\tt auto \num} - - Forces the search depth to be \num. The maximal search depth is 5 by - default. - -\item {\tt auto with \ident$_1$ \dots\ \ident$_n$} - - Uses the hint databases $\ident_1$ \dots\ $\ident_n$ in addition to - the database {\tt core}. See Section~\ref{Hints-databases} for the - list of pre-defined databases and the way to create or extend a - database. - -\item {\tt auto with *} - - Uses all existing hint databases. See Section~\ref{Hints-databases} - -\item \texttt{auto using} \nterm{lemma}$_1$ {\tt ,} {\ldots} {\tt ,} \nterm{lemma}$_n$ - - Uses \nterm{lemma}$_1$, \ldots, \nterm{lemma}$_n$ in addition to - hints (can be combined with the \texttt{with \ident} option). If - $lemma_i$ is an inductive type, it is the collection of its - constructors which is added as hints. - -\item {\tt info\_auto} - - Behaves like {\tt auto} but shows the tactics it uses to solve the goal. - This variant is very useful for getting a better understanding of automation, - or to know what lemmas/assumptions were used. - -\item {\tt debug auto} Behaves like {\tt auto} but shows the tactics - it tries to solve the goal, including failing paths. - -\item {\tt \zeroone{info\_}auto \zeroone{\num}} \zeroone{{\tt using} \nterm{lemma}$_1$ - {\tt ,} {\ldots} {\tt ,} \nterm{lemma}$_n$} \zeroone{{\tt with} - \ident$_1$ {\ldots} \ident$_n$} - - This is the most general form, combining the various options. - -\item {\tt trivial}\tacindex{trivial} - - This tactic is a restriction of {\tt auto} that is not recursive and - tries only hints that cost 0. Typically it solves trivial - equalities like $X=X$. 
- -\item \texttt{trivial with \ident$_1$ \dots\ \ident$_n$} - -\item \texttt{trivial with *} - -\item \texttt{trivial using} \nterm{lemma}$_1$ {\tt ,} {\ldots} {\tt ,} \nterm{lemma}$_n$ - -\item {\tt info\_trivial} - -\item {\tt debug trivial} - -\item {\tt \zeroone{info\_}trivial} \zeroone{{\tt using} \nterm{lemma}$_1$ - {\tt ,} {\ldots} {\tt ,} \nterm{lemma}$_n$} \zeroone{{\tt with} - \ident$_1$ {\ldots} \ident$_n$} - -\end{Variants} - -\Rem {\tt auto} either solves completely the goal or else leaves it -intact. \texttt{auto} and \texttt{trivial} never fail. - -\Rem The following options enable printing of informative or debug -information for the {\tt auto} and {\tt trivial} tactics: -\begin{quote} - \optindex{Info Auto} - {\tt Set Info Auto} - \optindex{Debug Auto} - {\tt Set Debug Auto} - \optindex{Info Trivial} - {\tt Set Info Trivial} - \optindex{Debug Trivial} - {\tt Set Debug Trivial} -\end{quote} - -\SeeAlso Section~\ref{Hints-databases} - -\subsection{\tt eauto} -\tacindex{eauto} -\label{eauto} - -This tactic generalizes {\tt auto}. While {\tt auto} does not try -resolution hints which would leave existential variables in the goal, -{\tt eauto} does try them (informally speaking, it uses -{\tt simple eapply} where {\tt auto} uses {\tt simple apply}). -As a consequence, {\tt eauto} can solve such a goal: - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example} -Hint Resolve ex_intro. -Goal forall P:nat -> Prop, P 0 -> exists n, P n. -eauto. -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} - -Note that {\tt ex\_intro} should be declared as a hint. - -\begin{Variants} - -\item {\tt \zeroone{info\_}eauto \zeroone{\num}} \zeroone{{\tt using} \nterm{lemma}$_1$ - {\tt ,} {\ldots} {\tt ,} \nterm{lemma}$_n$} \zeroone{{\tt with} - \ident$_1$ {\ldots} \ident$_n$} - - The various options for eauto are the same as for auto. 
- -\end{Variants} - -\Rem {\tt eauto} obeys the following options: -\begin{quote} - \optindex{Info Eauto} - {\tt Set Info Eauto} - \optindex{Debug Eauto} - {\tt Set Debug Eauto} -\end{quote} - -\SeeAlso Section~\ref{Hints-databases} - -\subsection{\tt autounfold with \ident$_1$ \mbox{\dots} \ident$_n$} -\tacindex{autounfold} -\label{autounfold} - -This tactic unfolds constants that were declared through a {\tt Hint - Unfold} in the given databases. - -\begin{Variants} -\item {\tt autounfold with \ident$_1$ \dots\ \ident$_n$ in \nterm{clause}} - - Performs the unfolding in the given clause. - -\item {\tt autounfold with *} - - Uses the unfold hints declared in all the hint databases. -\end{Variants} - - -\subsection{\tt autorewrite with \ident$_1$ \mbox{\dots} \ident$_n$} -\label{tactic:autorewrite} -\tacindex{autorewrite} - -This tactic \footnote{The behavior of this tactic has much changed compared to -the versions available in the previous distributions (V6). This may cause -significant changes in your theories to obtain the same result. As a drawback -of the re-engineering of the code, this tactic has also been completely revised -to get a very compact and readable version.} carries out rewritings according -the rewriting rule bases {\tt \ident$_1$ \dots \ident$_n$}. - -Each rewriting rule of a base \ident$_i$ is applied to the main subgoal until -it fails. Once all the rules have been processed, if the main subgoal has -progressed (e.g., if it is distinct from the initial main goal) then the rules -of this base are processed again. If the main subgoal has not progressed then -the next base is processed. For the bases, the behavior is exactly similar to -the processing of the rewriting rules. - -The rewriting rule bases are built with the {\tt Hint~Rewrite} vernacular -command. - -\Warning{} This tactic may loop if you build non terminating rewriting systems. 
- -\begin{Variant} -\item {\tt autorewrite with \ident$_1$ \mbox{\dots} \ident$_n$ using \tac} - -Performs, in the same way, all the rewritings of the bases {\tt \ident$_1$ -\mbox{\dots} \ident$_n$} applying {\tt \tac} to the main subgoal after each -rewriting step. - -\item {\tt autorewrite with \ident$_1$ \mbox{\dots} \ident$_n$ in \qualid} - - Performs all the rewritings in hypothesis {\qualid}. -\item {\tt autorewrite with \ident$_1$ \mbox{\dots} \ident$_n$ in {\qualid} using \tac} - - Performs all the rewritings in hypothesis {\qualid} applying {\tt - \tac} to the main subgoal after each rewriting step. - -\item {\tt autorewrite with \ident$_1$ \mbox{\dots} \ident$_n$ in \nterm{clause}} - - Performs all the rewriting in the clause \nterm{clause}. - The \nterm{clause} argument must not contain any \texttt{type of} nor \texttt{value of}. - -\end{Variant} - -\SeeAlso Section~\ref{HintRewrite} for feeding the database of lemmas used by {\tt autorewrite}. - -\SeeAlso Section~\ref{autorewrite-example} for examples showing the use of -this tactic. - -% En attente d'un moyen de valoriser les fichiers de demos -%\SeeAlso file \texttt{contrib/Rocq/DEMOS/Demo\_AutoRewrite.v} - -\subsection{\tt easy} -\tacindex{easy} -\label{easy} - -This tactic tries to solve the current goal by a number of standard closing steps. -In particular, it tries to close the current goal using the closing tactics -{\tt trivial}, reflexivity, symmetry, contradiction and inversion of hypothesis. -If this fails, it tries introducing variables and splitting and-hypotheses, -using the closing tactics afterwards, and splitting the goal using {\tt split} and recursing. - -This tactic solves goals that belong to many common classes; in particular, many cases of -unsatisfiable hypotheses, and simple equality goals are usually solved by this tactic. - -\begin{Variant} -\item {\tt now \tac} - \tacindex{now} - - Run \tac\/ followed by easy. This is a notation for {\tt \tac; easy}. 
-\end{Variant}
-
-\section{Controlling automation}
-
-\subsection{The hints databases for {\tt auto} and {\tt eauto}}
-\index{Hints databases}
-\label{Hints-databases}
-\comindex{Hint}
-
-The hints for \texttt{auto} and \texttt{eauto} are stored in
-databases. Each database maps head symbols to a list of hints. One can
-use the command \texttt{Print Hint \ident} to display the hints
-associated to the head symbol \ident{} (see \ref{PrintHint}). Each
-hint has a cost that is a nonnegative integer, and an optional pattern.
-The hints with lower cost are tried first. A hint is tried by
-\texttt{auto} when the conclusion of the current goal
-matches its pattern or when it has no pattern.
-
-\subsubsection*{Creating Hint databases
-  \label{CreateHintDb}\comindex{CreateHintDb}}
-
-One can optionally declare a hint database using the command
-\texttt{Create HintDb}. If a hint is added to an unknown database, it
-will be automatically created.
-
-\medskip
-\texttt{Create HintDb} {\ident} [\texttt{discriminated}]
-\medskip
-
-This command creates a new database named \ident.
-The database is implemented by a Discrimination Tree (DT) that serves as
-an index of all the lemmas. The DT can use transparency information to decide
-if a constant should be indexed or not (c.f. \ref{HintTransparency}),
-making the retrieval more efficient.
-The legacy implementation (the default one for new databases) uses the
-DT only on goals without existentials (i.e., auto goals), for non-Immediate
-hints and does not make use of transparency hints, putting more work on the
-unification that is run after retrieval (it keeps a list of the lemmas
-in case the DT is not used). The new implementation enabled by
-the {\tt discriminated} option makes use of DTs in all cases and takes
-transparency information into account. 
However, the order in which hints -are retrieved from the DT may differ from the order in which they were -inserted, making this implementation observationally different from the -legacy one. - -The general -command to add a hint to some databases \ident$_1$, \dots, \ident$_n$ is -\begin{tabbing} - {\tt Hint {\hintdef} :~\ident$_1$ \mbox{\dots} \ident$_n$} -\end{tabbing} - -\begin{Variants} -\item {\tt Hint \hintdef} - - No database name is given: the hint is registered in the {\tt core} - database. - -\item {\tt Local Hint {\hintdef} :~\ident$_1$ \mbox{\dots} \ident$_n$} - - This is used to declare hints that must not be exported to the other - modules that require and import the current module. Inside a - section, the option {\tt Local} is useless since hints do not - survive anyway to the closure of sections. - -\item {\tt Local Hint \hintdef} - - Idem for the {\tt core} database. - -\end{Variants} - -The {\hintdef} is one of the following expressions: - -\begin{itemize} -\item {\tt Resolve \term {\zeroone{{\tt |} \zeroone{\num} \zeroone{\pattern}}}} - \comindex{Hint Resolve} - - This command adds {\tt simple apply {\term}} to the hint list - with the head symbol of the type of \term. The cost of that hint is - the number of subgoals generated by {\tt simple apply {\term}} or \num - if specified. The associated pattern is inferred from the conclusion - of the type of \term or the given \pattern if specified. -%{\tt auto} actually uses a slightly modified variant of {\tt simple apply} with use_metas_eagerly_in_conv_on_closed_terms set to false - - In case the inferred type of \term\ does not start with a product - the tactic added in the hint list is {\tt exact {\term}}. -% Actually, a slightly restricted version is used (no conversion on the head symbol) - In case - this type can however be reduced to a type starting with a product, - the tactic {\tt simple apply {\term}} is also stored in the hints list. 
-
-  If the inferred type of \term\ contains a dependent quantification
-  on a variable which occurs only in the premisses of the type and not
-  in its conclusion, no instance could be inferred for the variable by
-  unification with the goal. In this case, the hint is added to the
-  hint list of {\tt eauto} (see \ref{eauto}) instead of the hint list
-  of {\tt auto} and a warning is printed. A typical example of a hint
-  that is used only by \texttt{eauto} is a transitivity lemma.
-
-  \begin{ErrMsgs}
-%% \item \errindex{Bound head variable}
-
-  \item \term\ \errindex{cannot be used as a hint}
-
-    The head symbol of the type of {\term} is a bound variable such
-    that this tactic cannot be associated to a constant.
-
-    %% The type of {\term} contains products over variables that do not
-    %% appear in the conclusion. A typical example is a transitivity axiom.
-    %% In that case the {\tt simple apply} tactic fails, and thus is useless.
-
-  \end{ErrMsgs}
-
-  \begin{Variants}
-
-  \item {\tt Resolve \term$_1$ \mbox{\dots} \term$_m$}
-
-    Adds each \texttt{Resolve} {\term$_i$}.
-
-  \item {\tt Resolve -> \term}
-
-    Adds the left-to-right implication of an equivalence as a hint
-    (informally the hint will be used as {\tt apply <- \term},
-    although as mentioned before, the tactic actually used is
-    a restricted version of apply).
-
-  \item {\tt Resolve <- \term}
-
-    Adds the right-to-left implication of an equivalence as a hint.
-
-  \end{Variants}
-
-\item \texttt{Immediate {\term}}
-\comindex{Hint Immediate}
-
-  This command adds {\tt simple apply {\term}; trivial} to the hint list
-  associated with the head symbol of the type of {\term} in the given
-  database. This tactic will fail if all the subgoals generated by
-  {\tt simple apply {\term}} are not solved immediately by the {\tt trivial}
-  tactic (which only tries tactics with cost $0$). 
- - This command is useful for theorems such as the symmetry of equality - or $n+1=m+1 \to n=m$ that we may like to introduce with a - limited use in order to avoid useless proof-search. - - The cost of this tactic (which never generates subgoals) is always 1, - so that it is not used by {\tt trivial} itself. - - \begin{ErrMsgs} - -%% \item \errindex{Bound head variable} - - \item \term\ \errindex{cannot be used as a hint} - - \end{ErrMsgs} - - \begin{Variants} - - \item {\tt Immediate \term$_1$ \mbox{\dots} \term$_m$} - - Adds each \texttt{Immediate} {\term$_i$}. - - \end{Variants} - -\item \texttt{Constructors} {\ident} -\comindex{Hint Constructors} - - If {\ident} is an inductive type, this command adds all its - constructors as hints of type \texttt{Resolve}. Then, when the - conclusion of current goal has the form \texttt{({\ident} \dots)}, - \texttt{auto} will try to apply each constructor. - - \begin{ErrMsgs} - - \item {\ident} \errindex{is not an inductive type} - -% No need to have this message here, is is generic to all commands -% referring to globals -%% \item {\ident} \errindex{not declared} - - \end{ErrMsgs} - - \begin{Variants} - - \item {\tt Constructors \ident$_1$ \mbox{\dots} \ident$_m$} - - Adds each \texttt{Constructors} {\ident$_i$}. - - \end{Variants} - -\item \texttt{Unfold} {\qualid} -\comindex{Hint Unfold} - - This adds the tactic {\tt unfold {\qualid}} to the hint list that - will only be used when the head constant of the goal is \ident. Its - cost is 4. - - \begin{Variants} - - \item {\tt Unfold \ident$_1$ \mbox{\dots} \ident$_m$} - - Adds each \texttt{Unfold} {\ident$_i$}. - - \end{Variants} - -\item \texttt{Transparent}, \texttt{Opaque} {\qualid} -\label{HintTransparency} -\comindex{Hint Transparent} -\comindex{Hint Opaque} - - This adds a transparency hint to the database, making {\tt {\qualid}} - a transparent or opaque constant during resolution. 
This information - is used during unification of the goal with any lemma in the database - and inside the discrimination network to relax or constrain it in the - case of \texttt{discriminated} databases. - - \begin{Variants} - - \item \texttt{Transparent}, \texttt{Opaque} {\ident$_1$} \mbox{\dots} {\ident$_m$} - - Declares each {\ident$_i$} as a transparent or opaque constant. - - \end{Variants} - -\item \texttt{Extern \num\ [\pattern]\ => }\textsl{tactic} -\comindex{Hint Extern} - - This hint type is to extend \texttt{auto} with tactics other than - \texttt{apply} and \texttt{unfold}. For that, we must specify a - cost, an optional pattern and a tactic to execute. Here is an example: - -\begin{quotation} -\begin{verbatim} -Hint Extern 4 (~(_ = _)) => discriminate. -\end{verbatim} -\end{quotation} - - Now, when the head of the goal is a disequality, \texttt{auto} will - try \texttt{discriminate} if it does not manage to solve the goal - with hints with a cost less than 4. - - One can even use some sub-patterns of the pattern in the tactic - script. A sub-pattern is a question mark followed by an identifier, like - \texttt{?X1} or \texttt{?X2}. Here is an example: - -% Require EqDecide. -\begin{coq_example*} -Require Import List. -\end{coq_example*} -\begin{coq_example} -Hint Extern 5 ({?X1 = ?X2} + {?X1 <> ?X2}) => - generalize X1, X2; decide equality : eqdec. -Goal -forall a b:list (nat * nat), {a = b} + {a <> b}. -Info 1 auto with eqdec. -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} - -\item \texttt{Cut} {\textit{regexp}} -\label{HintCut} -\comindex{Hint Cut} - - \textit{Warning:} these hints currently only apply to typeclass proof search and - the \texttt{typeclasses eauto} tactic (\ref{typeclasseseauto}). - - This command can be used to cut the proof-search tree according to a - regular expression matching paths to be cut. The grammar for regular - expressions is the following. 
Beware, there is no operator precedence - during parsing, one can check with \texttt{Print HintDb} to verify the - current cut expression: -\[\begin{array}{lcll} - e & ::= & \ident & \text{ hint or instance identifier } \\ - & & \texttt{\_} & \text{ any hint } \\ - & & e | e' & \text{ disjunction } \\ - & & e e' & \text{ sequence } \\ - & & e * & \text{ Kleene star } \\ - & & \texttt{emp} & \text{ empty } \\ - & & \texttt{eps} & \text{ epsilon } \\ - & & \texttt{(} e \texttt{)} & -\end{array}\] - -The \texttt{emp} regexp does not match any search path while -\texttt{eps} matches the empty path. During proof search, the path of -successive successful hints on a search branch is recorded, as a list of -identifiers for the hints (note \texttt{Hint Extern}'s do not have an -associated identifier). Before applying any hint $\ident$ the current -path $p$ extended with $\ident$ is matched against the current cut -expression $c$ associated to the hint database. If matching succeeds, -the hint is \emph{not} applied. The semantics of \texttt{Hint Cut} $e$ -is to set the cut expression to $c | e$, the initial cut expression -being \texttt{emp}. - - -\item \texttt{Mode} {\qualid} {\tt (+ | ! | -)}$^*$ -\label{HintMode} -\comindex{Hint Mode} - -This sets an optional mode of use of the identifier {\qualid}. When -proof-search faces a goal that ends in an application of {\qualid} to -arguments {\tt \term$_1$ \mbox{\dots} \term$_n$}, the mode tells if the -hints associated to qualid can be applied or not. A mode specification -is a list of $n$ {\tt +}, {\tt !} or {\tt -} items that specify if an -argument of the identifier is to be treated as an input ({\tt +}), if -its head only is an input ({\tt !}) or an output ({\tt -}) of the -identifier. For a mode to match a list of arguments, input terms and -input heads \emph{must not} contain existential variables or be -existential variables respectively, while outputs can be any -term. 
Multiple modes can be declared for a single identifier, in that -case only one mode needs to match the arguments for the hints to be -applied. - -The head of a term is understood here as the applicative head, or the -match or projection scrutinee's head, recursively, casts being ignored. - -{\tt Hint Mode} is especially useful for typeclasses, when one does not -want to support default instances and avoid ambiguity in -general. Setting a parameter of a class as an input forces proof-search -to be driven by that index of the class, with {\tt !} giving more -flexibility by allowing existentials to still appear deeper in the index -but not at its head. - -\end{itemize} - -\Rem One can use an \texttt{Extern} hint with no pattern to do -pattern-matching on hypotheses using \texttt{match goal with} inside -the tactic. - -% There are shortcuts that allow to define several goal at once: - -% \begin{itemize} -% \item \comindex{Hints Resolve}\texttt{Hints Resolve \ident$_1$ \dots\ \ident$_n$ : \ident.}\\ -% This command is a shortcut for the following ones: -% \begin{quotation} -% \noindent\texttt{Hint \ident$_1$ : \ident\ := Resolve \ident$_1$}\\ -% \dots\\ -% \texttt{Hint \ident$_1$ : \ident := Resolve \ident$_1$} -% \end{quotation} -% Notice that the hint name is the same that the theorem given as -% hint. -% \item \comindex{Hints Immediate}\texttt{Hints Immediate \ident$_1$ \dots\ \ident$_n$ : \ident.}\\ -% \item \comindex{Hints Unfold}\texttt{Hints Unfold \qualid$_1$ \dots\ \qualid$_n$ : \ident.}\\ -% \end{itemize} - -%\begin{Warnings} -% \item \texttt{Overriding hint named \dots\ in database \dots} -%\end{Warnings} - - - -\subsection{Hint databases defined in the \Coq\ standard library} - -Several hint databases are defined in the \Coq\ standard library. The -actual content of a database is the collection of the hints declared -to belong to this database in each of the various modules currently -loaded. Especially, requiring new modules potentially extend a -database. 
At {\Coq} startup, only the {\tt core} database is non-empty
-and can be used.
-
-\begin{description}
-
-\item[\tt core] This special database is automatically used by
-  \texttt{auto}, except when pseudo-database \texttt{nocore} is
-  given to \texttt{auto}. The \texttt{core} database contains
-  only basic lemmas about negation,
-  conjunction, and so on. Most of the hints in this database come
-  from the \texttt{Init} and \texttt{Logic} directories.
-
-\item[\tt arith] This database contains all lemmas about Peano's
-  arithmetic proved in the directories \texttt{Init} and
-  \texttt{Arith}.
-
-\item[\tt zarith] contains lemmas about binary signed integers from
-  the directories \texttt{theories/ZArith}. When required, the module
-  {\tt Omega} also extends the database {\tt zarith} with a high-cost
-  hint that calls {\tt omega} on equations and inequalities in {\tt
-    nat} or {\tt Z}.
-
-\item[\tt bool] contains lemmas about booleans, mostly from directory
-  \texttt{theories/Bool}.
-
-\item[\tt datatypes] is for lemmas about lists, streams and so on that
-  are mainly proved in the \texttt{Lists} subdirectory.
-
-\item[\tt sets] contains lemmas about sets and relations from the
-  directories \texttt{Sets} and \texttt{Relations}.
-
-\item[\tt typeclass\_instances] contains all the type class instances
-  declared in the environment, including those used for \texttt{setoid\_rewrite},
-  from the \texttt{Classes} directory.
-\end{description}
-
-You are advised not to put your own hints in the {\tt core} database,
-but use one or several databases specific to your development.
-
-\subsection{\tt Remove Hints \term$_1$ \mbox{\dots} \term$_n$ :~ \ident$_1$
-  \mbox{\dots} \ident$_m$}
-\label{RemoveHints}
-\comindex{Remove Hints}
-
-This command removes the hints associated to terms \term$_1$ \mbox{\dots}
-\term$_n$ in databases \ident$_1$ \mbox{\dots} \ident$_m$. 
- -\subsection{\tt Print Hint} -\label{PrintHint} -\comindex{Print Hint} - -This command displays all hints that apply to the current goal. It -fails if no proof is being edited, while the two variants can be used at -every moment. - -\begin{Variants} - -\item {\tt Print Hint \ident} - - This command displays only tactics associated with \ident\ in the - hints list. This is independent of the goal being edited, so this - command will not fail if no goal is being edited. - -\item {\tt Print Hint *} - - This command displays all declared hints. - -\item {\tt Print HintDb \ident} -\label{PrintHintDb} -\comindex{Print HintDb} - - This command displays all hints from database \ident. - -\end{Variants} - -\subsection{\tt Hint Rewrite \term$_1$ \mbox{\dots} \term$_n$ :~ \ident$_1$ \mbox{\dots} \ident$_m$} -\label{HintRewrite} -\comindex{Hint Rewrite} - -This vernacular command adds the terms {\tt \term$_1$ \mbox{\dots} \term$_n$} -(their types must be equalities) in the rewriting bases \ident$_1$, \dots, \ident$_m$ -with the default orientation (left to right). Notice that the -rewriting bases are distinct from the {\tt auto} hint bases and that -{\tt auto} does not take them into account. - -This command is synchronous with the section mechanism (see \ref{Section}): -when closing a section, all aliases created by \texttt{Hint Rewrite} in that -section are lost. Conversely, when loading a module, all \texttt{Hint Rewrite} -declarations at the global level of that module are loaded. - -\begin{Variants} -\item {\tt Hint Rewrite -> \term$_1$ \mbox{\dots} \term$_n$ :~\ident$_1$ \mbox{\dots} \ident$_m$} - -This is strictly equivalent to the command above (we only make explicit the -orientation which otherwise defaults to {\tt ->}). - -\item {\tt Hint Rewrite <- \term$_1$ \mbox{\dots} \term$_n$ :~\ident$_1$ \mbox{\dots} \ident$_m$} - -Adds the rewriting rules {\tt \term$_1$ \mbox{\dots} \term$_n$} with a right-to-left -orientation in the bases \ident$_1$, \dots, \ident$_m$. 
-
-\item {\tt Hint Rewrite \term$_1$ \mbox{\dots} \term$_n$ using {\tac} :~\ident$_1$ \mbox{\dots} \ident$_m$}
-
-When the rewriting rules {\tt \term$_1$ \mbox{\dots} \term$_n$} in \ident$_1$, \dots, \ident$_m$ will
-be used, the tactic {\tt \tac} will be applied to the generated subgoals, the
-main subgoal excluded.
-
-%% \item
-%% {\tt Hint Rewrite [ \term$_1$ \dots \term$_n$ ] in \ident}\\
-%% {\tt Hint Rewrite [ \term$_1$ \dots \term$_n$ ] in {\ident} using {\tac}}\\
-%% These are deprecated syntactic variants for
-%% {\tt Hint Rewrite \term$_1$ \dots \term$_n$ : \ident} and
-%% {\tt Hint Rewrite \term$_1$ \dots \term$_n$ using {\tac} : {\ident}}.
-
-\item \texttt{Print Rewrite HintDb {\ident}}
-
-  This command displays all rewrite hints contained in {\ident}.
-
-\end{Variants}
-
-\subsection{Hint locality
-\label{Hint-Locality}}
-\optindex{Loose Hint Behavior}
-
-Hints provided by the \texttt{Hint} commands are erased when closing a
-section. Conversely, all hints of a module \texttt{A} that are not
-defined inside a section (and not defined with option {\tt Local}) become
-available when the module {\tt A} is imported (using
-e.g. \texttt{Require Import A.}).
-
-As of today, hints only have a binary behavior regarding locality, as described
-above: either they disappear at the end of a section scope, or they remain
-global forever. This causes a scalability issue, because hints coming from an
-unrelated part of the code may badly influence another development. It can be
-mitigated to some extent thanks to the {\tt Remove Hints} command
-(see ~\ref{RemoveHints}), but this is a mere workaround and has some
-limitations (for instance, external hints cannot be removed).
-
-A proper way to fix this issue is to bind the hints to their module scope, as
-for most of the other objects Coq uses. Hints should only be made available when
-the module they are defined in is imported, not just required. 
It is very
-difficult to change the historical behavior, as it would break a lot of scripts.
-We propose a smooth transitional path by providing the {\tt Loose Hint Behavior}
-option which accepts three flags allowing for a fine-grained handling of
-non-imported hints.
-
-\begin{Variants}
-
-\item {\tt Set Loose Hint Behavior "Lax"}
-
-  This is the default, and corresponds to the historical behavior, that is,
-  hints defined outside of a section have a global scope.
-
-\item {\tt Set Loose Hint Behavior "Warn"}
-
-  When set, it outputs a warning when a non-imported hint is used. Note that
-  this is an over-approximation, because a hint may be triggered by a run that
-  will eventually fail and backtrack, resulting in the hint not being actually
-  useful for the proof.
-
-\item {\tt Set Loose Hint Behavior "Strict"}
-
-  When set, it changes the behavior of an unloaded hint to an immediate fail
-  tactic, making it possible to emulate an import-scoped hint mechanism.
-
-\end{Variants}
-
-\subsection{Setting implicit automation tactics}
-
-\subsubsection{\tt Proof with {\tac}}
-\label{ProofWith}
-\comindex{Proof with}
-
-  This command may be used to start a proof. It defines a default
-  tactic to be used each time a tactic command {\tac$_1$} is ended by
-  ``\verb#...#''. In this case the tactic command typed by the user is
-  equivalent to \tac$_1$;{\tac}.
-
-\SeeAlso {\tt Proof.} in Section~\ref{BeginProof}. 
- -\begin{Variants} - -\item {\tt Proof with {\tac} using \ident$_1$ \mbox{\dots} \ident$_n$} - - Combines in a single line {\tt Proof with} and {\tt Proof using}, - see~\ref{ProofUsing} - -\item {\tt Proof using \ident$_1$ \mbox{\dots} \ident$_n$ with {\tac}} - - Combines in a single line {\tt Proof with} and {\tt Proof using}, - see~\ref{ProofUsing} - -\end{Variants} - -\subsubsection{\tt Declare Implicit Tactic {\tac}}\label{DeclareImplicit} -\comindex{Declare Implicit Tactic} - -This command declares a tactic to be used to solve implicit arguments -that {\Coq} does not know how to solve by unification. It is used -every time the term argument of a tactic has one of its holes not -fully resolved. - -Here is an example: - -\begin{coq_example} -Parameter quo : nat -> forall n:nat, n<>0 -> nat. -Notation "x // y" := (quo x y _) (at level 40). - -Declare Implicit Tactic assumption. -Goal forall n m, m<>0 -> { q:nat & { r | q * m + r = n } }. -intros. -exists (n // m). -\end{coq_example} -\begin{coq_eval} -Clear Implicit Tactic. -Reset Initial. -\end{coq_eval} - -The tactic {\tt exists (n // m)} did not fail. The hole was solved by -{\tt assumption} so that it behaved as {\tt exists (quo n m H)}. - -\section{Decision procedures} - -\subsection{\tt tauto} -\tacindex{tauto} -\tacindex{dtauto} -\label{tauto} - -This tactic implements a decision procedure for intuitionistic propositional -calculus based on the contraction-free sequent calculi LJT* of Roy Dyckhoff -\cite{Dyc92}. Note that {\tt tauto} succeeds on any instance of an -intuitionistic tautological proposition. {\tt tauto} unfolds negations -and logical equivalence but does not unfold any other definition. - -The following goal can be proved by {\tt tauto} whereas {\tt auto} -would fail: - -\begin{coq_example} -Goal forall (x:nat) (P:nat -> Prop), x = 0 \/ P x -> x <> 0 -> P x. - intros. - tauto. -\end{coq_example} -\begin{coq_eval} -Abort. 
-\end{coq_eval}
-
-Moreover, if it has nothing else to do, {\tt tauto} performs
-introductions. Therefore, the use of {\tt intros} in the previous
-proof is unnecessary. {\tt tauto} can for instance prove the
-following:
-\begin{coq_example}
-(* auto would fail *)
-Goal forall (A:Prop) (P:nat -> Prop),
-  A \/ (forall x:nat, ~ A -> P x) -> forall x:nat, ~ A -> P x.
-
-  tauto.
-\end{coq_example}
-\begin{coq_eval}
-Abort.
-\end{coq_eval}
-
-\Rem In contrast, {\tt tauto} cannot solve the following goal
-
-\begin{coq_example*}
-Goal forall (A:Prop) (P:nat -> Prop),
-  A \/ (forall x:nat, ~ A -> P x) -> forall x:nat, ~ ~ (A \/ P x).
-\end{coq_example*}
-\begin{coq_eval}
-Abort.
-\end{coq_eval}
-
-because \verb=(forall x:nat, ~ A -> P x)= cannot be treated as atomic and an
-instantiation of \verb=x= is necessary.
-
-\begin{Variants}
-
-\item {\tt dtauto}
-
-  While {\tt tauto} recognizes inductively defined connectives
-  isomorphic to the standard connectives {\tt and}, {\tt prod}, {\tt
-  or}, {\tt sum}, {\tt False}, {\tt Empty\_set}, {\tt unit}, {\tt
-  True}, {\tt dtauto} recognizes also all inductive types with
-  one constructor and no indices, i.e. record-style connectives.
-
-\end{Variants}
-
-\subsection{\tt intuition \tac}
-\tacindex{intuition}
-\tacindex{dintuition}
-\label{intuition}
-
-The tactic \texttt{intuition} takes advantage of the search-tree built
-by the decision procedure involved in the tactic {\tt tauto}. It uses
-this information to generate a set of subgoals equivalent to the
-original one (but simpler than it) and applies the tactic
-{\tac} to them \cite{Mun94}. If this tactic fails on some goals then
-{\tt intuition} fails. In fact, {\tt tauto} is simply {\tt intuition
-  fail}. 
-
-For instance, the tactic {\tt intuition auto} applied to the goal
-\begin{verbatim}
-(forall (x:nat), P x)/\B -> (forall (y:nat),P y)/\ P O \/B/\ P O
-\end{verbatim}
-internally replaces it by the equivalent one:
-\begin{verbatim}
-(forall (x:nat), P x), B |- P O
-\end{verbatim}
-and then uses {\tt auto} which completes the proof.
-
-Originally due to C{\'e}sar~Mu{\~n}oz, these tactics ({\tt tauto} and {\tt intuition})
-have been completely re-engineered by David~Delahaye using mainly the tactic
-language (see Chapter~\ref{TacticLanguage}). The code is now much shorter and
-a significant increase in performance has been noticed. The general behavior
-with respect to dependent types, unfolding and introductions has
-slightly changed to get clearer semantics. This may lead to some
-incompatibilities.
-
-\begin{Variants}
-\item {\tt intuition}
-
-  Is equivalent to {\tt intuition auto with *}.
-
-\item {\tt dintuition}
-
-  While {\tt intuition} recognizes inductively defined connectives
-  isomorphic to the standard connectives {\tt and}, {\tt prod}, {\tt
-  or}, {\tt sum}, {\tt False}, {\tt Empty\_set}, {\tt unit}, {\tt
-  True}, {\tt dintuition} recognizes also all inductive types with
-  one constructor and no indices, i.e. record-style connectives.
-
-\end{Variants}
-
-\optindex{Intuition Negation Unfolding}
-
-Some aspects of the tactic {\tt intuition} can be
-controlled using options. 
To avoid unfolding inner negations that do not
-need to be unfolded, use:
-
-\begin{quote}
-{\tt Unset Intuition Negation Unfolding}
-\end{quote}
-
-To unfold all negations of the goal, even inner ones
-(this is the default), use:
-
-\begin{quote}
-{\tt Set Intuition Negation Unfolding}
-\end{quote}
-
-To avoid unfolding inner occurrences of {\tt iff} that do not need to be
-unfolded (this is the default), use:
-% NOTE(review): the command this sentence announces is missing (presumably an
-% "Intuition Iff Unfolding" flag) -- confirm the intended option and add it.
-
-% En attente d'un moyen de valoriser les fichiers de demos
-%\SeeAlso file \texttt{contrib/Rocq/DEMOS/Demo\_tauto.v}
-
-
-\subsection{\tt rtauto}
-\tacindex{rtauto}
-\label{rtauto}
-
-The {\tt rtauto} tactic solves propositional tautologies similarly to what {\tt tauto} does. The main difference is that the proof term is built using a reflection scheme applied to a sequent calculus proof of the goal. The search procedure is also implemented using a different technique.
-
-Users should be aware that this difference may result in faster proof-search but slower proof-checking, and {\tt rtauto} might not solve goals that {\tt tauto} would be able to solve (e.g. goals involving universal quantifiers).
-
-\subsection{\tt firstorder}
-\tacindex{firstorder}
-\label{firstorder}
-
-The tactic \texttt{firstorder} is an {\it experimental} extension of
-\texttt{tauto} to
-first-order reasoning, written by Pierre Corbineau.
-It is not restricted to usual logical connectives but
-instead may reason about any first-order class inductive definition.
-
-The default tactic used by \texttt{firstorder} when no rule applies is {\tt
-  auto with *}; it can be reset locally or globally using the {\nobreak
-  {\tt Set Firstorder Solver {\tac}}} \optindex{Firstorder Solver}
-vernacular command and printed using {\nobreak {\tt Print Firstorder
-  Solver}}.
-
-\begin{Variants}
- \item {\tt firstorder {\tac}}
- \tacindex{firstorder {\tac}}
-
-   Tries to solve the goal with {\tac} when no logical rule may apply. 
-
-  \item {\tt firstorder using {\qualid}$_1$ , \dots\ , {\qualid}$_n$ }
-  \tacindex{firstorder using}
-
-    Adds lemmas {\qualid}$_1$ \dots\ {\qualid}$_n$ to the proof-search
-    environment. If {\qualid}$_i$ refers to an inductive type, it is
-    the collection of its constructors which are added to the
-    proof-search environment.
-
-  \item {\tt firstorder with \ident$_1$ \dots\ \ident$_n$ }
-  \tacindex{firstorder with}
-
-    Adds lemmas from {\tt auto} hint bases \ident$_1$ \dots\ \ident$_n$
-    to the proof-search environment.
-
-\item \texttt{firstorder {\tac} using {\qualid}$_1$ , \dots\ , {\qualid}$_n$ with \ident$_1$ \dots\ \ident$_n$}
-
-  This combines the effects of the different variants of \texttt{firstorder}.
-
-\end{Variants}
-
-Proof-search is bounded by a depth parameter which can be set by typing the
-{\nobreak \tt Set Firstorder Depth $n$} \optindex{Firstorder Depth}
-vernacular command.
-
-
-\subsection{\tt congruence}
-\tacindex{congruence}
-\label{congruence}
-
-The tactic {\tt congruence}, by Pierre Corbineau, implements the standard Nelson and Oppen
-congruence closure algorithm, which is a decision procedure for ground
-equalities with uninterpreted symbols. It also includes the constructor theory
-(see \ref{injection} and \ref{discriminate}).
-If the goal is a non-quantified equality, {\tt congruence} tries to
-prove it with non-quantified equalities in the context. Otherwise it
-tries to infer a discriminable equality from those in the context. Alternatively, congruence tries to prove that a hypothesis is equal to the goal or to the negation of another hypothesis.
-
-{\tt congruence} is also able to take advantage of hypotheses stating quantified equalities; you have to provide a bound for the number of extra equalities generated that way. Please note that one of the members of the equality must contain all the quantified variables in order for {\tt congruence} to match against it.
-
-\begin{coq_eval}
-Reset Initial.
-Variable A:Set. 
-Variables a b:A. -Variable f:A->A. -Variable g:A->A->A. -\end{coq_eval} - -\begin{coq_example} -Theorem T: - a=(f a) -> (g b (f a))=(f (f a)) -> (g a b)=(f (g b a)) -> (g a b)=a. -intros. -congruence. -\end{coq_example} - -\begin{coq_eval} -Reset Initial. -Variable A:Set. -Variables a c d:A. -Variable f:A->A*A. -\end{coq_eval} - -\begin{coq_example} -Theorem inj : f = pair a -> Some (f c) = Some (f d) -> c=d. -intros. -congruence. -\end{coq_example} - -\begin{Variants} - \item {\tt congruence {\sl n}} - - Tries to add at most {\tt \sl n} instances of hypotheses stating quantified equalities to the problem in order to solve it. A bigger value of {\tt \sl n} does not make success slower, only failure. You might consider adding some lemmas as hypotheses using {\tt assert} in order for congruence to use them. - -\item {\tt congruence with \term$_1$ \dots\ \term$_n$} - - Adds {\tt \term$_1$ \dots\ \term$_n$} to the pool of terms used by - {\tt congruence}. This helps in case you have partially applied - constructors in your goal. -\end{Variants} - -\begin{ErrMsgs} - \item \errindex{I don't know how to handle dependent equality} - - The decision procedure managed to find a proof of the goal or of - a discriminable equality but this proof could not be built in {\Coq} - because of dependently-typed functions. - - \item \errindex{Goal is solvable by congruence but some arguments are missing. Try "congruence with \dots", replacing metavariables by arbitrary terms.} - - The decision procedure could solve the goal with the provision - that additional arguments are supplied for some partially applied - constructors. Any term of an appropriate type will allow the - tactic to successfully solve the goal. Those additional arguments - can be given to {\tt congruence} by filling in the holes in the - terms given in the error message, using the {\tt with} variant - described above. 
-\end{ErrMsgs} - -\noindent {\bf Remark: } {\tt congruence} can be made to print debug -information by setting the following option: - -\begin{quote} -\optindex{Congruence Verbose} -{\tt Set Congruence Verbose} -\end{quote} - -\section{Checking properties of terms} - -Each of the following tactics acts as the identity if the check succeeds, and results in an error otherwise. - -\subsection{\tt constr\_eq \term$_1$ \term$_2$} -\tacindex{constr\_eq} -\label{constreq} - -This tactic checks whether its arguments are equal modulo alpha conversion and casts. - -\ErrMsg \errindex{Not equal} - -\subsection{\tt unify \term$_1$ \term$_2$} -\tacindex{unify} -\label{unify} - -This tactic checks whether its arguments are unifiable, potentially -instantiating existential variables. - -\ErrMsg \errindex{Not unifiable} - -\begin{Variants} -\item {\tt unify \term$_1$ \term$_2$ with \ident} - - Unification takes the transparency information defined in the - hint database {\tt \ident} into account (see Section~\ref{HintTransparency}). -\end{Variants} - -\subsection{\tt is\_evar \term} -\tacindex{is\_evar} -\label{isevar} - -This tactic checks whether its argument is a current existential -variable. Existential variables are uninstantiated variables generated -by {\tt eapply} (see Section~\ref{apply}) and some other tactics. - -\ErrMsg \errindex{Not an evar} - -\subsection{\tt has\_evar \term} -\tacindex{has\_evar} -\label{hasevar} - -This tactic checks whether its argument has an existential variable as -a subterm. Unlike {\tt context} patterns combined with {\tt is\_evar}, -this tactic scans all subterms, including those under binders. - -\ErrMsg \errindex{No evars} - -\subsection{\tt is\_var \term} -\tacindex{is\_var} -\label{isvar} - -This tactic checks whether its argument is a variable or hypothesis in the -current goal context or in the opened sections. 
- -\ErrMsg \errindex{Not a variable or hypothesis} - -\section{Equality} - -\subsection{\tt f\_equal} -\label{f-equal} -\tacindex{f\_equal} - -This tactic applies to a goal of the form $f\ a_1\ \ldots\ a_n = f'\ -a'_1\ \ldots\ a'_n$. Using {\tt f\_equal} on such a goal leads to -subgoals $f=f'$ and $a_1=a'_1$ and so on up to $a_n=a'_n$. Amongst -these subgoals, the simple ones (e.g. provable by -reflexivity or congruence) are automatically solved by {\tt f\_equal}. - -\subsection{\tt reflexivity} -\label{reflexivity} -\tacindex{reflexivity} - -This tactic applies to a goal that has the form {\tt t=u}. It checks -that {\tt t} and {\tt u} are convertible and then solves the goal. -It is equivalent to {\tt apply refl\_equal}. - -\begin{ErrMsgs} -\item \errindex{The conclusion is not a substitutive equation} -\item \errindex{Unable to unify \dots\ with \dots} -\end{ErrMsgs} - -\subsection{\tt symmetry} -\tacindex{symmetry} - -This tactic applies to a goal that has the form {\tt t=u} and changes it -into {\tt u=t}. - -\begin{Variants} -\item {\tt symmetry in \ident} \tacindex{symmetry in} - -If the statement of the hypothesis {\ident} has the form {\tt t=u}, -the tactic changes it to {\tt u=t}. -\end{Variants} - -\subsection{\tt transitivity \term} -\tacindex{transitivity} - -This tactic applies to a goal that has the form {\tt t=u} -and transforms it into the two subgoals -{\tt t={\term}} and {\tt {\term}=u}. - -\section{Equality and inductive sets} - -We describe in this section some special purpose tactics dealing with -equality and inductive sets or types. These tactics use the equality -{\tt eq:forall (A:Type), A->A->Prop}, simply written with the -infix symbol {\tt =}. 
- -\subsection{\tt decide equality} -\label{decideequality} -\tacindex{decide equality} - -This tactic solves a goal of the form -{\tt forall $x$ $y$:$R$, \{$x$=$y$\}+\{\verb|~|$x$=$y$\}}, where $R$ -is an inductive type such that its constructors do not take proofs or -functions as arguments, nor objects in dependent types. -It solves goals of the form {\tt \{$x$=$y$\}+\{\verb|~|$x$=$y$\}} as well. - -\subsection{\tt compare \term$_1$ \term$_2$} -\tacindex{compare} - -This tactic compares two given objects \term$_1$ and \term$_2$ -of an inductive datatype. If $G$ is the current goal, it leaves the sub-goals -\term$_1${\tt =}\term$_2$ {\tt ->} $G$ and \verb|~|\term$_1${\tt =}\term$_2$ -{\tt ->} $G$. The type -of \term$_1$ and \term$_2$ must satisfy the same restrictions as in the tactic -\texttt{decide equality}. - -\subsection{\tt simplify\_eq \term} -\tacindex{simplify\_eq} -\tacindex{esimplify\_eq} -\label{simplify-eq} - -Let {\term} be the proof of a statement of conclusion {\tt - {\term$_1$}={\term$_2$}}. If {\term$_1$} and -{\term$_2$} are structurally different (in the sense described for the -tactic {\tt discriminate}), then the tactic {\tt simplify\_eq} behaves as {\tt - discriminate {\term}}, otherwise it behaves as {\tt injection - {\term}}. - -\Rem If some quantified hypothesis of the goal is named {\ident}, then -{\tt simplify\_eq {\ident}} first introduces the hypothesis in the local -context using \texttt{intros until \ident}. - -\begin{Variants} -\item \texttt{simplify\_eq} \num - - This does the same thing as \texttt{intros until \num} then -\texttt{simplify\_eq \ident} where {\ident} is the identifier for the last -introduced hypothesis. - -\item \texttt{simplify\_eq} \term{} {\tt with} {\bindinglist} - - This does the same as \texttt{simplify\_eq {\term}} but using - the given bindings to instantiate parameters or hypotheses of {\term}. 
- -\item \texttt{esimplify\_eq} \num\\ - \texttt{esimplify\_eq} \term{} \zeroone{{\tt with} {\bindinglist}} - - This works the same as {\tt simplify\_eq} but if the type of {\term}, - or the type of the hypothesis referred to by {\num}, has uninstantiated - parameters, these parameters are left as existential variables. - -\item{\tt simplify\_eq} - -If the current goal has form $t_1\verb=<>=t_2$, it behaves as -\texttt{intro {\ident}; simplify\_eq {\ident}}. -\end{Variants} - -\subsection{\tt dependent rewrite -> \ident} -\tacindex{dependent rewrite ->} -\label{dependent-rewrite} - -This tactic applies to any goal. If \ident\ has type -\verb+(existT B a b)=(existT B a' b')+ -in the local context (i.e. each term of the -equality has a sigma type $\{ a:A~ \&~(B~a)\}$) this tactic rewrites -\verb+a+ into \verb+a'+ and \verb+b+ into \verb+b'+ in the current -goal. This tactic works even if $B$ is also a sigma type. This kind -of equalities between dependent pairs may be derived by the injection -and inversion tactics. - -\begin{Variants} -\item{\tt dependent rewrite <- {\ident}} -\tacindex{dependent rewrite <-} - -Analogous to {\tt dependent rewrite ->} but uses the equality from -right to left. -\end{Variants} - -\section{Inversion -\label{inversion}} - -\subsection{\tt functional inversion \ident} -\tacindex{functional inversion} -\label{sec:functional-inversion} - -\texttt{functional inversion} is a tactic -that performs inversion on hypothesis {\ident} of the form -\texttt{\qualid\ \term$_1$\dots\term$_n$\ = \term} or \texttt{\term\ = - \qualid\ \term$_1$\dots\term$_n$} where \qualid\ must have been -defined using \texttt{Function} (see Section~\ref{Function}). -Note that this tactic is only available after a {\tt Require Import FunInd}. 
- -\begin{ErrMsgs} -\item \errindex{Hypothesis {\ident} must contain at least one Function} - -\item \errindex{Cannot find inversion information for hypothesis \ident} - - This error may be raised when some inversion lemma failed to be - generated by Function. -\end{ErrMsgs} - -\begin{Variants} -\item {\tt functional inversion \num} - - This does the same thing as \texttt{intros until \num} then - \texttt{functional inversion \ident} where {\ident} is the - identifier for the last introduced hypothesis. -\item {\tt functional inversion \ident\ \qualid}\\ - {\tt functional inversion \num\ \qualid} - - If the hypothesis {\ident} (or {\num}) has a type of the form - \texttt{\qualid$_1$\ \term$_1$\dots\term$_n$\ =\ \qualid$_2$\ - \term$_{n+1}$\dots\term$_{n+m}$} where \qualid$_1$ and \qualid$_2$ - are valid candidates to functional inversion, this variant allows - choosing which {\qualid} is inverted. -\end{Variants} - - - -\subsection{\tt quote \ident} -\tacindex{quote} -\index{2-level approach} - -This kind of inversion has nothing to do with the tactic -\texttt{inversion} above. This tactic does \texttt{change (\ident\ - t)}, where \texttt{t} is a term built in order to ensure the -convertibility. In other words, it does inversion of the function -\ident. This function must be a fixpoint on a simple recursive -datatype: see~\ref{quote-examples} for the full details. - -\begin{ErrMsgs} -\item \errindex{quote: not a simple fixpoint} - - Happens when \texttt{quote} is not able to perform inversion properly. -\end{ErrMsgs} - -\begin{Variants} -\item \texttt{quote {\ident} [ \ident$_1$ \dots \ident$_n$ ]} - - All terms that are built only with \ident$_1$ \dots \ident$_n$ will be - considered by \texttt{quote} as constants rather than variables. 
-\end{Variants}
-
-% En attente d'un moyen de valoriser les fichiers de demos
-% \SeeAlso file \texttt{theories/DEMOS/DemoQuote.v} in the distribution
-
-\section{Classical tactics}
-\label{ClassicalTactics}
-
-In order to ease the proving process, a few more tactics are available when the {\tt Classical} module is loaded. Make sure to load the module using the \texttt{Require Import} command.
-
-\subsection{{\tt classical\_left} and \tt classical\_right}
-\tacindex{classical\_left}
-\tacindex{classical\_right}
-
-The tactics \texttt{classical\_left} and \texttt{classical\_right} are the analogs of \texttt{left} and \texttt{right}, but using classical logic. They can only be used for disjunctions.
-Use \texttt{classical\_left} to prove the left part of the disjunction with the assumption that the negation of the right part holds.
-Use \texttt{classical\_right} to prove the right part of the disjunction with the assumption that the negation of the left part holds.
-
-\section{Automatizing
-\label{Automatizing}}
-
-% EXISTE ENCORE ?
-%
-% \subsection{\tt Prolog [ \term$_1$ \dots\ \term$_n$ ] \num}
-% \tacindex{Prolog}\label{Prolog}
-% This tactic, implemented by Chet Murthy, is based upon the concept of
-% existential variables of Gilles Dowek, stating that resolution is a
-% kind of unification. It tries to solve the current goal using the {\tt
-% Assumption} tactic, the {\tt intro} tactic, and applying hypotheses
-% of the local context and terms of the given list {\tt [ \term$_1$
-% \dots\ \term$_n$\ ]}. It is more powerful than {\tt auto} since it
-% may apply to any theorem, even those of the form {\tt (x:A)(P x) -> Q}
-% where {\tt x} does not appear free in {\tt Q}. The maximal search
-% depth is {\tt \num}. 
-
-% \begin{ErrMsgs}
-% \item \errindex{Prolog failed}\\
-% The Prolog tactic was not able to prove the subgoal. 
-% \end{ErrMsgs} - - -%% \subsection{{\tt jp} {\em (Jprover)} -%% \tacindex{jp} -%% \label{jprover}} - -%% The tactic \texttt{jp}, due to Huang Guan-Shieng, is an experimental -%% port of the {\em Jprover}\cite{SLKN01} semi-decision procedure for -%% first-order intuitionistic logic implemented in {\em -%% NuPRL}\cite{Kre02}. - -%% The tactic \texttt{jp}, due to Huang Guan-Shieng, is an {\it -%% experimental} port of the {\em Jprover}\cite{SLKN01} semi-decision -%% procedure for first-order intuitionistic logic implemented in {\em -%% NuPRL}\cite{Kre02}. - -%% Search may optionally be bounded by a multiplicity parameter -%% indicating how many (at most) copies of a formula may be used in -%% the proof process, its absence may lead to non-termination of the tactic. - -%% %\begin{coq_eval} -%% %Variable S:Set. -%% %Variables P Q:S->Prop. -%% %Variable f:S->S. -%% %\end{coq_eval} - -%% %\begin{coq_example*} -%% %Lemma example: (exists x |P x\/Q x)->(exists x |P x)\/(exists x |Q x). -%% %jp. -%% %Qed. - -%% %Lemma example2: (forall x ,P x->P (f x))->forall x,P x->P (f(f x)). -%% %jp. -%% %Qed. -%% %\end{coq_example*} - -%% \begin{Variants} -%% \item {\tt jp $n$}\\ -%% \tacindex{jp $n$} -%% Tries the {\em Jprover} procedure with multiplicities up to $n$, -%% starting from 1. -%% \item {\tt jp}\\ -%% Tries the {\em Jprover} procedure without multiplicity bound, -%% possibly running forever. -%% \end{Variants} - -%% \begin{ErrMsgs} -%% \item \errindex{multiplicity limit reached}\\ -%% The procedure tried all multiplicities below the limit and -%% failed. Goal might be solved by increasing the multiplicity limit. -%% \item \errindex{formula is not provable}\\ -%% The procedure determined that goal was not provable in -%% intuitionistic first-order logic, no matter how big the -%% multiplicity is. 
-%% \end{ErrMsgs} - - -% \subsection[\tt Linear]{\tt Linear\tacindex{Linear}\label{Linear}} -% The tactic \texttt{Linear}, due to Jean-Christophe Filli{\^a}atre -% \cite{Fil94}, implements a decision procedure for {\em Direct -% Predicate Calculus}, that is first-order Gentzen's Sequent Calculus -% without contraction rules \cite{KeWe84,BeKe92}. Intuitively, a -% first-order goal is provable in Direct Predicate Calculus if it can be -% proved using each hypothesis at most once. - -% Unlike the previous tactics, the \texttt{Linear} tactic does not belong -% to the initial state of the system, and it must be loaded explicitly -% with the command - -% \begin{coq_example*} -% Require Linear. -% \end{coq_example*} - -% For instance, assuming that \texttt{even} and \texttt{odd} are two -% predicates on natural numbers, and \texttt{a} of type \texttt{nat}, the -% tactic \texttt{Linear} solves the following goal - -% \begin{coq_eval} -% Variables even,odd : nat -> Prop. -% Variable a:nat. -% \end{coq_eval} - -% \begin{coq_example*} -% Lemma example : (even a) -% -> ((x:nat)((even x)->(odd (S x)))) -% -> (EX y | (odd y)). -% \end{coq_example*} - -% You can find examples of the use of \texttt{Linear} in -% \texttt{theories/DEMOS/DemoLinear.v}. -% \begin{coq_eval} -% Abort. -% \end{coq_eval} - -% \begin{Variants} -% \item {\tt Linear with \ident$_1$ \dots\ \ident$_n$}\\ -% \tacindex{Linear with} -% Is equivalent to apply first {\tt generalize \ident$_1$ \dots -% \ident$_n$} (see Section~\ref{generalize}) then the \texttt{Linear} -% tactic. So one can use axioms, lemmas or hypotheses of the local -% context with \texttt{Linear} in this way. -% \end{Variants} - -% \begin{ErrMsgs} -% \item \errindex{Not provable in Direct Predicate Calculus} -% \item \errindex{Found $n$ classical proof(s) but no intuitionistic one}\\ -% The decision procedure looks actually for classical proofs of the -% goals, and then checks that they are intuitionistic. 
In that case, -% classical proofs have been found, which do not correspond to -% intuitionistic ones. -% \end{ErrMsgs} - - -\subsection{\tt btauto} -\tacindex{btauto} -\label{btauto} - -The tactic \texttt{btauto} implements a reflexive solver for boolean tautologies. It -solves goals of the form {\tt t = u} where {\tt t} and {\tt u} are constructed -over the following grammar: - -$$\mathtt{t} ::= x \mid \mathtt{true} \mid \mathtt{false}\mid \mathtt{orb\ t_1\ t_2} -\mid \mathtt{andb\ t_1\ t_2} \mid\mathtt{xorb\ t_1\ t_2} \mid\mathtt{negb\ t} -\mid\mathtt{if\ t_1\ then\ t_2\ else\ t_3} -$$ - -Whenever the formula supplied is not a tautology, it also provides a counter-example. - -Internally, it uses a system very similar to the one of the {\tt ring} tactic. - -\subsection{\tt omega} -\tacindex{omega} -\label{omega} - -The tactic \texttt{omega}, due to Pierre Cr{\'e}gut, -is an automatic decision procedure for Presburger -arithmetic. It solves quantifier-free -formulas built with \verb|~|, \verb|\/|, \verb|/\|, -\verb|->| on top of equalities, inequalities and disequalities on -both the type \texttt{nat} of natural numbers and \texttt{Z} of binary -integers. This tactic must be loaded by the command \texttt{Require Import - Omega}. See the additional documentation about \texttt{omega} -(see Chapter~\ref{OmegaChapter}). - -\subsection{{\tt ring} and \tt ring\_simplify \term$_1$ \mbox{\dots} \term$_n$} -\tacindex{ring} -\tacindex{ring\_simplify} -\comindex{Add Ring} -\comindex{Print Rings} - -The {\tt ring} tactic solves equations upon polynomial expressions of -a ring (or semi-ring) structure. It proceeds by normalizing both hand -sides of the equation (w.r.t. associativity, commutativity and -distributivity, constant propagation) and comparing syntactically the -results. - -{\tt ring\_simplify} applies the normalization procedure described -above to the terms given. 
The tactic then replaces all occurrences of
-the terms given in the conclusion of the goal by their normal
-forms. If no term is given, then the conclusion should be an equation
-and both hand sides are normalized.
-
-See Chapter~\ref{ring} for more information on the tactic and how to
-declare new ring structures. All declared ring structures can be
-printed with the {\tt Print Rings} command.
-
-\subsection{{\tt field}, {\tt field\_simplify \term$_1$ \mbox{\dots}
-    \term$_n$}, and \tt field\_simplify\_eq}
-\tacindex{field}
-\tacindex{field\_simplify}
-\tacindex{field\_simplify\_eq}
-\comindex{Add Field}
-\comindex{Print Fields}
-
-The {\tt field} tactic is built on the same ideas as {\tt ring}: this
-is a reflexive tactic that solves or simplifies equations in a field
-structure. The main idea is to reduce a field expression (which is an
-extension of ring expressions with the inverse and division
-operations) to a fraction made of two polynomial expressions.
-
-Tactic {\tt field} is used to solve subgoals, whereas {\tt
-  field\_simplify \term$_1$\dots\term$_n$} replaces the provided terms
-by their reduced fraction. {\tt field\_simplify\_eq} applies when the
-conclusion is an equation: it simplifies both hand sides and multiplies
-so as to cancel denominators. So it produces an equation without
-division nor inverse.
-
-All of these 3 tactics may generate a subgoal in order to prove that
-denominators are different from zero.
-
-See Chapter~\ref{ring} for more information on the tactic and how to
-declare new field structures. All declared field structures can be
-printed with the {\tt Print Fields} command.
-
-\Example
-\begin{coq_example*}
-Require Import Reals.
-Goal forall x y:R,
-  (x * y > 0)%R ->
-  (x * (1 / x + x / (x + y)))%R =
-  ((- 1 / y) * y * (- x * (x / (x + y)) - 1))%R.
-\end{coq_example*}
-
-\begin{coq_example}
-intros; field.
-\end{coq_example}
-
-\begin{coq_eval}
-Reset Initial. 
-
-\end{coq_eval}
-
-\SeeAlso file {\tt plugins/setoid\_ring/RealField.v} for an example of instantiation,\\
-\phantom{\SeeAlso}theory {\tt theories/Reals} for many examples of use of {\tt
-field}.
-
-\subsection{\tt fourier}
-\tacindex{fourier}
-
-This tactic written by Lo{\"\i}c Pottier solves linear inequalities on
-real numbers using Fourier's method~\cite{Fourier}. This tactic must
-be loaded by {\tt Require Import Fourier}.
-
-\Example
-\begin{coq_example*}
-Require Import Reals.
-Require Import Fourier.
-Goal forall x y:R, (x < y)%R -> (y + 1 >= x - 1)%R.
-\end{coq_example*}
-
-\begin{coq_example}
-intros; fourier.
-\end{coq_example}
-
-\begin{coq_eval}
-Reset Initial.
-\end{coq_eval}
-
-\section{Non-logical tactics}
-
-\subsection[\tt cycle \num]{\tt cycle \num\tacindex{cycle}}
-
-This tactic puts the {\num} first goals at the end of the list of
-goals. If {\num} is negative, it will put the last $\left|\num\right|$ goals at
-the beginning of the list.
-
-\Example
-\begin{coq_example*}
-Parameter P : nat -> Prop.
-Goal P 1 /\ P 2 /\ P 3 /\ P 4 /\ P 5.
-\end{coq_example*}
-\begin{coq_example}
-repeat split.
-all: cycle 2.
-all: cycle -3.
-\end{coq_example}
-
-\begin{coq_eval}
-Reset Initial.
-\end{coq_eval}
-
-\subsection[\tt swap \num$_1$ \num$_2$]{\tt swap \num$_1$ \num$_2$\tacindex{swap}}
-
-This tactic switches the position of the goals of indices $\num_1$ and $\num_2$. If either $\num_1$ or $\num_2$ is negative then goals are counted from the end of the focused goal list. Goals are indexed from $1$; there is no goal with position $0$.
-
-\Example
-\begin{coq_example*}
-Parameter P : nat -> Prop.
-Goal P 1 /\ P 2 /\ P 3 /\ P 4 /\ P 5.
-\end{coq_example*}
-\begin{coq_example}
-repeat split.
-all: swap 1 3.
-all: swap 1 -1.
-\end{coq_example}
-
-\begin{coq_eval}
-Reset Initial.
-\end{coq_eval}
-
-\subsection[\tt revgoals]{\tt revgoals\tacindex{revgoals}}
-
-This tactic reverses the list of the focused goals. 
- -\Example -\begin{coq_example*} -Parameter P : nat -> Prop. -Goal P 1 /\ P 2 /\ P 3 /\ P 4 /\ P 5. -\end{coq_example*} -\begin{coq_example} -repeat split. -all: revgoals. -\end{coq_example} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - - - -\subsection[\tt shelve]{\tt shelve\tacindex{shelve}\label{shelve}} - -This tactic moves all goals under focus to a shelf. While on the shelf, goals -will not be focused on. They can be solved by unification, or they can be called -back into focus with the command {\tt Unshelve} (Section~\ref{unshelve}). - -\begin{Variants} - \item \texttt{shelve\_unifiable}\tacindex{shelve\_unifiable} - - Shelves only the goals under focus that are mentioned in other goals. - Goals that appear in the type of other goals can be solved by unification. - -\Example -\begin{coq_example} -Goal exists n, n=0. -refine (ex_intro _ _ _). -all:shelve_unifiable. -reflexivity. -\end{coq_example} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\end{Variants} - -\subsection[\tt Unshelve]{\tt Unshelve\comindex{Unshelve}\label{unshelve}} - -This command moves all the goals on the shelf (see Section~\ref{shelve}) from the -shelf into focus, by appending them to the end of the current list of focused goals. - -\subsection[\tt give\_up]{\tt give\_up\tacindex{give\_up}} - -This tactic removes the focused goals from the proof. They are not solved, and cannot -be solved later in the proof. As the goals are not solved, the proof cannot be closed. - -The {\tt give\_up} tactic can be used while editing a proof, to choose to write the -proof script in a non-sequential order. - -\section{Simple tactic macros} -\index{Tactic macros} -\label{TacticDefinition} - -A simple example has more value than a long explanation: - -\begin{coq_example} -Ltac Solve := simpl; intros; auto. -Ltac ElimBoolRewrite b H1 H2 := - elim b; [ intros; rewrite H1; eauto | intros; rewrite H2; eauto ]. 
-
-\end{coq_example}
-
-The tactic macros are synchronized with the \Coq\ section mechanism:
-a tactic definition is deleted from the current environment
-when you close the section (see also \ref{Section})
-where it was defined. If you want a
-tactic macro defined in a module to be usable in the modules that
-require it, you should put it outside of any section.
-
-Chapter~\ref{TacticLanguage} gives examples of more complex
-user-defined tactics.
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "Reference-Manual"
-%%% TeX-master: "Reference-Manual"
-%%% End:
diff --git a/doc/refman/RefMan-tacex.tex b/doc/refman/RefMan-tacex.tex
deleted file mode 100644
index 7cdb1a5274..0000000000
--- a/doc/refman/RefMan-tacex.tex
+++ /dev/null
@@ -1,930 +0,0 @@
-\chapter[Detailed examples of tactics]{Detailed examples of tactics\label{Tactics-examples}}
-%HEVEA\cutname{tactic-examples.html}
-
-This chapter presents detailed examples of certain tactics, to
-illustrate their behavior.
-
-\section[\tt dependent induction]{\tt dependent induction\label{dependent-induction-example}}
-\def\depind{{\tt dependent induction}~}
-\def\depdestr{{\tt dependent destruction}~}
-
-The tactics \depind and \depdestr are another solution for inverting
-inductive predicate instances and potentially doing induction at the
-same time. They are based on the \texttt{BasicElim} tactic of Conor McBride which
-works by abstracting each argument of an inductive instance by a variable
-and constraining it by equalities afterwards. This way, the usual
-{\tt induction} and {\tt destruct} tactics can be applied to the
-abstracted instance and after simplification of the equalities we get
-the expected goals.
-
-The abstracting tactic is called {\tt generalize\_eqs} and it takes as
-argument an hypothesis to generalize. It uses the {\tt JMeq} datatype
-defined in {\tt Coq.Logic.JMeq}, hence we need to require it before. 
-For example, revisiting the first example of the inversion documentation above: - -\begin{coq_example*} -Require Import Coq.Logic.JMeq. -\end{coq_example*} -\begin{coq_eval} -Require Import Coq.Program.Equality. -\end{coq_eval} - -\begin{coq_eval} -Inductive Le : nat -> nat -> Set := - | LeO : forall n:nat, Le 0 n - | LeS : forall n m:nat, Le n m -> Le (S n) (S m). -Variable P : nat -> nat -> Prop. -Variable Q : forall n m:nat, Le n m -> Prop. -\end{coq_eval} - -\begin{coq_example*} -Goal forall n m:nat, Le (S n) m -> P n m. -intros n m H. -\end{coq_example*} -\begin{coq_example} -generalize_eqs H. -\end{coq_example} - -The index {\tt S n} gets abstracted by a variable here, but a -corresponding equality is added under the abstract instance so that no -information is actually lost. The goal is now almost amenable to do induction -or case analysis. One should indeed first move {\tt n} into the goal to -strengthen it before doing induction, or {\tt n} will be fixed in -the inductive hypotheses (this does not matter for case analysis). -As a rule of thumb, all the variables that appear inside constructors in -the indices of the hypothesis should be generalized. This is exactly -what the \texttt{generalize\_eqs\_vars} variant does: - -\begin{coq_eval} -Undo 1. -\end{coq_eval} -\begin{coq_example} -generalize_eqs_vars H. -induction H. -\end{coq_example} - -As the hypothesis itself did not appear in the goal, we did not need to -use an heterogeneous equality to relate the new hypothesis to the old -one (which just disappeared here). However, the tactic works just as well -in this case, e.g.: - -\begin{coq_eval} -Admitted. -\end{coq_eval} - -\begin{coq_example} -Goal forall n m (p : Le (S n) m), Q (S n) m p. -intros n m p ; generalize_eqs_vars p. -\end{coq_example} - -One drawback of this approach is that in the branches one will have to -substitute the equalities back into the instance to get the right -assumptions. 
Sometimes injection of constructors will also be needed to -recover the needed equalities. Also, some subgoals should be directly -solved because of inconsistent contexts arising from the constraints on -indexes. The nice thing is that we can make a tactic based on -discriminate, injection and variants of substitution to automatically -do such simplifications (which may involve the K axiom). -This is what the {\tt simplify\_dep\_elim} tactic from -{\tt Coq.Program.Equality} does. For example, we might simplify the -previous goals considerably: -% \begin{coq_eval} -% Abort. -% Goal forall n m:nat, Le (S n) m -> P n m. -% intros n m H ; generalize_eqs_vars H. -% \end{coq_eval} - -\begin{coq_example} -induction p ; simplify_dep_elim. -\end{coq_example} - -The higher-order tactic {\tt do\_depind} defined in {\tt - Coq.Program.Equality} takes a tactic and combines the -building blocks we have seen with it: generalizing by equalities -calling the given tactic with the -generalized induction hypothesis as argument and cleaning the subgoals -with respect to equalities. Its most important instantiations are -\depind and \depdestr that do induction or simply case analysis on the -generalized hypothesis. For example we can redo what we've done manually -with \depdestr: - -\begin{coq_eval} -Abort. -\end{coq_eval} -\begin{coq_example*} -Require Import Coq.Program.Equality. -Lemma ex : forall n m:nat, Le (S n) m -> P n m. -intros n m H. -\end{coq_example*} -\begin{coq_example} -dependent destruction H. -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} - -This gives essentially the same result as inversion. Now if the -destructed hypothesis actually appeared in the goal, the tactic would -still be able to invert it, contrary to {\tt dependent - inversion}. Consider the following example on vectors: - -\begin{coq_example*} -Require Import Coq.Program.Equality. -Set Implicit Arguments. -Variable A : Set. 
-Inductive vector : nat -> Type := -| vnil : vector 0 -| vcons : A -> forall n, vector n -> vector (S n). -Goal forall n, forall v : vector (S n), - exists v' : vector n, exists a : A, v = vcons a v'. - intros n v. -\end{coq_example*} -\begin{coq_example} - dependent destruction v. -\end{coq_example} -\begin{coq_eval} -Abort. -\end{coq_eval} - -In this case, the {\tt v} variable can be replaced in the goal by the -generalized hypothesis only when it has a type of the form {\tt vector - (S n)}, that is only in the second case of the {\tt destruct}. The -first one is dismissed because {\tt S n <> 0}. - -\subsection{A larger example} - -Let's see how the technique works with {\tt induction} on inductive -predicates on a real example. We will develop an example application to the -theory of simply-typed lambda-calculus formalized in a dependently-typed style: - -\begin{coq_example*} -Inductive type : Type := -| base : type -| arrow : type -> type -> type. -Notation " t --> t' " := (arrow t t') (at level 20, t' at next level). -Inductive ctx : Type := -| empty : ctx -| snoc : ctx -> type -> ctx. -Notation " G , tau " := (snoc G tau) (at level 20, tau at next level). -Fixpoint conc (G D : ctx) : ctx := - match D with - | empty => G - | snoc D' x => snoc (conc G D') x - end. -Notation " G ; D " := (conc G D) (at level 20). -Inductive term : ctx -> type -> Type := -| ax : forall G tau, term (G, tau) tau -| weak : forall G tau, - term G tau -> forall tau', term (G, tau') tau -| abs : forall G tau tau', - term (G , tau) tau' -> term G (tau --> tau') -| app : forall G tau tau', - term G (tau --> tau') -> term G tau -> term G tau'. -\end{coq_example*} - -We have defined types and contexts which are snoc-lists of types. We -also have a {\tt conc} operation that concatenates two contexts. -The {\tt term} datatype represents in fact the possible typing -derivations of the calculus, which are isomorphic to the well-typed -terms, hence the name. 
A term is either an application of:
-\begin{itemize}
-\item the axiom rule to type a reference to the first variable in a context,
-\item the weakening rule to type an object in a larger context
-\item the abstraction or lambda rule to type a function
-\item the application to type an application of a function to an argument
-\end{itemize}
-
-Once we have this datatype we want to do proofs on it, like weakening:
-
-\begin{coq_example*}
-Lemma weakening : forall G D tau, term (G ; D) tau ->
-  forall tau', term (G , tau' ; D) tau.
-\end{coq_example*}
-\begin{coq_eval}
-  Abort.
-\end{coq_eval}
-
-The problem here is that we can't just use {\tt induction} on the typing
-derivation because it will forget about the {\tt G ; D} constraint
-appearing in the instance. A solution would be to rewrite the goal as:
-\begin{coq_example*}
-Lemma weakening' : forall G' tau, term G' tau ->
-  forall G D, (G ; D) = G' ->
-  forall tau', term (G, tau' ; D) tau.
-\end{coq_example*}
-\begin{coq_eval}
-  Abort.
-\end{coq_eval}
-
-With this proper separation of the index from the instance and the right
-induction loading (putting {\tt G} and {\tt D} after the inducted-on
-hypothesis), the proof will go through, but it is a very tedious
-process. One is also forced to make a wrapper lemma to get back the
-more natural statement. The \depind tactic alleviates this trouble by
-doing all of this plumbing of generalizing and substituting back automatically.
-Indeed we can simply write:
-
-\begin{coq_example*}
-Require Import Coq.Program.Tactics.
-Lemma weakening : forall G D tau, term (G ; D) tau ->
-  forall tau', term (G , tau' ; D) tau.
-Proof with simpl in * ; simpl_depind ; auto.
-  intros G D tau H. dependent induction H generalizing G D ; intros.
-\end{coq_example*}
-
-This call to \depind has an additional argument, which is a list of
-variables appearing in the instance that should be generalized in the
-goal, so that they can vary in the induction hypotheses. 
By default, all -variables appearing inside constructors (except in a parameter position) -of the instantiated hypothesis will be generalized automatically but -one can always give the list explicitly. - -\begin{coq_example} - Show. -\end{coq_example} - -The {\tt simpl\_depind} tactic includes an automatic tactic that tries -to simplify equalities appearing at the beginning of induction -hypotheses, generally using trivial applications of -reflexivity. In cases where the equality is not between constructor -forms though, one must help the automation by giving -some arguments, using the {\tt specialize} tactic for example. - -\begin{coq_example*} -destruct D... apply weak ; apply ax. apply ax. -destruct D... -\end{coq_example*} -\begin{coq_example} -Show. -\end{coq_example} -\begin{coq_example} - specialize (IHterm G0 empty eq_refl). -\end{coq_example} - -Once the induction hypothesis has been narrowed to the right equality, -it can be used directly. - -\begin{coq_example} - apply weak, IHterm. -\end{coq_example} - -If there is an easy first-order solution to these equations as in this subgoal, the -{\tt specialize\_eqs} tactic can be used instead of giving explicit proof -terms: - -\begin{coq_example} - specialize_eqs IHterm. -\end{coq_example} -This concludes our example. -\SeeAlso The induction \ref{elim}, case \ref{case} and inversion \ref{inversion} tactics. - -\section[\tt autorewrite]{\tt autorewrite\label{autorewrite-example}} - -Here are two examples of {\tt autorewrite} use. The first one ({\em Ackermann -function}) shows actually a quite basic use where there is no conditional -rewriting. The second one ({\em Mac Carthy function}) involves conditional -rewritings and shows how to deal with them using the optional tactic of the -{\tt Hint~Rewrite} command. - -\firstexample -\example{Ackermann function} -%Here is a basic use of {\tt AutoRewrite} with the Ackermann function: - -\begin{coq_example*} -Reset Initial. -Require Import Arith. 
-Variable Ack : - nat -> nat -> nat. -Axiom Ack0 : - forall m:nat, Ack 0 m = S m. -Axiom Ack1 : forall n:nat, Ack (S n) 0 = Ack n 1. -Axiom Ack2 : forall n m:nat, Ack (S n) (S m) = Ack n (Ack (S n) m). -\end{coq_example*} - -\begin{coq_example} -Hint Rewrite Ack0 Ack1 Ack2 : base0. -Lemma ResAck0 : - Ack 3 2 = 29. -autorewrite with base0 using try reflexivity. -\end{coq_example} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\example{Mac Carthy function} -%The Mac Carthy function shows a more complex case: - -\begin{coq_example*} -Require Import Omega. -Variable g : - nat -> nat -> nat. -Axiom g0 : - forall m:nat, g 0 m = m. -Axiom - g1 : - forall n m:nat, - (n > 0) -> (m > 100) -> g n m = g (pred n) (m - 10). -Axiom - g2 : - forall n m:nat, - (n > 0) -> (m <= 100) -> g n m = g (S n) (m + 11). -\end{coq_example*} - -\begin{coq_example} -Hint Rewrite g0 g1 g2 using omega : base1. -Lemma Resg0 : - g 1 110 = 100. -autorewrite with base1 using reflexivity || simpl. -\end{coq_example} - -\begin{coq_eval} -Abort. -\end{coq_eval} - -\begin{coq_example} -Lemma Resg1 : g 1 95 = 91. -autorewrite with base1 using reflexivity || simpl. -\end{coq_example} - -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -\section[\tt quote]{\tt quote\tacindex{quote} -\label{quote-examples}} - -The tactic \texttt{quote} allows using Barendregt's so-called -2-level approach without writing any ML code. Suppose you have a -language \texttt{L} of -'abstract terms' and a type \texttt{A} of 'concrete terms' -and a function \texttt{f : L -> A}. If \texttt{L} is a simple -inductive datatype and \texttt{f} a simple fixpoint, \texttt{quote f} -will replace the head of current goal by a convertible term of the form -\texttt{(f t)}. \texttt{L} must have a constructor of type: \texttt{A - -> L}. - -Here is an example: - -\begin{coq_example} -Require Import Quote. -Parameters A B C : Prop. 
-Inductive formula : Type := - | f_and : formula -> formula -> formula (* binary constructor *) - | f_or : formula -> formula -> formula - | f_not : formula -> formula (* unary constructor *) - | f_true : formula (* 0-ary constructor *) - | f_const : Prop -> formula (* constructor for constants *). -Fixpoint interp_f (f: - formula) : Prop := - match f with - | f_and f1 f2 => interp_f f1 /\ interp_f f2 - | f_or f1 f2 => interp_f f1 \/ interp_f f2 - | f_not f1 => ~ interp_f f1 - | f_true => True - | f_const c => c - end. -Goal A /\ (A \/ True) /\ ~ B /\ (A <-> A). -quote interp_f. -\end{coq_example} - -The algorithm to perform this inversion is: try to match the -term with right-hand sides expression of \texttt{f}. If there is a -match, apply the corresponding left-hand side and call yourself -recursively on sub-terms. If there is no match, we are at a leaf: -return the corresponding constructor (here \texttt{f\_const}) applied -to the term. - -\begin{ErrMsgs} -\item \errindex{quote: not a simple fixpoint} \\ - Happens when \texttt{quote} is not able to perform inversion properly. -\end{ErrMsgs} - -\subsection{Introducing variables map} - -The normal use of \texttt{quote} is to make proofs by reflection: one -defines a function \texttt{simplify : formula -> formula} and proves a -theorem \texttt{simplify\_ok: (f:formula)(interp\_f (simplify f)) -> - (interp\_f f)}. Then, one can simplify formulas by doing: -\begin{verbatim} - quote interp_f. - apply simplify_ok. - compute. -\end{verbatim} -But there is a problem with leafs: in the example above one cannot -write a function that implements, for example, the logical simplifications -$A \land A \ra A$ or $A \land \lnot A \ra \texttt{False}$. This is -because the \Prop{} is impredicative. - -It is better to use that type of formulas: - -\begin{coq_eval} -Reset formula. 
-\end{coq_eval} -\begin{coq_example} -Inductive formula : Set := - | f_and : formula -> formula -> formula - | f_or : formula -> formula -> formula - | f_not : formula -> formula - | f_true : formula - | f_atom : index -> formula. -\end{coq_example*} - -\texttt{index} is defined in module \texttt{quote}. Equality on that -type is decidable so we are able to simplify $A \land A$ into $A$ at -the abstract level. - -When there are variables, there are bindings, and \texttt{quote} -provides also a type \texttt{(varmap A)} of bindings from -\texttt{index} to any set \texttt{A}, and a function -\texttt{varmap\_find} to search in such maps. The interpretation -function has now another argument, a variables map: - -\begin{coq_example} -Fixpoint interp_f (vm: - varmap Prop) (f:formula) {struct f} : Prop := - match f with - | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2 - | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2 - | f_not f1 => ~ interp_f vm f1 - | f_true => True - | f_atom i => varmap_find True i vm - end. -\end{coq_example} - -\noindent\texttt{quote} handles this second case properly: - -\begin{coq_example} -Goal A /\ (B \/ A) /\ (A \/ ~ B). -quote interp_f. -\end{coq_example} - -It builds \texttt{vm} and \texttt{t} such that \texttt{(f vm t)} is -convertible with the conclusion of current goal. - -\subsection{Combining variables and constants} - -One can have both variables and constants in abstracts terms; that is -the case, for example, for the \texttt{ring} tactic (chapter -\ref{ring}). Then one must provide to \texttt{quote} a list of -\emph{constructors of constants}. For example, if the list is -\texttt{[O S]} then closed natural numbers will be considered as -constants and other terms as variables. - -Example: - -\begin{coq_eval} -Reset formula. 
-\end{coq_eval} -\begin{coq_example*} -Inductive formula : Type := - | f_and : formula -> formula -> formula - | f_or : formula -> formula -> formula - | f_not : formula -> formula - | f_true : formula - | f_const : Prop -> formula (* constructor for constants *) - | f_atom : index -> formula. -Fixpoint interp_f - (vm: (* constructor for variables *) - varmap Prop) (f:formula) {struct f} : Prop := - match f with - | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2 - | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2 - | f_not f1 => ~ interp_f vm f1 - | f_true => True - | f_const c => c - | f_atom i => varmap_find True i vm - end. -Goal -A /\ (A \/ True) /\ ~ B /\ (C <-> C). -\end{coq_example*} - -\begin{coq_example} -quote interp_f [ A B ]. -Undo. - quote interp_f [ B C iff ]. -\end{coq_example} - -\Warning Since function inversion -is undecidable in general case, don't expect miracles from it! - -\begin{Variants} - -\item {\tt quote {\ident} in {\term} using {\tac}} - - \tac\ must be a functional tactic (starting with {\tt fun x =>}) - and will be called with the quoted version of \term\ according to - \ident. - -\item {\tt quote {\ident} [ \ident$_1$ \dots\ \ident$_n$ ] in {\term} using {\tac}} - - Same as above, but will use \ident$_1$, \dots, \ident$_n$ to - chose which subterms are constants (see above). - -\end{Variants} - -% \SeeAlso file \texttt{theories/DEMOS/DemoQuote.v} - -\SeeAlso comments of source file \texttt{plugins/quote/quote.ml} - -\SeeAlso the \texttt{ring} tactic (Chapter~\ref{ring}) - - - -\section{Using the tactical language} - -\subsection{About the cardinality of the set of natural numbers} - -A first example which shows how to use the pattern matching over the proof -contexts is the proof that natural numbers have more than two elements. The -proof of such a lemma can be done as %shown on Figure~\ref{cnatltac}. -follows: -%\begin{figure} -%\begin{centerframe} -\begin{coq_eval} -Reset Initial. -Require Import Arith. -Require Import List. 
-\end{coq_eval} -\begin{coq_example*} -Lemma card_nat : - ~ (exists x : nat, exists y : nat, forall z:nat, x = z \/ y = z). -Proof. -red; intros (x, (y, Hy)). -elim (Hy 0); elim (Hy 1); elim (Hy 2); intros; - match goal with - | [_:(?a = ?b),_:(?a = ?c) |- _ ] => - cut (b = c); [ discriminate | transitivity a; auto ] - end. -Qed. -\end{coq_example*} -%\end{centerframe} -%\caption{A proof on cardinality of natural numbers} -%\label{cnatltac} -%\end{figure} - -We can notice that all the (very similar) cases coming from the three -eliminations (with three distinct natural numbers) are successfully solved by -a {\tt match goal} structure and, in particular, with only one pattern (use -of non-linear matching). - -\subsection{Permutation on closed lists} - -Another more complex example is the problem of permutation on closed lists. The -aim is to show that a closed list is a permutation of another one. - -First, we define the permutation predicate as shown in table~\ref{permutpred}. - -\begin{figure} -\begin{centerframe} -\begin{coq_example*} -Section Sort. -Variable A : Set. -Inductive permut : list A -> list A -> Prop := - | permut_refl : forall l, permut l l - | permut_cons : - forall a l0 l1, permut l0 l1 -> permut (a :: l0) (a :: l1) - | permut_append : forall a l, permut (a :: l) (l ++ a :: nil) - | permut_trans : - forall l0 l1 l2, permut l0 l1 -> permut l1 l2 -> permut l0 l2. -End Sort. -\end{coq_example*} -\end{centerframe} -\caption{Definition of the permutation predicate} -\label{permutpred} -\end{figure} - -A more complex example is the problem of permutation on closed lists. -The aim is to show that a closed list is a permutation of another one. -First, we define the permutation predicate as shown on -Figure~\ref{permutpred}. 
- -\begin{figure} -\begin{centerframe} -\begin{coq_example} -Ltac Permut n := - match goal with - | |- (permut _ ?l ?l) => apply permut_refl - | |- (permut _ (?a :: ?l1) (?a :: ?l2)) => - let newn := eval compute in (length l1) in - (apply permut_cons; Permut newn) - | |- (permut ?A (?a :: ?l1) ?l2) => - match eval compute in n with - | 1 => fail - | _ => - let l1' := constr:(l1 ++ a :: nil) in - (apply (permut_trans A (a :: l1) l1' l2); - [ apply permut_append | compute; Permut (pred n) ]) - end - end. -Ltac PermutProve := - match goal with - | |- (permut _ ?l1 ?l2) => - match eval compute in (length l1 = length l2) with - | (?n = ?n) => Permut n - end - end. -\end{coq_example} -\end{centerframe} -\caption{Permutation tactic} -\label{permutltac} -\end{figure} - -Next, we can write naturally the tactic and the result can be seen on -Figure~\ref{permutltac}. We can notice that we use two toplevel -definitions {\tt PermutProve} and {\tt Permut}. The function to be -called is {\tt PermutProve} which computes the lengths of the two -lists and calls {\tt Permut} with the length if the two lists have the -same length. {\tt Permut} works as expected. If the two lists are -equal, it concludes. Otherwise, if the lists have identical first -elements, it applies {\tt Permut} on the tail of the lists. Finally, -if the lists have different first elements, it puts the first element -of one of the lists (here the second one which appears in the {\tt - permut} predicate) at the end if that is possible, i.e., if the new -first element has been at this place previously. To verify that all -rotations have been done for a list, we use the length of the list as -an argument for {\tt Permut} and this length is decremented for each -rotation down to, but not including, 1 because for a list of length -$n$, we can make exactly $n-1$ rotations to generate at most $n$ -distinct lists. Here, it must be noticed that we use the natural -numbers of {\Coq} for the rotation counter. 
On Figure~\ref{ltac}, we -can see that it is possible to use usual natural numbers but they are -only used as arguments for primitive tactics and they cannot be -handled, in particular, we cannot make computations with them. So, a -natural choice is to use {\Coq} data structures so that {\Coq} makes -the computations (reductions) by {\tt eval compute in} and we can get -the terms back by {\tt match}. - -With {\tt PermutProve}, we can now prove lemmas as -% shown on Figure~\ref{permutlem}. -follows: -%\begin{figure} -%\begin{centerframe} - -\begin{coq_example*} -Lemma permut_ex1 : - permut nat (1 :: 2 :: 3 :: nil) (3 :: 2 :: 1 :: nil). -Proof. PermutProve. Qed. -Lemma permut_ex2 : - permut nat - (0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 :: nil) - (0 :: 2 :: 4 :: 6 :: 8 :: 9 :: 7 :: 5 :: 3 :: 1 :: nil). -Proof. PermutProve. Qed. -\end{coq_example*} -%\end{centerframe} -%\caption{Examples of {\tt PermutProve} use} -%\label{permutlem} -%\end{figure} - - -\subsection{Deciding intuitionistic propositional logic} - -\begin{figure}[b] -\begin{centerframe} -\begin{coq_example} -Ltac Axioms := - match goal with - | |- True => trivial - | _:False |- _ => elimtype False; assumption - | _:?A |- ?A => auto - end. 
-\end{coq_example} -\end{centerframe} -\caption{Deciding intuitionistic propositions (1)} -\label{tautoltaca} -\end{figure} - - -\begin{figure} -\begin{centerframe} -\begin{coq_example} -Ltac DSimplif := - repeat - (intros; - match goal with - | id:(~ _) |- _ => red in id - | id:(_ /\ _) |- _ => - elim id; do 2 intro; clear id - | id:(_ \/ _) |- _ => - elim id; intro; clear id - | id:(?A /\ ?B -> ?C) |- _ => - cut (A -> B -> C); - [ intro | intros; apply id; split; assumption ] - | id:(?A \/ ?B -> ?C) |- _ => - cut (B -> C); - [ cut (A -> C); - [ intros; clear id - | intro; apply id; left; assumption ] - | intro; apply id; right; assumption ] - | id0:(?A -> ?B),id1:?A |- _ => - cut B; [ intro; clear id0 | apply id0; assumption ] - | |- (_ /\ _) => split - | |- (~ _) => red - end). -Ltac TautoProp := - DSimplif; - Axioms || - match goal with - | id:((?A -> ?B) -> ?C) |- _ => - cut (B -> C); - [ intro; cut (A -> B); - [ intro; cut C; - [ intro; clear id | apply id; assumption ] - | clear id ] - | intro; apply id; intro; assumption ]; TautoProp - | id:(~ ?A -> ?B) |- _ => - cut (False -> B); - [ intro; cut (A -> False); - [ intro; cut B; - [ intro; clear id | apply id; assumption ] - | clear id ] - | intro; apply id; red; intro; assumption ]; TautoProp - | |- (_ \/ _) => (left; TautoProp) || (right; TautoProp) - end. -\end{coq_example} -\end{centerframe} -\caption{Deciding intuitionistic propositions (2)} -\label{tautoltacb} -\end{figure} - -The pattern matching on goals allows a complete and so a powerful -backtracking when returning tactic values. An interesting application -is the problem of deciding intuitionistic propositional logic. -Considering the contraction-free sequent calculi {\tt LJT*} of -Roy~Dyckhoff (\cite{Dyc92}), it is quite natural to code such a tactic -using the tactic language as shown on Figures~\ref{tautoltaca} -and~\ref{tautoltacb}. The tactic {\tt Axioms} tries to conclude using -usual axioms. 
The tactic {\tt DSimplif} applies all the reversible -rules of Dyckhoff's system. Finally, the tactic {\tt TautoProp} (the -main tactic to be called) simplifies with {\tt DSimplif}, tries to -conclude with {\tt Axioms} and tries several paths using the -backtracking rules (one of the four Dyckhoff's rules for the left -implication to get rid of the contraction and the right or). - -For example, with {\tt TautoProp}, we can prove tautologies like - those: -% on Figure~\ref{tautolem}. -%\begin{figure}[tbp] -%\begin{centerframe} -\begin{coq_example*} -Lemma tauto_ex1 : forall A B:Prop, A /\ B -> A \/ B. -Proof. TautoProp. Qed. -Lemma tauto_ex2 : - forall A B:Prop, (~ ~ B -> B) -> (A -> B) -> ~ ~ A -> B. -Proof. TautoProp. Qed. -\end{coq_example*} -%\end{centerframe} -%\caption{Proofs of tautologies with {\tt TautoProp}} -%\label{tautolem} -%\end{figure} - -\subsection{Deciding type isomorphisms} - -A more tricky problem is to decide equalities between types and modulo -isomorphisms. Here, we choose to use the isomorphisms of the simply typed -$\lb{}$-calculus with Cartesian product and $unit$ type (see, for example, -\cite{RC95}). The axioms of this $\lb{}$-calculus are given by -table~\ref{isosax}. - -\begin{figure} -\begin{centerframe} -\begin{coq_eval} -Reset Initial. -\end{coq_eval} -\begin{coq_example*} -Open Scope type_scope. -Section Iso_axioms. -Variables A B C : Set. -Axiom Com : A * B = B * A. -Axiom Ass : A * (B * C) = A * B * C. -Axiom Cur : (A * B -> C) = (A -> B -> C). -Axiom Dis : (A -> B * C) = (A -> B) * (A -> C). -Axiom P_unit : A * unit = A. -Axiom AR_unit : (A -> unit) = unit. -Axiom AL_unit : (unit -> A) = A. -Lemma Cons : B = C -> A * B = A * C. -Proof. -intro Heq; rewrite Heq; reflexivity. -Qed. -End Iso_axioms. -\end{coq_example*} -\end{centerframe} -\caption{Type isomorphism axioms} -\label{isosax} -\end{figure} - -A more tricky problem is to decide equalities between types and modulo -isomorphisms. 
Here, we choose to use the isomorphisms of the simply typed -$\lb{}$-calculus with Cartesian product and $unit$ type (see, for example, -\cite{RC95}). The axioms of this $\lb{}$-calculus are given on -Figure~\ref{isosax}. - -\begin{figure}[ht] -\begin{centerframe} -\begin{coq_example} -Ltac DSimplif trm := - match trm with - | (?A * ?B * ?C) => - rewrite <- (Ass A B C); try MainSimplif - | (?A * ?B -> ?C) => - rewrite (Cur A B C); try MainSimplif - | (?A -> ?B * ?C) => - rewrite (Dis A B C); try MainSimplif - | (?A * unit) => - rewrite (P_unit A); try MainSimplif - | (unit * ?B) => - rewrite (Com unit B); try MainSimplif - | (?A -> unit) => - rewrite (AR_unit A); try MainSimplif - | (unit -> ?B) => - rewrite (AL_unit B); try MainSimplif - | (?A * ?B) => - (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) - | (?A -> ?B) => - (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) - end - with MainSimplif := - match goal with - | |- (?A = ?B) => try DSimplif A; try DSimplif B - end. -Ltac Length trm := - match trm with - | (_ * ?B) => let succ := Length B in constr:(S succ) - | _ => constr:(1) - end. -Ltac assoc := repeat rewrite <- Ass. -\end{coq_example} -\end{centerframe} -\caption{Type isomorphism tactic (1)} -\label{isosltac1} -\end{figure} - -\begin{figure}[ht] -\begin{centerframe} -\begin{coq_example} -Ltac DoCompare n := - match goal with - | [ |- (?A = ?A) ] => reflexivity - | [ |- (?A * ?B = ?A * ?C) ] => - apply Cons; let newn := Length B in - DoCompare newn - | [ |- (?A * ?B = ?C) ] => - match eval compute in n with - | 1 => fail - | _ => - pattern (A * B) at 1; rewrite Com; assoc; DoCompare (pred n) - end - end. -Ltac CompareStruct := - match goal with - | [ |- (?A = ?B) ] => - let l1 := Length A - with l2 := Length B in - match eval compute in (l1 = l2) with - | (?n = ?n) => DoCompare n - end - end. -Ltac IsoProve := MainSimplif; CompareStruct. 
-\end{coq_example} -\end{centerframe} -\caption{Type isomorphism tactic (2)} -\label{isosltac2} -\end{figure} - -The tactic to judge equalities modulo this axiomatization can be written as -shown on Figures~\ref{isosltac1} and~\ref{isosltac2}. The algorithm is quite -simple. Types are reduced using axioms that can be oriented (this done by {\tt -MainSimplif}). The normal forms are sequences of Cartesian -products without Cartesian product in the left component. These normal forms -are then compared modulo permutation of the components (this is done by {\tt -CompareStruct}). The main tactic to be called and realizing this algorithm is -{\tt IsoProve}. - -% Figure~\ref{isoslem} gives -Here are examples of what can be solved by {\tt IsoProve}. -%\begin{figure}[ht] -%\begin{centerframe} -\begin{coq_example*} -Lemma isos_ex1 : - forall A B:Set, A * unit * B = B * (unit * A). -Proof. -intros; IsoProve. -Qed. - -Lemma isos_ex2 : - forall A B C:Set, - (A * unit -> B * (C * unit)) = - (A * unit -> (C -> unit) * C) * (unit -> A -> B). -Proof. -intros; IsoProve. -Qed. 
-\end{coq_example*} -%\end{centerframe} -%\caption{Type equalities solved by {\tt IsoProve}} -%\label{isoslem} -%\end{figure} - -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "Reference-Manual" -%%% End: diff --git a/doc/refman/Reference-Manual.tex b/doc/refman/Reference-Manual.tex index fc1c01cf24..86f123322c 100644 --- a/doc/refman/Reference-Manual.tex +++ b/doc/refman/Reference-Manual.tex @@ -85,8 +85,6 @@ Options A and B of the licence are {\em not} elected.} %END LATEX %\defaultheaders -\include{RefMan-int}% Introduction -\include{RefMan-pre}% Credits %BEGIN LATEX \tableofcontents @@ -97,45 +95,30 @@ Options A and B of the licence are {\em not} elected.} \defaultheaders %END LATEX \include{RefMan-gal.v}% Gallina -\include{RefMan-ext.v}% Gallina extensions -\include{RefMan-lib.v}% The coq library -\include{RefMan-cic.v}% The Calculus of Constructions -\include{RefMan-modr}% The module system \part{The proof engine} \include{RefMan-oth.v}% Vernacular commands \include{RefMan-pro.v}% Proof handling -\include{RefMan-tac.v}% Tactics and tacticals \include{RefMan-ltac.v}% Writing tactics -\include{RefMan-tacex.v}% Detailed Examples of tactics \lstset{language=SSR} \lstset{moredelim=[is][]{|*}{*|}} \lstset{moredelim=*[is][\itshape\rmfamily]{/*}{*/}} -\include{RefMan-ssr} \part{User extensions} -\include{RefMan-syn.v}% The Syntax and the Grammar commands %%SUPPRIME \include{RefMan-tus.v}% Writing tactics -\include{RefMan-sch.v}% The Scheme commands \part{Practical tools} -\include{RefMan-com}% The coq commands (coqc coqtop) \include{RefMan-uti}% utilities (gallina, do_Makefile, etc) -\include{RefMan-ide}% Coq IDE %BEGIN LATEX \RefManCutCommand{BEGINADDENDUM=\thepage} %END LATEX \part{Addendum to the Reference Manual} \include{AddRefMan-pre}% -\include{Cases.v}% \include{Coercion.v}% -\include{CanonicalStructures.v}% \include{Classes.v}% -\include{Omega.v}% -\include{Micromega.v} \include{Extraction.v}% \include{Program.v}% \include{Polynom.v}% = Ring diff 
--git a/doc/sphinx/_static/coqide-queries.png b/doc/sphinx/_static/coqide-queries.png Binary files differnew file mode 100644 index 0000000000..7a46ac4e68 --- /dev/null +++ b/doc/sphinx/_static/coqide-queries.png diff --git a/doc/sphinx/_static/coqide.png b/doc/sphinx/_static/coqide.png Binary files differnew file mode 100644 index 0000000000..e300401c9f --- /dev/null +++ b/doc/sphinx/_static/coqide.png diff --git a/doc/sphinx/_static/notations.css b/doc/sphinx/_static/notations.css index 1ae7a7cd7f..9b7b826d58 100644 --- a/doc/sphinx/_static/notations.css +++ b/doc/sphinx/_static/notations.css @@ -158,11 +158,6 @@ dt > .property { color: #FFFFFF; } -/* FIXME: Specific to the RTD theme */ -a:visited { - color: #2980B9; -} - /* Pygments for Coq is confused by ‘…’ */ code span.error { background: inherit !important; diff --git a/doc/sphinx/addendum/canonical-structures.rst b/doc/sphinx/addendum/canonical-structures.rst new file mode 100644 index 0000000000..6843e9eaa1 --- /dev/null +++ b/doc/sphinx/addendum/canonical-structures.rst @@ -0,0 +1,435 @@ +.. include:: ../replaces.rst +.. _canonicalstructures: + +Canonical Structures +====================== + +:Authors: Assia Mahboubi and Enrico Tassi + +This chapter explains the basics of Canonical Structure and how they can be used +to overload notations and build a hierarchy of algebraic structures. The +examples are taken from :cite:`CSwcu`. We invite the interested reader to refer +to this paper for all the details that are omitted here for brevity. The +interested reader shall also find in :cite:`CSlessadhoc` a detailed description +of another, complementary, use of Canonical Structures: advanced proof search. +This latter papers also presents many techniques one can employ to tune the +inference of Canonical Structures. + + +Notation overloading +------------------------- + +We build an infix notation == for a comparison predicate. 
Such +notation will be overloaded, and its meaning will depend on the types +of the terms that are compared. + +.. coqtop:: all + + Module EQ. + Record class (T : Type) := Class { cmp : T -> T -> Prop }. + Structure type := Pack { obj : Type; class_of : class obj }. + Definition op (e : type) : obj e -> obj e -> Prop := + let 'Pack _ (Class _ the_cmp) := e in the_cmp. + Check op. + Arguments op {e} x y : simpl never. + Arguments Class {T} cmp. + Module theory. + Notation "x == y" := (op x y) (at level 70). + End theory. + End EQ. + +We use Coq modules as name spaces. This allows us to follow the same +pattern and naming convention for the rest of the chapter. The base +name space contains the definitions of the algebraic structure. To +keep the example small, the algebraic structure ``EQ.type`` we are +defining is very simplistic, and characterizes terms on which a binary +relation is defined, without requiring such relation to validate any +property. The inner theory module contains the overloaded notation ``==`` +and will eventually contain lemmas holding on all the instances of the +algebraic structure (in this case there are no lemmas). + +Note that in practice the user may want to declare ``EQ.obj`` as a +coercion, but we will not do that here. + +The following line tests that, when we assume a type ``e`` that is in +theEQ class, then we can relates two of its objects with ``==``. + +.. coqtop:: all + + Import EQ.theory. + Check forall (e : EQ.type) (a b : EQ.obj e), a == b. + +Still, no concrete type is in the ``EQ`` class. + +.. coqtop:: all + + Fail Check 3 == 3. + +We amend that by equipping ``nat`` with a comparison relation. + +.. coqtop:: all + + Definition nat_eq (x y : nat) := Nat.compare x y = Eq. + Definition nat_EQcl : EQ.class nat := EQ.Class nat_eq. + Canonical Structure nat_EQty : EQ.type := EQ.Pack nat nat_EQcl. + Check 3 == 3. + Eval compute in 3 == 4. 
+
+This last test shows that |Coq| is now not only able to typecheck ``3 == 3``,
+but also that the infix relation was bound to the ``nat_eq`` relation.
+This relation is selected whenever ``==`` is used on terms of type nat.
+This can be read in the line declaring the canonical structure
+``nat_EQty``, where the first argument to ``Pack`` is the key and its second
+argument a group of canonical values associated to the key. In this
+case we associate to nat only one canonical value (since its class,
+``nat_EQcl`` has just one member). The use of the projection ``op`` requires
+its argument to be in the class ``EQ``, and uses such a member (function)
+to actually compare its arguments.
+
+Similarly, we could equip any other type with a comparison relation,
+and use the ``==`` notation on terms of this type.
+
+
+Derived Canonical Structures
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We know how to use ``==`` on base types, like ``nat``, ``bool``, ``Z``. Here we show
+how to deal with type constructors, i.e. how to make the following
+example work:
+
+
+.. coqtop:: all
+
+   Fail Check forall (e : EQ.type) (a b : EQ.obj e), (a, b) == (a, b).
+
+The error message tells us that |Coq| has no idea how to compare
+pairs of objects. The following construction tells |Coq| exactly
+how to do that.
+
+.. coqtop:: all
+
+   Definition pair_eq (e1 e2 : EQ.type) (x y : EQ.obj e1 * EQ.obj e2) :=
+     fst x == fst y /\ snd x == snd y.
+
+   Definition pair_EQcl e1 e2 := EQ.Class (pair_eq e1 e2).
+
+   Canonical Structure pair_EQty (e1 e2 : EQ.type) : EQ.type :=
+     EQ.Pack (EQ.obj e1 * EQ.obj e2) (pair_EQcl e1 e2).
+
+   Check forall (e : EQ.type) (a b : EQ.obj e), (a, b) == (a, b).
+
+   Check forall n m : nat, (3, 4) == (n, m).
+
+Thanks to the ``pair_EQty`` declaration, |Coq| is able to build a comparison
+relation for pairs whenever it is able to build a comparison relation
+for each component of the pair.
The declaration associates to the key ``*`` +(the type constructor of pairs) the canonical comparison +relation ``pair_eq`` whenever the type constructor ``*`` is applied to two +types being themselves in the ``EQ`` class. + +Hierarchy of structures +---------------------------- + +To get to an interesting example we need another base class to be +available. We choose the class of types that are equipped with an +order relation, to which we associate the infix ``<=`` notation. + +.. coqtop:: all + + Module LE. + + Record class T := Class { cmp : T -> T -> Prop }. + + Structure type := Pack { obj : Type; class_of : class obj }. + + Definition op (e : type) : obj e -> obj e -> Prop := + let 'Pack _ (Class _ f) := e in f. + + Arguments op {_} x y : simpl never. + + Arguments Class {T} cmp. + + Module theory. + + Notation "x <= y" := (op x y) (at level 70). + + End theory. + + End LE. + +As before we register a canonical ``LE`` class for ``nat``. + +.. coqtop:: all + + Import LE.theory. + + Definition nat_le x y := Nat.compare x y <> Gt. + + Definition nat_LEcl : LE.class nat := LE.Class nat_le. + + Canonical Structure nat_LEty : LE.type := LE.Pack nat nat_LEcl. + +And we enable |Coq| to relate pair of terms with ``<=``. + +.. coqtop:: all + + Definition pair_le e1 e2 (x y : LE.obj e1 * LE.obj e2) := + fst x <= fst y /\ snd x <= snd y. + + Definition pair_LEcl e1 e2 := LE.Class (pair_le e1 e2). + + Canonical Structure pair_LEty (e1 e2 : LE.type) : LE.type := + LE.Pack (LE.obj e1 * LE.obj e2) (pair_LEcl e1 e2). + + Check (3,4,5) <= (3,4,5). + +At the current stage we can use ``==`` and ``<=`` on concrete types, like +tuples of natural numbers, but we can’t develop an algebraic theory +over the types that are equipped with both relations. + +.. coqtop:: all + + Check 2 <= 3 /\ 2 == 2. + + Fail Check forall (e : EQ.type) (x y : EQ.obj e), x <= y -> y <= x -> x == y. + + Fail Check forall (e : LE.type) (x y : LE.obj e), x <= y -> y <= x -> x == y. 
+
+We need to define a new class that inherits from both ``EQ`` and ``LE``.
+
+
+.. coqtop:: all
+
+   Module LEQ.
+
+   Record mixin (e : EQ.type) (le : EQ.obj e -> EQ.obj e -> Prop) :=
+     Mixin { compat : forall x y : EQ.obj e, le x y /\ le y x <-> x == y }.
+
+   Record class T := Class {
+     EQ_class : EQ.class T;
+     LE_class : LE.class T;
+     extra : mixin (EQ.Pack T EQ_class) (LE.cmp T LE_class) }.
+
+   Structure type := _Pack { obj : Type; class_of : class obj }.
+
+   Arguments Mixin {e le} _.
+
+   Arguments Class {T} _ _ _.
+
+The mixin component of the ``LEQ`` class contains all the extra content we
+are adding to ``EQ`` and ``LE``. In particular it contains the requirement
+that the two relations we are combining are compatible.
+
+Unfortunately there is still an obstacle to developing the algebraic
+theory of this new class.
+
+.. coqtop:: all
+
+   Module theory.
+
+   Fail Check forall (le : type) (n m : obj le), n <= m -> n <= m -> n == m.
+
+
+The problem is that the two classes ``LE`` and ``LEQ`` are not yet related by
+a subclass relation. In other words |Coq| does not see that an object of
+the ``LEQ`` class is also an object of the ``LE`` class.
+
+The following two constructions tell |Coq| how to canonically build the
+``LE.type`` and ``EQ.type`` structure given an ``LEQ.type`` structure on the same
+type.
+
+.. coqtop:: all
+
+   Definition to_EQ (e : type) : EQ.type :=
+     EQ.Pack (obj e) (EQ_class _ (class_of e)).
+
+   Canonical Structure to_EQ.
+
+   Definition to_LE (e : type) : LE.type :=
+     LE.Pack (obj e) (LE_class _ (class_of e)).
+
+   Canonical Structure to_LE.
+
+We can now formulate our first theorem on the objects of the ``LEQ``
+structure.
+
+.. coqtop:: all
+
+   Lemma lele_eq (e : type) (x y : obj e) : x <= y -> y <= x -> x == y.
+
+   now intros; apply (compat _ _ (extra _ (class_of e)) x y); split.
+
+   Qed.
+
+   Arguments lele_eq {e} x y _ _.
+
+   End theory.
+
+   End LEQ.
+
+   Import LEQ.theory.
+
+   Check lele_eq.
+
+Of course one would like to apply results proved in the algebraic
+setting to any concrete instance of the algebraic structure.
+
+.. coqtop:: all
+
+   Example test_algebraic (n m : nat) : n <= m -> m <= n -> n == m.
+
+   Fail apply (lele_eq n m).
+
+   Abort.
+
+   Example test_algebraic2 (l1 l2 : LEQ.type) (n m : LEQ.obj l1 * LEQ.obj l2) :
+     n <= m -> m <= n -> n == m.
+
+   Fail apply (lele_eq n m).
+
+   Abort.
+
+Again one has to tell |Coq| that the type ``nat`` is in the ``LEQ`` class, and
+how the type constructor ``*`` interacts with the ``LEQ`` class. In the
+following, proofs are omitted for brevity.
+
+.. coqtop:: all
+
+   Lemma nat_LEQ_compat (n m : nat) : n <= m /\ m <= n <-> n == m.
+
+   Admitted.
+
+   Definition nat_LEQmx := LEQ.Mixin nat_LEQ_compat.
+
+   Lemma pair_LEQ_compat (l1 l2 : LEQ.type) (n m : LEQ.obj l1 * LEQ.obj l2) :
+     n <= m /\ m <= n <-> n == m.
+
+   Admitted.
+
+   Definition pair_LEQmx l1 l2 := LEQ.Mixin (pair_LEQ_compat l1 l2).
+
+The following script registers an ``LEQ`` class for ``nat`` and for the type
+constructor ``*``. It also tests that they work as expected.
+
+Unfortunately, these declarations are very verbose. In the following
+subsection we show how to make these declarations more compact.
+
+.. coqtop:: all
+
+   Module Add_instance_attempt.
+
+   Canonical Structure nat_LEQty : LEQ.type :=
+     LEQ._Pack nat (LEQ.Class nat_EQcl nat_LEcl nat_LEQmx).
+
+   Canonical Structure pair_LEQty (l1 l2 : LEQ.type) : LEQ.type :=
+     LEQ._Pack (LEQ.obj l1 * LEQ.obj l2)
+       (LEQ.Class
+         (EQ.class_of (pair_EQty (to_EQ l1) (to_EQ l2)))
+         (LE.class_of (pair_LEty (to_LE l1) (to_LE l2)))
+         (pair_LEQmx l1 l2)).
+
+   Example test_algebraic (n m : nat) : n <= m -> m <= n -> n == m.
+
+   now apply (lele_eq n m).
+
+   Qed.
+
+   Example test_algebraic2 (n m : nat * nat) : n <= m -> m <= n -> n == m.
+
+   now apply (lele_eq n m). Qed.
+
+   End Add_instance_attempt.
+
+Note that no direct proof of ``n <= m -> m <= n -> n == m`` is provided by
+the user for ``n`` and ``m`` of type ``nat * nat``. What the user provides is a
+proof of this statement for ``n`` and ``m`` of type ``nat`` and a proof that the
+pair constructor preserves this property. The combination of these two
+facts is a simple form of proof search that |Coq| performs automatically
+while inferring canonical structures.
+
+Compact declaration of Canonical Structures
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We need some infrastructure for that.
+
+.. coqtop:: all
+
+   Require Import Strings.String.
+
+   Module infrastructure.
+
+   Inductive phantom {T : Type} (t : T) : Type := Phantom.
+
+   Definition unify {T1 T2} (t1 : T1) (t2 : T2) (s : option string) :=
+     phantom t1 -> phantom t2.
+
+   Definition id {T} {t : T} (x : phantom t) := x.
+
+   Notation "[find v | t1 ~ t2 ] p" := (fun v (_ : unify t1 t2 None) => p)
+     (at level 50, v ident, only parsing).
+
+   Notation "[find v | t1 ~ t2 | s ] p" := (fun v (_ : unify t1 t2 (Some s)) => p)
+     (at level 50, v ident, only parsing).
+
+   Notation "'Error : t : s" := (unify _ t (Some s))
+     (at level 50, format "''Error' : t : s").
+
+   Open Scope string_scope.
+
+   End infrastructure.
+
+To explain the notation ``[find v | t1 ~ t2]`` let us pick one of its
+instances: ``[find e | EQ.obj e ~ T | "is not an EQ.type" ]``. It should be
+read as: “find a class e such that its objects have type T or fail
+with message "T is not an EQ.type"”.
+
+The other utilities are used to ask |Coq| to solve a specific unification
+problem, that will in turn require the inference of some canonical structures.
+They are explained in more detail in :cite:`CSwcu`.
+
+We now have all we need to create a compact “packager” to declare
+instances of the ``LEQ`` class.
+
+.. coqtop:: all
+
+   Import infrastructure.
+
+   Definition packager T e0 le0 (m0 : LEQ.mixin e0 le0) :=
+     [find e | EQ.obj e ~ T | "is not an EQ.type" ]
+     [find o | LE.obj o ~ T | "is not an LE.type" ]
+     [find ce | EQ.class_of e ~ ce ]
+     [find co | LE.class_of o ~ co ]
+     [find m | m ~ m0 | "is not the right mixin" ]
+     LEQ._Pack T (LEQ.Class ce co m).
+
+   Notation Pack T m := (packager T _ _ m _ id _ id _ id _ id _ id).
+
+The object ``Pack`` takes a type ``T`` (the key) and a mixin ``m``. It infers all
+the other pieces of the class ``LEQ`` and declares them as canonical
+values associated to the ``T`` key. All in all, the only new piece of
+information we add in the ``LEQ`` class is the mixin, all the rest is
+already canonical for ``T`` and hence can be inferred by |Coq|.
+
+``Pack`` is a notation, hence it is not type checked at the time of its
+declaration. It will be type checked when it is used, and in that case ``T`` is
+going to be a concrete type. The odd arguments ``_`` and ``id`` we pass to the
+packager represent respectively the classes to be inferred (like ``e``, ``o``,
+etc) and a token (``id``) to force their inference. Again, for all the details
+the reader can refer to :cite:`CSwcu`.
+
+The declaration of canonical instances can now be way more compact:
+
+.. coqtop:: all
+
+   Canonical Structure nat_LEQty := Eval hnf in Pack nat nat_LEQmx.
+
+   Canonical Structure pair_LEQty (l1 l2 : LEQ.type) :=
+     Eval hnf in Pack (LEQ.obj l1 * LEQ.obj l2) (pair_LEQmx l1 l2).
+
+Error messages are also quite intelligible (if one skips to the end of
+the message).
+
+.. coqtop:: all
+
+   Fail Canonical Structure err := Eval hnf in Pack bool nat_LEQmx.
+
diff --git a/doc/sphinx/addendum/extended-pattern-matching.rst b/doc/sphinx/addendum/extended-pattern-matching.rst
new file mode 100644
index 0000000000..64d4eddf04
--- /dev/null
+++ b/doc/sphinx/addendum/extended-pattern-matching.rst
@@ -0,0 +1,611 @@
+.. include:: ../replaces.rst
+
+.. 
_extendedpatternmatching: + +Extended pattern-matching +========================= + +:Authors: Cristina Cornes and Hugo Herbelin + +.. TODO links to figures + +This section describes the full form of pattern-matching in |Coq| terms. + +.. |rhs| replace:: right hand side + +Patterns +-------- + +The full syntax of match is presented in Figures 1.1 and 1.2. +Identifiers in patterns are either constructor names or variables. Any +identifier that is not the constructor of an inductive or co-inductive +type is considered to be a variable. A variable name cannot occur more +than once in a given pattern. It is recommended to start variable +names by a lowercase letter. + +If a pattern has the form ``(c x)`` where ``c`` is a constructor symbol and x +is a linear vector of (distinct) variables, it is called *simple*: it +is the kind of pattern recognized by the basic version of match. On +the opposite, if it is a variable ``x`` or has the form ``(c p)`` with ``p`` not +only made of variables, the pattern is called *nested*. + +A variable pattern matches any value, and the identifier is bound to +that value. The pattern “``_``” (called “don't care” or “wildcard” symbol) +also matches any value, but does not bind anything. It may occur an +arbitrary number of times in a pattern. Alias patterns written +:n:`(@pattern as @identifier)` are also accepted. This pattern matches the +same values as ``pattern`` does and ``identifier`` is bound to the matched +value. A pattern of the form :n:`pattern | pattern` is called disjunctive. A +list of patterns separated with commas is also considered as a pattern +and is called *multiple pattern*. However multiple patterns can only +occur at the root of pattern-matching equations. Disjunctions of +*multiple pattern* are allowed though. + +Since extended ``match`` expressions are compiled into the primitive ones, +the expressiveness of the theory remains the same. Once the stage of +parsing has finished only simple patterns remain. 
Re-nesting of +pattern is performed at printing time. An easy way to see the result +of the expansion is to toggle off the nesting performed at printing +(use here :opt:`Set Printing Matching`), then by printing the term with :cmd:`Print` +if the term is a constant, or using the command :cmd:`Check`. + +The extended ``match`` still accepts an optional *elimination predicate* +given after the keyword ``return``. Given a pattern matching expression, +if all the right-hand-sides of ``=>`` have the same +type, then this type can be sometimes synthesized, and so we can omit +the return part. Otherwise the predicate after return has to be +provided, like for the basicmatch. + +Let us illustrate through examples the different aspects of extended +pattern matching. Consider for example the function that computes the +maximum of two natural numbers. We can write it in primitive syntax +by: + +.. coqtop:: in undo + + Fixpoint max (n m:nat) {struct m} : nat := + match n with + | O => m + | S n' => match m with + | O => S n' + | S m' => S (max n' m') + end + end. + +Multiple patterns +----------------- + +Using multiple patterns in the definition of max lets us write: + +.. coqtop:: in undo + + Fixpoint max (n m:nat) {struct m} : nat := + match n, m with + | O, _ => m + | S n', O => S n' + | S n', S m' => S (max n' m') + end. + +which will be compiled into the previous form. + +The pattern-matching compilation strategy examines patterns from left +to right. A match expression is generated **only** when there is at least +one constructor in the column of patterns. E.g. the following example +does not build a match expression. + +.. coqtop:: all + + Check (fun x:nat => match x return nat with + | y => y + end). + + +Aliasing subpatterns +-------------------- + +We can also use :n:`as @ident` to associate a name to a sub-pattern: + +.. 
coqtop:: in undo
+
+   Fixpoint max (n m:nat) {struct n} : nat :=
+     match n, m with
+     | O, _ => m
+     | S n' as p, O => p
+     | S n', S m' => S (max n' m')
+     end.
+
+Nested patterns
+---------------
+
+Here is now an example of nested patterns:
+
+.. coqtop:: in
+
+   Fixpoint even (n:nat) : bool :=
+     match n with
+     | O => true
+     | S O => false
+     | S (S n') => even n'
+     end.
+
+This is compiled into:
+
+.. coqtop:: all undo
+
+   Unset Printing Matching.
+   Print even.
+
+In the previous examples patterns do not conflict with each other, but
+sometimes it is comfortable to write patterns that admit a non trivial
+superposition. Consider the boolean function :g:`lef` that given two
+natural numbers yields :g:`true` if the first one is less or equal than the
+second one and :g:`false` otherwise. We can write it as follows:
+
+.. coqtop:: in undo
+
+   Fixpoint lef (n m:nat) {struct m} : bool :=
+     match n, m with
+     | O, x => true
+     | x, O => false
+     | S n, S m => lef n m
+     end.
+
+Note that the first and the second multiple pattern superpose because
+the couple of values ``O O`` matches both. Thus, what is the result of the
+function on those values? To eliminate ambiguity we use the *textual
+priority rule*: we consider patterns ordered from top to bottom, then
+a value is matched by the pattern at the ith row if and only if it is
+not matched by some pattern of a previous row. Thus in the example, ``O O``
+is matched by the first pattern, and so :g:`(lef O O)` yields true.
+
+Another way to write this function is:
+
+.. coqtop:: in
+
+   Fixpoint lef (n m:nat) {struct m} : bool :=
+     match n, m with
+     | O, x => true
+     | S n, S m => lef n m
+     | _, _ => false
+     end.
+
+Here the last pattern superposes with the first two. Because of the
+priority rule, the last pattern will be used only for values that
+match neither the first nor the second one.
+
+Terms with useless patterns are not accepted by the system. Here is an
+example:
+
+.. 
coqtop:: all + + Fail Check (fun x:nat => + match x with + | O => true + | S _ => false + | x => true + end). + + +Disjunctive patterns +-------------------- + +Multiple patterns that share the same right-hand-side can be +factorized using the notation :n:`{+| @mult_pattern}`. For +instance, :g:`max` can be rewritten as follows: + +.. coqtop:: in undo + + Fixpoint max (n m:nat) {struct m} : nat := + match n, m with + | S n', S m' => S (max n' m') + | 0, p | p, 0 => p + end. + +Similarly, factorization of (non necessary multiple) patterns that +share the same variables is possible by using the notation :n:`{+| @pattern}`. +Here is an example: + +.. coqtop:: in + + Definition filter_2_4 (n:nat) : nat := + match n with + | 2 as m | 4 as m => m + | _ => 0 + end. + + +Here is another example using disjunctive subpatterns. + +.. coqtop:: in + + Definition filter_some_square_corners (p:nat*nat) : nat*nat := + match p with + | ((2 as m | 4 as m), (3 as n | 5 as n)) => (m,n) + | _ => (0,0) + end. + +About patterns of parametric types +---------------------------------- + +Parameters in patterns +~~~~~~~~~~~~~~~~~~~~~~ + +When matching objects of a parametric type, parameters do not bind in +patterns. They must be substituted by “``_``”. Consider for example the +type of polymorphic lists: + +.. coqtop:: in + + Inductive List (A:Set) : Set := + | nil : List A + | cons : A -> List A -> List A. + +We can check the function *tail*: + +.. coqtop:: all + + Check + (fun l:List nat => + match l with + | nil _ => nil nat + | cons _ _ l' => l' + end). + +When we use parameters in patterns there is an error message: + +.. coqtop:: all + + Fail Check + (fun l:List nat => + match l with + | nil A => nil nat + | cons A _ l' => l' + end). + +.. opt:: Asymmetric Patterns + +This option (off by default) removes parameters from constructors in patterns: + +.. coqtop:: all + + Set Asymmetric Patterns. 
+ Check (fun l:List nat => + match l with + | nil => nil + | cons _ l' => l' + end) + Unset Asymmetric Patterns. + +Implicit arguments in patterns +------------------------------ + +By default, implicit arguments are omitted in patterns. So we write: + +.. coqtop:: all + + Arguments nil [A]. + Arguments cons [A] _ _. + Check + (fun l:List nat => + match l with + | nil => nil + | cons _ l' => l' + end). + +But the possibility to use all the arguments is given by “``@``” implicit +explicitations (as for terms 2.7.11). + +.. coqtop:: all + + Check + (fun l:List nat => + match l with + | @nil _ => @nil nat + | @cons _ _ l' => l' + end). + + +Matching objects of dependent types +----------------------------------- + +The previous examples illustrate pattern matching on objects of non- +dependent types, but we can also use the expansion strategy to +destructure objects of dependent type. Consider the type :g:`listn` of +lists of a certain length: + +.. coqtop:: in reset + + Inductive listn : nat -> Set := + | niln : listn 0 + | consn : forall n:nat, nat -> listn n -> listn (S n). + + +Understanding dependencies in patterns +-------------------------------------- + +We can define the function length over :g:`listn` by: + +.. coqtop:: in + + Definition length (n:nat) (l:listn n) := n. + +Just for illustrating pattern matching, we can define it by case +analysis: + +.. coqtop:: in + + Definition length (n:nat) (l:listn n) := + match l with + | niln => 0 + | consn n _ _ => S n + end. + +We can understand the meaning of this definition using the same +notions of usual pattern matching. + + +When the elimination predicate must be provided +----------------------------------------------- + +Dependent pattern matching +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The examples given so far do not need an explicit elimination +predicate because all the |rhs| have the same type and the strategy +succeeds to synthesize it. 
Unfortunately when dealing with dependent
+patterns it often happens that we need to write cases where the type
+of the |rhs| are different instances of the elimination predicate. The
+function concat for listn is an example where the branches have
+different type and we need to provide the elimination predicate:
+
+.. coqtop:: in
+
+   Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} :
+     listn (n + m) :=
+     match l in listn n return listn (n + m) with
+     | niln => l'
+     | consn n' a y => consn (n' + m) a (concat n' y m l')
+     end.
+
+The elimination predicate is :g:`fun (n:nat) (l:listn n) => listn (n+m)`.
+In general if :g:`m` has type :g:`(I q1 … qr t1 … ts)` where :g:`q1, …, qr`
+are parameters, the elimination predicate should be of the form :g:`fun y1 … ys x : (I q1 … qr y1 … ys ) => Q`.
+
+In the concrete syntax, it should be written :
+``match m as x in (I _ … _ y1 … ys) return Q with … end``
+The variables which appear in the ``in`` and ``as`` clauses are new and bound
+in the property :g:`Q` in the return clause. The parameters of the
+inductive definitions should not be mentioned and are replaced by ``_``.
+
+Multiple dependent pattern matching
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Recall that a list of patterns is also a pattern. So, when we
+destructure several terms at the same time and the branches have
+different types we need to provide the elimination predicate for this
+multiple pattern. It is done using the same scheme, each term may be
+associated to an ``as`` and ``in`` clause in order to introduce a dependent
+product.
+
+For example, an equivalent definition for :g:`concat` (even though the
+matching on the second term is trivial) would have been:
+
+.. coqtop:: in
+
+   Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} :
+     listn (n + m) :=
+     match l in listn n, l' return listn (n + m) with
+     | niln, x => x
+     | consn n' a y, x => consn (n' + m) a (concat n' y m x)
+     end.
+ +Even without real matching over the second term, this construction can +be used to keep types linked. If :g:`a` and :g:`b` are two :g:`listn` of the same +length, by writing + +.. coqtop:: in + + Fixpoint concat (n:nat) (l:listn n) (m:nat) (l':listn m) {struct l} : + listn (n + m) := + match l in listn n, l' return listn (n + m) with + | niln, x => x + | consn n' a y, x => consn (n' + m) a (concat n' y m x) + end. + +I have a copy of :g:`b` in type :g:`listn 0` resp :g:`listn (S n')`. + + +Patterns in ``in`` +~~~~~~~~~~~~~~~~~~ + +If the type of the matched term is more precise than an inductive +applied to variables, arguments of the inductive in the ``in`` branch can +be more complicated patterns than a variable. + +Moreover, constructors whose type do not follow the same pattern will +become impossible branches. In an impossible branch, you can answer +anything but False_rect unit has the advantage to be subterm of +anything. + +To be concrete: the tail function can be written: + +.. coqtop:: in + + Definition tail n (v: listn (S n)) := + match v in listn (S m) return listn m with + | niln => False_rect unit + | consn n' a y => y + end. + +and :g:`tail n v` will be subterm of :g:`v`. + +Using pattern matching to write proofs +-------------------------------------- + +In all the previous examples the elimination predicate does not depend +on the object(s) matched. But it may depend and the typical case is +when we write a proof by induction or a function that yields an object +of dependent type. An example of proof using match in given in Section +8.2.3. + +For example, we can write the function :g:`buildlist` that given a natural +number :g:`n` builds a list of length :g:`n` containing zeros as follows: + +.. coqtop:: in + + Fixpoint buildlist (n:nat) : listn n := + match n return listn n with + | O => niln + | S n => consn n 0 (buildlist n) + end. + +We can also use multiple patterns. 
Consider the following definition +of the predicate less-equal :g:`Le`: + +.. coqtop:: in + + Inductive LE : nat -> nat -> Prop := + | LEO : forall n:nat, LE 0 n + | LES : forall n m:nat, LE n m -> LE (S n) (S m). + +We can use multiple patterns to write the proof of the lemma +:g:`forall (n m:nat), (LE n m) \/ (LE m n)`: + +.. coqtop:: in + + Fixpoint dec (n m:nat) {struct n} : LE n m \/ LE m n := + match n, m return LE n m \/ LE m n with + | O, x => or_introl (LE x 0) (LEO x) + | x, O => or_intror (LE x 0) (LEO x) + | S n as n', S m as m' => + match dec n m with + | or_introl h => or_introl (LE m' n') (LES n m h) + | or_intror h => or_intror (LE n' m') (LES m n h) + end + end. + +In the example of :g:`dec`, the first match is dependent while the second +is not. + +The user can also use match in combination with the tactic :tacn:`refine` (see +Section 8.2.3) to build incomplete proofs beginning with a match +construction. + + +Pattern-matching on inductive objects involving local definitions +----------------------------------------------------------------- + +If local definitions occur in the type of a constructor, then there +are two ways to match on this constructor. Either the local +definitions are skipped and matching is done only on the true +arguments of the constructors, or the bindings for local definitions +can also be caught in the matching. + +.. example:: + + .. coqtop:: in + + Inductive list : nat -> Set := + | nil : list 0 + | cons : forall n:nat, let m := (2 * n) in list m -> list (S (S m)). + + In the next example, the local definition is not caught. + + .. coqtop:: in + + Fixpoint length n (l:list n) {struct l} : nat := + match l with + | nil => 0 + | cons n l0 => S (length (2 * n) l0) + end. + + But in this example, it is. + + .. coqtop:: in + + Fixpoint length' n (l:list n) {struct l} : nat := + match l with + | nil => 0 + | @cons _ m l0 => S (length' m l0) + end. + +.. 
note:: For a given matching clause, either none of the local + definitions or all of them can be caught. + +.. note:: You can only catch let bindings in mode where you bind all + variables and so you have to use ``@`` syntax. + +.. note:: this feature is incoherent with the fact that parameters + cannot be caught and consequently is somehow hidden. For example, + there is no mention of it in error messages. + +Pattern-matching and coercions +------------------------------ + +If a mismatch occurs between the expected type of a pattern and its +actual type, a coercion made from constructors is sought. If such a +coercion can be found, it is automatically inserted around the +pattern. + +.. example:: + + .. coqtop:: in + + Inductive I : Set := + | C1 : nat -> I + | C2 : I -> I. + + Coercion C1 : nat >-> I. + + .. coqtop:: all + + Check (fun x => match x with + | C2 O => 0 + | _ => 0 + end). + + +When does the expansion strategy fail? +-------------------------------------- + +The strategy works very like in ML languages when treating patterns of +non-dependent type. But there are new cases of failure that are due to +the presence of dependencies. + +The error messages of the current implementation may be sometimes +confusing. When the tactic fails because patterns are somehow +incorrect then error messages refer to the initial expression. But the +strategy may succeed to build an expression whose sub-expressions are +well typed when the whole expression is not. In this situation the +message makes reference to the expanded expression. We encourage +users, when they have patterns with the same outer constructor in +different equations, to name the variable patterns in the same +positions with the same name. E.g. to write ``(cons n O x) => e1`` and +``(cons n _ x) => e2`` instead of ``(cons n O x) => e1`` and +``(cons n' _ x') => e2``. This helps to maintain certain name correspondence between the +generated expression and the original. 
+
+Here is a summary of the error messages corresponding to each
+situation:
+
+.. exn:: The constructor @ident expects @num arguments
+
+.. exn:: The variable @ident is bound several times in pattern @term
+
+.. exn:: Found a constructor of inductive type @term while a constructor of @term is expected
+
+   Patterns are incorrect (because constructors are not applied to the correct
+   number of the arguments, because they are not linear or they are wrongly
+   typed).
+
+.. exn:: Non exhaustive pattern-matching
+
+   The pattern matching is not exhaustive.
+
+.. exn:: The elimination predicate term should be of arity @num (for non \
+   dependent case) or @num (for dependent case)
+
+   The elimination predicate provided to match does not have the expected arity.
+
+.. exn:: Unable to infer a match predicate
+    Either there is a type incompatibility or the problem involves dependencies
+
+   There is a type mismatch between the different branches. The user should
+   provide an elimination predicate.
diff --git a/doc/sphinx/addendum/micromega.rst b/doc/sphinx/addendum/micromega.rst
new file mode 100644
index 0000000000..e850587c8a
--- /dev/null
+++ b/doc/sphinx/addendum/micromega.rst
@@ -0,0 +1,252 @@
+.. _micromega:
+
+Micromega: tactics for solving arithmetic goals over ordered rings
+==================================================================
+
+:Authors: Frédéric Besson and Evgeny Makarov
+
+Short description of the tactics
+--------------------------------
+
+The Psatz module (``Require Import Psatz.``) gives access to several
+tactics for solving arithmetic goals over :math:`\mathbb{Z}`, :math:`\mathbb{Q}`, and :math:`\mathbb{R}` [#]_.
+It is also possible to get the tactics for integers by a ``Require Import Lia``,
+rationals ``Require Import Lqa`` and reals ``Require Import Lra``.
+ ++ ``lia`` is a decision procedure for linear integer arithmetic (see Section :ref:`lia <lia>`); ++ ``nia`` is an incomplete proof procedure for integer non-linear + arithmetic (see Section :ref:`nia <nia>`); ++ ``lra`` is a decision procedure for linear (real or rational) arithmetic + (see Section :ref:`lra <lra>`); ++ ``nra`` is an incomplete proof procedure for non-linear (real or + rational) arithmetic (see Section :ref:`nra <nra>`); ++ ``psatz D n`` where ``D`` is :math:`\mathbb{Z}` or :math:`\mathbb{Q}` or :math:`\mathbb{R}`, and + ``n`` is an optional integer limiting the proof search depth + is an incomplete proof procedure for non-linear arithmetic. + It is based on John Harrison’s HOL Light + driver to the external prover `csdp` [#]_. Note that the `csdp` driver is + generating a *proof cache* which makes it possible to rerun scripts + even without `csdp` (see Section :ref:`psatz <psatz>`). + +The tactics solve propositional formulas parameterized by atomic +arithmetic expressions interpreted over a domain :math:`D` ∈ {ℤ, ℚ, ℝ}. +The syntax of the formulas is the following: + + .. productionlist:: `F` + F : A ∣ P ∣ True ∣ False ∣ F 1 ∧ F 2 ∣ F 1 ∨ F 2 ∣ F 1 ↔ F 2 ∣ F 1 → F 2 ∣ ¬ F + A : p 1 = p 2 ∣ p 1 > p 2 ∣ p 1 < p 2 ∣ p 1 ≥ p 2 ∣ p 1 ≤ p 2 + p : c ∣ x ∣ −p ∣ p 1 − p 2 ∣ p 1 + p 2 ∣ p 1 × p 2 ∣ p ^ n + +where :math:`c` is a numeric constant, :math:`x \in D` is a numeric variable, the +operators :math:`−, +, ×` are respectively subtraction, addition, and product; +:math:`p ^ n` is exponentiation by a constant :math:`n`, :math:`P` is an arbitrary proposition. +For :math:`\mathbb{Q}`, equality is not Leibniz equality = but the equality of +rationals ==. + +For :math:`\mathbb{Z}` (resp. :math:`\mathbb{Q}`), :math:`c` ranges over integer constants (resp. rational +constants). 
For :math:`\mathbb{R}`, the tactic recognizes as real constants the +following expressions: + +:: + + c ::= R0 | R1 | Rmul(c,c) | Rplus(c,c) | Rminus(c,c) | IZR z | IQR q | Rdiv(c,c) | Rinv c + +where :math:`z` is a constant in :math:`\mathbb{Z}` and :math:`q` is a constant in :math:`\mathbb{Q}`. +This includes integer constants written using the decimal notation, *i.e.*, c%R. + + +*Positivstellensatz* refutations +-------------------------------- + +The name `psatz` is an abbreviation for *positivstellensatz* – literally +"positivity theorem" – which generalizes Hilbert’s *nullstellensatz*. It +relies on the notion of Cone. Given a (finite) set of polynomials :math:`S`, +:math:`\mathit{Cone}(S)` is inductively defined as the smallest set of polynomials +closed under the following rules: + +:math:`\begin{array}{l} +\dfrac{p \in S}{p \in \mathit{Cone}(S)} \quad +\dfrac{}{p^2 \in \mathit{Cone}(S)} \quad +\dfrac{p_1 \in \mathit{Cone}(S) \quad p_2 \in \mathit{Cone}(S) \quad +\Join \in \{+,*\}} {p_1 \Join p_2 \in \mathit{Cone}(S)}\\ +\end{array}` + +The following theorem provides a proof principle for checking that a +set of polynomial inequalities does not have solutions [#]_. + +.. _psatz_thm: + +**Theorem (Psatz)**. Let :math:`S` be a set of polynomials. +If :math:`-1` belongs to :math:`\mathit{Cone}(S)`, then the conjunction +:math:`\bigwedge_{p \in S} p\ge 0` is unsatisfiable. +A proof based on this theorem is called a *positivstellensatz* +refutation. The tactics work as follows. Formulas are normalized into +conjunctive normal form :math:`\bigwedge_i C_i` where :math:`C_i` has the +general form :math:`(\bigwedge_{j\in S_i} p_j \Join 0) \to \mathit{False})` and +:math:`\Join \in \{>,\ge,=\}` for :math:`D\in \{\mathbb{Q},\mathbb{R}\}` and +:math:`\Join \in \{\ge, =\}` for :math:`\mathbb{Z}`. + +For each conjunct :math:`C_i`, the tactic calls a oracle which searches for +:math:`-1` within the cone. 
Upon success, the oracle returns a *cone +expression* that is normalized by the ring tactic (see :ref:`theringandfieldtacticfamilies`) +and checked to be :math:`-1`. + +.. _lra: + +`lra`: a decision procedure for linear real and rational arithmetic +------------------------------------------------------------------- + +The `lra` tactic is searching for *linear* refutations using Fourier +elimination [#]_. As a result, this tactic explores a subset of the *Cone* +defined as + + :math:`\mathit{LinCone}(S) =\left\{ \left. \sum_{p \in S} \alpha_p \times p~\right|~\alpha_p \mbox{ are positive constants} \right\}` + +The deductive power of `lra` is the combined deductive power of +`ring_simplify` and `fourier`. There is also an overlap with the field +tactic *e.g.*, :math:`x = 10 * x / 10` is solved by `lra`. + + +.. _lia: + +`lia`: a tactic for linear integer arithmetic +--------------------------------------------- + +The tactic lia offers an alternative to the omega and romega tactic +(see :ref:`omega`). Roughly speaking, the deductive power of lia is +the combined deductive power of `ring_simplify` and `omega`. However, it +solves linear goals that `omega` and `romega` do not solve, such as the +following so-called *omega nightmare* :cite:`TheOmegaPaper`. + +.. coqtop:: in + + Goal forall x y, + 27 <= 11 * x + 13 * y <= 45 -> + -10 <= 7 * x - 9 * y <= 4 -> False. + +The estimation of the relative efficiency of `lia` *vs* `omega` and `romega` +is under evaluation. + +High level view of `lia` +~~~~~~~~~~~~~~~~~~~~~~~~ + +Over :math:`\mathbb{R}`, *positivstellensatz* refutations are a complete proof +principle [#]_. However, this is not the case over :math:`\mathbb{Z}`. Actually, +*positivstellensatz* refutations are not even sufficient to decide +linear *integer* arithmetic. The canonical example is :math:`2 * x = 1 -> \mathtt{False}` +which is a theorem of :math:`\mathbb{Z}` but not a theorem of :math:`{\mathbb{R}}`. 
To remedy this +weakness, the `lia` tactic is using recursively a combination of: + ++ linear *positivstellensatz* refutations; ++ cutting plane proofs; ++ case split. + +Cutting plane proofs +~~~~~~~~~~~~~~~~~~~~~~ + +are a way to take into account the discreteness of :math:`\mathbb{Z}` by rounding up +(rational) constants up-to the closest integer. + +.. _ceil_thm: + +**Theorem**. Let :math:`p` be an integer and :math:`c` a rational constant. Then + + :math:`p \ge c \rightarrow p \ge \lceil{c}\rceil` + +For instance, from 2 x = 1 we can deduce + ++ :math:`x \ge 1/2` whose cut plane is :math:`x \ge \lceil{1/2}\rceil = 1`; ++ :math:`x \le 1/2` whose cut plane is :math:`x \le \lfloor{1/2}\rfloor = 0`. + +By combining these two facts (in normal form) :math:`x − 1 \ge 0` and +:math:`-x \ge 0`, we conclude by exhibiting a *positivstellensatz* refutation: +:math:`−1 \equiv x−1 + −x \in \mathit{Cone}({x−1,x})`. + +Cutting plane proofs and linear *positivstellensatz* refutations are a +complete proof principle for integer linear arithmetic. + +Case split +~~~~~~~~~~~ + +enumerates over the possible values of an expression. + +.. _casesplit_thm: + +**Theorem**. Let :math:`p` be an integer and :math:`c_1` and :math:`c_2` +integer constants. Then: + + :math:`c_1 \le p \le c_2 \Rightarrow \bigvee_{x \in [c_1,c_2]} p = x` + +Our current oracle tries to find an expression :math:`e` with a small range +:math:`[c_1,c_2]`. We generate :math:`c_2 − c_1` subgoals which contexts are enriched +with an equation :math:`e = i` for :math:`i \in [c_1,c_2]` and recursively search for +a proof. + +.. _nra: + +`nra`: a proof procedure for non-linear arithmetic +-------------------------------------------------- + +The `nra` tactic is an *experimental* proof procedure for non-linear +arithmetic. The tactic performs a limited amount of non-linear +reasoning before running the linear prover of `lra`. 
This pre-processing +does the following: + + ++ If the context contains an arithmetic expression of the form + :math:`e[x^2]` where :math:`x` is a monomial, the context is enriched with + :math:`x^2 \ge 0`; ++ For all pairs of hypotheses :math:`e_1 \ge 0`, :math:`e_2 \ge 0`, the context is + enriched with :math:`e_1 \times e_2 \ge 0`. + +After this pre-processing, the linear prover of `lra` searches for a +proof by abstracting monomials by variables. + +.. _nia: + +`nia`: a proof procedure for non-linear integer arithmetic +---------------------------------------------------------- + +The `nia` tactic is a proof procedure for non-linear integer arithmetic. +It performs a pre-processing similar to `nra`. The obtained goal is +solved using the linear integer prover `lia`. + +.. _psatz: + +`psatz`: a proof procedure for non-linear arithmetic +---------------------------------------------------- + +The `psatz` tactic explores the :math:`\mathit{Cone}` by increasing degrees – hence the +depth parameter :math:`n`. In theory, such a proof search is complete – if the +goal is provable the search eventually stops. Unfortunately, the +external oracle is using numeric (approximate) optimization techniques +that might miss a refutation. + +To illustrate the working of the tactic, consider we wish to prove the +following Coq goal: + +.. coqtop:: all + + Require Import ZArith Psatz. + Open Scope Z_scope. + Goal forall x, -x^2 >= 0 -> x - 1 >= 0 -> False. + intro x. + psatz Z 2. + +As shown, such a goal is solved by ``intro x. psatz Z 2.``. The oracle returns the +cone expression :math:`2 \times (x-1) + (\mathbf{x-1}) \times (\mathbf{x−1}) + -x^2` +(polynomial hypotheses are printed in bold). By construction, this expression +belongs to :math:`\mathit{Cone}({−x^2,x -1})`. Moreover, by running `ring` we +obtain :math:`-1`. By Theorem :ref:`Psatz <psatz_thm>`, the goal is valid. + +.. 
[#] Support for `nat` and :math:`\mathbb{N}` is obtained by pre-processing the goal with + the `zify` tactic. +.. [#] Sources and binaries can be found at https://projects.coin-or.org/Csdp +.. [#] Variants deal with equalities and strict inequalities. +.. [#] More efficient linear programming techniques could equally be employed. +.. [#] In practice, the oracle might fail to produce such a refutation. + +.. comment in original TeX: +.. %% \paragraph{The {\tt sos} tactic} -- where {\tt sos} stands for \emph{sum of squares} -- tries to prove that a +.. %% single polynomial $p$ is positive by expressing it as a sum of squares \emph{i.e.,} $\sum_{i\in S} p_i^2$. +.. %% This amounts to searching for $p$ in the cone without generators \emph{i.e.}, $Cone(\{\})$. diff --git a/doc/sphinx/addendum/omega.rst b/doc/sphinx/addendum/omega.rst new file mode 100644 index 0000000000..20e40c5507 --- /dev/null +++ b/doc/sphinx/addendum/omega.rst @@ -0,0 +1,184 @@ +.. _omega: + +Omega: a solver for quantifier-free problems in Presburger Arithmetic +===================================================================== + +:Author: Pierre Crégut + +Description of ``omega`` +------------------------ + +This tactic does not need any parameter: + +.. tacn:: omega + +``omega`` solves a goal in Presburger arithmetic, i.e. a universally +quantified formula made of equations and inequations. Equations may +be specified either on the type ``nat`` of natural numbers or on +the type ``Z`` of binary-encoded integer numbers. Formulas on +``nat`` are automatically injected into ``Z``. The procedure +may use any hypothesis of the current proof session to solve the goal. + +Multiplication is handled by ``omega`` but only goals where at +least one of the two multiplicands of products is a constant are +solvable. This is the restriction meant by "Presburger arithmetic". + +If the tactic cannot solve the goal, it fails with an error message. +In any case, the computation eventually stops. 
+
+Arithmetical goals recognized by ``omega``
+------------------------------------------
+
+``omega`` applies only to quantifier-free formulas built from the
+connectors::
+
+   /\ \/ ~ ->
+
+on atomic formulas. Atomic formulas are built from the predicates::
+
+   = < <= > >=
+
+on ``nat`` or ``Z``. In expressions of type ``nat``, ``omega`` recognizes::
+
+   + - * S O pred
+
+and in expressions of type ``Z``, ``omega`` recognizes numeral constants and::
+
+   + - * Z.succ Z.pred
+
+All expressions of type ``nat`` or ``Z`` not built on these
+operators are considered abstractly as if they
+were arbitrary variables of type ``nat`` or ``Z``.
+
+Messages from ``omega``
+-----------------------
+
+When ``omega`` does not solve the goal, one of the following errors
+is generated:
+
+.. exn:: omega can't solve this system
+
+   This may happen if your goal is not quantifier-free (if it is
+   universally quantified, try ``intros`` first; if it contains
+   existential quantifiers too, ``omega`` is not strong enough to solve your
+   goal). This may also happen if your goal contains arithmetical
+   operators unknown to ``omega``. Finally, your goal may be really
+   wrong!
+
+.. exn:: omega: Not a quantifier-free goal
+
+   If your goal is universally quantified, you should first apply
+   ``intro`` as many times as needed.
+
+.. exn:: omega: Unrecognized predicate or connective: @ident
+
+.. exn:: omega: Unrecognized atomic proposition: ...
+
+.. exn:: omega: Can't solve a goal with proposition variables
+
+.. exn:: omega: Unrecognized proposition
+
+.. exn:: omega: Can't solve a goal with non-linear products
+
+.. exn:: omega: Can't solve a goal with equality on type ...
+
+
+Using ``omega``
+---------------
+
+The ``omega`` tactic does not belong to the core system. It should be
+loaded by
+
+.. coqtop:: in
+
+   Require Import Omega.
+
+.. example::
+
+   .. coqtop:: all
+
+      Require Import Omega.
+
+      Open Scope Z_scope.
+
+      Goal forall m n:Z, 1 + 2 * m <> 2 * n.
+      intros; omega.
+      Abort.
+
+      Goal forall z:Z, z > 0 -> 2 * z + 1 > z.
+      intro; omega.
+      Abort.
+
+
+Options
+-------
+
+.. opt:: Stable Omega
+
+This deprecated option (on by default) is for compatibility with Coq pre 8.5. It
+resets internal name counters to make executions of ``omega`` independent.
+
+.. opt:: Omega UseLocalDefs
+
+This option (on by default) allows ``omega`` to use the bodies of local
+variables.
+
+.. opt:: Omega System
+
+This option (off by default) activates the printing of debug information.
+
+.. opt:: Omega Action
+
+This option (off by default) activates the printing of debug information.
+
+Technical data
+--------------
+
+Overview of the tactic
+~~~~~~~~~~~~~~~~~~~~~~
+
+ * The goal is negated twice and the first negation is introduced as a hypothesis.
+ * Hypotheses are decomposed into simple equations or inequations. Multiple
+   goals may result from this phase.
+ * Equations and inequations over ``nat`` are translated over
+   ``Z``, multiple goals may result from the translation of subtraction.
+ * Equations and inequations are normalized.
+ * Goals are solved by the OMEGA decision procedure.
+ * The script of the solution is replayed.
+
+Overview of the OMEGA decision procedure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The OMEGA decision procedure involved in the ``omega`` tactic uses
+a small subset of the decision procedure presented in :cite:`TheOmegaPaper`.
+Here is an overview; look at the original paper for more information.
+
+ * Equations and inequations are normalized by division by the GCD of their
+   coefficients.
+ * Equations are eliminated, using the Banerjee test to get a coefficient
+   equal to one.
+ * Note that each inequation defines a half space in the space of real values
+   of the variables.
+ * Inequations are solved by projecting on the hyperspace
+   defined by cancelling one of the variables. They are partitioned
+   according to the sign of the coefficient of the eliminated
+   variable. 
Pairs of inequations from different classes define a + new edge in the projection. + * Redundant inequations are eliminated or merged in new + equations that can be eliminated by the Banerjee test. + * The last two steps are iterated until a contradiction is reached + (success) or there is no more variable to eliminate (failure). + +It may happen that there is a real solution and no integer one. The last +steps of the Omega procedure (dark shadow) are not implemented, so the +decision procedure is only partial. + +Bugs +---- + + * The simplification procedure is very dumb and this results in + many redundant cases to explore. + + * Much too slow. + + * Certainly other bugs! You can report them to https://coq.inria.fr/bugs/. diff --git a/doc/sphinx/biblio.bib b/doc/sphinx/biblio.bib new file mode 100644 index 0000000000..247f32103c --- /dev/null +++ b/doc/sphinx/biblio.bib @@ -0,0 +1,1397 @@ +@String{jfp = "Journal of Functional Programming"} +@String{lncs = "Lecture Notes in Computer Science"} +@String{lnai = "Lecture Notes in Artificial Intelligence"} +@String{SV = "{Sprin-ger-Verlag}"} + +@InProceedings{Aud91, + author = {Ph. Audebaud}, + booktitle = {Proceedings of the sixth Conf. on Logic in Computer Science.}, + publisher = {IEEE}, + title = {Partial {Objects} in the {Calculus of Constructions}}, + year = {1991} +} + +@PhDThesis{Aud92, + author = {Ph. Audebaud}, + school = {{Universit\'e} Bordeaux I}, + title = {Extension du Calcul des Constructions par Points fixes}, + year = {1992} +} + +@InProceedings{Audebaud92b, + author = {Ph. Audebaud}, + booktitle = {{Proceedings of the 1992 Workshop on Types for Proofs and Programs}}, + editor = {{B. Nordstr\"om and K. Petersson and G. Plotkin}}, + note = {Also Research Report LIP-ENS-Lyon}, + pages = {21--34}, + title = {{CC+ : an extension of the Calculus of Constructions with fixpoints}}, + year = {1992} +} + +@InProceedings{Augustsson85, + author = {L. 
Augustsson}, + title = {{Compiling Pattern Matching}}, + booktitle = {Conference Functional Programming and +Computer Architecture}, + year = {1985} +} + +@Article{BaCo85, + author = {J.L. Bates and R.L. Constable}, + journal = {ACM transactions on Programming Languages and Systems}, + title = {Proofs as {Programs}}, + volume = {7}, + year = {1985} +} + +@Book{Bar81, + author = {H.P. Barendregt}, + publisher = {North-Holland}, + title = {The Lambda Calculus its Syntax and Semantics}, + year = {1981} +} + +@TechReport{Bar91, + author = {H. Barendregt}, + institution = {Catholic University Nijmegen}, + note = {In Handbook of Logic in Computer Science, Vol II}, + number = {91-19}, + title = {Lambda {Calculi with Types}}, + year = {1991} +} + +@Article{BeKe92, + author = {G. Bellin and J. Ketonen}, + journal = {Theoretical Computer Science}, + pages = {115--142}, + title = {A decision procedure revisited : Notes on direct logic, linear logic and its implementation}, + volume = {95}, + year = {1992} +} + +@Book{Bee85, + author = {M.J. Beeson}, + publisher = SV, + title = {Foundations of Constructive Mathematics, Metamathematical Studies}, + year = {1985} +} + +@Book{Bis67, + author = {E. Bishop}, + publisher = {McGraw-Hill}, + title = {Foundations of Constructive Analysis}, + year = {1967} +} + +@Book{BoMo79, + author = {R.S. Boyer and J.S. Moore}, + key = {BoMo79}, + publisher = {Academic Press}, + series = {ACM Monograph}, + title = {A computational logic}, + year = {1979} +} + +@MastersThesis{Bou92, + author = {S. Boutin}, + month = sep, + school = {{Universit\'e Paris 7}}, + title = {Certification d'un compilateur {ML en Coq}}, + year = {1992} +} + +@InProceedings{Bou97, + title = {Using reflection to build efficient and certified decision procedure +s}, + author = {S. Boutin}, + booktitle = {TACS'97}, + editor = {Martin Abadi and Takahashi Ito}, + publisher = SV, + series = lncs, + volume = 1281, + year = {1997} +} + +@PhDThesis{Bou97These, + author = {S. 
Boutin}, + title = {R\'eflexions sur les quotients}, + school = {Paris 7}, + year = 1997, + type = {th\`ese d'Universit\'e}, + month = apr +} + +@Article{Bru72, + author = {N.J. de Bruijn}, + journal = {Indag. Math.}, + title = {{Lambda-Calculus Notation with Nameless Dummies, a Tool for Automatic Formula Manipulation, with Application to the Church-Rosser Theorem}}, + volume = {34}, + year = {1972} +} + + +@InCollection{Bru80, + author = {N.J. de Bruijn}, + booktitle = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, + editor = {J.P. Seldin and J.R. Hindley}, + publisher = {Academic Press}, + title = {A survey of the project {Automath}}, + year = {1980} +} + +@TechReport{COQ93, + author = {G. Dowek and A. Felty and H. Herbelin and G. Huet and C. Murthy and C. Parent and C. Paulin-Mohring and B. Werner}, + institution = {INRIA}, + month = may, + number = {154}, + title = {{The Coq Proof Assistant User's Guide Version 5.8}}, + year = {1993} +} + +@TechReport{COQ02, + author = {The Coq Development Team}, + institution = {INRIA}, + month = Feb, + number = {255}, + title = {{The Coq Proof Assistant Reference Manual Version 7.2}}, + year = {2002} +} + +@TechReport{CPar93, + author = {C. Parent}, + institution = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, + month = oct, + note = {Also in~\cite{Nijmegen93}}, + number = {93-29}, + title = {Developing certified programs in the system {Coq}- {The} {Program} tactic}, + year = {1993} +} + +@PhDThesis{CPar95, + author = {C. Parent}, + school = {Ecole {Normale} {Sup\'erieure} de {Lyon}}, + title = {{Synth\`ese de preuves de programmes dans le Calcul des Constructions Inductives}}, + year = {1995} +} + +@Book{Caml, + author = {P. Weis and X. 
Leroy}, + publisher = {InterEditions}, + title = {Le langage Caml}, + year = {1993} +} + +@InProceedings{ChiPotSimp03, + author = {Laurent Chicli and Lo\"{\i}c Pottier and Carlos Simpson}, + title = {Mathematical Quotients and Quotient Types in Coq}, + booktitle = {TYPES}, + crossref = {DBLP:conf/types/2002}, + year = {2002} +} + +@TechReport{CoC89, + author = {Projet Formel}, + institution = {INRIA}, + number = {110}, + title = {{The Calculus of Constructions. Documentation and user's guide, Version 4.10}}, + year = {1989} +} + +@InProceedings{CoHu85a, + author = {Th. Coquand and G. Huet}, + address = {Linz}, + booktitle = {EUROCAL'85}, + publisher = SV, + series = LNCS, + title = {{Constructions : A Higher Order Proof System for Mechanizing Mathematics}}, + volume = {203}, + year = {1985} +} + +@InProceedings{CoHu85b, + author = {Th. Coquand and G. Huet}, + booktitle = {Logic Colloquium'85}, + editor = {The Paris Logic Group}, + publisher = {North-Holland}, + title = {{Concepts Math\'ematiques et Informatiques formalis\'es dans le Calcul des Constructions}}, + year = {1987} +} + +@Article{CoHu86, + author = {Th. Coquand and G. Huet}, + journal = {Information and Computation}, + number = {2/3}, + title = {The {Calculus of Constructions}}, + volume = {76}, + year = {1988} +} + +@InProceedings{CoPa89, + author = {Th. Coquand and C. Paulin-Mohring}, + booktitle = {Proceedings of Colog'88}, + editor = {P. Martin-L\"of and G. Mints}, + publisher = SV, + series = LNCS, + title = {Inductively defined types}, + volume = {417}, + year = {1990} +} + +@Book{Con86, + author = {R.L. {Constable et al.}}, + publisher = {Prentice-Hall}, + title = {{Implementing Mathematics with the Nuprl Proof Development System}}, + year = {1986} +} + +@PhDThesis{Coq85, + author = {Th. Coquand}, + month = jan, + school = {Universit\'e Paris~7}, + title = {Une Th\'eorie des Constructions}, + year = {1985} +} + +@InProceedings{Coq86, + author = {Th. 
Coquand}, + address = {Cambridge, MA}, + booktitle = {Symposium on Logic in Computer Science}, + publisher = {IEEE Computer Society Press}, + title = {{An Analysis of Girard's Paradox}}, + year = {1986} +} + +@InProceedings{Coq90, + author = {Th. Coquand}, + booktitle = {Logic and Computer Science}, + editor = {P. Oddifredi}, + note = {INRIA Research Report 1088, also in~\cite{CoC89}}, + publisher = {Academic Press}, + title = {{Metamathematical Investigations of a Calculus of Constructions}}, + year = {1990} +} + +@InProceedings{Coq91, + author = {Th. Coquand}, + booktitle = {Proceedings 9th Int. Congress of Logic, Methodology and Philosophy of Science}, + title = {{A New Paradox in Type Theory}}, + month = {August}, + year = {1991} +} + +@InProceedings{Coq92, + author = {Th. Coquand}, + title = {{Pattern Matching with Dependent Types}}, + year = {1992}, + booktitle = {Proceedings of the 1992 Workshop on Types for Proofs and Programs} +} + +@InProceedings{Coquand93, + author = {Th. Coquand}, + booktitle = {Types for Proofs and Programs}, + editor = {H. Barendregt and T. Nipokow}, + publisher = SV, + series = LNCS, + title = {{Infinite objects in Type Theory}}, + volume = {806}, + year = {1993}, + pages = {62-78} +} + +@inproceedings{Corbineau08types, + author = {P. Corbineau}, + title = {A Declarative Language for the Coq Proof Assistant}, + editor = {M. Miculan and I. Scagnetto and F. Honsell}, + booktitle = {TYPES '07, Cividale del Friuli, Revised Selected Papers}, + publisher = {Springer}, + series = LNCS, + volume = {4941}, + year = {2007}, + pages = {69-84}, + ee = {http://dx.doi.org/10.1007/978-3-540-68103-8_5}, +} + +@PhDThesis{Cor97, + author = {C. Cornes}, + month = nov, + school = {{Universit\'e Paris 7}}, + title = {Conception d'un langage de haut niveau de représentation de preuves}, + type = {Th\`ese de Doctorat}, + year = {1997} +} + +@MastersThesis{Cou94a, + author = {J. 
Courant}, + month = sep, + school = {DEA d'Informatique, ENS Lyon}, + title = {Explicitation de preuves par r\'ecurrence implicite}, + year = {1994} +} + +@book{Cur58, + author = {Haskell B. Curry and Robert Feys and William Craig}, + title = {Combinatory Logic}, + volume = 1, + publisher = "North-Holland", + year = 1958, + note = {{\S{9E}}}, +} + +@InProceedings{Del99, + author = {Delahaye, D.}, + title = {Information Retrieval in a Coq Proof Library using + Type Isomorphisms}, + booktitle = {Proceedings of TYPES '99, L\"okeberg}, + publisher = SV, + series = lncs, + year = {1999}, + url = + "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# + "{\sf TYPES99-SIsos.ps.gz}" +} + +@InProceedings{Del00, + author = {Delahaye, D.}, + title = {A {T}actic {L}anguage for the {S}ystem {{\sf Coq}}}, + booktitle = {Proceedings of Logic for Programming and Automated Reasoning + (LPAR), Reunion Island}, + publisher = SV, + series = LNCS, + volume = {1955}, + pages = {85--95}, + month = {November}, + year = {2000}, + url = + "{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# + "{\sf LPAR2000-ltac.ps.gz}" +} + +@InProceedings{DelMay01, + author = {Delahaye, D. and Mayero, M.}, + title = {{\tt Field}: une proc\'edure de d\'ecision pour les nombres r\'eels en {\Coq}}, + booktitle = {Journ\'ees Francophones des Langages Applicatifs, Pontarlier}, + publisher = {INRIA}, + month = {Janvier}, + year = {2001}, + url = + "\\{\sf ftp://ftp.inria.fr/INRIA/Projects/coq/David.Delahaye/papers/}"# + "{\sf JFLA2000-Field.ps.gz}" +} + +@TechReport{Dow90, + author = {G. Dowek}, + institution = {INRIA}, + number = {1283}, + title = {Naming and Scoping in a Mathematical Vernacular}, + type = {Research Report}, + year = {1990} +} + +@Article{Dow91a, + author = {G. 
Dowek}, + journal = {Compte-Rendus de l'Acad\'emie des Sciences}, + note = {The undecidability of Third Order Pattern Matching in Calculi with Dependent Types or Type Constructors}, + number = {12}, + pages = {951--956}, + title = {L'Ind\'ecidabilit\'e du Filtrage du Troisi\`eme Ordre dans les Calculs avec Types D\'ependants ou Constructeurs de Types}, + volume = {I, 312}, + year = {1991} +} + +@InProceedings{Dow91b, + author = {G. Dowek}, + booktitle = {Proceedings of Mathematical Foundation of Computer Science}, + note = {Also INRIA Research Report}, + pages = {151--160}, + publisher = SV, + series = LNCS, + title = {A Second Order Pattern Matching Algorithm in the Cube of Typed $\lambda$-calculi}, + volume = {520}, + year = {1991} +} + +@PhDThesis{Dow91c, + author = {G. Dowek}, + month = dec, + school = {Universit\'e Paris 7}, + title = {D\'emonstration automatique dans le Calcul des Constructions}, + year = {1991} +} + +@Article{Dow92a, + author = {G. Dowek}, + title = {The Undecidability of Pattern Matching in Calculi where Primitive Recursive Functions are Representable}, + year = 1993, + journal = {Theoretical Computer Science}, + volume = 107, + number = 2, + pages = {349-356} +} + +@Article{Dow94a, + author = {G. Dowek}, + journal = {Annals of Pure and Applied Logic}, + volume = {69}, + pages = {135--155}, + title = {Third order matching is decidable}, + year = {1994} +} + +@InProceedings{Dow94b, + author = {G. Dowek}, + booktitle = {Proceedings of the second international conference on typed lambda calculus and applications}, + title = {Lambda-calculus, Combinators and the Comprehension Schema}, + year = {1995} +} + +@InProceedings{Dyb91, + author = {P. Dybjer}, + booktitle = {Logical Frameworks}, + editor = {G. Huet and G. 
Plotkin}, + pages = {59--79}, + publisher = {Cambridge University Press}, + title = {Inductive sets and families in {Martin-Löf's} + Type Theory and their set-theoretic semantics: An inversion principle for {Martin-L\"of's} type theory}, + volume = {14}, + year = {1991} +} + +@Article{Dyc92, + author = {Roy Dyckhoff}, + journal = {The Journal of Symbolic Logic}, + month = sep, + number = {3}, + title = {Contraction-free sequent calculi for intuitionistic logic}, + volume = {57}, + year = {1992} +} + +@MastersThesis{Fil94, + author = {J.-C. Filli\^atre}, + month = sep, + school = {DEA d'Informatique, ENS Lyon}, + title = {Une proc\'edure de d\'ecision pour le Calcul des Pr\'edicats Direct. Étude et impl\'ementation dans le syst\`eme {\Coq}}, + year = {1994} +} + +@TechReport{Filliatre95, + author = {J.-C. Filli\^atre}, + institution = {LIP-ENS-Lyon}, + title = {A decision procedure for Direct Predicate Calculus}, + type = {Research report}, + number = {96--25}, + year = {1995} +} + +@Article{Filliatre03jfp, + author = {J.-C. Filliâtre}, + title = {Verification of Non-Functional Programs + using Interpretations in Type Theory}, + journal = jfp, + volume = 13, + number = 4, + pages = {709--745}, + month = jul, + year = 2003, + note = {[English translation of \cite{Filliatre99}]}, + url = {http://www.lri.fr/~filliatr/ftp/publis/jphd.ps.gz}, + topics = {team, lri}, + type_publi = {irevcomlec} +} + +@PhDThesis{Filliatre99, + author = {J.-C. Filli\^atre}, + title = {Preuve de programmes imp\'eratifs en th\'eorie des types}, + type = {Thèse de Doctorat}, + school = {Universit\'e Paris-Sud}, + year = 1999, + month = {July}, + url = {\url{http://www.lri.fr/~filliatr/ftp/publis/these.ps.gz}} +} + +@Unpublished{Filliatre99c, + author = {J.-C. 
Filli\^atre}, + title = {{Formal Proof of a Program: Find}}, + month = {January}, + year = 2000, + note = {Submitted to \emph{Science of Computer Programming}}, + url = {\url{http://www.lri.fr/~filliatr/ftp/publis/find.ps.gz}} +} + +@InProceedings{FilliatreMagaud99, + author = {J.-C. Filli\^atre and N. Magaud}, + title = {Certification of sorting algorithms in the system {\Coq}}, + booktitle = {Theorem Proving in Higher Order Logics: + Emerging Trends}, + year = 1999, + url = {\url{http://www.lri.fr/~filliatr/ftp/publis/Filliatre-Magaud.ps.gz}} +} + +@Unpublished{Fle90, + author = {E. Fleury}, + month = jul, + note = {Rapport de Stage}, + title = {Implantation des algorithmes de {Floyd et de Dijkstra} dans le {Calcul des Constructions}}, + year = {1990} +} + +@Book{Fourier, + author = {Jean-Baptiste-Joseph Fourier}, + publisher = {Gauthier-Villars}, + title = {Fourier's method to solve linear + inequations/equations systems.}, + year = {1890} +} + +@InProceedings{Gim94, + author = {E. Gim\'enez}, + booktitle = {Types'94 : Types for Proofs and Programs}, + note = {Extended version in LIP research report 95-07, ENS Lyon}, + publisher = SV, + series = LNCS, + title = {Codifying guarded definitions with recursive schemes}, + volume = {996}, + year = {1994} +} + +@PhDThesis{Gim96, + author = {E. Gim\'enez}, + title = {Un calcul des constructions infinies et son application \'a la v\'erification de syst\`emes communicants}, + school = {\'Ecole Normale Sup\'erieure de Lyon}, + year = {1996} +} + +@TechReport{Gim98, + author = {E. Gim\'enez}, + title = {A Tutorial on Recursive Types in Coq}, + institution = {INRIA}, + year = 1998, + month = mar +} + +@Unpublished{GimCas05, + author = {E. Gim\'enez and P. Cast\'eran}, + title = {A Tutorial on [Co-]Inductive Types in Coq}, + institution = {INRIA}, + year = 2005, + month = jan, + note = {available at \url{http://coq.inria.fr/doc}} +} + +@InProceedings{Gimenez95b, + author = {E. 
Gim\'enez}, + booktitle = {Workshop on Types for Proofs and Programs}, + series = LNCS, + number = {1158}, + pages = {135-152}, + title = {An application of co-Inductive types in Coq: + verification of the Alternating Bit Protocol}, + editorS = {S. Berardi and M. Coppo}, + publisher = SV, + year = {1995} +} + +@InProceedings{Gir70, + author = {J.-Y. Girard}, + booktitle = {Proceedings of the 2nd Scandinavian Logic Symposium}, + publisher = {North-Holland}, + title = {Une extension de l'interpr\'etation de {G\"odel} \`a l'analyse, et son application \`a l'\'elimination des coupures dans l'analyse et la th\'eorie des types}, + year = {1970} +} + +@PhDThesis{Gir72, + author = {J.-Y. Girard}, + school = {Universit\'e Paris~7}, + title = {Interpr\'etation fonctionnelle et \'elimination des coupures de l'arithm\'etique d'ordre sup\'erieur}, + year = {1972} +} + +@Book{Gir89, + author = {J.-Y. Girard and Y. Lafont and P. Taylor}, + publisher = {Cambridge University Press}, + series = {Cambridge Tracts in Theoretical Computer Science 7}, + title = {Proofs and Types}, + year = {1989} +} + +@TechReport{Har95, + author = {John Harrison}, + title = {Metatheory and Reflection in Theorem Proving: A Survey and Critique}, + institution = {SRI International Cambridge Computer Science Research Centre,}, + year = 1995, + type = {Technical Report}, + number = {CRC-053}, + abstract = {http://www.cl.cam.ac.uk/users/jrh/papers.html} +} + +@MastersThesis{Hir94, + author = {D. Hirschkoff}, + month = sep, + school = {DEA IARFA, Ecole des Ponts et Chauss\'ees, Paris}, + title = {Écriture d'une tactique arithm\'etique pour le syst\`eme {\Coq}}, + year = {1994} +} + +@InProceedings{HofStr98, + author = {Martin Hofmann and Thomas Streicher}, + title = {The groupoid interpretation of type theory}, + booktitle = {Proceedings of the meeting Twenty-five years of constructive type theory}, + publisher = {Oxford University Press}, + year = {1998} +} + +@InCollection{How80, + author = {W.A. 
Howard}, + booktitle = {to H.B. Curry : Essays on Combinatory Logic, Lambda Calculus and Formalism.}, + editor = {J.P. Seldin and J.R. Hindley}, + note = {Unpublished 1969 Manuscript}, + publisher = {Academic Press}, + title = {The Formulae-as-Types Notion of Constructions}, + year = {1980} +} + +@InProceedings{Hue87tapsoft, + author = {G. Huet}, + title = {Programming of Future Generation Computers}, + booktitle = {Proceedings of TAPSOFT87}, + series = LNCS, + volume = 249, + pages = {276--286}, + year = 1987, + publisher = SV +} + +@InProceedings{Hue87, + author = {G. Huet}, + booktitle = {Programming of Future Generation Computers}, + editor = {K. Fuchi and M. Nivat}, + note = {Also in \cite{Hue87tapsoft}}, + publisher = {Elsevier Science}, + title = {Induction Principles Formalized in the {Calculus of Constructions}}, + year = {1988} +} + +@InProceedings{Hue88, + author = {G. Huet}, + booktitle = {A perspective in Theoretical Computer Science. Commemorative Volume for Gift Siromoney}, + editor = {R. Narasimhan}, + note = {Also in~\cite{CoC89}}, + publisher = {World Scientific Publishing}, + title = {{The Constructive Engine}}, + year = {1989} +} + +@Unpublished{Hue88b, + author = {G. Huet}, + title = {Extending the Calculus of Constructions with Type:Type}, + year = 1988, + note = {Unpublished} +} + +@Book{Hue89, + editor = {G. Huet}, + publisher = {Addison-Wesley}, + series = {The UT Year of Programming Series}, + title = {Logical Foundations of Functional Programming}, + year = {1989} +} + +@InProceedings{Hue92, + author = {G. Huet}, + booktitle = {Proceedings of 12th FST/TCS Conference, New Delhi}, + pages = {229--240}, + publisher = SV, + series = LNCS, + title = {The Gallina Specification Language : A case study}, + volume = {652}, + year = {1992} +} + +@Article{Hue94, + author = {G. Huet}, + journal = {J. 
Functional Programming}, + pages = {371--394}, + publisher = {Cambridge University Press}, + title = {Residual theory in $\lambda$-calculus: a formal development}, + volume = {4,3}, + year = {1994} +} + +@InCollection{HuetLevy79, + author = {G. Huet and J.-J. L\'{e}vy}, + title = {Call by Need Computations in Non-Ambigous +Linear Term Rewriting Systems}, + note = {Also research report 359, INRIA, 1979}, + booktitle = {Computational Logic, Essays in Honor of +Alan Robinson}, + editor = {J.-L. Lassez and G. Plotkin}, + publisher = {The MIT press}, + year = {1991} +} + +@Article{KeWe84, + author = {J. Ketonen and R. Weyhrauch}, + journal = {Theoretical Computer Science}, + pages = {297--307}, + title = {A decidable fragment of {P}redicate {C}alculus}, + volume = {32}, + year = {1984} +} + +@Book{Kle52, + author = {S.C. Kleene}, + publisher = {North-Holland}, + series = {Bibliotheca Mathematica}, + title = {Introduction to Metamathematics}, + year = {1952} +} + +@Book{Kri90, + author = {J.-L. Krivine}, + publisher = {Masson}, + series = {Etudes et recherche en informatique}, + title = {Lambda-calcul {types et mod\`eles}}, + year = {1990} +} + +@Book{LE92, + editor = {G. Huet and G. Plotkin}, + publisher = {Cambridge University Press}, + title = {Logical Environments}, + year = {1992} +} + +@Book{LF91, + editor = {G. Huet and G. Plotkin}, + publisher = {Cambridge University Press}, + title = {Logical Frameworks}, + year = {1991} +} + +@Article{Laville91, + author = {A. Laville}, + title = {Comparison of Priority Rules in Pattern +Matching and Term Rewriting}, + journal = {Journal of Symbolic Computation}, + volume = {11}, + pages = {321--347}, + year = {1991} +} + +@InProceedings{LePa94, + author = {F. Leclerc and C. Paulin-Mohring}, + booktitle = {{Types for Proofs and Programs, Types' 93}}, + editor = {H. Barendregt and T. Nipkow}, + publisher = SV, + series = {LNCS}, + title = {{Programming with Streams in Coq. 
A case study : The Sieve of Eratosthenes}}, + volume = {806}, + year = {1994} +} + +@TechReport{Leroy90, + author = {X. Leroy}, + title = {The {ZINC} experiment: an economical implementation +of the {ML} language}, + institution = {INRIA}, + number = {117}, + year = {1990} +} + +@InProceedings{Let02, + author = {P. Letouzey}, + title = {A New Extraction for Coq}, + booktitle = {TYPES}, + year = 2002, + crossref = {DBLP:conf/types/2002}, + url = {draft at \url{http://www.irif.fr/~letouzey/download/extraction2002.pdf}} +} + +@PhDThesis{Luo90, + author = {Z. Luo}, + title = {An Extended Calculus of Constructions}, + school = {University of Edinburgh}, + year = {1990} +} + +@inproceedings{Luttik97specificationof, + Author = {Sebastiaan P. Luttik and Eelco Visser}, + Booktitle = {2nd International Workshop on the Theory and Practice of Algebraic Specifications (ASF+SDF'97), Electronic Workshops in Computing}, + Publisher = {Springer-Verlag}, + Title = {Specification of Rewriting Strategies}, + Year = {1997}} + +@Book{MaL84, + author = {{P. Martin-L\"of}}, + publisher = {Bibliopolis}, + series = {Studies in Proof Theory}, + title = {Intuitionistic Type Theory}, + year = {1984} +} + +@Article{MaSi94, + author = {P. Manoury and M. Simonot}, + title = {Automatizing Termination Proofs of Recursively Defined Functions.}, + journal = {TCS}, + volume = {135}, + number = {2}, + year = {1994}, + pages = {319-343}, +} + +@InProceedings{Miquel00, + author = {A. Miquel}, + title = {A Model for Impredicative Type Systems with Universes, +Intersection Types and Subtyping}, + booktitle = {{Proceedings of the 15th Annual IEEE Symposium on Logic in Computer Science (LICS'00)}}, + publisher = {IEEE Computer Society Press}, + year = {2000} +} + +@PhDThesis{Miquel01a, + author = {A. Miquel}, + title = {Le Calcul des Constructions implicite: syntaxe et s\'emantique}, + month = {dec}, + school = {{Universit\'e Paris 7}}, + year = {2001} +} + +@InProceedings{Miquel01b, + author = {A. 
Miquel}, + title = {The Implicit Calculus of Constructions: Extending Pure Type Systems with an Intersection Type Binder and Subtyping}, + booktitle = {{Proceedings of the fifth International Conference on Typed Lambda Calculi and Applications (TLCA01), Krakow, Poland}}, + publisher = SV, + series = {LNCS}, + number = 2044, + year = {2001} +} + +@InProceedings{MiWer02, + author = {A. Miquel and B. Werner}, + title = {The Not So Simple Proof-Irrelevant Model of CC}, + booktitle = {TYPES}, + year = {2002}, + pages = {240-258}, + ee = {http://link.springer.de/link/service/series/0558/bibs/2646/26460240.htm}, + crossref = {DBLP:conf/types/2002}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@proceedings{DBLP:conf/types/2002, + editor = {H. Geuvers and F. Wiedijk}, + title = {Types for Proofs and Programs, Second International Workshop, + TYPES 2002, Berg en Dal, The Netherlands, April 24-28, 2002, + Selected Papers}, + booktitle = {TYPES}, + publisher = SV, + series = LNCS, + volume = {2646}, + year = {2003}, + isbn = {3-540-14031-X}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@InProceedings{Moh89a, + author = {C. Paulin-Mohring}, + address = {Austin}, + booktitle = {Sixteenth Annual ACM Symposium on Principles of Programming Languages}, + month = jan, + publisher = {ACM}, + title = {Extracting ${F}_{\omega}$'s programs from proofs in the {Calculus of Constructions}}, + year = {1989} +} + +@PhDThesis{Moh89b, + author = {C. Paulin-Mohring}, + month = jan, + school = {{Universit\'e Paris 7}}, + title = {Extraction de programmes dans le {Calcul des Constructions}}, + year = {1989} +} + +@InProceedings{Moh93, + author = {C. Paulin-Mohring}, + booktitle = {Proceedings of the conference Typed Lambda Calculi and Applications}, + editor = {M. Bezem and J.-F. 
Groote}, + note = {Also LIP research report 92-49, ENS Lyon}, + number = {664}, + publisher = SV, + series = {LNCS}, + title = {{Inductive Definitions in the System Coq - Rules and Properties}}, + year = {1993} +} + +@Book{Moh97, + author = {C. Paulin-Mohring}, + month = jan, + publisher = {{ENS Lyon}}, + title = {{Le syst\`eme Coq. \mbox{Th\`ese d'habilitation}}}, + year = {1997} +} + +@MastersThesis{Mun94, + author = {C. Muñoz}, + month = sep, + school = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, + title = {D\'emonstration automatique dans la logique propositionnelle intuitionniste}, + year = {1994} +} + +@PhDThesis{Mun97d, + author = {C. Mu{\~{n}}oz}, + title = {Un calcul de substitutions pour la repr\'esentation + de preuves partielles en th\'eorie de types}, + school = {Universit\'e Paris 7}, + year = {1997}, + note = {Version en anglais disponible comme rapport de + recherche INRIA RR-3309}, + type = {Th\`ese de Doctorat} +} + +@Book{NoPS90, + author = {B. {Nordstr\"om} and K. Peterson and J. Smith}, + booktitle = {Information Processing 83}, + publisher = {Oxford Science Publications}, + series = {International Series of Monographs on Computer Science}, + title = {Programming in {Martin-L\"of's} Type Theory}, + year = {1990} +} + +@Article{Nor88, + author = {B. {Nordstr\"om}}, + journal = {BIT}, + title = {Terminating General Recursion}, + volume = {28}, + year = {1988} +} + +@Book{Odi90, + editor = {P. Odifreddi}, + publisher = {Academic Press}, + title = {Logic and Computer Science}, + year = {1990} +} + +@InProceedings{PaMS92, + author = {M. Parigot and P. Manoury and M. Simonot}, + address = {St. Petersburg, Russia}, + booktitle = {Logic Programming and automated reasoning}, + editor = {A. Voronkov}, + month = jul, + number = {624}, + publisher = SV, + series = {LNCS}, + title = {{ProPre : A Programming language with proofs}}, + year = {1992} +} + +@Article{PaWe92, + author = {C. Paulin-Mohring and B. 
Werner}, + journal = {Journal of Symbolic Computation}, + pages = {607--640}, + title = {{Synthesis of ML programs in the system Coq}}, + volume = {15}, + year = {1993} +} + +@Article{Par92, + author = {M. Parigot}, + journal = {Theoretical Computer Science}, + number = {2}, + pages = {335--356}, + title = {{Recursive Programming with Proofs}}, + volume = {94}, + year = {1992} +} + +@InProceedings{Parent95b, + author = {C. Parent}, + booktitle = {{Mathematics of Program Construction'95}}, + publisher = SV, + series = {LNCS}, + title = {{Synthesizing proofs from programs in +the Calculus of Inductive Constructions}}, + volume = {947}, + year = {1995} +} + +@InProceedings{Prasad93, + author = {K.V. Prasad}, + booktitle = {{Proceedings of CONCUR'93}}, + publisher = SV, + series = {LNCS}, + title = {{Programming with broadcasts}}, + volume = {715}, + year = {1993} +} + +@Book{RC95, + author = {di~Cosmo, R.}, + title = {Isomorphisms of Types: from $\lambda$-calculus to information + retrieval and language design}, + series = {Progress in Theoretical Computer Science}, + publisher = {Birkhauser}, + year = {1995}, + note = {ISBN-0-8176-3763-X} +} + +@TechReport{Rou92, + author = {J. Rouyer}, + institution = {INRIA}, + month = nov, + number = {1795}, + title = {{Développement de l'Algorithme d'Unification dans le Calcul des Constructions}}, + year = {1992} +} + +@Article{Rushby98, + title = {Subtypes for Specifications: Predicate Subtyping in + {PVS}}, + author = {John Rushby and Sam Owre and N. Shankar}, + journal = {IEEE Transactions on Software Engineering}, + pages = {709--720}, + volume = 24, + number = 9, + month = sep, + year = 1998 +} + +@TechReport{Saibi94, + author = {A. Sa\"{\i}bi}, + institution = {INRIA}, + month = dec, + number = {2345}, + title = {{Axiomatization of a lambda-calculus with explicit-substitutions in the Coq System}}, + year = {1994} +} + + +@MastersThesis{Ter92, + author = {D. 
Terrasse}, + month = sep, + school = {IARFA}, + title = {{Traduction de TYPOL en COQ. Application \`a Mini ML}}, + year = {1992} +} + +@TechReport{ThBeKa92, + author = {L. Th\'ery and Y. Bertot and G. Kahn}, + institution = {INRIA Sophia}, + month = may, + number = {1684}, + title = {Real theorem provers deserve real user-interfaces}, + type = {Research Report}, + year = {1992} +} + +@Book{TrDa89, + author = {A.S. Troelstra and D. van Dalen}, + publisher = {North-Holland}, + series = {Studies in Logic and the foundations of Mathematics, volumes 121 and 123}, + title = {Constructivism in Mathematics, an introduction}, + year = {1988} +} + +@PhDThesis{Wer94, + author = {B. Werner}, + school = {Universit\'e Paris 7}, + title = {Une th\'eorie des constructions inductives}, + type = {Th\`ese de Doctorat}, + year = {1994} +} + +@PhDThesis{Bar99, + author = {B. Barras}, + school = {Universit\'e Paris 7}, + title = {Auto-validation d'un système de preuves avec familles inductives}, + type = {Th\`ese de Doctorat}, + year = {1999} +} + +@Unpublished{ddr98, + author = {D. de Rauglaudre}, + title = {Camlp4 version 1.07.2}, + year = {1998}, + note = {In Camlp4 distribution} +} + +@Article{dowek93, + author = {G. Dowek}, + title = {{A Complete Proof Synthesis Method for the Cube of Type Systems}}, + journal = {Journal Logic Computation}, + volume = {3}, + number = {3}, + pages = {287--315}, + month = {June}, + year = {1993} +} + +@InProceedings{manoury94, + author = {P. Manoury}, + title = {{A User's Friendly Syntax to Define +Recursive Functions as Typed $\lambda-$Terms}}, + booktitle = {{Types for Proofs and Programs, TYPES'94}}, + series = {LNCS}, + volume = {996}, + month = jun, + year = {1994} +} + +@TechReport{maranget94, + author = {L. Maranget}, + institution = {INRIA}, + number = {2385}, + title = {{Two Techniques for Compiling Lazy Pattern Matching}}, + year = {1994} +} + +@InProceedings{puel-suarez90, + author = {L.Puel and A. 
Su\'arez}, + booktitle = {{Conference Lisp and Functional Programming}}, + series = {ACM}, + publisher = SV, + title = {{Compiling Pattern Matching by Term +Decomposition}}, + year = {1990} +} + +@MastersThesis{saidi94, + author = {H. Saidi}, + month = sep, + school = {DEA d'Informatique Fondamentale, Universit\'e Paris 7}, + title = {R\'esolution d'\'equations dans le syst\`eme T + de G\"odel}, + year = {1994} +} + +@inproceedings{sozeau06, + author = {Matthieu Sozeau}, + title = {Subset Coercions in {C}oq}, + year = {2007}, + booktitle = {TYPES'06}, + pages = {237-252}, + volume = {4502}, + publisher = "Springer", + series = {LNCS} +} + +@inproceedings{sozeau08, + Author = {Matthieu Sozeau and Nicolas Oury}, + booktitle = {TPHOLs'08}, + Pdf = {http://www.lri.fr/~sozeau/research/publications/drafts/classes.pdf}, + Title = {{F}irst-{C}lass {T}ype {C}lasses}, + Year = {2008}, +} + +@Misc{streicher93semantical, + author = {T. Streicher}, + title = {Semantical Investigations into Intensional Type Theory}, + note = {Habilitationsschrift, LMU Munchen.}, + year = {1993} +} + +@Misc{Pcoq, + author = {Lemme Team}, + title = {Pcoq a graphical user-interface for {Coq}}, + note = {\url{http://www-sop.inria.fr/lemme/pcoq/}} +} + +@Misc{ProofGeneral, + author = {David Aspinall}, + title = {Proof General}, + note = {\url{https://proofgeneral.github.io/}} +} + +@Book{CoqArt, + title = {Interactive Theorem Proving and Program Development. + Coq'Art: The Calculus of Inductive Constructions}, + author = {Yves Bertot and Pierre Castéran}, + publisher = {Springer Verlag}, + series = {Texts in Theoretical Computer Science. An EATCS series}, + year = 2004 +} + +@InCollection{wadler87, + author = {P. Wadler}, + title = {Efficient Compilation of Pattern Matching}, + booktitle = {The Implementation of Functional Programming +Languages}, + editor = {S.L. 
Peyton Jones}, + publisher = {Prentice-Hall}, + year = {1987} +} + +@inproceedings{DBLP:conf/types/CornesT95, + author = {Cristina Cornes and + Delphine Terrasse}, + title = {Automating Inversion of Inductive Predicates in Coq}, + booktitle = {TYPES}, + year = {1995}, + pages = {85-104}, + crossref = {DBLP:conf/types/1995}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} +@proceedings{DBLP:conf/types/1995, + editor = {Stefano Berardi and + Mario Coppo}, + title = {Types for Proofs and Programs, International Workshop TYPES'95, + Torino, Italy, June 5-8, 1995, Selected Papers}, + booktitle = {TYPES}, + publisher = {Springer}, + series = {Lecture Notes in Computer Science}, + volume = {1158}, + year = {1996}, + isbn = {3-540-61780-9}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@inproceedings{DBLP:conf/types/McBride00, + author = {Conor McBride}, + title = {Elimination with a Motive}, + booktitle = {TYPES}, + year = {2000}, + pages = {197-216}, + ee = {http://link.springer.de/link/service/series/0558/bibs/2277/22770197.htm}, + crossref = {DBLP:conf/types/2000}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@proceedings{DBLP:conf/types/2000, + editor = {Paul Callaghan and + Zhaohui Luo and + James McKinna and + Robert Pollack}, + title = {Types for Proofs and Programs, International Workshop, TYPES + 2000, Durham, UK, December 8-12, 2000, Selected Papers}, + booktitle = {TYPES}, + publisher = {Springer}, + series = {Lecture Notes in Computer Science}, + volume = {2277}, + year = {2002}, + isbn = {3-540-43287-6}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@INPROCEEDINGS{sugar, + author = {Alessandro Giovini and Teo Mora and Gianfranco Niesi and Lorenzo Robbiano and Carlo Traverso}, + title = {"One sugar cube, please" or Selection strategies in the Buchberger algorithm}, + booktitle = { Proceedings of the ISSAC'91, ACM Press}, + year = {1991}, + pages = {49--54}, + publisher = {} +} + +@article{LeeWerner11, + author = {Gyesik Lee and + Benjamin 
Werner}, + title = {Proof-irrelevant model of {CC} with predicative induction + and judgmental equality}, + journal = {Logical Methods in Computer Science}, + volume = {7}, + number = {4}, + year = {2011}, + ee = {http://dx.doi.org/10.2168/LMCS-7(4:5)2011}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@Comment{cross-references, must be at end} + +@Book{Bastad92, + editor = {B. Nordstr\"om and K. Petersson and G. Plotkin}, + publisher = {Available by ftp at site ftp.inria.fr}, + title = {Proceedings of the 1992 Workshop on Types for Proofs and Programs}, + year = {1992} +} + +@Book{Nijmegen93, + editor = {H. Barendregt and T. Nipkow}, + publisher = SV, + series = LNCS, + title = {Types for Proofs and Programs}, + volume = {806}, + year = {1994} +} + +@article{TheOmegaPaper, + author = "W. Pugh", + title = "The Omega test: a fast and practical integer programming algorithm for dependence analysis", + journal = "Communications of the ACM", + pages = "102--114", + year = "1992", +} + +@inproceedings{CSwcu, + hal_id = {hal-00816703}, + url = {http://hal.inria.fr/hal-00816703}, + title = {{Canonical Structures for the working Coq user}}, + author = {Mahboubi, Assia and Tassi, Enrico}, + booktitle = {{ITP 2013, 4th Conference on Interactive Theorem Proving}}, + publisher = {Springer}, + pages = {19-34}, + address = {Rennes, France}, + volume = {7998}, + editor = {Sandrine Blazy and Christine Paulin and David Pichardie }, + series = {LNCS }, + doi = {10.1007/978-3-642-39634-2\_5 }, + year = {2013}, +} + +@article{CSlessadhoc, + author = {Gonthier, Georges and Ziliani, Beta and Nanevski, Aleksandar and Dreyer, Derek}, + title = {How to Make Ad Hoc Proof Automation Less Ad Hoc}, + journal = {SIGPLAN Not.}, + issue_date = {September 2011}, + volume = {46}, + number = {9}, + month = sep, + year = {2011}, + issn = {0362-1340}, + pages = {163--175}, + numpages = {13}, + url = {http://doi.acm.org/10.1145/2034574.2034798}, + doi = {10.1145/2034574.2034798}, + acmid = 
{2034798}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {canonical structures, coq, custom proof automation, hoare type theory, interactive theorem proving, tactics, type classes}, +} + +@inproceedings{CompiledStrongReduction, + author = {Benjamin Gr{\'{e}}goire and + Xavier Leroy}, + editor = {Mitchell Wand and + Simon L. Peyton Jones}, + title = {A compiled implementation of strong reduction}, + booktitle = {Proceedings of the Seventh {ACM} {SIGPLAN} International Conference + on Functional Programming {(ICFP} '02), Pittsburgh, Pennsylvania, + USA, October 4-6, 2002.}, + pages = {235--246}, + publisher = {{ACM}}, + year = {2002}, + url = {http://doi.acm.org/10.1145/581478.581501}, + doi = {10.1145/581478.581501}, + timestamp = {Tue, 11 Jun 2013 13:49:16 +0200}, + biburl = {http://dblp.uni-trier.de/rec/bib/conf/icfp/GregoireL02}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + +@inproceedings{FullReduction, + author = {Mathieu Boespflug and + Maxime D{\'{e}}n{\`{e}}s and + Benjamin Gr{\'{e}}goire}, + editor = {Jean{-}Pierre Jouannaud and + Zhong Shao}, + title = {Full Reduction at Full Throttle}, + booktitle = {Certified Programs and Proofs - First International Conference, {CPP} + 2011, Kenting, Taiwan, December 7-9, 2011. 
Proceedings}, + series = {Lecture Notes in Computer Science}, + volume = {7086}, + pages = {362--377}, + publisher = {Springer}, + year = {2011}, + url = {http://dx.doi.org/10.1007/978-3-642-25379-9_26}, + doi = {10.1007/978-3-642-25379-9_26}, + timestamp = {Thu, 17 Nov 2011 13:33:48 +0100}, + biburl = {http://dblp.uni-trier.de/rec/bib/conf/cpp/BoespflugDG11}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} diff --git a/doc/sphinx/conf.py b/doc/sphinx/conf.py index 0bff41a259..23bc9a2e4a 100755 --- a/doc/sphinx/conf.py +++ b/doc/sphinx/conf.py @@ -10,7 +10,7 @@ ## # (see LICENSE file for the text of the license) ## ########################################################################## # -# Coq 8.5 documentation build configuration file, created by +# Coq documentation build configuration file, created by # sphinx-quickstart on Wed May 11 11:23:13 2016. # # This file is execfile()d with the current directory set to its @@ -32,6 +32,9 @@ sys.setrecursionlimit(1500) # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('../tools/')) +sys.path.append(os.path.abspath('../../config/')) + +import coq_config # -- General configuration ------------------------------------------------ @@ -64,7 +67,7 @@ master_doc = 'index' # General information about the project. project = 'Coq' -copyright = '2016, Inria' +copyright = '1999-2018, Inria' author = 'The Coq Development Team' # The version info for the project you're documenting, acts as replacement for @@ -72,9 +75,9 @@ author = 'The Coq Development Team' # built documents. # # The short X.Y version. -version = '8.7' +version = coq_config.version # The full version, including alpha/beta/rc tags. -release = '8.7.dev' +release = coq_config.version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
@@ -92,7 +95,13 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = [ + '_build', + 'Thumbs.db', + '.DS_Store', + 'introduction.rst', + 'credits.rst' +] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -143,6 +152,14 @@ html_theme = 'sphinx_rtd_theme' # documentation. #html_theme_options = {} +html_context = { + 'display_github': True, + 'github_user': 'coq', + 'github_repo': 'coq', + 'github_version': 'master', + 'conf_py_path': '/doc/sphinx/' +} + # Add any paths that contain custom themes here, relative to this directory. import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] @@ -230,9 +247,6 @@ html_use_smartypants = False # FIXME wrap code in <code> tags, otherwise quotesg # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' -# Output file base name for HTML help builder. -htmlhelp_basename = 'Coq85doc' - # -- Options for LaTeX output --------------------------------------------- ########################### @@ -264,10 +278,10 @@ latex_additional_files = ["_static/coqnotations.sty"] # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'Coq85.tex', 'Coq 8.5 Documentation', - 'The Coq Development Team (edited by C. Pit-Claudel)', 'manual'), -] +# latex_documents = [ +# (master_doc, 'CoqRefMan.tex', 'Coq Documentation', +# 'The Coq Development Team', 'manual'), +#] # The name of an image file (relative to this directory) to place at the top of # the title page. @@ -294,10 +308,10 @@ latex_documents = [ # One entry per manual page. 
List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'coq85', 'Coq 8.5 Documentation', - [author], 1) -] +#man_pages = [ +# (master_doc, 'coq', 'Coq Documentation', +# [author], 1) +#] # If true, show URL addresses after external links. #man_show_urls = False @@ -308,11 +322,11 @@ man_pages = [ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'Coq85', 'Coq 8.5 Documentation', - author, 'Coq85', 'One line description of project.', - 'Miscellaneous'), -] +#texinfo_documents = [ +# (master_doc, 'Coq', 'Coq Documentation', +# author, 'Coq', 'One line description of project.', +# 'Miscellaneous'), +#] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] @@ -330,10 +344,10 @@ texinfo_documents = [ # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. -epub_title = project -epub_author = author -epub_publisher = author -epub_copyright = copyright +#epub_title = project +#epub_author = author +#epub_publisher = author +#epub_copyright = copyright # The basename for the epub file. It defaults to the project name. #epub_basename = project diff --git a/doc/sphinx/coq-cmdindex.rst b/doc/sphinx/coq-cmdindex.rst new file mode 100644 index 0000000000..7df6cb36c5 --- /dev/null +++ b/doc/sphinx/coq-cmdindex.rst @@ -0,0 +1,5 @@ +.. hack to get index in TOC + +----------------- +Command index +----------------- diff --git a/doc/sphinx/coq-exnindex.rst b/doc/sphinx/coq-exnindex.rst new file mode 100644 index 0000000000..100c57b085 --- /dev/null +++ b/doc/sphinx/coq-exnindex.rst @@ -0,0 +1,5 @@ +.. 
hack to get index in TOC + +---------------------- +Errors, warnings index +---------------------- diff --git a/doc/sphinx/coq-optindex.rst b/doc/sphinx/coq-optindex.rst new file mode 100644 index 0000000000..f8046a800b --- /dev/null +++ b/doc/sphinx/coq-optindex.rst @@ -0,0 +1,5 @@ +.. hack to get index in TOC + +----------------- +Option index +----------------- diff --git a/doc/sphinx/coq-tacindex.rst b/doc/sphinx/coq-tacindex.rst new file mode 100644 index 0000000000..588104f465 --- /dev/null +++ b/doc/sphinx/coq-tacindex.rst @@ -0,0 +1,5 @@ +.. hack to get index in TOC + +------------- +Tactic index +------------- diff --git a/doc/sphinx/credits.rst b/doc/sphinx/credits.rst new file mode 100644 index 0000000000..a60f326454 --- /dev/null +++ b/doc/sphinx/credits.rst @@ -0,0 +1,1401 @@ +------------------------------------------- +Credits +------------------------------------------- + +Coq is a proof assistant for higher-order logic, allowing the +development of computer programs consistent with their formal +specification. It is the result of about ten years of research of the +Coq project. We shall briefly survey here three main aspects: the +*logical language* in which we write our axiomatizations and +specifications, the *proof assistant* which allows the development of +verified mathematical proofs, and the *program extractor* which +synthesizes computer programs obeying their formal specifications, +written as logical assertions in the language. + +The logical language used by |Coq| is a variety of type theory, called the +*Calculus of Inductive Constructions*. Without going back to Leibniz and +Boole, we can date the creation of what is now called mathematical logic +to the work of Frege and Peano at the turn of the century. The discovery +of antinomies in the free use of predicates or comprehension principles +prompted Russell to restrict predicate calculus with a stratification of +*types*. 
This effort culminated with *Principia Mathematica*, the first +systematic attempt at a formal foundation of mathematics. A +simplification of this system along the lines of simply typed +:math:`\lambda`-calculus occurred with Church’s *Simple Theory of +Types*. The :math:`\lambda`-calculus notation, originally used for +expressing functionality, could also be used as an encoding of natural +deduction proofs. This Curry-Howard isomorphism was used by N. de Bruijn +in the *Automath* project, the first full-scale attempt to develop and +mechanically verify mathematical proofs. This effort culminated with +Jutting’s verification of Landau’s *Grundlagen* in the 1970’s. +Exploiting this Curry-Howard isomorphism, notable achievements in proof +theory saw the emergence of two type-theoretic frameworks; the first +one, Martin-Löf’s *Intuitionistic Theory of Types*, attempts a new +foundation of mathematics on constructive principles. The second one, +Girard’s polymorphic :math:`\lambda`-calculus :math:`F_\omega`, is a +very strong functional system in which we may represent higher-order +logic proof structures. Combining both systems in a higher-order +extension of the Automath languages, T. Coquand presented in 1985 the +first version of the *Calculus of Constructions*, CoC. This strong +logical system allowed powerful axiomatizations, but direct inductive +definitions were not possible, and inductive notions had to be defined +indirectly through functional encodings, which introduced inefficiencies +and awkwardness. The formalism was extended in 1989 by T. Coquand and C. +Paulin with primitive inductive definitions, leading to the current +*Calculus of Inductive Constructions*. This extended formalism is not +rigorously defined here. Rather, numerous concrete examples are +discussed. We refer the interested reader to relevant research papers +for more information about the formalism, its meta-theoretic properties, +and semantics. 
However, it should not be necessary to understand this +theoretical material in order to write specifications. It is possible to +understand the Calculus of Inductive Constructions at a higher level, as +a mixture of predicate calculus, inductive predicate definitions +presented as typed PROLOG, and recursive function definitions close to +the language ML. + +Automated theorem-proving was pioneered in the 1960’s by Davis and +Putnam in propositional calculus. A complete mechanization (in the sense +of a semi-decision procedure) of classical first-order logic was +proposed in 1965 by J.A. Robinson, with a single uniform inference rule +called *resolution*. Resolution relies on solving equations in free +algebras (i.e. term structures), using the *unification algorithm*. Many +refinements of resolution were studied in the 1970’s, but few convincing +implementations were realized, except of course that PROLOG is in some +sense issued from this effort. A less ambitious approach to proof +development is computer-aided proof-checking. The most notable +proof-checkers developed in the 1970’s were LCF, designed by R. Milner +and his colleagues at U. Edinburgh, specialized in proving properties +about denotational semantics recursion equations, and the Boyer and +Moore theorem-prover, an automation of primitive recursion over +inductive data types. While the Boyer-Moore theorem-prover attempted to +synthesize proofs by a combination of automated methods, LCF constructed +its proofs through the programming of *tactics*, written in a high-level +functional meta-language, ML. + +The salient feature which clearly distinguishes our proof assistant from +say LCF or Boyer and Moore’s, is its possibility to extract programs +from the constructive contents of proofs. This computational +interpretation of proof objects, in the tradition of Bishop’s +constructive mathematics, is based on a realizability interpretation, in +the sense of Kleene, due to C. Paulin. 
The user must just mark his +intention by separating in the logical statements the assertions stating +the existence of a computational object from the logical assertions +which specify its properties, but which may be considered as just +comments in the corresponding program. Given this information, the +system automatically extracts a functional term from a consistency proof +of its specifications. This functional term may be in turn compiled into +an actual computer program. This methodology of extracting programs from +proofs is a revolutionary paradigm for software engineering. Program +synthesis has long been a theme of research in artificial intelligence, +pioneered by R. Waldinger. The Tablog system of Z. Manna and R. +Waldinger allows the deductive synthesis of functional programs from +proofs in tableau form of their specifications, written in a variety of +first-order logic. Development of a systematic *programming logic*, +based on extensions of Martin-Löf’s type theory, was undertaken at +Cornell U. by the Nuprl team, headed by R. Constable. The first actual +program extractor, PX, was designed and implemented around 1985 by S. +Hayashi from Kyoto University. It allows the extraction of a LISP +program from a proof in a logical system inspired by the logical +formalisms of S. Feferman. Interest in this methodology is growing in +the theoretical computer science community. We can foresee the day when +actual computer systems used in applications will contain certified +modules, automatically generated from a consistency proof of their +formal specifications. We are however still far from being able to use +this methodology in a smooth interaction with the standard tools from +software engineering, i.e. compilers, linkers, run-time systems taking +advantage of special hardware, debuggers, and the like. We hope that |Coq| +can be of use to researchers interested in experimenting with this new +methodology. 
+ +A first implementation of CoC was started in 1984 by G. Huet and T. +Coquand. Its implementation language was CAML, a functional programming +language from the ML family designed at INRIA in Rocquencourt. The core +of this system was a proof-checker for CoC seen as a typed +:math:`\lambda`-calculus, called the *Constructive Engine*. This engine +was operated through a high-level notation permitting the declaration of +axioms and parameters, the definition of mathematical types and objects, +and the explicit construction of proof objects encoded as +:math:`\lambda`-terms. A section mechanism, designed and implemented by +G. Dowek, allowed hierarchical developments of mathematical theories. +This high-level language was called the *Mathematical Vernacular*. +Furthermore, an interactive *Theorem Prover* permitted the incremental +construction of proof trees in a top-down manner, subgoaling recursively +and backtracking from dead-alleys. The theorem prover executed tactics +written in CAML, in the LCF fashion. A basic set of tactics was +predefined, which the user could extend by his own specific tactics. +This system (Version 4.10) was released in 1989. Then, the system was +extended to deal with the new calculus with inductive types by C. +Paulin, with corresponding new tactics for proofs by induction. A new +standard set of tactics was streamlined, and the vernacular extended for +tactics execution. A package to compile programs extracted from proofs +to actual computer programs in CAML or some other functional language +was designed and implemented by B. Werner. A new user-interface, relying +on a CAML-X interface by D. de Rauglaudre, was designed and implemented +by A. Felty. It allowed operation of the theorem-prover through the +manipulation of windows, menus, mouse-sensitive buttons, and other +widgets. This system (Version 5.6) was released in 1991. + +Coq was ported to the new implementation Caml-light of X. Leroy and D. +Doligez by D. 
de Rauglaudre (Version 5.7) in 1992. A new version of |Coq| +was then coordinated by C. Murthy, with new tools designed by C. Parent +to prove properties of ML programs (this methodology is dual to program +extraction) and a new user-interaction loop. This system (Version 5.8) +was released in May 1993. A Centaur interface CTCoq was then developed +by Y. Bertot from the Croap project from INRIA-Sophia-Antipolis. + +In parallel, G. Dowek and H. Herbelin developed a new proof engine, +allowing the general manipulation of existential variables consistently +with dependent types in an experimental version of |Coq| (V5.9). + +The version V5.10 of |Coq| is based on a generic system for manipulating +terms with binding operators due to Chet Murthy. A new proof engine +allows the parallel development of partial proofs for independent +subgoals. The structure of these proof trees is a mixed representation +of derivation trees for the Calculus of Inductive Constructions with +abstract syntax trees for the tactics scripts, allowing the navigation +in a proof at various levels of details. The proof engine allows generic +environment items managed in an object-oriented way. This new +architecture, due to C. Murthy, supports several new facilities which +make the system easier to extend and to scale up: + +- User-programmable tactics are allowed + +- It is possible to separately verify development modules, and to load + their compiled images without verifying them again - a quick + relocation process allows their fast loading + +- A generic parsing scheme allows user-definable notations, with a + symmetric table-driven pretty-printer + +- Syntactic definitions allow convenient abbreviations + +- A limited facility of meta-variables allows the automatic synthesis + of certain type expressions, allowing generic notations for e.g. + equality, pairing, and existential quantification. + +In the Fall of 1994, C. 
Paulin-Mohring replaced the structure of +inductively defined types and families by a new structure, allowing the +mutually recursive definitions. P. Manoury implemented a translation of +recursive definitions into the primitive recursive style imposed by the +internal recursion operators, in the style of the ProPre system. C. +Muñoz implemented a decision procedure for intuitionistic propositional +logic, based on results of R. Dyckhoff. J.C. Filliâtre implemented a +decision procedure for first-order logic without contraction, based on +results of J. Ketonen and R. Weyhrauch. Finally C. Murthy implemented a +library of inversion tactics, relieving the user from tedious +definitions of “inversion predicates”. + +| Rocquencourt, Feb. 1st 1995 +| Gérard Huet +| + +Credits: addendum for version 6.1 +================================= + +The present version 6.1 of |Coq| is based on the V5.10 architecture. It +was ported to the new language Objective Caml by Bruno Barras. The +underlying framework has slightly changed and allows more conversions +between sorts. + +The new version provides powerful tools for easier developments. + +Cristina Cornes designed an extension of the |Coq| syntax to allow +definition of terms using a powerful pattern-matching analysis in the +style of ML programs. + +Amokrane Saïbi wrote a mechanism to simulate inheritance between types +families extending a proposal by Peter Aczel. He also developed a +mechanism to automatically compute which arguments of a constant may be +inferred by the system and consequently do not need to be explicitly +written. + +Yann Coscoy designed a command which explains a proof term using natural +language. Pierre Crégut built a new tactic which solves problems in +quantifier-free Presburger Arithmetic. Both functionalities have been +integrated to the |Coq| system by Hugo Herbelin. 
+ +Samuel Boutin designed a tactic for simplification of commutative rings +using a canonical set of rewriting rules and equality modulo +associativity and commutativity. + +Finally the organisation of the |Coq| distribution has been supervised by +Jean-Christophe Filliâtre with the help of Judicaël Courant and Bruno +Barras. + +| Lyon, Nov. 18th 1996 +| Christine Paulin +| + +Credits: addendum for version 6.2 +================================= + +In version 6.2 of |Coq|, the parsing is done using camlp4, a preprocessor +and pretty-printer for CAML designed by Daniel de Rauglaudre at INRIA. +Daniel de Rauglaudre made the first adaptation of |Coq| for camlp4, this +work was continued by Bruno Barras who also changed the structure of |Coq| +abstract syntax trees and the primitives to manipulate them. The result +of these changes is a faster parsing procedure with greatly improved +syntax-error messages. The user-interface to introduce grammar or +pretty-printing rules has also changed. + +Eduardo Giménez redesigned the internal tactic libraries, giving uniform +names to Caml functions corresponding to |Coq| tactic names. + +Bruno Barras wrote new more efficient reductions functions. + +Hugo Herbelin introduced more uniform notations in the |Coq| specification +language: the definitions by fixpoints and pattern-matching have a more +readable syntax. Patrick Loiseleur introduced user-friendly notations +for arithmetic expressions. + +New tactics were introduced: Eduardo Giménez improved a mechanism to +introduce macros for tactics, and designed special tactics for +(co)inductive definitions; Patrick Loiseleur designed a tactic to +simplify polynomial expressions in an arbitrary commutative ring which +generalizes the previous tactic implemented by Samuel Boutin. +Jean-Christophe Filliâtre introduced a tactic for refining a goal, using +a proof term with holes as a proof scheme. 
+ +David Delahaye designed the tool to search an object in the library +given its type (up to isomorphism). + +Henri Laulhère produced the |Coq| distribution for the Windows +environment. + +Finally, Hugo Herbelin was the main coordinator of the |Coq| documentation +with principal contributions by Bruno Barras, David Delahaye, +Jean-Christophe Filliâtre, Eduardo Giménez, Hugo Herbelin and Patrick +Loiseleur. + +| Orsay, May 4th 1998 +| Christine Paulin +| + +Credits: addendum for version 6.3 +================================= + +The main changes in version V6.3 was the introduction of a few new +tactics and the extension of the guard condition for fixpoint +definitions. + +B. Barras extended the unification algorithm to complete partial terms +and solved various tricky bugs related to universes. + +D. Delahaye developed the ``AutoRewrite`` tactic. He also designed the +new behavior of ``Intro`` and provided the tacticals ``First`` and +``Solve``. + +J.-C. Filliâtre developed the ``Correctness`` tactic. + +\E. Giménez extended the guard condition in fixpoints. + +H. Herbelin designed the new syntax for definitions and extended the +``Induction`` tactic. + +P. Loiseleur developed the ``Quote`` tactic and the new design of the +``Auto`` tactic, he also introduced the index of errors in the +documentation. + +C. Paulin wrote the ``Focus`` command and introduced the reduction +functions in definitions, this last feature was proposed by J.-F. +Monin from CNET Lannion. + +| Orsay, Dec. 1999 +| Christine Paulin +| + +Credits: versions 7 +=================== + +The version V7 is a new implementation started in September 1999 by +Jean-Christophe Filliâtre. This is a major revision with respect to the +internal architecture of the system. The |Coq| version 7.0 was distributed +in March 2001, version 7.1 in September 2001, version 7.2 in January +2002, version 7.3 in May 2002 and version 7.4 in February 2003. 
+ +Jean-Christophe Filliâtre designed the architecture of the new system, +he introduced a new representation for environments and wrote a new +kernel for type-checking terms. His approach was to use functional +data-structures in order to get more sharing, to prepare the addition of +modules and also to get closer to a certified kernel. + +Hugo Herbelin introduced a new structure of terms with local +definitions. He introduced “qualified” names, wrote a new +pattern-matching compilation algorithm and designed a more compact +algorithm for checking the logical consistency of universes. He +contributed to the simplification of |Coq| internal structures and the +optimisation of the system. He added basic tactics for forward reasoning +and coercions in patterns. + +David Delahaye introduced a new language for tactics. General tactics +using pattern-matching on goals and context can directly be written from +the |Coq| toplevel. He also provided primitives for the design of +user-defined tactics in Caml. + +Micaela Mayero contributed the library on real numbers. Olivier +Desmettre extended this library with axiomatic trigonometric functions, +square, square roots, finite sums, Chasles property and basic plane +geometry. + +Jean-Christophe Filliâtre and Pierre Letouzey redesigned a new +extraction procedure from |Coq| terms to Caml or Haskell programs. This +new extraction procedure, unlike the one implemented in previous version +of |Coq| is able to handle all terms in the Calculus of Inductive +Constructions, even involving universes and strong elimination. P. +Letouzey adapted user contributions to extract ML programs when it was +sensible. Jean-Christophe Filliâtre wrote ``coqdoc``, a documentation +tool for |Coq| libraries usable from version 7.2. + +Bruno Barras improved the reduction algorithms efficiency and the +confidence level in the correctness of |Coq| critical type-checking +algorithm. 
+ +Yves Bertot designed the ``SearchPattern`` and ``SearchRewrite`` tools +and the support for the pcoq interface +(http://www-sop.inria.fr/lemme/pcoq/). + +Micaela Mayero and David Delahaye introduced Field, a decision tactic +for commutative fields. + +Christine Paulin changed the elimination rules for empty and singleton +propositional inductive types. + +Loïc Pottier developed Fourier, a tactic solving linear inequalities on +real numbers. + +Pierre Crégut developed a new version based on reflexion of the Omega +decision tactic. + +Claudio Sacerdoti Coen designed an XML output for the |Coq| modules to be +used in the Hypertextual Electronic Library of Mathematics (HELM cf +http://www.cs.unibo.it/helm). + +A library for efficient representation of finite maps using binary trees +contributed by Jean Goubault was integrated in the basic theories. + +Pierre Courtieu developed a command and a tactic to reason on the +inductive structure of recursively defined functions. + +Jacek Chrzszcz designed and implemented the module system of |Coq| whose +foundations are in Judicaël Courant’s PhD thesis. + +The development was coordinated by C. Paulin. + +Many discussions within the Démons team and the LogiCal project +influenced significantly the design of |Coq| especially with J. Courant, +J. Duprat, J. Goubault, A. Miquel, C. Marché, B. Monate and B. Werner. + +Intensive users suggested improvements of the system : Y. Bertot, L. +Pottier, L. Théry, P. Zimmerman from INRIA, C. Alvarado, P. Crégut, +J.-F. Monin from France Telecom R & D. + +| Orsay, May. 2002 +| Hugo Herbelin & Christine Paulin +| + +Credits: version 8.0 +==================== + +Coq version 8 is a major revision of the |Coq| proof assistant. First, the +underlying logic is slightly different. The so-called *impredicativity* +of the sort Set has been dropped. 
The main reason is that it is +inconsistent with the principle of description which is quite a useful +principle for formalizing mathematics within classical logic. Moreover, +even in an constructive setting, the impredicativity of Set does not add +so much in practice and is even subject of criticism from a large part +of the intuitionistic mathematician community. Nevertheless, the +impredicativity of Set remains optional for users interested in +investigating mathematical developments which rely on it. + +Secondly, the concrete syntax of terms has been completely revised. The +main motivations were + +- a more uniform, purified style: all constructions are now lowercase, + with a functional programming perfume (e.g. abstraction is now + written fun), and more directly accessible to the novice (e.g. + dependent product is now written forall and allows omission of + types). Also, parentheses and are no longer mandatory for function + application. + +- extensibility: some standard notations (e.g. “<” and “>”) were + incompatible with the previous syntax. Now all standard arithmetic + notations (=, +, \*, /, <, <=, ... and more) are directly part of the + syntax. + +Together with the revision of the concrete syntax, a new mechanism of +*interpretation scopes* permits to reuse the same symbols (typically +, +-, \*, /, <, <=) in various mathematical theories without any +ambiguities for |Coq|, leading to a largely improved readability of |Coq| +scripts. New commands to easily add new symbols are also provided. + +Coming with the new syntax of terms, a slight reform of the tactic +language and of the language of commands has been carried out. The +purpose here is a better uniformity making the tactics and commands +easier to use and to remember. + +Thirdly, a restructuration and uniformisation of the standard library of +Coq has been performed. There is now just one Leibniz’ equality usable +for all the different kinds of |Coq| objects. 
Also, the set of real +numbers now lies at the same level as the sets of natural and integer +numbers. Finally, the names of the standard properties of numbers now +follow a standard pattern and the symbolic notations for the standard +definitions as well. + +The fourth point is the release of |CoqIDE|, a new graphical gtk2-based +interface fully integrated to |Coq|. Close in style from the Proof General +Emacs interface, it is faster and its integration with |Coq| makes +interactive developments more friendly. All mathematical Unicode symbols +are usable within |CoqIDE|. + +Finally, the module system of |Coq| completes the picture of |Coq| version +8.0. Though released with an experimental status in the previous version +7.4, it should be considered as a salient feature of the new version. + +Besides, |Coq| comes with its load of novelties and improvements: new or +improved tactics (including a new tactic for solving first-order +statements), new management commands, extended libraries. + +Bruno Barras and Hugo Herbelin have been the main contributors of the +reflexion and the implementation of the new syntax. The smart automatic +translator from old to new syntax released with |Coq| is also their work +with contributions by Olivier Desmettre. + +Hugo Herbelin is the main designer and implementor of the notion of +interpretation scopes and of the commands for easily adding new +notations. + +Hugo Herbelin is the main implementor of the restructuration of the +standard library. + +Pierre Corbineau is the main designer and implementor of the new tactic +for solving first-order statements in presence of inductive types. He is +also the maintainer of the non-domain specific automation tactics. + +Benjamin Monate is the developer of the |CoqIDE| graphical interface with +contributions by Jean-Christophe Filliâtre, Pierre Letouzey, Claude +Marché and Bruno Barras. + +Claude Marché coordinated the edition of the Reference Manual for |Coq| +V8.0. 
+ +Pierre Letouzey and Jacek Chrzszcz respectively maintained the +extraction tool and module system of |Coq|. + +Jean-Christophe Filliâtre, Pierre Letouzey, Hugo Herbelin and other +contributors from Sophia-Antipolis and Nijmegen participated to the +extension of the library. + +Julien Narboux built a NSIS-based automatic |Coq| installation tool for +the Windows platform. + +Hugo Herbelin and Christine Paulin coordinated the development which was +under the responsability of Christine Paulin. + +| Palaiseau & Orsay, Apr. 2004 +| Hugo Herbelin & Christine Paulin +| (updated Apr. 2006) +| + +Credits: version 8.1 +==================== + +Coq version 8.1 adds various new functionalities. + +Benjamin Grégoire implemented an alternative algorithm to check the +convertibility of terms in the |Coq| type-checker. This alternative +algorithm works by compilation to an efficient bytecode that is +interpreted in an abstract machine similar to Xavier Leroy’s ZINC +machine. Convertibility is performed by comparing the normal forms. This +alternative algorithm is specifically interesting for proofs by +reflection. More generally, it is convenient in case of intensive +computations. + +Christine Paulin implemented an extension of inductive types allowing +recursively non uniform parameters. Hugo Herbelin implemented +sort-polymorphism for inductive types (now called template polymorphism). + +Claudio Sacerdoti Coen improved the tactics for rewriting on arbitrary +compatible equivalence relations. He also generalized rewriting to +arbitrary transition systems. + +Claudio Sacerdoti Coen added new features to the module system. + +Benjamin Grégoire, Assia Mahboubi and Bruno Barras developed a new more +efficient and more general simplification algorithm on rings and +semi-rings. + +Laurent Théry and Bruno Barras developed a new significantly more +efficient simplification algorithm on fields. 
+ +Hugo Herbelin, Pierre Letouzey, Julien Forest, Julien Narboux and +Claudio Sacerdoti Coen added new tactic features. + +Hugo Herbelin implemented matching on disjunctive patterns. + +New mechanisms made easier the communication between |Coq| and external +provers. Nicolas Ayache and Jean-Christophe Filliâtre implemented +connections with the provers cvcl, Simplify and zenon. Hugo Herbelin +implemented an experimental protocol for calling external tools from the +tactic language. + +Matthieu Sozeau developed Russell, an experimental language to specify +the behavior of programs with subtypes. + +A mechanism to automatically use some specific tactic to solve +unresolved implicit has been implemented by Hugo Herbelin. + +Laurent Théry’s contribution on strings and Pierre Letouzey and +Jean-Christophe Filliâtre’s contribution on finite maps have been +integrated to the |Coq| standard library. Pierre Letouzey developed a +library about finite sets “à la Objective Caml”. With Jean-Marc Notin, +he extended the library on lists. Pierre Letouzey’s contribution on +rational numbers has been integrated and extended.. + +Pierre Corbineau extended his tactic for solving first-order statements. +He wrote a reflection-based intuitionistic tautology solver. + +Pierre Courtieu, Julien Forest and Yves Bertot added extra support to +reason on the inductive structure of recursively defined functions. + +Jean-Marc Notin significantly contributed to the general maintenance of +the system. He also took care of `coqdoc`. + +Pierre Castéran contributed to the documentation of (co-)inductive types +and suggested improvements to the libraries. + +Pierre Corbineau implemented a declarative mathematical proof language, +usable in combination with the tactic-based style of proof. 
+ +Finally, many users suggested improvements of the system through the +Coq-Club mailing list and bug-tracker systems, especially user groups +from INRIA Rocquencourt, Radboud University, University of Pennsylvania +and Yale University. + +| Palaiseau, July 2006 +| Hugo Herbelin +| + +Credits: version 8.2 +==================== + +Coq version 8.2 adds new features, new libraries and improves on many +various aspects. + +Regarding the language of |Coq|, the main novelty is the introduction by +Matthieu Sozeau of a package of commands providing Haskell-style type +classes. Type classes, that come with a few convenient features such as +type-based resolution of implicit arguments, plays a new role of +landmark in the architecture of |Coq| with respect to automatization. For +instance, thanks to type classes support, Matthieu Sozeau could +implement a new resolution-based version of the tactics dedicated to +rewriting on arbitrary transitive relations. + +Another major improvement of |Coq| 8.2 is the evolution of the arithmetic +libraries and of the tools associated to them. Benjamin Grégoire and +Laurent Théry contributed a modular library for building arbitrarily +large integers from bounded integers while Evgeny Makarov contributed a +modular library of abstract natural and integer arithmetics together +with a few convenient tactics. On his side, Pierre Letouzey made +numerous extensions to the arithmetic libraries on :math:`\mathbb{Z}` +and :math:`\mathbb{Q}`, including extra support for automatization in +presence of various number-theory concepts. + +Frédéric Besson contributed a reflexive tactic based on Krivine-Stengle +Positivstellensatz (the easy way) for validating provability of systems +of inequalities. 
The platform is flexible enough to support the +validation of any algorithm able to produce a “certificate” for the +Positivstellensatz and this covers the case of Fourier-Motzkin (for +linear systems in :math:`\mathbb{Q}` and :math:`\mathbb{R}`), +Fourier-Motzkin with cutting planes (for linear systems in +:math:`\mathbb{Z}`) and sum-of-squares (for non-linear systems). Evgeny +Makarov made the platform generic over arbitrary ordered rings. + +Arnaud Spiwack developed a library of 31-bits machine integers and, +relying on Benjamin Grégoire and Laurent Théry’s library, delivered a +library of unbounded integers in base :math:`2^{31}`. As importantly, he +developed a notion of “retro-knowledge” so as to safely extend the +kernel-located bytecode-based efficient evaluation algorithm of |Coq| +version 8.1 to use 31-bits machine arithmetics for efficiently computing +with the library of integers he developed. + +Beside the libraries, various improvements contributed to provide a more +comfortable end-user language and more expressive tactic language. Hugo +Herbelin and Matthieu Sozeau improved the pattern-matching compilation +algorithm (detection of impossible clauses in pattern-matching, +automatic inference of the return type). Hugo Herbelin, Pierre Letouzey +and Matthieu Sozeau contributed various new convenient syntactic +constructs and new tactics or tactic features: more inference of +redundant information, better unification, better support for proof or +definition by fixpoint, more expressive rewriting tactics, better +support for meta-variables, more convenient notations, ... + +Élie Soubiran improved the module system, adding new features (such as +an “include” command) and making it more flexible and more general. He +and Pierre Letouzey improved the support for modules in the extraction +mechanism. 
+Matthieu Sozeau extended the Russell language, ending in a convenient
+way to write programs of given specifications,
+ +| Palaiseau, June 2008 +| Hugo Herbelin +| + +Credits: version 8.3 +==================== + +Coq version 8.3 is before all a transition version with refinements or +extensions of the existing features and libraries and a new tactic nsatz +based on Hilbert’s Nullstellensatz for deciding systems of equations +over rings. + +With respect to libraries, the main evolutions are due to Pierre +Letouzey with a rewriting of the library of finite sets FSets and a new +round of evolutions in the modular development of arithmetic (library +Numbers). The reason for making FSets evolve is that the computational +and logical contents were quite intertwined in the original +implementation, leading in some cases to longer computations than +expected and this problem is solved in the new MSets implementation. As +for the modular arithmetic library, it was only dealing with the basic +arithmetic operators in the former version and its current extension +adds the standard theory of the division, min and max functions, all +made available for free to any implementation of :math:`\mathbb{N}`, +:math:`\mathbb{Z}` or :math:`\mathbb{Z}/n\mathbb{Z}`. + +The main other evolutions of the library are due to Hugo Herbelin who +made a revision of the sorting library (including a certified +merge-sort) and to Guillaume Melquiond who slightly revised and cleaned +up the library of reals. + +The module system evolved significantly. Besides the resolution of some +efficiency issues and a more flexible construction of module types, Élie +Soubiran brought a new model of name equivalence, the +:math:`\Delta`-equivalence, which respects as much as possible the names +given by the users. He also designed with Pierre Letouzey a new +convenient operator ``<+`` for nesting functor application, that +provides a light notation for inheriting the properties of cascading +modules. + +The new tactic nsatz is due to Loïc Pottier. It works by computing +Gröbner bases. 
Regarding the existing tactics, various improvements have +been done by Matthieu Sozeau, Hugo Herbelin and Pierre Letouzey. + +Matthieu Sozeau extended and refined the type classes and Program +features (the Russell language). Pierre Letouzey maintained and improved +the extraction mechanism. Bruno Barras and Élie Soubiran maintained the +Coq checker, Julien Forest maintained the Function mechanism for +reasoning over recursively defined functions. Matthieu Sozeau, Hugo +Herbelin and Jean-Marc Notin maintained coqdoc. Frédéric Besson +maintained the Micromega plateform for deciding systems of inequalities. +Pierre Courtieu maintained the support for the Proof General Emacs +interface. Claude Marché maintained the plugin for calling external +provers (dp). Yves Bertot made some improvements to the libraries of +lists and integers. Matthias Puech improved the search functions. +Guillaume Melquiond usefully contributed here and there. Yann +Régis-Gianas grounded the support for Unicode on a more standard and +more robust basis. + +Though invisible from outside, Arnaud Spiwack improved the general +process of management of existential variables. Pierre Letouzey and +Stéphane Glondu improved the compilation scheme of the |Coq| archive. +Vincent Gross provided support to |CoqIDE|. Jean-Marc Notin provided +support for benchmarking and archiving. + +Many users helped by reporting problems, providing patches, suggesting +improvements or making useful comments, either on the bug tracker or on +the Coq-club mailing list. This includes but not exhaustively Cédric +Auger, Arthur Charguéraud, François Garillot, Georges Gonthier, Robin +Green, Stéphane Lescuyer, Eelis van der Weegen, ... + +Though not directly related to the implementation, special thanks are +going to Yves Bertot, Pierre Castéran, Adam Chlipala, and Benjamin +Pierce for the excellent teaching materials they provided. 
+ +| Paris, April 2010 +| Hugo Herbelin +| + +Credits: version 8.4 +==================== + +Coq version 8.4 contains the result of three long-term projects: a new +modular library of arithmetic by Pierre Letouzey, a new proof engine by +Arnaud Spiwack and a new communication protocol for |CoqIDE| by Vincent +Gross. + +The new modular library of arithmetic extends, generalizes and unifies +the existing libraries on Peano arithmetic (types nat, N and BigN), +positive arithmetic (type positive), integer arithmetic (Z and BigZ) and +machine word arithmetic (type Int31). It provides with unified notations +(e.g. systematic use of add and mul for denoting the addition and +multiplication operators), systematic and generic development of +operators and properties of these operators for all the types mentioned +above, including gcd, pcm, power, square root, base 2 logarithm, +division, modulo, bitwise operations, logical shifts, comparisons, +iterators, ... + +The most visible feature of the new proof engine is the support for +structured scripts (bullets and proof brackets) but, even if yet not +user-available, the new engine also provides the basis for refining +existential variables using tactics, for applying tactics to several +goals simultaneously, for reordering goals, all features which are +planned for the next release. The new proof engine forced to reimplement +info and Show Script differently, what was done by Pierre Letouzey. + +Before version 8.4, |CoqIDE| was linked to |Coq| with the graphical +interface living in a separate thread. From version 8.4, |CoqIDE| is a +separate process communicating with |Coq| through a textual channel. This +allows for a more robust interfacing, the ability to interrupt |Coq| +without interrupting the interface, and the ability to manage several +sessions in parallel. 
Relying on the infrastructure work made by Vincent +Gross, Pierre Letouzey, Pierre Boutillier and Pierre-Marie Pédrot +contributed many various refinements of |CoqIDE|. + +Coq 8.4 also comes with a bunch of many various smaller-scale changes +and improvements regarding the different components of the system. + +The underlying logic has been extended with :math:`\eta`-conversion +thanks to Hugo Herbelin, Stéphane Glondu and Benjamin Grégoire. The +addition of :math:`\eta`-conversion is justified by the confidence that +the formulation of the Calculus of Inductive Constructions based on +typed equality (such as the one considered in Lee and Werner to build a +set-theoretic model of CIC :cite:`LeeWerner11`) is +applicable to the concrete implementation of |Coq|. + +The underlying logic benefited also from a refinement of the guard +condition for fixpoints by Pierre Boutillier, the point being that it is +safe to propagate the information about structurally smaller arguments +through :math:`\beta`-redexes that are blocked by the “match” +construction (blocked commutative cuts). + +Relying on the added permissiveness of the guard condition, Hugo +Herbelin could extend the pattern-matching compilation algorithm so that +matching over a sequence of terms involving dependencies of a term or of +the indices of the type of a term in the type of other terms is +systematically supported. + +Regarding the high-level specification language, Pierre Boutillier +introduced the ability to give implicit arguments to anonymous +functions, Hugo Herbelin introduced the ability to define notations with +several binders (e.g. ``exists x y z, P``), Matthieu Sozeau made the +type classes inference mechanism more robust and predictable, Enrico +Tassi introduced a command Arguments that generalizes Implicit Arguments +and Arguments Scope for assigning various properties to arguments of +constants. 
Various improvements in the type inference algorithm were +provided by Matthieu Sozeau and Hugo Herbelin with contributions from +Enrico Tassi. + +Regarding tactics, Hugo Herbelin introduced support for referring to +expressions occurring in the goal by pattern in tactics such as set or +destruct. Hugo Herbelin also relied on ideas from Chung-Kil Hur’s Heq +plugin to introduce automatic computation of occurrences to generalize +when using destruct and induction on types with indices. Stéphane Glondu +introduced new tactics constr\_eq, is\_evar and has\_evar to be used +when writing complex tactics. Enrico Tassi added support to fine-tuning +the behavior of simpl. Enrico Tassi added the ability to specify over +which variables of a section a lemma has to be exactly generalized. +Pierre Letouzey added a tactic timeout and the interruptibility of +vm\_compute. Bug fixes and miscellaneous improvements of the tactic +language came from Hugo Herbelin, Pierre Letouzey and Matthieu Sozeau. + +Regarding decision tactics, Loïc Pottier maintained Nsatz, moving in +particular to a type-class based reification of goals while Frédéric +Besson maintained Micromega, adding in particular support for division. + +Regarding vernacular commands, Stéphane Glondu provided new commands to +analyze the structure of type universes. + +Regarding libraries, a new library about lists of a given length (called +vectors) has been provided by Pierre Boutillier. A new instance of +finite sets based on Red-Black trees and provided by Andrew Appel has +been adapted for the standard library by Pierre Letouzey. In the library +of real analysis, Yves Bertot changed the definition of :math:`\pi` and +provided a proof of the long-standing fact yet remaining unproved in +this library, namely that :math:`sin \frac{\pi}{2} = +1`. + +Pierre Corbineau maintained the Mathematical Proof Language (C-zar). + +Bruno Barras and Benjamin Grégoire maintained the call-by-value +reduction machines. 
+ +The extraction mechanism benefited from several improvements provided by +Pierre Letouzey. + +Pierre Letouzey maintained the module system, with contributions from +Élie Soubiran. + +Julien Forest maintained the Function command. + +Matthieu Sozeau maintained the setoid rewriting mechanism. + +Coq related tools have been upgraded too. In particular, coq\_makefile +has been largely revised by Pierre Boutillier. Also, patches from Adam +Chlipala for coqdoc have been integrated by Pierre Boutillier. + +Bruno Barras and Pierre Letouzey maintained the `coqchk` checker. + +Pierre Courtieu and Arnaud Spiwack contributed new features for using +Coq through Proof General. + +The Dp plugin has been removed. Use the plugin provided with Why 3 +instead (http://why3.lri.fr). + +Under the hood, the |Coq| architecture benefited from improvements in +terms of efficiency and robustness, especially regarding universes +management and existential variables management, thanks to Pierre +Letouzey and Yann Régis-Gianas with contributions from Stéphane Glondu +and Matthias Puech. The build system is maintained by Pierre Letouzey +with contributions from Stéphane Glondu and Pierre Boutillier. + +A new backtracking mechanism simplifying the task of external interfaces +has been designed by Pierre Letouzey. + +The general maintenance was done by Pierre Letouzey, Hugo Herbelin, +Pierre Boutillier, Matthieu Sozeau and Stéphane Glondu with also +specific contributions from Guillaume Melquiond, Julien Narboux and +Pierre-Marie Pédrot. + +Packaging tools were provided by Pierre Letouzey (Windows), Pierre +Boutillier (MacOS), Stéphane Glondu (Debian). Releasing, testing and +benchmarking support was provided by Jean-Marc Notin. + +Many suggestions for improvements were motivated by feedback from users, +on either the bug tracker or the coq-club mailing list. Special thanks +are going to the users who contributed patches, starting with Tom +Prince. 
Other patch contributors include Cédric Auger, David Baelde, Dan +Grayson, Paolo Herms, Robbert Krebbers, Marc Lasson, Hendrik Tews and +Eelis van der Weegen. + +| Paris, December 2011 +| Hugo Herbelin +| + +Credits: version 8.5 +==================== + +Coq version 8.5 contains the result of five specific long-term projects: + +- A new asynchronous evaluation and compilation mode by Enrico Tassi + with help from Bruno Barras and Carst Tankink. + +- Full integration of the new proof engine by Arnaud Spiwack helped by + Pierre-Marie Pédrot, + +- Addition of conversion and reduction based on native compilation by + Maxime Dénès and Benjamin Grégoire. + +- Full universe polymorphism for definitions and inductive types by + Matthieu Sozeau. + +- An implementation of primitive projections with + :math:`\eta`-conversion bringing significant performance improvements + when using records by Matthieu Sozeau. + +The full integration of the proof engine, by Arnaud Spiwack and +Pierre-Marie Pédrot, brings to primitive tactics and the user level Ltac +language dependent subgoals, deep backtracking and multiple goal +handling, along with miscellaneous features and an improved potential +for future modifications. Dependent subgoals allow statements in a goal +to mention the proof of another. Proofs of unsolved subgoals appear as +existential variables. Primitive backtracking makes it possible to write +a tactic with several possible outcomes which are tried successively +when subsequent tactics fail. Primitives are also available to control +the backtracking behavior of tactics. Multiple goal handling paves the +way for smarter automation tactics. It is currently used for simple goal +manipulation such as goal reordering. + +The way |Coq| processes a document in batch and interactive mode has been +redesigned by Enrico Tassi with help from Bruno Barras. 
Opaque proofs, +the text between Proof and Qed, can be processed asynchronously, +decoupling the checking of definitions and statements from the checking +of proofs. It improves the responsiveness of interactive development, +since proofs can be processed in the background. Similarly, compilation +of a file can be split into two phases: the first one checking only +definitions and statements and the second one checking proofs. A file +resulting from the first phase – with the .vio extension – can be +already Required. All .vio files can be turned into complete .vo files +in parallel. The same infrastructure also allows terminating tactics to +be run in parallel on a set of goals via the ``par:`` goal selector. + +|CoqIDE| was modified to cope with asynchronous checking of the document. +Its source code was also made separate from that of |Coq|, so that |CoqIDE| +no longer has a special status among user interfaces, paving the way for +decoupling its release cycle from that of |Coq| in the future. + +Carst Tankink developed a |Coq| back-end for user interfaces built on +Makarius Wenzel’s Prover IDE framework (PIDE), like PIDE/jEdit (with +help from Makarius Wenzel) or PIDE/Coqoon (with help from Alexander +Faithfull and Jesper Bengtson). The development of such features was +funded by the Paral-ITP French ANR project. + +The full universe polymorphism extension was designed by Matthieu +Sozeau. It conservatively extends the universes system and core calculus +with definitions and inductive declarations parameterized by universes +and constraints. It is based on a modification of the kernel +architecture to handle constraint checking only, leaving the generation +of constraints to the refinement/type inference engine. Accordingly, +tactics are now fully universe aware, resulting in more localized error +messages in case of inconsistencies and allowing higher-level algorithms +like unification to be entirely type safe. 
The internal representation +of universes has been modified but this is invisible to the user. + +The underlying logic has been extended with :math:`\eta`-conversion for +records defined with primitive projections by Matthieu Sozeau. This +additional form of :math:`\eta`-conversion is justified using the same +principle as the previously added :math:`\eta`-conversion for function +types, based on formulations of the Calculus of Inductive Constructions +with typed equality. Primitive projections, which do not carry the +parameters of the record and are rigid names (not defined as a +pattern-matching construct), make working with nested records more +manageable in terms of time and space consumption. This extension and +universe polymorphism were carried out partly while Matthieu Sozeau was +working at the IAS in Princeton. + +The guard condition has been made compliant with extensional equality +principles such as propositional extensionality and univalence, thanks +to Maxime Dénès and Bruno Barras. To ensure compatibility with the +univalence axiom, a new flag “-indices-matter” has been implemented, +taking into account the universe levels of indices when computing the +levels of inductive types. This supports using |Coq| as a tool to explore +the relations between homotopy theory and type theory. + +Maxime Dénès and Benjamin Grégoire developed an implementation of +conversion test and normal form computation using the OCaml native +compiler. It complements the virtual machine conversion offering much +faster computation for expensive functions. + +Coq 8.5 also comes with a bunch of various smaller-scale changes +and improvements regarding the different components of the system. We +shall only list a few of them. + +Pierre Boutillier developed an improved tactic for simplification of +expressions called cbn. + +Maxime Dénès maintained the bytecode-based reduction machine. Pierre +Letouzey maintained the extraction mechanism. 
+ +Pierre-Marie Pédrot has extended the syntax of terms to, experimentally, +allow holes in terms to be solved by a locally specified tactic. + +Existential variables are referred to by identifiers rather than mere +numbers, thanks to Hugo Herbelin who also improved the tactic language +here and there. + +Error messages for universe inconsistencies have been improved by +Matthieu Sozeau. Error messages for unification and type inference +failures have been improved by Hugo Herbelin, Pierre-Marie Pédrot and +Arnaud Spiwack. + +Pierre Courtieu contributed new features for using |Coq| through Proof +General and for better interactive experience (bullets, Search, etc). + +The efficiency of the whole system has been significantly improved +thanks to contributions from Pierre-Marie Pédrot. + +A distribution channel for |Coq| packages using the OPAM tool has been +initiated by Thomas Braibant and developed by Guillaume Claret, with +contributions by Enrico Tassi and feedback from Hugo Herbelin. + +Packaging tools were provided by Pierre Letouzey and Enrico Tassi +(Windows), Pierre Boutillier, Matthieu Sozeau and Maxime Dénès (MacOS +X). Maxime Dénès improved significantly the testing and benchmarking +support. + +Many power users helped to improve the design of the new features via +the bug tracker, the coq development mailing list or the coq-club +mailing list. Special thanks are going to the users who contributed +patches and intensive brain-storming, starting with Jason Gross, +Jonathan Leivent, Greg Malecha, Clément Pit-Claudel, Marc Lasson, Lionel +Rieg. It would however be impossible to mention with precision all names +of people who to some extent influenced the development. + +Version 8.5 is one of the most important releases of |Coq|. Its development +spanned over about 3 years and a half with about one year of +beta-testing. 
General maintenance during part or whole of this period +has been done by Pierre Boutillier, Pierre Courtieu, Maxime Dénès, Hugo +Herbelin, Pierre Letouzey, Guillaume Melquiond, Pierre-Marie Pédrot, +Matthieu Sozeau, Arnaud Spiwack, Enrico Tassi as well as Bruno Barras, +Yves Bertot, Frédéric Besson, Xavier Clerc, Pierre Corbineau, +Jean-Christophe Filliâtre, Julien Forest, Sébastien Hinderer, Assia +Mahboubi, Jean-Marc Notin, Yann Régis-Gianas, François Ripault, Carst +Tankink. Maxime Dénès coordinated the release process. + +| Paris, January 2015, revised December 2015, +| Hugo Herbelin, Matthieu Sozeau and the |Coq| development team +| + +Credits: version 8.6 +==================== + +Coq version 8.6 contains the result of refinements, stabilization of +8.5’s features and cleanups of the internals of the system. Over the +year of (now time-based) development, about 450 bugs were resolved and +over 100 contributions integrated. The main user visible changes are: + +- A new, faster state-of-the-art universe constraint checker, by + Jacques-Henri Jourdan. + +- In |CoqIDE| and other asynchronous interfaces, more fine-grained + asynchronous processing and error reporting by Enrico Tassi, making + |Coq| capable of recovering from errors and continue processing the + document. + +- More access to the proof engine features from Ltac: goal management + primitives, range selectors and a typeclasses eauto engine handling + multiple goals and multiple successes, by Cyprien Mangin, Matthieu + Sozeau and Arnaud Spiwack. + +- Tactic behavior uniformization and specification, generalization of + intro-patterns by Hugo Herbelin and others. + +- A brand new warning system allowing to control warnings, turn them + into errors or ignore them selectively by Maxime Dénès, Guillaume + Melquiond, Pierre-Marie Pédrot and others. + +- Irrefutable patterns in abstractions, by Daniel de Rauglaudre. 
+ +- The ssreflect subterm selection algorithm by Georges Gonthier and + Enrico Tassi is now accessible to tactic writers through the + ssrmatching plugin. + +- Integration of LtacProf, a profiler for Ltac by Jason Gross, Paul + Steckler, Enrico Tassi and Tobias Tebbi. + +Coq 8.6 also comes with a bunch of smaller-scale changes and +improvements regarding the different components of the system. We shall +only list a few of them. + +The iota reduction flag is now a shorthand for match, fix and cofix +flags controlling the corresponding reduction rules (by Hugo Herbelin +and Maxime Dénès). + +Maxime Dénès maintained the native compilation machinery. + +Pierre-Marie Pédrot separated the Ltac code from general purpose +tactics, and generalized and rationalized the handling of generic +arguments, allowing to create new versions of Ltac more easily in the +future. + +In patterns and terms, @, abbreviations and notations are now +interpreted the same way, by Hugo Herbelin. + +Name handling for universes has been improved by Pierre-Marie Pédrot and +Matthieu Sozeau. The minimization algorithm has been improved by +Matthieu Sozeau. + +The unifier has been improved by Hugo Herbelin and Matthieu Sozeau, +fixing some incompatibilities introduced in |Coq| 8.5. Unification +constraints can now be left floating around and be seen by the user +thanks to a new option. The Keyed Unification mode has been improved by +Matthieu Sozeau. + +The typeclass resolution engine and associated proof-search tactic have +been reimplemented on top of the proof-engine monad, providing better +integration in tactics, and new options have been introduced to control +it, by Matthieu Sozeau with help from Théo Zimmermann. + +The efficiency of the whole system has been significantly improved +thanks to contributions from Pierre-Marie Pédrot, Maxime Dénès and +Matthieu Sozeau and performance issue tracking by Jason Gross and Paul +Steckler. 
+ +Standard library improvements by Jason Gross, Sébastien Hinderer, Pierre +Letouzey and others. + +Emilio Jesús Gallego Arias contributed many cleanups and refactorings of +the pretty-printing and user interface communication components. + +Frédéric Besson maintained the micromega tactic. + +The OPAM repository for |Coq| packages has been maintained by Guillaume +Claret, Guillaume Melquiond, Matthieu Sozeau, Enrico Tassi and others. A +list of packages is now available at https://coq.inria.fr/opam/www/. + +Packaging tools and software development kits were prepared by Michael +Soegtrop with the help of Maxime Dénès and Enrico Tassi for Windows, and +Maxime Dénès and Matthieu Sozeau for MacOS X. Packages are now regularly +built on the continuous integration server. |Coq| now comes with a META +file usable with ocamlfind, contributed by Emilio Jesús Gallego Arias, +Gregory Malecha, and Matthieu Sozeau. + +Matej Košík maintained and greatly improved the continuous integration +setup and the testing of |Coq| contributions. He also contributed many API +improvement and code cleanups throughout the system. + +The contributors for this version are Bruno Barras, C.J. Bell, Yves +Bertot, Frédéric Besson, Pierre Boutillier, Tej Chajed, Guillaume +Claret, Xavier Clerc, Pierre Corbineau, Pierre Courtieu, Maxime Dénès, +Ricky Elrod, Emilio Jesús Gallego Arias, Jason Gross, Hugo Herbelin, +Sébastien Hinderer, Jacques-Henri Jourdan, Matej Kosik, Xavier Leroy, +Pierre Letouzey, Gregory Malecha, Cyprien Mangin, Erik Martin-Dorel, +Guillaume Melquiond, Clément Pit–Claudel, Pierre-Marie Pédrot, Daniel de +Rauglaudre, Lionel Rieg, Gabriel Scherer, Thomas Sibut-Pinote, Matthieu +Sozeau, Arnaud Spiwack, Paul Steckler, Enrico Tassi, Laurent Théry, +Nickolai Zeldovich and Théo Zimmermann. The development process was +coordinated by Hugo Herbelin and Matthieu Sozeau with the help of Maxime +Dénès, who was also in charge of the release process. 
+ +Many power users helped to improve the design of the new features via +the bug tracker, the pull request system, the |Coq| development mailing +list or the coq-club mailing list. Special thanks to the users who +contributed patches and intensive brain-storming and code reviews, +starting with Cyril Cohen, Jason Gross, Robbert Krebbers, Jonathan +Leivent, Xavier Leroy, Gregory Malecha, Clément Pit–Claudel, Gabriel +Scherer and Beta Ziliani. It would however be impossible to mention +exhaustively the names of everybody who to some extent influenced the +development. + +Version 8.6 is the first release of |Coq| developed on a time-based +development cycle. Its development spanned 10 months from the release of +Coq 8.5 and was based on a public roadmap. To date, it contains more +external contributions than any previous |Coq| system. Code reviews were +systematically done before integration of new features, with an +important focus given to compatibility and performance issues, resulting +in a hopefully more robust release than |Coq| 8.5. + +Coq Enhancement Proposals (CEPs for short) were introduced by Enrico +Tassi to provide more visibility and a discussion period on new +features, they are publicly available https://github.com/coq/ceps. + +Started during this period, an effort is led by Yves Bertot and Maxime +Dénès to put together a |Coq| consortium. + +| Paris, November 2016, +| Matthieu Sozeau and the |Coq| development team +| + +Credits: version 8.7 +==================== +|Coq| version 8.7 contains the result of refinements, stabilization of features +and cleanups of the internals of the system along with a few new features. The +main user visible changes are: + +- New tactics: variants of tactics supporting existential variables eassert, + eenough, etc... by Hugo Herbelin. Tactics extensionality in H and + inversion_sigma by Jason Gross, specialize with ... accepting partial bindings + by Pierre Courtieu. 
+ +- Cumulative Polymorphic Inductive Types, allowing cumulativity of universes to + go through applied inductive types, by Amin Timany and Matthieu Sozeau. + +- Integration of the SSReflect plugin and its documentation in the reference + manual, by Enrico Tassi, Assia Mahboubi and Maxime Dénès. + +- The coq_makefile tool was completely redesigned to improve its maintainability + and the extensibility of generated Makefiles, and to make `_CoqProject` files + more palatable to IDEs by Enrico Tassi. + +|Coq| 8.7 involved a large amount of work on cleaning and speeding up the code +base, notably the work of Pierre-Marie Pédrot on making the tactic-level system +insensitive to existential variable expansion, providing a safer API to plugin +writers and making the code more robust. The `dev/doc/changes.txt` file +documents the numerous changes to the implementation and improvements of +interfaces. An effort to provide an official, streamlined API to plugin writers +is in progress, thanks to the work of Matej Košík. + +Version 8.7 also comes with a bunch of smaller-scale changes and improvements +regarding the different components of the system. We shall only list a few of +them. + +The efficiency of the whole system has been significantly improved thanks to +contributions from Pierre-Marie Pédrot, Maxime Dénès and Matthieu Sozeau and +performance issue tracking by Jason Gross and Paul Steckler. + +Thomas Sibut-Pinote and Hugo Herbelin added support for side effects hooks in +cbv, cbn and simpl. The side effects are provided via a plugin available at +https://github.com/herbelin/reduction-effects/. + +The BigN, BigZ, BigQ libraries are no longer part of the |Coq| standard library, +they are now provided by a separate repository https://github.com/coq/bignums, +maintained by Pierre Letouzey. + +In the Reals library, `IZR` has been changed to produce a compact representation +of integers and real constants are now represented using `IZR` (work by +Guillaume Melquiond). 
+ +Standard library additions and improvements by Jason Gross, Pierre Letouzey and +others, documented in the `CHANGES` file. + +The mathematical proof language/declarative mode plugin was removed from the +archive. + +The OPAM repository for |Coq| packages has been maintained by Guillaume Melquiond, +Matthieu Sozeau, Enrico Tassi with contributions from many users. A list of +packages is available at https://coq.inria.fr/opam/www/. + +Packaging tools and software development kits were prepared by Michael Soegtrop +with the help of Maxime Dénès and Enrico Tassi for Windows, and Maxime Dénès for +MacOS X. Packages are regularly built on the Travis continuous integration +server. + +The contributors for this version are Abhishek Anand, C.J. Bell, Yves Bertot, +Frédéric Besson, Tej Chajed, Pierre Courtieu, Maxime Dénès, Julien Forest, +Gaëtan Gilbert, Jason Gross, Hugo Herbelin, Emilio Jesús Gallego Arias, Ralf +Jung, Matej Košík, Xavier Leroy, Pierre Letouzey, Assia Mahboubi, Cyprien +Mangin, Erik Martin-Dorel, Olivier Marty, Guillaume Melquiond, Sam Pablo Kuper, +Benjamin Pierce, Pierre-Marie Pédrot, Lars Rasmusson, Lionel Rieg, Valentin +Robert, Yann Régis-Gianas, Thomas Sibut-Pinote, Michael Soegtrop, Matthieu +Sozeau, Arnaud Spiwack, Paul Steckler, George Stelle, Pierre-Yves Strub, Enrico +Tassi, Hendrik Tews, Amin Timany, Laurent Théry, Vadim Zaliva and Théo +Zimmermann. + +The development process was coordinated by Matthieu Sozeau with the help of +Maxime Dénès, who was also in charge of the release process. Théo Zimmermann is +the maintainer of this release. + +Many power users helped to improve the design of the new features via the bug +tracker, the pull request system, the |Coq| development mailing list or the +coq-club mailing list. Special thanks to the users who contributed patches and +intensive brain-storming and code reviews, starting with Jason Gross, Ralf Jung, +Robbert Krebbers, Xavier Leroy, Clément Pit–Claudel and Gabriel Scherer. 
It +would however be impossible to mention exhaustively the names of everybody who +to some extent influenced the development. + +Version 8.7 is the second release of |Coq| developed on a time-based development +cycle. Its development spanned 9 months from the release of |Coq| 8.6 and was +based on a public road-map. It attracted many external contributions. Code +reviews and continuous integration testing were systematically used before +integration of new features, with an important focus given to compatibility and +performance issues, resulting in a hopefully more robust release than |Coq| 8.6 +while maintaining compatibility. + +|Coq| Enhancement Proposals (CEPs for short) and open pull-requests discussions +were used to discuss publicly the new features. + +The |Coq| consortium, an organization directed towards users and supporters of the +system, is now upcoming and will rely on Inria’s newly created Foundation. + +| Paris, August 2017, +| Matthieu Sozeau and the |Coq| development team +| + +Credits: version 8.8 +==================== + + +|Coq| version 8.8 contains the result of refinements and stabilization of +features and deprecations, cleanups of the internals of the system along +with a few new features. The main user visible changes are: + +- Kernel: fix a subject reduction failure due to allowing fixpoints + on non-recursive values, which allows to recover full parametricity + for CIC, by Matthieu Sozeau. Handling of evars in the VM (the kernel + still does not accept evars) by Pierre-Marie Pédrot. + +- Notations: many improvements on recursive notations and support for + destructuring patterns in the syntax of notations by Hugo Herbelin. + +- Proof language: tacticals for profiling, timing and checking success + or failure of tactics by Jason Gross. The focusing bracket ``{`` + supports single-numbered goal selectors, e.g. ``2:{``, by Théo + Zimmermann. 
+ +- Vernacular: deprecation of commands and more uniform handling of the + ``Local`` flag, by Vincent Laporte and Maxime Dénès, part of a larger + attribute system overhaul. Experimental ``Show Extraction`` command by + Pierre Letouzey. Coercion now accepts ``Prop`` or ``Type`` as a source + by Arthur Charguéraud. ``Export`` modifier for options allowing to + export the option to modules that ``Import`` and not only ``Require`` + a module, by Pierre-Marie Pédrot. + +- Universes: many user-level and API level enhancements: qualified + naming and printing, variance annotations for cumulative inductive + types, more general constraints and enhancements of the minimization + heuristics, interaction with modules by Gaëtan Gilbert, Pierre-Marie + Pédrot and Matthieu Sozeau. + +- Library: Decimal Numbers library by Pierre Letouzey and various small + improvements. + +- Documentation: a large community effort resulted in the migration + of the reference manual to the Sphinx documentation tool. The result + is this manual. + +- Tools: experimental ``-mangle-names`` option to coqtop/coqc for + linting proof scripts, by Jasper Hugunin. + +On the implementation side, the ``dev/doc/changes.md`` file +documents the numerous changes to the implementation and improvements of +interfaces. The file provides guidelines on porting a plugin to the new +version. + +Version 8.8 also comes with a bunch of smaller-scale changes and +improvements regarding the different components of the system. +Most important ones are documented in the ``CHANGES`` file. + +The efficiency of the whole system has seen improvements thanks to +contributions from Gaëtan Gilbert, Pierre-Marie Pédrot, Maxime Dénès and +Matthieu Sozeau and performance issue tracking by Jason Gross and Paul +Steckler. + +The official wiki and the bugtracker of |Coq| migrated to the GitHub +platform, thanks to the work of Pierre Letouzey and Théo +Zimmermann. 
Gaëtan Gilbert, Emilio Jesús Gallego Arias worked on +maintaining and improving the continuous integration system. + +The OPAM repository for |Coq| packages has been maintained by Guillaume +Melquiond, Matthieu Sozeau, Enrico Tassi with contributions from many +users. A list of packages is available at https://coq.inria.fr/opam/www. + +The 40 contributors for this version are Yves Bertot, Joachim +Breitner, Tej Chajed, Arthur Charguéraud, Jacques-Pascal Deplaix, Maxime +Dénès, Jim Fehrle, Yannick Forster, Gaëtan Gilbert, Jason Gross, Samuel +Gruetter, Thomas Hebb, Hugo Herbelin, Jasper Hugunin, Emilio Jesus +Gallego Arias, Ralf Jung, Johannes Kloos, Matej Košík, Robbert Krebbers, +Tony Beta Lambda, Vincent Laporte, Pierre Letouzey, Farzon Lotfi, +Cyprien Mangin, Guillaume Melquiond, Raphaël Monat, Carl Patenaude +Poulin, Pierre-Marie Pédrot, Matthew Ryan, Matt Quinn, Sigurd Schneider, +Bernhard Schommer, Matthieu Sozeau, Arnaud Spiwack, Paul Steckler, +Enrico Tassi, Anton Trunov, Martin Vassor, Vadim Zaliva and Théo +Zimmermann. + +Version 8.8 is the third release of |Coq| developed on a time-based +development cycle. Its development spanned 6 months from the release of +|Coq| 8.7 and was based on a public roadmap. The development process +was coordinated by Matthieu Sozeau. Maxime Dénès was in charge of the +release process. + +Many power users helped to improve the design of the new features via +the bug tracker, the pull request system, the |Coq| development mailing +list or the coq-club@inria.fr mailing list. Special thanks to the users who +contributed patches and intensive brain-storming and code reviews, +starting with Jason Gross, Ralf Jung, Robbert Krebbers and Amin Timany. +It would however be impossible to mention exhaustively the names +of everybody who to some extent influenced the development. + +The |Coq| consortium, an organization directed towards users and +supporters of the system, is now running and employs Maxime Dénès. 
+The contacts of the Coq Consortium are Yves Bertot and Maxime Dénès. + +| Santiago de Chile, March 2018, +| Matthieu Sozeau for the |Coq| development team +| diff --git a/doc/sphinx/genindex.rst b/doc/sphinx/genindex.rst new file mode 100644 index 0000000000..a991c7f9f8 --- /dev/null +++ b/doc/sphinx/genindex.rst @@ -0,0 +1,5 @@ +.. hack to get index in TOC + +----- +Index +----- diff --git a/doc/sphinx/index.rst b/doc/sphinx/index.rst index e69de29bb2..c5d4936b18 100644 --- a/doc/sphinx/index.rst +++ b/doc/sphinx/index.rst @@ -0,0 +1,71 @@ +.. _introduction: + +.. include:: preamble.rst +.. include:: replaces.rst + +Introduction +=========================================== + +.. include:: introduction.rst +.. include:: credits.rst + +------------------ +Table of contents +------------------ + +.. toctree:: + :caption: The language + + language/gallina-extensions + language/coq-library + language/cic + language/module-system + +.. toctree:: + :caption: The proof engine + + proof-engine/tactics + proof-engine/detailed-tactic-examples + proof-engine/ssreflect-proof-language + +.. toctree:: + :caption: User extensions + + user-extensions/syntax-extensions + user-extensions/proof-schemes + +.. toctree:: + :caption: Practical tools + + practical-tools/coq-commands + practical-tools/coqide + +.. toctree:: + :caption: Addendum + + addendum/extended-pattern-matching + addendum/canonical-structures + addendum/omega + addendum/micromega + +.. toctree:: + :caption: Reference + + zebibliography + +.. toctree:: + :caption: Indexes + + genindex + coq-cmdindex + coq-tacindex + coq-optindex + coq-exnindex + +.. No entries yet + * :index:`thmindex` + +This material (the Coq Reference Manual) may be distributed only subject to the +terms and conditions set forth in the Open Publication License, v1.0 or later +(the latest version is presently available at +http://www.opencontent.org/openpub). Options A and B are not elected. 
diff --git a/doc/sphinx/introduction.rst b/doc/sphinx/introduction.rst new file mode 100644 index 0000000000..514745c1bf --- /dev/null +++ b/doc/sphinx/introduction.rst @@ -0,0 +1,119 @@ +------------------------ +Introduction +------------------------ + +This document is the Reference Manual of version of the |Coq| proof +assistant. A companion volume, the |Coq| Tutorial, is provided for the +beginners. It is advised to read the Tutorial first. A +book :cite:`CoqArt` on practical uses of the |Coq| system was +published in 2004 and is a good support for both the beginner and the +advanced user. + +The |Coq| system is designed to develop mathematical proofs, and +especially to write formal specifications, programs and to verify that +programs are correct with respect to their specification. It provides a +specification language named |Gallina|. Terms of |Gallina| can represent +programs as well as properties of these programs and proofs of these +properties. Using the so-called *Curry-Howard isomorphism*, programs, +properties and proofs are formalized in the same language called +*Calculus of Inductive Constructions*, that is a +:math:`\lambda`-calculus with a rich type system. All logical judgments +in |Coq| are typing judgments. The very heart of the |Coq| system is the +type-checking algorithm that checks the correctness of proofs, in other +words that checks that a program complies to its specification. |Coq| also +provides an interactive proof assistant to build proofs using specific +programs called *tactics*. + +All services of the |Coq| proof assistant are accessible by interpretation +of a command language called *the vernacular*. + +Coq has an interactive mode in which commands are interpreted as the +user types them in from the keyboard and a compiled mode where commands +are processed from a file. 
+ +- The interactive mode may be used as a debugging mode in which the + user can develop his theories and proofs step by step, backtracking + if needed and so on. The interactive mode is run with the `coqtop` + command from the operating system (which we shall assume to be some + variety of UNIX in the rest of this document). + +- The compiled mode acts as a proof checker taking a file containing a + whole development in order to ensure its correctness. Moreover, + |Coq|’s compiler provides an output file containing a compact + representation of its input. The compiled mode is run with the `coqc` + command from the operating system. + +These two modes are documented in Chapter :ref:`thecoqcommands`. + +Other modes of interaction with |Coq| are possible: through an emacs shell +window, an emacs generic user-interface for proof assistant (Proof +General :cite:`ProofGeneral`) or through a customized +interface (PCoq :cite:`Pcoq`). These facilities are not +documented here. There is also a |Coq| Integrated Development Environment +described in :ref:`coqintegrateddevelopmentenvironment`. + +How to read this book +===================== + +This is a Reference Manual, not a User Manual, so it is not made for a +continuous reading. However, it has some structure that is explained +below. + +- The first part describes the specification language, |Gallina|. + Chapters :ref:`thegallinaspecificationlanguage` and :ref:`extensionsofgallina` describe the concrete + syntax as well as the meaning of programs, theorems and proofs in the + Calculus of Inductive Constructions. Chapter :ref:`thecoqlibrary` describes the + standard library of |Coq|. Chapter :ref:`calculusofinductiveconstructions` is a mathematical description + of the formalism. Chapter :ref:`themodulesystem` describes the module + system. + +- The second part describes the proof engine. It is divided in five + chapters. 
Chapter :ref:`vernacularcommands` presents all commands (we + call them *vernacular commands*) that are not directly related to + interactive proving: requests to the environment, complete or partial + evaluation, loading and compiling files. How to start and stop + proofs, do multiple proofs in parallel is explained in + Chapter :ref:`proofhandling`. In Chapter :ref:`tactics`, all commands that + realize one or more steps of the proof are presented: we call them + *tactics*. The language to combine these tactics into complex proof + strategies is given in Chapter :ref:`thetacticlanguage`. Examples of tactics + are described in Chapter :ref:`detailedexamplesoftactics`. + +- The third part describes how to extend the syntax of |Coq|. It + corresponds to the Chapter :ref:`syntaxextensionsandinterpretationscopes`. + +- In the fourth part more practical tools are documented. First in + Chapter :ref:`thecoqcommands`, the usage of `coqc` (batch mode) and + `coqtop` (interactive mode) with their options is described. Then, + in Chapter :ref:`utilities`, various utilities that come with the + |Coq| distribution are presented. Finally, Chapter :ref:`coqintegrateddevelopmentenvironment` + describes the |Coq| integrated development environment. + +- The fifth part documents a number of advanced features, including coercions, + canonical structures, typeclasses, program extraction, and specialized + solvers and tactics. See the table of contents for a complete list. + +At the end of the document, after the global index, the user can find +specific indexes for tactics, vernacular commands, and error messages. + +List of additional documentation +================================ + +This manual does not contain all the documentation the user may need +about |Coq|. 
Various kinds of information can be found in the following documents:
Types and sorts are themselves terms +so that terms, types and sorts are all components of a common +syntactic language of terms which is described in Section :ref:`terms` but, +first, we describe sorts. + + +.. _Sorts: + +Sorts +~~~~~~~~~~~ + +All sorts have a type and there is an infinite well-founded typing +hierarchy of sorts whose base sorts are :math:`\Prop` and :math:`\Set`. + +The sort :math:`\Prop` intends to be the type of logical propositions. If :math:`M` is a +logical proposition then it denotes the class of terms representing +proofs of :math:`M`. An object :math:`m` belonging to :math:`M` witnesses the fact that :math:`M` is +provable. An object of type :math:`\Prop` is called a proposition. + +The sort :math:`\Set` intends to be the type of small sets. This includes data +types such as booleans and naturals, but also products, subsets, and +function types over these data types. + +:math:`\Prop` and :math:`\Set` themselves can be manipulated as ordinary terms. +Consequently they also have a type. Because assuming simply that :math:`\Set` +has type :math:`\Set` leads to an inconsistent theory :cite:`Coq86`, the language of +|Cic| has infinitely many sorts. There are, in addition to :math:`\Set` and :math:`\Prop` +a hierarchy of universes :math:`\Type(i)` for any integer :math:`i`. + +Like :math:`\Set`, all of the sorts :math:`\Type(i)` contain small sets such as +booleans, natural numbers, as well as products, subsets and function +types over small sets. But, unlike :math:`\Set`, they also contain large sets, +namely the sorts :math:`\Set` and :math:`\Type(j)` for :math:`j<i`, and all products, subsets +and function types over these sorts. + +Formally, we call :math:`\Sort` the set of sorts which is defined by: + +.. math:: + + \Sort \equiv \{\Prop,\Set,\Type(i)\;|\; i~∈ ℕ\} + +Their properties, such as: :math:`\Prop:\Type(1)`, :math:`\Set:\Type(1)`, and +:math:`\Type(i):\Type(i+1)`, are defined in Section :ref:`subtyping-rules`. 
+ +The user does not have to mention explicitly the index :math:`i` when +referring to the universe :math:`\Type(i)`. One only writes :math:`\Type`. The system +itself generates for each instance of :math:`\Type` a new index for the +universe and checks that the constraints between these indexes can be +solved. From the user point of view we consequently have :math:`\Type:\Type`. We +shall make precise in the typing rules the constraints between the +indices. + + +.. _Implementation-issues: + +**Implementation issues** In practice, the Type hierarchy is +implemented using *algebraic +universes*. An algebraic universe :math:`u` is either a variable (a qualified +identifier with a number) or a successor of an algebraic universe (an +expression :math:`u+1`), or an upper bound of algebraic universes (an +expression :math:`\max(u 1 ,...,u n )`), or the base universe (the expression +:math:`0`) which corresponds, in the arity of template polymorphic inductive +types (see Section +:ref:`well-formed-inductive-definitions`), +to the predicative sort :math:`\Set`. A graph of +constraints between the universe variables is maintained globally. To +ensure the existence of a mapping of the universes to the positive +integers, the graph of constraints must remain acyclic. Typing +expressions that violate the acyclicity of the graph of constraints +results in a Universe inconsistency error (see also Section +:ref:`TODO-2.10`). + + +.. _Terms: + +Terms +~~~~~ + + + +Terms are built from sorts, variables, constants, abstractions, +applications, local definitions, and products. From a syntactic point +of view, types cannot be distinguished from terms, except that they +cannot start by an abstraction or a constructor. More precisely the +language of the *Calculus of Inductive Constructions* is built from +the following rules. + + +#. the sorts :math:`\Set`, :math:`\Prop`, :math:`\Type(i)` are terms. +#. 
variables, hereafter ranged over by letters :math:`x`, :math:`y`, etc., are terms +#. constants, hereafter ranged over by letters :math:`c`, :math:`d`, etc., are terms. +#. if :math:`x` is a variable and :math:`T`, :math:`U` are terms then + :math:`∀ x:T,U` (:g:`forall x:T, U` in |Coq| concrete syntax) is a term. + If :math:`x` occurs in :math:`U`, :math:`∀ x:T,U` reads as + “for all :math:`x` of type :math:`T`, :math:`U`”. + As :math:`U` depends on :math:`x`, one says that :math:`∀ x:T,U` is + a *dependent product*. If :math:`x` does not occur in :math:`U` then + :math:`∀ x:T,U` reads as + “if :math:`T` then :math:`U`”. A *non dependent product* can be + written: :math:`T \rightarrow U`. +#. if :math:`x` is a variable and :math:`T`, :math:`u` are terms then + :math:`λ x:T . u` (:g:`fun x:T => u` + in |Coq| concrete syntax) is a term. This is a notation for the + λ-abstraction of λ-calculus :cite:`Bar81`. The term :math:`λ x:T . u` is a function + which maps elements of :math:`T` to the expression :math:`u`. +#. if :math:`t` and :math:`u` are terms then :math:`(t~u)` is a term + (:g:`t u` in |Coq| concrete + syntax). The term :math:`(t~u)` reads as “t applied to u”. +#. if :g:`x` is a variable, and :math:`t`, :math:`T` and :math:`u` are + terms then :g:`let x:=t:T in u` is + a term which denotes the term :math:`u` where the variable :math:`x` is locally bound + to :math:`t` of type :math:`T`. This stands for the common “let-in” construction of + functional programs such as ML or Scheme. + + + +.. _Free-variables: + +**Free variables.** +The notion of free variables is defined as usual. In the expressions +:g:`λx:T. U` and :g:`∀ x:T, U` the occurrences of :math:`x` in :math:`U` are bound. + + +.. _Substitution: + +**Substitution.** +The notion of substituting a term :math:`t` to free occurrences of a variable +:math:`x` in a term :math:`u` is defined as usual. The resulting term is written +:math:`\subst{u}{x}{t}`. + + +.. 
_The-logical-vs-programming-readings: + +**The logical vs programming readings.** +The constructions of the |Cic| can be used to express both logical and +programming notions, accordingly to the Curry-Howard correspondence +between proofs and programs, and between propositions and types +:cite:`Cur58,How80,Bru72`. + +For instance, let us assume that :math:`\nat` is the type of natural numbers +with zero element written :math:`0` and that :g:`True` is the always true +proposition. Then :math:`→` is used both to denote :math:`\nat→\nat` which is the type +of functions from :math:`\nat` to :math:`\nat`, to denote True→True which is an +implicative proposition, to denote :math:`\nat →\Prop` which is the type of +unary predicates over the natural numbers, etc. + +Let us assume that ``mult`` is a function of type :math:`\nat→\nat→\nat` and ``eqnat`` a +predicate of type \nat→\nat→ \Prop. The λ-abstraction can serve to build +“ordinary” functions as in :math:`λ x:\nat.(\kw{mult}~x~x)` (i.e. +:g:`fun x:nat => mult x x` +in |Coq| notation) but may build also predicates over the natural +numbers. For instance :math:`λ x:\nat.(\kw{eqnat}~x~0)` +(i.e. :g:`fun x:nat => eqnat x 0` +in |Coq| notation) will represent the predicate of one variable :math:`x` which +asserts the equality of :math:`x` with :math:`0`. This predicate has type +:math:`\nat → \Prop` +and it can be applied to any expression of type :math:`\nat`, say :math:`t`, to give an +object :math:`P~t` of type :math:`\Prop`, namely a proposition. + +Furthermore :g:`forall x:nat, P x` will represent the type of functions +which associate to each natural number :math:`n` an object of type :math:`(P~n)` and +consequently represent the type of proofs of the formula “:math:`∀ x. P(x`)”. + + +.. _Typing-rules: + +Typing rules +---------------- + +As objects of type theory, terms are subjected to *type discipline*. +The well typing of a term depends on a global environment and a local +context. + + +.. 
_Local-context: + +**Local context.** +A *local context* is an ordered list of *local declarations* of names +which we call *variables*. The declaration of some variable :math:`x` is +either a *local assumption*, written :math:`x:T` (:math:`T` is a type) or a *local +definition*, written :math:`x:=t:T`. We use brackets to write local contexts. +A typical example is :math:`[x:T;y:=u:U;z:V]`. Notice that the variables +declared in a local context must be distinct. If :math:`Γ` is a local context +that declares some :math:`x`, we +write :math:`x ∈ Γ`. By writing :math:`(x:T) ∈ Γ` we mean that either :math:`x:T` is an +assumption in :math:`Γ` or that there exists some :math:`t` such that :math:`x:=t:T` is a +definition in :math:`Γ`. If :math:`Γ` defines some :math:`x:=t:T`, we also write :math:`(x:=t:T) ∈ Γ`. +For the rest of the chapter, :math:`Γ::(y:T)` denotes the local context :math:`Γ` +enriched with the local assumption :math:`y:T`. Similarly, :math:`Γ::(y:=t:T)` denotes +the local context :math:`Γ` enriched with the local definition :math:`(y:=t:T)`. The +notation :math:`[]` denotes the empty local context. By :math:`Γ_1 ; Γ_2` we mean +concatenation of the local context :math:`Γ_1` and the local context :math:`Γ_2` . + + +.. _Global-environment: + +**Global environment.** +A *global environment* is an ordered list of *global declarations*. +Global declarations are either *global assumptions* or *global +definitions*, but also declarations of inductive objects. Inductive +objects themselves declare both inductive or coinductive types and +constructors (see Section :ref:`inductive-definitions`). + +A *global assumption* will be represented in the global environment as +:math:`(c:T)` which assumes the name :math:`c` to be of some type :math:`T`. A *global +definition* will be represented in the global environment as :math:`c:=t:T` +which defines the name :math:`c` to have value :math:`t` and type :math:`T`. We shall call +such names *constants*. 
For the rest of the chapter, :math:`E;c:T` denotes
inference:: Ax-Type + + \WFE{\Gamma} + --------------------------- + \WTEG{\Type(i)}{\Type(i+1)} + +.. inference:: Var + + \WFE{\Gamma} + (x:T) \in \Gamma~~\mbox{or}~~(x:=t:T) \in \Gamma~\mbox{for some $t$} + -------------------------------------------------------------------- + \WTEG{x}{T} + +.. inference:: Const + + \WFE{\Gamma} + (c:T) \in E~~\mbox{or}~~(c:=t:T) \in E~\mbox{for some $t$} + ---------------------------------------------------------- + \WTEG{c}{T} + +.. inference:: Prod-Prop + + \WTEG{T}{s} + s \in {\Sort} + \WTE{\Gamma::(x:T)}{U}{\Prop} + ----------------------------- + \WTEG{\forall~x:T,U}{\Prop} + +.. inference:: Prod-Set + + \WTEG{T}{s} + s \in \{\Prop, \Set\} + \WTE{\Gamma::(x:T)}{U}{\Set} + ---------------------------- + \WTEG{\forall~x:T,U}{\Set} + +.. inference:: Prod-Type + + \WTEG{T}{\Type(i)} + \WTE{\Gamma::(x:T)}{U}{\Type(i)} + -------------------------------- + \WTEG{\forall~x:T,U}{\Type(i)} + +.. inference:: Lam + + \WTEG{\forall~x:T,U}{s} + \WTE{\Gamma::(x:T)}{t}{U} + ------------------------------------ + \WTEG{\lb x:T\mto t}{\forall x:T, U} + +.. inference:: App + + \WTEG{t}{\forall~x:U,T} + \WTEG{u}{U} + ------------------------------ + \WTEG{(t\ u)}{\subst{T}{x}{u}} + +.. inference:: Let + + \WTEG{t}{T} + \WTE{\Gamma::(x:=t:T)}{u}{U} + ----------------------------------------- + \WTEG{\letin{x}{t:T}{u}}{\subst{U}{x}{t}} + + + +**Remark**: **Prod-Prop** and **Prod-Set** typing-rules make sense if we consider the +semantic difference between :math:`\Prop` and :math:`\Set`: + + ++ All values of a type that has a sort :math:`\Set` are extractable. ++ No values of a type that has a sort :math:`\Prop` are extractable. + + + +**Remark**: We may have :math:`\letin{x}{t:T}{u}` well-typed without having +:math:`((λ x:T.u) t)` well-typed (where :math:`T` is a type of +:math:`t`). This is because the value :math:`t` associated to +:math:`x` may be used in a conversion rule (see Section :ref:`Conversion-rules`). + + +.. 
_Conversion-rules: + +Conversion rules +-------------------- + +In |Cic|, there is an internal reduction mechanism. In particular, it +can decide if two programs are *intentionally* equal (one says +*convertible*). Convertibility is described in this section. + + +.. _β-reduction: + +**β-reduction.** +We want to be able to identify some terms as we can identify the +application of a function to a given argument with its result. For +instance the identity function over a given type T can be written +:math:`λx:T. x`. In any global environment :math:`E` and local context +:math:`Γ`, we want to identify any object :math:`a` (of type +:math:`T`) with the application :math:`((λ x:T. x) a)`. We define for +this a *reduction* (or a *conversion*) rule we call :math:`β`: + +.. math:: + + E[Γ] ⊢ ((λx:T. t) u)~\triangleright_β~\subst{t}{x}{u} + +We say that :math:`\subst{t}{x}{u}` is the *β-contraction* of +:math:`((λx:T. t) u)` and, conversely, that :math:`((λ x:T. t) u)` is the +*β-expansion* of :math:`\subst{t}{x}{u}`. + +According to β-reduction, terms of the *Calculus of Inductive +Constructions* enjoy some fundamental properties such as confluence, +strong normalization, subject reduction. These results are +theoretically of great importance but we will not detail them here and +refer the interested reader to :cite:`Coq85`. + + +.. _ι-reduction: + +**ι-reduction.** +A specific conversion rule is associated to the inductive objects in +the global environment. We shall give later on (see Section +:ref:`Well-formed-inductive-definitions`) the precise rules but it +just says that a destructor applied to an object built from a +constructor behaves as expected. This reduction is called ι-reduction +and is more precisely studied in :cite:`Moh93,Wer94`. + + +.. _δ-reduction: + +**δ-reduction.** +We may have variables defined in local contexts or constants defined +in the global environment. 
It is legal to identify such a reference +with its value, that is to expand (or unfold) it into its value. This +reduction is called δ-reduction and shows as follows. + +.. inference:: Delta-Local + + \WFE{\Gamma} + (x:=t:T) ∈ Γ + -------------- + E[Γ] ⊢ x~\triangleright_Δ~t + +.. inference:: Delta-Global + + \WFE{\Gamma} + (c:=t:T) ∈ E + -------------- + E[Γ] ⊢ c~\triangleright_δ~t + + +.. _ζ-reduction: + +**ζ-reduction.** +|Coq| allows also to remove local definitions occurring in terms by +replacing the defined variable by its value. The declaration being +destroyed, this reduction differs from δ-reduction. It is called +ζ-reduction and shows as follows. + +.. inference:: Zeta + + \WFE{\Gamma} + \WTEG{u}{U} + \WTE{\Gamma::(x:=u:U)}{t}{T} + -------------- + E[Γ] ⊢ \letin{x}{u}{t}~\triangleright_ζ~\subst{t}{x}{u} + + +.. _η-expansion: + +**η-expansion.** +Another important concept is η-expansion. It is legal to identify any +term :math:`t` of functional type :math:`∀ x:T, U` with its so-called η-expansion + +.. math:: + λx:T. (t~x) + +for :math:`x` an arbitrary variable name fresh in :math:`t`. + + +**Remark**: We deliberately do not define η-reduction: + +.. math:: + λ x:T. (t~x) \not\triangleright_η t + +This is because, in general, the type of :math:`t` need not to be convertible +to the type of :math:`λ x:T. (t~x)`. E.g., if we take :math:`f` such that: + +.. math:: + f : ∀ x:\Type(2),\Type(1) + +then + +.. math:: + λ x:\Type(1),(f~x) : ∀ x:\Type(1),\Type(1) + +We could not allow + +.. math:: + λ x:Type(1),(f x) \triangleright_η f + +because the type of the reduced term :math:`∀ x:\Type(2),\Type(1)` would not be +convertible to the type of the original term :math:`∀ x:\Type(1),\Type(1).` + + +.. 
_Convertibility: + +**Convertibility.** +Let us write :math:`E[Γ] ⊢ t \triangleright u` for the contextual closure of the +relation :math:`t` reduces to :math:`u` in the global environment +:math:`E` and local context :math:`Γ` with one of the previous +reductions β, ι, δ or ζ. + +We say that two terms :math:`t_1` and :math:`t_2` are +*βιδζη-convertible*, or simply *convertible*, or *equivalent*, in the +global environment :math:`E` and local context :math:`Γ` iff there +exist terms :math:`u_1` and :math:`u_2` such that :math:`E[Γ] ⊢ t_1 \triangleright +… \triangleright u_1` and :math:`E[Γ] ⊢ t_2 \triangleright … \triangleright u_2` and either :math:`u_1` and +:math:`u_2` are identical, or they are convertible up to η-expansion, +i.e. :math:`u_1` is :math:`λ x:T. u_1'` and :math:`u_2 x` is +recursively convertible to :math:`u_1'` , or, symmetrically, +:math:`u_2` is :math:`λx:T. u_2'` +and :math:`u_1 x` is recursively convertible to u_2′ . We then write +:math:`E[Γ] ⊢ t_1 =_{βδιζη} t_2` . + +Apart from this we consider two instances of polymorphic and +cumulative (see Chapter :ref:`polymorphicuniverses`) inductive types +(see below) convertible + +.. math:: + E[Γ] ⊢ t~w_1 … w_m =_{βδιζη} t~w_1' … w_m' + +if we have subtypings (see below) in both directions, i.e., + +.. math:: + E[Γ] ⊢ t~w_1 … w_m ≤_{βδιζη} t~w_1' … w_m' + +and + +.. math:: + E[Γ] ⊢ t~w_1' … w_m' ≤_{βδιζη} t~w_1 … w_m. + +Furthermore, we consider + +.. math:: + E[Γ] ⊢ c~v_1 … v_m =_{βδιζη} c'~v_1' … v_m' + +convertible if + +.. math:: + E[Γ] ⊢ v_i =_{βδιζη} v_i' + +and we have that :math:`c` and :math:`c'` +are the same constructors of different instances of the same inductive +types (differing only in universe levels) such that + +.. math:: + E[Γ] ⊢ c~v_1 … v_m : t~w_1 … w_m + +and + +.. math:: + E[Γ] ⊢ c'~v_1' … v_m' : t'~ w_1' … w_m ' + +and we have + +.. math:: + E[Γ] ⊢ t~w_1 … w_m =_{βδιζη} t~w_1' … w_m'. 
+ +The convertibility relation allows introducing a new typing rule which +says that two convertible well-formed types have the same inhabitants. + + +.. _subtyping-rules: + +Subtyping rules +------------------- + +At the moment, we did not take into account one rule between universes +which says that any term in a universe of index i is also a term in +the universe of index i+1 (this is the *cumulativity* rule of|Cic|). +This property extends the equivalence relation of convertibility into +a *subtyping* relation inductively defined by: + + +#. if :math:`E[Γ] ⊢ t =_{βδιζη} u` then :math:`E[Γ] ⊢ t ≤_{βδιζη} u`, +#. if :math:`i ≤ j` then :math:`E[Γ] ⊢ \Type(i) ≤_{βδιζη} \Type(j)`, +#. for any :math:`i`, :math:`E[Γ] ⊢ \Set ≤_{βδιζη} \Type(i)`, +#. :math:`E[Γ] ⊢ \Prop ≤_{βδιζη} \Set`, hence, by transitivity, + :math:`E[Γ] ⊢ \Prop ≤_{βδιζη} \Type(i)`, for any :math:`i` +#. if :math:`E[Γ] ⊢ T =_{βδιζη} U` and + :math:`E[Γ::(x:T)] ⊢ T' ≤_{βδιζη} U'` then + :math:`E[Γ] ⊢ ∀x:T, T′ ≤_{βδιζη} ∀ x:U, U′`. +#. if :math:`\ind{p}{Γ_I}{Γ_C}` is a universe polymorphic and cumulative + (see Chapter :ref:`polymorphicuniverses`) inductive type (see below) + and + :math:`(t : ∀Γ_P ,∀Γ_{\mathit{Arr}(t)}, \Sort)∈Γ_I` + and + :math:`(t' : ∀Γ_P' ,∀Γ_{\mathit{Arr}(t)}', \Sort')∈Γ_I` + are two different instances of *the same* inductive type (differing only in + universe levels) with constructors + + .. math:: + [c_1 : ∀Γ_P ,∀ T_{1,1} … T_{1,n_1} , t~v_{1,1} … v_{1,m} ;…; + c_k : ∀Γ_P ,∀ T_{k,1} … T_{k,n_k} ,t~v_{n,1} … v_{n,m} ] + + and + + .. math:: + [c_1 : ∀Γ_P' ,∀ T_{1,1}' … T_{1,n_1}' , t'~v_{1,1}' … v_{1,m}' ;…; + c_k : ∀Γ_P' ,∀ T_{k,1}' … T_{k,n_k}' ,t'~v_{n,1}' … v_{n,m}' ] + + respectively then + + .. math:: + E[Γ] ⊢ t~w_1 … w_m ≤_{βδιζη} t~w_1' … w_m' + + (notice that :math:`t` and :math:`t'` are both + fully applied, i.e., they have a sort as a type) if + + .. math:: + E[Γ] ⊢ w_i =_{βδιζη} w_i' + + for :math:`1 ≤ i ≤ m` and we have + + + .. 
math:: + E[Γ] ⊢ T_{i,j} ≤_{βδιζη} T_{i,j}' + + and + + .. math:: + E[Γ] ⊢ A_i ≤_{βδιζη} A_i' + + where :math:`Γ_{\mathit{Arr}(t)} = [a_1 : A_1 ; … ; a_l : A_l ]` and + :math:`Γ_{\mathit{Arr}(t)}' = [a_1 : A_1'; … ; a_l : A_l']`. + + +The conversion rule up to subtyping is now exactly: + +.. inference:: Conv + + E[Γ] ⊢ U : s + E[Γ] ⊢ t : T + E[Γ] ⊢ T ≤_{βδιζη} U + -------------- + E[Γ] ⊢ t : U + + +.. _Normal-form: + +**Normal form**. A term which cannot be any more reduced is said to be in *normal +form*. There are several ways (or strategies) to apply the reduction +rules. Among them, we have to mention the *head reduction* which will +play an important role (see Chapter :ref:`tactics`). Any term :math:`t` can be written as +:math:`λ x_1 :T_1 . … λ x_k :T_k . (t_0~t_1 … t_n )` where :math:`t_0` is not an +application. We say then that :math:`t~0` is the *head of* :math:`t`. If we assume +that :math:`t_0` is :math:`λ x:T. u_0` then one step of β-head reduction of :math:`t` is: + +.. math:: + λ x_1 :T_1 . … λ x_k :T_k . (λ x:T. u_0~t_1 … t_n ) \triangleright + λ (x_1 :T_1 )…(x_k :T_k ). (\subst{u_0}{x}{t_1}~t_2 … t_n ) + +Iterating the process of head reduction until the head of the reduced +term is no more an abstraction leads to the *β-head normal form* of :math:`t`: + +.. math:: + t \triangleright … \triangleright λ x_1 :T_1 . …λ x_k :T_k . (v~u_1 … u_m ) + +where :math:`v` is not an abstraction (nor an application). Note that the head +normal form must not be confused with the normal form since some :math:`u_i` +can be reducible. Similar notions of head-normal forms involving δ, ι +and ζ reductions or any combination of those can also be defined. + + +.. 
_inductive-definitions: + +Inductive Definitions +------------------------- + +Formally, we can represent any *inductive definition* as +:math:`\ind{p}{Γ_I}{Γ_C}` where: + ++ :math:`Γ_I` determines the names and types of inductive types; ++ :math:`Γ_C` determines the names and types of constructors of these + inductive types; ++ :math:`p` determines the number of parameters of these inductive types. + + +These inductive definitions, together with global assumptions and +global definitions, then form the global environment. Additionally, +for any :math:`p` there always exists :math:`Γ_P =[a_1 :A_1 ;…;a_p :A_p ]` such that +each :math:`T` in :math:`(t:T)∈Γ_I \cup Γ_C` can be written as: :math:`∀Γ_P , T'` where :math:`Γ_P` is +called the *context of parameters*. Furthermore, we must have that +each :math:`T` in :math:`(t:T)∈Γ_I` can be written as: :math:`∀Γ_P,∀Γ_{\mathit{Arr}(t)}, S` where +:math:`Γ_{\mathit{Arr}(t)}` is called the *Arity* of the inductive type t and :math:`S` is called +the sort of the inductive type t (not to be confused with :math:`\Sort` which is the set of sorts). + + +** Examples** The declaration for parameterized lists is: + +.. math:: + \ind{1}{[\List:\Set→\Set]}{\left[\begin{array}{rcl} + \Nil & : & \forall A:\Set,\List~A \\ + \cons & : & \forall A:\Set, A→ \List~A→ \List~A + \end{array} + \right]} + +which corresponds to the result of the |Coq| declaration: + +.. example:: + .. coqtop:: in + + Inductive list (A:Set) : Set := + | nil : list A + | cons : A -> list A -> list A. + +The declaration for a mutual inductive definition of tree and forest +is: + +.. math:: + \ind{~}{\left[\begin{array}{rcl}\tree&:&\Set\\\forest&:&\Set\end{array}\right]} + {\left[\begin{array}{rcl} + \node &:& \forest → \tree\\ + \emptyf &:& \forest\\ + \consf &:& \tree → \forest → \forest\\ + \end{array}\right]} + +which corresponds to the result of the |Coq| declaration: + +.. example:: + .. 
coqtop:: in + + Inductive tree : Set := + | node : forest -> tree + with forest : Set := + | emptyf : forest + | consf : tree -> forest -> forest. + +The declaration for a mutual inductive definition of even and odd is: + +.. math:: + \ind{1}{\left[\begin{array}{rcl}\even&:&\nat → \Prop \\ + \odd&:&\nat → \Prop \end{array}\right]} + {\left[\begin{array}{rcl} + \evenO &:& \even~0\\ + \evenS &:& \forall n, \odd~n -> \even~(\kw{S}~n)\\ + \oddS &:& \forall n, \even~n -> \odd~(\kw{S}~n) + \end{array}\right]} + +which corresponds to the result of the |Coq| declaration: + +.. example:: + .. coqtop:: in + + Inductive even : nat -> prop := + | even_O : even 0 + | even_S : forall n, odd n -> even (S n) + with odd : nat -> prop := + | odd_S : forall n, even n -> odd (S n). + + + +.. _Types-of-inductive-objects: + +Types of inductive objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We have to give the type of constants in a global environment E which +contains an inductive declaration. + +.. inference:: Ind + + \WFE{Γ} + \ind{p}{Γ_I}{Γ_C} ∈ E + (a:A)∈Γ_I + --------------------- + E[Γ] ⊢ a : A + +.. inference:: Constr + + \WFE{Γ} + \ind{p}{Γ_I}{Γ_C} ∈ E + (c:C)∈Γ_C + --------------------- + E[Γ] ⊢ c : C + +**Example.** +Provided that our environment :math:`E` contains inductive definitions we showed before, +these two inference rules above enable us to conclude that: + +.. math:: + \begin{array}{l} + E[Γ] ⊢ \even : \nat→\Prop\\ + E[Γ] ⊢ \odd : \nat→\Prop\\ + E[Γ] ⊢ \even\_O : \even~O\\ + E[Γ] ⊢ \even\_S : \forall~n:\nat, \odd~n → \even~(S~n)\\ + E[Γ] ⊢ \odd\_S : \forall~n:\nat, \even~n → \odd~(S~n) + \end{array} + + + + +.. _Well-formed-inductive-definitions: + +Well-formed inductive definitions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We cannot accept any inductive declaration because some of them lead +to inconsistent systems. We restrict ourselves to definitions which +satisfy a syntactic criterion of positivity. 
Before giving the formal +rules, we need a few definitions: + + +**Type is an Arity of Sort S.** +A type :math:`T` is an *arity of sort s* if it converts to the sort s or to a +product :math:`∀ x:T,U` with :math:`U` an arity of sort s. + +.. example:: + + :math:`A→\Set` is an arity of sort :math:`\Set`. :math:`∀ A:\Prop,A→ \Prop` is an arity of sort + :math:`\Prop`. + + +**Type is an Arity.** +A type :math:`T` is an *arity* if there is a :math:`s∈ \Sort` such that :math:`T` is an arity of +sort s. + + +.. example:: + + :math:`A→ Set` and :math:`∀ A:\Prop,A→ \Prop` are arities. + + +**Type of Constructor of I.** +We say that T is a *type of constructor of I* in one of the following +two cases: + + ++ :math:`T` is :math:`(I~t_1 … t_n )` ++ :math:`T` is :math:`∀ x:U,T'` where :math:`T'` is also a type of constructor of :math:`I` + + + +.. example:: + + :math:`\nat` and :math:`\nat→\nat` are types of constructor of :math:`\nat`. + :math:`∀ A:Type,\List~A` and :math:`∀ A:Type,A→\List~A→\List~A` are types of constructor of :math:`\List`. + +**Positivity Condition.** +The type of constructor :math:`T` will be said to *satisfy the positivity +condition* for a constant :math:`X` in the following cases: + + ++ :math:`T=(X~t_1 … t_n )` and :math:`X` does not occur free in any :math:`t_i` ++ :math:`T=∀ x:U,V` and :math:`X` occurs only strictly positively in :math:`U` and the type :math:`V` + satisfies the positivity condition for :math:`X`. + + +**Occurs Strictly Positively.** +The constant :math:`X` *occurs strictly positively* in :math:`T` in the following +cases: + + ++ :math:`X` does not occur in :math:`T` ++ :math:`T` converts to :math:`(X~t_1 … t_n )` and :math:`X` does not occur in any of :math:`t_i` ++ :math:`T` converts to :math:`∀ x:U,V` and :math:`X` does not occur in type :math:`U` but occurs + strictly positively in type :math:`V` ++ :math:`T` converts to :math:`(I~a_1 … a_m~t_1 … t_p )` where :math:`I` is the name of an + inductive declaration of the form + + .. 
math:: + \ind{m}{I:A}{c_1 :∀ p_1 :P_1 ,… ∀p_m :P_m ,C_1 ;…;c_n :∀ p_1 :P_1 ,… ∀p_m :P_m ,C_n} + + (in particular, it is + not mutually defined and it has :math:`m` parameters) and :math:`X` does not occur in + any of the :math:`t_i`, and the (instantiated) types of constructor + :math:`\subst{C_i}{p_j}{a_j}_{j=1… m}` of :math:`I` satisfy the nested positivity condition for :math:`X` + +**Nested Positivity Condition.** +The type of constructor :math:`T` of :math:`I` *satisfies the nested positivity +condition* for a constant :math:`X` in the following cases: + + ++ :math:`T=(I~b_1 … b_m~u_1 … u_p)`, :math:`I` is an inductive definition with :math:`m` + parameters and :math:`X` does not occur in any :math:`u_i` ++ :math:`T=∀ x:U,V` and :math:`X` occurs only strictly positively in :math:`U` and the type :math:`V` + satisfies the nested positivity condition for :math:`X` + + +For instance, if one considers the type + +.. example:: + .. coqtop:: all + + Module TreeExample. + Inductive tree (A:Type) : Type := + | leaf : tree A + | node : A -> (nat -> tree A) -> tree A. + End TreeExample. + +:: + + [TODO Note: This commentary does not seem to correspond to the + preceding example. Instead it is referring to the first example + in Inductive Definitions section. It seems we should either + delete the preceding example and refer the the example above of + type `list A`, or else we should rewrite the commentary below.] + + Then every instantiated constructor of list A satisfies the nested positivity + condition for list + │ + ├─ concerning type list A of constructor nil: + │ Type list A of constructor nil satisfies the positivity condition for list + │ because list does not appear in any (real) arguments of the type of that + | constructor (primarily because list does not have any (real) + | arguments) ... 
(bullet 1) + │ + ╰─ concerning type ∀ A → list A → list A of constructor cons: + Type ∀ A : Type, A → list A → list A of constructor cons + satisfies the positivity condition for list because: + │ + ├─ list occurs only strictly positively in Type ... (bullet 3) + │ + ├─ list occurs only strictly positively in A ... (bullet 3) + │ + ├─ list occurs only strictly positively in list A ... (bullet 4) + │ + ╰─ list satisfies the positivity condition for list A ... (bullet 1) + + + + +.. _Correctness-rules: + +**Correctness rules.** +We shall now describe the rules allowing the introduction of a new +inductive definition. + +Let :math:`E` be a global environment and :math:`Γ_P`, :math:`Γ_I`, :math:`Γ_C` be contexts +such that :math:`Γ_I` is :math:`[I_1 :∀ Γ_P ,A_1 ;…;I_k :∀ Γ_P ,A_k]`, and +:math:`Γ_C` is :math:`[c_1:∀ Γ_P ,C_1 ;…;c_n :∀ Γ_P ,C_n ]`. Then + +.. inference:: W-Ind + + \WFE{Γ_P} + (E[Γ_P ] ⊢ A_j : s_j' )_{j=1… k} + (E[Γ_I ;Γ_P ] ⊢ C_i : s_{q_i} )_{i=1… n} + ------------------------------------------ + \WF{E;\ind{p}{Γ_I}{Γ_C}}{Γ} + + +provided that the following side conditions hold: + + + :math:`k>0` and all of :math:`I_j` and :math:`c_i` are distinct names for :math:`j=1… k` and :math:`i=1… n`, + + :math:`p` is the number of parameters of :math:`\ind{p}{Γ_I}{Γ_C}` and :math:`Γ_P` is the + context of parameters, + + for :math:`j=1… k` we have that :math:`A_j` is an arity of sort :math:`s_j` and :math:`I_j ∉ E`, + + for :math:`i=1… n` we have that :math:`C_i` is a type of constructor of :math:`I_{q_i}` which + satisfies the positivity condition for :math:`I_1 … I_k` and :math:`c_i ∉ Γ ∪ E`. + +One can remark that there is a constraint between the sort of the +arity of the inductive type and the sort of the type of its +constructors which will always be satisfied for the impredicative +sortProp but may fail to define inductive definition on sort Set and +generate constraints between universes for inductive definitions in +the Type hierarchy. + + +**Examples**. 
It is well known that the existential quantifier can be encoded as an +inductive definition. The following declaration introduces the second- +order existential quantifier :math:`∃ X.P(X)`. + +.. example:: + .. coqtop:: in + + Inductive exProp (P:Prop->Prop) : Prop := + | exP_intro : forall X:Prop, P X -> exProp P. + +The same definition on Set is not allowed and fails: + +.. example:: + .. coqtop:: all + + Fail Inductive exSet (P:Set->Prop) : Set := + exS_intro : forall X:Set, P X -> exSet P. + +It is possible to declare the same inductive definition in the +universe Type. The exType inductive definition has type +:math:`(\Type(i)→\Prop)→\Type(j)` with the constraint that the parameter :math:`X` of :math:`\kw{exT_intro}` +has type :math:`\Type(k)` with :math:`k<j` and :math:`k≤ i`. + +.. example:: + .. coqtop:: all + + Inductive exType (P:Type->Prop) : Type := + exT_intro : forall X:Type, P X -> exType P. + + + +.. _Template-polymorphism: + +**Template polymorphism.** +Inductive types declared in Type are polymorphic over their arguments +in Type. If :math:`A` is an arity of some sort and s is a sort, we write :math:`A_{/s}` +for the arity obtained from :math:`A` by replacing its sort with s. +Especially, if :math:`A` is well-typed in some global environment and local +context, then :math:`A_{/s}` is typable by typability of all products in the +Calculus of Inductive Constructions. The following typing rule is +added to the theory. + +Let :math:`\ind{p}{Γ_I}{Γ_C}` be an inductive definition. Let +:math:`Γ_P = [p_1 :P_1 ;…;p_p :P_p ]` be its context of parameters, +:math:`Γ_I = [I_1:∀ Γ_P ,A_1 ;…;I_k :∀ Γ_P ,A_k ]` its context of definitions and +:math:`Γ_C = [c_1 :∀ Γ_P ,C_1 ;…;c_n :∀ Γ_P ,C_n]` its context of constructors, +with :math:`c_i` a constructor of :math:`I_{q_i}`. 
Let :math:`m ≤ p` be the length of the +longest prefix of parameters such that the :math:`m` first arguments of all +occurrences of all :math:`I_j` in all :math:`C_k` (even the occurrences in the +hypotheses of :math:`C_k`) are exactly applied to :math:`p_1 … p_m` (:math:`m` is the number +of *recursively uniform parameters* and the :math:`p−m` remaining parameters +are the *recursively non-uniform parameters*). Let :math:`q_1 , …, q_r` , with +:math:`0≤ r≤ m`, be a (possibly) partial instantiation of the recursively +uniform parameters of :math:`Γ_P` . We have: + +.. inference:: Ind-Family + + \left\{\begin{array}{l} + \ind{p}{Γ_I}{Γ_C} \in E\\ + (E[] ⊢ q_l : P'_l)_{l=1\ldots r}\\ + (E[] ⊢ P'_l ≤_{βδιζη} \subst{P_l}{p_u}{q_u}_{u=1\ldots l-1})_{l=1\ldots r}\\ + 1 \leq j \leq k + \end{array} + \right. + ----------------------------- + E[] ⊢ I_j~q_1 … q_r :∀ [p_{r+1} :P_{r+1} ;…;p_p :P_p], (A_j)_{/s_j} + +provided that the following side conditions hold: + + + :math:`Γ_{P′}` is the context obtained from :math:`Γ_P` by replacing each :math:`P_l` that is + an arity with :math:`P_l'` for :math:`1≤ l ≤ r` (notice that :math:`P_l` arity implies :math:`P_l'` + arity since :math:`(E[] ⊢ P_l' ≤_{βδιζη} \subst{P_l}{p_u}{q_u}_{u=1\ldots l-1} )`; + + there are sorts :math:`s_i` , for :math:`1 ≤ i ≤ k` such that, for + :math:`Γ_{I'} = [I_1 :∀ Γ_{P'} ,(A_1)_{/s_1} ;…;I_k :∀ Γ_{P'} ,(A_k)_{/s_k}]` + we have :math:`(E[Γ_{I′} ;Γ_{P′}] ⊢ C_i : s_{q_i})_{i=1… n}` ; + + the sorts :math:`s_i` are such that all eliminations, to + :math:`\Prop`, :math:`\Set` and :math:`\Type(j)`, are allowed + (see Section Destructors_). + + + +Notice that if :math:`I_j~q_1 … q_r` is typable using the rules **Ind-Const** and +**App**, then it is typable using the rule **Ind-Family**. Conversely, the +extended theory is not stronger than the theory without **Ind-Family**. 
We +get an equiconsistency result by mapping each :math:`\ind{p}{Γ_I}{Γ_C}` +occurring into a given derivation into as many different inductive +types and constructors as the number of different (partial) +replacements of sorts, needed for this derivation, in the parameters +that are arities (this is possible because :math:`\ind{p}{Γ_I}{Γ_C}` well-formed +implies that :math:`\ind{p}{Γ_{I'}}{Γ_{C'}}` is well-formed and has the +same allowed eliminations, where :math:`Γ_{I′}` is defined as above and +:math:`Γ_{C′} = [c_1 :∀ Γ_{P′} ,C_1 ;…;c_n :∀ Γ_{P′} ,C_n ]`). That is, the changes in the +types of each partial instance :math:`q_1 … q_r` can be characterized by the +ordered sets of arity sorts among the types of parameters, and to each +signature is associated a new inductive definition with fresh names. +Conversion is preserved as any (partial) instance :math:`I_j~q_1 … q_r` or +:math:`C_i~q_1 … q_r` is mapped to the names chosen in the specific instance of +:math:`\ind{p}{Γ_I}{Γ_C}`. + +In practice, the rule **Ind-Family** is used by |Coq| only when all the +inductive types of the inductive definition are declared with an arity +whose sort is in the Type hierarchy. Then, the polymorphism is over +the parameters whose type is an arity of sort in the Type hierarchy. +The sorts :math:`s_j` are chosen canonically so that each :math:`s_j` is minimal with +respect to the hierarchy :math:`\Prop ⊂ \Set_p ⊂ \Type` where :math:`\Set_p` is predicative +:math:`\Set`. More precisely, an empty or small singleton inductive definition +(i.e. an inductive definition of which all inductive types are +singleton – see paragraph Destructors_) is set in :math:`\Prop`, a small non-singleton +inductive type is set in :math:`\Set` (even in case :math:`\Set` is impredicative – see +Section The-Calculus-of-Inductive-Construction-with-impredicative-Set_), +and otherwise in the Type hierarchy. 
+ +Note that the side-condition about allowed elimination sorts in the +rule **Ind-Family** is just to avoid to recompute the allowed elimination +sorts at each instance of a pattern-matching (see section Destructors_). As +an example, let us consider the following definition: + +.. example:: + .. coqtop:: in + + Inductive option (A:Type) : Type := + | None : option A + | Some : A -> option A. + +As the definition is set in the Type hierarchy, it is used +polymorphically over its parameters whose types are arities of a sort +in the Type hierarchy. Here, the parameter :math:`A` has this property, hence, +if :g:`option` is applied to a type in :math:`\Set`, the result is in :math:`\Set`. Note that +if :g:`option` is applied to a type in :math:`\Prop`, then, the result is not set in +:math:`\Prop` but in :math:`\Set` still. This is because :g:`option` is not a singleton type +(see section Destructors_) and it would lose the elimination to :math:`\Set` and :math:`\Type` +if set in :math:`\Prop`. + +.. example:: + .. coqtop:: all + + Check (fun A:Set => option A). + Check (fun A:Prop => option A). + +Here is another example. + +.. example:: + .. coqtop:: in + + Inductive prod (A B:Type) : Type := pair : A -> B -> prod A B. + +As :g:`prod` is a singleton type, it will be in :math:`\Prop` if applied twice to +propositions, in :math:`\Set` if applied twice to at least one type in :math:`\Set` and +none in :math:`\Type`, and in :math:`\Type` otherwise. In all cases, the three kind of +eliminations schemes are allowed. + +.. example:: + .. coqtop:: all + + Check (fun A:Set => prod A). + Check (fun A:Prop => prod A A). + Check (fun (A:Prop) (B:Set) => prod A B). + Check (fun (A:Type) (B:Prop) => prod A B). + +Remark: Template polymorphism used to be called “sort-polymorphism of +inductive types” before universe polymorphism (see Chapter :ref:`polymorphicuniverses`) was +introduced. + + +.. 
_Destructors: + +Destructors +~~~~~~~~~~~~~~~~~ + +The specification of inductive definitions with arities and +constructors is quite natural. But we still have to say how to use an +object in an inductive type. + +This problem is rather delicate. There are actually several different +ways to do that. Some of them are logically equivalent but not always +equivalent from the computational point of view or from the user point +of view. + +From the computational point of view, we want to be able to define a +function whose domain is an inductively defined type by using a +combination of case analysis over the possible constructors of the +object and recursion. + +Because we need to keep a consistent theory and also we prefer to keep +a strongly normalizing reduction, we cannot accept any sort of +recursion (even terminating). So the basic idea is to restrict +ourselves to primitive recursive functions and functionals. + +For instance, assuming a parameter :g:`A:Set` exists in the local context, +we want to build a function length of type :g:`list A -> nat` which computes +the length of the list, so such that :g:`(length (nil A)) = O` and :g:`(length +(cons A a l)) = (S (length l))`. We want these equalities to be +recognized implicitly and taken into account in the conversion rule. + +From the logical point of view, we have built a type family by giving +a set of constructors. We want to capture the fact that we do not have +any other way to build an object in this type. So when trying to prove +a property about an object :math:`m` in an inductive definition it is enough +to enumerate all the cases where :math:`m` starts with a different +constructor. + +In case the inductive definition is effectively a recursive one, we +want to capture the extra property that we have built the smallest +fixed point of this recursive equation. This says that we are only +manipulating finite objects. This analysis provides induction +principles. 
For instance, in order to prove :g:`∀ l:list A,(has_length A l +(length l))` it is enough to prove: + + ++ :g:`(has_length A (nil A) (length (nil A)))` ++ :g:`∀ a:A, ∀ l:list A, (has_length A l (length l)) →` + :g:`(has_length A (cons A a l) (length (cons A a l)))` + + +which given the conversion equalities satisfied by length is the same +as proving: + + ++ :g:`(has_length A (nil A) O)` ++ :g:`∀ a:A, ∀ l:list A, (has_length A l (length l)) →` + :g:`(has_length A (cons A a l) (S (length l)))` + + +One conceptually simple way to do that, following the basic scheme +proposed by Martin-Löf in his Intuitionistic Type Theory, is to +introduce for each inductive definition an elimination operator. At +the logical level it is a proof of the usual induction principle and +at the computational level it implements a generic operator for doing +primitive recursion over the structure. + +But this operator is rather tedious to implement and use. We choose in +this version of |Coq| to factorize the operator for primitive recursion +into two more primitive operations as was first suggested by Th. +Coquand in :cite:`Coq92`. One is the definition by pattern-matching. The +second one is a definition by guarded fixpoints. + + +.. _The-match…with-end-construction: + +**The match…with …end construction** +The basic idea of this operator is that we have an object :math:`m` in an +inductive type :math:`I` and we want to prove a property which possibly +depends on :math:`m`. For this, it is enough to prove the property for +:math:`m = (c_i~u_1 … u_{p_i} )` for each constructor of :math:`I`. +The |Coq| term for this proof +will be written: + +.. math:: + \Match~m~\with~(c_1~x_{11} ... x_{1p_1} ) ⇒ f_1 | … | (c_n~x_{n1} ... 
x_{np_n} ) ⇒ f_n \endkw + +In this expression, if :math:`m` eventually happens to evaluate to +:math:`(c_i~u_1 … u_{p_i})` then the expression will behave as specified in its :math:`i`-th branch +and it will reduce to :math:`f_i` where the :math:`x_{i1} …x_{ip_i}` are replaced by the +:math:`u_1 … u_{p_i}` according to the ι-reduction. + +Actually, for type-checking a :math:`\Match…\with…\endkw` expression we also need +to know the predicate P to be proved by case analysis. In the general +case where :math:`I` is an inductively defined :math:`n`-ary relation, :math:`P` is a predicate +over :math:`n+1` arguments: the :math:`n` first ones correspond to the arguments of :math:`I` +(parameters excluded), and the last one corresponds to object :math:`m`. |Coq| +can sometimes infer this predicate but sometimes not. The concrete +syntax for describing this predicate uses the :math:`\as…\In…\return` +construction. For instance, let us assume that :math:`I` is an unary predicate +with one parameter and one argument. The predicate is made explicit +using the syntax: + +.. math:: + \Match~m~\as~x~\In~I~\_~a~\return~P~\with~ + (c_1~x_{11} ... x_{1p_1} ) ⇒ f_1 | … + | (c_n~x_{n1} ... x_{np_n} ) ⇒ f_n~\endkw + +The :math:`\as` part can be omitted if either the result type does not depend +on :math:`m` (non-dependent elimination) or :math:`m` is a variable (in this case, :math:`m` +can occur in :math:`P` where it is considered a bound variable). The :math:`\In` part +can be omitted if the result type does not depend on the arguments +of :math:`I`. Note that the arguments of :math:`I` corresponding to parameters *must* +be :math:`\_`, because the result type is not generalized to all possible +values of the parameters. The other arguments of :math:`I` (sometimes called +indices in the literature) have to be variables (:math:`a` above) and these +variables can occur in :math:`P`. The expression after :math:`\In` must be seen as an +*inductive type pattern*. 
Notice that expansion of implicit arguments +and notations apply to this pattern. For the purpose of presenting the +inference rules, we use a more compact notation: + +.. math:: + \case(m,(λ a x . P), λ x_{11} ... x_{1p_1} . f_1~| … |~λ x_{n1} ...x_{np_n} . f_n ) + + +.. _Allowed-elimination-sorts: + +**Allowed elimination sorts.** An important question for building the typing rule for match is what +can be the type of :math:`λ a x . P` with respect to the type of :math:`m`. If :math:`m:I` +and :math:`I:A` and :math:`λ a x . P : B` then by :math:`[I:A|B]` we mean that one can use +:math:`λ a x . P` with :math:`m` in the above match-construct. + + +.. _Notations: + +**Notations.** The :math:`[I:A|B]` is defined as the smallest relation satisfying the +following rules: We write :math:`[I|B]` for :math:`[I:A|B]` where :math:`A` is the type of :math:`I`. + +The case of inductive definitions in sorts :math:`\Set` or :math:`\Type` is simple. +There is no restriction on the sort of the predicate to be eliminated. + +.. inference:: Prod + + [(I~x):A′|B′] + ----------------------- + [I:∀ x:A, A′|∀ x:A, B′] + + +.. inference:: Set & Type + + s_1 ∈ \{\Set,\Type(j)\} + s_2 ∈ \Sort + ---------------- + [I:s_1 |I→ s_2 ] + + +The case of Inductive definitions of sort :math:`\Prop` is a bit more +complicated, because of our interpretation of this sort. The only +harmless allowed elimination, is the one when predicate :math:`P` is also of +sort :math:`\Prop`. + +.. inference:: Prop + + ~ + --------------- + [I:Prop|I→Prop] + + +:math:`\Prop` is the type of logical propositions, the proofs of properties :math:`P` in +:math:`\Prop` could not be used for computation and are consequently ignored by +the extraction mechanism. Assume :math:`A` and :math:`B` are two propositions, and the +logical disjunction :math:`A ∨ B` is defined inductively by: + +.. example:: + .. coqtop:: in + + Inductive or (A B:Prop) : Prop := + or_introl : A -> or A B | or_intror : B -> or A B. 
+ + +The following definition which computes a boolean value by case over +the proof of :g:`or A B` is not accepted: + +.. example:: + .. coqtop:: all + + Fail Definition choice (A B: Prop) (x:or A B) := + match x with or_introl _ _ a => true | or_intror _ _ b => false end. + +From the computational point of view, the structure of the proof of +:g:`(or A B)` in this term is needed for computing the boolean value. + +In general, if :math:`I` has type :math:`\Prop` then :math:`P` cannot have type :math:`I→Set,` because +it will mean to build an informative proof of type :math:`(P~m)` doing a case +analysis over a non-computational object that will disappear in the +extracted program. But the other way is safe with respect to our +interpretation we can have :math:`I` a computational object and :math:`P` a +non-computational one, it just corresponds to proving a logical property +of a computational object. + +In the same spirit, elimination on :math:`P` of type :math:`I→Type` cannot be allowed +because it trivially implies the elimination on :math:`P` of type :math:`I→ Set` by +cumulativity. It also implies that there are two proofs of the same +property which are provably different, contradicting the proof- +irrelevance property which is sometimes a useful axiom: + +.. example:: + .. coqtop:: all + + Axiom proof_irrelevance : forall (P : Prop) (x y : P), x=y. + +The elimination of an inductive definition of type :math:`\Prop` on a predicate +:math:`P` of type :math:`I→ Type` leads to a paradox when applied to impredicative +inductive definition like the second-order existential quantifier +:g:`exProp` defined above, because it give access to the two projections on +this type. + + +.. _Empty-and-singleton-elimination: + +**Empty and singleton elimination.** There are special inductive definitions in +:math:`\Prop` for which more eliminations are allowed. + +.. 
inference:: Prop-extended + + I~\kw{is an empty or singleton definition} + s ∈ \Sort + ------------------------------------- + [I:Prop|I→ s] + +A *singleton definition* has only one constructor and all the +arguments of this constructor have type Prop. In that case, there is a +canonical way to interpret the informative extraction on an object in +that type, such that the elimination on any sort :math:`s` is legal. Typical +examples are the conjunction of non-informative propositions and the +equality. If there is an hypothesis :math:`h:a=b` in the local context, it can +be used for rewriting not only in logical propositions but also in any +type. + +.. example:: + .. coqtop:: all + + Print eq_rec. + Require Extraction. + Extraction eq_rec. + +An empty definition has no constructors, in that case also, +elimination on any sort is allowed. + + +.. _Type-of-branches: + +**Type of branches.** +Let :math:`c` be a term of type :math:`C`, we assume :math:`C` is a type of constructor for an +inductive type :math:`I`. Let :math:`P` be a term that represents the property to be +proved. We assume :math:`r` is the number of parameters and :math:`p` is the number of +arguments. + +We define a new type :math:`\{c:C\}^P` which represents the type of the branch +corresponding to the :math:`c:C` constructor. + +.. math:: + \begin{array}{ll} + \{c:(I~p_1\ldots p_r\ t_1 \ldots t_p)\}^P &\equiv (P~t_1\ldots ~t_p~c) \\ + \{c:\forall~x:T,C\}^P &\equiv \forall~x:T,\{(c~x):C\}^P + \end{array} + +We write :math:`\{c\}^P` for :math:`\{c:C\}^P` with :math:`C` the type of :math:`c`. + + +**Example.** +The following term in concrete syntax:: + + match t as l return P' with + | nil _ => t1 + | cons _ hd tl => t2 + end + + +can be represented in abstract syntax as + +.. math:: + \case(t,P,f 1 | f 2 ) + +where + +.. 
math:: + \begin{eqnarray*} + P & = & \lambda~l~.~P^\prime\\ + f_1 & = & t_1\\ + f_2 & = & \lambda~(hd:\nat)~.~\lambda~(tl:\List~\nat)~.~t_2 + \end{eqnarray*} + +According to the definition: + +.. math:: + \{(\kw{nil}~\nat)\}^P ≡ \{(\kw{nil}~\nat) : (\List~\nat)\}^P ≡ (P~(\kw{nil}~\nat)) + +.. math:: + + \begin{array}{rl} + \{(\kw{cons}~\nat)\}^P & ≡\{(\kw{cons}~\nat) : (\nat→\List~\nat→\List~\nat)\}^P \\ + & ≡∀ n:\nat, \{(\kw{cons}~\nat~n) : \List~\nat→\List~\nat)\}^P \\ + & ≡∀ n:\nat, ∀ l:\List~\nat, \{(\kw{cons}~\nat~n~l) : \List~\nat)\}^P \\ + & ≡∀ n:\nat, ∀ l:\List~\nat,(P~(\kw{cons}~\nat~n~l)). + \end{array} + +Given some :math:`P` then :math:`\{(\kw{nil}~\nat)\}^P` represents the expected type of :math:`f_1` , +and :math:`\{(\kw{cons}~\nat)\}^P` represents the expected type of :math:`f_2`. + + +.. _Typing-rule: + +**Typing rule.** +Our very general destructor for inductive definition enjoys the +following typing rule + +.. inference:: match + + \begin{array}{l} + E[Γ] ⊢ c : (I~q_1 … q_r~t_1 … t_s ) \\ + E[Γ] ⊢ P : B \\ + [(I~q_1 … q_r)|B] \\ + (E[Γ] ⊢ f_i : \{(c_{p_i}~q_1 … q_r)\}^P)_{i=1… l} + \end{array} + ------------------------------------------------ + E[Γ] ⊢ \case(c,P,f_1 |… |f_l ) : (P~t_1 … t_s~c) + +provided :math:`I` is an inductive type in a +definition :math:`\ind{r}{Γ_I}{Γ_C}` with :math:`Γ_C = [c_1 :C_1 ;…;c_n :C_n ]` and +:math:`c_{p_1} … c_{p_l}` are the only constructors of :math:`I`. + + + +**Example.** +Below is a typing rule for the term shown in the previous example: + +.. inference:: list example + + \begin{array}{l} + E[Γ] ⊢ t : (\List ~\nat) \\ + E[Γ] ⊢ P : B \\ + [(\List ~\nat)|B] \\ + E[Γ] ⊢ f_1 : {(\kw{nil} ~\nat)}^P \\ + E[Γ] ⊢ f_2 : {(\kw{cons} ~\nat)}^P + \end{array} + ------------------------------------------------ + E[Γ] ⊢ \case(t,P,f_1 |f_2 ) : (P~t) + + +.. _Definition-of-ι-reduction: + +**Definition of ι-reduction.** +We still have to define the ι-reduction in the general case. 
+ +An ι-redex is a term of the following form: + +.. math:: + \case((c_{p_i}~q_1 … q_r~a_1 … a_m ),P,f_1 |… |f_l ) + +with :math:`c_{p_i}` the :math:`i`-th constructor of the inductive type :math:`I` with :math:`r` +parameters. + +The ι-contraction of this term is :math:`(f_i~a_1 … a_m )` leading to the +general reduction rule: + +.. math:: + \case((c_{p_i}~q_1 … q_r~a_1 … a_m ),P,f_1 |… |f_n ) \triangleright_ι (f_i~a_1 … a_m ) + + +.. _Fixpoint-definitions: + +Fixpoint definitions +~~~~~~~~~~~~~~~~~~~~ + +The second operator for elimination is fixpoint definition. This +fixpoint may involve several mutually recursive definitions. The basic +concrete syntax for a recursive set of mutually recursive declarations +is (with :math:`Γ_i` contexts): + +.. math:: + \fix~f_1 (Γ_1 ) :A_1 :=t_1 \with … \with~f_n (Γ_n ) :A_n :=t_n + + +The terms are obtained by projections from this set of declarations +and are written + +.. math:: + \fix~f_1 (Γ_1 ) :A_1 :=t_1 \with … \with~f_n (Γ_n ) :A_n :=t_n \for~f_i + +In the inference rules, we represent such a term by + +.. math:: + \Fix~f_i\{f_1 :A_1':=t_1' … f_n :A_n':=t_n'\} + +with :math:`t_i'` (resp. :math:`A_i'`) representing the term :math:`t_i` abstracted (resp. +generalized) with respect to the bindings in the context Γ_i , namely +:math:`t_i'=λ Γ_i . t_i` and :math:`A_i'=∀ Γ_i , A_i`. + + +Typing rule ++++++++++++ + +The typing rule is the expected one for a fixpoint. + +.. inference:: Fix + + (E[Γ] ⊢ A_i : s_i )_{i=1… n} + (E[Γ,f_1 :A_1 ,…,f_n :A_n ] ⊢ t_i : A_i )_{i=1… n} + ------------------------------------------------------- + E[Γ] ⊢ \Fix~f_i\{f_1 :A_1 :=t_1 … f_n :A_n :=t_n \} : A_i + + +Any fixpoint definition cannot be accepted because non-normalizing +terms allow proofs of absurdity. The basic scheme of recursion that +should be allowed is the one needed for defining primitive recursive +functionals. 
In that case the fixpoint enjoys a special syntactic +restriction, namely one of the arguments belongs to an inductive type, +the function starts with a case analysis and recursive calls are done +on variables coming from patterns and representing subterms. For +instance in the case of natural numbers, a proof of the induction +principle of type + +.. math:: + ∀ P:\nat→\Prop, (P~O)→(∀ n:\nat, (P~n)→(P~(\kw{S}~n)))→ ∀ n:\nat, (P~n) + +can be represented by the term: + +.. math:: + \begin{array}{l} + λ P:\nat→\Prop. λ f:(P~O). λ g:(∀ n:\nat, (P~n)→(P~(S~n))).\\ + \Fix~h\{h:∀ n:\nat, (P~n):=λ n:\nat. \case(n,P,f | λp:\nat. (g~p~(h~p)))\} + \end{array} + +Before accepting a fixpoint definition as being correctly typed, we +check that the definition is “guarded”. A precise analysis of this +notion can be found in :cite:`Gim94`. The first stage is to precise on which +argument the fixpoint will be decreasing. The type of this argument +should be an inductive definition. For doing this, the syntax of +fixpoints is extended and becomes + +.. math:: + \Fix~f_i\{f_1/k_1 :A_1':=t_1' … f_n/k_n :A_n':=t_n'\} + + +where :math:`k_i` are positive integers. Each :math:`k_i` represents the index of +parameter of :math:`f_i` , on which :math:`f_i` is decreasing. Each :math:`A_i` should be a +type (reducible to a term) starting with at least :math:`k_i` products +:math:`∀ y_1 :B_1 ,… ∀ y_{k_i} :B_{k_i} , A_i'` and :math:`B_{k_i}` an inductive type. + +Now in the definition :math:`t_i`, if :math:`f_j` occurs then it should be applied to +at least :math:`k_j` arguments and the :math:`k_j`-th argument should be +syntactically recognized as structurally smaller than :math:`y_{k_i}`. + +The definition of being structurally smaller is a bit technical. One +needs first to define the notion of *recursive arguments of a +constructor*. 
For an inductive definition :math:`\ind{r}{Γ_I}{Γ_C}`, if the +type of a constructor :math:`c` has the form +:math:`∀ p_1 :P_1 ,… ∀ p_r :P_r, ∀ x_1:T_1, … ∀ x_r :T_r, (I_j~p_1 … p_r~t_1 … t_s )`, +then the recursive +arguments will correspond to :math:`T_i` in which one of the :math:`I_l` occurs. + +The main rules for being structurally smaller are the following. +Given a variable :math:`y` of type an inductive definition in a declaration +:math:`\ind{r}{Γ_I}{Γ_C}` where :math:`Γ_I` is :math:`[I_1 :A_1 ;…;I_k :A_k]`, and :math:`Γ_C` is +:math:`[c_1 :C_1 ;…;c_n :C_n ]`, the terms structurally smaller than :math:`y` are: + + ++ :math:`(t~u)` and :math:`λ x:u . t` when :math:`t` is structurally smaller than :math:`y`. ++ :math:`\case(c,P,f_1 … f_n)` when each :math:`f_i` is structurally smaller than :math:`y`. + If :math:`c` is :math:`y` or is structurally smaller than :math:`y`, its type is an inductive + definition :math:`I_p` part of the inductive declaration corresponding to :math:`y`. + Each :math:`f_i` corresponds to a type of constructor + :math:`C_q ≡ ∀ p_1 :P_1 ,…,∀ p_r :P_r , ∀ y_1 :B_1 , … ∀ y_k :B_k , (I~a_1 … a_k )` + and can consequently be written :math:`λ y_1 :B_1' . … λ y_k :B_k'. g_i`. (:math:`B_i'` is + obtained from :math:`B_i` by substituting parameters variables) the variables + :math:`y_j` occurring in :math:`g_i` corresponding to recursive arguments :math:`B_i` (the + ones in which one of the :math:`I_l` occurs) are structurally smaller than y. + + +The following definitions are correct, we enter them using the ``Fixpoint`` +command as described in Section :ref:`TODO-1.3.4` and show the internal +representation. + +.. example:: + .. coqtop:: all + + Fixpoint plus (n m:nat) {struct n} : nat := + match n with + | O => m + | S p => S (plus p m) + end. + + Print plus. + Fixpoint lgth (A:Set) (l:list A) {struct l} : nat := + match l with + | nil _ => O + | cons _ a l' => S (lgth A l') + end. + Print lgth. 
+ Fixpoint sizet (t:tree) : nat := let (f) := t in S (sizef f) + with sizef (f:forest) : nat := + match f with + | emptyf => O + | consf t f => plus (sizet t) (sizef f) + end. + Print sizet. + +.. _Reduction-rule: + +Reduction rule +++++++++++++++ + +Let :math:`F` be the set of declarations: +:math:`f_1 /k_1 :A_1 :=t_1 …f_n /k_n :A_n:=t_n`. +The reduction for fixpoints is: + +.. math:: + (\Fix~f_i \{F\} a_1 …a_{k_i}) \triangleright_ι \subst{t_i}{f_k}{\Fix~f_k \{F\}}_{k=1… n} ~a_1 … a_{k_i} + +when :math:`a_{k_i}` starts with a constructor. This last restriction is needed +in order to keep strong normalization and corresponds to the reduction +for primitive recursive operators. The following reductions are now +possible: + +.. math:: + \def\plus{\mathsf{plus}} + \def\tri{\triangleright_\iota} + \begin{eqnarray*} + \plus~(\nS~(\nS~\nO))~(\nS~\nO) & \tri & \nS~(\plus~(\nS~\nO)~(\nS~\nO))\\ + & \tri & \nS~(\nS~(\plus~\nO~(\nS~\nO)))\\ + & \tri & \nS~(\nS~(\nS~\nO))\\ + \end{eqnarray*} + +.. _Mutual-induction: + +**Mutual induction** + +The principles of mutual induction can be automatically generated +using the Scheme command described in Section :ref:`TODO-13.1`. + + +.. _Admissible-rules-for-global-environments: + +Admissible rules for global environments +-------------------------------------------- + +From the original rules of the type system, one can show the +admissibility of rules which change the local context of definition of +objects in the global environment. We show here the admissible rules +that are used in the discharge mechanism at the end of a section. + + +.. _Abstraction: + +**Abstraction.** +One can modify a global declaration by generalizing it over a +previously assumed constant :math:`c`. For doing that, we need to modify the +reference to the global declaration in the subsequent global +environment and local context by explicitly applying this constant to +the constant :math:`c'`. 
+ +Below, if :math:`Γ` is a context of the form :math:`[y_1 :A_1 ;…;y_n :A_n]`, we write +:math:`∀x:U,\subst{Γ}{c}{x}` to mean +:math:`[y_1 :∀ x:U,\subst{A_1}{c}{x};…;y_n :∀ x:U,\subst{A_n}{c}{x}]` +and :math:`\subst{E}{|Γ|}{|Γ|c}` to mean the parallel substitution +:math:`E\{y_1 /(y_1~c)\}…\{y_n/(y_n~c)\}`. + + +.. _First-abstracting-property: + +**First abstracting property:** + +.. math:: + \frac{\WF{E;c:U;E′;c′:=t:T;E″}{Γ}} + {\WF{E;c:U;E′;c′:=λ x:U. \subst{t}{c}{x}:∀x:U,\subst{T}{c}{x};\subst{E″}{c′}{(c′~c)}} + {\subst{Γ}{c}{(c~c′)}}} + + +.. math:: + \frac{\WF{E;c:U;E′;c′:T;E″}{Γ}} + {\WF{E;c:U;E′;c′:∀ x:U,\subst{T}{c}{x};\subst{E″}{c′}{(c′~c)}}{Γ{c/(c~c′)}}} + +.. math:: + \frac{\WF{E;c:U;E′;\ind{p}{Γ_I}{Γ_C};E″}{Γ}} + {\WFTWOLINES{E;c:U;E′;\ind{p+1}{∀ x:U,\subst{Γ_I}{c}{x}}{∀ x:U,\subst{Γ_C}{c}{x}}; + \subst{E″}{|Γ_I ,Γ_C |}{|Γ_I ,Γ_C | c}} + {\subst{Γ}{|Γ_I ,Γ_C|}{|Γ_I ,Γ_C | c}}} + +One can similarly modify a global declaration by generalizing it over +a previously defined constant :math:`c′`. Below, if :math:`Γ` is a context of the form +:math:`[y_1 :A_1 ;…;y_n :A_n]`, we write :math:`\subst{Γ}{c}{u}` to mean +:math:`[y_1 :\subst{A_1} {c}{u};…;y_n:\subst{A_n} {c}{u}]`. + + +.. _Second-abstracting-property: + +**Second abstracting property:** + +.. math:: + \frac{\WF{E;c:=u:U;E′;c′:=t:T;E″}{Γ}} + {\WF{E;c:=u:U;E′;c′:=(\letin{x}{u:U}{\subst{t}{c}{x}}):\subst{T}{c}{u};E″}{Γ}} + +.. math:: + \frac{\WF{E;c:=u:U;E′;c′:T;E″}{Γ}} + {\WF{E;c:=u:U;E′;c′:\subst{T}{c}{u};E″}{Γ}} + +.. math:: + \frac{\WF{E;c:=u:U;E′;\ind{p}{Γ_I}{Γ_C};E″}{Γ}} + {\WF{E;c:=u:U;E′;\ind{p}{\subst{Γ_I}{c}{u}}{\subst{Γ_C}{c}{u}};E″}{Γ}} + +.. _Pruning-the-local-context: + +**Pruning the local context.** +If one abstracts or substitutes constants with the above rules then it +may happen that some declared or defined constant does not occur any +more in the subsequent global environment and in the local context. +One can consequently derive the following property. + + +.. 
_First-pruning-property:
+
+.. inference:: First pruning property:
+
+   \WF{E;c:U;E′}{Γ}
+   c~\kw{does not occur in}~E′~\kw{and}~Γ
+   --------------------------------------
+   \WF{E;E′}{Γ}
+
+
+.. _Second-pruning-property:
+
+.. inference:: Second pruning property:
+
+   \WF{E;c:=u:U;E′}{Γ}
+   c~\kw{does not occur in}~E′~\kw{and}~Γ
+   --------------------------------------
+   \WF{E;E′}{Γ}
+
+
+.. _Co-inductive-types:
+
+Co-inductive types
+----------------------
+
+The implementation also contains co-inductive definitions, which are
+types inhabited by infinite objects. More information on co-inductive
+definitions can be found in :cite:`Gimenez95b,Gim98,GimCas05`.
+
+
+.. _The-Calculus-of-Inductive-Construction-with-impredicative-Set:
+
+The Calculus of Inductive Constructions with impredicative Set
+-----------------------------------------------------------------
+
+|Coq| can be used as a type-checker for the Calculus of Inductive
+Constructions with an impredicative sort :math:`\Set` by using the compiler
+option ``-impredicative-set``. For example, using the ordinary `coqtop`
+command, the following is rejected,
+
+.. example:: 
+   .. coqtop:: all
+
+      Fail Definition id: Set := forall X:Set,X->X.
+
+while it will type-check, if one uses instead the `coqtop`
+``-impredicative-set`` option.
+
+The major change in the theory concerns the rule for product formation
+in the sort Set, which is extended to a domain in any sort:
+
+.. inference:: ProdImp
+
+   E[Γ] ⊢ T : s
+   s ∈ {\Sort}
+   E[Γ::(x:T)] ⊢ U : Set
+   ---------------------
+   E[Γ] ⊢ ∀ x:T,U : Set
+
+This extension has consequences on the inductive definitions which are
+allowed. In the impredicative system, one can build so-called *large
+inductive definitions* like the example of second-order existential
+quantifier (exSet).
+
+There should be restrictions on the eliminations which can be
+performed on such definitions. The elimination rules in the
+impredicative system for sort Set become:
+
+
+
+.. 
inference:: Set1 + + s ∈ \{Prop, Set\} + ----------------- + [I:Set|I→ s] + +.. inference:: Set2 + + I~\kw{is a small inductive definition} + s ∈ \{\Type(i)\} + ---------------- + [I:Set|I→ s] + + diff --git a/doc/sphinx/language/coq-library.rst b/doc/sphinx/language/coq-library.rst new file mode 100644 index 0000000000..29053d6a57 --- /dev/null +++ b/doc/sphinx/language/coq-library.rst @@ -0,0 +1,988 @@ +.. include:: ../replaces.rst + +.. _thecoqlibrary: + +The |Coq| library +================= + +:Source: https://coq.inria.fr/distrib/current/refman/stdlib.html +:Converted by: Pierre Letouzey + +.. index:: + single: Theories + + +The |Coq| library is structured into two parts: + + * **The initial library**: it contains elementary logical notions and + data-types. It constitutes the basic state of the system directly + available when running |Coq|; + + * **The standard library**: general-purpose libraries containing various + developments of |Coq| axiomatizations about sets, lists, sorting, + arithmetic, etc. This library comes with the system and its modules + are directly accessible through the ``Require`` command (see + Section :ref:`TODO-6.5.1-Require`); + +In addition, user-provided libraries or developments are provided by +|Coq| users' community. These libraries and developments are available +for download at http://coq.inria.fr (see +Section :ref:`userscontributions`). + +This chapter briefly reviews the |Coq| libraries whose contents can +also be browsed at http://coq.inria.fr/stdlib. + + + + +The basic library +----------------- + +This section lists the basic notions and results which are directly +available in the standard |Coq| system. Most of these constructions +are defined in the ``Prelude`` module in directory ``theories/Init`` +at the |Coq| root directory; this includes the modules +``Notations``, +``Logic``, +``Datatypes``, +``Specif``, +``Peano``, +``Wf`` and +``Tactics``. +Module ``Logic_Type`` also makes it in the initial state. 
+ + +Notations +~~~~~~~~~ + +This module defines the parsing and pretty-printing of many symbols +(infixes, prefixes, etc.). However, it does not assign a meaning to +these notations. The purpose of this is to define and fix once for all +the precedence and associativity of very common notations. The main +notations fixed in the initial state are : + +================ ============ =============== +Notation Precedence Associativity +================ ============ =============== +``_ -> _`` 99 right +``_ <-> _`` 95 no +``_ \/ _`` 85 right +``_ /\ _`` 80 right +``~ _`` 75 right +``_ = _`` 70 no +``_ = _ = _`` 70 no +``_ = _ :> _`` 70 no +``_ <> _`` 70 no +``_ <> _ :> _`` 70 no +``_ < _`` 70 no +``_ > _`` 70 no +``_ <= _`` 70 no +``_ >= _`` 70 no +``_ < _ < _`` 70 no +``_ < _ <= _`` 70 no +``_ <= _ < _`` 70 no +``_ <= _ <= _`` 70 no +``_ + _`` 50 left +``_ || _`` 50 left +``_ - _`` 50 left +``_ * _`` 40 left +``_ _`` 40 left +``_ / _`` 40 left +``- _`` 35 right +``/ _`` 35 right +``_ ^ _`` 30 right +================ ============ =============== + +Logic +~~~~~ + +The basic library of |Coq| comes with the definitions of standard +(intuitionistic) logical connectives (they are defined as inductive +constructions). They are equipped with an appealing syntax enriching the +subclass `form` of the syntactic class `term`. The syntax of `form` +is shown below: + +.. /!\ Please keep the blanks in the lines below, experimentally they produce + a nice last column. Or even better, find a proper way to do this! + +.. productionlist:: + form : True (True) + : | False (False) + : | ~ `form` (not) + : | `form` /\ `form` (and) + : | `form` \/ `form` (or) + : | `form` -> `form` (primitive implication) + : | `form` <-> `form` (iff) + : | forall `ident` : `type`, `form` (primitive for all) + : | exists `ident` [: `specif`], `form` (ex) + : | exists2 `ident` [: `specif`], `form` & `form` (ex2) + : | `term` = `term` (eq) + : | `term` = `term` :> `specif` (eq) + +.. 
note:: + + Implication is not defined but primitive (it is a non-dependent + product of a proposition over another proposition). There is also a + primitive universal quantification (it is a dependent product over a + proposition). The primitive universal quantification allows both + first-order and higher-order quantification. + +Propositional Connectives ++++++++++++++++++++++++++ + +.. index:: + single: Connectives + single: True (term) + single: I (term) + single: False (term) + single: not (term) + single: and (term) + single: conj (term) + single: proj1 (term) + single: proj2 (term) + single: or (term) + single: or_introl (term) + single: or_intror (term) + single: iff (term) + single: IF_then_else (term) + +First, we find propositional calculus connectives: + +.. coqtop:: in + + Inductive True : Prop := I. + Inductive False : Prop := . + Definition not (A: Prop) := A -> False. + Inductive and (A B:Prop) : Prop := conj (_:A) (_:B). + Section Projections. + Variables A B : Prop. + Theorem proj1 : A /\ B -> A. + Theorem proj2 : A /\ B -> B. + End Projections. + Inductive or (A B:Prop) : Prop := + | or_introl (_:A) + | or_intror (_:B). + Definition iff (P Q:Prop) := (P -> Q) /\ (Q -> P). + Definition IF_then_else (P Q R:Prop) := P /\ Q \/ ~ P /\ R. + +Quantifiers ++++++++++++ + +.. index:: + single: Quantifiers + single: all (term) + single: ex (term) + single: exists (term) + single: ex_intro (term) + single: ex2 (term) + single: exists2 (term) + single: ex_intro2 (term) + +Then we find first-order quantifiers: + +.. coqtop:: in + + Definition all (A:Set) (P:A -> Prop) := forall x:A, P x. + Inductive ex (A: Set) (P:A -> Prop) : Prop := + ex_intro (x:A) (_:P x). + Inductive ex2 (A:Set) (P Q:A -> Prop) : Prop := + ex_intro2 (x:A) (_:P x) (_:Q x). 
+ +The following abbreviations are allowed: + +====================== ======================================= +``exists x:A, P`` ``ex A (fun x:A => P)`` +``exists x, P`` ``ex _ (fun x => P)`` +``exists2 x:A, P & Q`` ``ex2 A (fun x:A => P) (fun x:A => Q)`` +``exists2 x, P & Q`` ``ex2 _ (fun x => P) (fun x => Q)`` +====================== ======================================= + +The type annotation ``:A`` can be omitted when ``A`` can be +synthesized by the system. + +Equality +++++++++ + +.. index:: + single: Equality + single: eq (term) + single: eq_refl (term) + +Then, we find equality, defined as an inductive relation. That is, +given a type ``A`` and an ``x`` of type ``A``, the +predicate :g:`(eq A x)` is the smallest one which contains ``x``. +This definition, due to Christine Paulin-Mohring, is equivalent to +define ``eq`` as the smallest reflexive relation, and it is also +equivalent to Leibniz' equality. + +.. coqtop:: in + + Inductive eq (A:Type) (x:A) : A -> Prop := + eq_refl : eq A x x. + +Lemmas +++++++ + +Finally, a few easy lemmas are provided. + +.. index:: + single: absurd (term) + single: eq_sym (term) + single: eq_trans (term) + single: f_equal (term) + single: sym_not_eq (term) + single: eq_ind_r (term) + single: eq_rec_r (term) + single: eq_rect (term) + single: eq_rect_r (term) + +.. coqtop:: in + + Theorem absurd : forall A C:Prop, A -> ~ A -> C. + Section equality. + Variables A B : Type. + Variable f : A -> B. + Variables x y z : A. + Theorem eq_sym : x = y -> y = x. + Theorem eq_trans : x = y -> y = z -> x = z. + Theorem f_equal : x = y -> f x = f y. + Theorem not_eq_sym : x <> y -> y <> x. + End equality. + Definition eq_ind_r : + forall (A:Type) (x:A) (P:A->Prop), P x -> forall y:A, y = x -> P y. + Definition eq_rec_r : + forall (A:Type) (x:A) (P:A->Set), P x -> forall y:A, y = x -> P y. + Definition eq_rect_r : + forall (A:Type) (x:A) (P:A->Type), P x -> forall y:A, y = x -> P y. + Hint Immediate eq_sym not_eq_sym : core. + +.. 
index::
+   single: f_equal2 ... f_equal5 (term)
+
+The theorem ``f_equal`` is extended to functions with two to five
+arguments. The theorems are named ``f_equal2``, ``f_equal3``,
+``f_equal4`` and ``f_equal5``.
+For instance ``f_equal3`` is defined the following way.
+
+.. coqtop:: in
+
+   Theorem f_equal3 :
+     forall (A1 A2 A3 B:Type) (f:A1 -> A2 -> A3 -> B)
+       (x1 y1:A1) (x2 y2:A2) (x3 y3:A3),
+       x1 = y1 -> x2 = y2 -> x3 = y3 -> f x1 x2 x3 = f y1 y2 y3.
+
+.. _datatypes:
+
+Datatypes
+~~~~~~~~~
+
+.. index::
+   single: Datatypes
+
+In the basic library, we find in ``Datatypes.v`` the definition
+of the basic data-types of programming,
+defined as inductive constructions over the sort ``Set``. Some of
+them come with a special syntax shown below (this syntax table is common with
+the next section :ref:`specification`):
+
+.. productionlist::
+   specif : `specif` * `specif` (prod)
+          : | `specif` + `specif` (sum)
+          : | `specif` + { `specif` } (sumor)
+          : | { `specif` } + { `specif` } (sumbool)
+          : | { `ident` : `specif` | `form` } (sig)
+          : | { `ident` : `specif` | `form` & `form` } (sig2)
+          : | { `ident` : `specif` & `specif` } (sigT)
+          : | { `ident` : `specif` & `specif` & `specif` } (sigT2)
+   term : (`term`, `term`) (pair)
+
+
+Programming
++++++++++++
+
+.. index::
+   single: Programming
+   single: unit (term)
+   single: tt (term)
+   single: bool (term)
+   single: true (term)
+   single: false (term)
+   single: nat (term)
+   single: O (term)
+   single: S (term)
+   single: option (term)
+   single: Some (term)
+   single: None (term)
+   single: identity (term)
+   single: refl_identity (term)
+
+.. coqtop:: in
+
+   Inductive unit : Set := tt.
+   Inductive bool : Set := true | false.
+   Inductive nat : Set := O | S (n:nat).
+   Inductive option (A:Set) : Set := Some (_:A) | None.
+   Inductive identity (A:Type) (a:A) : A -> Type :=
+     refl_identity : identity A a a.
+
+Note that zero is the letter ``O``, and *not* the numeral ``0``.
+ +The predicate ``identity`` is logically +equivalent to equality but it lives in sort ``Type``. +It is mainly maintained for compatibility. + +We then define the disjoint sum of ``A+B`` of two sets ``A`` and +``B``, and their product ``A*B``. + +.. index:: + single: sum (term) + single: A+B (term) + single: + (term) + single: inl (term) + single: inr (term) + single: prod (term) + single: A*B (term) + single: * (term) + single: pair (term) + single: fst (term) + single: snd (term) + +.. coqtop:: in + + Inductive sum (A B:Set) : Set := inl (_:A) | inr (_:B). + Inductive prod (A B:Set) : Set := pair (_:A) (_:B). + Section projections. + Variables A B : Set. + Definition fst (H: prod A B) := match H with + | pair _ _ x y => x + end. + Definition snd (H: prod A B) := match H with + | pair _ _ x y => y + end. + End projections. + +Some operations on ``bool`` are also provided: ``andb`` (with +infix notation ``&&``), ``orb`` (with +infix notation ``||``), ``xorb``, ``implb`` and ``negb``. + +.. _specification: + +Specification +~~~~~~~~~~~~~ + +The following notions defined in module ``Specif.v`` allow to build new data-types and specifications. +They are available with the syntax shown in the previous section :ref:`datatypes`. + +For instance, given :g:`A:Type` and :g:`P:A->Prop`, the construct +:g:`{x:A | P x}` (in abstract syntax :g:`(sig A P)`) is a +``Type``. We may build elements of this set as :g:`(exist x p)` +whenever we have a witness :g:`x:A` with its justification +:g:`p:P x`. + +From such a :g:`(exist x p)` we may in turn extract its witness +:g:`x:A` (using an elimination construct such as ``match``) but +*not* its justification, which stays hidden, like in an abstract +data-type. In technical terms, one says that ``sig`` is a *weak +(dependent) sum*. A variant ``sig2`` with two predicates is also +provided. + +.. index:: + single: {x:A | P x} (term) + single: sig (term) + single: exist (term) + single: sig2 (term) + single: exist2 (term) + +.. 
coqtop:: in + + Inductive sig (A:Set) (P:A -> Prop) : Set := exist (x:A) (_:P x). + Inductive sig2 (A:Set) (P Q:A -> Prop) : Set := + exist2 (x:A) (_:P x) (_:Q x). + +A *strong (dependent) sum* :g:`{x:A & P x}` may be also defined, +when the predicate ``P`` is now defined as a +constructor of types in ``Type``. + +.. index:: + single: {x:A & P x} (term) + single: sigT (term) + single: existT (term) + single: sigT2 (term) + single: existT2 (term) + single: projT1 (term) + single: projT2 (term) + +.. coqtop:: in + + Inductive sigT (A:Type) (P:A -> Type) : Type := existT (x:A) (_:P x). + Section Projections2. + Variable A : Type. + Variable P : A -> Type. + Definition projT1 (H:sigT A P) := let (x, h) := H in x. + Definition projT2 (H:sigT A P) := + match H return P (projT1 H) with + existT _ _ x h => h + end. + End Projections2. + Inductive sigT2 (A: Type) (P Q:A -> Type) : Type := + existT2 (x:A) (_:P x) (_:Q x). + +A related non-dependent construct is the constructive sum +:g:`{A}+{B}` of two propositions ``A`` and ``B``. + +.. index:: + single: sumbool (term) + single: left (term) + single: right (term) + single: {A}+{B} (term) + +.. coqtop:: in + + Inductive sumbool (A B:Prop) : Set := left (_:A) | right (_:B). + +This ``sumbool`` construct may be used as a kind of indexed boolean +data-type. An intermediate between ``sumbool`` and ``sum`` is +the mixed ``sumor`` which combines :g:`A:Set` and :g:`B:Prop` +in the construction :g:`A+{B}` in ``Set``. + +.. index:: + single: sumor (term) + single: inleft (term) + single: inright (term) + single: A+{B} (term) + +.. coqtop:: in + + Inductive sumor (A:Set) (B:Prop) : Set := + | inleft (_:A) + | inright (_:B). + +We may define variants of the axiom of choice, like in Martin-Löf's +Intuitionistic Type Theory. + +.. index:: + single: Choice (term) + single: Choice2 (term) + single: bool_choice (term) + +.. 
coqtop:: in + + Lemma Choice : + forall (S S':Set) (R:S -> S' -> Prop), + (forall x:S, {y : S' | R x y}) -> + {f : S -> S' | forall z:S, R z (f z)}. + Lemma Choice2 : + forall (S S':Set) (R:S -> S' -> Set), + (forall x:S, {y : S' & R x y}) -> + {f : S -> S' & forall z:S, R z (f z)}. + Lemma bool_choice : + forall (S:Set) (R1 R2:S -> Prop), + (forall x:S, {R1 x} + {R2 x}) -> + {f : S -> bool | + forall x:S, f x = true /\ R1 x \/ f x = false /\ R2 x}. + +The next construct builds a sum between a data-type :g:`A:Type` and +an exceptional value encoding errors: + +.. index:: + single: Exc (term) + single: value (term) + single: error (term) + +.. coqtop:: in + + Definition Exc := option. + Definition value := Some. + Definition error := None. + +This module ends with theorems, relating the sorts ``Set`` or +``Type`` and ``Prop`` in a way which is consistent with the +realizability interpretation. + +.. index:: + single: False_rect (term) + single: False_rec (term) + single: eq_rect (term) + single: absurd_set (term) + single: and_rect (term) + +.. coqtop:: in + + Definition except := False_rec. + Theorem absurd_set : forall (A:Prop) (C:Set), A -> ~ A -> C. + Theorem and_rect2 : + forall (A B:Prop) (P:Type), (A -> B -> P) -> A /\ B -> P. + + +Basic Arithmetics +~~~~~~~~~~~~~~~~~ + +The basic library includes a few elementary properties of natural +numbers, together with the definitions of predecessor, addition and +multiplication, in module ``Peano.v``. It also +provides a scope ``nat_scope`` gathering standard notations for +common operations (``+``, ``*``) and a decimal notation for +numbers, allowing for instance to write ``3`` for :g:`S (S (S O)))`. This also works on +the left hand side of a ``match`` expression (see for example +section :ref:`TODO-refine-example`). This scope is opened by default. + +.. example:: + + The following example is not part of the standard library, but it + shows the usage of the notations: + + .. 
coqtop:: in + + Fixpoint even (n:nat) : bool := + match n with + | 0 => true + | 1 => false + | S (S n) => even n + end. + +.. index:: + single: eq_S (term) + single: pred (term) + single: pred_Sn (term) + single: eq_add_S (term) + single: not_eq_S (term) + single: IsSucc (term) + single: O_S (term) + single: n_Sn (term) + single: plus (term) + single: plus_n_O (term) + single: plus_n_Sm (term) + single: mult (term) + single: mult_n_O (term) + single: mult_n_Sm (term) + +Now comes the content of module ``Peano``: + +.. coqtop:: in + + Theorem eq_S : forall x y:nat, x = y -> S x = S y. + Definition pred (n:nat) : nat := + match n with + | 0 => 0 + | S u => u + end. + Theorem pred_Sn : forall m:nat, m = pred (S m). + Theorem eq_add_S : forall n m:nat, S n = S m -> n = m. + Hint Immediate eq_add_S : core. + Theorem not_eq_S : forall n m:nat, n <> m -> S n <> S m. + Definition IsSucc (n:nat) : Prop := + match n with + | 0 => False + | S p => True + end. + Theorem O_S : forall n:nat, 0 <> S n. + Theorem n_Sn : forall n:nat, n <> S n. + Fixpoint plus (n m:nat) {struct n} : nat := + match n with + | 0 => m + | S p => S (p + m) + end + where "n + m" := (plus n m) : nat_scope. + Lemma plus_n_O : forall n:nat, n = n + 0. + Lemma plus_n_Sm : forall n m:nat, S (n + m) = n + S m. + Fixpoint mult (n m:nat) {struct n} : nat := + match n with + | 0 => 0 + | S p => m + p * m + end + where "n * m" := (mult n m) : nat_scope. + Lemma mult_n_O : forall n:nat, 0 = n * 0. + Lemma mult_n_Sm : forall n m:nat, n * m + n = n * (S m). + + +Finally, it gives the definition of the usual orderings ``le``, +``lt``, ``ge`` and ``gt``. + +.. index:: + single: le (term) + single: le_n (term) + single: le_S (term) + single: lt (term) + single: ge (term) + single: gt (term) + +.. coqtop:: in + + Inductive le (n:nat) : nat -> Prop := + | le_n : le n n + | le_S : forall m:nat, n <= m -> n <= (S m). + where "n <= m" := (le n m) : nat_scope. + Definition lt (n m:nat) := S n <= m. 
+ Definition ge (n m:nat) := m <= n. + Definition gt (n m:nat) := m < n. + +Properties of these relations are not initially known, but may be +required by the user from modules ``Le`` and ``Lt``. Finally, +``Peano`` gives some lemmas allowing pattern-matching, and a double +induction principle. + +.. index:: + single: nat_case (term) + single: nat_double_ind (term) + +.. coqtop:: in + + Theorem nat_case : + forall (n:nat) (P:nat -> Prop), + P 0 -> (forall m:nat, P (S m)) -> P n. + Theorem nat_double_ind : + forall R:nat -> nat -> Prop, + (forall n:nat, R 0 n) -> + (forall n:nat, R (S n) 0) -> + (forall n m:nat, R n m -> R (S n) (S m)) -> forall n m:nat, R n m. + + +Well-founded recursion +~~~~~~~~~~~~~~~~~~~~~~ + +The basic library contains the basics of well-founded recursion and +well-founded induction, in module ``Wf.v``. + +.. index:: + single: Well foundedness + single: Recursion + single: Well founded induction + single: Acc (term) + single: Acc_inv (term) + single: Acc_rect (term) + single: well_founded (term) + +.. coqtop:: in + + Section Well_founded. + Variable A : Type. + Variable R : A -> A -> Prop. + Inductive Acc (x:A) : Prop := + Acc_intro : (forall y:A, R y x -> Acc y) -> Acc x. + Lemma Acc_inv x : Acc x -> forall y:A, R y x -> Acc y. + Definition well_founded := forall a:A, Acc a. + Hypothesis Rwf : well_founded. + Theorem well_founded_induction : + forall P:A -> Set, + (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. + Theorem well_founded_ind : + forall P:A -> Prop, + (forall x:A, (forall y:A, R y x -> P y) -> P x) -> forall a:A, P a. + +The automatically generated scheme ``Acc_rect`` +can be used to define functions by fixpoints using +well-founded relations to justify termination. Assuming +extensionality of the functional used for the recursive call, the +fixpoint equation can be proved. + +.. index:: + single: Fix_F (term) + single: fix_eq (term) + single: Fix_F_inv (term) + single: Fix_F_eq (term) + +.. 
coqtop:: in
+
+   Section FixPoint.
+   Variable P : A -> Type.
+   Variable F : forall x:A, (forall y:A, R y x -> P y) -> P x.
+   Fixpoint Fix_F (x:A) (r:Acc x) {struct r} : P x :=
+     F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)).
+   Definition Fix (x:A) := Fix_F x (Rwf x).
+   Hypothesis F_ext :
+     forall (x:A) (f g:forall y:A, R y x -> P y),
+       (forall (y:A) (p:R y x), f y p = g y p) -> F x f = F x g.
+   Lemma Fix_F_eq :
+     forall (x:A) (r:Acc x),
+       F x (fun (y:A) (p:R y x) => Fix_F y (Acc_inv x r y p)) = Fix_F x r.
+   Lemma Fix_F_inv : forall (x:A) (r s:Acc x), Fix_F x r = Fix_F x s.
+   Lemma fix_eq : forall x:A, Fix x = F x (fun (y:A) (p:R y x) => Fix y).
+   End FixPoint.
+   End Well_founded.
+
+Accessing the Type level
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The basic library includes the definitions of the counterparts of some data-types and logical
+quantifiers at the ``Type`` level: negation, pair, and properties
+of ``identity``. This is the module ``Logic_Type.v``.
+
+.. index::
+   single: notT (term)
+   single: prodT (term)
+   single: pairT (term)
+
+.. coqtop:: in
+
+   Definition notT (A:Type) := A -> False.
+   Inductive prodT (A B:Type) : Type := pairT (_:A) (_:B).
+
+At the end, it defines data-types at the ``Type`` level.
+
+Tactics
+~~~~~~~
+
+A few tactics defined at the user level are provided in the initial
+state, in module ``Tactics.v``. They are listed at
+http://coq.inria.fr/stdlib, in paragraph ``Init``, link ``Tactics``.
+ + +The standard library +-------------------- + +Survey +~~~~~~ + +The rest of the standard library is structured into the following +subdirectories: + + * **Logic** : Classical logic and dependent equality + * **Arith** : Basic Peano arithmetic + * **PArith** : Basic positive integer arithmetic + * **NArith** : Basic binary natural number arithmetic + * **ZArith** : Basic relative integer arithmetic + * **Numbers** : Various approaches to natural, integer and cyclic numbers (currently axiomatically and on top of 2^31 binary words) + * **Bool** : Booleans (basic functions and results) + * **Lists** : Monomorphic and polymorphic lists (basic functions and results), Streams (infinite sequences defined with co-inductive types) + * **Sets** : Sets (classical, constructive, finite, infinite, power set, etc.) + * **FSets** : Specification and implementations of finite sets and finite maps (by lists and by AVL trees) + * **Reals** : Axiomatization of real numbers (classical, basic functions, integer part, fractional part, limit, derivative, Cauchy series, power series and results,...) + * **Relations** : Relations (definitions and basic results) + * **Sorting** : Sorted list (basic definitions and heapsort correctness) + * **Strings** : 8-bits characters and strings + * **Wellfounded** : Well-founded relations (basic results) + + +These directories belong to the initial load path of the system, and +the modules they provide are compiled at installation time. So they +are directly accessible with the command ``Require`` (see +Section :ref:`TODO-6.5.1-Require`). + +The different modules of the |Coq| standard library are documented +online at http://coq.inria.fr/stdlib. + +Peano’s arithmetic (nat) +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. index:: + single: Peano's arithmetic + single: nat_scope + +While in the initial state, many operations and predicates of Peano's +arithmetic are defined, further operations and results belong to other +modules. 
For instance, the decidability of the basic predicates are +defined here. This is provided by requiring the module ``Arith``. + +The following table describes the notations available in scope +``nat_scope`` : + +=============== =================== +Notation Interpretation +=============== =================== +``_ < _`` ``lt`` +``_ <= _`` ``le`` +``_ > _`` ``gt`` +``_ >= _`` ``ge`` +``x < y < z`` ``x < y /\ y < z`` +``x < y <= z`` ``x < y /\ y <= z`` +``x <= y < z`` ``x <= y /\ y < z`` +``x <= y <= z`` ``x <= y /\ y <= z`` +``_ + _`` ``plus`` +``_ - _`` ``minus`` +``_ * _`` ``mult`` +=============== =================== + + +Notations for integer arithmetics +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. index:: + single: Arithmetical notations + single: + (term) + single: * (term) + single: - (term) + singel: / (term) + single: <= (term) + single: >= (term) + single: < (term) + single: > (term) + single: ?= (term) + single: mod (term) + + +The following table describes the syntax of expressions +for integer arithmetics. It is provided by requiring and opening the module ``ZArith`` and opening scope ``Z_scope``. +It specifies how notations are interpreted and, when not +already reserved, the precedence and associativity. + +=============== ==================== ========== ============= +Notation Interpretation Precedence Associativity +=============== ==================== ========== ============= +``_ < _`` ``Z.lt`` +``_ <= _`` ``Z.le`` +``_ > _`` ``Z.gt`` +``_ >= _`` ``Z.ge`` +``x < y < z`` ``x < y /\ y < z`` +``x < y <= z`` ``x < y /\ y <= z`` +``x <= y < z`` ``x <= y /\ y < z`` +``x <= y <= z`` ``x <= y /\ y <= z`` +``_ ?= _`` ``Z.compare`` 70 no +``_ + _`` ``Z.add`` +``_ - _`` ``Z.sub`` +``_ * _`` ``Z.mul`` +``_ / _`` ``Z.div`` +``_ mod _`` ``Z.modulo`` 40 no +``- _`` ``Z.opp`` +``_ ^ _`` ``Z.pow`` +=============== ==================== ========== ============= + + +.. example:: + .. coqtop:: all reset + + Require Import ZArith. + Check (2 + 3)%Z. + Open Scope Z_scope. 
+ Check 2 + 3. + + +Real numbers library +~~~~~~~~~~~~~~~~~~~~ + +Notations for real numbers +++++++++++++++++++++++++++ + +This is provided by requiring and opening the module ``Reals`` and +opening scope ``R_scope``. This set of notations is very similar to +the notation for integer arithmetics. The inverse function was added. + +=============== =================== +Notation Interpretation +=============== =================== +``_ < _`` ``Rlt`` +``_ <= _`` ``Rle`` +``_ > _`` ``Rgt`` +``_ >= _`` ``Rge`` +``x < y < z`` ``x < y /\ y < z`` +``x < y <= z`` ``x < y /\ y <= z`` +``x <= y < z`` ``x <= y /\ y < z`` +``x <= y <= z`` ``x <= y /\ y <= z`` +``_ + _`` ``Rplus`` +``_ - _`` ``Rminus`` +``_ * _`` ``Rmult`` +``_ / _`` ``Rdiv`` +``- _`` ``Ropp`` +``/ _`` ``Rinv`` +``_ ^ _`` ``pow`` +=============== =================== + +.. example:: + .. coqtop:: all reset + + Require Import Reals. + Check (2 + 3)%R. + Open Scope R_scope. + Check 2 + 3. + +Some tactics for real numbers ++++++++++++++++++++++++++++++ + +In addition to the powerful ``ring``, ``field`` and ``fourier`` +tactics (see Chapter :ref:`tactics`), there are also: + +.. tacn:: discrR + :name: discrR + + Proves that two real integer constants are different. + +.. example:: + .. coqtop:: all reset + + Require Import DiscrR. + Open Scope R_scope. + Goal 5 <> 0. + discrR. + +.. tacn:: split_Rabs + :name: split_Rabs + + Allows unfolding the ``Rabs`` constant and splits corresponding conjunctions. + +.. example:: + .. coqtop:: all reset + + Require Import Reals. + Open Scope R_scope. + Goal forall x:R, x <= Rabs x. + intro; split_Rabs. + +.. tacn:: split_Rmult + :name: split_Rmult + + Splits a condition that a product is non null into subgoals + corresponding to the condition on each operand of the product. + +.. example:: + .. coqtop:: all reset + + Require Import Reals. + Open Scope R_scope. + Goal forall x y z:R, x * y * z <> 0. + intros; split_Rmult. 
+
+These tactics have been written with the tactic language Ltac
+described in Chapter :ref:`thetacticlanguage`.
+
+
+List library
+~~~~~~~~~~~~
+
+.. index::
+   single: Notations for lists
+   single: length (term)
+   single: head (term)
+   single: tail (term)
+   single: app (term)
+   single: rev (term)
+   single: nth (term)
+   single: map (term)
+   single: flat_map (term)
+   single: fold_left (term)
+   single: fold_right (term)
+
+Some elementary operations on polymorphic lists are defined here.
+They can be accessed by requiring module ``List``.
+
+It defines the following notions:
+
+  * ``length``
+  * ``head`` : first element (with default)
+  * ``tail`` : all but first element
+  * ``app`` : concatenation
+  * ``rev`` : reverse
+  * ``nth`` : accessing n-th element (with default)
+  * ``map`` : applying a function
+  * ``flat_map`` : applying a function returning lists
+  * ``fold_left`` : iterator (from head to tail)
+  * ``fold_right`` : iterator (from tail to head)
+
+The following table shows notations available when opening scope ``list_scope``.
+
+========== ============== ========== =============
+Notation   Interpretation Precedence Associativity
+========== ============== ========== =============
+``_ ++ _`` ``app``        60         right
+``_ :: _`` ``cons``       60         right
+========== ============== ========== =============
+
+.. _userscontributions:
+
+Users’ contributions
+--------------------
+
+Numerous users' contributions have been collected and are available at
+URL http://coq.inria.fr/opam/www/. On this web page, you have a list
+of all contributions with information (author, institution, quick
+description, etc.) and the possibility to download them one by one.
+You will also find information on how to submit a new
+contribution.
diff --git a/doc/sphinx/language/gallina-extensions.rst b/doc/sphinx/language/gallina-extensions.rst
new file mode 100644
index 0000000000..d618d90ad2
--- /dev/null
+++ b/doc/sphinx/language/gallina-extensions.rst
@@ -0,0 +1,2356 @@
+.. 
include:: ../replaces.rst + +.. _extensionsofgallina: + +Extensions of |Gallina| +======================= + +|Gallina| is the kernel language of |Coq|. We describe here extensions of +|Gallina|’s syntax. + +.. _record-types: + +Record types +---------------- + +The ``Record`` construction is a macro allowing the definition of +records as is done in many programming languages. Its syntax is +described in the grammar below. In fact, the ``Record`` macro is more general +than the usual record types, since it allows also for “manifest” +expressions. In this sense, the ``Record`` construction allows defining +“signatures”. + +.. _record_grammar: + + .. productionlist:: `sentence` + record : `record_keyword` ident [binders] [: sort] := [ident] { [`field` ; … ; `field`] }. + record_keyword : Record | Inductive | CoInductive + field : name [binders] : type [ where notation ] + : | name [binders] [: term] := term + +In the expression: + +.. cmd:: Record @ident {* @param } {? : @sort} := {? @ident} { {*; @ident {* @binder } : @term } }. + +the first identifier `ident` is the name of the defined record and `sort` is its +type. The optional identifier following ``:=`` is the name of its constructor. If it is omitted, +the default name ``Build_``\ `ident`, where `ident` is the record name, is used. If `sort` is +omitted, the default sort is `\Type`. The identifiers inside the brackets are the names of +fields. For a given field `ident`, its type is :g:`forall binder …, term`. +Remark that the type of a particular identifier may depend on a previously-given identifier. Thus the +order of the fields is important. Finally, each `param` is a parameter of the record. + +More generally, a record may have explicitly defined (a.k.a. manifest) +fields. For instance, we might have:: + + Record ident param : sort := { ident₁ : type₁ ; ident₂ := term₂ ; ident₃ : type₃ }. 
+ +in which case the correctness of |type_3| may rely on the instance |term_2| of |ident_2| and |term_2| in turn +may depend on |ident_1|. + +.. example:: + + .. coqtop:: reset all + + Record Rat : Set := mkRat + {sign : bool; + top : nat; + bottom : nat; + Rat_bottom_cond : 0 <> bottom; + Rat_irred_cond : + forall x y z:nat, (x * y) = top /\ (x * z) = bottom -> x = 1}. + +Remark here that the fields ``Rat_bottom_cond`` depends on the field ``bottom`` and ``Rat_irred_cond`` +depends on both ``top`` and ``bottom``. + +Let us now see the work done by the ``Record`` macro. First the macro +generates a variant type definition with just one constructor: + +.. cmd:: Variant @ident {* @params} : @sort := @ident {* (@ident : @term_1)}. + +To build an object of type `ident`, one should provide the constructor +|ident_0| with the appropriate number of terms filling the fields of the record. + +.. example:: Let us define the rational :math:`1/2`: + + .. coqtop:: in + + Theorem one_two_irred : forall x y z:nat, x * y = 1 /\ x * z = 2 -> x = 1. + Admitted. + + Definition half := mkRat true 1 2 (O_S 1) one_two_irred. + Check half. + +.. _record-named-fields-grammar: + + .. productionlist:: + term : {| [`field_def` ; … ; `field_def`] |} + field_def : name [binders] := `term` + +Alternatively, the following syntax allows creating objects by using named fields, as +shown in this grammar. The fields do not have to be in any particular order, nor do they have +to be all present if the missing ones can be inferred or prompted for +(see :ref:`programs`). + +.. coqtop:: all + + Definition half' := + {| sign := true; + Rat_bottom_cond := O_S 1; + Rat_irred_cond := one_two_irred |}. + +This syntax can be disabled globally for printing by + +.. cmd:: Unset Printing Records. + +For a given type, one can override this using either + +.. cmd:: Add Printing Record @ident. + +to get record syntax or + +.. cmd:: Add Printing Constructor @ident. + +to get constructor syntax. 
+ +This syntax can also be used for pattern matching. + +.. coqtop:: all + + Eval compute in ( + match half with + | {| sign := true; top := n |} => n + | _ => 0 + end). + +The macro generates also, when it is possible, the projection +functions for destructuring an object of type `\ident`. These +projection functions are given the names of the corresponding +fields. If a field is named `_` then no projection is built +for it. In our example: + +.. coqtop:: all + + Eval compute in top half. + Eval compute in bottom half. + Eval compute in Rat_bottom_cond half. + +An alternative syntax for projections based on a dot notation is +available: + +.. coqtop:: all + + Eval compute in half.(top). + +It can be activated for printing with + +.. cmd:: Set Printing Projections. + +.. example:: + + .. coqtop:: all + + Set Printing Projections. + Check top half. + +.. _record_projections_grammar: + + .. productionlist:: terms + term : term `.` ( qualid ) + : | term `.` ( qualid arg … arg ) + : | term `.` ( @`qualid` `term` … `term` ) + + Syntax of Record projections + +The corresponding grammar rules are given in the preceding grammar. When `qualid` +denotes a projection, the syntax `term.(qualid)` is equivalent to `qualid term`, +the syntax `term.(qualid` |arg_1| |arg_n| `)` to `qualid` |arg_1| `…` |arg_n| `term`, +and the syntax `term.(@qualid` |term_1| |term_n| `)` to `@qualid` |term_1| `…` |term_n| `term`. +In each case, `term` is the object projected and the +other arguments are the parameters of the inductive type. + +.. note::. Records defined with the ``Record`` keyword are not allowed to be + recursive (references to the record's name in the type of its field + raises an error). To define recursive records, one can use the ``Inductive`` + and ``CoInductive`` keywords, resulting in an inductive or co-inductive record. + A *caveat*, however, is that records cannot appear in mutually inductive + (or co-inductive) definitions. + +.. 
note:: Induction schemes are automatically generated for inductive records. + Automatic generation of induction schemes for non-recursive records + defined with the ``Record`` keyword can be activated with the + ``Nonrecursive Elimination Schemes`` option (see :ref:`TODO-13.1.1-nonrecursive-elimination-schemes`). + +.. note::``Structure`` is a synonym of the keyword ``Record``. + +.. warn:: @ident cannot be defined. + + It can happen that the definition of a projection is impossible. + This message is followed by an explanation of this impossibility. + There may be three reasons: + + #. The name `ident` already exists in the environment (see Section :ref:`TODO-1.3.1-axioms`). + #. The body of `ident` uses an incorrect elimination for + `ident` (see Sections :ref:`TODO-1.3.4-fixpoint` and :ref:`TODO-4.5.3-case-expr`). + #. The type of the projections `ident` depends on previous + projections which themselves could not be defined. + +.. exn:: Records declared with the keyword Record or Structure cannot be recursive. + + The record name `ident` appears in the type of its fields, but uses + the keyword ``Record``. Use the keyword ``Inductive`` or ``CoInductive`` instead. + +.. exn:: Cannot handle mutually (co)inductive records. + + Records cannot be defined as part of mutually inductive (or + co-inductive) definitions, whether with records only or mixed with + standard definitions. + +During the definition of the one-constructor inductive definition, all +the errors of inductive definitions, as described in Section +:ref:`TODO-1.3.3-inductive-definitions`, may also occur. + +**See also** Coercions and records in Section :ref:`TODO-18.9-coercions-and-records` of the chapter devoted to coercions. + +.. _primitive_projections: + +Primitive Projections +~~~~~~~~~~~~~~~~~~~~~ + +The option ``Set Primitive Projections`` turns on the use of primitive +projections when defining subsequent records (even through the ``Inductive`` +and ``CoInductive`` commands). 
Primitive projections +extended the Calculus of Inductive Constructions with a new binary +term constructor `r.(p)` representing a primitive projection `p` applied +to a record object `r` (i.e., primitive projections are always applied). +Even if the record type has parameters, these do not appear at +applications of the projection, considerably reducing the sizes of +terms when manipulating parameterized records and typechecking time. +On the user level, primitive projections can be used as a replacement +for the usual defined ones, although there are a few notable differences. + +The internally omitted parameters can be reconstructed at printing time +even though they are absent in the actual AST manipulated by the kernel. This +can be obtained by setting the ``Printing Primitive Projection Parameters`` +flag. Another compatibility printing can be activated thanks to the +``Printing Primitive Projection Compatibility`` option which governs the +printing of pattern-matching over primitive records. + +Primitive Record Types +++++++++++++++++++++++ + +When the ``Set Primitive Projections`` option is on, definitions of +record types change meaning. When a type is declared with primitive +projections, its :g:`match` construct is disabled (see :ref:`primitive_projections` though). +To eliminate the (co-)inductive type, one must use its defined primitive projections. + +For compatibility, the parameters still appear to the user when +printing terms even though they are absent in the actual AST +manipulated by the kernel. This can be changed by unsetting the +``Printing Primitive Projection Parameters`` flag. Further compatibility +printing can be deactivated thanks to the ``Printing Primitive Projection +Compatibility`` option which governs the printing of pattern-matching +over primitive records. + +There are currently two ways to introduce primitive records types: + +#. Through the ``Record`` command, in which case the type has to be + non-recursive. 
The defined type enjoys eta-conversion definitionally, + that is the generalized form of surjective pairing for records: + `r` ``= Build_``\ `R` ``(``\ `r`\ ``.(``\ |p_1|\ ``) …`` `r`\ ``.(``\ |p_n|\ ``))``. + Eta-conversion allows to define dependent elimination for these types as well. +#. Through the ``Inductive`` and ``CoInductive`` commands, when + the body of the definition is a record declaration of the form + ``Build_``\ `R` ``{`` |p_1| ``:`` |t_1|\ ``; … ;`` |p_n| ``:`` |t_n| ``}``. + In this case the types can be recursive and eta-conversion is disallowed. These kind of record types + differ from their traditional versions in the sense that dependent + elimination is not available for them and only non-dependent case analysis + can be defined. + +Reduction ++++++++++ + +The basic reduction rule of a primitive projection is +|p_i| ``(Build_``\ `R` |t_1| … |t_n|\ ``)`` :math:`{\rightarrow_{\iota}}` |t_i|. +However, to take the :math:`{\delta}` flag into +account, projections can be in two states: folded or unfolded. An +unfolded primitive projection application obeys the rule above, while +the folded version delta-reduces to the unfolded version. This allows to +precisely mimic the usual unfolding rules of constants. Projections +obey the usual ``simpl`` flags of the ``Arguments`` command in particular. +There is currently no way to input unfolded primitive projections at the +user-level, and one must use the ``Printing Primitive Projection Compatibility`` +to display unfolded primitive projections as matches and distinguish them from folded ones. + + +Compatibility Projections and :g:`match` +++++++++++++++++++++++++++++++++++++++++ + +To ease compatibility with ordinary record types, each primitive +projection is also defined as a ordinary constant taking parameters and +an object of the record type as arguments, and whose body is an +application of the unfolded primitive projection of the same name. 
These +constants are used when elaborating partial applications of the +projection. One can distinguish them from applications of the primitive +projection if the ``Printing Primitive Projection Parameters`` option +is off: For a primitive projection application, parameters are printed +as underscores while for the compatibility projections they are printed +as usual. + +Additionally, user-written :g:`match` constructs on primitive records +are desugared into substitution of the projections, they cannot be +printed back as :g:`match` constructs. + +Variants and extensions of :g:`match` +------------------------------------- + +.. _extended pattern-matching: + +Multiple and nested pattern-matching +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The basic version of :g:`match` allows pattern-matching on simple +patterns. As an extension, multiple nested patterns or disjunction of +patterns are allowed, as in ML-like languages. + +The extension just acts as a macro that is expanded during parsing +into a sequence of match on simple patterns. Especially, a +construction defined using the extended match is generally printed +under its expanded form (see ``Set Printing Matching`` in :ref:`controlling-match-pp`). + +See also: :ref:`extended pattern-matching`. + + +Pattern-matching on boolean values: the if expression +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For inductive types with exactly two constructors and for pattern-matching +expressions that do not depend on the arguments of the constructors, it is possible +to use a ``if … then … else`` notation. For instance, the definition + +.. coqtop:: all + + Definition not (b:bool) := + match b with + | true => false + | false => true + end. + +can be alternatively written + +.. coqtop:: reset all + + Definition not (b:bool) := if b then false else true. 
+ +More generally, for an inductive type with constructors |C_1| and |C_2|, +we have the following equivalence + +:: + + if term [dep_ret_type] then term₁ else term₂ ≡ + match term [dep_ret_type] with + | C₁ _ … _ => term₁ + | C₂ _ … _ => term₂ + end + +.. example:: + + .. coqtop:: all + + Check (fun x (H:{x=0}+{x<>0}) => + match H with + | left _ => true + | right _ => false + end). + +Notice that the printing uses the :g:`if` syntax because `sumbool` is +declared as such (see :ref:`controlling-match-pp`). + + +Irrefutable patterns: the destructuring let variants +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pattern-matching on terms inhabiting inductive type having only one +constructor can be alternatively written using :g:`let … in …` +constructions. There are two variants of them. + + +First destructuring let syntax +++++++++++++++++++++++++++++++ + +The expression :g:`let (`\ |ident_1|:g:`, … ,` |ident_n|\ :g:`) :=` |term_0|\ :g:`in` |term_1| performs +case analysis on |term_0| which must be in an inductive type with one +constructor having itself :math:`n` arguments. Variables |ident_1| … |ident_n| are +bound to the :math:`n` arguments of the constructor in expression |term_1|. For +instance, the definition + +.. coqtop:: reset all + + Definition fst (A B:Set) (H:A * B) := match H with + | pair x y => x + end. + +can be alternatively written + +.. coqtop:: reset all + + Definition fst (A B:Set) (p:A * B) := let (x, _) := p in x. + +Notice that reduction is different from regular :g:`let … in …` +construction since it happens only if |term_0| is in constructor form. +Otherwise, the reduction is blocked. + +The pretty-printing of a definition by matching on a irrefutable +pattern can either be done using :g:`match` or the :g:`let` construction +(see Section :ref:`controlling-match-pp`). 
+ +If term inhabits an inductive type with one constructor `C`, we have an +equivalence between + +:: + + let (ident₁, …, identₙ) [dep_ret_type] := term in term' + +and + +:: + + match term [dep_ret_type] with + C ident₁ … identₙ => term' + end + + +Second destructuring let syntax ++++++++++++++++++++++++++++++++ + +Another destructuring let syntax is available for inductive types with +one constructor by giving an arbitrary pattern instead of just a tuple +for all the arguments. For example, the preceding example can be +written: + +.. coqtop:: reset all + + Definition fst (A B:Set) (p:A*B) := let 'pair x _ := p in x. + +This is useful to match deeper inside tuples and also to use notations +for the pattern, as the syntax :g:`let ’p := t in b` allows arbitrary +patterns to do the deconstruction. For example: + +.. coqtop:: all + + Definition deep_tuple (A:Set) (x:(A*A)*(A*A)) : A*A*A*A := + let '((a,b), (c, d)) := x in (a,b,c,d). + + Notation " x 'With' p " := (exist _ x p) (at level 20). + + Definition proj1_sig' (A:Set) (P:A->Prop) (t:{ x:A | P x }) : A := + let 'x With p := t in x. + +When printing definitions which are written using this construct it +takes precedence over let printing directives for the datatype under +consideration (see Section :ref:`controlling-match-pp`). + + +.. _controlling-match-pp: + +Controlling pretty-printing of match expressions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following commands give some control over the pretty-printing +of :g:`match` expressions. + +Printing nested patterns ++++++++++++++++++++++++++ + +The Calculus of Inductive Constructions knows pattern-matching only +over simple patterns. It is however convenient to re-factorize nested +pattern-matching into a single pattern-matching over a nested +pattern. |Coq|’s printer tries to do such limited re-factorization. + +.. cmd:: Set Printing Matching. + +This tells |Coq| to try to use nested patterns. This is the default +behavior. + +.. 
cmd:: Unset Printing Matching. + +This tells |Coq| to print only simple pattern-matching problems in the +same way as the |Coq| kernel handles them. + +.. cmd:: Test Printing Matching. + +This tells if the printing matching mode is on or off. The default is +on. + +Factorization of clauses with same right-hand side +++++++++++++++++++++++++++++++++++++++++++++++++++ + +When several patterns share the same right-hand side, it is additionally +possible to share the clauses using disjunctive patterns. Assuming that the +printing matching mode is on, whether |Coq|'s printer shall try to do this kind +of factorization is governed by the following commands: + +.. cmd:: Set Printing Factorizable Match Patterns. + +This tells |Coq|'s printer to try to use disjunctive patterns. This is the +default behavior. + +.. cmd:: Unset Printing Factorizable Match Patterns. + +This tells |Coq|'s printer not to try to use disjunctive patterns. + +.. cmd:: Test Printing Factorizable Match Patterns. + +This tells if the factorization of clauses with same right-hand side is on or +off. + +Use of a default clause ++++++++++++++++++++++++ + +When several patterns share the same right-hand side which do not depend on the +arguments of the patterns, yet an extra factorization is possible: the +disjunction of patterns can be replaced with a `_` default clause. Assuming that +the printing matching mode and the factorization mode are on, whether |Coq|'s +printer shall try to use a default clause is governed by the following commands: + +.. cmd:: Set Printing Allow Default Clause. + +This tells |Coq|'s printer to use a default clause when relevant. This is the +default behavior. + +.. cmd:: Unset Printing Allow Default Clause. + +This tells |Coq|'s printer not to use a default clause. + +.. cmd:: Test Printing Allow Default Clause. + +This tells if the use of a default clause is allowed. 
+ +Printing of wildcard patterns +++++++++++++++++++++++++++++++ + +Some variables in a pattern may not occur in the right-hand side of +the pattern-matching clause. There are options to control the display +of these variables. + +.. cmd:: Set Printing Wildcard. + +The variables having no occurrences in the right-hand side of the +pattern-matching clause are just printed using the wildcard symbol +“_”. + +.. cmd:: Unset Printing Wildcard. + +The variables, even useless, are printed using their usual name. But +some non-dependent variables have no name. These ones are still +printed using a “_”. + +.. cmd:: Test Printing Wildcard. + +This tells if the wildcard printing mode is on or off. The default is +to print wildcard for useless variables. + + +Printing of the elimination predicate ++++++++++++++++++++++++++++++++++++++ + +In most of the cases, the type of the result of a matched term is +mechanically synthesizable. Especially, if the result type does not +depend of the matched term. + +.. cmd:: Set Printing Synth. + +The result type is not printed when |Coq| knows that it can re- +synthesize it. + +.. cmd:: Unset Printing Synth. + +This forces the result type to be always printed. + +.. cmd:: Test Printing Synth. + +This tells if the non-printing of synthesizable types is on or off. +The default is to not print synthesizable types. + + +Printing matching on irrefutable patterns +++++++++++++++++++++++++++++++++++++++++++ + +If an inductive type has just one constructor, pattern-matching can be +written using the first destructuring let syntax. + +.. cmd:: Add Printing Let @ident. + + This adds `ident` to the list of inductive types for which pattern-matching + is written using a let expression. + +.. cmd:: Remove Printing Let @ident. + + This removes ident from this list. Note that removing an inductive + type from this list has an impact only for pattern-matching written + using :g:`match`. 
Pattern-matching explicitly written using a destructuring + :g:`let` are not impacted. + +.. cmd:: Test Printing Let for @ident. + + This tells if `ident` belongs to the list. + +.. cmd:: Print Table Printing Let. + + This prints the list of inductive types for which pattern-matching is + written using a let expression. + + The list of inductive types for which pattern-matching is written + using a :g:`let` expression is managed synchronously. This means that it is + sensitive to the command ``Reset``. + + +Printing matching on booleans ++++++++++++++++++++++++++++++ + +If an inductive type is isomorphic to the boolean type, pattern-matching +can be written using ``if`` … ``then`` … ``else`` …: + +.. cmd:: Add Printing If @ident. + + This adds ident to the list of inductive types for which pattern-matching is + written using an if expression. + +.. cmd:: Remove Printing If @ident. + + This removes ident from this list. + +.. cmd:: Test Printing If for @ident. + + This tells if ident belongs to the list. + +.. cmd:: Print Table Printing If. + + This prints the list of inductive types for which pattern-matching is + written using an if expression. + +The list of inductive types for which pattern-matching is written +using an ``if`` expression is managed synchronously. This means that it is +sensitive to the command ``Reset``. + +This example emphasizes what the printing options offer. + +.. example:: + + .. coqtop:: all + + Definition snd (A B:Set) (H:A * B) := match H with + | pair x y => y + end. + + Test Printing Let for prod. + + Print snd. + + Remove Printing Let prod. + + Unset Printing Synth. + + Unset Printing Wildcard. + + Print snd. + +.. _advanced-recursive-functions: + +Advanced recursive functions +---------------------------- + +The following experimental command is available when the ``FunInd`` library has been loaded via ``Require Import FunInd``: + +.. cmd:: Function @ident {* @binder} { @decrease_annot } : @type := @term. 
+ +This command can be seen as a generalization of ``Fixpoint``. It is actually a wrapper +for several ways of defining a function *and other useful related +objects*, namely: an induction principle that reflects the recursive +structure of the function (see Section :ref:`TODO-8.5.5-functional-induction`) and its fixpoint equality. +The meaning of this declaration is to define a function ident, +similarly to ``Fixpoint`. Like in ``Fixpoint``, the decreasing argument must +be given (unless the function is not recursive), but it might not +necessarily be *structurally* decreasing. The point of the {} annotation +is to name the decreasing argument *and* to describe which kind of +decreasing criteria must be used to ensure termination of recursive +calls. + +The ``Function`` construction also enjoys the ``with`` extension to define +mutually recursive definitions. However, this feature does not work +for non structurally recursive functions. + +See the documentation of functional induction (:ref:`TODO-8.5.5-functional-induction`) +and ``Functional Scheme`` (:ref:`TODO-13.2-functional-scheme`) for how to use +the induction principle to easily reason about the function. + +Remark: To obtain the right principle, it is better to put rigid +parameters of the function as first arguments. For example it is +better to define plus like this: + +.. coqtop:: reset none + + Require Import FunInd. + +.. coqtop:: all + + Function plus (m n : nat) {struct n} : nat := + match n with + | 0 => m + | S p => S (plus m p) + end. + +than like this: + +.. coqtop:: reset all + + Function plus (n m : nat) {struct n} : nat := + match n with + | 0 => m + | S p => S (plus p m) + end. + + +*Limitations* + +|term_0| must be built as a *pure pattern-matching tree* (:g:`match … with`) +with applications only *at the end* of each branch. + +Function does not support partial application of the function being +defined. 
Thus, the following example cannot be accepted due to the +presence of partial application of `wrong` in the body of +`wrong` : + +.. coqtop:: all + + Fail Function wrong (C:nat) : nat := + List.hd 0 (List.map wrong (C::nil)). + +For now, dependent cases are not treated for non structurally +terminating functions. + +.. exn:: The recursive argument must be specified +.. exn:: No argument name @ident +.. exn:: Cannot use mutual definition with well-founded recursion or measure + +.. warn:: Cannot define graph for @ident + + The generation of the graph relation (`R_ident`) used to compute the induction scheme of ident + raised a typing error. Only `ident` is defined; the induction scheme + will not be generated. This error happens generally when: + + - the definition uses pattern matching on dependent types, + which ``Function`` cannot deal with yet. + - the definition is not a *pattern-matching tree* as explained above. + +.. warn:: Cannot define principle(s) for @ident + + The generation of the graph relation (`R_ident`) succeeded but the induction principle + could not be built. Only `ident` is defined. Please report. + +.. warn:: Cannot build functional inversion principle + + `functional inversion` will not be available for the function. + +See also: :ref:`TODO-13.2-generating-ind-principles` and ref:`TODO-8.5.5-functional-induction` + +Depending on the ``{…}`` annotation, different definition mechanisms are +used by ``Function``. A more precise description is given below. + +.. cmdv:: Function @ident {* @binder } : @type := @term + + Defines the not recursive function `ident` as if declared with `Definition`. 
Moreover + the following are defined: + + + `ident_rect`, `ident_rec` and `ident_ind`, which reflect the pattern + matching structure of `term` (see the documentation of :ref:`TODO-1.3.3-Inductive`); + + The inductive `R_ident` corresponding to the graph of `ident` (silently); + + `ident_complete` and `ident_correct` which are inversion information + linking the function and its graph. + +.. cmdv:: Function @ident {* @binder } { struct @ident } : @type := @term + + Defines the structural recursive function `ident` as if declared with ``Fixpoint``. Moreover the following are defined: + + + The same objects as above; + + The fixpoint equation of `ident`: `ident_equation`. + +.. cmdv:: Function @ident {* @binder } { measure @term @ident } : @type := @term +.. cmdv:: Function @ident {* @binder } { wf @term @ident } : @type := @term + + Defines a recursive function by well-founded recursion. The module ``Recdef`` + of the standard library must be loaded for this feature. The ``{}`` + annotation is mandatory and must be one of the following: + + + ``{measure`` `term` `ident` ``}`` with `ident` being the decreasing argument + and `term` being a function from type of `ident` to ``nat`` for which + value on the decreasing argument decreases (for the ``lt`` order on ``nat``) + at each recursive call of `term`. Parameters of the function are + bound in `term`\ ; + + ``{wf`` `term` `ident` ``}`` with `ident` being the decreasing argument and + `term` an ordering relation on the type of `ident` (i.e. of type + `T`\ :math:`_{\sf ident}` → `T`\ :math:`_{\sf ident}` → ``Prop``) for which the decreasing argument + decreases at each recursive call of `term`. The order must be well-founded. + Parameters of the function are bound in `term`. + + Depending on the annotation, the user is left with some proof + obligations that will be used to define the function. 
These proofs + are: proofs that each recursive call is actually decreasing with + respect to the given criteria, and (if the criteria is `wf`) a proof + that the ordering relation is well-founded. Once proof obligations are + discharged, the following objects are defined: + + + The same objects as with the struct; + + The lemma `ident`\ :math:`_{\sf tcc}` which collects all proof obligations in one + property; + + The lemmas `ident`\ :math:`_{\sf terminate}` and `ident`\ :math:`_{\sf F}` which is needed to be inlined + during extraction of ident. + + The way this recursive function is defined is the subject of several + papers by Yves Bertot and Antonia Balaa on the one hand, and Gilles + Barthe, Julien Forest, David Pichardie, and Vlad Rusu on the other + hand. Remark: Proof obligations are presented as several subgoals + belonging to a Lemma `ident`\ :math:`_{\sf tcc}`. + + +Section mechanism +----------------- + +The sectioning mechanism can be used to to organize a proof in +structured sections. Then local declarations become available (see +Section :ref:`TODO-1.3.2-Definitions`). + + +.. cmd:: Section @ident. + + This command is used to open a section named `ident`. + + +.. cmd:: End @ident. + + This command closes the section named `ident`. After closing of the + section, the local declarations (variables and local definitions) get + *discharged*, meaning that they stop being visible and that all global + objects defined in the section are generalized with respect to the + variables and local definitions they each depended on in the section. + + .. example:: + + .. coqtop:: all + + Section s1. + + Variables x y : nat. + + Let y' := y. + + Definition x' := S x. + + Definition x'' := x' + y'. + + Print x'. + + End s1. + + Print x'. + + Print x''. + + Notice the difference between the value of `x’` and `x’’` inside section + `s1` and outside. + + .. exn:: This is not the last opened section + +**Remarks:** + +#. 
Most commands, like ``Hint``, ``Notation``, option management, … which + appear inside a section are canceled when the section is closed. + + +Module system +------------- + +The module system provides a way of packaging related elements +together, as well as a means of massive abstraction. + + .. productionlist:: modules + module_type : qualid + : | `module_type` with Definition qualid := term + : | `module_type` with Module qualid := qualid + : | qualid qualid … qualid + : | !qualid qualid … qualid + module_binding : ( [Import|Export] ident … ident : module_type ) + module_bindings : `module_binding` … `module_binding` + module_expression : qualid … qualid + : | !qualid … qualid + + Syntax of modules + +In the syntax of module application, the ! prefix indicates that any +`Inline` directive in the type of the functor arguments will be ignored +(see :ref:`named_module_type` below). + + +.. cmd:: Module @ident. + + This command is used to start an interactive module named `ident`. + +.. cmdv:: Module @ident {* @module_binding}. + + Starts an interactive functor with + parameters given by module_bindings. + +.. cmdv:: Module @ident : @module_type. + + Starts an interactive module specifying its module type. + +.. cmdv:: Module @ident {* @module_binding} : @module_type. + + Starts an interactive functor with parameters given by the list of `module binding`, and output module + type `module_type`. + +.. cmdv:: Module @ident <: {+<: @module_type }. + + Starts an interactive module satisfying each `module_type`. + + .. cmdv:: Module @ident {* @module_binding} <: {+<; @module_type }. + + Starts an interactive functor with parameters given by the list of `module_binding`. The output module type + is verified against each `module_type`. + +.. cmdv:: Module [ Import | Export ]. + + Behaves like ``Module``, but automatically imports or exports the module. + +Reserved commands inside an interactive module +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
cmd:: Include @module. + + Includes the content of module in the current + interactive module. Here module can be a module expression or a module + type expression. If module is a high-order module or module type + expression then the system tries to instantiate module by the current + interactive module. + +.. cmd:: Include {+<+ @module}. + + is a shortcut for the commands ``Include`` `module` for each `module`. + +.. cmd:: End @ident. + + This command closes the interactive module `ident`. If the module type + was given the content of the module is matched against it and an error + is signaled if the matching fails. If the module is basic (is not a + functor) its components (constants, inductive types, submodules etc.) + are now available through the dot notation. + + .. exn:: No such label @ident + + .. exn:: Signature components for label @ident do not match + + .. exn:: This is not the last opened module + +.. cmd:: Module @ident := @module_expression. + + This command defines the module identifier `ident` to be equal + to `module_expression`. + + .. cmdv:: Module @ident {* @module_binding} := @module_expression. + + Defines a functor with parameters given by the list of `module_binding` and body `module_expression`. + + .. cmdv:: Module @ident {* @module_binding} : @module_type := @module_expression. + + Defines a functor with parameters given by the list of `module_binding` (possibly none), and output module type `module_type`, + with body `module_expression`. + + .. cmdv:: Module @ident {* @module_binding} <: {+<: @module_type} := @module_expression. + + Defines a functor with parameters given by module_bindings (possibly none) with body `module_expression`. + The body is checked against each |module_type_i|. + + .. cmdv:: Module @ident {* @module_binding} := {+<+ @module_expression}. + + is equivalent to an interactive module where each `module_expression` is included. + +.. _named_module_type: + +.. cmd:: Module Type @ident. 
+ +This command is used to start an interactive module type `ident`. + + .. cmdv:: Module Type @ident {* @module_binding}. + + Starts an interactive functor type with parameters given by `module_bindings`. + + +Reserved commands inside an interactive module type: +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. cmd:: Include @module. + + Same as ``Include`` inside a module. + +.. cmd:: Include {+<+ @module}. + + is a shortcut for the command ``Include`` `module` for each `module`. + +.. cmd:: @assumption_keyword Inline @assums. + + The instance of this assumption will be automatically expanded at functor application, except when + this functor application is prefixed by a ``!`` annotation. + +.. cmd:: End @ident. + + This command closes the interactive module type `ident`. + + .. exn:: This is not the last opened module type + +.. cmd:: Module Type @ident := @module_type. + + Defines a module type `ident` equal to `module_type`. + + .. cmdv:: Module Type @ident {* @module_binding} := @module_type. + + Defines a functor type `ident` specifying functors taking arguments `module_bindings` and + returning `module_type`. + + .. cmdv:: Module Type @ident {* @module_binding} := {+<+ @module_type }. + + is equivalent to an interactive module type were each `module_type` is included. + +.. cmd:: Declare Module @ident : @module_type. + + Declares a module `ident` of type `module_type`. + + .. cmdv:: Declare Module @ident {* @module_binding} : @module_type. + + Declares a functor with parameters given by the list of `module_binding` and output module type + `module_type`. + +.. example:: + + Let us define a simple module. + + .. coqtop:: all + + Module M. + + Definition T := nat. + + Definition x := 0. + + Definition y : bool. + + exact true. + + Defined. + + End M. + +Inside a module one can define constants, prove theorems and do any +other things that can be done in the toplevel. Components of a closed +module can be accessed using the dot notation: + +.. 
coqtop:: all + + Print M.x. + +A simple module type: + +.. coqtop:: all + + Module Type SIG. + + Parameter T : Set. + + Parameter x : T. + + End SIG. + +Now we can create a new module from M, giving it a less precise +specification: the y component is dropped as well as the body of x. + +.. coqtop:: all + + Module N : SIG with Definition T := nat := M. + + Print N.T. + + Print N.x. + + Fail Print N.y. + +.. reset to remove N (undo in last coqtop block doesn't seem to do that), invisibly redefine M, SIG +.. coqtop:: none reset + + Module M. + + Definition T := nat. + + Definition x := 0. + + Definition y : bool. + + exact true. + + Defined. + + End M. + + Module Type SIG. + + Parameter T : Set. + + Parameter x : T. + + End SIG. + +The definition of ``N`` using the module type expression ``SIG`` with +``Definition T := nat`` is equivalent to the following one: + +.. coqtop:: all + + Module Type SIG'. + + Definition T : Set := nat. + + Parameter x : T. + + End SIG'. + + Module N : SIG' := M. + +If we just want to be sure that the our implementation satisfies a +given module type without restricting the interface, we can use a +transparent constraint + +.. coqtop:: all + + Module P <: SIG := M. + + Print P.y. + +Now let us create a functor, i.e. a parametric module + +.. coqtop:: all + + Module Two (X Y: SIG). + + Definition T := (X.T * Y.T)%type. + + Definition x := (X.x, Y.x). + + End Two. + +and apply it to our modules and do some computations: + +.. coqtop:: all + + Module Q := Two M N. + + Eval compute in (fst Q.x + snd Q.x). + +In the end, let us define a module type with two sub-modules, sharing +some of the fields and give one of its possible implementations: + +.. coqtop:: all + + Module Type SIG2. + + Declare Module M1 : SIG. + + Module M2 <: SIG. + + Definition T := M1.T. + + Parameter x : T. + + End M2. + + End SIG2. + + Module Mod <: SIG2. + + Module M1. + + Definition T := nat. + + Definition x := 1. + + End M1. + + Module M2 := M. + + End Mod. 
+ +Notice that ``M`` is a correct body for the component ``M2`` since its ``T`` +component is equal to ``nat`` and hence ``M1.T`` as specified. + +**Remarks:** + +#. Modules and module types can be nested components of each other. +#. One can have sections inside a module or a module type, but not a + module or a module type inside a section. +#. Commands like ``Hint`` or ``Notation`` can also appear inside modules and + module types. Note that in case of a module definition like: + +:: + + Module N : SIG := M. + +or:: + + Module N : SIG. … End N. + +hints and the like valid for ``N`` are not those defined in ``M`` (or the module body) but the ones defined +in ``SIG``. + + +.. _import_qualid: + +.. cmd:: Import @qualid. + + If `qualid` denotes a valid basic module (i.e. its module type is a + signature), makes its components available by their short names. + +.. example:: + + .. coqtop:: reset all + + Module Mod. + + Definition T:=nat. + + Check T. + + End Mod. + + Check Mod.T. + + Fail Check T. + + Import Mod. + + Check T. + +Some features defined in modules are activated only when a module is +imported. This is for instance the case of notations (see :ref:`TODO-12.1-Notations`). + +Declarations made with the Local flag are never imported by the Import +command. Such declarations are only accessible through their fully +qualified name. + +.. example:: + + .. coqtop:: all + + Module A. + + Module B. + + Local Definition T := nat. + + End B. + + End A. + + Import A. + + Fail Check B.T. + + .. cmdv:: Export @qualid + + When the module containing the command Export qualid + is imported, qualid is imported as well. + + .. exn:: @qualid is not a module + + .. warn:: Trying to mask the absolute name @qualid! + +.. cmd:: Print Module @ident. + + Prints the module type and (optionally) the body of the module `ident`. + +.. cmd:: Print Module Type @ident. + + Prints the module type corresponding to `ident`. + +.. 
opt:: Short Module Printing + + This option (off by default) disables the printing of the types of fields, + leaving only their names, for the commands ``Print Module`` and ``Print Module Type``. + +.. cmd:: Locate Module @qualid. + + Prints the full name of the module `qualid`. + +Libraries and qualified names +--------------------------------- + +Names of libraries +~~~~~~~~~~~~~~~~~~ + +The theories developed in |Coq| are stored in *library files* which are +hierarchically classified into *libraries* and *sublibraries*. To +express this hierarchy, library names are represented by qualified +identifiers qualid, i.e. as list of identifiers separated by dots (see +:ref:`TODO-1.2.3-identifiers`). For instance, the library file ``Mult`` of the standard +|Coq| library ``Arith`` is named ``Coq.Arith.Mult``. The identifier that starts +the name of a library is called a *library root*. All library files of +the standard library of |Coq| have the reserved root |Coq| but library +file names based on other roots can be obtained by using |Coq| commands +(coqc, coqtop, coqdep, …) options ``-Q`` or ``-R`` (see :ref:`TODO-14.3.3-command-line-options`). +Also, when an interactive |Coq| session starts, a library of root ``Top`` is +started, unless option ``-top`` or ``-notop`` is set (see :ref:`TODO-14.3.3-command-line-options`). + + +Qualified names +~~~~~~~~~~~~~~~ + +Library files are modules which possibly contain submodules which +eventually contain constructions (axioms, parameters, definitions, +lemmas, theorems, remarks or facts). The *absolute name*, or *full +name*, of a construction in some library file is a qualified +identifier starting with the logical name of the library file, +followed by the sequence of submodules names encapsulating the +construction and ended by the proper name of the construction. 
+Typically, the absolute name ``Coq.Init.Logic.eq`` denotes Leibniz’ +equality defined in the module Logic in the sublibrary ``Init`` of the +standard library of |Coq|. + +The proper name that ends the name of a construction is the short name +(or sometimes base name) of the construction (for instance, the short +name of ``Coq.Init.Logic.eq`` is ``eq``). Any partial suffix of the absolute +name is a *partially qualified name* (e.g. ``Logic.eq`` is a partially +qualified name for ``Coq.Init.Logic.eq``). Especially, the short name of a +construction is its shortest partially qualified name. + +|Coq| does not accept two constructions (definition, theorem, …) with +the same absolute name but different constructions can have the same +short name (or even same partially qualified names as soon as the full +names are different). + +Notice that the notion of absolute, partially qualified and short +names also applies to library file names. + +**Visibility** + +|Coq| maintains a table called the name table which maps partially qualified +names of constructions to absolute names. This table is updated by the +commands ``Require`` (see :ref:`TODO-6.5.1-Require`), Import and Export (see :ref:`import_qualid`) and +also each time a new declaration is added to the context. An absolute +name is called visible from a given short or partially qualified name +when this latter name is enough to denote it. This means that the +short or partially qualified name is mapped to the absolute name in +|Coq| name table. Definitions flagged as Local are only accessible with +their fully qualified name (see :ref:`TODO-1.3.2-definitions`). + +It may happen that a visible name is hidden by the short name or a +qualified name of another construction. In this case, the name that +has been hidden must be referred to using one more level of +qualification. To ensure that a construction always remains +accessible, absolute names can never be hidden. + +.. example:: + + .. coqtop:: all + + Check 0. 
+ + Definition nat := bool. + + Check 0. + + Check Datatypes.nat. + + Locate nat. + +See also: Command Locate in :ref:`TODO-6.3.10-locate-qualid` and Locate Library in +:ref:`TODO-6.6.11-locate-library`. + + +Libraries and filesystem +~~~~~~~~~~~~~~~~~~~~~~~~ + +Please note that the questions described here have been subject to +redesign in |Coq| v8.5. Former versions of |Coq| use the same terminology +to describe slightly different things. + +Compiled files (``.vo`` and ``.vio``) store sub-libraries. In order to refer +to them inside |Coq|, a translation from file-system names to |Coq| names +is needed. In this translation, names in the file system are called +*physical* paths while |Coq| names are contrastingly called *logical* +names. + +A logical prefix Lib can be associated to a physical path `path` using +the command line option ``-Q`` `path` ``Lib``. All subfolders of path are +recursively associated to the logical path ``Lib`` extended with the +corresponding suffix coming from the physical path. For instance, the +folder ``path/fOO/Bar`` maps to ``Lib.fOO.Bar``. Subdirectories corresponding +to invalid |Coq| identifiers are skipped, and, by convention, +subdirectories named ``CVS`` or ``_darcs`` are skipped too. + +Thanks to this mechanism, .vo files are made available through the +logical name of the folder they are in, extended with their own +basename. For example, the name associated to the file +``path/fOO/Bar/File.vo`` is ``Lib.fOO.Bar.File``. The same caveat applies for +invalid identifiers. When compiling a source file, the ``.vo`` file stores +its logical name, so that an error is issued if it is loaded with the +wrong loadpath afterwards. + +Some folders have a special status and are automatically put in the +path. 
|Coq| commands associate automatically a logical path to files in +the repository trees rooted at the directory from where the command is +launched, coqlib/user-contrib/, the directories listed in the +`$COQPATH`, `${XDG_DATA_HOME}/coq/` and `${XDG_DATA_DIRS}/coq/` +environment variables (see `http://standards.freedesktop.org/basedir- +spec/basedir-spec-latest.html`_) with the same physical-to-logical +translation and with an empty logical prefix. + +The command line option ``-R`` is a variant of ``-Q`` which has the strictly +same behavior regarding loadpaths, but which also makes the +corresponding ``.vo`` files available through their short names in a way +not unlike the ``Import`` command (see :ref:`import_qualid`). For instance, ``-R`` `path` ``Lib`` +associates to the file ``path/fOO/Bar/File.vo`` the logical name +``Lib.fOO.Bar.File``, but allows this file to be accessed through the +short names ``fOO.Bar.File``, ``Bar.File`` and ``File``. If several files with +identical base name are present in different subdirectories of a +recursive loadpath, which of these files is found first may be system- +dependent and explicit qualification is recommended. The ``From`` argument +of the ``Require`` command can be used to bypass the implicit shortening +by providing an absolute root to the required file (see :ref:`TODO-6.5.1-require-qualid`). + +There also exists another independent loadpath mechanism attached to +OCaml object files (``.cmo`` or ``.cmxs``) rather than |Coq| object +files as described above. The OCaml loadpath is managed using +the option ``-I`` `path` (in the OCaml world, there is neither a +notion of logical name prefix nor a way to access files in +subdirectories of path). See the command ``Declare`` ``ML`` ``Module`` in +:ref:`TODO-6.5-compiled-files` to understand the need of the OCaml loadpath. + +See :ref:`TODO-14.3.3-command-line-options` for a more general view over the |Coq| command +line options. 
+ + +Implicit arguments +------------------ + +An implicit argument of a function is an argument which can be +inferred from contextual knowledge. There are different kinds of +implicit arguments that can be considered implicit in different ways. +There are also various commands to control the setting or the +inference of implicit arguments. + + +The different kinds of implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Implicit arguments inferable from the knowledge of other arguments of a function +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The first kind of implicit arguments covers the arguments that are +inferable from the knowledge of the type of other arguments of the +function, or of the type of the surrounding context of the +application. Especially, such implicit arguments correspond to +parameters dependent in the type of the function. Typical implicit +arguments are the type arguments in polymorphic functions. There are +several kinds of such implicit arguments. + +**Strict Implicit Arguments** + +An implicit argument can be either strict or non strict. An implicit +argument is said to be *strict* if, whatever the other arguments of the +function are, it is still inferable from the type of some other +argument. Technically, an implicit argument is strict if it +corresponds to a parameter which is not applied to a variable which +itself is another parameter of the function (since this parameter may +erase its arguments), not in the body of a match, and not itself +applied or matched against patterns (since the original form of the +argument can be lost by reduction). + +For instance, the first argument of +:: + + cons: forall A:Set, A -> list A -> list A + +in module ``List.v`` is strict because :g:`list` is an inductive type and :g:`A` +will always be inferable from the type :g:`list A` of the third argument of +:g:`cons`. 
On the contrary, the second argument of a term of type +:: + + forall P:nat->Prop, forall n:nat, P n -> ex nat P + +is implicit but not strict, since it can only be inferred from the +type :g:`P n` of the third argument and if :g:`P` is, e.g., :g:`fun _ => True`, it +reduces to an expression where ``n`` does not occur any longer. The first +argument :g:`P` is implicit but not strict either because it can only be +inferred from :g:`P n` and :g:`P` is not canonically inferable from an arbitrary +:g:`n` and the normal form of :g:`P n`. Consider, e.g., that :g:`n` is :math:`0` and the third +argument has type :g:`True`, then any :g:`P` of the form +:: + + fun n => match n with 0 => True | _ => anything end + +would be a solution of the inference problem. + +**Contextual Implicit Arguments** + +An implicit argument can be *contextual* or not. An implicit argument +is said *contextual* if it can be inferred only from the knowledge of +the type of the context of the current expression. For instance, the +only argument of:: + + nil : forall A:Set, list A` + +is contextual. Similarly, both arguments of a term of type:: + + forall P:nat->Prop, forall n:nat, P n \/ n = 0 + +are contextual (moreover, :g:`n` is strict and :g:`P` is not). + +**Reversible-Pattern Implicit Arguments** + +There is another class of implicit arguments that can be reinferred +unambiguously if all the types of the remaining arguments are known. +This is the class of implicit arguments occurring in the type of +another argument in position of reversible pattern, which means it is +at the head of an application but applied only to uninstantiated +distinct variables. Such an implicit argument is called *reversible- +pattern implicit argument*. A typical example is the argument :g:`P` of +nat_rec in +:: + + nat_rec : forall P : nat -> Set, P 0 -> + (forall n : nat, P n -> P (S n)) -> forall x : nat, P x + +(:g:`P` is reinferable by abstracting over :g:`n` in the type :g:`P n`). 
+ +See :ref:`controlling-rev-pattern-implicit-args` for the automatic declaration of reversible-pattern +implicit arguments. + +Implicit arguments inferable by resolution +++++++++++++++++++++++++++++++++++++++++++ + +This corresponds to a class of non-dependent implicit arguments that +are solved based on the structure of their type only. + + +Maximal or non maximal insertion of implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In case a function is partially applied, and the next argument to be +applied is an implicit argument, two disciplines are applicable. In +the first case, the function is considered to have no further +arguments: one says that the implicit argument is not maximally +inserted. In the second case, the function is considered to be +implicitly applied to the implicit arguments it is waiting for: one +says that the implicit argument is maximally inserted. + +Each implicit argument can be declared to have to be inserted +maximally or non maximally. This can be governed argument per argument +by the command ``Implicit Arguments`` (see Section :ref:`declare-implicit-args`) or globally by the +command ``Set Maximal Implicit Insertion`` (see Section :ref:`controlling-insertion-implicit-args`). +See also :ref:`displaying-implicit-args`. + + +Casual use of implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In a given expression, if it is clear that some argument of a function +can be inferred from the type of the other arguments, the user can +force the given argument to be guessed by replacing it by “_”. If +possible, the correct argument will be automatically generated. + +.. exn:: Cannot infer a term for this placeholder. + + |Coq| was not able to deduce an instantiation of a “_”. + +.. 
_declare-implicit-args: + +Declaration of implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In case one wants that some arguments of a given object (constant, +inductive types, constructors, assumptions, local or not) are always +inferred by |Coq|, one may declare once and for all which are the +expected implicit arguments of this object. There are two ways to do +this, *a priori* and *a posteriori*. + + +Implicit Argument Binders ++++++++++++++++++++++++++ + +In the first setting, one wants to explicitly give the implicit +arguments of a declared object as part of its definition. To do this, +one has to surround the bindings of implicit arguments by curly +braces: + +.. coqtop:: all + + Definition id {A : Type} (x : A) : A := x. + +This automatically declares the argument A of id as a maximally +inserted implicit argument. One can then do as-if the argument was +absent in every situation but still be able to specify it if needed: + +.. coqtop:: all + + Definition compose {A B C} (g : B -> C) (f : A -> B) := fun x => g (f x). + + Goal forall A, compose id id = id (A:=A). + + +The syntax is supported in all top-level definitions: +``Definition``, ``Fixpoint``, ``Lemma`` and so on. For (co-)inductive datatype +declarations, the semantics are the following: an inductive parameter +declared as an implicit argument need not be repeated in the inductive +definition but will become implicit for the constructors of the +inductive only, not the inductive type itself. For example: + +.. coqtop:: all + + Inductive list {A : Type} : Type := + | nil : list + | cons : A -> list -> list. + + Print list. + +One can always specify the parameter if it is not uniform using the +usual implicit arguments disambiguation syntax. + + +Declaring Implicit Arguments +++++++++++++++++++++++++++++ + +To set implicit arguments *a posteriori*, one can use the command: + +.. cmd:: Arguments @qualid {* @possibly_bracketed_ident }. 
+ +where the list of `possibly_bracketed_ident` is a prefix of the list of +arguments of `qualid` where the ones to be declared implicit are +surrounded by square brackets and the ones to be declared as maximally +inserted implicits are surrounded by curly braces. + +After the above declaration is issued, implicit arguments can just +(and have to) be skipped in any expression involving an application +of `qualid`. + +Implicit arguments can be cleared with the following syntax: + +.. cmd:: Arguments @qualid : clear implicits. + +.. cmdv:: Global Arguments @qualid {* @possibly_bracketed_ident } + + Says to recompute the implicit arguments of + `qualid` after ending of the current section if any, enforcing the + implicit arguments known from inside the section to be the ones + declared by the command. + +.. cmdv:: Local Arguments @qualid {* @possibly_bracketed_ident }. + + When in a module, tell not to activate the + implicit arguments of `qualid` declared by this command to contexts that + require the module. + +.. cmdv:: {? Global | Local } Arguments @qualid {*, {+ @possibly_bracketed_ident } }. + + For names of constants, inductive types, + constructors, lemmas which can only be applied to a fixed number of + arguments (this excludes for instance constants whose type is + polymorphic), multiple implicit arguments declarations can be given. + Depending on the number of arguments qualid is applied to in practice, + the longest applicable list of implicit arguments is used to select + which implicit arguments are inserted. For printing, the omitted + arguments are the ones of the longest list of implicit arguments of + the sequence. + +.. example:: + + .. coqtop:: reset all + + Inductive list (A:Type) : Type := + | nil : list A + | cons : A -> list A -> list A. + + Check (cons nat 3 (nil nat)). + + Arguments cons [A] _ _. + + Arguments nil [A]. + + Check (cons 3 nil). 
+ + Fixpoint map (A B:Type) (f:A->B) (l:list A) : list B := match l with nil => nil | cons a t => cons (f a) (map A B f t) end. + + Fixpoint length (A:Type) (l:list A) : nat := match l with nil => 0 | cons _ m => S (length A m) end. + + Arguments map [A B] f l. + + Arguments length {A} l. (* A has to be maximally inserted *) + + Check (fun l:list (list nat) => map length l). + + Arguments map [A B] f l, [A] B f l, A B f l. + + Check (fun l => map length l = map (list nat) nat length l). + +Remark: To know which are the implicit arguments of an object, use the +command ``Print Implicit`` (see :ref:`displaying-implicit-args`). + + +Automatic declaration of implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +|Coq| can also automatically detect what are the implicit arguments of a +defined object. The command is just + +.. cmd:: Arguments @qualid : default implicits. + +The auto-detection is governed by options telling if strict, +contextual, or reversible-pattern implicit arguments must be +considered or not (see :ref:`controlling-strict-implicit-args`, :ref:`controlling-strict-implicit-args`, +:ref:`controlling-rev-pattern-implicit-args`, and also :ref:`controlling-insertion-implicit-args`). + +.. cmdv:: Global Arguments @qualid : default implicits + + Tell to recompute the + implicit arguments of qualid after ending of the current section if + any. + +.. cmdv:: Local Arguments @qualid : default implicits + + When in a module, tell not to activate the implicit arguments of `qualid` computed by this + declaration to contexts that requires the module. + +.. example:: + + .. coqtop:: reset all + + Inductive list (A:Set) : Set := + | nil : list A + | cons : A -> list A -> list A. + + Arguments cons : default implicits. + + Print Implicit cons. + + Arguments nil : default implicits. + + Print Implicit nil. + + Set Contextual Implicit. + + Arguments nil : default implicits. + + Print Implicit nil. 
+ +The computation of implicit arguments takes account of the unfolding +of constants. For instance, the variable ``p`` below has type +``(Transitivity R)`` which is reducible to +``forall x,y:U, R x y -> forall z:U, R y z -> R x z``. As the variables ``x``, ``y`` and ``z`` +appear strictly in the body of the type, they are implicit. + +.. coqtop:: reset none + + Set Warnings "-local-declaration". + +.. coqtop:: all + + Variable X : Type. + + Definition Relation := X -> X -> Prop. + + Definition Transitivity (R:Relation) := forall x y:X, R x y -> forall z:X, R y z -> R x z. + + Variables (R : Relation) (p : Transitivity R). + + Arguments p : default implicits. + + Print p. + + Print Implicit p. + + Variables (a b c : X) (r1 : R a b) (r2 : R b c). + + Check (p r1 r2). + + +Mode for automatic declaration of implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In case one wants to systematically declare implicit the arguments +detectable as such, one may switch to the automatic declaration of +implicit arguments mode by using the command: + +.. cmd:: Set Implicit Arguments. + +Conversely, one may unset the mode by using ``Unset Implicit Arguments``. +The mode is off by default. Auto-detection of implicit arguments is +governed by options controlling whether strict and contextual implicit +arguments have to be considered or not. + +.. _controlling-strict-implicit-args: + +Controlling strict implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When the mode for automatic declaration of implicit arguments is on, +the default is to automatically set implicit only the strict implicit +arguments plus, for historical reasons, a small subset of the non-strict +implicit arguments. To relax this constraint and to set +implicit all non strict implicit arguments by default, use the command: + +.. cmd:: Unset Strict Implicit. 
+ +Conversely, use the command ``Set Strict Implicit`` to restore the +original mode that declares implicit only the strict implicit +arguments plus a small subset of the non strict implicit arguments. + +In the other way round, to capture exactly the strict implicit +arguments and no more than the strict implicit arguments, use the +command + +.. cmd:: Set Strongly Strict Implicit. + +Conversely, use the command ``Unset Strongly Strict Implicit`` to let the +option “Strict Implicit” decide what to do. + +Remark: In versions of |Coq| prior to version 8.0, the default was to +declare the strict implicit arguments as implicit. + +.. _controlling-contextual-implicit-args: + +Controlling contextual implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, |Coq| does not automatically set implicit the contextual +implicit arguments. To tell |Coq| to infer also contextual implicit +argument, use command + +.. cmd:: Set Contextual Implicit. + +Conversely, use command ``Unset Contextual Implicit`` to unset the +contextual implicit mode. + +.. _controlling-rev-pattern-implicit-args: + +Controlling reversible-pattern implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, |Coq| does not automatically set implicit the reversible-pattern +implicit arguments. To tell |Coq| to infer also reversible- +pattern implicit argument, use command + +.. cmd:: Set Reversible Pattern Implicit. + +Conversely, use command ``Unset Reversible Pattern Implicit`` to unset the +reversible-pattern implicit mode. + +.. _controlling-insertion-implicit-args: + +Controlling the insertion of implicit arguments not followed by explicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Implicit arguments can be declared to be automatically inserted when a +function is partially applied and the next argument of the function is +an implicit one. 
In case the implicit arguments are automatically +declared (with the command ``Set Implicit Arguments``), the command + +.. cmd:: Set Maximal Implicit Insertion. + +is used to tell to declare the implicit arguments with a maximal +insertion status. By default, automatically declared implicit +arguments are not declared to be insertable maximally. To restore the +default mode for maximal insertion, use the command + +.. cmd:: Unset Maximal Implicit Insertion. + +Explicit applications +~~~~~~~~~~~~~~~~~~~~~ + +In presence of non-strict or contextual argument, or in presence of +partial applications, the synthesis of implicit arguments may fail, so +one may have to give explicitly certain implicit arguments of an +application. The syntax for this is ``(`` `ident` ``:=`` `term` ``)`` where `ident` is the +name of the implicit argument and term is its corresponding explicit +term. Alternatively, one can locally deactivate the hiding of implicit +arguments of a function by using the notation `@qualid` |term_1| … |term_n|. +This syntax extension is given in the following grammar: + +.. _explicit_app_grammar: + + .. productionlist:: explicit_apps + term : @ qualid term … `term` + : | @ qualid + : | qualid `argument` … `argument` + argument : `term` + : | (ident := `term`) + + Syntax for explicitly giving implicit arguments + +.. example:: (continued) + + .. coqtop:: all + + Check (p r1 (z:=c)). + + Check (p (x:=a) (y:=b) r1 (z:=c) r2). + + +Renaming implicit arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Implicit arguments names can be redefined using the following syntax: + +.. cmd:: Arguments @qualid {* @name} : @rename. + +With the assert flag, ``Arguments`` can be used to assert that a given +object has the expected number of arguments and that these arguments +are named as expected. + +.. example:: (continued) + +.. coqtop:: all + + Arguments p [s t] _ [u] _: rename. + + Check (p r1 (u:=c)). + + Check (p (s:=a) (t:=b) r1 (u:=c) r2). 
+ + Fail Arguments p [s t] _ [w] _ : assert. + +.. _displaying-implicit-args: + +Displaying what the implicit arguments are +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To display the implicit arguments associated to an object, and to know +if each of them is to be used maximally or not, use the command + +.. cmd:: Print Implicit @qualid. + +Explicit displaying of implicit arguments for pretty-printing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default the basic pretty-printing rules hide the inferable implicit +arguments of an application. To force printing all implicit arguments, +use command + +.. cmd:: Set Printing Implicit. + +Conversely, to restore the hiding of implicit arguments, use command + +.. cmd:: Unset Printing Implicit. + +By default the basic pretty-printing rules display the implicit +arguments that are not detected as strict implicit arguments. This +“defensive” mode can quickly make the display cumbersome so this can +be deactivated by using the command + +.. cmd:: Unset Printing Implicit Defensive. + +Conversely, to force the display of non strict arguments, use command + +.. cmd:: Set Printing Implicit Defensive. + +See also: ``Set Printing All`` in :ref:`printing_constructions_full`. + +Interaction with subtyping +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When an implicit argument can be inferred from the type of more than +one of the other arguments, then only the type of the first of these +arguments is taken into account, and not an upper type of all of them. +As a consequence, the inference of the implicit argument of “=” fails +in + +.. coqtop:: all + + Fail Check nat = Prop. + +but succeeds in + +.. coqtop:: all + + Check Prop = nat. + + +Deactivation of implicit arguments for parsing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use of implicit arguments can be deactivated by issuing the command: + +.. cmd:: Set Parsing Explicit. 
+
+In this case, all arguments of constants, inductive types,
+constructors, etc., including the arguments declared as implicit, have
+to be given as if no arguments were implicit. By symmetry, this also
+affects printing. To restore parsing and normal printing of implicit
+arguments, use:
+
+.. cmd:: Unset Parsing Explicit.
+
+Canonical structures
+~~~~~~~~~~~~~~~~~~~~
+
+A canonical structure is an instance of a record/structure type that
+can be used to solve unification problems involving a projection
+applied to an unknown structure instance (an implicit argument) and a
+value. The complete documentation of canonical structures can be found
+in :ref:`canonicalstructures`; here only a simple example is given.
+
+Assume that `qualid` denotes an object ``(Build_struc`` |c_1| … |c_n| ``)`` in the
+structure *struct* of which the fields are |x_1|, …, |x_n|. Assume that
+`qualid` is declared as a canonical structure using the command
+
+.. cmd:: Canonical Structure @qualid.
+
+Then, each time an equation of the form ``(``\ |x_i| ``_)`` |eq_beta_delta_iota_zeta| |c_i| has to be
+solved during the type-checking process, `qualid` is used as a solution.
+In other words, `qualid` is canonically used to extend the field |c_i|
+into a complete structure built on |c_i|.
+
+Canonical structures are particularly useful when mixed with coercions
+and strict implicit arguments. Here is an example.
+
+.. coqtop:: all
+
+   Require Import Relations.
+
+   Require Import EqNat.
+
+   Set Implicit Arguments.
+
+   Unset Strict Implicit.
+
+   Structure Setoid : Type := {Carrier :> Set; Equal : relation Carrier;
+   Prf_equiv : equivalence Carrier Equal}.
+
+   Definition is_law (A B:Setoid) (f:A -> B) := forall x y:A, Equal x y -> Equal (f x) (f y).
+
+   Axiom eq_nat_equiv : equivalence nat eq_nat.
+
+   Definition nat_setoid : Setoid := Build_Setoid eq_nat_equiv.
+
+   Canonical Structure nat_setoid.
+
+Thanks to ``nat_setoid`` declared as canonical, the implicit arguments ``A``
+and ``B`` can be synthesized in the next statement.
+
+.. coqtop:: all
+
+   Lemma is_law_S : is_law S.
+
+Remark: If the same field occurs in several canonical structures, then
+only the structure declared first as canonical is considered.
+
+.. cmdv:: Canonical Structure @ident := @term : @type.
+
+.. cmdv:: Canonical Structure @ident := @term.
+
+.. cmdv:: Canonical Structure @ident : @type := @term.
+
+These are equivalent to a regular definition of `ident` followed by the declaration
+``Canonical Structure`` `ident`.
+
+See also: more examples in user contribution category (Rocq/ALGEBRA).
+
+
+Print Canonical Projections.
+++++++++++++++++++++++++++++
+
+This displays the list of global names that are components of some
+canonical structure. For each of them, the canonical structure of
+which it is a projection is indicated. For instance, the above example
+gives the following output:
+
+.. coqtop:: all
+
+   Print Canonical Projections.
+
+
+Implicit types of variables
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to bind variable names to a given type (e.g. in a
+development using arithmetic, it may be convenient to bind the names `n`
+or `m` to the type ``nat`` of natural numbers). The command for that is
+
+.. cmd:: Implicit Types {+ @ident } : @type.
+
+The effect of the command is to automatically set the type of bound
+variables starting with `ident` (either `ident` itself or `ident` followed by
+one or more single quotes, underscore or digits) to be `type` (unless
+the bound variable is already declared with an explicit type in which
+case, this latter type is considered).
+
+.. example::
+
+   .. coqtop:: all
+
+      Require Import List.
+
+      Implicit Types m n : nat.
+
+      Lemma cons_inj_nat : forall m n l, n :: l = m :: l -> n = m.
+
+      intros m n.
+
+      Lemma cons_inj_bool : forall (m n:bool) l, n :: l = m :: l -> n = m.
+
+.. cmdv:: Implicit Type @ident : @type.
+ + This is useful for declaring the implicit type of a single variable. + +.. cmdv:: Implicit Types {+ ( {+ @ident } : @term ) } + + Adds blocks of implicit types with different specifications. + +Implicit generalization +~~~~~~~~~~~~~~~~~~~~~~~ + +.. index:: `{ } +.. index:: `( ) + +Implicit generalization is an automatic elaboration of a statement +with free variables into a closed statement where these variables are +quantified explicitly. Implicit generalization is done inside binders +starting with a \` and terms delimited by \`{ } and \`( ), always +introducing maximally inserted implicit arguments for the generalized +variables. Inside implicit generalization delimiters, free variables +in the current context are automatically quantified using a product or +a lambda abstraction to generate a closed term. In the following +statement for example, the variables n and m are automatically +generalized and become explicit arguments of the lemma as we are using +\`( ): + +.. coqtop:: all + + Generalizable All Variables. + + Lemma nat_comm : `(n = n + 0). + +One can control the set of generalizable identifiers with +the ``Generalizable`` vernacular command to avoid unexpected +generalizations when mistyping identifiers. There are several commands +that specify which variables should be generalizable. + +.. cmd:: Generalizable All Variables. + + All variables are candidate for + generalization if they appear free in the context under a + generalization delimiter. This may result in confusing errors in case + of typos. In such cases, the context will probably contain some + unexpected generalized variable. + +.. cmd:: Generalizable No Variables. + + Disable implicit generalization entirely. This is the default behavior. + +.. cmd:: Generalizable (Variable | Variables) {+ @ident }. + + Allow generalization of the given identifiers only. Calling this command multiple times + adds to the allowed identifiers. + +.. cmd:: Global Generalizable. 
+ + Allows exporting the choice of generalizable variables. + +One can also use implicit generalization for binders, in which case +the generalized variables are added as binders and set maximally +implicit. + +.. coqtop:: all + + Definition id `(x : A) : A := x. + + Print id. + +The generalizing binders \`{ } and \`( ) work similarly to their +explicit counterparts, only binding the generalized variables +implicitly, as maximally-inserted arguments. In these binders, the +binding name for the bound object is optional, whereas the type is +mandatory, dually to regular binders. + + +Coercions +--------- + +Coercions can be used to implicitly inject terms from one *class* in +which they reside into another one. A *class* is either a sort +(denoted by the keyword ``Sortclass``), a product type (denoted by the +keyword ``Funclass``), or a type constructor (denoted by its name), e.g. +an inductive type or any constant with a type of the form +``forall (`` |x_1| : |A_1| ) … ``(``\ |x_n| : |A_n|\ ``)``, `s` where `s` is a sort. + +Then the user is able to apply an object that is not a function, but +can be coerced to a function, and more generally to consider that a +term of type ``A`` is of type ``B`` provided that there is a declared coercion +between ``A`` and ``B``. The main command is + +.. cmd:: Coercion @qualid : @class >-> @class. + +which declares the construction denoted by qualid as a coercion +between the two given classes. + +More details and examples, and a description of the commands related +to coercions are provided in :ref:`implicitcoercions`. + +.. _printing_constructions_full: + +Printing constructions in full +------------------------------ + +Coercions, implicit arguments, the type of pattern-matching, but also +notations (see :ref:`syntaxextensionsandinterpretationscopes`) can obfuscate the behavior of some +tactics (typically the tactics applying to occurrences of subterms are +sensitive to the implicit arguments). The command + +.. 
cmd:: Set Printing All.
+
+deactivates all high-level printing features such as coercions,
+implicit arguments, returned type of pattern-matching, notations and
+various syntactic sugar for pattern-matching or record projections.
+In other words, ``Set Printing All`` includes the effects of the commands
+``Set Printing Implicit``, ``Set Printing Coercions``, ``Set Printing Synth``,
+``Unset Printing Projections``, and ``Unset Printing Notations``. To reactivate
+the high-level printing features, use the command
+
+.. cmd:: Unset Printing All.
+
+Printing universes
+------------------
+
+The following command:
+
+.. cmd:: Set Printing Universes.
+
+activates the display of the actual level of each occurrence of ``Type``.
+See :ref:`TODO-4.1.1-sorts` for details. This wizard option, in combination
+with ``Set Printing All`` (see :ref:`printing_constructions_full`) can help to diagnose failures
+to unify terms apparently identical but internally different in the
+Calculus of Inductive Constructions. To reactivate the display of the
+actual level of the occurrences of Type, use
+
+.. cmd:: Unset Printing Universes.
+
+The constraints on the internal level of the occurrences of Type
+(see :ref:`TODO-4.1.1-sorts`) can be printed using the command
+
+.. cmd:: Print {? Sorted} Universes.
+
+If the optional ``Sorted`` flag is given, each universe will be made
+equivalent to a numbered label reflecting its level (with a linear
+ordering) in the universe hierarchy.
+
+This command also accepts an optional output filename:
+
+.. cmd:: Print {? Sorted} Universes @string.
+
+If `string` ends in ``.dot`` or ``.gv``, the constraints are printed in the DOT
+language, and can be processed by Graphviz tools. The format is
+unspecified if `string` doesn’t end in ``.dot`` or ``.gv``.
+
+
+Existential variables
+---------------------
+
+|Coq| terms can include existential variables which represent unknown
+subterms to eventually be replaced by actual subterms. 
+ +Existential variables are generated in place of unsolvable implicit +arguments or “_” placeholders when using commands such as ``Check`` (see +Section :ref:`TODO-6.3.1-check`) or when using tactics such as ``refine`` (see Section +:ref:`TODO-8.2.3-refine`), as well as in place of unsolvable instances when using +tactics such that ``eapply`` (see Section :ref:`TODO-8.2.4-apply`). An existential +variable is defined in a context, which is the context of variables of +the placeholder which generated the existential variable, and a type, +which is the expected type of the placeholder. + +As a consequence of typing constraints, existential variables can be +duplicated in such a way that they possibly appear in different +contexts than their defining context. Thus, any occurrence of a given +existential variable comes with an instance of its original context. +In the simple case, when an existential variable denotes the +placeholder which generated it, or is used in the same context as the +one in which it was generated, the context is not displayed and the +existential variable is represented by “?” followed by an identifier. + +.. coqtop:: all + + Parameter identity : forall (X:Set), X -> X. + + Check identity _ _. + + Check identity _ (fun x => _). + +In the general case, when an existential variable ``?``\ `ident` appears +outside of its context of definition, its instance, written under the +form + +| ``{`` :n:`{*; @ident:=@term}` ``}`` + +is appending to its name, indicating how the variables of its defining context are instantiated. +The variables of the context of the existential variables which are +instantiated by themselves are not written, unless the flag ``Printing Existential Instances`` +is on (see Section :ref:`explicit-display-existentials`), and this is why an +existential variable used in the same context as its context of definition is written with no instance. + +.. coqtop:: all + + Check (fun x y => _) 0 1. + + Set Printing Existential Instances. 
+ + Check (fun x y => _) 0 1. + +Existential variables can be named by the user upon creation using +the syntax ``?``\ `ident`. This is useful when the existential +variable needs to be explicitly handled later in the script (e.g. +with a named-goal selector, see :ref:`TODO-9.2-goal-selectors`). + +.. _explicit-display-existentials: + +Explicit displaying of existential instances for pretty-printing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The command: + +.. cmd:: Set Printing Existential Instances. + +activates the full display of how the context of an existential +variable is instantiated at each of the occurrences of the existential +variable. + +To deactivate the full display of the instances of existential +variables, use + +.. cmd:: Unset Printing Existential Instances. + +Solving existential variables using tactics +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Instead of letting the unification engine try to solve an existential +variable by itself, one can also provide an explicit hole together +with a tactic to solve it. Using the syntax ``ltac:(``\ `tacexpr`\ ``)``, the user +can put a tactic anywhere a term is expected. The order of resolution +is not specified and is implementation-dependent. The inner tactic may +use any variable defined in its scope, including repeated alternations +between variables introduced by term binding as well as those +introduced by tactic binding. The expression `tacexpr` can be any tactic +expression as described in :ref:`thetacticlanguage`. + +.. coqtop:: all + + Definition foo (x : nat) : nat := ltac:(exact x). + +This construction is useful when one wants to define complicated terms +using highly automated tactics without resorting to writing the proof-term +by means of the interactive proof engine. 
+ +This mechanism is comparable to the ``Declare Implicit Tactic`` command +defined at :ref:`TODO-8.9.7-implicit-automation`, except that the used +tactic is local to each hole instead of being declared globally. diff --git a/doc/sphinx/language/module-system.rst b/doc/sphinx/language/module-system.rst new file mode 100644 index 0000000000..e6a6736654 --- /dev/null +++ b/doc/sphinx/language/module-system.rst @@ -0,0 +1,459 @@ +.. include:: ../preamble.rst +.. include:: ../replaces.rst + +.. _themodulesystem: + +The Module System +================= + +The module system extends the Calculus of Inductive Constructions +providing a convenient way to structure large developments as well as +a means of massive abstraction. + + +Modules and module types +---------------------------- + +**Access path.** An access path is denoted by :math:`p` and can be +either a module variable :math:`X` or, if :math:`p′` is an access path +and :math:`id` an identifier, then :math:`p′.id` is an access path. + + +**Structure element.** A structure element is denoted by :math:`e` and +is either a definition of a constant, an assumption, a definition of +an inductive, a definition of a module, an alias of a module or a module +type abbreviation. + + +**Structure expression.** A structure expression is denoted by :math:`S` and can be: + ++ an access path :math:`p` ++ a plain structure :math:`\Struct~e ; … ; e~\End` ++ a functor :math:`\Functor(X:S)~S′`, where :math:`X` is a module variable, :math:`S` and :math:`S′` are + structure expressions ++ an application :math:`S~p`, where :math:`S` is a structure expression and :math:`p` an + access path ++ a refined structure :math:`S~\with~p := p`′ or :math:`S~\with~p := t:T` where :math:`S` is a + structure expression, :math:`p` and :math:`p′` are access paths, :math:`t` is a term and :math:`T` is + the type of :math:`t`. 
+ +**Module definition.** A module definition is written :math:`\Mod{X}{S}{S'}` +and consists of a module variable :math:`X`, a module type +:math:`S` which can be any structure expression and optionally a +module implementation :math:`S′` which can be any structure expression +except a refined structure. + + +**Module alias.** A module alias is written :math:`\ModA{X}{p}` +and consists of a module variable :math:`X` and a module path +:math:`p`. + +**Module type abbreviation.** +A module type abbreviation is written :math:`\ModType{Y}{S}`, +where :math:`Y` is an identifier and :math:`S` is any structure +expression . + + +Typing Modules +------------------ + +In order to introduce the typing system we first slightly extend the syntactic +class of terms and environments given in section :ref:`The-terms`. The +environments, apart from definitions of constants and inductive types now also +hold any other structure elements. Terms, apart from variables, constants and +complex terms, include also access paths. + +We also need additional typing judgments: + + ++ :math:`\WFT{E}{S}`, denoting that a structure :math:`S` is well-formed, ++ :math:`\WTM{E}{p}{S}`, denoting that the module pointed by :math:`p` has type :math:`S` in + environment :math:`E`. ++ :math:`\WEV{E}{S}{\ovl{S}}`, denoting that a structure :math:`S` is evaluated to a + structure :math:`S` in weak head normal form. ++ :math:`\WS{E}{S_1}{S_2}` , denoting that a structure :math:`S_1` is a subtype of a + structure :math:`S_2`. ++ :math:`\WS{E}{e_1}{e_2}` , denoting that a structure element e_1 is more + precise than a structure element e_2. + +The rules for forming structures are the following: + +.. inference:: WF-STR + + \WF{E;E′}{} + ------------------------ + \WFT{E}{ \Struct~E′ ~\End} + +.. inference:: WF-FUN + + \WFT{E; \ModS{X}{S}}{ \ovl{S′} } + -------------------------- + \WFT{E}{ \Functor(X:S)~S′} + + +Evaluation of structures to weak head normal form: + +.. 
inference:: WEVAL-APP + + \begin{array}{c} + \WEV{E}{S}{\Functor(X:S_1 )~S_2}~~~~~\WEV{E}{S_1}{\ovl{S_1}} \\ + \WTM{E}{p}{S_3}~~~~~ \WS{E}{S_3}{\ovl{S_1}} + \end{array} + -------------------------- + \WEV{E}{S~p}{S_2 \{p/X,t_1 /p_1 .c_1 ,…,t_n /p_n.c_n \}} + + +In the last rule, :math:`\{t_1 /p_1 .c_1 ,…,t_n /p_n .c_n \}` is the resulting +substitution from the inlining mechanism. We substitute in :math:`S` the +inlined fields :math:`p_i .c_i` from :math:`\ModS{X}{S_1 }` by the corresponding delta- +reduced term :math:`t_i` in :math:`p`. + +.. inference:: WEVAL-WITH-MOD + + \begin{array}{c} + E[] ⊢ S \lra \Struct~e_1 ;…;e_i ; \ModS{X}{S_1 };e_{i+2} ;… ;e_n ~\End \\ + E;e_1 ;…;e_i [] ⊢ S_1 \lra \ovl{S_1} ~~~~~~ + E[] ⊢ p : S_2 \\ + E;e_1 ;…;e_i [] ⊢ S_2 <: \ovl{S_1} + \end{array} + ---------------------------------- + \begin{array}{c} + \WEV{E}{S~\with~x := p}{}\\ + \Struct~e_1 ;…;e_i ; \ModA{X}{p};e_{i+2} \{p/X\} ;…;e_n \{p/X\} ~\End + \end{array} + +.. inference:: WEVAL-WITH-MOD-REC + + \begin{array}{c} + \WEV{E}{S}{\Struct~e_1 ;…;e_i ; \ModS{X_1}{S_1 };e_{i+2} ;… ;e_n ~\End} \\ + \WEV{E;e_1 ;…;e_i }{S_1~\with~p := p_1}{\ovl{S_2}} + \end{array} + -------------------------- + \begin{array}{c} + \WEV{E}{S~\with~X_1.p := p_1}{} \\ + \Struct~e_1 ;…;e_i ; \ModS{X}{\ovl{S_2}};e_{i+2} \{p_1 /X_1.p\} ;…;e_n \{p_1 /X_1.p\} ~\End + \end{array} + +.. inference:: WEVAL-WITH-DEF + + \begin{array}{c} + \WEV{E}{S}{\Struct~e_1 ;…;e_i ;\Assum{}{c}{T_1};e_{i+2} ;… ;e_n ~\End} \\ + \WS{E;e_1 ;…;e_i }{Def()(c:=t:T)}{\Assum{}{c}{T_1}} + \end{array} + -------------------------- + \begin{array}{c} + \WEV{E}{S~\with~c := t:T}{} \\ + \Struct~e_1 ;…;e_i ;Def()(c:=t:T);e_{i+2} ;… ;e_n ~\End + \end{array} + +.. 
inference:: WEVAL-WITH-DEF-REC + + \begin{array}{c} + \WEV{E}{S}{\Struct~e_1 ;…;e_i ; \ModS{X_1 }{S_1 };e_{i+2} ;… ;e_n ~\End} \\ + \WEV{E;e_1 ;…;e_i }{S_1~\with~p := p_1}{\ovl{S_2}} + \end{array} + -------------------------- + \begin{array}{c} + \WEV{E}{S~\with~X_1.p := t:T}{} \\ + \Struct~e_1 ;…;e_i ; \ModS{X}{\ovl{S_2} };e_{i+2} ;… ;e_n ~\End + \end{array} + +.. inference:: WEVAL-PATH-MOD1 + + \begin{array}{c} + \WEV{E}{p}{\Struct~e_1 ;…;e_i ; \Mod{X}{S}{S_1};e_{i+2} ;… ;e_n End} \\ + \WEV{E;e_1 ;…;e_i }{S}{\ovl{S}} + \end{array} + -------------------------- + E[] ⊢ p.X \lra \ovl{S} + +.. inference:: WEVAL-PATH-MOD2 + + \WF{E}{} + \Mod{X}{S}{S_1}∈ E + \WEV{E}{S}{\ovl{S}} + -------------------------- + \WEV{E}{X}{\ovl{S}} + +.. inference:: WEVAL-PATH-ALIAS1 + + \begin{array}{c} + \WEV{E}{p}{~\Struct~e_1 ;…;e_i ; \ModA{X}{p_1};e_{i+2} ;… ;e_n End} \\ + \WEV{E;e_1 ;…;e_i }{p_1}{\ovl{S}} + \end{array} + -------------------------- + \WEV{E}{p.X}{\ovl{S}} + +.. inference:: WEVAL-PATH-ALIAS2 + + \WF{E}{} + \ModA{X}{p_1 }∈ E + \WEV{E}{p_1}{\ovl{S}} + -------------------------- + \WEV{E}{X}{\ovl{S}} + +.. inference:: WEVAL-PATH-TYPE1 + + \begin{array}{c} + \WEV{E}{p}{~\Struct~e_1 ;…;e_i ; \ModType{Y}{S};e_{i+2} ;… ;e_n End} \\ + \WEV{E;e_1 ;…;e_i }{S}{\ovl{S}} + \end{array} + -------------------------- + \WEV{E}{p.Y}{\ovl{S}} + +.. inference:: WEVAL-PATH-TYPE2 + + \WF{E}{} + \ModType{Y}{S}∈ E + \WEV{E}{S}{\ovl{S}} + -------------------------- + \WEV{E}{Y}{\ovl{S}} + + +Rules for typing module: + +.. inference:: MT-EVAL + + \WEV{E}{p}{\ovl{S}} + -------------------------- + E[] ⊢ p : \ovl{S} + +.. inference:: MT-STR + + E[] ⊢ p : S + -------------------------- + E[] ⊢ p : S/p + + +The last rule, called strengthening is used to make all module fields +manifestly equal to themselves. 
The notation :math:`S/p` has the following +meaning: + + ++ if :math:`S\lra~\Struct~e_1 ;…;e_n ~\End` then :math:`S/p=~\Struct~e_1 /p;…;e_n /p ~\End` + where :math:`e/p` is defined as follows (note that opaque definitions are processed + as assumptions): + + + :math:`\Def{}{c}{t}{T}/p = \Def{}{c}{t}{T}` + + :math:`\Assum{}{c}{U}/p = \Def{}{c}{p.c}{U}` + + :math:`\ModS{X}{S}/p = \ModA{X}{p.X}` + + :math:`\ModA{X}{p′}/p = \ModA{X}{p′}` + + :math:`\Ind{}{Γ_P}{Γ_C}{Γ_I}/p = \Indp{}{Γ_P}{Γ_C}{Γ_I}{p}` + + :math:`\Indpstr{}{Γ_P}{Γ_C}{Γ_I}{p'}{p} = \Indp{}{Γ_P}{Γ_C}{Γ_I}{p'}` + ++ if :math:`S \lra \Functor(X:S′)~S″` then :math:`S/p=S` + + +The notation :math:`\Indp{}{Γ_P}{Γ_C}{Γ_I}{p}` +denotes an inductive definition that is definitionally equal to the +inductive definition in the module denoted by the path :math:`p`. All rules +which have :math:`\Ind{}{Γ_P}{Γ_C}{Γ_I}` as premises are also valid for +:math:`\Indp{}{Γ_P}{Γ_C}{Γ_I}{p}`. We give the formation rule for +:math:`\Indp{}{Γ_P}{Γ_C}{Γ_I}{p}` +below as well as the equality rules on inductive types and +constructors. + +The module subtyping rules: + +.. inference:: MSUB-STR + + \begin{array}{c} + \WS{E;e_1 ;…;e_n }{e_{σ(i)}}{e'_i ~\for~ i=1..m} \\ + σ : \{1… m\} → \{1… n\} ~\injective + \end{array} + -------------------------- + \WS{E}{\Struct~e_1 ;…;e_n ~\End}{~\Struct~e'_1 ;…;e'_m ~\End} + +.. inference:: MSUB-FUN + + \WS{E}{\ovl{S_1'}}{\ovl{S_1}} + \WS{E; \ModS{X}{S_1'}}{\ovl{S_2}}{\ovl{S_2'}} + -------------------------- + E[] ⊢ \Functor(X:S_1 ) S_2 <: \Functor(X:S_1') S_2' + + +Structure element subtyping rules: + +.. inference:: ASSUM-ASSUM + + E[] ⊢ T_1 ≤_{βδιζη} T_2 + -------------------------- + \WS{E}{\Assum{}{c}{T_1 }}{\Assum{}{c}{T_2 }} + +.. inference:: DEF-ASSUM + + E[] ⊢ T_1 ≤_{βδιζη} T_2 + -------------------------- + \WS{E}{\Def{}{c}{t}{T_1 }}{\Assum{}{c}{T_2 }} + +.. 
inference:: ASSUM-DEF + + E[] ⊢ T_1 ≤_{βδιζη} T_2 + E[] ⊢ c =_{βδιζη} t_2 + -------------------------- + \WS{E}{\Assum{}{c}{T_1 }}{\Def{}{c}{t_2 }{T_2 }} + +.. inference:: DEF-DEF + + E[] ⊢ T_1 ≤_{βδιζη} T_2 + E[] ⊢ t_1 =_{βδιζη} t_2 + -------------------------- + \WS{E}{\Def{}{c}{t_1 }{T_1 }}{\Def{}{c}{t_2 }{T_2 }} + +.. inference:: IND-IND + + E[] ⊢ Γ_P =_{βδιζη} Γ_P' + E[Γ_P ] ⊢ Γ_C =_{βδιζη} Γ_C' + E[Γ_P ;Γ_C ] ⊢ Γ_I =_{βδιζη} Γ_I' + -------------------------- + \WS{E}{\ind{Γ_P}{Γ_C}{Γ_I}}{\ind{Γ_P'}{Γ_C'}{Γ_I'}} + +.. inference:: INDP-IND + + E[] ⊢ Γ_P =_{βδιζη} Γ_P' + E[Γ_P ] ⊢ Γ_C =_{βδιζη} Γ_C' + E[Γ_P ;Γ_C ] ⊢ Γ_I =_{βδιζη} Γ_I' + -------------------------- + \WS{E}{\Indp{}{Γ_P}{Γ_C}{Γ_I}{p}}{\ind{Γ_P'}{Γ_C'}{Γ_I'}} + +.. inference:: INDP-INDP + + \begin{array}{c} + E[] ⊢ Γ_P =_{βδιζη} Γ_P' + E[Γ_P ] ⊢ Γ_C =_{βδιζη} Γ_C' \\ + E[Γ_P ;Γ_C ] ⊢ Γ_I =_{βδιζη} Γ_I' + E[] ⊢ p =_{βδιζη} p' + \end{array} + -------------------------- + \WS{E}{\Indp{}{Γ_P}{Γ_C}{Γ_I}{p}}{\Indp{}{Γ_P'}{Γ_C'}{Γ_I'}{p'}} + +.. inference:: MOD-MOD + + \WS{E}{S_1}{S_2} + -------------------------- + \WS{E}{\ModS{X}{S_1 }}{\ModS{X}{S_2 }} + +.. inference:: ALIAS-MOD + + E[] ⊢ p : S_1 + \WS{E}{S_1}{S_2} + -------------------------- + \WS{E}{\ModA{X}{p}}{\ModS{X}{S_2 }} + +.. inference:: MOD-ALIAS + + E[] ⊢ p : S_2 + \WS{E}{S_1}{S_2} + E[] ⊢ X =_{βδιζη} p + -------------------------- + \WS{E}{\ModS{X}{S_1 }}{\ModA{X}{p}} + +.. inference:: ALIAS-ALIAS + + E[] ⊢ p_1 =_{βδιζη} p_2 + -------------------------- + \WS{E}{\ModA{X}{p_1 }}{\ModA{X}{p_2 }} + +.. inference:: MODTYPE-MODTYPE + + \WS{E}{S_1}{S_2} + \WS{E}{S_2}{S_1} + -------------------------- + \WS{E}{\ModType{Y}{S_1 }}{\ModType{Y}{S_2 }} + + +New environment formation rules + + +.. inference:: WF-MOD1 + + \WF{E}{} + \WFT{E}{S} + -------------------------- + WF(E; \ModS{X}{S})[] + +.. inference:: WF-MOD2 + + \WS{E}{S_2}{S_1} + \WF{E}{} + \WFT{E}{S_1} + \WFT{E}{S_2} + -------------------------- + \WF{E; \Mod{X}{S_1}{S_2}}{} + +.. 
inference:: WF-ALIAS + + \WF{E}{} + E[] ⊢ p : S + -------------------------- + \WF{E, \ModA{X}{p}}{} + +.. inference:: WF-MODTYPE + + \WF{E}{} + \WFT{E}{S} + -------------------------- + \WF{E, \ModType{Y}{S}}{} + +.. inference:: WF-IND + + \begin{array}{c} + \WF{E;\ind{Γ_P}{Γ_C}{Γ_I}}{} \\ + E[] ⊢ p:~\Struct~e_1 ;…;e_n ;\ind{Γ_P'}{Γ_C'}{Γ_I'};… ~\End : \\ + E[] ⊢ \ind{Γ_P'}{Γ_C'}{Γ_I'} <: \ind{Γ_P}{Γ_C}{Γ_I} + \end{array} + -------------------------- + \WF{E; \Indp{}{Γ_P}{Γ_C}{Γ_I}{p} }{} + + +Component access rules + + +.. inference:: ACC-TYPE1 + + E[Γ] ⊢ p :~\Struct~e_1 ;…;e_i ;\Assum{}{c}{T};… ~\End + -------------------------- + E[Γ] ⊢ p.c : T + +.. inference:: ACC-TYPE2 + + E[Γ] ⊢ p :~\Struct~e_1 ;…;e_i ;\Def{}{c}{t}{T};… ~\End + -------------------------- + E[Γ] ⊢ p.c : T + +Notice that the following rule extends the delta rule defined in section :ref:`Conversion-rules` + +.. inference:: ACC-DELTA + + E[Γ] ⊢ p :~\Struct~e_1 ;…;e_i ;\Def{}{c}{t}{U};… ~\End + -------------------------- + E[Γ] ⊢ p.c \triangleright_δ t + +In the rules below we assume +:math:`Γ_P` is :math:`[p_1 :P_1 ;…;p_r :P_r ]`, +:math:`Γ_I` is :math:`[I_1 :A_1 ;…;I_k :A_k ]`, +and :math:`Γ_C` is :math:`[c_1 :C_1 ;…;c_n :C_n ]`. + +.. inference:: ACC-IND1 + + E[Γ] ⊢ p :~\Struct~e_1 ;…;e_i ;\ind{Γ_P}{Γ_C}{Γ_I};… ~\End + -------------------------- + E[Γ] ⊢ p.I_j : (p_1 :P_1 )…(p_r :P_r )A_j + +.. inference:: ACC-IND2 + + E[Γ] ⊢ p :~\Struct~e_1 ;…;e_i ;\ind{Γ_P}{Γ_C}{Γ_I};… ~\End + -------------------------- + E[Γ] ⊢ p.c_m : (p_1 :P_1 )…(p_r :P_r )C_m I_j (I_j~p_1 …p_r )_{j=1… k} + +.. inference:: ACC-INDP1 + + E[] ⊢ p :~\Struct~e_1 ;…;e_i ; \Indp{}{Γ_P}{Γ_C}{Γ_I}{p'} ;… ~\End + -------------------------- + E[] ⊢ p.I_i \triangleright_δ p'.I_i + +.. 
inference:: ACC-INDP2 + + E[] ⊢ p :~\Struct~e_1 ;…;e_i ; \Indp{}{Γ_P}{Γ_C}{Γ_I}{p'} ;… ~\End + -------------------------- + E[] ⊢ p.c_i \triangleright_δ p'.c_i diff --git a/doc/sphinx/practical-tools/coq-commands.rst b/doc/sphinx/practical-tools/coq-commands.rst new file mode 100644 index 0000000000..1ff808894a --- /dev/null +++ b/doc/sphinx/practical-tools/coq-commands.rst @@ -0,0 +1,256 @@ +.. include:: ../replaces.rst + +.. _thecoqcommands: + +The |Coq| commands +==================== + +There are three |Coq| commands: + ++ ``coqtop``: the |Coq| toplevel (interactive mode); ++ ``coqc``: the |Coq| compiler (batch compilation); ++ ``coqchk``: the |Coq| checker (validation of compiled libraries). + + +The options are (basically) the same for the first two commands, and +roughly described below. You can also look at the ``man`` pages of +``coqtop`` and ``coqc`` for more details. + +Interactive use (coqtop) +------------------------ + +In the interactive mode, also known as the |Coq| toplevel, the user can +develop his theories and proofs step by step. The |Coq| toplevel is run +by the command ``coqtop``. + +They are two different binary images of |Coq|: the byte-code one and the +native-code one (if OCaml provides a native-code compiler for +your platform, which is supposed in the following). By default, +``coqtop`` executes the native-code version; run ``coqtop.byte`` to get +the byte-code version. + +The byte-code toplevel is based on an OCaml toplevel (to +allow dynamic linking of tactics). You can switch to the OCaml toplevel +with the command ``Drop.``, and come back to the |Coq| +toplevel with the command ``Coqloop.loop();;``. + +Batch compilation (coqc) +------------------------ + +The ``coqc`` command takes a name *file* as argument. Then it looks for a +vernacular file named *file*.v, and tries to compile it into a +*file*.vo file (See :ref:`TODO-6.5`). Warning: The name *file* should be a +regular |Coq| identifier, as defined in Section :ref:'TODO-1.1'. 
It should contain
+only letters, digits or underscores (_). For instance, ``/bar/foo/toto.v`` is valid, but
+``/bar/foo/to-to.v`` is invalid.
+
+
+Customization at launch time
+---------------------------------
+
+By resource file
+~~~~~~~~~~~~~~~~~~~~~~~
+
+When |Coq| is launched, with either ``coqtop`` or ``coqc``, the resource file
+``$XDG_CONFIG_HOME/coq/coqrc.xxx`` is loaded, where ``$XDG_CONFIG_HOME``
+is the configuration directory of the user (by default its home
+directory ``/.config``) and ``xxx`` is the version number (e.g. 8.8). If
+this file is not found, then the file ``$XDG_CONFIG_HOME/coqrc`` is
+searched. You can also specify an arbitrary name for the resource file
+(see option ``-init-file`` below).
+
+This file may contain, for instance, ``Add LoadPath`` commands to add
+directories to the load path of |Coq|. It is possible to skip the
+loading of the resource file with the option ``-q``.
+
+
+By environment variables
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Load path can be specified to the |Coq| system by setting up the ``$COQPATH``
+environment variable. It is a list of directories separated by
+``:`` (``;`` on Windows). |Coq| will also honor ``$XDG_DATA_HOME`` and
+``$XDG_DATA_DIRS`` (see Section :ref:`TODO-2.6.3`).
+
+Some |Coq| commands call other |Coq| commands. In this case, they look for
+the commands in the directory specified by ``$COQBIN``. If this variable is
+not set, they look for the commands in the executable path.
+
+The ``$COQ_COLORS`` environment variable can be used to specify the set
+of colors used by ``coqtop`` to highlight its output. It uses the same
+syntax as the ``$LS_COLORS`` variable from GNU’s ls, that is, a colon-separated
+list of assignments of the form ``name=``:n:``{*; attr}`` where
+``name`` is the name of the corresponding highlight tag and each ``attrᵢ`` is an
+ANSI escape code. The list of highlight tags can be retrieved with the
+``-list-tags`` command-line option of ``coqtop``.
+ +By command line options +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following command-line options are recognized by the commands ``coqc`` +and ``coqtop``, unless stated otherwise: + +:-I *directory*, -include *directory*: Add physical path *directory* + to the OCaml loadpath. See also: :ref:`TODO-2.6.1` and the + command Declare ML Module Section :ref:`TODO-6.5`. +:-Q *directory* dirpath: Add physical path *directory* to the list of + directories where |Coq| looks for a file and bind it to the logical + directory *dirpath*. The subdirectory structure of *directory* is + recursively available from |Coq| using absolute names (extending the + dirpath prefix) (see Section :ref:`TODO-2.6.2`). Note that only those + subdirectories and files which obey the lexical conventions of what is + an ident (see Section :ref:`TODO-1.1`) are taken into account. Conversely, the + underlying file systems or operating systems may be more restrictive + than |Coq|. While Linux’s ext4 file system supports any |Coq| recursive + layout (within the limit of 255 bytes per file name), the default on + NTFS (Windows) or HFS+ (MacOS X) file systems is on the contrary to + disallow two files differing only in the case in the same directory. + See also: Section :ref:`TODO-2.6.1`. +:-R *directory* dirpath: Do as -Q *directory* dirpath but make the + subdirectory structure of *directory* recursively visible so that the + recursive contents of physical *directory* is available from |Coq| using + short or partially qualified names. See also: Section :ref:`TODO-2.6.1`. +:-top dirpath: Set the toplevel module name to dirpath instead of Top. + Not valid for `coqc` as the toplevel module name is inferred from the + name of the output file. +:-exclude-dir *directory*: Exclude any subdirectory named *directory* + while processing options such as -R and -Q. By default, only the + conventional version control management directories named CVS + and _darcs are excluded. 
+:-nois: Start from an empty state instead of loading the Init.Prelude + module. +:-init-file *file*: Load *file* as the resource file instead of + loading the default resource file from the standard configuration + directories. +:-q: Do not load the default resource file. +:-load-ml-source *file*: Load the OCaml source file *file*. +:-load-ml-object *file*: Load the OCaml object file *file*. +:-l *file*, -load-vernac-source *file*: Load and execute the |Coq| + script from *file.v*. +:-lv *file*, -load-vernac-source-verbose *file*: Load and execute the + |Coq| script from *file.v*. Output its content on the standard input as + it is executed. +:-load-vernac-object dirpath: Load |Coq| compiled library dirpath. This + is equivalent to running Require dirpath. +:-require dirpath: Load |Coq| compiled library dirpath and import it. + This is equivalent to running Require Import dirpath. +:-batch: Exit just after argument parsing. Available for `coqtop` only. +:-compile *file.v*: Compile file *file.v* into *file.vo*. This option + implies -batch (exit just after argument parsing). It is available only + for `coqtop`, as this behavior is the purpose of `coqc`. +:-compile-verbose *file.v*: Same as -compile but also output the + content of *file.v* as it is compiled. +:-verbose: Output the content of the input file as it is compiled. + This option is available for `coqc` only; it is the counterpart of + -compile-verbose. +:-w (all|none|w₁,…,wₙ): Configure the display of warnings. This + option expects all, none or a comma-separated list of warning names or + categories (see Section :ref:`TODO-6.9.3`). +:-color (on|off|auto): Enable or not the coloring of output of `coqtop`. + Default is auto, meaning that `coqtop` dynamically decides, depending on + whether the output channel supports ANSI escape sequences. +:-beautify: Pretty-print each command to *file.beautified* when + compiling *file.v*, in order to get old-fashioned + syntax/definitions/notations. 
+:-emacs, -ide-slave: Start a special toplevel to communicate with a + specific IDE. +:-impredicative-set: Change the logical theory of |Coq| by declaring the + sort Set impredicative. Warning: This is known to be inconsistent with some + standard axioms of classical mathematics such as the functional + axiom of choice or the principle of description. +:-type-in-type: Collapse the universe hierarchy of |Coq|. Warning: This makes the logic + inconsistent. +:-mangle-names *ident*: Experimental: Do not depend on this option. Replace + Coq's auto-generated name scheme with names of the form *ident0*, *ident1*, + etc. The command ``Set Mangle Names`` turns the behavior on in a document, + and ``Set Mangle Names Prefix "ident"`` changes the used prefix. This feature + is intended to be used as a linter for developments that want to be robust to + changes in the auto-generated name scheme. The options are provided to + facilitate tracking down problems. +:-compat *version*: Attempt to maintain some backward-compatibility + with a previous version. +:-dump-glob *file*: Dump references for global names in file *file* + (to be used by coqdoc, see :ref:`TODO-15.4`). By default, if *file.v* is being + compiled, *file.glob* is used. +:-no-glob: Disable the dumping of references for global names. +:-image *file*: Set the binary image to be used by `coqc` to be *file* + instead of the standard one. Not of general use. +:-bindir *directory*: Set the directory containing |Coq| binaries to be + used by `coqc`. It is equivalent to doing export COQBIN= *directory* + before launching `coqc`. +:-where: Print the location of |Coq|’s standard library and exit. +:-config: Print the locations of |Coq|’s binaries, dependencies, and + libraries, then exit. +:-filteropts: Print the list of command line arguments that `coqtop` has + recognized as options and exit. +:-v: Print |Coq|’s version and exit. 
+:-list-tags: Print the highlight tags known by |Coq| as well as their + currently associated color and exit. +:-h, --help: Print a short usage and exit. + +Compiled libraries checker (coqchk) +---------------------------------------- + +The ``coqchk`` command takes a list of library paths as argument, described either +by their logical name or by their physical filename, which must end in ``.vo``. The +corresponding compiled libraries (``.vo`` files) are searched in the path, +recursively processing the libraries they depend on. The content of all these +libraries is then type-checked. The effect of ``coqchk`` is only to return with +normal exit code in case of success, and with positive exit code if an error has +been found. Error messages are not deemed to help the user understand what is +wrong. In the current version, it does not modify the compiled libraries to mark +them as successfully checked. + +Note that non-logical information is not checked. By logical +information, we mean the type and optional body associated to names. +It excludes for instance anything related to the concrete syntax of +objects (customized syntax rules, association between short and long +names), implicit arguments, etc. + +This tool can be used for several purposes. One is to check that a +compiled library provided by a third party has not been forged and +that loading it cannot introduce inconsistencies [#]_. Another point is +to get an even higher level of security. Since ``coqtop`` can be extended +with custom tactics, possibly ill-typed code, it cannot be guaranteed +that the produced compiled libraries are correct. ``coqchk`` is a +standalone verifier, and thus it cannot be tainted by such malicious +code. + +Command-line options ``-Q``, ``-R``, ``-where`` and ``-impredicative-set`` are supported +by ``coqchk`` and have the same meaning as for ``coqtop``. As there is no notion of +relative paths in object files ``-Q`` and ``-R`` have exactly the same meaning. 
+ +:-norec *module*: Check *module* but do not check its dependencies. +:-admit *module*: Do not check *module* and any of its dependencies, + unless explicitly required. +:-o: At exit, print a summary about the context. List the names of all + assumptions and variables (constants without body). +:-silent: Do not write progress information in standard output. + +Environment variable ``$COQLIB`` can be set to override the location of +the standard library. + +The algorithm for deciding which modules are checked or admitted is +the following: assuming that ``coqchk`` is called with argument ``M``, option +``-norec N``, and ``-admit A``. Let us write :math:`\overline{S}` for the +set of reflexive transitive dependencies of set :math:`S`. Then: + ++ Modules :math:`C = \overline{M} \backslash \overline{A} \cup M \cup N` are loaded and type-checked before being added + to the context. ++ And :math:`M \cup N \backslash C` is the set of modules that are loaded and added to the + context without type-checking. Basic integrity checks (checksums) are + nonetheless performed. + +As a rule of thumb, the ``-admit`` option can be used to tell that some libraries +have already been checked. So ``coqchk A B`` can be split into ``coqchk A`` && +``coqchk B -admit A`` without type-checking any definition twice. Of +course, the latter is slightly slower since it makes more disk accesses. +It is also less secure since an attacker might have replaced the +compiled library ``A`` after it has been read by the first command, but +before it has been read by the second command. + +.. [#] Ill-formed non-logical information might for instance bind + Coq.Init.Logic.True to short name False, so apparently False is + inhabited, but using fully qualified names, Coq.Init.Logic.False will + always refer to the absurd proposition; what we guarantee is that + there is no proof of this latter constant. 
diff --git a/doc/sphinx/practical-tools/coqide.rst b/doc/sphinx/practical-tools/coqide.rst new file mode 100644 index 0000000000..1fcfc665be --- /dev/null +++ b/doc/sphinx/practical-tools/coqide.rst @@ -0,0 +1,307 @@ +.. include:: ../replaces.rst + +.. _coqintegrateddevelopmentenvironment: + +|Coq| Integrated Development Environment +======================================== + +The Coq Integrated Development Environment is a graphical tool, to be +used as a user-friendly replacement to `coqtop`. Its main purpose is to +allow the user to navigate forward and backward into a Coq vernacular +file, executing corresponding commands or undoing them respectively. + +CoqIDE is run by typing the command `coqide` on the command line. +Without argument, the main screen is displayed with an “unnamed +buffer”, and with a file name as argument, another buffer displaying +the contents of that file. Additionally, `coqide` accepts the same +options as `coqtop`, given in :ref:`thecoqcommands`, the ones having obviously +no meaning for |CoqIDE| being ignored. + +.. _coqide_mainscreen: + + .. image:: ../_static/coqide.png + :alt: |CoqIDE| main screen + +A sample |CoqIDE| main screen, while navigating into a file `Fermat.v`, +is shown in the figure :ref:`CoqIDE main screen <coqide_mainscreen>`. +At the top is a menu bar, and a tool bar +below it. The large window on the left is displaying the various +*script buffers*. The upper right window is the *goal window*, where +goals to prove are displayed. The lower right window is the *message +window*, where various messages resulting from commands are displayed. +At the bottom is the status bar. + +Managing files and buffers, basic editing +---------------------------------------------- + +In the script window, you may open arbitrarily many buffers to edit. +The *File* menu allows you to open files or create some, save them, +print or export them into various formats. 
Among all these buffers, +there is always one which is the current *running buffer*, whose name +is displayed on a background in the *processed* color (green by default), which +is the one where Coq commands are currently executed. + +Buffers may be edited as in any text editor, and classical basic +editing commands (Copy/Paste, …) are available in the *Edit* menu. +CoqIDE offers only basic editing commands, so if you need more complex +editing commands, you may launch your favorite text editor on the +current buffer, using the *Edit/External Editor* menu. + +Interactive navigation into Coq scripts +-------------------------------------------- + +The running buffer is the one where navigation takes place. The toolbar offers +five basic commands for this. The first one, represented by a down arrow icon, +is for going forward executing one command. If that command is successful, the +part of the script that has been executed is displayed on a background with the +processed color. If that command fails, the error message is displayed in the +message window, and the location of the error is emphasized by an underline in +the error foreground color (red by default). + +In the figure :ref:`CoqIDE main screen <coqide_mainscreen>`, +the running buffer is `Fermat.v`, all commands until +the ``Theorem`` have been already executed, and the user tried to go +forward executing ``Induction n``. That command failed because no such +tactic exists (tactics are now in lowercase…), and the wrong word is +underlined. + +Notice that the processed part of the running buffer is not editable. If +you ever want to modify something you have to go backward using the up +arrow tool, or even better, put the cursor where you want to go back +and use the goto button. Unlike with `coqtop`, you should never use +``Undo`` to go backward. + +There are two additional buttons for navigation within the running buffer. 
The +"down" button with a line goes directly to the end; the "up" button with a line +goes back to the beginning. The handling of errors when using the go-to-the-end +button depends on whether |Coq| is running in asynchronous mode or not (see +Chapter :ref:`Asyncprocessing`). If it is not running in that mode, execution +stops as soon as an error is found. Otherwise, execution continues, and the +error is marked with an underline in the error foreground color, with a +background in the error background color (pink by default). The same +characterization of error-handling applies when running several commands using +the "goto" button. + +If you ever try to execute a command which happens to run during a +long time, and would like to abort it before its termination, you may +use the interrupt button (the white cross on a red circle). + +There are other buttons on the CoqIDE toolbar: a button to save the running +buffer; a button to close the current buffer (an "X"); buttons to switch among +buffers (left and right arrows); an "information" button; and a "gears" button. + +The "information" button is described in Section :ref:`sec:trytactics`. + +The "gears" button submits proof terms to the |Coq| kernel for type-checking. +When |Coq| uses asynchronous processing (see Chapter :ref:`Asyncprocessing`), +proofs may have been completed without kernel-checking of generated proof terms. +The presence of unchecked proof terms is indicated by ``Qed`` statements that +have a subdued *being-processed* color (light blue by default), rather than the +processed color, though their preceding proofs have the processed color. + +Notice that for all these buttons, except for the "gears" button, their operations +are also available in the menu, where their keyboard shortcuts are given. + +.. 
_try-tactics-automatically: + +Trying tactics automatically +------------------------------ + +The menu Try Tactics provides some features for automatically trying +to solve the current goal using simple tactics. If such a tactic +succeeds in solving the goal, then its text is automatically inserted +into the script. There is finally a combination of these tactics, +called the *proof wizard* which will try each of them in turn. This +wizard is also available as a tool button (the "information" button). The set of +tactics tried by the wizard is customizable in the preferences. + +These tactics are general ones, in particular they do not refer to +particular hypotheses. You may also try specific tactics related to +the goal or one of the hypotheses, by clicking with the right mouse +button on the goal or the considered hypothesis. This is the +“contextual menu on goals” feature, that may be disabled in the +preferences if undesirable. + + +Proof folding +------------------ + +As your script grows bigger and bigger, it might be useful to hide the +proofs of your theorems and lemmas. + +This feature is toggled via the Hide entry of the Navigation menu. The +proof shall be enclosed between ``Proof.`` and ``Qed.``, both with their final +dots. The proof that shall be hidden or revealed is the first one +whose beginning statement (such as ``Theorem``) precedes the insertion +cursor. + + +Vernacular commands, templates +----------------------------------- + +The Templates menu allows using shortcuts to insert vernacular +commands. This is a nice way to proceed if you are not sure of the +spelling of the command you want. + +Moreover, this menu offers some *templates* which will automatically +insert a complex command like ``Fixpoint`` with a convenient shape for its +arguments. + +Queries +------------ + +.. _coqide_queryselected: + +.. 
image:: ../_static/coqide-queries.png + :alt: |CoqIDE| queries + +We call *query* any vernacular command that does not change the current state, +such as ``Check``, ``Search``, etc. To run such commands interactively, without +writing them in scripts, CoqIDE offers a *query pane*. The query pane can be +displayed on demand by using the ``View`` menu, or using the shortcut ``F1``. +Queries can also be performed by selecting a particular phrase, then choosing an +item from the ``Queries`` menu. The response then appears in the message window. +Figure :ref:`coqide_queryselected` shows the result after selecting the phrase +``Nat.mul`` in the script window, and choosing ``Print`` from the ``Queries`` +menu. + + +Compilation +---------------- + +The `Compile` menu offers direct commands to: + ++ compile the current buffer ++ run a compilation using `make` ++ go to the last compilation error ++ create a `Makefile` using `coq_makefile`. + +Customizations +------------------- + +You may customize your environment using menu Edit/Preferences. A new +window will be displayed, with several customization sections +presented as a notebook. + +The first section is for selecting the text font used for scripts, +goal and message windows. + +The second section is devoted to file management: you may configure +automatic saving of files, by periodically saving the contents into +files named `#f#` for each opened file `f`. You may also activate the +*revert* feature: in case an opened file is modified on the disk by a +third party, |CoqIDE| may read it again for you. Note that in the case +you edited that same file, you will be prompted to choose to either +discard your changes or not. The File charset encoding choice is +described below in :ref:`character-encoding-saved-files`. + +The `Externals` section allows customizing the external commands for +compilation, printing, web browsing. 
In the browser command, you may +use `%s` to denote the URL to open, for example: +`firefox -remote "OpenURL(%s)"`. + +The `Tactics Wizard` section allows defining the set of tactics that +should be tried, in sequence, to solve the current goal. + +The last section is for miscellaneous boolean settings, such as the +“contextual menu on goals” feature presented in the section +:ref:`Try tactics automatically <try-tactics-automatically>`. + +Notice that these settings are saved in the file `.coqiderc` of your +home directory. + +A Gtk2 accelerator keymap is saved under the name `.coqide.keys`. It +is not recommended to edit this file manually: to modify a given menu +shortcut, go to the corresponding menu item without releasing the +mouse button, press the key you want for the new shortcut, and release +the mouse button afterwards. If your system does not allow it, you may +still edit this configuration file by hand, but this is more involved. + + +Using Unicode symbols +-------------------------- + +CoqIDE is based on GTK+ and inherits from it support for Unicode in +its text windows. Consequently a large set of symbols is available for +notations. + + +Displaying Unicode symbols +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You just need to define suitable notations as described in the chapter +:ref:`syntaxextensionsandinterpretationscopes`. For example, to use the +mathematical symbols ∀ and ∃, you may define: + +.. coqtop:: in + + Notation "∀ x : T, P" := + (forall x : T, P) (at level 200, x ident). + Notation "∃ x : T, P" := + (exists x : T, P) (at level 200, x ident). + +There exists a small set of such notations already defined, in the +file `utf8.v` of the Coq library, so you may enable them just by +``Require utf8`` inside |CoqIDE|, or equivalently, by starting |CoqIDE| with +``coqide -l utf8``. + +However, there are some issues when using such Unicode symbols: you of +course need to use a character font which supports them. 
In the Fonts +section of the preferences, the Preview line displays some Unicode +symbols, so you could figure out if the selected font is OK. Related +to this, one thing you may need to do is choose whether GTK+ should +use antialiased fonts or not, by setting the environment variable +`GDK_USE_XFT` to 1 or 0 respectively. + + +Defining an input method for non-ASCII symbols +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To input a Unicode symbol, a general method provided by GTK+ is to +simultaneously press the Control, Shift and “u” keys, release, then +type the hexadecimal code of the symbol required, for example `2200` +for the ∀ symbol. A list of symbol codes is available at +`http://www.unicode.org`. + +An alternative method which does not require to know the hexadecimal +code of the character is to use an Input Method Editor. On POSIX +systems (Linux distributions, BSD variants and MacOS X), you can +use `uim` version 1.6 or later which provides a :math:`\LaTeX`-style input +method. + +To configure uim, execute uim-pref-gtk as your regular user. In the +"Global Settings" group set the default Input Method to "ELatin" +(don’t forget to tick the checkbox "Specify default IM"). In the +"ELatin" group set the layout to "TeX", and remember the content of +the "[ELatin] on" field (by default Control-\\). You can now execute +|CoqIDE| with the following commands (assuming you use a Bourne-style +shell): + +:: + + $ export GTK_IM_MODULE=uim + $ coqide + + +Activate the ELatin Input Method with Control-\\, then type the +sequence `\\Gamma`. You will see the sequence being replaced by Γ as +soon as you type the second "a". + +.. _character-encoding-saved-files: + +Character encoding for saved files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the Files section of the preferences, the encoding option is +related to the way files are saved. 
+ +If you have no need to exchange files with non UTF-8 aware +applications, it is better to choose the UTF-8 encoding, since it +guarantees that your files will be read again without problems. (This +is because when |CoqIDE| reads a file, it tries to automatically detect +its character encoding.) + +If you choose something else than UTF-8, then missing characters will +be written encoded by `\x{....}` or `\x{........}` where each dot is +an hexadecimal digit: the number between braces is the hexadecimal +Unicode index for the missing character. diff --git a/doc/sphinx/preamble.rst b/doc/sphinx/preamble.rst new file mode 100644 index 0000000000..395f558a85 --- /dev/null +++ b/doc/sphinx/preamble.rst @@ -0,0 +1,92 @@ +.. preamble:: + + \[ + \newcommand{\alors}{\textsf{then}} + \newcommand{\alter}{\textsf{alter}} + \newcommand{\as}{\kw{as}} + \newcommand{\Assum}[3]{\kw{Assum}(#1)(#2:#3)} + \newcommand{\bool}{\textsf{bool}} + \newcommand{\case}{\kw{case}} + \newcommand{\conc}{\textsf{conc}} + \newcommand{\cons}{\textsf{cons}} + \newcommand{\consf}{\textsf{consf}} + \newcommand{\conshl}{\textsf{cons\_hl}} + \newcommand{\Def}[4]{\kw{Def}(#1)(#2:=#3:#4)} + \newcommand{\emptyf}{\textsf{emptyf}} + \newcommand{\End}{\kw{End}} + \newcommand{\endkw}{\kw{end}} + \newcommand{\EqSt}{\textsf{EqSt}} + \newcommand{\even}{\textsf{even}} + \newcommand{\evenO}{\textsf{even_O}} + \newcommand{\evenS}{\textsf{even_S}} + \newcommand{\false}{\textsf{false}} + \newcommand{\filter}{\textsf{filter}} + \newcommand{\Fix}{\kw{Fix}} + \newcommand{\fix}{\kw{fix}} + \newcommand{\for}{\textsf{for}} + \newcommand{\forest}{\textsf{forest}} + \newcommand{\from}{\textsf{from}} + \newcommand{\Functor}{\kw{Functor}} + \newcommand{\haslength}{\textsf{has\_length}} + \newcommand{\hd}{\textsf{hd}} + \newcommand{\ident}{\textsf{ident}} + \newcommand{\In}{\kw{in}} + \newcommand{\Ind}[4]{\kw{Ind}[#2](#3:=#4)} + \newcommand{\ind}[3]{\kw{Ind}~[#1]\left(#2\mathrm{~:=~}#3\right)} + 
\newcommand{\Indp}[5]{\kw{Ind}_{#5}(#1)[#2](#3:=#4)} + \newcommand{\Indpstr}[6]{\kw{Ind}_{#5}(#1)[#2](#3:=#4)/{#6}} + \newcommand{\injective}{\kw{injective}} + \newcommand{\kw}[1]{\textsf{#1}} + \newcommand{\lb}{\lambda} + \newcommand{\length}{\textsf{length}} + \newcommand{\letin}[3]{\kw{let}~#1:=#2~\kw{in}~#3} + \newcommand{\List}{\textsf{list}} + \newcommand{\lra}{\longrightarrow} + \newcommand{\Match}{\kw{match}} + \newcommand{\Mod}[3]{{\kw{Mod}}({#1}:{#2}\,\zeroone{:={#3}})} + \newcommand{\ModA}[2]{{\kw{ModA}}({#1}=={#2})} + \newcommand{\ModS}[2]{{\kw{Mod}}({#1}:{#2})} + \newcommand{\ModType}[2]{{\kw{ModType}}({#1}:={#2})} + \newcommand{\mto}{.\;} + \newcommand{\Nat}{\mathbb{N}} + \newcommand{\nat}{\textsf{nat}} + \newcommand{\Nil}{\textsf{nil}} + \newcommand{\nilhl}{\textsf{nil\_hl}} + \newcommand{\nO}{\textsf{O}} + \newcommand{\node}{\textsf{node}} + \newcommand{\nS}{\textsf{S}} + \newcommand{\odd}{\textsf{odd}} + \newcommand{\oddS}{\textsf{odd_S}} + \newcommand{\ovl}[1]{\overline{#1}} + \newcommand{\Pair}{\textsf{pair}} + \newcommand{\Prod}{\textsf{prod}} + \newcommand{\Prop}{\textsf{Prop}} + \newcommand{\return}{\kw{return}} + \newcommand{\Set}{\textsf{Set}} + \newcommand{\si}{\textsf{if}} + \newcommand{\sinon}{\textsf{else}} + \newcommand{\Sort}{\cal S} + \newcommand{\Str}{\textsf{Stream}} + \newcommand{\Struct}{\kw{Struct}} + \newcommand{\subst}[3]{#1\{#2/#3\}} + \newcommand{\tl}{\textsf{tl}} + \newcommand{\tree}{\textsf{tree}} + \newcommand{\true}{\textsf{true}} + \newcommand{\Type}{\textsf{Type}} + \newcommand{\unfold}{\textsf{unfold}} + \newcommand{\WEV}[3]{\mbox{$#1[] \vdash #2 \lra #3$}} + \newcommand{\WEVT}[3]{\mbox{$#1[] \vdash #2 \lra$}\\ \mbox{$ #3$}} + \newcommand{\WF}[2]{{\cal W\!F}(#1)[#2]} + \newcommand{\WFE}[1]{\WF{E}{#1}} + \newcommand{\WFT}[2]{#1[] \vdash {\cal W\!F}(#2)} + \newcommand{\WFTWOLINES}[2]{{\cal W\!F}\begin{array}{l}(#1)\\\mbox{}[{#2}]\end{array}} + \newcommand{\with}{\kw{with}} + \newcommand{\WS}[3]{#1[] \vdash #2 <: #3} + 
\newcommand{\WSE}[2]{\WS{E}{#1}{#2}} + \newcommand{\WT}[4]{#1[#2] \vdash #3 : #4} + \newcommand{\WTE}[3]{\WT{E}{#1}{#2}{#3}} + \newcommand{\WTEG}[2]{\WTE{\Gamma}{#1}{#2}} + \newcommand{\WTM}[3]{\WT{#1}{}{#2}{#3}} + \newcommand{\zeroone}[1]{[{#1}]} + \newcommand{\zeros}{\textsf{zeros}} + \] diff --git a/doc/sphinx/proof-engine/detailed-tactic-examples.rst b/doc/sphinx/proof-engine/detailed-tactic-examples.rst new file mode 100644 index 0000000000..932f967881 --- /dev/null +++ b/doc/sphinx/proof-engine/detailed-tactic-examples.rst @@ -0,0 +1,1023 @@ +.. _detailedexamplesoftactics: + +Detailed examples of tactics +============================ + +This chapter presents detailed examples of certain tactics, to +illustrate their behavior. + +dependent induction +------------------- + +The tactics ``dependent induction`` and ``dependent destruction`` are another +solution for inverting inductive predicate instances and potentially +doing induction at the same time. It is based on the ``BasicElim`` tactic +of Conor McBride which works by abstracting each argument of an +inductive instance by a variable and constraining it by equalities +afterwards. This way, the usual induction and destruct tactics can be +applied to the abstracted instance and after simplification of the +equalities we get the expected goals. + +The abstracting tactic is called generalize_eqs and it takes as +argument an hypothesis to generalize. It uses the JMeq datatype +defined in Coq.Logic.JMeq, hence we need to require it before. For +example, revisiting the first example of the inversion documentation: + +.. coqtop:: in + + Require Import Coq.Logic.JMeq. + + Inductive Le : nat -> nat -> Set := + | LeO : forall n:nat, Le 0 n + | LeS : forall n m:nat, Le n m -> Le (S n) (S m). + + Variable P : nat -> nat -> Prop. + + Goal forall n m:nat, Le (S n) m -> P n m. + + intros n m H. + +.. coqtop:: all + + generalize_eqs H. 
+ +The index ``S n`` gets abstracted by a variable here, but a corresponding +equality is added under the abstract instance so that no information +is actually lost. The goal is now almost amenable to do induction or +case analysis. One should indeed first move ``n`` into the goal to +strengthen it before doing induction, or ``n`` will be fixed in the +inductive hypotheses (this does not matter for case analysis). As a +rule of thumb, all the variables that appear inside constructors in +the indices of the hypothesis should be generalized. This is exactly +what the ``generalize_eqs_vars`` variant does: + +.. coqtop:: all + + generalize_eqs_vars H. + induction H. + +As the hypothesis itself did not appear in the goal, we did not need +to use an heterogeneous equality to relate the new hypothesis to the +old one (which just disappeared here). However, the tactic works just +as well in this case, e.g.: + +.. coqtop:: in + + Variable Q : forall (n m : nat), Le n m -> Prop. + Goal forall n m (p : Le (S n) m), Q (S n) m p. + +.. coqtop:: all + + intros n m p. + generalize_eqs_vars p. + +One drawback of this approach is that in the branches one will have to +substitute the equalities back into the instance to get the right +assumptions. Sometimes injection of constructors will also be needed +to recover the needed equalities. Also, some subgoals should be +directly solved because of inconsistent contexts arising from the +constraints on indexes. The nice thing is that we can make a tactic +based on discriminate, injection and variants of substitution to +automatically do such simplifications (which may involve the K axiom). +This is what the ``simplify_dep_elim`` tactic from ``Coq.Program.Equality`` +does. For example, we might simplify the previous goals considerably: + +.. coqtop:: all + + Require Import Coq.Program.Equality. + +.. coqtop:: all + + induction p ; simplify_dep_elim. 
+ +The higher-order tactic ``do_depind`` defined in ``Coq.Program.Equality`` +takes a tactic and combines the building blocks we have seen with it: +generalizing by equalities calling the given tactic with the +generalized induction hypothesis as argument and cleaning the subgoals +with respect to equalities. Its most important instantiations +are ``dependent induction`` and ``dependent destruction`` that do induction or +simply case analysis on the generalized hypothesis. For example we can +redo what we’ve done manually with dependent destruction: + +.. coqtop:: in + + Require Import Coq.Program.Equality. + +.. coqtop:: in + + Lemma ex : forall n m:nat, Le (S n) m -> P n m. + +.. coqtop:: in + + intros n m H. + +.. coqtop:: all + + dependent destruction H. + +This gives essentially the same result as inversion. Now if the +destructed hypothesis actually appeared in the goal, the tactic would +still be able to invert it, contrary to dependent inversion. Consider +the following example on vectors: + +.. coqtop:: in + + Require Import Coq.Program.Equality. + +.. coqtop:: in + + Set Implicit Arguments. + +.. coqtop:: in + + Variable A : Set. + +.. coqtop:: in + + Inductive vector : nat -> Type := + | vnil : vector 0 + | vcons : A -> forall n, vector n -> vector (S n). + +.. coqtop:: in + + Goal forall n, forall v : vector (S n), + exists v' : vector n, exists a : A, v = vcons a v'. + +.. coqtop:: in + + intros n v. + +.. coqtop:: all + + dependent destruction v. + +In this case, the ``v`` variable can be replaced in the goal by the +generalized hypothesis only when it has a type of the form ``vector (S n)``, +that is only in the second case of the destruct. The first one is +dismissed because ``S n <> 0``. + + +A larger example +~~~~~~~~~~~~~~~~ + +Let’s see how the technique works with induction on inductive +predicates on a real example. 
We will develop an example application +to the theory of simply-typed lambda-calculus formalized in a +dependently-typed style: + +.. coqtop:: in + + Inductive type : Type := + | base : type + | arrow : type -> type -> type. + +.. coqtop:: in + + Notation " t --> t' " := (arrow t t') (at level 20, t' at next level). + +.. coqtop:: in + + Inductive ctx : Type := + | empty : ctx + | snoc : ctx -> type -> ctx. + +.. coqtop:: in + + Notation " G , tau " := (snoc G tau) (at level 20, tau at next level). + +.. coqtop:: in + + Fixpoint conc (G D : ctx) : ctx := + match D with + | empty => G + | snoc D' x => snoc (conc G D') x + end. + +.. coqtop:: in + + Notation " G ; D " := (conc G D) (at level 20). + +.. coqtop:: in + + Inductive term : ctx -> type -> Type := + | ax : forall G tau, term (G, tau) tau + | weak : forall G tau, + term G tau -> forall tau', term (G, tau') tau + | abs : forall G tau tau', + term (G , tau) tau' -> term G (tau --> tau') + | app : forall G tau tau', + term G (tau --> tau') -> term G tau -> term G tau'. + +We have defined types and contexts which are snoc-lists of types. We +also have a ``conc`` operation that concatenates two contexts. The ``term`` +datatype represents in fact the possible typing derivations of the +calculus, which are isomorphic to the well-typed terms, hence the +name. A term is either an application of: + + ++ the axiom rule to type a reference to the first variable in a + context ++ the weakening rule to type an object in a larger context ++ the abstraction or lambda rule to type a function ++ the application to type an application of a function to an argument + + +Once we have this datatype we want to do proofs on it, like weakening: + +.. coqtop:: in undo + + Lemma weakening : forall G D tau, term (G ; D) tau -> + forall tau', term (G , tau' ; D) tau. + +The problem here is that we can’t just use induction on the typing +derivation because it will forget about the ``G ; D`` constraint appearing +in the instance. 
A solution would be to rewrite the goal as: + +.. coqtop:: in + + Lemma weakening' : forall G' tau, term G' tau -> + forall G D, (G ; D) = G' -> + forall tau', term (G, tau' ; D) tau. + +With this proper separation of the index from the instance and the +right induction loading (putting ``G`` and ``D`` after the inducted-on +hypothesis), the proof will go through, but it is a very tedious +process. One is also forced to make a wrapper lemma to get back the +more natural statement. The ``dependent induction`` tactic alleviates this +trouble by doing all of this plumbing of generalizing and substituting +back automatically. Indeed we can simply write: + +.. coqtop:: in + + Require Import Coq.Program.Tactics. + +.. coqtop:: in + + Lemma weakening : forall G D tau, term (G ; D) tau -> + forall tau', term (G , tau' ; D) tau. + +.. coqtop:: in + + Proof with simpl in * ; simpl_depind ; auto. + +.. coqtop:: in + + intros G D tau H. dependent induction H generalizing G D ; intros. + +This call to dependent induction has an additional arguments which is +a list of variables appearing in the instance that should be +generalized in the goal, so that they can vary in the induction +hypotheses. By default, all variables appearing inside constructors +(except in a parameter position) of the instantiated hypothesis will +be generalized automatically but one can always give the list +explicitly. + +.. coqtop:: all + + Show. + +The ``simpl_depind`` tactic includes an automatic tactic that tries to +simplify equalities appearing at the beginning of induction +hypotheses, generally using trivial applications of ``reflexivity``. In +cases where the equality is not between constructor forms though, one +must help the automation by giving some arguments, using the +``specialize`` tactic for example. + +.. coqtop:: in + + destruct D... apply weak; apply ax. apply ax. + +.. coqtop:: in + + destruct D... + +.. coqtop:: all + + Show. + +.. 
coqtop:: all + + specialize (IHterm G0 empty eq_refl). + +Once the induction hypothesis has been narrowed to the right equality, +it can be used directly. + +.. coqtop:: all + + apply weak, IHterm. + +If there is an easy first-order solution to these equations as in this +subgoal, the ``specialize_eqs`` tactic can be used instead of giving +explicit proof terms: + +.. coqtop:: all + + specialize_eqs IHterm. + +This concludes our example. + +See also: The ``induction`` :ref:`TODO-9-induction`, ``case`` :ref:`TODO-9-induction` and ``inversion`` :ref:`TODO-8.14-inversion` tactics. + + +autorewrite +----------- + +Here are two examples of ``autorewrite`` use. The first one ( *Ackermann +function*) shows actually a quite basic use where there is no +conditional rewriting. The second one ( *Mac Carthy function*) +involves conditional rewritings and shows how to deal with them using +the optional tactic of the ``Hint Rewrite`` command. + + +Example 1: Ackermann function + +.. coqtop:: in + + Reset Initial. + +.. coqtop:: in + + Require Import Arith. + +.. coqtop:: in + + Variable Ack : nat -> nat -> nat. + +.. coqtop:: in + + Axiom Ack0 : forall m:nat, Ack 0 m = S m. + Axiom Ack1 : forall n:nat, Ack (S n) 0 = Ack n 1. + Axiom Ack2 : forall n m:nat, Ack (S n) (S m) = Ack n (Ack (S n) m). + +.. coqtop:: in + + Hint Rewrite Ack0 Ack1 Ack2 : base0. + +.. coqtop:: all + + Lemma ResAck0 : Ack 3 2 = 29. + +.. coqtop:: all + + autorewrite with base0 using try reflexivity. + +Example 2: Mac Carthy function + +.. coqtop:: in + + Require Import Omega. + +.. coqtop:: in + + Variable g : nat -> nat -> nat. + +.. coqtop:: in + + Axiom g0 : forall m:nat, g 0 m = m. + Axiom g1 : forall n m:nat, (n > 0) -> (m > 100) -> g n m = g (pred n) (m - 10). + Axiom g2 : forall n m:nat, (n > 0) -> (m <= 100) -> g n m = g (S n) (m + 11). + + +.. coqtop:: in + + Hint Rewrite g0 g1 g2 using omega : base1. + +.. coqtop:: in + + Lemma Resg0 : g 1 110 = 100. + +.. coqtop:: out + + Show. + +.. 
coqtop:: all + + autorewrite with base1 using reflexivity || simpl. + +.. coqtop:: all + + Lemma Resg1 : g 1 95 = 91. + +.. coqtop:: all + + autorewrite with base1 using reflexivity || simpl. + + +quote +----- + +The tactic ``quote`` allows using Barendregt’s so-called 2-level approach +without writing any ML code. Suppose you have a language ``L`` of +'abstract terms' and a type ``A`` of 'concrete terms' and a function ``f : L -> A``. +If ``L`` is a simple inductive datatype and ``f`` a simple fixpoint, +``quote f`` will replace the head of current goal by a convertible term of +the form ``(f t)``. ``L`` must have a constructor of type: ``A -> L``. + +Here is an example: + +.. coqtop:: in + + Require Import Quote. + +.. coqtop:: all + + Parameters A B C : Prop. + +.. coqtop:: all + + Inductive formula : Type := + | f_and : formula -> formula -> formula (* binary constructor *) + | f_or : formula -> formula -> formula + | f_not : formula -> formula (* unary constructor *) + | f_true : formula (* 0-ary constructor *) + | f_const : Prop -> formula (* constructor for constants *). + +.. coqtop:: all + + Fixpoint interp_f (f:formula) : Prop := + match f with + | f_and f1 f2 => interp_f f1 /\ interp_f f2 + | f_or f1 f2 => interp_f f1 \/ interp_f f2 + | f_not f1 => ~ interp_f f1 + | f_true => True + | f_const c => c + end. + +.. coqtop:: all + + Goal A /\ (A \/ True) /\ ~ B /\ (A <-> A). + +.. coqtop:: all + + quote interp_f. + +The algorithm to perform this inversion is: try to match the term with +right-hand sides expression of ``f``. If there is a match, apply the +corresponding left-hand side and call yourself recursively on sub- +terms. If there is no match, we are at a leaf: return the +corresponding constructor (here ``f_const``) applied to the term. + + +Error messages: + + +#. quote: not a simple fixpoint + + Happens when ``quote`` is not able to perform inversion properly. 
+ + + +Introducing variables map +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The normal use of quote is to make proofs by reflection: one defines a +function ``simplify : formula -> formula`` and proves a theorem +``simplify_ok: (f:formula)(interp_f (simplify f)) -> (interp_f f)``. Then, +one can simplify formulas by doing: + +.. coqtop:: in + + quote interp_f. + apply simplify_ok. + compute. + +But there is a problem with leafs: in the example above one cannot +write a function that implements, for example, the logical +simplifications :math:`A \wedge A \rightarrow A` or :math:`A \wedge +\lnot A \rightarrow \mathrm{False}`. This is because ``Prop`` is +impredicative. + +It is better to use that type of formulas: + +.. coqtop:: in reset + + Require Import Quote. + +.. coqtop:: in + + Parameters A B C : Prop. + +.. coqtop:: all + + Inductive formula : Set := + | f_and : formula -> formula -> formula + | f_or : formula -> formula -> formula + | f_not : formula -> formula + | f_true : formula + | f_atom : index -> formula. + +``index`` is defined in module ``Quote``. Equality on that type is +decidable so we are able to simplify :math:`A \wedge A` into :math:`A` +at the abstract level. + +When there are variables, there are bindings, and ``quote`` also +provides a type ``(varmap A)`` of bindings from index to any set +``A``, and a function ``varmap_find`` to search in such maps. The +interpretation function also has another argument, a variables map: + +.. coqtop:: all + + Fixpoint interp_f (vm:varmap Prop) (f:formula) {struct f} : Prop := + match f with + | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2 + | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2 + | f_not f1 => ~ interp_f vm f1 + | f_true => True + | f_atom i => varmap_find True i vm + end. + +``quote`` handles this second case properly: + +.. coqtop:: all + + Goal A /\ (B \/ A) /\ (A \/ ~ B). + +.. coqtop:: all + + quote interp_f. 
+ +It builds ``vm`` and ``t`` such that ``(f vm t)`` is convertible with the +conclusion of current goal. + + +Combining variables and constants +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +One can have both variables and constants in abstracts terms; for +example, this is the case for the ``ring`` tactic +:ref:`TODO-25-ringandfieldtacticfamilies`. Then one must provide to +``quote`` a list of *constructors of constants*. For example, if the list +is ``[O S]`` then closed natural numbers will be considered as constants +and other terms as variables. + +Example: + +.. coqtop:: in + + Inductive formula : Type := + | f_and : formula -> formula -> formula + | f_or : formula -> formula -> formula + | f_not : formula -> formula + | f_true : formula + | f_const : Prop -> formula (* constructor for constants *) + | f_atom : index -> formula. + +.. coqtop:: in + + Fixpoint interp_f (vm:varmap Prop) (f:formula) {struct f} : Prop := + match f with + | f_and f1 f2 => interp_f vm f1 /\ interp_f vm f2 + | f_or f1 f2 => interp_f vm f1 \/ interp_f vm f2 + | f_not f1 => ~ interp_f vm f1 + | f_true => True + | f_const c => c + | f_atom i => varmap_find True i vm + end. + +.. coqtop:: in + + Goal A /\ (A \/ True) /\ ~ B /\ (C <-> C). + +.. coqtop:: all + + quote interp_f [ A B ]. + + +.. coqtop:: all + + Undo. + +.. coqtop:: all + + quote interp_f [ B C iff ]. + +Warning: Since function inversion is undecidable in general case, +don’t expect miracles from it! + +.. tacv:: quote @ident in @term using @tactic + + ``tactic`` must be a functional tactic (starting with ``fun x =>``) and + will be called with the quoted version of term according to ``ident``. + +.. tacv:: quote @ident [{+ @ident}] in @term using @tactic + + Same as above, but will use the additional ``ident`` list to chose + which subterms are constants (see above). 
+ +See also: comments of source file ``plugins/quote/quote.ml`` + +See also: the ``ring`` tactic :ref:`TODO-25-ringandfieldtacticfamilies` + + +Using the tactical language +--------------------------- + + +About the cardinality of the set of natural numbers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A first example which shows how to use pattern matching over the +proof contexts is the proof that natural numbers have more than two +elements. The proof of such a lemma can be done as follows: + +.. coqtop:: in + + Lemma card_nat : ~ (exists x : nat, exists y : nat, forall z:nat, x = z \/ y = z). + Proof. + +.. coqtop:: in + + red; intros (x, (y, Hy)). + +.. coqtop:: in + + elim (Hy 0); elim (Hy 1); elim (Hy 2); intros; + + match goal with + | [_:(?a = ?b),_:(?a = ?c) |- _ ] => + cut (b = c); [ discriminate | transitivity a; auto ] + end. + +.. coqtop:: in + + Qed. + +We can notice that all the (very similar) cases coming from the three +eliminations (with three distinct natural numbers) are successfully +solved by a match goal structure and, in particular, with only one +pattern (use of non-linear matching). + + +Permutation on closed lists +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Another more complex example is the problem of permutation on closed +lists. The aim is to show that a closed list is a permutation of +another one. + +First, we define the permutation predicate as shown here: + +.. coqtop:: in + + Section Sort. + +.. coqtop:: in + + Variable A : Set. + +.. coqtop:: in + + Inductive permut : list A -> list A -> Prop := + | permut_refl : forall l, permut l l + | permut_cons : forall a l0 l1, permut l0 l1 -> permut (a :: l0) (a :: l1) + | permut_append : forall a l, permut (a :: l) (l ++ a :: nil) + | permut_trans : forall l0 l1 l2, permut l0 l1 -> permut l1 l2 -> permut l0 l2. + +.. coqtop:: in + + End Sort. + +A more complex example is the problem of permutation on closed lists. +The aim is to show that a closed list is a permutation of another one. 
+First, we define the permutation predicate as shown above. + + +.. coqtop:: none + + Require Import List. + + +.. coqtop:: all + + Ltac Permut n := + match goal with + | |- (permut _ ?l ?l) => apply permut_refl + | |- (permut _ (?a :: ?l1) (?a :: ?l2)) => + let newn := eval compute in (length l1) in + (apply permut_cons; Permut newn) + | |- (permut ?A (?a :: ?l1) ?l2) => + match eval compute in n with + | 1 => fail + | _ => + let l1' := constr:(l1 ++ a :: nil) in + (apply (permut_trans A (a :: l1) l1' l2); + [ apply permut_append | compute; Permut (pred n) ]) + end + end. + + +.. coqtop:: all + + Ltac PermutProve := + match goal with + | |- (permut _ ?l1 ?l2) => + match eval compute in (length l1 = length l2) with + | (?n = ?n) => Permut n + end + end. + +Next, we can write naturally the tactic and the result can be seen +above. We can notice that we use two top level definitions +``PermutProve`` and ``Permut``. The function to be called is +``PermutProve`` which computes the lengths of the two lists and calls +``Permut`` with the length if the two lists have the same +length. ``Permut`` works as expected. If the two lists are equal, it +concludes. Otherwise, if the lists have identical first elements, it +applies ``Permut`` on the tail of the lists. Finally, if the lists +have different first elements, it puts the first element of one of the +lists (here the second one which appears in the permut predicate) at +the end if that is possible, i.e., if the new first element has been +at this place previously. To verify that all rotations have been done +for a list, we use the length of the list as an argument for Permut +and this length is decremented for each rotation down to, but not +including, 1 because for a list of length ``n``, we can make exactly +``n−1`` rotations to generate at most ``n`` distinct lists. Here, it +must be noticed that we use the natural numbers of Coq for the +rotation counter. 
On Figure :ref:`TODO-9.1-tactic-language`, we can +see that it is possible to use usual natural numbers but they are only +used as arguments for primitive tactics and they cannot be handled, in +particular, we cannot make computations with them. So, a natural +choice is to use Coq data structures so that Coq makes the +computations (reductions) by eval compute in and we can get the terms +back by match. + +With ``PermutProve``, we can now prove lemmas as follows: + +.. coqtop:: in + + Lemma permut_ex1 : permut nat (1 :: 2 :: 3 :: nil) (3 :: 2 :: 1 :: nil). + +.. coqtop:: in + + Proof. PermutProve. Qed. + +.. coqtop:: in + + Lemma permut_ex2 : permut nat + (0 :: 1 :: 2 :: 3 :: 4 :: 5 :: 6 :: 7 :: 8 :: 9 :: nil) + (0 :: 2 :: 4 :: 6 :: 8 :: 9 :: 7 :: 5 :: 3 :: 1 :: nil). + + Proof. PermutProve. Qed. + + + +Deciding intuitionistic propositional logic +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _decidingintuitionistic1: + +.. coqtop:: all + + Ltac Axioms := + match goal with + | |- True => trivial + | _:False |- _ => elimtype False; assumption + | _:?A |- ?A => auto + end. + +.. _decidingintuitionistic2: + +.. coqtop:: all + + Ltac DSimplif := + repeat + (intros; + match goal with + | id:(~ _) |- _ => red in id + | id:(_ /\ _) |- _ => + elim id; do 2 intro; clear id + | id:(_ \/ _) |- _ => + elim id; intro; clear id + | id:(?A /\ ?B -> ?C) |- _ => + cut (A -> B -> C); + [ intro | intros; apply id; split; assumption ] + | id:(?A \/ ?B -> ?C) |- _ => + cut (B -> C); + [ cut (A -> C); + [ intros; clear id + | intro; apply id; left; assumption ] + | intro; apply id; right; assumption ] + | id0:(?A -> ?B),id1:?A |- _ => + cut B; [ intro; clear id0 | apply id0; assumption ] + | |- (_ /\ _) => split + | |- (~ _) => red + end). + +.. 
coqtop:: all + + Ltac TautoProp := + DSimplif; + Axioms || + match goal with + | id:((?A -> ?B) -> ?C) |- _ => + cut (B -> C); + [ intro; cut (A -> B); + [ intro; cut C; + [ intro; clear id | apply id; assumption ] + | clear id ] + | intro; apply id; intro; assumption ]; TautoProp + | id:(~ ?A -> ?B) |- _ => + cut (False -> B); + [ intro; cut (A -> False); + [ intro; cut B; + [ intro; clear id | apply id; assumption ] + | clear id ] + | intro; apply id; red; intro; assumption ]; TautoProp + | |- (_ \/ _) => (left; TautoProp) || (right; TautoProp) + end. + +The pattern matching on goals allows a complete and so a powerful +backtracking when returning tactic values. An interesting application +is the problem of deciding intuitionistic propositional logic. +Considering the contraction-free sequent calculi LJT* of Roy Dyckhoff +:ref:`TODO-56-biblio`, it is quite natural to code such a tactic +using the tactic language as shown on figures: :ref:`Deciding +intuitionistic propositions (1) <decidingintuitionistic1>` and +:ref:`Deciding intuitionistic propositions (2) +<decidingintuitionistic2>`. The tactic ``Axioms`` tries to conclude +using usual axioms. The tactic ``DSimplif`` applies all the reversible +rules of Dyckhoff’s system. Finally, the tactic ``TautoProp`` (the +main tactic to be called) simplifies with ``DSimplif``, tries to +conclude with ``Axioms`` and tries several paths using the +backtracking rules (one of the four Dyckhoff’s rules for the left +implication to get rid of the contraction and the right or). + +For example, with ``TautoProp``, we can prove tautologies like those: + +.. coqtop:: in + + Lemma tauto_ex1 : forall A B:Prop, A /\ B -> A \/ B. + +.. coqtop:: in + + Proof. TautoProp. Qed. + +.. coqtop:: in + + Lemma tauto_ex2 : + forall A B:Prop, (~ ~ B -> B) -> (A -> B) -> ~ ~ A -> B. + +.. coqtop:: in + + Proof. TautoProp. Qed. 
+ + +Deciding type isomorphisms +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A more tricky problem is to decide equalities between types and modulo +isomorphisms. Here, we choose to use the isomorphisms of the simply +typed λ-calculus with Cartesian product and unit type (see, for +example, [:ref:`TODO-45`]). The axioms of this λ-calculus are given below. + +.. coqtop:: in reset + + Open Scope type_scope. + +.. coqtop:: in + + Section Iso_axioms. + +.. coqtop:: in + + Variables A B C : Set. + +.. coqtop:: in + + Axiom Com : A * B = B * A. + + Axiom Ass : A * (B * C) = A * B * C. + + Axiom Cur : (A * B -> C) = (A -> B -> C). + + Axiom Dis : (A -> B * C) = (A -> B) * (A -> C). + + Axiom P_unit : A * unit = A. + + Axiom AR_unit : (A -> unit) = unit. + + Axiom AL_unit : (unit -> A) = A. + +.. coqtop:: in + + Lemma Cons : B = C -> A * B = A * C. + + Proof. + + intro Heq; rewrite Heq; reflexivity. + + Qed. + +.. coqtop:: in + + End Iso_axioms. + + + +.. _typeisomorphism1: + +.. coqtop:: all + + Ltac DSimplif trm := + match trm with + | (?A * ?B * ?C) => + rewrite <- (Ass A B C); try MainSimplif + | (?A * ?B -> ?C) => + rewrite (Cur A B C); try MainSimplif + | (?A -> ?B * ?C) => + rewrite (Dis A B C); try MainSimplif + | (?A * unit) => + rewrite (P_unit A); try MainSimplif + | (unit * ?B) => + rewrite (Com unit B); try MainSimplif + | (?A -> unit) => + rewrite (AR_unit A); try MainSimplif + | (unit -> ?B) => + rewrite (AL_unit B); try MainSimplif + | (?A * ?B) => + (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) + | (?A -> ?B) => + (DSimplif A; try MainSimplif) || (DSimplif B; try MainSimplif) + end + with MainSimplif := + match goal with + | |- (?A = ?B) => try DSimplif A; try DSimplif B + end. + +.. coqtop:: all + + Ltac Length trm := + match trm with + | (_ * ?B) => let succ := Length B in constr:(S succ) + | _ => constr:(1) + end. + +.. coqtop:: all + + Ltac assoc := repeat rewrite <- Ass. + + +.. _typeisomorphism2: + +.. 
coqtop:: all + + Ltac DoCompare n := + match goal with + | [ |- (?A = ?A) ] => reflexivity + | [ |- (?A * ?B = ?A * ?C) ] => + apply Cons; let newn := Length B in + DoCompare newn + | [ |- (?A * ?B = ?C) ] => + match eval compute in n with + | 1 => fail + | _ => + pattern (A * B) at 1; rewrite Com; assoc; DoCompare (pred n) + end + end. + +.. coqtop:: all + + Ltac CompareStruct := + match goal with + | [ |- (?A = ?B) ] => + let l1 := Length A + with l2 := Length B in + match eval compute in (l1 = l2) with + | (?n = ?n) => DoCompare n + end + end. + +.. coqtop:: all + + Ltac IsoProve := MainSimplif; CompareStruct. + + +The tactic to judge equalities modulo this axiomatization can be +written as shown on these figures: :ref:`type isomorphism tactic (1) +<typeisomorphism1>` and :ref:`type isomorphism tactic (2) +<typeisomorphism2>`. The algorithm is quite simple. Types are reduced +using axioms that can be oriented (this done by ``MainSimplif``). The +normal forms are sequences of Cartesian products without Cartesian +product in the left component. These normal forms are then compared +modulo permutation of the components (this is done by +``CompareStruct``). The main tactic to be called and realizing this +algorithm isIsoProve. + +Here are examples of what can be solved by ``IsoProve``. + +.. coqtop:: in + + Lemma isos_ex1 : + forall A B:Set, A * unit * B = B * (unit * A). + Proof. + intros; IsoProve. + Qed. + +.. coqtop:: in + + Lemma isos_ex2 : + forall A B C:Set, + (A * unit -> B * (C * unit)) = (A * unit -> (C -> unit) * C) * (unit -> A -> B). + Proof. + intros; IsoProve. + Qed. diff --git a/doc/sphinx/proof-engine/ssreflect-proof-language.rst b/doc/sphinx/proof-engine/ssreflect-proof-language.rst new file mode 100644 index 0000000000..61dffa0243 --- /dev/null +++ b/doc/sphinx/proof-engine/ssreflect-proof-language.rst @@ -0,0 +1,5497 @@ +.. include:: ../replaces.rst + +.. 
_thessreflectprooflanguage: + +------------------------------ + The |SSR| proof language +------------------------------ + +:Source: https://coq.inria.fr/distrib/current/refman/ssreflect.html +:Converted by: Enrico Tassi + +Author: Georges Gonthier, Assia Mahboubi, Enrico Tassi + + +Introduction +------------ + +This chapter describes a set of tactics known as |SSR| originally +designed to provide support for the so-called *small scale reflection* +proof methodology. Despite the original purpose this set of tactic is +of general interest and is available in |Coq| starting from version 8.7. + +|SSR| was developed independently of the tactics described in +Chapter :ref:`tactics`. Indeed the scope of the tactics part of |SSR| largely +overlaps with the standard set of tactics. Eventually the overlap will +be reduced in future releases of |Coq|. + +Proofs written in |SSR| typically look quite different from the +ones written using only tactics as per Chapter :ref:`tactics`. We try to +summarise here the most “visible” ones in order to help the reader +already accustomed to the tactics described in Chapter :ref:`tactics` to read +this chapter. + +The first difference between the tactics described in this chapter and the +tactics described in Chapter :ref:`tactics` is the way hypotheses are managed +(we call this *bookkeeping*). In Chapter :ref:`tactics` the most common +approach is to avoid moving explicitly hypotheses back and forth between the +context and the conclusion of the goal. On the contrary in |SSR| all +bookkeeping is performed on the conclusion of the goal, using for that +purpose a couple of syntactic constructions behaving similar to tacticals +(and often named as such in this chapter). The ``:`` tactical moves hypotheses +from the context to the conclusion, while ``=>`` moves hypotheses from the +conclusion to the context, and in moves back and forth an hypothesis from the +context to the conclusion for the time of applying an action to it. 
+ +While naming hypotheses is commonly done by means of an ``as`` clause in the +basic model of Chapter :ref:`tactics`, it is here to ``=>`` that this task is +devoted. Tactics frequently leave new assumptions in the conclusion, and are +often followed by ``=>`` to explicitly name them. While generalizing the +goal is normally not explicitly needed in Chapter :ref:`tactics`, it is an +explicit operation performed by ``:``. + +Beside the difference of bookkeeping model, this chapter includes +specific tactics which have no explicit counterpart in Chapter :ref:`tactics` +such as tactics to mix forward steps and generalizations as generally +have or without loss. + +|SSR| adopts the point of view that rewriting, definition +expansion and partial evaluation participate all to a same concept of +rewriting a goal in a larger sense. As such, all these functionalities +are provided by the rewrite tactic. + +|SSR| includes a little language of patterns to select subterms in +tactics or tacticals where it matters. Its most notable application is +in the rewrite tactic, where patterns are used to specify where the +rewriting step has to take place. + +Finally, |SSR| supports so-called reflection steps, typically +allowing to switch back and forth between the computational view and +logical view of a concept. + +To conclude it is worth mentioning that |SSR| tactics can be mixed +with non |SSR| tactics in the same proof, or in the same Ltac +expression. The few exceptions to this statement are described in +section :ref:`compatibility_issues_ssr`. + + +Acknowledgments +~~~~~~~~~~~~~~~ + +The authors would like to thank Frédéric Blanqui, François Pottier and +Laurence Rideau for their comments and suggestions. + + +Usage +----- + + +Getting started +~~~~~~~~~~~~~~~ + +To be available, the tactics presented in this manual need the +following minimal set of libraries to loaded: ``ssreflect.v``, +``ssrfun.v`` and ``ssrbool.v``. 
Moreover, these tactics come with a methodology
specific to the authors of |SSR|, which requires a few options
to be set differently from their defaults. All in all,
this corresponds to working in the following context:
+ + Note that the full + syntax of |SSR|’s rewrite and reserved identifiers are enabled + only if the ssreflect module has been required and if ``SsrSyntax`` has + been imported. Thus a file that requires (without importing) ssreflect + and imports ``SsrSyntax``, can be required and imported without + automatically enabling |SSR|’s extended rewrite syntax and + reserved identifiers. ++ Some user notations (in particular, defining an infix ``;``) might + interfere with the "open term", parenthesis free, syntax of tactics + such as have, set and pose. ++ The generalization of if statements to non-Boolean conditions is turned off + by |SSR|, because it is mostly subsumed by Coercion to ``bool`` of the + ``sumXXX`` types (declared in ``ssrfun.v``) and the ``if`` *term* ``is`` *pattern* ``then`` + *term* ``else`` *term* construct (see :ref:`pattern_conditional_ssr`). To use the + generalized form, turn off the |SSR| Boolean if notation using the command: + ``Close Scope boolean_if_scope``. ++ The following two options can be unset to disable the incompatible + rewrite syntax and allow reserved identifiers to appear in scripts. + + .. coqtop:: in + + Unset SsrRewrite. + Unset SsrIdents. + + +|Gallina| extensions +-------------------- + +Small-scale reflection makes an extensive use of the programming +subset of |Gallina|, |Coq|’s logical specification language. This subset +is quite suited to the description of functions on representations, +because it closely follows the well-established design of the ML +programming language. The |SSR| extension provides three additions +to |Gallina|, for pattern assignment, pattern testing, and polymorphism; +these mitigate minor but annoying discrepancies between |Gallina| and +ML. + + +Pattern assignment +~~~~~~~~~~~~~~~~~~ + +The |SSR| extension provides the following construct for +irrefutable pattern matching, that is, destructuring assignment: + +.. 
prodn:: + term += let: @pattern := @term in @term + +Note the colon ``:`` after the ``let`` keyword, which avoids any ambiguity +with a function definition or |Coq|’s basic destructuring let. The let: +construct differs from the latter in that + + ++ The pattern can be nested (deep pattern matching), in particular, + this allows expression of the form: + +.. coqtop:: in + + let: exist (x, y) p_xy := Hp in … . + ++ The destructured constructor is explicitly given in the pattern, and + is used for type inference. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Definition f u := let: (m, n) := u in m + n. + Check f. + + Using :g:`let:` Coq infers a type for :g:`f`, + whereas with a usual ``let`` the same term requires an extra type + annotation in order to type check. + + .. coqtop:: reset all + + Definition f u := let (m, n) := u in m + n. + + +The ``let:`` construct is just (more legible) notation for the primitive +|Gallina| expression + +.. coqtop:: in + + match term with pattern => term end. + +The |SSR| destructuring assignment supports all the dependent +match annotations; the full syntax is + +.. prodn:: + term += let: @pattern {? as @ident} {? in @pattern} := @term {? return @term} in @term + +where the second :token:`pattern` and the second :token:`term` are *types*. + +When the ``as`` and ``return`` keywords are both present, then :token:`ident` is bound +in both the second :token:`pattern` and the second :token:`term`; variables +in the optional type :token:`pattern` are bound only in the second term, and +other variables in the first :token:`pattern` are bound only in the third +:token:`term`, however. + + +.. _pattern_conditional_ssr: + +Pattern conditional +~~~~~~~~~~~~~~~~~~~ + +The following construct can be used for a refutable pattern matching, +that is, pattern testing: + +.. 
prodn:: + term += if @term is @pattern then @term else @term + +Although this construct is not strictly ML (it does exist in variants +such as the pattern calculus or the ρ-calculus), it turns out to be +very convenient for writing functions on representations, because most +such functions manipulate simple data types such as Peano integers, +options, lists, or binary trees, and the pattern conditional above is +almost always the right construct for analyzing such simple types. For +example, the null and all list function(al)s can be defined as follows: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Variable d: Set. + Fixpoint null (s : list d) := + if s is nil then true else false. + Variable a : d -> bool. + Fixpoint all (s : list d) : bool := + if s is cons x s' then a x && all s' else true. + +The pattern conditional also provides a notation for destructuring +assignment with a refutable pattern, adapted to the pure functional +setting of |Gallina|, which lacks a ``Match_Failure`` exception. + +Like ``let:`` above, the ``if…is`` construct is just (more legible) notation +for the primitive |Gallina| expression: + +.. coqtop:: in + + match term with pattern => term | _ => term end. + +Similarly, it will always be displayed as the expansion of this form +in terms of primitive match expressions (where the default expression +may be replicated). + +Explicit pattern testing also largely subsumes the generalization of +the if construct to all binary data types; compare: + +.. coqtop:: in + + if term is inl _ then term else term. + +and: + +.. coqtop:: in + + if term then term else term. + +The latter appears to be marginally shorter, but it is quite +ambiguous, and indeed often requires an explicit annotation +``(term : {_} + {_})`` to type-check, which evens the character count. 
+ +Therefore, |SSR| restricts by default the condition of a plain if +construct to the standard ``bool`` type; this avoids spurious type +annotations. + +.. example:: + + .. coqtop:: all + + Definition orb b1 b2 := if b1 then true else b2. + +As pointed out in section :ref:`compatibility_issues_ssr`, +this restriction can be removed with +the command: + +``Close Scope boolean_if_scope.`` + +Like ``let:`` above, the ``if-is-then-else`` +construct supports +the dependent match annotations: + +.. prodn:: + term += if @term is @pattern as @ident in @pattern return @term then @term else @term + +As in ``let:`` the variable :token:`ident` (and those in the type pattern) +are bound in the second :token:`term`; :token:`ident` is also bound in the +third :token:`term` (but not in the fourth :token:`term`), while the +variables in the first :token:`pattern` are bound only in the third +:token:`term`. + +Another variant allows to treat the ``else`` case first: + +.. prodn:: + term += if @term isn't @pattern then @term else @term + +Note that :token:`pattern` eventually binds variables in the third +:token:`term` and not in the second :token:`term`. + +.. _parametric_polymorphism_ssr: + +Parametric polymorphism +~~~~~~~~~~~~~~~~~~~~~~~ + +Unlike ML, polymorphism in core |Gallina| is explicit: the type +parameters of polymorphic functions must be declared explicitly, and +supplied at each point of use. However, |Coq| provides two features to +suppress redundant parameters: + + ++ Sections are used to provide (possibly implicit) parameters for a + set of definitions. ++ Implicit arguments declarations are used to tell |Coq| to use type + inference to deduce some parameters from the context at each point of + call. + + +The combination of these features provides a fairly good emulation of +ML-style polymorphism, but unfortunately this emulation breaks down +for higher-order programming. 
Implicit arguments are indeed not +inferred at all points of use, but only at points of call, leading to +expressions such as + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + Variable T : Type. + Variable null : forall T : Type, T -> bool. + Variable all : (T -> bool) -> list T -> bool. + + .. coqtop:: all + + Definition all_null (s : list T) := all (@null T) s. + +Unfortunately, such higher-order expressions are quite frequent in +representation functions, especially those which use |Coq|'s +``Structures`` to emulate Haskell type classes. + +Therefore, |SSR| provides a variant of |Coq|’s implicit argument +declaration, which causes |Coq| to fill in some implicit parameters at +each point of use, e.g., the above definition can be written: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + Variable T : Type. + Variable null : forall T : Type, T -> bool. + Variable all : (T -> bool) -> list T -> bool. + + + .. coqtop:: all undo + + Prenex Implicits null. + Definition all_null (s : list T) := all null s. + +Better yet, it can be omitted entirely, since ``all_null s`` isn’t much of +an improvement over ``all null s``. + +The syntax of the new declaration is + +.. cmd:: Prenex Implicits {+ @ident}. + +Let us denote :math:`c_1` … :math:`c_n` the list of identifiers given to a +``Prenex Implicits`` command. The command checks that each ci is the name of +a functional constant, whose implicit arguments are prenex, i.e., the first +:math:`n_i > 0` arguments of :math:`c_i` are implicit; then it assigns +``Maximal Implicit`` status to these arguments. 
+
+As these prenex implicit arguments are ubiquitous and have often large
+display strings, it is strongly recommended to change the default
+display settings of |Coq| so that they are not printed (except after
+a ``Set Printing All`` command). All |SSR| library files thus start
+with the incantation
+
+.. coqtop:: all undo
+
+   Set Implicit Arguments.
+   Unset Strict Implicit.
+   Unset Printing Implicit Defensive.
+
+
+Anonymous arguments
+~~~~~~~~~~~~~~~~~~~
+
+When in a definition, the type of a certain argument is mandatory, but
+not its name, one usually uses “arrow” abstractions for prenex
+arguments, or the ``(_ : term)`` syntax for inner arguments. In |SSR|,
+the latter can be replaced by the open syntax ``of term`` or
+(equivalently) ``& term``, which are both syntactically equivalent to a
+``(_ : term)`` expression.
+
+For instance, the usual two-constructor polymorphic type list, i.e.
+the one of the standard List library, can be defined by the following
+declaration:
+
+.. example::
+
+   .. coqtop:: reset
+
+      From Coq Require Import ssreflect.
+      Set Implicit Arguments.
+      Unset Strict Implicit.
+      Unset Printing Implicit Defensive.
+
+   .. coqtop:: all
+
+      Inductive list (A : Type) : Type := nil | cons of A & list A.
+
+
+Wildcards
+~~~~~~~~~
+
+The terms passed as arguments to |SSR| tactics can contain
+*holes*, materialized by wildcards ``_``. Since |SSR| allows a more
+powerful form of type inference for these arguments, it enhances the
+possibilities of using such wildcards. These holes are in particular
+used as a convenient shorthand for abstractions, especially in local
+definitions or type expressions.
+
+Wildcards may be interpreted as abstractions (see for example sections
+:ref:`definitions_ssr` and :ref:`structure_ssr`), or their content can be
+inferred from the whole context of the goal (see for example section
+:ref:`abbreviations_ssr`).
+
+
+.. 
_definitions_ssr:
+
+Definitions
+~~~~~~~~~~~
+
+The pose tactic allows to add a defined constant to a proof context.
+|SSR| generalizes this tactic in several ways. In particular, the
+|SSR| pose tactic supports *open syntax*: the body of the
+definition does not need surrounding parentheses. For instance:
+
+.. coqtop:: reset
+
+   From Coq Require Import ssreflect.
+   Set Implicit Arguments.
+   Unset Strict Implicit.
+   Unset Printing Implicit Defensive.
+   Lemma test : True.
+   Proof.
+
+.. coqtop:: in
+
+   pose t := x + y.
+
+is a valid tactic expression.
+
+The pose tactic is also improved for the local definition of higher
+order terms. Local definitions of functions can use the same syntax as
+global ones. For example the tactic ``pose`` supports parameters:
+
+.. example::
+
+   .. coqtop:: all
+
+      Lemma test : True.
+      pose f x y := x + y.
+
+The |SSR| pose tactic also supports (co)fixpoints, by providing
+the local counterpart of the ``Fixpoint f := …`` and ``CoFixpoint f := …``
+constructs. For instance, the following tactic:
+
+.. coqtop:: in
+
+   pose fix f (x y : nat) {struct x} : nat :=
+     if x is S p then S (f p y) else 0.
+
+defines a local fixpoint ``f``, which mimics the standard plus operation
+on natural numbers.
+
+Similarly, local cofixpoints can be defined by a tactic of the form:
+
+.. coqtop:: in
+
+   pose cofix f (arg : T) := … .
+
+The possibility to include wildcards in the body of the definitions
+offers a smooth way of defining local abstractions. The type of
+“holes” is guessed by type inference, and the holes are abstracted.
+For instance the tactic:
+
+.. coqtop:: in
+
+   pose f := _ + 1.
+
+is shorthand for:
+
+.. coqtop:: in
+
+   pose f n := n + 1.
+
+When the local definition of a function involves both arguments and
+holes, hole abstractions appear first. For instance, the tactic:
+
+.. coqtop:: in
+
+   pose f x := x + _.
+
+is shorthand for:
+
+.. coqtop:: in
+
+   pose f n x := x + n.
+ +The interaction of the pose tactic with the interpretation of implicit +arguments results in a powerful and concise syntax for local +definitions involving dependent types. For instance, the tactic: + +.. coqtop:: in + + pose f x y := (x, y). + +adds to the context the local definition: + +.. coqtop:: in + + pose f (Tx Ty : Type) (x : Tx) (y : Ty) := (x, y). + +The generalization of wildcards makes the use of the pose tactic +resemble ML-like definitions of polymorphic functions. + + +.. _abbreviations_ssr: + + +Abbreviations +~~~~~~~~~~~~~ + +The |SSR| set tactic performs abbreviations: it introduces a +defined constant for a subterm appearing in the goal and/or in the +context. + +|SSR| extends the set tactic by supplying: + + ++ an open syntax, similarly to the pose tactic; ++ a more aggressive matching algorithm; ++ an improved interpretation of wildcards, taking advantage of the + matching algorithm; ++ an improved occurrence selection mechanism allowing to abstract only + selected occurrences of a term. + + +The general syntax of this tactic is + +.. tacn:: set @ident {? : @term } := {? @occ_switch } @term + :name: set (ssreflect) + +.. prodn:: + occ_switch ::= { {? + %| - } {* @num } } + +where: + ++ :token:`ident` is a fresh identifier chosen by the user. ++ term 1 is an optional type annotation. The type annotation term 1 + can be given in open syntax (no surrounding parentheses). If no + :token:`occ_switch` (described hereafter) is present, + it is also the case for the second :token:`term`. + On the other hand, in presence of :token:`occ_switch`, parentheses + surrounding the second :token:`term` are mandatory. ++ In the occurrence switch :token:`occ_switch`, if the first element of the + list is a natural, this element should be a number, and not an Ltac + variable. The empty list {} is not interpreted as a valid occurrence + switch. + +The tactic: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. 
+ Unset Strict Implicit. + Unset Printing Implicit Defensive. + Axiom f : nat -> nat. + + .. coqtop:: all + + Lemma test x : f x + f x = f x. + set t := f _. + + .. coqtop:: none + + Undo. + + .. coqtop:: all + + set t := {2}(f _). + + +The type annotation may contain wildcards, which will be filled +with the appropriate value by the matching process. + +The tactic first tries to find a subterm of the goal matching +the second :token:`term` +(and its type), and stops at the first subterm it finds. Then +the occurrences of this subterm selected by the optional :token:`occ_switch` +are replaced by :token:`ident` and a definition ``ident := term`` +is added to the +context. If no :token:`occ_switch` is present, then all the occurrences are +abstracted. + + +Matching +```````` + +The matching algorithm compares a pattern ``term`` with a subterm of the +goal by comparing their heads and then pairwise unifying their +arguments (modulo conversion). Head symbols match under the following +conditions: + + ++ If the head of ``term`` is a constant, then it should be syntactically + equal to the head symbol of the subterm. ++ If this head is a projection of a canonical structure, then + canonical structure equations are used for the matching. ++ If the head of term is *not* a constant, the subterm should have the + same structure (λ abstraction,let…in structure …). ++ If the head of ``term`` is a hole, the subterm should have at least as + many arguments as ``term``. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test (x y z : nat) : x + y = z. + set t := _ x. + ++ In the special case where ``term`` is of the form + ``(let f := t0 in f) t1 … tn`` , then the pattern ``term`` is treated + as ``(_ t1 … tn)``. 
For each + subterm in the goal having the form ``(A u1 … um)`` with m ≥ n, the + matching algorithm successively tries to find the largest partial + application ``(A u1 … uj)`` convertible to the head ``t0`` of ``term``. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : (let f x y z := x + y + z in f 1) 2 3 = 6. + set t := (let g y z := S y + z in g) 2. + + The notation ``unkeyed`` defined in ``ssreflect.v`` is a shorthand for + the degenerate term ``let x := … in x``. + +Moreover: + ++ Multiple holes in ``term`` are treated as independent placeholders. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test x y z : x + y = z. + set t := _ + _. + ++ The type of the subterm matched should fit the type (possibly casted + by some type annotations) of the pattern ``term``. ++ The replacement of the subterm found by the instantiated pattern + should not capture variables. In the example above ``x`` is bound + and should not be captured. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : forall x : nat, x + 1 = 0. + set t := _ + 1. + ++ Typeclass inference should fill in any residual hole, but matching + should never assign a value to a global existential variable. + + +.. _occurrence_selection_ssr: + +Occurrence selection +```````````````````` + +|SSR| provides a generic syntax for the selection of occurrences +by their position indexes. 
These *occurrence switches* are shared by +all |SSR| tactics which require control on subterm selection like +rewriting, generalization, … + +An *occurrence switch* can be: + ++ A list natural numbers ``{+ n1 … nm}`` + of occurrences affected by the tactic. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Axiom f : nat -> nat. + + .. coqtop:: all + + Lemma test : f 2 + f 8 = f 2 + f 2. + set x := {+1 3}(f 2). + + Notice that some occurrences of a given term may be + hidden to the user, for example because of a notation. The vernacular + ``Set Printing All`` command displays all these hidden occurrences and + should be used to find the correct coding of the occurrences to be + selected [#1]_. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Notation "a < b":= (le (S a) b). + Lemma test x y : x < y -> S x < S y. + set t := S x. + ++ A list of natural numbers between ``{n1 … nm}``. + This is equivalent to the previous ``{+ n1 … nm}`` but the list + should start with a number, and not with an Ltac variable. ++ A list ``{- n1 … nm}`` of occurrences *not* to be affected by the + tactic. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Axiom f : nat -> nat. + + .. coqtop:: all + + Lemma test : f 2 + f 8 = f 2 + f 2. + set x := {-2}(f 2). + + + Note that, in this goal, it behaves like ``set x := {1 3}(f 2).`` ++ In particular, the switch ``{+}`` selects *all* the occurrences. This + switch is useful to turn off the default behavior of a tactic which + automatically clears some assumptions (see section :ref:`discharge_ssr` for + instance). 
++ The switch ``{-}`` imposes that *no* occurrences of the term should be
+  affected by the tactic. The tactic: ``set x := {-}(f 2).`` leaves the goal
+  unchanged and adds the definition ``x := f 2`` to the context. This kind
+  of tactic may be used to take advantage of the power of the matching
+  algorithm in a local definition, instead of copying large terms by
+  hand.
+
+It is important to remember that matching *precedes* occurrence
+selection.
+
+ .. example::
+
+    .. coqtop:: reset
+
+       From Coq Require Import ssreflect.
+       Set Implicit Arguments.
+       Unset Strict Implicit.
+       Unset Printing Implicit Defensive.
+
+    .. coqtop:: all
+
+       Lemma test x y z : x + y = x + y + z.
+       set a := {2}(_ + _).
+
+Hence, in the following goal, the same tactic fails since there is
+only one occurrence of the selected term.
+
+ .. example::
+
+    .. coqtop:: reset
+
+       From Coq Require Import ssreflect.
+       Set Implicit Arguments.
+       Unset Strict Implicit.
+       Unset Printing Implicit Defensive.
+
+    .. coqtop:: all
+
+       Lemma test x y z : (x + y) + (z + z) = z + z.
+       set a := {2}(_ + _).
+
+
+.. _basic_localization_ssr:
+
+Basic localization
+~~~~~~~~~~~~~~~~~~
+
+It is possible to define an abbreviation for a term appearing in the
+context of a goal thanks to the in tactical.
+
+A tactic of the form:
+
+.. tacv:: set @ident := @term in {+ @ident}
+
+introduces a defined constant called ``x`` in the context, and folds it in
+the context entries mentioned on the right hand side of ``in``.
+The body of ``x`` is the first subterm matching these context entries
+(taken in the given order).
+
+A tactic of the form:
+
+.. tacv:: set @ident := @term in {+ @ident} *
+
+matches term and then folds ``x`` similarly in all the given context entries
+but also folds ``x`` in the goal.
+
+ .. example::
+
+    .. coqtop:: reset
+
+       From Coq Require Import ssreflect.
+       Set Implicit Arguments.
+       Unset Strict Implicit.
+       Unset Printing Implicit Defensive.
+
+    .. 
coqtop:: all undo + + Lemma test x t (Hx : x = 3) : x + t = 4. + set z := 3 in Hx. + +If the localization also mentions the goal, then the result is the following one: + + .. example:: + + .. coqtop:: all + + Lemma test x t (Hx : x = 3) : x + t = 4. + set z := 3 in Hx * . + +Indeed, remember that 4 is just a notation for (S 3). + +The use of the ``in`` tactical is not limited to the localization of +abbreviations: for a complete description of the in tactical, see +section :ref:`bookkeeping_ssr` and :ref:`localization_ssr`. + + +.. _basic_tactics_ssr: + +Basic tactics +------------- + +A sizable fraction of proof scripts consists of steps that do not +"prove" anything new, but instead perform menial bookkeeping tasks +such as selecting the names of constants and assumptions or splitting +conjuncts. Although they are logically trivial, bookkeeping steps are +extremely important because they define the structure of the data-flow +of a proof script. This is especially true for reflection-based +proofs, which often involve large numbers of constants and +assumptions. Good bookkeeping consists in always explicitly declaring +(i.e., naming) all new constants and assumptions in the script, and +systematically pruning irrelevant constants and assumptions in the +context. This is essential in the context of an interactive +development environment (IDE), because it facilitates navigating the +proof, allowing to instantly "jump back" to the point at which a +questionable assumption was added, and to find relevant assumptions by +browsing the pruned context. While novice or casual |Coq| users may find +the automatic name selection feature convenient, the usage of such a +feature severely undermines the readability and maintainability of +proof scripts, much like automatic variable declaration in programming +languages. The |SSR| tactics are therefore designed to support +precise bookkeeping and to eliminate name generation heuristics. 
The
+bookkeeping features of |SSR| are implemented as tacticals (or
+pseudo-tacticals), shared across most |SSR| tactics, and thus form
+the foundation of the |SSR| proof language.
+
+
+.. _bookkeeping_ssr:
+
+Bookkeeping
+~~~~~~~~~~~
+
+During the course of a proof |Coq| always presents the user with a
+*sequent* whose general form is::
+
+    ci : Ti
+    …
+    dj := ej : Tj
+    …
+    Fk : Pk
+    …
+    =================
+    forall (xl : Tl ) …,
+    let ym := bm in … in
+    Pn -> … -> C
+
+The *goal* to be proved appears below the double line; above the line
+is the *context* of the sequent, a set of declarations of *constants*
+``ci``, *defined constants* ``dj``, and *facts* ``Fk`` that can be used to
+prove the goal (usually, ``Ti``, ``Tj : Type`` and ``Pk : Prop``).
+The various
+kinds of declarations can come in any order. The top part of the
+context consists of declarations produced by the Section
+commands ``Variable``, ``Let``, and ``Hypothesis``.
+This *section context* is never
+affected by the |SSR| tactics: they only operate on the lower part
+— the *proof context*. As in the figure above, the goal often
+decomposes into a series of (universally) quantified *variables*
+``(xl : Tl)``, local *definitions*
+``let ym := bm in``, and *assumptions*
+``Pn ->``,
+and a *conclusion* ``C`` (as in the context, variables, definitions, and
+assumptions can appear in any order). The conclusion is what actually
+needs to be proved — the rest of the goal can be seen as a part of the
+proof context that happens to be “below the line”.
+
+However, although they are logically equivalent, there are fundamental
+differences between constants and facts on the one hand, and variables
+and assumptions on the other. Constants and facts are *unordered*,
+but *named* explicitly in the proof text; variables and assumptions
+are *ordered*, but *unnamed*: the display names of variables may
+change at any time because of α-conversion.
+ +Similarly, basic deductive steps such as apply can only operate on the +goal because the |Gallina| terms that control their action (e.g., the +type of the lemma used by ``apply``) only provide unnamed bound variables. +[#2]_ Since the proof script can only refer directly to the context, it +must constantly shift declarations from the goal to the context and +conversely in between deductive steps. + +In |SSR| these moves are performed by two *tacticals* ``=>`` and +``:``, so that the bookkeeping required by a deductive step can be +directly associated to that step, and that tactics in an |SSR| +script correspond to actual logical steps in the proof rather than +merely shuffle facts. Still, some isolated bookkeeping is unavoidable, +such as naming variables and assumptions at the beginning of a +proof. |SSR| provides a specific ``move`` tactic for this purpose. + +Now ``move`` does essentially nothing: it is mostly a placeholder for +``=>`` and ``:``. The ``=>`` tactical moves variables, local definitions, +and assumptions to the context, while the ``:`` tactical moves facts and +constants to the goal. + +.. example:: + + For example, the proof of [#3]_ + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma subnK : forall m n, n <= m -> m - n + n = m. + + might start with + + .. coqtop:: all + + move=> m n le_n_m. + + where move does nothing, but ``=> m n le_m_n`` changes + the variables and assumption of the goal in the constants + ``m n : nat`` and the fact ``le_n_m : n <= m``, thus exposing the + conclusion ``m - n + n = m``. + + The ``:`` tactical is the converse of ``=>``, indeed it removes facts and + constants from the context by turning them into variables and + assumptions. + + .. coqtop:: all + + move: m le_n_m. 
+
+   turns back ``m`` and ``le_n_m`` into a variable and an assumption,
+   removing them from the proof context, and changing the goal to
+   ``forall m, n <= m -> m - n + n = m``
+   which can be proved by induction on ``n`` using ``elim: n``.
+
+Because they are tacticals, ``:`` and ``=>`` can be combined, as in
+
+.. coqtop:: in
+
+   move: m le_n_m => p le_n_p.
+
+simultaneously renames ``m`` and ``le_n_m`` into ``p`` and ``le_n_p``,
+respectively, by first turning them into unnamed variables, then
+turning these variables back into constants and facts.
+
+Furthermore, |SSR| redefines the basic |Coq| tactics ``case``, ``elim``,
+and ``apply`` so that they can take better advantage of
+``:`` and ``=>``. In their
+|SSR| variants, these tactics operate on the first variable or
+constant of the goal and they do not use or change the proof context.
+The ``:`` tactical is used to operate on an element in the context.
+
+.. example::
+
+   For instance the proof of ``subnK`` could continue with ``elim: n``.
+   Instead of ``elim n`` (note, no colon), this has the advantage of
+   removing ``n`` from the context. Better yet, this ``elim`` can be combined
+   with the previous move and with the branching version of the ``=>`` tactical
+   (described in :ref:`introduction_ssr`),
+   to encapsulate the inductive step in a single
+   command:
+
+   .. coqtop:: reset
+
+      From Coq Require Import ssreflect.
+      Set Implicit Arguments.
+      Unset Strict Implicit.
+      Unset Printing Implicit Defensive.
+
+   .. coqtop:: all
+
+      Lemma subnK : forall m n, n <= m -> m - n + n = m.
+      move=> m n le_n_m.
+      elim: n m le_n_m => [|n IHn] m => [_ | lt_n_m].
+
+   which breaks down the proof into two subgoals, the second one
+   having in its context
+   ``lt_n_m : S n <= m`` and
+   ``IHn : forall m, n <= m -> m - n + n = m``.
+
+The ``:`` and ``=>`` tacticals can be explained very simply if one views
+the goal as a stack of variables and assumptions piled on a conclusion:
+
++ ``tactic : a b c`` pushes the context constants ``a``, ``b``, ``c`` as goal
+  variables *before* performing tactic.
++ ``tactic => a b c`` pops the top three goal variables as context
+  constants ``a``, ``b``, ``c``, *after* tactic has been performed.
+
+These pushes and pops do not need to balance out as in the examples
+above, so ``move: m le_n_m => p``
+would rename ``m`` into ``p``, but leave an extra assumption ``n <= p``
+in the goal.
+
+Basic tactics like apply and elim can also be used without the ``:``
+tactical: for example we can directly start a proof of ``subnK`` by
+induction on the top variable ``m`` with
+
+.. coqtop:: in
+
+   elim=> [|m IHm] n le_n.
+
+The general form of the localization tactical in is also best
+explained in terms of the goal stack:
+
+.. coqtop:: in
+
+   tactic in a H1 H2 *.
+
+is basically equivalent to
+
+.. coqtop:: in
+
+   move: a H1 H2; tactic => a H1 H2.
+
+
+with two differences: the in tactical will preserve the body of ``a`` if
+``a`` is a defined constant, and if the ``*`` is omitted it will use a
+temporary abbreviation to hide the statement of the goal from
+``tactic``.
+
+The general form of the in tactical can be used directly with the
+``move``, ``case`` and ``elim`` tactics, so that one can write
+
+.. coqtop:: in
+
+   elim: n => [|n IHn] in m le_n_m *.
+
+instead of
+
+.. coqtop:: in
+
+   elim: n m le_n_m => [|n IHn] m le_n_m.
+
+This is quite useful for inductive proofs that involve many facts.
+
+See section :ref:`localization_ssr` for
+the general syntax and presentation of the in
+tactical.
+
+
+.. _the_defective_tactics_ssr:
+
+The defective tactics
+~~~~~~~~~~~~~~~~~~~~~
+
+In this section we briefly present the three basic tactics performing
+context manipulations and the main backward chaining tool.
+
+
+The move tactic.
+```````````````` + +The move tactic, in its defective form, behaves like the primitive ``hnf`` +|Coq| tactic. For example, such a defective: + +.. tacn:: move + :name: move + +exposes the first assumption in the goal, i.e. its changes the +goal ``not False`` into ``False -> False``. + +More precisely, the ``move`` tactic inspects the goal and does nothing +(``idtac``) if an introduction step is possible, i.e. if the goal is a +product or a ``let…in``, and performs ``hnf`` otherwise. + +Of course this tactic is most often used in combination with the +bookkeeping tacticals (see section :ref:`introduction_ssr` and :ref:`discharge_ssr`). These +combinations mostly subsume the ``intros``, ``generalize``, ``revert``, ``rename``, +``clear`` and ``pattern`` tactics. + + +The case tactic +``````````````` + +The ``case`` tactic performs *primitive case analysis* on (co)inductive +types; specifically, it destructs the top variable or assumption of +the goal, exposing its constructor(s) and its arguments, as well as +setting the value of its type family indices if it belongs to a type +family (see section :ref:`type_families_ssr`). + +The |SSR| case tactic has a special behavior on equalities. If the +top assumption of the goal is an equality, the case tactic “destructs” +it as a set of equalities between the constructor arguments of its +left and right hand sides, as per the tactic injection. For example, +``case`` changes the goal + +.. coqtop:: in + + (x, y) = (1, 2) -> G. + +into + +.. coqtop:: in + + x = 1 -> y = 2 -> G. + +Note also that the case of |SSR| performs ``False`` elimination, even +if no branch is generated by this case operation. Hence the command: +``case.`` on a goal of the form ``False -> G`` will succeed and +prove the goal. + + +The elim tactic +``````````````` + +The ``elim`` tactic performs inductive elimination on inductive types. The +defective: + +.. 
tacn:: elim + :name: elim (ssreflect) + +tactic performs inductive elimination on a goal whose top assumption +has an inductive type. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test m : forall n : nat, m <= n. + elim. + + +.. _apply_ssr: + +The apply tactic +```````````````` + +The ``apply`` tactic is the main backward chaining tactic of the proof +system. It takes as argument any :token:`term` and applies it to the goal. +Assumptions in the type of :token:`term` that don’t directly match the goal +may generate one or more subgoals. + +In fact the |SSR| tactic: + +.. tacn:: apply + :name: apply (ssreflect) + +is a synonym for: + +.. coqtop:: in + + intro top; first [refine top | refine (top _) | refine (top _ _) | …]; clear top. + +where ``top`` is fresh name, and the sequence of refine tactics tries to +catch the appropriate number of wildcards to be inserted. Note that +this use of the refine tactic implies that the tactic tries to match +the goal up to expansion of constants and evaluation of subterms. + +|SSR|’s apply has a special behaviour on goals containing +existential metavariables of sort Prop. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Axiom lt_trans : forall a b c, a < b -> b < c -> a < c. + + .. coqtop:: all + + Lemma test : forall y, 1 < y -> y < 2 -> exists x : { n | n < 3 }, 0 < proj1_sig x. + move=> y y_gt1 y_lt2; apply: (ex_intro _ (exist _ y _)). + by apply: lt_trans y_lt2 _. + by move=> y_lt3; apply: lt_trans y_gt1. + + Note that the last ``_`` of the tactic + ``apply: (ex_intro _ (exist _ y _))`` + represents a proof that ``y < 3``. Instead of generating the goal + + .. coqtop:: in + + 0 < proj1_sig (exist (fun n : nat => n < 3) y ?Goal). 
+ + the system tries to prove ``y < 3`` calling the trivial tactic. + If it succeeds, let’s say because the context contains + ``H : y < 3``, then the + system generates the following goal: + + .. coqtop:: in + + 0 < proj1_sig (exist (fun n => n < 3) y H). + + Otherwise the missing proof is considered to be irrelevant, and is + thus discharged generating the two goals shown above. + + Last, the user can replace the trivial tactic by defining an Ltac + expression named ``ssrautoprop``. + + +.. _discharge_ssr: + +Discharge +~~~~~~~~~ + +The general syntax of the discharging tactical ``:`` is: + +.. tacn:: @tactic {? @ident } : {+ @d_item } {? @clear_switch } + +.. prodn:: + d_item ::= {? @occ_switch %| @clear_switch } @term +.. prodn:: + clear_switch ::= { {+ @ident } } + +with the following requirements: + ++ :token:`tactic` must be one of the four basic tactics described in :ref:`the_defective_tactics_ssr`, + i.e., ``move``, ``case``, ``elim`` or ``apply``, the ``exact`` + tactic (section :ref:`terminators_ssr`), + the ``congr`` tactic (section :ref:`congruence_ssr`), + or the application of the *view* + tactical ‘/’ (section :ref:`interpreting_assumptions_ssr`) to one of move, case, or elim. ++ The optional :token:`ident` specifies *equation generation* (section :ref:`generation_of_equations_ssr`), + and is only allowed if tactic is ``move``, ``case`` or ``elim``, or the + application of the view tactical ‘/’ (section :ref:`interpreting_assumptions_ssr`) to ``case`` or ``elim``. ++ An :token:`occ_switch` selects occurrences of :token:`term`, as in :ref:`abbreviations_ssr`; :token:`occ_switch` + is not allowed if :token:`tactic` is ``apply`` or ``exact``. ++ A clear item :token:`clear_switch` specifies facts and constants to be + deleted from the proof context (as per the clear tactic). + + +The ``:`` tactical first *discharges* all the :token:`d_item`, right to left, +and then performs tactic, i.e., for each :token:`d_item`, starting with the last one : + + +#. 
The |SSR| matching algorithm described in section :ref:`abbreviations_ssr` is + used to find occurrences of term in the goal, after filling any holes + ‘_’ in term; however if tactic is apply or exact a different matching + algorithm, described below, is used [#4]_. +#. These occurrences are replaced by a new variable; in particular, if + term is a fact, this adds an assumption to the goal. +#. If term is *exactly* the name of a constant or fact in the proof + context, it is deleted from the context, unless there is an + :token:`occ_switch`. + + +Finally, tactic is performed just after the first :token:`d_item` +has been generalized +— that is, between steps 2 and 3. The names listed in +the final :token:`clear_switch` (if it is present) are cleared first, before +:token:`d_item` n is discharged. + +Switches affect the discharging of a :token:`d_item` as follows: + + ++ An :token:`occ_switch` restricts generalization (step 2) to a specific subset + of the occurrences of term, as per section :ref:`abbreviations_ssr`, and prevents clearing (step + 3). ++ All the names specified by a :token:`clear_switch` are deleted from the + context in step 3, possibly in addition to term. + + +For example, the tactic: + +.. coqtop:: in + + move: n {2}n (refl_equal n). + ++ first generalizes ``(refl_equal n : n = n)``; ++ then generalizes the second occurrence of ``n``. ++ finally generalizes all the other occurrences of ``n``, and clears ``n`` + from the proof context (assuming n is a proof constant). + +Therefore this tactic changes any goal ``G`` into + +.. coqtop:: + + forall n n0 : nat, n = n0 -> G. + +where the name ``n0`` is picked by the |Coq| display function, and assuming +``n`` appeared only in ``G``. + +Finally, note that a discharge operation generalizes defined constants +as variables, and not as local definitions. To override this behavior, +prefix the name of the local definition with a ``@``, like in ``move: @n``. 
+ +This is in contrast with the behavior of the in tactical (see +section :ref:`localization_ssr`), which preserves local +definitions by default. + + +Clear rules +``````````` + +The clear step will fail if term is a proof constant that appears in +other facts; in that case either the facts should be cleared +explicitly with a :token:`clear_switch`, or the clear step should be disabled. +The latter can be done by adding an :token:`occ_switch` or simply by putting +parentheses around term: both +``move: (n).`` +and +``move: {+}n.`` +generalize ``n`` without clearing ``n`` from the proof context. + +The clear step will also fail if the :token:`clear_switch` contains a :token:`ident` that +is not in the *proof* context. Note that |SSR| never clears a +section constant. + +If tactic is ``move`` or ``case`` and an equation :token:`ident` is given, then clear +(step 3) for :token:`d_item` is suppressed (see section :ref:`generation_of_equations_ssr`). + + +Matching for apply and exact +```````````````````````````` + +The matching algorithm for :token:`d_item` of the |SSR| +``apply`` and ``exact`` +tactics exploits the type of the first :token:`d_item` to interpret +wildcards in the +other :token:`d_item` and to determine which occurrences of these should be +generalized. Therefore, occur switches are not needed for apply and +exact. + +Indeed, the |SSR| tactic ``apply: H x`` is equivalent to +``refine (@H _ … _ x); clear H x`` +with an appropriate number of wildcards between ``H`` and ``x``. + +Note that this means that matching for ``apply`` and ``exact`` has much more +context to interpret wildcards; in particular it can accommodate the +``_`` :token:`d_item`, which would always be rejected after ``move:``. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Axiom f : nat -> nat. + Axiom g : nat -> nat. + + .. 
coqtop:: all + + Lemma test (Hfg : forall x, f x = g x) a b : f a = g b. + apply: trans_equal (Hfg _) _. + +This tactic is equivalent (see section +:ref:`bookkeeping_ssr`) to: +``refine (trans_equal (Hfg _) _).`` +and this is a common idiom for applying transitivity on the left hand +side of an equation. + + +.. _abstract_ssr: + +The abstract tactic +``````````````````` + +The ``abstract`` tactic assigns an ``abstract`` constant previously +introduced with the ``[: name ]`` intro pattern +(see section :ref:`introduction_ssr`). + +In a goal like the following:: + + m : nat + abs : <hidden> + n : nat + ============= + m < 5 + n + +The tactic ``abstract: abs n`` first generalizes the goal with respect to n +(that is not visible to the abstract constant abs) and then assigns +abs. The resulting goal is:: + + m : nat + n : nat + ============= + m < 5 + n + +Once this subgoal is closed, all other goals having abs in their +context see the type assigned to ``abs``. In this case:: + + m : nat + abs : forall n, m < 5 + n + ============= + … + +For a more detailed example the reader should refer to +section :ref:`structure_ssr`. + + +.. _introduction_ssr: + +Introduction in the context +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The application of a tactic to a given goal can generate (quantified) +variables, assumptions, or definitions, which the user may want to +*introduce* as new facts, constants or defined constants, +respectively. If the tactic splits the goal into several subgoals, +each of them may require the introduction of different constants and +facts. Furthermore it is very common to immediately decompose or +rewrite with an assumption instead of adding it to the context, as the +goal can often be simplified and even proved after this. + +All these operations are performed by the introduction tactical ``=>``, +whose general syntax is + +.. tacn:: @tactic => {+ @i_item } + :name: => + +.. prodn:: + i_item ::= @i_pattern %| @s_item %| @clear_switch %| /@term + +..
prodn:: + s_item ::= /= %| // %| //= + +.. prodn:: + i_pattern ::= @ident %| _ %| ? %| * %| {? @occ_switch } -> %| {? @occ_switch }<- %| [ {?| @i_item } ] %| - %| [: {+ @ident } ] + +The ``=>`` tactical first executes tactic, then the :token:`i_item` s, +left to right. An :token:`s_item` specifies a +simplification operation; a :token:`clear_switch` +specifies context pruning as in :ref:`discharge_ssr`. +The :token:`i_pattern` s can be seen as a variant of *intro patterns* +:ref:`tactics`: each performs an introduction operation, i.e., pops some +variables or assumptions from the goal. + +An :token:`s_item` can simplify the set of subgoals or the subgoals themselves: + ++ ``//`` removes all the “trivial” subgoals that can be resolved by the + |SSR| tactic ``done`` described in :ref:`terminators_ssr`, i.e., + it executes ``try done``. ++ ``/=`` simplifies the goal by performing partial evaluation, as per the + tactic ``simpl`` [#5]_. ++ ``//=`` combines both kinds of simplification; it is equivalent to + ``/= //``, i.e., ``simpl; try done``. + + +When an :token:`s_item` bears a :token:`clear_switch`, then the +:token:`clear_switch` is executed +*after* the :token:`s_item`, e.g., ``{IHn}//`` will solve some subgoals, +possibly using the fact ``IHn``, and will erase ``IHn`` from the context +of the remaining subgoals. + +The last entry in the :token:`i_item` grammar rule, ``/``:token:`term`, +represents a view (see section :ref:`views_and_reflection_ssr`). +If the next :token:`i_item` is a view, then the view is +applied to the assumption in top position once all the +previous :token:`i_item` have been performed. + +The view is applied to the top assumption. + +|SSR| supports the following :token:`i_pattern` s: + +:token:`ident` + pops the top variable, assumption, or local definition into + a new constant, fact, or defined constant :token:`ident`, respectively.
+ Note that defined constants cannot be introduced when δ-expansion is + required to expose the top variable or assumption. +``?`` + pops the top variable into an anonymous constant or fact, whose name + is picked by the tactic interpreter. |SSR| only generates names that cannot + appear later in the user script [#6]_. +``_`` + pops the top variable into an anonymous constant that will be deleted + from the proof context of all the subgoals produced by the ``=>`` tactical. + They should thus never be displayed, except in an error message if the + constant is still actually used in the goal or context after the last + :token:`i_item` has been executed (:token:`s_item` can erase goals or + terms where the constant appears). +``*`` + pops all the remaining apparent variables/assumptions as anonymous + constants/facts. Unlike ``?`` and ``move`` the ``*`` + :token:`i_item` does not + expand definitions in the goal to expose quantifiers, so it may be useful + to repeat a ``move=> *`` tactic, e.g., on the goal:: + + forall a b : bool, a <> b + + a first ``move=> *`` adds only ``_a_ : bool`` and ``_b_ : bool`` + to the context; it takes a second ``move=> *`` to add ``_Hyp_ : _a_ = _b_``. +:token:`occ_switch` ``->`` + (resp. :token:`occ_switch` ``<-``) + pops the top assumption (which should be a rewritable proposition) into an + anonymous fact, rewrites (resp. rewrites right to left) the goal with this + fact (using the |SSR| ``rewrite`` tactic described in section + :ref:`rewriting_ssr`, and honoring the optional occurrence selector), and + finally deletes the anonymous fact from the context. +``[`` :token:`i_item` * ``| … |`` :token:`i_item` * ``]`` + when it is the + very *first* :token:`i_pattern` after tactic ``=>`` tactical *and* tactic + is not a move, is a *branching*:token:`i_pattern`. It executes the sequence + :token:`i_item`:math:`_i` on the i-th subgoal produced by tactic. 
The + execution of tactic should thus generate exactly m subgoals, unless the + ``[…]`` :token:`i_pattern` comes after an initial ``//`` or ``//=`` + :token:`s_item` that closes some of the goals produced by ``tactic``, in + which case exactly m subgoals should remain after thes- item, or we have + the trivial branching :token:`i_pattern` [], which always does nothing, + regardless of the number of remaining subgoals. +``[`` :token:`i_item` * ``| … |`` :token:`i_item` * ``]`` + when it is *not* + the first :token:`i_pattern` or when tactic is a ``move``, is a + *destructing* :token:`i_pattern`. It starts by destructing the top + variable, using the |SSR| ``case`` tactic described in + :ref:`the_defective_tactics_ssr`. It then behaves as the corresponding + branching :token:`i_pattern`, executing the + sequence:token:`i_item`:math:`_i` in the i-th subgoal generated by the + case analysis; unless we have the trivial destructing :token:`i_pattern` + ``[]``, the latter should generate exactly m subgoals, i.e., the top + variable should have an inductive type with exactly m constructors [#7]_. + While it is good style to use the :token:`i_item` i * to pop the variables + and assumptions corresponding to each constructor, this is not enforced by + |SSR|. +``-`` + does nothing, but counts as an intro pattern. It can also be used to + force the interpretation of ``[`` :token:`i_item` * ``| … |`` + :token:`i_item` * ``]`` as a case analysis like in ``move=> -[H1 H2]``. It + can also be used to indicate explicitly the link between a view and a name + like in ``move=> /eqP-H1``. Last, it can serve as a separator between + views. Section :ref:`views_and_reflection_ssr` [#9]_ explains in which + respect the tactic ``move=> /v1/v2`` differs from the tactic ``move=> + /v1-/v2``. +``[:`` :token:`ident` ``…]`` + introduces in the context an abstract constant + for each :token:`ident`. Its type has to be fixed later on by using the + ``abstract`` tactic. 
Before then the type displayed is ``<hidden>``. + + +Note that |SSR| does not support the syntax ``(ipat, …, ipat)`` for +destructing intro-patterns. + +Clears are deferred until the end of the intro pattern. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test x y : Nat.leb 0 x = true -> (Nat.leb 0 x) && (Nat.leb y 2) = true. + move=> {x} ->. + +If the cleared names are reused in the same intro pattern, a renaming +is performed behind the scenes. + +Facts mentioned in a clear switch must be valid names in the proof +context (excluding the section context). + +The rules for interpreting branching and destructing :token:`i_pattern` are +motivated by the fact that it would be pointless to have a branching +pattern if tactic is a ``move``, and in most of the remaining cases +tactic is ``case`` or ``elim``, which implies destruction. +The rules above imply that: + ++ ``move=> [a b].`` ++ ``case=> [a b].`` ++ ``case=> a b.`` + +are all equivalent, so which one to use is a matter of style; ``move`` should +be used for casual decomposition, such as splitting a pair, and ``case`` +should be used for actual decompositions, in particular for type families +(see :ref:`type_families_ssr`) and proof by contradiction. + +The trivial branching :token:`i_pattern` can be used to force the branching +interpretation, e.g.: + ++ ``case=> [] [a b] c.`` ++ ``move=> [[a b] c].`` ++ ``case; case=> a b c.`` + +are all equivalent. + + +.. _generation_of_equations_ssr: + +Generation of equations +~~~~~~~~~~~~~~~~~~~~~~~ + +The generation of named equations option stores the definition of a +new constant as an equation. The tactic: + +.. coqtop:: in + + move En: (size l) => n. + +where ``l`` is a list, replaces ``size l`` by ``n`` in the goal and +adds the fact ``En : size l = n`` to the context. +This is quite different from: + +.. 
coqtop:: in + + pose n := (size l). + +which generates a definition ``n := (size l)``. It is not possible to +generalize or rewrite such a definition; on the other hand, it is +automatically expanded during computation, whereas expanding the +equation ``En`` requires explicit rewriting. + +The use of this equation name generation option with a ``case`` or an +``elim`` tactic changes the status of the first :token:`i_item`, in order to +deal with the possible parameters of the constants introduced. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test (a b :nat) : a <> b. + case E : a => [|n]. + +If the user does not provide a branching :token:`i_item` as first +:token:`i_item`, or if the :token:`i_item` does not provide enough names for +the arguments of a constructor, then the constants generated are introduced +under fresh |SSR| names. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test (a b :nat) : a <> b. + case E : a => H. + Show 2. + +Combining the generation of named equations mechanism with thecase +tactic strengthens the power of a case analysis. On the other hand, +when combined with the elim tactic, this feature is mostly useful for +debug purposes, to trace the values of decomposed parameters and +pinpoint failing branches. + + +.. _type_families_ssr: + +Type families +~~~~~~~~~~~~~ + +When the top assumption of a goal has an inductive type, two specific +operations are possible: the case analysis performed by the ``case`` +tactic, and the application of an induction principle, performed by +the ``elim`` tactic. 
When this top assumption has an inductive type, which +is moreover an instance of a type family, |Coq| may need help from the +user to specify which occurrences of the parameters of the type should +be substituted. + +A specific ``/`` switch indicates the type family parameters of the type +of a :token:`d_item` immediately following this ``/`` switch, +using the syntax: + +.. tacv:: case: {+ @d_item } / {+ @d_item } +.. tacv:: elim: {+ @d_item } / {+ @d_item } + +The :token:`d_item` on the right side of the ``/`` switch are discharged as +described in section :ref:`discharge_ssr`. The case analysis or elimination +will be done on the type of the top assumption after these discharge +operations. + +Every :token:`d_item` preceding the ``/`` is interpreted as arguments of this +type, which should be an instance of an inductive type family. These terms +are not actually generalized, but rather selected for substitution. +Occurrence switches can be used to restrict the substitution. If a term is +left completely implicit (e.g. writing just ``_``), then a pattern is +inferred looking at the type of the top assumption. This allows for the +compact syntax: + +.. coqtop:: in + + case: {2}_ / eqP. + +were ``_`` is interpreted as ``(_ == _)`` since +``eqP T a b : reflect (a = b) (a == b)`` and reflect is a type family with +one index. + +Moreover if the :token:`d_item` list is too short, it is padded with an +initial sequence of ``_`` of the right length. + +.. example:: + + Here is a small example on lists. We define first a function which + adds an element at the end of a given list. + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Require Import List. + Section LastCases. + Variable A : Type. + Implicit Type l : list A. + Fixpoint add_last a l : list A := + match l with + | nil => a :: nil + | hd :: tl => hd :: (add_last a tl) end. 
+ + Then we define an inductive predicate for case analysis on lists + according to their last element: + + .. coqtop:: all + + Inductive last_spec : list A -> Type := + | LastSeq0 : last_spec nil + | LastAdd s x : last_spec (add_last x s). + + Theorem lastP : forall l : list A, last_spec l. + Admitted. + + We are now ready to use ``lastP`` in conjunction with ``case``. + + .. coqtop:: all + + Lemma test l : (length l) * 2 = length (l ++ l). + case: (lastP l). + + Applied to the same goal, the command: + ``case: l / (lastP l).`` + generates the same subgoals but ``l`` has been cleared from both contexts. + + Again applied to the same goal, the command. + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Lemma test l : (length l) * 2 = length (l ++ l). + case: {1 3}l / (lastP l). + + Note that selected occurrences on the left of the ``/`` + switch have been substituted with l instead of being affected by + the case analysis. + +The equation name generation feature combined with a type family / +switch generates an equation for the *first* dependent :token:`d_item` +specified by the user. Again starting with the above goal, the +command: + +.. example:: + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Lemma test l : (length l) * 2 = length (l ++ l). + case E: {1 3}l / (lastP l) => [|s x]. + Show 2. + + +There must be at least one :token:`d_item` to the left of the / switch; this +prevents any confusion with the view feature. However, the :token:`d_item` +to the right of the ``/`` are optional, and if they are omitted the first +assumption provides the instance of the type family. + +The equation always refers to the first :token:`d_item` in the actual tactic +call, before any padding with initial ``_``. 
Thus, if an inductive type +has two family parameters, it is possible to have|SSR| generate an +equation for the second one by omitting the pattern for the first; +note however that this will fail if the type of the second parameter +depends on the value of the first parameter. + + +Control flow +------------ + + +.. _indentation_ssr: + +Indentation and bullets +~~~~~~~~~~~~~~~~~~~~~~~ + +A linear development of |Coq| scripts gives little information on the +structure of the proof. In addition, replaying a proof after some +changes in the statement to be proved will usually not display +information to distinguish between the various branches of case +analysis for instance. + +To help the user in this organization of the proof script at development +time, |SSR| provides some bullets to highlight the structure of branching +proofs. The available bullets are ``-``, ``+`` and ``*``. Combined with +tabulation, this lets us highlight four nested levels of branching; the most +we have ever needed is three. Indeed, the use of “simpl and closing” +switches, of terminators (see above section :ref:`terminators_ssr`) and +selectors (see section :ref:`selectors_ssr`) is powerful enough to avoid most +of the time more than two levels of indentation. + +Here is a fragment of such a structured script:: + + case E1: (abezoutn _ _) => [[| k1] [| k2]]. + - rewrite !muln0 !gexpn0 mulg1 => H1. + move/eqP: (sym_equal F0); rewrite -H1 orderg1 eqn_mul1. + by case/andP; move/eqP. + - rewrite muln0 gexpn0 mulg1 => H1. + have F1: t %| t * S k2.+1 - 1. + apply: (@dvdn_trans (orderg x)); first by rewrite F0; exact: dvdn_mull. + rewrite orderg_dvd; apply/eqP; apply: (mulgI x). + rewrite -{1}(gexpn1 x) mulg1 gexpn_add leq_add_sub //. + by move: P1; case t. + rewrite dvdn_subr in F1; last by exact: dvdn_mulr. + + rewrite H1 F0 -{2}(muln1 (p ^ l)); congr (_ * _). + by apply/eqP; rewrite -dvdn1. + + by move: P1; case: (t) => [| [| s1]]. + - rewrite muln0 gexpn0 mul1g => H1. + ... + + +.. 
_terminators_ssr: + +Terminators +~~~~~~~~~~~ + +To further structure scripts, |SSR| supplies *terminating* +tacticals to explicitly close off tactics. When replaying scripts, we +then have the nice property that an error immediately occurs when a +closed tactic fails to prove its subgoal. + +It is hence recommended practice that the proof of any subgoal should +end with a tactic which *fails if it does not solve the current goal*, +like discriminate, contradiction or assumption. + +In fact, |SSR| provides a generic tactical which turns any tactic +into a closing one (similar to now). Its general syntax is: + +.. tacn:: by @tactic + :name: by + +The Ltac expression: + +.. coqtop:: in + + by [@tactic | [@tactic | …]. + +is equivalent to: + +.. coqtop:: in + + [by @tactic | by @tactic | ...]. + +and this form should be preferred to the former. + +In the script provided as example in section :ref:`indentation_ssr`, the +paragraph corresponding to each sub-case ends with a tactic line prefixed +with a ``by``, like in: + +.. coqtop:: in + + by apply/eqP; rewrite -dvdn1. + +The by tactical is implemented using the user-defined, and extensible +done tactic. This done tactic tries to solve the current goal by some +trivial means and fails if it doesn’t succeed. Indeed, the tactic +expression: + +.. coqtop:: in + + by tactic. + +is equivalent to: + +.. coqtop:: in + + tactic; done. + +Conversely, the tactic + +.. coqtop:: + + by [ ]. + +is equivalent to: + +.. coqtop:: + + done. + +The default implementation of the done tactic, in the ``ssreflect.v`` +file, is: + +.. coqtop:: in + + Ltac done := + trivial; hnf; intros; solve + [ do ![solve [trivial | apply: sym_equal; trivial] + | discriminate | contradiction | split] + | case not_locked_false_eq_true; assumption + | match goal with H : ~ _ |- _ => solve [case H; trivial] end ]. + +The lemma ``not_locked_false_eq_true`` is needed to discriminate +*locked* boolean predicates (see section :ref:`locking_ssr`). 
The iterator +tactical do is presented in section :ref:`iteration_ssr`. This tactic can be +customized by the user, for instance to include an ``auto`` tactic. + +A natural and common way of closing a goal is to apply a lemma which +is the exact one needed for the goal to be solved. The defective form +of the tactic: + +.. coqtop:: in + + exact. + +is equivalent to: + +.. coqtop:: in + + do [done | by move=> top; apply top]. + +where top is a fresh name affected to the top assumption of the goal. +This applied form is supported by the : discharge tactical, and the +tactic: + +.. coqtop:: in + + exact: MyLemma. + +is equivalent to: + +.. coqtop:: in + + by apply: MyLemma. + +(see section :ref:`discharge_ssr` for the documentation of the apply: combination). + +Warning The list of tactics, possibly chained by semi-columns, that +follows a by keyword is considered as a parenthesized block applied to +the current goal. Hence for example if the tactic: + +.. coqtop:: in + + by rewrite my_lemma1. + +succeeds, then the tactic: + +.. coqtop:: in + + by rewrite my_lemma1; apply my_lemma2. + +usually fails since it is equivalent to: + +.. coqtop:: in + + by (rewrite my_lemma1; apply my_lemma2). + + + +.. _selectors_ssr: + +Selectors +~~~~~~~~~ + +When composing tactics, the two tacticals ``first`` and ``last`` let the user +restrict the application of a tactic to only one of the subgoals +generated by the previous tactic. This covers the frequent cases where +a tactic generates two subgoals one of which can be easily disposed +of. + +This is an other powerful way of linearization of scripts, since it +happens very often that a trivial subgoal can be solved in a less than +one line tactic. For instance, the tactic: + +.. tacn:: @tactic ; last by @tactic + :name: last + +tries to solve the last subgoal generated by the first +tactic using the given second tactic , and fails if it does not succeeds. +Its analogous + +.. 
tacn:: @tactic ; first by @tactic + :name: first + +tries to solve the first subgoal generated by the first tactic using the +second given tactic, and fails if it does not succeeds. + +|SSR| also offers an extension of this facility, by supplying +tactics to *permute* the subgoals generated by a tactic. The tactic: + +.. tacv:: @tactic; last first + +inverts the order of the subgoals generated by tactic. It is +equivalent to: + +.. tacv:: @tactic; first last + +More generally, the tactic: + +.. tacn:: @tactic; last @natural first + :name: last first + +where :token:`natural` is a |Coq| numeral, or and Ltac variable +denoting a |Coq| +numeral, having the value k. It rotates the n subgoals G1 , …, Gn +generated by tactic. The first subgoal becomes Gn + 1 − k and the +circular order of subgoals remains unchanged. + +Conversely, the tactic: + +.. tacn:: @tactic; first @natural last + :name: first last + +rotates the n subgoals G1 , …, Gn generated by tactic in order that +the first subgoal becomes Gk . + +Finally, the tactics ``last`` and ``first`` combine with the branching syntax +of Ltac: if the tactic generates n subgoals on a given goal, +then the tactic + +.. coqtop:: in + + tactic ; last k [ tactic1 |…| tacticm ] || tacticn. + +where natural denotes the integer k as above, applies tactic1 to the n +−k + 1-th goal, … tacticm to the n −k + 2 − m-th goal and tactic n +to the others. + +.. example:: + + Here is a small example on lists. We define first a function which + adds an element at the end of a given list. + + .. coqtop:: reset + + Abort. + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Inductive test : nat -> Prop := + | C1 n of n = 1 : test n + | C2 n of n = 2 : test n + | C3 n of n = 3 : test n + | C4 n of n = 4 : test n. + + Lemma example n (t : test n) : True. + case: t; last 2 [move=> k| move=> l]; idtac. + + +.. 
_iteration_ssr: + +Iteration +~~~~~~~~~ + +|SSR| offers an accurate control on the repetition of tactics, +thanks to the do tactical, whose general syntax is: + +.. tacn:: do {? @mult } ( @tactic | [ {+| @tactic } ] ) + :name: do + +where :token:`mult` is a *multiplier*. + +Brackets can only be omitted if a single tactic is given *and* a +multiplier is present. + +A tactic of the form: + +.. coqtop:: in + + do [ tactic 1 | … | tactic n ]. + +is equivalent to the standard Ltac expression: + +.. coqtop:: in + + first [ tactic 1 | … | tactic n ]. + +The optional multiplier :token:`mult` specifies how many times the action of +tactic should be repeated on the current subgoal. + +There are four kinds of multipliers: + +.. prodn:: + mult ::= @num ! %| ! %| @num ? %| ? + +Their meaning is: + ++ ``n!`` the step tactic is repeated exactly n times (where n is a + positive integer argument). ++ ``!`` the step tactic is repeated as many times as possible, and done + at least once. ++ ``?`` the step tactic is repeated as many times as possible, + optionally. ++ ``n?`` the step tactic is repeated up to n times, optionally. + + +For instance, the tactic: + +.. coqtop:: in + + tactic; do 1? rewrite mult_comm. + +rewrites at most one time the lemma ``mult_com`` in all the subgoals +generated by tactic , whereas the tactic: + +.. coqtop:: in + + tactic; do 2! rewrite mult_comm. + +rewrites exactly two times the lemma ``mult_com`` in all the subgoals +generated by tactic, and fails if this rewrite is not possible in some +subgoal. + +Note that the combination of multipliers and rewrite is so often used +that multipliers are in fact integrated to the syntax of the +|SSR| rewrite tactic, see section :ref:`rewriting_ssr`. + + +.. _localization_ssr: + +Localization +~~~~~~~~~~~~ + +In sections :ref:`basic_localization_ssr` and :ref:`bookkeeping_ssr`, we have +already presented the *localization* tactical in, whose general syntax is: + +.. tacn:: @tactic in {+ @ident} {? 
* } + :name: in + +where :token:`ident` is a name in the +context. On the left side of ``in``, +:token:`tactic` can be ``move``, ``case``, ``elim``, ``rewrite``, ``set``, +or any tactic formed with the general iteration tactical ``do`` (see section +:ref:`iteration_ssr`). + +The operation described by tactic is performed in the facts listed after +``in`` and in the goal if a ``*`` ends the list of names. + +The ``in`` tactical successively: + ++ generalizes the selected hypotheses, possibly “protecting” the goal + if ``*`` is not present, ++ performs :token:`tactic`, on the obtained goal, ++ reintroduces the generalized facts, under the same names. + +This defective form of the ``do`` tactical is useful to avoid clashes +between standard Ltac in and the |SSR| tactical in. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Ltac mytac H := rewrite H. + + Lemma test x y (H1 : x = y) (H2 : y = 3) : x + y = 6. + do [mytac H2] in H1 *. + + the last tactic rewrites the hypothesis ``H2 : y = 3`` both in + ``H1 : x = y`` and in the goal ``x + y = 6``. + +By default ``in`` keeps the body of local definitions. To erase the body +of a local definition during the generalization phase, the name of the +local definition must be written between parentheses, like in +``rewrite H in H1 (def_n) H2.`` + +From |SSR| 1.5 the grammar for the in tactical has been extended +to the following one: + +.. tacv:: @tactic in {+ @clear_switch | {? @ } @ident | ( @ident ) | ( {? @ } @ident := @c_pattern ) } {? * } + +In its simplest form the last option lets one rename hypotheses that +can’t be cleared (like section variables). For example ``(y := x)`` +generalizes over ``x`` and reintroduces the generalized variable under the +name ``y`` (and does not clear ``x``). 
+For a more precise description this form of localization refer +to :ref:`advanced_generalization_ssr`. + + +.. _structure_ssr: + +Structure +~~~~~~~~~ + +Forward reasoning structures the script by explicitly specifying some +assumptions to be added to the proof context. It is closely associated +with the declarative style of proof, since an extensive use of these +highlighted statements make the script closer to a (very detailed) +text book proof. + +Forward chaining tactics allow to state an intermediate lemma and start a +piece of script dedicated to the proof of this statement. The use of closing +tactics (see section :ref:`terminators_ssr`) and of indentation makes +syntactically explicit the portion of the script building the proof of the +intermediate statement. + + +The have tactic. +```````````````` + +The main |SSR| forward reasoning tactic is the ``have`` tactic. It can +be use in two modes: one starts a new (sub)proof for an intermediate +result in the main proof, and the other provides explicitly a proof +term for this intermediate step. + +In the first mode, the syntax of have in its defective form is: + +.. tacn:: have : @term + :name: have + +This tactic supports open syntax for :token:`term`. Applied to a goal ``G``, it +generates a first subgoal requiring a proof of ``term`` in the context of +``G``. The second generated subgoal is of the form ``term -> G``, where term +becomes the new top assumption, instead of being introduced with a +fresh name. At the proof-term level, the have tactic creates a β +redex, and introduces the lemma under a fresh name, automatically +chosen. + +Like in the case of the ``pose`` tactic (see section :ref:`definitions_ssr`), the types of +the holes are abstracted in term. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : True. + have: _ * 0 = 0. 
+ + The invokation of ``have`` is equivalent to: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Lemma test : True. + + .. coqtop:: all + + have: forall n : nat, n * 0 = 0. + +The have tactic also enjoys the same abstraction mechanism as the ``pose`` +tactic for the non-inferred implicit arguments. For instance, the +tactic: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Lemma test : True. + + .. coqtop:: all + + have: forall x y, (x, y) = (x, y + 0). + + opens a new subgoal where the type of ``x`` is quantified. + +The behavior of the defective have tactic makes it possible to +generalize it in the following general construction: + +.. tacn:: have {* @i_item } {? @i_pattern } {? @s_item | {+ @binder } } {? : @term } {? := @term | by @tactic } + +Open syntax is supported for both :token:`term`. For the description +of :token:`i_item` and :token:`s_item` see section +:ref:`introduction_ssr`. The first mode of the +have tactic, which opens a sub-proof for an intermediate result, uses +tactics of the form: + +.. tacv:: have @clear_switch @i_item : @term by @tactic + +which behave like: + +.. coqtop:: in + + have: term ; first by tactic. + move=> clear_switch i_item. + +Note that the :token:`clear_switch` *precedes* the:token:`i_item`, which +allows to reuse +a name of the context, possibly used by the proof of the assumption, +to introduce the new assumption itself. + +The``by`` feature is especially convenient when the proof script of the +statement is very short, basically when it fits in one line like in: + +.. coqtop:: in + + have H23 : 3 + 2 = 2 + 3 by rewrite addnC. + +The possibility of using :token:`i_item` supplies a very concise syntax for +the further use of the intermediate step. For instance, + +.. example:: + + .. 
coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test a : 3 * a - 1 = a. + have -> : forall x, x * a = a. + + Note how the second goal was rewritten using the stated equality. + Also note that in this last subgoal, the intermediate result does not + appear in the context. + +Thanks to the deferred execution of clears, the following idiom is +also supported (assuming x occurs in the goal only): + +.. coqtop:: in + + have {x} -> : x = y. + +An other frequent use of the intro patterns combined with ``have`` is the +destruction of existential assumptions like in the tactic: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : True. + have [x Px]: exists x : nat, x > 0. + Focus 2. + +An alternative use of the ``have`` tactic is to provide the explicit proof +term for the intermediate lemma, using tactics of the form: + +.. tacv:: have {? @ident } := term + +This tactic creates a new assumption of type the type of :token:`term`. +If the +optional :token:`ident` is present, this assumption is introduced under the +name :token:`ident`. Note that the body of the constant is lost for the user. + +Again, non inferred implicit arguments and explicit holes are +abstracted. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : True. + have H := forall x, (x, x) = (x, x). + + adds to the context ``H : Type -> Prop.`` This is a schematic example but + the feature is specially useful when the proof term to give involves + for instance a lemma with some hidden implicit arguments. + +After the :token:`i_pattern`, a list of binders is allowed. + + +.. example:: + + .. 
coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : True. + have H x (y : nat) : 2 * x + y = x + x + y by omega. + +A proof term provided after ``:=`` can mention these bound variables +(that are automatically introduced with the given names). +Since the :token:`i_pattern` can be omitted, to avoid ambiguity, +bound variables can be surrounded +with parentheses even if no type is specified: + +.. coqtop:: in + + have (x) : 2 * x = x + x by omega. + +The :token:`i_item` and :token:`s_item` can be used to interpret the asserted +hypothesis with views (see section :ref:`views_and_reflection_ssr`) or simplify the resulting +goals. + +The ``have`` tactic also supports a ``suff`` modifier which allows for +asserting that a given statement implies the current goal without +copying the goal itself. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test : True. + have suff H : 2 + 2 = 3. + Focus 2. + + Note that H is introduced in the second goal. + +The ``suff`` modifier is not +compatible with the presence of a list of binders. + +.. _generating_let_ssr: + +Generating let in context entries with have +``````````````````````````````````````````` + +Since |SSR| 1.5 the ``have`` tactic supports a “transparent” modifier +to generate let in context entries: the ``@`` symbol in front of the +context entry name. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Set Printing Depth 15. + + .. coqtop:: all + + Inductive Ord n := Sub x of x < n. + Notation "'I_ n" := (Ord n) (at level 8, n at level 2, format "''I_' n"). + Arguments Sub {_} _ _. + + Lemma test n m (H : m + 1 < n) : True. 
+ have @i : 'I_n by apply: (Sub m); omega. + +Note that the sub-term produced by ``omega`` is in general huge and +uninteresting, and hence one may want to hide it. +For this purpose the ``[: name ]`` intro pattern and the tactic +``abstract`` (see page :ref:`abstract_ssr`) are provided. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Inductive Ord n := Sub x of x < n. + Notation "'I_ n" := (Ord n) (at level 8, n at level 2, format "''I_' n"). + Arguments Sub {_} _ _. + + .. coqtop:: all + + Lemma test n m (H : m + 1 < n) : True. + have [:pm] @i : 'I_n by apply: (Sub m); abstract: pm; omega. + + The type of ``pm`` can be cleaned up by its annotation ``(*1*)`` by just + simplifying it. The annotations are there for technical reasons only. + +When intro patterns for abstract constants are used in conjunction +with have and an explicit term, they must be used as follows: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Inductive Ord n := Sub x of x < n. + Notation "'I_ n" := (Ord n) (at level 8, n at level 2, format "''I_' n"). + Arguments Sub {_} _ _. + + .. coqtop:: all + + Lemma test n m (H : m + 1 < n) : True. + have [:pm] @i : 'I_n := Sub m pm. + by omega. + +In this case the abstract constant ``pm`` is assigned by using it in +the term that follows ``:=`` and its corresponding goal is left to be +solved. Goals corresponding to intro patterns for abstract constants +are opened in the order in which the abstract constants are declared +(not in the “order” in which they are used in the term). + +Note that abstract constants do respect scopes. Hence, if a variable +is declared after their introduction, it has to be properly +generalized (i.e. explicitly passed to the abstract constant when one +makes use of it). + +.. 
example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Inductive Ord n := Sub x of x < n. + Notation "'I_ n" := (Ord n) (at level 8, n at level 2, format "''I_' n"). + Arguments Sub {_} _ _. + + .. coqtop:: all + + Lemma test n m (H : m + 1 < n) : True. + have [:pm] @i k : 'I_(n+k) by apply: (Sub m); abstract: pm k; omega. + +Last, notice that the use of intro patterns for abstract constants is +orthogonal to the transparent flag ``@`` for have. + + +The have tactic and type classes resolution +``````````````````````````````````````````` + +Since |SSR| 1.5 the have tactic behaves as follows with respect to +type classes inference. + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Axiom ty : Type. + Axiom t : ty. + + Goal True. + ++ .. coqtop:: in undo + + have foo : ty. + + Full inference for ``ty``. The first subgoal demands a + proof of such instantiated statement. + ++ coqtop:: + + have foo : ty := . + + No inference for ``ty``. Unresolved instances are + quantified in ``ty``. The first subgoal demands a proof of such quantified + statement. Note that no proof term follows ``:=``, hence two subgoals are + generated. + ++ .. coqtop:: in undo + + have foo : ty := t. + + No inference for ``ty`` and ``t``. + ++ .. coqtop:: in undo + + have foo := t. + + No inference for ``t``. Unresolved instances are + quantified in the (inferred) type of ``t`` and abstracted in ``t``. + + +The behavior of |SSR| 1.4 and below (never resolve type classes) +can be restored with the option + +.. cmd:: Set SsrHave NoTCResolution. + + +Variants: the suff and wlog tactics +``````````````````````````````````` + +As it is often the case in mathematical textbooks, forward reasoning +may be used in slightly different variants. 
One of these variants is +to show that the intermediate step L easily implies the initial goal +G. By easily we mean here that the proof of L ⇒ G is shorter than the +one of L itself. This kind of reasoning step usually starts with: “It +suffices to show that …”. + +This is such a frequent way of reasoning that |SSR| has a variant +of the ``have`` tactic called ``suffices`` (whose abridged name is ``suff``). +The +``have`` and ``suff`` tactics are equivalent and have the same syntax but: + + ++ the order of the generated subgoals is inversed ++ but the optional clear item is still performed in the *second* + branch. This means that the tactic: + + .. coqtop:: in + + suff {H} H : forall x : nat, x >= 0. + + fails if the context of the current goal indeed contains an + assumption named ``H``. + + +The rationale of this clearing policy is to make possible “trivial” +refinements of an assumption, without changing its name in the main +branch of the reasoning. + +The ``have`` modifier can follow the ``suff`` tactic. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Axioms G P : Prop. + + .. coqtop:: all + + Lemma test : G. + suff have H : P. + + Note that, in contrast with ``have suff``, the name H has been introduced + in the first goal. + +Another useful construct is reduction, showing that a particular case +is in fact general enough to prove a general property. This kind of +reasoning step usually starts with: “Without loss of generality, we +can suppose that …”. Formally, this corresponds to the proof of a goal +G by introducing a cut wlog_statement -> G. Hence the user shall +provide a proof for both (wlog_statement -> G) -> G and +wlog_statement -> G. However, such cuts are usually rather +painful to perform by +hand, because the statement wlog_statement is tedious to write by hand, +and sometimes even to read. 
+ +|SSR| implements this kind of reasoning step through the without +loss tactic, whose short name is ``wlog``. It offers support to describe +the shape of the cut statements, by providing the simplifying +hypothesis and by pointing at the elements of the initial goals which +should be generalized. The general syntax of without loss is: + +.. tacn:: wlog {? suff } {? @clear_switch } {? @i_item } : {* @ident } / @term + :name: wlog + +where each :token:`ident` is a constant in the context +of the goal. Open syntax is supported for :token:`term`. + +In its defective form: + +.. tacv:: wlog: / @term + + +on a goal G, it creates two subgoals: a first one to prove the +formula (term -> G) -> G and a second one to prove the formula +term -> G. + +If the optional list of :token:`itent` is present +on the left side of ``/``, these constants are generalized in the +premise (term -> G) of the first subgoal. By default the body of local +definitions is erased. This behavior can be inhibited prefixing the +name of the local definition with the ``@`` character. + +In the second subgoal, the tactic: + +.. coqtop:: in + + move=> clear_switch i_item. + +is performed if at least one of these optional switches is present in +the ``wlog`` tactic. + +The ``wlog`` tactic is specially useful when a symmetry argument +simplifies a proof. Here is an example showing the beginning of the +proof that quotient and reminder of natural number euclidean division +are unique. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma quo_rem_unicity d q1 q2 r1 r2 : + q1*d + r1 = q2*d + r2 -> r1 < d -> r2 < d -> (q1, r1) = (q2, r2). + wlog: q1 q2 r1 r2 / q1 <= q2. + by case (le_gt_dec q1 q2)=> H; last symmetry; eauto with arith. + +The ``wlog suff`` variant is simpler, since it cuts wlog_statement instead +of wlog_statement -> G. 
It thus opens the goals wlog_statement -> G +and wlog_statement. + +In its simplest form the ``generally have : …`` tactic is equivalent to +``wlog suff : …`` followed by last first. When the ``have`` tactic is used +with the ``generally`` (or ``gen``) modifier it accepts an extra identifier +followed by a comma before the usual intro pattern. The identifier +will name the new hypothesis in its more general form, while the intro +pattern will be used to process its instance. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrfun ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Axiom P : nat -> Prop. + Axioms eqn leqn : nat -> nat -> bool. + Notation "a != b" := (eqn a b) (at level 70) : this_scope. + Notation "a <= b" := (leqn a b) (at level 70) : this_scope. + Open Scope this_scope. + + .. coqtop:: all + + Lemma simple n (ngt0 : 0 < n ) : P n. + gen have ltnV, /andP[nge0 neq0] : n ngt0 / (0 <= n) && (n != 0). + Focus 2. + + +.. _advanced_generalization_ssr: + +Advanced generalization ++++++++++++++++++++++++ + +The complete syntax for the items on the left hand side of the ``/`` +separator is the following one: + +.. tacv wlog … : {? @clear_switch | {? @ } @ident | ( {? @ } @ident := @c_pattern) } / @term + +Clear operations are intertwined with generalization operations. This +helps in particular avoiding dependency issues while generalizing some +facts. + +If an :token:`ident` is prefixed with the ``@`` mark, then a let-in redex is +created, which keeps track if its body (if any). The syntax +``( ident := c_pattern)`` allows to generalize an arbitrary term using a +given name. Note that its simplest form ``(x := y)`` is just a renaming of +``y`` into ``x``. In particular, this can be useful in order to simulate the +generalization of a section variable, otherwise not allowed. Indeed +renaming does not require the original variable to be cleared. 
+ +The syntax ``(@x := y)`` generates a let-in abstraction but with the +following caveat: ``x`` will not bind ``y``, but its body, whenever ``y`` can be +unfolded. This cover the case of both local and global definitions, as +illustrated in the following example. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Section Test. + Variable x : nat. + Definition addx z := z + x. + Lemma test : x <= addx x. + wlog H : (y := x) (@twoy := addx x) / twoy = 2 * y. + + To avoid unfolding the term captured by the pattern add x one can use + the pattern ``id (addx x)``, that would produce the following first + subgoal + + .. coqtop:: reset + + From Coq Require Import ssreflect Omega. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + Variable x : nat. + Definition addx z := z + x. + Lemma test : x <= addx x. + + .. coqtop:: all + + wlog H : (y := x) (@twoy := id (addx x)) / twoy = 2 * y. + + +.. _rewriting_ssr: + +Rewriting +--------- + +The generalized use of reflection implies that most of the +intermediate results handled are properties of effectively computable +functions. The most efficient mean of establishing such results are +computation and simplification of expressions involving such +functions, i.e., rewriting. |SSR| therefore includes an +extended ``rewrite`` tactic, that unifies and combines most of the +rewriting functionalities. 
+ + +An extended rewrite tactic +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The main features of the rewrite tactic are: + ++ It can perform an entire series of such operations in any subset of + the goal and/or context; ++ It allows to perform rewriting, simplifications, folding/unfolding + of definitions, closing of goals; ++ Several rewriting operations can be chained in a single tactic; ++ Control over the occurrence at which rewriting is to be performed is + significantly enhanced. + +The general form of an |SSR| rewrite tactic is: + +.. tacn:: rewrite {+ @rstep } + :name: rewrite (ssreflect) + +The combination of a rewrite tactic with the in tactical (see section +:ref:`localization_ssr`) performs rewriting in both the context and the goal. + +A rewrite step :token:`rstep` has the general form: + +.. prodn:: + rstep ::= {? @r_prefix } @r_item + +.. prodn:: + r_prefix ::= {? - } {? @mult } {? @occ_switch %| @clear_switch } {? [ @r_pattern ] } + +.. prodn:: + r_pattern ::= @term %| in {? @ident in } @term %| %( @term in %| @term as %) @ident in @term + +.. prodn:: + r_item ::= {? / } @term %| @s_item + +An :token:`r_prefix` contains annotations to qualify where and how the rewrite +operation should be performed: + ++ The optional initial ``-`` indicates the direction of the rewriting of + :token:`r_item`: + if present the direction is right-to-left and it is left-to-right otherwise. ++ The multiplier :token:`mult` (see section :ref:`iteration_ssr`) + specifies if and how the + rewrite operation should be repeated. ++ A rewrite operation matches the occurrences of a *rewrite pattern*, + and replaces these occurrences by an other term, according to the + given :token:`r_item`. The optional *redex switch* ``[r_pattern]``, + which should + always be surrounded by brackets, gives explicitly this rewrite + pattern. In its simplest form, it is a regular term. If no explicit + redex switch is present the rewrite pattern to be matched is inferred + from the :token:`r_item`. 
++ This optional term, or the :token:`r_item`, may be preceded by an occurrence + switch (see section :ref:`selectors_ssr`) or a clear item + (see section :ref:`discharge_ssr`), + these two possibilities being exclusive. An occurrence switch selects + the occurrences of the rewrite pattern which should be affected by the + rewrite operation. + + +An :token:`r_item` can be: + + ++ A *simplification* :token:`r_item`, + represented by a :token:`s_item` (see section + :ref:`introduction_ssr`). Simplification operations are intertwined with the possible + other rewrite operations specified by the list of :token:`r_item`. ++ A *folding/unfolding* :token:`r_item`. The tactic: + ``rewrite /term`` unfolds the + head constant of term in every occurrence of the first matching of + term in the goal. In particular, if ``my_def`` is a (local or global) + defined constant, the tactic: ``rewrite /my_def.`` is analogous to: + ``unfold my_def``. + Conversely: ``rewrite -/my_def.`` is equivalent to: ``fold my_def``. + When an unfold :token:`r_item` is combined with a + redex pattern, a conversion + operation is performed. A tactic of the form: + ``rewrite -[term1]/term2.`` + is equivalent to: ``change term1 with term2.`` If ``term2`` is a + single constant and ``term1`` head symbol is not ``term2``, then the head + symbol of ``term1`` is repeatedly unfolded until ``term2`` appears. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Definition double x := x + x. + Definition ddouble x := double (double x). + Lemma test x : ddouble x = 4 * x. + rewrite [ddouble _]/double. + + *Warning* The |SSR| + terms containing holes are *not* typed as abstractions in this + context. Hence the following script fails. + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Definition f := fun x y => x + y. + Lemma test x y : x + y = f y x. 
+ rewrite -[f y]/(y + _). + + but the following script succeeds + + .. coqtop:: none + + Restart. + + .. coqtop:: all + + rewrite -[f y x]/(y + _). + ++ A :token:`term`, which can be: + + + A term whose type has the form: + ``forall (x1 : A1 )…(xn : An ), eq term1 term2`` where + ``eq`` is the Leibniz equality or a registered setoid + equality. + + A list of terms ``(t1 ,…,tn)``, each ``ti`` having a type above. + The tactic: ``rewrite r_prefix (t1 ,…,tn ).`` + is equivalent to: ``do [rewrite r_prefix t1 | … | rewrite r_prefix tn ].`` + + An anonymous rewrite lemma ``(_ : term)``, where term has a type as above. tactic: ``rewrite (_ : term)`` is in fact synonym of: ``cutrewrite (term).``. + + +Remarks and examples +~~~~~~~~~~~~~~~~~~~~ + +Rewrite redex selection +``````````````````````` + +The general strategy of |SSR| is to grasp as many redexes as +possible and to let the user select the ones to be rewritten thanks to +the improved syntax for the control of rewriting. + +This may be a source of incompatibilities between the two rewrite +tactics. + +In a rewrite tactic of the form: + +.. coqtop:: in + + rewrite occ_switch [term1]term2. + +``term1`` is the explicit rewrite redex and ``term2`` is the rewrite rule. +This execution of this tactic unfolds as follows: + + ++ First ``term1`` and ``term2`` are βι normalized. Then ``term2`` + is put in head + normal form if the Leibniz equality constructor ``eq`` is not the head + symbol. This may involve ζ reductions. ++ Then, the matching algorithm (see section :ref:`abbreviations_ssr`) + determines the + first subterm of the goal matching the rewrite pattern. The rewrite + pattern is given by ``term1``, if an explicit redex pattern switch is + provided, or by the type of ``term2`` otherwise. However, matching skips + over matches that would lead to trivial rewrites. All the occurrences + of this subterm in the goal are candidates for rewriting. 
++ Then only the occurrences coded by :token:`occ_switch` (see again section + :ref:`abbreviations_ssr`) are finally selected for rewriting. ++ The left hand side of ``term2`` is unified with the subterm found by + the matching algorithm, and if this succeeds, all the selected + occurrences in the goal are replaced by the right hand side of ``term2``. ++ Finally the goal is βι normalized. + + +In the case ``term2`` is a list of terms, the first top-down (in the +goal) left-to-right (in the list) matching rule gets selected. + + +Chained rewrite steps +````````````````````` + +The possibility to chain rewrite operations in a single tactic makes +scripts more compact and gathers in a single command line a bunch of +surgical operations which would be described by a one sentence in a +pen and paper proof. + +Performing rewrite and simplification operations in a single tactic +enhances significantly the concision of scripts. For instance the +tactic: + +.. coqtop:: in + + rewrite /my_def {2}[f _]/= my_eq //=. + + +unfolds ``my_def`` in the goal, simplifies the second occurrence of the +first subterm matching pattern ``[f _]``, rewrites ``my_eq``, simplifies the +goals and closes trivial goals. + +Here are some concrete examples of chained rewrite operations, in the +proof of basic results on natural numbers arithmetic. + +.. example:: + + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Axiom addn0 : forall m, m + 0 = m. + Axiom addnS : forall m n, m + S n = S (m + n). + Axiom addSnnS : forall m n, S m + n = m + S n. + + Lemma addnCA m n p : m + (n + p) = n + (m + p). + by elim: m p => [ | m Hrec] p; rewrite ?addSnnS -?addnS. + Qed. + + Lemma addnC n m : m + n = n + m. + by rewrite -{1}[n]addn0 addnCA addn0. + Qed. + +Note the use of the ``?`` switch for parallel rewrite operations in the +proof of ``addnCA``. 
+ + +Explicit redex switches are matched first +````````````````````````````````````````` + +If an :token:`r_prefix` involves a *redex switch*, the first step is to find a +subterm matching this redex pattern, independently from the left hand +side of the equality the user wants to rewrite. + + +.. example:: + + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test (H : forall t u, t + u = u + t) x y : x + y = y + x. + rewrite [y + _]H. + +Note that if this first pattern matching is not compatible with the +:token:`r_item`, the rewrite fails, even if the goal contains a +correct redex matching both the redex switch and the left hand side of +the equality. + +.. example:: + + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test (H : forall t u, t + u * 0 = t) x y : x + y * 4 + 2 * 0 = x + 2 * 0. + rewrite [x + _]H. + + Indeed the left hand side of ``H`` does not match + the redex identified by the pattern ``x + y * 4``. + + +Occurrence switches and redex switches +`````````````````````````````````````` + +.. example:: + + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test x y : x + y + 0 = x + y + y + 0 + 0 + (x + y + 0). + rewrite {2}[_ + y + 0](_: forall z, z + 0 = z). + +The second subgoal is generated by the use of an anonymous lemma in +the rewrite tactic. The effect of the tactic on the initial goal is to +rewrite this lemma at the second occurrence of the first matching +``x + y + 0`` of the explicit rewrite redex ``_ + y + 0``. + + +Occurrence selection and repetition +``````````````````````````````````` + +Occurrence selection has priority over repetition switches. 
This means +the repetition of a rewrite tactic specified by a multiplier will +perform matching each time an elementary rewrite operation is +performed. Repeated rewrite tactics apply to every subgoal generated +by the previous tactic, including the previous instances of the +repetition. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Lemma test x y (z : nat) : x + 1 = x + y + 1. + rewrite 2!(_ : _ + 1 = z). + +This last tactic generates *three* subgoals because +the second rewrite operation specified with the ``2!`` multiplier +applies to the two subgoals generated by the first rewrite. + + +Multi-rule rewriting +```````````````````` + +The rewrite tactic can be provided a *tuple* of rewrite rules, or more +generally a tree of such rules, since this tuple can feature arbitrary +inner parentheses. We call *multirule* such a generalized rewrite +rule. This feature is of special interest when it is combined with +multiplier switches, which makes the rewrite tactic iterates the +rewrite operations prescribed by the rules on the current goal. + + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Section Test. + + .. coqtop:: all + + Variables (a b c : nat). + Hypothesis eqab : a = b. + Hypothesis eqac : a = c. + + Lemma test : a = a. + rewrite (eqab, eqac). + + Indeed rule ``eqab`` is the first to apply among the ones + gathered in the tuple passed to the rewrite tactic. This multirule + ``(eqab, eqac)`` is actually a |Coq| term and we can name it with a + definition: + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Definition multi1 := (eqab, eqac). + + In this case, the tactic ``rewrite multi1`` is a synonym for + ``rewrite (eqab, eqac)``. 
+ +More precisely, a multirule rewrites the first subterm to which one of +the rules applies in a left-to-right traversal of the goal, with the +first rule from the multirule tree in left-to-right order. Matching is +performed according to the algorithm described in +Section :ref:`abbreviations_ssr`, but +literal matches have priority. + +.. example:: + + .. coqtop:: all + + Definition d := a. + Hypotheses eqd0 : d = 0. + Definition multi2 := (eqab, eqd0). + + Lemma test : d = b. + rewrite multi2. + + Indeed rule ``eqd0`` applies without unfolding the + definition of ``d``. + +For repeated rewrites the selection process is +repeated anew. + +.. example:: + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Hypothesis eq_adda_b : forall x, x + a = b. + Hypothesis eq_adda_c : forall x, x + a = c. + Hypothesis eqb0 : b = 0. + Definition multi3 := (eq_adda_b, eq_adda_c, eqb0). + + Lemma test : 1 + a = 12 + a. + rewrite 2!multi3. + + It uses ``eq_adda_b`` then ``eqb0`` on the left-hand + side only. Without the bound ``2`` one would obtain ``0 = 0``. + +The grouping of rules inside a multirule does not affect the selection +strategy but can make it easier to include one rule set in another or +to (universally) quantify over the parameters of a subset of rules (as +there is special code that will omit unnecessary quantifiers for rules +that can be syntactically extracted). It is also possible to reverse +the direction of a rule subset, using a special dedicated syntax: the +tactic rewrite ``(=~ multi1)`` is equivalent to ``rewrite multi1_rev``. + +.. example:: + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Hypothesis eqba : b = a. + Hypothesis eqca : c = a. + Definition multi1_rev := (eqba, eqca). + +except that the constants ``eqba``, ``eqab``, ``mult1_rev`` +have not been created. + +Rewriting with multirules is useful to implement simplification or +transformation procedures, to be applied on terms of small to medium +size. 
For instance the library `ssrnat` (Mathematical Components library) +provides two implementations +for arithmetic operations on natural numbers: an elementary one and a +tail recursive version, less inefficient but also less convenient for +reasoning purposes. The library also provides one lemma per such +operation, stating that both versions return the same values when +applied to the same arguments: + +.. coqtop:: in + + Lemma addE : add =2 addn. + Lemma doubleE : double =1 doublen. + Lemma add_mulE n m s : add_mul n m s = addn (muln n m) s. + Lemma mulE : mul =2 muln. + Lemma mul_expE m n p : mul_exp m n p = muln (expn m n) p. + Lemma expE : exp =2 expn. + Lemma oddE : odd =1 oddn. + +The operation on the left hand side of each lemma is the efficient +version, and the corresponding naive implementation is on the right +hand side. In order to reason conveniently on expressions involving +the efficient operations, we gather all these rules in the definition +``trecE``: + +.. coqtop:: in + + Definition trecE := (addE, (doubleE, oddE), (mulE, add_mulE, (expE, mul_expE))). + +The tactic: ``rewrite !trecE.`` +restores the naive versions of each operation in a goal involving the +efficient ones, e.g. for the purpose of a correctness proof. + + +Wildcards vs abstractions +````````````````````````` + +The rewrite tactic supports :token:`r_items` containing holes. For example in +the tactic ``rewrite (_ : _ * 0 = 0).`` +the term ``_ * 0 = 0`` is interpreted as ``forall n : nat, n * 0 = 0.`` +Anyway this tactic is *not* equivalent to +``rewrite (_ : forall x, x * 0 = 0).``. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Section Test. + + .. coqtop:: all + + Lemma test y z : y * 0 + y * (z * 0) = 0. + rewrite (_ : _ * 0 = 0). + + while the other tactic results in + + .. coqtop:: none + + Undo. + + .. coqtop:: all + + rewrite (_ : forall x, x * 0 = 0). 
+ + The first tactic requires you to prove the instance of the (missing) + lemma that was used, while the latter requires you prove the quantified + form. + +When |SSR| rewrite fails on standard |Coq| licit rewrite +```````````````````````````````````````````````````````` + +In a few cases, the |SSR| rewrite tactic fails rewriting some +redexes which standard |Coq| successfully rewrites. There are two main +cases: + + ++ |SSR| never accepts to rewrite indeterminate patterns like: + + .. coqtop:: in + + Lemma foo (x : unit) : x = tt. + + |SSR| will however accept the + ηζ expansion of this rule: + + .. coqtop:: in + + Lemma fubar (x : unit) : (let u := x in u) = tt. + ++ The standard rewrite tactic provided by |Coq| uses a different algorithm + to find instances of the rewrite rule. + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + Section Test. + + .. coqtop:: all + + Variable g : nat -> nat. + Definition f := g. + Axiom H : forall x, g x = 0. + + Lemma test : f 3 + f 3 = f 6. + (* we call the standard rewrite tactic here *) + rewrite -> H. + + This rewriting is not possible in |SSR| because + there is no occurrence of the head symbol ``f`` of the rewrite rule in the + goal. + + .. coqtop:: none + + Undo. + + .. coqtop:: all + + rewrite H. + + Rewriting with ``H`` first requires unfolding the occurrences of + ``f`` + where the substitution is to be performed (here there is a single such + occurrence), using tactic ``rewrite /f`` (for a global replacement of + f by g) or ``rewrite pattern/f``, for a finer selection. + + .. coqtop:: none + + Undo. + + .. coqtop:: all + + rewrite /f H. + + alternatively one can override the pattern inferred from ``H`` + + .. coqtop:: none + + Undo. + + .. coqtop:: all + + rewrite [f _]H. 
+ + +Existential metavariables and rewriting +``````````````````````````````````````` + +The rewrite tactic will not instantiate existing existential +metavariables when matching a redex pattern. + +If a rewrite rule generates a goal with new existential metavariables +in the ``Prop`` sort, these will be generalized as for ``apply`` +(see :ref:`apply_ssr`) and +corresponding new goals will be generated. + + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrfun ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Axiom leq : nat -> nat -> bool. + Notation "m <= n" := (leq m n) : nat_scope. + Notation "m < n" := (S m <= n) : nat_scope. + Inductive Ord n := Sub x of x < n. + Notation "'I_ n" := (Ord n) (at level 8, n at level 2, format "''I_' n"). + Arguments Sub {_} _ _. + Definition val n (i : 'I_n) := let: Sub a _ := i in a. + Definition insub n x := + if @idP (x < n) is ReflectT _ Px then Some (Sub x Px) else None. + Axiom insubT : forall n x Px, insub n x = Some (Sub x Px). + + Lemma test (x : 'I_2) y : Some x = insub 2 y. + rewrite insubT. + + Since the argument corresponding to Px is not supplied by the user, the + resulting goal should be ``Some x = Some (Sub y ?Goal).`` + Instead, |SSR| ``rewrite`` tactic hides the existential variable. + + As in :ref:`apply_ssr`, the ``ssrautoprop`` tactic is used to try to + solve the existential variable. + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Lemma test (x : 'I_2) y (H : y < 2) : Some x = insub 2 y. + rewrite insubT. + + +As a temporary limitation, this behavior is available only if the +rewriting rule is stated using Leibniz equality (as opposed to setoid +relations). It will be extended to other rewriting relations in the +future. + + +.. 
_locking_ssr: + +Locking, unlocking +~~~~~~~~~~~~~~~~~~ + +As program proofs tend to generate large goals, it is important to be +able to control the partial evaluation performed by the simplification +operations that are performed by the tactics. These evaluations can +for example come from a ``/=`` simplification switch, or from rewrite +steps which may expand large terms while performing conversion. We +definitely want to avoid repeating large subterms of the goal in the +proof script. We do this by “clamping down” selected function symbols +in the goal, which prevents them from being considered in +simplification or rewriting steps. This clamping is accomplished by +using the occurrence switches (see section:ref:`abbreviations_ssr`) +together with “term tagging” operations. + +|SSR| provides two levels of tagging. + +The first one uses auxiliary definitions to introduce a provably equal +copy of any term t. However this copy is (on purpose) *not +convertible* to t in the |Coq| system [#8]_. The job is done by the +following construction: + +.. coqtop:: in + + Lemma master_key : unit. Proof. exact tt. Qed. + Definition locked A := let: tt := master_key in fun x : A => x. + Lemma lock : forall A x, x = locked x :> A. + +Note that the definition of *master_key* is explicitly opaque. The +equation ``t = locked t`` given by the ``lock`` lemma can be used for +selective rewriting, blocking on the fly the reduction in the term ``t``. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrfun ssrbool List. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Variable A : Type. + Fixpoint has (p : A -> bool) (l : list A) : bool := + if l is cons x l then p x || (has p l) else false. + + Lemma test p x y l (H : p x = true) : has p ( x :: y :: l) = true. + rewrite {2}[cons]lock /= -lock. 
+ +It is sometimes desirable to globally prevent a definition from being +expanded by simplification; this is done by adding locked in the +definition. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Definition lid := locked (fun x : nat => x). + + Lemma test : lid 3 = 3. + rewrite /=. + unlock lid. + +We provide a special tactic unlock for unfolding such definitions +while removing “locks”, e.g., the tactic: + +.. tacn:: unlock {? @occ_switch } @ident + +replaces the occurrence(s) of :token:`ident` coded by the +:token:`occ_switch` with the corresponding body. + +We found that it was usually preferable to prevent the expansion of +some functions by the partial evaluation switch ``/=``, unless this +allowed the evaluation of a condition. This is possible thanks to an +other mechanism of term tagging, resting on the following *Notation*: + +.. coqtop:: in + + Notation "'nosimpl' t" := (let: tt := tt in t). + +The term ``(nosimpl t)`` simplifies to ``t`` *except* in a definition. +More precisely, given: + +.. coqtop:: in + + Definition foo := (nosimpl bar). + +the term ``foo`` (or ``(foo t’)``) will *not* be expanded by the *simpl* +tactic unless it is in a forcing context (e.g., in ``match foo t’ with … +end``, ``foo t’`` will be reduced if this allows ``match`` to be reduced). +Note that ``nosimpl bar`` is simply notation for a term that reduces to +``bar``; hence ``unfold foo`` will replace ``foo`` by ``bar``, and +``fold foo`` will replace ``bar`` by ``foo``. + +*Warning* The ``nosimpl`` trick only works if no reduction is apparent in +``t``; in particular, the declaration: + +.. coqtop:: in + + Definition foo x := nosimpl (bar x). + +will usually not work. Anyway, the common practice is to tag only the +function, and to use the following definition, which blocks the +reduction as expected: + +.. 
coqtop:: in + + Definition foo x := nosimpl bar x. + +A standard example making this technique shine is the case of +arithmetic operations. We define for instance: + +.. coqtop:: in + + Definition addn := nosimpl plus. + +The operation ``addn`` behaves exactly like ``plus``, except that +``(addn (S n) m)`` will not simplify spontaneously to +``(S (addn n m))`` (the two terms, however, are inter-convertible). +In addition, the unfolding step: ``rewrite /addn`` +will replace ``addn`` directly with ``plus``, so the ``nosimpl`` form is +essentially invisible. + + +.. _congruence_ssr: + +Congruence +~~~~~~~~~~ + +Because of the way matching interferes with type families parameters, +the tactic: + +.. coqtop:: in + + apply: my_congr_property. + +will generally fail to perform congruence simplification, even on +rather simple cases. We therefore provide a more robust alternative in +which the function is supplied: + +.. tacn:: congr {? @num } @term + :name: congr + +This tactic: ++ checks that the goal is a Leibniz equality ++ matches both sides of this equality with “term applied to some arguments”, inferring the right number of arguments from the goal and the type of term. This may expand some definitions or fixpoints. ++ generates the subgoals corresponding to pairwise equalities of the arguments present in the goal. + +The goal can be a non dependent product ``P -> Q``. In that case, the +system asserts the equation ``P = Q``, uses it to solve the goal, and +calls the ``congr`` tactic on the remaining goal ``P = Q``. This can be useful +for instance to perform a transitivity step, like in the following +situation. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test (x y z : nat) (H : x = y) : x = z. + congr (_ = _) : H. + Abort. + + Lemma test (x y z : nat) : x = y -> x = z. + congr (_ = _). 
+ +The optional :token:`num` forces the number of arguments for which the +tactic should generate equality proof obligations. + +This tactic supports equalities between applications with dependent +arguments. Yet dependent arguments should have exactly the same +parameters on both sides, and these parameters should appear as first +arguments. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Definition f n := + if n is 0 then plus else mult. + Definition g (n m : nat) := plus. + + Lemma test x y : f 0 x y = g 1 1 x y. + congr plus. + + This script shows that the ``congr`` tactic matches ``plus`` + with ``f 0`` on the left hand side and ``g 1 1`` on the right hand + side, and solves the goal. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test n m (Hnm : m <= n) : S m + (S n - S m) = S n. + congr S; rewrite -/plus. + + The tactic ``rewrite -/plus`` folds back the expansion of plus + which was necessary for matching both sides of the equality with + an application of ``S``. + +Like most |SSR| arguments, term can contain wildcards. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test x y : x + (y * (y + x - x)) = x * 1 + (y + 0) * y. + congr ( _ + (_ * _)). + +.. _contextual_patterns_ssr: + +Contextual patterns +------------------- + +The simple form of patterns used so far, terms possibly containing +wild cards, often require an additional :token:`occ_switch` to be specified. 
+While this may work pretty fine for small goals, the use of +polymorphic functions and dependent types may lead to an invisible +duplication of functions arguments. These copies usually end up in +types hidden by the implicit arguments machinery or by user defined +notations. In these situations computing the right occurrence numbers +is very tedious because they must be counted on the goal as printed +after setting the Printing All flag. Moreover the resulting script is +not really informative for the reader, since it refers to occurrence +numbers he cannot easily see. + +Contextual patterns mitigate these issues allowing to specify +occurrences according to the context they occur in. + + +Syntax +~~~~~~ + +The following table summarizes the full syntax of :token:`c_pattern` and the +corresponding subterm(s) identified by the pattern. In the third +column we use s.m.r. for “the subterms matching the redex” specified +in the second column. + +.. list-table:: + :header-rows: 1 + + * - :token:`c_pattern` + - redex + - subterms affected + + * - ``term`` + - ``term`` + - all occurrences of ``term`` + + * - ``ident in term`` + - subterm of ``term`` selected by ``ident`` + - all the subterms identified by ``ident`` in all the + occurrences of ``term`` + + * - ``term1 in ident in term2`` + - ``term1`` in all s.m.r. + - in all the subterms identified by + ``ident`` in all the occurrences of ``term2`` + * - ``term1 as ident in term2`` + - ``term 1`` + - in all the subterms identified by ``ident` + in all the occurrences of ``term2[term 1 /ident]`` + +The rewrite tactic supports two more patterns obtained prefixing the +first two with in. The intended meaning is that the pattern identifies +all subterms of the specified context. The ``rewrite`` tactic will infer a +pattern for the redex looking at the rule used for rewriting. + +.. 
list-table:: + :header-rows: 1 + + * - :token:`r_pattern` + - redex + - subterms affected + + * - ``in term`` + - inferred from rule + - in all s.m.r. in all occurrences of ``term`` + + * - ``in ident in term`` + - inferred from rule + - in all s.m.r. in all the subterms identified by ``ident`` + in all the occurrences of ``term`` + +The first :token:`c_pattern` is the simplest form matching any context but +selecting a specific redex and has been described in the previous +sections. We have seen so far that the possibility of selecting a +redex using a term with holes is already a powerful mean of redex +selection. Similarly, any terms provided by the user in the more +complex forms of :token:`c_patterns` +presented in the tables above can contain +holes. + +For a quick glance at what can be expressed with the last +:token:`r_pattern` +consider the goal ``a = b`` and the tactic + +.. coqtop:: in + + rewrite [in X in _ = X]rule. + +It rewrites all occurrences of the left hand side of ``rule`` +inside ``b`` only (``a``, and the hidden type of the equality, are ignored). Note that the +variant ``rewrite [X in _ = X]rule`` would have rewritten ``b`` +exactly (i.e., it would only work if ``b`` and the left hand side +of rule can be unified). + + +Matching contextual patterns +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The :token:`c_pattern` and :token:`r_pattern` involving terms +with holes are matched +against the goal in order to find a closed instantiation. This +matching proceeds as follows: + +.. 
list-table:: + :header-rows: 1 + + * - :token:`c_pattern` + - instantiation order and place for ``term_i`` and redex + + * - ``term`` + - ``term`` is matched against the goal, redex is unified with + the instantiation of ``term`` + + * - ``ident in term`` + - ``term`` is matched against the goal, redex is unified with the + subterm of the instantiation of ``term`` identified by + ``ident`` + + * - ``term1 in ident in term2`` + - ``term2`` is matched against the goal, ``term1`` + is matched against the subterm of the instantiation of + ``term1`` identified by ``ident``, redex is unified with + the instantiation of ``term1`` + + * - ``term1 as ident in term2`` + - ``term2[term1/ident]`` is matched against + the goal, redex is unified with the instantiation of ``term1`` + +In the following patterns, the redex is intended to be inferred from +the rewrite rule. + +.. list-table:: + :header-rows: 1 + + * - :token:`r_pattern` + - instantiation order and place for ``term_i`` and redex + + * - ``in ident in term`` + - ``term`` is matched against the goal, the redex is matched against + the subterm of the instantiation of ``term`` identified by + ``ident`` + + * - ``in term`` + - ``term`` is matched against the goal, redex is matched against the + instantiation of ``term`` + + +Examples +~~~~~~~~ + + +Contextual pattern in set and the : tactical +```````````````````````````````````````````` + +As already mentioned in section :ref:`abbreviations_ssr` the ``set`` +tactic takes as an +argument a term in open syntax. This term is interpreted as the +simplest for of :token:`c_pattern`. To void confusion in the grammar, open +syntax is supported only for the simplest form of patterns, while +parentheses are required around more complex patterns. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. 
coqtop:: all + + Lemma test a b : a + b + 1 = b + (a + 1). + set t := (X in _ = X). + rewrite {}/t. + set t := (a + _ in X in _ = X). + + +Since the user may define an infix notation for ``in`` the former tactic +may result ambiguous. The disambiguation rule implemented is to prefer +patterns over simple terms, but to interpret a pattern with double +parentheses as a simple term. For example the following tactic would +capture any occurrence of the term ``a in A``. + +.. coqtop:: in + + set t := ((a in A)). + +Contextual pattern can also be used as arguments of the ``:`` tactical. +For example: + +.. coqtop:: in + + elim: n (n in _ = n) (refl_equal n). + + +Contextual patterns in rewrite +`````````````````````````````` + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Notation "n .+1" := (Datatypes.S n) (at level 2, left associativity, + format "n .+1") : nat_scope. + + Axiom addSn : forall m n, m.+1 + n = (m + n).+1. + Axiom addn0 : forall m, m + 0 = m. + Axiom addnC : forall m n, m + n = n + m. + + Lemma test x y z f : (x.+1 + y) + f (x.+1 + y) (z + (x + y).+1) = 0. + rewrite [in f _ _]addSn. + + Note: the simplification rule ``addSn`` is applied only under the ``f`` + symbol. + Then we simplify also the first addition and expand 0 into 0+0. + + .. coqtop:: all + + rewrite addSn -[X in _ = X]addn0. + + Note that the right hand side of ``addn0`` is undetermined, but the + rewrite pattern specifies the redex explicitly. The right hand side + of ``addn0`` is unified with the term identified by ``X``, ``0`` here. + + + The following pattern does not specify a redex, since it identifies an + entire region, hence the rewrite rule has to be instantiated + explicitly. Thus the tactic: + + .. coqtop:: all + + rewrite -{2}[in X in _ = X](addn0 0). + + The following tactic is quite tricky: + + .. 
coqtop:: all + + rewrite [_.+1 in X in f _ X](addnC x.+1). + + The explicit redex ``_.+1`` is important since its head constant ``S`` + differs from the head constant inferred from + ``(addnC x.+1)`` (that is ``+``). + Moreover, the pattern ``f _ X`` is important to rule out + the first occurrence of ``(x + y).+1``. + Last, only the subterms of ``f _ X`` + identified by ``X`` are rewritten, thus the first argument of + ``f`` is skipped too. + Also note the pattern ``_.+1`` is interpreted in the context + identified by ``X``, thus it gets instantiated to + ``(y + x).+1`` and not ``(x + y).+1``. + + The last rewrite pattern allows to specify exactly the shape of the + term identified by X, that is thus unified with the left hand side of + the rewrite rule. + + .. coqtop:: all + + rewrite [x.+1 + y as X in f X _]addnC. + + +Patterns for recurrent contexts +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The user can define shortcuts for recurrent contexts corresponding to +the ``ident in term`` part. The notation scope identified with +``%pattern`` +provides a special notation ``(X in t)`` the user must adopt +in order to define +context shortcuts. + +The following example is taken from ``ssreflect.v`` where the +``LHS`` and ``RHS`` shortcuts are defined. + +.. coqtop:: in + + Notation RHS := (X in _ = X)%pattern. + Notation LHS := (X in X = _)%pattern. + +Shortcuts defined this way can be freely used in place of the trailing +``ident in term`` part of any contextual pattern. Some examples follow: + +.. coqtop:: in + + set rhs := RHS. + rewrite [in RHS]rule. + case: (a + _ in RHS). + + +.. _views_and_reflection_ssr: + +Views and reflection +-------------------- + +The bookkeeping facilities presented in section :ref:`basic_tactics_ssr` are +crafted to ease simultaneous introductions and generalizations of facts and +operations of casing, naming etc. 
It is also a common practice to make a stack
+operation immediately followed by
coqtop:: all + + Variable d : Type. + Fixpoint add_last(s : list d) (z : d) {struct s} : list d := + if s is cons x s' then cons x (add_last s' z) else z :: nil. + + One can define an alternative, reversed, induction principle on + inductively defined lists, by proving the following lemma: + + .. coqtop:: all + + Axiom last_ind_list : forall P : list d -> Prop, + P nil -> (forall s (x : d), P s -> P (add_last s x)) -> + forall s : list d, P s. + + Then the combination of elimination views with equation names result + in a concise syntax for reasoning inductively using the user defined + elimination scheme. + + .. coqtop:: all + + Lemma test (x : d) (l : list d): l = l. + elim/last_ind_list E : l=> [| u v]; last first. + + +User provided eliminators (potentially generated with the ``Function`` +|Coq|’s command) can be combined with the type family switches described +in section :ref:`type_families_ssr`. +Consider an eliminator ``foo_ind`` of type: + +.. coqtop:: in + + foo_ind : forall …, forall x : T, P p1 … pm. + +and consider the tactic: + +.. coqtop:: in + + elim/foo_ind: e1 … / en. + +The ``elim/`` tactic distinguishes two cases: + +:truncated eliminator: when ``x`` does not occur in ``P p1 … pm`` and the + type of ``en`` unifies with ``T`` and ``en`` is not ``_``. + In that case, ``en`` is + passed to the eliminator as the last argument (``x`` in ``foo_ind``) and + ``en−1 … e1`` are used as patterns to select in the goal the occurrences that + will be bound by the predicate ``P``, thus it must be possible to unify + the sub-term of the goal matched by ``en−1`` with ``pm`` , the one matched + by ``en−2`` with ``pm−1`` and so on. +:regular eliminator: in all the other cases. Here it must be possible + to unify the term matched by ``en`` with ``pm`` , the one matched by + ``en−1`` + with ``pm−1`` and so on. Note that standard eliminators have the shape + ``…forall x, P … x``, thus ``en`` is the pattern identifying the + eliminated term, as expected. 
+ + +As explained in section :ref:`type_families_ssr`, the initial prefix of +``ei`` can be omitted. + +Here an example of a regular, but non trivial, eliminator. + +.. example:: + + Here is a toy example illustrating this feature. + + .. coqtop:: reset + + From Coq Require Import ssreflect FunInd. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Function plus (m n : nat) {struct n} : nat := + if n is S p then S (plus m p) else m. + + About plus_ind. + + Lemma test x y z : plus (plus x y) z = plus x (plus y z). + + The following tactics are all valid and perform the same elimination + on this goal. + + .. coqtop:: in + + elim/plus_ind: z / (plus _ z). + elim/plus_ind: {z}(plus _ z). + elim/plus_ind: {z}_. + elim/plus_ind: z / _. + + .. coqtop:: reset + + From Coq Require Import ssreflect FunInd. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + Function plus (m n : nat) {struct n} : nat := + if n is S p then S (plus m p) else m. + + About plus_ind. + + Lemma test x y z : plus (plus x y) z = plus x (plus y z). + + .. coqtop:: all + + elim/plus_ind: z / _. + + The two latter examples feature a wildcard pattern: in this case, + the resulting pattern is inferred from the type of the eliminator. + In both these examples, it is ``(plus _ _)``, which matches the subterm + ``plus (plus x y) z`` thus instantiating the last ``_`` with ``z``. + Note that the tactic: + + .. coqtop:: reset + + From Coq Require Import ssreflect FunInd. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + Function plus (m n : nat) {struct n} : nat := + if n is S p then S (plus m p) else m. + + About plus_ind. + + Lemma test x y z : plus (plus x y) z = plus x (plus y z). + + .. coqtop:: all + + elim/plus_ind: y / _. 
+ + triggers an error: in the conclusion + of the ``plus_ind`` eliminator, the first argument of the predicate + ``P`` should be the same as the second argument of ``plus``, in the + second argument of ``P``, but ``y`` and ``z`` do no unify. + +Here an example of a truncated eliminator: + +.. example:: + + Consider the goal: + + .. coqtop:: reset + + From Coq Require Import ssreflect FunInd. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: in + + Lemma test p n (n_gt0 : 0 < n) (pr_p : prime p) : + p %| \prod_(i <- prime_decomp n | i \in prime_decomp n) i.1 ^ i.2 -> + exists2 x : nat * nat, x \in prime_decomp n & p = x.1. + Proof. + elim/big_prop: _ => [| u v IHu IHv | [q e] /=]. + + + where the type of the ``big_prop`` eliminator is + + .. coqtop:: in + + big_prop: forall (R : Type) (Pb : R -> Type) + (idx : R) (op1 : R -> R -> R), Pb idx -> + (forall x y : R, Pb x -> Pb y -> Pb (op1 x y)) -> + forall (I : Type) (r : seq I) (P : pred I) (F : I -> R), + (forall i : I, P i -> Pb (F i)) -> + Pb (\big[op1/idx]_(i <- r | P i) F i). + + Since the pattern for the argument of Pb is not specified, the + inferred one is used instead: ``big[_/_]_(i <- _ | _ i) _ i``, + and after the introductions, the following goals are generated: + + .. coqtop:: in + + subgoal 1 is: + p %| 1 -> exists2 x : nat * nat, x \in prime_decomp n & p = x.1 + subgoal 2 is: + p %| u * v -> exists2 x : nat * nat, x \in prime_decomp n & p = x.1 + subgoal 3 is: + (q, e) \in prime_decomp n -> p %| q ^ e -> + exists2 x : nat * nat, x \in prime_decomp n & p = x.1. + + Note that the pattern matching algorithm instantiated all the + variables occurring in the pattern. + + +.. _interpreting_assumptions_ssr: + +Interpreting assumptions +~~~~~~~~~~~~~~~~~~~~~~~~ + +Interpreting an assumption in the context of a proof consists in +applying it a lemma before generalizing, and/or decomposing this +assumption. 
For instance, with the extensive use of boolean reflection +(see section :ref:`views_and_reflection_ssr`.4), it is quite frequent +to need to decompose the logical interpretation of (the boolean +expression of) a fact, rather than the fact itself. This can be +achieved by a combination of ``move : _ => _`` switches, like in the +following example, where ``||`` is a notation for the boolean +disjunction. + + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Variables P Q : bool -> Prop. + Hypothesis P2Q : forall a b, P (a || b) -> Q a. + + Lemma test a : P (a || a) -> True. + move=> HPa; move: {HPa}(P2Q HPa) => HQa. + + which transforms the hypothesis ``HPa : P a`` which has been introduced + from the initial statement into ``HQa : Q a``. + This operation is so common that the tactic shell has specific + syntax for it. The following scripts: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + Variables P Q : bool -> Prop. + Hypothesis P2Q : forall a b, P (a || b) -> Q a. + + Lemma test a : P (a || a) -> True. + + .. coqtop:: all undo + + move=> HPa; move/P2Q: HPa => HQa. + + or more directly: + + .. coqtop:: all undo + + move/P2Q=> HQa. + + are equivalent to the former one. The former script shows how to + interpret a fact (already in the context), thanks to the discharge + tactical (see section :ref:`discharge_ssr`) and the latter, how to interpret the top + assumption of a goal. Note that the number of wildcards to be inserted + to find the correct application of the view lemma to the hypothesis + has been automatically inferred. + +The view mechanism is compatible with the ``case`` tactic and with the +equation name generation mechanism (see section :ref:`generation_of_equations_ssr`): + +.. 
example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Variables P Q: bool -> Prop. + Hypothesis Q2P : forall a b, Q (a || b) -> P a \/ P b. + + Lemma test a b : Q (a || b) -> True. + case/Q2P=> [HPa | HPb]. + + This view tactic performs: + + .. coqtop:: in + + move=> HQ; case: {HQ}(Q2P HQ) => [HPa | HPb]. + +The term on the right of the ``/`` view switch is called a *view lemma*. +Any |SSR| term coercing to a product type can be used as a view +lemma. + +The examples we have given so far explicitly provide the direction of +the translation to be performed. In fact, view lemmas need not to be +oriented. The view mechanism is able to detect which application is +relevant for the current goal. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Variables P Q: bool -> Prop. + Hypothesis PQequiv : forall a b, P (a || b) <-> Q a. + + Lemma test a b : P (a || b) -> True. + move/PQequiv=> HQab. + + has the same behavior as the first example above. + + The view mechanism can insert automatically a *view hint* to transform + the double implication into the expected simple implication. The last + script is in fact equivalent to: + + .. coqtop:: in + + Lemma test a b : P (a || b) -> True. + move/(iffLR (PQequiv _ _)). + + where: + + .. coqtop:: in + + Lemma iffLR P Q : (P <-> Q) -> P -> Q. + + +Specializing assumptions +```````````````````````` + +The special case when the *head symbol* of the view lemma is a +wildcard is used to interpret an assumption by *specializing* it. The +view mechanism hence offers the possibility to apply a higher-order +assumption to some given arguments. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. 
+ + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test z : (forall x y, x + y = z -> z = x) -> z = 0. + move/(_ 0 z). + + +Interpreting goals +~~~~~~~~~~~~~~~~~~ + +In a similar way, it is also often convenient to +change a goal by turning it into an equivalent proposition. The view +mechanism of |SSR| has a special syntax ``apply/`` for combining in a +single tactic simultaneous goal interpretation operations and +bookkeeping steps. + + +.. example:: + The following example uses the ``~~`` prenex notation for boolean negation: + + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Variables P Q: bool -> Prop. + Hypothesis PQequiv : forall a b, P (a || b) <-> Q a. + + Lemma test a : P ((~~ a) || a). + apply/PQequiv. + + thus in this case, the tactic ``apply/PQequiv`` is equivalent to + ``apply: (iffRL (PQequiv _ _))``, where ``iffRL`` is the analogue of + ``iffLR`` for the converse implication. + +Any |SSR| term whose type coerces to a double implication can be +used as a view for goal interpretation. + +Note that the goal interpretation view mechanism supports both ``apply`` +and ``exact`` tactics. As expected, a goal interpretation view command +exact/term should solve the current goal or it will fail. + +*Warning* Goal interpretation view tactics are *not* compatible with +the bookkeeping tactical ``=>`` since this would be redundant with the +``apply: term => _`` construction. + + +Boolean reflection +~~~~~~~~~~~~~~~~~~ + +In the Calculus of Inductive Constructions, there is an obvious +distinction between logical propositions and boolean values. On the +one hand, logical propositions are objects of *sort* ``Prop`` which is +the carrier of intuitionistic reasoning. 
Logical connectives in +``Prop`` are *types*, which give precise information on the structure +of their proofs; this information is automatically exploited by |Coq| +tactics. For example, |Coq| knows that a proof of ``A \/ B`` is +either a proof of ``A`` or a proof of ``B``. The tactics ``left`` and +``right`` change the goal ``A \/ B`` to ``A`` and ``B``, respectively; +dually, the tactic ``case`` reduces the goal ``A \/ B => G`` to two +subgoals ``A => G`` and ``B => G``. + +On the other hand, bool is an inductive *datatype* with two +constructors true and false. Logical connectives on bool are +*computable functions*, defined by their truth tables, using case +analysis: + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Definition orb (b1 b2 : bool) := if b1 then true else b2. + +Properties of such connectives are also established using case +analysis + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test b : b || ~~ b = true. + by case: b. + + Once ``b`` is replaced by ``true`` in the first goal and by ``false`` in the + second one, the goals reduce by computations to the trivial ``true = true``. + +Thus, ``Prop`` and ``bool`` are truly complementary: the former supports +robust natural deduction, the latter allows brute-force +evaluation. |SSR| supplies a generic mechanism to have the best of +the two worlds and move freely from a propositional version of a +decidable predicate to its boolean version. + +First, booleans are injected into propositions using the coercion +mechanism: + +.. coqtop:: in + + Coercion is_true (b : bool) := b = true. 
+ +This allows any boolean formula ``b`` to be used in a context where |Coq| +would expect a proposition, e.g., after ``Lemma … : ``. It is then +interpreted as ``(is_true b)``, i.e., the proposition ``b = true``. Coercions +are elided by the pretty-printer, so they are essentially transparent +to the user. + +The reflect predicate +~~~~~~~~~~~~~~~~~~~~~ + +To get all the benefits of the boolean reflection, it is in fact +convenient to introduce the following inductive predicate ``reflect`` to +relate propositions and booleans: + +.. coqtop:: in + + Inductive reflect (P: Prop): bool -> Type := + | Reflect_true : P -> reflect P true + | Reflect_false : ~P -> reflect P false. + +The statement ``(reflect P b)`` asserts that ``(is_true b)`` and ``P`` are +logically equivalent propositions. + +For instance, the following lemma: + +.. coqtop:: in + + Lemma andP: forall b1 b2, reflect (b1 /\ b2) (b1 && b2). + +relates the boolean conjunction to the logical one ``/\``. Note that in +``andP``, ``b1`` and ``b2`` are two boolean variables and the +proposition ``b1 /\ b2`` hides two coercions. The conjunction of +``b1`` and ``b2`` can then be viewed as ``b1 /\ b2`` or as ``b1 && b2``. + +Expressing logical equivalences through this family of inductive types +makes possible to take benefit from *rewritable equations* associated +to the case analysis of |Coq|’s inductive types. + +Since the equivalence predicate is defined in |Coq| as: + +.. coqtop:: in + + Definition iff (A B:Prop) := (A -> B) /\ (B -> A). + +where ``/\`` is a notation for ``and``: + +.. coqtop:: in + + Inductive and (A B:Prop) : Prop := conj : A -> B -> and A B. + +This make case analysis very different according to the way an +equivalence property has been defined. + +.. coqtop:: in + + Lemma andE (b1 b2 : bool) : (b1 /\ b2) <-> (b1 && b2). + +Let us compare the respective behaviours of ``andE`` and ``andP``. + + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. 
+ Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + Axiom andE : forall (b1 b2 : bool), (b1 /\ b2) <-> (b1 && b2). + + .. coqtop:: all + + Lemma test (b1 b2 : bool) : if (b1 && b2) then b1 else ~~(b1||b2). + + .. coqtop:: all undo + + case: (@andE b1 b2). + + .. coqtop:: all undo + + case: (@andP b1 b2). + +Expressing reflection relation through the ``reflect`` predicate is hence +a very convenient way to deal with classical reasoning, by case +analysis. Using the ``reflect`` predicate allows moreover to program rich +specifications inside its two constructors, which will be +automatically taken into account during destruction. This +formalisation style gives far more efficient specifications than +quantified (double) implications. + +A naming convention in |SSR| is to postfix the name of view lemmas +with ``P``. For example, ``orP`` relates ``||`` and ``\/``, +``negP`` relates ``~~`` and ``~``. + +The view mechanism is compatible with reflect predicates. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test (a b : bool) (Ha : a) (Hb : b) : a /\ b. + apply/andP. + + Conversely + + .. coqtop:: none + + Abort. + + .. coqtop:: all + + Lemma test (a b : bool) : a /\ b -> a. + move/andP. + +The same tactics can also be used to perform the converse operation, +changing a boolean conjunction into a logical one. The view mechanism +guesses the direction of the transformation to be used i.e., the +constructor of the reflect predicate which should be chosen. + + +General mechanism for interpreting goals and assumptions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Specializing assumptions +```````````````````````` + +The |SSR| tactic: + +.. coqtop:: in + + move/(_ term1 … termn). + +is equivalent to the tactic: + +.. 
coqtop:: in + + intro top; generalize (top term1 … termn); clear top. + +where ``top`` is a fresh name for introducing the top assumption of the +current goal. + + +Interpreting assumptions +```````````````````````` + +The general form of an assumption view tactic is: + +.. tacv:: [move | case] / @term + +The term , called the *view lemma* can be: + + ++ a (term coercible to a) function; ++ a (possibly quantified) implication; ++ a (possibly quantified) double implication; ++ a (possibly quantified) instance of the reflect predicate (see + section :ref:`views_and_reflection_ssr`). + + +Let ``top`` be the top assumption in the goal. + +There are three steps in the behaviour of an assumption view tactic: + ++ It first introduces ``top``. ++ If the type of :token:`term` is neither a double implication nor an + instance of the reflect predicate, then the tactic automatically + generalises a term of the form: ``term term1 … termn`` where the + terms ``term1 … termn`` instantiate the possible quantified variables of + ``term`` , in order for ``(term term1 … termn top)`` to be well typed. ++ If the type of ``term`` is an equivalence, or an instance of the + reflect predicate, it generalises a term of the form: + ``(termvh (term term1 … termn ))`` where the term ``termvh`` + inserted is called an + *assumption interpretation view hint*. ++ It finally clears top. + + +For a ``case/term`` tactic, the generalisation step is replaced by a +case analysis step. + +*View hints* are declared by the user (see section:ref:`views_and_reflection_ssr`.8) and are +stored in the Hint View database. The proof engine automatically +detects from the shape of the top assumption ``top`` and of the view lemma +``term`` provided to the tactic the appropriate view hint in the +database to be inserted. + +If ``term`` is a double implication, then the view hint will be one of +the defined view hints for implication. These hints are by default the +ones present in the file ``ssreflect.v``: + +.. 
coqtop:: in + + Lemma iffLR : forall P Q, (P <-> Q) -> P -> Q. + +which transforms a double implication into the left-to-right one, or: + +.. coqtop:: in + + Lemma iffRL : forall P Q, (P <-> Q) -> Q -> P. + +which produces the converse implication. In both cases, the first +two Prop arguments are implicit. + +If ``term`` is an instance of the ``reflect`` predicate, then ``A`` will be one +of the defined view hints for the ``reflect`` predicate, which are by +default the ones present in the file ``ssrbool.v``. These hints are not +only used for choosing the appropriate direction of the translation, +but they also allow complex transformations, involving negations. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: in + + Lemma introN : forall (P : Prop) (b : bool), reflect P b -> ~ P -> ~~b. + + .. coqtop:: all + + Lemma test (a b : bool) (Ha : a) (Hb : b) : ~~ (a && b). + apply/andP. + + In fact this last script does not + exactly use the hint ``introN``, but the more general hint: + + .. coqtop:: in + + Lemma introNTF : forall (P : Prop) (b c : bool), + reflect P b -> (if c then ~ P else P) -> ~~ b = c. + + The lemma ``introN`` is an instantiation of ``introNTF`` using ``c := true``. + +Note that views, being part of :token:`i_pattern`, can be used to interpret +assertions too. For example the following script asserts ``a && b`` but +actually uses its propositional interpretation. + + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test (a b : bool) (pab : b && a) : b. + have /andP [pa ->] : (a && b) by rewrite andbC. + +Interpreting goals + +A goal interpretation view tactic of the form: + +.. 
tacv:: apply/@term + +applied to a goal ``top`` is interpreted in the following way: + ++ If the type of ``term`` is not an instance of the ``reflect`` predicate, + nor an equivalence, then the term ``term`` is applied to the current + goal ``top``, possibly inserting implicit arguments. ++ If the type of ``term`` is an instance of the reflect predicate or an + equivalence, then a *goal interpretation view hint* can possibly be + inserted, which corresponds to the application of a term + ``(termvh (term _ … _))`` to the current goal, possibly inserting implicit arguments. + + +Like assumption interpretation view hints, goal interpretation ones +are user defined lemmas stored (see section :ref:`views_and_reflection_ssr`) in the ``Hint View`` +database bridging the possible gap between the type of ``term`` and the +type of the goal. + + +Interpreting equivalences +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Equivalent boolean propositions are simply *equal* boolean terms. A +special construction helps the user to prove boolean equalities by +considering them as logical double implications (between their coerced +versions), while performing at the same time logical operations on +both sides. + +The syntax of double views is: + +.. tacv:: apply/@term/@term + +The first term is the view lemma applied to the left hand side of the +equality, while the second term is the one applied to the right hand side. + +In this context, the identity view can be used when no view has to be applied: + +.. coqtop:: in + + Lemma idP : reflect b1 b1. + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test (b1 b2 b3 : bool) : ~~ (b1 || b2) = b3. + apply/idP/idP. + + The same goal can be decomposed in several ways, and the user may + choose the most convenient interpretation. + + .. 
coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + + .. coqtop:: all + + Lemma test (b1 b2 b3 : bool) : ~~ (b1 || b2) = b3. + apply/norP/idP. + + +.. _declaring_new_hints_ssr: + +Declaring new Hint Views +~~~~~~~~~~~~~~~~~~~~~~~~ + +The database of hints for the view mechanism is extensible via a +dedicated vernacular command. As library ``ssrbool.v`` already declares a +corpus of hints, this feature is probably useful only for users who +define their own logical connectives. Users can declare their own +hints following the syntax used in ``ssrbool.v``: + +.. cmd:: Hint View for move / @ident {? | @num } +.. cmd:: Hint View for apply / @ident {? | @num } + +The :token:`ident` is the name of the lemma to be +declared as a hint. If `move` is used as +tactic, the hint is declared for assumption interpretation tactics, +`apply` declares hints for goal interpretations. Goal interpretation +view hints are declared for both simple views and left hand side +views. The optional natural number is the number of implicit +arguments to be considered for the declared hint view lemma. + +The command: + +.. cmd:: Hint View for apply//@ident {? | @num } + +with a double slash ``//``, declares hint views for right hand sides of +double views. + +See the files ``ssreflect.v`` and ``ssrbool.v`` for examples. + + +Multiple views +~~~~~~~~~~~~~~ + +The hypotheses and the goal can be interpreted applying multiple views +in sequence. Both move and apply can be followed by an arbitrary +number of ``/term``. The main difference between the following two +tactics + +.. coqtop:: in + + apply/v1/v2/v3. + apply/v1; apply/v2; apply/v3. + +is that the former applies all the views to the principal goal. +Applying a view with hypotheses generates new goals, and the second +line would apply the view ``v2`` to all the goals generated by ``apply/v1``. 
+ +Note that the NO-OP intro pattern ``-`` can be used to separate two views, +making the two following examples equivalent: + +.. coqtop:: in + + move=> /v1; move=> /v2. + move=> /v1 - /v2. + +The tactic ``move`` can be used together with the ``in`` tactical to +pass a given hypothesis to a lemma. + + +.. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + Section Test. + Variables P Q R : Prop. + + .. coqtop:: all + + Variable P2Q : P -> Q. + Variable Q2R : Q -> R. + + Lemma test (p : P) : True. + move/P2Q/Q2R in p. + +If the list of views is of length two, ``Hint Views`` for interpreting +equivalences are indeed taken into account, otherwise only single +``Hint Views`` are used. + + +|SSR| searching tool +-------------------- + +|SSR| proposes an extension of the Search command. Its syntax is: + +.. cmd:: Search {? @pattern } {* {? - } %( @string %| @pattern %) {? % @ident} } {? in {+ {? - } @qualid } } + +where :token:`qualid` is the name of an open module. This command search returns +the list of lemmas: + + ++ whose *conclusion* contains a subterm matching the optional first + pattern. A - reverses the test, producing the list of lemmas whose + conclusion does not contain any subterm matching the pattern; ++ whose name contains the given string. A ``-`` prefix reverses the test, + producing the list of lemmas whose name does not contain the string. A + string that contains symbols or is followed by a scope key, is + interpreted as the constant whose notation involves that string (e.g., + `+` for `addn`), if this is unambiguous; otherwise the diagnostic + includes the output of the ``Locate`` vernacular command. ++ whose statement, including assumptions and types, contains a subterm + matching the next patterns. 
If a pattern is prefixed by ``-``, the test is + reversed; ++ contained in the given list of modules, except the ones in the + modules prefixed by a ``-``. + + +Note that: + + ++ As for regular terms, patterns can feature scope indications. For + instance, the command: ``Search _ (_ + _)%N.`` lists all the lemmas whose + statement (conclusion or hypotheses) involve an application of the + binary operation denoted by the infix ``+`` symbol in the ``N`` scope (which is + |SSR| scope for natural numbers). ++ Patterns with holes should be surrounded by parentheses. ++ Search always volunteers the expansion of the notation, avoiding the + need to execute Locate independently. Moreover, a string fragment + looks for any notation that contains fragment as a substring. If the + ``ssrbool.v`` library is imported, the command: ``Search "~~".`` answers : + + .. example:: + + .. coqtop:: reset + + From Coq Require Import ssreflect ssrbool. + Set Implicit Arguments. + Unset Strict Implicit. + Unset Printing Implicit Defensive. + + .. coqtop:: all + + Search "~~". + ++ A diagnostic is issued if there are different matching notations; it + is an error if all matches are partial. ++ Similarly, a diagnostic warns about multiple interpretations, and + signals an error if there is no default one. ++ The command ``Search in M.`` is a way of obtaining the complete + signature of the module ``M``. ++ Strings and pattern indications can be interleaved, but the first + indication has a special status if it is a pattern, and only filters + the conclusion of lemmas: + + + The command : ``Search (_ =1 _) "bij".`` lists all the lemmas whose + conclusion features a ``=1`` and whose name contains the string ``bij``. + + The command : ``Search "bij" (_ =1 _).`` lists all the lemmas whose + statement, including hypotheses, features a ``=1`` and whose name + contains the string ``bij``. + +Synopsis and Index +------------------ + +Parameters +~~~~~~~~~~ + +|SSR| tactics + +.. 
prodn:: + d_tactic ::= elim %| case %| congr %| apply %| exact %| move + +Notation scope + +.. prodn:: key ::= @ident + +Module name + +.. prodn:: name ::= @qualid + +Natural number + +.. prodn:: natural ::= @num %| @ident + +where :token:`ident` is an Ltac variable denoting a standard |Coq| numeral +(should not be the name of a tactic which can be followed by a +bracket ``[``, like ``do``, ``have``,…) + +Pattern + +.. prodn:: pattern ::= @term + +Items and switches +~~~~~~~~~~~~~~~~~~ + +.. prodn:: binder ::= @ident %| ( @ident {? : @term } ) + +binder see :ref:`abbreviations_ssr`. + +.. prodn:: clear_switch ::= { {+ @ident } } + +clear switch see :ref:`discharge_ssr` + +.. prodn:: c_pattern ::= {? @term in %| @term as } @ident in @term + +context pattern see :ref:`contextual_patterns_ssr` + +.. prodn:: d_item ::= {? @occ_switch %| @clear_switch } {? @term %| ( @c_pattern ) } + +discharge item see :ref:`discharge_ssr` + +.. prodn:: gen_item ::= {? @ } @ident %| ( @ident ) %| ( {? @ } @ident := @c_pattern ) + +generalization item see :ref:`structure_ssr` + +.. prodn:: i_pattern ::= @ident %| _ %| ? %| * %| {? @occ_switch } -> %| {? @occ_switch } <- %| [ {*| {* @i_item } } %| - %| [: {+ @ident } ] + +intro pattern :ref:`introduction_ssr` + +.. prodn:: i_item ::= @clear_switch %| @s_item %| @i_pattern %| / @term + +intro item see :ref:`introduction_ssr` + +.. prodn:: int_mult ::= {? @num } @mult_mark + +multiplier see :ref:`iteration_ssr` + +.. prodn:: occ_switch ::= { {? + %| - } {* @natural } } + +occur. switch see :ref:`occurrence_selection_ssr` + +.. prodn:: mult ::= {? @natural } @mult_mark + +multiplier see :ref:`iteration_ssr` + +.. prodn:: mult_mark ::= ? %| ! + +multiplier mark see :ref:`iteration_ssr` + +.. prodn:: r_item ::= {? / } @term %| @s_item + +rewrite item see :ref:`rewriting_ssr` + +.. prodn:: r_prefix ::= {? - } {? @int_mult } {? @occ_switch %| @clear_switch } {? [ @r_pattern ] } + +rewrite prefix see :ref:`rewriting_ssr` + +.. 
prodn:: r_pattern ::= @term %| @c_pattern %| in {? @ident in } @term + +rewrite pattern see :ref:`rewriting_ssr` + +.. prodn:: r_step ::= {? @r_prefix } @r_item + +rewrite step see :ref:`rewriting_ssr` + +.. prodn:: s_item ::= /= %| // %| //= + +simplify switch see :ref:`introduction_ssr` + +Tactics +~~~~~~~ + +*Note*: ``without loss`` and ``suffices`` are synonyms for ``wlog`` and ``suff`` +respectively. + +.. tacn:: move + +idtac or hnf see :ref:`bookkeeping_ssr` + +.. tacn:: apply +.. tacn:: exact + +application see :ref:`the_defective_tactics_ssr` + +.. tacn:: abstract + + see :ref:`abstract_ssr` and :ref:`generating_let_ssr` + +.. tacn:: elim + +induction see :ref:`the_defective_tactics_ssr` + +.. tacn:: case + +case analysis see :ref:`the_defective_tactics_ssr` + +.. tacn:: rewrite {+ @r_step } + +rewrite see :ref:`rewriting_ssr` + +.. tacn:: have {* @i_item } {? @i_pattern } {? @s_item %| {+ @binder } } {? : @term } := @term +.. tacv:: have {* @i_item } {? @i_pattern } {? @s_item %| {+ @binder } } : @term {? by @tactic } +.. tacn:: have suff {? @clear_switch } {? @i_pattern } {? : @term } := @term +.. tacv:: have suff {? @clear_switch } {? @i_pattern } : @term {? by @tactic } +.. tacv:: gen have {? @ident , } {? @i_pattern } : {+ @gen_item } / @term {? by @tactic } + +forward chaining see :ref:`structure_ssr` + + +.. tacn:: wlog {? suff } {? @i_item } : {* @gen_item %| @clear_switch } / @term + +specializing see :ref:`structure_ssr` + +.. tacn:: suff {* @i_item } {? @i_pattern } {+ @binder } : @term {? by @tactic } +.. tacv:: suff {? have } {? @clear_switch } {? @i_pattern } : @term {? by @tactic } + +backchaining see :ref:`structure_ssr` + +.. tacn:: pose @ident := @term + +local definition :ref:`definitions_ssr` + +.. tacv:: pose @ident {+ @binder } := @term + +local function definition + +.. tacv:: pose fix @fix_body + +local fix definition + +.. tacv:: pose cofix @fix_body + +local cofix definition + +.. tacn:: set @ident {? : @term } := {? 
@occ_switch } %( @term %| ( @c_pattern) %) + +abbreviation see :ref:`abbreviations_ssr` + +.. tacn:: unlock {* {? @r_prefix } @ident } + +unlock see :ref:`locking_ssr` + +.. tacn:: congr {? @num } @term + +congruence :ref:`congruence_ssr` + + +Tacticals +~~~~~~~~~ + +.. prodn:: tactic += @d_tactic {? @ident } : {+ @d_item } {? @clear_switch } + +discharge :ref:`discharge_ssr` + +.. prodn:: tactic += @tacitc => {+ @i_item } + +introduction see :ref:`introduction_ssr` + +.. prodn:: tactic += @tactic in {+ @gen_item %| @clear_switch } {? * } + +localization see :ref:`localization_ssr` + +.. prodn:: tactic += do {? @mult } %( @tactic %| [ {+| @tactic } ] %) + +iteration see :ref:`iteration_ssr` + +.. prodn:: tactic += @tactic ; %( first %| last %) {? @num } %( @tactic %| [ {+| @tactic } ] %) + +selector see :ref:`selectors_ssr` + +.. prodn:: tactic += @tactic ; %( first %| last %) {? @num } + +rotation see :ref:`selectors_ssr` + +.. prodn:: tactic += by %( @tactic %| [ {*| @tactic } ] %) + +closing see :ref:`terminators_ssr` + +Commands +~~~~~~~~ + +.. cmd:: Hint View for %( move %| apply %) / @ident {? | @num } + +view hint declaration see :ref:`declaring_new_hints_ssr` + +.. cmd:: Hint View for apply // @ident {? @num } + +right hand side double , view hint declaration see :ref:`declaring_new_hints_ssr` + +.. cmd:: Prenex Implicits {+ @ident } + +prenex implicits declaration see :ref:`parametric_polymorphism_ssr` + +.. rubric:: Footnotes + +.. [#1] Unfortunately, even after a call to the Set Printing All command, + some occurrences are still not displayed to the user, essentially the + ones possibly hidden in the predicate of a dependent match structure. +.. [#2] Thus scripts that depend on bound variable names, e.g., via intros + or with, are inherently fragile. +.. [#3] The name ``subnK`` reads as “right cancellation rule for nat + subtraction”. +.. 
[#4] Also, a slightly different variant may be used for the first :token:`d_item` + of case and elim; see section :ref:`type_families_ssr`. +.. [#5] Except /= does not expand the local definitions created by the + |SSR| in tactical. +.. [#6] |SSR| reserves all identifiers of the form “_x_”, which is + used for such generated names. +.. [#7] More precisely, it should have a quantified inductive type with a + assumptions and m − a constructors. +.. [#8] This is an implementation feature: there is not such obstruction + in the metatheory +.. [#9] The current state of the proof shall be displayed by the Show + Proof command of |Coq| proof mode. diff --git a/doc/sphinx/proof-engine/tactics.rst b/doc/sphinx/proof-engine/tactics.rst new file mode 100644 index 0000000000..da34e3b55b --- /dev/null +++ b/doc/sphinx/proof-engine/tactics.rst @@ -0,0 +1,4352 @@ +.. include:: ../preamble.rst +.. include:: ../replaces.rst + +.. _tactics: + +Tactics +======== + +A deduction rule is a link between some (unique) formula, that we call +the *conclusion* and (several) formulas that we call the *premises*. A +deduction rule can be read in two ways. The first one says: “if I know +this and this then I can deduce this”. For instance, if I have a proof +of A and a proof of B then I have a proof of A ∧ B. This is forward +reasoning from premises to conclusion. The other way says: “to prove +this I have to prove this and this”. For instance, to prove A ∧ B, I +have to prove A and I have to prove B. This is backward reasoning from +conclusion to premises. We say that the conclusion is the *goal* to +prove and premises are the *subgoals*. The tactics implement *backward +reasoning*. When applied to a goal, a tactic replaces this goal with +the subgoals it generates. We say that a tactic reduces a goal to its +subgoal(s). + +Each (sub)goal is denoted with a number. The current goal is numbered +1. 
By default, a tactic is applied to the current goal, but one can +address a particular goal in the list by writing n:tactic which means +“apply tactic tactic to goal number n”. We can show the list of +subgoals by typing Show (see Section :ref:`TODO-7.3.1-Show`). + +Since not every rule applies to a given statement, not every tactic can +be used to reduce a given goal. In other words, before applying a tactic +to a given goal, the system checks that some *preconditions* are +satisfied. If it is not the case, the tactic raises an error message. + +Tactics are built from atomic tactics and tactic expressions (which +extends the folklore notion of tactical) to combine those atomic +tactics. This chapter is devoted to atomic tactics. The tactic +language will be described in Chapter :ref:`TODO-9-Thetacticlanguage`. + +Invocation of tactics +------------------------- + +A tactic is applied as an ordinary command. It may be preceded by a +goal selector (see Section :ref:`TODO-9.2-Semantics`). If no selector is +specified, the default selector (see Section +:ref:`TODO-8.1.1-Setdefaultgoalselector`) is used. + +.. _tactic_invocation_grammar: + + .. productionlist:: `sentence` + tactic_invocation : toplevel_selector : tactic. + : |tactic . + +.. cmd:: Set Default Goal Selector @toplevel_selector. + +After using this command, the default selector – used when no selector +is specified when applying a tactic – is set to the chosen value. The +initial value is 1, hence the tactics are, by default, applied to the +first goal. Using Set Default Goal Selector ‘‘all’’ will make it so +that tactics are, by default, applied to every goal simultaneously. +Then, to apply a tactic tac to the first goal only, you can write +1:tac. Although more selectors are available, only ‘‘all’’ or a single +natural number are valid default goal selectors. + +.. cmd:: Test Default Goal Selector. + +This command displays the current default selector. + +.. 
_bindingslist: + +Bindings list +~~~~~~~~~~~~~~~~~~~ + +Tactics that take a term as argument may also support a bindings list, +so as to instantiate some parameters of the term by name or position. +The general form of a term equipped with a bindings list is ``term with +bindings_list`` where ``bindings_list`` may be of two different forms: + +.. _bindings_list_grammar: + + .. productionlist:: `bindings_list` + bindings_list : (ref := `term`) ... (ref := `term`) + : `term` ... `term` + ++ In a bindings list of the form :n:`{* (ref:= term)}`, :n:`ref` is either an + :n:`@ident` or a :n:`@num`. The references are determined according to the type of + ``term``. If :n:`ref` is an identifier, this identifier has to be bound in the + type of ``term`` and the binding provides the tactic with an instance for the + parameter of this name. If :n:`ref` is some number ``n``, this number denotes + the ``n``-th non dependent premise of the ``term``, as determined by the type + of ``term``. + + .. exn:: No such binder + ++ A bindings list can also be a simple list of terms :n:`{* term}`. + In that case the references to which these terms correspond are + determined by the tactic. In case of ``induction``, ``destruct``, ``elim`` + and ``case`` (see :ref:`TODO-9-Thetacticlanguage`) the terms have to + provide instances for all the dependent products in the type of term while in + the case of ``apply``, or of ``constructor`` and its variants, only instances + for the dependent products that are not bound in the conclusion of the type + are required. + + .. exn:: Not the right number of missing arguments. + +.. _occurencessets: + +Occurrences sets and occurrences clauses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +An occurrences clause is a modifier to some tactics that obeys the +following syntax: + +.. _tactic_occurence_grammar: + + .. productionlist:: `sentence` + occurence_clause : in `goal_occurences` + goal_occurences : [ident [`at_occurences`], ... 
, ident [`at_occurences`] [|- [* [`at_occurences`]]]]
+                      :| * |- [* [`at_occurences`]]
+                      :| *
+      at_occurrences : at `occurrences`
+      occurences : [-] `num` ... `num`
+
+The role of an occurrence clause is to select a set of occurrences of a term in
+a goal. In the first case, the :n:`@ident {? at {* num}}` parts indicate that
+occurrences have to be selected in the hypotheses named :n:`@ident`. If no numbers
+are given for hypothesis :n:`@ident`, then all the occurrences of `term` in the
+hypothesis are selected. If numbers are given, they refer to occurrences of
+`term` when the term is printed using option ``Set Printing All`` (see
+:ref:`TODO-2.9-Printingconstructionsinfull`), counting from left to right. In
+particular, occurrences of `term` in implicit arguments (see
+:ref:`TODO-2.7-Implicitarguments`) or coercions (see :ref:`TODO-2.8-Coercions`)
+are counted.
+
+If a minus sign is given between at and the list of occurrences, it
+negates the condition so that the clause denotes all the occurrences
+except the ones explicitly mentioned after the minus sign.
+
+As an exception to the left-to-right order, the occurrences in
+the return subexpression of a match are considered *before* the
+occurrences in the matched term.
+
+In the second case, the ``*`` on the left of ``|-`` means that all occurrences
+of term are selected in every hypothesis.
+
+In the first and second case, if ``*`` is mentioned on the right of ``|-``, the
+occurrences of the conclusion of the goal have to be selected. If some numbers
+are given, then only the occurrences denoted by these numbers are selected. If
+no numbers are given, all occurrences of :n:`@term` in the goal are selected.
+
+Finally, the last notation is an abbreviation for ``* |- *``. Note also
+that ``|-`` is optional in the first case when no ``*`` is given.
+
+Here are some tactics that understand occurrences clauses: ``set``, ``remember``
+, ``induction``, ``destruct``. 
+
+
+See also: :ref:`TODO-8.3.7-Managingthelocalcontext`,
+:ref:`TODO-8.5.2-Caseanalysisandinduction`,
+:ref:`TODO-2.9-Printingconstructionsinfull`.
+
+
+Applying theorems
+---------------------
+
+.. tacn:: exact @term
+   :name: exact
+
+This tactic applies to any goal. It gives directly the exact proof
+term of the goal. Let ``T`` be our goal, let ``p`` be a term of type ``U`` then
+``exact p`` succeeds iff ``T`` and ``U`` are convertible (see
+:ref:`TODO-4.3-Conversionrules`).
+
+.. exn:: Not an exact proof.
+
+.. tacv:: eexact @term.
+
+This tactic behaves like exact but is able to handle terms and goals with
+meta-variables.
+
+.. tacn:: assumption
+   :name: assumption
+
+This tactic looks in the local context for a hypothesis whose type is equal to
+the goal. If it is the case, the subgoal is proved. Otherwise, it fails.
+
+.. exn:: No such assumption.
+
+.. tacv:: eassumption
+
+This tactic behaves like assumption but is able to handle goals with
+meta-variables.
+
+.. tacn:: refine @term
+   :name: refine
+
+This tactic applies to any goal. It behaves like exact with a big
+difference: the user can leave some holes (denoted by ``_`` or ``(_:type)``) in
+the term. refine will generate as many subgoals as there are holes in
+the term. The type of holes must be either synthesized by the system
+or declared by an explicit cast like ``(_:nat->Prop)``. Any subgoal that
+occurs in other subgoals is automatically shelved, as if calling
+shelve_unifiable (see Section 8.17.4). This low-level tactic can be
+useful to advanced users.
+
+.. example::
+   .. coqtop:: reset all
+
+      Inductive Option : Set :=
+      | Fail : Option
+      | Ok : bool -> Option.
+
+      Definition get : forall x:Option, x <> Fail -> bool.
+
+      refine
+        (fun x:Option =>
+           match x return x <> Fail -> bool with
+           | Fail => _
+           | Ok b => fun _ => b
+           end).
+
+      intros; absurd (Fail = Fail); trivial.
+
+      Defined.
+
+.. exn:: invalid argument
+
+   The tactic ``refine`` does not know what to do with the term you gave. 
+ +.. exn:: Refine passed ill-formed term + + The term you gave is not a valid proof (not easy to debug in general). This + message may also occur in higher-level tactics that call ``refine`` + internally. + +.. exn:: Cannot infer a term for this placeholder + + There is a hole in the term you gave which type cannot be inferred. Put a + cast around it. + +.. tacv:: simple refine @term + + This tactic behaves like refine, but it does not shelve any subgoal. It does + not perform any beta-reduction either. + +.. tacv:: notypeclasses refine @term + + This tactic behaves like ``refine`` except it performs typechecking without + resolution of typeclasses. + +.. tacv:: simple notypeclasses refine @term + + This tactic behaves like ``simple refine`` except it performs typechecking + without resolution of typeclasses. + +.. tacv:: apply @term + :name: apply + +This tactic applies to any goal. The argument term is a term well-formed in the +local context. The tactic apply tries to match the current goal against the +conclusion of the type of term. If it succeeds, then the tactic returns as many +subgoals as the number of non-dependent premises of the type of term. If the +conclusion of the type of term does not match the goal *and* the conclusion is +an inductive type isomorphic to a tuple type, then each component of the tuple +is recursively matched to the goal in the left-to-right order. + +The tactic ``apply`` relies on first-order unification with dependent types +unless the conclusion of the type of ``term`` is of the form :g:`P (t`:sub:`1` +:g:`...` :g:`t`:sub:`n` :g:`)` with `P` to be instantiated. In the latter case, the behavior +depends on the form of the goal. If the goal is of the form +:g:`(fun x => Q) u`:sub:`1` :g:`...` :g:`u`:sub:`n` and the :g:`t`:sub:`i` and +:g:`u`:sub:`i` unifies, then :g:`P` is taken to be :g:`(fun x => Q)`. Otherwise, +``apply`` tries to define :g:`P` by abstracting over :g:`t`:sub:`1` :g:`...` +:g:`t`:sub:`n` in the goal. 
See :tacn:`pattern` to transform the goal so that it +gets the form :g:`(fun x => Q) u`:sub:`1` :g:`...` :g:`u`:sub:`n`. + +.. exn:: Unable to unify ... with ... . + +The apply tactic failed to match the conclusion of term and the current goal. +You can help the apply tactic by transforming your goal with the +:ref:`change <change_term>` or :tacn:`pattern` tactics. + +.. exn:: Unable to find an instance for the variables {+ @ident}. + +This occurs when some instantiations of the premises of term are not deducible +from the unification. This is the case, for instance, when you want to apply a +transitivity property. In this case, you have to use one of the variants below: + +.. cmd:: apply @term with {+ @term} + +Provides apply with explicit instantiations for all dependent premises of the +type of term that do not occur in the conclusion and consequently cannot be +found by unification. Notice that the collection :n:`{+ @term}` must be given +according to the order of these dependent premises of the type of term. + +.. exn:: Not the right number of missing arguments. + +.. tacv:: apply @term with @bindings_list + +This also provides apply with values for instantiating premises. Here, variables +are referred by names and non-dependent products by increasing numbers (see +:ref:`bindings list <bindingslist>`). + +.. tacv:: apply {+, @term} + +This is a shortcut for ``apply term``:sub:`1` +``; [.. | ... ; [ .. | apply`` ``term``:sub:`n` ``] ... ]``, +i.e. for the successive applications of ``term``:sub:`i+1` on the last subgoal +generated by ``apply term``:sub:`i` , starting from the application of +``term``:sub:`1`. + +.. tacv:: eapply @term + :name: eapply + +The tactic ``eapply`` behaves like ``apply`` but it does not fail when no +instantiations are deducible for some variables in the premises. Rather, it +turns these variables into existential variables which are variables still to +instantiate (see :ref:`TODO-2.11-ExistentialVariables`). 
The instantiation is
+intended to be found later in the proof.
+
+.. tacv:: simple apply @term.
+
+This behaves like ``apply`` but it reasons modulo conversion only on subterms
+that contain no variables to instantiate. For instance, the following example
+does not succeed because it would require the conversion of ``id ?foo`` and
+``O``.
+
+.. example::
+
+   .. coqtop:: all
+
+      Definition id (x : nat) := x.
+      Hypothesis H : forall y, id y = y.
+      Goal O = O.
+      Fail simple apply H.
+
+Because it reasons modulo a limited amount of conversion, ``simple apply`` fails
+quicker than ``apply`` and it is then well-suited for uses in user-defined
+tactics that backtrack often. Moreover, it does not traverse tuples as ``apply``
+does.
+
+.. tacv:: {? simple} apply {+, @term {? with @bindings_list}}
+.. tacv:: {? simple} eapply {+, @term {? with @bindings_list}}
+
+This summarizes the different syntaxes for ``apply`` and ``eapply``.
+
+.. tacv:: lapply @term
+   :name: lapply
+
+This tactic applies to any goal, say :g:`G`. The argument term has to be
+well-formed in the current context, its type being reducible to a non-dependent
+product :g:`A -> B` with :g:`B` possibly containing products. Then it generates
+two subgoals :g:`B->G` and :g:`A`. Applying ``lapply H`` (where :g:`H` has type
+:g:`A->B` and :g:`B` does not start with a product) does the same as giving the
+sequence ``cut B. 2:apply H.`` where ``cut`` is described below.
+
+.. warn:: When @term contains more than one non dependent product the tactic lapply only takes into account the first product.
+
+.. example::
+   Assume we have a transitive relation ``R`` on ``nat``:
+
+   .. coqtop:: reset in
+
+      Variable R : nat -> nat -> Prop.
+
+      Hypothesis Rtrans : forall x y z:nat, R x y -> R y z -> R x z.
+
+      Variables n m p : nat.
+
+      Hypothesis Rnm : R n m.
+
+      Hypothesis Rmp : R m p.
+
+   Consider the goal ``(R n p)`` provable using the transitivity of ``R``:
+
+   .. coqtop:: in
+
+      Goal R n p. 
+ + The direct application of ``Rtrans`` with ``apply`` fails because no value + for ``y`` in ``Rtrans`` is found by ``apply``: + + .. coqtop:: all + + Fail apply Rtrans. + + A solution is to ``apply (Rtrans n m p)`` or ``(Rtrans n m)``. + + .. coqtop:: all undo + + apply (Rtrans n m p). + + Note that ``n`` can be inferred from the goal, so the following would work + too. + + .. coqtop:: in undo + + apply (Rtrans _ m). + + More elegantly, ``apply Rtrans with (y:=m)`` allows only mentioning the + unknown m: + + .. coqtop:: in undo + + apply Rtrans with (y := m). + + Another solution is to mention the proof of ``(R x y)`` in ``Rtrans`` + + .. coqtop:: all undo + + apply Rtrans with (1 := Rnm). + + ... or the proof of ``(R y z)``. + + .. coqtop:: all undo + + apply Rtrans with (2 := Rmp). + + On the opposite, one can use ``eapply`` which postpones the problem of + finding ``m``. Then one can apply the hypotheses ``Rnm`` and ``Rmp``. This + instantiates the existential variable and completes the proof. + + .. coqtop:: all + + eapply Rtrans. + + apply Rnm. + + apply Rmp. + +.. note:: + When the conclusion of the type of the term to ``apply`` is an inductive + type isomorphic to a tuple type and ``apply`` looks recursively whether a + component of the tuple matches the goal, it excludes components whose + statement would result in applying an universal lemma of the form + ``forall A, ... -> A``. Excluding this kind of lemma can be avoided by + setting the following option: + +.. opt:: Set Universal Lemma Under Conjunction. + + This option, which preserves compatibility with versions of Coq prior to + 8.4 is also available for :n:`apply @term in @ident` (see :tacn:`apply ... in`). + +.. tacn:: apply @term in @ident + :name: apply ... in + + This tactic applies to any goal. The argument ``term`` is a term well-formed in + the local context and the argument :n:`@ident` is an hypothesis of the context. 
+ The tactic ``apply term in ident`` tries to match the conclusion of the type + of :n:`@ident` against a non-dependent premise of the type of ``term``, trying + them from right to left. If it succeeds, the statement of hypothesis + :n:`@ident` is replaced by the conclusion of the type of ``term``. The tactic + also returns as many subgoals as the number of other non-dependent premises + in the type of ``term`` and of the non-dependent premises of the type of + :n:`@ident`. If the conclusion of the type of ``term`` does not match the goal + *and* the conclusion is an inductive type isomorphic to a tuple type, then + the tuple is (recursively) decomposed and the first component of the tuple + of which a non-dependent premise matches the conclusion of the type of + :n:`@ident`. Tuples are decomposed in a width-first left-to-right order (for + instance if the type of :g:`H1` is a :g:`A <-> B` statement, and the type of + :g:`H2` is :g:`A` then ``apply H1 in H2`` transforms the type of :g:`H2` + into :g:`B`). The tactic ``apply`` relies on first-order pattern-matching + with dependent types. + +.. exn:: Statement without assumptions. + + This happens if the type of ``term`` has no non dependent premise. + +.. exn:: Unable to apply. + + This happens if the conclusion of :n:`@ident` does not match any of the non + dependent premises of the type of ``term``. + +.. tacv:: apply {+, @term} in @ident + + This applies each of ``term`` in sequence in :n:`@ident`. + +.. tacv:: apply {+, @term with @bindings_list} in @ident + + This does the same but uses the bindings in each :n:`(@id := @ val)` to + instantiate the parameters of the corresponding type of ``term`` (see + :ref:`bindings list <bindingslist>`). + +.. tacv:: eapply {+, @term with @bindings_list} in @ident + + This works as :tacn:`apply ... in` but turns unresolved bindings into + existential variables, if any, instead of failing. + +.. 
tacv:: apply {+, @term with @bindings_list} in @ident as @intro_pattern + :name: apply ... in ... as + + This works as :tacn:`apply ... in` then applies the + :n:`@intro_pattern` to the hypothesis :n:`@ident`. + +.. tacv:: eapply {+, @term with @bindings_list} in @ident as @intro_pattern. + + This works as :tacn:`apply ... in as` but using ``eapply``. + +.. tacv:: simple apply @term in @ident + + This behaves like :tacn:`apply ... in` but it reasons modulo conversion only + on subterms that contain no variables to instantiate. For instance, if + :g:`id := fun x:nat => x` and :g:`H: forall y, id y = y -> True` and + :g:`H0 : O = O` then ``simple apply H in H0`` does not succeed because it + would require the conversion of :g:`id ?1234` and :g:`O` where :g:`?1234` is + a variable to instantiate. Tactic :n:`simple apply @term in @ident` does not + either traverse tuples as :n:`apply @term in @ident` does. + +.. tacv:: {? simple} apply {+, @term {? with @bindings_list}} in @ident {? as @intro_pattern} +.. tacv:: {? simple} eapply {+, @term {? with @bindings_list}} in @ident {? as @intro_pattern} + + This summarizes the different syntactic variants of :n:`apply @term in + @ident` and :n:`eapply @term in @ident`. + +.. tacn:: constructor @num + :name: constructor + + This tactic applies to a goal such that its conclusion is an inductive + type (say :g:`I`). The argument :n:`@num` must be less or equal to the + numbers of constructor(s) of :g:`I`. Let :g:`c`:sub:`i` be the i-th + constructor of :g:`I`, then ``constructor i`` is equivalent to + ``intros; apply c``:sub:`i`. + +.. exn:: Not an inductive product. +.. exn:: Not enough constructors. + +.. tacv:: constructor + + This tries :g:`constructor`:sub:`1` then :g:`constructor`:sub:`2`, ..., then + :g:`constructor`:sub:`n` where `n` is the number of constructors of the head + of the goal. + +.. 
tacv:: constructor @num with @bindings_list + + Let ``c`` be the i-th constructor of :g:`I`, then + :n:`constructor i with @bindings_list` is equivalent to + :n:`intros; apply c with @bindings_list`. + + .. warn:: + The terms in the @bindings_list are checked in the context where constructor is executed and not in the context where @apply is executed (the introductions are not taken into account). + +.. tacv:: split + + This applies only if :g:`I` has a single constructor. It is then + equivalent to :n:`constructor 1.`. It is typically used in the case of a + conjunction :g:`A` :math:`\wedge` :g:`B`. + +.. exn:: Not an inductive goal with 1 constructor. + +.. tacv:: exists @val + + This applies only if :g:`I` has a single constructor. It is then equivalent + to :n:`intros; constructor 1 with @bindings_list.` It is typically used in + the case of an existential quantification :math:`\exists`:g:`x, P(x).` + +.. exn:: Not an inductive goal with 1 constructor. + +.. tacv:: exists @bindings_list + + This iteratively applies :n:`exists @bindings_list`. + +.. tacv:: left +.. tacv:: right + + These tactics apply only if :g:`I` has two constructors, for + instance in the case of a disjunction :g:`A` :math:`\vee` :g:`B`. + Then, they are respectively equivalent to ``constructor 1`` and + ``constructor 2``. + +.. exn:: Not an inductive goal with 2 constructors. + +.. tacv:: left with @bindings_list +.. tacv:: right with @bindings_list +.. tacv:: split with @bindings_list + + As soon as the inductive type has the right number of constructors, these + expressions are equivalent to calling :n:`constructor i with @bindings_list` + for the appropriate ``i``. + +.. tacv:: econstructor +.. tacv:: eexists +.. tacv:: esplit +.. tacv:: eleft +.. 
tacv:: eright + + These tactics and their variants behave like ``constructor``, ``exists``, + ``split``, ``left``, ``right`` and their variants but they introduce + existential variables instead of failing when the instantiation of a + variable cannot be found (cf. :tacn:`eapply` and :tacn:`apply`). + + +.. _managingthelocalcontext: + +Managing the local context +------------------------------ + +.. tacn:: intro + :name: intro + +This tactic applies to a goal that is either a product or starts with a let +binder. If the goal is a product, the tactic implements the "Lam" rule given in +:ref:`TODO-4.2-Typing-rules` [1]_. If the goal starts with a let binder, then the +tactic implements a mix of the "Let" and "Conv". + +If the current goal is a dependent product :math:`\forall` :g:`x:T, U` (resp +:g:`let x:=t in U`) then ``intro`` puts :g:`x:T` (resp :g:`x:=t`) in the local +context. The new subgoal is :g:`U`. + +If the goal is a non-dependent product :g:`T`:math:`\rightarrow`:g:`U`, then it +puts in the local context either :g:`Hn:T` (if :g:`T` is of type :g:`Set` or +:g:`Prop`) or Xn:T (if the type of :g:`T` is :g:`Type`). The optional index +``n`` is such that ``Hn`` or ``Xn`` is a fresh identifier. In both cases, the +new subgoal is :g:`U`. + +If the goal is neither a product nor starting with a let definition, +the tactic ``intro`` applies the tactic ``hnf`` until the tactic ``intro`` can +be applied or the goal is not head-reducible. + +.. exn:: No product even after head-reduction. +.. exn:: ident is already used. + +.. tacv:: intros + + This repeats ``intro`` until it meets the head-constant. It never + reduces head-constants and it never fails. + +.. tac:: intro @ident + + This applies ``intro`` but forces :n:`@ident` to be the name of the + introduced hypothesis. + +.. exn:: name @ident is already used + +.. 
note:: If a name used by intro hides the base name of a global + constant then the latter can still be referred to by a qualified name + (see :ref:`TODO-2.6.2-Qualified-names`). +.. tacv:: intros {+ @ident}. + + This is equivalent to the composed tactic + :n:`intro @ident; ... ; intro @ident`. More generally, the ``intros`` tactic + takes a pattern as argument in order to introduce names for components + of an inductive definition or to clear introduced hypotheses. This is + explained in :ref:`TODO-8.3.2`. + +.. tacv:: intros until @ident + + This repeats intro until it meets a premise of the goal having form + `(@ident:term)` and discharges the variable named `ident` of the current + goal. + +.. exn:: No such hypothesis in current goal + +.. tacv:: intros until @num + + This repeats intro until the `num`-th non-dependent product. For instance, + on the subgoal :g:`forall x y:nat, x=y -> y=x` the tactic + :n:`intros until 1` is equivalent to :n:`intros x y H`, as :g:`x=y -> y=x` + is the first non-dependent product. And on the subgoal :g:`forall x y + z:nat, x=y -> y=x` the tactic :n:`intros until 1` is equivalent to + :n:`intros x y z` as the product on :g:`z` can be rewritten as a + non-dependent product: :g:`forall x y:nat, nat -> x=y -> y=x` + +.. exn:: No such hypothesis in current goal. + + This happens when `num` is 0 or is greater than the number of non-dependent + products of the goal. + +.. tacv:: intro after @ident +.. tacv:: intro before @ident +.. tacv:: intro at top +.. tacv:: intro at bottom + + These tactics apply :n:`intro` and move the freshly introduced hypothesis + respectively after the hypothesis :n:`@ident`, before the hypothesis + :n:`@ident`, at the top of the local context, or at the bottom of the local + context. All hypotheses on which the new hypothesis depends are moved + too so as to respect the order of dependencies between hypotheses. + Note that :n:`intro at bottom` is a synonym for :n:`intro` with no argument. + +.. 
exn:: No such hypothesis : @ident. + +.. tacv:: intro @ident after @ident +.. tacv:: intro @ident before @ident +.. tacv:: intro @ident at top +.. tacv:: intro @ident at bottom + + These tactics behave as previously but naming the introduced hypothesis + :n:`@ident`. It is equivalent to :n:`intro @ident` followed by the + appropriate call to move (see :tacn:`move ... after`). + +.. tacn:: intros @intro_pattern_list + :name: intros ... + + This extension of the tactic :n:`intros` allows to apply tactics on the fly + on the variables or hypotheses which have been introduced. An + *introduction pattern list* :n:`@intro_pattern_list` is a list of + introduction patterns possibly containing the filling introduction + patterns `*` and `**`. An *introduction pattern* is either: + + + a *naming introduction pattern*, i.e. either one of: + + + the pattern :n:`?` + + + the pattern :n:`?ident` + + + an identifier + + + an *action introduction pattern* which itself classifies into: + + + a *disjunctive/conjunctive introduction pattern*, i.e. either one of + + + a disjunction of lists of patterns + :n:`[@intro_pattern_list | ... | @intro_pattern_list]` + + + a conjunction of patterns: :n:`({+, p})` + + + a list of patterns + :n:`({+& p})` + for sequence of right-associative binary constructs + + + an *equality introduction pattern*, i.e. 
either one of: + + + a pattern for decomposing an equality: :n:`[= {+ p}]` + + the rewriting orientations: :n:`->` or :n:`<-` + + + the on-the-fly application of lemmas: :n:`p{+ %term}` where :n:`p` + itself is not a pattern for on-the-fly application of lemmas (note: + syntax is in experimental stage) + + + the wildcard: :n:`_` + + + Assuming a goal of type :g:`Q → P` (non-dependent product), or of type + :math:`\forall`:g:`x:T, P` (dependent product), the behavior of + :n:`intros p` is defined inductively over the structure of the introduction + pattern :n:`p`: + +Introduction on :n:`?` performs the introduction, and lets Coq choose a fresh +name for the variable; + +Introduction on :n:`?ident` performs the introduction, and lets Coq choose a +fresh name for the variable based on :n:`@ident`; + +Introduction on :n:`@ident` behaves as described in :tacn:`intro` + +Introduction over a disjunction of list of patterns +:n:`[@intro_pattern_list | ... | @intro_pattern_list ]` expects the product +to be over an inductive type whose number of constructors is `n` (or more +generally over a type of conclusion an inductive type built from `n` +constructors, e.g. :g:`C -> A\/B` with `n=2` since :g:`A\/B` has `2` +constructors): it destructs the introduced hypothesis as :n:`destruct` (see +:tacn:`destruct`) would and applies on each generated subgoal the +corresponding tactic; + +.. tacv:: intros @intro_pattern_list + + The introduction patterns in :n:`@intro_pattern_list` are expected to consume + no more than the number of arguments of the `i`-th constructor. If it + consumes less, then Coq completes the pattern so that all the arguments of + the constructors of the inductive type are introduced (for instance, the + list of patterns :n:`[ | ] H` applied on goal :g:`forall x:nat, x=0 -> 0=x` + behaves the same as the list of patterns :n:`[ | ? 
] H`); + +Introduction over a conjunction of patterns :n:`({+, p})` expects +the goal to be a product over an inductive type :g:`I` with a single +constructor that itself has at least `n` arguments: It performs a case +analysis over the hypothesis, as :n:`destruct` would, and applies the +patterns :n:`{+ p}` to the arguments of the constructor of :g:`I` (observe +that :n:`({+ p})` is an alternative notation for :n:`[{+ p}]`); + +Introduction via :n:`({+& p})` is a shortcut for introduction via +:n:`(p,( ... ,( ..., p ) ... ))`; it expects the hypothesis to be a sequence of +right-associative binary inductive constructors such as :g:`conj` or +:g:`ex_intro`; for instance, an hypothesis with type +:g:`A /\(exists x, B /\ C /\ D)` can be introduced via pattern +:n:`(a & x & b & c & d)`; + +If the product is over an equality type, then a pattern of the form +:n:`[= {+ p}]` applies either :tacn:`injection` or :tacn:`discriminate` +instead of :tacn:`destruct`; if :tacn:`injection` is applicable, the patterns +:n:`{+, p}` are used on the hypotheses generated by :tacn:`injection`; if the +number of patterns is smaller than the number of hypotheses generated, the +pattern :n:`?` is used to complete the list; + +.. tacv:: introduction over -> +.. 
tacv:: introduction over <- + + expects the hypothesis to be an equality and the right-hand-side + (respectively the left-hand-side) is replaced by the left-hand-side + (respectively the right-hand-side) in the conclusion of the goal; + the hypothesis itself is erased; if the term to substitute is a variable, it + is substituted also in the context of goal and the variable is removed too; + +Introduction over a pattern :n:`p{+ %term}` first applies :n:`{+ term}` +on the hypothesis to be introduced (as in :n:`apply {+, term}`) prior to the +application of the introduction pattern :n:`p`; + +Introduction on the wildcard depends on whether the product is dependent or not: +in the non-dependent case, it erases the corresponding hypothesis (i.e. it +behaves as an :tacn:`intro` followed by a :tacn:`clear`) while in the +dependent case, it succeeds and erases the variable only if the wildcard is part +of a more complex list of introduction patterns that also erases the hypotheses +depending on this variable; + +Introduction over :n:`*` introduces all forthcoming quantified variables +appearing in a row; introduction over :n:`**` introduces all forthcoming +quantified variables or hypotheses until the goal is not any more a +quantification or an implication. + +.. example:: + .. coqtop:: all + + Goal forall A B C:Prop, A \/ B /\ C -> (A -> C) -> C. + intros * [a | (_,c)] f. + +.. note:: + :n:`intros {+ p}` is not equivalent to :n:`intros p; ... ; intros p` + for the following reason: If one of the :n:`p` is a wildcard pattern, it + might succeed in the first case because the further hypotheses it + depends in are eventually erased too while it might fail in the second + case because of dependencies in hypotheses which are not yet + introduced (and a fortiori not yet erased). + +.. 
note:: + In :n:`intros @intro_pattern_list`, if the last introduction pattern + is a disjunctive or conjunctive pattern + :n:`[{+| @intro_pattern_list}]`, the completion of :n:`@intro_pattern_list` + so that all the arguments of the i-th constructors of the corresponding + inductive type are introduced can be controlled with the following option: + + .. cmd:: Set Bracketing Last Introduction Pattern. + + Force completion, if needed, when the last introduction pattern is a + disjunctive or conjunctive pattern (this is the default). + + .. cmd:: Unset Bracketing Last Introduction Pattern. + + Deactivate completion when the last introduction pattern is a disjunctive or + conjunctive pattern. + +.. tacn:: clear @ident + :name: clear + + This tactic erases the hypothesis named :n:`@ident` in the local context of + the current goal. As a consequence, :n:`@ident` is no more displayed and no + more usable in the proof development. + +.. exn:: No such hypothesis. + +.. exn:: @ident is used in the conclusion. + +.. exn:: @ident is used in the hypothesis @ident. + +.. tacv:: clear {+ @ident} + + This is equivalent to :n:`clear @ident. ... clear @ident.` + +.. tacv:: clearbody @ident + + This tactic expects :n:`@ident` to be a local definition then clears its + body. Otherwise said, this tactic turns a definition into an assumption. + +.. exn:: @ident is not a local definition + +.. tacv:: clear - {+ @ident} + + This tactic clears all the hypotheses except the ones depending in the + hypotheses named :n:`{+ @ident}` and in the goal. + +.. tacv:: clear + + This tactic clears all the hypotheses except the ones the goal depends on. + +.. tacv:: clear dependent @ident + + This clears the hypothesis :n:`@ident` and all the hypotheses that depend on + it. + +.. tacn:: revert {+ @ident} + :name: revert ... + +This applies to any goal with variables :n:`{+ @ident}`. It moves the hypotheses +(possibly defined) to the goal, if this respects dependencies. 
This tactic is +the inverse of :tacn:`intro`. + +.. exn:: No such hypothesis. + +.. exn:: @ident is used in the hypothesis @ident. + +.. tac:: revert dependent @ident + + This moves to the goal the hypothesis :n:`@ident` and all the hypotheses that + depend on it. + +.. tacn:: move @ident after @ident + :name: move .. after ... + + This moves the hypothesis named :n:`@ident` in the local context after the + hypothesis named :n:`@ident`, where “after” is in reference to the + direction of the move. The proof term is not changed. + + If :n:`@ident` comes before :n:`@ident` in the order of dependencies, then + all the hypotheses between :n:`@ident` and :n:`ident@` that (possibly + indirectly) depend on :n:`@ident` are moved too, and all of them are thus + moved after :n:`@ident` in the order of dependencies. + + If :n:`@ident` comes after :n:`@ident` in the order of dependencies, then all + the hypotheses between :n:`@ident` and :n:`@ident` that (possibly indirectly) + occur in the type of :n:`@ident` are moved too, and all of them are thus + moved before :n:`@ident` in the order of dependencies. + +.. tacv:: move @ident before @ident + + This moves :n:`@ident` towards and just before the hypothesis named + :n:`@ident`. As for :tacn:`move ... after ...`, dependencies over + :n:`@ident` (when :n:`@ident` comes before :n:`@ident` in the order of + dependencies) or in the type of :n:`@ident` (when :n:`@ident` comes after + :n:`@ident` in the order of dependencies) are moved too. + +.. tacv:: move @ident at top + + This moves :n:`@ident` at the top of the local context (at the beginning of + the context). + +.. tacv:: move @ident at bottom + + This moves ident at the bottom of the local context (at the end of the + context). + +.. exn:: No such hypothesis +.. exn:: Cannot move @ident after @ident : it occurs in the type of @ident +.. exn:: Cannot move @ident after @ident : it depends on @ident + +.. example:: + .. 
coqtop:: all + + Goal forall x :nat, x = 0 -> forall z y:nat, y=y-> 0=x. + intros x H z y H0. + move x after H0. + Undo. + move x before H0. + Undo. + move H0 after H. + Undo. + move H0 before H. + +.. tacn:: rename @ident into @ident + :name: rename ... into ... + +This renames hypothesis :n:`@ident` into :n:`@ident` in the current context. +The name of the hypothesis in the proof-term, however, is left unchanged. + +.. tacv:: rename {+, @ident into @ident} + + This renames the variables :n:`@ident` into :n:`@ident` in parallel. In + particular, the target identifiers may contain identifiers that exist in the + source context, as long as the latter are also renamed by the same tactic. + +.. exn:: No such hypothesis +.. exn:: @ident is already used + +.. tacn:: set (@ident := @term) + :name: set + + This replaces :n:`@term` by :n:`@ident` in the conclusion of the current goal + and adds the new definition :g:`ident := term` to the local context. + + If :n:`@term` has holes (i.e. subexpressions of the form “`_`”), the tactic + first checks that all subterms matching the pattern are compatible before + doing the replacement using the leftmost subterm matching the pattern. + +.. exn:: The variable @ident is already defined + +.. tacv:: set (@ident := @term ) in @goal_occurrences + + This notation allows specifying which occurrences of :n:`@term` have to be + substituted in the context. The :n:`in @goal_occurrences` clause is an + occurrence clause whose syntax and behavior are described in + :ref:`goal occurences <occurencessets>`. + +.. tacv:: set (@ident {+ @binder} := @term ) + + This is equivalent to :n:`set (@ident := funbinder {+ binder} => @term )`. + +.. tacv:: set term + This behaves as :n:`set(@ident := @term)` but :n:`@ident` is generated by + Coq. This variant also supports an occurrence clause. + +.. tacv:: set (@ident {+ @binder} := @term) in @goal_occurrences +.. 
tacv:: set @term in @goal_occurrences + + These are the general forms that combine the previous possibilities. + +.. tacv:: eset (@ident {+ @binder} := @term ) in @goal_occurrences +.. tacv:: eset @term in @goal_occurrences + + While the different variants of :tacn:`set` expect that no existential + variables are generated by the tactic, :n:`eset` removes this constraint. In + practice, this is relevant only when :n:`eset` is used as a synonym of + :tacn:`epose`, i.e. when the :`@term` does not occur in the goal. + +.. tacv:: remember @term as @ident + + This behaves as :n:`set (@ident:= @term ) in *` and using a logical + (Leibniz’s) equality instead of a local definition. + +.. tacv:: remember @term as @ident eqn:@ident + + This behaves as :n:`remember @term as @ident`, except that the name of the + generated equality is also given. + +.. tacv:: remember @term as @ident in @goal_occurrences + + This is a more general form of :n:`remember` that remembers the occurrences + of term specified by an occurrences set. + +.. tacv:: eremember @term as @ident +.. tacv:: eremember @term as @ident in @goal_occurrences +.. tacv:: eremember @term as @ident eqn:@ident + While the different variants of :n:`remember` expect that no existential + variables are generated by the tactic, :n:`eremember` removes this constraint. + +.. tacv:: pose ( @ident := @term ) + :name: pose + + This adds the local definition :n:`@ident := @term` to the current context + without performing any replacement in the goal or in the hypotheses. It is + equivalent to :n:`set ( @ident := @term ) in |-`. + +.. tacv:: pose ( @ident {+ @binder} := @term ) + + This is equivalent to :n:`pose (@ident := funbinder {+ binder} => @term)`. + +.. tacv:: pose @term + + This behaves as :n:`pose (@ident := @term )` but :n:`@ident` is generated by + Coq. + +.. tacv:: epose (@ident := @term ) +.. tacv:: epose (@ident {+ @binder} := @term ) +.. 
tacv:: epose term + :name: epose + + While the different variants of :tacn:`pose` expect that no + existential variables are generated by the tactic, epose removes this + constraint. + +.. tacn:: decompose [{+ @qualid}] @term + :name: decompose + + This tactic recursively decomposes a complex proposition in order to + obtain atomic ones. + +.. example:: + .. coqtop:: all + + Goal forall A B C:Prop, A /\ B /\ C \/ B /\ C \/ C /\ A -> C. + intros A B C H; decompose [and or] H; assumption. + Qed. + +:n:`decompose` does not work on right-hand sides of implications or products. + +.. tacv:: decompose sum @term + + This decomposes sum types (like or). + +.. tacv:: decompose record @term + + This decomposes record types (inductive types with one constructor, like + "and" and "exists" and those defined with the Record macro, see + :ref:`TODO-2.1`). + +.. _controllingtheproofflow: + +Controlling the proof flow +------------------------------ + +.. tacn:: assert (@ident : form) + :name: assert + + This tactic applies to any goal. :n:`assert (H : U)` adds a new hypothesis + of name :n:`H` asserting :g:`U` to the current goal and opens a new subgoal + :g:`U` [2]_. The subgoal :g:`U` comes first in the list of subgoals remaining to + prove. + +.. exn:: Not a proposition or a type + + Arises when the argument form is neither of type :g:`Prop`, :g:`Set` nor + :g:`Type`. + +.. tacv:: assert form + + This behaves as :n:`assert (@ident : form ) but :n:`@ident` is generated by + Coq. + +.. tacv:: assert form by tactic + + This tactic behaves like :n:`assert` but applies tactic to solve the subgoals + generated by assert. + + .. exn:: Proof is not complete + +.. tacv:: assert form as intro_pattern + + If :n:`intro_pattern` is a naming introduction pattern (see :tacn:`intro`), + the hypothesis is named after this introduction pattern (in particular, if + :n:`intro_pattern` is :n:`@ident`, the tactic behaves like + :n:`assert (@ident : form)`). 
If :n:`intro_pattern` is an action + introduction pattern, the tactic behaves like :n:`assert form` followed by + the action done by this introduction pattern. + +.. tacv:: assert form as intro_pattern by tactic + + This combines the two previous variants of :n:`assert`. + +.. tacv:: assert (@ident := @term ) + + This behaves as :n:`assert (@ident : type) by exact @term` where :g:`type` is + the type of :g:`term`. This is deprecated in favor of :n:`pose proof`. If the + head of term is :n:`@ident`, the tactic behaves as :n:`specialize @term`. + + .. exn:: Variable @ident is already declared + +.. tacv:: eassert form as intro_pattern by tactic + +.. tacv:: assert (@ident := @term) + + While the different variants of :n:`assert` expect that no existential + variables are generated by the tactic, :n:`eassert` removes this constraint. + This allows not to specify the asserted statement completeley before starting + to prove it. + +.. tacv:: pose proof @term {? as intro_pattern} + + This tactic behaves like :n:`assert T {? as intro_pattern} by exact @term` + where :g:`T` is the type of :g:`term`. In particular, + :n:`pose proof @term as @ident` behaves as :n:`assert (@ident := @term)` + and :n:`pose proof @term as intro_pattern` is the same as applying the + intro_pattern to :n:`@term`. + +.. tacv:: epose proof term {? as intro_pattern} + + While :n:`pose proof` expects that no existential variables are generated by + the tactic, :n:`epose proof` removes this constraint. + +.. tacv:: enough (@ident : form) + + This adds a new hypothesis of name :n:`@ident` asserting :n:`form` to the + goal the tactic :n:`enough` is applied to. A new subgoal stating :n:`form` is + inserted after the initial goal rather than before it as :n:`assert` would do. + +.. tacv:: enough form + + This behaves like :n:`enough (@ident : form)` with the name :n:`@ident` of + the hypothesis generated by Coq. + +.. 
tacv:: enough form as intro_pattern + + This behaves like :n:`enough form` using :n:`intro_pattern` to name or + destruct the new hypothesis. + +.. tacv:: enough (@ident : form) by tactic +.. tacv:: enough form by tactic +.. tacv:: enough form as intro_pattern by tactic + + This behaves as above but with :n:`tactic` expected to solve the initial goal + after the extra assumption :n:`form` is added and possibly destructed. If the + :n:`as intro_pattern` clause generates more than one subgoal, :n:`tactic` is + applied to all of them. + +.. tacv:: eenough (@ident : form) by tactic +.. tacv:: eenough form by tactic +.. tacv:: eenough form as intro_pattern by tactic + + While the different variants of :n:`enough` expect that no existential + variables are generated by the tactic, :n:`eenough` removes this constraint. + +.. tacv:: cut form + + This tactic applies to any goal. It implements the non-dependent case of + the “App” rule given in :ref:`TODO-4.2`. (This is Modus Ponens inference + rule.) :n:`cut U` transforms the current goal :g:`T` into the two following + subgoals: :g:`U -> T` and :g:`U`. The subgoal :g:`U -> T` comes first in the + list of remaining subgoal to prove. + +.. tacv:: specialize (ident {* @term}) {? as intro_pattern} +.. tacv:: specialize ident with @bindings_list {? as intro_pattern} + + The tactic :n:`specialize` works on local hypothesis :n:`@ident`. The + premises of this hypothesis (either universal quantifications or + non-dependent implications) are instantiated by concrete terms coming either + from arguments :n:`{* @term}` or from a :ref:`bindings list <bindingslist>`. + In the first form the application to :n:`{* @term}` can be partial. The + first form is equivalent to :n:`assert (@ident := @ident {* @term})`. In the + second form, instantiation elements can also be partial. In this case the + uninstantiated arguments are inferred by unification if possible or left + quantified in the hypothesis otherwise. 
With the :n:`as` clause, the local + hypothesis :n:`@ident` is left unchanged and instead, the modified hypothesis + is introduced as specified by the :n:`intro_pattern`. The name :n:`@ident` + can also refer to a global lemma or hypothesis. In this case, for + compatibility reasons, the behavior of :n:`specialize` is close to that of + :n:`generalize`: the instantiated statement becomes an additional premise of + the goal. The :n:`as` clause is especially useful in this case to immediately + introduce the instantiated statement as a local hypothesis. + + .. exn:: @ident is used in hypothesis @ident + .. exn:: @ident is used in conclusion + +.. tacn:: generalize @term + :name: generalize + + This tactic applies to any goal. It generalizes the conclusion with + respect to some term. + +.. example:: + .. coqtop:: reset none + + Goal forall x y:nat, 0 <= x + y + y. + Proof. intros *. + + .. coqtop:: all + + Show. + generalize (x + y + y). + +If the goal is :g:`G` and :g:`t` is a subterm of type :g:`T` in the goal, +then :n:`generalize t` replaces the goal by :g:`forall (x:T), G′` where :g:`G′` +is obtained from :g:`G` by replacing all occurrences of :g:`t` by :g:`x`. The +name of the variable (here :g:`n`) is chosen based on :g:`T`. + +.. tacv:: generalize {+ @term} + + This is equivalent to :n:`generalize @term; ... ; generalize @term`. + Note that the sequence of term :sub:`i` 's are processed from n to 1. + +.. tacv:: generalize @term at {+ @num} + + This is equivalent to :n:`generalize @term` but it generalizes only over the + specified occurrences of :n:`@term` (counting from left to right on the + expression printed using option :g:`Set Printing All`). + +.. tacv:: generalize @term as @ident + + This is equivalent to :n:`generalize @term` but it uses :n:`@ident` to name + the generalized hypothesis. + +.. tacv:: generalize {+, @term at {+ @num} as @ident} + + This is the most general form of :n:`generalize` that combines the previous + behaviors. + +.. 
tacv:: generalize dependent @term + + This generalizes term but also *all* hypotheses that depend on :n:`@term`. It + clears the generalized hypotheses. + +.. tacn:: evar (@ident : @term) + :name: evar + + The :n:`evar` tactic creates a new local definition named :n:`@ident` with type + :n:`@term` in the context. The body of this binding is a fresh existential + variable. + +.. tacn:: instantiate (@ident := @term ) + :name: instantiate + + The instantiate tactic refines (see :tacn:`refine`) an existential variable + :n:`@ident` with the term :n:`@term`. It is equivalent to only [ident]: + :n:`refine @term` (preferred alternative). + + .. note:: To be able to refer to an existential variable by name, the user + must have given the name explicitly (see :ref:`TODO-2.11`). + + .. note:: When you are referring to hypotheses which you did not name + explicitly, be aware that Coq may make a different decision on how to + name the variable in the current goal and in the context of the + existential variable. This can lead to surprising behaviors. + +.. tacv:: instantiate (@num := @term) + + This variant allows to refer to an existential variable which was not named + by the user. The :n:`@num` argument is the position of the existential variable + from right to left in the goal. Because this variant is not robust to slight + changes in the goal, its use is strongly discouraged. + +.. tacv:: instantiate ( @num := @term ) in @ident +.. tacv:: instantiate ( @num := @term ) in ( Value of @ident ) +.. tacv:: instantiate ( @num := @term ) in ( Type of @ident ) + + These allow to refer respectively to existential variables occurring in a + hypothesis or in the body or the type of a local definition. + +.. tacv:: instantiate + + Without argument, the instantiate tactic tries to solve as many existential + variables as possible, using information gathered from other tactics in the + same tactical. This is automatically done after each complete tactic (i.e. 
+ after a dot in proof mode), but not, for example, between each tactic when + they are sequenced by semicolons. + +.. tacn:: admit + :name: admit + +The admit tactic allows temporarily skipping a subgoal so as to +progress further in the rest of the proof. A proof containing admitted +goals cannot be closed with :g:`Qed` but only with :g:`Admitted`. + +.. tacv:: give_up + + Synonym of :n:`admit`. + +.. tacn:: absurd @term + :name: absurd + + This tactic applies to any goal. The argument term is any proposition + :g:`P` of type :g:`Prop`. This tactic applies False elimination, that is it + deduces the current goal from False, and generates as subgoals :g:`∼P` and + :g:`P`. It is very useful in proofs by cases, where some cases are + impossible. In most cases, :g:`P` or :g:`∼P` is one of the hypotheses of the + local context. + +.. tacn:: contradiction + :name: contradiction + + This tactic applies to any goal. The contradiction tactic attempts to + find in the current context (after all intros) an hypothesis that is + equivalent to an empty inductive type (e.g. :g:`False`), to the negation of + a singleton inductive type (e.g. :g:`True` or :g:`x=x`), or two contradictory + hypotheses. + +.. exn:: No such assumption + +.. tacv:: contradiction @ident + + The proof of False is searched in the hypothesis named :n:`@ident`. + +.. tacn:: contradict @ident + :name: contradict + + This tactic allows manipulating negated hypothesis and goals. The name + :n:`@ident` should correspond to a hypothesis. With :n:`contradict H`, the + current goal and context is transformed in the following way: + + + H:¬A ⊢ B becomes ⊢ A + + H:¬A ⊢ ¬B becomes H: B ⊢ A + + H: A ⊢ B becomes ⊢ ¬A + + H: A ⊢ ¬B becomes H: B ⊢ ¬A + +.. tacn:: exfalso + :name: exfalso + + This tactic implements the “ex falso quodlibet” logical principle: an + elimination of False is performed on the current goal, and the user is + then required to prove that False is indeed provable in the current + context. 
This tactic is a macro for :n:`elimtype False`. + +Case analysis and induction +------------------------------- + +The tactics presented in this section implement induction or case +analysis on inductive or co-inductive objects (see :ref:`TODO-4.5`). + +.. tacn:: destruct @term + :name: destruct + + This tactic applies to any goal. The argument :n:`@term` must be of + inductive or co-inductive type and the tactic generates subgoals, one + for each possible form of :n:`@term`, i.e. one for each constructor of the + inductive or co-inductive type. Unlike :n:`induction`, no induction + hypothesis is generated by :n:`destruct`. + + There are special cases: + + + If :n:`@term` is an identifier :n:`@ident` denoting a quantified variable + of the conclusion of the goal, then :n:`destruct @ident` behaves as + :n:`intros until @ident; destruct @ident`. If :n:`@ident` is not anymore + dependent in the goal after application of :n:`destruct`, it is erased + (to avoid erasure, use parentheses, as in :n:`destruct (@ident)`). + + + If term is a num, then destruct num behaves asintros until num + followed by destruct applied to the last introduced hypothesis. + + .. note:: + For destruction of a numeral, use syntax destruct (num) (not + very interesting anyway). + + + In case term is an hypothesis :n:`@ident` of the context, and :n:`@ident` + is not anymore dependent in the goal after application of :n:`destruct`, it + is erased (to avoid erasure, use parentheses, as in :n:`destruct (@ident)`). + + + The argument :n:`@term` can also be a pattern of which holes are denoted + by “_”. In this case, the tactic checks that all subterms matching the + pattern in the conclusion and the hypotheses are compatible and + performs case analysis using this subterm. + +.. tacv:: destruct {+, @term} + + This is a shortcut for :n:`destruct term; ...; destruct term`. + +.. 
tacv:: destruct @term as @disj_conj_intro_pattern + + This behaves as :n:`destruct @term` but uses the names in :n:`@intro_pattern` + to name the variables introduced in the context. The :n:`@intro_pattern` must + have the form :n:`[p11 ... p1n | ... | pm1 ... pmn ]` with `m` being the + number of constructors of the type of :n:`@term`. Each variable introduced + by :n:`destruct` in the context of the `i`-th goal gets its name from the + list :n:`pi1 ... pin` in order. If there are not enough names, + :n:`@destruct` invents names for the remaining variables to introduce. More + generally, the :n:`pij` can be any introduction pattern (see + :tacn:`intros`). This provides a concise notation for chaining destruction of + an hypothesis. + +.. tacv:: destruct @term eqn:@naming_intro_pattern + + This behaves as :n:`destruct @term` but adds an equation between :n:`@term` + and the value that :n:`@term` takes in each of the possible cases. The name + of the equation is specified by :n:`@naming_intro_pattern` (see + :tacn:`intros`), in particular `?` can be used to let Coq generate a fresh + name. + +.. tacv:: destruct @term with @bindings_list + + This behaves like :n:`destruct @term` providing explicit instances for the + dependent premises of the type of :n:`@term` (see :ref:`syntax of bindings <bindingslist>`). + +.. tacv:: edestruct @term + + This tactic behaves like :n:`destruct @term` except that it does not fail if + the instance of a dependent premises of the type of :n:`@term` is not + inferable. Instead, the unresolved instances are left as existential + variables to be inferred later, in the same way as :tacn:`eapply` does. + +.. tacv:: destruct @term using @term +.. tacv:: destruct @term using @term with @bindings_list + + These are synonyms of :n:`induction @term using @term` and + :n:`induction @term using @term with @bindings_list`. + +.. 
tacv:: destruct @term in @goal_occurrences + + This syntax is used for selecting which occurrences of :n:`@term` the case + analysis has to be done on. The :n:`in @goal_occurrences` clause is an + occurrence clause whose syntax and behavior is described in + :ref:`occurences sets <occurencessets>`. + +.. tacv:: destruct @term with @bindings_list as @disj_conj_intro_pattern eqn:@naming_intro_pattern using @term with @bindings_list in @goal_occurrences +.. tacv:: edestruct @term with @bindings_list as @disj_conj_intro_pattern eqn:@naming_intro_pattern using @term with @bindings_list in @goal_occurrences + + These are the general forms of :n:`destruct` and :n:`edestruct`. They combine + the effects of the `with`, `as`, `eqn:`, `using`, and `in` clauses. + +.. tacv:: case term + + The tactic :n:`case` is a more basic tactic to perform case analysis without + recursion. It behaves as :n:`elim @term` but using a case-analysis + elimination principle and not a recursive one. + +.. tacv:: case @term with @bindings_list + + Analogous to :n:`elim @term with @bindings_list` above. + +.. tacv:: ecase @term +.. tacv:: ecase @term with @bindings_list + + In case the type of :n:`@term` has dependent premises, or dependent premises + whose values are not inferable from the :n:`with @bindings_list` clause, + :n:`ecase` turns them into existential variables to be resolved later on. + +.. tacv:: simple destruct @ident + + This tactic behaves as :n:`intros until @ident; case @ident` when :n:`@ident` + is a quantified variable of the goal. + +.. tacv:: simple destruct @num + + This tactic behaves as :n:`intros until @num; case @ident` where :n:`@ident` + is the name given by :n:`intros until @num` to the :n:`@num` -th + non-dependent premise of the goal. + +.. tacv:: case_eq @term + + The tactic :n:`case_eq` is a variant of the :n:`case` tactic that allow to + perform case analysis on a term without completely forgetting its original + form. 
This is done by generating equalities between the original form of the + term and the outcomes of the case analysis. + +.. tacn:: induction @term + :name: induction + + This tactic applies to any goal. The argument :n:`@term` must be of + inductive type and the tactic :n:`induction` generates subgoals, one for + each possible form of :n:`@term`, i.e. one for each constructor of the + inductive type. + + If the argument is dependent in either the conclusion or some + hypotheses of the goal, the argument is replaced by the appropriate + constructor form in each of the resulting subgoals and induction + hypotheses are added to the local context using names whose prefix + is **IH**. + + There are particular cases: + + + If term is an identifier :n:`@ident` denoting a quantified variable of the + conclusion of the goal, then inductionident behaves as :n:`intros until + @ident; induction @ident`. If :n:`@ident` is not anymore dependent in the + goal after application of :n:`induction`, it is erased (to avoid erasure, + use parentheses, as in :n:`induction (@ident)`). + + If :n:`@term` is a :n:`@num`, then :n:`induction @num` behaves as + :n:`intros until @num` followed by :n:`induction` applied to the last + introduced hypothesis. + + .. note:: + For simple induction on a numeral, use syntax induction (num) + (not very interesting anyway). + + + In case term is an hypothesis :n:`@ident` of the context, and :n:`@ident` + is not anymore dependent in the goal after application of :n:`induction`, + it is erased (to avoid erasure, use parentheses, as in + :n:`induction (@ident)`). + + The argument :n:`@term` can also be a pattern of which holes are denoted + by “_”. In this case, the tactic checks that all subterms matching the + pattern in the conclusion and the hypotheses are compatible and + performs induction using this subterm. + +.. example:: + .. coqtop:: reset all + + Lemma induction_test : forall n:nat, n = n -> n <= n. + intros n H. + induction n. + +.. 
exn:: Not an inductive product + +.. exn:: Unable to find an instance for the variables @ident ... @ident + + Use in this case the variant :tacn:`elim ... with` below. + +.. tacv:: induction @term as @disj_conj_intro_pattern + + This behaves as :tacn:`induction` but uses the names in + :n:`@disj_conj_intro_pattern` to name the variables introduced in the + context. The :n:`@disj_conj_intro_pattern` must typically be of the form + :n:`[ p` :sub:`11` :n:`... p` :sub:`1n` :n:`| ... | p`:sub:`m1` :n:`... p`:sub:`mn` :n:`]` + with :n:`m` being the number of constructors of the type of :n:`@term`. Each + variable introduced by induction in the context of the i-th goal gets its + name from the list :n:`p`:sub:`i1` :n:`... p`:sub:`in` in order. If there are + not enough names, induction invents names for the remaining variables to + introduce. More generally, the :n:`p`:sub:`ij` can be any + disjunctive/conjunctive introduction pattern (see :tacn:`intros ...`). For + instance, for an inductive type with one constructor, the pattern notation + :n:`(p`:sub:`1` :n:`, ... , p`:sub:`n` :n:`)` can be used instead of + :n:`[ p`:sub:`1` :n:`... p`:sub:`n` :n:`]`. + +.. tacv:: induction @term with @bindings_list + + This behaves like :tacn:`induction` providing explicit instances for the + premises of the type of :n:`term` (see :ref:`bindings list <bindingslist>`). + +.. tacv:: einduction @term + + This tactic behaves like :tacn:`induction` except that it does not fail if + some dependent premise of the type of :n:`@term` is not inferable. Instead, + the unresolved premises are posed as existential variables to be inferred + later, in the same way as :tacn:`eapply` does. + +.. tacv:: induction @term using @term + :name: induction ... using ... + + This behaves as :tacn:`induction` but using :n:`@term` as induction scheme. + It does not expect the conclusion of the type of the first :n:`@term` to be + inductive. + +.. 
tacv:: induction @term using @term with @bindings_list + + This behaves as :tacn:`induction ... using ...` but also providing instances + for the premises of the type of the second :n:`@term`. + +.. tacv:: induction {+, @term} using @qualid + + This syntax is used for the case :n:`@qualid` denotes an induction principle + with complex predicates as the induction principles generated by + ``Function`` or ``Functional Scheme`` may be. + +.. tacv:: induction @term in @goal_occurrences + + This syntax is used for selecting which occurrences of :n:`@term` the + induction has to be carried on. The :n:`in @goal_occurrences` clause is an + occurrence clause whose syntax and behavior is described in + :ref:`occurences sets <occurencessets>`. If variables or hypotheses not + mentioning :n:`@term` in their type are listed in :n:`@goal_occurrences`, + those are generalized as well in the statement to prove. + + .. example:: + .. coqtop:: reset all + + Lemma comm x y : x + y = y + x. + induction y in x |- *. + Show 2. + +.. tacv:: induction @term with @bindings_list as @disj_conj_intro_pattern using @term with @bindings_list in @goal_occurrences + +.. tacv:: einduction @term with @bindings_list as @disj_conj_intro_pattern using @term with @bindings_list in @goal_occurrences + + These are the most general forms of ``induction`` and ``einduction``. It combines the + effects of the with, as, using, and in clauses. + +.. tacv:: elim @term + :name: elim + + This is a more basic induction tactic. Again, the type of the argument + :n:`@term` must be an inductive type. Then, according to the type of the + goal, the tactic ``elim`` chooses the appropriate destructor and applies it + as the tactic :tacn:`apply` would do. For instance, if the proof context + contains :g:`n:nat` and the current goal is :g:`T` of type :g:`Prop`, then + :n:`elim n` is equivalent to :n:`apply nat_ind with (n:=n)`. 
The tactic + ``elim`` does not modify the context of the goal, neither introduces the + induction loading into the context of hypotheses. More generally, + :n:`elim @term` also works when the type of :n:`@term` is a statement + with premises and whose conclusion is inductive. In that case the tactic + performs induction on the conclusion of the type of :n:`@term` and leaves the + non-dependent premises of the type as subgoals. In the case of dependent + products, the tactic tries to find an instance for which the elimination + lemma applies and fails otherwise. + +.. tacv:: elim @term with @bindings_list + :name: elim ... with + + Allows to give explicit instances to the premises of the type of :n:`@term` + (see :ref:`bindings list <bindingslist>`). + +.. tacv:: eelim @term + + In case the type of :n:`@term` has dependent premises, this turns them into + existential variables to be resolved later on. + +.. tacv:: elim @term using @term +.. tacv:: elim @term using @term with @bindings_list + + Allows the user to give explicitly an elimination predicate :n:`@term` that + is not the standard one for the underlying inductive type of :n:`@term`. The + :n:`@bindings_list` clause allows instantiating premises of the type of + :n:`@term`. + +.. tacv:: elim @term with @bindings_list using @term with @bindings_list +.. tacv:: eelim @term with @bindings_list using @term with @bindings_list + + These are the most general forms of ``elim`` and ``eelim``. It combines the + effects of the ``using`` clause and of the two uses of the ``with`` clause. + +.. tacv:: elimtype form + + The argument :n:`form` must be inductively defined. :n:`elimtype I` is + equivalent to :n:`cut I. intro Hn; elim Hn; clear Hn.` Therefore the + hypothesis :g:`Hn` will not appear in the context(s) of the subgoal(s). + Conversely, if :g:`t` is a :n:`@term` of (inductive) type :g:`I` that does + not occur in the goal, then :n:`elim t` is equivalent to + :n:`elimtype I; 2:exact t.` + +.. 
tacv:: simple induction @ident + + This tactic behaves as :n:`intros until @ident; elim @ident` when + :n:`@ident` is a quantified variable of the goal. + +.. tacv:: simple induction @num + + This tactic behaves as :n:`intros until @num; elim @ident` where :n:`@ident` + is the name given by :n:`intros until @num` to the :n:`@num`-th non-dependent + premise of the goal. + +.. tacn:: double induction @ident @ident + :name: double induction + + This tactic is deprecated and should be replaced by + :n:`induction @ident; induction @ident` (or + :n:`induction @ident ; destruct @ident` depending on the exact needs). + +.. tacv:: double induction num1 num2 + + This tactic is deprecated and should be replaced by + :n:`induction num1; induction num3` where :n:`num3` is the result + of :n:`num2 - num1` + +.. tacn:: dependent induction @ident + :name: dependent induction + + The *experimental* tactic dependent induction performs induction- + inversion on an instantiated inductive predicate. One needs to first + require the Coq.Program.Equality module to use this tactic. The tactic + is based on the BasicElim tactic by Conor McBride + :cite:`DBLP:conf/types/McBride00` and the work of Cristina Cornes around + inversion :cite:`DBLP:conf/types/CornesT95`. From an instantiated + inductive predicate and a goal, it generates an equivalent goal where + the hypothesis has been generalized over its indexes which are then + constrained by equalities to be the right instances. This permits to + state lemmas without resorting to manually adding these equalities and + still get enough information in the proofs. + +.. example:: + .. coqtop:: reset all + + Lemma le_minus : forall n:nat, n < 1 -> n = 0. + intros n H ; induction H. + + Here we did not get any information on the indexes to help fulfill + this proof. The problem is that, when we use the ``induction`` tactic, we + lose information on the hypothesis instance, notably that the second + argument is 1 here. 
Dependent induction solves this problem by adding + the corresponding equality to the context. + + .. coqtop:: reset all + + Require Import Coq.Program.Equality. + Lemma le_minus : forall n:nat, n < 1 -> n = 0. + intros n H ; dependent induction H. + + The subgoal is cleaned up as the tactic tries to automatically + simplify the subgoals with respect to the generated equalities. In + this enriched context, it becomes possible to solve this subgoal. + + .. coqtop:: all + + reflexivity. + + Now we are in a contradictory context and the proof can be solved. + + .. coqtop:: all + + inversion H. + + This technique works with any inductive predicate. In fact, the + ``dependent induction`` tactic is just a wrapper around the ``induction`` + tactic. One can make its own variant by just writing a new tactic + based on the definition found in ``Coq.Program.Equality``. + +.. tacv:: dependent induction @ident generalizing {+ @ident} + + This performs dependent induction on the hypothesis :n:`@ident` but first + generalizes the goal by the given variables so that they are universally + quantified in the goal. This is generally what one wants to do with the + variables that are inside some constructors in the induction hypothesis. The + other ones need not be further generalized. + +.. tacv:: dependent destruction @ident + + This performs the generalization of the instance :n:`@ident` but uses + ``destruct`` instead of induction on the generalized hypothesis. This gives + results equivalent to ``inversion`` or ``dependent inversion`` if the + hypothesis is dependent. + +See also :ref:`TODO-10.1-dependentinduction` for a larger example of ``dependent induction`` +and an explanation of the underlying technique. + +.. tacn:: function induction (@qualid {+ @term}) + :name: function induction + + The tactic functional induction performs case analysis and induction + following the definition of a function. 
It makes use of a principle + generated by ``Function`` (see :ref:`TODO-2.3-Advancedrecursivefunctions`) or + ``Functional Scheme`` (see :ref:`TODO-13.2-Generationofinductionschemeswithfunctionalscheme`). + Note that this tactic is only available after a + +.. example:: + .. coqtop:: reset all + + Require Import FunInd. + Functional Scheme minus_ind := Induction for minus Sort Prop. + Check minus_ind. + Lemma le_minus (n m:nat) : n - m <= n. + functional induction (minus n m) using minus_ind; simpl; auto. + Qed. + +.. note:: + :n:`(@qualid {+ @term})` must be a correct full application + of :n:`@qualid`. In particular, the rules for implicit arguments are the + same as usual. For example use :n:`@qualid` if you want to write implicit + arguments explicitly. + +.. note:: + Parentheses over :n:`@qualid {+ @term}` are mandatory. + +.. note:: + :n:`functional induction (f x1 x2 x3)` is actually a wrapper for + :n:`induction x1, x2, x3, (f x1 x2 x3) using @qualid` followed by a cleaning + phase, where :n:`@qualid` is the induction principle registered for :g:`f` + (by the ``Function`` (see :ref:`TODO-2.3-Advancedrecursivefunctions`) or + ``Functional Scheme`` (see :ref:`TODO-13.2-Generationofinductionschemeswithfunctionalscheme`) + command) corresponding to the sort of the goal. Therefore + ``functional induction`` may fail if the induction scheme :n:`@qualid` is not + defined. See also :ref:`TODO-2.3-Advancedrecursivefunctions` for the function + terms accepted by ``Function``. + +.. note:: + There is a difference between obtaining an induction scheme + for a function by using :g:`Function` (see :ref:`TODO-2.3-Advancedrecursivefunctions`) + and by using :g:`Functional Scheme` after a normal definition using + :g:`Fixpoint` or :g:`Definition`. See :ref:`TODO-2.3-Advancedrecursivefunctions` + for details. + +See also: :ref:`TODO-2.3-Advancedrecursivefunctions` + :ref:`TODO-13.2-Generationofinductionschemeswithfunctionalscheme` + :tacn:`inversion` + +.. 
exn:: Cannot find induction information on @qualid +.. exn:: Not the right number of induction arguments + +.. tacv:: functional induction (@qualid {+ @term}) as @disj_conj_intro_pattern using @term with @bindings_list + + Similarly to :tacn:`induction` and :tacn:`elim`, this allows giving + explicitly the name of the introduced variables, the induction principle, and + the values of dependent premises of the elimination scheme, including + *predicates* for mutual induction when :n:`@qualid` is part of a mutually + recursive definition. + +.. tacn:: discriminate @term + :name: discriminate + + This tactic proves any goal from an assumption stating that two + structurally different :n:`@terms` of an inductive set are equal. For + example, from :g:`(S (S O))=(S O)` we can derive by absurdity any + proposition. + + The argument :n:`@term` is assumed to be a proof of a statement of + conclusion :n:`@term = @term` with the two terms being elements of an + inductive set. To build the proof, the tactic traverses the normal forms + [3]_ of the terms looking for a couple of subterms :g:`u` and :g:`w` (:g:`u` + subterm of the normal form of :n:`@term` and :g:`w` subterm of the normal + form of :n:`@term`), placed at the same positions and whose head symbols are + two different constructors. If such a couple of subterms exists, then the + proof of the current goal is completed, otherwise the tactic fails. + +.. note:: + The syntax :n:`discriminate @ident` can be used to refer to a hypothesis + quantified in the goal. In this case, the quantified hypothesis whose name is + :n:`@ident` is first introduced in the local context using + :n:`intros until @ident`. + +.. exn:: No primitive equality found +.. exn:: Not a discriminable equality + +.. tacv:: discriminate @num + + This does the same thing as :n:`intros until @num` followed by + :n:`discriminate @ident` where :n:`@ident` is the identifier for the last + introduced hypothesis. + +.. 
tacv:: discriminate @term with @bindings_list + + This does the same thing as :n:`discriminate @term` but using the given + bindings to instantiate parameters or hypotheses of :n:`@term`. + +.. tacv:: ediscriminate @num +.. tacv:: ediscriminate @term {? with @bindings_list} + + This works the same as ``discriminate`` but if the type of :n:`@term`, or the + type of the hypothesis referred to by :n:`@num`, has uninstantiated + parameters, these parameters are left as existential variables. + +.. tacv:: discriminate + + This behaves like :n:`discriminate @ident` if ident is the name of an + hypothesis to which ``discriminate`` is applicable; if the current goal is of + the form :n:`@term <> @term`, this behaves as + :n:`intro @ident; discriminate @ident`. + + .. exn:: No discriminable equalities + +.. tacn:: injection @term + :name: injection + + The injection tactic exploits the property that constructors of + inductive types are injective, i.e. that if :g:`c` is a constructor of an + inductive type and :g:`c t`:sub:`1` and :g:`c t`:sub:`2` are equal then + :g:`t`:sub:`1` and :g:`t`:sub:`2` are equal too. + + If :n:`@term` is a proof of a statement of conclusion :n:`@term = @term`, + then ``injection`` applies the injectivity of constructors as deep as + possible to derive the equality of all the subterms of :n:`@term` and + :n:`@term` at positions where the terms start to differ. For example, from + :g:`(S p, S n) = (q, S (S m))` we may derive :g:`S p = q` and + :g:`n = S m`. For this tactic to work, the terms should be typed with an + inductive type and they should be neither convertible, nor having a different + head constructor. If these conditions are satisfied, the tactic derives the + equality of all the subterms at positions where they differ and adds them as + antecedents to the conclusion of the current goal. + +.. example:: + + Consider the following goal: + + .. coqtop:: reset all + + Inductive list : Set := + | nil : list + | cons : nat -> list -> list. 
+ Variable P : list -> Prop. + Goal forall l n, P nil -> cons n l = cons 0 nil -> P l. + intros. + injection H0. + + +Beware that injection yields an equality in a sigma type whenever the +injected object has a dependent type :g:`P` with its two instances in +different types :g:`(P t`:sub:`1` :g:`... t`:sub:`n` :g:`)` and +:g:`(P u`:sub:`1` :g:`... u`:sub:`n` :sub:`)`. If :g:`t`:sub:`1` and +:g:`u`:sub:`1` are the same and have for type an inductive type for which a decidable +equality has been declared using the command ``Scheme Equality`` (see :ref:`TODO-13.1-GenerationofinductionprincipleswithScheme`), +the use of a sigma type is avoided. + +.. note:: + If some quantified hypothesis of the goal is named :n:`@ident`, + then :n:`injection @ident` first introduces the hypothesis in the local + context using :n:`intros until @ident`. + +.. exn:: Not a projectable equality but a discriminable one +.. exn:: Nothing to do, it is an equality between convertible @terms +.. exn:: Not a primitive equality +.. exn:: Nothing to inject + +.. tacv:: injection @num + + This does the same thing as :n:`intros until @num` followed by + :n:`injection @ident` where :n:`@ident` is the identifier for the last + introduced hypothesis. + +.. tacv:: injection @term with @bindings_list + + This does the same as :n:`injection @term` but using the given bindings to + instantiate parameters or hypotheses of :n:`@term`. + +.. tacv:: einjection @num +.. tacv:: einjection @term {? with @bindings_list} + + This works the same as :n:`injection` but if the type of :n:`@term`, or the + type of the hypothesis referred to by :n:`@num`, has uninstantiated + parameters, these parameters are left as existential variables. + +.. tacv:: injection + + If the current goal is of the form :n:`@term <> @term` , this behaves as + :n:`intro @ident; injection @ident`. + + .. exn:: goal does not satisfy the expected preconditions + +.. tacv:: injection @term {? with @bindings_list} as {+ @intro_pattern} +.. 
tacv:: injection @num as {+ intro_pattern} +.. tacv:: injection as {+ intro_pattern} +.. tacv:: einjection @term {? with @bindings_list} as {+ intro_pattern} +.. tacv:: einjection @num as {+ intro_pattern} +.. tacv:: einjection as {+ intro_pattern} + + These variants apply :n:`intros {+ @intro_pattern}` after the call to + ``injection`` or ``einjection`` so that all equalities generated are moved in + the context of hypotheses. The number of :n:`@intro_pattern` must not exceed + the number of equalities newly generated. If it is smaller, fresh + names are automatically generated to adjust the list of :n:`@intro_pattern` + to the number of new equalities. The original equality is erased if it + corresponds to an hypothesis. + +It is possible to ensure that :n:`injection @term` erases the original +hypothesis and leaves the generated equalities in the context rather +than putting them as antecedents of the current goal, as if giving +:n:`injection @term as` (with an empty list of names). To obtain this +behavior, the option ``Set Structural Injection`` must be activated. This +option is off by default. + +By default, ``injection`` only creates new equalities between :n:`@terms` whose +type is in sort :g:`Type` or :g:`Set`, thus implementing a special behavior for +objects that are proofs of a statement in :g:`Prop`. This behavior can be +turned off by setting the option ``Set Keep Proof Equalities``. + +.. tacn:: inversion @ident + :name: inversion + + Let the type of :n:`@ident` in the local context be :g:`(I t)`, where :g:`I` + is a (co)inductive predicate. Then, ``inversion`` applied to :n:`@ident` + derives for each possible constructor :g:`c i` of :g:`(I t)`, all the + necessary conditions that should hold for the instance :g:`(I t)` to be + proved by :g:`c i`. + +.. 
note:: + If :n:`@ident` does not denote a hypothesis in the local context but + refers to a hypothesis quantified in the goal, then the latter is + first introduced in the local context using :n:`intros until @ident`. + +.. note:: + As ``inversion`` proofs may be large in size, we recommend the + user to stock the lemmas whenever the same instance needs to be + inverted several times. See :ref:`TODO-13.3-Generationofinversionprincipleswithderiveinversion`. + +.. note:: + Part of the behavior of the ``inversion`` tactic is to generate + equalities between expressions that appeared in the hypothesis that is + being processed. By default, no equalities are generated if they + relate two proofs (i.e. equalities between :n:`@terms` whose type is in sort + :g:`Prop`). This behavior can be turned off by using the option ``Set Keep + Proof Equalities``. + +.. tacv:: inversion @num + + This does the same thing as :n:`intros until @num` then :n:`inversion @ident` + where :n:`@ident` is the identifier for the last introduced hypothesis. + +.. tacv:: inversion_clear @ident + + This behaves as :n:`inversion` and then erases :n:`@ident` from the context. + +.. tacv:: inversion @ident as @intro_pattern + + This generally behaves as inversion but using names in :n:`@intro_pattern` + for naming hypotheses. The :n:`@intro_pattern` must have the form + :n:`[p`:sub:`11` :n:`... p`:sub:`1n` :n:`| ... | p`:sub:`m1` :n:`... p`:sub:`mn` :n:`]` + with `m` being the number of constructors of the type of :n:`@ident`. Be + careful that the list must be of length `m` even if ``inversion`` discards + some cases (which is precisely one of its roles): for the discarded + cases, just use an empty list (i.e. `n = 0`).The arguments of the i-th + constructor and the equalities that ``inversion`` introduces in the + context of the goal corresponding to the i-th constructor, if it + exists, get their names from the list :n:`p`:sub:`i1` :n:`... p`:sub:`in` in + order. 
If there are not enough names, ``inversion`` invents names for the + remaining variables to introduce. In case an equation splits into several + equations (because ``inversion`` applies ``injection`` on the equalities it + generates), the corresponding name :n:`p`:sub:`ij` in the list must be + replaced by a sublist of the form :n:`[p`:sub:`ij1` :n:`... p`:sub:`ijq` :n:`]` + (or, equivalently, :n:`(p`:sub:`ij1` :n:`, ..., p`:sub:`ijq` :n:`)`) where + `q` is the number of subequalities obtained from splitting the original + equation. Here is an example. The ``inversion ... as`` variant of + ``inversion`` generally behaves in a slightly more expectable way than + ``inversion`` (no artificial duplication of some hypotheses referring to + other hypotheses). To take benefit of these improvements, it is enough to use + ``inversion ... as []``, letting the names being finally chosen by Coq. + + .. example:: + + .. coqtop:: reset all + + Inductive contains0 : list nat -> Prop := + | in_hd : forall l, contains0 (0 :: l) + | in_tl : forall l b, contains0 l -> contains0 (b :: l). + Goal forall l:list nat, contains0 (1 :: l) -> contains0 l. + intros l H; inversion H as [ | l' p Hl' [Heqp Heql'] ]. + +.. tacv:: inversion @num as @intro_pattern + + This allows naming the hypotheses introduced by :n:`inversion @num` in the + context. + +.. tacv:: inversion_clear @ident as @intro_pattern + + This allows naming the hypotheses introduced by ``inversion_clear`` in the + context. Notice that hypothesis names can be provided as if ``inversion`` + were called, even though the ``inversion_clear`` will eventually erase the + hypotheses. + +.. tacv:: inversion @ident in {+ @ident} + + Let :n:`{+ @ident}` be identifiers in the local context. This tactic behaves as + generalizing :n:`{+ @ident}`, and then performing ``inversion``. + +.. 
tacv:: inversion @ident as @intro_pattern in {+ @ident} + + This allows naming the hypotheses introduced in the context by + :n:`inversion @ident in {+ @ident}`. + +.. tacv:: inversion_clear @ident in {+ @ident} + + Let :n:`{+ @ident}` be identifiers in the local context. This tactic behaves + as generalizing :n:`{+ @ident}`, and then performing ``inversion_clear``. + +.. tacv:: inversion_clear @ident as @intro_pattern in {+ @ident} + + This allows naming the hypotheses introduced in the context by + :n:`inversion_clear @ident in {+ @ident}`. + +.. tacv:: dependent inversion @ident + :name: dependent inversion + + That must be used when :n:`@ident` appears in the current goal. It acts like + ``inversion`` and then substitutes :n:`@ident` for the corresponding + :n:`@@term` in the goal. + +.. tacv:: dependent inversion @ident as @intro_pattern + + This allows naming the hypotheses introduced in the context by + :n:`dependent inversion @ident`. + +.. tacv:: dependent inversion_clear @ident + + Like ``dependent inversion``, except that :n:`@ident` is cleared from the + local context. + +.. tacv:: dependent inversion_clear @ident as @intro_pattern + + This allows naming the hypotheses introduced in the context by + :n:`dependent inversion_clear @ident`. + +.. tacv:: dependent inversion @ident with @term + :name: dependent inversion ... + + This variant allows you to specify the generalization of the goal. It is + useful when the system fails to generalize the goal automatically. If + :n:`@ident` has type :g:`(I t)` and :g:`I` has type :math:`\forall` + :g:`(x:T), s`, then :n:`@term` must be of type :g:`I:`:math:`\forall` + :g:`(x:T), I x -> s'` where :g:`s'` is the type of the goal. + +.. tacv:: dependent inversion @ident as @intro_pattern with @term + + This allows naming the hypotheses introduced in the context by + :n:`dependent inversion @ident with @term`. + +.. 
tacv:: dependent inversion_clear @ident with @term + + Like :tacn:`dependent inversion ...` with but clears :n:`@ident` from the + local context. + +.. tacv:: dependent inversion_clear @ident as @intro_pattern with @term + + This allows naming the hypotheses introduced in the context by + :n:`dependent inversion_clear @ident with @term`. + +.. tacv:: simple inversion @ident + + It is a very primitive inversion tactic that derives all the necessary + equalities but it does not simplify the constraints as ``inversion`` does. + +.. tacv:: simple inversion @ident as @intro_pattern + + This allows naming the hypotheses introduced in the context by + ``simple inversion``. + +.. tacv:: inversion @ident using @ident + + Let :n:`@ident` have type :g:`(I t)` (:g:`I` an inductive predicate) in the + local context, and :n:`@ident` be a (dependent) inversion lemma. Then, this + tactic refines the current goal with the specified lemma. + +.. tacv:: inversion @ident using @ident in {+ @ident} + + This tactic behaves as generalizing :n:`{+ @ident}`, then doing + :n:`inversion @ident using @ident`. + +.. tacv:: inversion_sigma + + This tactic turns equalities of dependent pairs (e.g., + :g:`existT P x p = existT P y q`, frequently left over by inversion on + a dependent type family) into pairs of equalities (e.g., a hypothesis + :g:`H : x = y` and a hypothesis of type :g:`rew H in p = q`); these + hypotheses can subsequently be simplified using :tacn:`subst`, without ever + invoking any kind of axiom asserting uniqueness of identity proofs. If you + want to explicitly specify the hypothesis to be inverted, or name the + generated hypotheses, you can invoke + :n:`induction H as [H1 H2] using eq_sigT_rect.` This tactic also works for + :g:`sig`, :g:`sigT2`, and :g:`sig2`, and there are similar :g:`eq_sig` + :g:`***_rect` induction lemmas. + +.. example:: + + *Non-dependent inversion*. + + Let us consider the relation Le over natural numbers and the following + variables: + + .. 
coqtop:: all
+
+      Inductive Le : nat -> nat -> Set :=
+      | LeO : forall n:nat, Le 0 n
+      | LeS : forall n m:nat, Le n m -> Le (S n) (S m).
+      Variable P : nat -> nat -> Prop.
+      Variable Q : forall n m:nat, Le n m -> Prop.
+
+   Let us consider the following goal:
+
+   .. coqtop:: none
+
+      Goal forall n m, Le (S n) m -> P n m.
+      intros.
+
+   .. coqtop:: all
+
+      Show.
+
+   To prove the goal, we may need to reason by cases on H and to derive
+   that m is necessarily of the form (S m0) for a certain m0 and that
+   (Le n m0). Deriving these conditions corresponds to proving that the
+   only possible constructor of (Le (S n) m) is LeS and that we can invert
+   the -> in the type of LeS. This inversion is possible because Le is the
+   smallest set closed by the constructors LeO and LeS.
+
+   .. coqtop:: undo all
+
+      inversion_clear H.
+
+   Note that m has been substituted in the goal for (S m0) and that the
+   hypothesis (Le n m0) has been added to the context.
+
+   Sometimes it is interesting to have the equality m=(S m0) in the
+   context to use it after. In that case we can use inversion that does
+   not clear the equalities:
+
+   .. coqtop:: undo all
+
+      inversion H.
+
+.. example::
+
+   *Dependent inversion.*
+
+   Let us consider the following goal:
+
+   .. coqtop:: reset none
+
+      Inductive Le : nat -> nat -> Set :=
+      | LeO : forall n:nat, Le 0 n
+      | LeS : forall n m:nat, Le n m -> Le (S n) (S m).
+      Variable P : nat -> nat -> Prop.
+      Variable Q : forall n m:nat, Le n m -> Prop.
+      Goal forall n m (H:Le (S n) m), Q (S n) m H.
+      intros.
+
+   .. coqtop:: all
+
+      Show.
+
+   As H occurs in the goal, we may want to reason by cases on its
+   structure and so, we would like inversion tactics to substitute H by
+   the corresponding @term in constructor form. Neither Inversion nor
+   Inversion_clear make such a substitution. To have such a behavior we
+   use the dependent inversion tactics:
+
+   .. coqtop:: all
+
+      dependent inversion_clear H. 
+
+   Note that H has been substituted by (LeS n m0 l) and m by (S m0).
+
+.. example::
+
+   *Using inversion_sigma.*
+
+   Let us consider the following inductive type of
+   length-indexed lists, and a lemma about inverting equality of cons:
+
+   .. coqtop:: reset all
+
+      Require Import Coq.Logic.Eqdep_dec.
+
+      Inductive vec A : nat -> Type :=
+      | nil : vec A O
+      | cons {n} (x : A) (xs : vec A n) : vec A (S n).
+
+      Lemma invert_cons : forall A n x xs y ys,
+        @cons A n x xs = @cons A n y ys
+        -> xs = ys.
+
+      Proof.
+      intros A n x xs y ys H.
+
+   After performing inversion, we are left with an equality of existTs:
+
+   .. coqtop:: all
+
+      inversion H.
+
+   We can turn this equality into a usable form with inversion_sigma:
+
+   .. coqtop:: all
+
+      inversion_sigma.
+
+   To finish cleaning up the proof, we will need to use the fact that
+   all proofs of n = n for n a nat are eq_refl:
+
+   .. coqtop:: all
+
+      let H := match goal with H : n = n |- _ => H end in
+      pose proof (Eqdep_dec.UIP_refl_nat _ H); subst H.
+      simpl in *.
+
+   Finally, we can finish the proof:
+
+   .. coqtop:: all
+
+      assumption.
+      Qed.
+
+.. tacn:: fix @ident @num
+   :name: fix
+
+   This tactic is a primitive tactic to start a proof by induction. In
+   general, it is easier to rely on higher-level induction tactics such
+   as the ones described in :tacn:`induction`.
+
+   In the syntax of the tactic, the identifier :n:`@ident` is the name given to
+   the induction hypothesis. The natural number :n:`@num` tells on which
+   premise of the current goal the induction acts, starting from 1,
+   counting both dependent and non dependent products, but skipping local
+   definitions. Especially, the current lemma must be composed of at
+   least :n:`@num` products.
+
+   Like in a fix expression, the induction hypotheses have to be used on
+   structurally smaller arguments. The verification that inductive proof
+   arguments are correct is done only at the time of registering the
+   lemma in the environment. 
To know if the use of induction hypotheses
+   is correct at some time of the interactive development of a proof, use
+   the command ``Guarded`` (see :ref:`TODO-7.3.2-Guarded`).
+
+.. tacv:: fix @ident @num with {+ (@ident {+ @binder} [{struct @ident}] : @type)}
+
+   This starts a proof by mutual induction. The statements to be simultaneously
+   proved are respectively :g:`forall binder ... binder, type`.
+   The identifiers :n:`@ident` are the names of the induction hypotheses. The identifiers
+   :n:`@ident` are the respective names of the premises on which the induction
+   is performed in the statements to be simultaneously proved (if not given, the
+   system tries to guess itself what they are).
+
+.. tacn:: cofix @ident
+   :name: cofix
+
+   This tactic starts a proof by coinduction. The identifier :n:`@ident` is the
+   name given to the coinduction hypothesis. Like in a cofix expression,
+   the use of coinduction hypotheses has to be guarded by a constructor. The
+   verification that the use of co-inductive hypotheses is correct is
+   done only at the time of registering the lemma in the environment. To
+   know if the use of coinduction hypotheses is correct at some time of
+   the interactive development of a proof, use the command ``Guarded``
+   (see :ref:`TODO-7.3.2-Guarded`).
+
+.. tacv:: cofix @ident with {+ (@ident {+ @binder} : @type)}
+
+   This starts a proof by mutual coinduction. The statements to be
+   simultaneously proved are respectively :g:`forall binder ... binder, type`.
+   The identifiers :n:`@ident` are the names of the coinduction hypotheses.
+
+.. _rewritingexpressions:
+
+Rewriting expressions
+---------------------
+
+These tactics use the equality :g:`eq:forall A:Type, A->A->Prop` defined in
+file ``Logic.v`` (see :ref:`TODO-3.1.2-Logic`). The notation for :g:`eq T t u` is
+simply :g:`t=u` dropping the implicit type of :g:`t` and :g:`u`.
+
+.. tacn:: rewrite @term
+   :name: rewrite
+
+This tactic applies to any goal. 
The type of :n:`@term` must have the form + +``forall (x``:sub:`1` ``:A``:sub:`1` ``) ... (x``:sub:`n` ``:A``:sub:`n` ``). eq term``:sub:`1` ``term``:sub:`2` ``.`` + +where :g:`eq` is the Leibniz equality or a registered setoid equality. + +Then :n:`rewrite @term` finds the first subterm matching `term`\ :sub:`1` in the goal, +resulting in instances `term`:sub:`1`' and `term`:sub:`2`' and then +replaces every occurrence of `term`:subscript:`1`' by `term`:subscript:`2`'. +Hence, some of the variables :g:`x`\ :sub:`i` are solved by unification, +and some of the types :g:`A`\ :sub:`1`:g:`, ..., A`\ :sub:`n` become new +subgoals. + +.. exn:: The @term provided does not end with an equation + +.. exn:: Tactic generated a subgoal identical to the original goal. This happens if @term does not occur in the goal. + +.. tacv:: rewrite -> @term + + Is equivalent to :n:`rewrite @term` + +.. tacv:: rewrite <- @term + + Uses the equality :n:`@term`:sub:`1` :n:`= @term` :sub:`2` from right to left + +.. tacv:: rewrite @term in clause + + Analogous to :n:`rewrite @term` but rewriting is done following clause + (similarly to :ref:`performing computations <performingcomputations>`). For instance: + + + :n:`rewrite H in H`:sub:`1` will rewrite `H` in the hypothesis + `H`:sub:`1` instead of the current goal. + + :n:`rewrite H in H`:sub:`1` :g:`at 1, H`:sub:`2` :g:`at - 2 |- *` means + :n:`rewrite H; rewrite H in H`:sub:`1` :g:`at 1; rewrite H in H`:sub:`2` :g:`at - 2.` + In particular a failure will happen if any of these three simpler tactics + fails. + + :n:`rewrite H in * |-` will do :n:`rewrite H in H`:sub:`i` for all hypotheses + :g:`H`:sub:`i` :g:`<> H`. A success will happen as soon as at least one of these + simpler tactics succeeds. + + :n:`rewrite H in *` is a combination of :n:`rewrite H` and :n:`rewrite H in * |-` + that succeeds if at least one of these two tactics succeeds. + + Orientation :g:`->` or :g:`<-` can be inserted before the :n:`@term` to rewrite. + +.. 
tacv:: rewrite @term at occurrences + + Rewrite only the given occurrences of :n:`@term`. Occurrences are + specified from left to right as for pattern (:tacn:`pattern`). The rewrite is + always performed using setoid rewriting, even for Leibniz’s equality, so one + has to ``Import Setoid`` to use this variant. + +.. tacv:: rewrite @term by tactic + + Use tactic to completely solve the side-conditions arising from the + :tacn:`rewrite`. + +.. tacv:: rewrite {+ @term} + + Is equivalent to the `n` successive tactics :n:`{+ rewrite @term}`, each one + working on the first subgoal generated by the previous one. Orientation + :g:`->` or :g:`<-` can be inserted before each :n:`@term` to rewrite. One + unique clause can be added at the end after the keyword in; it will then + affect all rewrite operations. + + In all forms of rewrite described above, a :n:`@term` to rewrite can be + immediately prefixed by one of the following modifiers: + + + `?` : the tactic rewrite :n:`?@term` performs the rewrite of :n:`@term` as many + times as possible (perhaps zero time). This form never fails. + + `n?` : works similarly, except that it will do at most `n` rewrites. + + `!` : works as ?, except that at least one rewrite should succeed, otherwise + the tactic fails. + + `n!` (or simply `n`) : precisely `n` rewrites of :n:`@term` will be done, + leading to failure if these n rewrites are not possible. + +.. tacv:: erewrite @term + + This tactic works as :n:`rewrite @term` but turning + unresolved bindings into existential variables, if any, instead of + failing. It has the same variants as :tacn:`rewrite` has. + +.. tacn:: replace @term with @term + :name: replace + + This tactic applies to any goal. It replaces all free occurrences of :n:`@term` + in the current goal with :n:`@term` and generates the equality :n:`@term = + @term` as a subgoal. This equality is automatically solved if it occurs among + the assumption, or if its symmetric form occurs. 
It is equivalent to + :n:`cut @term = @term; [intro H`:sub:`n` :n:`; rewrite <- H`:sub:`n` :n:`; clear H`:sub:`n`:n:`|| assumption || symmetry; try assumption]`. + +.. exn:: @terms do not have convertible types + +.. tacv:: replace @term with @term by tactic + + This acts as :n:`replace @term` with :n:`@term` but applies tactic to solve the generated + subgoal :n:`@term = @term`. + +.. tacv:: replace @term + + Replaces :n:`@term` with :n:`@term’` using the first assumption whose type has + the form :n:`@term = @term’` or :n:`@term’ = @term`. + +.. tacv:: replace -> @term + + Replaces :n:`@term` with :n:`@term’` using the first assumption whose type has + the form :n:`@term = @term’` + +.. tacv:: replace <- @term + + Replaces :n:`@term` with :n:`@term’` using the first assumption whose type has + the form :n:`@term’ = @term` + +.. tacv:: replace @term with @term in clause +.. tacv:: replace @term with @term in clause by tactic +.. tacv:: replace @term in clause replace -> @term in clause +.. tacv:: replace <- @term in clause + + Acts as before but the replacements take place inclause (see + :ref:`performingcomputations`) and not only in the conclusion of the goal. The + clause argument must not contain any type of nor value of. + +.. tacv:: cutrewrite <- (@term = @term) + + This tactic is deprecated. It acts like :n:`replace @term with @term`, or, + equivalently as :n:`enough (@term = @term) as <-`. + +.. tacv:: cutrewrite -> (@term = @term) + + This tactic is deprecated. It can be replaced by enough :n:`(@term = @term) as ->`. + + +.. tacn:: subst @ident + :name: subst + + +This tactic applies to a goal that has :n:`@ident` in its context and (at +least) one hypothesis, say :g:`H`, of type :n:`@ident = t` or :n:`t = @ident` +with :n:`@ident` not occurring in :g:`t`. Then it replaces :n:`@ident` by +:g:`t` everywhere in the goal (in the hypotheses and in the conclusion) and +clears :n:`@ident` and :g:`H` from the context. 
+ +If :n:`@ident` is a local definition of the form :n:`@ident := t`, it is also +unfolded and cleared. + + +.. note:: + When several hypotheses have the form :n:`@ident = t` or :n:`t = @ident`, the + first one is used. + + +.. note:: + If `H` is itself dependent in the goal, it is replaced by the proof of + reflexivity of equality. + + +.. tacv:: subst {+ @ident} + + This is equivalent to :n:`subst @ident`:sub:`1`:n:`; ...; subst @ident`:sub:`n`. + +.. tacv:: subst + + This applies subst repeatedly from top to bottom to all identifiers of the + context for which an equality of the form :n:`@ident = t` or :n:`t = @ident` + or :n:`@ident := t` exists, with :n:`@ident` not occurring in `t`. + + .. note:: + + The behavior of subst can be controlled using option ``Set Regular Subst + Tactic.`` When this option is activated, subst also deals with the + following corner cases: + + + A context with ordered hypotheses :n:`@ident`:sub:`1` :n:`= @ident`:sub:`2` + and :n:`@ident`:sub:`1` :n:`= t`, or :n:`t′ = @ident`:sub:`1`` with `t′` not + a variable, and no other hypotheses of the form :n:`@ident`:sub:`2` :n:`= u` + or :n:`u = @ident`:sub:`2`; without the option, a second call to + subst would be necessary to replace :n:`@ident`:sub:`2` by `t` or + `t′` respectively. + + The presence of a recursive equation which without the option would + be a cause of failure of :tacn:`subst`. + + A context with cyclic dependencies as with hypotheses :n:`@ident`:sub:`1` :n:`= f @ident`:sub:`2` + and :n:`@ident`:sub:`2` :n:`= g @ident`:sub:`1` which without the + option would be a cause of failure of :tacn:`subst`. + + Additionally, it prevents a local definition such as :n:`@ident := t` to be + unfolded which otherwise it would exceptionally unfold in configurations + containing hypotheses of the form :n:`@ident = u`, or :n:`u′ = @ident` + with `u′` not a variable. Finally, it preserves the initial order of + hypotheses, which without the option it may break. 
The option is on by + default. + + +.. tacn:: stepl @term + :name: stepl + + +This tactic is for chaining rewriting steps. It assumes a goal of the +form :n:`R @term @term` where `R` is a binary relation and relies on a +database of lemmas of the form :g:`forall x y z, R x y -> eq x z -> R z y` +where `eq` is typically a setoid equality. The application of :n:`stepl @term` +then replaces the goal by :n:`R @term @term` and adds a new goal stating +:n:`eq @term @term`. + +Lemmas are added to the database using the command ``Declare Left Step @term.`` +The tactic is especially useful for parametric setoids which are not accepted +as regular setoids for :tacn:`rewrite` and :tacn:`setoid_replace` (see +:ref:`TODO-27-Generalizedrewriting`). + +.. tacv:: stepl @term by tactic + + This applies :n:`stepl @term` then applies tactic to the second goal. + +.. tacv:: stepr @term stepr @term by tactic + + This behaves as :tacn:`stepl` but on the right-hand-side of the binary + relation. Lemmas are expected to be of the form :g:`forall x y z, R x y -> eq + y z -> R x z` and are registered using the command ``Declare Right Step + @term.`` + + +.. tacn:: change @term + :name: change + + This tactic applies to any goal. It implements the rule ``Conv`` given in + :ref:`TODO-4.4-Subtypingrules`. :g:`change U` replaces the current goal `T` + with `U` providing that `U` is well-formed and that `T` and `U` are + convertible. + +.. exn:: Not convertible + + +.. tacv:: change @term with @term + + This replaces the occurrences of :n:`@term` by :n:`@term` in the current goal. + The term :n:`@term` and :n:`@term` must be convertible. + +.. tacv:: change @term at {+ @num} with @term + + This replaces the occurrences numbered :n:`{+ @num}` of :n:`@term by @term` + in the current goal. The terms :n:`@term` and :n:`@term` must be convertible. + +.. exn:: Too few occurrences + +.. tacv:: change @term in @ident +.. tacv:: change @term with @term in @ident +.. 
tacv:: change @term at {+ @num} with @term in @ident + + This applies the change tactic not to the goal but to the hypothesis :n:`@ident`. + +See also: :ref:`Performing computations <performingcomputations>` + +.. _performingcomputations: + +Performing computations +--------------------------- + +This set of tactics implements different specialized usages of the +tactic :tacn:`change`. + +All conversion tactics (including :tacn:`change`) can be parameterized by the +parts of the goal where the conversion can occur. This is done using +*goal clauses* which consists in a list of hypotheses and, optionally, +of a reference to the conclusion of the goal. For defined hypothesis +it is possible to specify if the conversion should occur on the type +part, the body part or both (default). + +Goal clauses are written after a conversion tactic (tactics :tacn:`set`, +:tacn:`rewrite`, :tacn:`replace` and :tacn:`autorewrite` also use goal +clauses) and are introduced by the keyword `in`. If no goal clause is +provided, the default is to perform the conversion only in the +conclusion. + +The syntax and description of the various goal clauses is the +following: + ++ :n:`in {+ @ident} |-` only in hypotheses :n:`{+ @ident}` ++ :n:`in {+ @ident} |- *` in hypotheses :n:`{+ @ident}` and in the + conclusion ++ :n:`in * |-` in every hypothesis ++ :n:`in *` (equivalent to in :n:`* |- *`) everywhere ++ :n:`in (type of @ident) (value of @ident) ... |-` in type part of + :n:`@ident`, in the value part of :n:`@ident`, etc. + +For backward compatibility, the notation :n:`in {+ @ident}` performs +the conversion in hypotheses :n:`{+ @ident}`. + +.. tacn:: cbv {* flag} + :name: cbv +.. tacn:: lazy {* flag} + :name: lazy +.. tacn:: compute + :name: compute + + These parameterized reduction tactics apply to any goal and perform + the normalization of the goal according to the specified flags. 
In + correspondence with the kinds of reduction considered in Coq namely + :math:`\beta` (reduction of functional application), :math:`\delta` + (unfolding of transparent constants, see :ref:`TODO-6.10.2-Transparent`), + :math:`\iota` (reduction of + pattern-matching over a constructed term, and unfolding of :g:`fix` and + :g:`cofix` expressions) and :math:`\zeta` (contraction of local definitions), the + flags are either ``beta``, ``delta``, ``match``, ``fix``, ``cofix``, + ``iota`` or ``zeta``. The ``iota`` flag is a shorthand for ``match``, ``fix`` + and ``cofix``. The ``delta`` flag itself can be refined into + :n:`delta {+ @qualid}` or :n:`delta -{+ @qualid}`, restricting in the first + case the constants to unfold to the constants listed, and restricting in the + second case the constant to unfold to all but the ones explicitly mentioned. + Notice that the ``delta`` flag does not apply to variables bound by a let-in + construction inside the :n:`@term` itself (use here the ``zeta`` flag). In + any cases, opaque constants are not unfolded (see :ref:`TODO-6.10.1-Opaque`). + + Normalization according to the flags is done by first evaluating the + head of the expression into a *weak-head* normal form, i.e. until the + evaluation is bloked by a variable (or an opaque constant, or an + axiom), as e.g. in :g:`x u1 ... un` , or :g:`match x with ... end`, or + :g:`(fix f x {struct x} := ...) x`, or is a constructed form (a + :math:`\lambda`-expression, a constructor, a cofixpoint, an inductive type, a + product type, a sort), or is a redex that the flags prevent to reduce. Once a + weak-head normal form is obtained, subterms are recursively reduced using the + same strategy. + + Reduction to weak-head normal form can be done using two strategies: + *lazy* (``lazy`` tactic), or *call-by-value* (``cbv`` tactic). 
The lazy + strategy is a call-by-need strategy, with sharing of reductions: the + arguments of a function call are weakly evaluated only when necessary, + and if an argument is used several times then it is weakly computed + only once. This reduction is efficient for reducing expressions with + dead code. For instance, the proofs of a proposition :g:`exists x. P(x)` + reduce to a pair of a witness :g:`t`, and a proof that :g:`t` satisfies the + predicate :g:`P`. Most of the time, :g:`t` may be computed without computing + the proof of :g:`P(t)`, thanks to the lazy strategy. + + The call-by-value strategy is the one used in ML languages: the + arguments of a function call are systematically weakly evaluated + first. Despite the lazy strategy always performs fewer reductions than + the call-by-value strategy, the latter is generally more efficient for + evaluating purely computational expressions (i.e. with little dead code). + +.. tacv:: compute +.. tacv:: cbv + + These are synonyms for ``cbv beta delta iota zeta``. + +.. tacv:: lazy + + This is a synonym for ``lazy beta delta iota zeta``. + +.. tacv:: compute {+ @qualid} +.. tacv:: cbv {+ @qualid} + + These are synonyms of :n:`cbv beta delta {+ @qualid} iota zeta`. + +.. tacv:: compute -{+ @qualid} +.. tacv:: cbv -{+ @qualid} + + These are synonyms of :n:`cbv beta delta -{+ @qualid} iota zeta`. + +.. tacv:: lazy {+ @qualid} +.. tacv:: lazy -{+ @qualid} + + These are respectively synonyms of :n:`lazy beta delta {+ @qualid} iota zeta` + and :n:`lazy beta delta -{+ @qualid} iota zeta`. + +.. tacv:: vm_compute + + This tactic evaluates the goal using the optimized call-by-value evaluation + bytecode-based virtual machine described in :cite:`CompiledStrongReduction`. + This algorithm is dramatically more efficient than the algorithm used for the + ``cbv`` tactic, but it cannot be fine-tuned. It is specially interesting for + full evaluation of algebraic objects. This includes the case of + reflection-based tactics. 
+ +.. tacv:: native_compute + + This tactic evaluates the goal by compilation to Objective Caml as described + in :cite:`FullReduction`. If Coq is running in native code, it can be + typically two to five times faster than ``vm_compute``. Note however that the + compilation cost is higher, so it is worth using only for intensive + computations. + + .. opt:: NativeCompute Profiling + + On Linux, if you have the ``perf`` profiler installed, this option makes + it possible to profile ``native_compute`` evaluations. + + .. opt:: NativeCompute Profile Filename + + This option specifies the profile output; the default is + ``native_compute_profile.data``. The actual filename used + will contain extra characters to avoid overwriting an existing file; that + filename is reported to the user. + That means you can individually profile multiple uses of + ``native_compute`` in a script. From the Linux command line, run ``perf report`` + on the profile file to see the results. Consult the ``perf`` documentation + for more details. + +.. opt:: Debug Cbv + + This option makes :tacn:`cbv` (and its derivative :tacn:`compute`) print + information about the constants it encounters and the unfolding decisions it + makes. + +.. tacn:: red + :name: red + + This tactic applies to a goal that has the form:: + + forall (x:T1) ... (xk:Tk), t + + with :g:`t` :math:`\beta`:math:`\iota`:math:`\zeta`-reducing to :g:`c t`:sub:`1` :g:`... t`:sub:`n` and :g:`c` a + constant. If :g:`c` is transparent then it replaces :g:`c` with its + definition (say :g:`t`) and then reduces + :g:`(t t`:sub:`1` :g:`... t`:sub:`n` :g:`)` according to :math:`\beta`:math:`\iota`:math:`\zeta`-reduction rules. + +.. exn:: Not reducible + +.. tacn:: hnf + :name: hnf + + This tactic applies to any goal. It replaces the current goal with its + head normal form according to the :math:`\beta`:math:`\delta`:math:`\iota`:math:`\zeta`-reduction rules, i.e. 
it + reduces the head of the goal until it becomes a product or an + irreducible term. All inner :math:`\beta`:math:`\iota`-redexes are also reduced. + + Example: The term :g:`forall n:nat, (plus (S n) (S n))` is not reduced by + :n:`hnf`. + +.. note:: + The :math:`\delta` rule only applies to transparent constants (see :ref:`TODO-6.10.1-Opaque` + on transparency and opacity). + +.. tacn:: cbn + :name: cbn +.. tacn:: simpl + :name: simpl + + These tactics apply to any goal. They try to reduce a term to + something still readable instead of fully normalizing it. They perform + a sort of strong normalization with two key differences: + + + They unfold a constant if and only if it leads to a :math:`\iota`-reduction, + i.e. reducing a match or unfolding a fixpoint. + + While reducing a constant unfolding to (co)fixpoints, the tactics + use the name of the constant the (co)fixpoint comes from instead of + the (co)fixpoint definition in recursive calls. + + The ``cbn`` tactic is claimed to be a more principled, faster and more + predictable replacement for ``simpl``. + + The ``cbn`` tactic accepts the same flags as ``cbv`` and ``lazy``. The + behavior of both ``simpl`` and ``cbn`` can be tuned using the + Arguments vernacular command as follows: + + + A constant can be marked to be never unfolded by ``cbn`` or ``simpl``: + + .. example:: + .. coqtop:: all + + Arguments minus n m : simpl never. + + After that command an expression like :g:`(minus (S x) y)` is left + untouched by the tactics ``cbn`` and ``simpl``. + + + A constant can be marked to be unfolded only if applied to enough + arguments. The number of arguments required can be specified using the + ``/`` symbol in the arguments list of the ``Arguments`` vernacular command. + + .. example:: + .. coqtop:: all + + Definition fcomp A B C f (g : A -> B) (x : A) : C := f (g x). + Notation "f \o g" := (fcomp f g) (at level 50). + Arguments fcomp {A B C} f g x /. 
+ + After that command the expression :g:`(f \o g)` is left untouched by + ``simpl`` while :g:`((f \o g) t)` is reduced to :g:`(f (g t))`. + The same mechanism can be used to make a constant volatile, i.e. + always unfolded. + + .. example:: + .. coqtop:: all + + Definition volatile := fun x : nat => x. + Arguments volatile / x. + + + A constant can be marked to be unfolded only if an entire set of + arguments evaluates to a constructor. The ``!`` symbol can be used to mark + such arguments. + + .. example:: + .. coqtop:: all + + Arguments minus !n !m. + + After that command, the expression :g:`(minus (S x) y)` is left untouched + by ``simpl``, while :g:`(minus (S x) (S y))` is reduced to :g:`(minus x y)`. + + + A special heuristic to determine if a constant has to be unfolded + can be activated with the following command: + + .. example:: + + .. coqtop:: all + + Arguments minus n m : simpl nomatch. + + The heuristic avoids to perform a simplification step that would expose a + match construct in head position. For example the expression + :g:`(minus (S (S x)) (S y))` is simplified to :g:`(minus (S x) y)` + even if an extra simplification is possible. + + In detail, the tactic ``simpl`` first applies :math:`\beta`:math:`\iota`-reduction. Then, it + expands transparent constants and tries to reduce further using :math:`\beta`:math:`\iota`- + reduction. But, when no :math:`\iota` rule is applied after unfolding then + :math:`\delta`-reductions are not applied. For instance trying to use ``simpl`` on + :g:`(plus n O) = n` changes nothing. + + Notice that only transparent constants whose name can be reused in the + recursive calls are possibly unfolded by ``simpl``. For instance a + constant defined by :g:`plus' := plus` is possibly unfolded and reused in + the recursive calls, but a constant such as :g:`succ := plus (S O)` is + never unfolded. This is the main difference between ``simpl`` and ``cbn``. 
The tactic ``cbn`` reduces the constant whether or not it can be reused in the recursive calls:
tacv:: unfold {+, @qualid} + + Replaces *simultaneously* :n:`{+, @qualid}` with their definitions and + replaces the current goal with its :math:`\beta`:math:`\iota` normal form. + +.. tacv:: unfold {+, @qualid at {+, @num }} + + The lists :n:`{+, @num}` specify the occurrences of :n:`@qualid` to be + unfolded. Occurrences are located from left to right. + + .. exn:: bad occurrence number of @qualid + + .. exn:: @qualid does not occur + +.. tacv:: unfold @string + + If :n:`@string` denotes the discriminating symbol of a notation (e.g. "+") or + an expression defining a notation (e.g. `"_ + _"`), and this notation refers to an unfoldable constant, then the + tactic unfolds it. + +.. tacv:: unfold @string%key + + This is variant of :n:`unfold @string` where :n:`@string` gets its + interpretation from the scope bound to the delimiting key :n:`key` + instead of its default interpretation (see :ref:`TODO-12.2.2-Localinterpretationrulesfornotations`). +.. tacv:: unfold {+, qualid_or_string at {+, @num}} + + This is the most general form, where :n:`qualid_or_string` is either a + :n:`@qualid` or a :n:`@string` referring to a notation. + +.. tacn:: fold @term + :name: fold + + This tactic applies to any goal. The term :n:`@term` is reduced using the + ``red`` tactic. Every occurrence of the resulting :n:`@term` in the goal is + then replaced by :n:`@term`. + +.. tacv:: fold {+ @term} + + Equivalent to :n:`fold @term ; ... ; fold @term`. + +.. tacn:: pattern @term + :name: pattern + + This command applies to any goal. The argument :n:`@term` must be a free + subterm of the current goal. 
The command pattern performs :math:`\beta`-expansion + (the inverse of :math:`\beta`-reduction) of the current goal (say :g:`T`) by + + + replacing all occurrences of :n:`@term` in :g:`T` with a fresh variable + + abstracting this variable + + applying the abstracted goal to :n:`@term` + + For instance, if the current goal :g:`T` is expressible as + :math:`\varphi`:g:`(t)` where the notation captures all the instances of :g:`t` + in :math:`\varphi`:g:`(t)`, then :n:`pattern t` transforms it into + :g:`(fun x:A =>` :math:`\varphi`:g:`(x)) t`. This command can be used, for + instance, when the tactic ``apply`` fails on matching. + +.. tacv:: pattern @term at {+ @num} + + Only the occurrences :n:`{+ @num}` of :n:`@term` are considered for + :math:`\beta`-expansion. Occurrences are located from left to right. + +.. tacv:: pattern @term at - {+ @num} + + All occurrences except the occurrences of indexes :n:`{+ @num }` + of :n:`@term` are considered for :math:`\beta`-expansion. Occurrences are located from + left to right. + +.. tacv:: pattern {+, @term} + + Starting from a goal :math:`\varphi`:g:`(t`:sub:`1` :g:`... t`:sub:`m`:g:`)`, + the tactic :n:`pattern t`:sub:`1`:n:`, ..., t`:sub:`m` generates the + equivalent goal + :g:`(fun (x`:sub:`1`:g:`:A`:sub:`1`:g:`) ... (x`:sub:`m` :g:`:A`:sub:`m` :g:`) =>`:math:`\varphi`:g:`(x`:sub:`1` :g:`... x`:sub:`m` :g:`)) t`:sub:`1` :g:`... t`:sub:`m`. + If :g:`t`:sub:`i` occurs in one of the generated types :g:`A`:sub:`j` these + occurrences will also be considered and possibly abstracted. + +.. tacv:: pattern {+, @term at {+ @num}} + + This behaves as above but processing only the occurrences :n:`{+ @num}` of + :n:`@term` starting from :n:`@term`. + +.. tacv:: pattern {+, @term {? at {? -} {+, @num}}} + + This is the most general syntax that combines the different variants. + +Conversion tactics applied to hypotheses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
tacn:: conv_tactic in {+, @ident} + + Applies the conversion tactic :n:`conv_tactic` to the hypotheses + :n:`{+ @ident}`. The tactic :n:`conv_tactic` is any of the conversion tactics + listed in this section. + + If :n:`@ident` is a local definition, then :n:`@ident` can be replaced by + (Type of :n:`@ident`) to address not the body but the type of the local + definition. + + Example: :n:`unfold not in (Type of H1) (Type of H3)`. + +.. exn:: No such hypothesis : ident. + + +.. _automation: + +Automation +---------- + +.. tacn:: auto + :name: auto + +This tactic implements a Prolog-like resolution procedure to solve the +current goal. It first tries to solve the goal using the assumption +tactic, then it reduces the goal to an atomic one using intros and +introduces the newly generated hypotheses as hints. Then it looks at +the list of tactics associated to the head symbol of the goal and +tries to apply one of them (starting from the tactics with lower +cost). This process is recursively applied to the generated subgoals. + +By default, auto only uses the hypotheses of the current goal and the +hints of the database named core. + +.. tacv:: auto @num + + Forces the search depth to be :n:`@num`. The maximal search depth + is `5` by default. + +.. tacv:: auto with {+ @ident} + + Uses the hint databases :n:`{+ @ident}` in addition to the database core. See + :ref:`The Hints Databases for auto and eauto <thehintsdatabasesforautoandeauto>` for the list of + pre-defined databases and the way to create or extend a database. + +.. tacv:: auto with * + + Uses all existing hint databases. See + :ref:`The Hints Databases for auto and eauto <thehintsdatabasesforautoandeauto>` + +.. tacv:: auto using {+ @lemma} + + Uses :n:`{+ @lemma}` in addition to hints (can be combined with the with + :n:`@ident` option). If :n:`@lemma` is an inductive type, it is the + collection of its constructors which is added as hints. + +.. 
tacv:: info_auto + + Behaves like auto but shows the tactics it uses to solve the goal. This + variant is very useful for getting a better understanding of automation, or + to know what lemmas/assumptions were used. + +.. tacv:: debug auto + + Behaves like :tacn:`auto` but shows the tactics it tries to solve the goal, + including failing paths. + +.. tacv:: {? info_}auto {? @num} {? using {+ @lemma}} {? with {+ @ident}} + + This is the most general form, combining the various options. + +.. tacv:: trivial + :name: trivial + + This tactic is a restriction of auto that is not recursive + and tries only hints that cost `0`. Typically it solves trivial + equalities like :g:`X=X`. + +.. tacv:: trivial with {+ @ident} +.. tacv:: trivial with * +.. tacv:: trivial using {+ @lemma} +.. tacv:: debug trivial +.. tacv:: info_trivial +.. tacv:: {? info_}trivial {? using {+ @lemma}} {? with {+ @ident}} + +.. note:: + :tacn:`auto` either solves completely the goal or else leaves it + intact. :tacn:`auto` and :tacn:`trivial` never fail. + +The following options enable printing of informative or debug information for +the :tacn:`auto` and :tacn:`trivial` tactics: + +.. opt:: Info Auto +.. opt:: Debug Auto +.. opt:: Info Trivial +.. opt:: Info Trivial + +See also: :ref:`The Hints Databases for auto and eauto <thehintsdatabasesforautoandeauto>` + +.. tacn:: eauto + :name: eauto + +This tactic generalizes :tacn:`auto`. While :tacn:`auto` does not try +resolution hints which would leave existential variables in the goal, +:tacn:`eauto` does try them (informally speaking, it usessimple :tacn:`eapply` +where :tacn:`auto` uses simple :tacn:`apply`). As a consequence, :tacn:`eauto` +can solve such a goal: + +.. example:: + .. coqtop:: all + + Hint Resolve ex_intro. + Goal forall P:nat -> Prop, P 0 -> exists n, P n. + eauto. + +Note that :tacn:`ex_intro` should be declared as a hint. + + +.. tacv:: {? info_}eauto {? @num} {? using {+ @lemma}} {? 
with {+ @ident}} + + The various options for eauto are the same as for auto. + +:tacn:`eauto` also obeys the following options: + +.. opt:: Info Eauto +.. opt:: Debug Eauto + +See also: :ref:`The Hints Databases for auto and eauto <thehintsdatabasesforautoandeauto>` + + +.. tacn:: autounfold with {+ @ident} + :name: autounfold + + +This tactic unfolds constants that were declared through a ``Hint Unfold`` +in the given databases. + +.. tacv:: autounfold with {+ @ident} in clause + + Performs the unfolding in the given clause. + +.. tacv:: autounfold with * + + Uses the unfold hints declared in all the hint databases. + +.. tacn:: autorewrite with {+ @ident} + :name: autorewrite + +This tactic [4]_ carries out rewritings according the rewriting rule +bases :n:`{+ @ident}`. + +Each rewriting rule of a base :n:`@ident` is applied to the main subgoal until +it fails. Once all the rules have been processed, if the main subgoal has +progressed (e.g., if it is distinct from the initial main goal) then the rules +of this base are processed again. If the main subgoal has not progressed then +the next base is processed. For the bases, the behavior is exactly similar to +the processing of the rewriting rules. + +The rewriting rule bases are built with the ``Hint Rewrite vernacular`` +command. + +.. warn:: This tactic may loop if you build non terminating rewriting systems. + +.. tacv:: autorewrite with {+ @ident} using @tactic + + Performs, in the same way, all the rewritings of the bases :n:`{+ @ident}` + applying tactic to the main subgoal after each rewriting step. + +.. tacv:: autorewrite with {+ @ident} in @qualid + + Performs all the rewritings in hypothesis :n:`@qualid`. + +.. tacv:: autorewrite with {+ @ident} in @qualid using @tactic + + Performs all the rewritings in hypothesis :n:`@qualid` applying :n:`@tactic` + to the main subgoal after each rewriting step. + +.. 
tacv:: autorewrite with {+ @ident} in @clause + + Performs all the rewriting in the clause :n:`@clause`. The clause argument + must not contain any ``type of`` nor ``value of``. + +See also: :ref:`Hint-Rewrite <hintrewrite>` for feeding the database of lemmas used by +:tacn:`autorewrite`. + +See also: :tacn:`autorewrite` for examples showing the use of this tactic. + +.. tacn:: easy + :name: easy + + This tactic tries to solve the current goal by a number of standard closing steps. + In particular, it tries to close the current goal using the closing tactics + :tacn:`trivial`, reflexivity, symmetry, contradiction and inversion of hypothesis. + If this fails, it tries introducing variables and splitting and-hypotheses, + using the closing tactics afterwards, and splitting the goal using + :tacn:`split` and recursing. + + This tactic solves goals that belong to many common classes; in particular, many cases of + unsatisfiable hypotheses, and simple equality goals are usually solved by this tactic. + +.. tacv:: now @tactic + :name: now + + Run :n:`@tac` followed by ``easy``. This is a notation for :n:`@tactic; easy`. + +Controlling automation +-------------------------- + +.. _thehintsdatabasesforautoandeauto: + +The hints databases for auto and eauto +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The hints for ``auto`` and ``eauto`` are stored in databases. Each database +maps head symbols to a list of hints. One can use the command + +.. cmd:: Print Hint @ident + +to display the hints associated to the head symbol :n:`@ident` +(see :ref:`Print Hint <printhint>`). Each hint has a cost that is a nonnegative +integer, and an optional pattern. The hints with lower cost are tried first. A +hint is tried by ``auto`` when the conclusion of the current goal matches its +pattern or when it has no pattern. + +Creating Hint databases +``````````````````````` + +One can optionally declare a hint database using the command ``Create +HintDb``. 
If a hint is added to an unknown database, it will be +automatically created. + +.. cmd:: Create HintDb @ident {? discriminated}. + +This command creates a new database named :n:`@ident`. The database is +implemented by a Discrimination Tree (DT) that serves as an index of +all the lemmas. The DT can use transparency information to decide if a +constant should be indexed or not (c.f. :ref:`The hints databases for auto and eauto <thehintsdatabasesforautoandeauto>`), +making the retrieval more efficient. The legacy implementation (the default one +for new databases) uses the DT only on goals without existentials (i.e., ``auto`` +goals), for non-Immediate hints and do not make use of transparency +hints, putting more work on the unification that is run after +retrieval (it keeps a list of the lemmas in case the DT is not used). +The new implementation enabled by the discriminated option makes use +of DTs in all cases and takes transparency information into account. +However, the order in which hints are retrieved from the DT may differ +from the order in which they were inserted, making this implementation +observationally different from the legacy one. + +The general command to add a hint to some databases :n:`{+ @ident}` is + +.. cmd:: Hint hint_definition : {+ @ident} + +**Variants:** + +.. cmd:: Hint hint_definition + + No database name is given: the hint is registered in the core database. + +.. cmd:: Local Hint hint_definition : {+ @ident} + + This is used to declare hints that must not be exported to the other modules + that require and import the current module. Inside a section, the option + Local is useless since hints do not survive anyway to the closure of + sections. + +.. cmd:: Local Hint hint_definition + + Idem for the core database. + +The ``hint_definition`` is one of the following expressions: + ++ :n:`Resolve @term {? | {? @num} {? 
@pattern}}` + This command adds :n:`simple apply @term` to the hint list with the head symbol of the type of + :n:`@term`. The cost of that hint is the number of subgoals generated by + :n:`simple apply @term` or :n:`@num` if specified. The associated :n:`@pattern` + is inferred from the conclusion of the type of :n:`@term` or the given + :n:`@pattern` if specified. In case the inferred type of :n:`@term` does not + start with a product the tactic added in the hint list is :n:`exact @term`. + In case this type can however be reduced to a type starting with a product, + the tactic :n:`simple apply @term` is also stored in the hints list. If the + inferred type of :n:`@term` contains a dependent quantification on a variable + which occurs only in the premisses of the type and not in its conclusion, no + instance could be inferred for the variable by unification with the goal. In + this case, the hint is added to the hint list of :tacn:`eauto` instead of the + hint list of auto and a warning is printed. A typical example of a hint that + is used only by ``eauto`` is a transitivity lemma. + + .. exn:: @term cannot be used as a hint + + The head symbol of the type of :n:`@term` is a bound variable such that + this tactic cannot be associated to a constant. + + **Variants:** + + + :n:`Resolve {+ @term}` + Adds each :n:`Resolve @term`. + + + :n:`Resolve -> @term` + Adds the left-to-right implication of an equivalence as a hint (informally + the hint will be used as :n:`apply <- @term`, although as mentionned + before, the tactic actually used is a restricted version of ``apply``). + + + :n:`Resolve <- @term` + Adds the right-to-left implication of an equivalence as a hint. + ++ :n:`Immediate @term` + This command adds :n:`simple apply @term; trivial` to the hint list associated + with the head symbol of the type of :n:`@ident` in the given database. 
This + tactic will fail if all the subgoals generated by :n:`simple apply @term` are + not solved immediately by the ``trivial`` tactic (which only tries tactics + with cost 0).This command is useful for theorems such as the symmetry of + equality or :g:`n+1=m+1 -> n=m` that we may like to introduce with a limited + use in order to avoid useless proof-search.The cost of this tactic (which + never generates subgoals) is always 1, so that it is not used by ``trivial`` + itself. + + .. exn:: @term cannot be used as a hint + + **Variants:** + + + :n:`Immediate {+ @term}` + Adds each :n:`Immediate @term`. + ++ :n:`Constructors @ident` + If :n:`@ident` is an inductive type, this command adds all its constructors as + hints of type Resolve. Then, when the conclusion of current goal has the form + :n:`(@ident ...)`, ``auto`` will try to apply each constructor. + + .. exn:: @ident is not an inductive type + + **Variants:** + + + :n:`Constructors {+ @ident}` + Adds each :n:`Constructors @ident`. + ++ :n:`Unfold @qualid` + This adds the tactic :n:`unfold @qualid` to the hint list that will only be + used when the head constant of the goal is :n:`@ident`. + Its cost is 4. + + **Variants:** + + + :n:`Unfold {+ @ident}` + Adds each :n:`Unfold @ident`. + ++ :n:`Transparent`, :n:`Opaque @qualid` + This adds a transparency hint to the database, making :n:`@qualid` a + transparent or opaque constant during resolution. This information is used + during unification of the goal with any lemma in the database and inside the + discrimination network to relax or constrain it in the case of discriminated + databases. + + **Variants:** + + + :n:`Transparent`, :n:`Opaque {+ @ident}` + Declares each :n:`@ident` as a transparent or opaque constant. + ++ :n:`Extern @num {? @pattern} => tactic` + This hint type is to extend ``auto`` with tactics other than ``apply`` and + ``unfold``. For that, we must specify a cost, an optional :n:`@pattern` and a + :n:`tactic` to execute. 
Here is an example:: + + Hint Extern 4 (~(_ = _)) => discriminate. + + Now, when the head of the goal is a disequality, ``auto`` will try + discriminate if it does not manage to solve the goal with hints with a + cost less than 4. One can even use some sub-patterns of the pattern in + the tactic script. A sub-pattern is a question mark followed by an + identifier, like ``?X1`` or ``?X2``. Here is an example: + + .. example:: + .. coqtop:: reset all + + Require Import List. + Hint Extern 5 ({?X1 = ?X2} + {?X1 <> ?X2}) => generalize X1, X2; decide equality : eqdec. + Goal forall a b:list (nat * nat), {a = b} + {a <> b}. + Info 1 auto with eqdec. + ++ :n:`Cut @regexp` + + .. warning:: these hints currently only apply to typeclass + proof search and the ``typeclasses eauto`` tactic (:ref:`TODO-20.6.5-typeclasseseauto`). + + This command can be used to cut the proof-search tree according to a regular + expression matching paths to be cut. The grammar for regular expressions is + the following. Beware, there is no operator precedence during parsing, one can + check with ``Print HintDb`` to verify the current cut expression: + + .. productionlist:: `regexp` + e : ident hint or instance identifier + :|_ any hint + :| e\|e′ disjunction + :| e e′ sequence + :| e * Kleene star + :| emp empty + :| eps epsilon + :| ( e ) + + The `emp` regexp does not match any search path while `eps` + matches the empty path. During proof search, the path of + successive successful hints on a search branch is recorded, as a + list of identifiers for the hints (note Hint Extern’s do not have + an associated identifier). + Before applying any hint :n:`@ident` the current path `p` extended with + :n:`@ident` is matched against the current cut expression `c` associated to + the hint database. If matching succeeds, the hint is *not* applied. The + semantics of ``Hint Cut e`` is to set the cut expression to ``c | e``, the + initial cut expression being `emp`. + ++ :n:`Mode @qualid {* (+ | ! 
| -)}` + This sets an optional mode of use of the identifier :n:`@qualid`. When + proof-search faces a goal that ends in an application of :n:`@qualid` to + arguments :n:`@term ... @term`, the mode tells if the hints associated to + :n:`@qualid` can be applied or not. A mode specification is a list of n ``+``, + ``!`` or ``-`` items that specify if an argument of the identifier is to be + treated as an input (``+``), if its head only is an input (``!``) or an output + (``-``) of the identifier. For a mode to match a list of arguments, input + terms and input heads *must not* contain existential variables or be + existential variables respectively, while outputs can be any term. Multiple + modes can be declared for a single identifier, in that case only one mode + needs to match the arguments for the hints to be applied.The head of a term + is understood here as the applicative head, or the match or projection + scrutinee’s head, recursively, casts being ignored. ``Hint Mode`` is + especially useful for typeclasses, when one does not want to support default + instances and avoid ambiguity in general. Setting a parameter of a class as an + input forces proof-search to be driven by that index of the class, with ``!`` + giving more flexibility by allowing existentials to still appear deeper in the + index but not at its head. + +.. note:: + One can use an ``Extern`` hint with no pattern to do pattern-matching on + hypotheses using ``match goal`` with inside the tactic. + + +Hint databases defined in the Coq standard library +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Several hint databases are defined in the Coq standard library. The +actual content of a database is the collection of the hints declared +to belong to this database in each of the various modules currently +loaded. Especially, requiring new modules potentially extend a +database. At Coq startup, only the core database is non empty and can +be used. 
+ +:core: This special database is automatically used by ``auto``, except when + pseudo-database ``nocore`` is given to ``auto``. The core database + contains only basic lemmas about negation, conjunction, and so on from. + Most of the hints in this database come from the Init and Logic directories. + +:arith: This database contains all lemmas about Peano’s arithmetic proved in the + directories Init and Arith. + +:zarith: contains lemmas about binary signed integers from the directories + theories/ZArith. When required, the module Omega also extends the + database zarith with a high-cost hint that calls ``omega`` on equations + and inequalities in nat or Z. + +:bool: contains lemmas about booleans, mostly from directory theories/Bool. + +:datatypes: is for lemmas about lists, streams and so on that are mainly proved + in the Lists subdirectory. + +:sets: contains lemmas about sets and relations from the directories Sets and + Relations. + +:typeclass_instances: contains all the type class instances declared in the + environment, including those used for ``setoid_rewrite``, + from the Classes directory. + +You are advised not to put your own hints in the core database, but +use one or several databases specific to your development. + +.. _removehints: + +.. cmd:: Remove Hints {+ @term} : {+ @ident} + +This command removes the hints associated to terms :n:`{+ @term}` in databases +:n:`{+ @ident}`. + +.. _printhint: + +.. cmd:: Print Hint + +This command displays all hints that apply to the current goal. It +fails if no proof is being edited, while the two variants can be used +at every moment. + +**Variants:** + + +.. cmd:: Print Hint @ident + + This command displays only tactics associated with :n:`@ident` in the hints + list. This is independent of the goal being edited, so this command will not + fail if no goal is being edited. + +.. cmd:: Print Hint * + + This command displays all declared hints. + +.. 
cmd:: Print HintDb @ident + + This command displays all hints from database :n:`@ident`. + +.. _hintrewrite: + +.. cmd:: Hint Rewrite {+ @term} : {+ @ident} + + This vernacular command adds the terms :n:`{+ @term}` (their types must be + equalities) in the rewriting bases :n:`{+ @ident}` with the default orientation + (left to right). Notice that the rewriting bases are distinct from the ``auto`` + hint bases and thatauto does not take them into account. + + This command is synchronous with the section mechanism (see :ref:`TODO-2.4-Sectionmechanism`): + when closing a section, all aliases created by ``Hint Rewrite`` in that + section are lost. Conversely, when loading a module, all ``Hint Rewrite`` + declarations at the global level of that module are loaded. + +**Variants:** + +.. cmd:: Hint Rewrite -> {+ @term} : {+ @ident} + + This is strictly equivalent to the command above (we only make explicit the + orientation which otherwise defaults to ->). + +.. cmd:: Hint Rewrite <- {+ @term} : {+ @ident} + + Adds the rewriting rules :n:`{+ @term}` with a right-to-left orientation in + the bases :n:`{+ @ident}`. + +.. cmd:: Hint Rewrite {+ @term} using tactic : {+ @ident} + + When the rewriting rules :n:`{+ @term}` in :n:`{+ @ident}` will be used, the + tactic ``tactic`` will be applied to the generated subgoals, the main subgoal + excluded. + +.. cmd:: Print Rewrite HintDb @ident + + This command displays all rewrite hints contained in :n:`@ident`. + +Hint locality +~~~~~~~~~~~~~ + +Hints provided by the ``Hint`` commands are erased when closing a section. +Conversely, all hints of a module ``A`` that are not defined inside a +section (and not defined with option ``Local``) become available when the +module ``A`` is imported (using e.g. ``Require Import A.``). + +As of today, hints only have a binary behavior regarding locality, as +described above: either they disappear at the end of a section scope, +or they remain global forever. 
This causes a scalability issue, +because hints coming from an unrelated part of the code may badly +influence another development. It can be mitigated to some extent +thanks to the ``Remove Hints`` command (see :ref:`Remove Hints <removehints>`), +but this is a mere workaround and has some limitations (for instance, external +hints cannot be removed). + +A proper way to fix this issue is to bind the hints to their module scope, as +for most of the other objects Coq uses. Hints should only made available when +the module they are defined in is imported, not just required. It is very +difficult to change the historical behavior, as it would break a lot of scripts. +We propose a smooth transitional path by providing the ``Loose Hint Behavior`` +option which accepts three flags allowing for a fine-grained handling of +non-imported hints. + +**Variants:** + +.. cmd:: Set Loose Hint Behavior "Lax" + + This is the default, and corresponds to the historical behavior, that + is, hints defined outside of a section have a global scope. + +.. cmd:: Set Loose Hint Behavior "Warn" + + When set, it outputs a warning when a non-imported hint is used. Note that + this is an over-approximation, because a hint may be triggered by a run that + will eventually fail and backtrack, resulting in the hint not being actually + useful for the proof. + +.. cmd:: Set Loose Hint Behavior "Strict" + + When set, it changes the behavior of an unloaded hint to a immediate fail + tactic, allowing to emulate an import-scoped hint mechanism. + +Setting implicit automation tactics +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. cmd:: Proof with tactic + + This command may be used to start a proof. It defines a default tactic + to be used each time a tactic command ``tactic``:sub:`1` is ended by ``...``. + In this case the tactic command typed by the user is equivalent to + ``tactic``:sub:`1` ``;tactic``. + +See also: Proof. in :ref:`TODO-7.1.4-Proofterm`. + +**Variants:** + +.. 
cmd:: Proof with tactic using {+ @ident} + + Combines in a single line ``Proof with`` and ``Proof using``, see :ref:`TODO-7.1.5-Proofusing` + +.. cmd:: Proof using {+ @ident} with tactic + + Combines in a single line ``Proof with`` and ``Proof using``, see :ref:`TODO-7.1.5-Proofusing` + +.. cmd:: Declare Implicit Tactic tactic + + This command declares a tactic to be used to solve implicit arguments + that Coq does not know how to solve by unification. It is used every + time the term argument of a tactic has one of its holes not fully + resolved. + +Here is an example: + +.. example:: + + .. coqtop:: all + + Parameter quo : nat -> forall n:nat, n<>0 -> nat. + Notation "x // y" := (quo x y _) (at level 40). + Declare Implicit Tactic assumption. + Goal forall n m, m<>0 -> { q:nat & { r | q * m + r = n } }. + intros. + exists (n // m). + + The tactic ``exists (n // m)`` did not fail. The hole was solved + by ``assumption`` so that it behaved as ``exists (quo n m H)``. + +.. _decisionprocedures: + +Decision procedures +------------------- + +.. tacn:: tauto + :name: tauto + +This tactic implements a decision procedure for intuitionistic propositional +calculus based on the contraction-free sequent calculi LJT* of Roy Dyckhoff +:cite:`Dyc92`. Note that :tacn:`tauto` succeeds on any instance of an +intuitionistic tautological proposition. :tacn:`tauto` unfolds negations and +logical equivalence but does not unfold any other definition. + +The following goal can be proved by :tacn:`tauto` whereas :tacn:`auto` would +fail: + +.. example:: + .. coqtop:: reset all + + Goal forall (x:nat) (P:nat -> Prop), x = 0 \/ P x -> x <> 0 -> P x. + intros. + tauto. + +Moreover, if it has nothing else to do, :tacn:`tauto` performs introductions. +Therefore, the use of :tacn:`intros` in the previous proof is unnecessary. +:tacn:`tauto` can for instance for: + +.. example:: + + .. 
coqtop:: reset all + + Goal forall (A:Prop) (P:nat -> Prop), A \/ (forall x:nat, ~ A -> P x) -> forall x:nat, ~ A -> P x. + tauto. + +.. note:: + In contrast, :tacn:`tauto` cannot solve the following goal + :g:`Goal forall (A:Prop) (P:nat -> Prop), A \/ (forall x:nat, ~ A -> P x) ->` + :g:`forall x:nat, ~ ~ (A \/ P x).` + because :g:`(forall x:nat, ~ A -> P x)` cannot be treated as atomic and + an instantiation of `x` is necessary. + +.. tacv:: dtauto + + While :tacn:`tauto` recognizes inductively defined connectives isomorphic to + the standard connective ``and, prod, or, sum, False, Empty_set, unit, True``, + :tacn:`dtauto` recognizes also all inductive types with one constructors and + no indices, i.e. record-style connectives. + +.. tacn:: intuition @tactic + :name: intuition + +The tactic :tacn:`intuition` takes advantage of the search-tree built by the +decision procedure involved in the tactic :tacn:`tauto`. It uses this +information to generate a set of subgoals equivalent to the original one (but +simpler than it) and applies the tactic :n:`@tactic` to them :cite:`Mun94`. If +this tactic fails on some goals then :tacn:`intuition` fails. In fact, +:tacn:`tauto` is simply :g:`intuition fail`. + +For instance, the tactic :g:`intuition auto` applied to the goal + +:: + + (forall (x:nat), P x)/\B -> (forall (y:nat),P y)/\ P O \/B/\ P O + + +internally replaces it by the equivalent one: +:: + + (forall (x:nat), P x), B |- P O + + +and then uses :tacn:`auto` which completes the proof. + +Originally due to César Muñoz, these tactics (:tacn:`tauto` and +:tacn:`intuition`) have been completely re-engineered by David Delahaye using +mainly the tactic language (see :ref:`TODO-9-thetacticlanguage`). The code is +now much shorter and a significant increase in performance has been noticed. +The general behavior with respect to dependent types, unfolding and +introductions has slightly changed to get clearer semantics. This may lead to +some incompatibilities. + +.. 
tacv:: intuition + + Is equivalent to :g:`intuition auto with *`. + +.. tacv:: dintuition + + While :tacn:`intuition` recognizes inductively defined connectives + isomorphic to the standard connective ``and, prod, or, sum, False, + Empty_set, unit, True``, :tacn:`dintuition` recognizes also all inductive + types with one constructors and no indices, i.e. record-style connectives. + +Some aspects of the tactic :tacn:`intuition` can be controlled using options. +To avoid that inner negations which do not need to be unfolded are +unfolded, use: + +.. cmd:: Unset Intuition Negation Unfolding + + +To do that all negations of the goal are unfolded even inner ones +(this is the default), use: + +.. cmd:: Set Intuition Negation Unfolding + +.. tacn:: rtauto + :name: rtauto + +The :tacn:`rtauto` tactic solves propositional tautologies similarly to what +:tacn:`tauto` does. The main difference is that the proof term is built using a +reflection scheme applied to a sequent calculus proof of the goal. The search +procedure is also implemented using a different technique. + +Users should be aware that this difference may result in faster proof- search +but slower proof-checking, and :tacn:`rtauto` might not solve goals that +:tacn:`tauto` would be able to solve (e.g. goals involving universal +quantifiers). + + +.. tacn:: firstorder + :name: firstorder + +The tactic :tacn:`firstorder` is an experimental extension of :tacn:`tauto` to +first- order reasoning, written by Pierre Corbineau. It is not restricted to +usual logical connectives but instead may reason about any first-order class +inductive definition. + +The default tactic used by :tacn:`firstorder` when no rule applies is :g:`auto +with \*`, it can be reset locally or globally using the ``Set Firstorder +Solver`` tactic vernacular command and printed using ``Print Firstorder +Solver``. + +.. tacv:: firstorder @tactic + + Tries to solve the goal with :n:`@tactic` when no logical rule may apply. + +.. 
tacv:: firstorder using {+ @qualid} + + Adds lemmas :n:`{+ @qualid}` to the proof-search environment. If :n:`@qualid` + refers to an inductive type, it is the collection of its constructors which are + added to the proof-search environment. + +.. tacv:: firstorder with {+ @ident} + + Adds lemmas from :tacn:`auto` hint bases :n:`{+ @ident}` to the proof-search + environment. + +.. tacv:: firstorder tactic using {+ @qualid} with {+ @ident} + + This combines the effects of the different variants of :tacn:`firstorder`. + +Proof-search is bounded by a depth parameter which can be set by +typing the ``Set Firstorder Depth n`` vernacular command. + +.. tacn:: congruence + :name: congruence + +The tactic :tacn:`congruence`, by Pierre Corbineau, implements the standard +Nelson and Oppen congruence closure algorithm, which is a decision procedure +for ground equalities with uninterpreted symbols. It also include the +constructor theory (see :tacn:`injection` and :tacn:`discriminate`). If the goal +is a non-quantified equality, congruence tries to prove it with non-quantified +equalities in the context. Otherwise it tries to infer a discriminable equality +from those in the context. Alternatively, congruence tries to prove that a +hypothesis is equal to the goal or to the negation of another hypothesis. + +:tacn:`congruence` is also able to take advantage of hypotheses stating +quantified equalities, you have to provide a bound for the number of extra +equalities generated that way. Please note that one of the members of the +equality must contain all the quantified variables in order for congruence to +match against it. + +.. example:: + .. coqtop:: reset all + + Theorem T (A:Type) (f:A -> A) (g: A -> A -> A) a b: a=(f a) -> (g b (f a))=(f (f a)) -> (g a b)=(f (g b a)) -> (g a b)=a. + intros. + congruence. + Qed. + + Theorem inj (A:Type) (f:A -> A * A) (a c d: A) : f = pair a -> Some (f c) = Some (f d) -> c=d. + intros. + congruence. + Qed. + +.. 
tacv:: congruence n + + Tries to add at most `n` instances of hypotheses stating quantified equalities + to the problem in order to solve it. A bigger value of `n` does not make + success slower, only failure. You might consider adding some lemmas as + hypotheses using assert in order for :tacn:`congruence` to use them. + +.. tacv:: congruence with {+ @term} + + Adds :n:`{+ @term}` to the pool of terms used by :tacn:`congruence`. This helps + in case you have partially applied constructors in your goal. + +.. exn:: I don’t know how to handle dependent equality + + The decision procedure managed to find a proof of the goal or of a + discriminable equality but this proof could not be built in Coq because of + dependently-typed functions. + +.. exn:: Goal is solvable by congruence but some arguments are missing. Try congruence with ..., replacing metavariables by arbitrary terms. + + The decision procedure could solve the goal with the provision that additional + arguments are supplied for some partially applied constructors. Any term of an + appropriate type will allow the tactic to successfully solve the goal. Those + additional arguments can be given to congruence by filling in the holes in the + terms given in the error message, using the with variant described above. + +.. opt:: Congruence Verbose + + This option makes :tacn:`congruence` print debug information. + + +Checking properties of terms +---------------------------- + +Each of the following tactics acts as the identity if the check +succeeds, and results in an error otherwise. + +.. tacn:: constr_eq @term @term + :name: constr_eq + + This tactic checks whether its arguments are equal modulo alpha + conversion and casts. + +.. exn:: Not equal + +.. tacn:: unify @term @term + :name: unify + + This tactic checks whether its arguments are unifiable, potentially + instantiating existential variables. + +.. exn:: Not unifiable + +.. 
tacv:: unify @term @term with @ident + + Unification takes the transparency information defined in the hint database + :n:`@ident` into account (see :ref:`the hints databases for auto and eauto <the-hints-databases-for-auto-and-eauto>`). + +.. tacn:: is_evar @term + :name: is_evar + + This tactic checks whether its argument is a current existential + variable. Existential variables are uninstantiated variables generated + by :tacn:`eapply` and some other tactics. + +.. exn:: Not an evar + +.. tacn:: has_evar @term + :name: has_evar + + This tactic checks whether its argument has an existential variable as + a subterm. Unlike context patterns combined with ``is_evar``, this tactic + scans all subterms, including those under binders. + +.. exn:: No evars + +.. tacn:: is_var @term + :name: is_var + + This tactic checks whether its argument is a variable or hypothesis in + the current goal context or in the opened sections. + +.. exn:: Not a variable or hypothesis + + +.. _equality: + +Equality +-------- + + +.. tacn:: f_equal + :name: f_equal + +This tactic applies to a goal of the form :g:`f a`:sub:`1` :g:`... a`:sub:`n` +:g:`= f′a′`:sub:`1` :g:`... a′`:sub:`n`. Using :tacn:`f_equal` on such a goal +leads to subgoals :g:`f=f′` and :g:`a`:sub:`1` = :g:`a′`:sub:`1` and so on up +to :g:`a`:sub:`n` :g:`= a′`:sub:`n`. Amongst these subgoals, the simple ones +(e.g. provable by :tacn:`reflexivity` or :tacn:`congruence`) are automatically +solved by :tacn:`f_equal`. + + +.. tacn:: reflexivity + :name: reflexivity + +This tactic applies to a goal that has the form :g:`t=u`. It checks that `t` +and `u` are convertible and then solves the goal. It is equivalent to apply +:tacn:`refl_equal`. + +.. exn:: The conclusion is not a substitutive equation + +.. exn:: Unable to unify ... with ... + + +.. tacn:: symmetry + :name: symmetry + +This tactic applies to a goal that has the form :g:`t=u` and changes it into +:g:`u=t`. + + +.. 
tacv:: symmetry in @ident + + If the statement of the hypothesis ident has the form :g:`t=u`, the tactic + changes it to :g:`u=t`. + + + +.. tacn:: transitivity @term + :name: transitivity + +This tactic applies to a goal that has the form :g:`t=u` and transforms it +into the two subgoals :n:`t=@term` and :n:`@term=u`. + + +Equality and inductive sets +--------------------------- + +We describe in this section some special purpose tactics dealing with +equality and inductive sets or types. These tactics use the +equality :g:`eq:forall (A:Type), A->A->Prop`, simply written with the infix +symbol :g:`=`. + +.. tacn:: decide equality + :name: decide equality + + This tactic solves a goal of the form :g:`forall x y:R, {x=y}+{ ~x=y}`, + where :g:`R` is an inductive type such that its constructors do not take + proofs or functions as arguments, nor objects in dependent types. It + solves goals of the form :g:`{x=y}+{ ~x=y}` as well. + +.. tacn:: compare @term @term + :name: compare + + This tactic compares two given objects :n:`@term` and :n:`@term` of an + inductive datatype. If :g:`G` is the current goal, it leaves the sub- + goals :n:`@term =@term -> G` and :n:`~ @term = @term -> G`. The type of + :n:`@term` and :n:`@term` must satisfy the same restrictions as in the + tactic ``decide equality``. + +.. tacn:: simplify_eq @term + :name: simplify_eq + + Let :n:`@term` be the proof of a statement of conclusion :n:`@term = @term`. + If :n:`@term` and :n:`@term` are structurally different (in the sense + described for the tactic :tacn:`discriminate`), then the tactic + ``simplify_eq`` behaves as :n:`discriminate @term`, otherwise it behaves as + :n:`injection @term`. + +.. note:: + If some quantified hypothesis of the goal is named :n:`@ident`, + then :n:`simplify_eq @ident` first introduces the hypothesis in the local + context using :n:`intros until @ident`. + +.. 
tacv:: simplify_eq @num + + This does the same thing as :n:`intros until @num` then + :n:`simplify_eq @ident` where :n:`@ident` is the identifier for the last + introduced hypothesis. + +.. tacv:: simplify_eq @term with @bindings_list + + This does the same as :n:`simplify_eq @term` but using the given bindings to + instantiate parameters or hypotheses of :n:`@term`. + +.. tacv:: esimplify_eq @num +.. tacv:: esimplify_eq @term {? with @bindings_list} + + This works the same as ``simplify_eq`` but if the type of :n:`@term`, or the + type of the hypothesis referred to by :n:`@num`, has uninstantiated + parameters, these parameters are left as existential variables. + +.. tacv:: simplify_eq + + If the current goal has form :g:`t1 <> t2`, it behaves as + :n:`intro @ident; simplify_eq @ident`. + +.. tacn:: dependent rewrite -> @ident + :name: dependent rewrite -> + + This tactic applies to any goal. If :n:`@ident` has type + :g:`(existT B a b)=(existT B a' b')` in the local context (i.e. each + :n:`@term` of the equality has a sigma type :g:`{ a:A & (B a)}`) this tactic + rewrites :g:`a` into :g:`a'` and :g:`b` into :g:`b'` in the current goal. + This tactic works even if :g:`B` is also a sigma type. This kind of + equalities between dependent pairs may be derived by the + :tacn:`injection` and :tacn:`inversion` tactics. + +.. tacv:: dependent rewrite <- @ident + + Analogous to :tacn:`dependent rewrite ->` but uses the equality from right to + left. + +Inversion +--------- + +.. tacn:: functional inversion @ident + :name: functional inversion + +:tacn:`functional inversion` is a tactic that performs inversion on hypothesis +:n:`@ident` of the form :n:`@qualid {+ @term} = @term` or :n:`@term = @qualid +{+ @term}` where :n:`@qualid` must have been defined using Function (see +:ref:`TODO-2.3-advancedrecursivefunctions`). Note that this tactic is only +available after a ``Require Import FunInd``. + + +.. exn:: Hypothesis @ident must contain at least one Function +.. 
exn:: Cannot find inversion information for hypothesis @ident + + This error may be raised when some inversion lemma failed to be generated by + Function. + + +.. tacv:: functional inversion @num + + This does the same thing as intros until num thenfunctional inversion ident + where ident is the identifier for the last introduced hypothesis. + +.. tacv:: functional inversion ident qualid +.. tacv:: functional inversion num qualid + + If the hypothesis :n:`@ident` (or :n:`@num`) has a type of the form + :n:`@qualid`:sub:`1` :n:`@term`:sub:`1` ... :n:`@term`:sub:`n` :n:`= + @qualid`:sub:`2` :n:`@term`:sub:`n+1` ... :n:`@term`:sub:`n+m` where + :n:`@qualid`:sub:`1` and :n:`@qualid`:sub:`2` are valid candidates to + functional inversion, this variant allows choosing which :n:`@qualid` is + inverted. + +.. tacn:: quote @ident + :name: quote + +This kind of inversion has nothing to do with the tactic :tacn:`inversion` +above. This tactic does :g:`change (@ident t)`, where `t` is a term built in +order to ensure the convertibility. In other words, it does inversion of the +function :n:`@ident`. This function must be a fixpoint on a simple recursive +datatype: see :ref:`TODO-10.3-quote` for the full details. + + +.. exn:: quote: not a simple fixpoint + + Happens when quote is not able to perform inversion properly. + + +.. tacv:: quote ident {* @ident} + + All terms that are built only with :n:`{* @ident}` will be considered by quote + as constants rather than variables. + +Classical tactics +----------------- + +In order to ease the proving process, when the Classical module is +loaded. A few more tactics are available. Make sure to load the module +using the ``Require Import`` command. + +.. tacn:: classical_left + :name: classical_left +.. tacv:: classical_right + :name: classical_right + + The tactics ``classical_left`` and ``classical_right`` are the analog of the + left and right but using classical logic. They can only be used for + disjunctions. 
Use ``classical_left`` to prove the left part of the + disjunction with the assumption that the negation of right part holds. + Use ``classical_right`` to prove the right part of the disjunction with + the assumption that the negation of left part holds. + +Automatizing +------------ + + +.. tacn:: btauto + :name: btauto + +The tactic :tacn:`btauto` implements a reflexive solver for boolean +tautologies. It solves goals of the form :g:`t = u` where `t` and `u` are +constructed over the following grammar: + +.. _btauto_grammar: + + .. productionlist:: `sentence` + t : x + :∣ true + :∣ false + :∣ orb t1 t2 + :∣ andb t1 t2 + :∣ xorb t1 t2 + :∣ negb t + :∣ if t1 then t2 else t3 + + Whenever the formula supplied is not a tautology, it also provides a + counter-example. + + Internally, it uses a system very similar to the one of the ring + tactic. + + +.. tacn:: omega + :name: omega + +The tactic :tacn:`omega`, due to Pierre Crégut, is an automatic decision +procedure for Presburger arithmetic. It solves quantifier-free +formulas built with `~`, `\/`, `/\`, `->` on top of equalities, +inequalities and disequalities on both the type :g:`nat` of natural numbers +and :g:`Z` of binary integers. This tactic must be loaded by the command +``Require Import Omega``. See the additional documentation about omega +(see Chapter :ref:`TODO-21-omega`). + + +.. tacn:: ring + :name: ring +.. tacn:: ring_simplify {+ @term} + :name: ring_simplify + +The :n:`ring` tactic solves equations upon polynomial expressions of a ring +(or semi-ring) structure. It proceeds by normalizing both hand sides +of the equation (w.r.t. associativity, commutativity and +distributivity, constant propagation) and comparing syntactically the +results. + +:n:`ring_simplify` applies the normalization procedure described above to +the terms given. The tactic then replaces all occurrences of the terms +given in the conclusion of the goal by their normal forms. 
If no term +is given, then the conclusion should be an equation and both hand +sides are normalized. + +See :ref:`TODO-Chapter-25-Theringandfieldtacticfamilies` for more information on +the tactic and how to declare new ring structures. All declared field structures +can be printed with the ``Print Rings`` command. + +.. tacn:: field + :name: field +.. tacn:: field_simplify {+ @term} + :name: field_simplify +.. tacn:: field_simplify_eq + :name: field_simplify_eq + +The field tactic is built on the same ideas as ring: this is a +reflexive tactic that solves or simplifies equations in a field +structure. The main idea is to reduce a field expression (which is an +extension of ring expressions with the inverse and division +operations) to a fraction made of two polynomial expressions. + +Tactic :n:`field` is used to solve subgoals, whereas :n:`field_simplify {+ @term}` +replaces the provided terms by their reduced fraction. +:n:`field_simplify_eq` applies when the conclusion is an equation: it +simplifies both hand sides and multiplies so as to cancel +denominators. So it produces an equation without division nor inverse. + +All of these 3 tactics may generate a subgoal in order to prove that +denominators are different from zero. + +See :ref:`TODO-Chapter-25-Theringandfieldtacticfamilies` for more information on the tactic and how to +declare new field structures. All declared field structures can be +printed with the Print Fields command. + +.. example:: + .. coqtop:: reset all + + Require Import Reals. + Goal forall x y:R, + (x * y > 0)%R -> + (x * (1 / x + x / (x + y)))%R = + ((- 1 / y) * y * (- x * (x / (x + y)) - 1))%R. + + intros; field. + +See also: file plugins/setoid_ring/RealField.v for an example of instantiation, +theory theories/Reals for many examples of use of field. + +.. tacn:: fourier + :name: fourier + +This tactic written by Loïc Pottier solves linear inequalities on real +numbers using Fourier’s method :cite:`Fourier`. 
This tactic must be loaded by +``Require Import Fourier``. + +.. example:: + .. coqtop:: reset all + + Require Import Reals. + Require Import Fourier. + Goal forall x y:R, (x < y)%R -> (y + 1 >= x - 1)%R. + intros; fourier. + +Non-logical tactics +------------------------ + + +.. tacn:: cycle @num + :name: cycle + + This tactic puts the :n:`@num` first goals at the end of the list of goals. + If :n:`@num` is negative, it will put the last :math:`|num|` goals at the + beginning of the list. + +.. example:: + + .. coqtop:: all reset + + Parameter P : nat -> Prop. + Goal P 1 /\ P 2 /\ P 3 /\ P 4 /\ P 5. + repeat split. + all: cycle 2. + all: cycle -3. + +.. tacn:: swap @num @num + :name: swap + + This tactic switches the position of the goals of indices :n:`@num` and + :n:`@num`. If either :n:`@num` or :n:`@num` is negative then goals are + counted from the end of the focused goal list. Goals are indexed from 1, + there is no goal with position 0. + +.. example:: + + .. coqtop:: reset all + + Parameter P : nat -> Prop. + Goal P 1 /\ P 2 /\ P 3 /\ P 4 /\ P 5. + repeat split. + all: swap 1 3. + all: swap 1 -1. + +.. tacn:: revgoals + :name: revgoals + +This tactics reverses the list of the focused goals. + +.. example:: + + .. coqtop:: all reset + + Parameter P : nat -> Prop. + Goal P 1 /\ P 2 /\ P 3 /\ P 4 /\ P 5. + repeat split. + all: revgoals. + +.. tacn:: shelve + :name: shelve + + This tactic moves all goals under focus to a shelf. While on the + shelf, goals will not be focused on. They can be solved by + unification, or they can be called back into focus with the command + :tacn:`Unshelve`. + +.. tacv:: shelve_unifiable + + Shelves only the goals under focus that are mentioned in other goals. + Goals that appear in the type of other goals can be solved by unification. + +.. example:: + + .. coqtop:: all reset + + Goal exists n, n=0. + refine (ex_intro _ _ _). + all:shelve_unifiable. + reflexivity. + +.. 
tacn:: Unshelve + :name: Unshelve + + This command moves all the goals on the shelf (see :tacn:`shelve`) + from the shelf into focus, by appending them to the end of the current + list of focused goals. + +.. tacn:: give_up + :name: give_up + + This tactic removes the focused goals from the proof. They are not + solved, and cannot be solved later in the proof. As the goals are not + solved, the proof cannot be closed. + + The ``give_up`` tactic can be used while editing a proof, to choose to + write the proof script in a non-sequential order. + +Simple tactic macros +------------------------- + +A simple example has more value than a long explanation: + +.. example:: + .. coqtop:: reset all + + Ltac Solve := simpl; intros; auto. + + Ltac ElimBoolRewrite b H1 H2 := + elim b; [ intros; rewrite H1; eauto | intros; rewrite H2; eauto ]. + +The tactics macros are synchronous with the Coq section mechanism: a +tactic definition is deleted from the current environment when you +close the section (see also :ref:`TODO-2.4Sectionmechanism`) where it was +defined. If you want that a tactic macro defined in a module is usable in the +modules that require it, you should put it outside of any section. + +:ref:`TODO-9-Thetacticlanguage` gives examples of more complex +user-defined tactics. + +.. [1] Actually, only the second subgoal will be generated since the + other one can be automatically checked. +.. [2] This corresponds to the cut rule of sequent calculus. +.. [3] Reminder: opaque constants will not be expanded by δ reductions. +.. [4] The behavior of this tactic has much changed compared to the + versions available in the previous distributions (V6). This may cause + significant changes in your theories to obtain the same result. As a + drawback of the re-engineering of the code, this tactic has also been + completely revised to get a very compact and readable version. 
diff --git a/doc/sphinx/replaces.rst b/doc/sphinx/replaces.rst new file mode 100644 index 0000000000..d4f6835ef4 --- /dev/null +++ b/doc/sphinx/replaces.rst @@ -0,0 +1,79 @@ +.. some handy replacements for common items + +.. role:: smallcaps + +.. |A_1| replace:: `A`\ :math:`_{1}` +.. |A_n| replace:: `A`\ :math:`_{n}` +.. |arg_1| replace:: `arg`\ :math:`_{1}` +.. |arg_n| replace:: `arg`\ :math:`_{n}` +.. |bdi| replace:: :math:`\beta\delta\iota` +.. |binder_1| replace:: `binder`\ :math:`_{1}` +.. |binder_n| replace:: `binder`\ :math:`_{n}` +.. |binders_1| replace:: `binders`\ :math:`_{1}` +.. |binders_n| replace:: `binders`\ :math:`_{n}` +.. |C_1| replace:: `C`\ :math:`_{1}` +.. |c_1| replace:: `c`\ :math:`_{1}` +.. |C_2| replace:: `C`\ :math:`_{2}` +.. |c_i| replace:: `c`\ :math:`_{i}` +.. |c_n| replace:: `c`\ :math:`_{n}` +.. |Cic| replace:: :smallcaps:`Cic` +.. |class_1| replace:: `class`\ :math:`_{1}` +.. |class_2| replace:: `class`\ :math:`_{2}` +.. |Coq| replace:: :smallcaps:`Coq` +.. |CoqIDE| replace:: :smallcaps:`CoqIDE` +.. |eq_beta_delta_iota_zeta| replace:: `=`\ :math:`_{\small{\beta\delta\iota\zeta}}` +.. |Gallina| replace:: :smallcaps:`Gallina` +.. |ident_0| replace:: `ident`\ :math:`_{0}` +.. |ident_1,1| replace:: `ident`\ :math:`_{1,1}` +.. |ident_1,k_1| replace:: `ident`\ :math:`_{1,k_1}`) +.. |ident_1| replace:: `ident`\ :math:`_{1}` +.. |ident_2| replace:: `ident`\ :math:`_{2}` +.. |ident_3| replace:: `ident`\ :math:`_{3}` +.. |ident_i| replace:: `ident`\ :math:`_{i}` +.. |ident_j| replace:: `ident`\ :math:`_{j}` +.. |ident_k| replace:: `ident`\ :math:`_{k}` +.. |ident_n,1| replace:: `ident`\ :math:`_{n,1}` +.. |ident_n,k_n| replace:: `ident`\ :math:`_{n,k_n}` +.. |ident_n| replace:: `ident`\ :math:`_{n}` +.. |L_tac| replace:: `L`:sub:`tac` +.. |ML| replace:: :smallcaps:`ML` +.. |mod_0| replace:: `mod`\ :math:`_{0}` +.. |mod_1| replace:: `mod`\ :math:`_{1}` +.. |mod_2| replace:: `mod`\ :math:`_{1}` +.. |mod_n| replace:: `mod`\ :math:`_{n}` +.. 
|module_0| replace:: `module`\ :math:`_{0}` +.. |module_1| replace:: `module`\ :math:`_{1}` +.. |module_expression_0| replace:: `module_expression`\ :math:`_{0}` +.. |module_expression_1| replace:: `module_expression`\ :math:`_{1}` +.. |module_expression_i| replace:: `module_expression`\ :math:`_{i}` +.. |module_expression_n| replace:: `module_expression`\ :math:`_{n}` +.. |module_n| replace:: `module`\ :math:`_{n}` +.. |module_type_0| replace:: `module_type`\ :math:`_{0}` +.. |module_type_1| replace:: `module_type`\ :math:`_{1}` +.. |module_type_i| replace:: `module_type`\ :math:`_{i}` +.. |module_type_n| replace:: `module_type`\ :math:`_{n}` +.. |N| replace:: ``N`` +.. |nat| replace:: ``nat`` +.. |Ocaml| replace:: :smallcaps:`OCaml` +.. |p_1| replace:: `p`\ :math:`_{1}` +.. |p_i| replace:: `p`\ :math:`_{i}` +.. |p_n| replace:: `p`\ :math:`_{n}` +.. |Program| replace:: :strong:`Program` +.. |SSR| replace:: :smallcaps:`SSReflect` +.. |t_1| replace:: `t`\ :math:`_{1}` +.. |t_i| replace:: `t`\ :math:`_{i}` +.. |t_m| replace:: `t`\ :math:`_{m}` +.. |t_n| replace:: `t`\ :math:`_{n}` +.. |term_0| replace:: `term`\ :math:`_{0}` +.. |term_1| replace:: `term`\ :math:`_{1}` +.. |term_2| replace:: `term`\ :math:`_{2}` +.. |term_n| replace:: `term`\ :math:`_{n}` +.. |type_0| replace:: `type`\ :math:`_{0}` +.. |type_1| replace:: `type`\ :math:`_{1}` +.. |type_2| replace:: `type`\ :math:`_{2}` +.. |type_3| replace:: `type`\ :math:`_{3}` +.. |type_n| replace:: `type`\ :math:`_{n}` +.. |x_1| replace:: `x`\ :math:`_{1}` +.. |x_i| replace:: `x`\ :math:`_{i}` +.. |x_n| replace:: `x`\ :math:`_{n}` +.. |Z| replace:: ``Z`` diff --git a/doc/sphinx/user-extensions/proof-schemes.rst b/doc/sphinx/user-extensions/proof-schemes.rst new file mode 100644 index 0000000000..583b73e53d --- /dev/null +++ b/doc/sphinx/user-extensions/proof-schemes.rst @@ -0,0 +1,374 @@ +.. 
_proofschemes: + +Proof schemes +=============== + +Generation of induction principles with ``Scheme`` +-------------------------------------------------------- + +The ``Scheme`` command is a high-level tool for generating automatically +(possibly mutual) induction principles for given types and sorts. Its +syntax follows the schema: + +.. cmd:: Scheme ident := Induction for ident' Sort sort {* with ident := Induction for ident' Sort sort} + +where each `ident'ᵢ` is a different inductive type identifier +belonging to the same package of mutual inductive definitions. This +command generates the `identᵢ`s to be mutually recursive +definitions. Each term `identᵢ` proves a general principle of mutual +induction for objects in type `identᵢ`. + +.. cmdv:: Scheme ident := Minimality for ident' Sort sort {* with ident := Minimality for ident' Sort sort} + + Same as before but defines a non-dependent elimination principle more + natural in case of inductively defined relations. + +.. cmdv:: Scheme Equality for ident + + Tries to generate a Boolean equality and a proof of the decidability of the usual equality. If `ident` + involves some other inductive types, their equality has to be defined first. + +.. cmdv:: Scheme Induction for ident Sort sort {* with Induction for ident Sort sort} + + If you do not provide the name of the schemes, they will be automatically computed from the + sorts involved (works also with Minimality). + +.. example:: + + Induction scheme for tree and forest. + + The definition of principle of mutual induction for tree and forest + over the sort Set is defined by the command: + + .. coqtop:: none + + Axiom A : Set. + Axiom B : Set. + + .. coqtop:: all + + Inductive tree : Set := node : A -> forest -> tree + with forest : Set := + leaf : B -> forest + | cons : tree -> forest -> forest. + + Scheme tree_forest_rec := Induction for tree Sort Set + with forest_tree_rec := Induction for forest Sort Set. 
+ + You may now look at the type of tree_forest_rec: + + .. coqtop:: all + + Check tree_forest_rec. + + This principle involves two different predicates for trees andforests; + it also has three premises each one corresponding to a constructor of + one of the inductive definitions. + + The principle `forest_tree_rec` shares exactly the same premises, only + the conclusion now refers to the property of forests. + +.. example:: + + Predicates odd and even on naturals. + + Let odd and even be inductively defined as: + + .. coqtop:: all + + Inductive odd : nat -> Prop := oddS : forall n:nat, even n -> odd (S n) + with even : nat -> Prop := + | evenO : even 0 + | evenS : forall n:nat, odd n -> even (S n). + + The following command generates a powerful elimination principle: + + .. coqtop:: all + + Scheme odd_even := Minimality for odd Sort Prop + with even_odd := Minimality for even Sort Prop. + + The type of odd_even for instance will be: + + .. coqtop:: all + + Check odd_even. + + The type of `even_odd` shares the same premises but the conclusion is + `(n:nat)(even n)->(P0 n)`. + + +Automatic declaration of schemes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is possible to deactivate the automatic declaration of the +induction principles when defining a new inductive type with the +``Unset Elimination Schemes`` command. It may be reactivated at any time with +``Set Elimination Schemes``. + +The types declared with the keywords ``Variant`` (see :ref:`TODO-1.3.3`) and ``Record`` +(see :ref:`Record Types <record-types>`) do not have an automatic declaration of the induction +principles. It can be activated with the command +``Set Nonrecursive Elimination Schemes``. It can be deactivated again with +``Unset Nonrecursive Elimination Schemes``. + +In addition, the ``Case Analysis Schemes`` flag governs the generation of +case analysis lemmas for inductive types, i.e. corresponding to the +pattern-matching term alone and without fixpoint. 
+You can also activate the automatic declaration of those Boolean +equalities (see the second variant of ``Scheme``) with respectively the +commands ``Set Boolean Equality Schemes`` and ``Set Decidable Equality +Schemes``. However you have to be careful with this option since Coq may +now reject well-defined inductive types because it cannot compute a +Boolean equality for them. + +.. opt:: Rewriting Schemes + + This flag governs generation of equality-related schemes such as congruence. + +Combined Scheme +~~~~~~~~~~~~~~~~~~~~~~ + +The ``Combined Scheme`` command is a tool for combining induction +principles generated by the ``Scheme command``. Its syntax follows the +schema : + +.. cmd:: Combined Scheme ident from {+, ident} + +where each identᵢ after the ``from`` is a different inductive principle that must +belong to the same package of mutual inductive principle definitions. +This command generates the leftmost `ident` to be the conjunction of the +principles: it is built from the common premises of the principles and +concluded by the conjunction of their conclusions. + +.. example:: + + We can define the induction principles for trees and forests using: + + .. coqtop:: all + + Scheme tree_forest_ind := Induction for tree Sort Prop + with forest_tree_ind := Induction for forest Sort Prop. + + Then we can build the combined induction principle which gives the + conjunction of the conclusions of each individual principle: + + .. coqtop:: all + + Combined Scheme tree_forest_mutind from tree_forest_ind,forest_tree_ind. + + The type of tree_forest_mutrec will be: + + .. coqtop:: all + + Check tree_forest_mutind. + +Generation of induction principles with ``Functional`` ``Scheme`` +----------------------------------------------------------------- + +The ``Functional Scheme`` command is a high-level experimental tool for +generating automatically induction principles corresponding to +(possibly mutually recursive) functions. 
First, it must be made +available via ``Require Import FunInd``. Its syntax then follows the +schema: + +.. cmd:: Functional Scheme ident := Induction for ident' Sort sort {* with ident := Induction for ident' Sort sort} + +where each `ident'ᵢ` is a different mutually defined function +name (the names must be in the same order as when they were defined). This +command generates the induction principle for each `identᵢ`, following +the recursive structure and case analyses of the corresponding function +identᵢ’. + +Remark: There is a difference between obtaining an induction scheme by +using ``Functional Scheme`` on a function defined by ``Function`` or not. +Indeed, ``Function`` generally produces smaller principles, closer to the +definition written by the user. + +.. example:: + + Induction scheme for div2. + + We define the function div2 as follows: + + .. coqtop:: all + + Require Import FunInd. + Require Import Arith. + + Fixpoint div2 (n:nat) : nat := + match n with + | O => 0 + | S O => 0 + | S (S n') => S (div2 n') + end. + + The definition of a principle of induction corresponding to the + recursive structure of `div2` is defined by the command: + + .. coqtop:: all + + Functional Scheme div2_ind := Induction for div2 Sort Prop. + + You may now look at the type of div2_ind: + + .. coqtop:: all + + Check div2_ind. + + We can now prove the following lemma using this principle: + + .. coqtop:: all + + Lemma div2_le' : forall n:nat, div2 n <= n. + intro n. + pattern n, (div2 n). + apply div2_ind; intros. + auto with arith. + auto with arith. + simpl; auto with arith. + Qed. + + We can use directly the functional induction (:ref:`TODO-8.5.5`) tactic instead + of the pattern/apply trick: + + .. coqtop:: all + + Reset div2_le'. + + Lemma div2_le : forall n:nat, div2 n <= n. + intro n. + functional induction (div2 n). + auto with arith. + auto with arith. + auto with arith. + Qed. 
+ + Remark: There is a difference between obtaining an induction scheme + for a function by using ``Function`` (see :ref:`advanced-recursive-functions`) and by using + ``Functional Scheme`` after a normal definition using ``Fixpoint`` or + ``Definition``. See :ref:`advanced-recursive-functions` for details. + +.. example:: + + Induction scheme for tree_size. + + We define trees by the following mutual inductive type: + + .. original LaTeX had "Variable" instead of "Axiom", which generates an ugly warning + + .. coqtop:: reset all + + Axiom A : Set. + + Inductive tree : Set := + node : A -> forest -> tree + with forest : Set := + | empty : forest + | cons : tree -> forest -> forest. + + We define the function tree_size that computes the size of a tree or a + forest. Note that we use ``Function`` which generally produces better + principles. + + .. coqtop:: all + + Require Import FunInd. + + Function tree_size (t:tree) : nat := + match t with + | node A f => S (forest_size f) + end + with forest_size (f:forest) : nat := + match f with + | empty => 0 + | cons t f' => (tree_size t + forest_size f') + end. + + Remark: Function generates itself non mutual induction principles + tree_size_ind and forest_size_ind: + + .. coqtop:: all + + Check tree_size_ind. + + The definition of mutual induction principles following the recursive + structure of `tree_size` and `forest_size` is defined by the command: + + .. coqtop:: all + + Functional Scheme tree_size_ind2 := Induction for tree_size Sort Prop + with forest_size_ind2 := Induction for forest_size Sort Prop. + + You may now look at the type of `tree_size_ind2`: + + .. coqtop:: all + + Check tree_size_ind2. + +Generation of inversion principles with ``Derive`` ``Inversion`` +----------------------------------------------------------------- + +The syntax of ``Derive`` ``Inversion`` follows the schema: + +.. 
cmd:: Derive Inversion ident with forall (x : T), I t Sort sort + +This command generates an inversion principle for the `inversion … using` +tactic. Let `I` be an inductive predicate and `x` the variables occurring +in t. This command generates and stocks the inversion lemma for the +sort `sort` corresponding to the instance `∀ (x:T), I t` with the name +`ident` in the global environment. When applied, it is equivalent to +having inverted the instance with the tactic `inversion`. + +.. cmdv:: Derive Inversion_clear ident with forall (x:T), I t Sort sort + + When applied, it is equivalent to having inverted the instance with the + tactic inversion replaced by the tactic `inversion_clear`. + +.. cmdv:: Derive Dependent Inversion ident with forall (x:T), I t Sort sort + + When applied, it is equivalent to having inverted the instance with + the tactic `dependent inversion`. + +.. cmdv:: Derive Dependent Inversion_clear ident with forall(x:T), I t Sort sort + + When applied, it is equivalent to having inverted the instance + with the tactic `dependent inversion_clear`. + +.. example:: + + Let us consider the relation `Le` over natural numbers and the following + variable: + + .. original LaTeX had "Variable" instead of "Axiom", which generates an ugly warning + + .. coqtop:: all + + Inductive Le : nat -> nat -> Set := + | LeO : forall n:nat, Le 0 n + | LeS : forall n m:nat, Le n m -> Le (S n) (S m). + + Axiom P : nat -> nat -> Prop. + + To generate the inversion lemma for the instance `(Le (S n) m)` and the + sort `Prop`, we do: + + .. coqtop:: all + + Derive Inversion_clear leminv with (forall n m:nat, Le (S n) m) Sort Prop. + Check leminv. + + Then we can use the proven inversion lemma: + + .. the original LaTeX did not have any Coq code to setup the goal + + .. coqtop:: none + + Goal forall (n m : nat) (H : Le (S n) m), P n m. + intros. + + .. coqtop:: all + + Show. + + inversion H using leminv. 
diff --git a/doc/sphinx/user-extensions/syntax-extensions.rst b/doc/sphinx/user-extensions/syntax-extensions.rst new file mode 100644 index 0000000000..6e6d664475 --- /dev/null +++ b/doc/sphinx/user-extensions/syntax-extensions.rst @@ -0,0 +1,1323 @@ +.. include:: ../replaces.rst + +.. _syntaxextensionsandinterpretationscopes: + +Syntax extensions and interpretation scopes +======================================================== + +In this chapter, we introduce advanced commands to modify the way Coq +parses and prints objects, i.e. the translations between the concrete +and internal representations of terms and commands. + +The main commands to provide custom symbolic notations for terms are +``Notation`` and ``Infix``. They are described in section 12.1. There is also a +variant of ``Notation`` which does not modify the parser. This provides with a +form of abbreviation and it is described in Section :ref:`Abbreviations`. It is +sometimes expected that the same symbolic notation has different meanings in +different contexts. To achieve this form of overloading, |Coq| offers a notion +of interpretation scope. This is described in Section :ref:`scopes`. + +The main command to provide custom notations for tactics is ``Tactic Notation``. +It is described in Section :ref:`TacticNotation`. + +.. coqtop:: none + + Set Printing Depth 50. + +Notations +--------- + +Basic notations +~~~~~~~~~~~~~~~ + +A *notation* is a symbolic expression denoting some term or term +pattern. + +A typical notation is the use of the infix symbol ``/\`` to denote the +logical conjunction (and). Such a notation is declared by + +.. coqtop:: in + + Notation "A /\ B" := (and A B). + +The expression :g:`(and A B)` is the abbreviated term and the string ``"A /\ B"`` +(called a *notation*) tells how it is symbolically written. + +A notation is always surrounded by double quotes (except when the +abbreviation has the form of an ordinary applicative expression; +see :ref:`Abbreviations`). 
The notation is composed of *tokens* separated by +spaces. Identifiers in the string (such as ``A`` and ``B``) are the *parameters* +of the notation. They must occur at least once each in the denoted term. The +other elements of the string (such as ``/\``) are the *symbols*. + +An identifier can be used as a symbol but it must be surrounded by +simple quotes to avoid the confusion with a parameter. Similarly, +every symbol of at least 3 characters and starting with a simple quote +must be quoted (then it starts by two single quotes). Here is an +example. + +.. coqtop:: in + + Notation "'IF' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3). + +A notation binds a syntactic expression to a term. Unless the parser +and pretty-printer of Coq already know how to deal with the syntactic +expression (see 12.1.7), explicit precedences and associativity rules +have to be given. + +.. note:: + + The right-hand side of a notation is interpreted at the time the notation is + given. In particular, disambiguiation of constants, implicit arguments (see + Section :ref:`ImplicitArguments`), coercions (see Section :ref:`Coercions`), + etc. are resolved at the time of the declaration of the notation. + +Precedences and associativity +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Mixing different symbolic notations in the same text may cause serious +parsing ambiguity. To deal with the ambiguity of notations, Coq uses +precedence levels ranging from 0 to 100 (plus one extra level numbered +200) and associativity rules. + +Consider for example the new notation + +.. coqtop:: in + + Notation "A \/ B" := (or A B). + +Clearly, an expression such as :g:`forall A:Prop, True /\ A \/ A \/ False` +is ambiguous. To tell the Coq parser how to interpret the +expression, a priority between the symbols ``/\`` and ``\/`` has to be +given. Assume for instance that we want conjunction to bind more than +disjunction. 
This is expressed by assigning a precedence level to each +notation, knowing that a lower level binds more than a higher level. +Hence the level for disjunction must be higher than the level for +conjunction. + +Since connectives are not tight articulation points of a text, it +is reasonable to choose levels not so far from the highest level which +is 100, for example 85 for disjunction and 80 for conjunction [#and_or_levels]_. + +Similarly, an associativity is needed to decide whether :g:`True /\ False /\ False` +defaults to :g:`True /\ (False /\ False)` (right associativity) or to +:g:`(True /\ False) /\ False` (left associativity). We may even consider that the +expression is not well- formed and that parentheses are mandatory (this is a “no +associativity”) [#no_associativity]_. We do not know of a special convention of +the associativity of disjunction and conjunction, so let us apply for instance a +right associativity (which is the choice of Coq). + +Precedence levels and associativity rules of notations have to be +given between parentheses in a list of modifiers that the ``Notation`` +command understands. Here is how the previous examples refine. + +.. coqtop:: in + + Notation "A /\ B" := (and A B) (at level 80, right associativity). + Notation "A \/ B" := (or A B) (at level 85, right associativity). + +By default, a notation is considered non associative, but the +precedence level is mandatory (except for special cases whose level is +canonical). The level is either a number or the phrase `next level` +whose meaning is obvious. The list of levels already assigned is on +Figure 3.1. + +.. TODO I don't find it obvious -- CPC + +Complex notations +~~~~~~~~~~~~~~~~~ + +Notations can be made from arbitrarily complex symbols. One can for +instance define prefix notations. + +.. coqtop:: in + + Notation "~ x" := (not x) (at level 75, right associativity). 
+ +One can also define notations for incomplete terms, with the hole +expected to be inferred at typing time. + +.. coqtop:: in + + Notation "x = y" := (@eq _ x y) (at level 70, no associativity). + +One can define *closed* notations whose both sides are symbols. In this case, +the default precedence level for the inner subexpression is 200, and the default +level for the notation itself is 0. + +.. coqtop:: in + + Notation "( x , y )" := (@pair _ _ x y). + +One can also define notations for binders. + +.. coqtop:: in + + Notation "{ x : A | P }" := (sig A (fun x => P)). + +In the last case though, there is a conflict with the notation for +type casts. The notation for types casts, as shown by the command :cmd:`Print +Grammar constr` is at level 100. To avoid ``x : A`` being parsed as a type cast, +it is necessary to put x at a level below 100, typically 99. Hence, a correct +definition is the following: + +.. coqtop:: all + + Notation "{ x : A | P }" := (sig A (fun x => P)) (x at level 99). + +More generally, it is required that notations are explicitly factorized on the +left. See the next section for more about factorization. + +Simple factorization rules +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Coq extensible parsing is performed by *Camlp5* which is essentially a LL1 +parser: it decides which notation to parse by looking tokens from left to right. +Hence, some care has to be taken not to hide already existing rules by new +rules. Some simple left factorization work has to be done. Here is an example. + +.. coqtop:: all + + Notation "x < y" := (lt x y) (at level 70). + Notation "x < y < z" := (x < y /\ y < z) (at level 70). + +In order to factorize the left part of the rules, the subexpression +referred by y has to be at the same level in both rules. However the +default behavior puts y at the next level below 70 in the first rule +(no associativity is the default), and at the level 200 in the second +rule (level 200 is the default for inner expressions). 
To fix this, we +need to force the parsing level of y, as follows. + +.. coqtop:: all + + Notation "x < y" := (lt x y) (at level 70). + Notation "x < y < z" := (x < y /\ y < z) (at level 70, y at next level). + +For the sake of factorization with Coq predefined rules, simple rules +have to be observed for notations starting with a symbol: e.g. rules +starting with “{” or “(” should be put at level 0. The list of Coq +predefined notations can be found in Chapter 3. + +.. cmd:: Print Grammar constr. + + This command displays the current state of the Coq term parser. + +.. cmd:: Print Grammar pattern. + + This displays the state of the subparser of patterns (the parser used in the + grammar of the match with constructions). + + +Displaying symbolic notations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The command ``Notation`` has an effect both on the Coq parser and on the +Coq printer. For example: + +.. coqtop:: all + + Check (and True True). + +However, printing, especially pretty-printing, also requires some +care. We may want specific indentations, line breaks, alignment if on +several lines, etc. For pretty-printing, |Coq| relies on |ocaml| +formatting library, which provides indentation and automatic line +breaks depending on page width by means of *formatting boxes*. + +The default printing of notations is rudimentary. For printing a +notation, a formatting box is opened in such a way that if the +notation and its arguments cannot fit on a single line, a line break +is inserted before the symbols of the notation and the arguments on +the next lines are aligned with the argument on the first line. + +A first, simple control that a user can have on the printing of a +notation is the insertion of spaces at some places of the notation. +This is performed by adding extra spaces between the symbols and +parameters: each extra space (other than the single space needed to +separate the components) is interpreted as a space to be inserted by +the printer. 
Here is an example showing how to add spaces around the +bar of the notation. + +.. coqtop:: in + + Notation "{{ x : A | P }}" := (sig (fun x : A => P)) (at level 0, x at level 99). + +.. coqtop:: all + + Check (sig (fun x : nat => x=x)). + +The second, more powerful control on printing is by using the format +modifier. Here is an example + +.. coqtop:: all + + Notation "'If' c1 'then' c2 'else' c3" := (IF_then_else c1 c2 c3) + (at level 200, right associativity, format + "'[v ' 'If' c1 '/' '[' 'then' c2 ']' '/' '[' 'else' c3 ']' ']'"). + +.. coqtop:: all + + Check + (IF_then_else (IF_then_else True False True) + (IF_then_else True False True) + (IF_then_else True False True)). + +A *format* is an extension of the string denoting the notation with +the possible following elements delimited by single quotes: + +- extra spaces are translated into simple spaces + +- tokens of the form ``'/ '`` are translated into breaking point, in + case a line break occurs, an indentation of the number of spaces after + the “ ``/``” is applied (2 spaces in the given example) + +- token of the form ``'//'`` force writing on a new line + +- well-bracketed pairs of tokens of the form ``'[ '`` and ``']'`` are + translated into printing boxes; in case a line break occurs, an extra + indentation of the number of spaces given after the “ ``[``” is applied + (4 spaces in the example) + +- well-bracketed pairs of tokens of the form ``'[hv '`` and ``']'`` are + translated into horizontal-orelse-vertical printing boxes; if the + content of the box does not fit on a single line, then every breaking + point forces a newline and an extra indentation of the number of + spaces given after the “ ``[``” is applied at the beginning of each + newline (3 spaces in the example) + +- well-bracketed pairs of tokens of the form ``'[v '`` and ``']'`` are + translated into vertical printing boxes; every breaking point forces a + newline, even if the line is large enough to display the whole content + of the 
box, and an extra indentation of the number of spaces given + after the “``[``” is applied at the beginning of each newline + +Notations do not survive the end of sections. No typing of the denoted +expression is performed at definition time. Type-checking is done only +at the time of use of the notation. + +.. note:: Sometimes, a notation is expected only for the parser. To do + so, the option ``only parsing`` is allowed in the list of modifiers + of ``Notation``. Conversely, the ``only printing`` modifier can be + used to declare that a notation should only be used for printing and + should not declare a parsing rule. In particular, such notations do + not modify the parser. + +The Infix command +~~~~~~~~~~~~~~~~~~ + +The ``Infix`` command is a shortening for declaring notations of infix +symbols. + +.. cmd:: Infix "@symbol" := @term ({+, @modifier}). + + This command is equivalent to + + :n:`Notation "x @symbol y" := (@term x y) ({+, @modifier}).` + + where ``x`` and ``y`` are fresh names. Here is an example. + + .. coqtop:: in + + Infix "/\" := and (at level 80, right associativity). + +Reserving notations +~~~~~~~~~~~~~~~~~~~ + +A given notation may be used in different contexts. Coq expects all +uses of the notation to be defined at the same precedence and with the +same associativity. To avoid giving the precedence and associativity +every time, it is possible to declare a parsing rule in advance +without giving its interpretation. Here is an example from the initial +state of Coq. + +.. coqtop:: in + + Reserved Notation "x = y" (at level 70, no associativity). + +Reserving a notation is also useful for simultaneously defining an +inductive type or a recursive constant and a notation for it. + +.. note:: The notations mentioned on Figure 3.1 are reserved. Hence + their precedence and associativity cannot be changed. 
+ +Simultaneous definition of terms and notations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Thanks to reserved notations, the inductive, co-inductive, record, recursive +and corecursive definitions can benefit of customized notations. To do +this, insert a ``where`` notation clause after the definition of the +(co)inductive type or (co)recursive term (or after the definition of +each of them in case of mutual definitions). The exact syntax is given +on Figure 12.1 for inductive, co-inductive, recursive and corecursive +definitions and on Figure :ref:`record-syntax` for records. Here are examples: + +.. coqtop:: in + + Inductive and (A B:Prop) : Prop := conj : A -> B -> A /\ B + where "A /\ B" := (and A B). + + Fixpoint plus (n m:nat) {struct n} : nat := + match n with + | O => m + | S p => S (p+m) + end + where "n + m" := (plus n m). + +Displaying informations about notations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. opt:: Printing Notations + + To deactivate the printing of all notations, use the command + ``Unset Printing Notations``. To reactivate it, use the command + ``Set Printing Notations``. + + The default is to use notations for printing terms wherever possible. + +.. seealso:: + + :opt:`Printing All` + To disable other elements in addition to notations. + +Locating notations +~~~~~~~~~~~~~~~~~~ + +.. cmd:: Locate @symbol + + To know to which notations a given symbol belongs to, use the command + ``Locate symbol``, where symbol is any (composite) symbol surrounded by double + quotes. To locate a particular notation, use a string where the variables of the + notation are replaced by “_” and where possible single quotes inserted around + identifiers or tokens starting with a single quote are dropped. + + .. coqtop:: all + + Locate "exists". + Locate "exists _ .. _ , _". + + .. todo:: See also: Section 6.3.10. + +Notations and binders +~~~~~~~~~~~~~~~~~~~~~ + +Notations can include binders. 
This section lists +different ways to deal with binders. For further examples, see also +Section :ref:`RecursiveNotationsWithBinders`. + +Binders bound in the notation and parsed as identifiers ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Here is the basic example of a notation using a binder: + +.. coqtop:: in + + Notation "'sigma' x : A , B" := (sigT (fun x : A => B)) + (at level 200, x ident, A at level 200, right associativity). + +The binding variables in the right-hand side that occur as a parameter +of the notation (here :g:`x`) dynamically bind all the occurrences +in their respective binding scope after instantiation of the +parameters of the notation. This means that the term bound to :g:`B` can +refer to the variable name bound to :g:`x` as shown in the following +application of the notation: + +.. coqtop:: all + + Check sigma z : nat, z = 0. + +Notice the modifier ``x ident`` in the declaration of the +notation. It tells to parse :g:`x` as a single identifier. + +Binders bound in the notation and parsed as patterns +++++++++++++++++++++++++++++++++++++++++++++++++++++ + +In the same way as patterns can be used as binders, as in +:g:`fun '(x,y) => x+y` or :g:`fun '(existT _ x _) => x`, notations can be +defined so that any pattern (in the sense of the entry :n:`@pattern` of +Figure :ref:`term-syntax-aux`) can be used in place of the +binder. Here is an example: + +.. coqtop:: in reset + + Notation "'subset' ' p , P " := (sig (fun p => P)) + (at level 200, p pattern, format "'subset' ' p , P"). + +.. coqtop:: all + + Check subset '(x,y), x+y=0. + +The modifier ``p pattern`` in the declaration of the notation tells to parse +:g:`p` as a pattern. Note that a single variable is both an identifier and a +pattern, so, e.g., the following also works: + +.. coqtop:: all + + Check subset 'x, x=0. 
If one wants to prevent such a notation from being used for printing when the
coqtop:: all + + Check {(x,y) such that x+y=0}. + +To enforce that the pattern should not be used for printing when it +is just an identifier, one could have said +``p at level 99 as strict pattern``. + +Note also that in the absence of a ``as ident``, ``as strict pattern`` or +``as pattern`` modifiers, the default is to consider subexpressions occurring +in binding position and parsed as terms to be ``as ident``. + +.. _NotationsWithBinders: + +Binders not bound in the notation ++++++++++++++++++++++++++++++++++ + +We can also have binders in the right-hand side of a notation which +are not themselves bound in the notation. In this case, the binders +are considered up to renaming of the internal binder. E.g., for the +notation + +.. coqtop:: in + + Notation "'exists_different' n" := (exists p:nat, p<>n) (at level 200). + +the next command fails because p does not bind in the instance of n. + +.. coqtop:: all + + Fail Check (exists_different p). + +.. coqtop:: in + + Notation "[> a , .. , b <]" := + (cons a .. (cons b nil) .., cons b .. (cons a nil) ..). + +.. _RecursiveNotationsWithBinders: + +Notations with recursive patterns +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A mechanism is provided for declaring elementary notations with +recursive patterns. The basic example is: + +.. coqtop:: all + + Notation "[ x ; .. ; y ]" := (cons x .. (cons y nil) ..). + +On the right-hand side, an extra construction of the form ``.. t ..`` can +be used. Notice that ``..`` is part of the Coq syntax and it must not be +confused with the three-dots notation “``…``” used in this manual to denote +a sequence of arbitrary size. + +On the left-hand side, the part “``x s .. s y``” of the notation parses +any number of time (but at least one time) a sequence of expressions +separated by the sequence of tokens ``s`` (in the example, ``s`` is just “``;``”). + +The right-hand side must contain a subterm of the form either +``φ(x, .. φ(y,t) ..)`` or ``φ(y, .. 
φ(x,t) ..)`` where :math:`φ([~]_E , [~]_I)`, +called the *iterator* of the recursive notation is an arbitrary expression with +distinguished placeholders and where :math:`t` is called the *terminating +expression* of the recursive notation. In the example, we choose the names +:math:`x` and :math:`y` but in practice they can of course be chosen +arbitrarily. Note that the placeholder :math:`[~]_I` has to occur only once but +:math:`[~]_E` can occur several times. + +Parsing the notation produces a list of expressions which are used to +fill the first placeholder of the iterating pattern which itself is +repeatedly nested as many times as the length of the list, the second +placeholder being the nesting point. In the innermost occurrence of the +nested iterating pattern, the second placeholder is finally filled with the +terminating expression. + +In the example above, the iterator :math:`φ([~]_E , [~]_I)` is :math:`cons [~]_E [~]_I` +and the terminating expression is ``nil``. Here are other examples: + +.. coqtop:: in + + Notation "( x , y , .. , z )" := (pair .. (pair x y) .. z) (at level 0). + + Notation "[| t * ( x , y , .. , z ) ; ( a , b , .. , c ) * u |]" := + (pair (pair .. (pair (pair t x) (pair t y)) .. (pair t z)) + (pair .. (pair (pair a u) (pair b u)) .. (pair c u))) + (t at level 39). + +Notations with recursive patterns can be reserved like standard +notations; they can also be declared within interpretation scopes (see +section 12.2). + + +Notations with recursive patterns involving binders +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Recursive notations can also be used with binders. The basic example +is: + +.. coqtop:: all + + Notation "'exists' x .. y , p" := + (ex (fun x => .. (ex (fun y => p)) ..)) + (at level 200, x binder, y binder, right associativity). 
+ +The principle is the same as in Section 12.1.12 except that in the iterator +:math:`φ([~]_E , [~]_I)`, the placeholder :math:`[~]_E` can also occur in +position of the binding variable of a ``fun`` or a ``forall``. + +To specify that the part “``x .. y``” of the notation parses a sequence of +binders, ``x`` and ``y`` must be marked as ``binder`` in the list of modifiers +of the notation. The binders of the parsed sequence are used to fill the +occurrences of the first placeholder of the iterating pattern which is +repeatedly nested as many times as the number of binders generated. If ever the +generalization operator ``'`` (see Section 2.7.19) is used in the binding list, +the added binders are taken into account too. + +Binders parsing exist in two flavors. If ``x`` and ``y`` are marked as binder, +then a sequence such as :g:`a b c : T` will be accepted and interpreted as +the sequence of binders :g:`(a:T) (b:T) (c:T)`. For instance, in the +notation above, the syntax :g:`exists a b : nat, a = b` is valid. + +The variables ``x`` and ``y`` can also be marked as closed binder in which +case only well-bracketed binders of the form :g:`(a b c:T)` or :g:`{a b c:T}` +etc. are accepted. + +With closed binders, the recursive sequence in the left-hand side can +be of the more general form ``x s .. s y`` where ``s`` is an arbitrary sequence of +tokens. With open binders though, ``s`` has to be empty. Here is an +example of recursive notation with closed binders: + +.. coqtop:: in + + Notation "'mylet' f x .. y := t 'in' u":= + (let f := fun x => .. (fun y => t) .. in u) + (at level 200, x closed binder, y closed binder, right associativity). + +A recursive pattern for binders can be used in position of a recursive +pattern for terms. Here is an example: + +.. coqtop:: in + + Notation "'FUNAPP' x .. y , f" := + (fun x => .. (fun y => (.. (f x) ..) y ) ..) + (at level 200, x binder, y binder, right associativity). 
+ +If an occurrence of the :math:`[~]_E` is not in position of a binding +variable but of a term, it is the name used in the binding which is +used. Here is an example: + +.. coqtop:: in + + Notation "'exists_non_null' x .. y , P" := + (ex (fun x => x <> 0 /\ .. (ex (fun y => y <> 0 /\ P)) ..)) + (at level 200, x binder). + +Predefined entries +~~~~~~~~~~~~~~~~~~ + +By default, sub-expressions are parsed as terms and the corresponding +grammar entry is called :n:`@constr`. However, one may sometimes want +to restrict the syntax of terms in a notation. For instance, the +following notation will accept to parse only global reference in +position of :g:`x`: + +.. coqtop:: in + + Notation "'apply' f a1 .. an" := (.. (f a1) .. an) + (at level 10, f global, a1, an at level 9). + +In addition to ``global``, one can restrict the syntax of a +sub-expression by using the entry names ``ident`` or ``pattern`` +already seen in Section :ref:`NotationsWithBinders`, even when the +corresponding expression is not used as a binder in the right-hand +side. E.g.: + +.. coqtop:: in + + Notation "'apply_id' f a1 .. an" := (.. (f a1) .. an) + (at level 10, f ident, a1, an at level 9). + +Summary +~~~~~~~ + +Syntax of notations +~~~~~~~~~~~~~~~~~~~ + +The different syntactic variants of the command Notation are given on the +following figure. The optional :token:`scope` is described in the Section 12.2. + +.. productionlist:: coq + notation : [Local] Notation `string` := `term` [`modifiers`] [: `scope`]. + : | [Local] Infix `string` := `qualid` [`modifiers`] [: `scope`]. + : | [Local] Reserved Notation `string` [`modifiers`] . + : | Inductive `ind_body` [`decl_notation`] with … with `ind_body` [`decl_notation`]. + : | CoInductive `ind_body` [`decl_notation`] with … with `ind_body` [`decl_notation`]. + : | Fixpoint `fix_body` [`decl_notation`] with … with `fix_body` [`decl_notation`]. + : | CoFixpoint `cofix_body` [`decl_notation`] with … with `cofix_body` [`decl_notation`]. 
+ decl_notation : [where `string` := `term` [: `scope`] and … and `string` := `term` [: `scope`]]. + modifiers : at level `natural` + : | `ident` , … , `ident` at level `natural` [`binderinterp`] + : | `ident` , … , `ident` at next level [`binderinterp`] + : | `ident` ident + : | `ident` global + : | `ident` bigint + : | `ident` [strict] pattern [at level `natural`] + : | `ident` binder + : | `ident` closed binder + : | left associativity + : | right associativity + : | no associativity + : | only parsing + : | only printing + : | format `string` + binderinterp : as ident + : | as pattern + : | as strict pattern + +.. note:: No typing of the denoted expression is performed at definition + time. Type-checking is done only at the time of use of the notation. + +.. note:: Many examples of Notation may be found in the files composing + the initial state of Coq (see directory :file:`$COQLIB/theories/Init`). + +.. note:: The notation ``"{ x }"`` has a special status in such a way that + complex notations of the form ``"x + { y }"`` or ``"x * { y }"`` can be + nested with correct precedences. Especially, every notation involving + a pattern of the form ``"{ x }"`` is parsed as a notation where the + pattern ``"{ x }"`` has been simply replaced by ``"x"`` and the curly + brackets are parsed separately. E.g. ``"y + { z }"`` is not parsed as a + term of the given form but as a term of the form ``"y + z"`` where ``z`` + has been parsed using the rule parsing ``"{ x }"``. Especially, level + and precedences for a rule including patterns of the form ``"{ x }"`` + are relative not to the textual notation but to the notation where the + curly brackets have been removed (e.g. the level and the associativity + given to some notation, say ``"{ y } & { z }"`` in fact applies to the + underlying ``"{ x }"``\-free rule which is ``"y & z"``). + +Persistence of notations +~~~~~~~~~~~~~~~~~~~~~~~~ + +Notations do not survive the end of sections. + +.. 
cmd:: Local Notation @notation + + Notations survive modules unless the command ``Local Notation`` is used instead + of ``Notation``. + +Interpretation scopes +---------------------- + +An *interpretation scope* is a set of notations for terms with their +interpretation. Interpretation scopes provide a weak, purely +syntactical form of notation overloading: the same notation, for +instance the infix symbol ``+``, can be used to denote distinct +definitions of the additive operator. Depending on which interpretation +scope is currently open, the interpretation is different. +Interpretation scopes can include an interpretation for numerals and +strings. However, this is only made possible at the Objective Caml +level. + +See Figure 12.1 for the syntax of notations including the possibility +to declare them in a given scope. Here is a typical example which +declares the notation for conjunction in the scope ``type_scope``. + +.. coqdoc:: + + Notation "A /\ B" := (and A B) : type_scope. + +.. note:: A notation not defined in a scope is called a *lonely* + notation. + +Global interpretation rules for notations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At any time, the interpretation of a notation for term is done within +a *stack* of interpretation scopes and lonely notations. In case a +notation has several interpretations, the actual interpretation is the +one defined by (or in) the more recently declared (or open) lonely +notation (or interpretation scope) which defines this notation. 
+Typically if a given notation is defined in some scope ``scope`` but has +also an interpretation not assigned to a scope, then, if ``scope`` is open +before the lonely interpretation is declared, then the lonely +interpretation is used (and this is the case even if the +interpretation of the notation in scope is given after the lonely +interpretation: otherwise said, only the order of lonely +interpretations and opening of scopes matters, and not the declaration +of interpretations within a scope). + +The initial state of Coq declares three interpretation scopes and no +lonely notations. These scopes, in opening order, are ``core_scope``, +``type_scope`` and ``nat_scope``. + +.. cmd:: Open Scope @scope + + The command to add a scope to the interpretation scope stack is + :n:`Open Scope @scope`. + +.. cmd:: Close Scope @scope + + It is also possible to remove a scope from the interpretation scope + stack by using the command :n:`Close Scope @scope`. + + Notice that this command does not only cancel the last :n:`Open Scope @scope` + but all the invocations of it. + +.. note:: ``Open Scope`` and ``Close Scope`` do not survive the end of sections + where they occur. When defined outside of a section, they are exported + to the modules that import the module where they occur. + +.. cmd:: Local Open Scope @scope. + Local Close Scope @scope. + + These variants are not exported to the modules that import the module where + they occur, even if outside a section. + +.. cmd:: Global Open Scope @scope. + Global Close Scope @scope. + + These variants survive sections. They behave as if Global were absent when + not inside a section. + +Local interpretation rules for notations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In addition to the global rules of interpretation of notations, some +ways to change the interpretation of subterms are available. 
+ +Local opening of an interpretation scope ++++++++++++++++++++++++++++++++++++++++++ + +It is possible to locally extend the interpretation scope stack using the syntax +:g:`(term)%key` (or simply :g:`term%key` for atomic terms), where key is a +special identifier called *delimiting key* and bound to a given scope. + +In such a situation, the term ``term``, and all its subterms, are +interpreted in the scope stack extended with the scope bound to key. + +.. cmd:: Delimit Scope @scope with @ident + + To bind a delimiting key to a scope, use the command + :n:`Delimit Scope @scope with @ident` + +.. cmd:: Undelimit Scope @scope + + To remove a delimiting key of a scope, use the command + :n:`Undelimit Scope @scope` + +Binding arguments of a constant to an interpretation scope ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. cmd:: Arguments @qualid {+ @name%@scope} + + It is possible to set in advance that some arguments of a given constant have + to be interpreted in a given scope. The command is + :n:`Arguments @qualid {+ @name%@scope}` where the list is a prefix of the + arguments of ``qualid`` possibly annotated with their ``scope``. Grouping + round parentheses can be used to decorate multiple arguments with the same + scope. ``scope`` can be either a scope name or its delimiting key. For + example the following command puts the first two arguments of :g:`plus_fct` + in the scope delimited by the key ``F`` (``Rfun_scope``) and the last + argument in the scope delimited by the key ``R`` (``R_scope``). + + .. coqtop:: in + + Arguments plus_fct (f1 f2)%F x%R. + + The ``Arguments`` command accepts scope decorations on all grouping + parentheses. In the following example arguments A and B are marked as + maximally inserted implicit arguments and are put into the type_scope scope. + + .. coqtop:: in + + Arguments respectful {A B}%type (R R')%signature _ _. 
+ + When interpreting a term, if some of the arguments of qualid are built + from a notation, then this notation is interpreted in the scope stack + extended by the scope bound (if any) to this argument. The effect of + the scope is limited to the argument itself. It does not propagate to + subterms but the subterms that, after interpretation of the notation, + turn to be themselves arguments of a reference are interpreted + accordingly to the arguments scopes bound to this reference. + +.. cmdv:: Arguments @qualid : clear scopes + + Arguments scopes can be cleared with :n:`Arguments @qualid : clear scopes`. + +.. cmdv:: Arguments @qualid {+ @name%scope} : extra scopes + + Defines extra argument scopes, to be used in case of coercion to Funclass + (see Chapter :ref:`Coercions-full`) or with a computed type. + +.. cmdv:: Global Arguments @qualid {+ @name%@scope} + + This behaves like :n:`Arguments qualid {+ @name%@scope}` but survives when a + section is closed instead of stopping working at section closing. Without the + ``Global`` modifier, the effect of the command stops when the section it belongs + to ends. + +.. cmdv:: Local Arguments @qualid {+ @name%@scope} + + This behaves like :n:`Arguments @qualid {+ @name%@scope}` but does not + survive modules and files. Without the ``Local`` modifier, the effect of the + command is visible from within other modules or files. + +.. seealso:: + + :cmd:`About @qualid` + The command to show the scopes bound to the arguments of a + function is described in Section 2. + +.. note:: + + In notations, the subterms matching the identifiers of the + notations are interpreted in the scope in which the identifiers + occurred at the time of the declaration of the notation. Here is an + example: + + .. coqtop:: all + + Parameter g : bool -> bool. + Notation "@@" := true (only parsing) : bool_scope. + Notation "@@" := false (only parsing): mybool_scope. + + Bind Scope bool_scope with bool. + Notation "# x #" := (g x) (at level 40). 
+ Check # @@ #. + Arguments g _%mybool_scope. + Check # @@ #. + Delimit Scope mybool_scope with mybool. + Check # @@%mybool #. + +Binding types of arguments to an interpretation scope ++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. cmd:: Bind Scope @scope with @qualid + + When an interpretation scope is naturally associated to a type (e.g. the + scope of operations on the natural numbers), it may be convenient to bind it + to this type. When a scope ``scope`` is bound to a type type, any new function + defined later on gets its arguments of type type interpreted by default in + scope scope (this default behavior can however be overwritten by explicitly + using the command ``Arguments``). + + Whether the argument of a function has some type ``type`` is determined + statically. For instance, if f is a polymorphic function of type :g:`forall + X:Type, X -> X` and type :g:`t` is bound to a scope ``scope``, then :g:`a` of + type :g:`t` in :g:`f t a` is not recognized as an argument to be interpreted + in scope ``scope``. + + More generally, any coercion :n:`@class` (see Chapter :ref:`Coercions-full`) + can be bound to an interpretation scope. The command to do it is + :n:`Bind Scope @scope with @class` + + .. coqtop:: in + + Parameter U : Set. + Bind Scope U_scope with U. + Parameter Uplus : U -> U -> U. + Parameter P : forall T:Set, T -> U -> Prop. + Parameter f : forall T:Set, T -> U. + Infix "+" := Uplus : U_scope. + Unset Printing Notations. + Open Scope nat_scope. + + .. coqtop:: all + + Check (fun x y1 y2 z t => P _ (x + t) ((f _ (y1 + y2) + z))). + + .. note:: The scopes ``type_scope`` and ``function_scope`` also have a local + effect on interpretation. See the next section. + +.. seealso:: + + :cmd:`About` + The command to show the scopes bound to the arguments of a + function is described in Section 2. + +The ``type_scope`` interpretation scope +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
index:: type_scope + +The scope ``type_scope`` has a special status. It is a primitive interpretation +scope which is temporarily activated each time a subterm of an expression is +expected to be a type. It is delimited by the key ``type``, and bound to the +coercion class ``Sortclass``. It is also used in certain situations where an +expression is statically known to be a type, including the conclusion and the +type of hypotheses within an Ltac goal match (see Section +:ref:`ltac-match-goal`), the statement of a theorem, the type of a definition, +the type of a binder, the domain and codomain of implication, the codomain of +products, and more generally any type argument of a declared or defined +constant. + +The ``function_scope`` interpretation scope +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. index:: function_scope + +The scope ``function_scope`` also has a special status. +It is temporarily activated each time the argument of a global reference is +recognized to be a ``Funclass`` instance, i.e., of type :g:`forall x:A, B` or +:g:`A -> B`. + + +Interpretation scopes used in the standard library of Coq +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We give an overview of the scopes used in the standard library of Coq. +For a complete list of notations in each scope, use the commands Print +Scopes or Print Scope scope. + +``type_scope`` + This scope includes infix * for product types and infix + for sum types. It + is delimited by key ``type``, and bound to the coercion class + ``Sortclass``, as described above. + +``function_scope`` + This scope is delimited by key ``function``, and bound to the coercion class + ``Funclass``, as described above. + +``nat_scope`` + This scope includes the standard arithmetical operators and relations on type + nat. Positive numerals in this scope are mapped to their canonical + representatives built from :g:`O` and :g:`S`. The scope is delimited by key + ``nat``, and bound to the type :g:`nat` (see above). 
+ +``N_scope`` + This scope includes the standard arithmetical operators and relations on + type :g:`N` (binary natural numbers). It is delimited by key ``N`` and comes + with an interpretation for numerals as closed terms of type :g:`N`. + +``Z_scope`` + This scope includes the standard arithmetical operators and relations on + type :g:`Z` (binary integer numbers). It is delimited by key ``Z`` and comes + with an interpretation for numerals as closed term of type :g:`Z`. + +``positive_scope`` + This scope includes the standard arithmetical operators and relations on + type :g:`positive` (binary strictly positive numbers). It is delimited by + key ``positive`` and comes with an interpretation for numerals as closed + term of type :g:`positive`. + +``Q_scope`` + This scope includes the standard arithmetical operators and relations on + type :g:`Q` (rational numbers defined as fractions of an integer and a + strictly positive integer modulo the equality of the numerator- + denominator cross-product). As for numerals, only 0 and 1 have an + interpretation in scope ``Q_scope`` (their interpretations are 0/1 and 1/1 + respectively). + +``Qc_scope`` + This scope includes the standard arithmetical operators and relations on the + type :g:`Qc` of rational numbers defined as the type of irreducible + fractions of an integer and a strictly positive integer. + +``real_scope`` + This scope includes the standard arithmetical operators and relations on + type :g:`R` (axiomatic real numbers). It is delimited by key ``R`` and comes + with an interpretation for numerals using the :g:`IZR` morphism from binary + integer numbers to :g:`R`. + +``bool_scope`` + This scope includes notations for the boolean operators. It is delimited by + key ``bool``, and bound to the type :g:`bool` (see above). + +``list_scope`` + This scope includes notations for the list operators. It is delimited by key + ``list``, and bound to the type :g:`list` (see above). 
+ +``core_scope`` + This scope includes the notation for pairs. It is delimited by key ``core``. + +``string_scope`` + This scope includes notation for strings as elements of the type string. + Special characters and escaping follow Coq conventions on strings (see + Section 1.1). Especially, there is no convention to visualize non + printable characters of a string. The file :file:`String.v` shows an example + that contains quotes, a newline and a beep (i.e. the ASCII character + of code 7). + +``char_scope`` + This scope includes interpretation for all strings of the form ``"c"`` + where :g:`c` is an ASCII character, or of the form ``"nnn"`` where nnn is + a three-digit number (possibly with leading 0's), or of the form + ``""""``. Their respective denotations are the ASCII code of c, the + decimal ASCII code nnn, or the ASCII code of the character ``"`` (i.e. + the ASCII code 34), all of them being represented in the type :g:`ascii`. + + +Displaying information about scopes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. cmd:: Print Visibility + + This displays the current stack of notations in scopes and lonely + notations that is used to interpret a notation. The top of the stack + is displayed last. Notations in scopes whose interpretation is hidden + by the same notation in a more recently open scope are not displayed. + Hence each notation is displayed only once. + +.. cmdv:: Print Visibility scope + + This displays the current stack of notations in scopes and lonely + notations assuming that scope is pushed on top of the stack. This is + useful to know how a subterm locally occurring in the scope of scope is + interpreted. + +.. cmdv:: Print Scope scope + + This displays all the notations defined in interpretation scope scope. + It also displays the delimiting key if any and the class to which the + scope is bound, if any. + +.. 
cmdv:: Print Scopes + + This displays all the notations, delimiting keys and corresponding + class of all the existing interpretation scopes. It also displays the + lonely notations. + +Abbreviations +-------------- + +.. cmd:: {? Local} Notation @ident {+ @ident} := @term {? (only parsing)}. + + An *abbreviation* is a name, possibly applied to arguments, that + denotes a (presumably) more complex expression. Here are examples: + + .. coqtop:: none + + Require Import List. + Require Import Relations. + Set Printing Notations. + + .. coqtop:: in + + Notation Nlist := (list nat). + + .. coqtop:: all + + Check 1 :: 2 :: 3 :: nil. + + .. coqtop:: in + + Notation reflexive R := (forall x, R x x). + + .. coqtop:: all + + Check forall A:Prop, A <-> A. + Check reflexive iff. + + An abbreviation expects no precedence nor associativity, since it + is parsed as an usual application. Abbreviations are used as + much as possible by the Coq printers unless the modifier ``(only + parsing)`` is given. + + Abbreviations are bound to an absolute name as an ordinary definition + is, and they can be referred by qualified names too. + + Abbreviations are syntactic in the sense that they are bound to + expressions which are not typed at the time of the definition of the + abbreviation but at the time it is used. Especially, abbreviations can + be bound to terms with holes (i.e. with “``_``”). For example: + + .. coqtop:: none reset + + Set Strict Implicit. + Set Printing Depth 50. + + .. coqtop:: in + + Definition explicit_id (A:Set) (a:A) := a. + Notation id := (explicit_id _). + + .. coqtop:: all + + Check (id 0). + + Abbreviations do not survive the end of sections. No typing of the + denoted expression is performed at definition time. Type-checking is + done only at the time of use of the abbreviation. + +Tactic Notations +----------------- + +Tactic notations allow to customize the syntax of the tactics of the +tactic language. Tactic notations obey the following syntax: + +.. 
productionlist:: coq + tacn : [Local] Tactic Notation [`tactic_level`] [`prod_item` … `prod_item`] := `tactic`. + prod_item : `string` | `tactic_argument_type`(`ident`) + tactic_level : (at level `natural`) + tactic_argument_type : ident | simple_intropattern | reference + : | hyp | hyp_list | ne_hyp_list + : | constr | uconstr | constr_list | ne_constr_list + : | integer | integer_list | ne_integer_list + : | int_or_var | int_or_var_list | ne_int_or_var_list + : | tactic | tactic0 | tactic1 | tactic2 | tactic3 + : | tactic4 | tactic5 + +.. cmd:: {? Local} Tactic Notation {? (at level @level)} {+ @prod_item} := @tactic. + + A tactic notation extends the parser and pretty-printer of tactics with a new + rule made of the list of production items. It then evaluates into the + tactic expression ``tactic``. For simple tactics, it is recommended to use + a terminal symbol, i.e. a string, for the first production item. The + tactic level indicates the parsing precedence of the tactic notation. + This information is particularly relevant for notations of tacticals. + Levels 0 to 5 are available (default is 0). + + .. cmd:: Print Grammar tactic + + To know the parsing precedences of the existing tacticals, use the command + ``Print Grammar tactic``. + + Each type of tactic argument has a specific semantic regarding how it + is parsed and how it is interpreted. The semantic is described in the + following table. The last command gives examples of tactics which use + the corresponding kind of argument. + + .. 
list-table:: + :header-rows: 1 + + * - Tactic argument type + - parsed as + - interpreted as + - as in tactic + + * - ``ident`` + - identifier + - a user-given name + - intro + + * - ``simple_intropattern`` + - intro_pattern + - an intro_pattern + - intros + + * - ``hyp`` + - identifier + - an hypothesis defined in context + - clear + + * - ``reference`` + - qualified identifier + - a global reference of term + - unfold + + * - ``constr`` + - term + - a term + - exact + + * - ``uconstr`` + - term + - an untyped term + - refine + + * - ``integer`` + - integer + - an integer + - + + * - ``int_or_var`` + - identifier or integer + - an integer + - do + + * - ``tactic`` + - tactic at level 5 + - a tactic + - + + * - ``tacticn`` + - tactic at level n + - a tactic + - + + * - *entry*\ ``_list`` + - list of *entry* + - a list of how *entry* is interpreted + - + + * - ``ne_``\ *entry*\ ``_list`` + - non-empty list of *entry* + - a list of how *entry* is interpreted + - + + .. note:: In order to be bound in tactic definitions, each syntactic + entry for argument type must include the case of simple L tac + identifier as part of what it parses. This is naturally the case for + ``ident``, ``simple_intropattern``, ``reference``, ``constr``, ... but not for ``integer``. + This is the reason for introducing a special entry ``int_or_var`` which + evaluates to integers only but which syntactically includes + identifiers in order to be usable in tactic definitions. + + .. note:: The *entry*\ ``_list`` and ``ne_``\ *entry*\ ``_list`` entries can be used in + primitive tactics or in other notations at places where a list of the + underlying entry can be used: entry is either ``constr``, ``hyp``, ``integer`` + or ``int_or_var``. + +.. cmdv:: Local Tactic Notation + + Tactic notations do not survive the end of sections. They survive + modules unless the command Local Tactic Notation is used instead of + Tactic Notation. + +.. rubric:: Footnotes + +.. 
[#and_or_levels] which are the levels effectively chosen in the current + implementation of Coq + +.. [#no_associativity] Coq accepts notations declared as no associative but the parser on + which Coq is built, namely Camlp4, currently does not implement the + no-associativity and replaces it by a left associativity; hence it is + the same for Coq: no-associativity is in fact left associativity diff --git a/doc/sphinx/zebibliography.rst b/doc/sphinx/zebibliography.rst new file mode 100644 index 0000000000..0000caa301 --- /dev/null +++ b/doc/sphinx/zebibliography.rst @@ -0,0 +1,8 @@ +.. _bibliography: + +============ +Bibliography +============ + +.. bibliography:: biblio.bib + :cited: diff --git a/doc/tools/coqrst/coqdomain.py b/doc/tools/coqrst/coqdomain.py index 18f32d7a8a..663ab9d371 100644 --- a/doc/tools/coqrst/coqdomain.py +++ b/doc/tools/coqrst/coqdomain.py @@ -28,8 +28,10 @@ from docutils.parsers.rst.directives.admonitions import BaseAdmonition from sphinx import addnodes from sphinx.roles import XRefRole from sphinx.util.nodes import set_source_info, set_role_source_info, make_refnode +from sphinx.util.logging import getLogger from sphinx.directives import ObjectDescription from sphinx.domains import Domain, ObjType, Index +from sphinx.domains.std import token_xrefs from sphinx.ext.mathbase import MathDirective, displaymath from . import coqdoc @@ -155,6 +157,9 @@ class CoqObject(ObjectDescription): """Create a target and an index entry for name""" if name: target = self._add_target(signode, name) + # remove trailing . , found in commands, but not ... (ellipsis) + if name[-1] == "." and not name[-3:] == "..." 
: + name = name[0:-1] self._add_index_entry(name, target) return target @@ -173,19 +178,19 @@ class NotationObject(CoqObject): class TacticObject(PlainObject): """An object to represent Coq tactics""" subdomain = "tac" - index_suffix = "(tactic)" + index_suffix = "(tac)" annotation = None class GallinaObject(PlainObject): """An object to represent Coq theorems""" subdomain = "thm" - index_suffix = "(theorem)" + index_suffix = "(thm)" annotation = "Theorem" class VernacObject(NotationObject): """An object to represent Coq commands""" subdomain = "cmd" - index_suffix = "(command)" + index_suffix = "(cmd)" annotation = "Command" def _name_from_signature(self, signature): @@ -193,33 +198,72 @@ class VernacObject(NotationObject): class VernacVariantObject(VernacObject): """An object to represent variants of Coq commands""" - index_suffix = "(command variant)" + index_suffix = "(cmdv)" annotation = "Variant" class TacticNotationObject(NotationObject): """An object to represent Coq tactic notations""" subdomain = "tacn" - index_suffix = "(tactic notation)" + index_suffix = "(tacn)" annotation = None class TacticNotationVariantObject(TacticNotationObject): """An object to represent variants of Coq tactic notations""" - index_suffix = "(tactic variant)" + index_suffix = "(tacnv)" annotation = "Variant" class OptionObject(NotationObject): - """An object to represent variants of Coq options""" + """An object to represent Coq options""" subdomain = "opt" - index_suffix = "(option)" + index_suffix = "(opt)" annotation = "Option" def _name_from_signature(self, signature): return stringify_with_ellipses(signature) +class ProductionObject(NotationObject): + """An object to represent grammar productions""" + subdomain = "prodn" + index_suffix = None + annotation = None + + # override to create link targets for production left-hand sides + def run(self): + env = self.state.document.settings.env + objects = env.domaindata['std']['objects'] + + class ProdnError(Exception): + 
"""Exception for ill-formed prodn""" + pass + + [idx, node] = super().run() + try: + # find LHS of production + inline_lhs = node[0][0][0][0] # may be fragile !!! + lhs_str = str(inline_lhs) + if lhs_str[0:7] != "<inline": + raise ProdnError("Expected atom on LHS") + lhs = inline_lhs[0] + # register link target + subnode = addnodes.production() + subnode['tokenname'] = lhs + idname = 'grammar-token-%s' % subnode['tokenname'] + if idname not in self.state.document.ids: + subnode['ids'].append(idname) + self.state.document.note_implicit_target(subnode, subnode) + objects['token', subnode['tokenname']] = env.docname, idname + subnode.extend(token_xrefs(lhs)) + # patch in link target + inline_lhs['ids'].append(idname) + except ProdnError as err: + getLogger(__name__).warning("Could not create link target for prodn: " + str(err) + + "\nSphinx represents the prodn as: " + str(node) + "\n") + return [idx, node] + class ExceptionObject(NotationObject): """An object to represent Coq errors.""" subdomain = "exn" - index_suffix = "(error)" + index_suffix = "(err)" annotation = "Error" # Uses “exn” since “err” already is a CSS class added by “writer_aux”. 
@@ -371,6 +415,7 @@ class InferenceDirective(Directive): required_arguments = 1 optional_arguments = 0 has_content = True + final_argument_whitespace = True def make_math_node(self, latex): node = displaymath() @@ -601,11 +646,13 @@ class CoqOptionIndex(CoqSubdomainsIndex): class CoqGallinaIndex(CoqSubdomainsIndex): name, localname, shortname, subdomains = "thmindex", "Gallina Index", "theorems", ["thm"] -class CoqExceptionIndex(CoqSubdomainsIndex): - name, localname, shortname, subdomains = "exnindex", "Error Index", "errors", ["exn"] +# we specify an index to make productions fit into the framework of notations +# but not likely to include a link to this index +class CoqProductionIndex(CoqSubdomainsIndex): + name, localname, shortname, subdomains = "prodnindex", "Production Index", "productions", ["prodn"] -class CoqWarningIndex(CoqSubdomainsIndex): - name, localname, shortname, subdomains = "warnindex", "Warning Index", "warnings", ["warn"] +class CoqExceptionIndex(CoqSubdomainsIndex): + name, localname, shortname, subdomains = "exnindex", "Errors and Warnings Index", "errors", ["exn", "warn"] class IndexXRefRole(XRefRole): """A link to one of our domain-specific indices.""" @@ -664,8 +711,9 @@ class CoqDomain(Domain): 'tacv': ObjType('tacv', 'tacn'), 'opt': ObjType('opt', 'opt'), 'thm': ObjType('thm', 'thm'), + 'prodn': ObjType('prodn', 'prodn'), 'exn': ObjType('exn', 'exn'), - 'warn': ObjType('warn', 'warn'), + 'warn': ObjType('warn', 'exn'), 'index': ObjType('index', 'index', searchprio=-1) } @@ -680,6 +728,7 @@ class CoqDomain(Domain): 'tacv': TacticNotationVariantObject, 'opt': OptionObject, 'thm': GallinaObject, + 'prodn' : ProductionObject, 'exn': ExceptionObject, 'warn': WarningObject, } @@ -691,6 +740,7 @@ class CoqDomain(Domain): 'tacn': XRefRole(), 'opt': XRefRole(), 'thm': XRefRole(), + 'prodn' : XRefRole(), 'exn': XRefRole(), 'warn': XRefRole(), # This one is special @@ -704,7 +754,7 @@ class CoqDomain(Domain): 'l': LtacRole, #FIXME unused? 
} - indices = [CoqVernacIndex, CoqTacticIndex, CoqOptionIndex, CoqGallinaIndex, CoqExceptionIndex, CoqWarningIndex] + indices = [CoqVernacIndex, CoqTacticIndex, CoqOptionIndex, CoqGallinaIndex, CoqProductionIndex, CoqExceptionIndex] data_version = 1 initial_data = { @@ -716,6 +766,7 @@ class CoqDomain(Domain): 'tacn': {}, 'opt': {}, 'thm': {}, + 'prodn' : {}, 'exn': {}, 'warn': {}, } @@ -807,6 +858,7 @@ def setup(app): app.connect('doctree-resolved', simplify_source_code_blocks_for_latex) # Add extra styles + app.add_stylesheet("fonts.css") app.add_stylesheet("ansi.css") app.add_stylesheet("coqdoc.css") app.add_javascript("notations.js") diff --git a/doc/tools/coqrst/notations/TacticNotations.g b/doc/tools/coqrst/notations/TacticNotations.g index 72ae8eb6be..5176c51d28 100644 --- a/doc/tools/coqrst/notations/TacticNotations.g +++ b/doc/tools/coqrst/notations/TacticNotations.g @@ -15,16 +15,18 @@ grammar TacticNotations; top: blocks EOF; blocks: block ((whitespace)? block)*; -block: atomic | hole | repeat | curlies; +block: atomic | meta | hole | repeat | curlies; repeat: LGROUP (ATOM)? WHITESPACE blocks (WHITESPACE)? RBRACE; curlies: LBRACE (whitespace)? blocks (whitespace)? 
RBRACE; whitespace: WHITESPACE; +meta: METACHAR; atomic: ATOM; hole: ID; LGROUP: '{' [+*?]; LBRACE: '{'; RBRACE: '}'; -ATOM: ~[@{} ]+; +METACHAR: '%' [|()]; +ATOM: '@' | ~[@{} ]+; ID: '@' [a-zA-Z0-9_]+; WHITESPACE: ' '+; diff --git a/doc/tools/coqrst/notations/TacticNotations.tokens b/doc/tools/coqrst/notations/TacticNotations.tokens index 4d41a38837..76ed2b065b 100644 --- a/doc/tools/coqrst/notations/TacticNotations.tokens +++ b/doc/tools/coqrst/notations/TacticNotations.tokens @@ -1,8 +1,9 @@ LGROUP=1 LBRACE=2 RBRACE=3 -ATOM=4 -ID=5 -WHITESPACE=6 +METACHAR=4 +ATOM=5 +ID=6 +WHITESPACE=7 '{'=2 '}'=3 diff --git a/doc/tools/coqrst/notations/TacticNotationsLexer.py b/doc/tools/coqrst/notations/TacticNotationsLexer.py index 4cac071ac3..ffa774b9ba 100644 --- a/doc/tools/coqrst/notations/TacticNotationsLexer.py +++ b/doc/tools/coqrst/notations/TacticNotationsLexer.py @@ -7,21 +7,24 @@ import sys def serializedATN(): with StringIO() as buf: - buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\b") - buf.write("&\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7") - buf.write("\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\6\5\30\n\5\r\5\16\5\31") - buf.write("\3\6\3\6\6\6\36\n\6\r\6\16\6\37\3\7\6\7#\n\7\r\7\16\7") - buf.write("$\2\2\b\3\3\5\4\7\5\t\6\13\7\r\b\3\2\5\4\2,-AA\6\2\"\"") - buf.write("BB}}\177\177\6\2\62;C\\aac|\2(\2\3\3\2\2\2\2\5\3\2\2\2") - buf.write("\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\3\17") - buf.write("\3\2\2\2\5\22\3\2\2\2\7\24\3\2\2\2\t\27\3\2\2\2\13\33") - buf.write("\3\2\2\2\r\"\3\2\2\2\17\20\7}\2\2\20\21\t\2\2\2\21\4\3") - buf.write("\2\2\2\22\23\7}\2\2\23\6\3\2\2\2\24\25\7\177\2\2\25\b") - buf.write("\3\2\2\2\26\30\n\3\2\2\27\26\3\2\2\2\30\31\3\2\2\2\31") - buf.write("\27\3\2\2\2\31\32\3\2\2\2\32\n\3\2\2\2\33\35\7B\2\2\34") - buf.write("\36\t\4\2\2\35\34\3\2\2\2\36\37\3\2\2\2\37\35\3\2\2\2") - buf.write("\37 \3\2\2\2 \f\3\2\2\2!#\7\"\2\2\"!\3\2\2\2#$\3\2\2\2") - buf.write("$\"\3\2\2\2$%\3\2\2\2%\16\3\2\2\2\6\2\31\37$\2") + 
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\t") + buf.write(".\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7") + buf.write("\4\b\t\b\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\5\3\6\3") + buf.write("\6\6\6\36\n\6\r\6\16\6\37\5\6\"\n\6\3\7\3\7\6\7&\n\7\r") + buf.write("\7\16\7\'\3\b\6\b+\n\b\r\b\16\b,\2\2\t\3\3\5\4\7\5\t\6") + buf.write("\13\7\r\b\17\t\3\2\6\4\2,-AA\4\2*+~~\6\2\"\"BB}}\177\177") + buf.write("\6\2\62;C\\aac|\2\61\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2") + buf.write("\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\3") + buf.write("\21\3\2\2\2\5\24\3\2\2\2\7\26\3\2\2\2\t\30\3\2\2\2\13") + buf.write("!\3\2\2\2\r#\3\2\2\2\17*\3\2\2\2\21\22\7}\2\2\22\23\t") + buf.write("\2\2\2\23\4\3\2\2\2\24\25\7}\2\2\25\6\3\2\2\2\26\27\7") + buf.write("\177\2\2\27\b\3\2\2\2\30\31\7\'\2\2\31\32\t\3\2\2\32\n") + buf.write("\3\2\2\2\33\"\7B\2\2\34\36\n\4\2\2\35\34\3\2\2\2\36\37") + buf.write("\3\2\2\2\37\35\3\2\2\2\37 \3\2\2\2 \"\3\2\2\2!\33\3\2") + buf.write("\2\2!\35\3\2\2\2\"\f\3\2\2\2#%\7B\2\2$&\t\5\2\2%$\3\2") + buf.write("\2\2&\'\3\2\2\2\'%\3\2\2\2\'(\3\2\2\2(\16\3\2\2\2)+\7") + buf.write("\"\2\2*)\3\2\2\2+,\3\2\2\2,*\3\2\2\2,-\3\2\2\2-\20\3\2") + buf.write("\2\2\7\2\37!\',\2") return buf.getvalue() @@ -34,9 +37,10 @@ class TacticNotationsLexer(Lexer): LGROUP = 1 LBRACE = 2 RBRACE = 3 - ATOM = 4 - ID = 5 - WHITESPACE = 6 + METACHAR = 4 + ATOM = 5 + ID = 6 + WHITESPACE = 7 channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] @@ -46,9 +50,10 @@ class TacticNotationsLexer(Lexer): "'{'", "'}'" ] symbolicNames = [ "<INVALID>", - "LGROUP", "LBRACE", "RBRACE", "ATOM", "ID", "WHITESPACE" ] + "LGROUP", "LBRACE", "RBRACE", "METACHAR", "ATOM", "ID", "WHITESPACE" ] - ruleNames = [ "LGROUP", "LBRACE", "RBRACE", "ATOM", "ID", "WHITESPACE" ] + ruleNames = [ "LGROUP", "LBRACE", "RBRACE", "METACHAR", "ATOM", "ID", + "WHITESPACE" ] grammarFileName = "TacticNotations.g" diff --git a/doc/tools/coqrst/notations/TacticNotationsLexer.tokens 
b/doc/tools/coqrst/notations/TacticNotationsLexer.tokens index 4d41a38837..76ed2b065b 100644 --- a/doc/tools/coqrst/notations/TacticNotationsLexer.tokens +++ b/doc/tools/coqrst/notations/TacticNotationsLexer.tokens @@ -1,8 +1,9 @@ LGROUP=1 LBRACE=2 RBRACE=3 -ATOM=4 -ID=5 -WHITESPACE=6 +METACHAR=4 +ATOM=5 +ID=6 +WHITESPACE=7 '{'=2 '}'=3 diff --git a/doc/tools/coqrst/notations/TacticNotationsParser.py b/doc/tools/coqrst/notations/TacticNotationsParser.py index 357902ddb5..c7e28af52b 100644 --- a/doc/tools/coqrst/notations/TacticNotationsParser.py +++ b/doc/tools/coqrst/notations/TacticNotationsParser.py @@ -7,28 +7,29 @@ import sys def serializedATN(): with StringIO() as buf: - buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\b") - buf.write("A\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b") - buf.write("\t\b\4\t\t\t\3\2\3\2\3\2\3\3\3\3\5\3\30\n\3\3\3\7\3\33") - buf.write("\n\3\f\3\16\3\36\13\3\3\4\3\4\3\4\3\4\5\4$\n\4\3\5\3\5") - buf.write("\5\5(\n\5\3\5\3\5\3\5\5\5-\n\5\3\5\3\5\3\6\3\6\5\6\63") - buf.write("\n\6\3\6\3\6\5\6\67\n\6\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3") - buf.write("\t\3\t\2\2\n\2\4\6\b\n\f\16\20\2\2\2A\2\22\3\2\2\2\4\25") - buf.write("\3\2\2\2\6#\3\2\2\2\b%\3\2\2\2\n\60\3\2\2\2\f:\3\2\2\2") - buf.write("\16<\3\2\2\2\20>\3\2\2\2\22\23\5\4\3\2\23\24\7\2\2\3\24") - buf.write("\3\3\2\2\2\25\34\5\6\4\2\26\30\5\f\7\2\27\26\3\2\2\2\27") - buf.write("\30\3\2\2\2\30\31\3\2\2\2\31\33\5\6\4\2\32\27\3\2\2\2") - buf.write("\33\36\3\2\2\2\34\32\3\2\2\2\34\35\3\2\2\2\35\5\3\2\2") - buf.write("\2\36\34\3\2\2\2\37$\5\16\b\2 $\5\20\t\2!$\5\b\5\2\"$") - buf.write("\5\n\6\2#\37\3\2\2\2# \3\2\2\2#!\3\2\2\2#\"\3\2\2\2$\7") - buf.write("\3\2\2\2%\'\7\3\2\2&(\7\6\2\2\'&\3\2\2\2\'(\3\2\2\2()") - buf.write("\3\2\2\2)*\7\b\2\2*,\5\4\3\2+-\7\b\2\2,+\3\2\2\2,-\3\2") - buf.write("\2\2-.\3\2\2\2./\7\5\2\2/\t\3\2\2\2\60\62\7\4\2\2\61\63") - buf.write("\5\f\7\2\62\61\3\2\2\2\62\63\3\2\2\2\63\64\3\2\2\2\64") - 
buf.write("\66\5\4\3\2\65\67\5\f\7\2\66\65\3\2\2\2\66\67\3\2\2\2") - buf.write("\678\3\2\2\289\7\5\2\29\13\3\2\2\2:;\7\b\2\2;\r\3\2\2") - buf.write("\2<=\7\6\2\2=\17\3\2\2\2>?\7\7\2\2?\21\3\2\2\2\t\27\34") - buf.write("#\',\62\66") + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\t") + buf.write("F\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b") + buf.write("\t\b\4\t\t\t\4\n\t\n\3\2\3\2\3\2\3\3\3\3\5\3\32\n\3\3") + buf.write("\3\7\3\35\n\3\f\3\16\3 \13\3\3\4\3\4\3\4\3\4\3\4\5\4\'") + buf.write("\n\4\3\5\3\5\5\5+\n\5\3\5\3\5\3\5\5\5\60\n\5\3\5\3\5\3") + buf.write("\6\3\6\5\6\66\n\6\3\6\3\6\5\6:\n\6\3\6\3\6\3\7\3\7\3\b") + buf.write("\3\b\3\t\3\t\3\n\3\n\3\n\2\2\13\2\4\6\b\n\f\16\20\22\2") + buf.write("\2\2F\2\24\3\2\2\2\4\27\3\2\2\2\6&\3\2\2\2\b(\3\2\2\2") + buf.write("\n\63\3\2\2\2\f=\3\2\2\2\16?\3\2\2\2\20A\3\2\2\2\22C\3") + buf.write("\2\2\2\24\25\5\4\3\2\25\26\7\2\2\3\26\3\3\2\2\2\27\36") + buf.write("\5\6\4\2\30\32\5\f\7\2\31\30\3\2\2\2\31\32\3\2\2\2\32") + buf.write("\33\3\2\2\2\33\35\5\6\4\2\34\31\3\2\2\2\35 \3\2\2\2\36") + buf.write("\34\3\2\2\2\36\37\3\2\2\2\37\5\3\2\2\2 \36\3\2\2\2!\'") + buf.write("\5\20\t\2\"\'\5\16\b\2#\'\5\22\n\2$\'\5\b\5\2%\'\5\n\6") + buf.write("\2&!\3\2\2\2&\"\3\2\2\2&#\3\2\2\2&$\3\2\2\2&%\3\2\2\2") + buf.write("\'\7\3\2\2\2(*\7\3\2\2)+\7\7\2\2*)\3\2\2\2*+\3\2\2\2+") + buf.write(",\3\2\2\2,-\7\t\2\2-/\5\4\3\2.\60\7\t\2\2/.\3\2\2\2/\60") + buf.write("\3\2\2\2\60\61\3\2\2\2\61\62\7\5\2\2\62\t\3\2\2\2\63\65") + buf.write("\7\4\2\2\64\66\5\f\7\2\65\64\3\2\2\2\65\66\3\2\2\2\66") + buf.write("\67\3\2\2\2\679\5\4\3\28:\5\f\7\298\3\2\2\29:\3\2\2\2") + buf.write(":;\3\2\2\2;<\7\5\2\2<\13\3\2\2\2=>\7\t\2\2>\r\3\2\2\2") + buf.write("?@\7\6\2\2@\17\3\2\2\2AB\7\7\2\2B\21\3\2\2\2CD\7\b\2\2") + buf.write("D\23\3\2\2\2\t\31\36&*/\659") return buf.getvalue() @@ -44,8 +45,8 @@ class TacticNotationsParser ( Parser ): literalNames = [ "<INVALID>", "<INVALID>", "'{'", "'}'" ] - symbolicNames = [ "<INVALID>", "LGROUP", "LBRACE", 
"RBRACE", "ATOM", - "ID", "WHITESPACE" ] + symbolicNames = [ "<INVALID>", "LGROUP", "LBRACE", "RBRACE", "METACHAR", + "ATOM", "ID", "WHITESPACE" ] RULE_top = 0 RULE_blocks = 1 @@ -53,19 +54,21 @@ class TacticNotationsParser ( Parser ): RULE_repeat = 3 RULE_curlies = 4 RULE_whitespace = 5 - RULE_atomic = 6 - RULE_hole = 7 + RULE_meta = 6 + RULE_atomic = 7 + RULE_hole = 8 ruleNames = [ "top", "blocks", "block", "repeat", "curlies", "whitespace", - "atomic", "hole" ] + "meta", "atomic", "hole" ] EOF = Token.EOF LGROUP=1 LBRACE=2 RBRACE=3 - ATOM=4 - ID=5 - WHITESPACE=6 + METACHAR=4 + ATOM=5 + ID=6 + WHITESPACE=7 def __init__(self, input:TokenStream, output:TextIO = sys.stdout): super().__init__(input, output) @@ -106,9 +109,9 @@ class TacticNotationsParser ( Parser ): self.enterRule(localctx, 0, self.RULE_top) try: self.enterOuterAlt(localctx, 1) - self.state = 16 + self.state = 18 self.blocks() - self.state = 17 + self.state = 19 self.match(TacticNotationsParser.EOF) except RecognitionException as re: localctx.exception = re @@ -157,24 +160,24 @@ class TacticNotationsParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 19 + self.state = 21 self.block() - self.state = 26 + self.state = 28 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,1,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: if _alt==1: - self.state = 21 + self.state = 23 self._errHandler.sync(self) _la = self._input.LA(1) if _la==TacticNotationsParser.WHITESPACE: - self.state = 20 + self.state = 22 self.whitespace() - self.state = 23 + self.state = 25 self.block() - self.state = 28 + self.state = 30 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,1,self._ctx) @@ -196,6 +199,10 @@ class TacticNotationsParser ( Parser ): return self.getTypedRuleContext(TacticNotationsParser.AtomicContext,0) + def meta(self): + return self.getTypedRuleContext(TacticNotationsParser.MetaContext,0) + + def hole(self): 
return self.getTypedRuleContext(TacticNotationsParser.HoleContext,0) @@ -225,27 +232,32 @@ class TacticNotationsParser ( Parser ): localctx = TacticNotationsParser.BlockContext(self, self._ctx, self.state) self.enterRule(localctx, 4, self.RULE_block) try: - self.state = 33 + self.state = 36 self._errHandler.sync(self) token = self._input.LA(1) if token in [TacticNotationsParser.ATOM]: self.enterOuterAlt(localctx, 1) - self.state = 29 + self.state = 31 self.atomic() pass - elif token in [TacticNotationsParser.ID]: + elif token in [TacticNotationsParser.METACHAR]: self.enterOuterAlt(localctx, 2) - self.state = 30 + self.state = 32 + self.meta() + pass + elif token in [TacticNotationsParser.ID]: + self.enterOuterAlt(localctx, 3) + self.state = 33 self.hole() pass elif token in [TacticNotationsParser.LGROUP]: - self.enterOuterAlt(localctx, 3) - self.state = 31 + self.enterOuterAlt(localctx, 4) + self.state = 34 self.repeat() pass elif token in [TacticNotationsParser.LBRACE]: - self.enterOuterAlt(localctx, 4) - self.state = 32 + self.enterOuterAlt(localctx, 5) + self.state = 35 self.curlies() pass else: @@ -303,29 +315,29 @@ class TacticNotationsParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 35 + self.state = 38 self.match(TacticNotationsParser.LGROUP) - self.state = 37 + self.state = 40 self._errHandler.sync(self) _la = self._input.LA(1) if _la==TacticNotationsParser.ATOM: - self.state = 36 + self.state = 39 self.match(TacticNotationsParser.ATOM) - self.state = 39 + self.state = 42 self.match(TacticNotationsParser.WHITESPACE) - self.state = 40 + self.state = 43 self.blocks() - self.state = 42 + self.state = 45 self._errHandler.sync(self) _la = self._input.LA(1) if _la==TacticNotationsParser.WHITESPACE: - self.state = 41 + self.state = 44 self.match(TacticNotationsParser.WHITESPACE) - self.state = 44 + self.state = 47 self.match(TacticNotationsParser.RBRACE) except RecognitionException as re: localctx.exception = re @@ 
-377,27 +389,27 @@ class TacticNotationsParser ( Parser ): self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) - self.state = 46 + self.state = 49 self.match(TacticNotationsParser.LBRACE) - self.state = 48 + self.state = 51 self._errHandler.sync(self) _la = self._input.LA(1) if _la==TacticNotationsParser.WHITESPACE: - self.state = 47 + self.state = 50 self.whitespace() - self.state = 50 + self.state = 53 self.blocks() - self.state = 52 + self.state = 55 self._errHandler.sync(self) _la = self._input.LA(1) if _la==TacticNotationsParser.WHITESPACE: - self.state = 51 + self.state = 54 self.whitespace() - self.state = 54 + self.state = 57 self.match(TacticNotationsParser.RBRACE) except RecognitionException as re: localctx.exception = re @@ -434,7 +446,7 @@ class TacticNotationsParser ( Parser ): self.enterRule(localctx, 10, self.RULE_whitespace) try: self.enterOuterAlt(localctx, 1) - self.state = 56 + self.state = 59 self.match(TacticNotationsParser.WHITESPACE) except RecognitionException as re: localctx.exception = re @@ -444,6 +456,43 @@ class TacticNotationsParser ( Parser ): self.exitRule() return localctx + class MetaContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def METACHAR(self): + return self.getToken(TacticNotationsParser.METACHAR, 0) + + def getRuleIndex(self): + return TacticNotationsParser.RULE_meta + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitMeta" ): + return visitor.visitMeta(self) + else: + return visitor.visitChildren(self) + + + + + def meta(self): + + localctx = TacticNotationsParser.MetaContext(self, self._ctx, self.state) + self.enterRule(localctx, 12, self.RULE_meta) + try: + self.enterOuterAlt(localctx, 1) + self.state = 61 + self.match(TacticNotationsParser.METACHAR) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + 
self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + class AtomicContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): @@ -468,10 +517,10 @@ class TacticNotationsParser ( Parser ): def atomic(self): localctx = TacticNotationsParser.AtomicContext(self, self._ctx, self.state) - self.enterRule(localctx, 12, self.RULE_atomic) + self.enterRule(localctx, 14, self.RULE_atomic) try: self.enterOuterAlt(localctx, 1) - self.state = 58 + self.state = 63 self.match(TacticNotationsParser.ATOM) except RecognitionException as re: localctx.exception = re @@ -505,10 +554,10 @@ class TacticNotationsParser ( Parser ): def hole(self): localctx = TacticNotationsParser.HoleContext(self, self._ctx, self.state) - self.enterRule(localctx, 14, self.RULE_hole) + self.enterRule(localctx, 16, self.RULE_hole) try: self.enterOuterAlt(localctx, 1) - self.state = 60 + self.state = 65 self.match(TacticNotationsParser.ID) except RecognitionException as re: localctx.exception = re diff --git a/doc/tools/coqrst/notations/TacticNotationsVisitor.py b/doc/tools/coqrst/notations/TacticNotationsVisitor.py index 80e69d4335..c0bcc4af37 100644 --- a/doc/tools/coqrst/notations/TacticNotationsVisitor.py +++ b/doc/tools/coqrst/notations/TacticNotationsVisitor.py @@ -39,6 +39,11 @@ class TacticNotationsVisitor(ParseTreeVisitor): return self.visitChildren(ctx) + # Visit a parse tree produced by TacticNotationsParser#meta. + def visitMeta(self, ctx:TacticNotationsParser.MetaContext): + return self.visitChildren(ctx) + + # Visit a parse tree produced by TacticNotationsParser#atomic. def visitAtomic(self, ctx:TacticNotationsParser.AtomicContext): return self.visitChildren(ctx) @@ -50,4 +55,4 @@ class TacticNotationsVisitor(ParseTreeVisitor): -del TacticNotationsParser
\ No newline at end of file +del TacticNotationsParser diff --git a/doc/tools/coqrst/notations/html.py b/doc/tools/coqrst/notations/html.py index d91bbb64c4..44212d7889 100644 --- a/doc/tools/coqrst/notations/html.py +++ b/doc/tools/coqrst/notations/html.py @@ -42,6 +42,9 @@ class TacticNotationsToHTMLVisitor(TacticNotationsVisitor): def visitHole(self, ctx:TacticNotationsParser.HoleContext): tags.span(ctx.ID().getText()[1:], _class="hole") + def visitMeta(self, ctx:TacticNotationsParser.MetaContext): + tags.span(ctx.METACHAR().getText()[1:], _class="meta") + def visitWhitespace(self, ctx:TacticNotationsParser.WhitespaceContext): tags.span(" ") # TODO: no need for a <span> here diff --git a/doc/tools/coqrst/notations/parsing.py b/doc/tools/coqrst/notations/parsing.py index 73be6f26ed..506240d907 100644 --- a/doc/tools/coqrst/notations/parsing.py +++ b/doc/tools/coqrst/notations/parsing.py @@ -12,7 +12,7 @@ from .TacticNotationsParser import TacticNotationsParser from antlr4 import CommonTokenStream, InputStream -SUBSTITUTIONS = [("@bindings_list", "{+ (@id := @val) }"), +SUBSTITUTIONS = [#("@bindings_list", "{+ (@id := @val) }"), ("@qualid_or_string", "@id|@string")] def substitute(notation): diff --git a/doc/tools/coqrst/notations/plain.py b/doc/tools/coqrst/notations/plain.py index 5d4501892f..f6e82fc68e 100644 --- a/doc/tools/coqrst/notations/plain.py +++ b/doc/tools/coqrst/notations/plain.py @@ -41,6 +41,9 @@ class TacticNotationsToDotsVisitor(TacticNotationsVisitor): def visitHole(self, ctx:TacticNotationsParser.HoleContext): self.buffer.write("‘{}’".format(ctx.ID().getText()[1:])) + def visitMeta(self, ctx:TacticNotationsParser.MetaContext): + self.buffer.write(ctx.METACHAR().getText()[1:]) + def visitWhitespace(self, ctx:TacticNotationsParser.WhitespaceContext): self.buffer.write(" ") diff --git a/doc/tools/coqrst/notations/regexp.py b/doc/tools/coqrst/notations/regexp.py index cac6aaecbb..ea820c719e 100644 --- a/doc/tools/coqrst/notations/regexp.py +++ 
b/doc/tools/coqrst/notations/regexp.py @@ -47,6 +47,9 @@ class TacticNotationsToRegexpVisitor(TacticNotationsVisitor): def visitHole(self, ctx:TacticNotationsParser.HoleContext): self.buffer.write("([^();. \n]+)") # FIXME could allow more things + def visitMeta(self, ctx:TacticNotationsParser.MetaContext): + self.buffer.write(re.escape(ctx.METACHAR().getText()[1:])) + def visitWhitespace(self, ctx:TacticNotationsParser.WhitespaceContext): self.buffer.write(r"\s+") diff --git a/doc/tools/coqrst/notations/sphinx.py b/doc/tools/coqrst/notations/sphinx.py index 889bf70a46..26a5f69680 100644 --- a/doc/tools/coqrst/notations/sphinx.py +++ b/doc/tools/coqrst/notations/sphinx.py @@ -20,6 +20,8 @@ from .TacticNotationsVisitor import TacticNotationsVisitor from docutils import nodes from sphinx import addnodes +import sys + class TacticNotationsToSphinxVisitor(TacticNotationsVisitor): def defaultResult(self): return [] @@ -62,6 +64,12 @@ class TacticNotationsToSphinxVisitor(TacticNotationsVisitor): node = nodes.inline(hole, token_name, classes=["hole"]) return [addnodes.pending_xref(token_name, node, reftype='token', refdomain='std', reftarget=token_name)] + def visitMeta(self, ctx:TacticNotationsParser.MetaContext): + meta = ctx.METACHAR().getText() + metachar = meta[1:] # remove escape char + token_name = metachar + return [nodes.inline(metachar, token_name, classes=["meta"])] + def visitWhitespace(self, ctx:TacticNotationsParser.WhitespaceContext): return [nodes.Text(" ")] diff --git a/doc/tools/coqrst/repl/coqtop.py b/doc/tools/coqrst/repl/coqtop.py index 2df97d3dc5..efb5cb5505 100644 --- a/doc/tools/coqrst/repl/coqtop.py +++ b/doc/tools/coqrst/repl/coqtop.py @@ -59,7 +59,7 @@ class CoqTop: def next_prompt(self): "Wait for the next coqtop prompt, and return the output preceeding it." 
- self.coqtop.expect(CoqTop.COQTOP_PROMPT, timeout = 1) + self.coqtop.expect(CoqTop.COQTOP_PROMPT, timeout = 10) return self.coqtop.before def sendone(self, sentence): @@ -70,9 +70,12 @@ class CoqTop: """ # Suppress newlines, but not spaces: they are significant in notations sentence = re.sub(r"[\r\n]+", " ", sentence).strip() - # print("Sending {}".format(sentence)) self.coqtop.sendline(sentence) - output = self.next_prompt() + try: + output = self.next_prompt() + except: + print("Error while sending the following sentence to coqtop: {}".format(sentence)) + raise # print("Got {}".format(repr(output))) return output |
