 doc/sphinx/addendum/extraction.rst                    |  28
 doc/sphinx/addendum/generalized-rewriting.rst         |   8
 doc/sphinx/addendum/implicit-coercions.rst            |   2
 doc/sphinx/addendum/miscellaneous-extensions.rst      |   4
 doc/sphinx/addendum/omega.rst                         |   2
 doc/sphinx/addendum/program.rst                       |   4
 doc/sphinx/addendum/ring.rst                          |   2
 doc/sphinx/addendum/sprop.rst                         |   2
 doc/sphinx/addendum/type-classes.rst                  |  16
 doc/sphinx/addendum/universe-polymorphism.rst         |  22
 doc/sphinx/changes.rst                                |  16
 doc/sphinx/language/cic.rst                           |   7
 doc/sphinx/language/core/coinductive.rst              |   2
 doc/sphinx/language/core/conversion.rst               |  89
 doc/sphinx/language/core/definitions.rst              |  39
 doc/sphinx/language/core/inductive.rst                |   4
 doc/sphinx/language/core/modules.rst                  |  18
 doc/sphinx/language/core/records.rst                  |  12
 doc/sphinx/language/core/sections.rst                 |   2
 doc/sphinx/language/extensions/arguments-command.rst  |  22
 doc/sphinx/language/extensions/canonical.rst          |   4
 doc/sphinx/language/extensions/implicit-arguments.rst |   4
 doc/sphinx/language/extensions/match.rst              |   4
 doc/sphinx/practical-tools/coq-commands.rst           |   4
 doc/sphinx/proof-engine/ltac.rst                      |   4
 doc/sphinx/proof-engine/ltac2.rst                     |  18
 doc/sphinx/proof-engine/tactics.rst                   |   5
 doc/sphinx/proof-engine/vernacular-commands.rst       | 149
 doc/sphinx/proofs/automatic-tactics/auto.rst          |  10
 doc/sphinx/proofs/writing-proofs/proof-mode.rst       |   9
 doc/sphinx/proofs/writing-proofs/rewriting.rst        | 820
 doc/tools/docgram/common.edit_mlg                     |  42
 doc/tools/docgram/fullGrammar                         | 972
 doc/tools/docgram/orderedGrammar                      |  72
 tactics/redexpr.ml                                    |  13
 35 files changed, 1244 insertions(+), 1187 deletions(-)
diff --git a/doc/sphinx/addendum/extraction.rst b/doc/sphinx/addendum/extraction.rst
index 8e72bb4ffd..3c7449ee69 100644
--- a/doc/sphinx/addendum/extraction.rst
+++ b/doc/sphinx/addendum/extraction.rst
@@ -29,7 +29,7 @@ Generating ML Code
.. note::
In the following, a qualified identifier :token:`qualid`
- can be used to refer to any kind of Coq global "object" : constant,
+ can be used to refer to any kind of Coq global "object" : :term:`constant`,
inductive type, inductive constructor or module name.
The next two commands are meant to be used for rapid preview of
@@ -128,7 +128,7 @@ wants to generate an OCaml program. The optimizations can be split in two
groups: the type-preserving ones (essentially constant inlining and
reductions) and the non type-preserving ones (some function
abstractions of dummy types are removed when it is deemed safe in order
-to have more elegant types). Therefore some constants may not appear in the
+to have more elegant types). Therefore some :term:`constants <constant>` may not appear in the
resulting monolithic OCaml program. In the case of modular extraction,
even if some inlining is done, the inlined constants are nevertheless
printed, to ensure session-independent programs.
@@ -166,15 +166,15 @@ and commands:
.. flag:: Extraction AutoInline
- Default is on. The extraction mechanism inlines the bodies of
- some defined constants, according to some heuristics
+ Default is on. The extraction mechanism inlines the :term:`bodies <body>` of
+ some defined :term:`constants <constant>`, according to some heuristics
like size of bodies, uselessness of some arguments, etc.
Those heuristics are not always perfect; if you want to disable
this feature, turn this flag off.
.. cmd:: Extraction Inline {+ @qualid }
- In addition to the automatic inline feature, the constants
+ In addition to the automatic inline feature, the :term:`constants <constant>`
mentioned by this command will always be inlined during extraction.
.. cmd:: Extraction NoInline {+ @qualid }
@@ -194,24 +194,24 @@ and commands:
**Inlining and printing of a constant declaration:**
-The user can explicitly ask for a constant to be extracted by two means:
+The user can explicitly ask for a :term:`constant` to be extracted by two means:
* by mentioning it on the extraction command line
- * by extracting the whole Coq module of this constant.
+ * by extracting the whole Coq module of this :term:`constant`.
-In both cases, the declaration of this constant will be present in the
-produced file. But this same constant may or may not be inlined in
+In both cases, the declaration of this :term:`constant` will be present in the
+produced file. But this same :term:`constant` may or may not be inlined in
the following terms, depending on the automatic/custom inlining mechanism.
-For the constants non-explicitly required but needed for dependency
+For the :term:`constants <constant>` non-explicitly required but needed for dependency
reasons, there are two cases:
* If an inlining decision is taken, whether automatically or not,
- all occurrences of this constant are replaced by its extracted body,
- and this constant is not declared in the generated file.
+ all occurrences of this :term:`constant` are replaced by its extracted :term:`body`,
+ and this :term:`constant` is not declared in the generated file.
- * If no inlining decision is taken, the constant is normally
+ * If no inlining decision is taken, the :term:`constant` is normally
declared in the produced file.
Extra elimination of useless arguments
@@ -264,7 +264,7 @@ what ML term corresponds to a given axiom.
.. cmd:: Extract Constant @qualid {* @string__tv } => {| @ident | @string }
- Give an ML extraction for the given constant.
+ Give an ML extraction for the given :term:`constant`.
:n:`@string__tv`
If the type scheme axiom is an arity (a sequence of products followed
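
As a small illustration of the inlining behavior discussed in this file, the
sketch below (the names ``double`` and ``quad`` are ours) marks a helper for
inlining; its body should then be substituted at its use sites in the extracted
code instead of being declared separately:

.. coqtop:: all

   Require Extraction.
   Definition double (n : nat) := n + n.
   Definition quad (n : nat) := double (double n).
   Extraction Inline double.
   Recursive Extraction quad.   (* the OCaml code for quad should contain double's body inlined *)
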
diff --git a/doc/sphinx/addendum/generalized-rewriting.rst b/doc/sphinx/addendum/generalized-rewriting.rst
index 9ac05fab2e..930d286010 100644
--- a/doc/sphinx/addendum/generalized-rewriting.rst
+++ b/doc/sphinx/addendum/generalized-rewriting.rst
@@ -699,7 +699,7 @@ other. If a signature mentions a relation ``R`` on the left of an
arrow ``==>``, then the signature also applies for any relation ``S`` that is
smaller than ``R``, and the inverse applies on the right of an arrow. One
can then declare only a few morphisms instances that generate the
-complete set of signatures for a particular constant. By default, the
+complete set of signatures for a particular :term:`constant`. By default, the
only declared subrelation is ``iff``, which is a subrelation of ``impl`` and
``inverse impl`` (the dual of implication). That’s why we can declare only
two morphisms for conjunction: ``Proper (impl ==> impl ==> impl) and`` and
@@ -714,8 +714,8 @@ example of a mostly user-space extension of the algorithm.
Constant unfolding
~~~~~~~~~~~~~~~~~~
-The resolution tactic is based on typeclasses and hence regards user-
-defined constants as transparent by default. This may slow down the
+The resolution tactic is based on typeclasses and hence regards user-defined
+:term:`constants <constant>` as transparent by default. This may slow down the
resolution due to a lot of unifications (all the declared ``Proper``
instances are tried at each node of the search tree). To speed it up,
declare your constant as rigid for proof search using the command
@@ -901,7 +901,7 @@ Hint databases created for :tacn:`autorewrite` can also be used
by :tacn:`rewrite_strat` using the ``hints`` strategy that applies any of the
lemmas at the current subterm. The ``terms`` strategy takes the lemma
names directly as arguments. The ``eval`` strategy expects a reduction
-expression (see :ref:`performingcomputations`) and succeeds
+expression (see :ref:`applyingconversionrules`) and succeeds
if it reduces the subterm under consideration. The ``fold`` strategy takes
a :token:`term` and tries to *unify* it to the current subterm, converting it to :token:`term`
on success. It is stronger than the tactic ``fold``.
diff --git a/doc/sphinx/addendum/implicit-coercions.rst b/doc/sphinx/addendum/implicit-coercions.rst
index 09b2bb003a..c1b2200741 100644
--- a/doc/sphinx/addendum/implicit-coercions.rst
+++ b/doc/sphinx/addendum/implicit-coercions.rst
@@ -190,7 +190,7 @@ Use :n:`:>` instead of :n:`:` before the
.. cmd:: Identity Coercion @ident : @class >-> @class
If ``C`` is the source `class` and ``D`` the destination, we check
- that ``C`` is a constant with a body of the form
+ that ``C`` is a :term:`constant` with a :term:`body` of the form
:g:`fun (x₁:T₁)..(xₙ:Tₙ) => D t₁..tₘ` where `m` is the
number of parameters of ``D``. Then we define an identity
function with type :g:`forall (x₁:T₁)..(xₙ:Tₙ)(y:C x₁..xₙ),D t₁..tₘ`,
diff --git a/doc/sphinx/addendum/miscellaneous-extensions.rst b/doc/sphinx/addendum/miscellaneous-extensions.rst
index 7d30cae525..8d70ffec01 100644
--- a/doc/sphinx/addendum/miscellaneous-extensions.rst
+++ b/doc/sphinx/addendum/miscellaneous-extensions.rst
@@ -15,12 +15,12 @@ it provides the following command:
standing for the existential variables but they are shelved, as
described in :tacn:`shelve`).
- When the proof ends two constants are defined:
+ When the proof ends two :term:`constants <constant>` are defined:
+ The first one is named :n:`@ident__1` and is defined as the proof of the
shelved goal (which is also the value of :g:`?x`). It is always
transparent.
- + The second one is named :n:`@ident__2`. It has type :n:`@type`, and its body is
+ + The second one is named :n:`@ident__2`. It has type :n:`@type`, and its :term:`body` is
the proof of the initially visible goal. It is opaque if the proof
ends with :cmd:`Qed`, and transparent if the proof ends with :cmd:`Defined`.
diff --git a/doc/sphinx/addendum/omega.rst b/doc/sphinx/addendum/omega.rst
index 0997c5e868..86bb0502c6 100644
--- a/doc/sphinx/addendum/omega.rst
+++ b/doc/sphinx/addendum/omega.rst
@@ -147,7 +147,7 @@ Options
.. flag:: Omega UseLocalDefs
- This flag (on by default) allows :tacn:`omega` to use the bodies of local
+ This flag (on by default) allows :tacn:`omega` to use the :term:`bodies <body>` of local
variables.
.. flag:: Omega System
diff --git a/doc/sphinx/addendum/program.rst b/doc/sphinx/addendum/program.rst
index 8f2b51ccce..a011c81f15 100644
--- a/doc/sphinx/addendum/program.rst
+++ b/doc/sphinx/addendum/program.rst
@@ -149,7 +149,7 @@ when reasoning with equality on the subset types themselves.
The next two commands are similar to their standard counterparts
:cmd:`Definition` and :cmd:`Fixpoint`
-in that they define constants. However, they may require the user to
+in that they define :term:`constants <constant>`. However, they may require the user to
prove some goals to construct the final definitions.
@@ -173,7 +173,7 @@ term :n:`@term__0`, checking that the type of :n:`@term__0` is coercible to
set of obligations generated during the interpretation of :n:`@term__0`
and the aforementioned coercion derivation are solved.
-.. seealso:: Sections :ref:`vernac-controlling-the-reduction-strategies`, :tacn:`unfold`
+.. seealso:: Sections :ref:`controlling-the-reduction-strategies`, :tacn:`unfold`
.. _program_fixpoint:
diff --git a/doc/sphinx/addendum/ring.rst b/doc/sphinx/addendum/ring.rst
index 954c2c1446..6b7b588137 100644
--- a/doc/sphinx/addendum/ring.rst
+++ b/doc/sphinx/addendum/ring.rst
@@ -1,4 +1,4 @@
-.. |bdi| replace:: :math:`\beta\delta\iota`
+.. |bdi| replace:: βδι
.. |ra| replace:: :math:`\rightarrow_{\beta\delta\iota}`
.. |la| replace:: :math:`\leftarrow_{\beta\delta\iota}`
.. |eq| replace:: `=`:sub:`(by the main correctness theorem)`
diff --git a/doc/sphinx/addendum/sprop.rst b/doc/sphinx/addendum/sprop.rst
index 8c20e08154..281473231d 100644
--- a/doc/sphinx/addendum/sprop.rst
+++ b/doc/sphinx/addendum/sprop.rst
@@ -44,7 +44,7 @@ are convertible:
exact Hx.
Qed.
-Since we have definitional :ref:`eta-expansion` for
+Since we have definitional :ref:`eta-expansion-sect` for
functions, the property of being a type of definitionally irrelevant
values is impredicative, and so is :math:`\SProp`:
diff --git a/doc/sphinx/addendum/type-classes.rst b/doc/sphinx/addendum/type-classes.rst
index 8dc0030115..45741b4bb8 100644
--- a/doc/sphinx/addendum/type-classes.rst
+++ b/doc/sphinx/addendum/type-classes.rst
@@ -315,7 +315,7 @@ Summary of the commands
inside records, and the trivial projection of an instance of such a
class is convertible to the instance itself. This can be useful to
make instances of existing objects easily and to reduce proof size by
- not inserting useless projections. The class constant itself is
+ not inserting useless projections. The class :term:`constant` itself is
declared rigid during resolution so that the class abstraction is
maintained.
@@ -326,7 +326,7 @@ Summary of the commands
.. cmd:: Existing Class @qualid
- This variant declares a class from a previously declared constant or
+ This variant declares a class from a previously declared :term:`constant` or
inductive definition. No methods or instances are defined.
.. warn:: @ident is already declared as a typeclass
@@ -363,7 +363,7 @@ Summary of the commands
This attribute can be used to leave holes or not provide all
fields in the definition of an instance and open the tactic mode
- to fill them. It works exactly as if no body had been given and
+ to fill them. It works exactly as if no :term:`body` had been given and
the :tacn:`refine` tactic has been used first.
.. cmd:: Declare Instance @ident_decl {* @binder } : @term {? @hint_info }
@@ -377,7 +377,7 @@ Summary of the commands
.. cmd:: Existing Instance @qualid {? @hint_info }
Existing Instances {+ @qualid } {? %| @natural }
- Adds a constant whose type ends with
+ Adds a :term:`constant` whose type ends with
an applied typeclass to the instance database with an optional
priority :token:`natural`. It can be used for redeclaring instances at the end of
sections, or declaring structure projections as instances. This is
@@ -418,7 +418,7 @@ Summary of the commands
unifier. When considering local hypotheses, we use the transparent
state of the first hint database given. Using an empty database
(created with :cmd:`Create HintDb` for example) with unfoldable variables and
- constants as the first argument of ``typeclasses eauto`` hence makes
+ :term:`constants <constant>` as the first argument of ``typeclasses eauto`` hence makes
resolution with the local hypotheses use full conversion during
unification.
@@ -494,7 +494,7 @@ Typeclasses Transparent, Typeclasses Opaque
Make :token:`qualid` opaque for typeclass search.
A shortcut for :cmd:`Hint Opaque` :n:`{+ @qualid } : typeclass_instances`.
- It is useful when some constants prevent some unifications and make
+ It is useful when some :term:`constants <constant>` prevent some unifications and make
resolution fail. It is also useful to declare constants which
should never be unfolded during proof search, like fixpoints or
anything which does not look like an abbreviation. This can
@@ -502,7 +502,7 @@ Typeclasses Transparent, Typeclasses Opaque
indexed by such rigid constants (see
:ref:`thehintsdatabasesforautoandeauto`).
-By default, all constants and local variables are considered transparent. One
+By default, all :term:`constants <constant>` and local variables are considered transparent. One
should take care not to make opaque any constant that is used to abbreviate a
type, like:
@@ -531,7 +531,7 @@ Settings
*unify* the goal with the conclusion of the hint. This can drastically
improve performance by calling unification less often, matching
syntactic patterns being very quick. This also provides more control
- on the triggering of instances. For example, forcing a constant to
+ on the triggering of instances. For example, forcing a :term:`constant` to
explicitly appear in the pattern will make it never apply on a goal
where there is a hole in that place.
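
A short sketch combining :cmd:`Existing Class`, :cmd:`Existing Instance` and
:cmd:`Typeclasses Opaque` (the ``Monoid`` record and ``nat_monoid`` instance
are illustrative names, not part of the manual):

.. coqtop:: in

   Record Monoid (A : Type) := { neutral : A; op : A -> A -> A }.
   Existing Class Monoid.        (* reuse an ordinary record as a class *)

   Definition nat_monoid : Monoid nat := {| neutral := 0; op := Nat.add |}.
   Existing Instance nat_monoid. (* register an existing constant as an instance *)

   Typeclasses Opaque Nat.add.   (* keep Nat.add rigid during instance resolution *)
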
diff --git a/doc/sphinx/addendum/universe-polymorphism.rst b/doc/sphinx/addendum/universe-polymorphism.rst
index d0b05a03f9..773567b803 100644
--- a/doc/sphinx/addendum/universe-polymorphism.rst
+++ b/doc/sphinx/addendum/universe-polymorphism.rst
@@ -24,7 +24,7 @@ and *monomorphic* definitions is given by the identity function:
Definition identity {A : Type} (a : A) := a.
-By default, constant declarations are monomorphic, hence the identity
+By default, :term:`constant` declarations are monomorphic, hence the identity
function declares a global universe (say ``Top.1``) for its domain.
Subsequently, if we try to self-apply the identity, we will get an
error:
@@ -150,7 +150,7 @@ Polymorphic, Monomorphic
attribute is used to override the default.
Many other commands can be used to declare universe polymorphic or
-monomorphic constants depending on whether the :flag:`Universe
+monomorphic :term:`constants <constant>` depending on whether the :flag:`Universe
Polymorphism` flag is on or the :attr:`universes(polymorphic)`
attribute is used:
@@ -341,13 +341,13 @@ Conversion and unification
The semantics of conversion and unification have to be modified a
little to account for the new universe instance arguments to
polymorphic references. The semantics respect the fact that
-definitions are transparent, so indistinguishable from their bodies
+definitions are transparent, so indistinguishable from their :term:`bodies <body>`
during conversion.
This is accomplished by changing one rule of unification, the first-
order approximation rule, which applies when two applicative terms
with the same head are compared. It tries to short-cut unfolding by
-comparing the arguments directly. In case the constant is universe
+comparing the arguments directly. In case the :term:`constant` is universe
polymorphic, we allow this rule to fire only when unifying the
universes results in instantiating a so-called flexible universe
variables (not given by the user). Similarly for conversion, if such
@@ -362,7 +362,7 @@ Minimization
Universe polymorphism with cumulativity tends to generate many useless
inclusion constraints in general. Typically at each application of a
-polymorphic constant :g:`f`, if an argument has expected type :g:`Type@{i}`
+polymorphic :term:`constant` :g:`f`, if an argument has expected type :g:`Type@{i}`
and is given a term of type :g:`Type@{j}`, a :math:`j ≤ i` constraint will be
generated. It is however often the case that an equation :math:`j = i` would
be more appropriate, when :g:`f`\'s universes are fresh for example.
@@ -550,7 +550,7 @@ underscore or by omitting the annotation to a polymorphic definition.
.. flag:: Private Polymorphic Universes
This flag, on by default, removes universes which appear only in
- the body of an opaque polymorphic definition from the definition's
+ the :term:`body` of an opaque polymorphic definition from the definition's
universe arguments. As such, no value needs to be provided for
these universes when instantiating the definition. Universe
constraints are automatically adjusted.
@@ -563,18 +563,18 @@ underscore or by omitting the annotation to a polymorphic definition.
Proof. exact Type. Qed.
Print foo.
- The universe :g:`Top.xxx` for the :g:`Type` in the body cannot be accessed, we
+ The universe :g:`Top.xxx` for the :g:`Type` in the :term:`body` cannot be accessed, we
only care that one exists for any instantiation of the universes
appearing in the type of :g:`foo`. This is guaranteed when the
transitive constraint ``Set <= Top.xxx < i`` is verified. Then when
- using the constant we don't need to put a value for the inner
+ using the :term:`constant` we don't need to put a value for the inner
universe:
.. coqtop:: all
Check foo@{_}.
- and when not looking at the body we don't mention the private
+ and when not looking at the :term:`body` we don't mention the private
universe:
.. coqtop:: all
@@ -643,11 +643,11 @@ sections, except in the following ways:
(in the above example the anonymous :g:`Type` constrains polymorphic
universe :g:`i` to be strictly smaller.)
-- no monomorphic constant or inductive may be declared if polymorphic
+- no monomorphic :term:`constant` or inductive may be declared if polymorphic
universes or universe constraints are present.
These restrictions are required in order to produce a sensible result
-when closing the section (the requirement on constants and inductives
+when closing the section (the requirement on :term:`constants <constant>` and inductive types
is stricter than the one on constraints, because constants and
inductives are abstracted by *all* the section's polymorphic universes
and constraints).
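
A minimal sketch contrasting a monomorphic and a polymorphic identity function
(``pidentity`` is an illustrative name):

.. coqtop:: all

   Definition identity {A : Type} (a : A) := a.
   Polymorphic Definition pidentity {A : Type} (a : A) := a.

   Fail Check (identity (@identity)).   (* universe inconsistency: a single fixed global universe *)
   Check (pidentity (@pidentity)).      (* accepted: each occurrence gets a fresh universe instance *)
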
diff --git a/doc/sphinx/changes.rst b/doc/sphinx/changes.rst
index ea099eb52e..2ea8694ad6 100644
--- a/doc/sphinx/changes.rst
+++ b/doc/sphinx/changes.rst
@@ -1933,7 +1933,7 @@ Changes in 8.12.1
**Kernel**
- **Fixed:** Incompleteness of conversion checking on problems
- involving :ref:`eta-expansion` and :ref:`cumulative universe
+ involving :ref:`eta-expansion-sect` and :ref:`cumulative universe
polymorphic inductive types <cumulative>` (`#12738
<https://github.com/coq/coq/pull/12738>`_, fixes `#7015
<https://github.com/coq/coq/issues/7015>`_, by Gaëtan Gilbert).
@@ -5089,7 +5089,7 @@ Coq version 8.5 contains the result of five specific long-term projects:
Matthieu Sozeau.
- An implementation of primitive projections with
- :math:`\eta`\-conversion bringing significant performance improvements
+ η-conversion bringing significant performance improvements
when using records by Matthieu Sozeau.
The full integration of the proof engine, by Arnaud Spiwack and
@@ -5140,10 +5140,10 @@ messages in case of inconsistencies and allowing higher-level algorithms
like unification to be entirely type safe. The internal representation
of universes has been modified but this is invisible to the user.
-The underlying logic has been extended with :math:`\eta`\-conversion for
+The underlying logic has been extended with η-conversion for
records defined with primitive projections by Matthieu Sozeau. This
-additional form of :math:`\eta`\-conversion is justified using the same
-principle than the previously added :math:`\eta`\-conversion for function
+additional form of η-conversion is justified using the same
+principle than the previously added η-conversion for function
types, based on formulations of the Calculus of Inductive Constructions
with typed equality. Primitive projections, which do not carry the
parameters of the record and are rigid names (not defined as a
@@ -6160,9 +6160,9 @@ contributed many various refinements of CoqIDE.
Coq 8.4 also comes with a bunch of various smaller-scale changes
and improvements regarding the different components of the system.
-The underlying logic has been extended with :math:`\eta`-conversion
+The underlying logic has been extended with η-conversion
thanks to Hugo Herbelin, Stéphane Glondu and Benjamin Grégoire. The
-addition of :math:`\eta`-conversion is justified by the confidence that
+addition of η-conversion is justified by the confidence that
the formulation of the Calculus of Inductive Constructions based on
typed equality (such as the one considered in Lee and Werner to build a
set-theoretic model of CIC :cite:`LeeWerner11`) is
@@ -6171,7 +6171,7 @@ applicable to the concrete implementation of Coq.
The underlying logic benefited also from a refinement of the guard
condition for fixpoints by Pierre Boutillier, the point being that it is
safe to propagate the information about structurally smaller arguments
-through :math:`\beta`-redexes that are blocked by the “match”
+through β-redexes that are blocked by the “match”
construction (blocked commutative cuts).
Relying on the added permissiveness of the guard condition, Hugo
diff --git a/doc/sphinx/language/cic.rst b/doc/sphinx/language/cic.rst
index 1cfd8dac50..9f097b4fe9 100644
--- a/doc/sphinx/language/cic.rst
+++ b/doc/sphinx/language/cic.rst
@@ -1,8 +1,9 @@
Typing rules
====================================
-The underlying formal language of Coq is a *Calculus of Inductive
-Constructions* (|Cic|) whose inference rules are presented in this
+The underlying formal language of Coq is a
+:gdef:`Calculus of Inductive Constructions` (|Cic|) whose inference rules
+are presented in this
chapter. The history of this formalism as well as pointers to related
work are provided in a separate chapter; see *Credits*.
@@ -146,7 +147,7 @@ In the global environment,
:math:`(c:T)`, indicating that :math:`c` is of the type :math:`T`. *Definitions*
are written as :math:`c:=t:T`, indicating that :math:`c` has the value :math:`t`
and type :math:`T`. We shall call
-such names *constants*. For the rest of the chapter, the :math:`E;~c:T` denotes
+such names :term:`constants <constant>`. For the rest of the chapter, the :math:`E;~c:T` denotes
the global environment :math:`E` enriched with the assumption :math:`c:T`.
Similarly, :math:`E;~c:=t:T` denotes the global environment :math:`E` enriched with the
definition :math:`(c:=t:T)`.
diff --git a/doc/sphinx/language/core/coinductive.rst b/doc/sphinx/language/core/coinductive.rst
index e742139134..61952c1570 100644
--- a/doc/sphinx/language/core/coinductive.rst
+++ b/doc/sphinx/language/core/coinductive.rst
@@ -196,5 +196,5 @@ Top-level definitions of co-recursive functions
If :n:`@term` is omitted, :n:`@type` is required and Coq enters proof mode.
This can be used to define a term incrementally, in particular by relying on the :tacn:`refine` tactic.
- In this case, the proof should be terminated with :cmd:`Defined` in order to define a constant
+ In this case, the proof should be terminated with :cmd:`Defined` in order to define a :term:`constant`
for which the computational behavior is relevant. See :ref:`proof-editing-mode`.
diff --git a/doc/sphinx/language/core/conversion.rst b/doc/sphinx/language/core/conversion.rst
index 09c619338b..06b6c61ea9 100644
--- a/doc/sphinx/language/core/conversion.rst
+++ b/doc/sphinx/language/core/conversion.rst
@@ -1,11 +1,13 @@
.. _Conversion-rules:
Conversion rules
---------------------
+----------------
-In |Cic|, there is an internal reduction mechanism. In particular, it
-can decide if two programs are *intentionally* equal (one says
-:term:`convertible`). Convertibility is described in this section.
+Coq has conversion rules that can be used to determine if two
+terms are equal by definition, or :term:`convertible`.
+Conversion rules consist of reduction rules and expansion rules.
+See :ref:`applyingconversionrules`,
+which describes tactics that apply these conversion rules.
α-conversion
~~~~~~~~~~~~
@@ -14,56 +16,44 @@ Two terms are :gdef:`α-convertible <alpha-convertible>` if they are syntactical
equal ignoring differences in the names of variables bound within the expression.
For example `forall x, x + 0 = x` is α-convertible with `forall y, y + 0 = y`.
-.. _beta-reduction:
-
β-reduction
~~~~~~~~~~~
-We want to be able to identify some terms as we can identify the
-application of a function to a given argument with its result. For
-instance the identity function over a given type :math:`T` can be written
-:math:`λx:T.~x`. In any global environment :math:`E` and local context
-:math:`Γ`, we want to identify any object :math:`a` (of type
-:math:`T`) with the application :math:`((λ x:T.~x)~a)`. We define for
-this a *reduction* (or a *conversion*) rule we call :math:`β`:
+:gdef:`β-reduction <beta-reduction>` reduces a :gdef:`beta-redex`, which is
+a term in the form `(fun x => t) u`. (Beta-redex
+is short for "beta-reducible expression", a term from lambda calculus.
+See `Beta reduction <https://en.wikipedia.org/wiki/Beta_normal_form#Beta_reduction>`_
+for more background.)
-.. math::
+Formally, in any :term:`global environment` :math:`E` and :term:`local context`
+:math:`Γ`, the beta-reduction rule is:
- E[Γ] ⊢ ((λx:T.~t)~u)~\triangleright_β~\subst{t}{x}{u}
+.. inference:: Beta
+
+ --------------
+ E[Γ] ⊢ ((λx:T.~t)~u)~\triangleright_β~\subst{t}{x}{u}
We say that :math:`\subst{t}{x}{u}` is the *β-contraction* of
:math:`((λx:T.~t)~u)` and, conversely, that :math:`((λ x:T.~t)~u)` is the
*β-expansion* of :math:`\subst{t}{x}{u}`.
-According to β-reduction, terms of the *Calculus of Inductive
-Constructions* enjoy some fundamental properties such as confluence,
+.. todo: :term:`Calculus of Inductive Constructions` fails to build in CI for some reason :-()
+
+Terms of the *Calculus of Inductive Constructions*
+enjoy some fundamental properties such as confluence,
strong normalization, subject reduction. These results are
theoretically of great importance but we will not detail them here and
refer the interested reader to :cite:`Coq85`.
-
-.. _iota-reduction:
-
-ι-reduction
-~~~~~~~~~~~
-
-A specific conversion rule is associated with the inductive objects in
-the global environment. We shall give later on (see Section
-:ref:`Well-formed-inductive-definitions`) the precise rules but it
-just says that a destructor applied to an object built from a
-constructor behaves as expected. This reduction is called ι-reduction
-and is more precisely studied in :cite:`Moh93,Wer94`.
-
-
-.. _delta-reduction:
+.. _delta-reduction-sect:
δ-reduction
~~~~~~~~~~~
-We may have variables defined in local contexts or constants defined
-in the global environment. It is legal to identify such a reference
-with its value, that is to expand (or unfold) it into its value. This
-reduction is called δ-reduction and shows as follows.
+:gdef:`δ-reduction <delta-reduction>` replaces variables defined in
+:term:`local contexts <local context>`
+or :term:`constants <constant>` defined in the :term:`global environment` with their values.
+:gdef:`Unfolding <unfold>` means to replace a constant by its definition. Formally, this is:
.. inference:: Delta-Local
@@ -79,16 +69,29 @@ reduction is called δ-reduction and shows as follows.
--------------
E[Γ] ⊢ c~\triangleright_δ~t
+:term:`Delta-reduction <delta-reduction>` only unfolds :term:`constants <constant>` that are
+marked :gdef:`transparent`. :gdef:`Opaque <opaque>` is the opposite of
+transparent; :term:`delta-reduction` doesn't unfold opaque constants.
+
+ι-reduction
+~~~~~~~~~~~
-.. _zeta-reduction:
+A specific conversion rule is associated with the inductive objects in
+the global environment. We shall give later on (see Section
+:ref:`Well-formed-inductive-definitions`) the precise rules but it
+just says that a destructor applied to an object built from a
+constructor behaves as expected. This reduction is called
+:gdef:`ι-reduction <iota-reduction>`
+and is more precisely studied in :cite:`Moh93,Wer94`.
ζ-reduction
~~~~~~~~~~~
-Coq allows also to remove local definitions occurring in terms by
-replacing the defined variable by its value. The declaration being
-destroyed, this reduction differs from δ-reduction. It is called
-ζ-reduction and shows as follows.
+:gdef:`ζ-reduction <zeta-reduction>` removes :ref:`let-in definitions <let-in>`
+in terms by
+replacing the defined variable by its value. One way this reduction differs from
+δ-reduction is that the declaration is removed from the term entirely.
+Formally, this is:
.. inference:: Zeta
@@ -99,12 +102,12 @@ destroyed, this reduction differs from δ-reduction. It is called
E[Γ] ⊢ \letin{x}{u:U}{t}~\triangleright_ζ~\subst{t}{x}{u}
-.. _eta-expansion:
+.. _eta-expansion-sect:
η-expansion
~~~~~~~~~~~
-Another important concept is η-expansion. It is legal to identify any
+Another important concept is :gdef:`η-expansion <eta-expansion>`. It is legal to identify any
term :math:`t` of functional type :math:`∀ x:T,~U` with its so-called η-expansion
.. math::
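
The reductions described above can be observed directly with the :cmd:`Eval`
command and selective ``cbv`` flags. A minimal sketch (the constant ``two`` is
ours, purely for illustration):

.. coqtop:: all

   Definition two := 2.

   Eval cbv beta in ((fun x : nat => x + 0) two).   (* β: two + 0 *)
   Eval cbv delta [two] in (two + two).             (* δ: 2 + 2 *)
   Eval cbv zeta in (let x := 1 in x + x).          (* ζ: 1 + 1 *)
   Eval cbv iota in (match 0 with 0 => true | S _ => false end).   (* ι: true *)
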
diff --git a/doc/sphinx/language/core/definitions.rst b/doc/sphinx/language/core/definitions.rst
index 7196c082ed..fcf61a5bf4 100644
--- a/doc/sphinx/language/core/definitions.rst
+++ b/doc/sphinx/language/core/definitions.rst
@@ -31,43 +31,48 @@ for :n:`let @ident := fun {+ @binder} => @term__1 in @term__2`.
single: ... <: ...
single: ... <<: ...
+.. _type-cast:
+
Type cast
---------
.. insertprodn term_cast term_cast
.. prodn::
- term_cast ::= @term10 <: @type
+ term_cast ::= @term10 : @type
+ | @term10 <: @type
| @term10 <<: @type
- | @term10 : @type
| @term10 :>
The expression :n:`@term10 : @type` is a type cast expression. It enforces
the type of :n:`@term10` to be :n:`@type`.
-:n:`@term10 <: @type` locally sets up the virtual machine for checking that
-:n:`@term10` has type :n:`@type`.
+:n:`@term10 <: @type` specifies that the virtual machine will be used
+to type check that :n:`@term10` has type :n:`@type` (see :tacn:`vm_compute`).
+
+:n:`@term10 <<: @type` specifies that compilation to OCaml will be used
+to type check that :n:`@term10` has type :n:`@type` (see :tacn:`native_compute`).
-:n:`@term10 <<: @type` uses native compilation for checking that :n:`@term10`
-has type :n:`@type`.
+:n:`@term10 :>` casts to the support type in Program mode.
+See :ref:`syntactic_control`.
.. _gallina-definitions:
Top-level definitions
---------------------
-Definitions extend the global environment with associations of names to terms.
+Definitions extend the global environment by associating names to terms.
A definition can be seen as a way to give a meaning to a name or as a
way to abbreviate a term. In any case, the name can later be replaced at
any time by its definition.
The operation of unfolding a name into its definition is called
-:math:`\delta`-conversion (see Section :ref:`delta-reduction`). A
-definition is accepted by the system if and only if the defined term is
+:term:`delta-reduction`.
+A definition is accepted by the system if and only if the defined term is
well-typed in the current context of the definition and if the name is
not already used. The name defined by the definition is called a
-*constant* and the term it refers to is its *body*. A definition has a
-type which is the type of its body.
+:gdef:`constant` and the term it refers to is its :gdef:`body`. A definition has
+a type, which is the type of its :term:`body`.
A formal presentation of constants and environments is given in
Section :ref:`typing-rules`.
@@ -96,7 +101,7 @@ Section :ref:`typing-rules`.
If :n:`@term` is omitted, :n:`@type` is required and Coq enters proof mode.
This can be used to define a term incrementally, in particular by relying on the :tacn:`refine` tactic.
- In this case, the proof should be terminated with :cmd:`Defined` in order to define a constant
+ In this case, the proof should be terminated with :cmd:`Defined` in order to define a :term:`constant`
for which the computational behavior is relevant. See :ref:`proof-editing-mode`.
The form :n:`Definition @ident : @type := @term` checks that the type of :n:`@term`
@@ -151,7 +156,7 @@ The basic assertion command is:
over a mutually inductive assumption, or that assert mutually dependent
statements in some mutual co-inductive type. It is equivalent to
:cmd:`Fixpoint` or :cmd:`CoFixpoint` but using tactics to build the proof of
- the statements (or the body of the specification, depending on the point of
+ the statements (or the :term:`body` of the specification, depending on the point of
view). The inductive or co-inductive types on which the induction or
coinduction has to be done is assumed to be non ambiguous and is guessed by
the system.
@@ -202,10 +207,10 @@ the proof and adds it to the global environment.
statements still to be proved. Nonetheless, this practice is discouraged
and may stop working in future versions.
- #. Proofs ended by :cmd:`Qed` are declared opaque. Their content cannot be
- unfolded (see :ref:`performingcomputations`), thus
- realizing some form of *proof-irrelevance*. To be able to unfold a
- proof, the proof should be ended by :cmd:`Defined`.
+ #. Proofs ended by :cmd:`Qed` are declared :term:`opaque`. Their content cannot be
+ unfolded (see :ref:`applyingconversionrules`), thus
+ realizing some form of *proof-irrelevance*.
+ Proofs that end with :cmd:`Defined` can be unfolded.
#. :cmd:`Proof` is recommended but can currently be omitted. On the opposite
side, :cmd:`Qed` (or :cmd:`Defined`) is mandatory to validate a proof.
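
As a small sketch of the virtual-machine cast and of the :cmd:`Qed`/:cmd:`Defined`
opacity distinction described above (all names below are ours):

.. coqtop:: all

   (* <: asks the kernel to check this conversion with the virtual machine *)
   Definition vm_checked : 100 * 100 = 10000 := eq_refl 10000 <: 100 * 100 = 10000.

   Definition transparent_two : nat := 2.
   Lemma opaque_two : nat. Proof. exact 2. Qed.

   Eval cbv delta in (transparent_two, opaque_two).
   (* (2, opaque_two): the transparent constant unfolds, the Qed-ended one does not *)
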
diff --git a/doc/sphinx/language/core/inductive.rst b/doc/sphinx/language/core/inductive.rst
index 4e892f709d..971a856899 100644
--- a/doc/sphinx/language/core/inductive.rst
+++ b/doc/sphinx/language/core/inductive.rst
@@ -25,7 +25,7 @@ Inductive types
respectively correspond to elimination principles on :g:`Type`, :g:`Prop`,
:g:`Set` and :g:`SProp`. The type of the destructors
expresses structural induction/recursion principles over objects of
- type :n:`@ident`. The constant :n:`@ident`\ ``_ind`` is always
+ type :n:`@ident`. The :term:`constant` :n:`@ident`\ ``_ind`` is always
generated, whereas :n:`@ident`\ ``_rec`` and :n:`@ident`\ ``_rect``
may be impossible to derive (for example, when :n:`@ident` is a
proposition).
@@ -415,7 +415,7 @@ constructions.
If :n:`@term` is omitted, :n:`@type` is required and Coq enters proof mode.
This can be used to define a term incrementally, in particular by relying on the :tacn:`refine` tactic.
- In this case, the proof should be terminated with :cmd:`Defined` in order to define a constant
+ In this case, the proof should be terminated with :cmd:`Defined` in order to define a :term:`constant`
for which the computational behavior is relevant. See :ref:`proof-editing-mode`.
This command accepts the :attr:`using` attribute.
diff --git a/doc/sphinx/language/core/modules.rst b/doc/sphinx/language/core/modules.rst
index 2e678c49d8..c42d444089 100644
--- a/doc/sphinx/language/core/modules.rst
+++ b/doc/sphinx/language/core/modules.rst
@@ -17,7 +17,7 @@ and :math:`id` an identifier, then :math:`p′.id` is an access path.
**Structure element.** A structure element is denoted by :math:`e` and
-is either a definition of a constant, an assumption, a definition of
+is either a definition of a :term:`constant`, an assumption, a definition of
an inductive, a definition of a module, an alias of a module or a module
type abbreviation.
@@ -134,7 +134,7 @@ is also used to terminate :ref:`Sections<section-mechanism>`.
:n:`End @ident` closes the interactive module or module type :token:`ident`.
If the module type was given, the command verifies that the content of the module
matches the module type. If the module is not a
-functor, its components (constants, inductive types, submodules etc.)
+functor, its components (:term:`constants <constant>`, inductive types, submodules etc.)
are now available through the dot notation.
.. exn:: No such label @ident.
@@ -170,7 +170,7 @@ are now available through the dot notation.
hints and the like valid for :n:`@ident__1` are the ones defined in :n:`@module_type`
rather then those defined in :n:`@ident__2` (or the module body).
#. Within an interactive module type definition, the :cmd:`Parameter` command declares a
- constant instead of definining a new axiom (which it does when not in a module type definition).
+ :term:`constant` instead of defining a new axiom (which it does when not in a module type definition).
#. Assumptions such as :cmd:`Axiom` that include the :n:`Inline` clause will be automatically
expanded when the functor is applied, except when the function application is prefixed by ``!``.
@@ -250,14 +250,14 @@ are now available through the dot notation.
make only those names available with short names, not other names
defined in the module nor will it activate other features.
- The names to import may be constants, inductive types and
+ The names to import may be :term:`constants <constant>`, inductive types and
constructors, and notation aliases (for instance, Ltac definitions
cannot be selectively imported). If they are from an inner module
to the one being imported, they must be prefixed by the inner path.
The name of an inductive type may also be followed by ``(..)`` to
import it, its constructors and its eliminators if they exist. For
- this purpose "eliminator" means a constant in the same module whose
+ this purpose "eliminator" means a :term:`constant` in the same module whose
name is the inductive type's name suffixed by one of ``_sind``,
``_ind``, ``_rec`` or ``_rect``.
@@ -332,7 +332,7 @@ Examples
Defined.
End M.
-Inside a module one can define constants, prove theorems and do anything
+Inside a module one can define :term:`constants <constant>`, prove theorems and do anything
else that can be done in the toplevel. Components of a closed
module can be accessed using the dot notation:
@@ -455,9 +455,9 @@ Typing Modules
In order to introduce the typing system we first slightly extend the syntactic
class of terms and environments given in section :ref:`The-terms`. The
-environments, apart from definitions of constants and inductive types now also
-hold any other structure elements. Terms, apart from variables, constants and
-complex terms, include also access paths.
+environments, apart from definitions of :term:`constants <constant>` and inductive types now also
+hold any other structure elements. Terms, apart from variables, :term:`constants <constant>` and
+complex terms, also include access paths.
We also need additional typing judgments:
diff --git a/doc/sphinx/language/core/records.rst b/doc/sphinx/language/core/records.rst
index 6671c67fb2..871bc0770c 100644
--- a/doc/sphinx/language/core/records.rst
+++ b/doc/sphinx/language/core/records.rst
@@ -207,7 +207,7 @@ other arguments are the parameters of the inductive type.
There may be three reasons:
#. The name :token:`ident` already exists in the global environment (see :cmd:`Axiom`).
- #. The body of :token:`ident` uses an incorrect elimination for
+ #. The :term:`body` of :token:`ident` uses an incorrect elimination for
:token:`ident` (see :cmd:`Fixpoint` and :ref:`Destructors`).
#. The type of the projections :token:`ident` depends on previous
projections which themselves could not be defined.
@@ -278,7 +278,7 @@ There are currently two ways to introduce primitive records types:
`r` ``= Build_``\ `R` ``(``\ `r`\ ``.(``\ |p_1|\ ``) …`` `r`\ ``.(``\ |p_n|\ ``))``.
Eta-conversion allows to define dependent elimination for these types as well.
#. Through the ``Inductive`` and ``CoInductive`` commands, when
- the body of the definition is a record declaration of the form
+ the :term:`body` of the definition is a record declaration of the form
``Build_``\ `R` ``{`` |p_1| ``:`` |t_1|\ ``; … ;`` |p_n| ``:`` |t_n| ``}``.
In this case the types can be recursive and eta-conversion is disallowed. These kind of record types
differ from their traditional versions in the sense that dependent
@@ -290,11 +290,11 @@ Reduction
The basic reduction rule of a primitive projection is
|p_i| ``(Build_``\ `R` |t_1| … |t_n|\ ``)`` :math:`{\rightarrow_{\iota}}` |t_i|.
-However, to take the :math:`{\delta}` flag into
+However, to take the δ flag into
account, projections can be in two states: folded or unfolded. An
unfolded primitive projection application obeys the rule above, while
the folded version delta-reduces to the unfolded version. This allows to
-precisely mimic the usual unfolding rules of constants. Projections
+precisely mimic the usual unfolding rules of :term:`constants <constant>`. Projections
obey the usual ``simpl`` flags of the ``Arguments`` command in particular.
There is currently no way to input unfolded primitive projections at the
user-level, and there is no way to display unfolded projections differently
@@ -305,8 +305,8 @@ Compatibility Projections and :g:`match`
++++++++++++++++++++++++++++++++++++++++
To ease compatibility with ordinary record types, each primitive
-projection is also defined as a ordinary constant taking parameters and
-an object of the record type as arguments, and whose body is an
+projection is also defined as an ordinary :term:`constant` taking parameters and
+an object of the record type as arguments, and whose :term:`body` is an
application of the unfolded primitive projection of the same name. These
constants are used when elaborating partial applications of the
projection. One can distinguish them from applications of the primitive
diff --git a/doc/sphinx/language/core/sections.rst b/doc/sphinx/language/core/sections.rst
index c16152ff4f..4c41ce8a89 100644
--- a/doc/sphinx/language/core/sections.rst
+++ b/doc/sphinx/language/core/sections.rst
@@ -55,7 +55,7 @@ usable outside the section as shown in this :ref:`example <section_local_declara
:name: Let; Let Fixpoint; Let CoFixpoint
These are similar to :cmd:`Definition`, :cmd:`Fixpoint` and :cmd:`CoFixpoint`, except that
- the declared constant is local to the current section.
+ the declared :term:`constant` is local to the current section.
When the section is closed, all persistent
definitions and theorems within it that depend on the constant
will be wrapped with a :n:`@term_let` with the same declaration.
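
A short sketch of how a :cmd:`Let` is re-wrapped when its section is closed
(names are illustrative):

.. coqtop:: all

   Section S.
     Let two : nat := 2.
     Definition four : nat := two + two.
   End S.

   Print four.   (* the body of four is now wrapped in a let-in binding two := 2 *)
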
diff --git a/doc/sphinx/language/extensions/arguments-command.rst b/doc/sphinx/language/extensions/arguments-command.rst
index 214541570c..87001251c2 100644
--- a/doc/sphinx/language/extensions/arguments-command.rst
+++ b/doc/sphinx/language/extensions/arguments-command.rst
@@ -226,10 +226,10 @@ Automatic declaration of implicit arguments
Print Implicit nil.
The computation of implicit arguments takes account of the unfolding
-of constants. For instance, the variable ``p`` below has type
+of :term:`constants <constant>`. For instance, the variable ``p`` below has type
``(Transitivity R)`` which is reducible to
``forall x,y:U, R x y -> forall z:U, R y z -> R x z``. As the variables ``x``, ``y`` and ``z``
-appear strictly in the body of the type, they are implicit.
+appear strictly in the :term:`body` of the type, they are implicit.
.. coqtop:: all
@@ -318,7 +318,7 @@ Binding arguments to a scope
Effects of :cmd:`Arguments` on unfolding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ `simpl never` indicates that a constant should never be unfolded by :tacn:`cbn`,
++ `simpl never` indicates that a :term:`constant` should never be unfolded by :tacn:`cbn`,
:tacn:`simpl` or :tacn:`hnf`:
.. example::
@@ -330,7 +330,7 @@ Effects of :cmd:`Arguments` on unfolding
After that command an expression like :g:`(minus (S x) y)` is left
untouched by the tactics :tacn:`cbn` and :tacn:`simpl`.
-+ A constant can be marked to be unfolded only if it's applied to at least
++ A :term:`constant` can be marked to be unfolded only if it's applied to at least
the arguments appearing before the `/` in a :cmd:`Arguments` command.
.. example::
@@ -343,7 +343,7 @@ Effects of :cmd:`Arguments` on unfolding
After that command the expression :g:`(f \o g)` is left untouched by
:tacn:`simpl` while :g:`((f \o g) t)` is reduced to :g:`(f (g t))`.
- The same mechanism can be used to make a constant volatile, i.e.
+ The same mechanism can be used to make a :term:`constant` volatile, i.e.
always unfolded.
.. example::
@@ -353,7 +353,7 @@ Effects of :cmd:`Arguments` on unfolding
Definition volatile := fun x : nat => x.
Arguments volatile / x.
-+ A constant can be marked to be unfolded only if an entire set of
++ A :term:`constant` can be marked to be unfolded only if an entire set of
arguments evaluates to a constructor. The ``!`` symbol can be used to mark
such arguments.
@@ -366,7 +366,7 @@ Effects of :cmd:`Arguments` on unfolding
After that command, the expression :g:`(minus (S x) y)` is left untouched
by :tacn:`simpl`, while :g:`(minus (S x) (S y))` is reduced to :g:`(minus x y)`.
-+ `simpl nomatch` indicates that a constant should not be unfolded if it would expose
++ `simpl nomatch` indicates that a :term:`constant` should not be unfolded if it would expose
a `match` construct in the head position. This affects the :tacn:`cbn`,
:tacn:`simpl` and :tacn:`hnf` tactics.
@@ -379,10 +379,10 @@ Effects of :cmd:`Arguments` on unfolding
In this case, :g:`(minus (S (S x)) (S y))` is simplified to :g:`(minus (S x) y)`
even if an extra simplification is possible.
- In detail: the tactic :tacn:`simpl` first applies :math:`\beta`:math:`\iota`-reduction. Then, it
- expands transparent constants and tries to reduce further using :math:`\beta`:math:`\iota`-reduction.
- But, when no :math:`\iota` rule is applied after unfolding then
- :math:`\delta`-reductions are not applied. For instance trying to use :tacn:`simpl` on
+ In detail: the tactic :tacn:`simpl` first applies βι-reduction. Then, it
+ expands transparent :term:`constants <constant>` and tries to reduce further using βι-reduction.
+ But, when no ι rule is applied after unfolding then
+ δ-reductions are not applied. For instance trying to use :tacn:`simpl` on
:g:`(plus n O) = n` changes nothing.
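
A sketch of the ``/`` marker in action (``fcomp`` is an illustrative name; with
the trailing ``/``, the constant unfolds only when all listed arguments are
present):

.. coqtop:: in

   Definition fcomp {A B C} (g : B -> C) (f : A -> B) (x : A) : C := g (f x).
   Arguments fcomp {A B C} g f x /.

   Goal forall (g f : nat -> nat) (x : nat), fcomp g f x = g (f x).
   Proof.
     intros g f x.
     simpl.         (* fcomp g f x is fully applied, so it unfolds to g (f x) *)
     reflexivity.
   Qed.
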
diff --git a/doc/sphinx/language/extensions/canonical.rst b/doc/sphinx/language/extensions/canonical.rst
index 4cc35794cc..fbba6c30b8 100644
--- a/doc/sphinx/language/extensions/canonical.rst
+++ b/doc/sphinx/language/extensions/canonical.rst
@@ -34,7 +34,7 @@ in :ref:`canonicalstructures`; here only a simple example is given.
The first form of this command declares an existing :n:`@reference` as a
canonical instance of a structure (a record).
- The second form defines a new constant as if the :cmd:`Definition` command
+ The second form defines a new :term:`constant` as if the :cmd:`Definition` command
had been used, then declares it as a canonical instance as if the first
form had been used on the defined object.
@@ -113,7 +113,7 @@ in :ref:`canonicalstructures`; here only a simple example is given.
This displays the list of global names that are components of some
canonical structure. For each of them, the canonical structure of
- which it is a projection is indicated. If constants are given as
+ which it is a projection is indicated. If :term:`constants <constant>` are given as
its arguments, only the unification rules that involve or are
synthesized from simultaneously all given constants will be shown.
diff --git a/doc/sphinx/language/extensions/implicit-arguments.rst b/doc/sphinx/language/extensions/implicit-arguments.rst
index 765d04ec88..76a4d4a6ff 100644
--- a/doc/sphinx/language/extensions/implicit-arguments.rst
+++ b/doc/sphinx/language/extensions/implicit-arguments.rst
@@ -238,7 +238,7 @@ Here is an example:
This is triggered when setting an argument implicit in an
expression which does not correspond to the type of an assumption
- or to the body of a definition. Here is an example:
+ or to the :term:`body` of a definition. Here is an example:
.. coqtop:: all warn
@@ -448,7 +448,7 @@ function.
Turning this flag on (it is off by default) deactivates the use of implicit arguments.
- In this case, all arguments of constants, inductive types,
+ In this case, all arguments of :term:`constants <constant>`, inductive types,
constructors, etc, including the arguments declared as implicit, have
to be given as if no arguments were implicit. By symmetry, this also
affects printing.
diff --git a/doc/sphinx/language/extensions/match.rst b/doc/sphinx/language/extensions/match.rst
index 1c022448b0..818d130042 100644
--- a/doc/sphinx/language/extensions/match.rst
+++ b/doc/sphinx/language/extensions/match.rst
@@ -300,7 +300,7 @@ Conventions about unused pattern-matching variables
Pattern-matching variables that are not used on the right-hand side of ``=>`` are
considered the sign of a potential error. For instance, it could
-result from an undetected mispelled constant constructor. By default,
+result from an undetected misspelled constant constructor. By default,
a warning is issued in such situations.
.. warn:: Unused variable @ident catches more than one case.
@@ -366,7 +366,7 @@ only simple patterns remain. The original nesting of the ``match`` expressions
is recovered at printing time. An easy way to see the result
of the expansion is to toggle off the nesting performed at printing
(use here :flag:`Printing Matching`), then by printing the term with :cmd:`Print`
-if the term is a constant, or using the command :cmd:`Check`.
+if the term is a :term:`constant`, or using the command :cmd:`Check`.
The extended ``match`` still accepts an optional *elimination predicate*
given after the keyword ``return``. Given a pattern matching expression,
diff --git a/doc/sphinx/practical-tools/coq-commands.rst b/doc/sphinx/practical-tools/coq-commands.rst
index a10312972e..464af37fde 100644
--- a/doc/sphinx/practical-tools/coq-commands.rst
+++ b/doc/sphinx/practical-tools/coq-commands.rst
@@ -499,7 +499,7 @@ wrong. In the current version, it does not modify the compiled libraries to mark
them as successfully checked.
Note that non-logical information is not checked. By logical
-information, we mean the type and optional body associated with names.
+information, we mean the type and optional :term:`body` associated with names.
It excludes for instance anything related to the concrete syntax of
objects (customized syntax rules, association between short and long
names), implicit arguments, etc.
@@ -521,7 +521,7 @@ relative paths in object files ``-Q`` and ``-R`` have exactly the same meaning.
:-admit *module*: Do not check *module* and any of its dependencies,
unless explicitly required.
:-o: At exit, print a summary about the context. List the names of all
- assumptions and variables (constants without body).
+ assumptions and variables (constants without a :term:`body`).
:-silent: Do not write progress information to the standard output.
Environment variable ``$COQLIB`` can be set to override the location of
diff --git a/doc/sphinx/proof-engine/ltac.rst b/doc/sphinx/proof-engine/ltac.rst
index 013ff0a83f..b1759bf71b 100644
--- a/doc/sphinx/proof-engine/ltac.rst
+++ b/doc/sphinx/proof-engine/ltac.rst
@@ -1564,9 +1564,9 @@ Computing in a term: eval
Evaluation of a term can be performed with:
-.. tacn:: eval @red_expr in @term
+:n:`eval @red_expr in @term`
- :tacn:`eval` is a :token:`value_tactic`.
+See :tacn:`eval`. :tacn:`eval` is a :token:`value_tactic`.
Getting the type of a term
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/sphinx/proof-engine/ltac2.rst b/doc/sphinx/proof-engine/ltac2.rst
index 1bb4216e4f..9f3f0ef3d5 100644
--- a/doc/sphinx/proof-engine/ltac2.rst
+++ b/doc/sphinx/proof-engine/ltac2.rst
@@ -1299,7 +1299,7 @@ Two examples of syntax differences:
to add the necessary notation.
- The built-in `simpl` tactic in Ltac1 supports the use of scope keys in delta flags, e.g. :n:`simpl ["+"%nat]`
which is not accepted by Ltac2. This is because Ltac2 uses a different
- definition for :token:`delta_flag`; compare it to :token:`ltac2_delta_flag`. This also affects
+ definition for :token:`delta_reductions`; compare it to :token:`ltac2_delta_reductions`. This also affects
:tacn:`compute`.
Ltac1 tactics are not automatically available in Ltac2. (Note that some of the tactics described
@@ -1461,9 +1461,9 @@ Other nonterminals that have syntactic classes are listed here.
- :token:`ltac2_bindings`
- :token:`bindings`
- * - :n:`strategy`
- - :token:`ltac2_strategy_flag`
- - :token:`strategy_flag`
+ * - :n:`reductions`
+ - :token:`ltac2_reductions`
+ - :token:`reductions`
* - :n:`reference`
- :token:`refglobal`
@@ -1571,19 +1571,19 @@ Here is the syntax for the :n:`q_*` nonterminals:
| @natural
| @lident
-.. insertprodn ltac2_strategy_flag ltac2_delta_flag
+.. insertprodn ltac2_reductions ltac2_delta_reductions
.. prodn::
- ltac2_strategy_flag ::= {+ @ltac2_red_flag }
- | {? @ltac2_delta_flag }
+ ltac2_reductions ::= {+ @ltac2_red_flag }
+ | {? @ltac2_delta_reductions }
ltac2_red_flag ::= beta
| iota
| match
| fix
| cofix
| zeta
- | delta {? @ltac2_delta_flag }
- ltac2_delta_flag ::= {? - } [ {+ @refglobal } ]
+ | delta {? @ltac2_delta_reductions }
+ ltac2_delta_reductions ::= {? - } [ {+ @refglobal } ]
.. insertprodn refglobal refglobal
diff --git a/doc/sphinx/proof-engine/tactics.rst b/doc/sphinx/proof-engine/tactics.rst
index 071fcbee11..fad02b2645 100644
--- a/doc/sphinx/proof-engine/tactics.rst
+++ b/doc/sphinx/proof-engine/tactics.rst
@@ -479,6 +479,7 @@ one or more of its hypotheses.
.. prodn::
occurrences ::= at @occs_nums
| in @goal_occurrences
+ simple_occurrences ::= @occurrences
occs_nums ::= {? - } {+ @nat_or_var }
nat_or_var ::= {| @natural | @ident }
goal_occurrences ::= {+, @hyp_occs } {? %|- {? @concl_occs } }
@@ -496,6 +497,10 @@ one or more of its hypotheses.
the conclusion of the goal. The second form can select occurrences
in the goal conclusion and in one or more hypotheses.
+ :n:`@simple_occurrences`
+ A semantically restricted form of :n:`@occurrences` that doesn't allow the
+ `at` clause anywhere within it.
+
:n:`{? - } {+ @nat_or_var }`
Selects the specified occurrences within a single goal or hypothesis.
Occurrences are numbered starting with 1 following a depth-first traversal
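
For instance, an occurrence clause can restrict a tactic to a single
occurrence; a minimal sketch using :tacn:`pattern`:

.. coqtop:: in

   Goal forall n : nat, n + n = 2 * n.
   Proof.
     intros n.
     pattern n at 2.   (* abstracts only the second occurrence of n in the conclusion *)
     (* the conclusion is now (fun n0 => n + n0 = 2 * n) n *)
   Abort.
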
diff --git a/doc/sphinx/proof-engine/vernacular-commands.rst b/doc/sphinx/proof-engine/vernacular-commands.rst
index 37d605360d..22e4350c38 100644
--- a/doc/sphinx/proof-engine/vernacular-commands.rst
+++ b/doc/sphinx/proof-engine/vernacular-commands.rst
@@ -65,6 +65,9 @@ If no selector is provided,
the command applies to the current goal. If no proof is open, then the command only applies
to accessible objects. (see Section :ref:`invocation-of-tactics`).
+:cmd:`Eval` and :cmd:`Compute` are also :token:`query_command`\s, which are
+described elsewhere.
+
.. cmd:: About @reference {? @univ_name_list }
Displays information about the :n:`@reference` object, which,
@@ -80,22 +83,6 @@ to accessible objects. (see Section :ref:`invocation-of-tactics`).
Displays the type of :n:`@term`. When called in proof mode, the
term is checked in the local context of the selected goal.
-.. cmd:: Eval @red_expr in @term
-
- Performs the specified reduction on :n:`@term` and displays
- the resulting term with its type. If a proof is open, :n:`@term`
- may reference hypotheses of the selected goal.
-
- .. seealso:: Section :ref:`performingcomputations`.
-
-
-.. cmd:: Compute @term
-
- Evaluates :n:`@term` using the bytecode-based virtual machine.
- It is a shortcut for :cmd:`Eval` :n:`vm_compute in @term`.
-
- .. seealso:: Section :ref:`performingcomputations`.
-
.. cmd:: Search {+ @search_query } {? {| inside | outside } {+ @qualid } }
This command can be used to filter the goal and the global context
@@ -936,136 +923,6 @@ Printing constructions in full
.. see a contrived example here: https://github.com/coq/coq/pull/11718#discussion_r415481854
-.. _vernac-controlling-the-reduction-strategies:
-
-Controlling the reduction strategies and the conversion algorithm
-----------------------------------------------------------------------
-
-
-Coq provides reduction strategies that the tactics can invoke and two
-different algorithms to check the convertibility of types. The first
-conversion algorithm lazily compares applicative terms while the other
-is a brute-force but efficient algorithm that first normalizes the
-terms before comparing them. The second algorithm is based on a
-bytecode representation of terms similar to the bytecode
-representation used in the ZINC virtual machine :cite:`Leroy90`. It is
-especially useful for intensive computation of algebraic values, such
-as numbers, and for reflection-based tactics. The commands to fine-
-tune the reduction strategies and the lazy conversion algorithm are
-described first.
-
-.. cmd:: Opaque {+ @reference }
-
- This command accepts the :attr:`global` attribute. By default, the scope
- of :cmd:`Opaque` is limited to the current section or module.
-
- This command has an effect on unfoldable constants, i.e. on constants
- defined by :cmd:`Definition` or :cmd:`Let` (with an explicit body), or by a command
- associated with a definition such as :cmd:`Fixpoint`, etc,
- or by a proof ended by :cmd:`Defined`. The command tells not to unfold the
- constants in the :n:`@reference` sequence in tactics using δ-conversion (unfolding
- a constant is replacing it by its definition).
-
- :cmd:`Opaque` has also an effect on the conversion algorithm of Coq, telling
- it to delay the unfolding of a constant as much as possible when Coq
- has to check the conversion (see Section :ref:`conversion-rules`) of two distinct
- applied constants.
-
-.. cmd:: Transparent {+ @reference }
-
- This command accepts the :attr:`global` attribute. By default, the scope
- of :cmd:`Transparent` is limited to the current section or module.
-
- This command is the converse of :cmd:`Opaque` and it applies on unfoldable
- constants to restore their unfoldability after an Opaque command.
-
- Note in particular that constants defined by a proof ended by Qed are
- not unfoldable and Transparent has no effect on them. This is to keep
- with the usual mathematical practice of *proof irrelevance*: what
- matters in a mathematical development is the sequence of lemma
- statements, not their actual proofs. This distinguishes lemmas from
- the usual defined constants, whose actual values are of course
- relevant in general.
-
- .. exn:: The reference @qualid was not found in the current environment.
-
- There is no constant named :n:`@qualid` in the environment.
-
-.. seealso:: :ref:`performingcomputations` and :ref:`proof-editing-mode`
-
-.. _vernac-strategy:
-
-.. cmd:: Strategy {+ @strategy_level [ {+ @reference } ] }
-
- .. insertprodn strategy_level strategy_level_or_var
-
- .. prodn::
- strategy_level ::= opaque
- | @integer
- | expand
- | transparent
- strategy_level_or_var ::= @strategy_level
- | @ident
-
- This command accepts the :attr:`local` attribute, which limits its effect
- to the current section or module, in which case the section and module
- behavior is the same as :cmd:`Opaque` and :cmd:`Transparent` (without :attr:`global`).
-
- This command generalizes the behavior of the :cmd:`Opaque` and :cmd:`Transparent`
- commands. It is used to fine-tune the strategy for unfolding
- constants, both at the tactic level and at the kernel level. This
- command associates a :n:`@strategy_level` with the qualified names in the :n:`@reference`
- sequence. Whenever two
- expressions with two distinct head constants are compared (for
- instance, this comparison can be triggered by a type cast), the one
- with lower level is expanded first. In case of a tie, the second one
- (appearing in the cast type) is expanded.
-
- Levels can be one of the following (higher to lower):
-
- + ``opaque`` : level of opaque constants. They cannot be expanded by
- tactics (behaves like +∞, see next item).
- + :n:`@integer` : levels indexed by an integer. Level 0 corresponds to the
- default behavior, which corresponds to transparent constants. This
- level can also be referred to as ``transparent``. Negative levels
- correspond to constants to be expanded before normal transparent
- constants, while positive levels correspond to constants to be
- expanded after normal transparent constants.
- + ``expand`` : level of constants that should be expanded first (behaves
- like −∞)
- + ``transparent`` : Equivalent to level 0
-
-.. cmd:: Print Strategy @reference
-
- This command prints the strategy currently associated with :n:`@reference`. It
- fails if :n:`@reference` is not an unfoldable reference, that is, neither a
- variable nor a constant.
-
- .. exn:: The reference is not unfoldable.
- :undocumented:
-
-.. cmd:: Print Strategies
-
- Print all the currently non-transparent strategies.
-
-
-.. cmd:: Declare Reduction @ident := @red_expr
-
- Declares a short name for the reduction expression :n:`@red_expr`, for
- instance ``lazy beta delta [foo bar]``. This short name can then be used
- in :n:`Eval @ident in` or ``eval`` constructs. This command
- accepts the :attr:`local` attribute, which indicates that the reduction
- will be discarded at the end of the
- file or module. The name is not qualified. In
- particular declaring the same name in several modules or in several
- functor applications will be rejected if these declarations are not
- local. The name :n:`@ident` cannot be used directly as an Ltac tactic, but
- nothing prevents the user from also performing a
- :n:`Ltac @ident := @red_expr`.
-
- .. seealso:: :ref:`performingcomputations`
-
-
.. _controlling-typing-flags:
Controlling Typing Flags
diff --git a/doc/sphinx/proofs/automatic-tactics/auto.rst b/doc/sphinx/proofs/automatic-tactics/auto.rst
index 30f7be5f13..d9945dd920 100644
--- a/doc/sphinx/proofs/automatic-tactics/auto.rst
+++ b/doc/sphinx/proofs/automatic-tactics/auto.rst
@@ -139,14 +139,13 @@ Programmable proof search
Like :tacn:`eauto`, but uses a
`breadth-first search <https://en.wikipedia.org/wiki/Breadth-first_search>`_.
-.. tacn:: autounfold {? @hintbases } {? @occurrences }
+.. tacn:: autounfold {? @hintbases } {? @simple_occurrences }
Unfolds constants that were declared through a :cmd:`Hint Unfold`
in the given databases.
- :n:`@occurrences`
- Performs the unfolding in the specified occurrences. The :n:`at @occs_nums`
- clause of :n:`@occurrences` is not supported.
+ :n:`@simple_occurrences`
+ Performs the unfolding in the specified occurrences.
.. tacn:: autorewrite {? * } with {+ @ident } {? @occurrences } {? using @ltac_expr }
@@ -376,6 +375,9 @@ Creating Hints
discrimination network to relax or constrain it in the case of discriminated
databases.
+ .. exn:: Cannot coerce @qualid to an evaluable reference.
+ :undocumented:
+
.. cmd:: Hint {| Constants | Variables } {| Transparent | Opaque } {? : {+ @ident } }
:name: Hint Constants; Hint Variables
diff --git a/doc/sphinx/proofs/writing-proofs/proof-mode.rst b/doc/sphinx/proofs/writing-proofs/proof-mode.rst
index 931ac905f6..bfe8c77c14 100644
--- a/doc/sphinx/proofs/writing-proofs/proof-mode.rst
+++ b/doc/sphinx/proofs/writing-proofs/proof-mode.rst
@@ -155,7 +155,7 @@ When the proof is completed, you can exit proof mode with commands such as
Passes a completed :term:`proof term` to Coq's kernel
to check that the proof term is :term:`well-typed` and
to verify that its type matches the theorem statement. If it's verified, the
- proof term is added to the global environment as an opaque constant
+ proof term is added to the global environment as an :term:`opaque` constant
using the declared name from the original goal.
It's very rare for a proof term to fail verification. Generally this
@@ -190,9 +190,10 @@ When the proof is completed, you can exit proof mode with commands such as
.. cmd:: Defined {? @ident }
- Similar to :cmd:`Qed` and :cmd:`Save`, except the proof is made *transparent*, which means
+ Similar to :cmd:`Qed` and :cmd:`Save`, except the proof is made
+ :term:`transparent`, which means
that its content can be explicitly used for type checking and that it can be
- unfolded in conversion tactics (see :ref:`performingcomputations`,
+ unfolded in conversion tactics (see :ref:`applyingconversionrules`,
:cmd:`Opaque`, :cmd:`Transparent`). If :token:`ident` is specified,
the proof is defined with the given name, which overrides any name
provided by the :cmd:`Theorem` command or its variants.
@@ -845,7 +846,7 @@ Requesting information
.. cmd:: Show Existentials
Displays all open goals / existential variables in the current proof
- along with the type and the context of each variable.
+ along with the context and type of each variable.
.. cmd:: Show Match @qualid
diff --git a/doc/sphinx/proofs/writing-proofs/rewriting.rst b/doc/sphinx/proofs/writing-proofs/rewriting.rst
index 4f937ad727..c2b877d372 100644
--- a/doc/sphinx/proofs/writing-proofs/rewriting.rst
+++ b/doc/sphinx/proofs/writing-proofs/rewriting.rst
@@ -47,7 +47,7 @@ Rewriting with Leibniz and setoid equality
Replaces subterms with other subterms that have been proven to be equal.
The type of :n:`@one_term` must have the form:
- :n:`{? forall {+ (x__i: A__i) } , } EQ @term__1 @term__2`
+ :n:`{? forall @open_binders , } EQ @term__1 @term__2`
.. todo :term:`Leibniz equality` does not work with Sphinx 2.3.1. It does with Sphinx 3.0.3.
@@ -55,14 +55,11 @@ Rewriting with Leibniz and setoid equality
Note that :n:`eq @term__1 @term__2` is typically written with the infix notation
:n:`@term__1 = @term__2`. You must `Require Setoid` to use the tactic
with a setoid equality or with :ref:`setoid rewriting <generalizedrewriting>`.
- In the general form, any :n:`@binder` may be used, not just :n:`(x__i: A__i)`.
-
- .. todo doublecheck the @binder comment is correct.
:n:`rewrite @one_term` finds subterms matching :n:`@term__1` in the goal,
and replaces them with :n:`@term__2` (or the reverse if `<-` is given).
Some of the variables :g:`x`\ :sub:`i` are solved by unification,
- and some of the types :n:`A__1, ..., A__n` may become new
+ and some of the types :n:`A__1, …, A__n` may become new
subgoals. :tacn:`rewrite` won't find occurrences inside `forall` that refer
to variables bound by the `forall`; use the more advanced :tacn:`setoid_rewrite`
if you want to find such occurrences.
@@ -315,7 +312,7 @@ Rewriting with definitional equality
.. tacn:: change {? @one_term__from {? at @occs_nums } with } @one_term__to {? @occurrences }
Replaces terms with other :term:`convertible` terms.
- If :n:`@one_term__from` is not specified, then :n:`@one_term__from` replaces the conclusion and/or
+ If :n:`@one_term__from` is not specified, then :n:`@one_term__to` replaces the conclusion and/or
the specified hypotheses. If :n:`@one_term__from` is specified, the tactic replaces occurrences
    of :n:`@one_term__from` with :n:`@one_term__to` within the conclusion and/or the specified hypotheses.
@@ -326,8 +323,8 @@ Rewriting with definitional equality
    whose value will be substituted for `x` in :n:`@one_term__to`, such as in
`change (f ?x ?y) with (g (x, y))` or `change (fun x => ?f x) with f`.
- The `at ... with ...` form is deprecated in 8.14; use `with ... at ...` instead.
- For `at ... with ... in H |-`, use `with ... in H at ... |-`.
+ The `at … with …` form is deprecated in 8.14; use `with … at …` instead.
+ For `at … with … in H |-`, use `with … in H at … |-`.
:n:`@occurrences`
If `with` is not specified, :n:`@occurrences` must only specify
@@ -346,7 +343,7 @@ Rewriting with definitional equality
make some proof steps explicit when refactoring a proof script
to make it readable.
- .. seealso:: :ref:`Performing computations <performingcomputations>`
+ .. seealso:: :ref:`applyingconversionrules`
.. tacn:: change_no_check {? @one_term__from {? at @occs_nums } with } @one_term__to {? @occurrences }
@@ -384,433 +381,594 @@ Rewriting with definitional equality
exact H.
Qed.
-.. _performingcomputations:
-
-Performing computations
----------------------------
-
-.. insertprodn red_expr pattern_occs
-
-.. prodn::
- red_expr ::= red
- | hnf
- | simpl {? @delta_flag } {? {| @reference_occs | @pattern_occs } }
- | cbv {? @strategy_flag }
- | cbn {? @strategy_flag }
- | lazy {? @strategy_flag }
- | compute {? @delta_flag }
- | vm_compute {? {| @reference_occs | @pattern_occs } }
- | native_compute {? {| @reference_occs | @pattern_occs } }
- | unfold {+, @reference_occs }
- | fold {+ @one_term }
- | pattern {+, @pattern_occs }
- | @ident
- delta_flag ::= {? - } [ {+ @reference } ]
- strategy_flag ::= {+ @red_flag }
- | @delta_flag
- red_flag ::= beta
- | iota
- | match
- | fix
- | cofix
- | zeta
- | delta {? @delta_flag }
- reference_occs ::= @reference {? at @occs_nums }
- pattern_occs ::= @one_term {? at @occs_nums }
-
-This set of tactics implements different specialized usages of the
-tactic :tacn:`change`.
-
-All conversion tactics (including :tacn:`change`) can be parameterized by the
-parts of the goal where the conversion can occur. This is done using
-*goal clauses* which consists in a list of hypotheses and, optionally,
-of a reference to the conclusion of the goal. For defined hypothesis
-it is possible to specify if the conversion should occur on the type
-part, the body part or both (default).
-
-Goal clauses are written after a conversion tactic (tactics :tacn:`set`,
-:tacn:`rewrite`, :tacn:`replace` and :tacn:`autorewrite` also use goal
-clauses) and are introduced by the keyword `in`. If no goal clause is
-provided, the default is to perform the conversion only in the
-conclusion.
-
-For backward compatibility, the notation :n:`in {+ @ident}` performs
-the conversion in hypotheses :n:`{+ @ident}`.
-
-.. tacn:: cbv {? @strategy_flag }
- lazy {? @strategy_flag }
- :name: cbv; lazy
-
- These parameterized reduction tactics apply to any goal and perform
- the normalization of the goal according to the specified flags. In
- correspondence with the kinds of reduction considered in Coq namely
- :math:`\beta` (reduction of functional application), :math:`\delta`
- (unfolding of transparent constants, see :ref:`vernac-controlling-the-reduction-strategies`),
- :math:`\iota` (reduction of
- pattern matching over a constructed term, and unfolding of :g:`fix` and
- :g:`cofix` expressions) and :math:`\zeta` (contraction of local definitions), the
- flags are either ``beta``, ``delta``, ``match``, ``fix``, ``cofix``,
- ``iota`` or ``zeta``. The ``iota`` flag is a shorthand for ``match``, ``fix``
- and ``cofix``. The ``delta`` flag itself can be refined into
- :n:`delta [ {+ @qualid} ]` or :n:`delta - [ {+ @qualid} ]`, restricting in the first
- case the constants to unfold to the constants listed, and restricting in the
- second case the constant to unfold to all but the ones explicitly mentioned.
- Notice that the ``delta`` flag does not apply to variables bound by a let-in
- construction inside the :n:`@term` itself (use here the ``zeta`` flag). In
- any cases, opaque constants are not unfolded (see :ref:`vernac-controlling-the-reduction-strategies`).
-
- Normalization according to the flags is done by first evaluating the
- head of the expression into a *weak-head* normal form, i.e. until the
- evaluation is blocked by a variable (or an opaque constant, or an
- axiom), as e.g. in :g:`x u1 ... un` , or :g:`match x with ... end`, or
- :g:`(fix f x {struct x} := ...) x`, or is a constructed form (a
- :math:`\lambda`-expression, a constructor, a cofixpoint, an inductive type, a
- product type, a sort), or is a redex that the flags prevent to reduce. Once a
+.. _applyingconversionrules:
+
+Applying conversion rules
+-------------------------
+
+These tactics apply reductions and expansions, replacing :term:`convertible` subterms
+with others that are equal by definition in |CiC|.
+They implement different specialized uses of the
+:tacn:`change` tactic. Other ways to apply these reductions are through
+the :cmd:`Eval` command, the `Eval` clause in the :cmd:`Definition`/:cmd:`Example`
+command and the :tacn:`eval` tactic.
+
+Tactics described in this section include:
+
+- :tacn:`lazy` and :tacn:`cbv`, which allow precise selection of which reduction
+ rules to apply
+- :tacn:`simpl` and :tacn:`cbn`, which are "clever" tactics meant to give the most
+ readable result
+- :tacn:`hnf` and :tacn:`red`, which apply reduction rules only to the head of the
+ term
+- :tacn:`vm_compute` and :tacn:`native_compute`, which are performance-oriented.
+
+Conversion tactics, with two exceptions, only change the types and contexts
+of existential variables
+and leave the proof term unchanged. (The :tacn:`vm_compute` and :tacn:`native_compute`
+tactics change existential variables in a way similar to other conversions while
+also adding a single explicit cast to the proof term to tell the kernel
+which reduction engine to use. See :ref:`type-cast`.) For example:
+
+ .. coqtop:: all
+
+ Goal 3 + 4 = 7.
+ Show Proof.
+ Show Existentials.
+ cbv.
+ Show Proof.
+ Show Existentials.
+
+ .. coqtop:: none
+
+ Abort.
+
+.. tacn:: lazy {? @reductions } @simple_occurrences
+ cbv {? @reductions } @simple_occurrences
+
+ .. insertprodn reductions delta_reductions
+
+ .. prodn::
+ reductions ::= {+ @reduction }
+ | @delta_reductions
+ reduction ::= beta
+ | delta {? @delta_reductions }
+ | match
+ | fix
+ | cofix
+ | iota
+ | zeta
+ delta_reductions ::= {? - } [ {+ @reference } ]
+
+   Normalizes the goal as specified by :n:`@reductions`. If no reductions are
+   specified by name, all reductions are applied; otherwise only the named
+   reductions are applied. The reductions include:
+
+ `beta`
+ :term:`beta-reduction` of functional application
+
+ :n:`delta {? @delta_reductions }`
+ :term:`delta-reduction`: unfolding of transparent constants, see :ref:`controlling-the-reduction-strategies`.
+      When :n:`@delta_reductions` is given in :n:`@reductions` without the `delta`
+      keyword, `beta`, `iota` and `zeta` are applied in addition to `delta` with the given :n:`@delta_reductions`.
+
+ :n:`{? - } [ {+ @reference } ]`
+         Without the `-`, delta unfolding is limited to the listed constants; with
+         the `-`, it is applied to all constants except the listed ones.
+         Note that ``delta`` does not apply to variables bound by a let-in
+         construction inside the term itself (use ``zeta`` to inline these).
+ Opaque constants are never unfolded except by :tacn:`vm_compute` and
+ :tacn:`native_compute`
+ (see `#4476 <https://github.com/coq/coq/issues/4476>`_ and
+ :ref:`controlling-the-reduction-strategies`).
+
+ `iota`
+ :term:`iota-reduction` of pattern matching (`match`) over a constructed term and reduction
+ of :g:`fix` and :g:`cofix` expressions. Shorthand for `match fix cofix`.
+
+ `zeta`
+ :term:`zeta-reduction`: reduction of :ref:`let-in definitions <let-in>`
+
+ Normalization is done by first evaluating the
+ head of the expression into :gdef:`weak-head normal form`, i.e. until the
+   evaluation is blocked by a variable, an opaque constant or an
+   axiom (as in :n:`x u__1 … u__n`, :g:`match x with … end` or
+   :g:`(fix f x {struct x} := …) x`), reaches a constructed form (a
+   :math:`\lambda`-expression, constructor, cofixpoint, inductive type,
+   product type or sort), or reaches a redex that the flags prevent from being reduced. Once a
weak-head normal form is obtained, subterms are recursively reduced using the
same strategy.
- Reduction to weak-head normal form can be done using two strategies:
- *lazy* (``lazy`` tactic), or *call-by-value* (``cbv`` tactic). The lazy
- strategy is a call-by-need strategy, with sharing of reductions: the
+ There are two strategies for reduction to weak-head normal form:
+ *lazy* (the :tacn:`lazy` tactic), or *call-by-value* (the :tacn:`cbv` tactic).
+ The lazy strategy is a
+ `call by need <https://en.wikipedia.org/wiki/Evaluation_strategy#Call_by_need>`_
+ strategy, with sharing of reductions: the
arguments of a function call are weakly evaluated only when necessary,
and if an argument is used several times then it is weakly computed
only once. This reduction is efficient for reducing expressions with
dead code. For instance, the proofs of a proposition :g:`exists x. P(x)`
- reduce to a pair of a witness :g:`t`, and a proof that :g:`t` satisfies the
+ reduce to a pair of a witness :g:`t` and a proof that :g:`t` satisfies the
predicate :g:`P`. Most of the time, :g:`t` may be computed without computing
the proof of :g:`P(t)`, thanks to the lazy strategy.
The call-by-value strategy is the one used in ML languages: the
arguments of a function call are systematically weakly evaluated
- first. Despite the lazy strategy always performs fewer reductions than
+ first. The lazy strategy is similar to how Haskell reduces terms.
+ Although the lazy strategy always does fewer reductions than
the call-by-value strategy, the latter is generally more efficient for
evaluating purely computational expressions (i.e. with little dead code).
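+
+   For instance, restricting the enabled reductions changes how far the goal is
+   reduced. A small illustration (the goal below is an arbitrary example) might
+   look like this:
+
+   .. coqtop:: all abort
+
+      Goal (fun x => x + 0) 1 = 1.
+      cbv beta.
+      cbv delta [Nat.add].
+      cbv.
+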
-.. tacv:: compute
- cbv
- :name: compute; _
+ .. tacn:: compute {? @delta_reductions } @simple_occurrences
- These are synonyms for ``cbv beta delta iota zeta``.
+      A variant form of :tacn:`cbv`: it is equivalent to
+      :n:`cbv beta delta {? @delta_reductions } iota zeta`.
-.. tacv:: lazy
+ :opt:`Debug` ``"Cbv"`` makes :tacn:`cbv` (and its derivative :tacn:`compute`) print
+ information about the constants it encounters and the unfolding decisions it
+ makes.
- This is a synonym for ``lazy beta delta iota zeta``.
+.. tacn:: simpl {? @delta_reductions } {? {| @reference_occs | @pattern_occs } } @simple_occurrences
-.. tacv:: compute [ {+ @qualid} ]
- cbv [ {+ @qualid} ]
+ .. insertprodn reference_occs pattern_occs
- These are synonyms of :n:`cbv beta delta {+ @qualid} iota zeta`.
+ .. prodn::
+ reference_occs ::= @reference {? at @occs_nums }
+ pattern_occs ::= @one_term {? at @occs_nums }
-.. tacv:: compute - [ {+ @qualid} ]
- cbv - [ {+ @qualid} ]
+ Reduces a term to
+ something still readable instead of fully normalizing it. It performs
+ a sort of strong normalization with two key differences:
- These are synonyms of :n:`cbv beta delta -{+ @qualid} iota zeta`.
+ + It unfolds constants only if they lead to an ι-reduction,
+ i.e. reducing a match or unfolding a fixpoint.
+ + When reducing a constant unfolding to (co)fixpoints, the tactic
+ uses the name of the constant the (co)fixpoint comes from instead of
+ the (co)fixpoint definition in recursive calls.
-.. tacv:: lazy [ {+ @qualid} ]
- lazy - [ {+ @qualid} ]
+ :n:`@simple_occurrences`
+ Permits selecting whether to reduce the conclusion and/or one or more
+ hypotheses. While the `at` option of :n:`@occurrences` is not allowed here,
+ :n:`@reference_occs` and :n:`@pattern_occs` have a somewhat less
+ flexible `at` option for selecting specific occurrences.
- These are respectively synonyms of :n:`lazy beta delta {+ @qualid} iota zeta`
- and :n:`lazy beta delta -{+ @qualid} iota zeta`.
+ :tacn:`simpl` can unfold transparent constants whose name can be reused in
+ recursive calls as well as those designated by :cmd:`Arguments` :n:`@reference … /`
+ commands. For instance, a constant :g:`plus' := plus` may be unfolded and
+ reused in recursive calls, but a constant such as :g:`succ := plus (S O)` is
+ not unfolded unless it was specifically designated in an :cmd:`Arguments`
+ command such as :n:`Arguments succ /.`.
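+
+   For illustration, the following sketch (using the constants from the
+   description above) shows which of them :tacn:`simpl` unfolds; the second
+   :tacn:`simpl` runs after the :cmd:`Arguments` command:
+
+   .. coqtop:: all abort
+
+      Definition plus' := plus.
+      Definition succ := plus (S O).
+      Goal forall n, plus' (S O) n = succ n.
+      intros n; simpl.
+      Abort.
+      Arguments succ /.
+      Goal forall n, plus' (S O) n = succ n.
+      intros n; simpl.
+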
-.. tacv:: vm_compute
- :name: vm_compute
+ :n:`{| @reference_occs | @pattern_occs }` can limit the application of :tacn:`simpl`
+ to:
- This tactic evaluates the goal using the optimized call-by-value evaluation
- bytecode-based virtual machine described in :cite:`CompiledStrongReduction`.
- This algorithm is dramatically more efficient than the algorithm used for the
- :tacn:`cbv` tactic, but it cannot be fine-tuned. It is especially interesting for
- full evaluation of algebraic objects. This includes the case of
- reflection-based tactics.
+ - applicative subterms whose :term:`head` is the
+ constant :n:`@qualid` or is the constant used
+ in the notation :n:`@string` (see :n:`@reference`)
+ - subterms matching a pattern :n:`@one_term`
-.. tacv:: native_compute
- :name: native_compute
+.. tacn:: cbn {? @reductions } @simple_occurrences
- This tactic evaluates the goal by compilation to OCaml as described
- in :cite:`FullReduction`. If Coq is running in native code, it can be
- typically two to five times faster than :tacn:`vm_compute`. Note however that the
- compilation cost is higher, so it is worth using only for intensive
- computations. Depending on the configuration, this tactic can either default to
- :tacn:`vm_compute`, recompile dependencies or fail due to some missing
- precompiled dependencies,
- see :ref:`the native-compiler option <native-compiler-options>` for details.
+ :tacn:`cbn` was intended to be a more principled, faster and more
+ predictable replacement for :tacn:`simpl`.
- .. flag:: NativeCompute Timing
+ The main difference between :tacn:`cbn` and :tacn:`simpl` is that
+ :tacn:`cbn` may unfold constants even when they cannot be reused in recursive calls:
+ in the previous example, :g:`succ t` is reduced to :g:`S t`.
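+
+   For illustration (the constant `succ'` below is an arbitrary stand-in,
+   defined like `succ` in the :tacn:`simpl` description above):
+
+   .. coqtop:: all abort
+
+      Definition succ' := plus (S O).
+      Goal forall n, succ' n = S n.
+      intros n.
+      simpl.
+      cbn.
+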
- This flag causes all calls to the native compiler to print
- timing information for the conversion to native code,
- compilation, execution, and reification phases of native
- compilation. Timing is printed in units of seconds of
- wall-clock time.
+ :opt:`Debug` ``"RAKAM"`` makes :tacn:`cbn` print various debugging information.
+ ``RAKAM`` is the Refolding Algebraic Krivine Abstract Machine.
- .. flag:: NativeCompute Profiling
+.. tacn:: hnf @simple_occurrences
- On Linux, if you have the ``perf`` profiler installed, this flag makes
- it possible to profile :tacn:`native_compute` evaluations.
+ Replaces the current goal with its
+ weak-head normal form according to the βδιζ-reduction rules, i.e. it
+ reduces the :term:`head` of the goal until it becomes a product or an
+   irreducible term. All inner βι-redexes are also reduced. :tacn:`hnf`
+   behaves similarly to :tacn:`simpl` and :tacn:`cbn`, but unlike them it does
+   not recurse into subterms.
+ The behavior of :tacn:`hnf` can be tuned using the :cmd:`Arguments` command.
- .. opt:: NativeCompute Profile Filename @string
- :name: NativeCompute Profile Filename
+ Example: The term :g:`fun n : nat => S n + S n` is not reduced by :n:`hnf`.
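+
+   One way to observe this (an illustrative sketch using :cmd:`Eval`) is to
+   compare :tacn:`hnf` with a tactic that does recurse into subterms:
+
+   .. coqtop:: all
+
+      Eval hnf in (fun n : nat => S n + S n).
+      Eval cbn in (fun n : nat => S n + S n).
+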
- This option specifies the profile output; the default is
- ``native_compute_profile.data``. The actual filename used
- will contain extra characters to avoid overwriting an existing file; that
- filename is reported to the user.
- That means you can individually profile multiple uses of
- :tacn:`native_compute` in a script. From the Linux command line, run ``perf report``
- on the profile file to see the results. Consult the ``perf`` documentation
- for more details.
+ .. note::
+ The δ rule only applies to transparent constants
+ (see :ref:`controlling-the-reduction-strategies` on transparency and opacity).
- :opt:`Debug` ``"Cbv"`` makes :tacn:`cbv` (and its derivative :tacn:`compute`) print
- information about the constants it encounters and the unfolding decisions it
- makes.
+.. tacn:: red @simple_occurrences
-.. tacn:: red
- :name: red
+   Reduces the :gdef:`head constant` of the selected hypotheses and/or
+   the goal, if possible (the :gdef:`head` is the beginning of the term).
+   Each selected item must have the form:
- This tactic applies to a goal that has the form::
+ :n:`{? forall @open_binders,} T`
- forall (x:T1) ... (xk:Tk), T
+   where `T` does not begin with a `forall` and βιζ-reduces to :n:`c t__1 … t__n`
+   with :g:`c` a constant.
+ If :g:`c` is transparent then it replaces :g:`c` with its
+ definition and reduces again until no further reduction is possible.
- with :g:`T` :math:`\beta`:math:`\iota`:math:`\zeta`-reducing to :g:`c t`:sub:`1` :g:`... t`:sub:`n` and :g:`c` a
- constant. If :g:`c` is transparent then it replaces :g:`c` with its
- definition (say :g:`t`) and then reduces
- :g:`(t t`:sub:`1` :g:`... t`:sub:`n` :g:`)` according to :math:`\beta`:math:`\iota`:math:`\zeta`-reduction rules.
+ .. exn:: No head constant to reduce.
+ :undocumented:
-.. exn:: No head constant to reduce.
- :undocumented:
+.. tacn:: unfold {+, @reference_occs } {? @occurrences }
-.. tacn:: hnf
- :name: hnf
+ Applies :term:`delta-reduction` to
+ the constants specified by each :n:`@reference_occs`.
+ The selected hypotheses and/or goals are then reduced to βιζ-normal form.
+ Use the general reduction tactics if you want to only apply the
+ δ rule, for example :tacn:`cbv` :n:`delta [ @reference ]`.
- This tactic applies to any goal. It replaces the current goal with its
- head normal form according to the :math:`\beta`:math:`\delta`:math:`\iota`:math:`\zeta`-reduction rules, i.e. it
- reduces the head of the goal until it becomes a product or an
- irreducible term. All inner :math:`\beta`:math:`\iota`-redexes are also reduced.
- The behavior of both :tacn:`hnf` can be tuned using the :cmd:`Arguments` command.
+ :n:`@reference_occs`
+ If :n:`@reference` is a :n:`@qualid`, it must be a defined transparent
+ constant or local definition (see :ref:`gallina-definitions` and
+ :ref:`controlling-the-reduction-strategies`).
- Example: The term :g:`fun n : nat => S n + S n` is not reduced by :n:`hnf`.
+      If :n:`@reference` is a :n:`@string {? @scope_key}`, :n:`@string` must be
+      the discriminating
+      symbol of a notation (e.g. "+") or an expression defining a notation (e.g. `"_ +
+      _"`). If the notation is an application whose head symbol
+      is an unfoldable constant, the tactic unfolds it.
-.. note::
- The :math:`\delta` rule only applies to transparent constants (see :ref:`vernac-controlling-the-reduction-strategies`
- on transparency and opacity).
+ :n:`@occurrences`
+ If :n:`@occurrences` is specified,
+ the specified occurrences will be replaced in the selected hypotheses and/or goal.
+ Otherwise every occurrence of the constants in the goal is replaced.
+ If multiple :n:`@reference_occs` are given, any `at` clauses must be
+ in the :n:`@reference_occs` rather than in :n:`@occurrences`.
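+
+      For example (an illustrative sketch), unfolding only the second
+      occurrence of `Nat.add` by putting the `at` clause in :n:`@reference_occs`:
+
+      .. coqtop:: all abort
+
+         Goal 1 + 0 = 1 + 0.
+         unfold Nat.add at 2.
+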
-.. tacn:: cbn
- simpl
- :name: cbn; simpl
+ .. exn:: Cannot turn {| inductive | constructor } into an evaluable reference.
- These tactics apply to any goal. They try to reduce a term to
- something still readable instead of fully normalizing it. They perform
- a sort of strong normalization with two key differences:
+ Occurs when trying to unfold something that is
+ defined as an inductive type (or constructor) and not as a
+ definition.
- + They unfold a constant if and only if it leads to a :math:`\iota`-reduction,
- i.e. reducing a match or unfolding a fixpoint.
- + While reducing a constant unfolding to (co)fixpoints, the tactics
- use the name of the constant the (co)fixpoint comes from instead of
- the (co)fixpoint definition in recursive calls.
+ .. example::
- The :tacn:`cbn` tactic was intended to be a more principled, faster and more
- predictable replacement for :tacn:`simpl`.
+ .. coqtop:: abort all fail
- The :tacn:`cbn` tactic accepts the same flags as :tacn:`cbv` and
- :tacn:`lazy`. The behavior of both :tacn:`simpl` and :tacn:`cbn`
- can be tuned using the :cmd:`Arguments` command.
+ Goal 0 <= 1.
+ unfold le.
- .. todo add "See <subsection about controlling the behavior of reduction strategies>"
- to TBA section
+ .. exn:: @ident is opaque.
- Notice that only transparent constants whose name can be reused in the
- recursive calls are possibly unfolded by :tacn:`simpl`. For instance a
- constant defined by :g:`plus' := plus` is possibly unfolded and reused in
- the recursive calls, but a constant such as :g:`succ := plus (S O)` is
- never unfolded. This is the main difference between :tacn:`simpl` and :tacn:`cbn`.
- The tactic :tacn:`cbn` reduces whenever it will be able to reuse it or not:
- :g:`succ t` is reduced to :g:`S t`.
+ Raised if you are trying to unfold a definition
+ that has been marked opaque.
-.. tacv:: cbn [ {+ @qualid} ]
- cbn - [ {+ @qualid} ]
+ .. example::
- These are respectively synonyms of :n:`cbn beta delta [ {+ @qualid} ] iota zeta`
- and :n:`cbn beta delta - [ {+ @qualid} ] iota zeta` (see :tacn:`cbn`).
+ .. coqtop:: abort all fail
-.. tacv:: simpl @pattern
+ Opaque Nat.add.
+ Goal 1 + 0 = 1.
+ unfold Nat.add.
- This applies :tacn:`simpl` only to the subterms matching
- :n:`@pattern` in the current goal.
+ .. exn:: Bad occurrence number of @qualid.
+ :undocumented:
-.. tacv:: simpl @pattern at {+ @natural}
+ .. exn:: @qualid does not occur.
+ :undocumented:
- This applies :tacn:`simpl` only to the :n:`{+ @natural}` occurrences of the subterms
- matching :n:`@pattern` in the current goal.
+.. tacn:: fold {+ @one_term } @simple_occurrences
- .. exn:: Too few occurrences.
- :undocumented:
+ First, this tactic reduces each :n:`@one_term` using the :tacn:`red` tactic.
+ Then, every occurrence of the resulting terms in the selected hypotheses and/or
+ goal will be replaced by its associated :n:`@one_term`. This tactic is particularly
+ useful for
+ reversing undesired unfoldings, which may make the goal very hard to read.
+ The undesired unfoldings may be due to the limited capabilities of
+ other reduction tactics.
+ On the other hand, when an unfolded function applied to its argument has been
+ reduced, the :tacn:`fold` tactic doesn't do anything.
-.. tacv:: simpl @qualid
- simpl @string
+ :tacn:`fold` :n:`@one_term__1 @one_term__2` is equivalent to
+ :n:`fold @one_term__1; fold @one_term__2`.
- This applies :tacn:`simpl` only to the applicative subterms whose head occurrence
- is the unfoldable constant :n:`@qualid` (the constant can be referred to by
- its notation using :n:`@string` if such a notation exists).
+ .. example:: :tacn:`fold` doesn't always undo :tacn:`unfold`
-.. tacv:: simpl @qualid at {+ @natural}
- simpl @string at {+ @natural}
+ .. coqtop:: all
- This applies :tacn:`simpl` only to the :n:`{+ @natural}` applicative subterms whose
- head occurrence is :n:`@qualid` (or :n:`@string`).
+ Goal ~0=0.
+ unfold not.
-:opt:`Debug` ``"RAKAM"`` makes :tacn:`cbn` print various debugging information.
-``RAKAM`` is the Refolding Algebraic Krivine Abstract Machine.
+      This :tacn:`fold` doesn't undo the preceding :tacn:`unfold` (it makes no change):
-.. tacn:: unfold @qualid
- :name: unfold
+ .. coqtop:: all
- This tactic applies to any goal. The argument qualid must denote a
- defined transparent constant or local definition (see
- :ref:`gallina-definitions` and
- :ref:`vernac-controlling-the-reduction-strategies`). The tactic
- :tacn:`unfold` applies the :math:`\delta` rule to each occurrence
- of the constant to which :n:`@qualid` refers in the current goal
- and then replaces it with its :math:`\beta\iota\zeta`-normal form.
- Use the general reduction tactics if you want to avoid this final
- reduction, for instance :n:`cbv delta [@qualid]`.
+ fold not.
- .. exn:: Cannot coerce @qualid to an evaluable reference.
+ However, this :tacn:`pattern` followed by :tacn:`fold` does:
- This error is frequent when trying to unfold something that has
- defined as an inductive type (or constructor) and not as a
- definition.
+ .. coqtop:: all abort
- .. example::
+ pattern (0 = 0).
+ fold not.
- .. coqtop:: abort all fail
+ .. example:: Use :tacn:`fold` to reverse unfolding of `fold_right`
- Goal 0 <= 1.
- unfold le.
+ .. coqtop:: none
- This error can also be raised if you are trying to unfold
- something that has been marked as opaque.
+ Require Import Coq.Lists.List.
+ Local Open Scope list_scope.
- .. example::
+ .. coqtop:: all abort
- .. coqtop:: abort all fail
+ Goal forall x xs, fold_right and True (x::xs).
+ red.
+ fold (fold_right and True).
- Opaque Nat.add.
- Goal 1 + 0 = 1.
- unfold Nat.add.
+.. tacn:: pattern {+, @pattern_occs } {? @occurrences }
- .. tacv:: unfold @qualid in @goal_occurrences
+ Performs beta-expansion (the inverse of :term:`beta-reduction`) for the
+ selected hypotheses and/or goals.
+ The :n:`@one_term`\s in :n:`@pattern_occs` must be free subterms in the selected items.
+   For each selected item :g:`T`, the expansion is done for the
+   :n:`@one_term`\s given in :n:`@pattern_occs` by:
- Replaces :n:`@qualid` in hypothesis (or hypotheses) designated
- by :token:`goal_occurrences` with its definition and replaces
- the hypothesis with its :math:`\beta`:math:`\iota` normal form.
+ + replacing all selected occurrences of the :n:`@one_term`\s in :g:`T` with fresh variables
+ + abstracting these variables
+ + applying the abstracted goal to the :n:`@one_term`\s
- .. tacv:: unfold {+, @qualid}
+ For instance, if the current goal :g:`T` is expressible as :n:`φ(t__1 … t__n)`
+ where the notation captures all the instances of the :n:`t__i` in φ, then :tacn:`pattern`
+ :n:`t__1, …, t__n` generates the equivalent goal
+   :n:`(fun (x__1:A__1) … (x__n:A__n) => φ(x__1 … x__n)) t__1 … t__n`.
+   If :n:`t__i` occurs in one of the generated types :n:`A__j`
+   (for `j > i`),
+   those occurrences will also be considered and possibly abstracted.
- Replaces :n:`{+, @qualid}` with their definitions and replaces
- the current goal with its :math:`\beta`:math:`\iota` normal
- form.
+ This tactic can be used, for instance, when the tactic :tacn:`apply` fails
+ on matching or to better control the behavior of :tacn:`rewrite`.
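+
+   For example (a small sketch), abstracting the subterm `1 + 0`:
+
+   .. coqtop:: all abort
+
+      Goal 1 + 0 = 1.
+      pattern (1 + 0).
+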
- .. tacv:: unfold {+, @qualid at @occurrences }
+Fast reduction tactics: vm_compute and native_compute
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- The list :token:`occurrences` specify the occurrences of
- :n:`@qualid` to be unfolded. Occurrences are located from left
- to right.
+:tacn:`vm_compute` is a brute-force but efficient tactic that
+first normalizes the terms before comparing them. It is based on a
+bytecode representation of terms similar to the bytecode
+representation used in the ZINC virtual machine :cite:`Leroy90`. It is
+especially useful for intensive computation of algebraic values, such
+as numbers, and for reflection-based tactics.
- .. exn:: Bad occurrence number of @qualid.
- :undocumented:
+:tacn:`native_compute` is based on converting the Coq code to OCaml.
- .. exn:: @qualid does not occur.
- :undocumented:
+Note that both these tactics ignore :cmd:`Opaque` markings
+(see issue `#4776 <https://github.com/coq/coq/issues/4776>`_) and do not
+apply unfolding strategies such as those set with :cmd:`Strategy`.
- .. tacv:: unfold @string
+:tacn:`native_compute` is typically two to five
+times faster than :tacn:`vm_compute` at applying conversion rules
+when Coq is running native code, but :tacn:`native_compute` requires
+considerably more overhead. We recommend using :tacn:`native_compute`
+when all of the following are true (otherwise use :tacn:`vm_compute`):
- If :n:`@string` denotes the discriminating symbol of a notation
- (e.g. "+") or an expression defining a notation (e.g. `"_ +
- _"`), and this notation denotes an application whose head symbol
- is an unfoldable constant, then the tactic unfolds it.
+- the running time in :tacn:`vm_compute` is at least 5-10 seconds
+- the size of the input term is small (e.g. hand-generated code rather than
+ automatically-generated code that may have nested destructs on inductives
+ with dozens or hundreds of constructors)
+- the output is small (e.g. you're returning a boolean, a natural number or
+ an integer rather than a large abstract syntax tree)
- .. tacv:: unfold @string%@ident
+These tactics change existential variables in a way similar to other conversions
+while also adding a single explicit cast (see :ref:`type-cast`) to the proof term
+to tell the kernel which reduction engine to use.
- This is variant of :n:`unfold @string` where :n:`@string` gets
- its interpretation from the scope bound to the delimiting key
- :token:`ident` instead of its default interpretation (see
- :ref:`Localinterpretationrulesfornotations`).
+.. tacn:: vm_compute {? {| @reference_occs | @pattern_occs } } {? @occurrences }
- .. tacv:: unfold {+, {| @qualid | @string{? %@ident } } {? at @occurrences } } {? in @goal_occurrences }
+ Evaluates the goal using the optimized call-by-value evaluation
+ bytecode-based virtual machine described in :cite:`CompiledStrongReduction`.
+ This algorithm is dramatically more efficient than the algorithm used for the
+ :tacn:`cbv` tactic, but it cannot be fine-tuned. It is especially useful for
+ full evaluation of algebraic objects. This includes the case of
+ reflection-based tactics.
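+
+   For example (the computation below is an arbitrary illustration):
+
+   .. coqtop:: all abort
+
+      Goal Nat.pow 2 8 = 256.
+      vm_compute.
+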
- This is the most general form.
+.. tacn:: native_compute {? {| @reference_occs | @pattern_occs } } {? @occurrences }
-.. tacn:: fold @term
- :name: fold
+ Evaluates the goal by compilation to OCaml as described
+ in :cite:`FullReduction`. Depending on the configuration, this tactic can either default to
+ :tacn:`vm_compute`, recompile dependencies or fail due to some missing
+ precompiled dependencies,
+ see :ref:`the native-compiler option <native-compiler-options>` for details.
- This tactic applies to any goal. The term :n:`@term` is reduced using the
- :tacn:`red` tactic. Every occurrence of the resulting :n:`@term` in the goal is
- then replaced by :n:`@term`. This tactic is particularly useful when a fixpoint
- definition has been wrongfully unfolded, making the goal very hard to read.
- On the other hand, when an unfolded function applied to its argument has been
- reduced, the :tacn:`fold` tactic won't do anything.
+ .. flag:: NativeCompute Timing
- .. example::
+ This flag causes all calls to the native compiler to print
+ timing information for the conversion to native code,
+ compilation, execution, and reification phases of native
+ compilation. Timing is printed in units of seconds of
+ wall-clock time.
- .. coqtop:: all abort
+ .. flag:: NativeCompute Profiling
- Goal ~0=0.
- unfold not.
- Fail progress fold not.
- pattern (0 = 0).
- fold not.
+ On Linux, if you have the ``perf`` profiler installed, this flag makes
+ it possible to profile :tacn:`native_compute` evaluations.
- .. tacv:: fold {+ @term}
+ .. opt:: NativeCompute Profile Filename @string
- Equivalent to :n:`fold @term ; ... ; fold @term`.
+ This option specifies the profile output; the default is
+ ``native_compute_profile.data``. The actual filename used
+ will contain extra characters to avoid overwriting an existing file; that
+ filename is reported to the user.
+ That means you can individually profile multiple uses of
+ :tacn:`native_compute` in a script. From the Linux command line, run ``perf report``
+ on the profile file to see the results. Consult the ``perf`` documentation
+ for more details.
-.. tacn:: pattern @term
- :name: pattern
+Computing in a term: eval and Eval
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- This command applies to any goal. The argument :n:`@term` must be a free
- subterm of the current goal. The command pattern performs :math:`\beta`-expansion
- (the inverse of :math:`\beta`-reduction) of the current goal (say :g:`T`) by
+Evaluation of a term can be performed with:
- + replacing all occurrences of :n:`@term` in :g:`T` with a fresh variable
- + abstracting this variable
- + applying the abstracted goal to :n:`@term`
+.. tacn:: eval @red_expr in @term
- For instance, if the current goal :g:`T` is expressible as
- :math:`\varphi`:g:`(t)` where the notation captures all the instances of :g:`t`
- in :math:`\varphi`:g:`(t)`, then :n:`pattern t` transforms it into
- :g:`(fun x:A =>` :math:`\varphi`:g:`(x)) t`. This tactic can be used, for
- instance, when the tactic ``apply`` fails on matching.
-.. tacv:: pattern @term at {+ @natural}
+ .. insertprodn red_expr red_expr
- Only the occurrences :n:`{+ @natural}` of :n:`@term` are considered for
- :math:`\beta`-expansion. Occurrences are located from left to right.
+ .. prodn::
+ red_expr ::= lazy {? @reductions }
+ | cbv {? @reductions }
+ | compute {? @delta_reductions }
+ | vm_compute {? {| @reference_occs | @pattern_occs } }
+ | native_compute {? {| @reference_occs | @pattern_occs } }
+ | red
+ | hnf
+ | simpl {? @delta_reductions } {? {| @reference_occs | @pattern_occs } }
+ | cbn {? @reductions }
+ | unfold {+, @reference_occs }
+ | fold {+ @one_term }
+ | pattern {+, @pattern_occs }
+ | @ident
-.. tacv:: pattern @term at - {+ @natural}
+ :tacn:`eval` is a :token:`value_tactic`. It returns the result of
+ applying the conversion rules specified by :n:`@red_expr`. It does not
+ change the proof state.
- All occurrences except the occurrences of indexes :n:`{+ @natural }`
- of :n:`@term` are considered for :math:`\beta`-expansion. Occurrences are located from
- left to right.
+ The :n:`@red_expr` alternatives that begin with a keyword correspond to the
+ tactic with the same name, though in several cases with simpler syntax
+ than the tactic. :n:`@ident` is a named reduction expression created
+ with :cmd:`Declare Reduction`.
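+
+   For example, an illustrative sketch of using :tacn:`eval` inside an Ltac
+   expression (the computed term is arbitrary):
+
+   .. coqtop:: all abort
+
+      Goal True.
+      let t := eval cbv in (andb true false) in idtac t.
+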
-.. tacv:: pattern {+, @term}
+ .. seealso:: Section :ref:`applyingconversionrules`.
- Starting from a goal :math:`\varphi`:g:`(t`:sub:`1` :g:`... t`:sub:`m`:g:`)`,
- the tactic :n:`pattern t`:sub:`1`:n:`, ..., t`:sub:`m` generates the
- equivalent goal
- :g:`(fun (x`:sub:`1`:g:`:A`:sub:`1`:g:`) ... (x`:sub:`m` :g:`:A`:sub:`m` :g:`) =>`:math:`\varphi`:g:`(x`:sub:`1` :g:`... x`:sub:`m` :g:`)) t`:sub:`1` :g:`... t`:sub:`m`.
- If :g:`t`:sub:`i` occurs in one of the generated types :g:`A`:sub:`j` these
- occurrences will also be considered and possibly abstracted.
+.. cmd:: Eval @red_expr in @term
+
+ Performs the specified reduction on :n:`@term` and displays
+ the resulting term with its type. If a proof is open, :n:`@term`
+ may reference hypotheses of the selected goal. :cmd:`Eval` is
+ a :token:`query_command`, so it may be prefixed with a goal selector.
+
+ .. cmd:: Compute @term
+
+ Evaluates :n:`@term` using the bytecode-based virtual machine.
+ It is a shortcut for :cmd:`Eval` :n:`vm_compute in @term`.
+ :cmd:`Compute` is a :token:`query_command`, so it may be prefixed
+ with a goal selector.
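+
+      For example (an arbitrary computation):
+
+      .. coqtop:: all
+
+         Compute 2 * 3 + 4.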
+
+.. cmd:: Declare Reduction @ident := @red_expr
+
+ Declares a short name for the reduction expression :n:`@red_expr`, for
+ instance ``lazy beta delta [foo bar]``. This short name can then be used
+ in :n:`Eval @ident in` or ``eval`` constructs. This command
+ accepts the :attr:`local` attribute, which indicates that the reduction
+ will be discarded at the end of the
+ file or module. The name is not qualified. In
+ particular declaring the same name in several modules or in several
+ functor applications will be rejected if these declarations are not
+ local. The name :n:`@ident` cannot be used directly as an Ltac tactic, but
+ nothing prevents the user from also performing a
+ :n:`Ltac @ident := @red_expr`.
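+
+   For example (the name `unfold_pred` below is an arbitrary illustration):
+
+   .. coqtop:: all
+
+      Declare Reduction unfold_pred := cbv beta delta [Nat.pred] iota.
+      Eval unfold_pred in (fun n => Nat.pred (S n)).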
+
+.. _controlling-the-reduction-strategies:
+
+Controlling reduction strategies and the conversion algorithm
+-------------------------------------------------------------
+
+The commands to fine-tune the reduction strategies and the lazy conversion
+algorithm are described in this section. Also see :ref:`Args_effect_on_unfolding`,
+which describes additional ways to fine-tune unfolding.
+
+.. cmd:: Opaque {+ @reference }
+
+ Marks the specified constants as :term:`opaque` so tactics won't :term:`unfold` them
+ with :term:`delta-reduction`.
+ "Constants" are items defined by commands such as :cmd:`Definition`,
+ :cmd:`Let` (with an explicit body), :cmd:`Fixpoint`, :cmd:`CoFixpoint`
+ and :cmd:`Function`.
-.. tacv:: pattern {+, @term at {+ @natural}}
+ This command accepts the :attr:`global` attribute. By default, the scope
+ of :cmd:`Opaque` is limited to the current section or module.
- This behaves as above but processing only the occurrences :n:`{+ @natural}` of
- :n:`@term` starting from :n:`@term`.
+ :cmd:`Opaque` also affects Coq's conversion algorithm, causing
+ it to delay unfolding the specified constants as much as possible when it
+ has to check that two distinct applied constants are convertible.
+ See Section :ref:`conversion-rules`.
-.. tacv:: pattern {+, @term {? at {? -} {+, @natural}}}
+.. cmd:: Transparent {+ @reference }
- This is the most general syntax that combines the different variants.
+ The opposite of :cmd:`Opaque`, it marks the specified constants
+ as :term:`transparent`
+ so that tactics may unfold them. See :cmd:`Opaque` above.
+
+ This command accepts the :attr:`global` attribute. By default, the scope
+ of :cmd:`Transparent` is limited to the current section or module.
+
+ Note that constants defined by proofs ending with :cmd:`Qed` are
+ irreversibly opaque; :cmd:`Transparent` will not make them transparent.
+ This is consistent
+ with the usual mathematical practice of *proof irrelevance*: what
+ matters in a mathematical development is the sequence of lemma
+ statements, not their actual proofs. This distinguishes lemmas from
+ the usual defined constants, whose actual values are of course
+ relevant in general.
+
+ .. exn:: The reference @qualid was not found in the current environment.
+
+ There is no constant named :n:`@qualid` in the environment.
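+
+   .. example:: Toggling unfoldability (a sketch; `double` is an arbitrary definition)
+
+      .. coqtop:: all abort
+
+         Definition double (n : nat) := n + n.
+         Goal double 1 = 2.
+         Opaque double.
+         Fail unfold double.
+         Transparent double.
+         unfold double.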
+
+.. seealso:: :ref:`applyingconversionrules`, :cmd:`Qed` and :cmd:`Defined`
+
+.. _vernac-strategy:
+
+.. cmd:: Strategy {+ @strategy_level [ {+ @reference } ] }
+
+ .. insertprodn strategy_level strategy_level
+
+ .. prodn::
+ strategy_level ::= opaque
+ | @integer
+ | expand
+ | transparent
+
+ Generalizes the behavior of the :cmd:`Opaque` and :cmd:`Transparent`
+ commands. It is used to fine-tune the strategy for unfolding
+ constants, both at the tactic level and at the kernel level. This
+ command associates a :n:`@strategy_level` with the qualified names in the :n:`@reference`
+ sequence. Whenever two
+ expressions with two distinct head constants are compared (for
+ example, typechecking `f x` where `f : A -> B` and `x : C` will result in
+ converting `A` and `C`), the one
+ with lower level is expanded first. In case of a tie, the second one
+ (appearing in the cast type) is expanded.
+
+ This command accepts the :attr:`local` attribute, which limits its effect
+ to the current section or module, in which case the section and module
+ behavior is the same as :cmd:`Opaque` and :cmd:`Transparent` (without :attr:`global`).
+
+ Levels can be one of the following (higher to lower):
+
+ + ``opaque`` : level of opaque constants. They cannot be expanded by
+ tactics (behaves like +∞, see next item).
+ + :n:`@integer` : levels indexed by an integer. Level 0 corresponds to the
+ default behavior, which corresponds to transparent constants. This
+ level can also be referred to as ``transparent``. Negative levels
+ correspond to constants to be expanded before normal transparent
+ constants, while positive levels correspond to constants to be
+ expanded after normal transparent constants.
+ + ``expand`` : level of constants that should be expanded first (behaves
+ like −∞)
+ + ``transparent`` : Equivalent to level 0
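+
+   For example (an illustrative sketch; `Nat.add` is an arbitrary choice):
+
+   .. coqtop:: all
+
+      Strategy expand [Nat.add].
+      Print Strategy Nat.add.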
+
+.. cmd:: Print Strategy @reference
+
+ This command prints the strategy currently associated with :n:`@reference`. It
+ fails if :n:`@reference` is not an unfoldable reference, that is, neither a
+ variable nor a constant.
+
+ .. exn:: The reference is not unfoldable.
+ :undocumented:
+
+.. cmd:: Print Strategies
+
+   Prints all currently non-transparent strategies.
.. tacn:: with_strategy @strategy_level_or_var [ {+ @reference } ] @ltac_expr3
- :name: with_strategy
+
+ .. insertprodn strategy_level_or_var strategy_level_or_var
+
+ .. prodn::
+ strategy_level_or_var ::= @strategy_level
+ | @ident
Executes :token:`ltac_expr3`, applying the alternate unfolding
behavior that the :cmd:`Strategy` command controls, but only for
@@ -918,15 +1076,3 @@ the conversion in hypotheses :n:`{+ @ident}`.
tactics to persist information about conversion hints in the
proof term. See `#12200
<https://github.com/coq/coq/issues/12200>`_ for more details.
-
-Conversion tactics applied to hypotheses
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- The form :n:`@tactic in {+, @ident }` applies :token:`tactic` (any of the
- conversion tactics listed in this section) to the hypotheses :n:`{+ @ident}`.
-
- If :token:`ident` is a local definition, then :token:`ident` can be replaced by
- :n:`type of @ident` to address not the body but the type of the local
- definition.
-
- Example: :n:`unfold not in (type of H1) (type of H3)`.
diff --git a/doc/tools/docgram/common.edit_mlg b/doc/tools/docgram/common.edit_mlg
index 27144fd1ad..24ecc65e9b 100644
--- a/doc/tools/docgram/common.edit_mlg
+++ b/doc/tools/docgram/common.edit_mlg
@@ -62,8 +62,8 @@ SPLICE: [
]
RENAME: [
-| G_LTAC2_delta_flag ltac2_delta_flag
-| G_LTAC2_strategy_flag ltac2_strategy_flag
+| G_LTAC2_delta_flag ltac2_delta_reductions
+| G_LTAC2_strategy_flag ltac2_reductions
| G_LTAC2_binder ltac2_binder
| G_LTAC2_branches ltac2_branches
| G_LTAC2_let_clause ltac2_let_clause
@@ -640,7 +640,7 @@ delta_flag: [
| OPTINREF
]
-ltac2_delta_flag: [
+ltac2_delta_reductions: [
| EDIT ADD_OPT "-" "[" refglobals "]" (* Ltac2 plugin *)
]
@@ -924,6 +924,10 @@ where: [
| "before" ident
]
+simple_occurrences: [
+(* placeholder (yuck) *)
+]
+
simple_tactic: [
| REPLACE "eauto" OPT nat_or_var OPT nat_or_var auto_using hintbases
| WITH "eauto" OPT nat_or_var auto_using hintbases
@@ -937,6 +941,26 @@ simple_tactic: [
| DELETE "autorewrite" "*" "with" LIST1 preident clause
| REPLACE "autorewrite" "*" "with" LIST1 preident clause "using" tactic
| WITH "autorewrite" OPT "*" "with" LIST1 preident clause OPT ( "using" tactic )
+| REPLACE "autounfold" hintbases clause_dft_concl
+| WITH "autounfold" hintbases OPT simple_occurrences
+| REPLACE "red" clause_dft_concl
+| WITH "red" simple_occurrences
+| REPLACE "simpl" OPT delta_flag OPT ref_or_pattern_occ clause_dft_concl
+| WITH "simpl" OPT delta_flag OPT ref_or_pattern_occ simple_occurrences
+| REPLACE "hnf" clause_dft_concl
+| WITH "hnf" simple_occurrences
+| REPLACE "cbv" strategy_flag clause_dft_concl
+| WITH "cbv" strategy_flag simple_occurrences
+| PRINT
+| REPLACE "compute" OPT delta_flag clause_dft_concl
+| WITH "compute" OPT delta_flag simple_occurrences
+| REPLACE "lazy" strategy_flag clause_dft_concl
+| WITH "lazy" strategy_flag simple_occurrences
+| REPLACE "cbn" strategy_flag clause_dft_concl
+| WITH "cbn" strategy_flag simple_occurrences
+| REPLACE "fold" LIST1 constr clause_dft_concl
+| WITH "fold" LIST1 constr simple_occurrences
+
| DELETE "cofix" ident
| REPLACE "cofix" ident "with" LIST1 cofixdecl
| WITH "cofix" ident OPT ( "with" LIST1 cofixdecl )
@@ -2460,6 +2484,10 @@ clause_dft_concl: [
| WITH occs
]
+simple_occurrences: [
+| clause_dft_concl (* semantically restricted: no "at" clause *)
+]
+
occs_nums: [
| EDIT ADD_OPT "-" LIST1 nat_or_var
]
@@ -2471,10 +2499,8 @@ variance_identref: [
conversion: [
| DELETE constr
| DELETE constr "with" constr
-| PRINT
| REPLACE constr "at" occs_nums "with" constr
| WITH OPT ( constr OPT ( "at" occs_nums ) "with" ) constr
-| PRINT
]
SPLICE: [
@@ -2762,6 +2788,10 @@ RENAME: [
| hypident_occ hyp_occs
| concl_occ concl_occs
| constr_with_bindings_arg one_term_with_bindings
+| red_flag reduction
+| strategy_flag reductions
+| delta_flag delta_reductions
+| q_strategy_flag q_reductions
]
simple_tactic: [
@@ -2806,7 +2836,7 @@ NOTINRSTS: [
| q_destruction_arg
| q_with_bindings
| q_bindings
-| q_strategy_flag
+| q_reductions
| q_reference
| q_clause
| q_occurrences
diff --git a/doc/tools/docgram/fullGrammar b/doc/tools/docgram/fullGrammar
index bc6b803bbb..be1b9d80fb 100644
--- a/doc/tools/docgram/fullGrammar
+++ b/doc/tools/docgram/fullGrammar
@@ -1,492 +1,6 @@
(* Coq grammar generated from .mlg files. Do not edit by hand. Not compiled into Coq *)
DOC_GRAMMAR
-Constr.ident: [
-| Prim.ident
-]
-
-Prim.name: [
-| "_"
-]
-
-global: [
-| Prim.reference
-]
-
-constr_pattern: [
-| constr
-]
-
-cpattern: [
-| lconstr
-]
-
-sort: [
-| "Set"
-| "Prop"
-| "SProp"
-| "Type"
-| "Type" "@{" "_" "}"
-| "Type" "@{" universe "}"
-]
-
-sort_family: [
-| "Set"
-| "Prop"
-| "SProp"
-| "Type"
-]
-
-universe_increment: [
-| "+" natural
-|
-]
-
-universe_name: [
-| global
-| "Set"
-| "Prop"
-]
-
-universe_expr: [
-| universe_name universe_increment
-]
-
-universe: [
-| "max" "(" LIST1 universe_expr SEP "," ")"
-| universe_expr
-]
-
-lconstr: [
-| term200
-]
-
-constr: [
-| term8
-| "@" global univ_annot
-]
-
-term200: [
-| binder_constr
-| term100
-]
-
-term100: [
-| term99 "<:" term200
-| term99 "<<:" term200
-| term99 ":" term200
-| term99 ":>"
-| term99
-]
-
-term99: [
-| term90
-]
-
-term90: [
-| term10
-]
-
-term10: [
-| term9 LIST1 arg
-| "@" global univ_annot LIST0 term9
-| "@" pattern_ident LIST1 identref
-| term9
-]
-
-term9: [
-| ".." term0 ".."
-| term8
-]
-
-term8: [
-| term1
-]
-
-term1: [
-| term0 ".(" global LIST0 arg ")"
-| term0 ".(" "@" global LIST0 ( term9 ) ")"
-| term0 "%" IDENT
-| term0
-]
-
-term0: [
-| atomic_constr
-| term_match
-| "(" term200 ")"
-| "{|" record_declaration bar_cbrace
-| "{" binder_constr "}"
-| "`{" term200 "}"
-| test_array_opening "[" "|" array_elems "|" lconstr type_cstr test_array_closing "|" "]" univ_annot
-| "`(" term200 ")"
-| "ltac" ":" "(" Pltac.ltac_expr ")"
-]
-
-array_elems: [
-| LIST0 lconstr SEP ";"
-]
-
-record_declaration: [
-| fields_def
-]
-
-fields_def: [
-| field_def ";" fields_def
-| field_def
-|
-]
-
-field_def: [
-| global binders ":=" lconstr
-]
-
-binder_constr: [
-| "forall" open_binders "," term200
-| "fun" open_binders "=>" term200
-| "let" name binders let_type_cstr ":=" term200 "in" term200
-| "let" "fix" fix_decl "in" term200
-| "let" "cofix" cofix_body "in" term200
-| "let" [ "(" LIST0 name SEP "," ")" | "()" ] as_return_type ":=" term200 "in" term200
-| "let" "'" pattern200 ":=" term200 "in" term200
-| "let" "'" pattern200 ":=" term200 case_type "in" term200
-| "let" "'" pattern200 "in" pattern200 ":=" term200 case_type "in" term200
-| "if" term200 as_return_type "then" term200 "else" term200
-| "fix" fix_decls
-| "cofix" cofix_decls
-| "if" term200 "is" ssr_dthen ssr_else (* SSR plugin *)
-| "if" term200 "isn't" ssr_dthen ssr_else (* SSR plugin *)
-| "let" ":" ssr_mpat ":=" lconstr "in" lconstr (* SSR plugin *)
-| "let" ":" ssr_mpat ":=" lconstr ssr_rtype "in" lconstr (* SSR plugin *)
-| "let" ":" ssr_mpat "in" pattern200 ":=" lconstr ssr_rtype "in" lconstr (* SSR plugin *)
-]
-
-arg: [
-| test_lpar_id_coloneq "(" identref ":=" lconstr ")"
-| term9
-]
-
-atomic_constr: [
-| global univ_annot
-| sort
-| NUMBER
-| string
-| "_"
-| "?" "[" identref "]"
-| "?" "[" pattern_ident "]"
-| pattern_ident evar_instance
-]
-
-inst: [
-| identref ":=" lconstr
-]
-
-evar_instance: [
-| "@{" LIST1 inst SEP ";" "}"
-|
-]
-
-univ_annot: [
-| "@{" LIST0 universe_level "}"
-|
-]
-
-universe_level: [
-| "Set"
-| "Prop"
-| "Type"
-| "_"
-| global
-]
-
-fix_decls: [
-| fix_decl
-| fix_decl "with" LIST1 fix_decl SEP "with" "for" identref
-]
-
-cofix_decls: [
-| cofix_body
-| cofix_body "with" LIST1 cofix_body SEP "with" "for" identref
-]
-
-fix_decl: [
-| identref binders_fixannot type_cstr ":=" term200
-]
-
-cofix_body: [
-| identref binders type_cstr ":=" term200
-]
-
-term_match: [
-| "match" LIST1 case_item SEP "," OPT case_type "with" branches "end"
-]
-
-case_item: [
-| term100 OPT [ "as" name ] OPT [ "in" pattern200 ]
-]
-
-case_type: [
-| "return" term100
-]
-
-as_return_type: [
-| OPT [ OPT [ "as" name ] case_type ]
-]
-
-branches: [
-| OPT "|" LIST0 eqn SEP "|"
-]
-
-mult_pattern: [
-| LIST1 pattern200 SEP ","
-]
-
-eqn: [
-| LIST1 mult_pattern SEP "|" "=>" lconstr
-]
-
-record_pattern: [
-| global ":=" pattern200
-]
-
-record_patterns: [
-| record_pattern ";" record_patterns
-| record_pattern
-|
-]
-
-pattern200: [
-| pattern100
-]
-
-pattern100: [
-| pattern99 ":" term200
-| pattern99
-]
-
-pattern99: [
-| pattern90
-]
-
-pattern90: [
-| pattern10
-]
-
-pattern10: [
-| pattern1 "as" name
-| pattern1 LIST1 pattern1
-| "@" Prim.reference LIST0 pattern1
-| pattern1
-]
-
-pattern1: [
-| pattern0 "%" IDENT
-| pattern0
-]
-
-pattern0: [
-| Prim.reference
-| "{|" record_patterns bar_cbrace
-| "_"
-| "(" pattern200 ")"
-| "(" pattern200 "|" LIST1 pattern200 SEP "|" ")"
-| NUMBER
-| string
-]
-
-fixannot: [
-| "{" "struct" identref "}"
-| "{" "wf" constr identref "}"
-| "{" "measure" constr OPT identref OPT constr "}"
-]
-
-binders_fixannot: [
-| ensure_fixannot fixannot
-| binder binders_fixannot
-|
-]
-
-open_binders: [
-| name LIST0 name ":" lconstr
-| name LIST0 name binders
-| name ".." name
-| closed_binder binders
-]
-
-binders: [
-| LIST0 binder
-| Pcoq.Constr.binders
-]
-
-binder: [
-| name
-| closed_binder
-]
-
-closed_binder: [
-| "(" name LIST1 name ":" lconstr ")"
-| "(" name ":" lconstr ")"
-| "(" name ":=" lconstr ")"
-| "(" name ":" lconstr ":=" lconstr ")"
-| "{" name "}"
-| "{" name LIST1 name ":" lconstr "}"
-| "{" name ":" lconstr "}"
-| "{" name LIST1 name "}"
-| "[" name "]"
-| "[" name LIST1 name ":" lconstr "]"
-| "[" name ":" lconstr "]"
-| "[" name LIST1 name "]"
-| "`(" LIST1 typeclass_constraint SEP "," ")"
-| "`{" LIST1 typeclass_constraint SEP "," "}"
-| "`[" LIST1 typeclass_constraint SEP "," "]"
-| "'" pattern0
-| [ "of" | "&" ] term99 (* SSR plugin *)
-]
-
-one_open_binder: [
-| name
-| name ":" lconstr
-| one_closed_binder
-]
-
-one_closed_binder: [
-| "(" name ":" lconstr ")"
-| "{" name "}"
-| "{" name ":" lconstr "}"
-| "[" name "]"
-| "[" name ":" lconstr "]"
-| "'" pattern0
-]
-
-typeclass_constraint: [
-| "!" term200
-| "{" name "}" ":" [ "!" | ] term200
-| test_name_colon name ":" [ "!" | ] term200
-| term200
-]
-
-type_cstr: [
-| ":" lconstr
-|
-]
-
-let_type_cstr: [
-| OPT [ ":" lconstr ]
-]
-
-preident: [
-| IDENT
-]
-
-ident: [
-| IDENT
-]
-
-pattern_ident: [
-| LEFTQMARK ident
-]
-
-identref: [
-| ident
-]
-
-hyp: [
-| identref
-]
-
-field: [
-| FIELD
-]
-
-fields: [
-| field fields
-| field
-]
-
-fullyqualid: [
-| ident fields
-| ident
-]
-
-name: [
-| "_"
-| ident
-]
-
-reference: [
-| ident fields
-| ident
-]
-
-qualid: [
-| reference
-]
-
-by_notation: [
-| ne_string OPT [ "%" IDENT ]
-]
-
-smart_global: [
-| reference
-| by_notation
-]
-
-ne_string: [
-| STRING
-]
-
-ne_lstring: [
-| ne_string
-]
-
-dirpath: [
-| ident LIST0 field
-]
-
-string: [
-| STRING
-]
-
-lstring: [
-| string
-]
-
-integer: [
-| bigint
-]
-
-natural: [
-| bignat
-]
-
-bigint: [
-| bignat
-| test_minus_nat "-" bignat
-]
-
-bignat: [
-| NUMBER
-]
-
-bar_cbrace: [
-| test_pipe_closedcurly "|" "}"
-]
-
-strategy_level: [
-| "expand"
-| "opaque"
-| integer
-| "transparent"
-]
-
-vernac_toplevel: [
-| "Drop" "."
-| "Quit" "."
-| "BackTo" natural "."
-| test_show_goal "Show" "Goal" natural "at" natural "."
-| "Show" "Proof" "Diffs" OPT "removed" "."
-| Pvernac.Vernac_.main_entry
-]
-
opt_hintbases: [
|
| ":" LIST1 IDENT
@@ -1467,6 +981,492 @@ binder_interp: [
| "as" "strict" "pattern"
]
+vernac_toplevel: [
+| "Drop" "."
+| "Quit" "."
+| "BackTo" natural "."
+| test_show_goal "Show" "Goal" natural "at" natural "."
+| "Show" "Proof" "Diffs" OPT "removed" "."
+| Pvernac.Vernac_.main_entry
+]
+
+Constr.ident: [
+| Prim.ident
+]
+
+Prim.name: [
+| "_"
+]
+
+global: [
+| Prim.reference
+]
+
+constr_pattern: [
+| constr
+]
+
+cpattern: [
+| lconstr
+]
+
+sort: [
+| "Set"
+| "Prop"
+| "SProp"
+| "Type"
+| "Type" "@{" "_" "}"
+| "Type" "@{" universe "}"
+]
+
+sort_family: [
+| "Set"
+| "Prop"
+| "SProp"
+| "Type"
+]
+
+universe_increment: [
+| "+" natural
+|
+]
+
+universe_name: [
+| global
+| "Set"
+| "Prop"
+]
+
+universe_expr: [
+| universe_name universe_increment
+]
+
+universe: [
+| "max" "(" LIST1 universe_expr SEP "," ")"
+| universe_expr
+]
+
+lconstr: [
+| term200
+]
+
+constr: [
+| term8
+| "@" global univ_annot
+]
+
+term200: [
+| binder_constr
+| term100
+]
+
+term100: [
+| term99 "<:" term200
+| term99 "<<:" term200
+| term99 ":" term200
+| term99 ":>"
+| term99
+]
+
+term99: [
+| term90
+]
+
+term90: [
+| term10
+]
+
+term10: [
+| term9 LIST1 arg
+| "@" global univ_annot LIST0 term9
+| "@" pattern_ident LIST1 identref
+| term9
+]
+
+term9: [
+| ".." term0 ".."
+| term8
+]
+
+term8: [
+| term1
+]
+
+term1: [
+| term0 ".(" global LIST0 arg ")"
+| term0 ".(" "@" global LIST0 ( term9 ) ")"
+| term0 "%" IDENT
+| term0
+]
+
+term0: [
+| atomic_constr
+| term_match
+| "(" term200 ")"
+| "{|" record_declaration bar_cbrace
+| "{" binder_constr "}"
+| "`{" term200 "}"
+| test_array_opening "[" "|" array_elems "|" lconstr type_cstr test_array_closing "|" "]" univ_annot
+| "`(" term200 ")"
+| "ltac" ":" "(" Pltac.ltac_expr ")"
+]
+
+array_elems: [
+| LIST0 lconstr SEP ";"
+]
+
+record_declaration: [
+| fields_def
+]
+
+fields_def: [
+| field_def ";" fields_def
+| field_def
+|
+]
+
+field_def: [
+| global binders ":=" lconstr
+]
+
+binder_constr: [
+| "forall" open_binders "," term200
+| "fun" open_binders "=>" term200
+| "let" name binders let_type_cstr ":=" term200 "in" term200
+| "let" "fix" fix_decl "in" term200
+| "let" "cofix" cofix_body "in" term200
+| "let" [ "(" LIST0 name SEP "," ")" | "()" ] as_return_type ":=" term200 "in" term200
+| "let" "'" pattern200 ":=" term200 "in" term200
+| "let" "'" pattern200 ":=" term200 case_type "in" term200
+| "let" "'" pattern200 "in" pattern200 ":=" term200 case_type "in" term200
+| "if" term200 as_return_type "then" term200 "else" term200
+| "fix" fix_decls
+| "cofix" cofix_decls
+| "if" term200 "is" ssr_dthen ssr_else (* SSR plugin *)
+| "if" term200 "isn't" ssr_dthen ssr_else (* SSR plugin *)
+| "let" ":" ssr_mpat ":=" lconstr "in" lconstr (* SSR plugin *)
+| "let" ":" ssr_mpat ":=" lconstr ssr_rtype "in" lconstr (* SSR plugin *)
+| "let" ":" ssr_mpat "in" pattern200 ":=" lconstr ssr_rtype "in" lconstr (* SSR plugin *)
+]
+
+arg: [
+| test_lpar_id_coloneq "(" identref ":=" lconstr ")"
+| term9
+]
+
+atomic_constr: [
+| global univ_annot
+| sort
+| NUMBER
+| string
+| "_"
+| "?" "[" identref "]"
+| "?" "[" pattern_ident "]"
+| pattern_ident evar_instance
+]
+
+inst: [
+| identref ":=" lconstr
+]
+
+evar_instance: [
+| "@{" LIST1 inst SEP ";" "}"
+|
+]
+
+univ_annot: [
+| "@{" LIST0 universe_level "}"
+|
+]
+
+universe_level: [
+| "Set"
+| "Prop"
+| "Type"
+| "_"
+| global
+]
+
+fix_decls: [
+| fix_decl
+| fix_decl "with" LIST1 fix_decl SEP "with" "for" identref
+]
+
+cofix_decls: [
+| cofix_body
+| cofix_body "with" LIST1 cofix_body SEP "with" "for" identref
+]
+
+fix_decl: [
+| identref binders_fixannot type_cstr ":=" term200
+]
+
+cofix_body: [
+| identref binders type_cstr ":=" term200
+]
+
+term_match: [
+| "match" LIST1 case_item SEP "," OPT case_type "with" branches "end"
+]
+
+case_item: [
+| term100 OPT [ "as" name ] OPT [ "in" pattern200 ]
+]
+
+case_type: [
+| "return" term100
+]
+
+as_return_type: [
+| OPT [ OPT [ "as" name ] case_type ]
+]
+
+branches: [
+| OPT "|" LIST0 eqn SEP "|"
+]
+
+mult_pattern: [
+| LIST1 pattern200 SEP ","
+]
+
+eqn: [
+| LIST1 mult_pattern SEP "|" "=>" lconstr
+]
+
+record_pattern: [
+| global ":=" pattern200
+]
+
+record_patterns: [
+| record_pattern ";" record_patterns
+| record_pattern
+|
+]
+
+pattern200: [
+| pattern100
+]
+
+pattern100: [
+| pattern99 ":" term200
+| pattern99
+]
+
+pattern99: [
+| pattern90
+]
+
+pattern90: [
+| pattern10
+]
+
+pattern10: [
+| pattern1 "as" name
+| pattern1 LIST1 pattern1
+| "@" Prim.reference LIST0 pattern1
+| pattern1
+]
+
+pattern1: [
+| pattern0 "%" IDENT
+| pattern0
+]
+
+pattern0: [
+| Prim.reference
+| "{|" record_patterns bar_cbrace
+| "_"
+| "(" pattern200 ")"
+| "(" pattern200 "|" LIST1 pattern200 SEP "|" ")"
+| NUMBER
+| string
+]
+
+fixannot: [
+| "{" "struct" identref "}"
+| "{" "wf" constr identref "}"
+| "{" "measure" constr OPT identref OPT constr "}"
+]
+
+binders_fixannot: [
+| ensure_fixannot fixannot
+| binder binders_fixannot
+|
+]
+
+open_binders: [
+| name LIST0 name ":" lconstr
+| name LIST0 name binders
+| name ".." name
+| closed_binder binders
+]
+
+binders: [
+| LIST0 binder
+| Pcoq.Constr.binders
+]
+
+binder: [
+| name
+| closed_binder
+]
+
+closed_binder: [
+| "(" name LIST1 name ":" lconstr ")"
+| "(" name ":" lconstr ")"
+| "(" name ":=" lconstr ")"
+| "(" name ":" lconstr ":=" lconstr ")"
+| "{" name "}"
+| "{" name LIST1 name ":" lconstr "}"
+| "{" name ":" lconstr "}"
+| "{" name LIST1 name "}"
+| "[" name "]"
+| "[" name LIST1 name ":" lconstr "]"
+| "[" name ":" lconstr "]"
+| "[" name LIST1 name "]"
+| "`(" LIST1 typeclass_constraint SEP "," ")"
+| "`{" LIST1 typeclass_constraint SEP "," "}"
+| "`[" LIST1 typeclass_constraint SEP "," "]"
+| "'" pattern0
+| [ "of" | "&" ] term99 (* SSR plugin *)
+]
+
+one_open_binder: [
+| name
+| name ":" lconstr
+| one_closed_binder
+]
+
+one_closed_binder: [
+| "(" name ":" lconstr ")"
+| "{" name "}"
+| "{" name ":" lconstr "}"
+| "[" name "]"
+| "[" name ":" lconstr "]"
+| "'" pattern0
+]
+
+typeclass_constraint: [
+| "!" term200
+| "{" name "}" ":" [ "!" | ] term200
+| test_name_colon name ":" [ "!" | ] term200
+| term200
+]
+
+type_cstr: [
+| ":" lconstr
+|
+]
+
+let_type_cstr: [
+| OPT [ ":" lconstr ]
+]
+
+preident: [
+| IDENT
+]
+
+ident: [
+| IDENT
+]
+
+pattern_ident: [
+| LEFTQMARK ident
+]
+
+identref: [
+| ident
+]
+
+hyp: [
+| identref
+]
+
+field: [
+| FIELD
+]
+
+fields: [
+| field fields
+| field
+]
+
+fullyqualid: [
+| ident fields
+| ident
+]
+
+name: [
+| "_"
+| ident
+]
+
+reference: [
+| ident fields
+| ident
+]
+
+qualid: [
+| reference
+]
+
+by_notation: [
+| ne_string OPT [ "%" IDENT ]
+]
+
+smart_global: [
+| reference
+| by_notation
+]
+
+ne_string: [
+| STRING
+]
+
+ne_lstring: [
+| ne_string
+]
+
+dirpath: [
+| ident LIST0 field
+]
+
+string: [
+| STRING
+]
+
+lstring: [
+| string
+]
+
+integer: [
+| bigint
+]
+
+natural: [
+| bignat
+]
+
+bigint: [
+| bignat
+| test_minus_nat "-" bignat
+]
+
+bignat: [
+| NUMBER
+]
+
+bar_cbrace: [
+| test_pipe_closedcurly "|" "}"
+]
+
+strategy_level: [
+| "expand"
+| "opaque"
+| integer
+| "transparent"
+]
+
simple_tactic: [
| "btauto"
| "congruence"
diff --git a/doc/tools/docgram/orderedGrammar b/doc/tools/docgram/orderedGrammar
index a34e96ac16..5674d28139 100644
--- a/doc/tools/docgram/orderedGrammar
+++ b/doc/tools/docgram/orderedGrammar
@@ -337,7 +337,7 @@ NOTINRSTS: [
| q_destruction_arg
| q_with_bindings
| q_bindings
-| q_strategy_flag
+| q_reductions
| q_reference
| q_clause
| q_occurrences
@@ -550,9 +550,9 @@ term_generalizing: [
]
term_cast: [
+| term10 ":" type
| term10 "<:" type
| term10 "<<:" type
-| term10 ":" type
| term10 ":>"
]
@@ -627,38 +627,38 @@ reduce: [
]
red_expr: [
-| "red"
-| "hnf"
-| "simpl" OPT delta_flag OPT [ reference_occs | pattern_occs ]
-| "cbv" OPT strategy_flag
-| "cbn" OPT strategy_flag
-| "lazy" OPT strategy_flag
-| "compute" OPT delta_flag
+| "lazy" OPT reductions
+| "cbv" OPT reductions
+| "compute" OPT delta_reductions
| "vm_compute" OPT [ reference_occs | pattern_occs ]
| "native_compute" OPT [ reference_occs | pattern_occs ]
+| "red"
+| "hnf"
+| "simpl" OPT delta_reductions OPT [ reference_occs | pattern_occs ]
+| "cbn" OPT reductions
| "unfold" LIST1 reference_occs SEP ","
| "fold" LIST1 one_term
| "pattern" LIST1 pattern_occs SEP ","
| ident
]
-delta_flag: [
-| OPT "-" "[" LIST1 reference "]"
-]
-
-strategy_flag: [
-| LIST1 red_flag
-| delta_flag
+reductions: [
+| LIST1 reduction
+| delta_reductions
]
-red_flag: [
+reduction: [
| "beta"
-| "iota"
+| "delta" OPT delta_reductions
| "match"
| "fix"
| "cofix"
+| "iota"
| "zeta"
-| "delta" OPT delta_flag
+]
+
+delta_reductions: [
+| OPT "-" "[" LIST1 reference "]"
]
reference_occs: [
@@ -1242,6 +1242,10 @@ occurrences: [
| "in" goal_occurrences
]
+simple_occurrences: [
+| occurrences
+]
+
occs_nums: [
| OPT "-" LIST1 nat_or_var
]
@@ -1741,7 +1745,7 @@ simple_tactic: [
| "info_eauto" OPT nat_or_var OPT auto_using OPT hintbases
| "dfs" "eauto" OPT nat_or_var OPT auto_using OPT hintbases
| "bfs" "eauto" OPT nat_or_var OPT auto_using OPT hintbases
-| "autounfold" OPT hintbases OPT occurrences
+| "autounfold" OPT hintbases OPT simple_occurrences
| "autounfold_one" OPT hintbases OPT ( "in" ident )
| "unify" one_term one_term OPT ( "with" ident )
| "typeclasses" "eauto" OPT "bfs" OPT nat_or_var OPT ( "with" LIST1 ident )
@@ -1811,17 +1815,17 @@ simple_tactic: [
| "inversion" [ ident | natural ] OPT as_or_and_ipat OPT ( "in" LIST1 ident )
| "inversion_clear" [ ident | natural ] OPT as_or_and_ipat OPT ( "in" LIST1 ident )
| "inversion" [ ident | natural ] "using" one_term OPT ( "in" LIST1 ident )
-| "red" OPT occurrences
-| "hnf" OPT occurrences
-| "simpl" OPT delta_flag OPT [ reference_occs | pattern_occs ] OPT occurrences
-| "cbv" OPT strategy_flag OPT occurrences
-| "cbn" OPT strategy_flag OPT occurrences
-| "lazy" OPT strategy_flag OPT occurrences
-| "compute" OPT delta_flag OPT occurrences
+| "red" simple_occurrences
+| "hnf" simple_occurrences
+| "simpl" OPT delta_reductions OPT [ reference_occs | pattern_occs ] simple_occurrences
+| "cbv" OPT reductions simple_occurrences
+| "cbn" OPT reductions simple_occurrences
+| "lazy" OPT reductions simple_occurrences
+| "compute" OPT delta_reductions simple_occurrences
| "vm_compute" OPT [ reference_occs | pattern_occs ] OPT occurrences
| "native_compute" OPT [ reference_occs | pattern_occs ] OPT occurrences
| "unfold" LIST1 reference_occs SEP "," OPT occurrences
-| "fold" LIST1 one_term OPT occurrences
+| "fold" LIST1 one_term simple_occurrences
| "pattern" LIST1 pattern_occs SEP "," OPT occurrences
| "change" OPT ( one_term OPT ( "at" occs_nums ) "with" ) one_term OPT occurrences
| "change_no_check" OPT ( one_term OPT ( "at" occs_nums ) "with" ) one_term OPT occurrences
@@ -2139,13 +2143,13 @@ ltac2_goal_tactics: [
| LIST0 ( OPT ltac2_expr ) SEP "|" (* Ltac2 plugin *)
]
-q_strategy_flag: [
-| ltac2_strategy_flag (* Ltac2 plugin *)
+q_reductions: [
+| ltac2_reductions (* Ltac2 plugin *)
]
-ltac2_strategy_flag: [
+ltac2_reductions: [
| LIST1 ltac2_red_flag (* Ltac2 plugin *)
-| OPT ltac2_delta_flag (* Ltac2 plugin *)
+| OPT ltac2_delta_reductions (* Ltac2 plugin *)
]
ltac2_red_flag: [
@@ -2155,10 +2159,10 @@ ltac2_red_flag: [
| "fix" (* Ltac2 plugin *)
| "cofix" (* Ltac2 plugin *)
| "zeta" (* Ltac2 plugin *)
-| "delta" OPT ltac2_delta_flag (* Ltac2 plugin *)
+| "delta" OPT ltac2_delta_reductions (* Ltac2 plugin *)
]
-ltac2_delta_flag: [
+ltac2_delta_reductions: [
| OPT "-" "[" LIST1 refglobal "]"
]
diff --git a/tactics/redexpr.ml b/tactics/redexpr.ml
index 87cae3abe5..3ee85f6b1b 100644
--- a/tactics/redexpr.ml
+++ b/tactics/redexpr.ml
@@ -271,11 +271,14 @@ let reduction_of_red_expr env r =
let error_illegal_clause () =
CErrors.user_err Pp.(str "\"at\" clause not supported in presence of an occurrence clause.")
+let error_multiple_patterns () =
+ CErrors.user_err Pp.(str "\"at\" clause in occurrences not supported with multiple patterns or references.")
+
let error_illegal_non_atomic_clause () =
CErrors.user_err Pp.(str "\"at\" clause not supported in presence of a non atomic \"in\" clause.")
-let error_occurrences_not_unsupported () =
- CErrors.user_err Pp.(str "Occurrences not supported for this reduction tactic.")
+let error_at_in_occurrences_not_supported () =
+ CErrors.user_err Pp.(str "\"at\" clause not supported for this reduction tactic.")
let bind_red_expr_occurrences occs nbcl redexp =
let open Locus in
@@ -292,14 +295,14 @@ let bind_red_expr_occurrences occs nbcl redexp =
else
match redexp with
| Unfold (_::_::_) ->
- error_illegal_clause ()
+ error_multiple_patterns ()
| Unfold [(occl,c)] ->
if occl != AllOccurrences then
error_illegal_clause ()
else
Unfold [(occs,c)]
| Pattern (_::_::_) ->
- error_illegal_clause ()
+ error_multiple_patterns ()
| Pattern [(occl,c)] ->
if occl != AllOccurrences then
error_illegal_clause ()
@@ -322,7 +325,7 @@ let bind_red_expr_occurrences occs nbcl redexp =
CbvNative (Some (occs,c))
| Red _ | Hnf | Cbv _ | Lazy _ | Cbn _
| ExtraRedExpr _ | Fold _ | Simpl (_,None) | CbvVm None | CbvNative None ->
- error_occurrences_not_unsupported ()
+ error_at_in_occurrences_not_supported ()
| Unfold [] | Pattern [] ->
assert false