diff options
Diffstat (limited to 'doc')
35 files changed, 7887 insertions, 1267 deletions
diff --git a/doc/Makefile.rt b/doc/Makefile.rt deleted file mode 100644 index 6c32813462..0000000000 --- a/doc/Makefile.rt +++ /dev/null @@ -1,43 +0,0 @@ -# Makefile for building Coq Technical Reports - -# if coqc,coqtop,coq-tex are not in your PATH, you need the environment -# variable COQBIN to be correctly set -# (COQTOP is autodetected) -# (some files are preprocessed using Coq and some part of the documentation -# is automatically built from the theories sources) - -# To compile documentation, you need the following tools: -# Dvi: latex (latex2e), bibtex, makeindex, dviselect (package RPM dviutils) -# Ps: dvips, psutils (ftp://ftp.dcs.ed.ac.uk/pub/ajcd/psutils.tar.gz) -# Pdf: pdflatex -# Html: -# - hevea: http://para.inria.fr/~maranget/hevea/ -# - htmlSplit: http://coq.inria.fr/~delahaye -# Rapports INRIA: dviselect, rrkit (par Michel Mauny) - -include ./Makefile - -################### -# RT -################### -# Fabrication d'un RT INRIA (utilise rrkit de Michel Mauny) -rt/Reference-Manual-RT.dvi: refman/Reference-Manual.dvi rt/RefMan-cover.tex - dviselect -i refman/Reference-Manual.dvi -o rt/RefMan-body.dvi 3: - (cd rt; $(LATEX) RefMan-cover.tex) - set a=`tail -1 refman/Reference-Manual.log`;\ - set a=expr \("$$a" : '.*(\(.*\) pages.*'\) % 2;\ - (cd rt; if $(TEST) "$$a = 0";\ - then rrkit RefMan-cover.dvi RefMan-body.dvi Reference-Manual-RT.dvi;\ - else rrkit -odd RefMan-cover.dvi RefMan-body.dvi Reference-Manual-RT.dvi;\ - fi) - -# Fabrication d'un RT INRIA (utilise rrkit de Michel Mauny) -rt/Tutorial-RT.dvi : tutorial/Tutorial.v.dvi rt/Tutorial-cover.tex - dviselect -i rt/Tutorial.v.dvi -o rt/Tutorial-body.dvi 3: - (cd rt; $(LATEX) Tutorial-cover.tex) - set a=`tail -1 tutorial/Tutorial.v.log`;\ - set a=expr \("$$a" : '.*(\(.*\) pages.*'\) % 2;\ - (cd rt; if $(TEST) "$$a = 0";\ - then rrkit Tutorial-cover.dvi Tutorial-body.dvi Tutorial-RT.dvi;\ - else rrkit -odd Tutorial-cover.dvi Tutorial-body.dvi Tutorial-RT.dvi;\ - fi) diff --git 
a/doc/RecTutorial/RecTutorial.v b/doc/RecTutorial/RecTutorial.v index 8cfeebc28b..4b0ab31254 100644 --- a/doc/RecTutorial/RecTutorial.v +++ b/doc/RecTutorial/RecTutorial.v @@ -1,3 +1,5 @@ +Unset Automatic Introduction. + Check (forall A:Type, (exists x:A, forall (y:A), x <> y) -> 2 = 3). @@ -69,13 +71,13 @@ Check (Prop::Set::nil). Require Import Bvector. -Print vector. +Print Vector.t. -Check (Vnil nat). +Check (Vector.nil nat). -Check (fun (A:Type)(a:A)=> Vcons _ a _ (Vnil _)). +Check (fun (A:Type)(a:A)=> Vector.cons _ a _ (Vector.nil _)). -Check (Vcons _ 5 _ (Vcons _ 3 _ (Vnil _))). +Check (Vector.cons _ 5 _ (Vector.cons _ 3 _ (Vector.nil _))). Lemma eq_3_3 : 2 + 1 = 3. Proof. @@ -146,6 +148,7 @@ Proof. intros; absurd (p < p); eauto with arith. Qed. +Require Extraction. Extraction max. @@ -300,8 +303,8 @@ Section Le_case_analysis. (HS : forall m, n <= m -> Q (S m)). Check ( match H in (_ <= q) return (Q q) with - | le_n => H0 - | le_S m Hm => HS m Hm + | le_n _ => H0 + | le_S _ m Hm => HS m Hm end ). @@ -317,16 +320,16 @@ Proof. Qed. Definition Vtail_total - (A : Type) (n : nat) (v : vector A n) : vector A (pred n):= -match v in (vector _ n0) return (vector A (pred n0)) with -| Vnil => Vnil A -| Vcons _ n0 v0 => v0 + (A : Type) (n : nat) (v : Vector.t A n) : Vector.t A (pred n):= +match v in (Vector.t _ n0) return (Vector.t A (pred n0)) with +| Vector.nil _ => Vector.nil A +| Vector.cons _ _ n0 v0 => v0 end. -Definition Vtail' (A:Type)(n:nat)(v:vector A n) : vector A (pred n). +Definition Vtail' (A:Type)(n:nat)(v:Vector.t A n) : Vector.t A (pred n). intros A n v; case v. simpl. - exact (Vnil A). + exact (Vector.nil A). simpl. auto. Defined. @@ -498,10 +501,8 @@ Inductive typ : Type := Definition typ_inject: typ. split. -exact typ. +Fail exact typ. (* -Defined. - Error: Universe Inconsistency. *) Abort. @@ -920,7 +921,6 @@ Defined. Print minus_decrease. - Definition div_aux (x y:nat)(H: Acc lt x):nat. fix 3. intros. 
@@ -969,40 +969,40 @@ let rec div_aux x y = | Right -> div_aux (minus x y) y) *) -Lemma vector0_is_vnil : forall (A:Type)(v:vector A 0), v = Vnil A. +Lemma vector0_is_vnil : forall (A:Type)(v:Vector.t A 0), v = Vector.nil A. Proof. intros A v;inversion v. Abort. (* - Lemma vector0_is_vnil_aux : forall (A:Type)(n:nat)(v:vector A n), - n= 0 -> v = Vnil A. + Lemma vector0_is_vnil_aux : forall (A:Type)(n:nat)(v:Vector.t A n), + n= 0 -> v = Vector.nil A. Toplevel input, characters 40281-40287 -> Lemma vector0_is_vnil_aux : forall (A:Set)(n:nat)(v:vector A n), n= 0 -> v = Vnil A. +> Lemma vector0_is_vnil_aux : forall (A:Set)(n:nat)(v:Vector.t A n), n= 0 -> v = Vector.nil A. > ^^^^^^ Error: In environment A : Set n : nat -v : vector A n +v : Vector.t A n e : n = 0 -The term "Vnil A" has type "vector A 0" while it is expected to have type - "vector A n" +The term "Vector.nil A" has type "Vector.t A 0" while it is expected to have type + "Vector.t A n" *) Require Import JMeq. (* On devrait changer Set en Type ? *) -Lemma vector0_is_vnil_aux : forall (A:Type)(n:nat)(v:vector A n), - n= 0 -> JMeq v (Vnil A). +Lemma vector0_is_vnil_aux : forall (A:Type)(n:nat)(v:Vector.t A n), + n= 0 -> JMeq v (Vector.nil A). Proof. destruct v. auto. intro; discriminate. Qed. -Lemma vector0_is_vnil : forall (A:Type)(v:vector A 0), v = Vnil A. +Lemma vector0_is_vnil : forall (A:Type)(v:Vector.t A 0), v = Vector.nil A. Proof. intros a v;apply JMeq_eq. apply vector0_is_vnil_aux. @@ -1010,56 +1010,56 @@ Proof. Qed. -Implicit Arguments Vcons [A n]. -Implicit Arguments Vnil [A]. -Implicit Arguments Vhead [A n]. -Implicit Arguments Vtail [A n]. +Implicit Arguments Vector.cons [A n]. +Implicit Arguments Vector.nil [A]. +Implicit Arguments Vector.hd [A n]. +Implicit Arguments Vector.tl [A n]. -Definition Vid : forall (A : Type)(n:nat), vector A n -> vector A n. +Definition Vid : forall (A : Type)(n:nat), Vector.t A n -> Vector.t A n. Proof. destruct n; intro v. - exact Vnil. 
- exact (Vcons (Vhead v) (Vtail v)). + exact Vector.nil. + exact (Vector.cons (Vector.hd v) (Vector.tl v)). Defined. -Eval simpl in (fun (A:Type)(v:vector A 0) => (Vid _ _ v)). +Eval simpl in (fun (A:Type)(v:Vector.t A 0) => (Vid _ _ v)). -Eval simpl in (fun (A:Type)(v:vector A 0) => v). +Eval simpl in (fun (A:Type)(v:Vector.t A 0) => v). -Lemma Vid_eq : forall (n:nat) (A:Type)(v:vector A n), v=(Vid _ n v). +Lemma Vid_eq : forall (n:nat) (A:Type)(v:Vector.t A n), v=(Vid _ n v). Proof. destruct v. reflexivity. reflexivity. Defined. -Theorem zero_nil : forall A (v:vector A 0), v = Vnil. +Theorem zero_nil : forall A (v:Vector.t A 0), v = Vector.nil. Proof. intros. - change (Vnil (A:=A)) with (Vid _ 0 v). + change (Vector.nil (A:=A)) with (Vid _ 0 v). apply Vid_eq. Defined. Theorem decomp : - forall (A : Type) (n : nat) (v : vector A (S n)), - v = Vcons (Vhead v) (Vtail v). + forall (A : Type) (n : nat) (v : Vector.t A (S n)), + v = Vector.cons (Vector.hd v) (Vector.tl v). Proof. intros. - change (Vcons (Vhead v) (Vtail v)) with (Vid _ (S n) v). + change (Vector.cons (Vector.hd v) (Vector.tl v)) with (Vid _ (S n) v). apply Vid_eq. Defined. Definition vector_double_rect : - forall (A:Type) (P: forall (n:nat),(vector A n)->(vector A n) -> Type), - P 0 Vnil Vnil -> - (forall n (v1 v2 : vector A n) a b, P n v1 v2 -> - P (S n) (Vcons a v1) (Vcons b v2)) -> - forall n (v1 v2 : vector A n), P n v1 v2. + forall (A:Type) (P: forall (n:nat),(Vector.t A n)->(Vector.t A n) -> Type), + P 0 Vector.nil Vector.nil -> + (forall n (v1 v2 : Vector.t A n) a b, P n v1 v2 -> + P (S n) (Vector.cons a v1) (Vector.cons b v2)) -> + forall n (v1 v2 : Vector.t A n), P n v1 v2. induction n. intros; rewrite (zero_nil _ v1); rewrite (zero_nil _ v2). auto. @@ -1069,24 +1069,23 @@ Defined. Require Import Bool. -Definition bitwise_or n v1 v2 : vector bool n := - vector_double_rect bool (fun n v1 v2 => vector bool n) - Vnil - (fun n v1 v2 a b r => Vcons (orb a b) r) n v1 v2. 
- +Definition bitwise_or n v1 v2 : Vector.t bool n := + vector_double_rect bool (fun n v1 v2 => Vector.t bool n) + Vector.nil + (fun n v1 v2 a b r => Vector.cons (orb a b) r) n v1 v2. -Fixpoint vector_nth (A:Type)(n:nat)(p:nat)(v:vector A p){struct v} +Fixpoint vector_nth (A:Type)(n:nat)(p:nat)(v:Vector.t A p){struct v} : option A := match n,v with - _ , Vnil => None - | 0 , Vcons b _ _ => Some b - | S n', Vcons _ p' v' => vector_nth A n' p' v' + _ , Vector.nil => None + | 0 , Vector.cons b _ => Some b + | S n', @Vector.cons _ _ p' v' => vector_nth A n' p' v' end. Implicit Arguments vector_nth [A p]. -Lemma nth_bitwise : forall (n:nat) (v1 v2: vector bool n) i a b, +Lemma nth_bitwise : forall (n:nat) (v1 v2: Vector.t bool n) i a b, vector_nth i v1 = Some a -> vector_nth i v2 = Some b -> vector_nth i (bitwise_or _ v1 v2) = Some (orb a b). diff --git a/doc/common/styles/html/coqremote/modules/node/node.css b/doc/common/styles/html/coqremote/modules/node/node.css new file mode 100644 index 0000000000..60d01308e9 --- /dev/null +++ b/doc/common/styles/html/coqremote/modules/node/node.css @@ -0,0 +1,43 @@ + +.node-unpublished { + background-color: #fff4f4; +} +.preview .node { + background-color: #ffffea; +} +#node-admin-filter ul { + list-style-type: none; + padding: 0; + margin: 0; + width: 100%; +} +#node-admin-buttons { + float: left; /* LTR */ + margin-left: 0.5em; /* LTR */ + clear: right; /* LTR */ +} +td.revision-current { + background: #ffc; +} +.node-form .form-text { + display: block; + width: 95%; +} +.node-form .container-inline .form-text { + display: inline; + width: auto; +} +.node-form .standard { + clear: both; +} +.node-form textarea { + display: block; + width: 95%; +} +.node-form .attachments fieldset { + float: none; + display: block; +} +.terms-inline { + display: inline; +} diff --git a/doc/common/styles/html/coqremote/modules/system/defaults.css b/doc/common/styles/html/coqremote/modules/system/defaults.css new file mode 100644 index 
0000000000..eb983b7f81 --- /dev/null +++ b/doc/common/styles/html/coqremote/modules/system/defaults.css @@ -0,0 +1,52 @@ + +/* +** HTML elements +*/ +fieldset { + margin-bottom: 1em; + padding: .5em; +} +form { + margin: 0; + padding: 0; +} +hr { + height: 1px; + border: 1px solid gray; +} +img { + border: 0; +} +table { + border-collapse: collapse; +} +th { + text-align: left; /* LTR */ + padding-right: 1em; /* LTR */ + border-bottom: 3px solid #ccc; +} + +/* +** Markup free clearing +** Details: http://www.positioniseverything.net/easyclearing.html +*/ +.clear-block:after { + content: "."; + display: block; + height: 0; + clear: both; + visibility: hidden; +} + +.clear-block { + display: inline-block; +} + +/* Hides from IE-mac \*/ +* html .clear-block { + height: 1%; +} +.clear-block { + display: block; +} +/* End hide from IE-mac */ diff --git a/doc/common/styles/html/coqremote/modules/system/system.css b/doc/common/styles/html/coqremote/modules/system/system.css new file mode 100644 index 0000000000..9371bb479e --- /dev/null +++ b/doc/common/styles/html/coqremote/modules/system/system.css @@ -0,0 +1,543 @@ + +/* +** HTML elements +*/ +body.drag { + cursor: move; +} +th.active img { + display: inline; +} +tr.even, tr.odd { + background-color: #eee; + border-bottom: 1px solid #ccc; + padding: 0.1em 0.6em; +} +tr.drag { + background-color: #fffff0; +} +tr.drag-previous { + background-color: #ffd; +} +td.active { + background-color: #ddd; +} +td.checkbox, th.checkbox { + text-align: center; +} +tbody { + border-top: 1px solid #ccc; +} +tbody th { + border-bottom: 1px solid #ccc; +} +thead th { + text-align: left; /* LTR */ + padding-right: 1em; /* LTR */ + border-bottom: 3px solid #ccc; +} + +/* +** Other common styles +*/ +.breadcrumb { + padding-bottom: .5em +} +div.indentation { + width: 20px; + height: 1.7em; + margin: -0.4em 0.2em -0.4em -0.4em; /* LTR */ + padding: 0.42em 0 0.42em 0.6em; /* LTR */ + float: left; /* LTR */ +} +div.tree-child { + background: 
url(../../misc/tree.png) no-repeat 11px center; /* LTR */ +} +div.tree-child-last { + background: url(../../misc/tree-bottom.png) no-repeat 11px center; /* LTR */ +} +div.tree-child-horizontal { + background: url(../../misc/tree.png) no-repeat -11px center; +} +.error { + color: #e55; +} +div.error { + border: 1px solid #d77; +} +div.error, tr.error { + background: #fcc; + color: #200; + padding: 2px; +} +.warning { + color: #e09010; +} +div.warning { + border: 1px solid #f0c020; +} +div.warning, tr.warning { + background: #ffd; + color: #220; + padding: 2px; +} +.ok { + color: #008000; +} +div.ok { + border: 1px solid #00aa00; +} +div.ok, tr.ok { + background: #dfd; + color: #020; + padding: 2px; +} +.item-list .icon { + color: #555; + float: right; /* LTR */ + padding-left: 0.25em; /* LTR */ + clear: right; /* LTR */ +} +.item-list .title { + font-weight: bold; +} +.item-list ul { + margin: 0 0 0.75em 0; + padding: 0; +} +.item-list ul li { + margin: 0 0 0.25em 1.5em; /* LTR */ + padding: 0; + list-style: disc; +} +ol.task-list li.active { + font-weight: bold; +} +.form-item { + margin-top: 1em; + margin-bottom: 1em; +} +tr.odd .form-item, tr.even .form-item { + margin-top: 0; + margin-bottom: 0; + white-space: nowrap; +} +tr.merge-down, tr.merge-down td, tr.merge-down th { + border-bottom-width: 0 !important; +} +tr.merge-up, tr.merge-up td, tr.merge-up th { + border-top-width: 0 !important; +} +.form-item input.error, .form-item textarea.error, .form-item select.error { + border: 2px solid red; +} +.form-item .description { + font-size: 0.85em; +} +.form-item label { + display: block; + font-weight: bold; +} +.form-item label.option { + display: inline; + font-weight: normal; +} +.form-checkboxes, .form-radios { + margin: 1em 0; +} +.form-checkboxes .form-item, .form-radios .form-item { + margin-top: 0.4em; + margin-bottom: 0.4em; +} +.marker, .form-required { + color: #f00; +} +.more-link { + text-align: right; /* LTR */ +} +.more-help-link { + font-size: 
0.85em; + text-align: right; /* LTR */ +} +.nowrap { + white-space: nowrap; +} +.item-list .pager { + clear: both; + text-align: center; +} +.item-list .pager li { + background-image:none; + display:inline; + list-style-type:none; + padding: 0.5em; +} +.pager-current { + font-weight:bold; +} +.tips { + margin-top: 0; + margin-bottom: 0; + padding-top: 0; + padding-bottom: 0; + font-size: 0.9em; +} +dl.multiselect dd.b, dl.multiselect dd.b .form-item, dl.multiselect dd.b select { + font-family: inherit; + font-size: inherit; + width: 14em; +} +dl.multiselect dd.a, dl.multiselect dd.a .form-item { + width: 10em; +} +dl.multiselect dt, dl.multiselect dd { + float: left; /* LTR */ + line-height: 1.75em; + padding: 0; + margin: 0 1em 0 0; /* LTR */ +} +dl.multiselect .form-item { + height: 1.75em; + margin: 0; +} + +/* +** Inline items (need to override above) +*/ +.container-inline div, .container-inline label { + display: inline; +} + +/* +** Tab navigation +*/ +ul.primary { + border-collapse: collapse; + padding: 0 0 0 1em; /* LTR */ + white-space: nowrap; + list-style: none; + margin: 5px; + height: auto; + line-height: normal; + border-bottom: 1px solid #bbb; +} +ul.primary li { + display: inline; +} +ul.primary li a { + background-color: #ddd; + border-color: #bbb; + border-width: 1px; + border-style: solid solid none solid; + height: auto; + margin-right: 0.5em; /* LTR */ + padding: 0 1em; + text-decoration: none; +} +ul.primary li.active a { + background-color: #fff; + border: 1px solid #bbb; + border-bottom: #fff 1px solid; +} +ul.primary li a:hover { + background-color: #eee; + border-color: #ccc; + border-bottom-color: #eee; +} +ul.secondary { + border-bottom: 1px solid #bbb; + padding: 0.5em 1em; + margin: 5px; +} +ul.secondary li { + display: inline; + padding: 0 1em; + border-right: 1px solid #ccc; /* LTR */ +} +ul.secondary a { + padding: 0; + text-decoration: none; +} +ul.secondary a.active { + border-bottom: 4px solid #999; +} + +/* +** Autocomplete 
styles +*/ +/* Suggestion list */ +#autocomplete { + position: absolute; + border: 1px solid; + overflow: hidden; + z-index: 100; +} +#autocomplete ul { + margin: 0; + padding: 0; + list-style: none; +} +#autocomplete li { + background: #fff; + color: #000; + white-space: pre; + cursor: default; +} +#autocomplete li.selected { + background: #0072b9; + color: #fff; +} +/* Animated throbber */ +html.js input.form-autocomplete { + background-image: url(../../misc/throbber.gif); + background-repeat: no-repeat; + background-position: 100% 2px; /* LTR */ +} +html.js input.throbbing { + background-position: 100% -18px; /* LTR */ +} + +/* +** Collapsing fieldsets +*/ +html.js fieldset.collapsed { + border-bottom-width: 0; + border-left-width: 0; + border-right-width: 0; + margin-bottom: 0; + height: 1em; +} +html.js fieldset.collapsed * { + display: none; +} +html.js fieldset.collapsed legend { + display: block; +} +html.js fieldset.collapsible legend a { + padding-left: 15px; /* LTR */ + background: url(../../misc/menu-expanded.png) 5px 75% no-repeat; /* LTR */ +} +html.js fieldset.collapsed legend a { + background-image: url(../../misc/menu-collapsed.png); /* LTR */ + background-position: 5px 50%; /* LTR */ +} +/* Note: IE-only fix due to '* html' (breaks Konqueror otherwise). */ +* html.js fieldset.collapsed legend, +* html.js fieldset.collapsed legend *, +* html.js fieldset.collapsed table * { + display: inline; +} +/* For Safari 2 to prevent collapsible fieldsets containing tables from dissapearing due to tableheader.js. 
*/ +html.js fieldset.collapsible { + position: relative; +} +html.js fieldset.collapsible legend a { + display: block; +} +/* Avoid jumping around due to margins collapsing into collapsible fieldset border */ +html.js fieldset.collapsible .fieldset-wrapper { + overflow: auto; +} + +/* +** Resizable text areas +*/ +.resizable-textarea { + width: 95%; +} +.resizable-textarea .grippie { + height: 9px; + overflow: hidden; + background: #eee url(../../misc/grippie.png) no-repeat center 2px; + border: 1px solid #ddd; + border-top-width: 0; + cursor: s-resize; +} +html.js .resizable-textarea textarea { + margin-bottom: 0; + width: 100%; + display: block; +} + +/* +** Table drag and drop. +*/ +.draggable a.tabledrag-handle { + cursor: move; + float: left; /* LTR */ + height: 1.7em; + margin: -0.4em 0 -0.4em -0.5em; /* LTR */ + padding: 0.42em 1.5em 0.42em 0.5em; /* LTR */ + text-decoration: none; +} +a.tabledrag-handle:hover { + text-decoration: none; +} +a.tabledrag-handle .handle { + margin-top: 4px; + height: 13px; + width: 13px; + background: url(../../misc/draggable.png) no-repeat 0 0; +} +a.tabledrag-handle-hover .handle { + background-position: 0 -20px; +} + +/* +** Teaser splitter +*/ +.joined + .grippie { + height: 5px; + background-position: center 1px; + margin-bottom: -2px; +} +/* Keeps inner content contained in Opera 9. 
*/ +.teaser-checkbox { + padding-top: 1px; +} +div.teaser-button-wrapper { + float: right; /* LTR */ + padding-right: 5%; /* LTR */ + margin: 0; +} +.teaser-checkbox div.form-item { + float: right; /* LTR */ + margin: 0 5% 0 0; /* LTR */ + padding: 0; +} +textarea.teaser { + display: none; +} +html.js .no-js { + display: none; +} + +/* +** Progressbar styles +*/ +.progress { + font-weight: bold; +} +.progress .bar { + background: #fff url(../../misc/progress.gif); + border: 1px solid #00375a; + height: 1.5em; + margin: 0 0.2em; +} +.progress .filled { + background: #0072b9; + height: 1em; + border-bottom: 0.5em solid #004a73; + width: 0%; +} +.progress .percentage { + float: right; /* LTR */ +} +.progress-disabled { + float: left; /* LTR */ +} +.ahah-progress { + float: left; /* LTR */ +} +.ahah-progress .throbber { + width: 15px; + height: 15px; + margin: 2px; + background: transparent url(../../misc/throbber.gif) no-repeat 0px -18px; + float: left; /* LTR */ +} +tr .ahah-progress .throbber { + margin: 0 2px; +} +.ahah-progress-bar { + width: 16em; +} + +/* +** Formatting for welcome page +*/ +#first-time strong { + display: block; + padding: 1.5em 0 .5em; +} + +/* +** To be used with tableselect.js +*/ +tr.selected td { + background: #ffc; +} + +/* +** Floating header for tableheader.js +*/ +table.sticky-header { + margin-top: 0; + background: #fff; +} + +/* +** Installation clean URLs +*/ +#clean-url.install { + display: none; +} + +/* +** For anything you want to hide on page load when JS is enabled, so +** that you can use the JS to control visibility and avoid flicker. 
+*/ +html.js .js-hide { + display: none; +} + +/* +** Styles for the system modules page (admin/build/modules) +*/ +#system-modules div.incompatible { + font-weight: bold; +} + +/* +** Styles for the system themes page (admin/build/themes) +*/ +#system-themes-form div.incompatible { + font-weight: bold; +} + +/* +** Password strength indicator +*/ +span.password-strength { + visibility: hidden; +} +input.password-field { + margin-right: 10px; /* LTR */ +} +div.password-description { + padding: 0 2px; + margin: 4px 0 0 0; + font-size: 0.85em; + max-width: 500px; +} +div.password-description ul { + margin-bottom: 0; +} +.password-parent { + margin: 0 0 0 0; +} +/* +** Password confirmation checker +*/ +input.password-confirm { + margin-right: 10px; /* LTR */ +} +.confirm-parent { + margin: 5px 0 0 0; +} +span.password-confirm { + visibility: hidden; +} +span.password-confirm span { + font-weight: normal; +} diff --git a/doc/common/styles/html/coqremote/modules/user/user.css b/doc/common/styles/html/coqremote/modules/user/user.css new file mode 100644 index 0000000000..7b2163e3d3 --- /dev/null +++ b/doc/common/styles/html/coqremote/modules/user/user.css @@ -0,0 +1,58 @@ + +#permissions td.module { + font-weight: bold; +} +#permissions td.permission { + padding-left: 1.5em; /* LTR */ +} +#access-rules .access-type, #access-rules .rule-type { + margin-right: 1em; /* LTR */ + float: left; /* LTR */ +} +#access-rules .access-type .form-item, #access-rules .rule-type .form-item { + margin-top: 0; +} +#access-rules .mask { + clear: both; +} +#user-login-form { + text-align: center; +} +#user-admin-filter ul { + list-style-type: none; + padding: 0; + margin: 0; + width: 100%; +} +#user-admin-buttons { + float: left; /* LTR */ + margin-left: 0.5em; /* LTR */ + clear: right; /* LTR */ +} +#user-admin-settings fieldset .description { + font-size: 0.85em; + padding-bottom: .5em; +} + +/* Generated by user.module but used by profile.module: */ +.profile { + clear: both; + margin: 
1em 0; +} +.profile .picture { + float: right; /* LTR */ + margin: 0 1em 1em 0; /* LTR */ +} +.profile h3 { + border-bottom: 1px solid #ccc; +} +.profile dl { + margin: 0 0 1.5em 0; +} +.profile dt { + margin: 0 0 0.2em 0; + font-weight: bold; +} +.profile dd { + margin: 0 0 1em 0; +} diff --git a/doc/common/styles/html/coqremote/sites/all/themes/coq/coqdoc.css b/doc/common/styles/html/coqremote/sites/all/themes/coq/coqdoc.css new file mode 100644 index 0000000000..d23ea8f362 --- /dev/null +++ b/doc/common/styles/html/coqremote/sites/all/themes/coq/coqdoc.css @@ -0,0 +1,329 @@ +body { padding: 0px 0px; + margin: 0px 0px; + background-color: white } + +#page { display: block; + padding: 0px; + margin: 0px; + padding-bottom: 10px; } + +#header { display: block; + position: relative; + padding: 0; + margin: 0; + vertical-align: middle; + border-bottom-style: solid; + border-width: thin } + +#header h1 { padding: 0; + margin: 0;} + + +/* Contents */ + +#main{ display: block; + padding: 10px; + font-family: sans-serif; + font-size: 100%; + line-height: 100% } + +#main h1 { line-height: 95% } /* allow for multi-line headers */ + +#main a.idref:visited {color : #416DFF; text-decoration : none; } +#main a.idref:link {color : #416DFF; text-decoration : none; } +#main a.idref:hover {text-decoration : none; } +#main a.idref:active {text-decoration : none; } + +#main a.modref:visited {color : #416DFF; text-decoration : none; } +#main a.modref:link {color : #416DFF; text-decoration : none; } +#main a.modref:hover {text-decoration : none; } +#main a.modref:active {text-decoration : none; } + +#main .keyword { color : #cf1d1d } +#main { color: black } + +.section { background-color: rgb(60%,60%,100%); + padding-top: 13px; + padding-bottom: 13px; + padding-left: 3px; + margin-top: 5px; + margin-bottom: 5px; + font-size : 175% } + +h2.section { background-color: rgb(80%,80%,100%); + padding-left: 3px; + padding-top: 12px; + padding-bottom: 10px; + font-size : 130% } + +h3.section { 
background-color: rgb(90%,90%,100%); + padding-left: 3px; + padding-top: 7px; + padding-bottom: 7px; + font-size : 115% } + +h4.section { +/* + background-color: rgb(80%,80%,80%); + max-width: 20em; + padding-left: 5px; + padding-top: 5px; + padding-bottom: 5px; +*/ + background-color: white; + padding-left: 0px; + padding-top: 0px; + padding-bottom: 0px; + font-size : 100%; + font-weight : bold; + text-decoration : underline; + } + +#main .doc { margin: 0px; + font-family: sans-serif; + font-size: 100%; + line-height: 125%; + max-width: 40em; + color: black; + padding: 10px; + background-color: #90bdff} + +.inlinecode { + display: inline; +/* font-size: 125%; */ + color: #666666; + font-family: monospace } + +.doc .inlinecode { + display: inline; + font-size: 120%; + color: rgb(30%,30%,70%); + font-family: monospace } + +.doc .inlinecode .id { + color: rgb(30%,30%,70%); +} + +.inlinecodenm { + display: inline; + color: #444444; +} + +.doc .code { + display: inline; + font-size: 120%; + color: rgb(30%,30%,70%); + font-family: monospace } + +.comment { + display: inline; + font-family: monospace; + color: rgb(50%,50%,80%); +} + +.code { + display: block; +/* padding-left: 15px; */ + font-size: 110%; + font-family: monospace; + } + +table.infrule { + border: 0px; + margin-left: 50px; + margin-top: 10px; + margin-bottom: 10px; +} + +td.infrule { + font-family: monospace; + text-align: center; +/* color: rgb(35%,35%,70%); */ + padding: 0px; + line-height: 100%; +} + +tr.infrulemiddle hr { + margin: 1px 0 1px 0; +} + +.infrulenamecol { + color: rgb(60%,60%,60%); + font-size: 80%; + padding-left: 1em; + padding-bottom: 0.1em +} + +/* Pied de page */ + +#footer { font-size: 65%; + font-family: sans-serif; } + +/* Identifiers: <span class="id" title="...">) */ + +.id { display: inline; } + +.id[title="constructor"] { + color: rgb(60%,0%,0%); +} + +.id[title="var"] { + color: rgb(40%,0%,40%); +} + +.id[title="variable"] { + color: rgb(40%,0%,40%); +} + 
+.id[title="definition"] { + color: rgb(0%,40%,0%); +} + +.id[title="abbreviation"] { + color: rgb(0%,40%,0%); +} + +.id[title="lemma"] { + color: rgb(0%,40%,0%); +} + +.id[title="instance"] { + color: rgb(0%,40%,0%); +} + +.id[title="projection"] { + color: rgb(0%,40%,0%); +} + +.id[title="method"] { + color: rgb(0%,40%,0%); +} + +.id[title="inductive"] { + color: rgb(0%,0%,80%); +} + +.id[title="record"] { + color: rgb(0%,0%,80%); +} + +.id[title="class"] { + color: rgb(0%,0%,80%); +} + +.id[title="keyword"] { + color : #cf1d1d; +/* color: black; */ +} + +/* Deprecated rules using the 'type' attribute of <span> (not xhtml valid) */ + +.id[type="constructor"] { + color: rgb(60%,0%,0%); +} + +.id[type="var"] { + color: rgb(40%,0%,40%); +} + +.id[type="variable"] { + color: rgb(40%,0%,40%); +} + +.id[type="definition"] { + color: rgb(0%,40%,0%); +} + +.id[type="abbreviation"] { + color: rgb(0%,40%,0%); +} + +.id[type="lemma"] { + color: rgb(0%,40%,0%); +} + +.id[type="instance"] { + color: rgb(0%,40%,0%); +} + +.id[type="projection"] { + color: rgb(0%,40%,0%); +} + +.id[type="method"] { + color: rgb(0%,40%,0%); +} + +.id[type="inductive"] { + color: rgb(0%,0%,80%); +} + +.id[type="record"] { + color: rgb(0%,0%,80%); +} + +.id[type="class"] { + color: rgb(0%,0%,80%); +} + +.id[type="keyword"] { + color : #cf1d1d; +/* color: black; */ +} + +.inlinecode .id { + color: rgb(0%,0%,0%); +} + + +/* TOC */ + +#toc h2 { + padding: 10px; + background-color: rgb(60%,60%,100%); +} + +#toc li { + padding-bottom: 8px; +} + +/* Index */ + +#index { + margin: 0; + padding: 0; + width: 100%; +} + +#index #frontispiece { + margin: 1em auto; + padding: 1em; + width: 60%; +} + +.booktitle { font-size : 140% } +.authors { font-size : 90%; + line-height: 115%; } +.moreauthors { font-size : 60% } + +#index #entrance { + text-align: center; +} + +#index #entrance .spacer { + margin: 0 30px 0 30px; +} + +#index #footer { + position: absolute; + bottom: 0; +} + +.paragraph { + height: 0.75em; 
+} + +ul.doclist { + margin-top: 0em; + margin-bottom: 0em; +} diff --git a/doc/common/styles/html/coqremote/sites/all/themes/coq/style.css b/doc/common/styles/html/coqremote/sites/all/themes/coq/style.css new file mode 100644 index 0000000000..32c0b33166 --- /dev/null +++ b/doc/common/styles/html/coqremote/sites/all/themes/coq/style.css @@ -0,0 +1,801 @@ +body +{ + background: white; + color:#444; + font:normal normal normal small/1.5em "Lucida Grande", Verdana, sans-serif; + margin:0; + padding:0; +} + +h2 +{ + font-size:150%; + font-weight:normal; + margin:20px 0 0; +} + +h3 +{ + font-size:130%; + font-weight:normal; +} + +a:link,a:visited +{ + color:#660403; + font-weight:normal; + text-decoration:none; +} + +a:hover +{ + color: red; + text-decoration:none; +} + +#container +{ + margin: 0; + padding: 0; + } + + /*----------header, logo and site name styles----------*/ + #headertop + { + display: block; + /* position:absolute; */ + min-width: 700px; + top: 0; + width: 100%; + height:30px; + z-index: 1; + background: transparent url('images/header_top.png') repeat-x; + } + + #header + { + min-width: 700px; + width: 100%; height:70px; + position: relative; + left: 0; top: 0; + background: transparent url('images/header_bot.png') repeat-x; + } + + #logo + { + float:left; + z-index: 2; + position: absolute; + top: -15px; + left: 0px; + } + + #logo img + { + border:0; + float:left; + } + + #logoWrapper + { + line-height:4em; + } + + #siteName + { + position: relative; + top: 10px; left: 80px; + color:#fff; + float:left; + font-size:350%; + } + + #siteName a + { + color:#fff; + text-decoration:none; + } + + #siteName a:hover + { + color:#ddd; + text-decoration:none; + } + + #siteSlogan + { + color:#eee; + float:left; + font-size:170%; + margin:50px 0 0 10px; + text-transform:lowercase; + white-space:nowrap; + } + + /*----------nav styles -- primary links in header----------*/ + + #nav +{ + position:absolute; right:0; + margin: 0; + padding: 5px; + } + +#nav ul + { + 
list-style:none outside none; + list-style-image:none; + margin:0; + padding:0; + } + + #nav li + { + display: inline; + margin: 0; padding: 4px; + } + + #nav li a + { + border:medium none; + color:#ccc; + font-weight:normal; + padding-left:10px; + padding-right:10px; + text-decoration:none; + } + + #nav li a:hover + { + background:#7B0505 none repeat; + border:medium none; + border-left:1px solid #ddd; + border-right:1px solid #ddd; + color:#fff; + padding: 6px 9px 5px 9px; + } + + +/************** FOOTER *******************/ + + +#footer +{ + background:transparent url('images/footer.png') repeat-x; + width:100%; + clear:both; + font-size:85%; + text-align:center; + /* position:fixed; */ + margin: 0; + padding: 0; +} + + +#nav-footer +{ + display: inline; + color:#444; + margin: 0; + padding: 0; + text-align:right; + } + +#nav-footer ul + { + list-style:none outside none; + list-style-image:none; + margin:0; + padding:0px; padding-right: 5px; + } + +#nav-footer li +{ + display:inline; padding: 4px; +} + + #nav-footer li a + { + border:medium none; + color:#ccc; + font-size: 11px; + font-weight:normal; + padding-left: 10px; + padding-right: 10px; + text-decoration:none; + } + + #nav-footer li a:hover + { + background:#7B0505 none repeat; + border:medium none; + border-left:1px solid #ddd; + border-right:1px solid #ddd; + color:#fff; + margin:0; + padding: 3px 9px 0px 9px; + } + + + /*----------main content----------*/ + #content + { + display: block; + position: static; + +/* min-width: 640px; */ + max-width: 800px; + + margin-left:40px; + margin-right:300px; + padding: 2ex 2ex; + + z-index:1; + } + +.content { + display: block; + position: relative; + + margin: 0; + padding: 0; +} + + /*----------sidebar styles----------*/ + #sidebarWrapper + { + /* background:transparent url('images/sidebar_bottom.jpg') no-repeat scroll left bottom;*/ + display:block; + position:fixed; + /* avant : top: 100px; right:0px*/ + top: 15px; /* 180 */ + right:0px; + left: auto; + + 
margin-right: 0px; + + /* avant + width: 12%; + min-width:80px; */ + + /* width: 18%; */ + /* min-*/ + width:270px; + + z-index:0; + overflow:hidden; + +/* ajout precedent:*/ +/* min-height:320px; + padding:10px; + background-image:url('http://www.lix.polytechnique.fr/Labo/Denis.Cousineau/data/coq/rttr340bis.png'); + background-repeat : repeat-x ;*/ + +/* last ajout */ + /* min-height:510px; */ /* 360 */ + padding-left:0px; + padding-right:0px; + padding-top:105px; /* 40 */ + padding-bottom:/*105px*/115px; + /* background:transparent url('http://www.lix.polytechnique.fr/Labo/Denis.Cousineau/data/coq/trig6b.png') no-repeat scroll left top; */ + background:transparent url('images/sidebarbot.png') no-repeat scroll right bottom; + + } + +#sidebar { + padding-left: 40px; + padding-top: 105px; + overflow: visible; + background:transparent url('images/sidebartop.png') no-repeat scroll right top; +} + +#sidebar .title +{ + /* avant :border-bottom:1px solid #eee;*/ + /* avant : color:#660403;*/ + color:#2D0102; + font-size:120%; + font-weight:bold; + line-height:19px; + margin:10px 0; +} + +/*----------page styles----------*/ +.pageTitle +{ + color:#2D0102; + font-size:220%; + margin:10px 0 20px; +} + +.mission +{ + background-color:#efefef; + border:solid 1px #ccc; + margin:0 0 10px 0; + padding:10px; +} + +.messages +{ + color:#C80000; + font-size:110%; + margin:10px 0; +} + +/*----------node styles----------*/ +.nodeTitle +{ + background: url('images/nodeTitle.gif') no-repeat 0 100%; + color:#9a0000; + font-size: 100%; + margin:0; +} + +.nodeTitle a +{ + color:#660403; + text-decoration:none; +} + +.nodeTitle a:hover +{ + color:#d00000; + text-decoration:none; +} + +.node +{ + margin:0 0 20px; +} + +.content p +{ + margin:10px 0; +} + +.submitted +{ + color:#a3a3a3; + font-size:70%; +} + +.nodeLinks +{ + font-size:95%; + margin:0; + padding:0; +} + +.taxonomy +{ + background:url('icons/tag_red.png') no-repeat 0 7px; + font-size:80%; + padding:0 0 5px 16px; +} + 
+/*----------comment styles----------*/ +.commentTitle +{ + Border-bottom:1px solid #ddd; + color:#9a0000; + font-size:130%; + margin:20px 0 0; +} + +.commentTitle a +{ + color:#660403; + text-decoration:none; +} + +.commentTitle a:hover +{ + color:#d00000; + text-decoration:none; +} + +.commentLinks +{ + background:#f7f7f7; + border:1px solid #e1e1e1; + color:#444; + font-size:95%; + margin:20px 0 30px; + padding:4px 0 4px 4px; +} + + +/*----------img styles----------*/ +img +{ + padding:3px; +} + +/*----------icons for links----------*/ +.comment_comments a +{ + background:url('icons/comment.png') no-repeat 0 2px; + padding-bottom:5px; + padding-left:20px; +} + +.node_read_more a +{ + background:url('icons/page_white_go.png') no-repeat; + padding-bottom:5px; + padding-left:20px; +} + +.comment_add a,.comment_reply a +{ + background:url('icons/comment_add.png') no-repeat; + padding-bottom:5px; + padding-left:20px; +} +.comment_delete a +{ + background:url('icons/comment_delete.png') no-repeat; + padding-bottom:5px; + padding-left:20px; +} + +.comment_edit a +{ + background:url('icons/comment_edit.png') no-repeat; + padding-bottom:5px; + padding-left:20px; +} + +/*----------TinyMCE editor----------*/ +body.mceContentBody +{ + background:#fff; + color:#000; + font-size:12px; +} + +body.mceContentBody a:link +{ + color:#ff0000; +} + +/*----------table styles----------*/ +table +{ + margin:1em 0; + width:100%; +} + +thead th +{ + border-bottom:2px solid #AAA; + color:#494949; + font-weight:bold; +} + +td,th +{ + padding:.3em 0 .5em; +} + +tr.even,tr.odd,tbody th +{ + border:solid #D5D6D7; + border-width:1px 0; +} + +tr.even +{ + background:#fff; +} + +td.region,td.module,td.container +{ + background:#D5D6D7; + border-bottom:1px solid #AAA; + border-top:1.5em solid #fff; + color:#455067; + font-weight:bold; +} + +tr:first-child td.region,tr:first-child td.module,tr:first-child td.container +{ + border-top-width:0; +} + +td.menu-disabled,td.menu-disabled a +{ + 
background-color:#D5C2C2; + color:#000; +} + +/*----------other styles----------*/ + +.block +{ + margin:5px 0 20px; +} + +.thumbnail,.preview +{ + border:1px solid #ccc; +} + +.lstlisting { + display: block; + font-family: monospace; + white-space: pre; + margin: 1em 0; +} +.center { + text-align: center; +} +.centered { + display: block-inline; +} + +/*----------download table------------*/ + +table.downloadtable +{ + width:90%; + margin-left:auto; + margin-right:auto; +} + +table.downloadtable td.downloadheader +{ +padding: 2px 1em; +font-weight: bold; +font-size: 120%; +color: white; +background: transparent url('images/header_bot.png') repeat-x; +/*background-color: #660403; */ +border: solid 2px white; +border-left: none; +} + +table.downloadtable td.downloadcategory +{ +padding: 2px 1em; +background-color: #dfbfbe; +text-indent: 0; +} + +table.downloadtable td.downloadsize +{ +text-indent: 0; +white-space: nowrap; +height: 52px; +} + +table.downloadtable td +{ +padding: 2px 1em; +background-color: #dfbfbe; +border-right: solid white 2px; +} + + +table.downloadtable td.downloadtopline +{ +border-top: solid white 2px; +} + +table.downloadtable td.downloadtoprightline +{ +border-top: solid 2px white; +border-right: solid 2px white; +} + +table.downloadtable td.downloadbottomline +{ +border-bottom: solid 2px white; +border-right: solid 2px white; +} + +table.downloadtable td.downloadbottomrightline +{ +border-bottom: solid 2px white; +border-right: solid 2px white; +} + +table.downloadtable td.downloadrightline +{ +border-right: solid 2px white; +} + +table.downloadtable td.downloadback +{ +background-color: #efe4e4; +} + +table.downloadtable td.downloadbottomback +{ +border-bottom: solid 2px white; +background-color: #efe4e4; +} + + +/*********** Normal text style ************/ + +p { + text-indent:3em; +} + +ul { + margin: 0px; + margin-left:4em; + padding: 0px; + list-style-type:square; +} + +li +{ + text-indent: 0px; + margin: 0px; + padding: 0px; +} + +tt { 
font-size: 1em; } + +pre { font-size: 1em; } + +/*********** Framework ***********/ +.framework +{ + display: block; + position:relative; + border:solid 1px #660033; + margin: 8ex 1em; /* 8ex 8ex 1em 1em; */ + padding: 0; +} + +.frameworkcontent +{ + position:relative; + left:0px; + + + margin: 0; + padding: .5ex 2em; + + text-indent: 2em; + text-align: justify; +} + + +.frameworklabel +{ + display: inline; + position:relative; + top:-1.3ex; + + margin-left:2ex; + padding-top:.4ex; + padding-bottom:.4ex; + padding-right:1ex; + padding-left:1ex; + + border: none; + background: white; + color: black; + + font-weight: bold; + font-size:115%; +} + +.frameworklinks { + display:block; + position:relative; + top:1.4ex; + + margin-right:2ex; + + text-align:right; + font-size:100% + } + +.frameworklinks ul +{ + display: inline; + padding: 0px 1ex; + + border: none; + background: white; +} + + +.frameworklinks li + { + display:inline; + padding: 1ex 0px; + } + + .frameworklinks li a +{ + border:medium none; + + margin: 0px 1ex; + padding-left:2px; + padding-right:3px; + + font-weight:normal; + text-decoration:none; + + color: #660003; +} + + .frameworklinks li a:hover + { + color: red; + + border: none; + } + +/* General flat lists */ +.flatlist li {display: inline} + +/* For sections in bycat.html */ +.bycatsection dt { + text-indent: 3em +} + +.bycatsection dt a +{ + font-weight: bold; + color:#444; +} + +/* footnote is used in the new contribution form */ +.footnote { + text-indent: 0pt; + font-size: 80%; + color: silver; + text-align: justify +} + +/****************** CoqIDE Screenshots *****************/ + + +.SCpager { + position:relative; + top:5px; + width:630px; + background: transparent url('images/header_bot.png') repeat-x; + padding:4px; +} + +.SCpagercontent { + width:390px; + position:relative; + margin-left:auto; + margin-right:auto; +} + +.SCthumb { + height:45px; + margin-left:2px; + margin-right:2px; +} + +.SCthumbselected { + height:55px; + 
margin-left:2px; + margin-right:2px; +} + +.SCcontent { + position:relative; + top:5px; + width:638px; + background-color: #dfbfbe; +} + +.SCscreenshot { + position:relative; + height:400px; + width:auto; + margin:15px auto 15px 19px; +} diff --git a/doc/refman/AsyncProofs.tex b/doc/refman/AsyncProofs.tex index 7ffe252253..1609e4a041 100644 --- a/doc/refman/AsyncProofs.tex +++ b/doc/refman/AsyncProofs.tex @@ -6,7 +6,7 @@ This chapter explains how proofs can be asynchronously processed by Coq. This feature improves the reactivity of the system when used in interactive -mode via CoqIDE. In addition to that, it allows Coq to take advantage of +mode via CoqIDE. In addition, it allows Coq to take advantage of parallel hardware when used as a batch compiler by decoupling the checking of statements and definitions from the construction and checking of proofs objects. @@ -22,7 +22,12 @@ For example, in interactive mode, some errors coming from the kernel of Coq are signaled late. The type of errors belonging to this category are universe inconsistencies. -Last, at the time of writing, only opaque proofs (ending with \texttt{Qed} or \texttt{Admitted}) can be processed asynchronously. +At the time of writing, only opaque proofs (ending with \texttt{Qed} or \texttt{Admitted}) can be processed asynchronously. + +Finally, asynchronous processing is disabled when running CoqIDE in Windows. The +current implementation of the feature is not stable on Windows. It can be +enabled, as described below at \ref{interactivecaveats}, though doing so is not +recommended. \section{Proof annotations} @@ -46,6 +51,12 @@ proof does not begin with \texttt{Proof using}, the system records in an auxiliary file, produced along with the \texttt{.vo} file, the list of section variables used. +\subsubsection{Automatic suggestion of proof annotations} + +The command \texttt{Set Suggest Proof Using} makes Coq suggest, when a +\texttt{Qed} command is processed, a correct proof annotation. 
It is up +to the user to modify the proof script accordingly. + \section{Proof blocks and error resilience} Coq 8.6 introduces a mechanism for error resiliency: in interactive mode Coq @@ -81,13 +92,7 @@ CoqIDE one of the following options: \texttt{-async-proofs-tactic-error-resilience off}, \texttt{-async-proofs-tactic-error-resilience all}, \texttt{-async-proofs-tactic-error-resilience $blocktype_1$,..., $blocktype_n$}. -Valid proof block types are: ``curly'', ``par'', ``indent'', ``bullet''. - -\subsubsection{Automatic suggestion of proof annotations} - -The command \texttt{Set Suggest Proof Using} makes Coq suggest, when a -\texttt{Qed} command is processed, a correct proof annotation. It is up -to the user to modify the proof script accordingly. +Valid proof block types are: ``curly'', ``par'', ``indent'', ``bullet''. \section{Interactive mode} @@ -112,6 +117,7 @@ the kernel to check all the proof objects, one has to click the button with the gears. Only then are all the universe constraints checked. \subsubsection{Caveats} +\label{interactivecaveats} The number of worker processes can be increased by passing CoqIDE the \texttt{-async-proofs-j $n$} flag. Note that the memory consumption @@ -120,7 +126,8 @@ the master process. Also note that increasing the number of workers may reduce the reactivity of the master process to user commands. To disable this feature, one can pass the \texttt{-async-proofs off} flag to -CoqIDE. +CoqIDE. Conversely, on Windows, where the feature is disabled by default, +pass the \texttt{-async-proofs on} flag to enable it. Proofs that are known to take little time to process are not delegated to a worker process. The threshold can be configure with \texttt{-async-proofs-delegation-threshold}. Default is 0.03 seconds. diff --git a/doc/refman/Classes.tex b/doc/refman/Classes.tex index acfc4bea93..7e07868a38 100644 --- a/doc/refman/Classes.tex +++ b/doc/refman/Classes.tex @@ -486,15 +486,17 @@ where there is a hole in that place. 
\subsection{\tt Set Typeclasses Legacy Resolution} \optindex{Typeclasses Legacy Resolution} +\emph{Deprecated since 8.7} This option (off by default) uses the 8.5 implementation of resolution. Use for compatibility purposes only (porting and debugging). \subsection{\tt Set Typeclasses Module Eta} \optindex{Typeclasses Modulo Eta} +\emph{Deprecated since 8.7} This option allows eta-conversion for functions and records during -unification of type-classes. This option is now unsupported in 8.6 with +unification of type-classes. This option is unsupported since 8.6 with {\tt Typeclasses Filtered Unification} set, but still affects the default unification strategy, and the one used in {\tt Legacy Resolution} mode. It is \emph{unset} by default. If {\tt Typeclasses @@ -505,7 +507,7 @@ pattern-matching is not up-to eta. \subsection{\tt Set Typeclasses Limit Intros} \optindex{Typeclasses Limit Intros} -This option (on by default in Coq 8.6 and below) controls the ability to +This option (on by default) controls the ability to apply hints while avoiding (functional) eta-expansions in the generated proof term. It does so by allowing hints that conclude in a product to apply to a goal with a matching product directly, avoiding an @@ -554,7 +556,7 @@ more efficient resolution behavior (the option is off by default). When a solution to the typeclass goal of this class is found, we never backtrack on it, assuming that it is canonical. -\subsection{\tt Typeclasses eauto := [debug] [dfs | bfs] [\emph{depth}]} +\subsection{\tt Typeclasses eauto := [debug] [(dfs) | (bfs)] [\emph{depth}]} \comindex{Typeclasses eauto} \label{TypeclassesEauto} diff --git a/doc/refman/Extraction.tex b/doc/refman/Extraction.tex index fa3d61b1cd..499239b6f3 100644 --- a/doc/refman/Extraction.tex +++ b/doc/refman/Extraction.tex @@ -21,9 +21,14 @@ be used (abusively) to refer to any of the three. 
Before using any of the commands or options described in this chapter, the extraction framework should first be loaded explicitly -via {\tt Require Extraction}. Note that in earlier versions of Coq, these -commands and options were directly available without any preliminary -{\tt Require}. +via {\tt Require Extraction}, or via the more robust +{\tt From Coq Require Extraction}. +Note that in earlier versions of Coq, these commands and options were +directly available without any preliminary {\tt Require}. + +\begin{coq_example} +Require Extraction. +\end{coq_example} \asection{Generating ML code} \comindex{Extraction} @@ -82,9 +87,20 @@ one monolithic file or one file per \Coq\ library. using prefixes \verb!coq_! or \verb!Coq_!. \end{description} -\noindent The list of globals \qualid$_i$ does not need to be -exhaustive: it is automatically completed into a complete and minimal -environment. +\noindent The following command is meant to help automatic testing of + the extraction, see for instance the {\tt test-suite} directory + in the \Coq\ sources. + +\begin{description} +\item {\tt Extraction TestCompile} \qualid$_1$ \dots\ \qualid$_n$. ~\par + All the globals (or modules) \qualid$_1$ \dots\ \qualid$_n$ and all + their dependencies are extracted to a temporary Ocaml file, just as in + {\tt Extraction "{\em file}"}. Then this temporary file and its + signature are compiled with the same Ocaml compiler used to built + \Coq. This command succeeds only if the extraction and the Ocaml + compilation succeed (and it fails if the current target language + of the extraction is not Ocaml). +\end{description} \asection{Extraction options} @@ -365,6 +381,9 @@ some specific {\tt Extract Constant} when primitive counterparts exist. \Example Typical examples are the following: +\begin{coq_eval} +Require Extraction. +\end{coq_eval} \begin{coq_example} Extract Inductive unit => "unit" [ "()" ]. Extract Inductive bool => "bool" [ "true" "false" ]. 
diff --git a/doc/refman/Program.tex b/doc/refman/Program.tex index 2fc1c8764a..f60908da6c 100644 --- a/doc/refman/Program.tex +++ b/doc/refman/Program.tex @@ -278,7 +278,8 @@ tactic is replaced by the default one if not specified. as implicit arguments of the special constant \texttt{Program.Tactics.obligation}. \item {\tt Set Shrink Obligations}\optindex{Shrink Obligations} - Control whether obligations should have their +\emph{Deprecated since 8.7} + This option (on by default) controls whether obligations should have their context minimized to the set of variables used in the proof of the obligation, to avoid unnecessary dependencies. \end{itemize} diff --git a/doc/refman/RefMan-cic.tex b/doc/refman/RefMan-cic.tex index 96fb1eb752..ad795d4064 100644 --- a/doc/refman/RefMan-cic.tex +++ b/doc/refman/RefMan-cic.tex @@ -558,7 +558,7 @@ $\Sort$ is called the sort of the inductive type $t$. \paragraph{Examples} \newcommand\ind[3]{$\mathsf{Ind}~[#1]\left(\hskip-.4em - \begin{array}{r @{\mathrm{~:=~}} l} + \begin{array}{r@{\mathrm{~:=~}}l} #2 & #3 \\ \end{array} \hskip-.4em @@ -569,7 +569,7 @@ The declaration for parameterized lists is: \begin{latexonly} \vskip.5em -\ind{1}{[\List:\Set\ra\Set]}{\left[\begin{array}{r \colon l} + \ind{1}{[\List:\Set\ra\Set]}{\left[\begin{array}{r@{:}l} \Nil & \forall A:\Set,\List~A \\ \cons & \forall A:\Set, A \ra \List~A \ra \List~A \end{array} @@ -613,8 +613,8 @@ Inductive list (A:Set) : Set := \noindent The declaration for a mutual inductive definition of {\tree} and {\forest} is: \begin{latexonly} \vskip.5em -\ind{~}{\left[\begin{array}{r \colon l}\tree&\Set\\\forest&\Set\end{array}\right]} - {\left[\begin{array}{r \colon l} +\ind{~}{\left[\begin{array}{r@{:}l}\tree&\Set\\\forest&\Set\end{array}\right]} + {\left[\begin{array}{r@{:}l} \node & \forest \ra \tree\\ \emptyf & \forest\\ \consf & \tree \ra \forest \ra \forest\\ @@ -680,15 +680,15 @@ with forest : Set := \noindent The declaration for a mutual inductive definition of {\even} 
and {\odd} is: \begin{latexonly} - \newcommand\GammaI{\left[\begin{array}{r \colon l} - \even & \nat\ra\Prop \\ - \odd & \nat\ra\Prop + \newcommand\GammaI{\left[\begin{array}{r@{:}l} + \even & \nat\ra\Prop \\ + \odd & \nat\ra\Prop \end{array} \right]} - \newcommand\GammaC{\left[\begin{array}{r \colon l} - \evenO & \even~\nO \\ - \evenS & \forall n : \nat, \odd~n \ra \even~(\nS~n)\\ - \oddS & \forall n : \nat, \even~n \ra \odd~(\nS~n) + \newcommand\GammaC{\left[\begin{array}{r@{:}l} + \evenO & \even~\nO \\ + \evenS & \forall n : \nat, \odd~n \ra \even~(\nS~n)\\ + \oddS & \forall n : \nat, \even~n \ra \odd~(\nS~n) \end{array} \right]} \vskip.5em @@ -769,7 +769,7 @@ Provided that our environment $E$ contains inductive definitions we showed befor these two inference rules above enable us to conclude that: \vskip.5em \newcommand\prefix{E[\Gamma]\vdash\hskip.25em} -$\begin{array}{@{} l} +$\begin{array}{@{}l} \prefix\even : \nat\ra\Prop\\ \prefix\odd : \nat\ra\Prop\\ \prefix\evenO : \even~\nO\\ @@ -1425,6 +1425,9 @@ If there is an hypothesis $h:a=b$ in the local context, it can be used for rewriting not only in logical propositions but also in any type. % In that case, the term \verb!eq_rec! which was defined as an axiom, is % now a term of the calculus. +\begin{coq_eval} +Require Extraction. +\end{coq_eval} \begin{coq_example} Print eq_rec. Extraction eq_rec. diff --git a/doc/refman/RefMan-ext.tex b/doc/refman/RefMan-ext.tex index 939fc87a6e..713f344cbe 100644 --- a/doc/refman/RefMan-ext.tex +++ b/doc/refman/RefMan-ext.tex @@ -38,21 +38,19 @@ construction allows defining ``signatures''. \end{figure} \noindent In the expression - -\smallskip -{\tt Record} {\ident} {\params} \texttt{:} - {\sort} := {\ident$_0$} \verb+{+ - {\ident$_1$} \binders$_1$ \texttt{:} {\term$_1$}; - \dots - {\ident$_n$} \binders$_n$ \texttt{:} {\term$_n$} \verb+}+. 
-\smallskip - +\begin{quote} +{\tt Record {\ident} {\params} : {\sort} := {\ident$_0$} \{ \\ + {\ident$_1$} \binders$_1$ : {\term$_1$} ; ... ; \\ + {\ident$_n$} \binders$_n$ : {\term$_n$} \}.} +\end{quote} \noindent the identifier {\ident} is the name of the defined record and {\sort} is its type. The identifier {\ident$_0$} is the name of its constructor. If {\ident$_0$} is omitted, the default name {\tt -Build\_{\ident}} is used. If {\sort} is omitted, the default sort is ``{\Type}''. -The identifiers {\ident$_1$}, .., -{\ident$_n$} are the names of fields and {\tt forall} \binders$_1${\tt ,} {\term$_1$}, ..., {\tt forall} \binders$_n${\tt ,} {\term$_n$} +Build\_{\ident}} is used. +If {\sort} is omitted, the default sort is {\Type}. +The identifiers {\ident$_1$}, \dots, {\ident$_n$} are the names of +fields and {\tt forall {\binders$_1$}, {\term$_1$}}, \dots, +{\tt forall {\binders$_n$}, {\term$_n$}} their respective types. Remark that the type of {\ident$_i$} may depend on the previous {\ident$_j$} (for $j<i$). Thus the order of the fields is important. Finally, {\params} are the parameters of the @@ -82,26 +80,15 @@ Record Rat : Set := mkRat forall x y z:nat, (x * y) = top /\ (x * z) = bottom -> x = 1}. \end{coq_example} -Remark here that the field -\verb+Rat_cond+ depends on the field \verb+bottom+. - -%Let us now see the work done by the {\tt Record} macro. -%First the macro generates an inductive definition -%with just one constructor: -% -%\medskip -%\noindent -%{\tt Inductive {\ident} \zeroone{\binders} : {\sort} := \\ -%\mbox{}\hspace{0.4cm} {\ident$_0$} : forall ({\ident$_1$}:{\term$_1$}) .. -%({\ident$_n$}:{\term$_n$}), {\ident} {\rm\sl params}.} -%\medskip +Remark here that the field \verb+Rat_bottom_cond+ depends +on the field \verb+bottom+ and \verb+Rat_irred_cond+ depends +on both \verb+top+ and \verb+bottom+. Let us now see the work done by the {\tt Record} macro. 
First the macro generates a variant type definition with just one constructor: \begin{quote} -{\tt Variant {\ident} {\params} :{\sort} :=} \\ -\qquad {\tt - {\ident$_0$} ({\ident$_1$}:{\term$_1$}) .. ({\ident$_n$}:{\term$_n$}).} +{\tt Variant {\ident} {\params} : {\sort} := \\ + {\ident$_0$} ({\ident$_1$} : {\term$_1$}) ... ({\ident$_n$} : {\term$_n$}).} \end{quote} To build an object of type {\ident}, one should provide the constructor {\ident$_0$} with $n$ terms filling the fields of @@ -109,28 +96,9 @@ the record. As an example, let us define the rational $1/2$: \begin{coq_example*} -Require Import Arith. Theorem one_two_irred : forall x y z:nat, x * y = 1 /\ x * z = 2 -> x = 1. -\end{coq_example*} -\begin{coq_eval} -Lemma mult_m_n_eq_m_1 : forall m n:nat, m * n = 1 -> m = 1. -destruct m; trivial. -intros; apply f_equal with (f := S). -destruct m; trivial. -destruct n; simpl in H. - rewrite <- mult_n_O in H. - discriminate. - rewrite <- plus_n_Sm in H. - discriminate. -Qed. - -intros x y z [H1 H2]. - apply mult_m_n_eq_m_1 with (n := y); trivial. -\end{coq_eval} -\ldots -\begin{coq_example*} -Qed. +Admitted. \end{coq_example*} \begin{coq_example} Definition half := mkRat true 1 2 (O_S 1) one_two_irred. @@ -139,80 +107,6 @@ Definition half := mkRat true 1 2 (O_S 1) one_two_irred. Check half. \end{coq_example} -The macro generates also, when it is possible, the projection -functions for destructuring an object of type {\ident}. These -projection functions have the same name that the corresponding -fields. If a field is named ``\verb=_='' then no projection is built -for it. In our example: - -\begin{coq_example} -Eval compute in half.(top). -Eval compute in half.(bottom). -Eval compute in half.(Rat_bottom_cond). -\end{coq_example} -\begin{coq_eval} -Reset Initial. -\end{coq_eval} - -Records defined with the {\tt Record} keyword are not allowed to be -recursive (references to the record's name in the type of its field -raises an error). 
To define recursive records, one can use the {\tt - Inductive} and {\tt CoInductive} keywords, resulting in an inductive -or co-inductive record. A \emph{caveat}, however, is that records -cannot appear in mutually inductive (or co-inductive) definitions. -Induction schemes are automatically generated for inductive records. -Automatic generation of induction schemes for non-recursive records -defined with the {\tt Record} keyword can be activated with the -{\tt Nonrecursive Elimination Schemes} option -(see~\ref{set-nonrecursive-elimination-schemes}). - -\begin{Warnings} -\item {\tt {\ident$_i$} cannot be defined.} - - It can happen that the definition of a projection is impossible. - This message is followed by an explanation of this impossibility. - There may be three reasons: - \begin{enumerate} - \item The name {\ident$_i$} already exists in the environment (see - Section~\ref{Axiom}). - \item The body of {\ident$_i$} uses an incorrect elimination for - {\ident} (see Sections~\ref{Fixpoint} and~\ref{Caseexpr}). - \item The type of the projections {\ident$_i$} depends on previous - projections which themselves could not be defined. - \end{enumerate} -\end{Warnings} - -\begin{ErrMsgs} - -\item \errindex{Records declared with the keyword Record or Structure cannot be recursive.} - - The record name {\ident} appears in the type of its fields, but uses - the keyword {\tt Record}. Use the keyword {\tt Inductive} or {\tt - CoInductive} instead. -\item \errindex{Cannot handle mutually (co)inductive records.} - - Records cannot be defined as part of mutually inductive (or - co-inductive) definitions, whether with records only or mixed with - standard definitions. -\item During the definition of the one-constructor inductive - definition, all the errors of inductive definitions, as described in - Section~\ref{gal-Inductive-Definitions}, may also occur. 
- -\end{ErrMsgs} - -\SeeAlso Coercions and records in Section~\ref{Coercions-and-records} -of the chapter devoted to coercions. - -\Rem {\tt Structure} is a synonym of the keyword {\tt Record}. - -\Rem Creation of an object of record type can be done by calling {\ident$_0$} -and passing arguments in the correct order. - -\begin{coq_example} -Record point := { x : nat; y : nat }. -Definition a := Build_point 5 3. -\end{coq_example} - \begin{figure}[t] \begin{centerframe} \begin{tabular}{lcl} @@ -226,15 +120,17 @@ Definition a := Build_point 5 3. \label{fig:fieldsyntax} \end{figure} -A syntax is available for creating objects by using named fields, as +Alternatively, the following syntax allows creating objects by using named fields, as shown on Figure~\ref{fig:fieldsyntax}. The fields do not have to be in any particular order, nor do they have to be all present if the missing ones can be inferred or prompted for (see Section~\ref{Program}). \begin{coq_example} -Definition b := {| x := 5; y := 3 |}. -Definition c := {| y := 3; x := 5 |}. +Definition half' := + {| sign := true; + Rat_bottom_cond := O_S 1; + Rat_irred_cond := one_two_irred |}. \end{coq_example} This syntax can be disabled globally for printing by @@ -256,23 +152,52 @@ This syntax can also be used for pattern matching. \begin{coq_example} Eval compute in ( - match b with - | {| y := S n |} => n + match half with + | {| sign := true; top := n |} => n | _ => 0 end). \end{coq_example} -\begin{coq_eval} -Reset Initial. -\end{coq_eval} +The macro generates also, when it is possible, the projection +functions for destructuring an object of type {\ident}. These +projection functions are given the names of the corresponding +fields. If a field is named ``\verb=_='' then no projection is built +for it. In our example: + +\begin{coq_example} +Eval compute in top half. +Eval compute in bottom half. +Eval compute in Rat_bottom_cond half. 
+\end{coq_example} + +An alternative syntax for projections based on a dot notation is +available: + +\begin{coq_example} +Eval compute in half.(top). +\end{coq_example} -\Rem A syntax for projections based on a dot notation is -available. The command to activate it is +It can be activated for printing with the command \optindex{Printing Projections} \begin{quote} {\tt Set Printing Projections.} \end{quote} +\begin{coq_example} +Set Printing Projections. +Check top half. +\end{coq_example} + +The corresponding grammar rules are given in Figure~\ref{fig:projsyntax}. +When {\qualid} denotes a projection, the syntax {\tt + {\term}.({\qualid})} is equivalent to {\qualid~\term}, the syntax +{\term}{\tt .(}{\qualid}~{\termarg}$_1$ {\ldots} {\termarg}$_n${\tt )} to +{\qualid~{\termarg}$_1$ {\ldots} {\termarg}$_n$~\term}, and the syntax +{\term}{\tt .(@}{\qualid}~{\term}$_1$~\ldots~{\term}$_n${\tt )} to +{@\qualid~{\term}$_1$ {\ldots} {\term}$_n$~\term}. In each case, {\term} +is the object projected and the other arguments are the parameters of +the inductive type. + \begin{figure}[t] \begin{centerframe} \begin{tabular}{lcl} @@ -285,18 +210,66 @@ available. The command to activate it is \label{fig:projsyntax} \end{figure} -The corresponding grammar rules are given Figure~\ref{fig:projsyntax}. -When {\qualid} denotes a projection, the syntax {\tt - {\term}.({\qualid})} is equivalent to {\qualid~\term}, the syntax -{\term}{\tt .(}{\qualid}~{\termarg}$_1$ {\ldots} {\termarg}$_n${\tt )} to -{\qualid~{\termarg}$_1$ {\ldots} {\termarg}$_n$~\term}, and the syntax -{\term}{\tt .(@}{\qualid}~{\term}$_1$~\ldots~{\term}$_n${\tt )} to -{@\qualid~{\term}$_1$ {\ldots} {\term}$_n$~\term}. In each case, {\term} -is the object projected and the other arguments are the parameters of -the inductive type. +\begin{coq_eval} +Reset Initial. 
+\end{coq_eval} + +\begin{Remarks} + +\item Records defined with the {\tt Record} keyword are not allowed to be +recursive (references to the record's name in the type of its field +raises an error). To define recursive records, one can use the {\tt +Inductive} and {\tt CoInductive} keywords, resulting in an inductive +or co-inductive record. +A \emph{caveat}, however, is that records +cannot appear in mutually inductive (or co-inductive) definitions. + +\item Induction schemes are automatically generated for inductive records. +Automatic generation of induction schemes for non-recursive records +defined with the {\tt Record} keyword can be activated with the +{\tt Nonrecursive Elimination Schemes} option +(see~\ref{set-nonrecursive-elimination-schemes}). + +\item {\tt Structure} is a synonym of the keyword {\tt Record}. -To deactivate the printing of projections, use -{\tt Unset Printing Projections}. +\end{Remarks} + +\begin{Warnings} +\item {\tt {\ident$_i$} cannot be defined.} + + It can happen that the definition of a projection is impossible. + This message is followed by an explanation of this impossibility. + There may be three reasons: + \begin{enumerate} + \item The name {\ident$_i$} already exists in the environment (see + Section~\ref{Axiom}). + \item The body of {\ident$_i$} uses an incorrect elimination for + {\ident} (see Sections~\ref{Fixpoint} and~\ref{Caseexpr}). + \item The type of the projections {\ident$_i$} depends on previous + projections which themselves could not be defined. + \end{enumerate} +\end{Warnings} + +\begin{ErrMsgs} + +\item \errindex{Records declared with the keyword Record or Structure cannot be recursive.} + + The record name {\ident} appears in the type of its fields, but uses + the keyword {\tt Record}. Use the keyword {\tt Inductive} or {\tt + CoInductive} instead. 
+\item \errindex{Cannot handle mutually (co)inductive records.} + + Records cannot be defined as part of mutually inductive (or + co-inductive) definitions, whether with records only or mixed with + standard definitions. +\item During the definition of the one-constructor inductive + definition, all the errors of inductive definitions, as described in + Section~\ref{gal-Inductive-Definitions}, may also occur. + +\end{ErrMsgs} + +\SeeAlso Coercions and records in Section~\ref{Coercions-and-records} +of the chapter devoted to coercions. \subsection{Primitive Projections} \optindex{Primitive Projections} @@ -732,20 +705,20 @@ when the {\tt FunInd} library has been loaded via {\tt Require Import FunInd}: This command can be seen as a generalization of {\tt Fixpoint}. It is actually a wrapper for several ways of defining a function \emph{and other useful related objects}, namely: an induction principle that reflects the -recursive structure of the function (see \ref{FunInduction}), and its +recursive structure of the function (see \ref{FunInduction}) and its fixpoint equality. The meaning of this declaration is to define a function {\it ident}, similarly to {\tt Fixpoint}. Like in {\tt Fixpoint}, the decreasing argument must be -given (unless the function is not recursive), but it must not -necessary be \emph{structurally} decreasing. The point of the {\tt +given (unless the function is not recursive), but it might not +necessarily be \emph{structurally} decreasing. The point of the {\tt \{\}} annotation is to name the decreasing argument \emph{and} to describe which kind of decreasing criteria must be used to ensure termination of recursive calls. -The {\tt Function} construction enjoys also the {\tt with} extension +The {\tt Function} construction also enjoys the {\tt with} extension to define mutually recursive definitions. However, this feature does -not work for non structural recursive functions. % VRAI?? +not work for non structurally recursive functions. 
% VRAI?? See the documentation of {\tt functional induction} (see Section~\ref{FunInduction}) and {\tt Functional Scheme} @@ -776,7 +749,7 @@ Function plus (n m : nat) {struct n} : nat := \end{coq_example*} \paragraph[Limitations]{Limitations\label{sec:Function-limitations}} -\term$_0$ must be build as a \emph{pure pattern-matching tree} +\term$_0$ must be built as a \emph{pure pattern-matching tree} (\texttt{match...with}) with applications only \emph{at the end} of each branch. @@ -803,7 +776,7 @@ For now dependent cases are not treated for non structurally terminating functio The generation of the graph relation \texttt{(R\_\ident)} used to compute the induction scheme of \ident\ raised a typing error. Only - the ident is defined, the induction scheme will not be generated. + the ident is defined; the induction scheme will not be generated. This error happens generally when: @@ -875,14 +848,14 @@ the following: being the decreasing argument and \term$_1$ being a function from type of \ident$_0$ to \texttt{nat} for which value on the decreasing argument decreases (for the {\tt lt} order on {\tt - nat}) at each recursive call of \term$_0$, parameters of the + nat}) at each recursive call of \term$_0$. Parameters of the function are bound in \term$_0$; \item {\tt \{wf} \term$_1$ \ident$_0${\tt\}} with \ident$_0$ being the decreasing argument and \term$_1$ an ordering relation on the type of \ident$_0$ (i.e. of type T$_{\ident_0}$ $\to$ T$_{\ident_0}$ $\to$ {\tt Prop}) for which the decreasing argument decreases at each recursive call of - \term$_0$. The order must be well founded. parameters of the + \term$_0$. The order must be well founded. Parameters of the function are bound in \term$_0$. \end{itemize} @@ -2011,6 +1984,11 @@ Check (fun x y => _) 0 1. Unset Printing Existential Instances. \end{coq_eval} +Existential variables can be named by the user upon creation using +the syntax {\tt ?[\ident]}. 
This is useful when the existential +variable needs to be explicitly handled later in the script (e.g. +with a named-goal selector, see~\ref{ltac:selector}). + \subsection{Explicit displaying of existential instances for pretty-printing \label{SetPrintingExistentialInstances} \optindex{Printing Existential Instances}} diff --git a/doc/refman/RefMan-gal.tex b/doc/refman/RefMan-gal.tex index 3814e4403a..ef12fe416a 100644 --- a/doc/refman/RefMan-gal.tex +++ b/doc/refman/RefMan-gal.tex @@ -3,7 +3,7 @@ \label{BNF-syntax} % Used referred to as a chapter label This chapter describes \gallina, the specification language of {\Coq}. -It allows developing mathematical theories and to prove specifications +It allows developing mathematical theories and proofs of specifications of programs. The theories are built from axioms, hypotheses, parameters, lemmas, theorems and definitions of constants, functions, predicates and sets. The syntax of logical objects involved in @@ -37,7 +37,7 @@ Similarly, the notation ``\nelist{\entry}{}'' stands for a non empty sequence of expressions parsed by the ``{\entry}'' entry, without any separator between. -At the end, the notation ``\sequence{\entry}{\tt sep}'' stands for a +Finally, the notation ``\sequence{\entry}{\tt sep}'' stands for a possibly empty sequence of expressions parsed by the ``{\entry}'' entry, separated by the literal ``{\tt sep}''. diff --git a/doc/refman/RefMan-ind.tex b/doc/refman/RefMan-ind.tex deleted file mode 100644 index 43bd2419f0..0000000000 --- a/doc/refman/RefMan-ind.tex +++ /dev/null @@ -1,510 +0,0 @@ - -%\documentstyle[11pt]{article} -%\input{title} - -%\include{macros} -%\makeindex - -%\begin{document} -%\coverpage{The module {\tt Equality}}{Cristina CORNES} - -%\tableofcontents - -\chapter[Tactics for inductive types and families]{Tactics for inductive types and families\label{Addoc-equality}} - -This chapter details a few special tactics useful for inferring facts -from inductive hypotheses. 
They can be considered as tools that -macro-generate complicated uses of the basic elimination tactics for -inductive types. - -Sections \ref{inversion_introduction} to \ref{inversion_using} present -inversion tactics and Section~\ref{scheme} describes -a command {\tt Scheme} for automatic generation of induction schemes -for mutual inductive types. - -%\end{document} -%\documentstyle[11pt]{article} -%\input{title} - -%\begin{document} -%\coverpage{Module Inv: Inversion Tactics}{Cristina CORNES} - -\section[Generalities about inversion]{Generalities about inversion\label{inversion_introduction}} -When working with (co)inductive predicates, we are very often faced to -some of these situations: -\begin{itemize} -\item we have an inconsistent instance of an inductive predicate in the - local context of hypotheses. Thus, the current goal can be trivially - proved by absurdity. - -\item we have a hypothesis that is an instance of an inductive - predicate, and the instance has some variables whose constraints we - would like to derive. -\end{itemize} - -The inversion tactics are very useful to simplify the work in these -cases. Inversion tools can be classified in three groups: -\begin{enumerate} -\item tactics for inverting an instance without stocking the inversion - lemma in the context: - (\texttt{Dependent}) \texttt{Inversion} and - (\texttt{Dependent}) \texttt{Inversion\_clear}. -\item commands for generating and stocking in the context the inversion - lemma corresponding to an instance: \texttt{Derive} - (\texttt{Dependent}) \texttt{Inversion}, \texttt{Derive} - (\texttt{Dependent}) \texttt{Inversion\_clear}. -\item tactics for inverting an instance using an already defined - inversion lemma: \texttt{Inversion \ldots using}. -\end{enumerate} - -These tactics work for inductive types of arity $(\vec{x}:\vec{T})s$ -where $s \in \{Prop,Set,Type\}$. 
Sections \ref{inversion_primitive}, -\ref{inversion_derivation} and \ref{inversion_using} -describe respectively each group of tools. - -As inversion proofs may be large in size, we recommend the user to -stock the lemmas whenever the same instance needs to be inverted -several times.\\ - -Let's consider the relation \texttt{Le} over natural numbers and the -following variables: - -\begin{coq_eval} -Restore State "Initial". -\end{coq_eval} - -\begin{coq_example*} -Inductive Le : nat -> nat -> Set := - | LeO : forall n:nat, Le 0%N n - | LeS : forall n m:nat, Le n m -> Le (S n) (S m). -Variable P : nat -> nat -> Prop. -Variable Q : forall n m:nat, Le n m -> Prop. -\end{coq_example*} - -For example purposes we defined \verb+Le: nat->nat->Set+ - but we may have defined -it \texttt{Le} of type \verb+nat->nat->Prop+ or \verb+nat->nat->Type+. - - -\section[Inverting an instance]{Inverting an instance\label{inversion_primitive}} -\subsection{The non dependent case} -\begin{itemize} - -\item \texttt{Inversion\_clear} \ident~\\ -\index{Inversion-clear@{\tt Inversion\_clear}} - Let the type of \ident~ in the local context be $(I~\vec{t})$, - where $I$ is a (co)inductive predicate. Then, - \texttt{Inversion} applied to \ident~ derives for each possible - constructor $c_i$ of $(I~\vec{t})$, {\bf all} the necessary - conditions that should hold for the instance $(I~\vec{t})$ to be - proved by $c_i$. Finally it erases \ident~ from the context. - - - -For example, consider the goal: -\begin{coq_eval} -Lemma ex : forall n m:nat, Le (S n) m -> P n m. -intros. -\end{coq_eval} - -\begin{coq_example} -Show. -\end{coq_example} - -To prove the goal we may need to reason by cases on \texttt{H} and to - derive that \texttt{m} is necessarily of -the form $(S~m_0)$ for certain $m_0$ and that $(Le~n~m_0)$. 
-Deriving these conditions corresponds to prove that the -only possible constructor of \texttt{(Le (S n) m)} is -\texttt{LeS} and that we can invert the -\texttt{->} in the type of \texttt{LeS}. -This inversion is possible because \texttt{Le} is the smallest set closed by -the constructors \texttt{LeO} and \texttt{LeS}. - - -\begin{coq_example} -inversion_clear H. -\end{coq_example} - -Note that \texttt{m} has been substituted in the goal for \texttt{(S m0)} -and that the hypothesis \texttt{(Le n m0)} has been added to the -context. - -\item \texttt{Inversion} \ident~\\ -\index{Inversion@{\tt Inversion}} - This tactic differs from {\tt Inversion\_clear} in the fact that - it adds the equality constraints in the context and - it does not erase the hypothesis \ident. - - -In the previous example, {\tt Inversion\_clear} -has substituted \texttt{m} by \texttt{(S m0)}. Sometimes it is -interesting to have the equality \texttt{m=(S m0)} in the -context to use it after. In that case we can use \texttt{Inversion} that -does not clear the equalities: - -\begin{coq_example*} -Undo. -\end{coq_example*} -\begin{coq_example} -inversion H. -\end{coq_example} - -\begin{coq_eval} -Undo. -\end{coq_eval} - -Note that the hypothesis \texttt{(S m0)=m} has been deduced and -\texttt{H} has not been cleared from the context. - -\end{itemize} - -\begin{Variants} - -\item \texttt{Inversion\_clear } \ident~ \texttt{in} \ident$_1$ \ldots - \ident$_n$\\ -\index{Inversion_clear...in@{\tt Inversion\_clear...in}} - Let \ident$_1$ \ldots \ident$_n$, be identifiers in the local context. This - tactic behaves as generalizing \ident$_1$ \ldots \ident$_n$, and then performing - {\tt Inversion\_clear}. - -\item \texttt{Inversion } \ident~ \texttt{in} \ident$_1$ \ldots \ident$_n$\\ -\index{Inversion ... in@{\tt Inversion ... in}} - Let \ident$_1$ \ldots \ident$_n$, be identifiers in the local context. 
This - tactic behaves as generalizing \ident$_1$ \ldots \ident$_n$, and then performing - \texttt{Inversion}. - - -\item \texttt{Simple Inversion} \ident~ \\ -\index{Simple Inversion@{\tt Simple Inversion}} - It is a very primitive inversion tactic that derives all the necessary - equalities but it does not simplify - the constraints as \texttt{Inversion} and - {\tt Inversion\_clear} do. - -\end{Variants} - - -\subsection{The dependent case} -\begin{itemize} -\item \texttt{Dependent Inversion\_clear} \ident~\\ -\index{Dependent Inversion-clear@{\tt Dependent Inversion\_clear}} - Let the type of \ident~ in the local context be $(I~\vec{t})$, - where $I$ is a (co)inductive predicate, and let the goal depend both on - $\vec{t}$ and \ident. Then, - \texttt{Dependent Inversion\_clear} applied to \ident~ derives - for each possible constructor $c_i$ of $(I~\vec{t})$, {\bf all} the - necessary conditions that should hold for the instance $(I~\vec{t})$ to be - proved by $c_i$. It also substitutes \ident~ for the corresponding - term in the goal and it erases \ident~ from the context. - - -For example, consider the goal: -\begin{coq_eval} -Lemma ex_dep : forall (n m:nat) (H:Le (S n) m), Q (S n) m H. -intros. -\end{coq_eval} - -\begin{coq_example} -Show. -\end{coq_example} - -As \texttt{H} occurs in the goal, we may want to reason by cases on its -structure and so, we would like inversion tactics to -substitute \texttt{H} by the corresponding term in constructor form. -Neither \texttt{Inversion} nor {\tt Inversion\_clear} make such a -substitution. To have such a behavior we use the dependent inversion tactics: - -\begin{coq_example} -dependent inversion_clear H. -\end{coq_example} - -Note that \texttt{H} has been substituted by \texttt{(LeS n m0 l)} and -\texttt{m} by \texttt{(S m0)}. 
- - -\end{itemize} - -\begin{Variants} - -\item \texttt{Dependent Inversion\_clear } \ident~ \texttt{ with } \term\\ -\index{Dependent Inversion_clear...with@{\tt Dependent Inversion\_clear...with}} - \noindent Behaves as \texttt{Dependent Inversion\_clear} but allows giving - explicitly the good generalization of the goal. It is useful when - the system fails to generalize the goal automatically. If - \ident~ has type $(I~\vec{t})$ and $I$ has type - $(\vec{x}:\vec{T})s$, then \term~ must be of type - $I:(\vec{x}:\vec{T})(I~\vec{x})\rightarrow s'$ where $s'$ is the - type of the goal. - - - -\item \texttt{Dependent Inversion} \ident~\\ -\index{Dependent Inversion@{\tt Dependent Inversion}} - This tactic differs from \texttt{Dependent Inversion\_clear} in the fact that - it also adds the equality constraints in the context and - it does not erase the hypothesis \ident~. - -\item \texttt{Dependent Inversion } \ident~ \texttt{ with } \term \\ -\index{Dependent Inversion...with@{\tt Dependent Inversion...with}} - Analogous to \texttt{Dependent Inversion\_clear .. with..} above. -\end{Variants} - - - -\section[Deriving the inversion lemmas]{Deriving the inversion lemmas\label{inversion_derivation}} -\subsection{The non dependent case} - -The tactics (\texttt{Dependent}) \texttt{Inversion} and (\texttt{Dependent}) -{\tt Inversion\_clear} work on a -certain instance $(I~\vec{t})$ of an inductive predicate. At each -application, they inspect the given instance and derive the -corresponding inversion lemma. If we have to invert the same -instance several times it is recommended to stock the lemma in the -context and to reuse it whenever we need it. - -The families of commands \texttt{Derive Inversion}, \texttt{Derive -Dependent Inversion}, \texttt{Derive} \\ {\tt Inversion\_clear} and \texttt{Derive Dependent Inversion\_clear} -allow to generate inversion lemmas for given instances and sorts. 
Next -section describes the tactic \texttt{Inversion}$\ldots$\texttt{using} that refines the -goal with a specified inversion lemma. - -\begin{itemize} - -\item \texttt{Derive Inversion\_clear} \ident~ \texttt{with} - $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ -\index{Derive Inversion_clear...with@{\tt Derive Inversion\_clear...with}} - Let $I$ be an inductive predicate and $\vec{x}$ the variables - occurring in $\vec{t}$. This command generates and stocks - the inversion lemma for the sort \sort~ corresponding to the instance - $(\vec{x}:\vec{T})(I~\vec{t})$ with the name \ident~ in the {\bf - global} environment. When applied it is equivalent to have - inverted the instance with the tactic {\tt Inversion\_clear}. - - - For example, to generate the inversion lemma for the instance - \texttt{(Le (S n) m)} and the sort \texttt{Prop} we do: -\begin{coq_example} -Derive Inversion_clear leminv with (forall n m:nat, Le (S n) m) Sort - Prop. -\end{coq_example} - -Let us inspect the type of the generated lemma: -\begin{coq_example} -Check leminv. -\end{coq_example} - - - -\end{itemize} - -%\variants -%\begin{enumerate} -%\item \verb+Derive Inversion_clear+ \ident$_1$ \ident$_2$ \\ -%\index{Derive Inversion_clear@{\tt Derive Inversion\_clear}} -% Let \ident$_1$ have type $(I~\vec{t})$ in the local context ($I$ -% an inductive predicate). Then, this command has the same semantics -% as \verb+Derive Inversion_clear+ \ident$_2$~ \verb+with+ -% $(\vec{x}:\vec{T})(I~\vec{t})$ \verb+Sort Prop+ where $\vec{x}$ are the free -% variables of $(I~\vec{t})$ declared in the local context (variables -% of the global context are considered as constants). -%\item \verb+Derive Inversion+ \ident$_1$~ \ident$_2$~\\ -%\index{Derive Inversion@{\tt Derive Inversion}} -% Analogous to the previous command. 
-%\item \verb+Derive Inversion+ $num$ \ident~ \ident~ \\ -%\index{Derive Inversion@{\tt Derive Inversion}} -% This command behaves as \verb+Derive Inversion+ \ident~ {\it -% namehyp} performed on the goal number $num$. -% -%\item \verb+Derive Inversion_clear+ $num$ \ident~ \ident~ \\ -%\index{Derive Inversion_clear@{\tt Derive Inversion\_clear}} -% This command behaves as \verb+Derive Inversion_clear+ \ident~ -% \ident~ performed on the goal number $num$. -%\end{enumerate} - - - -A derived inversion lemma is adequate for inverting the instance -with which it was generated, \texttt{Derive} applied to -different instances yields different lemmas. In general, if we generate -the inversion lemma with -an instance $(\vec{x}:\vec{T})(I~\vec{t})$ and a sort $s$, the inversion lemma will -expect a predicate of type $(\vec{x}:\vec{T})s$ as first argument. \\ - -\begin{Variant} -\item \texttt{Derive Inversion} \ident~ \texttt{with} - $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort\\ -\index{Derive Inversion...with@{\tt Derive Inversion...with}} - Analogous of \texttt{Derive Inversion\_clear .. with ..} but - when applied it is equivalent to having - inverted the instance with the tactic \texttt{Inversion}. -\end{Variant} - -\subsection{The dependent case} -\begin{itemize} -\item \texttt{Derive Dependent Inversion\_clear} \ident~ \texttt{with} - $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ -\index{Derive Dependent Inversion\_clear...with@{\tt Derive Dependent Inversion\_clear...with}} - Let $I$ be an inductive predicate. This command generates and stocks - the dependent inversion lemma for the sort \sort~ corresponding to the instance - $(\vec{x}:\vec{T})(I~\vec{t})$ with the name \ident~ in the {\bf - global} environment. When applied it is equivalent to having - inverted the instance with the tactic \texttt{Dependent Inversion\_clear}. 
-\end{itemize} - -\begin{coq_example} -Derive Dependent Inversion_clear leminv_dep with - (forall n m:nat, Le (S n) m) Sort Prop. -\end{coq_example} - -\begin{coq_example} -Check leminv_dep. -\end{coq_example} - -\begin{Variants} -\item \texttt{Derive Dependent Inversion} \ident~ \texttt{with} - $(\vec{x}:\vec{T})(I~\vec{t})$ \texttt{Sort} \sort~ \\ -\index{Derive Dependent Inversion...with@{\tt Derive Dependent Inversion...with}} - Analogous to \texttt{Derive Dependent Inversion\_clear}, but when - applied it is equivalent to having - inverted the instance with the tactic \texttt{Dependent Inversion}. - -\end{Variants} - -\section[Using already defined inversion lemmas]{Using already defined inversion lemmas\label{inversion_using}} -\begin{itemize} -\item \texttt{Inversion} \ident \texttt{ using} \ident$'$ \\ -\index{Inversion...using@{\tt Inversion...using}} - Let \ident~ have type $(I~\vec{t})$ ($I$ an inductive - predicate) in the local context, and \ident$'$ be a (dependent) inversion - lemma. Then, this tactic refines the current goal with the specified - lemma. - - -\begin{coq_eval} -Abort. -\end{coq_eval} - -\begin{coq_example} -Show. -\end{coq_example} -\begin{coq_example} -inversion H using leminv. -\end{coq_example} - - -\end{itemize} -\variant -\begin{enumerate} -\item \texttt{Inversion} \ident~ \texttt{using} \ident$'$ \texttt{in} \ident$_1$\ldots \ident$_n$\\ -\index{Inversion...using...in@{\tt Inversion...using...in}} -This tactic behaves as generalizing \ident$_1$\ldots \ident$_n$, -then doing \texttt{Use Inversion} \ident~\ident$'$. -\end{enumerate} - -\section[\tt Scheme ...]{\tt Scheme ...\index{Scheme@{\tt Scheme}}\label{Scheme} -\label{scheme}} -The {\tt Scheme} command is a high-level tool for generating -automatically (possibly mutual) induction principles for given types -and sorts. Its syntax follows the schema : - -\noindent -{\tt Scheme {\ident$_1$} := Induction for \term$_1$ Sort {\sort$_1$} \\ - with\\ - \mbox{}\hspace{0.1cm} .. 
\\ - with {\ident$_m$} := Induction for {\term$_m$} Sort - {\sort$_m$}}\\ -\term$_1$ \ldots \term$_m$ are different inductive types belonging to -the same package of mutual inductive definitions. This command -generates {\ident$_1$}\ldots{\ident$_m$} to be mutually recursive -definitions. Each term {\ident$_i$} proves a general principle -of mutual induction for objects in type {\term$_i$}. - -\Example -The definition of principle of mutual induction for {\tt tree} and -{\tt forest} over the sort {\tt Set} is defined by the command: -\begin{coq_eval} -Restore State "Initial". -Variables A B : Set. -Inductive tree : Set := - node : A -> forest -> tree -with forest : Set := - | leaf : B -> forest - | cons : tree -> forest -> forest. -\end{coq_eval} -\begin{coq_example*} -Scheme tree_forest_rec := Induction for tree - Sort Set - with forest_tree_rec := Induction for forest Sort Set. -\end{coq_example*} -You may now look at the type of {\tt tree\_forest\_rec} : -\begin{coq_example} -Check tree_forest_rec. -\end{coq_example} -This principle involves two different predicates for {\tt trees} and -{\tt forests}; it also has three premises each one corresponding to a -constructor of one of the inductive definitions. - -The principle {\tt tree\_forest\_rec} shares exactly the same -premises, only the conclusion now refers to the property of forests. -\begin{coq_example} -Check forest_tree_rec. -\end{coq_example} - -\begin{Variant} -\item {\tt Scheme {\ident$_1$} := Minimality for \term$_1$ Sort {\sort$_1$} \\ - with\\ - \mbox{}\hspace{0.1cm} .. \\ - with {\ident$_m$} := Minimality for {\term$_m$} Sort - {\sort$_m$}}\\ -Same as before but defines a non-dependent elimination principle more -natural in case of inductively defined relations. -\end{Variant} - -\Example -With the predicates {\tt odd} and {\tt even} inductively defined as: -% \begin{coq_eval} -% Restore State "Initial". 
-% \end{coq_eval} -\begin{coq_example*} -Inductive odd : nat -> Prop := - oddS : forall n:nat, even n -> odd (S n) -with even : nat -> Prop := - | evenO : even 0%N - | evenS : forall n:nat, odd n -> even (S n). -\end{coq_example*} -The following command generates a powerful elimination -principle: -\begin{coq_example*} -Scheme odd_even := Minimality for odd Sort Prop - with even_odd := Minimality for even Sort Prop. -\end{coq_example*} -The type of {\tt odd\_even} for instance will be: -\begin{coq_example} -Check odd_even. -\end{coq_example} -The type of {\tt even\_odd} shares the same premises but the -conclusion is {\tt (n:nat)(even n)->(Q n)}. - -\subsection[\tt Combined Scheme ...]{\tt Combined Scheme ...\index{CombinedScheme@{\tt Combined Scheme}}\label{CombinedScheme} -\label{combinedscheme}} -The {\tt Combined Scheme} command is a tool for combining -induction principles generated by the {\tt Scheme} command. -Its syntax follows the schema : - -\noindent -{\tt Combined Scheme {\ident$_0$} from {\ident$_1$}, .., {\ident$_n$}}\\ -\ident$_1$ \ldots \ident$_n$ are different inductive principles that must belong to -the same package of mutual inductive principle definitions. This command -generates {\ident$_0$} to be the conjunction of the principles: it is -build from the common premises of the principles and concluded by the -conjunction of their conclusions. For exemple, we can combine the -induction principles for trees and forests: - -\begin{coq_example*} -Combined Scheme tree_forest_mutind from tree_ind, forest_ind. -Check tree_forest_mutind. -\end{coq_example*} - -%\end{document} - diff --git a/doc/refman/RefMan-int.tex b/doc/refman/RefMan-int.tex index fbeccb664d..2b9e4e6051 100644 --- a/doc/refman/RefMan-int.tex +++ b/doc/refman/RefMan-int.tex @@ -58,7 +58,7 @@ Chapter~\ref{Addoc-coqide}. 
\section*{How to read this book} -This is a Reference Manual, not a User Manual, then it is not made for a +This is a Reference Manual, not a User Manual, so it is not made for a continuous reading. However, it has some structure that is explained below. @@ -100,6 +100,11 @@ corresponds to the Chapter~\ref{Addoc-syntax}. presented. Finally, Chapter~\ref{Addoc-coqide} describes the \Coq{} integrated development environment. + +\item The fifth part documents a number of advanced features, including + coercions, canonical structures, typeclasses, program extraction, and + specialized solvers and tactics. See the table of contents for a complete + list. \end{itemize} At the end of the document, after the global index, the user can find @@ -120,15 +125,6 @@ documents: user can read also the tutorial on recursive types (document {\tt RecTutorial.ps}). -\item[Addendum] The fifth part (the Addendum) of the Reference Manual - is distributed as a separate document. It contains more - detailed documentation and examples about some specific aspects of the - system that may interest only certain users. It shares the indexes, - the page numbers and - the bibliography with the Reference Manual. If you see in one of the - indexes a page number that is outside the Reference Manual, it refers - to the Addendum. - \item[Installation] A text file INSTALL that comes with the sources explains how to install \Coq{}. diff --git a/doc/refman/RefMan-ltac.tex b/doc/refman/RefMan-ltac.tex index bb679ecba7..3ce1d4ecd8 100644 --- a/doc/refman/RefMan-ltac.tex +++ b/doc/refman/RefMan-ltac.tex @@ -392,7 +392,7 @@ all selected goals. \item{} [{\ident}] {\tt :} {\tacexpr} In this variant, {\tacexpr} is applied locally to a goal - previously named by the user. + previously named by the user (see~\ref{ExistentialVariables}). \item {\num} {\tt :} {\tacexpr} @@ -891,7 +891,7 @@ behavior can be retrieved with the {\tt Tactic Compat Context} flag. 
\end{Variants} -\subsubsection[Pattern matching on goals]{Pattern matching on goals\index{Ltac!match goal@\texttt{match goal}} +\subsubsection[Pattern matching on goals]{Pattern matching on goals\index{Ltac!match goal@\texttt{match goal}}\label{ltac-match-goal} \index{Ltac!match reverse goal@\texttt{match reverse goal}} \index{match goal@\texttt{match goal}!in Ltac} \index{match reverse goal@\texttt{match reverse goal}!in Ltac}} @@ -1131,8 +1131,9 @@ on. This can be obtained thanks to the option below. \optindex{Shrink Abstract} {\tt Set Shrink Abstract} \end{quote} +\emph{Deprecated since 8.7} -When set, all lemmas generated through \texttt{abstract {\tacexpr}} +When set (default), all lemmas generated through \texttt{abstract {\tacexpr}} and \texttt{transparent\_abstract {\tacexpr}} are quantified only over the variables that appear in the term constructed by \texttt{\tacexpr}. diff --git a/doc/refman/RefMan-oth.tex b/doc/refman/RefMan-oth.tex index 3daaac88b1..8f43ebcfbc 100644 --- a/doc/refman/RefMan-oth.tex +++ b/doc/refman/RefMan-oth.tex @@ -656,7 +656,7 @@ dynamically. searched into the current {\ocaml} loadpath (see the command {\tt Add ML Path} in the Section~\ref{loadpath}). Loading of {\ocaml} files is only possible under the bytecode version of {\tt coqtop} -(i.e. {\tt coqtop} called with options {\tt -byte}, see chapter +(i.e. {\tt coqtop.byte}, see chapter \ref{Addoc-coqc}), or when {\Coq} has been compiled with a version of {\ocaml} that supports native {\tt Dynlink} ($\ge$ 3.11). @@ -739,7 +739,7 @@ the command {\tt Declare ML Module} in the Section~\ref{compiled}). \subsection[\tt Print ML Path {\str}.]{\tt Print ML Path {\str}.\comindex{Print ML Path}} This command displays the current {\ocaml} loadpath. This command makes sense only under the bytecode version of {\tt -coqtop}, i.e. using option {\tt -byte} (see the +coqtop}, i.e. {\tt coqtop.byte} (see the command {\tt Declare ML Module} in the section \ref{compiled}). 
@@ -974,7 +974,20 @@ line provided it does not exceed the printing width (See {\tt Set Printing Width} above). \subsection[\tt Test Printing Compact Contexts.]{\tt Test Printing Compact Contexts.\optindex{Printing Compact Contexts}} -This command displays the current state of compaction of goal d'isolat. +This command displays the current state of compaction of goal. + + +\subsection[\tt Unset Printing Unfocused.]{\tt Unset Printing Unfocused.\optindex{Printing Unfocused}} +This command resets the displaying of goals to focused goals only +(default). Unfocused goals are created by focusing other goals with +bullets(see~\ref{bullets}) or curly braces (see~\ref{curlybacket}). + +\subsection[\tt Set Printing Unfocused.]{\tt Set Printing Unfocused.\optindex{Printing Unfocused}} +This command enables the displaying of unfocused goals. The goals are +displayed after the focused ones and are distinguished by a separator. + +\subsection[\tt Test Printing Unfocused.]{\tt Test Printing Unfocused.\optindex{Printing Unfocused}} +This command displays the current state of unfocused goals display. \subsection[\tt Set Printing Dependent Evars Line.]{\tt Set Printing Dependent Evars Line.\optindex{Printing Dependent Evars Line}} This command enables the printing of the ``{\tt (dependent evars: \ldots)}'' diff --git a/doc/refman/RefMan-pro.tex b/doc/refman/RefMan-pro.tex index b66659dc8c..eb59ca584e 100644 --- a/doc/refman/RefMan-pro.tex +++ b/doc/refman/RefMan-pro.tex @@ -308,7 +308,7 @@ last {\tt Focus} command. Succeeds in the proof is fully unfocused, fails is there are some goals out of focus. 
-\subsection[\tt \{ \textrm{and} \}]{\tt \{ \textrm{and} \}\comindex{\{}\comindex{\}}} +\subsection[\tt \{ \textrm{and} \}]{\tt \{ \textrm{and} \}\comindex{\{}\comindex{\}}}\label{curlybacket} The command {\tt \{} (without a terminating period) focuses on the first goal, much like {\tt Focus.} does, however, the subproof can only be unfocused when it has been fully solved (\emph{i.e.} when @@ -327,7 +327,7 @@ unfocus it or focus the next one. \end{ErrMsgs} \subsection[Bullets]{Bullets\comindex{+ (command)} - \comindex{- (command)}\comindex{* (command)}\index{Bullets}} + \comindex{- (command)}\comindex{* (command)}\index{Bullets}}\label{bullets} Alternatively to {\tt \{} and {\tt \}}, proofs can be structured with bullets. The use of a bullet $b$ for the first time focuses on the first goal $g$, the same bullet cannot be used again until the proof @@ -421,6 +421,24 @@ This command displays the current goals. \item \errindex{No focused proof} \end{ErrMsgs} +\item {\tt Show {\ident}.}\\ + Displays the named goal {\ident}. + This is useful in particular to display a shelved goal but only works + if the corresponding existential variable has been named by the user + (see~\ref{ExistentialVariables}) as in the following example. + +\begin{coq_eval} +Reset Initial. +\end{coq_eval} + +\begin{coq_example*} +Goal exists n, n = 0. + eexists ?[n]. +\end{coq_example*} +\begin{coq_example} + Show n. +\end{coq_example} + \item {\tt Show Script.}\comindex{Show Script}\\ Displays the whole list of tactics applied from the beginning of the current proof. diff --git a/doc/refman/RefMan-sch.tex b/doc/refman/RefMan-sch.tex index d3719bed46..23a1c9b029 100644 --- a/doc/refman/RefMan-sch.tex +++ b/doc/refman/RefMan-sch.tex @@ -227,6 +227,7 @@ We define the function \texttt{div2} as follows: \begin{coq_eval} Reset Initial. +Require Import FunInd. 
\end{coq_eval} \begin{coq_example*} diff --git a/doc/refman/RefMan-ssr.tex b/doc/refman/RefMan-ssr.tex new file mode 100644 index 0000000000..61f7421c44 --- /dev/null +++ b/doc/refman/RefMan-ssr.tex @@ -0,0 +1,4932 @@ +\achapter{The SSReflect proof language} +\aauthor{Georges Gonthier, Assia Mahboubi, Enrico Tassi} + +\newcommand{\ssr}{{\sc SSReflect}} + +% listing +\ifhevea\newcommand{\ssrC}[1]{\texttt{#1}}\else\newcommand{\ssrC}[1]{\text{\lstinline!#1!}}\fi +\ifhevea\renewenvironment{center}{\@open{div}{class="center"}\@open{div}{class="centered"}}{\@close{div}\@close{div}}\fi +% non-terminal +%\newcommand\ssrN[2][]{\ensuremath{\langle\mbox{\itshape\rmfamily\small #2}\rangle_{#1}}} +\newcommand\ssrN[2][]{{\textsl {#2}}\ensuremath{_{#1}}} +\ifhevea\newcommand{\underbar}[1]{\underline{#1}}\fi + +% TODO: only use \ssrC +\let\ssrL=\lstinline + +\newcommand{\iitem}{{\it i-item}} +\newcommand{\ditem}{{\it d-item}} +\newcommand{\optional}[1]{{\it[}#1{\it]}} +\newcommand{\optsep}{{\it|}} +\newcommand{\idx}[1]{\tacindex{#1 (ssreflect)}} +\newcommand{\idxC}[1]{\comindex{#1 (ssreflect)}} + +\newenvironment{new}% + {\begin{Sbox}\begin{minipage}{0.97\textwidth}% + \begin{flushright}\textcolor{red}{\fbox{Version 1.3}}% + \end{flushright}\noindent}% + {\end{minipage}\end{Sbox}\noindent\doublebox{\TheSbox}} +\section{Introduction}\label{sec:intro} + +This chapter describes a set of tactics known as \ssr{} +originally designed to provide support for the so-called \emph{small scale +reflection} proof methodology. Despite the original purpose this set of tactic +is of general interest and is available in Coq starting from version 8.7. + +\ssr{} was developed independently of the tactics described in +Chapter~\ref{Tactics}. Indeed the scope of the tactics part of +\ssr{} largely overlaps with the standard set of tactics. Eventually +the overlap will be reduced in future releases of Coq. 
+
+Proofs written in \ssr{} typically look quite different from the
+ones written using only tactics as per Chapter~\ref{Tactics}.
+We summarise here the most ``visible'' differences in order to
+help the reader already accustomed to the tactics described in
+Chapter~\ref{Tactics} to read this chapter.
+
+The first difference between the tactics described in this
+chapter and the tactics described in Chapter~\ref{Tactics} is the way
+hypotheses are managed (we call this \emph{bookkeeping}).
+In Chapter~\ref{Tactics} the most common
+approach is to avoid explicitly moving hypotheses back and forth
+between the context and the conclusion of the goal. On the contrary,
+in \ssr{}
+all bookkeeping is performed on the conclusion of the goal, using for
+that purpose a couple of syntactic constructions behaving similarly to
+tacticals (and often named as such in this chapter).
+The \ssrC{:} tactical moves hypotheses from the context to the
+conclusion, while \ssrC{=>} moves hypotheses from the
+conclusion to the context, and \ssrC{in} moves a hypothesis back
+and forth between the context and the conclusion for the
+time of applying an action to it.
+
+While naming hypotheses is commonly done by means of an \ssrC{as}
+clause in the basic model of Chapter~\ref{Tactics}, here this task is
+devoted to \ssrC{=>}. Tactics leave
+new assumptions in the conclusion, and are often followed by
+\ssrC{=>} to explicitly name them.
+While generalizing the goal is normally
+not explicitly needed in Chapter~\ref{Tactics}, it is an explicit
+operation performed by \ssrC{:}.
+
+Besides the difference in bookkeeping model, this chapter includes
+specific tactics which have no explicit counterpart in
+Chapter~\ref{Tactics}, such as tactics to mix forward steps and
+generalizations, like \ssrC{generally have} or \ssrC{without loss}.
+
+\ssr{} adopts the point of view that rewriting, definition
+expansion and partial evaluation all participate in the same concept of
+rewriting a goal in a larger sense. As such, all these functionalities are
+provided by the \ssrC{rewrite} tactic.
+
+\ssr{} includes a little language of patterns to select subterms in tactics
+or tacticals where it matters. Its most notable application
+is in the \ssrC{rewrite} tactic, where patterns are used to specify
+where the rewriting step has to take place.
+
+Finally, \ssr{} supports the so-called reflection steps, typically
+allowing to switch back and forth between the computational view and
+logical view of a concept.
+
+To conclude it is worth mentioning that \ssr{} tactics
+can be mixed with non \ssr{} tactics in the same proof,
+or in the same Ltac expression. The few exceptions
+to this statement are described in section~\ref{sec:compat}.
+
+\iffalse
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection*{How to read this documentation}
+
+The syntax of the tactics is presented as follows:
+\begin{itemize}
+\item \ssrC{terminals} are in typewriter font and \ssrN{non terminals} are
+  between angle brackets.
+\item Optional parts of the grammar are surrounded by \optional{ }
+  brackets. These should not be confused with verbatim brackets
+  \ssrC{[ ]}, which are delimiters in the \ssr{} syntax.
+\item A vertical rule {\optsep} indicates an alternative in the syntax, and
+  should not be confused with a
+  verbatim vertical rule between verbatim brackets \ssrC{[ | ]}.
+\item A non empty list of non terminals (at least one item should be
+  present) is represented by \ssrN{non terminals}$^+$. A possibly empty
+  one is represented by \ssrN{non terminals}$^*$.
+\item In a non empty list of non terminals, items are separated by blanks.
+\end{itemize}
+\fi
+
+% Hevea has no colors
+\ifhevea \else
+\noindent We follow the default color scheme of the \ssr{} mode for
+ProofGeneral provided in the distribution:
+
+\centerline{
+\textcolor{dkblue}{\texttt{tactic}} or \textcolor{dkviolet}{\tt
  Command} or \textcolor{dkgreen}{\tt keyword} or
+\textcolor{dkpink}{\tt tactical}}
+
+\noindent Closing tactics/tacticals like \ssrC{exact} or \ssrC{by} (see section
+\ref{ssec:termin}) are in red.
+\fi
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection*{Acknowledgments}
+The authors would like to thank Fr\'ed\'eric Blanqui, Fran\c{c}ois Pottier
+and Laurence Rideau for their comments and suggestions.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\newpage\section{Usage}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Getting started}\label{sec:files}
+To be available, the tactics presented in this manual need the
+following minimal set of libraries to be loaded: {\tt ssreflect.v}, {\tt
+ssrfun.v} and {\tt ssrbool.v}. Moreover, these tactics come with a
+methodology specific to the authors of Ssreflect, which requires a
+few options to be set differently from their default
+values. All in all, this corresponds to working in the following context:
+
+\begin{lstlisting}
+  From Coq Require Import ssreflect ssrfun ssrbool.
+  Set Implicit Arguments.
+  Unset Strict Implicit.
+  Unset Printing Implicit Defensive.
+\end{lstlisting}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Compatibility issues}\label{sec:compat}
+Requiring the above modules creates an environment which
+is mostly compatible with the rest of \Coq{}, up to a few discrepancies:
+\begin{itemize}
+\item New keywords (\ssrC{is}) might clash with variable, constant,
+tactic or tactical names, or with quasi-keywords in tactic or
+vernacular notations.
+\item New tactic(al)s names (\ssrC{last}, \ssrC{done}, \ssrC{have}, + \ssrC{suffices}, \ssrC{suff}, + \ssrC{without loss}, \ssrC{wlog}, \ssrC{congr}, \ssrC{unlock}) might clash + with user tactic names. +\item Identifiers with both leading and trailing \ssrC{_}, such as \ssrC{_x_}, +are reserved by \ssr{} and cannot appear in scripts. +\item The extensions to the \ssrC{rewrite} tactic are partly +incompatible with those available in current versions of \Coq{}; +in particular: +\ssrC{rewrite .. in (type of k)} or \\ \ssrC{rewrite .. in *} or any other +variant of \ssrC{rewrite} will not work, and the \ssr{} syntax and semantics for occurrence selection and +rule chaining is different. + +Use an explicit rewrite direction (\ssrC{rewrite <-} $\dots$ or \ssrC{rewrite ->} $\dots$) +to access the \Coq{} \ssrC{rewrite} tactic. +\item New symbols (\ssrC{//, /=, //=}) might clash with adjacent existing + symbols (e.g., '\ssrC{//}') instead of '\ssrC{/}''\ssrC{/}'). This can be avoided + by inserting white spaces. +\item New constant and theorem names might clash with the user +theory. This can be avoided by not importing all of \ssr{}: +\begin{lstlisting} + From Coq Require ssreflect. + Import ssreflect.SsrSyntax. +\end{lstlisting} +Note that the full syntax of \ssr{}'s {\tt rewrite} and reserved identifiers are +enabled only if the \ssrC{ssreflect} module has been required and if +\ssrC{SsrSyntax} has been imported. Thus a file that requires (without importing) + \ssrC{ssreflect} and imports \ssrC{SsrSyntax}, can be +required and imported without automatically enabling \ssr{}'s +extended rewrite syntax and reserved identifiers. +\item Some user notations (in particular, defining an infix ';') might +interfere with the "open term", parenthesis free, syntax of tactics +such as \ssrC{have}, \ssrC{set} and \ssrC{pose}. 
+\item The generalization of \ssrC{if} statements to non-Boolean +conditions is turned off by \ssr{}, because it is mostly subsumed by +\ssrC{Coercion} to \ssrC{bool} of the \ssrC{sum}XXX types (declared in +\ssrC{ssrfun.v}) +and the \ssrC{if} {\term} \ssrC{is} \ssrN{pattern} \ssrC{then} {\term} \ssrC{else} {\term} construct (see +\ref{ssec:patcond}). To use the generalized form, turn off the \ssr{} +Boolean \ssrC{if} notation using the command: +\begin{lstlisting} + Close Scope boolean_if_scope. +\end{lstlisting} +\item The following two options can be unset to disable the + incompatible \ssrC{rewrite} syntax and allow + reserved identifiers to appear in scripts. +\begin{lstlisting} + Unset SsrRewrite. + Unset SsrIdents. +\end{lstlisting} +\end{itemize} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Gallina extensions} + +Small-scale reflection makes an extensive use of the programming +subset of Gallina, \Coq{}'s logical specification language. This subset +is quite suited to the description of functions on representations, +because it closely follows the well-established design of the ML +programming language. The \ssr{} extension provides three additions +to Gallina, for pattern assignment, pattern testing, and polymorphism; +these mitigate minor but annoying discrepancies between Gallina and ML. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Pattern assignment}\label{ssec:patass} +The \ssr{} extension provides the following construct for +irrefutable pattern matching, that is, destructuring assignment: + +\ssrC{let: } \ssrN{pattern} \ssrC{:=} \ssrN[1]{term} \ssrC{in} \ssrN[2]{term} + +Note the colon `\ssrC{:}' after the \ssrC{let} keyword, which avoids any +ambiguity with a function +definition or \Coq{}'s basic destructuring \ssrC{let}. 
The \ssrC{let:} +construct differs from the latter in that +\begin{itemize} +\item The pattern can be nested (deep pattern matching), in + particular, this allows expression of the form: +\begin{lstlisting} + let: exist (x, y) p_xy := Hp in ... +\end{lstlisting} +\item The destructured constructor is explicitly given in the + pattern, and is used for type inference, e.g., +\begin{lstlisting} + Let f u := let: (m, n) := u in m + n. +\end{lstlisting} +using a colon \ssrC{let:}, infers \ssrC{f : nat * nat -> nat}, whereas +\begin{lstlisting} + Let f u := let (m, n) := u in m + n. +\end{lstlisting} +with a usual \ssrC{let}, requires an extra type annotation. +\end{itemize} +The \ssrC{let:} construct is just (more legible) notation for the primitive Gallina expression + +\begin{center} +\ssrC{match} \ssrN[1]{term} \ssrC{with} \ssrN{pattern} \ssrC{=>} \ssrN[2]{term} \ssrC{end} +\end{center} + +The \ssr{} destructuring assignment supports all the dependent match +annotations; the full syntax is + +\begin{center} +\ssrC{let:} \ssrN[1]{pattern} \ssrC{as} \ssrN{ident} \ssrC{in} \ssrN[2]{pattern} \ssrC{:=} \ssrN[1]{term} \ssrC{return} \ssrN[2]{term} \ssrC{in} \ssrN[3]{term} +\end{center} + +where \ssrN[2]{pattern} is a \emph{type} pattern and \ssrN[1]{term} and +\ssrN[2]{term} are types. + +When the \ssrC{as} and \ssrC{return} are both present, then \ssrN{ident} is bound +in both the type \ssrN[2]{term} and the expression \ssrN[3]{term}; +variables in the optional type pattern \ssrN[2]{pattern} are +bound only in the type \ssrN[2]{term}, and other variables in \ssrN[1]{pattern} are +bound only in the expression \ssrN[3]{term}, however. 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Pattern conditional}\label{ssec:patcond}
+The following construct can be used for a refutable pattern matching,
+that is, pattern testing:
+
+\begin{center}
+\ssrC{if}\ \ssrN[1]{term} \ssrC{is} \ssrN[1]{pattern} \ssrC{then} \ssrN[2]{term} \ssrC{else} \ssrN[3]{term}
+\end{center}
+
+Although this construct is not strictly ML (it does exist in variants
+such as the pattern calculus or the $\rho$-calculus), it turns out to be
+very convenient for writing functions on representations,
+because most such functions manipulate simple datatypes such as Peano
+integers, options,
+lists, or binary trees, and the pattern conditional above is almost
+always the right construct
+for analyzing such simple types. For example, the \ssrC{null} and
+\ssrC{all} list function(al)s can be defined as follows:
+\begin{lstlisting}
+  Variable d: Set.
+  Fixpoint |*null*| (s : list d) := if s is nil then true else false.
+  Variable a : d -> bool.
+  Fixpoint |*all*| (s : list d) : bool :=
+    if s is cons x s' then a x && all s' else true.
+\end{lstlisting}
+
+The pattern conditional also provides a notation for destructuring
+assignment with a refutable pattern, adapted to the pure functional
+setting of Gallina, which lacks a \\\texttt{Match\_Failure} exception.
+
+Like \ssrC{let:} above, the \ssrC{if}$\dots$\ssrC{is} construct is just (more legible)
+notation for the primitive Gallina expression:
+
+\begin{center}
+\ssrC{match} \ssrN[1]{term} \ssrC{with} \ssrN{pattern} \ssrC{=>} \ssrN[2]{term} \ssrC{| _ =>} \ssrN[3]{term} \ssrC{end}
+\end{center}
+
+Similarly, it will always be displayed as the expansion of this form
+in terms of primitive \ssrC{match} expressions (where the default
+expression $\ssrN[3]{term}$ may be replicated).
+ + +Explicit pattern testing also largely subsumes the generalization of +the \ssrC{if} construct to all binary datatypes; compare: + +\begin{center} +\ssrC{if} {\term} \ssrC{is inl _ then} \ssrN[l]{term} \ssrC{else} \ssrN[r]{term} +\end{center} + +and: + +\begin{center} +\ssrC{if} {\term} \ssrC{then} \ssrN[l]{term} \ssrC{else} \ssrN[r]{term} +\end{center} + +The latter appears to be marginally shorter, but it is quite +ambiguous, and indeed often +requires an explicit annotation term : \ssrC{\{_\}+\{_\}} to type-check, +which evens the character count. + +Therefore, \ssr{} restricts by default the condition of a plain \ssrC{if} +construct to the standard \ssrC{bool} type; this avoids spurious type +annotations, e.g., in: +\begin{lstlisting} + Definition |*orb*| b1 b2 := if b1 then true else b2. +\end{lstlisting} +As pointed out in section~\ref{sec:compat}, this restriction can be removed with +the command: +\begin{lstlisting} + Close Scope boolean_if_scope. +\end{lstlisting} +Like \ssrC{let:} above, the \ssrC{if} {\term} \ssrC{is} \ssrN{pattern} +\ssrC{else} {\term} construct +supports the dependent \ssrC{match} annotations: + +\begin{center} +\ssrC{if} \ssrN[1]{term} \ssrC{is} \ssrN[1]{pattern} \ssrC{as} \ssrN{ident} \ssrC{in} \ssrN[2]{pattern} \ssrC{return} \ssrN[2]{term} \ssrC{then} \ssrN[3]{term} \ssrC{else} \ssrN[4]{term} +\end{center} + +As in \ssrC{let:} the variable \ssrN{ident} (and those in +the type pattern \ssrN[2]{pattern}) are bound in \ssrN[2]{term}; \ssrN{ident} is +also bound in \ssrN[3]{term} (but not in \ssrN[4]{term}), while the +variables in \ssrN[1]{pattern} are bound only in \ssrN[3]{term}. + +\noindent +Another variant allows to treat the else case first: + +\begin{center} +\ssrC{if} \ssrN[1]{term} \ssrC{isn't} \ssrN[1]{pattern} \ssrC{then} \ssrN[2]{term} \ssrC{else} \ssrN[3]{term} +\end{center} + +Note that \ssrN[1]{pattern} eventually binds variables in \ssrN[3]{term} +and not \ssrN[2]{term}. 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Parametric polymorphism}\label{ssec:parampoly}
+
+Unlike ML, polymorphism in core Gallina is explicit: the type
+parameters of polymorphic functions must be declared explicitly, and
+supplied at each point of use. However, \Coq{} provides two features
+to suppress redundant parameters:
+\begin{itemize}
+\item Sections are used to provide (possibly implicit) parameters for
+  a set of definitions.
+\item Implicit arguments declarations are used to tell \Coq{} to use
+  type inference to deduce some parameters from the context at each
+  point of call.
+\end{itemize}
+The combination of these features provides a fairly good emulation of ML-style
+polymorphism, but unfortunately this emulation breaks down for
+higher-order programming. Implicit arguments are indeed not inferred
+at all points of use, but only at
+points of call, leading to expressions such as
+\begin{lstlisting}
+  Definition |*all_null*| (s : list T) := all (@null T) s.
+\end{lstlisting}
+Unfortunately, such higher-order expressions are quite frequent in
+representation functions, especially those which use \Coq{}'s
+\ssrC{Structure}s to emulate Haskell type classes.
+
+Therefore, \ssr{} provides a variant of \Coq{}'s implicit argument
+declaration, which causes \Coq{} to fill in some implicit parameters
+at each point of use, e.g., the above definition can be written:
+\begin{lstlisting}
+  Definition |*all_null*| (s : list d) := all null s.
+\end{lstlisting}
+Better yet, it can be omitted entirely, since \ssrC{all_null s} isn't
+much of an improvement over \ssrC{all null s}.
+
+The syntax of the new declaration is
+
+\begin{center}
+\ssrC{Prenex Implicits} \ssrN{ident}$^+$.
+\end{center}
+
+Let us denote $c_1 \dots c_n$ the list of identifiers given to a
+\ssrC{Prenex Implicits} command.
+The command checks that each $c_i$ is the name of a functional
+constant, whose implicit arguments are prenex, i.e., the first $n_i >
+0$ arguments of $c_i$ are implicit; then it assigns
+\ssrC{Maximal Implicit} status to these arguments.
+
+As these prenex implicit arguments are ubiquitous and often have large
+display strings, it is strongly recommended to change the default
+display settings of \Coq{} so that they are not printed (except after a
+\ssrC{Set Printing All} command).
+All \ssr{} library files thus start with the incantation
+\begin{lstlisting}
+  Set Implicit Arguments.
+  Unset Strict Implicit.
+  Unset Printing Implicit Defensive.
+\end{lstlisting}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Anonymous arguments}
+
+When in a definition, the type of a certain argument is mandatory, but
+not its name, one usually uses ``arrow'' abstractions for prenex
+arguments, or the \ssrC{(_ : }{\term}\ssrC{)} syntax for inner arguments.
+In \ssr{}, the latter can be replaced by the open syntax `\ssrC{of\ }{\term}'
+or (equivalently) `\ssrC{& }{\term}', which are both syntactically
+equivalent to a \ssrC{(_ : }{\term}\ssrC{)} expression.
+
+For instance, the usual two-constructor polymorphic type \ssrC{list},
+i.e. the one of the
+standard {\tt List} library, can be defined by the following
+declaration:
+\begin{lstlisting}
+  Inductive list (A : Type) : Type := nil | cons of A & list A.
+\end{lstlisting}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Wildcards}\label{ssec:wild}
+
+The terms passed as arguments
+to \ssr{} tactics can contain \emph{holes}, materialized by wildcards
+\ssrC{_}.
+Since \ssr{} allows a more powerful form of type inference for these
+arguments, it enhances the possibilities of using such wildcards.
+These holes are in particular used as a convenient shorthand for
+abstractions, especially in local definitions or type expressions.
+ +Wildcards may be interpreted as abstractions (see for example sections +\ref{ssec:pose} and \ref{ssec:struct}), or their content can be +inferred from the whole +context of the goal (see for example section \ref{ssec:set}). +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Definitions} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Definitions}\label{ssec:pose} +\idx{pose \dots{} := \dots{}} +\idx{pose fix \dots{} := \dots{}} +\idx{pose cofix \dots{} := \dots{}} + +The \ssrC{pose} tactic allows to add a defined constant to a +proof context. \ssr{} generalizes this tactic in several ways. +In particular, the \ssr{} \ssrC{pose} tactic supports \emph{open syntax}: +the body of +the definition does not need surrounding parentheses. For instance: +\begin{lstlisting} + pose t := x + y. +\end{lstlisting} +is a valid tactic expression. + +The \ssrC{pose} tactic is also improved for the +local definition of higher order terms. +Local definitions of functions can use the same syntax as +global ones. The tactic: +\begin{lstlisting} + pose f x y := x + y. +\end{lstlisting} +adds to the context the defined constant: +\begin{lstlisting} + f := fun x y : nat => x + y : nat -> nat -> nat +\end{lstlisting} + +The \ssr{} \ssrC{pose} tactic also supports (co)fixpoints, +by providing the local counterpart of the +\ssrC{Fixpoint f := $\dots$ } and \ssrC{CoFixpoint f := $\dots$ } constructs. +For instance, the following tactic: +\begin{lstlisting} + pose fix f (x y : nat) {struct x} : nat := + if x is S p then S (f p y) else 0. +\end{lstlisting} +defines a local fixpoint \ssrC{f}, which mimics the standard \ssrC{plus} +operation on natural numbers. + +Similarly, local cofixpoints can be defined by a tactic of the form: +\begin{lstlisting} + pose cofix f (arg : T) ... +\end{lstlisting} + +The possibility to include wildcards in the body of the definitions + offers a smooth +way of defining local abstractions. 
The type of ``holes'' is +guessed by type inference, and the holes are abstracted. +For instance the tactic: +\begin{lstlisting} + pose f := _ + 1. +\end{lstlisting} +is shorthand for: +\begin{lstlisting} + pose f n := n + 1. +\end{lstlisting} + +When the local definition of a function involves both arguments and +holes, hole abstractions appear first. For instance, the +tactic: +\begin{lstlisting} + pose f x := x + _. +\end{lstlisting} +is shorthand for: +\begin{lstlisting} + pose f n x := x + n. +\end{lstlisting} + + +The interaction of the \ssrC{pose} tactic with the interpretation of +implicit arguments results in a powerful and concise syntax for local +definitions involving dependent types. +For instance, the tactic: +\begin{lstlisting} + pose f x y := (x, y). +\end{lstlisting} +adds to the context the local definition: +\begin{lstlisting} + pose f (Tx Ty : Type) (x : Tx) (y : Ty) := (x, y). +\end{lstlisting} +The generalization of wildcards makes the use of the \ssrC{pose} tactic +resemble ML-like definitions of polymorphic functions. + +% The use of \ssrC{Prenex Implicits} declarations (see section +% \ref{ssec:parampoly}), makes this feature specially convenient. +% Note that this combines with the interpretation of wildcards, and that +% it is possible to define: +% \begin{lstlisting} +% pose g x y : prod _ nat := (x, y). +% \end{lstlisting} +% which is equivalent to: +% \begin{lstlisting} +% pose g x (y : nat) := (x, y). +% \end{lstlisting} + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Abbreviations}\label{ssec:set} +\idx{set \dots{} := \dots{}} + + +The \ssr{} \ssrC{set} tactic performs abbreviations: it introduces a +defined constant for a subterm appearing in the goal and/or in the +context. 
+ +\ssr{} extends the \ssrC{set} tactic by supplying: +\begin{itemize} +\item an open syntax, similarly to the \ssrC{pose} tactic; +\item a more aggressive matching algorithm; +\item an improved interpretation of wildcards, taking advantage of the + matching algorithm; +\item an improved occurrence selection mechanism allowing to abstract only + selected occurrences of a term. +\end{itemize} + +The general syntax of this tactic is +\begin{center} +\ssrC{set} \ssrN{ident} \optional{\ssrC{:} \ssrN[1]{term}} \ssrC{:=} \optional{\ssrN{occ-switch}} \ssrN[2]{term} +\end{center} +\begin{center} +\ssrN{occ-switch} ::= \ssrC{\{}[\ssrC{+}|\ssrC{-}] {\naturalnumber}$^*$ \ssrC{\}} +\end{center} + + +where: + +\begin{itemize} +\item \ssrN{ident} is a fresh identifier chosen by the user. +\item \ssrN[1]{term} is +an optional type annotation. The type annotation \ssrN[1]{term} can be +given in open syntax (no surrounding parentheses). If no \ssrN{occ-switch} +(described hereafter) is present, it is also +the case for \ssrN[2]{term}. +On the other hand, in presence of \ssrN{occ-switch}, parentheses +surrounding \ssrN[2]{term} are mandatory. +\item In the occurrence switch \ssrN{occ-switch}, if the first element + of the list is a {\naturalnumber}, this element should be a number, and not + an Ltac variable. The empty list \ssrC{\{\}} is not interpreted as a + valid occurrence switch. +\end{itemize} +% For example, the script: +% \begin{lstlisting} +% Goal forall (f : nat -> nat)(x y : nat), f x + f x = f x. +% move=> f x y. +% \end{lstlisting} + +The tactic: +\begin{lstlisting} + set t := f _. +\end{lstlisting} +transforms the goal \ssrC{f x + f x = f x} into \ssrC{t + t = t}, adding +\ssrC{t := f x} to the context, and the tactic: +\begin{lstlisting} + set t := {2}(f _). +\end{lstlisting} +transforms it into \ssrC{f x + t = f x}, adding \ssrC{t := f x} to the context. 
+ +The type annotation \ssrN[1]{term} may +contain wildcards, which will be filled with the appropriate value by +the matching process. + +The tactic first tries to find a subterm of the goal matching +\ssrN[2]{term} (and its type \ssrN[1]{term}), +and stops at the first subterm it finds. Then the occurrences +of this subterm selected by the optional \ssrN{occ-switch} are replaced +by \ssrN{ident} and a definition \ssrN{ident} \ssrC{:=} {\term} is added to +the context. If no \ssrN{occ-switch} is present, then all the +occurrences are abstracted. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Matching} + +The matching algorithm compares a pattern \textit{term} + with a subterm of the goal by comparing their heads +and then pairwise unifying their arguments (modulo conversion). Head +symbols match under the following conditions: + +\begin{itemize} +\item If the head of \textit{term} is a constant, then it + should be syntactically equal to the head symbol of the subterm. +\item If this head is a projection of a canonical structure, + then canonical structure equations are used for the matching. +\item If the head of \textit{term} is \emph{not} a constant, the + subterm should have the same structure ($\lambda$ abstraction, + \ssrC{let}$\dots$\ssrC{in} structure \dots). +\item If the head of \textit{term} is a hole, the subterm should have + at least as many arguments as \textit{term}. For instance the tactic: +\begin{lstlisting} + set t := _ x. +\end{lstlisting} +transforms the goal \ssrL-x + y = z- into \ssrC{t y = z} and adds +\ssrC{t := plus x : nat -> nat} to the context. + +\item In the special case where \textit{term} is of the form + \ssrC{(let f := }$t_0$ \ssrC{in f) }$t_1\dots t_n$, + then the pattern \textit{term} is treated +as \ssrC{(_ }$t_1\dots t_n$\ssrC{)}. 
For each subterm in +the goal having the form $(A\ u_1\dots u_{n'})$ with $n' \geq n$, the +matching algorithm successively tries to find the largest +partial application $(A\ u_1\dots u_{i'})$ convertible to the head +$t_0$ of \textit{term}. For instance the following tactic: +\begin{lstlisting} + set t := (let g y z := y.+1 + z in g) 2. +\end{lstlisting} +transforms the goal +\begin{lstlisting} + (let f x y z := x + y + z in f 1) 2 3 = 6. +\end{lstlisting} +into \ssrC{t 3 = 6} and adds the local definition of \ssrC{t} to the +context. +\end{itemize} + +Moreover: +\begin{itemize} +\item Multiple holes in \textit{term} are treated as independent + placeholders. For instance, the tactic: +\begin{lstlisting} + set t := _ + _. +\end{lstlisting} +transforms the goal \ssrC{x + y = z} into \ssrC{t = z} and pushes +\ssrC{t := x + y : nat} in the context. +\item The type of the subterm matched should fit the type + (possibly casted by some type annotations) of the pattern + \textit{term}. +\item The replacement of the subterm found by the instantiated pattern + should not capture variables, hence the following script: +\begin{lstlisting} + Goal forall x : nat, x + 1 = 0. + set u := _ + 1. +\end{lstlisting} +raises an error message, since \ssrC{x} is bound in the goal. +\item Typeclass inference should fill in any residual hole, but +matching should never assign a value to a global existential variable. + +\end{itemize} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Occurrence selection}\label{sssec:occselect} + +\ssr{} provides a generic syntax for the selection of occurrences by +their position indexes. These \emph{occurrence switches} are shared by +all +\ssr{} tactics which require control on subterm selection like rewriting, +generalization, \dots + +An \emph{occurrence switch} can be: +\begin{itemize} +\item A list \ssrC{\{} {\naturalnumber}$^*$ \ssrC{\}} of occurrences affected by the + tactic. 
+For instance, the tactic: +\begin{lstlisting} + set x := {1 3}(f 2). +\end{lstlisting} +transforms the goal \ssrC{f 2 + f 8 = f 2 + f 2} into +\ssrC{x + f 8 = f 2 + x}, and adds the abbreviation +\ssrC{x := f 2} in the +context. Notice that some occurrences of a +given term may be hidden to the user, for example because of a +notation. The vernacular \ssrC{$\texttt{\textcolor{dkviolet}{Set }}$ + Printing All} command displays all +these hidden occurrences and should be used to find the correct +coding of the occurrences to be selected\footnote{Unfortunately, +even after a call to the Set Printing All command, some occurrences are +still not displayed to the user, essentially the ones possibly hidden +in the predicate of a dependent match structure.}. For instance, the +following script: +\begin{lstlisting} + Notation "a < b":= (le (S a) b). + Goal forall x y, x < y -> S x < S y. + intros x y; set t := S x. +\end{lstlisting} +generates the goal +\ssrC{t <= y -> t < S y} since \ssrC{x < y} is now a notation for +\ssrC{S x <= y}. +\item A list \ssrC{\{}{\naturalnumber}$^+$\ssrC{\}}. This is equivalent to + \ssrC{\{} {\naturalnumber}$^+$ \ssrC{\}} but the list should start with a number, and + not with an Ltac variable. +\item A list \ssrC{\{}{\naturalnumber}$^*$\ssrC{\}} of occurrences \emph{not} to be + affected by the tactic. For instance, the tactic: +\begin{lstlisting} + set x := {-2}(f 2). +\end{lstlisting} +behaves like +\begin{lstlisting} + set x := {1 3}(f 2). +\end{lstlisting} +on the goal \ssrL-f 2 + f 8 = f 2 + f 2- which has three occurrences of +the the term \ssrC{f 2} +\item In particular, the switch \ssrC{\{+\}} selects \emph{all} the + occurrences. This switch is useful to turn + off the default behavior of a tactic which automatically clears + some assumptions (see section \ref{ssec:discharge} for instance). +\item The switch \ssrC{\{-\}} imposes that \emph{no} occurrences of the + term should be affected by the tactic. 
The tactic: +\begin{lstlisting} + set x := {-}(f 2). +\end{lstlisting} +leaves the goal unchanged and adds the definition \ssrC{x := f 2} to the +context. This kind of tactic may be used to take advantage of the +power of the matching algorithm in a local definition, instead of +copying large terms by hand. +\end{itemize} + + +It is important to remember that matching \emph{precedes} occurrence +selection, hence the tactic: +\begin{lstlisting} + set a := {2}(_ + _). +\end{lstlisting} +transforms the goal \ssrC{x + y = x + y + z} into \ssrC{x + y = a + z} +and fails on the goal \\ +\ssrC{(x + y) + (z + z) = z + z} with the error message: +\begin{lstlisting} + User error: only 1 < 2 occurrence of (x + y + (z + z)) +\end{lstlisting} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Localization}\label{ssec:loc} + + +It is possible to define an abbreviation for a term appearing in the +context of a goal thanks to the \ssrC{in} tactical. + +A tactic of the form: + +\begin{center} + \ssrC{set x :=} {\term} \ssrC{in} \ssrN[1]{fact}\ssrC{...}\ssrN[n]{fact}. +\end{center} + +introduces a defined constant called \ssrC{x} in the context, and folds +it in the facts \textit{fact$_1 \dots$ fact$_n$} +The body of \ssrC{x} is the first subterm matching \textit{term} in +\textit{fact$_1 \dots$ fact$_n$}. + +A tactic of the form: + +\begin{center} + \ssrC{set x :=} {\term} \ssrC{in} \ssrN[1]{fact}\ssrC{...}\ssrN[n]{fact} \ssrC{*.} +\end{center} + +matches {\term} and then folds \ssrC{x} similarly in +\textit{fact$_1 \dots$ fact$_n$}, but also folds \ssrC{x} in the goal. + +A goal \ssrL-x + t = 4-, whose context contains \ssrC{Hx : x = 3}, is left +unchanged by the tactic: +\begin{lstlisting} + set z := 3 in Hx. +\end{lstlisting} +but the context is extended with the definition \ssrC{z := 3} and \ssrC{Hx} becomes +\ssrC{Hx : x = z}. +On the same goal and context, the tactic: +\begin{lstlisting} + set z := 3 in Hx *. 
+\end{lstlisting} +will moreover change the goal into \ssrL-x + t = S z-. Indeed, remember +that \ssrC{4} is just a notation for \ssrC{(S 3)}. + +The use of the \ssrC{in} tactical is not limited to the localization of +abbreviations: for a complete description of the \ssrC{in} tactical, see +section \ref{ssec:profstack}. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Basic tactics}\label{sec:book} + + + +A sizable fraction of proof scripts consists of steps that do not +"prove" anything new, but instead perform menial bookkeeping tasks +such as selecting the names of constants and assumptions or splitting +conjuncts. Although they are logically trivial, bookkeeping steps are +extremely important because they define the structure of the data-flow +of a proof script. This is especially true for reflection-based +proofs, which often involve large numbers of constants and +assumptions. Good bookkeeping consists in always explicitly declaring +(i.e., naming) all new constants and assumptions in the script, and +systematically pruning irrelevant constants and assumptions in the +context. This is essential in the context of an interactive +development environment (IDE), because it facilitates navigating the +proof, allowing to instantly "jump back" to the point at which a +questionable assumption was added, and to find relevant assumptions by +browsing the pruned context. While novice or casual \Coq{} users may +find the automatic name selection feature convenient, the usage of +such a feature severely undermines the readability and maintainability +of proof scripts, much like automatic variable declaration in programming +languages. The \ssr{} tactics are therefore designed to support +precise bookkeeping and to eliminate name generation heuristics. +The bookkeeping features of \ssr{} are implemented as tacticals (or +pseudo-tacticals), shared across most \ssr{} tactics, and thus form +the foundation of the \ssr{} proof language. 
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Bookkeeping}\label{ssec:profstack}
+\idx{move: \dots{}}
+\idx{move=> \dots{}}
+\idx{move: \dots{} => \dots{}}
+\idx{\dots{} in \dots{}}
+
+During the course of a proof \Coq{} always presents the user with
+a \emph{sequent} whose general form is
+\begin{displaymath}\begin{array}{l}
+%\arrayrulecolor{dkviolet}
+c_i\ \ssrC{:}\ T_i \\
+\dots\\
+d_j\ \ssrC{:=}\ e_j\ \ssrC{:}\ T_j \\
+\dots\\
+F_k\ \ssrC{:}\ P_k \\
+\dots \\[3pt]
+\hline\hline\\[-8pt]
+\ssrC{forall}\ \ssrC{(}x_\ell\ \ssrC{:}\ T_\ell\ssrC{)}\ \dots,\\
+\ssrC{let}\ y_m\ \ssrC{:=}\ b_m\ \ssrC{in}\ \dots\ \ssrC{in}\\
+P_n\ \ssrC{->}\ \dots\ \ssrC{->}\ C
+\end{array}\end{displaymath}
+The \emph{goal} to be proved appears below the double line; above the line is
+the \emph{context} of the sequent, a set of declarations of
+\emph{constants}~$c_i$, \emph{defined constants}~$d_j$, and
+\emph{facts}~$F_k$ that can be used to prove the goal (usually, $T_i,
+T_j\;:\;\ssrC{Type}$ and $P_k\;:\;\ssrC{Prop}$). The various kinds of
+declarations can come in any order. The top part of the context
+consists of declarations produced by the \ssrC{Section} commands
+\ssrC{Variable}, \ssrC{Let}, and \ssrC{Hypothesis}. This \emph{section context}
+is never affected by the \ssr{} tactics: they only operate on
+the lower part --- the \emph{proof context}.
+As in the figure above, the goal often decomposes into a series of
+(universally) quantified \emph{variables}
+$\ssrC{(}x_\ell\;\ssrC{:}\;T_\ell\ssrC{)}$, local \emph{definitions}
+$\ssrC{let}\;y_m\;\ssrC{:=}\;b_m\;\ssrC{in}$, and \emph{assumptions}
+$P_n\;\ssrC{->}$, and a \emph{conclusion}~$C$ (as in the context, variables,
+definitions, and assumptions can appear in any order). The conclusion
+is what actually needs to be proved --- the rest of the goal can be
+seen as a part of the proof context that happens to be ``below the line''.
+
+However, although they are logically equivalent, there are fundamental
+differences between constants and facts on the one hand, and variables
+and assumptions on the other. Constants and facts are
+\emph{unordered}, but \emph{named} explicitly in the proof text;
+variables and assumptions are \emph{ordered}, but \emph{unnamed}: the
+display names of variables may change at any time because of
+$\alpha$-conversion.
+
+Similarly, basic deductive steps such as \ssrC{apply} can only operate on
+the goal because the Gallina terms that control their action (e.g.,
+the type of the lemma used by \ssrC{apply}) only provide unnamed bound
+variables.\footnote{Thus scripts that depend on bound variable names, e.g.,
+via \ssrC{intros} or \ssrC{with}, are inherently fragile.} Since the proof
+script can only refer directly to the context, it must constantly
+shift declarations from the goal to the context and conversely in
+between deductive steps.
+
+In \ssr{} these moves are performed by two \emph{tacticals} `\ssrC{=>}'
+and `\ssrC{:}', so that the bookkeeping required by a deductive step can
+be directly associated to that step, and that tactics in an \ssr{}
+script correspond to actual logical steps in the proof rather than
+merely shuffling facts. Still, some isolated bookkeeping is unavoidable,
+such as naming variables and assumptions at the beginning of a proof.
+\ssr{} provides a specific \ssrC{move} tactic for this purpose.
+
+Now \ssrC{move} does essentially nothing: it is mostly a placeholder for
+`\ssrC{=>}' and `\ssrC{:}'. The `\ssrC{=>}' tactical moves variables, local
+definitions, and assumptions to the context, while the `\ssrC{:}'
+tactical moves facts and constants to the goal. For example, the proof
+of\footnote{The name \ssrC{subnK} reads as
+``right cancellation rule for \ssrC{nat} subtraction''.}
+\begin{lstlisting}
+ Lemma |*subnK*| : forall m n, n <= m -> m - n + n = m. 
+\end{lstlisting}\noindent
+might start with
+\begin{lstlisting}
+ move=> m n le_n_m.
+\end{lstlisting}
+where \ssrC{move} does nothing, but \ssrL|=> m n le_n_m| changes the
+variables and assumption of the goal into the constants \ssrC{m n : nat}
+and the fact \ssrL|le_n_m : n <= m|, thus exposing the conclusion\\
+ \ssrC{m - n + n = m}.
+
+The `\ssrC{:}' tactical is the converse of `\ssrC{=>}': it removes facts
+and constants from the context by turning them into variables and assumptions.
+Thus
+\begin{lstlisting}
+ move: m le_n_m.
+\end{lstlisting}
+turns back \ssrC{m} and \ssrL|le_n_m| into a variable and an assumption, removing
+them from the proof context, and changing the goal to
+\begin{lstlisting}
+ forall m, n <= m -> m - n + n = m.
+\end{lstlisting}
+which can be proved by induction on \ssrC{n} using \ssrC{elim: n}.
+
+\noindent
+Because they are tacticals, `\ssrC{:}' and `\ssrC{=>}' can be combined, as in
+\begin{lstlisting}
+ move: m le_n_m => p le_n_p.
+\end{lstlisting}
+which simultaneously renames \ssrL|m| and \ssrL|le_n_m| into \ssrL|p| and \ssrL|le_n_p|,
+respectively, by first turning them into unnamed variables, then
+turning these variables back into constants and facts.
+
+Furthermore, \ssr{} redefines the basic \Coq{} tactics \ssrC{case},
+\ssrC{elim}, and \ssrC{apply} so that they can take better advantage of
+`\ssrC{:}' and `\ssrC{=>}'. In their \ssr{} variants, these tactics operate
+on the first variable or constant of the goal and they do not use or
+change the proof context. The `\ssrC{:}' tactical is used to operate on
+an element in the context. For instance the proof of \ssrC{subnK} could
+continue with
+\begin{lstlisting}
+ elim: n.
+\end{lstlisting}
+instead of \ssrC{elim n}; this has the advantage of
+removing \ssrC{n} from the context. 
Better yet, this \ssrC{elim} can be combined +with previous \ssrC{move} and with the branching version of the \ssrC{=>} tactical +(described in~\ref{ssec:intro}), +to encapsulate the inductive step in a single command: +\begin{lstlisting} + elim: n m le_n_m => [|n IHn] m => [_ | lt_n_m]. +\end{lstlisting} +which breaks down the proof into two subgoals, +\begin{lstlisting} + m - 0 + 0 = m +\end{lstlisting} +given \ssrC{m : nat}, and +\begin{lstlisting} + m - S n + S n = m +\end{lstlisting} +given \ssrC{m n : nat}, \ssrL|lt_n_m : S n <= m|, and +\begin{lstlisting} + IHn : forall m, n <= m -> m - n + n = m. +\end{lstlisting} +The '\ssrC{:}' and `\ssrC{=>}' tacticals can be explained very simply +if one views the goal as a stack of variables and assumptions piled +on a conclusion: +\begin{itemize} +\item {\tac} \ssrC{:} $a$ $b$ $c$ pushes the context constants $a$, $b$, $c$ +as goal variables \emph{before} performing {\tac}. +\item {\tac} \ssrC{=>} $a$ $b$ $c$ pops the top three goal variables as +context constants $a$, $b$, $c$, \emph{after} {\tac} +has been performed. +\end{itemize} +These pushes and pops do not need to balance out as in the examples above, +so +\begin{lstlisting} + move: m le_n_m => p. +\end{lstlisting} +would rename \ssrC{m} into \ssrC{p}, but leave an extra assumption \ssrC{n <= p} +in the goal. + +Basic tactics like \ssrC{apply} and \ssrC{elim} can also be used without the +'\ssrC{:}' tactical: for example we can directly start a proof of \ssrC{subnK} +by induction on the top variable \ssrC{m} with +\begin{lstlisting} + elim=> [|m IHm] n le_n. 
+\end{lstlisting} + +\noindent +The general form of the localization tactical \ssrC{in} is also best +explained in terms of the goal stack: + +\begin{center} + {\tac} \ssrC{in a H1 H2 *.} +\end{center} + +is basically equivalent to + +\begin{center} + \ssrC{move: a H1 H2;} {\tac} \ssrC{=> a H1 H2.} +\end{center} + +with two differences: the \ssrC{in} tactical will preserve the body of \ssrC{a} if +\ssrC{a} is a defined constant, and if the `\ssrC{*}' is omitted it +will use a temporary abbreviation to hide the statement of the goal +from \ssrC{/*tactic*/}. + +The general form of the \ssrC{in} tactical can be used directly with +the \ssrC{move}, \ssrC{case} and \ssrC{elim} tactics, so that one can write +\begin{lstlisting} + elim: n => [|n IHn] in m le_n_m *. +\end{lstlisting} +instead of +\begin{lstlisting} + elim: n m le_n_m => [|n IHn] m le_n_m. +\end{lstlisting} +This is quite useful for inductive proofs that involve many facts. + +\noindent See section \ref{ssec:gloc} for the general syntax and presentation +of the \ssrC{in} tactical. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{The defective tactics}\label{ssec:basictac} + +In this section we briefly present the three basic tactics performing +context manipulations and the main backward chaining tool. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{The \ssrC{move} tactic.}\label{sssec:move} +\idx{move} + +The \ssrC{move} tactic, in its +defective form, behaves like the primitive \ssrC{hnf} \Coq{} tactic. For +example, such a defective: +\begin{lstlisting} + move. +\end{lstlisting} +exposes the first assumption in the goal, i.e. its changes the goal +\ssrC{\~ False} into \ssrC{False -> False}. + +More precisely, the \ssrC{move} tactic inspects the goal and does nothing +(\ssrC{idtac}) if an introduction step is possible, i.e. if the +goal is a product or a \ssrC{let}$\dots$\ssrC{in}, and performs \ssrC{hnf} +otherwise. 
+ +Of course this tactic is most often used in combination with the +bookkeeping tacticals (see section \ref{ssec:intro} and +\ref{ssec:discharge}). These combinations mostly subsume the \ssrC{intros}, +\ssrC{generalize}, \ssrC{revert}, \ssrC{rename}, \ssrC{clear} and +\textcolor{dkblue}{\texttt{pattern}} tactics. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{The \ssrC{case} tactic.} +\idx{case: \dots{}} + +The \ssrC{case} tactic performs +\emph{primitive case analysis} on (co)inductive types; specifically, +it destructs the top variable or assumption of the goal, +exposing its constructor(s) and its arguments, as well as setting the value +of its type family indices if it belongs to a type family +(see section \ref{ssec:typefam}). + +The \ssr{} \ssrC{case} tactic has a special behavior on +equalities. +If the top assumption of the goal is an equality, the \ssrC{case} tactic +``destructs'' it as a set of equalities between the constructor +arguments of its left and right hand sides, as per the +tactic \ssrC{injection}. +For example, \ssrC{case} changes the goal +\begin{lstlisting} + (x, y) = (1, 2) -> G. +\end{lstlisting} +into +\begin{lstlisting} + x = 1 -> y = 2 -> G. +\end{lstlisting} + +Note also that the case of \ssr{} performs \ssrC{False} +elimination, even if no branch is generated by this case operation. +Hence the command: +\begin{lstlisting} + case. +\end{lstlisting} +on a goal of the form \ssrC{False -> G} will succeed and prove the goal. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{The \ssrC{elim} tactic.} +\idx{elim: \dots{}} + +The \ssrC{elim} tactic performs +inductive elimination on inductive types. +The defective: +\begin{lstlisting} + elim. +\end{lstlisting} +tactic performs inductive elimination on a goal whose top assumption +has an inductive type. 
For example on goal of the form: +\begin{lstlisting} + forall n : nat, m <= n +\end{lstlisting} + in a context containing \ssrC{m : nat}, the +\begin{lstlisting} + elim. +\end{lstlisting} +tactic produces two goals, +\begin{lstlisting} + m <= 0 +\end{lstlisting} +on one hand and +\begin{lstlisting} + forall n : nat, m <= n -> m <= S n +\end{lstlisting} +on the other hand. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{The \ssrC{apply} tactic.}\label{sssec:apply} +\idx{apply: \dots{}} + +The \ssrC{apply} tactic is the main +backward chaining tactic of the proof system. It takes as argument any +\ssrC{/*term*/} and applies it to the goal. +Assumptions in the type of \ssrC{/*term*/} that don't directly match the +goal may generate one or more subgoals. + +In fact the \ssr{} tactic: +\begin{lstlisting} + apply. +\end{lstlisting} +is a synonym for: +\begin{lstlisting} + intro top; first [refine top | refine (top _) | refine (top _ _) | ...]; clear top. +\end{lstlisting} +where \ssrC{top} is fresh name, and the sequence of \ssrC{refine} tactics +tries to catch the appropriate number of wildcards to be inserted. +Note that this use of the \ssrC{refine} tactic implies that the tactic +tries to match the goal up to expansion of +constants and evaluation of subterms. + +\ssr{}'s \ssrC{apply} has a special behaviour on goals containing +existential metavariables of sort \ssrC{Prop}. Consider the +following example: +\begin{lstlisting} +Goal (forall y, 1 < y -> y < 2 -> exists x : { n | n < 3 }, proj1_sig x > 0). +move=> y y_gt1 y_lt2; apply: (ex_intro _ (exist _ y _)). + by apply: gt_trans _ y_lt2. +by move=> y_lt3; apply: lt_trans y_gt1. +\end{lstlisting} +Note that the last \ssrC{_} of the tactic \ssrC{apply: (ex_intro _ (exist _ y _))} +represents a proof that \ssrC{y < 3}. 
Instead of generating the following
+goal
+\begin{lstlisting}
+ 0 < (n:=3) (m:=y) ?54
+\end{lstlisting}
+\noindent the system tries to prove \ssrC{y < 3} calling the \ssrC{trivial}
+tactic. If it succeeds, let's say because the context contains
+\ssrC{H : y < 3}, then the system generates the following goal:
+\begin{lstlisting}
+ 0 < proj1_sig (exist (fun n => n < 3) y H)
+\end{lstlisting}
+\noindent Otherwise the missing proof is considered to be irrelevant, and
+is thus discharged, generating the following goals:
+\begin{lstlisting}
+ y < 3
+ forall H : y < 3, 0 < proj1_sig (exist (fun n => n < 3) y H)
+\end{lstlisting}
+Last, the user can replace the \ssrC{trivial} tactic by defining
+an Ltac expression named \ssrC{ssrautoprop}.
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Discharge}\label{ssec:discharge}
+\idx{\dots{} : \dots{}}
+
+The general syntax of the discharging tactical `\ssrC{:}' is:
+\begin{center}
+ {\tac} \optional{\ssrN{ident}} \ssrC{:} \ssrN[1]{d-item} $\dots$ \ssrN[n]{d-item} \optional{\ssrN{clear-switch}}
+\end{center}
+where $n > 0$, and \ssrN{d-item} and \ssrN{clear-switch} are defined as
+\begin{longtable}{rcl}
+\ssrN{d-item} & ::= & \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} {\term} \\
+\ssrN{clear-switch}& ::=& \ssrC{\{} \ssrN[1]{ident}\, \ldots\, \ssrN[m]{ident} \ssrC{\}}
+\end{longtable}
+with the following requirements:
+\begin{itemize}
+\item {\tac} must be one of the four basic tactics described
+ in~\ref{ssec:basictac}, i.e., \ssrC{move}, \ssrC{case}, \ssrC{elim} or \ssrC{apply},
+ the \ssrC{exact} tactic (section \ref{ssec:termin}),
+ the \ssrC{congr} tactic (section \ref{ssec:congr}), or the application
+ of the \emph{view} tactical `\ssrC{/}' (section \ref{ssec:assumpinterp})
+ to one of \ssrC{move}, \ssrC{case}, or \ssrC{elim}. 
+\item The optional \ssrN{ident} specifies \emph{equation generation} + (section \ref{ssec:equations}), and is only allowed if {\tac} + is \ssrC{move}, \ssrC{case} or \ssrC{elim}, or the application of the + view tactical `\ssrC{/}' (section \ref{ssec:assumpinterp}) + to \ssrC{case} or \ssrC{elim}. +\item An \ssrN{occ-switch} selects occurrences of {\term}, + as in \ref{sssec:occselect}; \ssrN{occ-switch} is not allowed if + {\tac} is \ssrC{apply} or \ssrC{exact}. +\item A clear item \ssrN{clear-switch} specifies facts and constants to be + deleted from the proof context (as per the \ssrC{clear} tactic). +\end{itemize} +The `\ssrC{:}' tactical first \emph{discharges} all the \ssrN{d-item}s, +right to left, and then performs {\tac}, i.e., for each \ssrN{d-item}, +starting with $\ssrN[n]{d-item}$: +\begin{enumerate} +\item The \ssr{} matching algorithm described in section~\ref{ssec:set} + is used to find occurrences of {\term} in the goal, + after filling any holes `\ssrC{_}' in {\term}; however if {\tac} + is \ssrC{apply} or \ssrC{exact} a different matching algorithm, + described below, is used + \footnote{Also, a slightly different variant may be used for the first + \ssrN{d-item} of \ssrC{case} and \ssrC{elim}; see section~\ref{ssec:typefam}.}. +\item~\label{enum:gen} These occurrences are replaced by a new + variable; in particular, + if {\term} is a fact, this adds an assumption to the goal. +\item~\label{enum:clr} If {\term} is \emph{exactly} the name of a constant + or fact in the proof context, it is deleted from the context, + unless there is an \ssrN{occ-switch}. +\end{enumerate} +Finally, {\tac} is performed just after $\ssrN[1]{d-item}$ has been +generalized --- +that is, between steps \ref{enum:gen} and \ref{enum:clr} for $\ssrN[1]{d-item}$. +The names listed in the final \ssrN{clear-switch} (if it is present) +are cleared first, before $\ssrN[n]{d-item}$ is discharged. 
+ +\noindent +Switches affect the discharging of a \ssrN{d-item} as follows: +\begin{itemize} +\item An \ssrN{occ-switch} restricts generalization (step~\ref{enum:gen}) + to a specific subset of the occurrences of {\term}, as per + \ref{sssec:occselect}, and prevents clearing (step~\ref{enum:clr}). +\item All the names specified by a \ssrN{clear-switch} are deleted from the + context in step~\ref{enum:clr}, possibly in addition to {\term}. +\end{itemize} +For example, the tactic: +\begin{lstlisting} + move: n {2}n (refl_equal n). +\end{lstlisting} +\begin{itemize} +\item first generalizes \ssrC{(refl_equal n : n = n)}; +\item then generalizes the second occurrence of \ssrC{n}. +\item finally generalizes all the other occurrences of \ssrC{n}, + and clears \ssrC{n} from the proof context + (assuming \ssrC{n} is a proof constant). +\end{itemize} +Therefore this tactic changes any goal \ssrC{G} into +\begin{lstlisting} + forall n n0 : nat, n = n0 -> G. +\end{lstlisting} +where the name \ssrC{n0} is picked by the \Coq{} display function, +and assuming \ssrC{n} appeared only in~\ssrC{G}. + +Finally, note that a discharge operation generalizes defined constants +as variables, and not as local definitions. To override this behavior, +prefix the name of the local definition with a \ssrC{@}, +like in \ssrC{move: @n}. + +This is in contrast with the behavior of the \ssrC{in} tactical (see section +\ref{ssec:gloc}), which preserves local definitions by default. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Clear rules} + +The clear step will fail if {\term} is a proof constant that +appears in other facts; in that case either the facts should be +cleared explicitly with a \ssrN{clear-switch}, or the clear step should be +disabled. The latter can be done by adding an \ssrN{occ-switch} or simply by +putting parentheses around {\term}: both +\begin{lstlisting} + move: (n). +\end{lstlisting} +and +\begin{lstlisting} + move: {+}n. 
+\end{lstlisting} +generalize \ssrC{n} without clearing \ssrC{n} from the proof context. + +The clear step will also fail if the \ssrN{clear-switch} contains a +\ssrN{ident} that is not in the \emph{proof} context. +Note that \ssr{} never clears a section constant. + +If {\tac} is \ssrC{move} or \ssrC{case} and an equation \ssrN{ident} is given, +then clear (step~\ref{enum:clr}) for $\ssrN[1]{d-item}$ is suppressed +(see section \ref{ssec:equations}). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Matching for \ssrC{apply} and \ssrC{exact}}\label{sss:strongapply} + +The matching algorithm for \ssrN{d-item}s of the \ssr{} \ssrC{apply} and +\ssrC{exact} tactics +exploits the type of $\ssrN[1]{d-item}$ to interpret +wildcards in the other \ssrN{d-item} and to determine which occurrences of +these should be generalized. +Therefore, \ssrN{occur switch}es are not needed for \ssrC{apply} and \ssrC{exact}. + +Indeed, the \ssr{} tactic \ssrC{apply: H x} is equivalent to +\begin{lstlisting} + refine (@H _ ... _ x); clear H x +\end{lstlisting} +with an appropriate number of wildcards between \ssrC{H} and~\ssrC{x}. + +Note that this means that matching for \ssrC{apply} and \ssrC{exact} has +much more context to interpret wildcards; in particular it can accommodate +the `\ssrC{_}' \ssrN{d-item}, which would always be rejected after `\ssrC{move:}'. +For example, the tactic +\begin{lstlisting} + apply: trans_equal (Hfg _) _. +\end{lstlisting} +transforms the goal \ssrC{f a = g b}, whose context contains +\ssrC{(Hfg : forall x, f x = g x)}, into \ssrC{g a = g b}. +This tactic is equivalent (see section \ref{ssec:profstack}) to: +\begin{lstlisting} + refine (trans_equal (Hfg _) _). +\end{lstlisting} +and this is a common idiom for applying transitivity on the left hand side +of an equation. 
+ +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{The \ssrC{abstract} tactic}\label{ssec:abstract} +\idx{abstract: \dots{}} + +The \ssrC{abstract} tactic assigns an abstract constant previously introduced with +the \ssrC{[: name ]} intro pattern (see section~\ref{ssec:intro}, +page~\pageref{ssec:introabstract}). +In a goal like the following: +\begin{lstlisting} + m : nat + abs : <hidden> + n : nat + ============= + m < 5 + n +\end{lstlisting} +The tactic \ssrC{abstract: abs n} first generalizes the goal with respect to +\ssrC{n} (that is not visible to the abstract constant \ssrC{abs}) and then +assigns \ssrC{abs}. The resulting goal is: +\begin{lstlisting} + m : nat + n : nat + ============= + m < 5 + n +\end{lstlisting} +Once this subgoal is closed, all other goals having \ssrC{abs} in their context +see the type assigned to \ssrC{abs}. In this case: +\begin{lstlisting} + m : nat + abs : forall n, m < 5 + n +\end{lstlisting} + +For a more detailed example the user should refer to section~\ref{sssec:have}, +page~\pageref{sec:havetransparent}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Introduction}\label{ssec:intro} +\idx{\dots{} => \dots{}} + +The application of a tactic to a given goal can generate +(quantified) variables, assumptions, or definitions, which the user may want to +\emph{introduce} as new facts, constants or defined constants, respectively. +If the tactic splits the goal into several subgoals, +each of them may require the introduction of different constants and facts. +Furthermore it is very common to immediately decompose +or rewrite with an assumption instead of adding it to the context, +as the goal can often be simplified and even +proved after this. 
+ +All these operations are performed by the introduction tactical +`\ssrC{=>}', whose general syntax is +\begin{center} + {\tac} \ssrC{=>} \ssrN[1]{i-item} $\dots$ \ssrN[n]{i-item} +\end{center} +where {\tac} can be any tactic, $n > 0$ and +\begin{longtable}{rcl} + \ssrN{i-item}& ::=& \ssrN{i-pattern} {\optsep} \ssrN{s-item} {\optsep} \ssrN{clear-switch} {\optsep} \ssrC{/}{\term} \\ + \ssrN{s-item}& ::=& \ssrC{/=} {\optsep} \ssrC{//} {\optsep} \ssrC{//=} \\ + \ssrN{i-pattern}& ::=& \ssrN{ident} {\optsep} \ssrC{_} {\optsep} \ssrC{?} {\optsep} \ssrC{*} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{->} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{<-} {\optsep} \\ + && \ssrC{[} \ssrN[1]{i-item}$^*$ \ssrC{|} $\dots$ \ssrC{|} \ssrN[m]{i-item}$^*$ \ssrC{]} {\optsep} \ssrC{-} {\optsep} \ssrC{[:} \ssrN{ident}$^+$ \ssrC{]} +\end{longtable} + +The `\ssrC{=>}' tactical first executes {\tac}, then the +\ssrN{i-item}s, left to right, i.e., starting from $\ssrN[1]{i-item}$. An +\ssrN{s-item} specifies a simplification operation; a $\ssrN{clear +switch}$ specifies context pruning as in~\ref{ssec:discharge}. The +\ssrN{i-pattern}s can be seen as a variant of \emph{intro patterns}~\ref{intros-pattern}: +each performs an introduction operation, i.e., pops some variables or +assumptions from the goal. + +An \ssrN{s-item} can simplify the set of subgoals or the subgoal themselves: +\begin{itemize} +\item \ssrC{//} removes all the ``trivial'' subgoals that can be resolved by + the \ssr{} tactic \ssrC{done} described in~\ref{ssec:termin}, i.e., it + executes \ssrC{try done}. +\item \ssrC{/=} simplifies the goal by performing partial evaluation, as + per the tactic \ssrC{simpl}.\footnote{Except \ssrC{/=} does not + expand the local definitions created by the \ssr{} \ssrC{in} tactical.} +\item \ssrC{//=} combines both kinds of simplification; it is equivalent + to \ssrC{/= //}, i.e., \ssrC{simpl; try done}. 
+\end{itemize} +When an \ssrN{s-item} bears a \ssrN{clear-switch}, then the \ssrN{clear-switch} is +executed \emph{after} the \ssrN{s-item}, e.g., \ssrL|{IHn}//| will solve +some subgoals, possibly using the fact \ssrL|IHn|, and will erase \ssrL|IHn| +from the context of the remaining subgoals. + +The last entry in the \ssrN{i-item} grammar rule, \ssrC{/}{\term}, +represents a view (see section~\ref{sec:views}). If $\ssrN[k+1]{i-item}$ +is a view \ssrN{i-item}, the view is applied to the assumption in top +position once $\ssrN[1]{i-item} \dots \ssrN[k]{i-item}$ have been performed. + +The view is applied to the top assumption. + +\ssr{} supports the following \ssrN{i-pattern}s: +\begin{itemize} +\item \ssrN{ident} pops the top variable, assumption, or local definition into + a new constant, fact, or defined constant \ssrN{ident}, respectively. + Note that defined constants cannot be introduced when + $\delta$-expansion is required to expose the top variable or assumption. +\item \ssrC{?} pops the top variable into an anonymous constant or fact, + whose name is picked by the tactic interpreter. + \ssr{} only generates names that + cannot appear later in the user script.\footnote{\ssr{} reserves + all identifiers of the form ``\ssrC{_x_}'', which is used for such + generated names.} +\item \ssrC{_} pops the top variable into an anonymous constant that will be + deleted from + the proof context of all the subgoals produced by the \ssrC{=>} tactical. + They should thus never be displayed, except in an error message + if the constant is still actually used in the goal or context after + the last \ssrN{i-item} has been executed (\ssrN{s-item}s can erase goals + or terms where the constant appears). +\item \ssrC{*} pops all the remaining apparent variables/assumptions + as anonymous constants/facts. 
Unlike \ssrC{?} and \ssrC{move} the \ssrC{*} + \ssrN{i-item} does not expand definitions in the goal to expose + quantifiers, so it may be useful to repeat a \ssrC{move=> *} tactic, + e.g., on the goal +\begin{lstlisting} + forall a b : bool, a <> b +\end{lstlisting} +a first \ssrC{move=> *} adds only \ssrC{_a_ : bool} and \ssrC{_b_ : bool} to +the context; it takes a second \ssrC{move=> *} to add +\ssrC{_Hyp_ : _a_ = _b_}. +\item $[\ssrN{occ-switch}]$\ssrC{->} (resp. $[\ssrN{occ-switch}]$\ssrC{<-}) + pops the top assumption + (which should be a rewritable proposition) into an anonymous fact, + rewrites (resp. rewrites right to left) the goal with this fact + (using the \ssr{} \ssrC{rewrite} tactic described in section~\ref{sec:rw}, + and honoring the optional occurrence selector), + and finally deletes the anonymous fact from the context. +\item\ssrC{[ $\ssrN[1]{i-item}^*$ | $\dots$ | $\ssrN[m]{i-item}^*$ ]}, + when it is the very \emph{first} \ssrN{i-pattern} after ${\tac}\;\ssrC{=>}$ + tactical \emph{and} {\tac} is not a \ssrC{move}, is a \emph{branching} + \ssrN{i-pattern}. It executes + the sequence $\ssrN[i]{i-item}^*$ on the $i^{\rm th}$ + subgoal produced by {\tac}. The execution of {\tac} + should thus generate exactly $m$ + subgoals, unless the \ssrC{[$\dots$]} \ssrN{i-pattern} comes after an initial + \ssrC{//} or \ssrC{//=} \ssrN{s-item} that closes some of the goals produced by + {\tac}, in which case exactly $m$ subgoals should remain after the + \ssrN{s-item}, or we have the trivial branching \ssrN{i-pattern} \ssrC{[]}, + which always does nothing, regardless of the number of remaining subgoals. +\item\ssrC{[ $\ssrN[1]{i-item}^*$ | $\dots$ | $\ssrN[m]{i-item}^*$ ]}, when it is + \emph{not} the first \ssrN{i-pattern} or when {\tac} is a + \ssrC{move}, is a \emph{destructing} \ssrN{i-pattern}. It starts by + destructing the top variable, using the \ssr{} \ssrC{case} tactic + described in~\ref{ssec:basictac}. 
It then behaves as the + corresponding branching \ssrN{i-pattern}, executing the sequence + $\ssrN[i]{i-item}^*$ in the $i^{\rm th}$ subgoal generated by the case + analysis; unless we have the trivial destructing \ssrN{i-pattern} + \ssrC{[]}, the latter should generate exactly $m$ subgoals, i.e., the + top variable should have an inductive type with exactly $m$ + constructors.\footnote{More precisely, it should have a quantified + inductive type with $a$ assumptions and $m - a$ constructors.} + While it is good style to use the $\ssrN[i]{i-item}^*$ + to pop the variables and assumptions corresponding to each constructor, + this is not enforced by \ssr{}. +\item\ssrC{-} does nothing, but counts as an intro pattern. It can also + be used to force the interpretation of + \ssrC{[ $\ssrN[1]{i-item}^*$ | $\dots$ | $\ssrN[m]{i-item}^*$ ]} + as a case analysis like in \ssrC{move=> -[H1 H2]}. It can also be used + to indicate explicitly the link between a view and a name like in + \ssrC{move=> /eqP-H1}. Last, it can serve as a separator between + views. Section~\ref{ssec:multiview} explains in which respect + the tactic \ssrC{move=> /v1/v2} differs from the tactic + \ssrC{move=> /v1-/v2}. +\item\ssrC{[: $\ssrN{ident}^+$ ]} introduces in the context an abstract constant + for each \ssrN{ident}. Its type has to be fixed later on by using + the \ssrC{abstract} tactic (see page~\pageref{ssec:abstract}). Before then + the type displayed is \ssrC{<hidden>}.\label{ssec:introabstract} +\end{itemize} +Note that \ssr{} does not support the syntax +$\ssrC{(}\ssrN{ipat}\ssrC{,}\dots\ssrC{,}\ssrN{ipat}\ssrC{)}$ for destructing +intro-patterns. + +Clears are deferred until the end of the intro pattern. For +example, given the goal: +\begin{lstlisting} +x, y : nat +================== +0 < x = true -> (0 < x) && (y < 2) = true +\end{lstlisting} +the tactic \ssrC{move=> \{x\} ->} successfully rewrites the goal and +deletes \ssrC{x} and the anonymous equation. 
The goal is thus turned into: +\begin{lstlisting} +y : nat +================== +true && (y < 2) = true +\end{lstlisting} +If the cleared names are reused in the same intro pattern, a renaming +is performed behind the scenes. + +Facts mentioned in a clear switch must be valid +names in the proof context (excluding the section context). + +The rules for interpreting branching and destructing \ssrN{i-pattern} +are motivated by the fact that it would be pointless to have a branching +pattern if {\tac} is a \ssrC{move}, and in most of the remaining cases +{\tac} is \ssrC{case} or \ssrC{elim}, which implies destruction. +The rules above imply that +\begin{lstlisting} + move=> [a b]. + case=> [a b]. + case=> a b. +\end{lstlisting} +are all equivalent, so which one to use is a matter of style; +\ssrC{move} should be used for casual decomposition, +such as splitting a pair, and \ssrC{case} should be used for actual decompositions, +in particular for type families (see~\ref{ssec:typefam}) +and proof by contradiction. + +The trivial branching \ssrN{i-pattern} can be used to force the branching +interpretation, e.g., +\begin{lstlisting} + case=> [] [a b] c. + move=> [[a b] c]. + case; case=> a b c. +\end{lstlisting} +are all equivalent. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Generation of equations}\label{ssec:equations} +\idx{move eq : \dots{}} + +The generation of named equations option stores the definition of a +new constant as an equation. The tactic: +\begin{lstlisting} + move En: (size l) => n. +\end{lstlisting} +where \ssrC{l} is a list, replaces \ssrC{size l} by \ssrC{n} in the goal and +adds the fact \ssrC{En : size l = n} to the context. + This is quite different from: +\begin{lstlisting} + pose n := (size l). +\end{lstlisting} +which generates a definition \ssrC{n := (size l)}. 
It is not possible to +generalize or +rewrite such a definition; on the other hand, it is automatically +expanded during +computation, whereas expanding the equation \ssrC{En} requires explicit +rewriting. + +The use of this equation name generation option with a \ssrC{case} or an +\ssrC{elim} tactic changes the status of the first \iitem{}, in order to +deal with the possible parameters of the constants introduced. + +On the +goal \ssrC{a <> b} where \ssrC{a, b} are natural numbers, the tactic: +\begin{lstlisting} + case E : a => [|n]. +\end{lstlisting} +generates two subgoals. The equation \ssrC{E : a = 0} (resp. \ssrC{E : a = + S n}, and the constant \ssrC{n : nat}) has been added to +the context of the goal \ssrC{0 <> b} (resp. \ssrC{S n <> b}). + +If the user does not provide a branching \iitem{} as first \iitem{}, +or if the \iitem{} does not provide enough names for the arguments of +a constructor, +then the constants generated are introduced under fresh \ssr{} names. +For instance, on the goal \ssrC{a <> b}, the tactic: +\begin{lstlisting} + case E : a => H. +\end{lstlisting} +also generates two subgoals, both requiring a proof of \ssrC{False}. + The hypotheses \ssrC{E : a = 0} and +\ssrC{H : 0 = b} (resp. \ssrC{E : a = S _n_} and +\ssrC{H : S _n_ = b}) have been added to the context of the first +subgoal (resp. the second subgoal). + +Combining the generation of named equations mechanism with the +\ssrC{case} tactic strengthens the power of a case analysis. On the other +hand, when combined with the \ssrC{elim} tactic, this feature is mostly +useful for +debug purposes, to trace the values of decomposed parameters and +pinpoint failing branches. 
+ +% This feature is also useful +% to analyse and debug generate-and-test style scripts that prove program +% properties by generating a large set of input patterns and uniformly +% solving the corresponding subgoals by computation and rewriting, e.g, + +% \begin{lstlisting} +% case: et => [|e' et]; first by case: s. +% case: e => //; case: b; case: w. +% \end{lstlisting} +% If the above sequence fails, then it's easy enough to replace the line +% above with +% \begin{lstlisting} +% case: et => [|e' et]. +% case Ds: s; case De: e => //; case Db: b; case Dw: w=> [|s' w'] //=. +% \end{lstlisting} +% Then the first subgoal that appears will be the failing one, and the +% equations \ssrC{Ds}, \ssrC{De}, \ssrC{Db} +% and \ssrC{Dw} will pinpoint its branch. When the constructors of +% the decomposed type have arguments (like \ssrC{w : (seq nat)} +% above) these need to be +% introduced in order to generate the equation, so there should +% always be an explicit \iitem{} (\ssrC{\[|s' w'\]} above) that +% assigns names to these arguments. If this \iitem{} +% is omitted the arguments are introduced with default +% name; this +% feature should be +% avoided except for quick debugging runs (it has some uses in complex tactic +% sequences, however). + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Type families}\label{ssec:typefam} +\idx{case: \dots{} / \dots{}} + +When the top assumption of a goal has an inductive type, two +specific operations are possible: the case analysis performed by the +\ssrC{case} tactic, and the application of an induction principle, +performed by the \ssrC{elim} tactic. When this top assumption has an +inductive type, which is moreover an instance of a type family, \Coq{} +may need help from the user to specify which occurrences of the parameters +of the type should be substituted. 
+
+A specific \ssrC{/} switch indicates the type family parameters of the
+type of a \ditem{} immediately following this \ssrC{/} switch, using the
+syntax:
+
+\begin{center}
+ \ssrC{[} \ssrC{case} {\optsep} \ssrC{elim} \ssrC{]:} \ssrN{d-item}$^+$ \ssrC{/} \ssrN{d-item}$^*$
+\end{center}
+
+The \ssrN{d-item}s on the right side of the \ssrC{/} switch are discharged
+as described in section \ref{ssec:discharge}. The case analysis or
+elimination will be done on the type of the top assumption after these
+discharge operations.
+
+Every \ssrN{d-item} preceding the \ssrC{/} is interpreted as arguments of this
+type, which should be an instance of an inductive type family. These terms are
+not actually generalized, but rather selected for substitution. Occurrence
+switches can be used to restrict the substitution. If a {\term} is left
+completely implicit (e.g. writing just $\ssrC{\_}$), then a pattern is inferred
+looking at the type of the top assumption. This allows for the compact syntax
+\ssrC{case: \{2\}\_ / eqP}, where \ssrC{\_} is interpreted as \ssrC{(\_ == \_)}. Moreover
+if the \ssrN{d-item}s list is too short, it is padded with an initial
+sequence of $\ssrC{\_}$ of the right length.
+
+Here is a small example on lists. We define first a function which
+adds an element at the end of a given list.
+\begin{lstlisting}
+ Require Import List.
+
+ Section LastCases.
+ Variable A : Type.
+
+ Fixpoint |*add_last*|(a : A)(l : list A): list A :=
+ match l with
+ |nil => a :: nil
+ |hd :: tl => hd :: (add_last a tl)
+ end.
+\end{lstlisting}
+Then we define an inductive predicate for
+case analysis on lists according to their last element:
+\begin{lstlisting}
+ Inductive |*last_spec*| : list A -> Type :=
+ | LastSeq0 : last_spec nil
+ | LastAdd s x : last_spec (add_last x s).
+
+ Theorem |*lastP*| : forall l : list A, last_spec l.
+\end{lstlisting}
+Applied to the goal:
+\begin{lstlisting}
+ Goal forall l : list A, (length l) * 2 = length (app l l).
+\end{lstlisting} +the command: +\begin{lstlisting} + move=> l; case: (lastP l). +\end{lstlisting} +generates two subgoals: +\begin{lstlisting} + length nil * 2 = length (nil ++ nil) +\end{lstlisting} +and +\begin{lstlisting} + forall (s : list A) (x : A), + length (add_last x s) * 2 = length (add_last x s ++ add_last x s) +\end{lstlisting} +both having \ssrC{l : list A} in their context. + +Applied to the same goal, the command: +\begin{lstlisting} + move=> l; case: l / (lastP l). +\end{lstlisting} +generates the same subgoals but \ssrC{l} has been cleared from both +contexts. + +Again applied to the same goal, the command: +\begin{lstlisting} + move=> l; case: {1 3}l / (lastP l). +\end{lstlisting} +generates the subgoals \ssrL-length l * 2 = length (nil ++ l)- and +\ssrL-forall (s : list A) (x : A), length l * 2 = length (add_last x s++l)- +where the selected occurrences on the left of the \ssrC{/} switch have +been substituted with \ssrC{l} instead of being affected by the case +analysis. + +The equation name generation feature combined with a type family \ssrC{/} + switch generates an equation for the \emph{first} dependent d-item +specified by the user. +Again starting with the above goal, the command: +\begin{lstlisting} + move=> l; case E: {1 3}l / (lastP l)=>[|s x]. +\end{lstlisting} +adds \ssrC{E : l = nil} and \ssrC{E : l = add_last x s}, +respectively, to the context of the two subgoals it generates. + +There must be at least one \emph{d-item} to the left of the \ssrC{/} +switch; this prevents any +confusion with the view feature. However, the \ditem{}s to the right of +the \ssrC{/} are optional, and if they are omitted the first assumption +provides the instance of the type family. + +The equation always refers to the first \emph{d-item} in the actual +tactic call, before any padding with initial $\ssrC{\_}$s. 
Thus, if an +inductive type has two family parameters, it is possible to have +\ssr{} generate an equation for the second one by omitting the pattern +for the first; note however that this will fail if the type of the +second parameter depends on the value of the first parameter. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Control flow} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Indentation and bullets}\label{ssec:indent} + +A linear development of \Coq{} scripts gives little information on +the structure of the proof. In addition, replaying a proof after some +changes in the statement to be proved will usually not display information to +distinguish between the various branches of case analysis for instance. + +To help the user in this organization of the proof script at +development time, \ssr{} provides some bullets to highlight the +structure of branching proofs. The available bullets are \ssrC{-}, +\ssrC{+} and \ssrC{*}. Combined with tabulation, this lets us highlight four +nested levels of branching; the most we have ever +needed is three. Indeed, the use of ``simpl and closing'' switches, of +terminators (see above section \ref{ssec:termin}) and selectors (see + section \ref{ssec:select}) is powerful enough +to avoid most of the time more than two levels of indentation. + +Here is a fragment of such a structured script: + +\begin{lstlisting} +case E1: (abezoutn _ _) => [[| k1] [| k2]]. +- rewrite !muln0 !gexpn0 mulg1 => H1. + move/eqP: (sym_equal F0); rewrite -H1 orderg1 eqn_mul1. + by case/andP; move/eqP. +- rewrite muln0 gexpn0 mulg1 => H1. + have F1: t %| t * S k2.+1 - 1. + apply: (@dvdn_trans (orderg x)); first by rewrite F0; exact: dvdn_mull. + rewrite orderg_dvd; apply/eqP; apply: (mulgI x). + rewrite -{1}(gexpn1 x) mulg1 gexpn_add leq_add_sub //. + by move: P1; case t. + rewrite dvdn_subr in F1; last by exact: dvdn_mulr. + + rewrite H1 F0 -{2}(muln1 (p ^ l)); congr (_ * _). 
+ by apply/eqP; rewrite -dvdn1.
+ + by move: P1; case: (t) => [| [| s1]].
+- rewrite muln0 gexpn0 mul1g => H1.
+...
+\end{lstlisting}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Terminators}\label{ssec:termin}
+\idx{by \dots{}}
+
+To further structure scripts, \ssr{}
+supplies \emph{terminating} tacticals to explicitly close off tactics.
+When replaying scripts, we then have the nice property that
+an error immediately occurs when a closed tactic fails to prove its
+subgoal.
+
+It is hence recommended practice that the proof of any subgoal should
+end with a tactic which \emph{fails if it does not solve the current
+ goal}, like \ssrC{discriminate}, \ssrC{contradiction} or \ssrC{assumption}.
+
+In fact, \ssr{} provides a generic tactical which turns any tactic into
+a closing one (similar to \ssrC{now}). Its general syntax is:
+
+\begin{center}
+ \ssrC{by} {\tac}\ssrC{.}
+\end{center}
+
+The Ltac expression:
+
+\begin{center}
+ \ssrC{by [}\ssrN[1]{tactic} \ssrC{|}\ssrN[2]{tactic} \ssrC{| ...].}
+\end{center}
+
+is equivalent to:
+
+\begin{center}
+ \ssrC{[by} \ssrN[1]{tactic} \ssrC{| by} \ssrN[2]{tactic} \ssrC{| ...].}
+\end{center}
+
+and this form should be preferred to the former.
+
+In the script provided as example in section \ref{ssec:indent}, the
+paragraph corresponding to each sub-case ends with a tactic line prefixed
+with a \ssrC{by}, like in:
+
+\begin{center}
+ \ssrC{by apply/eqP; rewrite -dvdn1.}
+\end{center}
+
+
+The \ssrC{by} tactical is implemented using the user-defined,
+and extensible \ssrC{done} tactic. This \ssrC{done} tactic tries to solve
+the current goal by some trivial means and fails if it doesn't succeed.
+Indeed, the tactic expression:
+
+\begin{center}
+ \ssrC{by} {\tac}\ssrC{.}
+\end{center}
+
+is equivalent to:
+
+\begin{center}
+ {\tac}\ssrC{; done.}
+\end{center}
+
+Conversely, the tactic
+
+\begin{center}
+ \ssrC{by [ ].}
+\end{center}
+
+is equivalent to:
+
+\begin{center}
+ \ssrC{done.}
+\end{center}
+
+The default implementation of the \ssrC{done} tactic, in the {\tt
+ ssreflect.v} file, is:
+
+\begin{lstlisting}
+Ltac done :=
+ trivial; hnf; intros; solve
+ [ do ![solve [trivial | apply: sym_equal; trivial]
+ | discriminate | contradiction | split]
+ | case not_locked_false_eq_true; assumption
+ | match goal with H : ~ _ |- _ => solve [case H; trivial] end ].
+\end{lstlisting}
+
+The lemma \ssrC{|*not_locked_false_eq_true*|} is needed to discriminate
+\emph{locked} boolean predicates (see section \ref{ssec:lock}).
+The iterator tactical \ssrC{do} is presented in section
+\ref{ssec:iter}.
+This tactic can be customized by the user, for instance to include an
+\ssrC{auto} tactic.
+
+A natural and common way of closing a goal is to apply a lemma which
+is the \ssrC{exact} one needed for the goal to be solved. The defective
+form of the tactic:
+\begin{lstlisting}
+ exact.
+\end{lstlisting}
+is equivalent to:
+\begin{lstlisting}
+ do [done | by move=> top; apply top].
+\end{lstlisting}
+where \ssrC{top} is a fresh name assigned to the top assumption of the goal.
+This applied form is supported by the \ssrC{:} discharge tactical, and
+the tactic:
+\begin{lstlisting}
+ exact: MyLemma.
+\end{lstlisting}
+is equivalent to:
+\begin{lstlisting}
+ by apply: MyLemma.
+\end{lstlisting}
+(see section \ref{sss:strongapply} for the documentation of the \ssrC{apply:}
+combination).
+
+\textit{Warning} The list of tactics, possibly chained by
+semicolons, that follows a \ssrC{by} keyword is considered as a
+parenthesized block
+applied to the current goal. Hence for example if the tactic:
+\begin{lstlisting}
+ by rewrite my_lemma1.
+\end{lstlisting}
+succeeds, then the tactic:
+\begin{lstlisting}
+ by rewrite my_lemma1; apply my_lemma2.
+\end{lstlisting}
+usually fails since it is equivalent to:
+\begin{lstlisting}
+ by (rewrite my_lemma1; apply my_lemma2).
+\end{lstlisting}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Selectors}\label{ssec:select}
+\idx{last \dots{} first}
+\idx{first \dots{} last}
+
+When composing tactics, the two tacticals \ssrC{first} and
+\ssrC{last} let the user restrict the application of a tactic to only one
+of the subgoals generated by the previous tactic. This
+covers the frequent cases where a tactic generates two subgoals one of
+which can be easily disposed of.
+
+This is another powerful way of linearization of scripts, since it
+happens very often that a trivial subgoal can be solved in a less than
+one line tactic. For instance, the tactic:
+
+\begin{center}
+ \ssrN[1]{tactic}\ssrC{; last by} \ssrN[2]{tactic}\ssrC{.}
+\end{center}
+
+tries to solve the last subgoal generated by \ssrN[1]{tactic} using the
+\ssrN[2]{tactic}, and fails if it does not succeed. Its analogue
+
+\begin{center}
+ \ssrN[1]{tactic}\ssrC{; first by} \ssrN[2]{tactic}.
+\end{center}
+
+tries to solve the first subgoal generated by \ssrN[1]{tactic} using the
+tactic \ssrN[2]{tactic}, and fails if it does not succeed.
+
+
+\ssr{} also offers an extension of this facility, by supplying
+tactics to \emph{permute} the subgoals generated by a tactic.
+The tactic:
+
+\begin{center}
+ {\tac}\ssrC{; last first.}
+\end{center}
+
+inverts the order of the subgoals generated by {\tac}. It is
+equivalent to:
+
+\begin{center}
+ {\tac}\ssrC{; first last.}
+\end{center}
+
+
+More generally, the tactic:
+
+\begin{center}
+ {\tac}\ssrC{; last }${\naturalnumber}$ \ssrC{first.}
+\end{center}
+
+where ${\naturalnumber}$ is
+a \Coq{} numeral, or an Ltac variable denoting
+a \Coq{} numeral, having the value $k$.
It +rotates the $n$ subgoals $G_1, +\dots, G_n$ generated by {\tac}. The first subgoal becomes +$G_{n + 1 - k}$ and the circular order of subgoals remains unchanged. + +Conversely, the tactic: + + {\tac}\ssrC{; first }${\naturalnumber}$ \ssrC{last.} + +rotates the $n$ subgoals $G_1, +\dots, G_n$ generated by \ssrC{tactic} in order that the first subgoal +becomes $G_{k}$. + +Finally, the tactics \ssrC{last} and \ssrC{first} combine with the +branching syntax of Ltac: +if the tactic $\ssrN[0]{tactic}$ generates $n$ +subgoals on a given goal, then the tactic + + $tactic_0$\ssrC{; last }${\naturalnumber}$ \ssrC{[}$tactic_1$\ssrC{|}$\dots$\ssrC{|}$tactic_m$\ssrC{] || }$tactic_{m+1}$\ssrC{.} + +where ${\naturalnumber}$ denotes the integer $k$ as above, applies $tactic_1$ to the +$n -k + 1$-th goal, $\dots tactic_m$ to the $n -k + 2 - m$-th +goal and $tactic_{m+1}$ to the others. + +For instance, the script: +\begin{lstlisting} + Inductive test : nat -> Prop := + C1 : forall n, test n | C2 : forall n, test n | + C3 : forall n, test n | C4 : forall n, test n. + + Goal forall n, test n -> True. + move=> n t; case: t; last 2 [move=> k| move=> l]; idtac. +\end{lstlisting} + +creates a goal with four subgoals, the first and the last being +\ssrC{nat -> True}, the second and the third being \ssrC{True} with +respectively \ssrC{k : nat} and \ssrC{l : nat} in their context. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Iteration}\label{ssec:iter} +\idx{do \dots{} [ \dots{} ]} + +\ssr{} offers an accurate control on the repetition of +tactics, thanks to the \ssrC{do} tactical, whose general syntax is: + +\begin{center} + \ssrC{do} \optional{\ssrN{mult}} \ssrC{[} \ssrN[1]{tactic} \ssrC{|} $\dots$ \ssrC{|} \ssrN[n]{tactic} \ssrC{]} +\end{center} +where \ssrN{mult} is a \emph{multiplier}. + +Brackets can only be omitted if a single tactic is given \emph{and} a +multiplier is present. 
+
+A tactic of the form:
+
+\begin{center}
+ \ssrC{do [} \tac$_1$ \ssrC{|} $\dots$ \ssrC{|} \tac$_n$\ssrC{].}
+\end{center}
+
+is equivalent to the standard Ltac expression:
+
+\begin{center}
+ \ssrC{first [} \tac$_1$ \ssrC{|} $\dots$ \ssrC{|} \tac$_n$\ssrC{].}
+\end{center}
+
+
+The optional multiplier \ssrN{mult} specifies how many times
+the action of {\tac} should be repeated on the current subgoal.
+
+There are four kinds of multipliers:
+ \begin{itemize}
+ \item \ssrC{n!}: the step {\tac} is repeated exactly $n$ times
+ (where $n$ is a positive integer argument).
+ \item \ssrC{!}: the step {\tac} is repeated as many times as possible,
+ and done at least once.
+ \item \ssrC{?}: the step {\tac} is repeated as many times as possible,
+ optionally.
+ \item \ssrC{n?}: the step {\tac} is repeated up to $n$ times,
+ optionally.
+ \end{itemize}
+
+For instance, the tactic:
+
+\begin{center}
+ {\tac} \ssrL+; do 1?rewrite mult_comm.+
+\end{center}
+
+rewrites the lemma \ssrC{mult_comm} at most once in all the subgoals
+generated by {\tac}, whereas the tactic:
+
+\begin{center}
+ {\tac} \ssrL+; do 2!rewrite mult_comm.+
+\end{center}
+
+rewrites the lemma \ssrC{mult_comm} exactly twice in all the subgoals
+generated by {\tac}, and fails if this rewrite is not possible
+in some subgoal.
+
+Note that the combination of multipliers and \ssrC{rewrite} is so often
+used that multipliers are in fact integrated to the syntax of the \ssr{}
+\ssrC{rewrite} tactic, see section \ref{sec:rw}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Localization}\label{ssec:gloc}
+\idx{\dots{} in \dots{}}
+
+In sections \ref{ssec:loc} and \ref{ssec:profstack}, we have already
+presented the \emph{localization} tactical \ssrC{in}, whose general
+syntax is:
+\begin{center}
+ {\tac} \ssrC{in} \ssrN{ident}$^+$ \optional{\ssrC{*}}
+\end{center}
+
+where \ssrN{ident}$^+$ is a non-empty list of fact
+names in the context.
On the left side of \ssrC{in}, {\tac} can be +\ssrC{move}, \ssrC{case}, \ssrC{elim}, \ssrC{rewrite}, \ssrC{set}, + or any tactic formed with the general iteration tactical \ssrC{do} (see + section \ref{ssec:iter}). + +The operation described by {\tac} is performed in the facts +listed in \ssrN{ident}$^+$ and in the goal if a \ssrC{*} ends +the list. + +The \ssrC{in} tactical successively: +\begin{itemize} +\item generalizes the selected hypotheses, possibly ``protecting'' the + goal if \ssrC{*} is not present, +\item performs {\tac}, on the obtained goal, +\item reintroduces the generalized facts, under the same names. +\end{itemize} + +This defective form of the \ssrC{do} tactical is useful to avoid clashes +between standard Ltac \ssrC{in} and the \ssr{} tactical \ssrC{in}. +For example, in the following script: +\begin{lstlisting} + Ltac |*mytac*| H := rewrite H. + + Goal forall x y, x = y -> y = 3 -> x + y = 6. + move=> x y H1 H2. + do [mytac H2] in H1 *. +\end{lstlisting} +the last tactic rewrites the hypothesis \ssrC{H2 : y = 3} both in +\ssrC{H1 : x = y} and in the goal \ssrC{x + y = 6}. + +By default \ssrC{in} keeps the body of local definitions. To erase +the body of a local definition during the generalization phase, +the name of the local definition must be written between parentheses, +like in \ssrC{rewrite H in H1 (def_n) $\;\;$H2}. + +From \ssr{} 1.5 the grammar for the \ssrC{in} tactical has been extended +to the following one: + +\begin{center} + {\tac} \ssrC{in} \optional{ + \ssrN{clear-switch} {\optsep} + \optional{\ssrC{@}}\ssrN{ident} {\optsep} + \ssrC{(}\ssrN{ident}\ssrC{)} {\optsep} + \ssrC{(}\optional{\ssrC{@}}\ssrN{ident} \ssrC{:=} \ssrN{c-pattern}\ssrC{)} + }$^+$ \optional{\ssrC{*}} +\end{center} + +In its simplest form the last option lets one rename hypotheses that can't be +cleared (like section variables). 
For example \ssrC{(y := x)} generalizes
+over \ssrC{x} and reintroduces the generalized
+variable under the name \ssrC{y} (and does not clear \ssrC{x}).\\
+For a more precise description of the $\ssrC{(}[\ssrC{@}]\ssrN{ident}\ \ssrC{:=}\ \ssrN{c-pattern}\ssrC{)}$
+item, refer to the ``Advanced generalization'' paragraph at page~\pageref{par:advancedgen}.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Structure}\label{ssec:struct}
+
+Forward reasoning structures the script by explicitly specifying some
+assumptions to be added to the proof context. It is closely associated
+with the declarative style of proof, since an extensive use of these
+highlighted statements
+makes the script closer to a (very detailed) text book proof.
+
+Forward chaining tactics allow to state an intermediate lemma and start a
+piece of script dedicated to the proof of this statement. The use of
+closing tactics (see section \ref{ssec:termin}) and of
+indentation makes syntactically explicit the portion of the script
+building the proof of the intermediate statement.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsubsection*{The \ssrC{have} tactic.}
+\label{sssec:have}
+\idx{have: \dots{}}
+\idx{have: \dots{} := \dots{}}
+
+The main \ssr{} forward reasoning tactic is the \ssrC{have} tactic. It
+can be used in two modes: one starts a new (sub)proof for an
+intermediate result in the main proof, and the other
+provides explicitly a proof term for this intermediate step.
+
+In the first mode, the syntax of \ssrC{have} in its defective form is:
+
+ \ssrC{have: }{\term}\ssrC{.}
+
+This tactic supports open syntax for {\term}.
+Applied to a goal \ssrC{G}, it generates a first subgoal requiring a
+proof of {\term} in the context of \ssrC{G}. The second generated
+subgoal is of the form {\term} \ssrC{-> G}, where {\term} becomes
+the new top assumption, instead of being introduced with a fresh
+name.
At the proof-term level, the \ssrC{have} tactic creates a $\beta$ +redex, and introduces the lemma under a fresh name, automatically +chosen. + + +Like in the case of the \ssrC{pose} tactic (see section \ref{ssec:pose}), +the types of the holes are abstracted in {\term}. +For instance, the tactic: +\begin{lstlisting} + have: _ * 0 = 0. +\end{lstlisting} +is equivalent to: +\begin{lstlisting} + have: forall n : nat, n * 0 = 0. +\end{lstlisting} +The \ssrC{have} tactic also enjoys the same abstraction mechanism as the +\ssrC{pose} tactic for the non-inferred implicit arguments. For instance, +the tactic: +\begin{lstlisting} + have: forall x y, (x, y) = (x, y + 0). +\end{lstlisting} +opens a new subgoal to prove that: + +\noindent\ssrC{forall (T : Type) (x : T) (y : nat), (x, y) = (x, y + 0)} + + +The behavior of the defective \ssrC{have} tactic makes it possible to +generalize it in the +following general construction: +\begin{center} + \ssrC{have} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} + \optional{\ssrN{s-item} {\optsep} \ssrN{binder}$^+$} + \optional{\ssrC{:} \ssrN[1]{term}} + \optional{\ssrC{:=} \ssrN[2]{term} {\optsep} \ssrC{by} {\tac}} +\end{center} + +Open syntax is supported for $\ssrN[1]{term}$ and $\ssrN[2]{term}$. For the +description of +\iitem{}s and clear switches see section \ref{ssec:intro}. +The first mode of the \ssrC{have} tactic, which opens a sub-proof for an +intermediate result, uses tactics of the form: + +\begin{center} + \ssrC{have} \ssrN{clear-switch} \ssrN{i-item} \ssrC{:} {\term} \ssrC{by} {\tac}. +\end{center} + +which behave like:\\ + +\begin{center} + \ssrC{have:} {\term} \ssrC{; first by } {\tac}. +\end{center} +\begin{center} + \ssrC{ move=>} \ssrN{clear-switch} \ssrN{i-item}. +\end{center} + + +Note that the \ssrN{clear-switch} \emph{precedes} the +\ssrN{i-item}, which allows to reuse a name of the context, possibly used +by the proof of the assumption, to introduce the new assumption +itself. 
+
+The \ssrC{by} feature is especially convenient when the proof script of the
+statement is very short, basically when it fits in one line like in:
+\begin{lstlisting}
+ have H : forall x y, x + y = y + x by move=> x y; rewrite addnC.
+\end{lstlisting}
+
+The possibility of using \iitem{}s supplies a very concise
+syntax for the further use of the intermediate step. For instance,
+\begin{lstlisting}
+ have -> : forall x, x * a = a.
+\end{lstlisting}
+on a goal \ssrC{G}, opens a new subgoal asking for a proof of
+\ssrC{forall x, x * a = a}, and a second subgoal in which the lemma
+ \ssrC{forall x, x * a = a} has been rewritten in the goal \ssrC{G}. Note
+ that in this last subgoal, the intermediate result does not appear in
+ the context.
+Note that, thanks to the deferred execution of clears, the following
+idiom is supported (assuming \ssrC{x} occurs in the goal only):
+\begin{lstlisting}
+ have {x} -> : x = y
+\end{lstlisting}
+
+Another frequent use of the intro patterns combined with \ssrC{have} is the
+destruction of existential assumptions like in the tactic:
+\begin{lstlisting}
+ have [x Px]: exists x : nat, x > 0.
+\end{lstlisting}
+which opens a new subgoal asking for a proof of \ssrC{exists x : nat, x >
+ 0} and a second subgoal in which the witness is introduced under
+the name \ssrC{x : nat}, and its property under the name \ssrC{Px : x > 0}.
+
+An alternative use of the \ssrC{have} tactic is to provide the explicit proof
+term for the intermediate lemma, using tactics of the form:
+
+\begin{center}
+ \ssrC{have} \optional{\ssrN{ident}} \ssrC{:=} {\term}.
+\end{center}
+
+This tactic creates a new assumption of type the type of
+{\term}. If the
+optional \ssrN{ident} is present, this assumption is introduced under
+the name \ssrN{ident}. Note that the body of the constant is lost for
+the user.
+
+Again, non-inferred implicit arguments and explicit holes are abstracted.
For
+instance, the tactic:
+\begin{lstlisting}
+ have H := forall x, (x, x) = (x, x).
+\end{lstlisting}
+adds to the context \ssrC{H : Type -> Prop}. This is a schematic example but
+the feature is especially useful when the proof term to give involves
+for instance a lemma with some hidden implicit arguments.
+
+After the \ssrN{i-pattern}, a list of binders is allowed.
+For example, if \ssrC{Pos_to_P} is a lemma that proves that
+\ssrC{P} holds for any positive, the following command:
+\begin{lstlisting}
+ have H x (y : nat) : 2 * x + y = x + x + y by auto.
+\end{lstlisting}
+will put in the context \ssrC{H : forall x y, 2 * x + y = x + x + y}. A proof term
+provided after \ssrC{:=} can mention these bound variables (that are
+automatically introduced with the given names).
+Since the \ssrN{i-pattern} can be omitted, to avoid ambiguity, bound variables
+can be surrounded with parentheses even if no type is specified:
+\begin{lstlisting}
+ have (x) : 2 * x = x + x by auto.
+\end{lstlisting}
+
+The \ssrN{i-item}s and \ssrN{s-item} can be used to interpret the
+asserted hypothesis with views (see section~\ref{sec:views}) or
+simplify the resulting goals.
+
+The \ssrC{have} tactic also supports a \ssrC{suff} modifier which allows for
+asserting that a given statement implies the current goal without
+copying the goal itself. For example, given a goal \ssrC{G} the tactic
+\ssrC{have suff H : P} results in the following two goals:
+\begin{lstlisting}
+ |- P -> G
+ H : P -> G |- G
+\end{lstlisting}
+Note that \ssrC{H} is introduced in the second goal. The \ssrC{suff}
+modifier is not compatible with the presence of a list of binders.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsubsection*{Generating \ssrC{let in} context entries with \ssrC{have}}
+\label{sec:havetransparent}
+
+Since \ssr{} 1.5 the \ssrC{have} tactic supports a ``transparent'' modifier to
+generate \ssrC{let in} context entries: the \ssrC{@} symbol in front of the context
+entry name.
For example:
+
+\begin{lstlisting}
+have @i : 'I_n by apply: (Sub m); auto.
+\end{lstlisting}
+generates the following context entry:
+\begin{lstlisting}
+i := Sub m proof_produced_by_auto : 'I_n
+\end{lstlisting}
+
+Note that the sub-term produced by \ssrC{auto} is in general huge and
+uninteresting, and hence one may want to hide it.
+
+For this purpose the \ssrC{[: name ]} intro pattern and the tactic
+\ssrC{abstract} (see page~\pageref{ssec:abstract}) are provided.
+Example:
+\begin{lstlisting}
+have [:blurb] @i : 'I_n by apply: (Sub m); abstract: blurb; auto.
+\end{lstlisting}
+generates the following two context entries:
+\begin{lstlisting}
+blurb : (m < n) (*1*)
+i := Sub m blurb : 'I_n
+\end{lstlisting}
+The type of \ssrC{blurb} can be cleaned of its annotations by just simplifying
+it. The annotations are there for technical reasons only.
+
+When intro patterns for abstract constants are used in conjunction
+with \ssrC{have} and an explicit term, they must be used as follows:
+
+\begin{lstlisting}
+have [:blurb] @i : 'I_n := Sub m blurb.
+ by auto.
+\end{lstlisting}
+
+In this case the abstract constant \ssrC{blurb} is assigned by using it
+in the term that follows \ssrC{:=} and its corresponding goal is left to
+be solved. Goals corresponding to intro patterns for abstract constants
+are opened in the order in which the abstract constants are declared (not
+in the ``order'' in which they are used in the term).
+
+Note that abstract constants do respect scopes. Hence, if a variable
+is declared after their introduction, it has to be properly generalized (i.e.
+explicitly passed to the abstract constant when one makes use of it).
+For example any of the following two lines:
+\begin{lstlisting}
+have [:blurb] @i k : 'I_(n+k) by apply: (Sub m); abstract: blurb k; auto.
+have [:blurb] @i k : 'I_(n+k) := apply: Sub m (blurb k); first by auto.
+\end{lstlisting} +generates the following context: +\begin{lstlisting} +blurb : (forall k, m < n+k) (*1*) +i := fun k => Sub m (blurb k) : forall k, 'I_(n+k) +\end{lstlisting} + +Last, notice that the use of intro patterns for abstract constants is +orthogonal to the transparent flag \ssrC{@} for \ssrC{have}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{The \ssrC{have} tactic and type classes resolution} +\label{ssec:havetcresolution} + +Since \ssr{} 1.5 the \ssrC{have} tactic behaves as follows with respect to type +classes inference. + +\begin{itemize} +\item \ssrC{have foo : ty.} + Full inference for \ssrC{ty}. + The first subgoal demands a proof of such instantiated statement. +\item \ssrC{have foo : ty := .} + No inference for \ssrC{ty}. Unresolved instances are quantified in + \ssrC{ty}. The first subgoal demands a proof of such quantified + statement. Note that no proof term follows \ssrC{:=}, hence two + subgoals are generated. +\item \ssrC{have foo : ty := t.} + No inference for \ssrC{ty} and \ssrC{t}. +\item \ssrC{have foo := t.} + No inference for \ssrC{t}. Unresolved instances are quantified in the + (inferred) type of \ssrC{t} and abstracted in \ssrC{t}. +\end{itemize} + +The behavior of \ssr{} 1.4 and below (never resolve type classes) +can be restored with the option \ssrC{Set SsrHave NoTCResolution}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Variants: the \ssrC{suff} and \ssrC{wlog} tactics.} +\label{ssec:wlog} +\idx{suff: \dots{}} +\idx{suffices: \dots{}} +\idx{wlog: \dots{} / \dots{}} +\idx{without loss: \dots{} / \dots{}} + +As it is often the case in mathematical textbooks, forward +reasoning may be used in slightly different variants. +One of these variants is to show that the intermediate step $L$ +easily implies the initial goal $G$. By easily we mean here that +the proof of $L \Rightarrow G$ is shorter than the one of $L$ +itself. 
This kind of reasoning step usually starts with:
+``It suffices to show that \dots''.
+
+This is such a frequent way of reasoning that \ssr{} has a variant of the
+\ssrC{have} tactic called \ssrC{suffices} (whose abridged name is
+\ssrC{suff}). The \ssrC{have} and \ssrC{suff} tactics are equivalent and
+have the same syntax but:
+\begin{itemize}
+\item the order of the generated subgoals is reversed
+\item but the optional clear item is still performed in the
+ \emph{second} branch. This means that the tactic:
+\begin{lstlisting}
+ suff {H} H : forall x : nat, x >= 0.
+\end{lstlisting}
+fails if the context of the current goal indeed contains an
+assumption named \ssrC{H}.
+\end{itemize}
+The rationale of this clearing policy is to make possible ``trivial''
+refinements of an assumption, without changing its name in the main
+branch of the reasoning.
+
+The \ssrC{have} modifier can follow the \ssrC{suff} tactic.
+For example, given a goal \ssrC{G} the tactic
+\ssrC{suff have H : P} results in the following two goals:
+\begin{lstlisting}
+ H : P |- G
+ |- (P -> G) -> G
+\end{lstlisting}
+Note that, in contrast with \ssrC{have suff}, the name \ssrC{H} has been introduced
+in the first goal.
+
+Another useful construct is reduction,
+showing that a particular case is in fact general enough to prove
+a general property. This kind of reasoning step usually starts with:
+``Without loss of generality, we can suppose that \dots''.
+Formally, this corresponds to the proof of a goal \ssrC{G} by introducing
+a cut \ssrN{wlog\_statement} \ssrC{-> G}. Hence the user shall provide a
+proof for both \ssrC{(}\ssrN{wlog\_statement} \ssrC{-> G) -> G} and
+\ssrN{wlog\_statement} \ssrC{-> G}. However, such cuts are usually rather
+painful to perform by hand, because the statement
+\ssrN{wlog\_statement} is tedious to write by hand, and sometimes even
+to read.
+
+\ssr{} implements this kind of reasoning step through the \ssrC{without loss}
+tactic, whose short name is \ssrC{wlog}.
It offers support to describe
+the shape of the cut statements, by providing the simplifying
+hypothesis and by pointing at the elements of the initial goals which
+should be generalized. The general syntax of \ssrC{without loss} is:
+
+\begin{center}
+ \ssrC{wlog} \optional{\ssrC{suff}} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-item}} \ssrC{:} \optional{\ssrN[1]{ident} $\dots$ \ssrN[n]{ident}} \ssrC{/} {\term}
+\end{center}
+
+where \ssrN[1]{ident} $\dots$ \ssrN[n]{ident} are identifiers for constants
+in the context of the goal. Open syntax is supported for {\term}.
+
+In its defective form:
+
+\begin{center}
+ \ssrC{wlog: /} {\term}.
+\end{center}
+
+on a goal \ssrC{G}, it creates two subgoals: a first one to prove the formula
+\ssrC{(}{\term} \ssrC{-> G) -> G} and a second one to prove the formula
+{\term} \ssrC{-> G}.
+
+
+If the optional list \ssrN[1]{ident} $\dots$ \ssrN[n]{ident} is present on the left
+side of \ssrC{/}, these constants are generalized in the premise
+\ssrC{(}{\term} \ssrC{-> G)} of the first subgoal. By default the body of
+local definitions is erased. This behavior can be inhibited
+prefixing the name of the local definition with the \ssrC{@} character.
+
+In the second subgoal, the tactic:
+
+\begin{center}
+ \ssrC{move=>} \ssrN{clear-switch}\ssrC{} \ssrN{i-item}\ssrC{.}
+\end{center}
+
+is performed if at least one of these optional switches is present in
+the \ssrC{wlog} tactic.
+
+The \ssrC{wlog} tactic is especially useful when a symmetry argument
+simplifies a proof. Here is an example showing the beginning of the
+proof that quotient and remainder of natural number euclidean division
+are unique.
+\begin{lstlisting}
+ Lemma quo_rem_unicity: forall d q1 q2 r1 r2,
+ q1*d + r1 = q2*d + r2 -> r1 < d -> r2 < d -> (q1, r1) = (q2, r2).
+ move=> d q1 q2 r1 r2.
+ wlog: q1 q2 r1 r2 / q1 <= q2.
+ by case (le_gt_dec q1 q2)=> H; last symmetry; eauto with arith.
+\end{lstlisting} + +The \ssrC{wlog suff} variant is simpler, since it cuts +\ssrN{wlog\_statement} instead of \ssrN{wlog\_statement} \ssrC{-> G}. It thus +opens the goals \ssrN{wlog\_statement} \ssrC{-> G} and \ssrN{wlog\_statement}. + +In its simplest form +the \ssrC{generally have :...} tactic +is equivalent to \ssrC{wlog suff :...} followed by \ssrC{last first}. +When the \ssrC{have} tactic +is used with the \ssrC{generally} (or \ssrC{gen}) modifier it accepts an +extra identifier followed by a comma before the usual intro pattern. +The identifier will name the new hypothesis in its more general form, +while the intro pattern will be used to process its instance. For example: +\begin{lstlisting} + Lemma simple n (ngt0 : 0 < n ) : P n. + gen have ltnV, /andP[nge0 neq0] : n ngt0 / (0 <= n) && (n != 0). +\end{lstlisting} +The first subgoal will be +\begin{lstlisting} + n : nat + ngt0 : 0 < n + ==================== + (0 <= n) && (n != 0) +\end{lstlisting} +while the second one will be +\begin{lstlisting} + n : nat + ltnV : forall n, 0 < n -> (0 <= n) && (n != 0) + nge0 : 0 <= n + neqn0 : n != 0 + ==================== + P n +\end{lstlisting} + +\paragraph{Advanced generalization}\label{par:advancedgen} +The complete syntax for the items on the left hand side of the \ssrC{/} +separator is the following one: +\begin{center} +\ssrN{clear-switch} {\optsep} \optional{\ssrC{@}} \ssrN{ident} {\optsep} \ssrC{(}\optional{\ssrC{@}}\ssrN{ident} \ssrC{:=} \ssrN{c-pattern}\ssrC{)} +\end{center} +Clear operations are intertwined with generalization operations. This +helps in particular avoiding dependency issues while generalizing some facts. + +\noindent +If an \ssrN{ident} is prefixed with the \ssrC{@} prefix mark, then a +let-in redex is created, which keeps track if its body (if any). The +syntax \ssrC{(}\ssrN{ident}\ssrC{:=}\ssrN{c-pattern}\ssrC{)} allows to +generalize an arbitrary term using a given name. 
Note that its simplest +form \ssrC{(x := y)} is just a renaming of \ssrC{y} into \ssrC{x}. In +particular, this can be useful in order to simulate the generalization +of a section variable, otherwise not allowed. Indeed renaming does not +require the original variable to be cleared. + + +\noindent +The syntax \ssrC{(@x := y)} generates a let-in abstraction but with the following +caveat: \ssrC{x} will not bind \ssrC{y}, but its body, whenever \ssrC{y} can be +unfolded. This covers the case of both local and global definitions, as +illustrated in the following example: + +\begin{lstlisting} +Section Test. +Variable x : nat. +Definition addx z := z + x. +Lemma test : x <= addx x. +wlog H : (y := x) (@twoy := addx x) / twoy = 2 * y. +\end{lstlisting} +\noindent +The first subgoal is: +\begin{lstlisting} + (forall y : nat, let twoy := y + y in twoy = 2 * y -> y <= twoy) -> + x <= addx x +\end{lstlisting} +\noindent +To avoid unfolding the term captured by the pattern \ssrC{addx x} one +can use the pattern \ssrC{id (addx x)}, that would produce the following first +subgoal: +\begin{lstlisting} + (forall y : nat, let twoy := addx y in twoy = 2 * y -> y <= twoy) -> + x <= addx x +\end{lstlisting} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Rewriting}\label{sec:rw} +\idx{rewrite \dots{}} + +The generalized use of reflection implies that most of the +intermediate results handled are properties of effectively computable +functions. The most efficient means of establishing such results are +computation and simplification of expressions involving such +functions, i.e., rewriting. \ssr{} therefore includes an extended +\ssrC{rewrite} tactic, that unifies and combines most of the rewriting +functionalities. 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{An extended \ssrC{rewrite} tactic}\label{ssec:extrw} +The main features of the \ssrC{rewrite} tactic are: +\begin{itemize} +\item It can perform an entire series of such operations in any + subset of the goal and/or context; +\item It allows to perform rewriting, + simplifications, folding/unfolding of definitions, closing of goals; +\item Several rewriting operations can be chained in a single tactic; +\item Control over the occurrence at which rewriting is to be performed is + significantly enhanced. +\end{itemize} + + +The general form of an \ssr{} rewrite tactic is: + +\begin{center} + \ssrC{rewrite} \ssrN{rstep}$^+$\ssrC{.} +\end{center} + +The combination of a rewrite tactic with the \ssrC{in} tactical (see +section \ref{ssec:loc}) performs rewriting in both the context and the +goal. + +A rewrite step \ssrN{rstep} has the general form: + +\begin{center} + \optional{\ssrN{r-prefix}}\ssrN{r-item} +\end{center} + +where: + +\begin{longtable}{rcl} +\ssrN{r-prefix} & ::= & + \optional{\ssrC{-}} \optional{\ssrN{mult}} \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} \optional{\ssrC{[}\ssrN{r-pattern}\ssrC{]}}\\ +\ssrN{r-pattern} & ::= & +{\term} {\optsep} \ssrC{in} \optional{\ssrN{ident} \ssrC{in}} {\term} {\optsep} \optional{{\term} \ssrC{in} {\optsep} {\term} \ssrC{as} } \ssrN{ident} \ssrC{in} {\term}\\ +\ssrN{r-item} & ::= & +\optional{\ssrC{/}}{\term} {\optsep} \ssrN{s-item} \\ +\end{longtable} + + +% \begin{eqnarray*} +% \ssrN{r-prefix} & ::= & +% [\ssrC{-}]\ [\ssrN{mult}][\ssrN{occ-switch} | \ssrN{cl-item}][{\term}]\\ +% \ssrN{r-item} & ::= & +% [\ssrC{-}]{\term}\ |\ [\ssrC{-}]\ssrC{[}\ssrN[1]{term}\ssrC{]}\ssrC{/(}\ssrN[2]{term}\ssrC{)} \ |\ +% \ssrN{simpl switch} \ |\ \\ +% && \ssrN{eq-term} \ |\ \ssrC{(} \ssrN[1]{eq-term}\ssrC{,}\dots +% \ssrC{,}\ssrN[n]{eq-term} \ssrC{)} \ |\ \ssrC{(_ :}\ssrN{eq-term} \ssrC{)} +% \end{eqnarray*} + + +An \ssrN{r-prefix} contains annotations 
to qualify where and how the +rewrite operation should be performed: +\begin{itemize} +\item The optional initial \ssrC{-} indicates the direction of the rewriting + of \ssrN{r-item}: if present the direction is right-to-left and it is + left-to-right otherwise. +\item The multiplier \ssrN{mult} (see section \ref{ssec:iter}) + specifies if and how the rewrite operation should be repeated. +\item A rewrite operation matches the occurrences of a \emph{rewrite + pattern}, and replaces these occurrences by an other term, according + to the given \ssrN{r-item}. + The optional \emph{redex switch} $\ssrC{[}\ssrN{r-pattern}\ssrC{]}$, which + should always be surrounded by brackets, gives explicitly this + rewrite pattern. In its simplest form, it is a regular term. + If no explicit redex switch + is present the rewrite pattern to be matched is inferred from the + \ssrN{r-item}. +\item This optional {\term}, or + the \ssrN{r-item}, may be preceded by an + occurrence switch (see section \ref{ssec:select}) or a clear item + (see section \ref{ssec:discharge}), these two possibilities being + exclusive. An occurrence switch selects the occurrences of the + rewrite pattern which should be affected by the rewrite operation. +\end{itemize} + + +An \ssrN{r-item} can be: + + +\begin{itemize} +\item A \emph{simplification r-item}, represented by a + \ssrN{s-item} (see section \ref{ssec:intro}). +% In some cases, \ssrN{r-prefix}es are not supported. + Simplification operations are + intertwined with the possible other rewrite operations specified by + the list of r-items. +\item A \emph{folding/unfolding r-item}. The tactic: + + \ssrC{rewrite /}{\term} + +unfolds the head constant of \textit{term} in every occurrence of the +first matching of \textit{term} in the goal. In particular, if +\ssrC{my_def} is a (local or global) defined constant, the tactic: +\begin{lstlisting} + rewrite /my_def. +\end{lstlisting} +is analogous to: +\begin{lstlisting} + unfold my_def. 
+\end{lstlisting} +Conversely: +\begin{lstlisting} + rewrite -/my_def. +\end{lstlisting} +is equivalent to: +\begin{lstlisting} + fold my_def. +\end{lstlisting} +%\emph{Warning} The combination of redex switch with unfold +%\ssrN{r-item} is not yet implemented. + +When an unfold r-item is combined with a redex pattern, a conversion +operation is performed. A tactic of the form: + +\begin{center} + \ssrC{rewrite -[}\ssrN[1]{term}\ssrC{]/}\ssrN[2]{term}\ssrC{.} +\end{center} + +is equivalent to: + + +\begin{center} + \ssrC{change} \ssrN[1]{term} \ssrC{with} \ssrN[2]{term}\ssrC{.} +\end{center} + + +If \ssrN[2]{term} is a single constant and \ssrN[1]{term} head symbol +is not \ssrN[2]{term}, then the head symbol of \ssrN[1]{term} is +repeatedly unfolded until \ssrN[2]{term} appears. + +\begin{lstlisting} + Definition double x := x + x. + Definition ddouble x := double (double x). + Lemma ex1 x : ddouble x = 4 * x. + rewrite [ddouble _]/double. +\end{lstlisting} + +The resulting goal is: + +\begin{lstlisting} + double x + double x = 4 * x +\end{lstlisting} + +\emph{Warning} The \ssr{} terms containing holes are \emph{not} +typed as abstractions in this context. Hence the following script: +\begin{lstlisting} + Definition f := fun x y => x + y. + Goal forall x y, x + y = f y x. + move=> x y. + rewrite -[f y]/(y + _). +\end{lstlisting} +raises the error message +\begin{verbatim} + User error: fold pattern (y + _) does not match redex (f y) +\end{verbatim} +but the script obtained by replacing the last line with: +\begin{lstlisting} + rewrite -[f y x]/(y + _). +\end{lstlisting} +is valid. + + +\item A term, which can be: + \begin{itemize} + \item A term whose type has the form: + $$\ssrC{forall}\ (x_1\ :\ A_1)\dots(x_n\ :\ A_n),\ eq\ term_1\ term_2$$ + where $eq$ is the Leibniz equality or a registered setoid + equality. %In the case of setoid relations, the only supported + %r-prefix is the directional \ssrC{-}. 
+ \item A list of terms $(t_1,\dots,t_n)$, each $t_i$ having a type of the + form: $$\ssrC{forall}\ (x_1\ :\ A_1)\dots(x_n\ :\ A_n),\ eq\ term_1\ term_2$$ where + $eq$ is the Leibniz equality or a registered setoid + equality. The tactic: + + \centerline{\ssrC{rewrite} \ssrN{r-prefix}\ssrC{(}$t_1$\ssrC{,}$\dots$\ssrC{,}$t_n$\ssrC{).}} + + is equivalent to: + + \centerline{\ssrC{do [rewrite} \ssrN{r-prefix} $t_1$ \ssrC{|} $\dots$ \ssrC{| rewrite} \ssrN{r-prefix} $t_n$\ssrC{].}} + + \item An anonymous rewrite lemma + \ssrC{(_ :} {\term}), where \textit{term} has again the form: + $$\ssrC{forall}\ (x_1\ :\ A_1)\dots(x_n\ :\ A_n),\ eq\ term_1\ term_2$$ + The tactic: + + \centerline{\ssrC{rewrite (_ :} {\term}\ssrC{)}} + + is in fact a synonym of: + + \centerline{\ssrC{cutrewrite (}{\term}\ssrC{).}} + + + \end{itemize} + +\end{itemize} + + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Remarks and examples}\label{ssec:rwex} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Rewrite redex selection} +The general strategy of \ssr{} +is to grasp as many redexes as possible and to let the user select the +ones to be rewritten thanks to the improved syntax for the control of +rewriting. + +This may be a source of incompatibilities between the two \ssrC{rewrite} +tactics. + +In a rewrite tactic of the form: + + \ssrC{rewrite} \ssrN{occ-switch}\ssrC{[}\ssrN[1]{term}\ssrC{]}\ssrN[2]{term}. + +\ssrN[1]{term} is the explicit rewrite redex and +\ssrN[2]{term} is the +rewrite rule. The execution of this tactic unfolds as follows: + +\begin{itemize} +\item First \ssrN[1]{term} and \ssrN[2]{term} are $\beta\iota$ normalized. Then + \ssrN[2]{term} is put in head normal form if the Leibniz equality + constructor \ssrC{eq} is not the head symbol. This may involve $\zeta$ + reductions. +\item Then, the matching algorithm (see section \ref{ssec:set}) + determines the first subterm of the goal matching the rewrite pattern. 
+ The rewrite pattern is + given by \ssrN[1]{term}, if an explicit redex pattern switch is provided, or by + the type of \ssrN[2]{term} otherwise. However, matching skips over + matches that would lead to trivial rewrites. All the + occurrences of this subterm in the goal are candidates for rewriting. +\item Then only the occurrences coded by \ssrN{occ-switch} (see again + section \ref{ssec:set}) are finally selected for rewriting. +\item The left hand side of $\ssrN[2]{term}$ is unified with the subterm found + by the matching algorithm, and if this succeeds, all the selected + occurrences in the goal are replaced by the right hand side of + $\ssrN[2]{term}$. +\item Finally the goal is $\beta\iota$ normalized. +\end{itemize} + +In the case $\ssrN[2]{term}$ is a list of terms, the first top-down (in +the goal) left-to-right (in the list) matching rule gets selected. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Chained rewrite steps} + + +The possibility to chain rewrite operations in a single tactic makes +scripts more compact and gathers in a single command line a bunch +of surgical +operations which would be described by a one sentence in a pen and +paper proof. + +Performing rewrite and simplification operations in a single tactic +enhances significantly the concision of scripts. For instance the +tactic: +\begin{lstlisting} + rewrite /my_def {2}[f _]/= my_eq //=. +\end{lstlisting} +unfolds \ssrC{my_def} in the goal, simplifies the second occurrence of the +first subterm matching pattern \ssrC{[f _]}, rewrites \ssrC{my_eq}, +simplifies the whole goal and closes trivial goals. + +Here are some concrete examples of chained rewrite operations, in the +proof of basic results on natural numbers arithmetic: + +\begin{lstlisting} + Lemma |*addnS*| : forall m n, m + n.+1 = (m + n).+1. + Proof. by move=> m n; elim: m. Qed. + + Lemma |*addSnnS*| : forall m n, m.+1 + n = m + n.+1. + Proof. move=> *; rewrite addnS; apply addSn. Qed. 
+ + Lemma |*addnCA*| : forall m n p, m + (n + p) = n + (m + p). + Proof. by move=> m n; elim: m => [|m Hrec] p; rewrite ?addSnnS -?addnS. Qed. + + Lemma |*addnC*| : forall m n, m + n = n + m. + Proof. by move=> m n; rewrite -{1}[n]addn0 addnCA addn0. Qed. +\end{lstlisting} + +Note the use of the \ssrC{?} switch for parallel rewrite operations in +the proof of \ssrC{|*addnCA*|}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Explicit redex switches are matched first} +If an \ssrN{r-prefix} involves a \emph{redex switch}, the first step is to +find a subterm matching this redex pattern, independently from the left hand +side \ssrC{t1} of the equality the user wants to rewrite. + +For instance, if \ssrL-H : forall t u, t + u = u + t- is in the context of a +goal \ssrL-x + y = y + x-, the tactic: +\begin{lstlisting} + rewrite [y + _]H. +\end{lstlisting} +transforms the goal into \ssrL-x + y = x + y-. + +Note that if this first pattern matching is not compatible with the +\emph{r-item}, the rewrite fails, even if the goal contains a correct +redex matching both the redex switch and the left hand side of the +equality. For instance, if \ssrL-H : forall t u, t + u * 0 = t- is +in the context of a goal \ssrL-x + y * 4 + 2 * 0 = x + 2 * 0-, then tactic: +\begin{lstlisting} + rewrite [x + _]H. +\end{lstlisting} +raises the error message: +\begin{verbatim} + User error: rewrite rule H doesn't match redex (x + y * 4) +\end{verbatim} +while the tactic: +\begin{lstlisting} + rewrite (H _ 2). +\end{lstlisting} +transforms the goal into \ssrL-x + y * 4 = x + 2 * 0-. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Occurrence switches and redex switches} +The tactic: +\begin{lstlisting} + rewrite {2}[_ + y + 0](_: forall z, z + 0 = z). 
+\end{lstlisting} +transforms the goal: +\begin{lstlisting} + x + y + 0 = x + y + y + 0 + 0 + (x + y + 0) +\end{lstlisting} +into: +\begin{lstlisting} + x + y + 0 = x + y + y + 0 + 0 + (x + y) +\end{lstlisting} +and generates a second subgoal: +\begin{lstlisting} + forall z : nat, z + 0 = z +\end{lstlisting} +The second subgoal is generated by the use of an anonymous lemma in +the rewrite tactic. The effect of the tactic on the initial goal is to +rewrite this lemma at the second occurrence of the first matching +\ssrL-x + y + 0- of the explicit rewrite redex \ssrL-_ + y + 0-. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Occurrence selection and repetition} +Occurrence selection has priority over repetition switches. This means +the repetition of a rewrite tactic specified by a multiplier +will perform matching each time an elementary rewrite operation is +performed. Repeated rewrite tactics apply to every subgoal generated +by the previous tactic, including the previous instances of the +repetition. For example: +\begin{lstlisting} + Goal forall x y z : nat, x + 1 = x + y + 1. + move=> x y z. +\end{lstlisting} +creates a goal \ssrC{ x + 1 = x + y + 1}, which is turned into \ssrC{z = z} +by the additional tactic: +\begin{lstlisting} + rewrite 2!(_ : _ + 1 = z). +\end{lstlisting} +In fact, this last tactic generates \emph{three} subgoals, +respectively +\ssrC{ x + y + 1 = z}, \ssrC{ z = z} and \ssrC{x + 1 = z}. Indeed, the second +rewrite operation specified with the \ssrC{2!} multiplier applies to +the two subgoals generated by the first rewrite. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Multi-rule rewriting} +The \ssrC{rewrite} tactic can be provided a \emph{tuple} of rewrite rules, +or more generally a tree of such rules, since this tuple can feature +arbitrary inner parentheses. We call \emph{multirule} such a +generalized rewrite rule. 
This feature is of special interest when it +is combined with multiplier switches, which makes the \ssrC{rewrite} +tactic iterates the rewrite operations prescribed by the rules on the +current goal. For instance, let us define two triples \ssrC{multi1} and +\ssrC{multi2} as: +\begin{lstlisting} + Variables (a b c : nat). + + Hypothesis eqab : a = b. + + Hypothesis eqac : a = c. +\end{lstlisting} + +Executing the tactic: +\begin{lstlisting} + rewrite (eqab, eqac) +\end{lstlisting} +on the goal: +\begin{lstlisting} + ========= + a = a +\end{lstlisting} +turns it into \ssrC{b = b}, as rule \ssrC{eqab} is the first to apply among +the ones gathered in the tuple passed to the \ssrC{rewrite} +tactic. This multirule \ssrC{(eqab, eqac)} is actually a \Coq{} term and we +can name it with a definition: +\begin{lstlisting} +Definition |*multi1*| := (eqab, eqac). +\end{lstlisting} +In this case, the tactic \ssrC{rewrite multi1} is a synonym for +\ssrC{(eqab, eqac)}. More precisely, a multirule rewrites +the first subterm to which one of the rules applies in a left-to-right +traversal of the goal, with the first rule from the multirule tree in +left-to-right order. Matching is performed according to the algorithm +described in Section~\ref{ssec:set}, but literal matches have +priority. For instance if we add a definition and a new multirule to +our context: + +\begin{lstlisting} + Definition |*d*| := a. + + Hypotheses eqd0 : d = 0. + + Definition |*multi2*| := (eqab, eqd0). +\end{lstlisting} +then executing the tactic: +\begin{lstlisting} + rewrite multi2. +\end{lstlisting} +on the goal: +\begin{lstlisting} + ========= + d = b +\end{lstlisting} +turns it into \ssrC{0 = b}, as rule \ssrC{eqd0} applies without unfolding +the definition of \ssrC{d}. For repeated rewrites the selection process +is repeated anew. For instance, if we define: + +\begin{lstlisting} + Hypothesis eq_adda_b : forall x, x + a = b. + + Hypothesis eq_adda_c : forall x, x + a = c. 
+ + Hypothesis eqb0 : b = 0. + + Definition |*multi3*| := (eq_adda_b, eq_adda_c, eqb0). +\end{lstlisting} +then executing the tactic: +\begin{lstlisting} + rewrite 2!multi3. +\end{lstlisting} +on the goal: +\begin{lstlisting} + ========= + 1 + a = 12 + a +\end{lstlisting} +turns it into \ssrC{0 = 12 + a}: it uses \ssrC{eq_adda_b} then \ssrC{eqb0} on the +left-hand side only. Now executing the tactic \ssrC{rewrite !multi3} +turns the same goal into \ssrC{0 = 0}. + +The grouping of rules inside a multirule does not affect the selection +strategy but can make it easier to include one rule set in another or +to (universally) quantify over the parameters of a subset of rules (as +there is special code that will omit unnecessary quantifiers for rules that +can be syntactically extracted). It is also possible to +reverse the direction of a rule subset, using a special dedicated syntax: +the tactic \ssrC{rewrite (=~ multi1)} is equivalent to +\ssrC{rewrite multi1_rev} with: +\begin{lstlisting} + Hypothesis eqba : b = a. + + Hypothesis eqca : c = a. + + Definition |*multi1_rev*| := (eqba, eqca). +\end{lstlisting} +except that the constants \ssrC{eqba, eqca, multi1_rev} have not been created. + +Rewriting with multirules +is useful to implement simplification or transformation +procedures, to be applied on terms of small to medium size. For +instance the library \ssrL{ssrnat} provides two implementations for +arithmetic operations on natural numbers: an elementary one and a tail +recursive version, more efficient but also less convenient for +reasoning purposes. The library also provides one lemma per such +operation, stating that both versions return the same values when +applied to the same arguments: + +\begin{lstlisting} + Lemma |*addE*| : add =2 addn. + Lemma |*doubleE*| : double =1 doublen. + Lemma |*add_mulE*| n m s : add_mul n m s = addn (muln n m) s. + Lemma |*mulE*| : mul =2 muln. + Lemma |*mul_expE*| m n p : mul_exp m n p = muln (expn m n) p. 
+ Lemma |*expE*| : exp =2 expn. + Lemma |*oddE*| : odd =1 oddn. +\end{lstlisting} + +The operation on the left hand side of each lemma is the efficient +version, and the corresponding naive implementation is on the right +hand side. In order to reason conveniently on expressions involving +the efficient operations, we gather all these rules in the +definition \ssrC{|*trecE*|}: +\begin{lstlisting} + Definition |*trecE*| := (addE, (doubleE, oddE), (mulE, add_mulE, (expE, mul_expE))). +\end{lstlisting} +The tactic: +\begin{lstlisting} + rewrite !trecE. +\end{lstlisting} +restores the naive versions of each operation in a goal involving the +efficient ones, e.g. for the purpose of a correctness proof. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Wildcards vs abstractions} + The \ssrC{rewrite} tactic supports r-items containing holes. For example + in the tactic $(1)$: +\begin{lstlisting} + rewrite (_ : _ * 0 = 0). +\end{lstlisting} + the term \ssrC{_ * 0 = 0} is interpreted as \ssrC{forall n : nat, n * 0 = 0}. + Anyway this tactic is \emph{not} equivalent to the tactic $(2)$: +\begin{lstlisting} + rewrite (_ : forall x, x * 0 = 0). +\end{lstlisting} + The tactic $(1)$ transforms the goal + \ssrL-(y * 0) + y * (z * 0) = 0- into \ssrC{y * (z * 0) = 0} + and generates a new subgoal to prove the statement \ssrC{y * 0 = 0}, + which is the \emph{instance} of the\\ \ssrC{forall x, x * 0 = 0} + rewrite rule that + has been used to perform the rewriting. On the other hand, tactic + $(2)$ performs the same rewriting on the current goal but generates a + subgoal to prove \ssrC{forall x, x * 0 = 0}. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{When \ssr{} \ssrC{rewrite} fails on standard \Coq{} licit rewrite} +In a few cases, the \ssr{} \ssrC{rewrite} tactic fails +rewriting some redexes which standard \Coq{} successfully rewrites. 
+There are two main cases: + +\begin{itemize} +\item \ssr{} never accepts to rewrite indeterminate patterns like: +\begin{lstlisting} + Lemma |*foo*| : forall x : unit, x = tt. +\end{lstlisting} +\ssr{} will however accept the $\eta\zeta$ expansion of this rule: +\begin{lstlisting} + Lemma |*fubar*| : forall x : unit, (let u := x in u) = tt. +\end{lstlisting} +\item In standard \Coq{}, suppose that we work in the following context: +\begin{lstlisting} + Variable g : nat -> nat. + Definition |*f*| := g. +\end{lstlisting} +then rewriting \ssrC{H : forall x, f x = 0} in the goal +\ssrC{g 3 + g 3 = g 6} succeeds +and transforms the goal into \ssrC{0 + 0 = g 6}. + +This rewriting is not possible in \ssr{} because there is no +occurrence of the head symbol \ssrC{f} of the rewrite rule in the +goal. Rewriting with \ssrC{H} first requires unfolding the occurrences of +\ssrC{f} where the substitution is to be performed (here there is a single +such occurrence), using tactic \ssrC{rewrite /f} (for a global +replacement of \ssrC{f} by \ssrC{g}) or \ssrC{rewrite $\ \ssrN{pattern}$/f}, for a +finer selection. +\end{itemize} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Existential metavariables and rewriting} +\label{ssec:rewcaveats} +The \ssrC{rewrite} tactic will not instantiate existing existential +metavariables when matching a redex pattern. + +If a rewrite rule generates a goal +with new existential metavariables, these will be generalized as for \ssrC{apply} +(see page~\pageref{sssec:apply}) and corresponding new goals will be generated. +For example, consider the following script: + +\begin{lstlisting} + Lemma |*ex3*| (x : 'I_2) y (le_1 : y < 1) (E : val x = y) : Some x = insub y. + rewrite insubT ?(leq_trans le_1)// => le_2. 
+\end{lstlisting} + +Since \ssrC{insubT} has the following type: + +\begin{lstlisting} + forall T P (sT : subType P) (x : T) (Px : P x), insub x = Some (Sub x Px) +\end{lstlisting} + +and since the implicit argument corresponding to the \ssrC{Px} abstraction is not +supplied by the user, the resulting goal should be \ssrC{Some x = Some (Sub y +$\;\;?_{Px}$)}. Instead, \ssr{} \ssrC{rewrite} tactic generates the two following +goals: +\begin{lstlisting} + y < 2 + forall Hyp0 : y < 2, Some x = Some (Sub y Hyp0) +\end{lstlisting} +The script closes the former with \ssrC{?(leq_trans le_1)//}, then it introduces +the new generalization naming it \ssrC{le_2}. + +\begin{lstlisting} + x : 'I_2 + y : nat + le_1 : y < 1 + E : val x = y + le_2 : y < 2 + ============================ + Some x = Some (Sub y le_2) +\end{lstlisting} + +As a temporary limitation, this behavior is available only if the rewriting +rule is stated using Leibniz equality (as opposed to setoid relations). +It will be extended to other rewriting relations in the future. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Locking, unlocking} \label{ssec:lock} + +As program proofs tend to generate large goals, it is important to be +able to control the partial evaluation performed by the simplification +operations that are performed by the tactics. These evaluations can +for example come from a \ssrC{/=} simplification switch, or from rewrite steps +which may expand large terms while performing conversion. We definitely +want to avoid repeating large subterms of the goal in +the proof script. We do this by +``clamping down'' selected function symbols in the goal, which +prevents them from +being considered in simplification or rewriting steps. This clamping +is accomplished by using the occurrence switches (see section +\ref{sssec:occselect}) together with ``term tagging'' operations. + +\ssr{} provides two levels of tagging. 
+ +The first one uses auxiliary definitions to introduce a provably equal +copy of any term \ssrC{t}. However this copy is (on purpose) +\emph{not convertible} to \ssrC{t} in the \Coq{} system\footnote{This is + an implementation feature: there is no such obstruction in the + metatheory}. The job is done by the following construction: + +\begin{lstlisting} + Lemma |*master_key*| : unit. Proof. exact tt. Qed. + Definition |*locked*| A := let: tt := master_key in fun x : A => x. + Lemma |*lock*| : forall A x, x = locked x :> A. +\end{lstlisting} +Note that the definition of \ssrC{|*master_key*|} is explicitly opaque. +The equation \ssrC{t = locked t} given by the \ssrC{lock} lemma can be used +for selective rewriting, blocking on the fly the reduction in the +term \ssrC{t}. +For example the script: +\begin{lstlisting} + Require Import List. + Variable A : Type. + + Fixpoint |*my_has*| (p : A -> bool)(l : list A){struct l} : bool:= + match l with + |nil => false + |cons x l => p x || (my_has p l) + end. + + Goal forall a x y l, a x = true -> my_has a ( x :: y :: l) = true. + move=> a x y l Hax. +\end{lstlisting} +where \ssrL{||} denotes the boolean disjunction, results in a goal +\ssrC{my_has a ( x :: y :: l) = true}. The tactic: +\begin{lstlisting} + rewrite {2}[cons]lock /= -lock. +\end{lstlisting} +turns it into \ssrC{a x || my_has a (y :: l) = true}. +Let us now start by reducing the initial goal without blocking reduction. +The script: +\begin{lstlisting} + Goal forall a x y l, a x = true -> my_has a ( x :: y :: l) = true. + move=> a x y l Hax /=. +\end{lstlisting} +creates a goal \ssrC{(a x) || (a y) || (my_has a l) = true}. Now the +tactic: +\begin{lstlisting} + rewrite {1}[orb]lock orbC -lock. +\end{lstlisting} +where \ssrC{orbC} states the commutativity of \ssrC{orb}, changes the +goal into\\ \ssrC{(a x) || (my_has a l) || (a y) = true}: only the +arguments of the second disjunction were permuted. 
+ + +It is sometimes desirable to globally prevent a definition from being +expanded by simplification; this is done by adding \ssrC{locked} in the +definition. + +For instance, the function \ssrC{|*fgraph_of_fun*|} maps a function whose +domain and codomain are finite types to a concrete representation of +its (finite) graph. Whatever implementation of this transformation we +may use, we want it to be hidden to simplifications and tactics, to +avoid the collapse of the graph object: +\begin{lstlisting} + Definition |*fgraph_of_fun*| := + locked + (fun (d1 :finType) (d2 :eqType) (f : d1 -> d2) => Fgraph (size_maps f _)). +\end{lstlisting} + +We provide a special tactic \ssrC{unlock} for unfolding such definitions +while removing ``locks'', e.g., the tactic: + + \ssrC{unlock} \ssrN{occ-switch}\ssrC{fgraph_of_fun}. + +replaces the occurrence(s) of \ssrC{fgraph_of_fun} coded by the \ssrN{occ-switch} +with \ssrC{(Fgraph (size_maps _ _))} in the goal. + +We found that it was usually preferable to prevent the expansion of +some functions by the partial evaluation switch ``/='', unless +this allowed the evaluation of a condition. This is possible thanks to +an other mechanism of term tagging, resting on the following +\emph{Notation}: +\begin{lstlisting} + Notation "'nosimpl' t" := (let: tt := tt in t). +\end{lstlisting} + +The term \ssrC{(nosimpl t)} simplifies to t \emph{except} in a +definition. More precisely, +given: +\begin{lstlisting} + Definition |*foo*| := (nosimpl bar). +\end{lstlisting} +the term \ssrC{foo (or (foo t'))} will \emph{not} be expanded by the +\emph{simpl} tactic unless it is in a forcing context (e.g., in +\ssrC{match foo t' with $\dots$ end}, \ssrC{foo t'} will be reduced if this allows +\ssrC{match} to be reduced). Note that \ssrC{nosimpl bar} is simply notation +for a term that reduces to \ssrC{bar}; hence \ssrC{unfold foo} will replace + \ssrC{foo} by \ssrC{bar}, and \ssrC{fold foo} will replace \ssrC{bar} by + \ssrC{foo}. 
+ +\emph{Warning} The \ssrC{nosimpl} trick only works if no reduction is +apparent in \ssrC{t}; in particular, the declaration: +\begin{lstlisting} + Definition |*foo*| x := nosimpl (bar x). +\end{lstlisting} +will usually not work. Anyway, the common practice is to tag only the +function, and to use the following definition, which blocks the +reduction as expected: +\begin{lstlisting} + Definition |*foo*| x := nosimpl bar x. +\end{lstlisting} + + +A standard example making this technique shine is the case of +arithmetic operations. We define for instance: +\begin{lstlisting} + Definition |*addn*| := nosimpl plus. +\end{lstlisting} +The operation \ssrC{addn} behaves exactly like plus, except that +\ssrC{(addn (S n) m)} will not +simplify spontaneously to \ssrC{(S (addn n m))} (the two terms, however, are +inter-convertible). In addition, the unfolding step: +\begin{lstlisting} +rewrite /addn +\end{lstlisting} +will replace \ssrC{addn} directly with \ssrC{plus}, so the \ssrC{nosimpl} form +is essentially invisible. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Congruence}\label{ssec:congr} + +Because of the way matching interferes with type families parameters, +the tactic: +\begin{lstlisting} + apply: my_congr_property. +\end{lstlisting} +will generally fail to perform congruence simplification, even on +rather simple cases. We therefore provide a +more robust alternative in which the function is supplied: +$$\ssrC{congr}\ [\ssrN{int}]\ {\term}$$ + +This tactic: +\begin{itemize} +\item checks that the goal is a Leibniz equality +\item matches both sides of this equality with ``{\term} applied to + some arguments'', inferring the right number of arguments from the goal + and the type of {\term}. This may + expand some definitions or fixpoints. +\item generates the subgoals corresponding to pairwise equalities of + the arguments present in the goal. +\end{itemize} + +The goal can be a non dependent product \ssrC{P -> Q}. 
+In that case, the system asserts the equation \ssrC{P = Q}, uses it to solve +the goal, and calls the \ssrC{congr} tactic on the remaining goal +\ssrC{P = Q}. This can be useful for instance to perform a transitivity +step, like in the following situation: +\begin{lstlisting} + x, y, z : nat + =============== + x = y -> x = z +\end{lstlisting} +the tactic \ssrC{congr (_ = _)} turns this goal into: + +\begin{lstlisting} + x, y, z : nat + =============== + y = z +\end{lstlisting} +which can also be obtained starting from: +\begin{lstlisting} + x, y, z : nat + h : x = y + =============== + x = z +\end{lstlisting} +and using the tactic \ssrC{congr (_ = _): h}. + +The optional \ssrN{int} forces the number of arguments for which the +tactic should generate equality proof obligations. + +This tactic supports equalities between applications with dependent +arguments. Yet dependent arguments should have exactly the same +parameters on both sides, and these parameters should appear as first +arguments. + +The following script: +\begin{lstlisting} + Definition f n := match n with 0 => plus | S _ => mult end. + Definition g (n m : nat) := plus. + + Goal forall x y, f 0 x y = g 1 1 x y. + by move=> x y; congr plus. + Qed. +\end{lstlisting} +shows that the \ssrC{congr} tactic matches \ssrC{plus} with \ssrC{f 0} on the +left hand side and \ssrC{g 1 1} on the right hand side, and solves the goal. + +The script: +\begin{lstlisting} + Goal forall n m, m <= n -> S m + (S n - S m) = S n. + move=> n m Hnm; congr S; rewrite -/plus. +\end{lstlisting} +generates the subgoal \ssrC{m + (S n - S m) = n}. The tactic +\ssrC{rewrite -/plus} folds back the expansion of \ssrC{plus} which was +necessary for matching both sides of the equality with an application +of \ssrC{S}. + +Like most \ssr{} arguments, {\term} can contain wildcards. +The script: +\begin{lstlisting} + Goal forall x y, x + (y * (y + x - x)) = x * 1 + (y + 0) * y. + move=> x y; congr ( _ + (_ * _)). 
+\end{lstlisting} +generates three subgoals, respectively \ssrC{x = x * 1}, \ssrC{y = y + 0} +and \ssrC{ y + x - x = y}. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Contextual patterns} +\label{ssec:rewp} + +The simple form of patterns used so far, ${\term}s$ possibly containing +wild cards, often require an additional \ssrN{occ-switch} to be specified. +While this may work pretty fine for small goals, the use of polymorphic +functions and dependent types may lead to an invisible duplication of functions +arguments. These copies usually end up in types hidden by the implicit +arguments machinery or by user defined notations. In these situations +computing the right occurrence numbers is very tedious because they must be +counted on the goal as printed after setting the \ssrC{Printing All} flag. +Moreover the resulting script is not really informative for the reader, since +it refers to occurrence numbers he cannot easily see. + +Contextual patterns mitigate these issues allowing to specify occurrences +according to the context they occur in. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Syntax} + +The following table summarizes the full syntax of +\ssrN{c-pattern} and the corresponding subterm(s) identified +by the pattern. +In the third column we use s.m.r. for +``the subterms matching the redex'' specified in the second column. + +\begin{center} +%\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.21\textwidth}|>{\arraybackslash}m{0.39\textwidth}} +\begin{tabular}{llp{10em}} +\ssrN{c-pattern} & redex & subterms affected \\ +\hline +{\term} & {\term} & all occurrences of {\term}\\ +\hline +$\ssrN{ident}\ \ssrC{in}\ {\term}$ & + subterm of {\term} selected by \ssrN{ident} & + all the subterms identified by \ssrN{ident} in all + the occurrences of {\term} \\ +\hline +$\ssrN[1]{term}\ \ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ & $\ssrN[1]{term}$ & + in all s.m.r. 
in all the subterms identified by \ssrN{ident} in all + the occurrences of $\ssrN[2]{term}$ \\ +\hline +$\ssrN[1]{term}\ \ssrC{as}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ & $\ssrN[1]{term}$ & + in all the subterms identified by \ssrN{ident} in all + the occurrences of $\ssrN[2]{term}[\ssrN[1]{term}/\ssrN{ident}]$\\ +\hline +%\end{tabularx} +\end{tabular} +\end{center} + +The \ssrC{rewrite} tactic supports two more patterns obtained +prefixing the first two with \ssrC{in}. The intended meaning is that the +pattern identifies all subterms of the specified context. The +\ssrC{rewrite} tactic will infer a pattern for the redex looking at the +rule used for rewriting. + +\begin{center} +\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.21\textwidth}|>{\arraybackslash}m{0.39\textwidth}} +\ssrN{r-pattern} & redex & subterms affected \\ +\hline +$\ssrC{in}\ {\term}$ & inferred from rule & + in all s.m.r. in all occurrences of {\term}\\ +\hline +$\ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ {\term}$ & inferred from rule & + in all s.m.r. in all the subterms identified by \ssrN{ident} in all + the occurrences of {\term} \\ +\hline +\end{tabularx} +\end{center} + +The first \ssrN{c-pattern} is the simplest form matching any +context but selecting a specific redex and has been described in the +previous sections. We have seen so far that the possibility of +selecting a redex using a term with holes is already a powerful mean of redex +selection. Similarly, any {\term}s provided by the +user in the more complex forms of \ssrN{c-pattern}s presented in the +tables above can contain holes. + +For a quick glance at what can be expressed with the last +\ssrN{r-pattern} consider the goal \ssrC{a = b} and the tactic +\begin{lstlisting} + rewrite [in X in _ = X]rule. +\end{lstlisting} +It rewrites all occurrences of the left hand side of \ssrC{rule} inside +\ssrC{b} only (\ssrC{a}, and the hidden type of the equality, are ignored). 
+Note that the variant \ssrC{rewrite [X in _ = X]rule} would have +rewritten \ssrC{b} exactly (i.e., it would only work if \ssrC{b} and the +left hand side of \ssrC{rule} can be unified). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Matching contextual patterns} + +The \ssrN{c-pattern}s and \ssrN{r-pattern}s involving +{\term}s with holes are matched +against the goal in order to find a closed instantiation. This +matching proceeds as follows: + +\begin{center} +\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.65\textwidth}} +\ssrN{c-pattern} & instantiation order and place for $\ssrN[i]{term}$ and redex\\ +\hline +{\term} & {\term} is matched against the goal, redex is unified with + the instantiation of {\term}\\ +\hline +$\ssrN{ident}\ \ssrC{in}\ {\term}$ & + {\term} is matched against the goal, redex is + unified with the subterm of the + instantiation of {\term} identified by \ssrN{ident}\\ +\hline +$\ssrN[1]{term}\ \ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ & + $\ssrN[2]{term}$ is matched against the goal, $\ssrN[1]{term}$ is + matched against the subterm of the + instantiation of $\ssrN[1]{term}$ identified by \ssrN{ident}, + redex is unified with the instantiation of $\ssrN[1]{term}$\\ +\hline +$\ssrN[1]{term}\ \ssrC{as}\ \ssrN{ident}\ \ssrC{in}\ \ssrN[2]{term}$ & + $\ssrN[2]{term}[\ssrN[1]{term}/\ssrN{ident}]$ + is matched against the goal, + redex is unified with the instantiation of $\ssrN[1]{term}$\\ +\hline +\end{tabularx} +\end{center} + +In the following patterns, the redex is intended to be inferred from the +rewrite rule. 
+ +\begin{center} +\begin{tabularx}{\textwidth}{>{\arraybackslash}m{0.30\textwidth}|>{\arraybackslash}m{0.65\textwidth}} +\ssrN{r-pattern} & instantiation order and place for $\ssrN[i]{term}$ and redex\\ +\hline +$\ssrC{in}\ \ssrN{ident}\ \ssrC{in}\ {\term}$ & + {\term} is matched against the goal, the redex is + matched against the subterm of the + instantiation of {\term} identified by \ssrN{ident}\\ +\hline +$\ssrC{in}\ {\term}$ & {\term} is matched against the goal, redex is + matched against the instantiation of {\term}\\ +\hline +\end{tabularx} +\end{center} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Examples} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection{Contextual pattern in \ssrC{set} and the \ssrC{:} tactical} + +As already mentioned in section~\ref{ssec:set} the \ssrC{set} tactic +takes as an argument a term in open syntax. This term is interpreted +as the simplest form of \ssrN{c-pattern}. To avoid confusion in the grammar, +open syntax is supported only for the simplest form of patterns, while + parentheses are required around more complex patterns. + +\begin{lstlisting} +set t := (X in _ = X). +set t := (a + _ in X in _ = X). +\end{lstlisting} + +Given the goal \ssrC{a + b + 1 = b + (a + 1)} the first tactic +captures \ssrC{b + (a + 1)}, while the latter \ssrC{a + 1}. + +Since the user may define an infix notation for \ssrC{in} the former +tactic may be ambiguous. The disambiguation rule implemented is +to prefer patterns over simple terms, but to interpret a pattern with +double parentheses as a simple term. For example +the following tactic would capture any occurrence of the term `\ssrC{a in A}'. + +\begin{lstlisting} +set t := ((a in A)). +\end{lstlisting} + +Contextual patterns can also be used as arguments of the \ssrC{:} tactical. +For example: +\begin{lstlisting} +elim: n (n in _ = n) (refl_equal n). 
+\end{lstlisting} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection{Contextual patterns in \ssrC{rewrite}} +As a more comprehensive example consider the following goal: +\begin{lstlisting} + (x.+1 + y) + f (x.+1 + y) (z + (x + y).+1) = 0 +\end{lstlisting} +The tactic \ssrC{rewrite [in f _ _]addSn} turns it into: +\begin{lstlisting} + (x.+1 + y) + f (x + y).+1 (z + (x + y).+1) = 0 +\end{lstlisting} +since the simplification rule \ssrC{addSn} is applied only under the \ssrC{f} symbol. +Then we simplify also the first addition and expand \ssrC{0} into \ssrC{0+0}. +\begin{lstlisting} + rewrite addSn -[X in _ = X]addn0. +\end{lstlisting} +obtaining: +\begin{lstlisting} + (x + y).+1 + f (x + y).+1 (z + (x + y).+1) = 0 + 0 +\end{lstlisting} +Note that the right hand side of \ssrC{addn0} is undetermined, but the +rewrite pattern specifies the redex explicitly. The right hand side of +\ssrC{addn0} is unified with the term identified by \ssrC{X}, \ssrC{0} here. + +The following pattern does not specify a redex, since it +identifies an entire region, hence the rewrite rule has to be instantiated +explicitly. Thus the tactic: +\begin{lstlisting} + rewrite -{2}[in X in _ = X](addn0 0). +\end{lstlisting} +changes the goal as follows: +\begin{lstlisting} + (x + y).+1 + f (x + y).+1 (z + (x + y).+1) = 0 + (0 + 0) +\end{lstlisting} +The following tactic is quite tricky: +\begin{lstlisting} + rewrite [_.+1 in X in f _ X](addnC x.+1). +\end{lstlisting} +and the resulting goals is: +\begin{lstlisting} + (x + y).+1 + f (x + y).+1 (z + (y + x.+1)) = 0 + (0 + 0) +\end{lstlisting} +The explicit redex \ssrC{_.+1} is important since its head +constant \ssrC{S} differs from the head constant inferred from +\ssrC{(addnC x.+1)} (that is \ssrC{addn}, denoted \ssrC{+} here). +Moreover, the pattern \ssrC{f _ X} is important to rule out the first occurrence +of \ssrC{(x + y).+1}. 
Last, only the subterms of \ssrC{f _ X} identified by \ssrC{X} are +rewritten, thus the first argument of \ssrC{f} is skipped too. +Also note the pattern \ssrC{_.+1} is interpreted in the context +identified by \ssrC{X}, thus it gets instantiated to \ssrC{(y + x).+1} and +not \ssrC{(x + y).+1}. + +The last rewrite pattern allows to specify exactly the shape of the term +identified by \ssrC{X}, that is thus unified with the left hand side of the +rewrite rule. +\begin{lstlisting} + rewrite [x.+1 + y as X in f X _]addnC. +\end{lstlisting} +The resulting goal is: +\begin{lstlisting} + (x + y).+1 + f (y + x.+1) (z + (y + x.+1)) = 0 + (0 + 0) +\end{lstlisting} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Patterns for recurrent contexts} + +The user can define shortcuts for recurrent contexts corresponding to the +\ssrN{ident} \ssrC{in} {\term} part. The notation scope identified +with \ssrC{\%pattern} provides a special notation `\ssrC{(X in t)}' the user +must adopt to define context shortcuts. + +The following example is taken from \ssrC{ssreflect.v} where the +\ssrC{LHS} and \ssrC{RHS} shortcuts are defined. + +\begin{lstlisting} +Notation RHS := (X in _ = X)%pattern. +Notation LHS := (X in X = _)%pattern. +\end{lstlisting} + +Shortcuts defined this way can be freely used in place of the +trailing \ssrN{ident} \ssrC{in} {\term} part of any contextual +pattern. +Some examples follow: + +\begin{lstlisting} +set rhs := RHS. +rewrite [in RHS]rule. +case: (a + _ in RHS). +\end{lstlisting} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Views and reflection}\label{sec:views} + +The bookkeeping facilities presented in section \ref{sec:book} are +crafted to ease simultaneous introductions and generalizations of facts and +casing, +naming $\dots$ operations. 
It is also a common practice to make a stack +operation immediately followed by an \emph{interpretation} of the fact +being pushed, +that is, to apply a lemma to this fact before passing it +to a tactic for decomposition, application and so on. + + +% possibly + +% Small scale reflection consists in using a two levels +% approach locally when developing formal proofs. This means that a +% fact, which may be an assumption, or the goal itself, will often be +% \emph{interpreted} before being passed to a tactic +% for decomposition, application and so on. + +\ssr{} provides a convenient, unified syntax to combine these +interpretation operations with the proof stack operations. This +\emph{view mechanism} relies on the combination of the \ssrC{/} view +switch with bookkeeping tactics and tacticals. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Interpreting eliminations} +\idx{elim/\dots{}} + +The view syntax combined with the \ssrC{elim} tactic specifies an +elimination scheme to +be used instead of the default, generated, one. Hence the \ssr{} tactic: +\begin{lstlisting} + elim/V. +\end{lstlisting} +is a synonym for: +\begin{lstlisting} + intro top; elim top using V; clear top. +\end{lstlisting} +where \ssrC{top} is a fresh name and \ssrC{V} any second-order lemma. + +Since an elimination view supports the two bookkeeping tacticals of +discharge and introduction (see section \ref{sec:book}), the \ssr{} tactic: +\begin{lstlisting} + elim/V: x => y. +\end{lstlisting} +is a synonym for: +\begin{lstlisting} + elim x using V; clear x; intro y. +\end{lstlisting} +where \ssrC{x} is a variable in the context, \ssrC{y} a fresh name and \ssrC{V} +any second-order lemma; \ssr{} relaxes the syntactic restrictions of +the \Coq{} \ssrC{elim}. 
The first pattern following \ssrC{:} can be a \ssrC{_} +wildcard if the conclusion of the view \ssrC{V} specifies a pattern for +its last argument (e.g., if \ssrC{V} is a functional induction lemma +generated by the \ssrC{Function} command). + +The elimination view mechanism is compatible with the equation name +generation (see section \ref{ssec:equations}). + +The following script illustrate a toy example of this feature. Let us +define a function adding an element at the end of a list: +\begin{lstlisting} + Require Import List. + + Variable d : Type. + + Fixpoint |*add_last*|(s : list d) (z : d) {struct s} : list d := + match s with + | nil => z :: nil + | cons x s' => cons x (add_last s' z) + end. +\end{lstlisting} + +One can define an alternative, reversed, induction principle on inductively +defined \ssrC{list}s, by proving the following lemma: + +\begin{lstlisting} + Lemma |*last_ind_list*| : forall (P : list d -> Type), + P nil -> + (forall (s : list d) (x : d), P s -> P (add_last s x)) -> forall s : list d, P s. +\end{lstlisting} + +Then the combination of elimination views with equation names result +in a concise syntax for reasoning inductively using the user +defined elimination scheme. The script: +\begin{lstlisting} + Goal forall (x : d)(l : list d), l = l. + move=> x l. + elim/last_ind_list E : l=> [| u v]; last first. +\end{lstlisting} +generates two subgoals: the first one to prove \ssrC{nil = nil} in a +context featuring \ssrC{E : l = nil} and the second to prove +\ssrC{add_last u v = add_last u v}, in a context containing +\ssrC{E : l = add_last u v}. + +User provided eliminators (potentially generated with the +\ssrC{Function} \Coq{}'s command) can be combined with the type family switches +described in section~\ref{ssec:typefam}. 
Consider an eliminator +\ssrC{foo_ind} of type: + + \ssrC{foo_ind : forall $\dots$, forall x : T, P p$_1$ $\dots$ p$_m$} + +and consider the tactic + + \ssrC{elim/foo_ind: e$_1$ $\dots$ / e$_n$} + +The \ssrC{elim/} tactic distinguishes two cases: +\begin{description} +\item[truncated eliminator] when \ssrC{x} does not occur in \ssrC{P p$_1 \dots$ p$_m$} + and the type of \ssrC{e$_n$} unifies with \ssrC{T} and \ssrC{e$_n$} is not \ssrC{_}. + In that case, \ssrC{e$_n$} is passed to the eliminator as the last argument + (\ssrC{x} in \ssrC{foo_ind}) and \ssrC{e$_{n-1} \dots$ e$_1$} are used as patterns + to select in the goal the occurrences that will be bound by the + predicate \ssrC{P}, thus it must be possible to unify the sub-term of + the goal matched by \ssrC{e$_{n-1}$} with \ssrC{p$_m$}, the one matched by + \ssrC{e$_{n-2}$} with \ssrC{p$_{m-1}$} and so on. +\item[regular eliminator] in all the other cases. Here it must be + possible to unify the term matched by + \ssrC{e$_n$} with \ssrC{p$_m$}, the one matched by + \ssrC{e$_{n-1}$} with \ssrC{p$_{m-1}$} and so on. Note that + standard eliminators have the shape \ssrC{$\dots$forall x, P $\dots$ x}, thus + \ssrC{e$_n$} is the pattern identifying the eliminated term, as expected. +\end{description} +As explained in section~\ref{ssec:typefam}, the initial prefix of +\ssrC{e$_i$} can be omitted. + +Here an example of a regular, but non trivial, eliminator: +\begin{lstlisting} + Function |*plus*| (m n : nat) {struct n} : nat := + match n with 0 => m | S p => S (plus m p) end. +\end{lstlisting} +The type of \ssrC{plus_ind} is +\begin{lstlisting} +plus_ind : forall (m : nat) (P : nat -> nat -> Prop), + (forall n : nat, n = 0 -> P 0 m) -> + (forall n p : nat, n = p.+1 -> P p (plus m p) -> P p.+1 (plus m p).+1) -> + forall n : nat, P n (plus m n) +\end{lstlisting} +Consider the following goal +\begin{lstlisting} + Lemma |*exF*| x y z: plus (plus x y) z = plus x (plus y z). 
+\end{lstlisting} +The following tactics are all valid and perform the same elimination +on that goal. +\begin{lstlisting} + elim/plus_ind: z / (plus _ z). + elim/plus_ind: {z}(plus _ z). + elim/plus_ind: {z}_. + elim/plus_ind: z / _. +\end{lstlisting} +In the two latter examples, since the user provided pattern is a wildcard, the +pattern inferred from the type of the eliminator is used instead. For both +cases it is \ssrC{(plus _ _)} and matches the subterm \ssrC{plus (plus x y)$\;$z} thus +instantiating the latter \ssrC{_} with \ssrC{z}. Note that the tactic +\ssrC{elim/plus_ind: y / _} would have resulted in an error, since \ssrC{y} and \ssrC{z} +do not unify but the type of the eliminator requires the second argument of +\ssrC{P} to be the same as the second argument of \ssrC{plus} in the second +argument of \ssrC{P}. + +Here is an example of a truncated eliminator. Consider the goal +\begin{lstlisting} + p : nat_eqType + n : nat + n_gt0 : 0 < n + pr_p : prime p + ================= + p %| \prod_(i <- prime_decomp n | i \in prime_decomp n) i.1 ^ i.2 -> + exists2 x : nat * nat, x \in prime_decomp n & p = x.1 +\end{lstlisting} +and the tactic +\begin{lstlisting} +elim/big_prop: _ => [| u v IHu IHv | [q e] /=]. +\end{lstlisting} +where the type of the eliminator is +\begin{lstlisting} +big_prop: forall (R : Type) (Pb : R -> Type) (idx : R) (op1 : R -> R -> R), + Pb idx -> + (forall x y : R, Pb x -> Pb y -> Pb (op1 x y)) -> + forall (I : Type) (r : seq I) (P : pred I) (F : I -> R), + (forall i : I, P i -> Pb (F i)) -> + Pb (\big[op1/idx]_(i <- r | P i) F i) +\end{lstlisting} +Since the pattern for the argument of \ssrC{Pb} is not specified, the inferred one +is used instead: \ssrC{(\\big[_/_]_(i <- _ | _ i) _ i)}, and after the +introductions, the following goals are generated. 
+ +\begin{lstlisting} +subgoal 1 is: + p %| 1 -> exists2 x : nat * nat, x \in prime_decomp n & p = x.1 +subgoal 2 is: + p %| u * v -> exists2 x : nat * nat, x \in prime_decomp n & p = x.1 +subgoal 3 is: + (q, e) \in prime_decomp n -> p %| q ^ e -> + exists2 x : nat * nat, x \in prime_decomp n & p = x.1 +\end{lstlisting} +Note that the pattern matching algorithm instantiated all the variables +occurring in the pattern. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Interpreting assumptions}\label{ssec:assumpinterp} +\idx{move/\dots{}} + +Interpreting an assumption in the context of a proof is applying a +correspondence lemma to it before generalizing, and/or decomposing it. +For instance, with the extensive use of boolean reflection (see +section \ref{ssec:boolrefl}), it is +quite frequent to need to decompose the logical interpretation of (the +boolean expression of) a +fact, rather than the fact itself. +This can be achieved by a combination of \ssrC{move : _ => _} +switches, like in the following script, where \ssrC{||} is a notation for +the boolean disjunction: +\begin{lstlisting} + Variables P Q : bool -> Prop. + Hypothesis |*P2Q*| : forall a b, P (a || b) -> Q a. + + Goal forall a, P (a || a) -> True. + move=> a HPa; move: {HPa}(P2Q _ _ HPa) => HQa. +\end{lstlisting} +which transforms the hypothesis \ssrC{HPa : P (a || a)} which has been +introduced from the initial statement into \ssrC{HQa : Q a}. +This operation is so common that the tactic shell has +specific syntax for it. +The following scripts: +\begin{lstlisting} + Goal forall a, P (a || a) -> True. + move=> a HPa; move/P2Q: HPa => HQa. +\end{lstlisting} +or more directly: +\begin{lstlisting} + Goal forall a, P (a || a) -> True. + move=> a; move/P2Q=> HQa. +\end{lstlisting} +are equivalent to the former one. 
The former script shows how to +interpret a fact (already in the context), thanks to the discharge +tactical (see section \ref{ssec:discharge}) and the latter, how to +interpret the top assumption of a goal. Note +that the number of wildcards to be inserted to find the correct +application of the view lemma to the hypothesis has been automatically +inferred. + +The view mechanism is compatible with the \ssrC{case} tactic and with the +equation name generation mechanism (see section \ref{ssec:equations}): +\begin{lstlisting} + Variables P Q: bool -> Prop. + Hypothesis |*Q2P*| : forall a b, Q (a || b) -> P a \/ P b. + + Goal forall a b, Q (a || b) -> True. + move=> a b; case/Q2P=> [HPa | HPb]. +\end{lstlisting} +creates two new subgoals whose contexts no more contain +\ssrC{HQ : Q (a || b)} but respectively \ssrC{HPa : P a} and +\ssrC{HPb : P b}. This view tactic +performs: +\begin{lstlisting} + move=> a b HQ; case: {HQ}(Q2P _ _ HQ) => [HPa | HPb]. +\end{lstlisting} + +The term on the right of the \ssrC{/} view switch is called a \emph{view + lemma}. Any \ssr{} term coercing to a product type can be used as a +view lemma. + + +The examples we have given so far explicitly provide the direction of the +translation to be performed. In fact, view lemmas need not to be +oriented. The view mechanism is able to detect which +application is relevant for the current goal. For instance, the +script: +\begin{lstlisting} + Variables P Q: bool -> Prop. + Hypothesis |*PQequiv*| : forall a b, P (a || b) <-> Q a. + + Goal forall a b, P (a || b) -> True. + move=> a b; move/PQequiv=> HQab. +\end{lstlisting} +has the same behavior as the first example above. + +The view mechanism can insert automatically a \emph{view hint} to +transform the double implication into the expected simple implication. +The last script is in fact equivalent to: +\begin{lstlisting} + Goal forall a b, P (a || b) -> True. + move=> a b; move/(iffLR (PQequiv _ _)). 
+\end{lstlisting} +where: +\begin{lstlisting} + Lemma |*iffLR*| : forall P Q, (P <-> Q) -> P -> Q. +\end{lstlisting} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Specializing assumptions} +\idx{move/\dots{}} + +The special case when the \emph{head symbol} of the view lemma is a +wildcard is used to interpret an assumption by \emph{specializing} +it. The view mechanism hence offers the possibility to +apply a higher-order assumption to some given arguments. + +For example, the script: +\begin{lstlisting} + Goal forall z, (forall x y, x + y = z -> z = x) -> z = 0. + move=> z; move/(_ 0 z). +\end{lstlisting} +changes the goal into: +\begin{lstlisting} + (0 + z = z -> z = 0) -> z = 0 +\end{lstlisting} + + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Interpreting goals}\label{ssec:goalinterp} + +In a similar way, it is also often convenient to interpret a goal by changing +it into an equivalent proposition. The view mechanism of \ssr{} has a +special syntax \ssrC{apply/} for combining simultaneous goal +interpretation operations and +bookkeeping steps in a single tactic. + +With the hypotheses of section \ref{ssec:assumpinterp}, the following +script, where \ssrL+~~+ denotes the boolean negation: +\begin{lstlisting} + Goal forall a, P ((~~ a) || a). + move=> a; apply/PQequiv. +\end{lstlisting} +transforms the goal into \ssrC{Q (~~ a)}, and is equivalent to: +\begin{lstlisting} + Goal forall a, P ((~~ a) || a). + move=> a; apply: (iffRL (PQequiv _ _)). +\end{lstlisting} +where \ssrC{iffLR} is the analogous of \ssrC{iffRL} for the converse +implication. + +Any \ssr{} term whose type coerces to a double implication can be used +as a view for goal interpretation. + +Note that the goal interpretation view mechanism supports both +\ssrC{apply} and \ssrC{exact} tactics. As expected, a goal interpretation +view command \ssrC{exact/$term$} should solve the current goal or it will +fail. 
+ + +\emph{Warning} Goal interpretation view tactics are \emph{not} compatible +with the bookkeeping tactical \ssrC{=>} since this would be redundant with +the \ssrC{apply:} {\term} \ssrC{=> _} construction. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Boolean reflection}\label{ssec:boolrefl} +In the Calculus of Inductive Constructions, there is +an obvious distinction between logical propositions and boolean values. +On the one hand, logical propositions are objects +of \emph{sort} \ssrC{Prop} which is the carrier of intuitionistic +reasoning. Logical connectives in \ssrC{Prop} are \emph{types}, which give precise +information on the structure of their proofs; this information is +automatically exploited by \Coq{} tactics. For example, \Coq{} knows that a +proof of \ssrL+A \/ B+ is either a proof of \ssrC{A} or a proof of \ssrC{B}. +The tactics \ssrC{left} and \ssrC{right} change the goal \ssrL+A \/ B+ +to \ssrC{A} and \ssrC{B}, respectively; dually, the tactic \ssrC{case} reduces the goal +\ssrL+A \/ B => G+ to two subgoals \ssrC{A => G} and \ssrC{B => G}. + +On the other hand, \ssrC{bool} is an inductive \emph{datatype} +with two constructors \ssrC{true} and \ssrC{false}. +Logical connectives on \ssrC{bool} are \emph{computable functions}, defined by +their truth tables, using case analysis: +\begin{lstlisting} + Definition (b1 || b2) := if b1 then true else b2. +\end{lstlisting} +Properties of such connectives are also established using case +analysis: the tactic \ssrC{by case: b} solves the goal +\begin{lstlisting} + b || ~~ b = true +\end{lstlisting} +by replacing \ssrC{b} first by \ssrC{true} and then by \ssrC{false}; in either case, +the resulting subgoal reduces by computation to the trivial +\ssrC{true = true}. + +Thus, \ssrC{Prop} and \ssrC{bool} are truly complementary: the former +supports robust natural deduction, the latter allows brute-force +evaluation. 
+\ssr{} supplies +a generic mechanism to have the best of the two worlds and move freely +from a propositional version of a +decidable predicate to its boolean version. + +First, booleans are injected into propositions +using the coercion mechanism: +\begin{lstlisting} + Coercion |*is_true*| (b : bool) := b = true. +\end{lstlisting} +This allows any boolean formula~\ssrC{b} to be used in a context +where \Coq{} would expect a proposition, e.g., after \ssrC{Lemma $\dots$ : }. +It is then interpreted as \ssrC{(is_true b)}, i.e., +the proposition \ssrC{b = true}. Coercions are elided by the pretty-printer, +so they are essentially transparent to the user. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{The \ssrC{reflect} predicate}\label{ssec:reflpred} + +To get all the benefits of the boolean reflection, it is in fact +convenient to introduce the following inductive predicate +\ssrC{reflect} to relate propositions and booleans: + +\begin{lstlisting} + Inductive |*reflect*| (P: Prop): bool -> Type := + | Reflect_true: P => reflect P true + | Reflect_false: ~P => reflect P false. +\end{lstlisting} + +The statement \ssrC{(reflect P b)} asserts that \ssrC{(is_true b)} +and \ssrC{P} are logically equivalent propositions. + +For instance, the following lemma: +\begin{lstlisting} + Lemma |*andP*|: forall b1 b2, reflect (b1 /\ b2) (b1 && b2). +\end{lstlisting} +relates the boolean conjunction \ssrC{&&} to +the logical one \ssrL+/\+. +Note that in \ssrC{andP}, \ssrC{b1} and \ssrC{b2} are two boolean variables and +the proposition \ssrL+b1 /\ b2+ hides two coercions. +The conjunction of \ssrC{b1} and \ssrC{b2} can then be viewed +as \ssrL+b1 /\ b2+ or as \ssrC{b1 && b2}. + + +Expressing logical equivalences through this family of inductive types +makes possible to take benefit from \emph{rewritable equations} +associated to the case analysis of \Coq{}'s inductive types. 
+ +Since the equivalence predicate is defined in \Coq{} as: +\begin{lstlisting} + Definition |*iff*| (A B:Prop) := (A -> B) /\ (B -> A). +\end{lstlisting} +where \ssrC{/\\} is a notation for \ssrC{and}: +\begin{lstlisting} + Inductive |*and*| (A B:Prop) : Prop := + conj : A -> B -> and A B +\end{lstlisting} + +This makes case analysis very different according to the way an +equivalence property has been defined. + + +For instance, if we have proved the lemma: +\begin{lstlisting} + Lemma |*andE*|: forall b1 b2, (b1 /\ b2) <-> (b1 && b2). +\end{lstlisting} +let us compare the respective behaviours of \ssrC{andE} and \ssrC{andP} on a +goal: +\begin{lstlisting} + Goal forall b1 b2, if (b1 && b2) then b1 else ~~(b1||b2). +\end{lstlisting} + +The command: +\begin{lstlisting} + move=> b1 b2; case (@andE b1 b2). +\end{lstlisting} +generates a single subgoal: +\begin{lstlisting} + (b1 && b2 -> b1 /\ b2) -> (b1 /\ b2 -> b1 && b2) -> + if b1 && b2 then b1 else ~~ (b1 || b2) +\end{lstlisting} + +while the command: +\begin{lstlisting} + move=> b1 b2; case (@andP b1 b2). +\end{lstlisting} +generates two subgoals, respectively \ssrL+b1 /\ b2 -> b1+ and +\ssrL+~ (b1 /\ b2) -> ~~ (b1 || b2)+. + + + +Expressing the reflection relation through the \ssrC{reflect} predicate +is hence a very convenient way to deal with classical reasoning, by +case analysis. Using the \ssrC{reflect} predicate moreover allows one to +program rich specifications inside +its two constructors, which will be automatically taken into account +during destruction. This formalisation style gives far more +efficient specifications than quantified (double) implications. + + +A naming convention in \ssr{} is to postfix the name of view lemmas with \ssrC{P}. +For example, \ssrC{orP} relates \ssrC{||} and \ssrL+\/+, \ssrC{negP} relates +\ssrL+~~+ and \ssrL+~+. + +The view mechanism is compatible with \ssrC{reflect} predicates. + +For example, the script +\begin{lstlisting} + Goal forall a b : bool, a -> b -> a /\ b. 
+ move=> a b Ha Hb; apply/andP. +\end{lstlisting} +changes the goal \ssrL+a /\ b+ to \ssrC{a && b} (see section \ref{ssec:goalinterp}). + +Conversely, the script +\begin{lstlisting} + Goal forall a b : bool, a /\ b -> a. + move=> a b; move/andP. +\end{lstlisting} +changes the goal \ssrL+a /\ b -> a+ into \ssrC{a && b -> a} (see section +\ref{ssec:assumpinterp}). + + +The same tactics can also be used to perform the converse +operation, changing a boolean conjunction into a logical one. The view +mechanism guesses the direction of the +transformation to be used i.e., the constructor of the \ssrC{reflect} +predicate which should be chosen. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{General mechanism for interpreting goals and assumptions} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Specializing assumptions} +\idx{move/\dots{}} + +The \ssr{} +tactic: + + \ssrC{move/(_} \ssrN[1]{term} $\dots$ \ssrN[n]{term}\ssrC{)} + +\noindent +is equivalent to the tactic: + + \ssrC{intro top; generalize (top} \ssrN[1]{term} $\dots$ \ssrN[n]{term}\ssrC{); clear top.} + +\noindent +where \ssrC{top} is a fresh name for introducing the top assumption of +the current goal. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Interpreting assumptions} +\label{sssec:hypview} +The general form of an assumption view tactic is: + +\begin{center} + \optional{\ssrC{move} {\optsep} \ssrC{case}} \ssrC{/} \ssrN[0]{term} +\end{center} + +The term \ssrN[0]{term}, called the \emph{view lemma} can be: +\begin{itemize} +\item a (term coercible to a) function; +\item a (possibly quantified) implication; +\item a (possibly quantified) double implication; +\item a (possibly quantified) instance of the \ssrC{reflect} predicate + (see section \ref{ssec:reflpred}). +\end{itemize} + +Let \ssrC{top} be the top assumption in the goal. 
+ +There are three steps in the behaviour of an assumption view tactic: +\begin{itemize} +\item It first introduces \ssrL+top+. +\item If the type of \ssrN[0]{term} is neither a double implication nor + an instance of the \ssrC{reflect} predicate, then the tactic + automatically generalises a term of the form: + +\begin{center} + \ssrC{(}\ssrN[0]{term} \ssrN[1]{term} $\dots$ \ssrN[n]{term}\ssrC{)} +\end{center} + + where the terms \ssrN[1]{term} $\dots$ \ssrN[n]{term} instantiate the + possible quantified variables of \ssrN[0]{term}, in order for + \ssrC{(}\ssrN[0]{term} \ssrN[1]{term} $\dots$ \ssrN[n]{term} \ssrC{top)} to be well typed. +\item If the type of $\ssrN[0]{term}$ is an equivalence, or + an instance of the \ssrC{reflect} predicate, it generalises a term of + the form: + \begin{center} + (\ssrN[vh]{term} (\ssrN[0]{term} \ssrN[1]{term} $\dots$ \ssrN[n]{term})) + \end{center} + where the term \ssrN[vh]{term} inserted is called an + \emph{assumption interpretation view hint}. +\item It finally clears \ssrC{top}. +\end{itemize} +For a \ssrC{case/}\ssrN[0]{term} tactic, the generalisation step is +replaced by a case analysis step. + +\emph{View hints} are declared by the user (see section +\ref{ssec:vhints}) and are stored in the \ssrC{Hint View} database. +The proof engine automatically +detects from the shape of the top assumption \ssrC{top} and of the view +lemma $\ssrN[0]{term}$ provided to the tactic the appropriate view hint in +the database to be inserted. + +If $\ssrN[0]{term}$ is a double implication, then the view hint \ssrC{A} will +be one of the defined view hints for implication. These hints are by +default the ones present in the file {\tt ssreflect.v}: +\begin{lstlisting} + Lemma |*iffLR*| : forall P Q, (P <-> Q) -> P -> Q. +\end{lstlisting} +which transforms a double implication into the left-to-right one, or: +\begin{lstlisting} + Lemma |*iffRL*| : forall P Q, (P <-> Q) -> Q -> P. +\end{lstlisting} +which produces the converse implication. 
In both cases, the first two
+\ssrC{Prop} arguments are implicit.
+
+If $\ssrN[0]{term}$ is an instance of the \ssrC{reflect} predicate, then \ssrC{A}
+will be one of the defined view hints for the \ssrC{reflect}
+predicate, which are by
+default the ones present in the file {\tt ssrbool.v}.
+These hints are not only used for choosing the appropriate direction of
+the translation, but they also allow complex transformations, involving
+negations.
+ For instance the hint:
+\begin{lstlisting}
+ Lemma |*introN*| : forall (P : Prop) (b : bool), reflect P b -> ~ P -> ~~ b.
+\end{lstlisting}
+makes the following script:
+\begin{lstlisting}
+ Goal forall a b : bool, a -> b -> ~~ (a && b).
+ move=> a b Ha Hb. apply/andP.
+\end{lstlisting}
+transforms the goal into \ssrC{ \~ (a /\ b)}.
+In fact\footnote{The current state of the proof shall be displayed by
+ the \ssrC{Show Proof} command of \Coq{} proof mode.}
+this last script does not exactly use the hint \ssrC{introN}, but the
+more general hint:
+\begin{lstlisting}
+ Lemma |*introNTF*| : forall (P : Prop) (b c : bool),
+ reflect P b -> (if c then ~ P else P) -> ~~ b = c
+\end{lstlisting}
+The lemma \ssrL+|*introN*|+ is an instantiation of \ssrC{introNTF} using
+ \ssrC{c := true}.
+
+Note that views, being part of \ssrN{i-pattern}, can be used to interpret
+assertions too. For example the following script asserts \ssrC{a \&\& b}
+but actually uses its propositional interpretation.
+\begin{lstlisting}
+ Lemma |*test*| (a b : bool) (pab : b && a) : b.
+ have /andP [pa ->] : (a && b) by rewrite andbC.
+\end{lstlisting} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsubsection*{Interpreting goals} +\idx{apply/\dots{}} + +A goal interpretation view tactic of the form: + +\begin{center} + \ssrC{apply/} \ssrN[0]{term} +\end{center} +applied to a goal \ssrC{top} is interpreted in the following way: +\begin{itemize} +\item If the type of $\ssrN[0]{term}$ is not an instance of the + \ssrC{reflect} predicate, nor an equivalence, + then the term $\ssrN[0]{term}$ is applied to the current goal \ssrC{top}, + possibly inserting implicit arguments. +\item If the type of $\ssrN[0]{term}$ is an instance of the \ssrC{reflect} + predicate or an equivalence, then +a \emph{goal interpretation view hint} can possibly be inserted, which +corresponds to the application of a term +\ssrC{($\ssrN[vh]{term}$ ($\ssrN[0]{term}$ _ $\dots$ _))} to the current +goal, possibly inserting implicit arguments. +\end{itemize} + +Like assumption interpretation view hints, goal interpretation ones +are user defined lemmas stored (see section \ref{ssec:vhints}) in the +\ssrC{Hint View} database bridging +the possible gap between the type of $\ssrN[0]{term}$ and the type of the +goal. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Interpreting equivalences} +\idx{apply/\dots{}/\dots{}} + +Equivalent boolean propositions are simply \emph{equal} boolean terms. +A special construction helps the user to prove boolean equalities by +considering them as logical double implications (between their coerced +versions), while +performing at the same time logical operations on both sides. + +The syntax of double views is: +\begin{center} + \ssrC{apply/} \ssrN[l]{term} \ssrC{/} \ssrN[r]{term} +\end{center} + +The term \ssrN[l]{term} is the view lemma applied to the left hand side of the +equality, \ssrN[r]{term} is the one applied to the right hand side. + +In this context, the identity view: +\begin{lstlisting} +Lemma |*idP*| : reflect b1 b1. 
+\end{lstlisting} +is useful, for example the tactic: +\begin{lstlisting} + apply/idP/idP. +\end{lstlisting} +transforms the goal +\ssrL+~~ (b1 || b2)= b3+ + into two subgoals, respectively + \ssrL+~~ (b1 || b2) -> b3+ and \\ +\ssrL+b3 -> ~~ (b1 || b2).+ + +The same goal can be decomposed in several ways, and the user may +choose the most convenient interpretation. For instance, the tactic: +\begin{lstlisting} + apply/norP/idP. +\end{lstlisting} +applied on the same goal \ssrL+~~ (b1 || b2) = b3+ generates the subgoals +\ssrL+~~ b1 /\ ~~ b2 -> b3+ and\\ +\ssrL+b3 -> ~~ b1 /\ ~~ b2+. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection{Declaring new \ssrC{Hint View}s}\label{ssec:vhints} +\idxC{Hint View} + +The database of hints for the view mechanism is extensible via a +dedicated vernacular command. As library {\tt ssrbool.v} already +declares a corpus of hints, this feature is probably useful only for +users who define their own logical connectives. Users can declare +their own hints following the syntax used in {\tt ssrbool.v}: + +\begin{center} + \ssrC{Hint View for} {\tac} \ssrC{/} \ssrN{ident} \optional{\ssrC{|}{\naturalnumber}} +\end{center} + + where {\tac}$\in \{$\ssrC{move, apply}$\}$, \ssrN{ident} is the +name of the lemma to be declared as a hint, and ${\naturalnumber}$ a natural +number. If \ssrL+move+ is used as {\tac}, the hint is declared for +assumption interpretation tactics, \ssrL+apply+ declares hints for goal +interpretations. +Goal interpretation view hints are declared for both simple views and +left hand side views. The optional natural number ${\naturalnumber}$ is the +number of implicit arguments to be considered for the declared hint +view lemma \ssrC{name_of_the_lemma}. + +The command: + +\begin{center} + \ssrC{Hint View for apply//} \ssrN{ident}\optional{\ssrC{|}{\naturalnumber}}. +\end{center} + +with a double slash \ssrL+//+, declares hint views for right hand sides of +double views. 
+
+
+\noindent See the files {\tt ssreflect.v} and {\tt ssrbool.v} for examples.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\subsection{Multiple views}\label{ssec:multiview}
+
+The hypotheses and the goal can be interpreted applying multiple views in
+sequence. Both \ssrC{move} and \ssrC{apply} can be followed by an arbitrary number
+of \ssrC{/}$\ssrN[i]{term}$. The main difference between the following two tactics
+\begin{lstlisting}
+ apply/v1/v2/v3.
+ apply/v1; apply/v2; apply/v3.
+\end{lstlisting}
+is that the former applies all the views to the principal goal.
+Applying a view with hypotheses generates new goals, and the second line
+would apply the view \ssrC{v2} to all the goals generated by \ssrC{apply/v1}.
+Note that the NO-OP intro pattern \ssrC{-} can be used to separate two
+views, making the two following examples equivalent:
+\begin{lstlisting}
+ move=> /v1; move=> /v2.
+ move=> /v1-/v2.
+\end{lstlisting}
+
+The tactic \ssrC{move} can be used together with the \ssrC{in}
+tactical to pass a given hypothesis to a lemma. For example, if
+\ssrC{P2Q : P -> Q } and \ssrC{Q2R : Q -> R}, the following
+tactic turns the hypothesis \ssrC{p : P} into \ssrC{p : R}.
+\begin{lstlisting}
+ move/P2Q/Q2R in p.
+\end{lstlisting}
+
+If the list of views is of length two, \ssrC{Hint View}s for interpreting
+equivalences are indeed taken into account, otherwise only single
+\ssrC{Hint View}s are used.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{\ssr{} searching tool}
+\idxC{Search \dots{}}
+
+\ssr{} proposes an extension of the \ssrC{Search} command.
Its syntax is: + +\begin{center} + \ssrC{Search} \optional{\ssrN{pattern}} \optional{\optional{\ssrC{\-}} \optional{\ssrN{string}\optional{\ssrC{\%}\ssrN{key}} {\optsep} \ssrN{pattern}}}$^*$ \optional{\ssrC{in} \optional{\optional{\ssrC{\-}} \ssrN{name} }$^+$} +\end{center} + +% \begin{lstlisting} +% Search [[\~]\ssrN{string}]$^*$ [\ssrN{pattern}] [[$\ssrN[1]{pattern} \dots $ $\ssrN[n]{pattern}$]] $[[$inside$|$outside$]$ $M_1 \dots M_n$]. +% \end{lstlisting} + +% This tactic returns the list of defined constants matching the +% given criteria: +% \begin{itemize} +% \item \ssrL+[[-]\ssrN{string}]$^*$+ is an open sequence of strings, which sould +% all appear in the name of the returned constants. The optional \ssrL+-+ +% prefixes strings that are required \emph{not} to appear. +% % \item \ssrN{pattern} should be a subterm of the +% % \emph{conclusion} of the lemmas found by the command. If a lemma features +% % an occurrence +% % of this pattern only in one or several of its assumptions, it will not be +% % selected by the searching tool. +% \item +% \ssrL=[$\ssrN{pattern}^+$]= +% is a list of \ssr{} terms, which may +% include types, that are required to appear in the returned constants. +% Terms with holes should be surrounded by parentheses. +% \item $\ssrC{in}\ [[\ssrC{\-}]M]^+$ limits the search to the signature +% of open modules given in the list, but the ones preceeded by the +% $\ssrC{\-}$ flag. The +% command: +% \begin{lstlisting} +% Search in M. +% \end{lstlisting} +% is hence a way of obtaining the complete signature of the module \ssrL{M}. +% \end{itemize} +where \ssrN{name} is the name of an open module. +This command search returns the list of lemmas: +\begin{itemize} +\item whose \emph{conclusion} contains a subterm matching the optional + first \ssrN{pattern}. A $\ssrC{-}$ reverses the test, producing the list + of lemmas whose conclusion does not contain any subterm matching + the pattern; +\item whose name contains the given string. 
A $\ssrC{-}$ prefix reverses + the test, producing the list of lemmas whose name does not contain the + string. A string that contains symbols or +is followed by a scope \ssrN{key}, is interpreted as the constant whose +notation involves that string (e.g., \ssrL=+= for \ssrL+addn+), if this is +unambiguous; otherwise the diagnostic includes the output of the +\ssrC{Locate} vernacular command. + +\item whose statement, including assumptions and types, contains a + subterm matching the next patterns. If a pattern is prefixed by + $\ssrC{-}$, the test is reversed; +\item contained in the given list of modules, except the ones in the + modules prefixed by a $\ssrC{-}$. +\end{itemize} + +Note that: +\begin{itemize} +\item As for regular terms, patterns can feature scope + indications. For instance, the command: +\begin{lstlisting} + Search _ (_ + _)%N. +\end{lstlisting} +lists all the lemmas whose statement (conclusion or hypotheses) +involve an application of the binary operation denoted by the infix +\ssrC{+} symbol in the \ssrC{N} scope (which is \ssr{} scope for natural numbers). +\item Patterns with holes should be surrounded by parentheses. +\item Search always volunteers the expansion of the notation, avoiding the + need to execute Locate independently. Moreover, a string fragment + looks for any notation that contains fragment as + a substring. If the \ssrL+ssrbool+ library is imported, the command: +\begin{lstlisting} + Search "~~". +\end{lstlisting} +answers : +\begin{lstlisting} +"~~" is part of notation (~~ _) +In bool_scope, (~~ b) denotes negb b +negbT forall b : bool, b = false -> ~~ b +contra forall c b : bool, (c -> b) -> ~~ b -> ~~ c +introN forall (P : Prop) (b : bool), reflect P b -> ~ P -> ~~ b +\end{lstlisting} + \item A diagnostic is issued if there are different matching notations; + it is an error if all matches are partial. +\item Similarly, a diagnostic warns about multiple interpretations, and + signals an error if there is no default one. 
+\item The command \ssrC{Search in M.} +is a way of obtaining the complete signature of the module \ssrL{M}. +\item Strings and pattern indications can be interleaved, but the + first indication has a special status if it is a pattern, and only + filters the conclusion of lemmas: +\begin{itemize} + \item The command : + \begin{lstlisting} + Search (_ =1 _) "bij". + \end{lstlisting} +lists all the lemmas whose conclusion features a '$\ssrC{=1}$' and whose +name contains the string \verb+bij+. +\item The command : + \begin{lstlisting} + Search "bij" (_ =1 _). + \end{lstlisting} +lists all the lemmas whose statement, including hypotheses, features a +'$\ssrC{=1}$' and whose name contains the string \verb+bij+. + +\end{itemize} + +\end{itemize} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Synopsis and Index} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection*{Parameters} + +\begin{minipage}[c]{\textwidth}\renewcommand{\footnoterule}{} +\begin{longtable}{lcl} +\ssrN{d-tactic} && one of the + \ssrC{elim}, \ssrC{case}, \ssrC{congr}, \ssrC{apply}, \ssrC{exact} + and \ssrC{move} \ssr{} tactics \\ +\ssrN{fix-body} && standard \Coq{} \textit{fix\_body}\\ +\ssrN{ident} && standard \Coq{} identifier\\ +\ssrN{int} && integer literal \\ +\ssrN{key} && notation scope\\ +\ssrN{name} && module name\\ +${\naturalnumber}$ && \ssrN{int} or Ltac variable denoting a standard \Coq{} numeral\footnote{The name of this Ltac variable should not be the name of a tactic which can be followed by a bracket + \ssrL+[+, like \ssrL+do+, \ssrL+ have+,\dots}\\ +\ssrN{pattern} && synonym for {\term}\\ +\ssrN{string} && standard \Coq{} string\\ +{\tac} && standard \Coq{} tactic or \ssr{} tactic\\ +{\term} & \hspace{1cm} & Gallina term, possibly containing wildcards\\ +%\ssrN{view} && global constant\\ +\end{longtable} +\end{minipage} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection*{Items and switches} + +\begin{longtable}{lclr} 
+\ssrN{binder} & {\ident} {\optsep} \ssrC{(} {\ident} \optional{\ssrC{:} {\term} } \ssrC{)} & binder& p. \pageref{ssec:pose}\\ +\\ +\ssrN{clear-switch} & \ssrC{\{} {\ident}$^+$ \ssrC{\}} & clear switch & p. \pageref{ssec:discharge}\\ +\\ +\ssrN{c-pattern} & \optional{{\term} \ssrC{in} {\optsep} {\term} \ssrC{as}} {\ident} \ssrC{in} {\term} & context pattern & p. \pageref{ssec:rewp} \\ +\\ +\ssrN{d-item} & \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} \optional{{\term} {\optsep} \ssrC{(}\ssrN{c-pattern}\ssrC{)}} & discharge item & p. \pageref{ssec:discharge}\\ +\\ +\ssrN{gen-item} & \optional{\ssrC{@}}{\ident} {\optsep} \ssrC{(}{\ident}\ssrC{)} {\optsep} \ssrC{(}\optional{\ssrC{@}}{\ident} \ssrC{:=} \ssrN{c-pattern}\ssrC{)} & generalization item & p. \pageref{ssec:struct}\\ +\\ +\ssrN{i-pattern} & {\ident} {\optsep} \ssrC{_} {\optsep} \ssrC{?} {\optsep} \ssrC{*} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{->} {\optsep} \optional{\ssrN{occ-switch}}\ssrC{<-} {\optsep} & intro pattern & p. \pageref{ssec:intro}\\ +& \ssrC{[} \ssrN{i-item}$^*$ \ssrC{|} $\dots$ \ssrC{|} \ssrN{i-item}$^*$ \ssrC{]} {\optsep} \ssrC{-} {\optsep} \ssrC{[:} {\ident}$^+$\ssrC{]} &\\ +\\ +\ssrN{i-item} & \ssrN{clear-switch} {\optsep} \ssrN{s-item} {\optsep} \ssrN{i-pattern} {\optsep} \ssrC{/}{\term} & intro item & p. \pageref{ssec:intro}\\ +\\ +\ssrN{int-mult} & \optional{{\naturalnumber}} \ssrN{mult-mark} & multiplier & p. \pageref{ssec:iter}\\ +\\ +\ssrN{occ-switch} & \ssrC{\{} \optional{\ssrC{+} {\optsep} \ssrC{-}} {\naturalnumber}$^*$\ssrC{\}} & occur. switch & p. \pageref{sssec:occselect}\\ +\\ +\ssrN{mult} & \optional{{\naturalnumber}} \ssrN{mult-mark} & multiplier & p. \pageref{ssec:iter}\\ +\\ +\ssrN{mult-mark} & \ssrC{?} {\optsep} \ssrC{!} & multiplier mark & p. \pageref{ssec:iter}\\ +\\ +\ssrN{r-item} & \optional{\ssrC{/}} {\term} {\optsep} \ssrN{s-item} & rewrite item & p. 
\pageref{ssec:extrw}\\ +\\ +\ssrN{r-prefix} & \optional{\ssrC{-}} \optional{\ssrN{int-mult}} \optional{\ssrN{occ-switch} {\optsep} \ssrN{clear-switch}} \optional{\ssrC{[}\ssrN{r-pattern}\ssrC{]}} & rewrite prefix & p. \pageref{ssec:extrw}\\ +\\ +\ssrN{r-pattern} & {\term} {\optsep} \ssrN{c-pattern} {\optsep} \ssrC{in} \optional{{\ident} \ssrC{in}} {\term} & rewrite pattern & p. \pageref{ssec:extrw}\\ +\\ +\ssrN{r-step} & \optional{\ssrN{r-prefix}}\ssrN{r-item} & rewrite step & p. \pageref{ssec:extrw}\\ +\\ +\ssrN{s-item} & \ssrC{/=} {\optsep} \ssrC{//} {\optsep} \ssrC{//=} & simplify switch & p. \pageref{ssec:intro}\\ +\\ +\end{longtable} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection*{Tactics} +\emph{Note}: \ssrC{without loss} and \ssrC{suffices} are synonyms for \ssrC{wlog} and +\ssrC{suff} respectively. + +\begin{longtable}{llr} +\ssrC{move} & \textcolor{dkblue}{\texttt{idtac}} or \ssrC{hnf}& p. \pageref{ssec:profstack} \\ +\ssrC{apply} & application & p. \pageref{ssec:basictac}\\ +\ssrC{exact} &&\\ +\ssrC{abstract} && p. \pageref{ssec:abstract}, \pageref{sec:havetransparent}\\ +\\ +\ssrC{elim} & induction & p. \pageref{ssec:basictac}\\ +\ssrC{case} & case analysis & p. \pageref{ssec:basictac}\\ +\\ +\ssrC{rewrite} \ssrN{rstep}$^+$ & rewrite& p. \pageref{ssec:extrw}\\ +\\ +\ssrC{have} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} \optional{\ssrN{s-item} {\optsep} \ssrN{binder}$^+$} \optional{\ssrC{:} {\term}} \ssrC{:=} {\term} & forward & p. 
\pageref{ssec:struct}\\ +\ssrC{have} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} \optional{\ssrN{s-item}{\optsep} \ssrN{binder}$^+$} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & chaining & \\ +\ssrC{have suff} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-pattern}} \optional{\ssrC{:} {\term}} \ssrC{:=} {\term} & & \\ +\ssrC{have suff} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-pattern}} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & & \\ +\ssrC{gen have} \optional{{\ident}\ssrC{,}} \optional{\ssrN{i-pattern}} \ssrC{:} \ssrN{gen-item}$^+$ \ssrC{/} {\term} \optional{\ssrC{by} {\tac}} & & \\ +\\ +\ssrC{wlog} \optional{\ssrC{suff}} \optional{\ssrN{i-item}} \ssrC{:} \optional{\ssrN{gen-item}{\optsep} \ssrN{clear-switch}}$^*$ \ssrC{/} {\term} & specializing & p. \pageref{ssec:struct} \\ +\\ +\ssrC{suff} \ssrN{i-item}$^*$ \optional{\ssrN{i-pattern}} \optional{\ssrN{binder}$^+$} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & backchaining & p. \pageref{ssec:struct}\\ +\ssrC{suff} \optional{\ssrC{have}} \optional{\ssrN{clear-switch}} \optional{\ssrN{i-pattern}} \ssrC{:} {\term} \optional{\ssrC{by} {\tac}} & & \\ +\\ +\ssrC{pose} {\ident} \ssrC{:=} {\term} & local definition& p. \pageref{ssec:pose}\\ +\ssrC{pose} {\ident} \ssrN{binder}$^+$ \ssrC{:=} {\term} & \rlap{local function definition}& \\ +\ssrC{pose fix} \ssrN{fix-body} & \rlap{local fix definition} & \\ +\ssrC{pose cofix} \ssrN{fix-body} & \rlap{local cofix definition} & \\ +\\ +\ssrC{set} {\ident} \optional{\ssrC{:} {\term}} \ssrC{:=} \optional{\ssrN{occ-switch}} \optional{{\term}{\optsep} \ssrC{(}\ssrN{c-pattern}\ssrC{)}} & abbreviation&p. \pageref{ssec:set}\\ +\\ +\ssrC{unlock} \optional{\ssrN{r-prefix}]{\ident}}$^*$ & unlock & p. \pageref{ssec:lock}\\ +\\ +\ssrC{congr} \optional{\naturalnumber} {\term} & congruence& p. 
\pageref{ssec:congr}\\ +\end{longtable} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection*{Tacticals} + +\begin{longtable}{lclr} +\ssrN{d-tactic} \optional{\ident} \ssrC{:} \ssrN{d-item}$^{+}$ \optional{\ssrN{clear-switch}} & & discharge & p. \pageref{ssec:discharge}\\ +\\ +{\tac} \ssrC{=>} \ssrN{i-item}$^+$ && introduction & p. \pageref{ssec:intro}\\ +\\ +{\tac} \ssrC{in} \optional{\ssrN{gen-item} {\optsep} \ssrN{clear-switch}}$^+$ \optional{\ssrC{*}} && localization & p. \pageref{ssec:gloc}\\ +\\ +\ssrC{do} \optional{\ssrN{mult}} \ssrC{[} \nelist{\tac}{|} \ssrC{]}&& iteration & p. \pageref{ssec:iter}\\ +\ssrC{do} \ssrN{mult} {\tac} &&& \\ +\\ +{\tac} \ssrC{ ; first} \optional{\naturalnumber} \ssrC{[}\nelist{\tac}{|}\ssrC{]} && selector & p. \pageref{ssec:select}\\ +{\tac} \ssrC{ ; last} \optional{\naturalnumber} \ssrC{[}\nelist{\tac}{|}\ssrC{]} \\ +{\tac} \ssrC{ ; first} \optional{\naturalnumber} \ssrC{last} && subgoals & p. \pageref{ssec:select}\\ +{\tac} \ssrC{; last} \optional{\naturalnumber} \ssrC{first} && rotation & \\ +\\ +\ssrC{by [} \nelist{\tac}{|} \ssrC{]} && closing & p. \pageref{ssec:termin}\\ +\ssrC{by []} \\ +\ssrC{by} {\tac} \\ +\end{longtable} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\subsection*{Commands} +\begin{longtable}{lclr} +\ssrL+Hint View for+ \optional{\ssrL+move+ {\it |} \ssrL+apply+} {\tt /} {\ident} \optional{{\tt|} {\naturalnumber}} && view hint +declaration & p. \pageref{ssec:vhints}\\ +\\ +\ssrL+Hint View for apply//+ {\ident} \optional{{\tt|}{\naturalnumber}} && right hand side double + & p. \pageref{ssec:vhints}\\ +&& view hint declaration &\\ +\\ +%\ssrL+Import Prenex Implicits+ && enable prenex implicits & +%p. \pageref{ssec:parampoly}\\ +%\\ +\ssrL+Prenex Implicits+ {\ident}$^+$ & \hspace{.6cm} & prenex implicits decl. + & p. 
\pageref{ssec:parampoly}\\ + +\end{longtable} + +\iffalse + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Changes} + +\subsection{\ssr{} version 1.3} +All changes are retrocompatible extensions but for: +\begin{itemize} +\item Occurrences in the type family switch now refer only to the goal, while + before they used to refer also to the types in the abstractions of the + predicate used by the eliminator. This bug used to affect lemmas like + \ssrC{boolP}. See the relative comments in \ssrC{ssrbool.v}. +\item Clear switches can only mention existing hypothesis and + otherwise fail. This can in particular affect intro patterns + simultaneously applied to several goals. + % commit: 2686 +\item A bug in the \ssrC{rewrite} tactic allowed to + instantiate existential metavariables occurring in the goal. + This is not the case any longer (see section~\ref{ssec:rewcaveats}). +\item The \ssrC{fold} and \ssrC{unfold} \ssrN{r-items} for \ssrC{rewrite} used to + fail silently when used in combination with a \ssrN{r-pattern} matching no + goal subterm. They now fail. The old behavior can be obtained using + the \ssrC{?} multiplier (see section~\ref{ssec:extrw}). +\item \Coq{} 8.2 users with a statically linked toplevel must comment out the\\ + \ssrC{Declare ML Module "ssreflect".}\\ + line at the beginning of \ssrC{ssreflect.v} to compile the 1.3 library. +\end{itemize} +New features: +\begin{itemize} +\item Contextual \ssrC{rewrite} patterns. + The context surrounding the redex can now be used to specify which + redex occurrences should be rewritten (see section~\ref{ssec:rewp}).\\ + \ssrC{rewrite [in X in _ = X]addnC.} + % commit: 2690, 2689, 2718, 2733 +\item Proof irrelevant interpretation of goals with existential metavariables. 
+ Goals containing an existential metavariable of sort \ssrC{Prop} are + generalized over it, and a new goal for the missing subproof is + generated (see page~\pageref{sssec:apply} and + section~\ref{ssec:rewcaveats}).\\ + \ssrC{apply: (ex_intro _ (@Ordinal _ y _)).}\\ + \ssrC{rewrite insubT.} + % commit: 2553, 2544, 2543, 2733 +\item Views are now part of \ssrN{i-pattern} and can thus be used + inside intro patterns (see section~\ref{ssec:intro}).\\ + \ssrC{move=> a b /andP [Ha Hb].} + % commit: 2720 +\item Multiple views for \ssrC{move}, \ssrC{move $\dots$ in} and \ssrC{apply} + (see section~\ref{ssec:multiview}).\\ + \ssrC{move/v1/v2/v3.}\\ + \ssrC{move/v1/v2/v3 in H.}\\ + \ssrC{apply/v1/v2/v3.} + % commit: 2720 +\item \ssrC{have} and \ssrC{suff} idiom with view (see section~\ref{sssec:hypview}). +\begin{lstlisting} + Lemma |*test*| (a b : bool) (pab : a && b) : b. + have {pab} /= /andP [pa ->] // : true && (a && b) := pab. +\end{lstlisting} + % commit: 2726 +\item \ssrC{have suff}, \ssrC{suff have} and \ssrC{wlog suff} forward reasoning + tactics (see section~\ref{ssec:struct}).\\ + \ssrC{have suff H : P.} + % commit: 2633 +\item Binders support in \ssrC{have} (see section~\ref{sssec:have}).\\ + \ssrC{have H x y (r : R x y) : P x -> Q y.} + % commit: 2633 +\item Deferred clear switches. Clears are deferred to the end of the + intro pattern. In the meanwhile, cleared variables are still + part of the context, thus the goal can mention them, but are + renamed to non accessible dummy names (see section~\ref{ssec:intro}).\\ + \ssrC{suff: G \\x H = K; first case/dprodP=> \{G H\} [[G H -> -> defK]].} + % commit: 2660 +\item Relaxed alternation condition in intro patterns. 
The + \ssrN{i-item} grammar rule is simplified (see section~\ref{ssec:intro}).\\ + \ssrC{move=> a \{H\} /= \{H1\} // b c /= \{H2\}.} + % commit: 2713 +\item Occurrence selection for \ssrC{->} and \ssrC{<-} intro pattern + (see section~\ref{ssec:intro}).\\ + \ssrC{move=> a b H \{2\}->.} + % commit: 2714 +\item Modifiers for the discharging '\ssrC{:}' and \ssrC{in} tactical to override + the default behavior when dealing with local definitions (let-in): + \ssrC{@f} forces the body of \ssrC{f} to be kept, \ssrC{(f)} forces the body of + \ssrC{f} to be dropped (see sections~\ref{ssec:discharge} + and~\ref{ssec:gloc}).\\ + \ssrC{move: x y @f z.}\\ + \ssrC{rewrite rule in (f) $\;\;$H.} + %commit: 2659, 2710 +\item Type family switch in \ssrC{elim} and \ssrC{case} + can contain patterns with occurrence switch + (see section~\ref{ssec:typefam}).\\ + \ssrC{case: \{2\}(_ == x) / eqP.} + % commit: 2593, 2598, 2539, 2538, 2527, 2529 +\item Generic second order predicate support for \ssrC{elim} + (see section~\ref{sec:views}).\\ + \ssrC{elim/big\_prop: _} + % commit: 2767 +\item The \ssrC{congr} tactic now also works on products (see + section~\ref{ssec:congr}). +\begin{lstlisting} + Lemma |*test*| x (H : P x) : P y. + congr (P _): H. +\end{lstlisting} + % commit: 2608 +\item Selectors now support Ltac variables + (see section~\ref{ssec:select}).\\ + \ssrC{let n := 3 in tac; first n last.} + % commit: 2725 +\item Deprecated use of \ssrC{Import Prenex Implicits} directive. + It must be replaced with the \Coq{} \ssrC{Unset Printing + Implicit Defensive} vernacular command. +\item New synonym \ssrC{Canonical} for \ssrC{Canonical Structure}. 
+\end{itemize} +\subsection{\ssr{} version 1.4} +New features: +\begin{itemize} +\item User definable recurrent contexts (see section~\ref{ssec:rewp}).\\ + \ssrC{Notation RHS := (X in _ = X)\%pattern} +\item Contextual patterns in + \ssrC{set} and `\ssrC{:}' (see section~\ref{ssec:rewp}).\\ + \ssrC{set t := (a + _ in RHS)} +\item NO-OP intro pattern (see section~\ref{ssec:intro}).\\ + \ssrC{move=> /eqP-H /fooP-/barP} +\item \ssrC{if $\ {\term}\ $ isn't $\ \ssrN{pattern}\ $ then $\ {\term}\ $ + else $\ {\term}\ $} notation (see section~\ref{ssec:patcond}).\\ + \ssrC{if x isn't Some y then simple else complex y} +\end{itemize} +\subsection{\ssr{} version 1.5} +Incompatibilities: +\begin{itemize} +\item The \ssrC{have} tactic now performs type classes resolution. The old + behavior can be restored with \ssrC{Set SsrHave NoTCResolution} +\end{itemize} +Fixes: +\begin{itemize} +\item The \ssrC{let foo := type of t in} syntax of standard \ssrC{Ltac} has + been made compatible with \ssr{} and can be freely used even if + the \ssr{} plugin is loaded +\end{itemize} +New features: +\begin{itemize} +\item Generalizations supported in have (see section~\ref{ssec:struct}).\\ + \ssrC{generally have hx2px, pa : a ha / P a.} +\item Renaming and patterns in wlog (see section~\ref{ssec:struct} and + page \pageref{par:advancedgen}).\\ + \ssrC{wlog H : (n := m)$\;$ (x := m + _)$\;$ / T x}.\\ + \ssrC{wlog H : (n := m)$\;$ (@ldef := secdef m)$\;$ / T x}. +\item Renaming, patterns and clear switches in \ssrC{in} + tactical (see section~\ref{ssec:gloc}).\\ + \ssrC{$\dots$ in H1 \{H2\} (n := m).} +\item Handling of type classes in \ssrC{have} + (see page~\pageref{ssec:havetcresolution}).\\ + \ssrC{have foo : ty. (* TC inference for ty *)}\\ + \ssrC{have foo : ty := . (* no TC inference for ty *)}\\ + \ssrC{have foo : ty := t. (* no TC inference for ty and t *)}\\ + \ssrC{have foo := t. 
(* no TC inference for t *)} +\item Transparent flag for \ssrC{have} to generate a \ssrC{let in} context entry + (see page~\pageref{sec:havetransparent}).\\ + \ssrC{have @i : 'I\_n by apply: (Sub m); auto.} +\item Intro pattern \ssrC{[: foo bar ]} to create abstract variables + (see page~\pageref{ssec:introabstract}). +\item Tactic \ssrC{abstract:} to assign an abstract variable + (see page~\pageref{ssec:abstract}).\\ + \ssrC{have [: blurb ] @i : 'I\_n by apply: (Sub m); abstract: blurb; auto.}\\ + \ssrC{have [: blurb ] i : 'I\_n := Sub m blurb; first by auto.} + +\end{itemize} + +\fi diff --git a/doc/refman/RefMan-syn.tex b/doc/refman/RefMan-syn.tex index ecaf82806e..084317776b 100644 --- a/doc/refman/RefMan-syn.tex +++ b/doc/refman/RefMan-syn.tex @@ -13,7 +13,7 @@ described in Section~\ref{scopes}. were present for a while in {\Coq} are no longer available from {\Coq} version 8.0. The underlying AST structure is also no longer available. The functionalities of the command {\tt Syntactic Definition} are -still available, see Section~\ref{Abbreviations}. +still available; see Section~\ref{Abbreviations}. \section[Notations]{Notations\label{Notation} \comindex{Notation}} @@ -35,8 +35,8 @@ The expression \texttt{(and A B)} is the abbreviated term and the string \verb="A /\ B"= (called a {\em notation}) tells how it is symbolically written. -A notation is always surrounded by double quotes (excepted when the -abbreviation is a single identifier, see \ref{Abbreviations}). The +A notation is always surrounded by double quotes (except when the +abbreviation is a single identifier; see \ref{Abbreviations}). The notation is composed of {\em tokens} separated by spaces. Identifiers in the string (such as \texttt{A} and \texttt{B}) are the {\em parameters} of the notation. They must occur at least once each in the @@ -68,7 +68,7 @@ declaration of the notation. 
\subsection[Precedences and associativity]{Precedences and associativity\index{Precedences} \index{Associativity}} -Mixing different symbolic notations in a same text may cause serious +Mixing different symbolic notations in the same text may cause serious parsing ambiguity. To deal with the ambiguity of notations, {\Coq} uses precedence levels ranging from 0 to 100 (plus one extra level numbered 200) and associativity rules. @@ -88,8 +88,8 @@ precedence level to each notation, knowing that a lower level binds more than a higher level. Hence the level for disjunction must be higher than the level for conjunction. -Since connectives are the less tight articulation points of a text, it -is reasonable to choose levels not so far from the higher level which +Since connectives are not tight articulation points of a text, it +is reasonable to choose levels not so far from the highest level which is 100, for example 85 for disjunction and 80 for conjunction\footnote{which are the levels effectively chosen in the current implementation of {\Coq}}. @@ -102,10 +102,10 @@ even consider that the expression is not well-formed and that parentheses are mandatory (this is a ``no associativity'')\footnote{ {\Coq} accepts notations declared as no associative but the parser on which {\Coq} is built, namely {\camlpppp}, currently does not implement the -no-associativity and replace it by a left associativity; hence it is +no-associativity and replaces it by a left associativity; hence it is the same for {\Coq}: no-associativity is in fact left associativity}. We don't know of a special convention of the associativity of -disjunction and conjunction, let's apply for instance a right +disjunction and conjunction, so let's apply for instance a right associativity (which is the choice of {\Coq}). Precedence levels and associativity rules of notations have to be @@ -701,11 +701,11 @@ Notation}. 
% Introduction An {\em interpretation scope} is a set of notations for terms with -their interpretation. Interpretation scopes provides with a weak, -purely syntactical form of notations overloading: a same notation, for -instance the infix symbol \verb=+= can be used to denote distinct -definitions of an additive operator. Depending on which interpretation -scopes is currently open, the interpretation is different. +their interpretation. Interpretation scopes provide a weak, +purely syntactical form of notation overloading: the same notation, for +instance the infix symbol \verb=+=, can be used to denote distinct +definitions of the additive operator. Depending on which interpretation +scope is currently open, the interpretation is different. Interpretation scopes can include an interpretation for numerals and strings. However, this is only made possible at the {\ocaml} level. @@ -889,7 +889,8 @@ statically. For instance, if {\tt f} is a polymorphic function of type recognized as an argument to be interpreted in scope {\scope}. \comindex{Bind Scope} -More generally, any {\class} (see Chapter~\ref{Coercions-full}) can be +\label{bindscope} +More generally, any coercion {\class} (see Chapter~\ref{Coercions-full}) can be bound to an interpretation scope. The command to do it is \begin{quote} {\tt Bind Scope} {\scope} \texttt{with} {\class} @@ -908,7 +909,7 @@ Open Scope nat_scope. (* Define + on the nat as the default for + *) Check (fun x y1 y2 z t => P _ (x + t) ((f _ (y1 + y2) + z))). \end{coq_example} -\Rem The scope {\tt type\_scope} has also a local effect on +\Rem The scopes {\tt type\_scope} and {\tt function\_scope} also have a local effect on interpretation. See the next section. \SeeAlso The command to show the scopes bound to the arguments of a @@ -940,10 +941,21 @@ Check # @@%mybool #. The scope {\tt type\_scope} has a special status. 
It is a primitive interpretation scope which is temporarily activated each time a -subterm of an expression is expected to be a type. This includes goals -and statements, types of binders, domain and codomain of implication, -codomain of products, and more generally any type argument of a -declared or defined constant. +subterm of an expression is expected to be a type. It is delimited by +the key {\tt type}, and bound to the coercion class {\tt Sortclass}. It is also +used in certain situations where an expression is statically known to +be a type, including the conclusion and the type of hypotheses within +an {\tt Ltac} goal match (see Section~\ref{ltac-match-goal}) +the statement of a theorem, the type of +a definition, the type of a binder, the domain and codomain of +implication, the codomain of products, and more generally any type +argument of a declared or defined constant. + +\subsection[The {\tt function\_scope} interpretation scope]{The {\tt function\_scope} interpretation scope\index{function\_scope@\texttt{function\_scope}}} + +The scope {\tt function\_scope} also has a special status. +It is temporarily activated each time the argument of a global reference is +recognized to be a {\tt Funclass instance}, i.e., of type {\tt forall x:A, B} or {\tt A -> B}. \subsection{Interpretation scopes used in the standard library of {\Coq}} @@ -953,38 +965,39 @@ commands {\tt Print Scopes} or {\tt Print Scope {\scope}}. \subsubsection{\tt type\_scope} -This includes infix {\tt *} for product types and infix {\tt +} for -sum types. It is delimited by key {\tt type}. +This scope includes infix {\tt *} for product types and infix {\tt +} for +sum types. It is delimited by key {\tt type}, and bound to the coercion class +{\tt Sortclass}, as described at \ref{bindscope}. \subsubsection{\tt nat\_scope} -This includes the standard arithmetical operators and relations on +This scope includes the standard arithmetical operators and relations on type {\tt nat}. 
Positive numerals in this scope are mapped to their canonical representent built from {\tt O} and {\tt S}. The scope is -delimited by key {\tt nat}. +delimited by key {\tt nat}, and bound to the type {\tt nat} (see \ref{bindscope}). \subsubsection{\tt N\_scope} -This includes the standard arithmetical operators and relations on +This scope includes the standard arithmetical operators and relations on type {\tt N} (binary natural numbers). It is delimited by key {\tt N} and comes with an interpretation for numerals as closed term of type {\tt Z}. \subsubsection{\tt Z\_scope} -This includes the standard arithmetical operators and relations on +This scope includes the standard arithmetical operators and relations on type {\tt Z} (binary integer numbers). It is delimited by key {\tt Z} and comes with an interpretation for numerals as closed term of type {\tt Z}. \subsubsection{\tt positive\_scope} -This includes the standard arithmetical operators and relations on +This scope includes the standard arithmetical operators and relations on type {\tt positive} (binary strictly positive numbers). It is delimited by key {\tt positive} and comes with an interpretation for numerals as closed term of type {\tt positive}. \subsubsection{\tt Q\_scope} -This includes the standard arithmetical operators and relations on +This scope includes the standard arithmetical operators and relations on type {\tt Q} (rational numbers defined as fractions of an integer and a strictly positive integer modulo the equality of the numerator-denominator cross-product). As for numerals, only $0$ and @@ -993,13 +1006,13 @@ interpretations are $\frac{0}{1}$ and $\frac{1}{1}$ respectively). \subsubsection{\tt Qc\_scope} -This includes the standard arithmetical operators and relations on the +This scope includes the standard arithmetical operators and relations on the type {\tt Qc} of rational numbers defined as the type of irreducible fractions of an integer and a strictly positive integer. 
\subsubsection{\tt real\_scope} -This includes the standard arithmetical operators and relations on +This scope includes the standard arithmetical operators and relations on type {\tt R} (axiomatic real numbers). It is delimited by key {\tt R} and comes with an interpretation for numerals as term of type {\tt R}. The interpretation is based on the binary decomposition. The @@ -1014,35 +1027,40 @@ those of {\tt R}. \subsubsection{\tt bool\_scope} -This includes notations for the boolean operators. It is -delimited by key {\tt bool}. +This scope includes notations for the boolean operators. It is +delimited by key {\tt bool}, and bound to the type {\tt bool} (see \ref{bindscope}). \subsubsection{\tt list\_scope} -This includes notations for the list operators. It is -delimited by key {\tt list}. +This scope includes notations for the list operators. It is +delimited by key {\tt list}, and bound to the type {\tt list} (see \ref{bindscope}). + +\subsubsection{\tt function\_scope} + +This scope is delimited by the key {\tt function}, and bound to the coercion class {\tt Funclass}, +as described at \ref{bindscope}. \subsubsection{\tt core\_scope} -This includes the notation for pairs. It is delimited by key {\tt core}. +This scope includes the notation for pairs. It is delimited by key {\tt core}. \subsubsection{\tt string\_scope} -This includes notation for strings as elements of the type {\tt +This scope includes notation for strings as elements of the type {\tt string}. Special characters and escaping follow {\Coq} conventions on strings (see Section~\ref{strings}). Especially, there is no convention to visualize non printable characters of a string. The file {\tt String.v} shows an example that contains quotes, a newline -and a beep (i.e. the ascii character of code 7). +and a beep (i.e. the ASCII character of code 7). \subsubsection{\tt char\_scope} -This includes interpretation for all strings of the form -\verb!"!$c$\verb!"! 
where $c$ is an ascii character, or of the form +This scope includes interpretation for all strings of the form +\verb!"!$c$\verb!"! where $c$ is an ASCII character, or of the form \verb!"!$nnn$\verb!"! where $nnn$ is a three-digits number (possibly with leading 0's), or of the form \verb!""""!. Their respective -denotations are the ascii code of $c$, the decimal ascii code $nnn$, -or the ascii code of the character \verb!"! (i.e. the ascii code +denotations are the ASCII code of $c$, the decimal ASCII code $nnn$, +or the ASCII code of the character \verb!"! (i.e. the ASCII code 34), all of them being represented in the type {\tt ascii}. \subsection{Displaying informations about scopes} diff --git a/doc/refman/RefMan-tac.tex b/doc/refman/RefMan-tac.tex index be75dc9d56..b3b0df5c8a 100644 --- a/doc/refman/RefMan-tac.tex +++ b/doc/refman/RefMan-tac.tex @@ -352,7 +352,7 @@ Section~\ref{pattern} to transform the goal so that it gets the form The tactic {\tt eapply} behaves like {\tt apply} but it does not fail when no instantiations are deducible for some variables in the - premises. Rather, it turns these variables into + premises. Rather, it turns these variables into existential variables which are variables still to instantiate (see Section~\ref{evars}). The instantiation is intended to be found later in the proof. @@ -1411,7 +1411,7 @@ in the list of subgoals remaining to prove. quantifications or non-dependent implications) are instantiated by concrete terms coming either from arguments \term$_1$ $\ldots$ \term$_n$ or from a bindings list (see - Section~\ref{Binding-list} for more about bindings lists). + Section~\ref{Binding-list} for more about bindings lists). In the first form the application to \term$_1$ {\ldots} \term$_n$ can be partial. The first form is equivalent to {\tt assert ({\ident} := {\ident} {\term$_1$} \dots\ \term$_n$)}. @@ -1515,23 +1515,33 @@ The {\tt evar} tactic creates a new local definition named \ident\ with type \term\ in the context. 
The body of this binding is a fresh existential variable. -\subsection{\tt instantiate ( {\num} := {\term} )} +\subsection{\tt instantiate ( {\ident} := {\term} )} \tacindex{instantiate} \label{instantiate} The {\tt instantiate} tactic refines (see Section~\ref{refine}) -an existential variable -with the term \term. The \num\ argument is the position of the -existential variable from right to left in the conclusion. This cannot be -the number of the existential variable since this number is different -in every session. +an existential variable {\ident} with the term {\term}. +It is equivalent to {\tt only [\ident]: refine \term} (preferred alternative). -When you are referring to hypotheses which you did not name +\begin{Remarks} +\item To be able to refer to an existential variable by name, the +user must have given the name explicitly (see~\ref{ExistentialVariables}). + +\item When you are referring to hypotheses which you did not name explicitly, be aware that Coq may make a different decision on how to name the variable in the current goal and in the context of the existential variable. This can lead to surprising behaviors. +\end{Remarks} \begin{Variants} + + \item {\tt instantiate ( {\num} := {\term} )} + This variant allows to refer to an existential variable which was not + named by the user. The {\num} argument is the position of the + existential variable from right to left in the goal. + Because this variant is not robust to slight changes in the goal, + its use is strongly discouraged. + \item {\tt instantiate ( {\num} := {\term} ) in \ident} \item {\tt instantiate ( {\num} := {\term} ) in ( Value of {\ident} )} @@ -2594,6 +2604,21 @@ Abort. This tactic behaves as generalizing \ident$_1$\dots\ \ident$_n$, then doing \texttt{inversion {\ident} using \ident$'$}. 
+\item \tacindex{inversion\_sigma} \texttt{inversion\_sigma} + + This tactic turns equalities of dependent pairs (e.g., + \texttt{existT P x p = existT P y q}, frequently left over by + \texttt{inversion} on a dependent type family) into pairs of + equalities (e.g., a hypothesis \texttt{H : x = y} and a hypothesis + of type \texttt{rew H in p = q}); these hypotheses can subsequently + be simplified using \texttt{subst}, without ever invoking any kind + of axiom asserting uniqueness of identity proofs. If you want to + explicitly specify the hypothesis to be inverted, or name the + generated hypotheses, you can invoke \texttt{induction H as [H1 H2] + using eq\_sigT\_rect}. This tactic also works for \texttt{sig}, + \texttt{sigT2}, and \texttt{sig2}, and there are similar + \texttt{eq\_sig\emph{*}\_rect} induction lemmas. + \end{Variants} \firstexample @@ -2688,6 +2713,64 @@ dependent inversion_clear H. Note that \texttt{H} has been substituted by \texttt{(LeS n m0 l)} and \texttt{m} by \texttt{(S m0)}. +\example{Using \texorpdfstring{\texttt{inversion\_sigma}}{inversion\_sigma}} + +Let us consider the following inductive type of length-indexed lists, +and a lemma about inverting equality of \texttt{cons}: + +\begin{coq_eval} +Reset Initial. +Set Printing Compact Contexts. +\end{coq_eval} + +\begin{coq_example*} +Require Coq.Logic.Eqdep_dec. + +Inductive vec A : nat -> Type := +| nil : vec A O +| cons {n} (x : A) (xs : vec A n) : vec A (S n). + +Lemma invert_cons : forall A n x xs y ys, + @cons A n x xs = @cons A n y ys + -> xs = ys. +Proof. +\end{coq_example*} + +\begin{coq_example} +intros A n x xs y ys H. +\end{coq_example} + +After performing \texttt{inversion}, we are left with an equality of +\texttt{existT}s: + +\begin{coq_example} +inversion H. +\end{coq_example} + +We can turn this equality into a usable form with +\texttt{inversion\_sigma}: + +\begin{coq_example} +inversion_sigma. 
+\end{coq_example} + +To finish cleaning up the proof, we will need to use the fact that +that all proofs of \texttt{n = n} for \texttt{n} a \texttt{nat} are +\texttt{eq\_refl}: + +\begin{coq_example} +let H := match goal with H : n = n |- _ => H end in +pose proof (Eqdep_dec.UIP_refl_nat _ H); subst H. +simpl in *. +\end{coq_example} + +Finally, we can finish the proof: + +\begin{coq_example} +assumption. +Qed. +\end{coq_example} + \subsection{\tt fix {\ident} {\num}} \tacindex{fix} \label{tactic:fix} @@ -2988,7 +3071,7 @@ activated, {\tt subst} also deals with the following corner cases: \item The presence of a recursive equation which without the option would be a cause of failure of {\tt subst}. - + \item A context with cyclic dependencies as with hypotheses {\tt \ident$_1$ = f~\ident$_2$} and {\tt \ident$_2$ = g~\ident$_1$} which without the option would be a cause of failure of {\tt subst}. @@ -3283,7 +3366,7 @@ a sort of strong normalization with two key differences: \begin{itemize} \item They unfold a constant if and only if it leads to a $\iota$-reduction, i.e. reducing a match or unfolding a fixpoint. -\item While reducing a constant unfolding to (co)fixpoints, +\item While reducing a constant unfolding to (co)fixpoints, the tactics use the name of the constant the (co)fixpoint comes from instead of the (co)fixpoint definition in recursive calls. @@ -3402,6 +3485,7 @@ reduced to \texttt{S t}. \optindex{Refolding Reduction} {\tt Refolding Reduction} \end{quote} +\emph{Deprecated since 8.7} This option (off by default) controls the use of the refolding strategy of {\tt cbn} while doing reductions in unification, type inference and @@ -4014,7 +4098,7 @@ Abort. 
& & e * & \text{ Kleene star } \\ & & \texttt{emp} & \text{ empty } \\ & & \texttt{eps} & \text{ epsilon } \\ - & & \texttt{(} e \texttt{)} & + & & \texttt{(} e \texttt{)} & \end{array}\] The \texttt{emp} regexp does not match any search path while @@ -4029,7 +4113,7 @@ is to set the cut expression to $c | e$, the initial cut expression being \texttt{emp}. -\item \texttt{Mode} {\tt (+ | ! | -)}$^*$ {\qualid} +\item \texttt{Mode} {\qualid} {\tt (+ | ! | -)}$^*$ \label{HintMode} \comindex{Hint Mode} @@ -4587,50 +4671,15 @@ congruence. \end{ErrMsgs} +\section{Checking properties of terms} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\section{Everything after this point has yet to be sorted} - +Each of the following tactics acts as the identity if the check succeeds, and results in an error otherwise. \subsection{\tt constr\_eq \term$_1$ \term$_2$} \tacindex{constr\_eq} \label{constreq} -This tactic applies to any goal. It checks whether its arguments are -equal modulo alpha conversion and casts. +This tactic checks whether its arguments are equal modulo alpha conversion and casts. \ErrMsg \errindex{Not equal} @@ -4638,8 +4687,8 @@ equal modulo alpha conversion and casts. \tacindex{unify} \label{unify} -This tactic applies to any goal. It checks whether its arguments are -unifiable, potentially instantiating existential variables. +This tactic checks whether its arguments are unifiable, potentially +instantiating existential variables. \ErrMsg \errindex{Not unifiable} @@ -4654,9 +4703,9 @@ unifiable, potentially instantiating existential variables. \tacindex{is\_evar} \label{isevar} -This tactic applies to any goal. It checks whether its argument is an -existential variable. Existential variables are uninstantiated -variables generated by e.g. {\tt eapply} (see Section~\ref{apply}). +This tactic checks whether its argument is a current existential +variable. 
Existential variables are uninstantiated variables generated +by {\tt eapply} (see Section~\ref{apply}) and some other tactics. \ErrMsg \errindex{Not an evar} @@ -4664,10 +4713,9 @@ variables generated by e.g. {\tt eapply} (see Section~\ref{apply}). \tacindex{has\_evar} \label{hasevar} -This tactic applies to any goal. It checks whether its argument has an -existential variable as a subterm. Unlike {\tt context} patterns -combined with {\tt is\_evar}, this tactic scans all subterms, -including those under binders. +This tactic checks whether its argument has an existential variable as +a subterm. Unlike {\tt context} patterns combined with {\tt is\_evar}, +this tactic scans all subterms, including those under binders. \ErrMsg \errindex{No evars} @@ -4675,8 +4723,8 @@ including those under binders. \tacindex{is\_var} \label{isvar} -This tactic applies to any goal. It checks whether its argument is a -variable or hypothesis in the current goal context or in the opened sections. +This tactic checks whether its argument is a variable or hypothesis in the +current goal context or in the opened sections. \ErrMsg \errindex{Not a variable or hypothesis} @@ -5178,7 +5226,7 @@ Reset Initial. \subsection[\tt swap \num$_1$ \num$_2$]{\tt swap \num$_1$ \num$_2$\tacindex{swap}} -This tactic switches the position of the goals of indices $\num_1$ and $\num_2$. If either $\num_1$ or $\num_2$ is negative then goals are counted from the end of the focused goal list. Goals are indexed from $1$, there is no goal with position $0$. +This tactic switches the position of the goals of indices $\num_1$ and $\num_2$. If either $\num_1$ or $\num_2$ is negative then goals are counted from the end of the focused goal list. Goals are indexed from $1$, there is no goal with position $0$. 
\Example \begin{coq_example*} diff --git a/doc/refman/RefMan-tacex.tex b/doc/refman/RefMan-tacex.tex index 9f4ddc8044..cb8f916f13 100644 --- a/doc/refman/RefMan-tacex.tex +++ b/doc/refman/RefMan-tacex.tex @@ -849,7 +849,7 @@ Ltac DSimplif trm := Ltac Length trm := match trm with | (_ * ?B) => let succ := Length B in constr:(S succ) - | _ => constr:1 + | _ => constr:(1) end. Ltac assoc := repeat rewrite <- Ass. \end{coq_example} diff --git a/doc/refman/RefMan-uti.tex b/doc/refman/RefMan-uti.tex index 08cdbee503..768d0df763 100644 --- a/doc/refman/RefMan-uti.tex +++ b/doc/refman/RefMan-uti.tex @@ -60,7 +60,7 @@ subdirectory of the sources. The majority of \Coq\ projects are very similar: a collection of {\tt .v} files and eventually some {\tt .ml} ones (a \Coq\ plugin). The main piece -of metadata needed in order to build the project are the command +of metadata needed in order to build the project are the command line options to {\tt coqc} (e.g. {\tt -R, -I}, \SeeAlso Section~\ref{coqoptions}). Collecting the list of files and options is the job of the {\tt \_CoqProject} file. @@ -98,7 +98,7 @@ Such command generates the following files: An optional file {\bf {\tt CoqMakefile.local}} can be provided by the user in order to extend {\tt CoqMakefile}. In particular one can declare custom actions to be performed before or after the build process. Similarly one can customize the install target or even provide new targets. Extension points are documented in the {\tt CoqMakefile} file. The extensions of the files listed in {\tt \_CoqProject} is -used in order to decide how to build them In particular: +used in order to decide how to build them. 
In particular: \begin{itemize} \item {\Coq} files must use the \texttt{.v} extension @@ -108,12 +108,171 @@ used in order to decide how to build them In particular: \end{itemize} The use of \texttt{.mlpack} files has to be preferred over \texttt{.mllib} -files, since it results in a ``packed'' plugin: All auxiliary +files, since it results in a ``packed'' plugin: All auxiliary modules (as {\tt Baz} and {\tt Bazaux}) are hidden inside the plugin's ``name space'' ({\tt Qux\_plugin}). This reduces the chances of begin unable to load two distinct plugins because of a clash in their auxiliary module names. +\paragraph{Timing targets and performance testing} +The generated \texttt{Makefile} supports the generation of two kinds +of timing data: per-file build-times, and per-line times for an +individual file. + +The following targets and \texttt{Makefile} variables allow collection +of per-file timing data: +\begin{itemize} +\item \texttt{TIMED=1} --- passing this variable will cause + \texttt{make} to emit a line describing the user-space build-time + and peak memory usage for each file built. + + \texttt{Note}: On Mac OS, this works best if you've installed + \texttt{gnu-time}. + + \texttt{Example}: For example, the output of \texttt{make TIMED=1} + may look like this: +\begin{verbatim} +COQDEP Fast.v +COQDEP Slow.v +COQC Slow.v +Slow (user: 0.34 mem: 395448 ko) +COQC Fast.v +Fast (user: 0.01 mem: 45184 ko) +\end{verbatim} +\item \texttt{pretty-timed} --- this target stores the output of + \texttt{make TIMED=1} into \texttt{time-of-build.log}, and displays + a table of the times, sorted from slowest to fastest, which is also + stored in \texttt{time-of-build-pretty.log}. If you want to + construct the log for targets other than the default one, you can + pass them via the variable \texttt{TGTS}, e.g., \texttt{make + pretty-timed TGTS="a.vo b.vo"}. + + \texttt{Note}: This target requires \texttt{python} to build the table. 
+ + \texttt{Note}: This target will \emph{append} to the timing log; if + you want a fresh start, you must remove the file + \texttt{time-of-build.log} or run \texttt{make cleanall}. + + \texttt{Example}: For example, the output of \texttt{make + pretty-timed} may look like this: +\begin{verbatim} +COQDEP Fast.v +COQDEP Slow.v +COQC Slow.v +Slow (user: 0.36 mem: 393912 ko) +COQC Fast.v +Fast (user: 0.05 mem: 45992 ko) +Time | File Name +-------------------- +0m00.41s | Total +-------------------- +0m00.36s | Slow +0m00.05s | Fast +\end{verbatim} +\item \texttt{print-pretty-timed-diff} --- this target builds a table + of timing changes between two compilations; run \texttt{make + make-pretty-timed-before} to build the log of the ``before'' + times, and run \texttt{make make-pretty-timed-after} to build the + log of the ``after'' times. The table is printed on the command + line, and stored in \texttt{time-of-build-both.log}. This target is + most useful for profiling the difference between two commits to a + repo. + + \texttt{Note}: This target requires \texttt{python} to build the table. + + \texttt{Note}: The \texttt{make-pretty-timed-before} and + \texttt{make-pretty-timed-after} targets will \emph{append} to the + timing log; if you want a fresh start, you must remove the files + \texttt{time-of-build-before.log} and + \texttt{time-of-build-after.log} or run \texttt{make cleanall} + \emph{before} building either the ``before'' or ``after'' targets. + + \texttt{Note}: The table will be sorted first by absolute time + differences rounded towards zero to a whole-number of seconds, then + by times in the ``after'' column, and finally lexicographically by + file name. This will put the biggest changes in either direction + first, and will prefer sorting by build-time over subsecond changes + in build time (which are frequently noise); lexicographic sorting + forces an order on files which take effectively no time to compile. 
+ + \texttt{Example}: For example, the output table from \texttt{make + print-pretty-timed-diff} may look like this: +\begin{verbatim} +After | File Name | Before || Change | % Change +-------------------------------------------------------- +0m00.39s | Total | 0m00.35s || +0m00.03s | +11.42% +-------------------------------------------------------- +0m00.37s | Slow | 0m00.01s || +0m00.36s | +3600.00% +0m00.02s | Fast | 0m00.34s || -0m00.32s | -94.11% +\end{verbatim} +\end{itemize} + +The following targets and \texttt{Makefile} variables allow collection +of per-line timing data: +\begin{itemize} +\item \texttt{TIMING=1} --- passing this variable will cause + \texttt{make} to use \texttt{coqc -time} to write to a + \texttt{.v.timing} file for each \texttt{.v} file compiled, which + contains line-by-line timing information. + + \texttt{Example}: For example, running \texttt{make all TIMING=1} may + result in a file like this: +\begin{verbatim} +Chars 0 - 26 [Require~Coq.ZArith.BinInt.] 0.157 secs (0.128u,0.028s) +Chars 27 - 68 [Declare~Reduction~comp~:=~vm_c...] 0. secs (0.u,0.s) +Chars 69 - 162 [Definition~foo0~:=~Eval~comp~i...] 0.153 secs (0.136u,0.019s) +Chars 163 - 208 [Definition~foo1~:=~Eval~comp~i...] 0.239 secs (0.236u,0.s) +\end{verbatim} + +\item \texttt{print-pretty-single-time-diff + BEFORE=path/to/file.v.before-timing + AFTER=path/to/file.v.after-timing} --- this target will make a + sorted table of the per-line timing differences between the timing + logs in the \texttt{BEFORE} and \texttt{AFTER} files, display it, + and save it to the file specified by the + \texttt{TIME\_OF\_PRETTY\_BUILD\_FILE} variable, which defaults to + \texttt{time-of-build-pretty.log}. + + To generate the \texttt{.v.before-timing} or + \texttt{.v.after-timing} files, you should pass + \texttt{TIMING=before} or \texttt{TIMING=after} rather than + \texttt{TIMING=1}. + + \texttt{Note}: The sorting used here is the same as in the + \texttt{print-pretty-timed-diff} target. 
+ + \texttt{Note}: This target requires \texttt{python} to build the table. + + \texttt{Example}: For example, running + \texttt{print-pretty-single-time-diff} might give a table like this: +\begin{verbatim} +After | Code | Before || Change | % Change +--------------------------------------------------------------------------------------------------- +0m00.50s | Total | 0m04.17s || -0m03.66s | -87.96% +--------------------------------------------------------------------------------------------------- +0m00.145s | Chars 069 - 162 [Definition~foo0~:=~Eval~comp~i...] | 0m00.192s || -0m00.04s | -24.47% +0m00.126s | Chars 000 - 026 [Require~Coq.ZArith.BinInt.] | 0m00.143s || -0m00.01s | -11.88% + N/A | Chars 027 - 068 [Declare~Reduction~comp~:=~nati...] | 0m00.s || +0m00.00s | N/A +0m00.s | Chars 027 - 068 [Declare~Reduction~comp~:=~vm_c...] | N/A || +0m00.00s | N/A +0m00.231s | Chars 163 - 208 [Definition~foo1~:=~Eval~comp~i...] | 0m03.836s || -0m03.60s | -93.97% +\end{verbatim} + +\item \texttt{all.timing.diff}, \texttt{path/to/file.v.timing.diff} + --- The \texttt{path/to/file.v.timing.diff} target will make a + \texttt{.v.timing.diff} file for the corresponding \texttt{.v} file, + with a table as would be generated by the + \texttt{print-pretty-single-time-diff} target; it depends on having + already made the corresponding \texttt{.v.before-timing} and + \texttt{.v.after-timing} files, which can be made by passing + \texttt{TIMING=before} and \texttt{TIMING=after}. The + \texttt{all.timing.diff} target will make such timing difference + files for all of the \texttt{.v} files that the \texttt{Makefile} + knows about. It will fail if some \texttt{.v.before-timing} or + \texttt{.v.after-timing} files don't exist. + + \texttt{Note}: This target requires \texttt{python} to build the table. +\end{itemize} + \paragraph{Notes about including the generated Makefile} This practice is discouraged. 
The contents of this file, including variable names @@ -121,6 +280,43 @@ and status of rules shall change in the future. Users are advised to include {\tt Makefile.conf} or call a target of the generated Makefile as in {\tt make -f Makefile target} from another Makefile. +One way to get access to all targets of the generated +\texttt{CoqMakefile} is to have a generic target for invoking unknown +targets. For example: +\begin{verbatim} +# KNOWNTARGETS will not be passed along to CoqMakefile +KNOWNTARGETS := CoqMakefile extra-stuff extra-stuff2 +# KNOWNFILES will not get implicit targets from the final rule, and so +# depending on them won't invoke the submake +# Warning: These files get declared as PHONY, so any targets depending +# on them always get rebuilt +KNOWNFILES := Makefile _CoqProject + +.DEFAULT_GOAL := invoke-coqmakefile + +CoqMakefile: Makefile _CoqProject + $(COQBIN)coq_makefile -f _CoqProject -o CoqMakefile + +invoke-coqmakefile: CoqMakefile + $(MAKE) --no-print-directory -f CoqMakefile $(filter-out $(KNOWNTARGETS),$(MAKECMDGOALS)) + +.PHONY: invoke-coqmakefile $(KNOWNFILES) + +#################################################################### +#################################################################### +#################################################################### +#################################################################### +## Your targets here ## +#################################################################### +#################################################################### +#################################################################### +#################################################################### + +# This should be the last rule, to handle any targets not declared above +%: invoke-coqmakefile + @true +\end{verbatim} + \paragraph{Notes for users of {\tt coq\_makefile} with version $<$ 8.7} \begin{itemize} @@ -128,7 +324,7 @@ as in {\tt make -f Makefile target} from another Makefile. 
or after the build (like invoking make on a subdirectory) one can hook in {\tt pre-all} and {\tt post-all} extension points \item \texttt{-extra-phony} and \texttt{-extra} are deprecated. To provide - additional target ({\tt .PHONY} or not) please use + additional target ({\tt .PHONY} or not) please use {\tt CoqMakefile.local} \end{itemize} diff --git a/doc/refman/Reference-Manual.tex b/doc/refman/Reference-Manual.tex index 291c07de4c..fc1c01cf24 100644 --- a/doc/refman/Reference-Manual.tex +++ b/doc/refman/Reference-Manual.tex @@ -22,6 +22,17 @@ \usepackage{xspace} \usepackage{pmboxdraw} \usepackage{float} +\usepackage{color} + \definecolor{dkblue}{rgb}{0,0.1,0.5} + \definecolor{lightblue}{rgb}{0,0.5,0.5} + \definecolor{dkgreen}{rgb}{0,0.4,0} + \definecolor{dk2green}{rgb}{0.4,0,0} + \definecolor{dkviolet}{rgb}{0.6,0,0.8} + \definecolor{dkpink}{rgb}{0.2,0,0.6} +\usepackage{listings} + \def\lstlanguagefiles{coq-listing.tex} +\usepackage{tabularx} +\usepackage{array,longtable} \floatstyle{boxed} \restylefloat{figure} @@ -99,6 +110,11 @@ Options A and B of the licence are {\em not} elected.} \include{RefMan-ltac.v}% Writing tactics \include{RefMan-tacex.v}% Detailed Examples of tactics +\lstset{language=SSR} +\lstset{moredelim=[is][]{|*}{*|}} +\lstset{moredelim=*[is][\itshape\rmfamily]{/*}{*/}} +\include{RefMan-ssr} + \part{User extensions} \include{RefMan-syn.v}% The Syntax and the Grammar commands %%SUPPRIME \include{RefMan-tus.v}% Writing tactics diff --git a/doc/refman/Setoid.tex b/doc/refman/Setoid.tex index 2c9602a229..6c79284389 100644 --- a/doc/refman/Setoid.tex +++ b/doc/refman/Setoid.tex @@ -156,9 +156,9 @@ compatibility constraints. \begin{cscexample}[Rewriting] Continuing the previous examples, suppose that the user must prove \texttt{set\_eq int (union int (union int S1 S2) S2) (f S1 S2)} under the -hypothesis \texttt{H: set\_eq int S2 (nil int)}. It is possible to +hypothesis \texttt{H: set\_eq int S2 (@nil int)}. 
It is possible to use the \texttt{rewrite} tactic to replace the first two occurrences of -\texttt{S2} with \texttt{nil int} in the goal since the context +\texttt{S2} with \texttt{@nil int} in the goal since the context \texttt{set\_eq int (union int (union int S1 nil) nil) (f S1 S2)}, being a composition of morphisms instances, is a morphism. However the tactic will fail replacing the third occurrence of \texttt{S2} unless \texttt{f} diff --git a/doc/refman/Universes.tex b/doc/refman/Universes.tex index 2bb1301c79..6ea2537399 100644 --- a/doc/refman/Universes.tex +++ b/doc/refman/Universes.tex @@ -134,12 +134,14 @@ producing global universe constraints, one can use the \asection{{\tt Cumulative, NonCumulative}} \comindex{Cumulative} \comindex{NonCumulative} -\optindex{Inductive Cumulativity} +\optindex{Polymorphic Inductive Cumulativity} -Inductive types, coinductive types, variants and records can be +Polymorphic inductive types, coinductive types, variants and records can be declared cumulative using the \texttt{Cumulative}. Alternatively, -there is an option \texttt{Set Inductive Cumulativity} which when set, -makes all subsequent inductive definitions cumulative. Consider the examples below. +there is an option \texttt{Set Polymorphic Inductive Cumulativity} which when set, +makes all subsequent \emph{polymorphic} inductive definitions cumulative. When set, +inductive types and the like can be enforced to be +\emph{non-cumulative} using the \texttt{NonCumulative} prefix. Consider the examples below. \begin{coq_example*} Polymorphic Cumulative Inductive list {A : Type} := | nil : list @@ -158,24 +160,61 @@ This also means that any two instances of \texttt{list} are convertible: $\WTEGCONV{\mathtt{list@\{i\}} A}{\mathtt{list@\{j\}} B}$ whenever $\WTEGCONV{A}{B}$ and furthermore their corresponding (when fully applied to convertible arguments) constructors. See Chapter~\ref{Cic} for more details on convertibility and subtyping. 
-Also notice the subtyping constraints for the \emph{non-cumulative} version of list:
+The following is an example of a record with non-trivial subtyping relation:
 \begin{coq_example*}
-Polymorphic NonCumulative Inductive list' {A : Type} :=
-| nil' : list'
-| cons' : A -> list' -> list'.
+Polymorphic Cumulative Record packType := {pk : Type}.
 \end{coq_example*}
 \begin{coq_example}
-Print list'.
+Print packType.
+\end{coq_example}
+Notice that as expected, \texttt{packType@\{i\}} and \texttt{packType@\{j\}} are
+convertible if and only if \texttt{i $=$ j}.
+
+Cumulative inductive types, coinductive types, variants and records
+only make sense when they are universe polymorphic. Therefore, an
+error is issued whenever the user uses the \texttt{Cumulative} or
+\texttt{NonCumulative} prefix in a monomorphic context.
+Notice that this is not the case for the option \texttt{Set Polymorphic Inductive Cumulativity}.
+That is, this option, when set, makes all subsequent \emph{polymorphic}
+inductive declarations cumulative (unless, of course, the \texttt{NonCumulative} prefix is used)
+but has no effect on \emph{monomorphic} inductive declarations.
+Consider the following examples.
+\begin{coq_example}
+Monomorphic Cumulative Inductive Unit := unit.
+\end{coq_example}
+\begin{coq_example}
+Monomorphic NonCumulative Inductive Unit := unit.
 \end{coq_example}
-The following is an example of a record with non-trivial subtyping relation:
 \begin{coq_example*}
-Polymorphic Cumulative Record packType := {pk : Type}.
+Set Polymorphic Inductive Cumulativity.
+Inductive Unit := unit.
 \end{coq_example*}
 \begin{coq_example}
-Print packType.
+Print Unit.
 \end{coq_example}
-Notice that as expected, \texttt{packType@\{i\}} and \texttt{packType@\{j\}} are convertible if and only if \texttt{i $=$ j}.
+\subsection*{An example of a proof using cumulativity}
+
+\begin{coq_example}
+Set Universe Polymorphism.
+Set Polymorphic Inductive Cumulativity.
+ +Inductive eq@{i} {A : Type@{i}} (x : A) : A -> Type@{i} := eq_refl : eq x x. + +Definition funext_type@{a b e} (A : Type@{a}) (B : A -> Type@{b}) + := forall f g : (forall a, B a), + (forall x, eq@{e} (f x) (g x)) + -> eq@{e} f g. + +Section down. + Universes a b e e'. + Constraint e' < e. + Lemma funext_down {A B} + (H : @funext_type@{a b e} A B) : @funext_type@{a b e'} A B. + Proof. + exact H. + Defined. +\end{coq_example} \asection{Global and local universes} @@ -293,8 +332,18 @@ Universes k l. Check (le@{k l}). \end{coq_example} -User-named universes are considered rigid for unification and are never -minimized. +User-named universes and the anonymous universe implicitly attached to +an explicit $Type$ are considered rigid for unification and are never +minimized. Flexible anonymous universes can be produced with an +underscore or by omitting the annotation to a polymorphic definition. + +\begin{coq_example} + Check (fun x => x) : Type -> Type. + Check (fun x => x) : Type -> Type@{_}. + + Check le@{k _}. + Check le. +\end{coq_example} \subsection{\tt Unset Strict Universe Declaration. 
\optindex{Strict Universe Declaration} diff --git a/doc/refman/coq-listing.tex b/doc/refman/coq-listing.tex new file mode 100644 index 0000000000..c69c3b1b81 --- /dev/null +++ b/doc/refman/coq-listing.tex @@ -0,0 +1,152 @@ +%======================================================================= +% Listings LaTeX package style for Gallina + SSReflect (Assia Mahboubi 2007) + +\lstdefinelanguage{SSR} { + +% Anything betweeen $ becomes LaTeX math mode +mathescape=true, +% Comments may or not include Latex commands +texcl=false, + + +% Vernacular commands +morekeywords=[1]{ +From, Section, Module, End, Require, Import, Export, Defensive, Function, +Variable, Variables, Parameter, Parameters, Axiom, Hypothesis, Hypotheses, +Notation, Local, Tactic, Reserved, Scope, Open, Close, Bind, Delimit, +Definition, Let, Ltac, Fixpoint, CoFixpoint, Add, Morphism, Relation, +Implicit, Arguments, Set, Unset, Contextual, Strict, Prenex, Implicits, +Inductive, CoInductive, Record, Structure, Canonical, Coercion, +Theorem, Lemma, Corollary, Proposition, Fact, Remark, Example, +Proof, Goal, Save, Qed, Defined, Hint, Resolve, Rewrite, View, +Search, Show, Print, Printing, All, Graph, Projections, inside, +outside, Locate, Maximal}, + +% Gallina +morekeywords=[2]{forall, exists, exists2, fun, fix, cofix, struct, + match, with, end, as, in, return, let, if, is, then, else, + for, of, nosimpl}, + +% Sorts +morekeywords=[3]{Type, Prop}, + +% Various tactics, some are std Coq subsumed by ssr, for the manual purpose +morekeywords=[4]{ + pose, set, move, case, elim, apply, clear, + hnf, intro, intros, generalize, rename, pattern, after, + destruct, induction, using, refine, inversion, injection, + rewrite, congr, unlock, compute, ring, field, + replace, fold, unfold, change, cutrewrite, simpl, + have, gen, generally, suff, wlog, suffices, without, loss, nat_norm, + assert, cut, trivial, revert, bool_congr, nat_congr, abstract, + symmetry, transitivity, auto, split, left, right, autorewrite}, + 
+% Terminators +morekeywords=[5]{ + by, done, exact, reflexivity, tauto, romega, omega, + assumption, solve, contradiction, discriminate}, + + +% Control +morekeywords=[6]{do, last, first, try, idtac, repeat}, + +% Various symbols +% For the ssr manual we turn off the prettyprint of formulas +% literate= +% {->}{{$\rightarrow\,$}}2 +% {->}{{\tt ->}}3 +% {<-}{{$\leftarrow\,$}}2 +% {<-}{{\tt <-}}2 +% {>->}{{$\mapsto$}}3 +% {<=}{{$\leq$}}1 +% {>=}{{$\geq$}}1 +% {<>}{{$\neq$}}1 +% {/\\}{{$\wedge$}}2 +% {\\/}{{$\vee$}}2 +% {<->}{{$\leftrightarrow\;$}}3 +% {<=>}{{$\Leftrightarrow\;$}}3 +% {:nat}{{$~\in\mathbb{N}$}}3 +% {fforall\ }{{$\forall_f\,$}}1 +% {forall\ }{{$\forall\,$}}1 +% {exists\ }{{$\exists\,$}}1 +% {negb}{{$\neg$}}1 +% {spp}{{:*:\,}}1 +% {~}{{$\sim$}}1 +% {\\in}{{$\in\;$}}1 +% {/\\}{$\land\,$}1 +% {:*:}{{$*$}}2 +% {=>}{{$\,\Rightarrow\ $}}1 +% {=>}{{\tt =>}}2 +% {:=}{{{\tt:=}\,\,}}2 +% {==}{{$\equiv$}\,}2 +% {!=}{{$\neq$}\,}2 +% {^-1}{{$^{-1}$}}1 +% {elt'}{elt'}1 +% {=}{{\tt=}\,\,}2 +% {+}{{\tt+}\,\,}2, +literate= + {isn't }{{{\ttfamily\color{dkgreen} isn't }}}1, + +% Comments delimiters, we do turn this off for the manual +%comment=[s]{(*}{*)}, + +% Spaces are not displayed as a special character +showstringspaces=false, + +% String delimiters +morestring=[b]", +morestring=[d]", + +% Size of tabulations +tabsize=3, + +% Enables ASCII chars 128 to 255 +extendedchars=true, + +% Case sensitivity +sensitive=true, + +% Automatic breaking of long lines +breaklines=true, + +% Default style fors listings +basicstyle=\ttfamily, + +% Position of captions is bottom +captionpos=b, + +% Full flexible columns +columns=[l]fullflexible, + +% Style for (listings') identifiers +identifierstyle={\ttfamily\color{black}}, +% Note : highlighting of Coq identifiers is done through a new +% delimiter definition through an lstset at the begining of the +% document. Don't know how to do better. 
+ +% Style for declaration keywords +keywordstyle=[1]{\ttfamily\color{dkviolet}}, + +% Style for gallina keywords +keywordstyle=[2]{\ttfamily\color{dkgreen}}, + +% Style for sorts keywords +keywordstyle=[3]{\ttfamily\color{lightblue}}, + +% Style for tactics keywords +keywordstyle=[4]{\ttfamily\color{dkblue}}, + +% Style for terminators keywords +keywordstyle=[5]{\ttfamily\color{red}}, + + +%Style for iterators +keywordstyle=[6]{\ttfamily\color{dkpink}}, + +% Style for strings +stringstyle=\ttfamily, + +% Style for comments +commentstyle=\rmfamily, + +} diff --git a/doc/rt/RefMan-cover.tex b/doc/rt/RefMan-cover.tex deleted file mode 100644 index ac1686c25e..0000000000 --- a/doc/rt/RefMan-cover.tex +++ /dev/null @@ -1,45 +0,0 @@ -\documentstyle[RRcover]{book} - % The use of the style `french' forces the french abstract to appear first. - -\RRtitle{Manuel de r\'ef\'erence du syst\`eme Coq \\ version V7.1} -\RRetitle{The Coq Proof Assistant \\ Reference Manual \\ Version 7.1 -\thanks -{This research was partly supported by ESPRIT Basic Research -Action ``Types'' and by the GDR ``Programmation'' co-financed by MRE-PRC and CNRS.} -} -\RRauthor{Bruno Barras, Samuel Boutin, Cristina Cornes, -Judica\"el Courant, Jean-Christophe Filli\^atre, Eduardo Gim\'enez, -Hugo Herbelin, G\'erard Huet, C\'esar Mu\~noz, Chetan Murthy, -Catherine Parent, Christine Paulin-Mohring, -Amokrane Sa{\"\i}bi, Benjamin Werner} -\authorhead{} -\titlehead{Coq V7.1 Reference Manual} -\RRtheme{2} -\RRprojet{Coq} -\RRNo{0123456789} -\RRdate{May 1997} -%\RRpages{} -\URRocq - -\RRresume{Coq est un syst\`eme permettant le d\'eveloppement et la -v\'erification de preuves formelles dans une logique d'ordre -sup\'erieure incluant un riche langage de d\'efinitions de fonctions. 
-Ce document constitue le manuel de r\'ef\'erence de la version V7.1 -qui est distribu\'ee par ftp anonyme \`a l'adresse -\url{ftp://ftp.inria.fr/INRIA/coq/}} - -\RRmotcle{Coq, Syst\`eme d'aide \`a la preuve, Preuves formelles, -Calcul des Constructions Inductives} - - -\RRabstract{Coq is a proof assistant based on a higher-order logic -allowing powerful definitions of functions. -Coq V7.1 is available by anonymous -ftp at \url{ftp://ftp.inria.fr/INRIA/coq/}} - -\RRkeyword{Coq, Proof Assistant, Formal Proofs, Calculus of Inductives -Constructions} - -\begin{document} -\makeRT -\end{document} diff --git a/doc/rt/Tutorial-cover.tex b/doc/rt/Tutorial-cover.tex deleted file mode 100644 index aefea8d429..0000000000 --- a/doc/rt/Tutorial-cover.tex +++ /dev/null @@ -1,47 +0,0 @@ -\documentstyle[RRcover]{book} - % The use of the style `french' forces the french abstract to appear first. -\RRetitle{ -The Coq Proof Assistant \\ A Tutorial \\ Version 7.1 -\thanks{This research was partly supported by ESPRIT Basic Research -Action ``Types'' and by the GDR ``Programmation'' co-financed by MRE-PRC and CNRS.} -} -\RRtitle{Coq \\ Une introduction \\ V7.1 } -\RRauthor{G\'erard Huet, Gilles Kahn and Christine Paulin-Mohring} -\RRtheme{2} -\RRprojet{{Coq -\\[15pt] -{INRIA Rocquencourt} -{\hskip -5.25pt} -~~{\bf ---}~~ - \def\thefootnote{\arabic{footnote}\hss} -{CNRS - ENS Lyon} -\footnote[1]{LIP URA 1398 du CNRS, -46 All\'ee d'Italie, 69364 Lyon CEDEX 07, France.} -{\hskip -14pt}}} - -%\RRNo{0123456789} -\RRNo{0204} -\RRdate{Ao\^ut 1997} - -\URRocq -\RRresume{Coq est un syst\`eme permettant le d\'eveloppement et la -v\'erification de preuves formelles dans une logique d'ordre -sup\'erieure incluant un riche langage de d\'efinitions de fonctions. 
-Ce document constitue une introduction pratique \`a l'utilisation de -la version V7.1 qui est distribu\'ee par ftp anonyme \`a l'adresse -\url{ftp://ftp.inria.fr/INRIA/coq/}} - -\RRmotcle{Coq, Syst\`eme d'aide \`a la preuve, Preuves formelles, Calcul -des Constructions Inductives} - -\RRabstract{Coq is a proof assistant based on a higher-order logic -allowing powerful definitions of functions. This document is a -tutorial for the version V7.1 of Coq. This version is available by -anonymous ftp at \url{ftp://ftp.inria.fr/INRIA/coq/}} - -\RRkeyword{Coq, Proof Assistant, Formal Proofs, Calculus of Inductives -Constructions} - -\begin{document} -\makeRT -\end{document} diff --git a/doc/stdlib/index-list.html.template b/doc/stdlib/index-list.html.template index 48f82f2d92..48048b7a0f 100644 --- a/doc/stdlib/index-list.html.template +++ b/doc/stdlib/index-list.html.template @@ -591,5 +591,6 @@ through the <tt>Require Import</tt> command.</p> theories/Compat/AdmitAxiom.v theories/Compat/Coq85.v theories/Compat/Coq86.v + theories/Compat/Coq87.v </dd> </dl> diff --git a/doc/tools/Translator.tex b/doc/tools/Translator.tex index ed1d336d9e..3ee65d6f22 100644 --- a/doc/tools/Translator.tex +++ b/doc/tools/Translator.tex @@ -614,7 +614,7 @@ is compiled by a Makefile with the following constraints: \begin{itemize} \item compilation is achieved by invoking make without specifying a target \item options are passed to Coq with make variable COQFLAGS that - includes variables OPT, COQLIBS, OTHERFLAGS and COQ_XML. + includes variables OPT, COQLIBS, and OTHERFLAGS. \end{itemize} These constraints are met by the makefiles produced by {\tt coq\_makefile} diff --git a/doc/tutorial/Tutorial.tex b/doc/tutorial/Tutorial.tex index 30b6304c16..77ce8574f2 100644 --- a/doc/tutorial/Tutorial.tex +++ b/doc/tutorial/Tutorial.tex @@ -23,7 +23,7 @@ of Inductive Constructions. 
It allows the interactive construction of formal proofs, and also the manipulation of functional programs consistently with their specifications. It runs as a computer program on many architectures. -%, and mainly on Unix machines. + It is available with a variety of user interfaces. The present document does not attempt to present a comprehensive view of all the possibilities of \Coq, but rather to present in the most elementary @@ -33,63 +33,34 @@ proof tools. For more advanced information, the reader could refer to the \Coq{} Reference Manual or the \textit{Coq'Art}, a book by Y. Bertot and P. Castéran on practical uses of the \Coq{} system. -Coq can be used from a standard teletype-like shell window but -preferably through the graphical user interface -CoqIde\footnote{Alternative graphical interfaces exist: Proof General -and Pcoq.}. - Instructions on installation procedures, as well as more comprehensive documentation, may be found in the standard distribution of \Coq, -which may be obtained from \Coq{} web site \url{https://coq.inria.fr/}. - -In the following, we assume that \Coq{} is called from a standard -teletype-like shell window. All examples preceded by the prompting -sequence \verb:Coq < : represent user input, terminated by a -period. - -The following lines usually show \Coq's answer as it appears on the -users screen. When used from a graphical user interface such as -CoqIde, the prompt is not displayed: user input is given in one window +which may be obtained from \Coq{} web site +\url{https://coq.inria.fr/}\footnote{You can report any bug you find +while using \Coq{} at \url{https://coq.inria.fr/bugs}. Make sure to +always provide a way to reproduce it and to specify the exact version +you used. You can get this information by running \texttt{coqc -v}}. +\Coq{} is distributed together with a graphical user interface called +CoqIDE. Alternative interfaces exist such as +Proof General\footnote{See \url{https://proofgeneral.github.io/}.}. 
+ +In the following examples, lines preceded by the prompt \verb:Coq < : +represent user input, terminated by a period. +The following lines usually show \Coq's answer. +When used from a graphical user interface such as +CoqIDE, the prompt is not displayed: user input is given in one window and \Coq's answers are displayed in a different window. -The sequence of such examples is a valid \Coq{} -session, unless otherwise specified. This version of the tutorial has -been prepared on a PC workstation running Linux. The standard -invocation of \Coq{} delivers a message such as: - -\begin{small} -\begin{flushleft} -\begin{verbatim} -unix:~> coqtop -Welcome to Coq 8.2 (January 2009) - -Coq < -\end{verbatim} -\end{flushleft} -\end{small} - -The first line gives a banner stating the precise version of \Coq{} -used. You should always return this banner when you report an anomaly -to our bug-tracking system -\url{https://coq.inria.fr/bugs/}. - \chapter{Basic Predicate Calculus} \section{An overview of the specification language Gallina} A formal development in Gallina consists in a sequence of {\sl declarations} -and {\sl definitions}. You may also send \Coq{} {\sl commands} which are -not really part of the formal development, but correspond to information -requests, or service routine invocations. For instance, the command: -\begin{verbatim} -Coq < Quit. -\end{verbatim} -terminates the current session. +and {\sl definitions}. \subsection{Declarations} -A declaration associates a {\sl name} with -a {\sl specification}. +A declaration associates a {\sl name} with a {\sl specification}. A name corresponds roughly to an identifier in a programming language, i.e. to a string of letters, digits, and a few ASCII symbols like underscore (\verb"_") and prime (\verb"'"), starting with a letter. @@ -165,25 +136,25 @@ in the current context: Check gt. 
\end{coq_example} -which tells us that \verb:gt: is a function expecting two arguments of -type \verb:nat: in order to build a logical proposition. +which tells us that \texttt{gt} is a function expecting two arguments of +type \texttt{nat} in order to build a logical proposition. What happens here is similar to what we are used to in a functional -programming language: we may compose the (specification) type \verb:nat: -with the (abstract) type \verb:Prop: of logical propositions through the +programming language: we may compose the (specification) type \texttt{nat} +with the (abstract) type \texttt{Prop} of logical propositions through the arrow function constructor, in order to get a functional type -\verb:nat->Prop:: +\texttt{nat -> Prop}: \begin{coq_example} Check (nat -> Prop). \end{coq_example} -which may be composed one more times with \verb:nat: in order to obtain the -type \verb:nat->nat->Prop: of binary relations over natural numbers. -Actually the type \verb:nat->nat->Prop: is an abbreviation for -\verb:nat->(nat->Prop):. +which may be composed once more with \verb:nat: in order to obtain the +type \texttt{nat -> nat -> Prop} of binary relations over natural numbers. +Actually the type \texttt{nat -> nat -> Prop} is an abbreviation for +\texttt{nat -> (nat -> Prop)}. Functional notions may be composed in the usual way. An expression $f$ of type $A\ra B$ may be applied to an expression $e$ of type $A$ in order to form the expression $(f~e)$ of type $B$. Here we get that -the expression \verb:(gt n): is well-formed of type \verb:nat->Prop:, +the expression \verb:(gt n): is well-formed of type \texttt{nat -> Prop}, and thus that the expression \verb:(gt n O):, which abbreviates \verb:((gt n) O):, is a well-formed proposition. \begin{coq_example} @@ -193,11 +164,12 @@ Check gt n O. 
\subsection{Definitions} The initial prelude contains a few arithmetic definitions: -\verb:nat: is defined as a mathematical collection (type \verb:Set:), constants -\verb:O:, \verb:S:, \verb:plus:, are defined as objects of types -respectively \verb:nat:, \verb:nat->nat:, and \verb:nat->nat->nat:. +\texttt{nat} is defined as a mathematical collection (type \texttt{Set}), +constants \texttt{O}, \texttt{S}, \texttt{plus}, are defined as objects of +types respectively \texttt{nat}, \texttt{nat -> nat}, and \texttt{nat -> +nat -> nat}. You may introduce new definitions, which link a name to a well-typed value. -For instance, we may introduce the constant \verb:one: as being defined +For instance, we may introduce the constant \texttt{one} as being defined to be equal to the successor of zero: \begin{coq_example} Definition one := (S O). @@ -217,17 +189,18 @@ argument \verb:m: of type \verb:nat: in order to build its result as \verb:(plus m m):: \begin{coq_example} -Definition double (m:nat) := plus m m. +Definition double (m : nat) := plus m m. \end{coq_example} This introduces the constant \texttt{double} defined as the -expression \texttt{fun m:nat => plus m m}. -The abstraction introduced by \texttt{fun} is explained as follows. The expression -\verb+fun x:A => e+ is well formed of type \verb+A->B+ in a context -whenever the expression \verb+e+ is well-formed of type \verb+B+ in -the given context to which we add the declaration that \verb+x+ -is of type \verb+A+. Here \verb+x+ is a bound, or dummy variable in -the expression \verb+fun x:A => e+. For instance we could as well have -defined \verb:double: as \verb+fun n:nat => (plus n n)+. +expression \texttt{fun m : nat => plus m m}. +The abstraction introduced by \texttt{fun} is explained as follows. 
+The expression \texttt{fun x : A => e} is well formed of type +\texttt{A -> B} in a context whenever the expression \texttt{e} is +well-formed of type \texttt{B} in the given context to which we add the +declaration that \texttt{x} is of type \texttt{A}. Here \texttt{x} is a +bound, or dummy variable in the expression \texttt{fun x : A => e}. +For instance we could as well have defined \texttt{double} as +\texttt{fun n : nat => (plus n n)}. Bound (local) variables and free (global) variables may be mixed. For instance, we may define the function which adds the constant \verb:n: @@ -243,19 +216,17 @@ Binding operations are well known for instance in logic, where they are called quantifiers. Thus we may universally quantify a proposition such as $m>0$ in order to get a universal proposition $\forall m\cdot m>0$. Indeed this operator is available in \Coq, with -the following syntax: \verb+forall m:nat, gt m O+. Similarly to the +the following syntax: \texttt{forall m : nat, gt m O}. Similarly to the case of the functional abstraction binding, we are obliged to declare explicitly the type of the quantified variable. We check: \begin{coq_example} -Check (forall m:nat, gt m 0). -\end{coq_example} -We may revert to the clean state of -our initial session using the \Coq{} \verb:Reset: command: -\begin{coq_example} -Reset Initial. +Check (forall m : nat, gt m 0). \end{coq_example} + \begin{coq_eval} +Reset Initial. Set Printing Width 60. +Set Printing Compact Contexts. \end{coq_eval} \section{Introduction to the proof engine: Minimal Logic} @@ -270,7 +241,7 @@ Variables A B C : Prop. \end{coq_example} We shall consider simple implications, such as $A\ra B$, read as -``$A$ implies $B$''. Remark that we overload the arrow symbol, which +``$A$ implies $B$''. Note that we overload the arrow symbol, which has been used above as the functionality type constructor, and which may be used as well as propositional connective: \begin{coq_example} @@ -318,7 +289,7 @@ apply H. 
 We are now in the situation where we have two judgments as conjectures
 that remain to be proved. Only the first is listed in full, for the
 others the system displays only the corresponding subgoal, without its
-local hypotheses list. Remark that \verb:apply: has kept the local
+local hypotheses list. Note that \verb:apply: has kept the local
 hypotheses of its father judgment, which are still available for the
 judgments it generated.
 
@@ -340,17 +311,12 @@ the current goal is solvable from the current local assumptions:
 assumption.
 \end{coq_example}
 
-The proof is now finished. We may either discard it, by using the
-command \verb:Abort: which returns to the standard \Coq{} toplevel loop
-without further ado, or else save it as a lemma in the current context,
-under name say \verb:trivial_lemma::
+The proof is now finished. We are now going to ask \Coq{}'s kernel
+to check and save the proof.
 \begin{coq_example}
-Save trivial_lemma.
+Qed.
 \end{coq_example}
 
-As a comment, the system shows the proof script listing all tactic
-commands used in the proof.
-
 Let us redo the same proof with a few variations. First of all we may
 name the initial goal as a conjectured lemma:
 \begin{coq_example}
@@ -383,46 +349,30 @@ We may thus complete the proof of \verb:distr_impl: with one composite tactic:
 apply H; [ assumption | apply H0; assumption ].
 \end{coq_example}
 
-Let us now save lemma \verb:distr_impl::
-\begin{coq_example}
-Qed.
-\end{coq_example}
-
-Here \verb:Qed: needs no argument, since we gave the name \verb:distr_impl:
-in advance.
+You should be aware however that relying on automatically generated names is
+not robust to slight updates to this proof script. Consequently, it is
+discouraged in finished proof scripts. As for the composition of tactics with
+\texttt{;} it may hinder the readability of the proof script and it is also
+harder to see what's going on when replaying the proof because composed
+tactics are evaluated in one go.
Actually, such an easy combination of tactics \verb:intro:, \verb:apply: and \verb:assumption: may be found completely automatically by an automatic tactic, called \verb:auto:, without user guidance: -\begin{coq_example} -Lemma distr_imp : (A -> B -> C) -> (A -> B) -> A -> C. -auto. -\end{coq_example} - -This time, we do not save the proof, we just discard it with the \verb:Abort: -command: -\begin{coq_example} +\begin{coq_eval} Abort. +\end{coq_eval} +\begin{coq_example} +Lemma distr_impl : (A -> B -> C) -> (A -> B) -> A -> C. +auto. \end{coq_example} -At any point during a proof, we may use \verb:Abort: to exit the proof mode -and go back to Coq's main loop. We may also use \verb:Restart: to restart -from scratch the proof of the same lemma. We may also use \verb:Undo: to -backtrack one step, and more generally \verb:Undo n: to -backtrack n steps. - -We end this section by showing a useful command, \verb:Inspect n.:, -which inspects the global \Coq{} environment, showing the last \verb:n: declared -notions: +Let us now save lemma \verb:distr_impl:: \begin{coq_example} -Inspect 3. +Qed. \end{coq_example} -The declarations, whether global parameters or axioms, are shown preceded by -\verb:***:; definitions and lemmas are stated with their specification, but -their value (or proof-term) is omitted. - \section{Propositional Calculus} \subsection{Conjunction} @@ -438,7 +388,7 @@ connective. Let us show how to use these ideas for the propositional connectives \begin{coq_example} Lemma and_commutative : A /\ B -> B /\ A. -intro. +intro H. \end{coq_example} We make use of the conjunctive hypothesis \verb:H: with the \verb:elim: tactic, @@ -453,8 +403,11 @@ conjunctive goal into the two subgoals: split. \end{coq_example} and the proof is now trivial. Indeed, the whole proof is obtainable as follows: +\begin{coq_eval} +Abort. +\end{coq_eval} \begin{coq_example} -Restart. +Lemma and_commutative : A /\ B -> B /\ A. intro H; elim H; auto. Qed. 
\end{coq_example} @@ -465,7 +418,7 @@ conjunction introduction operator \verb+conj+ Check conj. \end{coq_example} -Actually, the tactic \verb+Split+ is just an abbreviation for \verb+apply conj.+ +Actually, the tactic \verb+split+ is just an abbreviation for \verb+apply conj.+ What we have just seen is that the \verb:auto: tactic is more powerful than just a simple application of local hypotheses; it tries to apply as well @@ -498,6 +451,17 @@ clear away unnecessary hypotheses which may clutter your screen. clear H. \end{coq_example} +The tactic \verb:destruct: combines the effects of \verb:elim:, \verb:intros:, +and \verb:clear:: + +\begin{coq_eval} +Abort. +\end{coq_eval} +\begin{coq_example} +Lemma or_commutative : A \/ B -> B \/ A. +intros H; destruct H. +\end{coq_example} + The disjunction connective has two introduction rules, since \verb:P\/Q: may be obtained from \verb:P: or from \verb:Q:; the two corresponding proof constructors are called respectively \verb:or_introl: and @@ -528,8 +492,11 @@ such a simple tautology. The reason is that we want to keep A complete tactic for propositional tautologies is indeed available in \Coq{} as the \verb:tauto: tactic. +\begin{coq_eval} +Abort. +\end{coq_eval} \begin{coq_example} -Restart. +Lemma or_commutative : A \/ B -> B \/ A. tauto. Qed. \end{coq_example} @@ -541,8 +508,8 @@ currently defined in the context: Print or_commutative. \end{coq_example} -It is not easy to understand the notation for proof terms without a few -explanations. The \texttt{fun} prefix, such as \verb+fun H:A\/B =>+, +It is not easy to understand the notation for proof terms without some +explanations. The \texttt{fun} prefix, such as \verb+fun H : A\/B =>+, corresponds to \verb:intro H:, whereas a subterm such as \verb:(or_intror: \verb:B H0): @@ -572,15 +539,17 @@ Lemma Peirce : ((A -> B) -> A) -> A. try tauto. 
\end{coq_example} -Note the use of the \verb:Try: tactical, which does nothing if its tactic +Note the use of the \verb:try: tactical, which does nothing if its tactic argument fails. This may come as a surprise to someone familiar with classical reasoning. Peirce's lemma is true in Boolean logic, i.e. it evaluates to \verb:true: for every truth-assignment to \verb:A: and \verb:B:. Indeed the double negation of Peirce's law may be proved in \Coq{} using \verb:tauto:: -\begin{coq_example} +\begin{coq_eval} Abort. +\end{coq_eval} +\begin{coq_example} Lemma NNPeirce : ~ ~ (((A -> B) -> A) -> A). tauto. Qed. @@ -651,26 +620,20 @@ function and predicate symbols. \subsection{Sections and signatures} Usually one works in some domain of discourse, over which range the individual -variables and function symbols. In \Coq{} we speak in a language with a rich -variety of types, so me may mix several domains of discourse, in our +variables and function symbols. In \Coq{}, we speak in a language with a rich +variety of types, so we may mix several domains of discourse, in our multi-sorted language. For the moment, we just do a few exercises, over a domain of discourse \verb:D: axiomatised as a \verb:Set:, and we consider two predicate symbols \verb:P: and \verb:R: over \verb:D:, of arities -respectively 1 and 2. Such abstract entities may be entered in the context -as global variables. But we must be careful about the pollution of our -global environment by such declarations. For instance, we have already -polluted our \Coq{} session by declaring the variables -\verb:n:, \verb:Pos_n:, \verb:A:, \verb:B:, and \verb:C:. +1 and 2, respectively. -\begin{coq_example} -Reset Initial. -\end{coq_example} \begin{coq_eval} +Reset Initial. Set Printing Width 60. +Set Printing Compact Contexts. \end{coq_eval} -We shall now declare a new \verb:Section:, which will allow us to define -notions local to a well-delimited scope. 
We start by assuming a domain of +We start by assuming a domain of discourse \verb:D:, and a binary relation \verb:R: over \verb:D:: \begin{coq_example} Section Predicate_calculus. @@ -686,24 +649,25 @@ a theory, but rather local hypotheses to a theorem, we open a specific section to this effect. \begin{coq_example} Section R_sym_trans. -Hypothesis R_symmetric : forall x y:D, R x y -> R y x. -Hypothesis R_transitive : forall x y z:D, R x y -> R y z -> R x z. +Hypothesis R_symmetric : forall x y : D, R x y -> R y x. +Hypothesis R_transitive : + forall x y z : D, R x y -> R y z -> R x z. \end{coq_example} -Remark the syntax \verb+forall x:D,+ which stands for universal quantification +Note the syntax \verb+forall x : D,+ which stands for universal quantification $\forall x : D$. \subsection{Existential quantification} We now state our lemma, and enter proof mode. \begin{coq_example} -Lemma refl_if : forall x:D, (exists y, R x y) -> R x x. +Lemma refl_if : forall x : D, (exists y, R x y) -> R x x. \end{coq_example} -Remark that the hypotheses which are local to the currently opened sections +The hypotheses that are local to the currently opened sections are listed as local hypotheses to the current goals. -The rationale is that these hypotheses are going to be discharged, as we -shall see, when we shall close the corresponding sections. +That is because these hypotheses are going to be discharged, as +we shall see, when we shall close the corresponding sections. Note the functional syntax for existential quantification. The existential quantifier is built from the operator \verb:ex:, which expects a @@ -711,19 +675,19 @@ predicate as argument: \begin{coq_example} Check ex. \end{coq_example} -and the notation \verb+(exists x:D, P x)+ is just concrete syntax for -the expression \verb+(ex D (fun x:D => P x))+. +and the notation \verb+(exists x : D, P x)+ is just concrete syntax for +the expression \verb+(ex D (fun x : D => P x))+. 
Existential quantification is handled in \Coq{} in a similar -fashion to the connectives \verb:/\: and \verb:\/: : it is introduced by +fashion to the connectives \verb:/\: and \verb:\/:: it is introduced by the proof combinator \verb:ex_intro:, which is invoked by the specific -tactic \verb:Exists:, and its elimination provides a witness \verb+a:D+ to -\verb:P:, together with an assumption \verb+h:(P a)+ that indeed \verb+a+ +tactic \verb:exists:, and its elimination provides a witness \verb+a : D+ to +\verb:P:, together with an assumption \verb+h : (P a)+ that indeed \verb+a+ verifies \verb:P:. Let us see how this works on this simple example. \begin{coq_example} intros x x_Rlinked. \end{coq_example} -Remark that \verb:intros: treats universal quantification in the same way +Note that \verb:intros: treats universal quantification in the same way as the premises of implications. Renaming of bound variables occurs when it is needed; for instance, had we started with \verb:intro y:, we would have obtained the goal: @@ -773,8 +737,8 @@ End R_sym_trans. All the local hypotheses have been discharged in the statement of \verb:refl_if:, which now becomes a general theorem in the first-order language declared in section -\verb:Predicate_calculus:. In this particular example, the use of section -\verb:R_sym_trans: has not been really significant, since we could have +\verb:Predicate_calculus:. In this particular example, section +\verb:R_sym_trans: has not been really useful, since we could have instead stated theorem \verb:refl_if: in its general form, and done basically the same proof, obtaining \verb:R_symmetric: and \verb:R_transitive: as local hypotheses by initial \verb:intros: rather @@ -802,7 +766,7 @@ Lemma weird : (forall x:D, P x) -> exists a, P a. \end{coq_example} First of all, notice the pair of parentheses around -\verb+forall x:D, P x+ in +\verb+forall x : D, P x+ in the statement of lemma \verb:weird:. 
If we had omitted them, \Coq's parser would have interpreted the statement as a truly trivial fact, since we would @@ -820,7 +784,7 @@ systematically inhabited, lemma \verb:weird: only holds in signatures which allow the explicit construction of an element in the domain of the predicate. -Let us conclude the proof, in order to show the use of the \verb:Exists: +Let us conclude the proof, in order to show the use of the \verb:exists: tactic: \begin{coq_example} exists d; trivial. @@ -836,8 +800,8 @@ We shall need classical reasoning. Instead of loading the \verb:Classical: module as we did above, we just state the law of excluded middle as a local hypothesis schema at this point: \begin{coq_example} -Hypothesis EM : forall A:Prop, A \/ ~ A. -Lemma drinker : exists x:D, P x -> forall x:D, P x. +Hypothesis EM : forall A : Prop, A \/ ~ A. +Lemma drinker : exists x : D, P x -> forall x : D, P x. \end{coq_example} The proof goes by cases on whether or not there is someone who does not drink. Such reasoning by cases proceeds @@ -847,10 +811,11 @@ proper instance of \verb:EM:: elim (EM (exists x, ~ P x)). \end{coq_example} -We first look at the first case. Let Tom be the non-drinker: +We first look at the first case. Let Tom be the non-drinker. +The following combines at once the effect of \verb:intros: and +\verb:destruct:: \begin{coq_example} -intro Non_drinker; elim Non_drinker; - intros Tom Tom_does_not_drink. +intros (Tom, Tom_does_not_drink). \end{coq_example} We conclude in that case by considering Tom, since his drinking leads to @@ -860,9 +825,10 @@ exists Tom; intro Tom_drinks. \end{coq_example} There are several ways in which we may eliminate a contradictory case; -a simple one is to use the \verb:absurd: tactic as follows: +in this case, we use \verb:contradiction: to let \Coq{} find out the +two contradictory hypotheses: \begin{coq_example} -absurd (P Tom); trivial. +contradiction. 
\end{coq_example} We now proceed with the second case, in which actually any person will do; @@ -893,7 +859,7 @@ Check weird. Check drinker. \end{coq_example} -Remark how the three theorems are completely generic in the most general +Note how the three theorems are completely generic in the most general fashion; the domain \verb:D: is discharged in all of them, \verb:R: is discharged in \verb:refl_if: only, \verb:P: is discharged only in \verb:weird: and @@ -901,12 +867,13 @@ the domain \verb:D: is discharged in all of them, \verb:R: is discharged in Finally, the excluded middle hypothesis is discharged only in \verb:drinker:. -Note that the name \verb:d: has vanished as well from +Note, too, that the name \verb:d: has vanished from the statements of \verb:weird: and \verb:drinker:, since \Coq's pretty-printer replaces -systematically a quantification such as \verb+forall d:D, E+, where \verb:d: -does not occur in \verb:E:, by the functional notation \verb:D->E:. -Similarly the name \verb:EM: does not appear in \verb:drinker:. +systematically a quantification such as \texttt{forall d : D, E}, +where \texttt{d} does not occur in \texttt{E}, +by the functional notation \texttt{D -> E}. +Similarly the name \texttt{EM} does not appear in \texttt{drinker}. Actually, universal quantification, implication, as well as function formation, are @@ -935,12 +902,12 @@ intros. generalize H0. \end{coq_example} -Sometimes it may be convenient to use a lemma, although we do not have -a direct way to appeal to such an already proven fact. The tactic \verb:cut: -permits to use the lemma at this point, keeping the corresponding proof -obligation as a new subgoal: +Sometimes it may be convenient to state an intermediate fact. +The tactic \verb:assert: does this and introduces a new subgoal +for this fact to be proved first. The tactic \verb:enough: does +the same while keeping this goal for later. \begin{coq_example} -cut (R x x); trivial. +enough (R x x) by auto. 
\end{coq_example} We clean the goal by doing an \verb:Abort: command. \begin{coq_example*} @@ -951,10 +918,10 @@ Abort. \subsection{Equality} The basic equality provided in \Coq{} is Leibniz equality, noted infix like -\verb+x=y+, when \verb:x: and \verb:y: are two expressions of -type the same Set. The replacement of \verb:x: by \verb:y: in any -term is effected by a variety of tactics, such as \verb:rewrite: -and \verb:replace:. +\texttt{x = y}, when \texttt{x} and \texttt{y} are two expressions of +type the same Set. The replacement of \texttt{x} by \texttt{y} in any +term is effected by a variety of tactics, such as \texttt{rewrite} +and \texttt{replace}. Let us give a few examples of equality replacement. Let us assume that some arithmetic function \verb:f: is null in zero: @@ -1009,10 +976,14 @@ In case the equality $t=u$ generated by \verb:replace: $u$ \verb:with: $t$ is an assumption (possibly modulo symmetry), it will be automatically proved and the corresponding goal will not appear. For instance: -\begin{coq_example} + +\begin{coq_eval} Restart. -replace (f 0) with 0. -rewrite f10; rewrite foo; trivial. +\end{coq_eval} +\begin{coq_example} +Lemma L2 : f (f 1) = 0. +replace (f 1) with (f 0). +replace (f 0) with 0; trivial. Qed. \end{coq_example} @@ -1033,20 +1004,20 @@ predicates over some universe \verb:U:. For instance: \begin{coq_example} Variable U : Type. Definition set := U -> Prop. -Definition element (x:U) (S:set) := S x. -Definition subset (A B:set) := - forall x:U, element x A -> element x B. +Definition element (x : U) (S : set) := S x. +Definition subset (A B : set) := + forall x : U, element x A -> element x B. \end{coq_example} Now, assume that we have loaded a module of general properties about relations over some abstract type \verb:T:, such as transitivity: \begin{coq_example} -Definition transitive (T:Type) (R:T -> T -> Prop) := - forall x y z:T, R x y -> R y z -> R x z. 
+Definition transitive (T : Type) (R : T -> T -> Prop) := + forall x y z : T, R x y -> R y z -> R x z. \end{coq_example} -Now, assume that we want to prove that \verb:subset: is a \verb:transitive: +We want to prove that \verb:subset: is a \verb:transitive: relation. \begin{coq_example} Lemma subset_transitive : transitive set subset. @@ -1071,9 +1042,12 @@ auto. \end{coq_example} Many variations on \verb:unfold: are provided in \Coq. For instance, -we may selectively unfold one designated occurrence: -\begin{coq_example} +instead of unfolding all occurrences of \verb:subset:, we may want to +unfold only one designated occurrence: +\begin{coq_eval} Undo 2. +\end{coq_eval} +\begin{coq_example} unfold subset at 2. \end{coq_example} @@ -1111,6 +1085,7 @@ are {\sl transparent}. \begin{coq_eval} Reset Initial. Set Printing Width 60. +Set Printing Compact Contexts. \end{coq_eval} \section{Data Types as Inductively Defined Mathematical Collections} @@ -1166,11 +1141,14 @@ right; trivial. \end{coq_example} Indeed, the whole proof can be done with the combination of the - \verb:simple: \verb:induction:, which combines \verb:intro: and \verb:elim:, + \verb:destruct:, which combines \verb:intro: and \verb:elim:, with good old \verb:auto:: +\begin{coq_eval} +Abort. +\end{coq_eval} \begin{coq_example} -Restart. -simple induction b; auto. +Lemma duality : forall b:bool, b = true \/ b = false. +destruct b; auto. Qed. \end{coq_example} @@ -1194,7 +1172,7 @@ Check nat_rec. Let us start by showing how to program the standard primitive recursion operator \verb:prim_rec: from the more general \verb:nat_rec:: \begin{coq_example} -Definition prim_rec := nat_rec (fun i:nat => nat). +Definition prim_rec := nat_rec (fun i : nat => nat). \end{coq_example} That is, instead of computing for natural \verb:i: an element of the indexed @@ -1205,22 +1183,27 @@ About prim_rec. \end{coq_example} Oops! 
Instead of the expected type \verb+nat->(nat->nat->nat)->nat->nat+ we -get an apparently more complicated expression. Indeed the type of -\verb:prim_rec: is equivalent by rule $\beta$ to its expected type; this may -be checked in \Coq{} by command \verb:Eval Cbv Beta:, which $\beta$-reduces -an expression to its {\sl normal form}: +get an apparently more complicated expression. +In fact, the two types are convertible and one way of having the proper +type would be to do some computation before actually defining \verb:prim_rec: +as such: + +\begin{coq_eval} +Reset Initial. +Set Printing Width 60. +Set Printing Compact Contexts. +\end{coq_eval} + \begin{coq_example} -Eval cbv beta in - ((fun _:nat => nat) O -> - (forall y:nat, - (fun _:nat => nat) y -> (fun _:nat => nat) (S y)) -> - forall n:nat, (fun _:nat => nat) n). +Definition prim_rec := + Eval compute in nat_rec (fun i : nat => nat). +About prim_rec. \end{coq_example} Let us now show how to program addition with primitive recursion: \begin{coq_example} Definition addition (n m:nat) := - prim_rec m (fun p rec:nat => S rec) n. + prim_rec m (fun p rec : nat => S rec) n. \end{coq_example} That is, we specify that \verb+(addition n m)+ computes by cases on \verb:n: @@ -1244,15 +1227,11 @@ Fixpoint plus (n m:nat) {struct n} : nat := end. \end{coq_example} -For the rest of the session, we shall clean up what we did so far with -types \verb:bool: and \verb:nat:, in order to use the initial definitions -given in \Coq's \verb:Prelude: module, and not to get confusing error -messages due to our redefinitions. We thus revert to the initial state: +\begin{coq_eval} \begin{coq_example} Reset Initial. -\end{coq_example} -\begin{coq_eval} Set Printing Width 60. +Set Printing Compact Contexts. \end{coq_eval} \subsection{Simple proofs by induction} @@ -1261,20 +1240,21 @@ Let us now show how to do proofs by structural induction. We start with easy properties of the \verb:plus: function we just defined. 
Let us first show that $n=n+0$. \begin{coq_example} -Lemma plus_n_O : forall n:nat, n = n + 0. +Lemma plus_n_O : forall n : nat, n = n + 0. intro n; elim n. \end{coq_example} -What happened was that \verb:elim n:, in order to construct a \verb:Prop: -(the initial goal) from a \verb:nat: (i.e. \verb:n:), appealed to the -corresponding induction principle \verb:nat_ind: which we saw was indeed +What happened was that \texttt{elim n}, in order to construct a \texttt{Prop} +(the initial goal) from a \texttt{nat} (i.e. \texttt{n}), appealed to the +corresponding induction principle \texttt{nat\_ind} which we saw was indeed exactly Peano's induction scheme. Pattern-matching instantiated the -corresponding predicate \verb:P: to \verb+fun n:nat => n = n + 0+, and we get -as subgoals the corresponding instantiations of the base case \verb:(P O): , -and of the inductive step \verb+forall y:nat, P y -> P (S y)+. -In each case we get an instance of function \verb:plus: in which its second +corresponding predicate \texttt{P} to \texttt{fun n : nat => n = n + 0}, +and we get as subgoals the corresponding instantiations of the base case +\texttt{(P O)}, and of the inductive step +\texttt{forall y : nat, P y -> P (S y)}. +In each case we get an instance of function \texttt{plus} in which its second argument starts with a constructor, and is thus amenable to simplification -by primitive recursion. The \Coq{} tactic \verb:simpl: can be used for +by primitive recursion. The \Coq{} tactic \texttt{simpl} can be used for this purpose: \begin{coq_example} simpl. @@ -1305,12 +1285,12 @@ We now proceed to the similar property concerning the other constructor Lemma plus_n_S : forall n m:nat, S (n + m) = n + S m. \end{coq_example} -We now go faster, remembering that tactic \verb:simple induction: does the +We now go faster, using the tactic \verb:induction:, which does the necessary \verb:intros: before applying \verb:elim:. 
Factoring simplification and automation in both cases thanks to tactic composition, we prove this lemma in one line: \begin{coq_example} -simple induction n; simpl; auto. +induction n; simpl; auto. Qed. Hint Resolve plus_n_S . \end{coq_example} @@ -1324,7 +1304,7 @@ Lemma plus_com : forall n m:nat, n + m = m + n. Here we have a choice on doing an induction on \verb:n: or on \verb:m:, the situation being symmetric. For instance: \begin{coq_example} -simple induction m; simpl; auto. +induction m as [ | m IHm ]; simpl; auto. \end{coq_example} Here \verb:auto: succeeded on the base case, thanks to our hint @@ -1332,7 +1312,7 @@ Here \verb:auto: succeeded on the base case, thanks to our hint \verb:auto: does not handle: \begin{coq_example} -intros m' E; rewrite <- E; auto. +rewrite <- IHm; auto. Qed. \end{coq_example} @@ -1344,13 +1324,13 @@ the constructors \verb:O: and \verb:S:: it computes to \verb:False: when its argument is \verb:O:, and to \verb:True: when its argument is of the form \verb:(S n):: \begin{coq_example} -Definition Is_S (n:nat) := match n with - | O => False - | S p => True - end. +Definition Is_S (n : nat) := match n with + | O => False + | S p => True + end. \end{coq_example} -Now we may use the computational power of \verb:Is_S: in order to prove +Now we may use the computational power of \verb:Is_S: to prove trivially that \verb:(Is_S (S n)):: \begin{coq_example} Lemma S_Is_S : forall n:nat, Is_S (S n). @@ -1389,8 +1369,11 @@ Actually, a specific tactic \verb:discriminate: is provided to produce mechanically such proofs, without the need for the user to define explicitly the relevant discrimination predicates: +\begin{coq_eval} +Abort. +\end{coq_eval} \begin{coq_example} -Restart. +Lemma no_confusion : forall n:nat, 0 <> S n. intro n; discriminate. Qed. \end{coq_example} @@ -1403,12 +1386,13 @@ may define inductive families, and for instance inductive predicates. 
Here is the definition of predicate $\le$ over type \verb:nat:, as given in \Coq's \verb:Prelude: module: \begin{coq_example*} -Inductive le (n:nat) : nat -> Prop := +Inductive le (n : nat) : nat -> Prop := | le_n : le n n - | le_S : forall m:nat, le n m -> le n (S m). + | le_S : forall m : nat, le n m -> le n (S m). \end{coq_example*} -This definition introduces a new predicate \verb+le:nat->nat->Prop+, +This definition introduces a new predicate +\verb+le : nat -> nat -> Prop+, and the two constructors \verb:le_n: and \verb:le_S:, which are the defining clauses of \verb:le:. That is, we get not only the ``axioms'' \verb:le_n: and \verb:le_S:, but also the converse property, that @@ -1426,7 +1410,7 @@ Check le_ind. Let us show how proofs may be conducted with this principle. First we show that $n\le m \Rightarrow n+1\le m+1$: \begin{coq_example} -Lemma le_n_S : forall n m:nat, le n m -> le (S n) (S m). +Lemma le_n_S : forall n m : nat, le n m -> le (S n) (S m). intros n m n_le_m. elim n_le_m. \end{coq_example} @@ -1442,10 +1426,14 @@ intros; apply le_S; trivial. Now we know that it is a good idea to give the defining clauses as hints, so that the proof may proceed with a simple combination of -\verb:induction: and \verb:auto:. +\verb:induction: and \verb:auto:. \verb:Hint Constructors le: +is just an abbreviation for \verb:Hint Resolve le_n le_S:. +\begin{coq_eval} +Abort. +\end{coq_eval} \begin{coq_example} -Restart. -Hint Resolve le_n le_S . +Hint Constructors le. +Lemma le_n_S : forall n m : nat, le n m -> le (S n) (S m). \end{coq_example} We have a slight problem however. We want to say ``Do an induction on @@ -1453,7 +1441,7 @@ hypothesis \verb:(le n m):'', but we have no explicit name for it. What we do in this case is to say ``Do an induction on the first unnamed hypothesis'', as follows. \begin{coq_example} -simple induction 1; auto. +induction 1; auto. Qed. \end{coq_example} @@ -1483,6 +1471,7 @@ Qed. \begin{coq_eval} Reset Initial. Set Printing Width 60. 
+Set Printing Compact Contexts. \end{coq_eval} \section{Opening library modules} @@ -1552,6 +1541,7 @@ known lemmas about both the successor and the less or equal relation, just ask: \begin{coq_eval} Reset Initial. Set Printing Width 60. +Set Printing Compact Contexts. \end{coq_eval} \begin{coq_example} Search S le. @@ -1562,14 +1552,13 @@ predicate appears at the head position in the conclusion. SearchHead le. \end{coq_example} -A new and more convenient search tool is \verb:SearchPattern: -developed by Yves Bertot. It allows finding the theorems with a -conclusion matching a given pattern, where \verb:_: can be used in -place of an arbitrary term. We remark in this example, that \Coq{} +The \verb:Search: commands also allows finding the theorems +containing a given pattern, where \verb:_: can be used in +place of an arbitrary term. As shown in this example, \Coq{} provides usual infix notations for arithmetic operators. \begin{coq_example} -SearchPattern (_ + _ = _). +Search (_ + _ = _). \end{coq_example} \section{Now you are on your own} |
